
Whale Optimization Algorithm Code Implementation || WOA CODE || ~xRay Pixy

Whale Optimization Algorithm Code Files (MATLAB)

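Each function below should be saved in its own .m file named after the function (obj_fun.m, test_fun_info.m, initialize.m, WOA.m); the driver script at the end is then run from the same folder.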

% Helper: return the plotting range used to visualize the selected benchmark function
function [x, y] = obj_fun(test_fun)
switch test_fun
    case 'F1'
        x = -100:2:100; y = x;
    case 'F2'
        x = -10:2:10; y = x;
end
end
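If the range helper above is meant for visualization (an assumption, since the original snippet produced no output), a minimal sketch for plotting the F1 search space with the returned range could look like this:

% Sketch only: visualize benchmark F1 over the range returned by obj_fun
[x, y] = obj_fun('F1');           % plotting range for F1
[X, Y] = meshgrid(x, y);          % 2-D grid over the search space
Z = X.^2 + Y.^2;                  % F1 (sphere function) evaluated on the grid
surf(X, Y, Z);
title('F1 Search Space');
xlabel('x_1'); ylabel('x_2'); zlabel('F1(x)');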


% Return lower/upper bounds, dimension, and fitness handle for the selected benchmark
function [LB,UB,D,FitFun]=test_fun_info(C)
switch C
    case 'F1'
        FitFun = @F1;
        LB = -100; 
        UB = 100;
        D = 30;
    case 'F2'
        FitFun = @F2;
        LB = -10;
        UB = 10;
        D = 30;
end
% F1 Test Function
    function r = F1(x)
        r = sum(x.^2);
    end
% F2 Test Function
    function r = F2(x)
        r = sum(abs(x))+prod(abs(x));
    end
end
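To test the optimizer on an objective that is not listed in test_fun_info, one option is to pass an anonymous function handle directly to WOA (a sketch only; the name F3, the Schwefel 2.21 objective, and the bounds below are illustrative, not part of the original files):

% Sketch only: run WOA on a custom objective without editing test_fun_info
FitFun_F3 = @(x) max(abs(x));     % hypothetical F3: Schwefel 2.21
LB = -100; UB = 100; D = 30;
[Best_Val_F3, Best_Pos_F3] = WOA(30, 200, LB, UB, D, FitFun_F3);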

% Randomly initialize agent positions within the search-space bounds
function Position = initialize(Pop_Size,D,UB,LB)
SS_Bounds = size(UB,2);   % number of bound entries (1 = same bound for every dimension)

if SS_Bounds == 1
    Position = rand(Pop_Size,D).*(UB-LB)+LB;
end

if SS_Bounds>1
    for i = 1:D
        UB_i = UB(i);
        LB_i = LB(i);
        Position(:,i) = rand(Pop_Size,1).*(UB_i-LB_i)+LB_i; 
    end
end
end
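Both bound styles are supported: a single scalar pair applied to every dimension, or one entry per dimension. For example (illustrative values only):

% Sketch only: initialize 20 agents in 3 dimensions with per-dimension bounds
LB_vec = [-10 -5  0];
UB_vec = [ 10  5  1];
P = initialize(20, 3, UB_vec, LB_vec);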

% Whale Optimization Algorithm (Mirjalili and Lewis, 2016)
function [Best_Val,Best_Pos,Convergence_Curve]=WOA(Pop_Size,MaxT,LB,UB,D,FitFun)
Best_Pos = zeros(1,D);
Best_Val = inf;   % minimization: start from the worst possible score

Position = initialize(Pop_Size,D,UB,LB);
Convergence_Curve = zeros(1,MaxT);

T = 0;

while T<MaxT
    for i = 1:size(Position,1)
        % Clamp any agent that left the search space back onto the boundary
        CheckUB = Position(i,:)>UB;
        CheckLB = Position(i,:)<LB;
        Position(i,:) = (Position(i,:).*(~(CheckUB+CheckLB)))+UB.*CheckUB+LB.*CheckLB;
        %Calculate Fitness Values
        Fitness_Val = FitFun(Position(i,:));
        %Compare Fitness Values
        if Fitness_Val<Best_Val
            Best_Val = Fitness_Val;
            Best_Pos = Position(i,:);
        end
    end
    a = 2-T*((2)/MaxT);     % a decreases linearly from 2 to 0 over the iterations
    a2 = -1+T*((-1)/MaxT);  % a2 decreases linearly from -1 to -2 (used to draw l)
    
    %Agents Position Update (New Positions)
    for i=1:size(Position,1)
        r1=rand();
        r2=rand();
        A = 2*a*r1-a;       % coefficient A (controls exploration vs exploitation)
        C = 2*r2;           % coefficient C
        b = 1;              % spiral shape constant
        l = (a2-1)*rand+1;  % random number in [a2,1] for the spiral update
        p = rand();         % switches between shrinking encircling (p<0.5) and spiral (p>=0.5)
        for j = 1:size(Position,2)
            if p<0.5
                if abs(A)>1
                    % Exploration: move toward a randomly chosen search agent
                    rand_best_index=floor(Pop_Size*rand()+1);
                    X_rand = Position(rand_best_index,:);
                    D_X_rand = abs(C*X_rand(j)-Position(i,j));
                    Position(i,j) = X_rand(j)-A*D_X_rand;
                elseif abs(A)<1
                    % Exploitation: encircle the best solution found so far
                    D_Best = abs(C*Best_Pos(j)-Position(i,j));
                    Position(i,j) = Best_Pos(j)-A*D_Best;
                end
            elseif p>=0.5
                % Spiral (bubble-net) update around the best solution
                distance2Best = abs(Best_Pos(j)-Position(i,j));
                Position(i,j) = distance2Best*exp(b.*l).*cos(l.*2*pi)+Best_Pos(j);
            end
        end  
    end
    T = T + 1;  %Counter Increment
    Convergence_Curve(T) = Best_Val;
    disp(['Iteration: ',num2str(T),'   Best Value: ',num2str(Best_Val)]);
end
end
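For reference, the three position-update rules coded in the loop above are the standard WOA equations (Mirjalili and Lewis, 2016), written here in the notation of the code:

Exploration  (p < 0.5, |A| >= 1):  X(T+1) = X_rand - A*|C*X_rand - X(T)|
Encircling   (p < 0.5, |A| < 1):   X(T+1) = Best_Pos - A*|C*Best_Pos - X(T)|
Spiral       (p >= 0.5):           X(T+1) = |Best_Pos - X(T)|*exp(b*l)*cos(2*pi*l) + Best_Pos

where A = 2*a*r1 - a and C = 2*r2 with r1, r2 random in [0, 1], a decreases linearly from 2 to 0, b = 1 is the spiral constant, and l is a random number drawn from [a2, 1] with a2 decreasing linearly from -1 to -2.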


clc
clear
Pop_Size = 100;          % number of search agents (whales)
Objective_Fun = 'F2';    % benchmark to optimize ('F1' or 'F2')
MaxT = 500;              % maximum number of iterations

[LB,UB,D,FitFun] = test_fun_info(Objective_Fun);
[Best_Val,Best_Pos,Sol_Convergence]=WOA(Pop_Size,MaxT,LB,UB,D,FitFun);


figure;
semilogy(Sol_Convergence,'Color','r');
title('Convergence Curve');
xlabel('Iteration');
ylabel('Best Value');
axis tight
grid on
box on
legend('WOA')

disp(['Best Position: ',num2str(Best_Pos)]);
disp(['Best Value: ',num2str(Best_Val)]);
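Because WOA is stochastic, a single run can be misleading when comparing algorithms or benchmarks. A small sketch for averaging the result over several independent runs (num_runs = 10 is an arbitrary choice):

% Sketch only: average the best value over several independent runs
num_runs = 10;
vals = zeros(1, num_runs);
for run = 1:num_runs
    [vals(run), ~, ~] = WOA(Pop_Size, MaxT, LB, UB, D, FitFun);
end
fprintf('Mean best value over %d runs: %g\n', num_runs, mean(vals));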








