\documentclass[10pt]{studiamnew}
\usepackage{graphicx}
\usepackage{amsmath}
\usepackage{amssymb}
\sloppy

\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{axiom}[theorem]{Axiom}
\newtheorem{case}[theorem]{Case}
\newtheorem{condition}[theorem]{Condition}
\newtheorem{conjecture}[theorem]{Conjecture}
\newtheorem{criterion}[theorem]{Criterion}
\newtheorem{definition}[theorem]{Definition}

\theoremstyle{definition}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{algorithm}[theorem]{Algorithm}
\newtheorem{problem}[theorem]{Problem}
\newtheorem{example}[theorem]{Example}
\newtheorem{exercise}[theorem]{Exercise}
\newtheorem{solution}[theorem]{Solution}
\newtheorem{notation}[theorem]{Notation}

\renewcommand{\theequation}{\thesection.\arabic{equation}}
\numberwithin{equation}{section}

\begin{document}
%
\setcounter{page}{1}
\setcounter{firstpage}{1}
\setcounter{lastpage}{4}
\renewcommand{\currentvolume}{??}
\renewcommand{\currentyear}{??}
\renewcommand{\currentissue}{??}
%

%% Place the running title of the paper with 40 letters or less in []
 %% and the full title of the paper in { }.
%\title { New Hybrid Conjugate Gradient method as a Convex combination of PRP and RMIL+ methods}
\title[CGHLB, Hybrid Convex Combination of PRP and RMIL+] %Use the shortened version of the full title
     { New Hybrid Conjugate Gradient Method as a Convex Combination of PRP and RMIL+ Methods}


\author{ Ghania Hadji }
\address{Faculty of Sciences,  Department of Mathematics, Badji Mokhtar University, B.P. 12 Annaba 23000, Algeria\\ Faculty of Science and Technology, Department of Mathematics and Informatics, Mohamed Cherif Messaadia University, P.O.Box 1553, Souk Ahras 41000, Algeria}
 \email{g.hadji@univ-soukahras.dz}
%
\author{ Yamina Laskri}
\address{ESTI,Faculty of Sciences, Department of Mathematics, Badji Mokhtar University, B.P. 12 Annaba 23000, Algeria.}
 \email{yamina.laskri@univ-annaba.org}
%
\author{ Tahar Bechouat}
\address{ Faculty of Science and Technology, Department of Mathematics and Informatics, Mohamed Cherif Messaadia University, P.O.Box 1553, Souk Ahras 41000, Algeria}
 \email{t.bachaouette@univ-soukahras.dz}
%
\author{ Rachid Benzine}
\address{Laboratory LANOS ,Faculty of Sciences, Department of Mathematics, Badji Mokhtar University, B.P. 12 Annaba 23000, Algeria.}
 \email{rabenzine@yahoo.fr}
%
% It is required to enter 2020 MSC.
\subjclass{90C06, 90C26, 65H10, 65K05.}
% Please provide minimum  5 keywords.
 \keywords{Unconstrained optimization, hybrid conjugate gradient method, line
search, descent property, global convergence.}


%The abstract of your paper

\begin{abstract}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
The Conjugate Gradient (CG) method is a powerful iterative approach for
solving large-scale minimization problems, characterized by its simplicity,
low computation cost and good convergence. In this paper, a new hybrid
conjugate gradient HLB method (HLB: Hadji-Laskri-Bechouat) is proposed and
analysed for unconstrained optimization. We compute the parameter $\beta
_{k}^{HLB}$ as a convex combination of the Polak-Ribi\`{e}re-Polyak $\left(
\beta _{k}^{PRP}\right) $ and the Mohd Rivaie-Mustafa Mamat
and Abdelrhaman Abashar $\left( \beta _{k}^{RMIL+}\right) $ parameters, i.e., $\beta
_{k}^{HLB}=\left( 1-\theta _{k}\right) \beta _{k}^{PRP}+\theta _{k}\beta
_{k}^{RMIL+}$. By comparing numerically CGHLB with PRP and RMIL+ and by
using the Dolan and Mor\'{e} CPU performance profiles, we deduce that CGHLB is more
efficient.
\end{abstract}


\maketitle

\section{Introduction}	
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Consider the nonlinear unconstrained optimization problem

\begin{equation}
\underset{x\in 
%TCIMACRO{\U{211d} }%
%BeginExpansion
\mathbb{R}
%EndExpansion
^{n}}{\min }f\left( x\right)  \tag{$1.1$}
\end{equation}

where $f:%
%TCIMACRO{\U{211d} }%
%BeginExpansion
\mathbb{R}
%EndExpansion
^{n}\rightarrow 
%TCIMACRO{\U{211d} }%
%BeginExpansion
\mathbb{R}
%EndExpansion
$ is a continuously differentiable function, bounded from below. The
gradient of $f$ is denoted by $g\left( x\right) .$ To solve this problem, we
start from an initial point $x_{0}\in 
%TCIMACRO{\U{211d} }%
%BeginExpansion
\mathbb{R}
%EndExpansion
^{n}.$ Nonlinear conjugate gradient methods generate sequences $\left\{
x_{k}\right\} $ of the following form:%
\begin{equation}
x_{k+1}=x_{k}+\alpha _{k}d_{k},\text{ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ }%
k=0,1,2,\ldots,  \tag{$1.2$}
\end{equation}

where $x_{k}$ is the current iterate point and $\alpha _{k}>0$ is the step
size which is obtained by line search [6].

\bigskip The iterative formula of the conjugate gradient method is given by
(1.2), where %$\alpha _{k}$ is a steplength which is computed by carrying out a line search, and
 $d_{k}$ is the search direction defined by%
\begin{equation}
d_{k+1}=\left\{ 
\begin{array}{c}
-g_{k}\text{ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ if }k=1 \\ 
-g_{k+1}+\beta _{k}d_{k}\text{ \ if }k\geq 2%
\end{array}%
\right.  \tag{1.3}
\end{equation}

where $\beta _{k}$ is a scalar and $g\left( x\right) $ denotes $\nabla
f\left( x\right) $ [7]. If $f$ \ is a strictly convex quadratic function, namely,%
\begin{equation}
f(x)=\frac{1}{2}x^{T}Hx+b^{T}x,  \tag{1.3bis}
\end{equation}

where $H$ is a positive definite matrix and if $\alpha _{k}$ is the exact
one-dimensional minimizer along the direction $d_{k}$, i.e.,%
\begin{equation}
\alpha _{k}=\arg \underset{\alpha >0}{\min }\left\{ f(x_{k}+\alpha d_{k})\right\}
\tag{1.3tris}
\end{equation}

then (1.2), (1.3), (1.3bis), (1.3tris) is called the linear conjugate
gradient method. Otherwise, (1.2), (1.3) is called the nonlinear conjugate
gradient method.
Conjugate gradient methods can broadly be classified based on the strategies used for the way in which the search direction is updated and the algorithms dealing with the step size minimization along a direction [24]. In [26], a convex combination of LS and FR ([1]) is proposed with a Newton descent direction.

The line search in the non linear conjugate gradient methods is often based
on the standard Wolfe conditions [19] :

\begin{equation}
f\left( x_{k}+\alpha _{k}d_{k}\right) -f\left( x_{k}\right) \leq \rho \alpha
_{k}g_{k}^{t}d_{k}  \tag{1.4}
\end{equation}

\begin{equation}
g_{k+1}^{t}d_{k}\geq \delta g_{k}^{t}d_{k}  \tag{1.5}
\end{equation}

where $0<\rho \leq \delta <1$.

Conjugate gradient methods differ in their way of defining the scalar
parameter $\beta _{k}$. In the literature, there have been proposed several
choices for $\beta _{k}$ which give rise to distinct conjugate gradient
methods [12], [22]. The most well known conjugate gradient methods are the
Hestenes--Stiefel (HS) method [14], the Fletcher--Reeves (FR) method [1], [10],
the Polak-Ribi\`{e}re-Polyak (PRP) method [16], [28], the Conjugate Descent
(CD) method [10], the Liu-Storey (LS) method [15], the Dai-Yuan (DY) method
[8], [9], the Hager and Zhang (HZ) method [13] and the RMIL+ method [17], [18]. The update parameters of
these methods are respectively specified as follows:

$\beta _{k}^{HS}=\frac{g_{k+1}^{T}y_{k}}{d_{k}^{T}y_{k}},$ $\beta _{k}^{FR}=%
\frac{\left\Vert g_{k+1}\right\Vert ^{2}}{\left\Vert g_{k}\right\Vert ^{2}}%
,\ \beta _{k}^{PRP}=\frac{g_{k+1}^{T}y_{k}}{\left\Vert g_{k}\right\Vert ^{2}}%
,\ \beta _{k}^{CD}=-\frac{\left\Vert g_{k+1}\right\Vert ^{2}}{d_{k}^{T}g_{k}}%
\ ,$

$\ \beta _{k}^{LS}=-\frac{g_{k+1}^{T}y_{k}}{d_{k}^{T}g_{k}},$ $\beta
_{k}^{DY}=\frac{\left\Vert g_{k+1}\right\Vert ^{2}}{d_{k}^{T}y_{k}},$ $\beta
_{k}^{HZ}=\left( y_{k}-2d_{k}\frac{\left\Vert y_{k}\right\Vert ^{2}}{%
d_{k}^{T}y_{k}}\right) ^{T}\frac{g_{k+1}}{d_{k}^{T}y_{k}}$,\\
$\ \beta _{k}^{RMIL+}=\frac{g_{k+1}^{T}(g_{k+1}-g_{k}-d_{k})}{\left\Vert d_{k}\right\Vert ^{2}}$ .

Some of these methods, such as Fletcher and Reeves (FR) $\left[ 10\right] $,
Dai and Yuan (DY) $\left[ 8\right] $ and Conjugate Descent (CD) $\left[ 10%
\right] $ have strong convergence properties, but they may have modest
practical performance due to jamming. On the other hand, the methods of
Polak and Ribi\`{e}re and Polyak (PRP) $\left[ 16\right] $, Hestenes and
Stiefel (HS) $\left[ 14\right] $ or Liu and Story (LS) $\left[ 15\right] $
may not generally be convergent, but they often have better computational
performance.

In the process of obtaining more robust and efficient conjugate gradient
methods, some researchers suggested the hybrid conjugate gradient algorithm
which combines the good features of the methods involved in the hybridization. Even though conjugate gradient improvement using hybridization is a classic, deeply investigated problem, it is still an attractive topic for the research community due to its contemporary use in numerous prominent disciplines [27].

The first hybrid conjugate gradient method was given by Touati-Ahmed and
Storey (1990) $\left[ 20\right] $ to avoid the jamming phenomenon.

The researchers were motivated by the works of Andrei $\left[ 3\right] $, $%
\left[ 5\right] $; Dai and Yuan $\left[ 9\right] ;$ Zhang and Zhou $[21].$
Their parameter $\beta _{k}^{N}$ is computed as a convex combination of $%
\beta _{k}^{FR}$ and $\beta _{k}^{\ast }$ other algorithms, i.e%
\begin{equation*}
\beta _{k}^{N}=\left( 1-\theta _{k}\right) \beta _{k}^{FR}+\theta _{k}\beta
_{k}^{\ast }
\end{equation*}

The Wolfe line search was employed to determine the step length $\alpha
_{k}>0$ and the new method proved to be numerically more robust as
compared to FR and other methods. The global convergence was established
under some suitable conditions.

In ($\left[ 5\right] )\ $Andrei has proposed a new hybrid conjugate gradient
algorithm where the parameter $\beta _{k}^{A}$ is computed as a convex
combination of the Polak- Ribi\`{e}re- Polyak and the Dai- Yuan conjugate
gradient algorithms i.e

\begin{equation*}
\beta _{k}^{A}=\left( 1-\theta _{k}\right) \beta _{k}^{PRP}+\theta _{k}\beta
_{k}^{DY}
\end{equation*}

and $\theta _{k}$ is presented to satisfy the conjugacy condition

\begin{equation*}
\theta _{k}=\theta _{k}^{CCOMB}=\frac{\left( y_{k}^{t}g_{k+1}\right) \left(
y_{k}^{t}s_{k}\right) -\left( y_{k}^{t}g_{k+1}\right) \left(
g_{k}^{t}g_{k}\right) }{\left( y_{k}^{t}g_{k+1}\right) \left(
y_{k}^{t}s_{k}\right) -\left\Vert g_{k+1}\right\Vert ^{2}\left\Vert
g_{k}\right\Vert ^{2}}
\end{equation*}

where $s_{k}=x_{k+1}-x_{k}.$ To satisfy Newton direction he takes

\begin{equation*}
\theta _{k}=\theta _{k}^{NDOMB}=\frac{\left(
y_{k}^{t}g_{k+1}-s_{k}^{t}g_{k+1}\right) \left\Vert g_{k}\right\Vert
^{2}-\left( y_{k}^{t}g_{k+1}\right) \left( y_{k}^{t}s_{k}\right) }{%
\left\Vert g_{k+1}\right\Vert ^{2}\left\Vert g_{k}\right\Vert ^{2}-\left(
y_{k}^{t}g_{k+1}\right) \left( y_{k}^{t}s_{k}\right) }
\end{equation*}

but in the combination of HS and DY from Newton direction, he puts

\begin{equation*}
\theta _{k}=\frac{-s_{k}^{t}g_{k+1}}{g_{k}^{t}g_{k+1}}.
\end{equation*}

On the other hand, from Newton direction with modified secant condition
(Hybrid M-Andrei), Andrei has proposed another method

\begin{equation*}
\beta _{k}^{HYBRIDM}=\left( 1-\theta _{k}\right) \beta _{k}^{HS}+\theta
_{k}\beta _{k}^{DY}
\end{equation*}

where 
\begin{equation*}
\theta _{k}=\frac{\left( \frac{\delta \eta _{k}}{s_{k}^{t}s_{k}}-1\right)
s_{k}^{t}g_{k+1}-\frac{y_{k}^{t}g_{k+1}}{y_{k}^{t}s_{k}}\delta \eta _{k}}{%
g_{k}^{t}g_{k+1}+\frac{g_{k}^{t}g_{k+1}}{y_{k}^{t}s_{k}}\delta \eta _{k}}
\end{equation*}

$\delta $ is parameter. In $\left[ 11\right] $ Salah Gazi Shareef and
Hussein Ageel Khatab have introduced a new hybrid CG method

\begin{equation*}
\beta _{k}^{New}=\left( 1-\theta _{k}\right) \beta _{k}^{PRP}+\theta
_{k}\beta _{k}^{BA}
\end{equation*}

where $\beta _{k}^{BA}$ is selected in $\left[ 2\right] $.

Recently, Delladji \textit{et al.} [25] proposed a hybridization of PRP and HZ schemes using the conjugacy condition.

In this paper, we present another hybrid CG algorithm noted CGHLB (HLB is an
abbreviation of Hadji, Laskri and Bechouat), which is a convex combination of
the PRP ($\left[ 16\right] )$ and RMIL+ ($\left[ 17\right] )$ conjugate
gradient algorithms. We are interested in combining these two methods in a
hybrid CG algorithm because PRP has good computational properties and RMIL+
has strong convergence properties. In Section 2, we introduce our hybrid CG
method and prove that it generates descent directions. In Section 3 we
present and prove global convergence results. Numerical results and a
conclusion are presented in Section 4. By comparing numerically CGHLB with
PRP and RMIL+ and by using the Dolan and Mor\'{e} CPU performance profiles, we deduce
that CGHLB is more efficient.


\section{HLB conjugate gradient method}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
The iterates $x_{0},x_{1},\ldots$ of the proposed HLB algorithm are computed by
means of the recurrence $\left( 1.2\right) $ where the step size $\alpha
_{k}>0$ is determined according to the Wolfe line search conditions $\left(
1.4\right) ,\ \left( 1.5\right) $. The directions $d_{k}$ are generated by
the rule:

\begin{equation}
d_{k}=\left\{ 
\begin{array}{c}
-g_{0}\text{ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ if }k=0 \\ 
-g_{k}+\beta _{k-1}^{HLB}d_{k-1}\text{ \ \ \ \ \ \ \ \ \ if }k\geq 1%
\end{array}%
\right.  \tag{2.1}
\end{equation}

where 
\begin{equation*}
\beta _{k}^{HLB}=\left( 1-\theta _{k}\right) \beta _{k}^{PRP}+\theta
_{k}\beta _{k}^{RMIL+}
\end{equation*}

i.e.,
\begin{equation}
\beta _{k}^{HLB}=\left( 1-\theta _{k}\right) \frac{g_{k+1}^{t}y_{k}}{%
\left\Vert g_{k}\right\Vert ^{2}}+\theta _{k}\frac{g_{k+1}^{t}\left(
g_{k+1}-g_{k}-d_{k}\right) }{\left\Vert d_{k}\right\Vert ^{2}}  \tag{2.2}
\end{equation}

HLB is an abbreviation of Hadji, Laskri and Bechouat; $\theta _{k}\ $is a
scalar parameter which will be determined in a specific way to be described
in the following section. Observe that if $\theta _{k}=0$ then $\beta
_{k}^{HLB}=\beta _{k}^{PRP}$ and if $\theta _{k}=1$, then $\beta
_{k}^{HLB}=\beta _{k}^{RMIL+}$.\ On the other hand if $0<\theta _{k}<1$,
then $\beta _{k}^{HLB}$ is a convex combination of $\beta _{k}^{PRP}$ and $%
\beta _{k}^{RMIL+}$. The parameter $\theta _{k}$ is selected in such a way
that at every iteration the conjugacy condition is satisfied.\ It can be
noted that,

\begin{equation}
d_{k+1}=-g_{k+1}+\left( 1-\theta _{k}\right) \frac{g_{k+1}^{t}y_{k}}{%
\left\Vert g_{k}\right\Vert ^{2}}d_{k}+\theta _{k}\frac{g_{k+1}^{t}\left(
g_{k+1}-g_{k}-d_{k}\right) }{\left\Vert d_{k}\right\Vert ^{2}}d_{k} 
\tag{2.3}
\end{equation}

so multiplying both sides of the above equation by $y_{k}$ and using the
conjugacy condition $\left( d_{k+1}^{t}y_{k}=0\right) \ $we have:

\begin{equation}
0=-g_{k+1}^{t}y_{k}+\left( 1-\theta _{k}\right) \frac{g_{k+1}^{t}y_{k}}{%
\left\Vert g_{k}\right\Vert ^{2}}d_{k}^{t}y_{k}+\theta _{k}\frac{%
g_{k+1}^{t}\left( g_{k+1}-g_{k}-d_{k}\right) }{\left\Vert d_{k}\right\Vert
^{2}}d_{k}^{t}y_{k}  \tag{2.4}
\end{equation}

After a simple calculation we get

\begin{equation}
\theta _{k}=\frac{g_{k+1}^{t}y_{k}\left\Vert g_{k}\right\Vert ^{2}\left\Vert
d_{k}\right\Vert ^{2}-\left( g_{k+1}^{t}y_{k}\right) \left(
d_{k}^{t}y_{k}\right) \left\Vert d_{k}\right\Vert ^{2}}{\left( \left(
g_{k+1}^{t} \left( y_{k}-d_{k}\right)\right) \left\Vert g_{k}\right\Vert   %g_{k+1}^{t}
^{2}-\left( g_{k+1}^{t}y_{k}\right) \left\Vert
d_{k}\right\Vert ^{2}\right) \left( d_{k}^{t}y_{k}\right) }  \tag{2.5}
\end{equation}

So, to ensure the convergence of this method when the parameter $\theta _{k}$
lies outside the interval $\left] 0,1\right[ ;\ $i.e., when $\theta _{k}\leq 0$ or 
$\theta _{k}\geq 1$, we prefer to take $\beta _{k}^{HLB}$ as follows:

\begin{equation}
\beta _{k}^{HLB}=\left\{ 
\begin{array}{c}
\left( 1-\theta _{k}\right) \beta _{k}^{PRP}+\theta _{k}\beta _{k}^{RMIL+}%
\text{ \ \ \ if }0<\theta _{k}<1 \\ 
\beta _{k}^{PRP}\text{\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \
\ if\ \ \ }\theta _{k}\leq 0 \\ 
\beta _{k}^{RMIL+}\text{ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ if\
\ }\theta _{k}\geq 1%
\end{array}%
\right.  \tag{2.5(bis)}
\end{equation}

We are now able to present our new algorithm, the Conjugate Gradient CGHLB
Algorithm:

\textbf{CGHLB Algorithm}

\textbf{Step 1: Initialization:}

Set $k=0$, select the initial point $x_{0}\in 
%TCIMACRO{\U{211d} }%
%BeginExpansion
\mathbb{R}
%EndExpansion
^{n}$, select the parameters $0<\rho \leq \delta <1$, and $\varepsilon
>0$.

Compute $f\left( x_{0}\right) $ and $g_{0}=\nabla f\left( x_{0}\right) $%
. Consider $d_{0}=-g_{0}$.

\textbf{Step 2:\ Test for continuation of iterations}:

If $\left\Vert g_{k}\right\Vert \leq \varepsilon $ then stop; else set $%
d_{k}=-g_{k}$.

\textbf{Step 3: Line search}:

Compute $\alpha _{k} > 0$ satisfying the Wolfe line search
conditions $\left( 1.4\right) $ and $\left( 1.5\right) $ and update the
variables, $x_{k+1}=x_{k}+\alpha _{k}d_{k}$; compute $f\left( x_{k+1}\right) 
$, $g_{k+1}$ and $s_{k}=$ $x_{k+1}-x_{k}$; $y_{k}=$ $g_{k+1}-g_{k}$.

\textbf{Step \textbf{4}: } \textbf{$\theta _{k}$ Parameter computation}:

If ${\left( \left(
g_{k+1}^{t} \left( y_{k}-d_{k}\right)\right) \left\Vert g_{k}\right\Vert   %g_{k+1}^{t}
^{2}-\left( g_{k+1}^{t}y_{k}\right) \left\Vert
d_{k}\right\Vert ^{2}\right) \left( d_{k}^{t}y_{k}\right) }=0$;\\  then set $\theta
_{k}=0$, otherwise, compute $\theta _{k}$ as in $\left( 2.5\right) $.

\textbf{Step 5: } \textbf{\ $\beta _{k}^{HLB}$Conjugate gradient parameter
computation}:

If $0<\theta _{k}<1$, then compute $\beta _{k}^{HLB}$ as in $\left(
2.2\right) $

If $\theta _{k}\geq 1$, then set $\beta _{k}^{HLB}=\beta _{k}^{RMIL+}$ 

If\ $\theta _{k}\leq 0$,  then set $\beta _{k}^{HLB}=$ $\beta _{k}^{PRP}$ 

\textbf{Step 6: Direction computation}:

compute $d_{k+1}=-g_{k+1}+\beta _{k}^{HLB}d_{k}$

Set k=k+1 and go to step 3.

The following theorem shows that our method assures the descent condition
when $0<\theta _{k}<1$.

\begin{theorem}
In the algorithm $\left( 1.2\right) ,\left( 1.3\right) $ and $\left(
2.5\right) $ assume that $d_{k\text{ }}$is a descent direction $\left(
g_{k}^{t}d_{k}<0\right) $, and \textit{\ }$\alpha _{k}$\textit{\ is
determined by the Wolfe line search }$\left( 1.4\right) ;\left( 1.5\right) $%
. If $0<\theta _{k}<1$ then the direction $d_{k+1}$given by $\left(
2.3\right) $ is a descent direction.
\end{theorem}

\begin{proof}
Multiplying both sides of $\left( 2.3\right) $ by $g_{k+1}^{T}$ we have:%
\begin{eqnarray*}
g_{k+1}^{T}d_{k+1} &=&-\left\Vert g_{k+1}\right\Vert ^{2}+\left( 1-\theta
_{k}\right) \frac{g_{k+1}^{t}y_{k}}{\left\Vert g_{k}\right\Vert ^{2}}%
d_{k}^{t}g_{k+1} \\
&&+\theta _{k}\frac{g_{k+1}^{t}\left( g_{k+1}-g_{k}-d_{k}\right) }{%
\left\Vert d_{k}\right\Vert ^{2}}d_{k}^{t}g_{k+1}
\end{eqnarray*}

\begin{eqnarray*}
g_{k+1}^{T}d_{k+1} &=&-\left( 1-\theta _{k}+\theta _{k}\right) \left\Vert
g_{k+1}\right\Vert ^{2}+\left( 1-\theta _{k}\right) \frac{g_{k+1}^{t}y_{k}}{%
\left\Vert g_{k}\right\Vert ^{2}}d_{k}^{t}g_{k+1} \\
&&+\theta _{k}\frac{g_{k+1}^{t}\left( g_{k+1}-g_{k}-d_{k}\right) }{%
\left\Vert d_{k}\right\Vert ^{2}}d_{k}^{t}g_{k+1}
\end{eqnarray*}

\begin{eqnarray*}
g_{k+1}^{T}d_{k+1} &=&\left[ -\left( 1-\theta _{k}\right) \left\Vert
g_{k+1}\right\Vert ^{2}+\left( 1-\theta _{k}\right) \frac{g_{k+1}^{t}y_{k}}{%
\left\Vert g_{k}\right\Vert ^{2}}d_{k}^{t}g_{k+1}\right] \\
&&+\left[ -\left( \theta _{k}\right) \left\Vert g_{k+1}\right\Vert
^{2}+\theta _{k}\frac{g_{k+1}^{t}\left( g_{k+1}-g_{k}-d_{k}\right) }{%
\left\Vert d_{k}\right\Vert ^{2}}d_{k}^{t}g_{k+1}\right]
\end{eqnarray*}

\begin{eqnarray*}
g_{k+1}^{T}d_{k+1} &=&\left( 1-\theta _{k}\right) \left[ -\left\Vert
g_{k+1}\right\Vert ^{2}+\frac{g_{k+1}^{t}y_{k}}{\left\Vert g_{k}\right\Vert
^{2}}d_{k}^{t}g_{k+1}\right] \\
&&+\left( \theta _{k}\right) \left[ -\left\Vert g_{k+1}\right\Vert ^{2}+%
\frac{g_{k+1}^{t}\left( g_{k+1}-g_{k}-d_{k}\right) }{\left\Vert
d_{k}\right\Vert ^{2}}d_{k}^{t}g_{k+1}\right]
\end{eqnarray*}

since $0<\theta _{k}<1$ then

\begin{eqnarray*}
g_{k+1}^{T}d_{k+1} &\leq &\left[ -\left\Vert g_{k+1}\right\Vert ^{2}+\frac{%
g_{k+1}^{t}y_{k}}{\left\Vert g_{k}\right\Vert ^{2}}d_{k}^{t}g_{k+1}\right] 
%\tag{2.6}
 \\
&&+\left[ -\left\Vert g_{k+1}\right\Vert ^{2}+\frac{g_{k+1}^{t}\left(
g_{k+1}-g_{k}-d_{k}\right) }{\left\Vert d_{k}\right\Vert ^{2}}%
d_{k}^{t}g_{k+1}\right]   \hspace{3cm}    \text{                      (2.6)}   
 \\ 
\end{eqnarray*} %\tag{2.6}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{equation*}
%g_{k+1}^{T}d_{k+1} \leq \left[ -\left\Vert g_{k+1}\right\Vert ^{2}+\frac{%
%g_{k+1}^{t}y_{k}}{\left\Vert g_{k}\right\Vert ^{2}}d_{k}^{t}g_{k+1}\right] 
%\\%\tag{2.6} \\
%&&
%+\left[ -\left\Vert g_{k+1}\right\Vert ^{2}+\frac{g_{k+1}^{t}\left(
%g_{k+1}-g_{k}-d_{k}\right) }{\left\Vert d_{k}\right\Vert ^{2}}%
%d_{k}^{t}g_{k+1}\right] \tag{2.6}
% \\
%\end{equation*}

%\begin{equation*}\label{Equ3}
 %\begin{split}
%g_{k+1}^{T}d_{k+1} \leq \left[ -\left\Vert g_{k+1}\right\Vert ^{2}+\frac{%
%g_{k+1}^{t}y_{k}}{\left\Vert g_{k}\right\Vert ^{2}}d_{k}^{t}g_{k+1}\right] \\ %\tag{2.6}
%&  +\left[ -\left\Vert g_{k+1}\right\Vert ^{2}+\frac{g_{k+1}^{t}\left(
%g_{k+1}-g_{k}-d_{k}\right) }{\left\Vert d_{k}\right\Vert ^{2}}%
%d_{k}^{t}g_{k+1}\right].  \textit{ (2.6)}            
 %\end{split}% \tag{2.6}
%\end{equation*}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
If the step length $\alpha _{k}$ is chosen by an exact line search, then $%
g_{k+1}^{T}d_{k}=0.$

If the step length $\alpha _{k}$ is chosen by an inexact line search $\left(
g_{k+1}^{T}d_{k}\neq 0\right) $ then we have:

\begin{equation*}
g_{k+1}^{T}d_{k+1}<0
\end{equation*}

because the algorithms of $\left( PRP\right) $ and $\left( RMIL+\right) $
satisfy the descent property.

The proof is completed.
\end{proof}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Global convergence properties}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
The following assumptions are often needed to prove the convergence of the
nonlinear CG :

\textbf{Assumption 1}

\textit{ The level set }$\Omega =\left \{ x\in 
%TCIMACRO{\U{211d} }%
%BeginExpansion
\mathbb{R}
%EndExpansion
^{n}/f\left( x\right) \leq f\left( x_{0}\right) \right \} $\textit{ is
bounded, where }$x_{0}$\textit{\ is the starting point.}

\textbf{Assumption 2}

\textit{ In some neighborhood }$N$\textit{\ of }$\Omega ,$\textit{\ the
objective function is continuously differentiable and its gradient is
Lipschitz continuous, namely, there exists a constant }$l>0$ \textit{such
that:}%
\begin{equation*}
\left\Vert g\left( x\right) -g\left( y\right) \right\Vert \leq l\left\Vert
x-y\right\Vert \ \ for\ any\ x,y\in N
\end{equation*}

Under these assumptions on $f$\ , there exists a constant $\mu $\ such that $%
\left\Vert g\left( x\right) \right\Vert \leq \mu $, for all $x\in $\ $\Omega 
$.

\begin{lemma}
$\left[ 23\right] \ $\textit{Suppose Assumptions 1 and 2 hold, and consider any
conjugate gradient method }$\left( 1.2\right) $ and $\left( 1.3\right) $,
where $d_{k}$ is a descent direction and $\alpha _{k}$ is obtained by the
strong Wolfe line search. If 
\begin{equation}
\sum_{k=1}^{\infty}\frac{1}{\left\Vert %\overset{\infty }{\underset{k=1}{\sum }}\frac{1}{\left\Vert % Zohra \dsum devenu \sum
d_{k}\right\Vert ^{2}}=+\infty  \tag{3.1}
\end{equation}

then 
\begin{equation}
\underset{k\rightarrow \infty }{\lim \inf }\left\Vert g_{k}\right\Vert =0 
\tag{3.2}
\end{equation}
\end{lemma}

Assume that the function $f$ is a uniformly convex function, i.e., there
exists a constant $\Gamma >0$ such that, 
\begin{equation}
for\text{ }all\text{ }x,y\in \Omega :\left( \nabla f\left( x\right) -\nabla
f\left( y\right) \right) ^{t}\left( x-y\right) \geq \Gamma \left\Vert
x-y\right\Vert ^{2}  \tag{3.3}
\end{equation}

and the steplength $\alpha _{k}$ is given by the strong Wolfe line search.

\begin{equation}
f\left( x_{k}+\alpha _{k}d_{k}\right) -f\left( x_{k}\right) \leq \sigma
_{1}\alpha _{k}g_{k}^{t}d_{k}  \tag{3.4}
\end{equation}

\begin{equation}
\left\vert g_{k+1}^{t}d_{k}\right\vert \leq -\sigma _{2}g_{k}^{t}d_{k} 
\tag{3.5}
\end{equation}%
For uniformly convex function which satisfies the above assumptions, we can
prove that the norm of $d_{k+1}$ given by $\left( 2.3\right) $ is bounded
above.

Using the above lemma, we obtain the following theorem.

\begin{theorem}
Suppose that Assumption 1 and 2  hold . Consider the algorithm $\left( 1.2\right)
;\left( 2.3\right) $and $\left( 2.5\right) ,$ where $0\leq \theta _{k}\leq 1$
  and $\alpha _{k}$  is obtained by the strong Wolfe line search  $( \left(
3.4\right) $ and $\left( 3.5\right) ) .$

If $d_{k}$ tends to zero and there exist nonnegative constants $\eta _{1}\ 
$and $\eta _{2}$ such that

\begin{equation}
\left\Vert g_{k}\right\Vert ^{2}\geq \eta _{1}\left\Vert s_{k}\right\Vert
^{2}\ and\ \left\Vert g_{k+1}\right\Vert ^{2}\leq \eta _{2}\left\Vert
s_{k}\right\Vert  \tag{3.6}
\end{equation}%
and $f$ is uniformly convex function, then

\begin{equation}
\underset{k\rightarrow \infty }{\lim }g_{k}=0\text{ }  \tag{3.7}
\end{equation}
\end{theorem}

\begin{proof}
From $\left( 3.3\right) $ it follows that

\begin{equation*}
y_{k}^{t}s_{k}\geq \Gamma \left\Vert s_{k}\right\Vert ^{2}
\end{equation*}

since $0\leq \theta _{k}\leq 1$ , from uniform convexity and $\left(
3.6\right) $ we have

\begin{equation*}
\left\vert \beta _{k}^{HLB}\right\vert \leq \left\vert \frac{g_{k+1}^{t}y_{k}%
}{\left\Vert g_{k}\right\Vert ^{2}}\right\vert +\left\vert \frac{%
g_{k+1}^{t}\left( g_{k+1}-g_{k}-d_{k}\right) }{\left\Vert d_{k}\right\Vert
^{2}}\right\vert
\end{equation*}

\begin{equation*}
\leq \frac{\left\vert g_{k+1}^{t}y_{k}\right\vert }{\left\Vert
g_{k}\right\Vert ^{2}}+\frac{\left\vert g_{k+1}^{t}y_{k}\right\vert }{%
\left\Vert d_{k}\right\Vert ^{2}}+\frac{\left\vert
g_{k+1}^{t}d_{k}\right\vert }{\left\Vert d_{k}\right\Vert ^{2}}
\end{equation*}

\begin{equation*}
\leq \frac{\left\Vert g_{k+1}\right\Vert \left\Vert y_{k}\right\Vert }{%
\left\Vert g_{k}\right\Vert ^{2}}+\frac{\left\Vert g_{k+1}\right\Vert
\left\Vert y_{k}\right\Vert }{\left\Vert d_{k}\right\Vert ^{2}}+\frac{%
\left\Vert g_{k+1}\right\Vert \left\Vert d_{k}\right\Vert }{\left\Vert
d_{k}\right\Vert ^{2}}
\end{equation*}

from Lipschitz condition%
\begin{equation*}
\left\Vert y_{k}\right\Vert \leq \mathit{l}\left\Vert s_{k}\right\Vert
\end{equation*}

\begin{equation*}
\left\vert \beta _{k}^{HLB}\right\vert \leq \frac{\left\Vert
g_{k+1}\right\Vert \left\Vert y_{k}\right\Vert }{\eta _{1}\left\Vert
s_{k}\right\Vert ^{2}}+\frac{\left\Vert g_{k+1}\right\Vert \left\Vert
y_{k}\right\Vert }{\left\Vert d_{k}\right\Vert ^{2}}+\frac{\left\Vert
g_{k+1}\right\Vert }{\left\Vert d_{k}\right\Vert }
\end{equation*}

\begin{equation*}
\leq \frac{\mu l\left\Vert s_{k}\right\Vert }{\eta _{1}\left\Vert
s_{k}\right\Vert ^{2}}+\frac{\mu l\left\Vert s_{k}\right\Vert \alpha _{k}^{2}%
}{\left\Vert s_{k}\right\Vert ^{2}}+\frac{\mu \alpha _{k}}{\left\Vert
s_{k}\right\Vert }
\end{equation*}

\begin{equation*}
=\frac{\mu l}{\eta _{1}\left\Vert s_{k}\right\Vert }+\frac{\mu l\alpha
_{k}^{2}}{\left\Vert s_{k}\right\Vert }+\frac{\mu \alpha _{k}}{\left\Vert
s_{k}\right\Vert }
\end{equation*}

Hence

\begin{equation*}
\left\Vert d_{k+1}\right\Vert \leq \left\Vert g_{k+1}\right\Vert +\left\vert
\beta _{k}^{HLB}\right\vert \left\Vert d_{k}\right\Vert
\end{equation*}

\begin{equation*}
\leq \mu +\frac{\mu l\left\Vert s_{k}\right\Vert }{\eta _{1}\alpha
_{k}\left\Vert s_{k}\right\Vert }+\frac{\mu l\left\Vert s_{k}\right\Vert
\alpha _{k}^{2}}{\alpha _{k}\left\Vert s_{k}\right\Vert }+\frac{\mu \alpha
_{k}\left\Vert s_{k}\right\Vert }{\alpha _{k}\left\Vert s_{k}\right\Vert }
\end{equation*}

\begin{equation*}
=2\mu +\mu l\alpha _{k}+\frac{\mu l}{\eta _{1}\alpha _{k}}
\end{equation*}

which implies that $\left( 3.1\right) $ is true. Therefore, by Lemma 1 we
have $\left( 3.2\right) $, which for uniformly convex functions is
equivalent to $\left( 3.7\right) .$
\end{proof}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%Bachaouet experiments%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{ Numerical results and discussion}
%This section presents a few computed examples that illustrate the performance of the HLB  proposed method . All computations are carried out in Matlab with 16 significant decimal digits.
 In the present numerical
experiments, we analyze the efficiency of $\beta ^{HLB}$, as compared to the classic methods:  $\beta ^{PRP}$ and $\beta ^{RMIL+}$. These comparisons
are based on the number of iterations and CPU time per second to reach
the optimum. All the comparisons are done with two or three different initial
points and different number of  variables  ranging from $2$ to $20000$ . All variables have
been tested on each test function [4]. For the numerical tests,
the strong Wolfe line search parameters have been experimentally fixed to $\rho =10^{-3}$ and $\delta
=10^{-4}$. All tests were terminated when the stopping criterion $\left\Vert
g_{k}\right\Vert \leq \varepsilon $ is fulfilled, where $\varepsilon
=10^{-6} $. When the iteration number exceeds $2000$ or the CPU execution time exceeds $500$ seconds, the test is considered as failed.
%exceeded $500$ seconds %The first condition where the test is failed is when the number
%of iterations exceeded $2000$ and the second is when CPU execution time
%exceeded $500$ seconds.\\ \\ \\

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}[!h]
\begin{center}
  % Requires \usepackage{graphicx}
  % replace aims_logo.eps by your figure file name
  \includegraphics [scale=0.5]{C:/Users/mostapha/Desktop/Studia final/Figure2.jpg}
  \caption{Performance Profile based on the CPU time}
\label{AIMS}
  \end{center}
\end{figure}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Figures 1 and 2 show that the method of $\beta ^{HLB}$ is
superior when compared to $\beta ^{PRP}$ and $\beta ^{RMIL+}$, with the least
duration of CPU time. The highest percentage of successful comparisons is with $\beta ^{HLB}$
at $98.34\%$, followed by $\beta ^{RMIL+}$ with $93.72\%$. However, the
success rate for $\beta ^{PRP}$ is lower, at $90.05\%$. Hence,
our method ($\beta ^{HLB}$) successfully solves the test problems, and it is
competitive with well-known conjugate gradient methods for unconstrained
optimization.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}[!h]% personalize using gait view angles and auto-encoders
	\centering
	\includegraphics[scale=0.5]{C:/Users/mostapha/Desktop/Studia final/Figure1.jpg}%{C:/Users/mostapha/Downloads/AIMS_TEX_related_files/pocopq01.PNG}
	\caption{Performance Profile based on the iteration number}
	\label{fg:DAL model}
\end{figure}

%
\begin{center}
\begin{eqnarray*}
&&\text{\textbf{Table\ 1.} A list of test problems.} \\&&
\tiny
\begin{tabular}[t]{llll}
\hline
No. & Function & Dimension & Initial points \\ \hline
01 & Alpine 1 & $4,5,7,10,12,30,100$ & $\left( 1,...,1\right) $ \\ 
02 & Beale & $2$ & $\left( -1,-1\right) ;\left( 0,0\right) ;\left(
1,1\right) $ \\ 
03 & Booth & $2$ & $\left( -1,-1\right) ;\left( 1,1\right) ;\left(
3,3\right) $ \\ 
04 & Branin & $2$ & $\left( -1,-1\right) ;\left( 0,0\right) ;\left(
1,1\right) $ \\ 
05 & Diagonal 1 & $2,4,6,8,10,20,100,200$ & $\left( 1,...,1\right) ;\left(
2,...,2\right) ;\left( 3,...,3\right) $ \\ 
06 & Diagonal 2 & $2,4,10,100,200,400,500,600,1000$ & $\left(
-1,...,-1\right) ;\left( 0,...,0\right) ;\left( 1,...,1\right) $ \\ 
07 & Diagonal 4 & $1000,5000,8000,10000,14000,16000,20000$ & $\left(
2,...,2\right) ;\left( 5,...,5\right) ;\left( 10,...,10\right) $ \\ 
08 & Exponential & $2,4,6,8,10,12,14,15,16,20$ & $\left( 1,...,1\right) $ \\ 
09 & Griewank & $10,100,500,1000,2000,5000,10000$ & $\left( -2,...,-2\right)
;\left( 2,...,2\right) $ \\ 
10 & Hager & $2,4,10,100,200,500,800,1000$ & $\left( -1,...,-1\right)
;\left( 0,...,0\right) $ \\ 
11 & Himmelblau & $2,4,10,100,1000,5000,10000,20000$ & $\left(
-5,...,-5\right) ;\left( 5,...,5\right) $ \\ 
12 & Leon & $2$ & $\left( -0.5,-0.5\right) ;\left( 0,0\right) ;\left(
0.5,0.5\right) $ \\ 
13 & Matyas & $2$ & $\left( 1,1\right) ;\left( 2,2\right) ;\left( 5,5\right) 
$ \\ 
14 & Penalty & $2,10,100,500,1000,2500,4000,5000,10000$ & $\left(
-1,...,-1\right) ;\left( 0,...,0\right) ;\left( 1,...,1\right) $ \\ 
15 & Perquadratic & $2,4,8,10,20,50,200$ & $\left( -5,...,-5\right) ;\left(
3,...,3\right) ;\left( 5,...,5\right) $ \\ 
16 & Power & $2,4,8,10,20,50,100,500$ & $\left( -2,...,-2\right) ;\left(
2,...,2\right) $ \\ 
17 & Qing & $2,10,100,200,300,400,500,1000,2000$ & $\left( -2,...,-2\right)
;\left( 2,...,2\right) $ \\ 
18 & Quadratic & $2,10,100,200,500,750,1000$ & $\left( 2,...,2\right)
;\left( 4,...,4\right) $ \\ 
19 & Quartic & $2,4,10,100,200,500$ & $\left( 1,...,1\right) ;\left(
2,...,2\right) $ \\ 
20 & Rastrigin & $2,10,100,200,500$ & $\left( -5,...,-5\right) ;\left(
5,...,5\right) $ \\ 
21 & Raydan 1 & $2,4,10,20,50,80,90,100$ & $\left( -2,...,-2\right) ;\left(
2,...,2\right) $ \\ 
22 & Raydan 2 & $2,10,100,500,1000,2000,3000$ & $\left( -2,...,-2\right)
;\left( 2,...,2\right) $ \\ 
23 & Rosenbrock & $2,10,10,50,100,200,1000,2000,5000,10000$ & $\left(
0,...,0\right) $ \\ 
24 & Schwefel 2. 20 & $2,4,10,20$ & $\left( -1,...,-1\right) ;\left(
2,...,2\right) $ \\ 
25 & Schwefel 2. 21 & $5,10,15,20$ & $\left( 1,...,1\right) ;\left(
2,...,2\right) $ \\ 
26 & Schwefel 2. 23 & $2,5,10,20$ & $\left( -1,...,-1\right) ;\left(
1,...,1\right) $ \\ 
27 & Sphere & $2,10,20,100,1000,5000,20000$ & $\left( -4,...,-4\right)
;\left( 4,...,4\right) $ \\ 
28 & Styblinski & $2,10,100,500,1000,2000,5000$ & $\left( 0,...,0\right)
;\left( 2,...,2\right) $ \\ 
29 & Sumsquares & $2,10,20,100,300,500,1000$ & $\left( 5,...,5\right)
;\left( 10,...,10\right) $ \\ \hline
  \normalsize
\end{tabular}%
\end{eqnarray*}
\end{center}

\bigskip%.....ZOHRA%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Conclusion}

Numerous studies have been devoted to developing and improving hybrid conjugate gradient methods. In this paper we have presented a new convex hybridization of the PRP and RMIL+ conjugate gradient algorithms, HLB. The global convergence of our method is demonstrated for $0<\theta _{k}<1$. Numerical experiments reveal that our method reaches the optimum in fewer iterations and less CPU time compared to RMIL+ and PRP.







%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%



%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\begin{thebibliography}{99}

\bibitem{1}Al-Baali, M., \emph{Descent property and global convergence\ of
Fletcher-Reeves method with inexact line search} , IMA J. Numer. Anal., \textbf{5}(1985), no. 1, 121-124.

\bibitem{2}Al-Bayati, A.Y., Al-Assady, N.H., \emph{Conjugate Gradient Method} ,
Technical Research report,Technical Research, school of computer studies, Leeds University , 1986.

\bibitem{3}Andrei, N., \emph{Hybrid Conjugate Gradient Algorithm for
Unconstrained Optimization} ,J. Optim. Theory Appl., \textbf{141}(2009), no. 2, 249-264.

\bibitem{4}Andrei, N., \emph{An unconstrained optimization test functions collection}, Adv. Model. Optim., \textbf{10}(2008), no. 1, 147-161.      

\bibitem{5}Andrei, N., \emph{Another hybrid conjugate gradient algorithm for
unconstrained optimization}, Numer. Algorithms., \textbf{47}(2008), no. 2, 143-156 .

\bibitem{6} Bongartz, I. ,  Conn, A.R.,  Gould, N.I.M., Toint,  P.L., \emph{CUTE:
constrained and unconstrained testing environments} , ACM Trans. Math. Softw., \textbf{21}(1995) , no. 1, 123-160.

\bibitem{7} Daniel, J.W., \emph{The conjugate gradient method for linear and
nonlinear operator equations}, SIAM J. Optim., \textbf{10}(1967), no. 1, 10-26.

\bibitem{8}Dai, Y.H., Yuan,  Y., \emph{A nonlinear conjugate gradient method with a strong global convergence property}, SIAM J. Optim., \textbf{10}(1999),  no. 1, 177-182 .

\bibitem{9}Dai,  Y.H., Yuan,  Y., \emph{An efficient hybrid conjugate gradient
method for unconstrained optimization}, Ann. Oper. Res., \textbf{103}(2001), no. 1, 33-47.

\bibitem{10}Fletcher, R., \emph{Practical Methods of Optimization,  vol. 1:
Unconstrained Optimization}, John Wiley \& Sons, New York, 1987.

\bibitem{11}Gazi, S., Khatab,  H., \emph{New iterative conjugate gradient method
for nonlinear unconstrained optimization using homotopy technique}, IOSR
Journal of Mathematics, (2014), 78-82. 

\bibitem{12} Hager, W.W.,  Zhang, H., \emph{A survey of nonlinear conjugate
gradient methods}, Pac. J. Optim., \textbf{2}(2006), no. 1, 35-58.

\bibitem{13}Hager, W.W., Zhang, H., \emph{A new conjugate gradient method with
guaranteed descent and an efficient line search}, SIAM J. Optim., \textbf{16}(2005), no. 1, 170-192.

\bibitem{14} Hestenes, M., \emph{Methods of conjugate gradients for solving linear
systems}, Journal of Research of the National Bureau of Standards, \textbf{49}(1952), no. 22,
409-436.

\bibitem{15} Liu,  Y.,  Storey, C., \emph{Efficient generalized conjugate gradient
algorithms , Part 1}, J. Optim. Theory Appl., \textbf{69}(1991), no. 1, 129-137 .

\bibitem{16}Polyak, B.T., \emph{The conjugate gradient method in extremal
problems}, Comput. Math. Math. Phys., \textbf{9}(1969), no. 4,  94-112.

\bibitem{17} Rivaie,  M., Mustafa, M.,  Abashar, A., \emph{A new class of nonlinear conjugate gradient coefficients with exact and inexact line searches} ,  Appl. Math. Comput., \textbf{268}(2015) , 1152-1163.

\bibitem{18}Rivaie,  M., Mustafa , M., June,  L.W., Mohd,  I., \emph{A new class of
nonlinear conjugate gradient coefficient with global convergence
properties} , Appl. Math. Comput.,  \textbf{218}(2012), no. 22, 11323-11332.

\bibitem{19}Shanno, D.F., \emph{Conjugate gradient methods with inexact
searches} , Math. Oper. Res ., \textbf{3}(1978), no. 3, 244-256.

\bibitem{20}Touati-Ahmed, D., Storey,  C.,\emph{ Efficient hybrid conjugate
gradient technique}, J. Optim. Theory Appl., \textbf{64}(1990), no. 2, 379-397.

\bibitem{21}Zhang,  L., Zhou, W., \emph{Two descent hybrid conjugate gradient
methods for optimization}, J. Comput. Appl. Math., \textbf{216}(2008), no. 1, 251-264.

\bibitem{22}Zhifeng,  D., \emph{Comments on hybrid conjugate gradient algorithm
for unconstrained optimization}, J. Optim. Theory Appl., \textbf{175}(2017), no. 1, 286-291.

\bibitem{23}Zoutendijk, G., \emph{Nonlinear Programming , Computational Methods},
 Integer and Nonlinear Programming (J. Abadie, ed.), North-Holland,
Amsterdam, (1970), 37-86.

\bibitem{24}Andrei, N., \emph{Nonlinear Conjugate Gradient Methods for Unconstrained Optimization}, Springer International Publishing, 2020.

\bibitem{25} Delladji, S., Belloufi, M., Sellami, B., \emph{Behavior of the combination of PRP and HZ methods for unconstrained optimization}, Numer. Algebra Control Optim., \textbf{11}(2021), no. 3, 377-389.

\bibitem{26}Djordjević,  S.S., \emph{ New hybrid conjugate gradient method as a convex combination of LS and FR methods},  Acta Math. Sci. Ser. B (Engl. Ed.), \textbf{39}(2019), no 1,  214-228.

\bibitem{27} Wang,  L.J.,  Xu, L.,  Xie, Y.X.,  Du, Y.X.,  Han, X., \emph{A new hybrid conjugate gradient method for
dynamic force reconstruction}, Advances in Mechanical Engineering, \textbf{11}(2019), no. 1, 1–21.


\bibitem{28} Polak, E., Ribiere, G., \emph{Note sur la convergence des m\'{e}thodes de directions conjug\'{e}es},  ESAIM: Math. Model. Numer. Anal., \textbf{3}(1969), no. R1, 35-43.



\end{thebibliography}
\end{document}
