\documentclass[10pt]{studiamnew}
\usepackage{graphicx}
\usepackage{amsmath}
\usepackage{amssymb}
\sloppy

\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{axiom}[theorem]{Axiom}
\newtheorem{Algorithm}[theorem]{Algorithm}
\newtheorem{Assumption}[theorem]{Assumption}
\newtheorem{case}[theorem]{Case}
\newtheorem{condition}[theorem]{Condition}
\newtheorem{conjecture}[theorem]{Conjecture}
\newtheorem{criterion}[theorem]{Criterion}
\newtheorem{definition}[theorem]{Definition}

\theoremstyle{definition}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{algorithm}[theorem]{Algorithm}
\newtheorem{problem}[theorem]{Problem}
\newtheorem{example}[theorem]{Example}
\newtheorem{exercise}[theorem]{Exercise}
\newtheorem{solution}[theorem]{Solution}
\newtheorem{notation}[theorem]{Notation}

\renewcommand{\theequation}{\thesection.\arabic{equation}}
\numberwithin{equation}{section}

\begin{document}
%
\setcounter{page}{1}
\setcounter{firstpage}{1}
\setcounter{lastpage}{4}
\renewcommand{\currentvolume}{??}
\renewcommand{\currentyear}{??}
\renewcommand{\currentissue}{??}
%
\title{Modified inertial Halpern method for the split null point problem in Banach spaces}
\author{H. A. Abass}
\address{School of Mathematics, Statistics and Computer Science,\\ University of KwaZulu-Natal, Durban, South Africa.}
\address{DSI-NRF Center of Excellence in Mathematical and Statistical Sciences (CoE-MaSS).}
\email{hammedabass548@gmail.com, abassh@ukzn.ac.za}
%
\author{G. C. Ugwunnadi}
\address{Department of Mathematics,\\ University of Eswatini, Kwaluseni.}
\address{Department of Mathematics and Applied Mathematics,\\ Sefako Makgatho Health Sciences University,\\ P.O. Box 94 Medunsa 0204, Pretoria, South Africa.}
\email{ Ugwunnadi4u@yahoo.com}
%
\author{O. K. Narain}
\address{School of Mathematics, Statistics and Computer Science,\\ University of KwaZulu-Natal, Durban, South Africa.}
\email{Naraino@ukzn.ac.za}
%
\subjclass{47H06, 47H09, 47J05, 47J25.}
\keywords{Monotone variational inclusion problem; split feasibility problem; firmly nonexpansive-type mapping; fixed point problem; Inertial method.}
\begin{abstract}
In this paper, we study the split null point problem in reflexive Banach spaces. Using the Bregman technique together with a modified inertial Halpern method, we approximate a solution of the split null point problem. We also establish a strong convergence result for approximating a solution of the aforementioned problem. It is worth mentioning that the iterative algorithm employed in this study is designed in such a way that it does not require prior knowledge of the operator norm. We display some numerical examples to illustrate the performance of the proposed iterative method. The results discussed in this paper extend and complement many related results in the literature. 
\end{abstract}
\maketitle

\section{Introduction}
\noindent Let $E$ be a reflexive Banach space with dual space $E^*$ and let $Q$ be a nonempty, closed and convex subset of $E$. Let $g: E \rightarrow (-\infty, + \infty]$ be a proper, lower semicontinuous and convex function. The Fenchel conjugate of $g$, denoted by $g^*: E^* \rightarrow (-\infty,+ \infty]$, is defined by
\begin{align*}
g^*(x^*)=\sup\{\langle x^*, x\rangle-g(x): x \in E\},~x^*\in E^*.
\end{align*}
The domain of $g$ is denoted by $dom (g)=\{x \in E: g(x)< +\infty\}$. For any $x \in int~dom (g)$ and $y \in E$, we define the right-hand derivative of $g$ at $x$ in the direction of $y$ by
\begin{align*}
g^{0}(x, y)=\lim_{t \rightarrow 0^+}\frac{g(x+ty)-g(x)}{t}.
\end{align*} 

Let $g:E \rightarrow (-\infty, + \infty]$ be a function. Then $g$ is said to be: 
\begin{itemize}
	\item[(i)] G\^{a}teaux differentiable at $x$ if $\lim_{t \rightarrow 0^+}\frac{g(x+ty)-g(x)}{t}$ exists for any $y$. In this case, $g^0(x,y)$ coincides with $\langle \nabla g(x), y\rangle$, where $\nabla g(x)$ is the value of the gradient $\nabla g$ of $g$ at $x$;
	\item[(ii)] G\^{a}teaux differentiable, if it is G\^{a}teaux differentiable at every $x \in int~dom (g)$;
	\item[(iii)] Fr\'{e}chet differentiable at $x$, if the above limit is attained uniformly in $\|y\|=1$;
	\item[(iv)] uniformly Fr\'{e}chet differentiable on a subset $Q$ of $E$, if the above limit is attained uniformly for $x \in Q$ and $\|y\|=1$;
	\item[(v)] essentially smooth, if the subdifferential $\partial{g}$ of $g$ is both locally bounded and single-valued on its domain, where $\partial{g(x)}= \{w \in E^*: g(y) -g(x) \geq \langle w,y-x \rangle,~\forall~y \in E \};$
	\item[(vi)] essentially strictly convex, if $(\partial g)^{-1}$ is locally bounded on its domain and $g$ is strictly convex on every convex subset of $dom~ \partial g$;
	\item[(vii)] Legendre, if it is both essentially smooth and essentially strictly convex. See \cite{Bauc,Bau} for more details on Legendre functions.
\end{itemize}
Alternatively, a function $g$ is said to be Legendre if it satisfies the following conditions:
\begin{itemize}
	\item[(i)] $int~dom (g)$ is nonempty, $g$ is G\^{a}teaux differentiable on $int~dom (g)$ and $dom \nabla g=int~dom (g)$;
	\item[(ii)] $int~dom (g^*)$ is nonempty, $g^*$ is G\^{a}teaux differentiable on $int~dom (g^*)$ and $dom \nabla g^*=int~dom (g^*)$.
\end{itemize}
Let $E$ be a Banach space and $B_s:=\{z \in E: \|z\| \leq s\}$ for all $s > 0$. Then a function $g : E \rightarrow \mathbb{R}$ is said to be uniformly convex on bounded subsets of $E$ (see \cite[pp. 203 and 221]{Za}) if $\rho_{s}(t) > 0$ for all $s, t > 0,$ where $\rho_s: [0, + \infty) \rightarrow [0, \infty]$ is defined by
\begin{align*}
\rho_{s}(t)=\inf_{x, y \in B_{s}, \|x-y\|=t, \alpha \in (0,1)}\frac{\alpha g(x)+ (1-\alpha)g(y)-g(\alpha x+ (1-\alpha)y)}{\alpha(1-\alpha)},
\end{align*}
for all $t \geq 0$, with $\rho_s$ denoting the gauge of uniform convexity of $g$. The function $g$ is said to be uniformly smooth on bounded subsets of $E$ (see \cite[p. 221]{Za}) if $\lim_{t \downarrow 0} \frac{\sigma_{s}(t)}{t}=0$ for all $s > 0,$ where $\sigma_s: [0, +\infty) \rightarrow [0,\infty]$ is defined by
\begin{align*}
\sigma_{s}(t)=\sup_{x \in B_s, y \in S_E, \alpha \in (0,1)}\frac{\alpha g(x+ (1-\alpha)ty)+ (1-\alpha)g(x-\alpha t y)-g(x)}{\alpha(1-\alpha)},
\end{align*}
for all $t \geq 0$. Furthermore, $g$ is said to be uniformly convex if the function $\delta g : [0, + \infty) \rightarrow [0, + \infty)$ defined by
\begin{align*}
\delta g(t): =\sup\Big\{\frac{1}{2}g(x)+ \frac{1}{2}g(y)-g\Big(\frac{x+y}{2}\Big): x, y \in dom(g),~\|y-x\|=t\Big\},
\end{align*} 
satisfies $\lim_{t \downarrow 0} \frac{\delta g(t)}{t}=0.$
\begin{definition}\cite{Breg}
	Let $E$ be a Banach space. A function $g:E \rightarrow (-\infty, \infty]$ is said to be proper if its domain $dom(g)$ is nonempty. Let $g:E \rightarrow (-\infty, \infty]$ be a convex and G\^{a}teaux differentiable function. Then the Bregman distance corresponding to $g$ is the function $D_g: dom(g) \times int~dom(g) \rightarrow \mathbb{R}$ defined by
	\begin{align}\label{1}
	D_g(x, y):=g(x)-g(y)- \langle x-y, \nabla^{g}_{E}(y)\rangle,~\forall~x, y \in E.
	\end{align}
	It is clear that $D_g(x, y) \geq 0$ for all $x \in dom(g)$ and $y \in int~dom(g)$.
\end{definition}
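\noindent For instance, if $E$ is a real Hilbert space and $g(x)=\frac{1}{2}\|x\|^2$, then $\nabla g$ is the identity mapping and the Bregman distance reduces to $D_g(x,y)=\frac{1}{2}\|x-y\|^2$.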
\noindent It is well known that the Bregman distance $D_g$ does not satisfy all the properties of a metric, since $D_g$ fails to satisfy the symmetry and triangle inequality properties. However, the Bregman distance satisfies the following so-called three point identity: for any $x \in dom (g)$ and $y, z \in int~dom (g)$,
\begin{align}\label{2}
D_g(x,z)=D_g(x,y) +D_g(y,z) + \langle x-y, \nabla^{g}_{E}(y)-\nabla^{g}_{E}(z)\rangle.
\end{align}
In particular,
\begin{align*}
D_g(x,y)=-D_g(y,x) + \langle y-x, \nabla^{g}_{E}(y)-\nabla^{g}_{E}(x)\rangle,~\forall~x, y \in E.
\end{align*} 
\noindent The relationship between $D_g$ and $\|\cdot\|$ is guaranteed when $g$ is strongly convex with strong convexity constant $\rho>0$, that is,
\begin{align}\label{2.3a}
D_g(x,y) \geq \frac{\rho}{2}\|x-y\|^2,~\forall~x \in dom (g),~y \in int dom (g).
\end{align}
\noindent Let $g: E \rightarrow \mathbb{R}$ be a strictly convex and G\^{a}teaux differentiable function and $T: Q \rightarrow int~dom(g)$ be a mapping. A point $x \in Q$ is called a fixed point of $T$ if $Tx=x$. We denote by $Fix(T)$ the set of all fixed points of $T$. Furthermore, a point $p \in Q$ is called an asymptotic fixed point of $T$ if $Q$ contains a sequence $\{x_n\}$ which converges weakly to $p$ such that $\lim\limits_{n \rightarrow \infty}\|Tx_n-x_n\|=0$. We denote by $\hat{Fix}(T)$ the set of asymptotic fixed points of $T$.\\
Let $Q$ be a nonempty, closed and convex subset of $int~dom(g)$. An operator $T: Q \rightarrow int~dom(g)$ is said to be:
\begin{itemize}
	\item[(i)] Bregman relatively nonexpansive, if $Fix(T) \neq \emptyset$, and 
	\begin{align*}
	D_g(p, Tx) \leq D_g(p, x),~\forall~p \in Fix(T),~ x \in Q~\text{and}~\hat{Fix}(T)=Fix(T).
	\end{align*}
	\item[(ii)] Bregman quasi-nonexpansive mapping if $Fix(T) \neq \emptyset$ and
	\begin{align*}
	D_g(p, Tx)\leq D_g(p, x), \forall~x \in Q~\text{and}~p \in Fix(T).
	\end{align*}
	\item[(iii)] Bregman firmly nonexpansive (BFNE), if 
	\begin{align*}
	\langle \nabla_{E}^{g}(Tx)-\nabla_{E}^{g}(Ty), Tx-Ty\rangle \leq \langle \nabla_{E}^{g}(x)-\nabla_{E}^{g}(y), Tx-Ty\rangle,~\forall~x, y \in Q.
	\end{align*}
\end{itemize}
\begin{definition}\cite{Gaz}
	Let $Q$ be a nonempty, closed and convex subset of a reflexive Banach space $E$ and $g:E \rightarrow (-\infty, +\infty]$ be a strongly coercive Bregman function. Let $\beta$ and $\gamma$ be real numbers with $\beta \in (-\infty, 1)$ and $\gamma \in [0, \infty)$, respectively. Then a mapping $T: Q \rightarrow E$ with $Fix(T) \neq \emptyset$ is called Bregman $(\beta, \gamma)$-demigeneralized if for any $x \in Q$ and $p \in Fix(T)$,
	\begin{align*}
	\langle x-p, \nabla_{E}^{g}(x)-\nabla_{E}^{g}(Tx)\rangle \geq (1-\beta)D_g(x, Tx) + \gamma D_g(Tx,x).
	\end{align*}
\end{definition}
\noindent For modelling inverse problems arising from phase retrieval and medical image reconstruction (see \cite{Bry}), Censor and Elfving \cite{Cen} introduced the Split Feasibility Problem (SFP) in 1994, which is to find 
\begin{align}\label{1.3}
u^* \in C~\text{such that}~Ku^* \in Q;
\end{align}
where $C$ and $Q$ are nonempty, closed and convex subsets of real Banach spaces $E_1$ and $E_2$, respectively, and $K: E_1\rightarrow E_2$ is a bounded linear operator. The SFP has been well studied in the framework of real Hilbert spaces and of uniformly convex and uniformly smooth Banach spaces; see \cite{HAM12, Cho, Kazi, Shehu} and the references therein. Different optimization problems have been formulated in terms of the SFP \eqref{1.3}. For instance, if $Q=\{b\}$ in SFP \eqref{1.3} is a singleton, then we have the following Convexly Constrained Linear Inverse Problem (CCLIP):
\begin{align*}
\text{Find a point}~u^* \in C ~\text{such that}~Ku^* =b.
\end{align*}
The Split Null Point Problem (SNPP) introduced by Byrne et al. \cite{Bryne} is formulated as finding a point 
\begin{align}
x \in H_1~ \text{such that}~ 0 \in B_1(x) ~\text{and}~ 0 \in B_2(Kx),
\end{align}
where $H_1$ and $H_2$ are real Hilbert spaces, $B_1:H_1 \rightarrow 2^{H_1}$ and $B_2:H_2 \rightarrow 2^{H_2}$ are multivalued mappings and $K:H_1 \rightarrow H_2$ is a bounded linear operator.\\ In 2018, Jailoka and Suantai \cite{Jai} introduced the following Halpern iterative method for approximating a common solution of the split null point problem and the fixed point problem for maximal monotone operators and a multivalued demicontractive mapping $T$:
\begin{align*}
\begin{cases}
u, x_1 \in H_1,\\
y_n=J_{\lambda_n}^{B_1}(x_n + \gamma K^*(J_{\lambda_n}^{B_2}-I)Kx_n),\\
u_n=(1-\delta)y_n + \delta z_n,\\
x_{n+1}=\alpha_n u + (1-\alpha_n)u_n,~n \geq 1,
\end{cases}
\end{align*}
where $z_n \in Ty_n$. Also, Oyewole et al. \cite{Oye} introduced a new iterative method with a self-adaptive step size for approximating solutions of an SFP for the sum of two monotone operators and a fixed point problem of a demimetric mapping in real Hilbert spaces. A strong convergence result was proved and numerical experiments illustrating the performance of the algorithm were displayed.\\ In the framework of uniformly convex and smooth Banach spaces, Takahashi and Takahashi \cite{Takah} introduced a shrinking projection method to approximate a solution of the SNPP. Using their iterative method, they proved a strong convergence theorem.\\
\noindent {\bf Question}: Can the results of \cite{AB, Ans, Bryne, Izu, Jai, Ok, Oye, Takah} be established in more general Banach spaces (reflexive Banach spaces)?\\
\\ Let $B:E \rightarrow 2^{E^*}$ be a set-valued mapping. We define the domain and range of $B$ by $dom B=\{x \in E: Bx \neq \emptyset\}$ and $ran B=\bigcup_{x \in E}Bx$, respectively. The graph of $B$ is denoted by $G(B) =\{(x, x^*) \in E \times E^*: x^* \in Bx\}$. The mapping $B \subset E \times E^*$ is said to be monotone \cite{Rock} if $\langle x-y, x^*-y^*\rangle \geq 0 $ whenever $(x, x^*), (y, y^*) \in B$. It is said to be maximal monotone \cite{Rocky} if its graph is not contained in the graph of any other monotone operator on $E$. If $B \subset E\times E^*$ is maximal monotone, then the set $B^{-1}(0) =\{z \in E : 0 \in Bz\}$ is closed and convex. Also, the resolvent associated with $B$ and $\lambda > 0$ is the mapping $J_{\lambda B}^{g}: E \rightarrow 2^{E}$ with $Fix(J_{\lambda B}^{g})=B^{-1}(0)$, defined by 
\begin{align*}
J_{\lambda B}^{g}:=(\nabla_{E}^{g}+ \lambda B)^{-1} \circ \nabla_{E}^{g}.
\end{align*}
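In particular, if $E$ is a Hilbert space and $g=\frac{1}{2}\|\cdot\|^2$, then $\nabla_{E}^{g}$ is the identity and $J_{\lambda B}^{g}$ reduces to the classical resolvent $(I+\lambda B)^{-1}$.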
It is worth mentioning that a mapping $B: E \rightarrow 2^{E^*}$ is called Bregman inverse strongly monotone (BISM) on the set $C$ if 
\begin{align*}
C \cap (dom~ g) \cap (int~ dom~ g) \neq \emptyset,
\end{align*}
and for any $x, y \in C \cap (int~ dom~ g),~ \eta \in Bx$ and $\xi \in By,$ we have
\begin{align*}
\langle \eta-\xi, \nabla_{E^*}^{g^*}(\nabla_{E}^{g}(x)-\eta)-\nabla_{E^*}^{g^*}(\nabla_{E}^{g}(y)-\xi)\rangle \geq 0.
\end{align*}
\noindent The anti-resolvent $B_{\lambda}^{g}:E \rightarrow 2^{E}$ associated with the mapping $B: E \rightarrow 2^{E^*}$ and $\lambda > 0$ is defined by
\begin{align}
B_{\lambda}^{g}:=\nabla_{E^*}^{g^*}\circ (\nabla_{E}^{g}-\lambda B).
\end{align}
\noindent Let $A : E \rightarrow E^*$ be a single-valued monotone mapping and $B: E \rightarrow 2^{E^*}$ be a multivalued monotone mapping. Then, the Monotone Variational Inclusion Problem (MVIP) (also known as the problem of finding a zero of sum of two monotone mappings) is to find $x \in E$ such that
\begin{align}\label{1.31}
0^* \in A(x)+ B(x).
\end{align}
We denote by $\Omega$ the solution set of problem \eqref{1.31}.\\ 
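\noindent For instance, if $A=\nabla h$ for a smooth convex function $h$ and $B=\partial \varphi$ for a proper, lower semicontinuous and convex function $\varphi$, then \eqref{1.31} is precisely the optimality condition of the composite minimization problem $\min_{x \in E}\{h(x)+\varphi(x)\}$.\\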
\noindent A simple and efficient method for solving \eqref{1.31} is the forward-backward splitting method introduced by Lions and Mercier \cite{Lion} in a Hilbert space $H$. It is known that this method converges weakly to a solution of \eqref{1.31} under the assumption that $A$ is $\alpha$-inverse strongly monotone. Note that inverse strong monotonicity of $A$ is a strict assumption. To avoid this assumption, Tseng \cite{Tsn} introduced the following algorithm, known as Tseng's splitting algorithm, for solving \eqref{1.31}:
\begin{align}\label{1.8c}
\begin{cases}
x_1 \in H,\\
y_n=J_{\lambda_n}^{B}(x_n-\lambda_n Ax_n),\\
x_{n+1}=y_n-\lambda_n(Ay_n-Ax_n),~\forall~n \geq 1,
\end{cases}
\end{align}
where $A: H \rightarrow H$ is monotone and $L$-Lipschitz continuous and $\{\lambda_n\}$ is a sequence of suitable step sizes in $(0, \frac{1}{L})$. He proved that the sequence $\{x_n\}$ generated by \eqref{1.8c} converges weakly to a solution of \eqref{1.31}. It is well known that the step size of Tseng's splitting method requires prior knowledge of the Lipschitz constant of the mapping. However, from a practical point of view, the Lipschitz constant is often difficult to estimate.\\
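\noindent For illustration, the following is a minimal numerical sketch of Tseng's splitting method \eqref{1.8c} in $H=\mathbb{R}^n$. Here we assume, purely for illustration, that $B$ is the normal cone of a box (so that $J_{\lambda_n}^{B}$ is the projection onto the box) and that $A$ is a linear monotone map; these choices are ours and are not taken from \cite{Tsn}.
\begin{verbatim}
import numpy as np

# Illustrative data: A(x) = Mx with M monotone (positive semidefinite
# plus a skew part), B = normal cone of the box [lo, hi]^n, so that
# the resolvent J_lam^B is the Euclidean projection onto the box.
n = 5
rng = np.random.default_rng(0)
S = rng.standard_normal((n, n))
M = S @ S.T + (S - S.T)            # monotone linear operator
A = lambda x: M @ x
L = np.linalg.norm(M, 2)           # Lipschitz constant of A
lo, hi = -1.0, 1.0
resolvent = lambda x: np.clip(x, lo, hi)   # J_lam^B for the box

lam = 0.9 / L                      # step size in (0, 1/L)
x = rng.standard_normal(n)
for _ in range(1000):
    y = resolvent(x - lam * A(x))          # forward-backward step
    x_new = y - lam * (A(y) - A(x))        # Tseng's correction step
    if np.linalg.norm(x_new - x) < 1e-10:
        break
    x = x_new
print("approximate zero of A + B:", x_new)
\end{verbatim}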
\noindent It is well known that many interesting problems arising from mechanics, economics, finance, nonlinear programming, applied sciences and optimization, such as equilibrium and variational inequality problems, can be solved using the MVIP. Considerable efforts have been devoted to developing efficient iterative methods to approximate solutions of the MVIP, in which the resolvent operator technique is one of the vital tools. \\ Many authors have considered approximating solutions of \eqref{1.31} together with fixed point problems in real Hilbert and Banach spaces; see \cite{AB, AC, Af, Oye, She}.\\ For instance, Okeke and Izuchukwu \cite{Ok} studied and analysed an iterative method for approximating solutions of a split feasibility problem and a variational inclusion problem in $p$-uniformly convex Banach spaces which are also uniformly smooth; they proved a strong convergence result for approximating solutions of the aforementioned problems. Shehu \cite{Shes} considered a splitting method for finding zeros of the sum of a maximal monotone operator and a Lipschitz continuous monotone operator in a Banach space. He proved weak and strong convergence results and gave some applications of his main result. In the framework of 2-uniformly convex real Banach spaces which are also uniformly smooth, Abass et al. \cite{AA} investigated a shrinking algorithm for finding zeros of the sum of maximal monotone operators and Lipschitz continuous monotone operators which are also common fixed points of a finite family of relatively quasi-nonexpansive mappings.\\
\noindent If $A=0$ in \eqref{1.31}, then \eqref{1.31} reduces to the following Monotone Inclusion Problem (MIP), which is to find $x \in E$ such that
\begin{align}\label{1.4}
0^* \in B(x).
\end{align}
Many results on the MIP have been extended by authors from real Hilbert spaces to more general Banach spaces. For instance, Reich and Sabach \cite{Reich} introduced some iterative algorithms and proved two strong convergence results for approximating a common solution of a finite family of MIPs \eqref{1.4} in reflexive Banach spaces. Recently, Timnak et al. \cite{Tim} introduced a new Halpern-type iterative scheme for finding a common zero of finitely many maximal monotone mappings in reflexive Banach spaces and proved the following strong convergence theorem.
\begin{theorem}
	Let $E$ be a reflexive Banach space and $f: E \rightarrow \mathbb{R}$ be a strongly coercive Bregman function which is bounded on bounded subsets and uniformly convex and uniformly smooth on bounded subsets of $E$. Let $A_i: E \rightarrow 2^{E^*}$, $i=1, 2,\ldots,N$, be $N$ maximal monotone operators such that $Z:=\cap_{i=1}^{N}A_i^{-1}(0^*)\neq \emptyset$. Let $\{\alpha_n\}_{n \in \mathbb{N}}$ and $\{\beta_n\}_{n \in \mathbb{N}}$ be two sequences in $(0,1)$ satisfying the following control conditions:\\
	(i)~$\lim\limits_{n \rightarrow \infty}\alpha_n=0$ and $\sum\limits_{n=1}^{\infty}\alpha_n=\infty;$\\
	(ii)~$0 < \liminf\limits_{ n \rightarrow \infty} \beta_n \leq \limsup\limits_{n\rightarrow \infty}\beta_n < 1$.\\
	Let $\{x_n\}_{n \in \mathbb{N}}$ be a sequence generated by
	\begin{align}\label{1.5}
	\begin{cases}
	u \in E,~x_1 \in E~\text{chosen arbitrarily},\\
	y_n=\nabla f^*[\beta_n \nabla f(x_n)+ (1-\beta_n)\nabla f (Res_{r_N A_N}^{f}\cdots Res_{r_1 A_1}^{f}(x_n))],\\
	x_{n+1}=\nabla f^*[\alpha_n \nabla f(u)+ (1-\alpha_n)\nabla f(y_n)],
	\end{cases}
	\end{align}
	for $n \in \mathbb{N}$, where $\nabla f$ is the gradient of $f$. If $r_i > 0$, for each $i=1,2,...,N$, then the sequence $\{x_n\}_{n \in \mathbb{N}}$ defined in \eqref{1.5} converges strongly to $proj_{Z}^{f}u$ as $n \rightarrow \infty$.
\end{theorem}
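\noindent As an illustration of scheme \eqref{1.5}, the following minimal sketch takes $E=\mathbb{R}^n$ and $f=\frac{1}{2}\|\cdot\|^2$, so that $\nabla f$ is the identity and $Res_{r A}^{f}=(I+rA)^{-1}$; the two linear maximal monotone operators and all parameter choices are our own illustrative assumptions.
\begin{verbatim}
import numpy as np

# Hilbert-case sketch of scheme (1.5) with N = 2 linear maximal
# monotone operators; their common zero set is Z = {0} here.
n, N = 3, 2
rng = np.random.default_rng(2)
mats = []
for _ in range(N):
    S = rng.standard_normal((n, n))
    mats.append(S @ S.T)          # positive semidefinite => monotone
r = [1.0, 1.0]
res = [np.linalg.inv(np.eye(n) + r[i] * mats[i]) for i in range(N)]

u = rng.standard_normal(n)
x = rng.standard_normal(n)
for k in range(1, 2000):
    alpha, beta = 1.0 / (k + 1), 0.5   # conditions (i) and (ii)
    z = res[1] @ (res[0] @ x)          # composition of the resolvents
    y = beta * x + (1 - beta) * z
    x = alpha * u + (1 - alpha) * y    # Halpern step
print("approximate common zero:", x)   # projection of u onto Z = {0}
\end{verbatim}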
\noindent Very recently, Ogbuisi and Izuchukwu \cite{Chidu} introduced an iterative algorithm and obtained a strong convergence result for approximating a zero of the sum of two maximal monotone operators which is also a fixed point of a Bregman strongly nonexpansive mapping in the framework of reflexive Banach spaces.\\
\noindent  We would also like to emphasize that approximating a solution of the SNPP has some possible applications to mathematical models whose constraints can be expressed as an SNPP. In fact, this happens in practical problems like signal processing, network resource allocation and image recovery, to mention a few (see \cite{Ii}). It is worth mentioning that the problem considered in this article generalizes the ones in \cite{Ans,Cens, Moudafi}.\\\\
Inspired by the results discussed above, we introduce an iterative algorithm, which does not require prior knowledge of the operator norm (a quantity that may be difficult to compute), to approximate a solution of the split null point problem involving single-valued and multivalued monotone mappings and Lipschitz continuous monotone mappings in reflexive Banach spaces. Using our iterative algorithm, we prove a strong convergence result for approximating solutions of the aforementioned problems. Finally, we present some numerical experiments to show the performance and behaviour of our main result. The results discussed in this paper complement and extend many related results in the literature. \\
We state our contributions in this article as follows:
\begin{enumerate}
	\item The main result in this paper generalizes the results in \cite{Bello}, \cite{Ogb} and \cite{Ok} from $p$-uniformly convex Banach spaces which are also uniformly smooth to reflexive Banach spaces, and the results in \cite{Af, Ans, Cens, Moudafi, Og, Ok, Tim} from real Hilbert spaces to reflexive Banach spaces.\\ 
	\item The iterative method defined in this article is designed in such a way that it does not depend on the operator norm; see \cite{Gaz, Oye}.\\
	\item We prove a strong convergence result, which is more desirable than the weak convergence result obtained in \cite{Sunt}.\\
	\item The sequence of step sizes of our algorithm is chosen without prior knowledge of the Lipschitz constant and the uniform smoothness constant of the mapping; see \cite{Shes}.
\end{enumerate}

\section{Preliminaries}

\noindent We state some known and useful results which will be needed in the proof of our main result. In the sequel, we denote strong and weak convergence by ``$\rightarrow$'' and ``$\rightharpoonup$'', respectively.

\noindent
\begin{definition}
	A function $g : E \rightarrow \mathbb{R}$ is said to be strongly coercive if
	\begin{align*}
	\lim_{\|x\|\rightarrow \infty}\frac{g(x)}{\|x\|}=\infty.
	\end{align*}
\end{definition}
\begin{definition}
	A mapping $T: C \rightarrow E$ is said to be demiclosed at $p$ if, whenever $\{x_n\}$ is a sequence in $C$ such that $\{x_n\}$ converges weakly to some $x^* \in C$ and $\{Tx_n\}$ converges strongly to $p$, then $Tx^*=p$.
\end{definition}
\begin{lemma}\label{2.2}\cite{Tim}
	Let $E$ be a Banach space, $s > 0$ be a constant, $\rho_s$ be the gauge of uniform convexity of $g$ and $g: E \rightarrow \mathbb{R}$ be a strongly coercive Bregman function. Then,\\
	(i) For any $x, y, z \in B_s$ and $\alpha \in (0,1)$, we have
	\begin{align*}
	D_g\big(x, \nabla_{E^*}^{g^*}[\alpha \nabla_{E}^{g}(y) + (1-\alpha)\nabla_{E}^{g}(z)]\big) &\leq \alpha D_g(x,y) + (1-\alpha)D_g(x,z)\\&-\alpha(1-\alpha)\rho_s(\|\nabla_{E}^{g}(y)-\nabla_{E}^{g}(z)\|);
	\end{align*}
	(ii)~For any $x, y \in B_s$,
	\begin{align*}
	\rho_s(\|x-y\|) \leq D_g(x,y).
	\end{align*}
\end{lemma}
\begin{lemma}\label{2.3}\cite{But}
	Let $E$ be a reflexive Banach space, $g: E \rightarrow \mathbb{R}$ be a strongly coercive Bregman function and $V$ be a function defined by
	\begin{align*}
	V(x, x^*)=g(x)-\langle x, x^*\rangle + g^*(x^*),~x \in E,~x^* \in E^*.
	\end{align*}
	Then the following assertions hold:
	\begin{align*}
	D_g(x, \nabla^{g^*}_{E^*}(x^*))=V(x,x^*), ~\text{for all}~x\in E~\text{and}~x^* \in E^*.
	\end{align*}
	\begin{align*}
	V(x, x^*) + \langle \nabla^{g^*}_{E^*}(x^*)-x, y^*\rangle \leq V(x, x^* + y^*)~\text{for all}~x \in E \text{and}~x^*, y^* \in E^*.
	\end{align*}
	Moreover, for any $x \in E,~ y^{*}, z^{*} \in B_{r}$ and $\alpha \in (0,1),$ we have
	\begin{align}\label{2.1ca}
	V_{g}(x, \alpha y^{*} + (1-\alpha)z^{*}) \leq \alpha V_{g}(x, y^*) + (1-\alpha)V_{g}(x, z^*)-\alpha(1-\alpha)\rho_{r}^{*}(\|y^*-z^*\|).
	\end{align}
\end{lemma}
\begin{lemma}\label{2.4}\cite{Gaz}
	Let $E_1$ and $E_2$ be two Banach spaces. Let $F:E_1 \rightarrow E_2$ be a bounded linear operator and $T:E_2 \rightarrow E_2$ be Bregman $(\phi,\sigma)$-demigeneralized for some $\phi \in (-\infty, 1)$ and $\sigma \in [0, \infty)$. Suppose that $K=ran(F) \cap Fix(T) \neq \emptyset$ (where $ran(F)$ denotes the range of $F$). Then for any $(x,q) \in E_1 \times K,$
	\begin{align}\label{As1}
	\langle x-q, F^*(\nabla_{E_2}^{g_2}(Fx)-\nabla_{E_2}^{g_2}(T(Fx)))\rangle & \geq (1-\phi)D_{g_2}(Fx, T(Fx)) + \sigma D_{g_2}(T(Fx), Fx)\nonumber\\
	& \geq (1-\phi)D_{g_2}(Fx, T(Fx)).
	\end{align}
	So, given any real numbers $\xi_1$ and $\xi_2,$ the mappings $L_1:E_1 \rightarrow [0, \infty)$ and $L_2:E_1 \rightarrow [0, \infty)$, formulated for $x \in E_1$ as
	\begin{align}
	L_1(x)=
	\begin{cases}
	\frac{D_{g_2}(Fx, TFx)}{D_{g_1}^{*}(F^*(\nabla_{E_2}^{g_2}(Fx)), F^*(\nabla_{E_2}^{g_2}(TFx)))}, &\text{if}~ (I-T)Fx\neq 0,\\
	\xi_1,              &\text{otherwise},
	\end{cases}
	\end{align}
	and 
	\begin{align}\label{As2}
	L_2(x)=
	\begin{cases}
	\frac{D_{g_1}^{*}(\nabla_{E_1}^{g_1}(x)-\gamma F^*(\nabla_{E_2}^{g_2}(Fx)-\nabla_{E_2}^{g_2}(TFx)), \nabla_{E_1}^{g_1}(x))}{D_{g_1}^{*}(F^*(\nabla_{E_2}^{g_2}(Fx)), F^*(\nabla_{E_2}^{g_2}(TFx)))}, &\text{if}~ (I-T)Fx\neq 0,\\
	\xi_2,              &\text{otherwise},
	\end{cases}
	\end{align}
	are well defined, where $\gamma$ is any nonnegative real number. Moreover, for any $(x, q) \in E_1 \times K,$ we have 
	\begin{align}\label{2.4a}
	D_{g_1}(q, y) \leq D_{g_1}(q,x)-(\gamma(1-\phi)L_1(x)-L_2(x))D_{g_1^{*}}(F^*(\nabla_{E_2}^{g_2}(Fx)), F^*(\nabla_{E_2}^{g_2}(TFx))),
	\end{align}
	where
	\begin{align*}
	y=(\nabla_{E_1}^{g_1})^{-1}[\nabla_{E_1}^{g_1}(x)-\gamma F^*(\nabla_{E_2}^{g_2}(Fx)-\nabla_{E_2}^{g_2}(TFx))].
	\end{align*}
	\begin{remark}
		From Definition 2.2 of \cite{Gaz}, it can be seen that $J_{\lambda B}^{g}$ is $(0,1)$-demigeneralized. Therefore, we conclude from \eqref{2.4a} that
	\end{remark}
	\begin{align}\label{2.5a}
	D_{g_1}(q, y) \leq D_{g_1}(q,x)-(\gamma L_1(x)-L_2(x))D_{g_1^{*}}(F^*(\nabla_{E_2}^{g_2}(Fx)), F^*(\nabla_{E_2}^{g_2}(J_{\lambda B}^{g}Fx))),
	\end{align}
	where $T=J_{\lambda B}^{g}$ and $B:E \rightarrow 2^{E^{*}}$ is a maximal monotone operator.
\end{lemma}
\begin{lemma}\label{2.5}\cite{But}
	Let $E$ be a Banach space and $g: E \rightarrow \mathbb{R}$ a G\^{a}teaux differentiable function which is uniformly convex on bounded subsets of $E$. Let $\{x_n\}_{n \in \mathbb{N}}$ and $\{y_n\}_{n\in \mathbb{N}}$ be bounded sequences in $E$. Then, 
	\begin{align*}
	\lim_{n \rightarrow \infty}D_g(y_n,x_n)=0\Rightarrow \lim_{n \rightarrow \infty}\|y_n-x_n\|=0.
	\end{align*}
\end{lemma}
%\begin{lemma}\label{2.6}\cite{Chidu}
%	Let $B: E \rightarrow 2^{E^*}$ be a maximal monotone operator and $A: E \rightarrow E^*$ be a BISM mapping such that $(A+B)^{-1}(0^*) \neq \emptyset$. Let $g: E \rightarrow \mathbb{R}$ be a Legendre function, which is uniformly Fr\'{e}chet differentiable and bounded on bounded subset of $E$. Then,
%	\begin{align*}
%	D_g(u, Res_{\lambda B}^{g}\circ A^{g}(x)) + %D_g(Res_{\lambda B}^{g}(x), x) \leq %D_g(u,x),~\text{for any} ~u \in (A+ %B)^{-1}(0^*),~x \in E~\text{and}~ \lambda > %0.
%	\end{align*}
%\end{lemma}
%\begin{lemma}\label{2.7}\cite{Chidu}
%	Let $B: E \rightarrow 2^{E^*}$ be a maximal monotone operator and $A: E \rightarrow E^*$ be a BISM mapping such that $(A+B)^{-1}(0^*) \neq \emptyset$. Let $g: E \rightarrow \mathbb{R}$ be a Legendre function, which is uniformly Fr\'{e}chet differentiable and bounded on bounded subset of $E$. Then, \\
%	(i)~$(A+B)^{-1}(0^*) = Fix(Res_{\lambda B}^{g}\circ A_{\lambda}^{g});$\\
%	(ii)~$Res_{\lambda B}^{g}\circ A_{\lambda}^{g}$ is a BSNE operator with $Fix(Res_{\lambda B}^{g}\circ A_{\lambda}^{g})=\hat{Fix}(Res_{\lambda B}^{g}\circ A_{\lambda}^{g})$.
%\end{lemma}
\begin{lemma}\label{ASX}\cite{Bar}
	Let $A:E \rightarrow E^{*}$ be a monotone, hemicontinuous and bounded operator, and $B: E \rightarrow 2^{E^{*}}$ be a maximal monotone operator. Then $A+ B$ is maximal monotone.
\end{lemma}
\begin{lemma}\label{2.8}\cite{Reich}
	Let $g: E \rightarrow \mathbb{R}$ be a G\^{a}teaux differentiable and totally convex function. If $x_0 \in E$ and the sequence $\{D_g(x_n, x_0)\}$ is bounded, then the sequence $\{x_n\}$ is also bounded.
\end{lemma}
\begin{definition}
	Let $C$ be a nonempty closed and convex subset of a reflexive Banach space $E$ and $g:E \rightarrow (-\infty, +\infty]$ be a strongly coercive Bregman function. A Bregman projection of $x \in int(dom (g))$ onto $C \subset int(dom g)$ is the unique vector $P_{C}^{g}(x) \in C$ satisfying
	\begin{align*}
	D_g(P_C^{g}(x), x)=\inf\{D_g(y,x): y \in C\}.
	\end{align*}
\end{definition}
\begin{lemma}\label{2.9}\cite{Rei}
	Let $C$ be a nonempty closed and convex subset of a reflexive Banach space $E$ and $x \in E$. Let $g: E \rightarrow \mathbb{R}$ be a strongly coercive Bregman function. Then,\\
	(i)~$z=P_{C}^{g}(x)$ if and only if $\langle \nabla^{g}_{E}(x)-\nabla^{g}_{E}(z), y-z\rangle \leq 0,~\forall~y \in C$.\\
	(ii)~$D_g(y, P_{C}^{g}(x)) + D_g(P_{C}^{g}(x), x) \leq D_g(y,x),~\forall~y \in C$.
\end{lemma}
\begin{lemma}\label{2.9a}\cite{Xu}
	Let $\{a_n\}$, $\{\gamma_n\}$, $\{\delta_n\}$ and $\{t_n\}$ be sequences of nonnegative real numbers and $\{s_n\}$ be a sequence of real numbers satisfying the following relation:
	\begin{align*}
	a_{n+1}\leq(1-t_n-\gamma_{n})a_n+\gamma_n a_{n-1} + t_n s_n + \delta_n, ~~\forall n\geq 0,
	\end{align*} 
	where $\sum\limits_{n=n_0}^{\infty}t_n=+\infty$ and $\sum\limits_{n=n_0}^{\infty}\delta_n < +\infty$ (where $n_0$ is a positive integer), $\{\gamma_n\} \subset [0, \frac{1}{2}]$ and $\limsup\limits_{n\rightarrow \infty}s_n\leq 0$. Then the sequence $\{a_n\}$ converges to zero.
\end{lemma}
\begin{lemma}\label{2.10}\cite{Maing}
	Let $\{\Gamma_n\}$ be a sequence of real numbers that does not decrease at infinity, in the sense that there exists a subsequence $\{\Gamma_{n_k}\}_{k\geq 0}$ of $\{\Gamma_n\}$ which satisfies $\Gamma_{n_k} \leq \Gamma_{n_{k}+1}$ for all $k \geq 0$. Also, consider the sequence of integers $\{\tau(n)\}_{n \geq n_0}$ defined by
	\begin{align*}
	\tau(n):=\max\{k \leq n~|~ \Gamma_{k}\leq \Gamma_{k+1}\}.
	\end{align*}
	Then $\{\tau(n)\}_{n \geq n_0}$ is a nondecreasing sequence satisfying $\lim_{n \rightarrow \infty}\tau(n)=\infty$ and, for all $n \geq n_0$,
	\begin{align*}
	\Gamma_{\tau(n)} \leq \Gamma_{\tau(n)+1}~\text{and}~\Gamma_{n} \leq \Gamma_{\tau(n)+1}.
	\end{align*}
\end{lemma}

\section{Main Result}
\noindent Throughout this section, we assume that
\begin{Assumption}
	\begin{enumerate}
		\noindent	\item $E_1$ and $E_2$ are two reflexive Banach spaces, and $g_1:E_1 \rightarrow (-\infty, +\infty]$ and $g_2:E_2 \rightarrow (-\infty, +\infty]$ are strongly coercive Bregman functions which are bounded on bounded subsets, and uniformly convex and uniformly smooth on bounded subsets of $E_1$ and $E_2$, respectively, with constant $\beta >0$.\\
		\item $\nabla_{E_1}^{g_1}$ and $\nabla_{E_2}^{g_2}$ are the gradients of $g_1$ and $g_2$, respectively.\\
		\item $A_1:E_1 \rightarrow E_1^{*}$ is a monotone and $L$-Lipschitz continuous mapping, $B_1:E_1 \rightarrow 2^{E_1^{*}}$ and $B_2:E_2 \rightarrow 2^{E_2^{*}}$ are maximal monotone mappings, $J_{\lambda B_2 }^{g_2}$ is the resolvent of $B_2$ with respect to $g_2$ for $\lambda >0$, and $\lambda_{n}=\rho l^{m_n}$, where $m_n$ is the smallest nonnegative integer such that
		\begin{align}\label{Arm}
		\lambda_{n}\|A_1z_n-A_1y_n\| \leq \mu\|z_n-y_n\|.
		\end{align}
		\item $K:E_1 \rightarrow E_2$ is a bounded linear operator such that $K \neq 0$, and $K^*:E_2^{*}\rightarrow E_1^{*}$ is the adjoint of $K$. We assume that $\rho >0,~ l \in (0,1)$ and $\mu \in (0, \sigma)$, where $\sigma$ is the strong convexity constant in \eqref{2.3a}. \\
		\item The control sequences $\{\alpha_n\}, \{\beta_n\}$ and $\{\delta_n\}$ are sequences in $(0,1)$ such that $\alpha_n + \beta_n + \delta_n=1$ and $\{\theta_n\} \subset [0, \frac{1}{2}]$, and the following conditions are satisfied:
	\end{enumerate}
	\begin{enumerate}
		\item[(i)] $\lim\limits_{n \rightarrow \infty} \alpha_n=0$,~$\sum\limits_{n=1}^{\infty}\alpha_n=\infty,$\\
		\item[(ii)] $0 < \liminf\limits_{n \rightarrow \infty}\beta_n \leq \limsup\limits_{n\rightarrow \infty}\beta_n< 1$,\\
		\item[(iii)] $0 < a \leq \theta_n < \delta_n \leq \frac{1}{2},~\forall~ n \geq 1.$
	\end{enumerate}
\end{Assumption}
\noindent
\hrule
\begin{Algorithm}\label{Alg1}
	Choose $x_0, x_1 \in E_1$ arbitrarily and fix $u \in E_1$. Define the sequence $\{x_n\}_{n=1}^{\infty}$ by
	\begin{align}\label{3.2}
	\begin{cases}
	w_n=(\nabla_{E_1}^{g_1})^{-1}[\nabla_{E_1}^{g_1}(x_n) + \theta_n(\nabla_{E_1}^{g_1}(x_{n-1})-\nabla_{E_1}^{g_1}(x_n))],\\
	z_n=(\nabla_{E_1}^{g_1})^{-1}\big[\nabla_{E_1}^{g_1}(w_n)-\gamma K^*(\nabla_{E_2}^{g_2}(Kw_n)-\nabla_{E_2}^{g_2}(J_{\lambda B_2}^{g_2}Kw_n))\big],\\
	y_n=J_{\lambda_n B_1}^{g_1}\big[(\nabla_{E_1}^{g_1})(z_n)-\lambda_{n}A_1z_n\big],\\
	u_n=(\nabla_{E_1}^{g_1})^{-1}\big[\nabla_{E_1}^{g_1}(y_n)-\lambda_{n}(A_1y_n-A_1z_n)\big],\\
	x_{n+1}=(\nabla_{E_1}^{g_1})^{-1}\big[\alpha_n \nabla_{E_1}^{g_1}(u) + \beta_n \nabla_{E_1}^{g_1}(x_n) + \delta_n \nabla_{E_1}^{g_1}(u_n)\big].
	\end{cases}
	\end{align}
	Suppose that $\Omega:=\{p \in (A_1 + B_1)^{-1}(0): Kp \in B_2^{-1}(0)\}\neq \emptyset$ and let $\gamma > 0$. Let the sequences $\{\xi_{1,n}\}_{n\in \mathbb{N}}$ and $\{\xi_{2,n}\}_{n \in \mathbb{N}}$ satisfy the following condition:
	\begin{enumerate}
		\item[(i)] there exists a positive real number $\phi_1$ such that 
		\begin{align*}
		0 < \phi_1 < \liminf\limits_{ n \rightarrow \infty}\frac{\xi_{2,n}}{\xi_{1,n}} < \gamma,
		\end{align*}
	\end{enumerate}
	where
	\begin{align*}
	\xi_{1,n}=
	\begin{cases}
	\frac{D_{g_2}(Kw_n, J_{\lambda B_2}^{g_2}Kw_n)}{D_{g_1}^{*}(K^*(\nabla_{E_2}^{g_2}(Kw_n)), K^*(\nabla_{E_2}^{g_2}(J_{\lambda B_2}^{g_2}Kw_n)))}, &\text{if}~ (I-J_{\lambda B_2}^{g_2})Kw_n\neq 0,\\
	\xi_1,              &\text{otherwise},
	\end{cases}
	\end{align*}
	and 
	\begin{align*}
	\xi_{2,n}=
	\begin{cases}
	\frac{D_{g_1}^{*}(\nabla_{E_1}^{g_1}(w_n)-\gamma K^*(\nabla_{E_2}^{g_2}(Kw_n)-\nabla_{E_2}^{g_2}(J_{\lambda B_2}^{g_2}Kw_n)), \nabla_{E_1}^{g_1}(w_n))}{D_{g_1}^{*}(K^*(\nabla_{E_2}^{g_2}(Kw_n)), K^*(\nabla_{E_2}^{g_2}(J_{\lambda B_2}^{g_2}Kw_n)))}, &\text{if}~ (I-J_{\lambda B_2}^{g_2})Kw_n\neq 0,\\
	\xi_2,              &\text{otherwise}.
	\end{cases}
	\end{align*}
	
	\noindent Then the sequence $\{x_n\}$ generated by \eqref{3.2} converges strongly to $z=P_{\Omega}^{g_1}u,$ where $P_{\Omega}^{g_1}$ is the Bregman projection of $E_1$ onto $\Omega$.
\end{Algorithm}
\hrule
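\noindent Before turning to the proof, we indicate how the scheme \eqref{3.2} can be realized numerically. The following is a minimal sketch in the Hilbert-space setting $E_1=E_2=\mathbb{R}^n$ with $g_1=g_2=\frac{1}{2}\|\cdot\|^2$, so that $\nabla_{E_i}^{g_i}$ is the identity and the resolvents reduce to classical ones. The concrete operators ($A_1$ linear monotone, $B_1$ and $B_2$ normal cones) and all parameter values are our own illustrative assumptions, not prescribed by Algorithm \ref{Alg1}.
\begin{verbatim}
import numpy as np

# Illustrative Hilbert-space instance: A1(x) = M x (monotone and
# Lipschitz), B1 = normal cone of a box, B2 = normal cone of the
# hyperplane {y : <a, y> = 0}, K a linear map.
n = 4
rng = np.random.default_rng(1)
S = rng.standard_normal((n, n))
M = S @ S.T                                        # monotone linear map
A1 = lambda x: M @ x
res_B1 = lambda x: np.clip(x, -1.0, 1.0)           # J_{lam B1} = P_box
a = rng.standard_normal(n)
res_B2 = lambda y: y - (a @ y) / (a @ a) * a       # J_{lam B2} = P_hyperplane
K = rng.standard_normal((n, n))

u = rng.standard_normal(n)
x_prev, x = rng.standard_normal(n), rng.standard_normal(n)
rho, l, mu = 1.0, 0.5, 0.3
gamma = 1.0 / np.linalg.norm(K, 2) ** 2            # illustrative choice

for it in range(1, 500):
    alpha = 1.0 / (12 * it + 3)                    # condition (i)
    beta = (8 * it + 1.0) / (12 * it + 3)          # condition (ii)
    delta, theta = 1.0 - alpha - beta, 0.25        # inertial parameter
    w = x + theta * (x_prev - x)                   # inertial step
    z = w - gamma * K.T @ (K @ w - res_B2(K @ w))  # split step
    lam = rho                                      # Armijo-type linesearch
    while True:
        y = res_B1(z - lam * A1(z))
        if lam * np.linalg.norm(A1(z) - A1(y)) <= mu * np.linalg.norm(z - y):
            break
        lam *= l
    v = y - lam * (A1(y) - A1(z))                  # Tseng-type correction
    x_prev, x = x, alpha * u + beta * x + delta * v  # Halpern step
print("approximate solution:", x)
\end{verbatim}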
\begin{proof}
	It can be seen from Lemma 3.2 of \cite{Sunt} that the Armijo-type linesearch rule defined by \eqref{Arm} is well defined and 
	\begin{align*}
	\min\big\{\rho, \frac{\mu l}{L}\big\} \leq \lambda_{n} \leq \rho.
	\end{align*}
	Now, let $x^* \in \Omega$. Then, using the definition of $u_n$ in \eqref{3.2}, we have from \eqref{1} that
	\begin{align}\label{3.3c}
	D_{g_1}(x^*, u_n)&=D_{g_1}\big(x^*, (\nabla_{E_1}^{g_1})^{-1}[\nabla_{E_1}^{g_1}(y_n)-\lambda_{n}(A_1y_n-A_1z_n)]\big)\nonumber\\
	&=g_1(x^*)-g_1(u_n)-\langle x^*-u_n, \nabla g_1(y_n)-\lambda_{n}(A_1y_n-A_1z_n)\rangle\nonumber \\
	&=g_1(x^*)-g_1(u_n)-\langle x^*-u_n, \nabla g_1(y_n)\rangle + \lambda_{n} \langle x^*-u_n, A_1y_n-A_1z_n\rangle\nonumber\\
	&=g_1(x^*)-g_1(y_n)-\langle x^*-y_n, \nabla g_1(y_n)\rangle + \langle x^*-y_n, \nabla g_1(y_n)\rangle\nonumber\\&+g_1(y_n)-g_1(u_n)-\langle x^*-u_n, \nabla g_1(y_n)\rangle + \lambda_{n} \langle x^*-u_n, A_1y_n-A_1z_n\rangle \nonumber\\
	&=g_1(x^*)-g_1(y_n)-\langle x^*-y_n, \nabla g_1(y_n)\rangle -g_1(u_n)+ g_1(y_n)\nonumber\\&+ \langle u_n-y_n, \nabla g_1(y_n)\rangle + \lambda_{n} \langle x^*-u_n, A_1y_n-A_1z_n\rangle\nonumber\\
	&=D_{g_1}(x^*, y_n)-D_{g_1}(u_n, y_n)+ \lambda_{n} \langle x^*-u_n, A_1 y_n-A_1z_n\rangle.
	\end{align} 
	Using \eqref{2}, we get
	\begin{align}\label{3.4c}
	D_{g_1}(x^*, y_n) & = D_{g_1}(x^*, z_n)-D_{g_1}(y_n, z_n) + \langle x^*-y_n, \nabla g_1(z_n)-\nabla g_1(y_n)\rangle.
	\end{align}
	On substituting \eqref{3.4c} into \eqref{3.3c}, we obtain
	\begin{align}\label{3.5c}
	D_{g_1}(x^*, u_n) & =D_{g_1}(x^*, z_n)-D_{g_1}(y_n, z_n)-D_{g_1}(u_n, y_n)\nonumber\\& + \langle x^*-y_n, \nabla g_1(z_n)-\nabla g_1(y_n)\rangle + \lambda_{n} \langle x^*-u_n, A_1y_n-A_1z_n\rangle\nonumber\\
	&=D_{g_1}(x^*, z_n)-D_{g_1}(y_n, z_n)-D_{g_1}(u_n, y_n)+ \langle x^*-y_n, \nabla g_1(z_n)-\nabla g_1(y_n)\rangle\nonumber\\& + \lambda_{n} \langle  y_n-u_n, A_1y_n-A_1z_n\rangle -\lambda_{n} \langle y_n-x^*, A_1y_n-A_1z_n\rangle\nonumber\\
	& =D_{g_1}(x^*, z_n)-D_{g_1}(y_n, z_n)-D_{g_1}(u_n, y_n) + \lambda_{n} \langle  y_n-u_n, A_1y_n-A_1z_n\rangle\nonumber\\&-\langle y_n-x^*, \nabla g_1(z_n)-\nabla g_1(y_n)-\lambda_{n}(A_1z_n-A_1y_n)\rangle.
	\end{align}
	By applying the definition of $y_n,$ we have $\nabla g_1(z_n)-\lambda_{n} A_1z_n \in \nabla g_1(y_n) + \lambda_{n} B_1$. Since $B_1:E_1 \rightarrow 2^{E_1^{*}}$ is a maximal monotone mapping, there exists $a_n \in B_1y_n$ such that $\nabla g_1(z_n)-\lambda_{n} A_1z_n=\nabla g_1(y_n) + \lambda_{n} a_n,$ it follows that
	\begin{align}\label{3.6c}
	a_n=\frac{1}{\lambda_{n}}(\nabla g_1(z_n)-\nabla g_1(y_n)-\lambda_{n}A_1z_n).
	\end{align}
	Since $0 \in (A_1+ B_1)x^*$ and $A_1y_n + a_n \in (A_1+ B_1)y_n,$ it follows from Lemma \ref{ASX} that $A_1+B_1$ is maximal monotone, hence
	\begin{align}\label{3.7c}
	\langle y_n-x^*, A_1y_n + a_n\rangle \geq 0.
	\end{align}
	On substituting \eqref{3.6c} into \eqref{3.7c}, we get
	\begin{align*}
	\frac{1}{\lambda_{n}}\langle y_n-x^*, \nabla g_1(z_n)-\nabla g_1(y_n)-\lambda_{n}A_1z_n+ \lambda_{n}A_1y_n\rangle \geq 0.
	\end{align*}
	That is
	\begin{align}\label{3.8f}
	\langle y_n-x^*, \nabla g_1(z_n)-\nabla g_1(y_n)-\lambda_{n}(A_1z_n-A_1y_n)\rangle \geq 0.
	\end{align}
	Combining \eqref{3.5c} and \eqref{3.8f}, and using \eqref{2.3a}, we have
	\begin{align}\label{3.9c}
	D_{g_1}(x^*, u_n) & \leq D_{g_1}(x^*, z_n)-D_{g_1}(y_n, z_n)-D_{g_1}(u_n, y_n) + \lambda_{n}\langle y_n-u_n, A_1y_n-A_1z_n\rangle\nonumber\\
	& \leq D_{g_1}(x^*, z_n)-D_{g_1}(y_n, z_n)-D_{g_1}(u_n, y_n) + \lambda_{n}||y_n-u_n||~||A_1y_n-A_1z_n||\nonumber\\
	&\leq D_{g_1}(x^*, z_n)-D_{g_1}(y_n, z_n)-D_{g_1}(u_n, y_n) + \mu\|y_n-u_n\|~\|y_n-z_n\|\nonumber\\
	& \leq D_{g_1}(x^*, z_n)-D_{g_1}(y_n, z_n)-D_{g_1}(u_n, y_n) + \frac{\mu}{2} \big(\|y_n-u_n\|^2+ \|y_n-z_n\|^2\big)\nonumber\\
	& \leq D_{g_1}(x^*, z_n)-\big(1-\frac{\mu}{\sigma}\big)D_{g_1}(y_n, z_n)-\big(1-\frac{\mu}{\sigma}\big)D_{g_1}(u_n, y_n)\\
	& \leq D_{g_1}(x^*, z_n)\label{3.10c}.
	\end{align}
	Also, from \eqref{2.5a} and \eqref{3.2}, we get
	\begin{align}
	D_{g_1}(x^*, z_n)&=D_{g_1}\bigg(x^*, (\nabla_{E_1}^{g_1})^{-1}\big(\nabla_{E_1}^{g_1}(w_n)-\gamma K^{*}(\nabla_{E_2}^{g_2}(Kw_n)-\nabla_{E_2}^{g_2}(J_{\lambda B_2}^{g_2}Kw_n))\big)\bigg)\nonumber\\
	&\leq D_{g_1}(x^*, w_n)-(\gamma \xi_{1,n}-\xi_{2,n})D_{g^{*}_{1}}\big(K^{*}(\nabla_{E_2}^{g_2}(Kw_n)), K^*(\nabla_{E_2}^{g_2}(J_{\lambda B_2}^{g_2}Kw_n))\big)\label{3.11c}\\
	&\leq D_{g_1}(x^*, w_n)\label{3.12c}\\
	&=D_{g_1}\bigg(x^*, (\nabla_{E_1}^{g_1})^{-1}\big(\nabla_{E_1}^{g_1}(x_n)+ \theta_n(\nabla_{E_1}^{g_1}(x_{n-1})-\nabla_{E_1}^{g_1}(x_n))\big)\bigg)\nonumber\\
	&\leq (1-\theta_n)D_{g_1}(x^*, x_n) + \theta_n D_{g_1}(x^*, x_{n-1})\label{3.13c}.
	\end{align}
	From \eqref{2.1ca}, \eqref{3.2}, \eqref{3.9c}, \eqref{3.11c} and \eqref{3.13c}, we get
	\begin{align}
	D_{g_1}(x^*, x_{n+1}) & \leq D_{g_1}\bigg(x^*, (\nabla_{E_1}^{g_1})^{-1}\big(\alpha_n \nabla_{E_1}^{g_1}(u) + \beta_n \nabla_{E_1}^{g_1}(x_n) + \delta_n \nabla_{E_1}^{g_1}(u_n)\big)\bigg)\nonumber\\
	& \leq V_{g_1}\bigg(x^*, \alpha_n \nabla_{E_1}^{g_1}(u) + \beta_n \nabla_{E_1}^{g_1}(x_n)+ \delta_n \nabla_{E_1}^{g_1}(u_n)\bigg)\nonumber\\
	&=g_1(x^*)-\langle x^*, \alpha_n \nabla_{E_1}^{g_1}(u) + \beta_n \nabla_{E_1}^{g_1}(x_n) + \delta_n \nabla_{E_1}^{g_1}(u_n)\rangle\nonumber\\& + g_1^{*}\big(\alpha_n \nabla_{E_1}^{g_1}(u)+ \beta_n \nabla_{E_1}^{g_1}(x_n)+ \delta_n \nabla_{E_1}^{g_1}(u_n)\big)\nonumber\\
	& \leq \alpha_n g_1(x^*) + \beta_n g_1(x^*) + \delta_n g_1(x^*)-\beta_n \langle x^*, \nabla_{E_1}^{g_1}(x_n)\rangle\nonumber\\&-\delta_n \langle x^*, \nabla_{E_1}^{g_1}(u_n)\rangle-\alpha_n\langle x^*, \nabla_{E_1}^{g_1}(u)\rangle + \beta_n g_1^{*}(\nabla_{E_1}^{g_1}(x_n))\nonumber\\& + \delta_n g_1^{*}(\nabla_{E_1}^{g_1}(u_n)) + \alpha_n g_1^{*}(\nabla_{E_1}^{g_1}(u))-\beta_n \delta_n \rho_{r}^{*}(\|\nabla_{E_1}^{g_1}(x_n)-\nabla_{E_1}^{g_1}(u_n)\|)\nonumber\\&-\beta_n \delta_n \rho_{r}^{*}(\|\nabla_{E_1}^{g_1}(x_n)-\nabla_{E_1}^{g_1}(u)\|)\nonumber\\
	& \leq \beta_n\bigg(g_1(x^*)-\langle x^*, \nabla_{E_1}^{g_1}(x_n)\rangle + g_1^{*}(\nabla_{E_1}^{g_1}(x_n))\bigg)\nonumber\\& + \delta_n \bigg(g_1(x^*)-\langle x^*, \nabla_{E_1}^{g_1}(u_n)\rangle + g_1^{*}(\nabla_{E_1}^{g_1}(u_n))\bigg)\nonumber\\& + \alpha_n\bigg( g_1(x^*)-\langle x^*, \nabla_{E_1}^{g_1}(u)\rangle + g_1^{*}\bigg(\nabla_{E_1}^{g_1}(u))\bigg)\nonumber\\&-\beta_n\delta_n \rho_{r}^{*}\big(\|\nabla_{E_1}^{g_1}(x_n)-\nabla_{E_1}^{g_1}(u_n)\|\big)\nonumber\\
	&=\beta_n V_{g_1}(x^*, \nabla_{E_1}^{g_1}(x_n)) + \delta_n V_{g_1}(x^*, \nabla_{E_1}^{g_1}(u_n)) + \alpha_n V_{g_1}(x^*, \nabla_{E_1}^{g_1}(u))\nonumber\\&-\beta_n\delta_n\rho_{r}^{*}\big(\|\nabla_{E_1}^{g_1}(x_n)-\nabla_{E_1}^{g_1}(u_n)\|\big) \nonumber\\
	& \leq \beta_n D_{g_1}(x^*, x_n) + \delta_n D_{g_1}(x^*, u_n) + \alpha_n D_{g_1}(x^*, u)\nonumber\\&-\beta_n\delta_n\rho_{r}^{*}\big(\|\nabla_{E_1}^{g_1}(x_n)-\nabla_{E_1}^{g_1}(u_n)\|\big)\nonumber\\
	& \leq \beta_n D_{g_1}(x^*, x_n)\nonumber\\& + \delta_n \bigg( D_{g_1}(x^*, w_n)-\big(1-\frac{\mu}{\sigma}\big)D_{g_1}(y_n, z_n)-\big(1-\frac{\mu}{\sigma}\big)D_{g_1}(u_n, y_n)\nonumber\\&-(\gamma \xi_{1,n}-\xi_{2,n})D_{g^{*}_{1}}\big(K^{*}(\nabla_{E_2}^{g_2}(Kw_n)), K^*(\nabla_{E_2}^{g_2}(J_{\lambda B_2}^{g_2}Kw_n))\big)\bigg)\nonumber\\& + \alpha_n D_{g_1}(x^*, u)-\beta_n\delta_n\rho_{r}^{*}\big(\|\nabla_{E_1}^{g_1}(x_n)-\nabla_{E_1}^{g_1}(u_n)\|\big)\nonumber\\
	& \leq \beta_n D_{g_1}(x^*, x_n) + \delta_n(1-\theta_n)D_{g_1}(x^*, x_n) + \delta_n \theta_n D_{g_1}(x^*, x_{n-1})\nonumber\\&-\delta_n\big(1-\frac{\mu}{\sigma}\big)\big(D_{g_1}(y_n, z_n)+D_{g_1}(u_n, y_n)\big)\nonumber\\&-\delta_n(\gamma \xi_{1,n}-\xi_{2,n})D_{g^{*}_{1}}\big(K^{*}(\nabla_{E_2}^{g_2}(Kw_n)), K^*(\nabla_{E_2}^{g_2}(J_{\lambda B_2}^{g_2}Kw_n))\big)\nonumber\\&+ \alpha_n D_{g_1}(x^*, u)-\beta_n\delta_n\rho_{r}^{*}\big(\|\nabla_{E_1}^{g_1}(x_n)-\nabla_{E_1}^{g_1}(u_n)\|\big)\nonumber\\
	& \leq (1-\alpha_n-\delta_n \theta_n)D_{g_1}(x^*, x_n) + \delta_n \theta_n D_{g_1}(x^*, x_{n-1}) + \alpha_n D_{g_1}(x^*, u)\nonumber\\&-\delta_n\big(1-\frac{\mu}{\sigma}\big)\big(D_{g_1}(y_n, z_n)+D_{g_1}(u_n, y_n)\big)\nonumber\\&-\delta_n(\gamma \xi_{1,n}-\xi_{2,n})D_{g^{*}_{1}}\big(K^{*}(\nabla_{E_2}^{g_2}(Kw_n)), K^*(\nabla_{E_2}^{g_2}(J_{\lambda B_2}^{g_2}Kw_n))\big)\nonumber\\&-\beta_n\delta_n\rho_{r}^{*}\big(\|\nabla_{E_1}^{g_1}(x_n)-\nabla_{E_1}^{g_1}(u_n)\|\big)\label{3.14c}\\
	&\leq  (1-\alpha_n-\delta_n \theta_n)D_{g_1}(x^*, x_n) + \delta_n \theta_n D_{g_1}(x^*, x_{n-1}) + \alpha_n D_{g_1}(x^*, u)\nonumber\\
	&\leq \max \{D_{g_1}(x^*, x_n), D_{g_1}(x^*, x_{n-1}), D_{g_1}(x^*, u)\},~\forall~n \geq 1.
	\end{align}
	By induction, we obtain that
	\begin{align*}
	D_{g_1}(x^*, x_n) \leq \max \{D_{g_1}(x^*, x_1), D_{g_1}(x^*, x_0), D_{g_1}(x^*, u)\}.
	\end{align*}
	Hence, $\{D_{g_1}(x^*, x_n)\}$ is bounded and therefore we conclude from Lemma \ref{2.8} that $\{x_n\}$ is bounded. Consequently, $\{w_n\}, \{z_n\}, \{y_n\}$ and $\{u_n\}$ are also bounded. The rest of the proof is divided into two cases.\\
	Case A: If there exists $n_0 \in \mathbb{N}$ such that $\{D_{g_1}(x^*, x_n)\}_{n \geq n_0}$ is decreasing, then $\{D_{g_1}(x^*, x_n)\}_{n \in \mathbb{N}}$ is convergent. Thus, we have that $D_{g_1}(x^*, x_n)-D_{g_1}(x^*, x_{n+1}) \rightarrow 0$ as $n \rightarrow \infty$. Hence, from \eqref{3.14c}, we have that
	\begin{align}\label{3.16c}
	&\delta_n\big(1-\frac{\mu}{\sigma}\big)\big(D_{g_1}(y_n, z_n)+D_{g_1}(u_n, y_n)\big)\nonumber\\&+\delta_n(\gamma \xi_{1,n}-\xi_{2,n})D_{g^{*}_{1}}\big(K^{*}(\nabla_{E_2}^{g_2}(Kw_n)), K^*(\nabla_{E_2}^{g_2}(J_{\lambda B_2}^{g_2}Kw_n))\big)+\beta_n\delta_n\rho_{r}^{*}\big(\|\nabla_{E_1}^{g_1}(x_n)-\nabla_{E_1}^{g_1}(u_n)\|\big)\nonumber\\ & \leq (1-\alpha_n)D_{g_1}(x^*, x_n)-D_{g_1}(x^*, x_{n+1}) + \delta_n \theta_n \big(D_{g_1}(x^*, x_{n-1})-D_{g_1}(x^*, x_n)\big)\nonumber\\&+ \alpha_n D_{g_1}(x^*, u).
	\end{align}
	On applying conditions (i) and (ii), we obtain that
	\begin{align}\label{3.17c}
	\lim_{n \rightarrow \infty}D_{g_1}(y_n, z_n)=0=\lim_{n \rightarrow \infty}D_{g_1}(u_n, y_n).
	\end{align}
	From Lemma \ref{2.5}, we get that
	\begin{align}\label{3.18c}
	\lim_{n \rightarrow \infty}\|y_n-z_n\|=0=\lim_{n \rightarrow \infty}\|y_n-u_n\|.
	\end{align}
	Since $g_1$ is bounded and uniformly smooth on bounded subsets of $E_1$, it follows that $\nabla_{E_1}^{g_1}$ is uniformly continuous on bounded subsets of $E_1$. Thus, we conclude from \eqref{3.18c} that
	\begin{align}\label{3.19cc}
	\lim_{n \rightarrow \infty}\|\nabla_{E_1}^{g_1}(y_n)-\nabla_{E_1}^{g_1}(z_n)\|=0.
	\end{align}
	From \eqref{3.18c}, we have
	\begin{align}\label{3.19c}
	\lim_{n \rightarrow \infty}\|u_n-z_n\|=0.
	\end{align}
	Also, from \eqref{3.16c}, we have
	\begin{align}
	&\lim_{n \rightarrow \infty} \beta_n \delta_n\rho_{r}^{*}\bigg(\|\nabla_{E_1}^{g_1}(x_n)-\nabla_{E_1}^{g_1}(u_n)\|\bigg)=0\\&=\lim_{n \rightarrow \infty}\delta_n(\gamma \xi_{1,n}-\xi_{2,n})D_{g^{*}_{1}}\big(K^{*}(\nabla_{E_2}^{g_2}(Kw_n)), K^*(\nabla_{E_2}^{g_2}(J_{\lambda B_2}^{g_2}Kw_n))\big).
	\end{align}
	By Lemma \ref{2.5} and from properties of the functions $\rho_r,~ D_{g_1}^{*}$ and $K$, we have
	\begin{align}\label{3.22c}
	\lim_{n \rightarrow \infty}\|K^{*}(\nabla_{E_2}^{g_2}(Kw_n))-K^{*}(\nabla_{E_2}^{g_2}(J_{\lambda B_2}^{g_2}Kw_n))\|=0,
	\end{align}
	and
	\begin{align}\label{3.23c}
	\lim_{n \rightarrow \infty}(\|\nabla_{E_1}^{g_1}(x_n)-\nabla_{E_1}^{g_1}(u_n)\|)=0.
	\end{align}
	Employing Lemma \ref{2.5}, we arrive at
	\begin{align}\label{3.24c}
	\lim_{n \rightarrow \infty}\|Kw_n-J_{\lambda B_2}^{g_2}Kw_n\|=0,
	\end{align}
	and
	\begin{align}\label{3.25c}
	\lim_{n \rightarrow \infty}\|u_n-x_n\|=0.
	\end{align}
	In view of \eqref{3.2} and \eqref{3.22c}, we obtain that
	\begin{align}\label{3.26c}
	\lim_{n \rightarrow \infty}\|z_n-w_n\|=0.
	\end{align}
	From \eqref{3.19c} and \eqref{3.25c}, we get that
	\begin{align}\label{3.27c}
	\lim_{n \rightarrow \infty}\|u_n-x_n\|=0.
	\end{align}
	From \eqref{3.2}, it is easy to see that
	\begin{align}\label{3.28c}
	\|\nabla_{E_1}^{g_1}(x_{n+1})-\nabla_{E_1}^{g_1}(x_n)\| &\leq \alpha_n\|\nabla_{E_1}^{g_1}(u)-\nabla_{E_1}^{g_1}(x_n)\| + \delta_n\|\nabla_{E_1}^{g_1}(u_n)-\nabla_{E_1}^{g_1}(x_n)\|.
	\end{align}
	Hence, we have from \eqref{3.28c}, \eqref{3.23c} and condition (i) that
	\begin{align}\label{3.29c}
	\lim_{n \rightarrow \infty}\|\nabla_{E_1}^{g_1}(x_{n+1})-\nabla_{E_1}^{g_1}(x_n)\|=0.
	\end{align}
	Since $(\nabla_{E_1}^{g_1})^{-1}=\nabla_{E_1^*}^{g_1^*}$ is norm-to-norm uniformly continuous on bounded subsets of $E_{1}^{*},$ we have
	\begin{align}\label{3.30c}
	\lim_{n \rightarrow \infty}\|x_{n+1}-x_n\|=0.
	\end{align}
	From \eqref{3.18c} and \eqref{3.25c}, we get that
	\begin{align}\label{3.31c}
	\lim_{n \rightarrow \infty}\|y_n-x_n\|=0.
	\end{align}
	From \eqref{3.2} and \eqref{3.29c}, we obtain
	\begin{align}\label{3.32c}
	\|\nabla_{E_1}^{g_1}(w_n)-\nabla_{E_1}^{g_1}(x_n)\|= \theta_n\|\nabla_{E_1}^{g_1}(x_{n-1})-\nabla_{E_1}^{g_1}(x_n)\| \rightarrow 0,~ \text{as}~n \rightarrow \infty.
	\end{align}
	Using the fact that $(\nabla_{E_1}^{g_1})^{-1}=\nabla_{E_1^*}^{g_1^*}$ is norm-to-norm uniformly continuous on bounded subsets of $E_1^{*}$, we have 
	\begin{align}\label{3.33c}
	\lim_{n \rightarrow \infty}\|w_n-x_n\|=0.
	\end{align}
	Lastly, with \eqref{3.26c} and \eqref{3.33c}, we arrive at
	\begin{align}\label{3.34c}
	\lim_{n \rightarrow \infty}\|z_n-x_n\|=0.
	\end{align}
	Since $\{x_n\}_{n \in \mathbb{N}}$ is bounded and $E_1$ is reflexive, there exists a subsequence $\{x_{n_j}\}_{j \in \mathbb{N}}$ of $\{x_n\}_{n \in \mathbb{N}}$ which converges weakly to some $z \in E_1$. Also, from \eqref{3.27c}, \eqref{3.31c}, \eqref{3.33c} and \eqref{3.34c}, the corresponding subsequences $\{u_{n_j}\}_{j \in \mathbb{N}}$ of $\{u_n\}_{n \in \mathbb{N}},$ $\{y_{n_j}\}_{j \in \mathbb{N}}$ of $\{y_n\}_{n \in \mathbb{N}}$, $\{w_{n_j}\}_{j \in \mathbb{N}}$ of $\{w_n\}_{n \in \mathbb{N}}$ and $\{z_{n_j}\}_{j \in \mathbb{N}}$ of $\{z_n\}_{n \in \mathbb{N}}$ also converge weakly to $z$. Hence, from \eqref{3.24c} and the demiclosedness principle, we have that $J_{\lambda B_2}^{g_2}(Kz)=Kz,$ and therefore $Kz \in B_2^{-1}(0).$ Next, we show that $z \in (A_1+B_1)^{-1}(0)$. Let $(v,w) \in G(A_1 + B_1)$; then $w-A_1v \in B_1v$. From the definition of $y_n,$ we observe that
	\begin{align*}
	\nabla_{E_1}^{g_1}(z_n)-\lambda_{n}A_1z_n \in \nabla_{E_1}^{g_1}(y_n) + \lambda_{n}B_1y_n,
	\end{align*}
	or equivalently
	\begin{align*}
	\frac{1}{\lambda_{n}}( \nabla_{E_1}^{g_1}(z_n)-\nabla_{E_1}^{g_1}(y_n)-\lambda_{n}A_1z_n) \in B_1y_n.
	\end{align*}
	By the maximal monotonicity of $B_1$, we get
	\begin{align*}
	\langle v-y_n, w-A_1v - \frac{1}{\lambda_{n}}(\nabla_{E_1}^{g_1}(z_n)-\nabla_{E_1}^{g_1}(y_n)-\lambda_{n} A_1z_n)\rangle \geq 0.
	\end{align*}
	Also, from the monotonicity of $A_1$, we have
	\begin{align}\label{3.35c}
	\langle v-y_n, w\rangle & \geq \big\langle v-y_n, A_1v + \frac{1}{\lambda_{n}}(\nabla_{E_1}^{g_1}(z_n)-\nabla_{E_1}^{g_1}(y_n)-\lambda_{n}A_1z_n)\big\rangle \nonumber\\
	& =\langle v-y_n, A_1v-A_1z_n\rangle + \frac{1}{\lambda_{n}}\langle v-y_n, \nabla_{E_1}^{g_1}(z_n)-\nabla_{E_1}^{g_1}(y_n)\rangle\nonumber\\
	&=\langle v-y_n, A_1v-A_1y_n\rangle + \langle v-y_n, A_1y_n-A_1z_n\rangle\nonumber\\& + \frac{1}{\lambda_{n}}\langle v-y_n, \nabla_{E_1}^{g_1}(z_n)-\nabla_{E_1}^{g_1}(y_n)\rangle\nonumber\\
	& \geq \langle v-y_n, A_1y_n-A_1z_n\rangle+ \frac{1}{\lambda_{n}}\langle v-y_n, \nabla_{E_1}^{g_1}(z_n)-\nabla_{E_1}^{g_1}(y_n)\rangle. 
	\end{align}
	Since $A_1$ is Lipschitz continuous and $y_{n_j}\rightharpoonup z$, it follows from \eqref{3.18c} and \eqref{3.19cc} that
	\begin{align*}
	\langle v-z,w\rangle \geq 0.
	\end{align*}
	By the maximal monotonicity of $A_1 + B_1,$ we get $0 \in (A_1+B_1)z,$ that is, $z \in (A_1+B_1)^{-1}(0).$ Hence $z \in \Omega$.\\ Next, we show that $\{x_n\}$ converges strongly to $x^*$, where from now on we take $x^*=P_{\Omega}^{g_1}u.$\\ From Lemma \ref{2.9}(i), we have
	\begin{align}
	\limsup_{n\rightarrow \infty}\langle x_n-x^*, \nabla_{E_1}^{g_1}(u)-\nabla_{E_1}^{g_1}(x^*)\rangle &=\lim_{j\rightarrow \infty}\langle x_{n_j}-x^*, \nabla_{E_1}^{g_1}(u)-\nabla_{E_1}^{g_1}(x^*)\rangle\nonumber\\
	&=\langle z-x^*, \nabla_{E_1}^{g_1}(u)-\nabla_{E_1}^{g_1}(x^*)\rangle\nonumber\\
	& \leq 0,
	\end{align}
	and hence from \eqref{3.30c}, we obtain 
	\begin{align}\label{3.37c}
	\limsup_{n\rightarrow \infty}\langle x_{n+1}-x^*, \nabla_{E_1}^{g_1}(u)-\nabla_{E_1}^{g_1}(x^*)\rangle \leq 0.
	\end{align}
	Using Lemma \ref{2.3}, \eqref{3.10c}, \eqref{3.12c} and \eqref{3.13c}, we obtain
	\begin{align}\label{3.38c}
	D_{g_1}(x^*, x_{n+1}) & = D_{g_1}\bigg(x^*, (\nabla_{E_1}^{g_1})^{-1}(\beta_n \nabla_{E_1}^{g_1}(x_n)+ \delta_n \nabla_{E_1}^{g_1}(u_n)+ \alpha_n \nabla_{E_1}^{g_1}(u))\bigg)\nonumber\\
	& \leq V_{g_1}\big(x^*, \beta_n \nabla_{E_1}^{g_1}(x_n)+ \delta_n \nabla_{E_1}^{g_1}(u_n)+ \alpha_n \nabla_{E_1}^{g_1}(u)-\alpha_n(\nabla_{E_1}^{g_1}(u)-\nabla_{E_1}^{g_1}(x^*))\big)\nonumber\\& + \alpha_n \langle x_{n+1}-x^*, \nabla_{E_1}^{g_1}(u)-\nabla_{E_1}^{g_1}(x^*)\rangle\nonumber\\
	& \leq \beta_n D_{g_1}(x^*, x_n) + \delta_n D_{g_1}(x^*, u_n) + \alpha_n \langle x_{n+1}-x^*, \nabla_{E_1}^{g_1}(u)-\nabla_{E_1}^{g_1}(x^*)\rangle\nonumber\\
	& \leq \beta_n D_{g_1}(x^*, x_n) + \delta_n D_{g_1}(x^*, w_n) +\alpha_n \langle x_{n+1}-x^*, \nabla_{E_1}^{g_1}(u)-\nabla_{E_1}^{g_1}(x^*)\rangle\nonumber\\
	& \leq \beta_n D_{g_1}(x^*, x_n) + \delta_n \big((1-\theta_n)D_{g_1}(x^*, x_n) + \theta_n D_{g_1}(x^*, x_{n-1})\big)\nonumber\\& + \alpha_n \langle x_{n+1}-x^*, \nabla_{E_1}^{g_1}(u)-\nabla_{E_1}^{g_1}(x^*)\rangle\nonumber\\
	&\leq (1-\alpha_n-\delta_n \theta_n)D_{g_1}(x^*, x_n) + \delta_n \theta_n D_{g_1}(x^*, x_{n-1})\nonumber\\& + \alpha_n \langle x_{n+1}-x^*, \nabla_{E_1}^{g_1}(u)-\nabla_{E_1}^{g_1}(x^*)\rangle.
	\end{align}
	By applying \eqref{3.37c}, \eqref{3.38c} and Lemma \ref{2.9a}, we have that $x_n \rightarrow x^*$.\\
	Case B: Assume that $\{D_{g_1}(x^*, x_n)\}$ does not decrease at infinity. Set $\Gamma_n:=D_{g_1}(x^*, x_n)$, as in Lemma \ref{2.10}, and let $\tau: \mathbb{N} \rightarrow \mathbb{N}$ be the mapping defined for all $n \geq n_0$ (for some $n_0$ large enough) by
	\begin{align*}
	\tau(n):=\max \{k \in \mathbb{N}: k \leq n, \Gamma_{k}\leq \Gamma_{k+1}\}.
	\end{align*}
	Then $\{\tau(n)\}$ is a non-decreasing sequence such that $\tau(n) \rightarrow \infty$ as $n \rightarrow \infty$. Thus
	\begin{align*}
	0 < \Gamma_{\tau(n)} \leq \Gamma_{\tau(n)+1},~\forall~n \geq n_0,
	\end{align*}
	this implies that
	\begin{align*}
	D_{g_1}(x^*, x_{\tau(n)}) \leq D_{g_1}(x^*, x_{\tau(n)+1}),~n > n_0.
	\end{align*}
	Since $\{D_{g_1}(x^*, x_{\tau(n)})\}$ is bounded, the following estimates can be obtained using the same arguments as in Case A above:
	\begin{align}\label{3.39c}
	\begin{cases}
	\lim\limits_{n \rightarrow \infty}\|y_{\tau(n)}-z_{\tau(n)}\|=0,\\
	\lim\limits_{n \rightarrow \infty}\|Kw_{\tau(n)}-J_{\lambda B_2}^{g_2}Kw_{\tau(n)}\|=0,\\
	\lim\limits_{n \rightarrow \infty}\|z_{\tau(n)}-x_{\tau(n)}\|=0,\\
	\lim\limits_{n \rightarrow \infty}\|w_{\tau(n)}-x_{\tau(n)}\|=0,\\
	\limsup\limits_{n\rightarrow \infty}\langle x_{\tau(n)}-x^*,\nabla_{E_1}^{g_1}(u)-\nabla_{E_1}^{g_1}(x^*)\rangle \leq 0. 
	\end{cases}
	\end{align}
	From \eqref{3.38c} and $\Gamma_{\tau(n)} \leq \Gamma_{\tau(n)+1},$~we have
	\begin{align*}
	D_{g_1}(x^*, x_{\tau(n)}) &\leq (1-\alpha_{\tau(n)})D_{g_1}(x^*, x_{\tau(n)}) + \delta_{\tau(n)}\theta_{\tau(n)}\big(D_{g_1}(x^*, x_{\tau(n)-1})-D_{g_1}(x^*, x_{\tau(n)})\big) \nonumber\\& + \alpha_{\tau(n)}\langle x_{\tau(n)+1}-x^*, \nabla_{E_1}^{g_1}(u)-\nabla_{E_1}^{g_1}(x^*)\rangle,
	\end{align*}
	and hence
	\begin{align*}
	\lim_{n \rightarrow \infty} \Gamma_{\tau(n)}=\lim_{n \rightarrow \infty}\Gamma_{\tau(n)+1}=0,
	\end{align*}
	for all $n \geq n_0$. Moreover, if $n \neq \tau(n)$ (that is, $\tau(n) < n$), then $\Gamma_{k+1} \leq \Gamma_{k}$ for $\tau(n) < k \leq n$, so that $\Gamma_n \leq \Gamma_{\tau(n)+1}$. This gives, for all $n \geq n_0$,
	\begin{align*}
	0 < \Gamma_n \leq \max \{\Gamma_{\tau(n)}, \Gamma_{\tau(n)+1}\}=\Gamma_{\tau(n)+1}.
	\end{align*}
	This implies that $\lim\limits_{n \rightarrow \infty}\Gamma_n=0$, which yields $\lim\limits_{n \rightarrow \infty}D_{g_1}(x^*, x_n)=0.$ Hence, $x_n \rightarrow x^*=P_{\Omega}^{g_1}u$ as $n \rightarrow \infty.$
\end{proof}
\begin{remark}
	Our main result improves and generalizes the main results of \cite{Izu, Jai, Oye, Shes, Takah} in the following ways:
	\begin{enumerate}
		\item[(i)] We extend Theorem 3.1 of \cite{Shes} from 2-uniformly convex Banach spaces which are uniformly smooth to reflexive Banach spaces, and we also extend the results of \cite{Izu, Jai,Takah} from real Hilbert spaces to reflexive Banach spaces.
		\item[(ii)] We relax the strict assumption on the mapping $A$ in \cite{Izu, Jai, Oye} to the weaker assumption that $A$ is a monotone and $L$-Lipschitz continuous mapping.
	\end{enumerate}
\end{remark}

\section{Numerical Examples}
\noindent In this section, we present two examples to illustrate the performance of our main result.
\begin{example}\label{ex1}
	This is an implementation of our result in an infinite-dimensional Hilbert space, with an application to the split feasibility problem. Let $C$ and $Q$ be nonempty, closed and convex subsets of real Hilbert spaces $H_1$ and $H_2$, respectively. Let $K: H_1 \rightarrow H_2$ be a bounded linear operator with adjoint $K^{*}$, and let $\varTheta$ denote the solution set of \eqref{1.3}. Let $H_1=H_2=L_2([0,1])$ with norm
	\begin{align*}
	\|x\|_{2} =\bigg(\int_{0}^{1}|x(t)|^{2} dt\bigg)^{\frac{1}{2}},
	\end{align*}
	and inner product
	\begin{align*}
	\langle x, y\rangle =\int_{0}^{1} x(t) y(t) dt,
	\end{align*}
	for all $x, y \in L_2([0,1]).$\\ Now, let
	\begin{align*}
	C=\{x \in L_2([0,1]):\|x\| \leq 1\},
	\end{align*}
	and
	\begin{align*}
	Q=\{x \in L_2([0,1]): \langle \frac{t}{2}, x\rangle=0\}.
	\end{align*}
	Let $K: L_2([0,1]) \rightarrow L_2([0,1])$ be the mapping defined by $(Kx)(t)=\frac{x(t)}{3}$ for all $x \in L_2([0,1])$. Then $(K^*x)(t)=\frac{x(t)}{3}$ and $\|K\|=\frac{1}{3}$. We see that $\varTheta \neq \emptyset$, since $x^*(t)=0$ is a solution. We define $A_1(x)=\nabla\bigg(\frac{1}{2}\|Kx-P_{Q}Kx\|^2\bigg)=K^{*}(I-P_{Q})Kx$, $B_1(x)=N_C(x)$ and $B_2(x)=N_Q(x)$ for all $x \in L_2([0,1])$. For our algorithm, we take $\alpha_n=\frac{1}{12n+3}$, $\beta_n=\frac{8n+1}{12n+3}$, $\delta_n=\frac{4n+1}{12n+3}$, $\gamma=0.002$, $l=0.0001$, $\mu=0.03$ and $\theta_n=\frac{1}{4}$. We present the results of this experiment in Figure \ref{fig1}, using the stopping criterion $\|x_{n+1}-x_n\|_2 < 10^{-4}$ and the following choices of the initial values $x_0$ and $x_1$ (a discretized sketch of this setup appears after the list):
	\begin{itemize}
		\item[(I)] $x_0=t^{\frac{2}{3}}+11t$ and $x_1=t;$ 
		\item[(II)] $x_0=2t$ and $x_1=\cos t;$
		\item[(III)] $x_0=-2t+5$ and $x_1=t+1;$
		\item[(IV)] $x_0=2t$ and $x_1=\frac{7t^2}{11};$
	\end{itemize}
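	\noindent For reproducibility, the following is a minimal Python sketch of the discretized setup, assuming a uniform grid on $[0,1]$ and a simple Riemann-sum quadrature; the grid size and quadrature rule are illustrative choices rather than part of our method. In this Hilbert space setting with $g=\frac{1}{2}\|\cdot\|^2$, the resolvents of $B_1=N_C$ and $B_2=N_Q$ reduce to the metric projections $P_C$ and $P_Q$.
\begin{verbatim}
import numpy as np

# Uniform grid on [0,1]; L2 inner products approximated by Riemann sums.
m = 1000
t = np.linspace(0.0, 1.0, m)
dt = t[1] - t[0]

def ip(x, y):                 # inner product <x, y> in L2([0,1])
    return float(np.sum(x * y) * dt)

def norm(x):
    return np.sqrt(ip(x, x))

a = t / 2.0                   # Q = {x : <t/2, x> = 0}

def P_C(x):                   # metric projection onto the closed unit ball C
    nx = norm(x)
    return x if nx <= 1.0 else x / nx

def P_Q(x):                   # metric projection onto the hyperplane Q
    return x - (ip(a, x) / ip(a, a)) * a

def K(x):                     # (Kx)(t) = x(t)/3; self-adjoint with ||K|| = 1/3
    return x / 3.0

def A1(x):                    # A_1 = K*(I - P_Q)K, with K* = K here
    Kx = K(x)
    return K(Kx - P_Q(Kx))

x0 = t ** (2.0 / 3.0) + 11.0 * t    # Case (I) initial data
x1 = t.copy()
\end{verbatim}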
	\begin{figure}[h!]
		\begin{center}		\includegraphics[width=6.0cm]{aba1.pdf} %
			\includegraphics[width=6.0cm]{aba2.pdf} %
			\includegraphics[width=6.0cm]{aba3.pdf}%
			\includegraphics[width=6.0cm]{aba4.pdf}
		\end{center}
		\caption{Example \protect \ref{ex1}. Top left: Case I; top right: Case II; bottom left: Case III; bottom right: Case IV. \label{fig1}}
	\end{figure}
\end{example}

\begin{example}\label{ex2}
	Let $E_1=E_2=E=\mathbb{R}^2$ be the two-dimensional Euclidean space with inner product $\langle \cdot,\cdot \rangle : \mathbb{R}^2 \times \mathbb{R}^2 \to \mathbb{R}$ defined by $\langle x,y \rangle =x \cdot y=x_1y_1+x_2y_2$ for $x=(x_1,x_2),\ y=(y_1,y_2) \in \mathbb{R}^2$, and the usual norm $\|\cdot \|: \mathbb{R}^2 \to \mathbb{R}$ defined by $\|x\|=(x_{1}^2+x_{2}^2)^{\frac{1}{2}}$ for $x=(x_1,x_2) \in \mathbb{R}^2.$ Let $B_1 : \mathbb{R}^2 \to \mathbb{R}^2$ and $B_2 : \mathbb{R}^2 \to \mathbb{R}^2$ be defined respectively by
	\begin{align*}
	B_1=
	\begin{pmatrix}
	1 & 2\\
	0 & 1
	\end{pmatrix},
	B_2=
	\begin{pmatrix}
	1 & 2\\
	2 & 5
	\end{pmatrix}.
	\end{align*}
	Since $\langle B_1x, x\rangle=(x_1+x_2)^2 \geq 0$ for all $x=(x_1,x_2)$ and $B_2$ is symmetric positive definite, both operators are monotone; being continuous and linear, they are maximal monotone. Also, let $A_1: \mathbb{R}^2 \to \mathbb{R}^2$ be defined by
	\begin{align*}
	A_1(x)=
	\begin{pmatrix}
	3 & 0\\
	0 & 3
	\end{pmatrix}
	\begin{pmatrix}
	x_1\\
	x_2
	\end{pmatrix}.
	\end{align*}
	
	\noindent Now, define $h_i: \mathbb{R} \to (-\infty,+\infty]$ by $h_i(x)=\frac{x^2}{2}$ for $i=1,2,$ then $\nabla h_i(x)=x.$ We also define $g_1=g_2=g$ by
	\[g: \mathbb{R}^2 \to (-\infty,+\infty], \quad g(x)=h_1(x_1)+h_2(x_2)=\frac{x_{1}^{2}}{2}+\frac{x_{2}^{2}}{2},\quad x=(x_1,x_2). \]
	Therefore, we have
	\[\nabla g(x)=(\nabla h_1(x_1),\nabla h_2(x_2))=(x_1,x_2)=\begin{pmatrix}
	1 & 0\\
	0 & 1
	\end{pmatrix}\begin{pmatrix}
	x_1 \\ x_2
	\end{pmatrix} .\]
	For $\lambda > 0$, we compute the resolvents of $B_1$ and $B_2$. Since $\nabla g_1 = \nabla g_2 = I$, we have
	\begin{align*}
	\nabla g_1 + \lambda B_1=
	\begin{pmatrix}
	1+\lambda & 2\lambda\\
	0 & 1+\lambda
	\end{pmatrix}, \quad J_{\lambda B_1}^{g_1}=(\nabla g_1 + \lambda B_1 )^{-1}\circ \nabla g_1=\dfrac{1}{(1+\lambda)^2}\begin{pmatrix}
	1+\lambda & -2\lambda\\
	0 & 1+\lambda
	\end{pmatrix}
	\end{align*}
	and
	\begin{align*}
	\nabla g_2 + \lambda B_2=
	\begin{pmatrix}
	1+\lambda & 2 \lambda\\
	2\lambda & 1+ 5\lambda
	\end{pmatrix}, \quad J_{\lambda B_2}^{g_2}=(\nabla g_2 + \lambda B_2 )^{-1}\circ \nabla g_2=\dfrac{1}{1+6\lambda +\lambda^2}\begin{pmatrix}
	1+5\lambda & -2\lambda\\
	-2\lambda & 1+\lambda 
	\end{pmatrix}.
	\end{align*}
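	\noindent As a sanity check on these closed-form resolvents, the following short Python snippet compares them with a direct matrix inversion of $I+\lambda B_i$; the value $\lambda=0.5$ is an arbitrary illustrative choice.
\begin{verbatim}
import numpy as np

lam = 0.5                                  # any lambda > 0; illustrative choice
I2 = np.eye(2)
B1 = np.array([[1.0, 2.0], [0.0, 1.0]])
B2 = np.array([[1.0, 2.0], [2.0, 5.0]])

# With g(x) = (x1^2 + x2^2)/2 we have grad g = I, so the resolvent
# (grad g + lam*B)^{-1} o grad g reduces to the matrix (I + lam*B)^{-1}.
J1 = np.linalg.inv(I2 + lam * B1)
J2 = np.linalg.inv(I2 + lam * B2)

# Closed forms computed above.
J1_closed = np.array([[1 + lam, -2 * lam],
                      [0.0,     1 + lam]]) / (1 + lam) ** 2
J2_closed = np.array([[1 + 5 * lam, -2 * lam],
                      [-2 * lam,    1 + lam]]) / (1 + 6 * lam + lam ** 2)

print(np.allclose(J1, J1_closed), np.allclose(J2, J2_closed))  # True True
\end{verbatim}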
	Let the operator $K : \mathbb{R}^2 \to \mathbb{R}^2$ be defined by $K(x)=(2x_1-x_2,\,x_1+2x_2)$ for all $x=(x_1,x_2) \in \mathbb{R}^2$; its adjoint $K^*: \mathbb{R}^2 \to \mathbb{R}^2$ is then given by $K^*(y)=(2y_1+y_2,\,-y_1+2y_2)$ for all $y=(y_1,y_2) \in \mathbb{R}^2.$ For this experiment, we choose the parameters $\alpha_n=\frac{3n}{4n^2+5n+3}$, $\beta_n=\frac{n^2+3}{4n^2+5n+3}$, $\delta_n=\frac{3n^2+2n}{4n^2+5n+3}$, $\gamma=0.002$, $l=0.0001$, $\mu=0.03$ and $\theta_n=\frac{1}{4}$. For $u=0.1$ and initial values of $x_0$ and $x_1$, we report our test for the following cases in Figure \ref{fig2}, using the stopping criterion $\|x_{n+1}-x_n\| < 10^{-5}$ (a numerical check of the adjoint appears after the list).
	\begin{itemize}
		\item[Case 1] $x_0=[5,-5]$ and $x_1=[3,5];$
		\item[Case 2]  $x_0=[-5,-5]$ and $x_1=[10,10];$
		\item[Case 3] $x_0=[10,10]$ and $x_1=[20,20];$
		\item[Case 4] $x_0=[10,-5]$ and $x_1=[5,15].$
	\end{itemize}
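	\noindent Since the matrix representing $K$ is not symmetric, its adjoint is given by the transpose. The following minimal Python check verifies the defining identity $\langle Kx, y\rangle = \langle x, K^*y\rangle$ for sample vectors; the random seed is an illustrative choice.
\begin{verbatim}
import numpy as np

K = np.array([[2.0, -1.0],
              [1.0,  2.0]])       # K(x) = (2x1 - x2, x1 + 2x2)
Kstar = K.T                       # K*(y) = (2y1 + y2, -y1 + 2y2)

rng = np.random.default_rng(0)
x = rng.standard_normal(2)
y = rng.standard_normal(2)

# Adjoint identity <Kx, y> = <x, K*y> holds up to rounding error.
print(np.isclose(np.dot(K @ x, y), np.dot(x, Kstar @ y)))   # True
\end{verbatim}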
	\begin{figure}[h!]
		\begin{center}		\includegraphics[width=6.0cm]{abat1.pdf} %
			\includegraphics[width=6.0cm]{abat2.pdf} %
			\includegraphics[width=6.0cm]{abat3.pdf}%
			\includegraphics[width=6.0cm]{abat4.pdf}
		\end{center}
		\caption{Example \protect \ref{ex2}. Top left: Case 1; top right: Case 2; bottom left: Case 3; bottom right: Case 4. \label{fig2}}
	\end{figure}
	
\end{example} 
\noindent {\bf Conflict of Interest:} The authors declare that there is no conflict of interest.\\\\
\noindent {\bf Acknowledgement:} The first author acknowledges with thanks the bursary and financial support of the Department of Science and Innovation and National Research Foundation, Republic of South Africa, Center of Excellence in Mathematical and Statistical Sciences (DSI-NRF CoE-MaSS) Post-Doctoral Fellowship.
Opinions expressed and conclusions arrived at are those of the authors and are not necessarily to be attributed to the CoE-MaSS. We would like to thank Professor Vasile Berinde for his suggestions to improve our article. We would also like to thank Dr. K. O. Oyewole for his contributions to our graphical representations.
\begin{thebibliography}{111}
	\bibitem{AB} {\rm H. A. Abass, C. Izuchukwu, O. T. Mewomo, Q. L. Dong}, Strong convergence of an inertial forward-backward splitting method for accretive operators in real Banach space, {\it Fixed Point Theory}, {\bf 20}, no. 2, (2020), 397-412.
	
	\bibitem{AC} {\rm H. A. Abass, K. O. Aremu, L. O. Jolaoso and O.T. Mewomo}, An inertial forward-backward splitting method for approximating solutions of certain optimization problem, {\it J. Nonlinear Funct. Anal.} 2020, (2020), Article ID 6.
	
	\bibitem{HAM12} {\rm H. A. Abass, G. C. Godwin, O. K. Narain and V. Darvish}, Inertial extragradient method for solving variational inequality and fixed point problems of a Bregman demigeneralized mapping in reflexive Banach spaces, {\it Numer. Funct. Anal. Optim.}, (2022), 1-28.
	
	\bibitem{AA} {\rm H. A. Abass, A. A. Mebawondu, O. K. Narain and J. K. Kim}, Outer approximation method for zeros of sum of monotone operators and fixed point problems in Banach spaces, {\it Nonlinear Funct. Anal. and Appl.}, {\bf 26}, no. 3, (2021), 451-474.
	
	\bibitem{Af} {\rm K. Afassinou, O. K. Narain and O. E. Otunuga}, Iterative algorithm for approximating solutions of split monotone variational inclusion, variational inequality and fixed point problems in real Hilbert spaces, {\it Nonlinear Funct. Anal. and Appl.}, {\bf 25}, no. 3, (2020), 491-510.
	\bibitem{Ans} {\rm Q. H. Ansari and A. Rehan}, Iterative methods for generalized split feasibility problems in Banach spaces, {\it Carpathian J. Math.}, {\bf 33}, no. 1, (2017), 9-26.
	\bibitem{Bar} {\rm V. Barbu}, Nonlinear differential equations of monotone types in Banach spaces, {\it Springer}, New York, (2010).
	\bibitem{Bauc} {\rm H. H. Bauschke and J. M. Borwein}, Legendre functions and the method of random Bregman projections, {\it J. Convex Anal.}, {\bf 4}, (1997), 27-67.
	\bibitem{Bau} {\rm H. H. Bauschke, J. M. Borwein and P. L. Combettes}, Essential smoothness, essential strict convexity, and Legendre functions in Banach spaces, {\it Commun. Contemp. Math.}, {\bf 3}, (2001), 615-647.
	\bibitem{Bello} {\rm J. Y. Bello and Y. Shehu}, An iterative method for split inclusion problem without prior knowledge of operator norm, {\it J. Fixed Point Theory Appl.}, {\bf 19}, no. 3, (2017).
	\bibitem{Ogb} {\rm F. U. Ogbuisi and O. T. Mewomo}, Iterative solution of split variational inclusion problem in real Banach spaces, {\it Afr. Mat.}, {\bf 28}, (2017), 295-309.
	\bibitem{Breg} {\rm L. M. Bregman}, The relaxation method of finding the common point of convex sets and its application to the solution of problems in convex programming, {\it U.S.S.R. Comput. Math. Math. Phys.}, {\bf 7}, (1967), 200-217.
	\bibitem{Bry} {\rm C. Byrne}, Iterative oblique projection onto convex sets and the split feasibility problem, {\it Inverse Probl.}, {\bf 18}, (2002), 441-453.
	\bibitem{Bryne} {\rm C. Byrne, Y. Censor, A. Gibali and S. Reich}, The split common null point problem, {\it J. Nonlinear Convex Anal.}, {\bf 13}, (2012), 759-775.
	\bibitem{Bur} {\rm J. M. Borwein, S. Reich and S. Sabach}, A characterization of Bregman firmly nonexpansive operators using a new monotonicity concept, {\it J. Nonlinear Convex Anal.}, {\bf 12}, (2011), 161-184.
	\bibitem{Bu} {\rm D. Butnariu and A. N. Iusem}, Totally convex functions for fixed points computation and infinite dimensional optimization, {\it Kluwer Academic Publishers}, Dordrecht (2000).
	\bibitem{But} {\rm D. Butnariu and E. Resmerita}, Bregman distances, totally convex functions and a method for solving operator equations in Banach spaces, {\it Abstr. Appl. Anal.}, {\bf 2006}, (2006), Art. ID 84919, 1-39.
	\bibitem{Cens} {\rm Y. Censor and A. Segal}, The split common fixed point problem for directed operators, {\it J. Convex Anal.}, {\bf 16}, no. 2, (2009), 587-600.
	\bibitem{Cen} {\rm Y. Censor and T. Elfving}, A multiprojection algorithm using Bregman projections in a product space, {\it Numer. Algor.}, {\bf 8}, (1994), 221-239.
	\bibitem{Cho} {\rm P. Cholamjiak and P. Sunthrayuth}, A Halpern-type iteration for solving the split feasibility problem and fixed point problem of Bregman relatively nonexpansive semigroup in Banach spaces, {\it Filomat}, {\bf 32}, no. 9, (2018), 3211-3227.
	\bibitem{Gaz} {\rm H. Gazmeh and E. Naraghirad}, The split common null point problem for Bregman generalized resolvents in two Banach spaces, {\it Optimization}, (2020), DOI:10.1080/02331934.2020.1751157.
	\bibitem{Ii} {\rm H. Iiduka}, Acceleration method for convex optimization over the fixed point set of a nonexpansive mapping, {\it Math. Prog. Series A}, {\bf 149}, (2015), 131-165.
	\bibitem{Izu} {\rm C. Izuchukwu, C. C. Okeke and F. O. Isiogugu}, A viscosity iterative technique for split variational inclusion and fixed point problems between a Hilbert space and a Banach space, {\it J. Fixed Point Theory Appl.}, {\bf 20}, (2018), 157.
	\bibitem{Jai} {\rm P. Jailoka and S. Suantai}, Split null point problems for demicontractive multivalued mappings, {\it Mediterr. J. Math.}, {\bf 15}, (2018), 1-19.
	\bibitem{Kazi} {\rm K. R. Kazmi, R. Ali and S. Yousuf}, Generalized equilibrium and fixed point problems for Bregman relatively nonexpansive mappings in Banach spaces, {\it J. Fixed Point Theory Appl.}, {\bf 20}, (2018), 151.
	\bibitem{var1} {\rm D. Kinderlehrer and G. Stampacchia}, An Introduction to Variational Inequalities and Their Applications, {\it Society for Industrial and Applied Mathematics}, Philadelphia, (2000).
	\bibitem{Lion} {\rm P. L. Lions and B. Mercier}, Splitting algorithms for the sum of two nonlinear operators, {\it SIAM J. Numer. Anal.}, {\bf 16}, (1979), 964-979.
	\bibitem{Maing} {\rm P. E. Maing\'e}, The viscosity approximation process for quasi-nonexpansive mappings in Hilbert spaces, {\it Comput. Math. Appl.}, {\bf 59}, (2010), 74-79.
	\bibitem{Marq} {\rm V. Mart\'in-M\'arquez, S. Reich and S. Sabach}, Bregman strongly nonexpansive operators in reflexive Banach spaces, {\it J. Math. Anal. Appl.}, {\bf 400}, (2013), 597-614.
	\bibitem{Moudafi} {\rm A. Moudafi}, A note on the split common fixed-point problem for quasi-nonexpansive operators, {\it Nonlinear Anal.}, {\bf 74}, (2011), 4083-4087.
	\bibitem{Chidu} {\rm F. U. Ogbuisi and C. Izuchukwu}, Approximating a zero of sum of two monotone operators which solves a fixed point problem in reflexive Banach spaces, {\it Numer. Funct. Anal. Optim.}, {\bf 40}, no. 13, (2019), DOI:10.1080/01630563.2019.1628050.
	\bibitem{Og} {\rm F. U. Ogbuisi and O. T. Mewomo}, Iterative solution of split variational inclusion problem in real Banach spaces, {\it Afr. Mat.}, (2017), {\bf 28}, 295-309.
	\bibitem{Ok} {\rm C. C. Okeke and C. Izuchukwu}, Strong convergence theorem for split feasibility problems and variational inclusion problems in real Banach spaces, {\it Rend. Circ. Mat. Palermo, II. Ser.}, (2020), DOI:10.1007/s12215-020-00508-3.
	\bibitem{Oye} {\rm O. K. Oyewole, H. A. Abass and O. T. Mewomo}, A strong convergence algorithm for a fixed point constraint split null point problem, {\it Rend. Circ. Mat. Palermo, II. Ser.}, (2020), 1-20.
	\bibitem{Rei} {\rm S. Reich and S. Sabach}, A strong convergence theorem for a proximal-type algorithm in reflexive Banach spaces, {\it J. Nonlinear Convex Anal.}, {\bf 10}, (2009), 471-485.
	\bibitem{Rock} {\rm R. T. Rockafellar}, On the maximality of sums of nonlinear monotone operators, {\it Trans. Amer. Math. Soc.}, {\bf 149}, (1970), 75-88.
	\bibitem{Rocky} {\rm R. T. Rockafellar}, Characterization of the subdifferentials of convex functions, {\it Pac. J. Math.}, {\bf 17}, (1966), 497-510.
	\bibitem{Reich} {\rm S. Reich and S. Sabach}, Two strong convergence theorems for a proximal method in reflexive Banach spaces, {\it Numer. Funct. Anal. Optim.}, {\bf 31}, (2010), 24-44.
	\bibitem{3} {\rm S. Reich and S. Sabach}, Iterative methods for solving systems of variational inequalities in reflexive Banach spaces, {\it J. Nonlinear Convex Anal.}, {\bf 10}, (2009), 471-485.
	\bibitem{Sch} {\rm F. Sch\"opfer, T. Schuster and A. K. Louis}, An iterative regularization method for the solution of the split feasibility problem in Banach spaces, {\it Inverse Probl.}, {\bf 24}, no. 5, (2008), 055008.
	\bibitem{Shehu} {\rm Y. Shehu, F. U. Ogbuisi and O. S. Iyiola}, Convergence analysis of an iterative algorithm for fixed point problems and split feasibility problems in certain Banach spaces, {\it Optimization}, {\bf 65}, (2016), 299-323.
	\bibitem{Shes} {\rm Y. Shehu}, Convergence results of forward-backward algorithms for sum of monotone operators in Banach spaces, {\it Results Math.}, {\bf 74}, (2019), 138.
	\bibitem{15} {\rm Y. Shehu and F. U. Ogbuisi}, Approximation of common fixed points of left Bregman strongly nonexpansive mappings and solutions of equilibrium problems, {\it J. Appl. Anal.}, {\bf 21}, no. 2, (2015), 63-77. DOI: 10.1515/jaa-2015-0007.
	\bibitem{She} {\rm Y. Shehu and F. U. Ogbuisi}, An iterative method for solving split monotone variational inclusion and fixed point problem, {\it RACSAM}, {\bf 110}, (2016), 503-518.
	\bibitem{Sunt} {\rm P. Sunthrayuth, N. Pholasa and P. Cholamjiak}, Mann-type algorithms for solving the monotone inclusion problem and the fixed point problem in reflexive Banach spaces, {\it Ricerche di Matematica}, (2021), 1-28.
	\bibitem{Takah} {\rm S. Takahashi and W. Takahashi}, The split common null point problem and the shrinking projection method in Banach spaces, {\it Optimization}, {\bf 65}, no. 2, (2016), 281-287.
	\bibitem{Tie} {\rm J. van Tiel}, Convex Analysis: An Introductory Text, {\it Wiley}, New York, (1984).
	\bibitem{Tim} {\rm S. Timnak, E. Naraghirad and N. Hussain}, Strong convergence of Halpern iteration for products of finitely many resolvents of maximal monotone operators in Banach spaces, {\it Filomat}, {\bf 31}, no. 15, (2017), 4673-4693.
	\bibitem{Tsn} {\rm P. Tseng}, A modified forward-backward splitting method for maximal monotone mappings, {\it SIAM J. Control Optim.}, {\bf 38}, (2000), 431-446.
	\bibitem{Xia} {\rm F. Q. Xia and N. J. Huang}, Variational inclusions with general $H$-monotone operators in Banach spaces, {\it Comput. Math. Appl.}, {\bf 54}, no. 1, (2010), 24-30.
	\bibitem{Xu} {\rm H. K. Xu}, Iterative algorithms for nonlinear operators, {\it J. London Math. Soc.}, {\bf 66}, no. 1, (2002), 240-256.
	\bibitem{Za} {\rm C. Zalinescu}, Convex Analysis in General Vector Spaces, {\it World Scientific Publishing Co. Inc.}, River Edge, NJ, (2002).
\end{thebibliography} 
\end{document}
