algorithmicx example
Corresponding code:
- \documentclass[11pt]{ctexart}
- \usepackage[top=2cm, bottom=2cm, left=2cm, right=2cm]{geometry}
- \usepackage{algorithm}
- \usepackage{algorithmicx}
- \usepackage{algpseudocode}
- \usepackage{amsmath}
- \floatname{algorithm}{算法}
- \renewcommand{\algorithmicrequire}{\textbf{输入:}}
- \renewcommand{\algorithmicensure}{\textbf{输出:}}
- \begin{document}
- \begin{algorithm}
- \caption{用归并排序求逆序数}
- \begin{algorithmic}[1] % show a line number on every line
- \Require $Array$数组,$n$数组大小
- \Ensure 逆序数
- \Function {MergerSort}{$Array, left, right$}
- \State $result \gets 0$
- \If {$left + 1 < right$}
- \State $middle \gets (left + right) / 2$
- \State $result \gets result +$ \Call{MergerSort}{$Array, left, middle$}
- \State $result \gets result +$ \Call{MergerSort}{$Array, middle, right$}
- \State $result \gets result +$ \Call{Merger}{$Array,left,middle,right$}
- \EndIf
- \State \Return{$result$}
- \EndFunction
- \State
- \Function{Merger}{$Array, left, middle, right$}
- \State $i\gets left$
- \State $j\gets middle$
- \State $k\gets 0$
- \State $result \gets 0$
- \While{$i<middle$ \textbf{and} $j<right$}
- \If{$Array[i]<Array[j]$}
- \State $B[k++]\gets Array[i++]$
- \Else
- \State $B[k++] \gets Array[j++]$
- \State $result \gets result + (middle - i)$
- \EndIf
- \EndWhile
- \While{$i<middle$}
- \State $B[k++] \gets Array[i++]$
- \EndWhile
- \While{$j<right$}
- \State $B[k++] \gets Array[j++]$
- \EndWhile
- \For{$i = 0 \to k-1$}
- \State $Array[left + i] \gets B[i]$
- \EndFor
- \State \Return{$result$}
- \EndFunction
- \end{algorithmic}
- \end{algorithm}
- \end{document}
algorithm example
Preamble setup:
- \usepackage{algorithm}
- \usepackage{algpseudocode}
- \usepackage{amsmath}
- \renewcommand{\algorithmicrequire}{\textbf{Input:}} % have \Require print "Input:"
- \renewcommand{\algorithmicensure}{\textbf{Output:}} % have \Ensure print "Output:"
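For quick testing, this preamble can be dropped into a minimal document. The sketch below is a hypothetical skeleton (it assumes the standard article class, and the placeholder comment is only a marker); any of the algorithm environments from the examples that follow can be pasted into the body:
- \documentclass{article}
- \usepackage{algorithm}
- \usepackage{algpseudocode}
- \usepackage{amsmath}
- \renewcommand{\algorithmicrequire}{\textbf{Input:}}
- \renewcommand{\algorithmicensure}{\textbf{Output:}}
- \begin{document}
- % paste one of the algorithm environments from the examples below here
- \end{document}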
example 1
Code:
- \begin{algorithm}[htb]
- \caption{ Framework of ensemble learning for our system.}
- \label{alg:Framwork}
- \begin{algorithmic}[1]
- \Require
- The set of positive samples for current batch, $P_n$;
- The set of unlabelled samples for current batch, $U_n$;
- Ensemble of classifiers on former batches, $E_{n-1}$;
- \Ensure
- Ensemble of classifiers on the current batch, $E_n$;
- \State Extracting the set of reliable negative and/or positive samples $T_n$ from $U_n$ with help of $P_n$;
- \label{code:fram:extract}
- \State Training ensemble of classifiers $E$ on $T_n \cup P_n$, with help of data in former batches;
- \label{code:fram:trainbase}
- \State $E_n=E_{n-1} \cup E$;
- \label{code:fram:add}
- \State Classifying samples in $U_n-T_n$ by $E_n$;
- \label{code:fram:classify}
- \State Deleting some weak classifiers in $E_n$ so as to keep the capacity of $E_n$;
- \label{code:fram:select} \\
- \Return $E_n$;
- \end{algorithmic}
- \end{algorithm}
example 2
Code:
- \begin{algorithm}[h]
- \caption{An example for format For \& While Loop in Algorithm}
- \begin{algorithmic}[1]
- \For{each $i\in [1,9]$}
- \State initialize a tree $T_{i}$ with only a leaf (the root);
- \State $T=T\cup T_{i};$
- \EndFor
- \ForAll {$c$ such that $c\in RecentMBatch(E_{n-1})$}
- \label{code:TrainBase:getc}
- \State $T=T\cup PosSample(c)$;
- \label{code:TrainBase:pos}
- \EndFor
- \For{$i=1$; $i<n$; $i++$ }
- \State $//$ Your source here;
- \EndFor
- \For{$i=1$ to $n$}
- \State $//$ Your source here;
- \EndFor
- \State $//$ Reusing recent base classifiers.
- \label{code:recentStart}
- \While {$(|E_n| \leq L_1)$ \textbf{and} $(D \neq \emptyset)$}
- \State Selecting the most recent classifier $c_i$ from $D$;
- \State $D=D-c_i$;
- \State $E_n=E_n+c_i$;
- \EndWhile
- \label{code:recentEnd}
- \end{algorithmic}
- \end{algorithm}
example 3
Code:
- \begin{algorithm}[h]
- \caption{Conjugate Gradient Algorithm with Dynamic Step-Size Control}
- \label{alg::conjugateGradient}
- \begin{algorithmic}[1]
- \Require
- $f(x)$: objective function;
- $x_0$: initial solution;
- $s$: step size;
- \Ensure
- optimal $x^{*}$
- \State initial $g_0=0$ and $d_0=0$;
- \Repeat
- \State compute the gradient direction $g_k=\nabla f(x_k)$;
- \State compute Polak-Ribiere parameter $\beta_k=\frac{g_k^{T}(g_k-g_{k-1})}{\parallel g_{k-1} \parallel^{2}}$;
- \State compute the conjugate directions $d_k=-g_k+\beta_k d_{k-1}$;
- \State compute the step size $\alpha_k=s/\parallel d_k \parallel_{2}$;
- \Until{($f(x_k)>f(x_{k-1})$)}
- \end{algorithmic}
- \end{algorithm}
example 4
Code:
- \makeatletter
- \def\BState{\State\hskip-\ALG@thistlm}
- \makeatother
- \begin{algorithm}
- \caption{My algorithm}\label{euclid}
- \begin{algorithmic}[1]
- \Procedure{MyProcedure}{}
- \State $\textit{stringlen} \gets \text{length of }\textit{string}$
- \State $i \gets \textit{patlen}$
- \BState \emph{top}:
- \If {$i > \textit{stringlen}$} \Return false
- \EndIf
- \State $j \gets \textit{patlen}$
- \BState \emph{loop}:
- \If {$\textit{string}(i) = \textit{path}(j)$}
- \State $j \gets j-1$.
- \State $i \gets i-1$.
- \State \textbf{goto} \emph{loop}.
- \State \textbf{close};
- \EndIf
- \State $i \gets i+\max(\textit{delta}_1(\textit{string}(i)),\textit{delta}_2(j))$.
- \State \textbf{goto} \emph{top}.
- \EndProcedure
- \end{algorithmic}
- \end{algorithm}
algorithm2e example
The algorithm2e package can conflict with other packages; a common error message is "Too many }'s". To work around this, add the following commands before loading algorithm2e:
- \makeatletter
- \newif\if@restonecol
- \makeatother
- \let\algorithm\relax
- \let\endalgorithm\relax
So the preamble setup is:
- \makeatletter
- \newif\if@restonecol
- \makeatother
- \let\algorithm\relax
- \let\endalgorithm\relax
- \usepackage[linesnumbered,ruled,vlined]{algorithm2e} % or just [ruled,vlined]
- \usepackage{algpseudocode}
- \usepackage{amsmath}
- \renewcommand{\algorithmicrequire}{\textbf{Input:}} % have \Require print "Input:"
- \renewcommand{\algorithmicensure}{\textbf{Output:}} % have \Ensure print "Output:"
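As above, these lines are only a preamble. A minimal wrapper document is sketched below; it is a hypothetical skeleton assuming the standard article class (example 5 further down shows a complete ctexart document assembled the same way), and examples 1–4 can be pasted into its body:
- \documentclass{article}
- \makeatletter
- \newif\if@restonecol
- \makeatother
- \let\algorithm\relax
- \let\endalgorithm\relax
- \usepackage[linesnumbered,ruled,vlined]{algorithm2e}
- \usepackage{algpseudocode}
- \usepackage{amsmath}
- \begin{document}
- % paste one of the algorithm2e examples below here
- \end{document}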
example 1
Code:
- \begin{algorithm}
- \caption{identify Row Context}
- \KwIn{$r_i$, $Backgrd(T_i)=\{T_1,T_2,\ldots ,T_n\}$ and similarity threshold $\theta_r$}
- \KwOut{$con(r_i)$}
- $con(r_i)= \emptyset$\;
- \For{$j=1;j \le n;j \ne i$}
- {
- float $maxSim=0$\;
- $r^{maxSim}=null$\;
- \While{not end of $T_j$}
- {
- compute Jaro($r_i,r_m$)($r_m\in T_j$)\;
- \If{$(Jaro(r_i,r_m) \ge \theta_r)\wedge (Jaro(r_i,r_m)\ge r^{maxSim})$}
- {
- replace $r^{maxSim}$ with $r_m$\;
- }
- }
- $con(r_i)=con(r_i)\cup \{r^{maxSim}\}$\;
- }
- return $con(r_i)$\;
- \end{algorithm}
example 2
Code:
- \begin{algorithm}
- \caption{Service checkpoint image storage node and routing path selection}
- \LinesNumbered
- \KwIn{host server $PM_s$ that $SerImg_k$ is fetched from, $subnet_s$ that $PM_s$ belongs to, $pod_s$ that $PM_s$ belongs to}
- \KwOut{Service image storage server $storageserver$,and the image transfer path $path$}
- $storageserver$ = Storage node selection($PM_s$, $SerImg_k$,$subnet_s$,$pod_s$)\;
- \If{ $storageserver$ $\neq$ null}
- {
- select a path from $storageserver$ to $PM_s$ and assign the path to $path$\;
- }
- \textbf{final} \;
- \textbf{return} $storageserver$ and $path$\;
- \end{algorithm}
example 3
Code:
- \begin{algorithm}
- \caption{Storage node selection}
- \LinesNumbered
- \KwIn{host server $PM_s$ that the checkpoint image $Img$ is fetched from, $subnet_s$ that $PM_s$ belongs to, $pod_s$ that $PM_s$ belongs to}
- \KwOut{Image storage server $storageserver$}
- \For{ each host server $PM_i$ in the same subnet with $PM_s$ }
- {
- \If{ $PM_i$ is not a service providing node or checkpoint image storage node of $S_k$ }
- {
- add $PM_i$ to $candidateList$ \;
- }
- }
- sort $candidateList$ by reliability desc\;
- init $storageserver$\;
- \For{ each $PM_k$ in $candidateList$}
- {
- \If{ $SP(PM_k)$ $\geq$ $E(SP)$ of $pod_i$ and $BM_k$ $\le$ size of $Img$ }
- {
- assign $PM_k$ to $storageserver$\;
- goto final\;
- }
- }
- clear $candidateList$\;
- add all other subnets in $pod_s$ to $netList$\;
- \For{ each subnet $subnet_j$ in $netList$}
- {
- clear $candidateList$\;
- \For {each $PM_i$ in $subnet_j$ }
- {
- \If{ $PM_i$ is not a service providing node or checkpoint image storage node of $S_k$ }
- {
- add $PM_i$ to $candidateList$\;
- }
- }
- sort all host in $candidateList$ by reliability desc\;
- \For{ each $PM_k$ in $candidateList$}
- {
- \If{$SP(PM_k)$ $\geq$ $E(SP)$ of $pod_i$ and $BM_k$ $\le$ size of $Img$}
- {
- assign $PM_k$ to $storageserver$ \;
- goto final\;
- }
- }
- }
- \textbf{final} \;
- \textbf{return} $storageserver$\;
- \end{algorithm}
example 4
Code:
- \begin{algorithm}
- \caption{Delta checkpoint image storage node and routing path selection}
- \LinesNumbered
- \KwIn{host server $PM_s$ that generates the delta checkpoint image $DImg_{kt}$, $subnet_s$ that $PM_s$ belongs to, $pod_s$ that $PM_s$ belongs to}
- \KwOut{Delta image storage server $storageserver$,and the image transfer path $Path$}
- $storageserver$ = Storage node selection($PM_s$, $DImg_{kt}$,$subnet_s$,$pod_s$)\;
- \If{ $storageserver$ $\equiv$ null}
- {
- the delta checkpoint image is stored in the central storage server\;
- goto final\;
- }
- construct weighted topological graph $graph_s$ of $pod_s$\;
- calculate the shortest path from $storageserver$ to $PM_s$ in $graph_s$ by using the Dijkstra algorithm\;
- \textbf{final} \;
- \textbf{return} $storageserver$ and $path$\;
- \end{algorithm}
example 5
Code:
- \documentclass[8pt,twocolumn]{ctexart}
- \usepackage{amssymb}
- \usepackage{bm}
- \usepackage{textcomp} % provides \textacutedbl, used here as a double-prime symbol
- % Page length commands go here in the preamble
- \setlength{\oddsidemargin}{-0.25in} % Left margin of 1 in + 0 in = 1 in
- \setlength{\textwidth}{9in} % Text width. Right margin of 8.5 in - 1 in - 6.5 in = 1 in
- \setlength{\topmargin}{-.75in} % Top margin of 2 in -0.75 in = 1 in
- \setlength{\textheight}{9.2in} % Lower margin of 11 in - 9 in - 1 in = 1 in
- \setlength{\parindent}{0in}
- \makeatletter
- \newif\if@restonecol
- \makeatother
- \let\algorithm\relax
- \let\endalgorithm\relax
- \usepackage[linesnumbered,ruled,vlined]{algorithm2e} % or just [ruled,vlined]
- \usepackage{algpseudocode}
- \renewcommand{\algorithmicrequire}{\textbf{Input:}}
- \renewcommand{\algorithmicensure}{\textbf{Output:}}
- \begin{document}
- \begin{algorithm}
- \caption{component matrices computing}
- \LinesNumbered
- \KwIn{$\mathcal{X}\in\mathbb{R}^{l_1\times l_2\times\cdots\times l_N},\varepsilon,\lambda,\delta,R$}
- \KwOut{$A^{(j)}s$ for $j=1$ to $N$}
- \textbf{Initialize} all $A^{(j)}s$ //which can be seen as the $0^{th}$ round iterations\;
- {$l$\hspace*{-1pt}\textacutedbl}$=L$ //if we need to judge whether $(11)$ is true then {$l$\hspace*{-1pt}\textacutedbl} denotes $L|_{t-1}$\;
- \For{ each $A_{i_jr}^{(j)}(1\le j\le N,1\le i_j\le I_j,1\le r\le R)$ }
- {//$1^{st}$ round iterations\;
- $g_{i_jr}^{(j)'}=g_{i_jr}^{(j)}$\;
- $A_{i_jr}^{(j)'}=A_{i_jr}^{(j)}$//if the rollback shown as $(12)$ is needed,$A_{i_jr}^{(j)'}$ denotes $A_{i_jr}^{(j)}|_{t-1}$\;
- $A_{i_jr}^{(j)}=A_{i_jr}^{(j)}-\mathrm{{\bf sign}}\left(g_{i_jr}^{(j)}\right)\cdot\delta_{i_jr}^{(j)}$\;
- }
- \Repeat(//other rounds of iterations for computing component matrices){$\bm{L\le \varepsilon}$ or maximum iterations exhausted}
- {
- $l'=L$ //if we need to judge whether $(11)$ is true then $l'$ denotes $L|_t$\;
- \For{ each $A_{i_jr}^{(j)}(1\le j\le N,1\le i_j\le I_j,1\le r\le R)$}
- {
- \If{$g_{i_jr}^{(j)}\cdot g_{i_jr}^{(j)'}>0$}
- {
- $A_{i_jr}^{(j)'}=A_{i_jr}^{(j)} $\;
- $g_{i_jr}^{(j)'}=g_{i_jr}^{(j)} $\;
- $\delta_{i_jr}^{(j)}=\bm{\min}\left(\delta_{i_jr}^{(j)}\cdot\eta^{+},Max\_Step\_Size\right)$\;
- $A_{i_jr}^{(j)}=A_{i_jr}^{(j)}-\mathrm{{\bf sign}}\left(g_{i_jr}^{(j)}\right)\cdot\delta_{i_jr}^{(j)}$\;
- }
- \ElseIf{$g_{i_jr}^{(j)}\cdot g_{i_jr}^{(j)'}<0$}
- {
- \If{$l'>l$\hspace*{-1pt}\textacutedbl}
- {
- $g_{i_jr}^{(j)'}=g_{i_jr}^{(j)}$\;
- $A_{i_jr}^{(j)}=A_{i_jr}^{(j)'}$// if $(11)$ is true then rollback as $(12)$\;
- $\delta_{i_jr}^{(j)}=\bm{\max}\left(\delta_{i_jr}^{(j)}\times\eta^{-},Min\_Step\_Size\right)$\;
- }
- \Else
- {
- $A_{i_jr}^{(j)'}=A_{i_jr}^{(j)} $\;
- $g_{i_jr}^{(j)'}=g_{i_jr}^{(j)} $\;
- $\delta_{i_jr}^{(j)}=\bm{\max}\left(\delta_{i_jr}^{(j)}\cdot\eta^{-},Min\_Step\_Size\right)$\;
- $A_{i_jr}^{(j)}=A_{i_jr}^{(j)}-\mathrm{{\bf sign}}\left(g_{i_jr}^{(j)}\right)\cdot\delta_{i_jr}^{(j)}$\;
- }
- }
- \Else
- {
- $A_{i_jr}^{(j)'}=A_{i_jr}^{(j)} $\;
- $g_{i_jr}^{(j)'}=g_{i_jr}^{(j)} $\;
- $A_{i_jr}^{(j)}=A_{i_jr}^{(j)}-\mathrm{{\bf sign}}\left(g_{i_jr}^{(j)}\right)\cdot\delta_{i_jr}^{(j)}$\;
- }
- }
- $l$\hspace*{-1pt}\textacutedbl$=l'$\;
- }
- \end{algorithm}
- \end{document}
example 6
Code:
- \usepackage[ruled,linesnumbered]{algorithm2e}
- \usepackage{amsmath}
- \begin{algorithm}
- \caption{Learning algorithm of R2P}
- \label{alg:r2p}
- \KwIn{ratings $R$, joint demographic representations $Y$,learning rate $\eta$,maximum iterative number $maxIter$, negative sampling number $k$\;}
- \KwOut{interaction matrix $\bm{W}$, movie vectors $V$\;}
- Initialize $\bm{W},V$ randomly\;
- $t = 0$\;
- For convenience, define $\vec{\varphi}_n = \sum_{m\in S_n}r_{m,n}\vec{v}_m$\; %\varphi_n\bm{W}\vec{y}_n
- \While{not converged \textbf{and} $t \le maxIter$}
- {
- $t = t+1$\;
- \For{$n=1;n \le N;n++$}
- {
- $\bm{W} = \bm{W}+\eta\big(1-\sigma\left(\vec{\varphi}_n^T\bm{W}\vec{y}_n\right)\big)\vec{\varphi}_n\vec{y}_n^T$\;\label{algline:W}
- \For{$m\in S_n$}
- {
- $\vec{v}_m=\vec{v}_m+ \eta\left(1-\sigma\left(\vec{\varphi}_n^T\bm{W}\vec{y}_n\right)\right)r_{m,n}\bm{W}\vec{y}_n$\;\label{algline:V}
- }
- \For{$i=1;i\le k;i++$}
- {
- sample negative sample $\vec{y}_i$ from $P_n$\;
- $\bm{W} = \bm{W}-\eta\big(1-\sigma\left(-\vec{\varphi}_n^T\bm{W}\vec{y}_n\right)\big)\vec{\varphi}_n\vec{y}_i^T$\;
- \For{$m\in S_n$}
- {
- $\vec{v}_m=\vec{v}_m- \eta\left(1-\sigma\left(-\vec{\varphi}_n^T\bm{W}\vec{y}_n\right)\right)r_{m,n}\bm{W}\vec{y}_i$\;
- }
- }
- }
- $\bm{W} = \bm{W}-2\lambda\eta\bm{W}$\;
- $V=V-2\lambda\eta V$\;
- }
- return $\bm{W},V$\;
- \end{algorithm}