\documentstyle[12pt]{article} \setlength\textwidth{6in} \setlength\oddsidemargin{0.25in} \topmargin -20mm \footskip 12mm %\textwidth 38pc \textheight 56pc \def\Bbb#1{{\mathchoice{\mbox{\bf #1}}{\mbox{\bf #1}}% {\mbox{$\bf #1$}}{\mbox{$\scriptscriptstyle \bf #1$}}}} \def\N{\Bbb N} \def\R{\Bbb R} \def\C{\Bbb C} \def\Z{\Bbb Z} \def\T{\Bbb T} \def\Q{\Bbb Q} \def\E{\Bbb E} \def\O{{\cal O}} \def\sgn{{\rm sgn}} \def\H{{\cal H}_P} \def\e{\epsilon} \def\A{{\cal A}} \def\cT{{\cal T}} \def\cF{{\cal F}} \begin{document} \title{On a weak type $(1,1)$ inequality for a maximal conjugate function} \author{Nakhl\'e H. \ Asmar and Stephen J.\ Montgomery-Smith } \date{} \maketitle % % % % % % % % \section{Introduction} \newtheorem{maintheorem}{Theorem}[section] \newtheorem{remarks}[maintheorem]{Remarks} Throughout this paper, $N$ denotes a fixed but arbitrary positive integer, $\T$ denotes the circle group, and $\T^N$ denotes the product of $N$ copies of $\T$. The normalized Lebesgue measure on $\T^N$ will be symbolized by $P$. For a measurable function $f$, we let $\|f\|^*_1=\sup_{y>0}y \lambda_f(y)$ where $\lambda_f (y)=P\left(\{x\in \T^N:\ |f(x)|>y\} \right)$. The integers will be denoted by $\Z$ and the complex numbers by $\C$. Let $\cF_n=\sigma(e^{i\theta_1},e^{i\theta_2},\ldots,e^{i\theta_n})$ denote the $\sigma$-algebra on $\T^N$ generated by the first $n$ coordinate functions. For $f\in L^1(\T^N)$, the conditional expectation of $f$ with respect to $\cF_n$ will be denoted $\E(f|\cF_n)$. Let %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% $$d_0(f)=\E(f|\cF_0)=\int_{\T^N} fdP,$$ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% and for $j=1,\ldots, N$, let $d_j(f)=\E(f|\cF_j)-\E(f|\cF_{j-1})$. 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% We have the martingale difference decomposition %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{equation} f=\sum_{j=0}^N d_j(f). \label{martingale-difference} \end{equation} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Consider the maximal function corresponding to (\ref{martingale-difference}) %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{equation} D(f)= \sup_{1\leq n\leq N} \left| \sum_{j=0}^n d_j(f) \right| = \sup_{1\leq n\leq N} \left| \E(f|\cF_n) \right|. \label{max-martingale} \end{equation} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% A well-known weak type $(1,1)$ maximal inequality due to Doob states that there is a constant $a$ independent of $f$ and $N$ such that %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{equation} \|Df\|^*_1\leq a \|f\|_1. 
\label{doob's-inequality} \end{equation} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Now we recall the conjugate function operator $f\mapsto \widetilde{f}$, defined for all $f\in L^2(\T)$ by the multiplier relation $$\widehat{\widetilde{f}}(n)=-i\sgn (n)\widehat{f}(n), \ {\rm for\ all}\ n\in \Z.$$ By Kolmogorov's Theorem \cite[Chap.\ IV, Theorem (3.16)]{zyg}, the operator $f\mapsto \widetilde{f}$ is of weak type $(1,1)$. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Denote an element of $\T^N$ by $(\theta_1,\theta_2,\ldots,\theta_N)$. Let $H_j$ denote the one-dimensional conjugate function operator defined for functions on $\T^N$ with respect to the $\theta_j$ variable. As an operator on $L^2(\T^N)$, $H_j$ is given by the multiplier relation $\widehat{H_j(f)}(z_1,z_2,\ldots,z_N)= -i\sgn(z_j)\widehat{f}(z_1,z_2,\ldots,z_N)$, for all $(z_1,z_2,\ldots,z_N)\in \Z^N$. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Plainly, the operators $H_j$, $j=1,\ldots,N$, are of weak type $(1,1)$ on $L^1(\T^N)$ with the same constant as in Kolmogorov's theorem for $L^1(\T)$. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% The conjugate function that we consider is defined for all $f\in L^1(\T^N)$ by %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{equation} H(f)=\sum_{j=1}^N H_j(d_j(f)). 
\label{conj-function} \end{equation} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Since both $H_j$ and $d_j$ are multipliers, they commute. We have %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{equation} H(f)=\sum_{j=1}^N d_j(H(f)). \label{conj-mart-dif} \end{equation} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% The maximal function that we are interested in is defined by %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{equation} M(f)=\sup_{1\leq n\leq N} \left| \sum_{j=1}^n d_j(H_j(f)) \right| =D(H(f)), \label{max-conj-ft} \end{equation} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% where $D$ is as in (\ref{max-martingale}). Thus $M$ is the composition of two operators of weak type $(1,1)$. (The fact that $H$ is of weak type $(1,1)$ is known, and will not be needed in the proofs. See Remarks \ref{remarks} (a), below. This fact will also follow from our main theorem.) Our goal is to prove the following result. 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{maintheorem} There is a constant $A$ independent of $N$ such that for all $f\in L^1(\T^N)$ we have %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{equation} \|Mf\|^*_1\leq A \|f\|_1, \label{desired-inequality} \end{equation} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% where $M$ is the maximal operator given by (\ref{max-conj-ft}). \label{maintheorem} \end{maintheorem} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% The proof of this theorem is presented in the following section, and is of independent interest. We will show that by changing the time in the Brownian motion that Burkholder, Gundy, and Silverstein used in \cite{bgs} from a continuous range $[0,\infty)$ to a semi-continuous range $\{1,2,\ldots\}\times [0,\infty)$, the proofs in \cite{bgs} can be carried out on $\T^N$, yielding inequalities which are independent of $N$ (e.g., the good $\lambda$'' inequality). We end this section with some remarks concerning the operator $H$ that will not be used in the sequel. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{remarks} {\rm (a) The operator $f\mapsto Hf$ that we defined in (\ref{conj-mart-dif}) is a conjugate function operator of the kind that was introduced and studied by Helson \cite{hel}. Helson's definition is in terms of orders on the dual group $\Z^N$. In our case, the operator $H$ can be recast in terms of a lexicographic order on $\Z^N$. 
As shown in \cite{hel}, the operator $H$ is bounded from $L^1(\T^N)$ into $L^p(\T^N)$, for any $0 n$, then $$\E(d_k(c_{1,\tau_1},\dots,c_{k,\tau_k}) | \A_{n,t}) = \E(\E(d_k(c_{1,\tau_1},\dots,c_{k,\tau_k}) | \A_{k,0}) | \A_{n,t}) = 0 ,$$ by Lemma (\ref{lemma5.1}) and (\ref{value-at-0}). Similarly, by the same lemma, it also follows that if $k = n$, then $$\E(d_k(c_{1,\tau_1},\dots,c_{k,\tau_k}) | \A_{n,t}) = d_k(c_{1,\tau_1},\dots,c_{k,t \wedge \tau_k})$$ and hence $\E(F_\infty | \A_{n,t}) = F_{n,t}$. This proves that $(F_{n,t})$\ is a martingale. The rest of the lemma is obvious.\\ % % % % % % % {\bf Proof of Steps 1, 2, 4} Because of Lemma (\ref{lemma5.2}), Step 2 follows from Doob's Maximal Inequality for continuous time martingales (see \cite[Chapter VII, Section 11]{doob}). Step 1 also follows from the uniform distribution of Brownian motion over $\T$ (see \cite[Corollary 3.6.2]{peter}). Step 4 is also a consequence of the same property of Brownian motion. We give details. We have % % % % % % \begin{eqnarray*} \tilde F^* & = & \sup_{(n,t)}| \tilde F_{n,t} | \geq \sup_n | \tilde F_{n,\tau_n} |\\ & = & \sup_n \left| \sum_{m=0}^n H_m (d_m (f)) (c_{1,\tau_1},\ldots,c_{m,\tau_m}) \right|. \end{eqnarray*} But since $(c_{1,\tau_1},\ldots,c_{m,\tau_m})$ is equidistributed with $(\theta_1,\ldots,\theta_m)$, the right side of the displayed inequalities is equidistributed with $\sup_n \left| \sum_{m=0}^n H_m (d_m (f)) (\theta_1,\ldots,\theta_m) \right|,$ and Step 4 follows.\\ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% {\bf Proof of Step 3.} The proof may be done as in \cite[Theorem~4]{bgs}. We provide the details to show the role of analyticity on $\T^N$. 
Here we call a function $\phi\in L^1(\T^N)$ analytic if its Fourier transform is supported in the half-space $$\O=\{0\} \bigcup_{j=1}^N\{(m_1,m_2,\ldots,m_N)\in \Z^N :\ m_j>0, m_{j+1}=\ldots, m_N=0\}.$$ The following basic properties of analytic functions on $\T^N$ are easy to prove. \begin{itemize} \item A function $\phi\in L^1(\T^N)$ is analytic if and only if each term in its martingale difference decomposition, $d_j(\phi)$ ($j=1,\ldots,N)$, is analytic in the $j$-th variable $\theta_j$ and has zero mean, i.e., $d_j(\phi)\in H^1_0(\T)$. \item If $\phi$ is analytic then $\phi^2$ is also analytic. (This follows from $\O +\O=\O$.) \item If $\phi$ is a trigonometric polynomial on $\T^N$, then $\phi+i H(\phi)$ is analytic. \end{itemize} % % % % % Getting back to the proof of Step~3, let %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% $$g(r_1\theta_1,\dots,r_N\theta_N) = f(r_1\theta_1,\dots,r_N\theta_N) + i H (f)(r_1\theta_1,\dots,r_N\theta_N),$$ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% and let %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% $$h = g^2.$$ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Both $g$\ and $h$\ are analytic on $\T^N$. Hence the functions $d_m (g)(\theta_1,\ldots , r_m\theta_m)$ and $d_m (h)(\theta_1,\ldots , r_m\theta_m)$ are analytic in the $m$-th variable. Form the functions $G_{n,t}$\ and $H_{n,t}$\ as in (\ref{brownian-martingale}). By Lemma (\ref{lemma5.2}), $G_{n,t}$\ and $H_{n,t}$ are martingales relative to $\A_{n,t}$. 
We claim that, because of analyticity, we have \begin{equation} H_{n,t} = G_{n,t}^2. \label{square-of-analytic} \end{equation} To see this, write $$g(\theta_1,\dots,\theta_N) = \sum_{k=1}^N d_k(g)(\theta_1,\dots,\theta_k)$$ and $$h(\theta_1,\dots,\theta_N) = \sum_{k=1}^N d_k(h)(\theta_1,\dots,\theta_k).$$ Then, since all the exponents of $\theta_n$ are positive, we get % % % % $$\left( \sum_{k=1}^{n-1} d_k(g)(\theta_1,\dots,\theta_k) + d_n(g)(\theta_1,\dots,r_n\theta_n) \right)^2 = \sum_{k=1}^{n-1} d_k(h)(\theta_1,\dots,\theta_k) + d_n(h)(\theta_1,\dots,r_n\theta_n)$$ % % % and (\ref{square-of-analytic}) easily follows. Consequently, since the functions $H_{n,t}$ form a martingale relative to the $\sigma$-algebra $\A_{n,t}$, we have that $G_{n,t}^2$ is a martingale relative to this $\sigma$-algebra. With this fact in hands, we can now proceed with the proof of Step 3 in exactly the same way as in \cite[pp. 148-149]{bgs}. We need a lemma. %%%%%%%%%% %%%%%%%%%% %%%%%%%%%%% %%%%%%%%%% %%%%%%%%%% %%%%%%%%%%% \begin{lemma5.3} Suppose that $\mu$ and $\nu$ are stopping times with $\mu\leq \nu$ a.\ e. Let $f$ be a real-valued trigonometric polynomial on $\T^N$ with $\int f dP = 0$. Then, $$\| \tilde{F}_\nu -\tilde{F}_\mu \|_2= \| F_\nu -F_\mu \|_2.$$ \label{lemma5.3} \end{lemma5.3} % % % {\bf Proof.} Using the fact that $G_{n,t}^2$ is a martingale, we get $$0=\E(G_0^2)=\E(G_\mu^2).$$ Similarly, $\E(G_{\nu}^2)=0$. Hence, $\E F_\mu^2 = \E \tilde{F}_\mu^2$ and $\E F_\nu^2 = \E \tilde{F}_\nu^2$. Next, we show that $\E(F_\mu F_\nu)= \E(F_\mu^2)$, and $\E(\tilde{F}_\mu \tilde{F}_\nu)= \E(\tilde{F}_\mu^2)$. We start with the first equality. Using Doob's Optional Sampling Theorem and basic properties of the conditional expectation, we see that % % % $$\E(F_\nu | F_\mu)=F_\mu ,$$ % % % % % % $$F_\mu \E(F_\nu | F_\mu)=F_\mu^2 ,$$ % % % and so % % % % % $$\E(F_\mu F_\nu | F_\mu)=F_\mu^2 .$$ % % % Integrating both sides of the last equality, we get $\E(F_\mu F_\nu)= \E(F_\mu^2)$. 
The second equality can be proved similarly. Thus \begin{eqnarray*} \E(F_\mu -F_\nu)^2 &=& \E F^2_\mu + \E F^2_\nu -2 \E (F_\mu F_\nu)\\ &=& \E F^2_\mu + \E F^2_\nu -2 \E (F_\mu^2) \\ &=& \E F^2_\nu - \E (F_\mu^2) \\ &=& \E(\tilde{F}_\mu - \tilde{F}_\nu)^2, \end{eqnarray*} which completes the proof.\\ %%%%%%%%%% %%%%%%%%%% %%%%%%%%%%% %%%%%%%%%% %%%%%%%%%% %%%%%%%%%%% The above lemma enables us to establish a fundamental inequality. This is our version of the good $\lambda$' inequality for conjugate functions on $\T^N$. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{lemma5.4} With the notation of the previous lemma, let $\alpha \ge 1$\ and $\beta > 1$. Then there is a constant $c$, depending only on $\alpha$\ and $\beta$, such that whenever $\lambda > 0$\ satisfies $$P(G^* > \lambda) \le \alpha P(G^* > \beta \lambda) ,$$ then $$P(G^* > \lambda) \le c\,P(c\,F^* > \lambda) .$$ \label{lemma5.4} \end{lemma5.4} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% {\bf Proof.} Define stopping times $$\mu = \inf\{\, (n,t)\in \cT : |G_{n,t}| > \lambda \} ,\\ \nu = \inf\{\, (n,t)\in \cT : |G_{n,t}| > \beta \lambda \} .$$ If the set $\{\, (n,t) : |G_{n,t}| > \lambda \}$\ is empty, then we set $\mu = \infty$. Otherwise $\mu$\ is such that $|G_{n,t}| \le \lambda$\ whenever $(n,t) < \mu$, and $|G_\mu| = \lambda$. We define $\nu$\ similarly. Also, we have that $\mu \le \nu$, that $|G_\mu| = \lambda$\ on the set $\{\mu\ne\infty\} = \{G_\infty^* > \lambda\}$, and that $|G_\nu| = \beta\lambda$\ on the set $\{\nu\ne\infty\} = \{G^* > \beta\lambda\}$. 
Thus if $\lambda$\ satisfies the hypothesis of the lemma, then \begin{eqnarray*} \E(\chi_{G^*>\lambda} (F_\nu-F_\mu)^2) & = & \left\| F_\nu - F_\mu\right\|_2^2 \\ & = & \frac{1}{2} \left\| G_\nu - G_\mu\right\|_2^2 \\ & \ge & \frac{1}{2} (\beta \lambda - \lambda)^2 P(G^*>\beta \lambda) \\ & \ge & c \lambda^2 P(G_\infty^* > \lambda ). \end{eqnarray*} Also $$\E(\chi_{G^*>\lambda} (F_\nu-F_\mu)^4) \le \left\| G_\nu - G_\mu\right\|_4^4 \le c \lambda^4 P(G_\infty^* > \lambda ).$$ Thus, by a lemma of Paley and Zygmund \cite[Chapter V, (8,26)]{zyg}, $$P(G^* > \lambda) \le c P(c|F_\nu - F_\mu| > \lambda) .$$ Since $|F_\nu - F_\mu| \le 2 F^*$, the lemma follows. \bigskip Now let us finish by proving Step~3. It is sufficient to show $\left\| G^* \right\|^*_{1,\infty} \le c \, \left\| F^* \right\|^*_{1,\infty}$. Suppose that $$\left\| G^* \right\|^*_{1,\infty} = \sup_{\lambda>0} \lambda P(G^*>\lambda) = A .$$ Pick $\lambda_0$\ such that $2\lambda_0 P(G^*>2\lambda_0) \ge A/2$. Then $\lambda_0 P(G^*>\lambda_0) \le A$, and thus $\lambda_0$\ satisfies the hypothesis of the lemma with $\alpha = 4$\ and $\beta = 2$. Then it follows that $$\| F^*\|^*_{1,\infty} \ge \lambda_0 P(c F^* > \lambda_0) \ge c A/4 ,$$ as desired. {\bf Acknowledgements} The research of the authors was supported by grants from the National Science Foundation (U.\ S.\ A.). \begin{thebibliography}{Dillo 83} \bibitem{ams} N.\ Asmar, and S. Montgomery-Smith, {\em Hahn's Embedding Theorem for orders and analysis on groups with ordered dual groups}, Colloq. Math., {\bf LXX} (1996), 235--252. \bibitem{bg} D.\ L.\ Burkholder, and R.\ F.\ Gundy, {\em Extrapolation and interpolation of quasi-linear operators on martingales}, Acta Math.\ {\bf 124}\ (1970), 249--304. \bibitem{bgs} D.\ L.\ Burkholder, R.\ F.\ Gundy, and M.\ L.\ Silverstein {\em A maximal characterization of the class $H^p$}, Trans.\ Amer.\ Math.\ Soc., {\bf 157}\ (1971), 137--153. 
\bibitem{doob} J.\ L.\ Doob, ``Stochastic Processes'', Wiley Publications in Statistics, New York 1953. \bibitem{doob2} J.\ L.\ Doob, {\em Semimartingales and subharmonic functions}, Trans.\ Amer.\ Math.\ Soc., {\bf 77}\ (1954), 86--121. \bibitem{hel} H.\ Helson, {\em Conjugate series in several variables}, Pac.\ J.\ Math., {\bf 9}\ (1959), 513--523. \bibitem{peter} K.\ E.\ Petersen, ``Brownian Motion, Hardy Spaces and Bounded Mean Oscillation,'' London Math. Soc. Lecture Notes Series, No. 28, Cambridge University Press, 1977. \bibitem{zyg} A.\ Zygmund, ``Trigonometric series'', 2nd Edition, 2 vols., Cambridge University Press, 1959.