% book.tex
  1. \documentclass[12pt]{book}
  2. \usepackage[T1]{fontenc}
  3. \usepackage[utf8]{inputenc}
  4. \usepackage{lmodern}
  5. \usepackage{hyperref}
  6. \usepackage{graphicx}
  7. \usepackage[english]{babel}
  8. \usepackage{listings}
  9. \usepackage{amsmath}
  10. \usepackage{amsthm}
  11. \usepackage{amssymb}
  12. \usepackage{natbib}
  13. \usepackage{stmaryrd}
  14. \usepackage{xypic}
  15. \usepackage{semantic}
  16. \lstset{%
  17. language=Lisp,
  18. basicstyle=\ttfamily\small,
  19. escapechar=@
  20. }
  21. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  22. % 'dedication' environment: To add a dedication paragraph at the start of book %
  23. % Source: http://www.tug.org/pipermail/texhax/2010-June/015184.html %
  24. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  25. \newenvironment{dedication}
  26. {
  27. \cleardoublepage
  28. \thispagestyle{empty}
  29. \vspace*{\stretch{1}}
  30. \hfill\begin{minipage}[t]{0.66\textwidth}
  31. \raggedright
  32. }
  33. {
  34. \end{minipage}
  35. \vspace*{\stretch{3}}
  36. \clearpage
  37. }
  38. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  39. % Chapter quote at the start of chapter %
  40. % Source: http://tex.stackexchange.com/a/53380 %
  41. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  42. \makeatletter
  43. \renewcommand{\@chapapp}{}% Not necessary...
  44. \newenvironment{chapquote}[2][2em]
  45. {\setlength{\@tempdima}{#1}%
  46. \def\chapquote@author{#2}%
  47. \parshape 1 \@tempdima \dimexpr\textwidth-2\@tempdima\relax%
  48. \itshape}
  49. {\par\normalfont\hfill--\ \chapquote@author\hspace*{\@tempdima}\par\bigskip}
  50. \makeatother
  51. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  52. \newcommand{\itm}[1]{\ensuremath{\mathit{#1}}}
  53. \newcommand{\Stmt}{\itm{stmt}}
  54. \newcommand{\Exp}{\itm{exp}}
  55. \newcommand{\Instr}{\itm{instr}}
  56. \newcommand{\Prog}{\itm{prog}}
  57. \newcommand{\Arg}{\itm{arg}}
  58. \newcommand{\Int}{\itm{int}}
  59. \newcommand{\Var}{\itm{var}}
  60. \newcommand{\Op}{\itm{op}}
  61. \newcommand{\key}[1]{\texttt{#1}}
  62. \newcommand{\READ}{(\key{read})}
  63. \newcommand{\UNIOP}[2]{(\key{#1}\,#2)}
  64. \newcommand{\BINOP}[3]{(\key{#1}\,#2\,#3)}
  65. \newcommand{\LET}[3]{(\key{let}\,([#1\;#2])\,#3)}
  66. \newcommand{\ASSIGN}[2]{(\key{assign}\,#1\;#2)}
  67. \newcommand{\RETURN}[1]{(\key{return}\,#1)}
  68. \newcommand{\INT}[1]{(\key{int}\;#1)}
  69. \newcommand{\REG}[1]{(\key{reg}\;#1)}
  70. \newcommand{\VAR}[1]{(\key{var}\;#1)}
  71. \newcommand{\STACKLOC}[1]{(\key{stack}\;#1)}
  72. \newcommand{\IF}[3]{(\key{if}\,#1\;#2\;#3)}
  73. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  74. \title{\Huge \textbf{Essentials of Compilation} \\
  75. \huge An Incremental Approach}
  76. \author{\textsc{Jeremy G. Siek} \\
  77. %\thanks{\url{http://homes.soic.indiana.edu/jsiek/}} \\
  78. Indiana University \\
  79. \\
  80. with contributions from: \\
  81. Carl Factora
  82. }
  83. \begin{document}
  84. \frontmatter
  85. \maketitle
  86. \begin{dedication}
  87. This book is dedicated to the programming language wonks at Indiana
  88. University.
  89. \end{dedication}
  90. \tableofcontents
  91. %\listoffigures
  92. %\listoftables
  93. \mainmatter
  94. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  95. \chapter*{Preface}
  96. Talk about nano-pass \citep{Sarkar:2004fk,Keep:2012aa} and incremental
  97. compilers \citep{Ghuloum:2006bh}.
  98. %\section*{Structure of book}
  99. % You might want to add short description about each chapter in this book.
  100. %\section*{About the companion website}
  101. %The website\footnote{\url{https://github.com/amberj/latex-book-template}} for %this file contains:
  102. %\begin{itemize}
  103. % \item A link to the (freely downloadable) latest version of this document.
  104. % \item Link to download LaTeX source for this document.
  105. % \item Miscellaneous material (e.g. suggested readings etc).
  106. %\end{itemize}
  107. \section*{Acknowledgments}
  108. Need to give thanks to
  109. \begin{itemize}
  110. \item Kent Dybvig
  111. \item Daniel P. Friedman
  112. \item Abdulaziz Ghuloum
  113. \item Oscar Waddell
  114. \item Dipanwita Sarkar
  115. \item Ronald Garcia
  116. \item Bor-Yuh Evan Chang
  117. \end{itemize}
  118. %\mbox{}\\
  119. %\noindent Amber Jain \\
  120. %\noindent \url{http://amberj.devio.us/}
  121. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  122. \chapter{Abstract Syntax Trees, Matching, and Recursion}
  123. \label{ch:trees-recur}
  124. \section{Abstract Syntax Trees}
  125. % \begin{enumerate}
  126. % \item language representation
  127. % \item reading grammars
  128. % \end{enumerate}
  129. Abstract syntax trees (AST) are used to represent and model the syntax of a
  130. language. In compiler implementation, we use them to represent intermediary
  131. languages (IL). Representing ILs with ASTs allows us to categorize the
  132. expressions of our language and to restrict the context in which they can
  133. appear. A simple example is the representation of the untyped
  134. \mbox{\(\lambda\)-calculus} with simple arithmetic operators. For our
  135. purposes, we use Racket syntax.
  136. \begin{verbatim}
  137. op ::= + | - | *
  138. exp ::= n | (op exp*) | x | (lambda (x) exp) | (exp exp)
  139. \end{verbatim}
  140. With this specification, we can more easily perform \textit{syntax
  141. transformations} on any expression within the given language (i.e.,
  142. \(\lambda\)-calculus). In the above AST, the syntax {\tt exp*} signifies
  143. \textit{zero or more} {\tt exp}. Later on in this chapter, we show how
  144. to transform an arbitrary \(\lambda\)-term into the equivalent
  145. \textit{de Bruijnized} \(\lambda\)-term.
  146. \section{Using Match}
  147. % \begin{enumerate}
  148. % \item Syntax transformation
  149. % \item Some Racket examples (factorial?)
  150. % \end{enumerate}
  151. Racket provides a built-in pattern-matcher, {\tt match}, that we can use to
  152. perform syntax transformations. As a preliminary example, we include a
  153. familiar definition of factorial, without using match.
  154. \begin{verbatim}
  155. (define (! n)
  156. (if (zero? n) 1
  157. (* n (! (sub1 n)))))
  158. \end{verbatim}
  159. In this form of factorial, we are simply conditioning on the inputted
  160. natural number, {\tt n}. If we rewrite factorial to use {\tt match}, we can
  161. match on the actual value of {\tt n}.
  162. \begin{verbatim}
  163. (define (! n)
  164. (match n
  165. (0 1)
  166. (n (* n (! (sub1 n))))))
  167. \end{verbatim}
  168. Of course, we can also use {\tt match} to pattern match on more complex
  169. expressions.
  170. If we were told to write a function that takes a \(\lambda\)-term as input,
  171. we can match on the values of \textit{syntax-expressions} ({\tt sexp}). We
  172. can then represent the language defined above with the following function
  173. that uses {\tt match}.
  174. \begin{verbatim}
  175. (lambda (exp)
  176. (match exp
  177. ((? number?) ...)
  178. ((? symbol?) ...)
  179. (`(,op exp* ...)
  180. #:when (memv op '(+ - *))
  181. ...)
  182. (`(lambda (,x) ,b) ...)
  183. (`(,e1 ,e2) ...)))
  184. \end{verbatim}
  185. It's easy to get lost in Racket's {\tt match} syntax. To understand this,
  186. we can represent the possible ways of writing \textit{left-hand side} (LHS)
  187. match expressions.
  188. \begin{verbatim}
  189. exp ::= val | (unquote val) | (exp exp*)
  190. lhs ::= val | (quote val*) | (quasi-quote exp) | (? Racket-pred)
  191. \end{verbatim}
  192. \section{Recursion}
  193. % \begin{enumerate}
  194. % \item \textit{What is a base case?}
  195. % \item Using on a language (lambda calculus ->
  196. % \end{enumerate}
  197. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  198. \chapter{Integers and Variables}
  199. \label{ch:int-exp}
  200. %\begin{chapquote}{Author's name, \textit{Source of this quote}}
  201. %``This is a quote and I don't know who said this.''
  202. %\end{chapquote}
  203. \section{The $S_0$ Language}
  204. The $S_0$ language includes integers, operations on integers,
  205. (arithmetic and input), and variable definitions. The syntax of the
  206. $S_0$ language is defined by the grammar in
  207. Figure~\ref{fig:s0-syntax}. This language is rich enough to exhibit
  208. several compilation techniques but simple enough so that we can
  209. implement a compiler for it in two weeks of hard work. To give the
  210. reader a feeling for the scale of this first compiler, the instructor
  211. solution for the $S_0$ compiler consists of 6 recursive functions and
  212. a few small helper functions that together span 256 lines of code.
  213. \begin{figure}[htbp]
  214. \centering
  215. \fbox{
  216. \begin{minipage}{0.85\textwidth}
  217. \[
  218. \begin{array}{lcl}
  219. \Op &::=& \key{+} \mid \key{-} \mid \key{*} \mid \key{read} \\
  220. \Exp &::=& \Int \mid (\Op \; \Exp^{*}) \mid \Var \mid \LET{\Var}{\Exp}{\Exp}
  221. \end{array}
  222. \]
  223. \end{minipage}
  224. }
  225. \caption{The syntax of the $S_0$ language. The abbreviation \Op{} is
  226. short for operator, \Exp{} is short for expression, \Int{} for integer,
  227. and \Var{} for variable.}
  228. \label{fig:s0-syntax}
  229. \end{figure}
  230. The result of evaluating an expression is a value. For $S_0$, values
  231. are integers. To make it straightforward to map these integers onto
  232. x86-64 assembly~\citep{Matz:2013aa}, we restrict the integers to just
  233. those representable with 64 bits, the range $-2^{63}$ to $2^{63}-1$.
  234. We will walk through some examples of $S_0$ programs, commenting on
  235. aspects of the language that will be relevant to compiling it. We
  236. start with one of the simplest $S_0$ programs; it adds two integers.
  237. \[
  238. \BINOP{+}{10}{32}
  239. \]
  240. The result is $42$, as you might have expected.
  241. %
  242. The next example demonstrates that expressions may be nested within
  243. each other, in this case nesting several additions and negations.
  244. \[
  245. \BINOP{+}{10}{ \UNIOP{-}{ \BINOP{+}{12}{20} } }
  246. \]
  247. What is the result of the above program?
  248. The \key{let} construct stores a value in a variable which can then be
  249. used within the body of the \key{let}. So the following program stores
  250. $32$ in $x$ and then computes $\BINOP{+}{10}{x}$, producing $42$.
  251. \[
  252. \LET{x}{ \BINOP{+}{12}{20} }{ \BINOP{+}{10}{x} }
  253. \]
  254. When there are multiple \key{let}'s for the same variable, the closest
  255. enclosing \key{let} is used. Consider the following program with two
  256. \key{let}'s that define variables named $x$.
  257. \[
  258. \LET{x}{32}{ \BINOP{+}{ \LET{x}{10}{x} }{ x } }
  259. \]
  260. For the purposes of showing which variable uses correspond to which
  261. definitions, the following shows the $x$'s annotated with subscripts
  262. to distinguish them.
  263. \[
  264. \LET{x_1}{32}{ \BINOP{+}{ \LET{x_2}{10}{x_2} }{ x_1 } }
  265. \]
  266. The \key{read} operation prompts the user of the program for an
  267. integer. Given an input of $10$, the following program produces $42$.
  268. \[
  269. \BINOP{+}{(\key{read})}{32}
  270. \]
  271. We include the \key{read} operation in $S_0$ to demonstrate that order
  272. of evaluation can make a difference. Given the input $52$ then $10$,
  273. the following produces $42$ (and not $-42$).
  274. \[
  275. \LET{x}{\READ}{ \LET{y}{\READ}{ \BINOP{-}{x}{y} } }
  276. \]
  277. The initializing expression is always evaluated before the body of the
  278. \key{let}, so in the above, the \key{read} for $x$ is performed before
  279. the \key{read} for $y$.
  280. %
  281. The behavior of the following program is somewhat subtle because
  282. Scheme does not specify an evaluation order for arguments of an
  283. operator such as $-$.
  284. \[
  285. \BINOP{-}{\READ}{\READ}
  286. \]
  287. Given the input $52$ then $10$, the above program can result in either
  288. $42$ or $-42$, depending on the whims of the Scheme implementation.
  289. The goal for this chapter is to implement a compiler that translates
  290. any program $p \in S_0$ into a x86-64 assembly program $p'$ such that
  291. the assembly program exhibits the same behavior on an x86 computer as
  292. the $S_0$ program running in a Scheme implementation.
  293. \[
  294. \xymatrix{
  295. p \in S_0 \ar[rr]^{\text{compile}} \ar[drr]_{\text{run in Scheme}\quad} && p' \in \text{x86-64} \ar[d]^{\quad\text{run on an x86 machine}}\\
  296. & & n \in \mathbb{Z}
  297. }
  298. \]
  299. In the next section we introduce enough of the x86-64 assembly
  300. language to compile $S_0$.
  301. \section{The x86-64 Assembly Language}
  302. An x86-64 program is a sequence of instructions. The instructions
  303. manipulate 16 variables called \emph{registers} and can also load and
  304. store values into \emph{memory}. Memory is a mapping of 64-bit
  305. addresses to 64-bit values. The syntax $n(r)$ is used to read the
  306. address $a$ stored in register $r$ and then offset it by $n$ bytes (8
  307. bits), producing the address $a + n$. The arithmetic instructions,
  308. such as $\key{addq}\,s\,d$, reads from the source $s$ and destination
  309. argument $d$, applies the arithmetic operation, then stores the result
  310. in the destination $d$. In this case, computing $d \gets d + s$. The
  311. move instruction, $\key{movq}\,s\,d$ reads from $s$ and stores the
  312. result in $d$. The $\key{callq}\,\mathit{label}$ instruction executes
  313. the procedure specified by the label, which we shall use to implement
  314. \key{read}. Figure~\ref{fig:x86-a} defines the syntax for this subset
  315. of the x86-64 assembly language.
  316. \begin{figure}[tbp]
  317. \fbox{
  318. \begin{minipage}{0.96\textwidth}
  319. \[
  320. \begin{array}{lcl}
  321. \itm{register} &::=& \key{rsp} \mid \key{rbp} \mid \key{rax} \mid \key{rbx} \mid \key{rcx}
  322. \mid \key{rdx} \mid \key{rsi} \mid \key{rdi} \mid \\
  323. && \key{r8} \mid \key{r9} \mid \key{r10}
  324. \mid \key{r11} \mid \key{r12} \mid \key{r13}
  325. \mid \key{r14} \mid \key{r15} \\
  326. \Arg &::=& \key{\$}\Int \mid \key{\%}\itm{register} \mid \Int(\key{\%}\itm{register}) \\
  327. \Instr &::=& \key{addq} \; \Arg, \Arg \mid
  328. \key{subq} \; \Arg, \Arg \mid
  329. \key{imulq} \; \Arg,\Arg \mid
  330. \key{negq} \; \Arg \mid \\
  331. && \key{movq} \; \Arg, \Arg \mid
  332. \key{callq} \; \mathit{label} \mid
  333. \key{pushq}\;\Arg \mid \key{popq}\;\Arg \mid \key{retq} \\
  334. \Prog &::= & \key{.globl \_main}\\
  335. & & \key{\_main:} \; \Instr^{+}
  336. \end{array}
  337. \]
  338. \end{minipage}
  339. }
  340. \caption{A subset of the x86-64 assembly language.}
  341. \label{fig:x86-a}
  342. \end{figure}
  343. Figure~\ref{fig:p0-x86} depicts an x86-64 program that is equivalent
  344. to $\BINOP{+}{10}{32}$. The \key{globl} directive says that the
  345. \key{\_main} procedure is externally visible, which is necessary so
  346. that the operating system can call it. The label \key{\_main:}
  347. indicates the beginning of the \key{\_main} procedure. The
  348. instruction $\key{movq}\,\$10, \%\key{rax}$ puts $10$ into the
  349. register \key{rax}. The following instruction $\key{addq}\,\key{\$}32,
  350. \key{\%rax}$ adds $32$ to the $10$ in \key{rax} and puts the result,
  351. $42$, back into \key{rax}. The instruction \key{retq} finishes the
  352. \key{\_main} function by returning the integer in the \key{rax}
  353. register to the operating system.
  354. \begin{figure}[htbp]
  355. \centering
  356. \begin{minipage}{0.6\textwidth}
  357. \begin{lstlisting}
  358. .globl _main
  359. _main:
  360. movq $10, %rax
  361. addq $32, %rax
  362. retq
  363. \end{lstlisting}
  364. \end{minipage}
  365. \caption{A simple x86-64 program equivalent to $\BINOP{+}{10}{32}$.}
  366. \label{fig:p0-x86}
  367. \end{figure}
  368. The next example exhibits the use of memory. Figure~\ref{fig:p1-x86}
  369. lists an x86-64 program that is equivalent to $\BINOP{+}{52}{
  370. \UNIOP{-}{10} }$. To understand how this x86-64 program uses memory,
  371. we need to explain a region of memory called the
  372. \emph{procedure call stack} (\emph{stack} for short). The stack
  373. consists of a separate \emph{frame} for each procedure call. The
  374. memory layout for an individual frame is shown in
  375. Figure~\ref{fig:frame}. The register \key{rsp} is called the
  376. \emph{stack pointer} and points to the item at the top of the
  377. stack. The stack grows downward in memory, so we increase the size of
  378. the stack by subtracting from the stack pointer. The frame size is
  379. required to be a multiple of 16 bytes. The register \key{rbp} is the
  380. \emph{base pointer} which serves two purposes: 1) it saves the
  381. location of the stack pointer for the procedure that called the
  382. current one and 2) it is used to access variables associated with the
  383. current procedure. We number the variables from $1$ to $n$. Variable
  384. $1$ is stored at address $-8\key{(\%rbp)}$, variable $2$ at
  385. $-16\key{(\%rbp)}$, etc.
  386. \begin{figure}
  387. \centering
  388. \begin{minipage}{0.6\textwidth}
  389. \begin{lstlisting}
  390. .globl _main
  391. _main:
  392. pushq %rbp
  393. movq %rsp, %rbp
  394. subq $16, %rsp
  395. movq $10, -8(%rbp)
  396. negq -8(%rbp)
  397. movq $52, %rax
  398. addq -8(%rbp), %rax
  399. addq $16, %rsp
  400. popq %rbp
  401. retq
  402. \end{lstlisting}
  403. \end{minipage}
  404. \caption{An x86-64 program equivalent to $\BINOP{+}{52}{\UNIOP{-}{10} }$.}
  405. \label{fig:p1-x86}
  406. \end{figure}
  407. \begin{figure}
  408. \centering
  409. \begin{tabular}{|r|l|} \hline
  410. Position & Contents \\ \hline
  411. 8(\key{\%rbp}) & return address \\
  412. 0(\key{\%rbp}) & old \key{rbp} \\
  413. -8(\key{\%rbp}) & variable $1$ \\
  414. -16(\key{\%rbp}) & variable $2$ \\
  415. \ldots & \ldots \\
  416. 0(\key{\%rsp}) & variable $n$\\ \hline
  417. \end{tabular}
  418. \caption{Memory layout of a frame.}
  419. \label{fig:frame}
  420. \end{figure}
  421. Getting back to the program in Figure~\ref{fig:p1-x86}, the first
  422. three instructions are the typical prelude for a procedure. The
  423. instruction \key{pushq \%rbp} saves the base pointer for the procedure
  424. that called the current one onto the stack and subtracts $8$ from the
  425. stack pointer. The second instruction \key{movq \%rsp, \%rbp} changes
  426. the base pointer to the top of the stack. The instruction \key{subq
  427. \$16, \%rsp} moves the stack pointer down to make enough room for
  428. storing variables. This program just needs one variable ($8$ bytes)
  429. but because the frame size is required to be a multiple of 16 bytes,
  430. it rounds to 16 bytes.
  431. The next four instructions carry out the work of computing
  432. $\BINOP{+}{52}{\UNIOP{-}{10} }$. The first instruction \key{movq \$10,
  433. -8(\%rbp)} stores $10$ in variable $1$. The instruction \key{negq
  434. -8(\%rbp)} changes variable $1$ to $-10$. The \key{movq \$52, \%rax}
  435. places $52$ in the register \key{rax} and \key{addq -8(\%rbp), \%rax}
  436. adds the contents of variable $1$ to \key{rax}, at which point
  437. \key{rax} contains $42$.
  438. The last three instructions are the typical \emph{conclusion} of a
  439. procedure. The \key{addq \$16, \%rsp} instruction moves the stack
  440. pointer back to point at the old base pointer. The amount added here
  441. needs to match the amount that was subtracted in the prelude of the
  442. procedure. Then \key{popq \%rbp} returns the old base pointer to
  443. \key{rbp} and adds $8$ to the stack pointer. The \key{retq}
  444. instruction jumps back to the procedure that called this one and
  445. subtracts 8 from the stack pointer.
  446. The compiler will need a convenient representation for manipulating
  447. x86 programs, so we define an abstract syntax for x86 in
  448. Figure~\ref{fig:x86-ast-a}. The \itm{info} field of the \key{program}
  449. AST node is for storing auxiliary information that needs to be
  450. communicated from one pass to the next. The function \key{print-x86}
  451. provided in the supplemental code converts an x86 abstract syntax tree
  452. into the text representation for x86 (Figure~\ref{fig:x86-a}).
  453. \begin{figure}[tbp]
  454. \fbox{
  455. \begin{minipage}{0.96\textwidth}
  456. \[
  457. \begin{array}{lcl}
  458. \Arg &::=& \INT{\Int} \mid \REG{\itm{register}}
  459. \mid \STACKLOC{\Int} \\
  460. \Instr &::=& (\key{add} \; \Arg\; \Arg) \mid
  461. (\key{sub} \; \Arg\; \Arg) \mid
  462. (\key{imul} \; \Arg\;\Arg) \mid
  463. (\key{neg} \; \Arg) \mid \\
  464. && (\key{mov} \; \Arg\; \Arg) \mid
  465. (\key{call} \; \mathit{label}) \mid
  466. (\key{push}\;\Arg) \mid (\key{pop}\;\Arg) \mid (\key{ret}) \\
  467. \Prog &::= & (\key{program} \;\itm{info} \; \Instr^{+})
  468. \end{array}
  469. \]
  470. \end{minipage}
  471. }
  472. \caption{Abstract syntax for x86-64 assembly.}
  473. \label{fig:x86-ast-a}
  474. \end{figure}
  475. \section{From $S_0$ to x86-64 through $C_0$}
  476. \label{sec:plan-s0-x86}
  477. To compile one language to another it helps to focus on the
  478. differences between the two languages. It is these differences that
  479. the compiler will need to bridge. What are the differences between
  480. $S_0$ and x86-64 assembly? Here we list some of the most important
  481. differences.
  482. \begin{enumerate}
  483. \item x86-64 arithmetic instructions typically take two arguments and
  484. update the second argument in place. In contrast, $S_0$ arithmetic
  485. operations only read their arguments and produce a new value.
  486. \item An argument to an $S_0$ operator can be any expression, whereas
  487. x86-64 instructions restrict their arguments to integers, registers,
  488. and memory locations.
  489. \item An $S_0$ program can have any number of variables whereas x86-64
  490. has only 16 registers.
  491. \item Variables in $S_0$ can overshadow other variables with the same
  492. name. The registers and memory locations of x86-64 all have unique
  493. names.
  494. \end{enumerate}
  495. We ease the challenge of compiling from $S_0$ to x86 by breaking down
  496. the problem into several steps, dealing with the above differences one
  497. at a time. The main question then becomes: in what order do we tackle
  498. these differences? This is often one of the most challenging questions
  499. that a compiler writer must answer because some orderings may be much
  500. more difficult to implement than others. It is difficult to know ahead
  501. of time which orders will be better so often some trial-and-error is
  502. involved. However, we can try to plan ahead and choose the orderings
  503. based on what we find out.
  504. For example, to handle difference \#2 (nested expressions), we shall
  505. introduce new variables and pull apart the nested expressions into a
  506. sequence of assignment statements. To deal with difference \#3 we
  507. will be replacing variables with registers and/or stack
  508. locations. Thus, it makes sense to deal with \#2 before \#3 so that
  509. \#3 can replace both the original variables and the new ones. Next,
  510. consider where \#1 should fit in. Because it has to do with the format
  511. of x86 instructions, it makes more sense after we have flattened the
  512. nested expressions (\#2). Finally, when should we deal with \#4
  513. (variable overshadowing)? We shall be solving this problem by
  514. renaming variables to make sure they have unique names. Recall that
  515. our plan for \#2 involves moving nested expressions, which could be
  516. problematic if it changes the shadowing of variables. However, if we
  517. deal with \#4 first, then it will not be an issue. Thus, we arrive at
  518. the following ordering.
  519. \[
  520. \xymatrix{
  521. 4 \ar[r] & 2 \ar[r] & 1 \ar[r] & 3
  522. }
  523. \]
  524. We further simplify the translation from $S_0$ to x86 by identifying
  525. an intermediate language named $C_0$, roughly half-way between $S_0$
  526. and x86, to provide a rest stop along the way. The name $C_0$ comes
  527. from this language being vaguely similar to the $C$ language. The
  528. differences \#4 and \#2, regarding variables and nested expressions,
  529. are handled by the passes \textsf{uniquify} and \textsf{flatten} that
  530. bring us to $C_0$.
  531. \[\large
  532. \xymatrix@=50pt{
  533. S_0 \ar@/^/[r]^-{\textsf{uniquify}} &
  534. S_0 \ar@/^/[r]^-{\textsf{flatten}} &
  535. C_0
  536. }
  537. \]
  538. The syntax for $C_0$ is defined in Figure~\ref{fig:c0-syntax}. The
  539. $C_0$ language supports the same operators as $S_0$ but the arguments
  540. of operators are now restricted to just variables and integers. The
  541. \key{let} construct of $S_0$ is replaced by an assignment statement
  542. and there is a \key{return} construct to specify the return value of
  543. the program. A program consists of a sequence of statements that
  544. include at least one \key{return} statement.
  545. \begin{figure}[htbp]
  546. \[
  547. \begin{array}{lcl}
  548. \Arg &::=& \Int \mid \Var \\
  549. \Exp &::=& \Arg \mid (\Op \; \Arg^{*})\\
  550. \Stmt &::=& \ASSIGN{\Var}{\Exp} \mid \RETURN{\Arg} \\
  551. \Prog & ::= & (\key{program}\;\itm{info}\;\Stmt^{+})
  552. \end{array}
  553. \]
  554. \caption{The $C_0$ intermediate language.}
  555. \label{fig:c0-syntax}
  556. \end{figure}
  557. To get from $C_0$ to x86-64 assembly requires three more steps, which
  558. we discuss below.
  559. \[\large
  560. \xymatrix@=50pt{
  561. C_0 \ar@/^/[r]^-{\textsf{select\_instr.}}
  562. & \text{x86}^{*} \ar@/^/[r]^-{\textsf{assign\_homes}}
  563. & \text{x86}^{*} \ar@/^/[r]^-{\textsf{patch\_instr.}}
  564. & \text{x86}
  565. }
  566. \]
  567. We handle difference \#1, concerning the format of arithmetic
  568. instructions, in the \textsf{select\_instructions} pass. The result
  569. of this pass produces programs consisting of x86-64 instructions that
  570. use variables.
  571. %
  572. As there are only 16 registers, we cannot always map variables to
  573. registers (difference \#3). Fortunately, the stack can grow quite large, so
  574. we can map variables to locations on the stack. This is handled in the
  575. \textsf{assign\_homes} pass. The topic of
  576. Chapter~\ref{ch:register-allocation} is implementing a smarter
  577. approach in which we make a best-effort to map variables to registers,
  578. resorting to the stack only when necessary.
  579. The final pass in our journey to x86 handles an idiosyncrasy of x86
  580. assembly. Many x86 instructions have two arguments but only one of the
  581. arguments may be a memory reference. Because we are mapping variables
  582. to stack locations, many of our generated instructions will violate
  583. this restriction. The purpose of the \textsf{patch\_instructions} pass
  584. is to fix this problem by replacing every bad instruction with a short
  585. sequence of instructions that use the \key{rax} register.
  586. \section{Uniquify Variables}
  587. The purpose of this pass is to make sure that each \key{let} uses a
  588. unique variable name. For example, the \textsf{uniquify} pass could
  589. translate
  590. \[
  591. \LET{x}{32}{ \BINOP{+}{ \LET{x}{10}{x} }{ x } }
  592. \]
  593. to
  594. \[
  595. \LET{x.1}{32}{ \BINOP{+}{ \LET{x.2}{10}{x.2} }{ x.1 } }
  596. \]
  597. We recommend implementing \textsf{uniquify} as a recursive function
  598. that mostly just copies the input program. However, when encountering
  599. a \key{let}, it should generate a unique name for the variable (the
  600. Racket function \key{gensym} is handy for this) and associate the old
  601. name with the new unique name in an association list. The
  602. \textsf{uniquify} function will need to access this association list
  603. when it gets to a variable reference, so we add another parameter to
  604. \textsf{uniquify} for the association list.
  605. \section{Flatten Expressions}
  606. The purpose of the \textsf{flatten} pass is to get rid of nested
  607. expressions, such as the $\UNIOP{-}{10}$ in the following program,
  608. without changing the behavior of the program.
  609. \[
  610. \BINOP{+}{52}{ \UNIOP{-}{10} }
  611. \]
  612. This can be accomplished by introducing a new variable, assigning the
  613. nested expression to the new variable, and then using the new variable
  614. in place of the nested expressions. For example, the above program is
  615. translated to the following one.
  616. \[
  617. \begin{array}{l}
  618. \ASSIGN{ \itm{x} }{ \UNIOP{-}{10} } \\
  619. \RETURN{ \BINOP{+}{52}{ \itm{x} } }
  620. \end{array}
  621. \]
  622. We recommend implementing \textsf{flatten} as a recursive function
  623. that returns two things, 1) the newly flattened expression, and 2) a
  624. list of assignment statements, one for each of the new variables
  625. introduced while flattening the expression.
  626. Take special care for programs such as the following that initialize
  627. variables with integers or other variables.
  628. \[
  629. \LET{a}{42}{ \LET{b}{a}{ b }}
  630. \]
  631. This program should be translated to
  632. \[
  633. \ASSIGN{a}{42} \;
  634. \ASSIGN{b}{a} \;
  635. \RETURN{b}
  636. \]
  637. and not the following, which could result from a naive implementation
  638. of \textsf{flatten}.
  639. \[
  640. \ASSIGN{x.1}{42}\;
  641. \ASSIGN{a}{x.1}\;
  642. \ASSIGN{x.2}{a}\;
  643. \ASSIGN{b}{x.2}\;
  644. \RETURN{b}
  645. \]
  646. \section{Select Instructions}
  647. In the \textsf{select\_instructions} pass we begin the work of
  648. translating from $C_0$ to x86. The target language of this pass is a
  649. pseudo-x86 language that still uses variables, so we add an AST node
  650. of the form $\VAR{\itm{var}}$. The \textsf{select\_instructions} pass
  651. deals with the differing format of arithmetic operations. For example,
  652. in $C_0$ an addition operation could take the following form:
  653. \[
  654. \ASSIGN{x}{ \BINOP{+}{10}{32} }
  655. \]
  656. To translate to x86, we need to express this addition using the
  657. \key{add} instruction that does an in-place update. So we first move
  658. $10$ to $x$ then perform the \key{add}.
  659. \[
  660. (\key{mov}\,\INT{10}\, \VAR{x})\; (\key{add} \;\INT{32}\; \VAR{x})
  661. \]
  662. There are some cases that require special care to avoid generating
  663. needlessly complicated code. If one of the arguments is the same as
  664. the left-hand side of the assignment, then there is no need for the
  665. extra move instruction. For example, the following
  666. \[
  667. \ASSIGN{x}{ \BINOP{+}{10}{x} }
  668. \quad\text{should translate to}\quad
  669. (\key{add} \; \INT{10}\; \VAR{x})
  670. \]
  671. Regarding the \RETURN{e} statement of $C_0$, we recommend treating it
  672. as an assignment to the \key{rax} register and let the procedure
  673. conclusion handle the transfer of control back to the calling
  674. procedure.
  675. \section{Assign Homes}
  676. As discussed in Section~\ref{sec:plan-s0-x86}, the
  677. \textsf{assign\_homes} pass places all of the variables on the stack.
  678. Consider again the example $S_0$ program $\BINOP{+}{52}{ \UNIOP{-}{10} }$,
  679. which after \textsf{select\_instructions} looks like the following.
  680. \[
  681. \begin{array}{l}
  682. (\key{mov}\;\INT{10}\; \VAR{x})\\
  683. (\key{neg}\; \VAR{x})\\
  684. (\key{mov}\; \INT{52}\; \REG{\itm{rax}})\\
  685. (\key{add}\; \VAR{x}\; \REG{\itm{rax}})
  686. \end{array}
  687. \]
  688. The one and only variable $x$ is assigned to stack location
  689. \key{-8(\%rbp)}, so the \textsf{assign\_homes} pass translates the
  690. above to
  691. \[
  692. \begin{array}{l}
  693. (\key{mov}\;\INT{10}\; \STACKLOC{{-}8})\\
  694. (\key{neg}\; \STACKLOC{{-}8})\\
  695. (\key{mov}\; \INT{52}\; \REG{\itm{rax}})\\
  696. (\key{add}\; \STACKLOC{{-}8}\; \REG{\itm{rax}})
  697. \end{array}
  698. \]
  699. In the process of assigning stack locations to variables, it is
  700. convenient to compute and store the size of the frame which will be
  701. needed later to generate the procedure conclusion.
  702. \section{Patch Instructions}
  703. The purpose of this pass is to make sure that each instruction adheres
  704. to the restrictions regarding which arguments can be memory
  705. references. For most instructions, the rule is that at most one
  706. argument may be a memory reference.
  707. Consider again the following example.
  708. \[
  709. \LET{a}{42}{ \LET{b}{a}{ b }}
  710. \]
  711. After the \textsf{assign\_homes} pass, the above has been translated to
  712. \[
  713. \begin{array}{l}
  714. (\key{mov} \;\INT{42}\; \STACKLOC{{-}8})\\
  715. (\key{mov}\;\STACKLOC{{-}8}\; \STACKLOC{{-}16})\\
  716. (\key{mov}\;\STACKLOC{{-}16}\; \REG{\itm{rax}})
  717. \end{array}
  718. \]
  719. The second \key{mov} instruction is problematic because both arguments
  720. are stack locations. We suggest fixing this problem by moving from the
  721. source to \key{rax} and then from \key{rax} to the destination, as
  722. follows.
  723. \[
  724. \begin{array}{l}
  725. (\key{mov} \;\INT{42}\; \STACKLOC{{-}8})\\
  726. (\key{mov}\;\STACKLOC{{-}8}\; \REG{\itm{rax}})\\
  727. (\key{mov}\;\REG{\itm{rax}}\; \STACKLOC{{-}16})\\
  728. (\key{mov}\;\STACKLOC{{-}16}\; \REG{\itm{rax}})
  729. \end{array}
  730. \]
  731. The \key{imul} instruction is a special case because the destination
  732. argument must be a register.
  733. \section{Testing with Interpreters}
  734. The typical way to test a compiler is to run the generated assembly
  735. code on a diverse set of programs and check whether they behave as
  736. expected. However, when a compiler is structured as ours is, with many
  737. passes, and there is an error in the generated assembly code, it can
  738. be hard to determine which pass contains the source of the error. A
  739. good way to isolate the error is to not only test the generated
  740. assembly code but to also test the output of every pass. This requires
  741. having interpreters for all the intermediate languages. Indeed, the
  742. file \key{interp.rkt} in the supplemental code provides interpreters
  743. for all the intermediate languages described in this book, starting
  744. with interpreters for $S_0$, $C_0$, and x86 (in abstract syntax).
  745. The file \key{run-tests.rkt} automates the process of running the
  746. interpreters on the output programs of each pass and checking their
  747. result.
  748. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  749. \chapter{Register Allocation}
  750. \label{ch:register-allocation}
  751. In Chapter~\ref{ch:int-exp} we simplified the generation of x86
  752. assembly by placing all variables on the stack. We can improve the
  753. performance of the generated code considerably if we instead try to
  754. place as many variables as possible into registers. The CPU can
  755. access a register in a single cycle, whereas accessing the stack can
  756. take from several cycles (to go to cache) to hundreds of cycles (to go
  757. to main memory). Figure~\ref{fig:reg-eg} shows a program with four
  758. variables that serves as a running example. We show the source program
  759. and also the output of instruction selection. At that point the
  760. program is almost x86 assembly but not quite; it still contains
  761. variables instead of stack locations or registers.
  762. \begin{figure}
  763. \begin{minipage}{0.45\textwidth}
  764. Source program:
  765. \begin{lstlisting}
  766. (let ([v 1])
  767. (let ([w 46])
  768. (let ([x (+ v 7)])
  769. (let ([y (+ 4 x)])
  770. (let ([z (+ x w)])
  771. (- z y))))))
  772. \end{lstlisting}
  773. \end{minipage}
  774. \begin{minipage}{0.45\textwidth}
  775. After instruction selection:
  776. \begin{lstlisting}
  777. (program (v w x y z)
  778. (mov (int 1) (var v))
  779. (mov (int 46) (var w))
  780. (mov (var v) (var x))
  781. (add (int 7) (var x))
  782. (mov (var x) (var y))
  783. (add (int 4) (var y))
  784. (mov (var x) (var z))
  785. (add (var w) (var z))
  786. (mov (var z) (reg rax))
  787. (sub (var y) (reg rax)))
  788. \end{lstlisting}
  789. \end{minipage}
  790. \caption{Running example for this chapter.}
  791. \label{fig:reg-eg}
  792. \end{figure}
  793. The goal of register allocation is to fit as many variables into
  794. registers as possible. It is often the case that we have more
  795. variables than registers, so we can't naively map each variable to a
  796. register. Fortunately, it is also common for different variables to be
  797. needed during different periods of time, and in such cases the
  798. variables can be mapped to the same register. Consider variables $x$
  799. and $y$ in Figure~\ref{fig:reg-eg}. After the variable $x$ is moved
  800. to $z$ it is no longer needed. Variable $y$, on the other hand, is
  801. used only after this point, so $x$ and $y$ could share the same
  802. register. The topic of the next section is how we compute where a
  803. variable is needed.
  804. \section{Liveness Analysis}
  805. A variable is \emph{live} if the variable is used at some later point
  806. in the program and there is not an intervening assignment to the
  807. variable.
  808. %
  809. To understand the latter condition, consider the following code
  810. fragment in which there are two writes to $b$. Are $a$ and
  811. $b$ both live at the same time?
  812. \begin{lstlisting}[numbers=left,numberstyle=\tiny]
  813. (mov (int 5) (var a)) ; @$a \gets 5$@
  814. (mov (int 30) (var b)) ; @$b \gets 30$@
  815. (mov (var a) (var c)) ; @$c \gets a$@
  816. (mov (int 10) (var b)) ; @$b \gets 10$@
  817. (add (var b) (var c)) ; @$c \gets c + b$@
  818. \end{lstlisting}
  819. The answer is no because the value $30$ written to $b$ on line 2 is
  820. never used. The variable $b$ is read on line 5 and there is an
  821. intervening write to $b$ on line 4, so the read on line 5 receives the
  822. value written on line 4, not line 2.
  823. The live variables can be computed by traversing the instruction
  824. sequence back to front (i.e., backwards in execution order). Let
  825. $I_1,\ldots, I_n$ be the instruction sequence. We write
  826. $L_{\mathsf{after}}(k)$ for the set of live variables after
  827. instruction $I_k$ and $L_{\mathsf{before}}(k)$ for the set of live
  828. variables before instruction $I_k$. The live variables after an
  829. instruction are always the same as the live variables before the next
  830. instruction.
  831. \begin{equation*}
  832. L_{\mathsf{after}}(k) = L_{\mathsf{before}}(k+1)
  833. \end{equation*}
  834. To start things off, there are no live variables after the last
  835. instruction, so
  836. \begin{equation*}
  837. L_{\mathsf{after}}(n) = \emptyset
  838. \end{equation*}
  839. We then apply the following rule repeatedly, traversing the
  840. instruction sequence back to front.
  841. \begin{equation*}
  842. L_{\mathsf{before}}(k) = (L_{\mathsf{after}}(k) - W(k)) \cup R(k),
  843. \end{equation*}
  844. where $W(k)$ are the variables written to by instruction $I_k$ and
  845. $R(k)$ are the variables read by instruction $I_k$.
  846. Figure~\ref{fig:live-eg} shows the results of live variables analysis
  847. for the running example. Next to each instruction we write its
  848. $L_{\mathsf{after}}$ set.
  849. \begin{figure}[tbp]
  850. \begin{lstlisting}
  851. (program (v w x y z)
  852. (mov (int 1) (var v)) @$\{ v \}$@
  853. (mov (int 46) (var w)) @$\{ v, w \}$@
  854. (mov (var v) (var x)) @$\{ w, x \}$@
  855. (add (int 7) (var x)) @$\{ w, x \}$@
  856. (mov (var x) (var y)) @$\{ w, x, y\}$@
  857. (add (int 4) (var y)) @$\{ w, x, y \}$@
  858. (mov (var x) (var z)) @$\{ w, y, z \}$@
  859. (add (var w) (var z)) @$\{ y, z \}$@
  860. (mov (var z) (reg rax)) @$\{ y \}$@
  861. (sub (var y) (reg rax))) @$\{\}$@
  862. \end{lstlisting}
  863. \caption{Running example program annotated with live-after sets.}
  864. \label{fig:live-eg}
  865. \end{figure}
  866. \section{Building the Interference Graph}
  867. Based on the liveness analysis, we know the program regions where each
  868. variable is needed. However, during register allocation, we need to
  869. answer questions of the specific form: are variables $u$ and $v$ ever
  870. live at the same time? (And therefore cannot be assigned to the same
  871. register.) To make this question easier to answer, we create an
  872. explicit data structure, an \emph{interference graph}. An
  873. interference graph is an undirected graph that has an edge between two
  874. variables if they are live at the same time, that is, if they
  875. interfere with each other.
  876. The most obvious way to compute the interference graph is to look at
  877. the set of live variables between each statement in the program, and
  878. add an edge to the graph for every pair of variables in the same set.
  879. This approach is less than ideal for two reasons. First, it can be
  880. rather expensive because it takes $O(n^2)$ time to look at every pair
  881. in a set of $n$ live variables. Second, there is a special case in
  882. which two variables that are live at the same time do not actually
  883. interfere with each other: when they both contain the same value
  884. because we have assigned one to the other.
  885. A better way to compute the edges of the intereference graph is given
  886. by the following rules.
  887. \begin{itemize}
  888. \item If instruction $I_k$ is a move: (\key{mov} $s$\, $d$), then add
  889. the edge $(d,v)$ for every $v \in L_{\mathsf{after}}(k)$ unless $v =
  890. d$ or $v = s$.
  891. \item If instruction $I_k$ is not a move but some other arithmetic
  892. instruction such as (\key{add} $s$\, $d$), then add the edge $(d,v)$
  893. for every $v \in L_{\mathsf{after}}(k)$ unless $v = d$.
  894. \item If instruction $I_k$ is of the form (\key{call}
  895. $\mathit{label}$), then add an edge $(r,v)$ for every caller-save
  896. register $r$ and every variable $v \in L_{\mathsf{after}}(k)$.
  897. \end{itemize}
  898. Working from the top to bottom of Figure~\ref{fig:live-eg} and
  899. applying the above rules, $v$ interferes with $w$, $x$ interferes with
  900. $w$ and $y$, and $z$ interferes with $w$ and
  901. $y$. The resulting interference graph is shown in
  901. Figure~\ref{fig:interfere}.
  902. \begin{figure}[tbp]
  903. \large
  904. \[
  905. \xymatrix@=40pt{
  906. v \ar@{-}[r] & w \ar@{-}[r]\ar@{-}[d]\ar@{-}[dr] & x \ar@{-}[dl]\\
  907. & y \ar@{-}[r] & z
  908. }
  909. \]
  910. \caption{Interference graph for the running example.}
  911. \label{fig:interfere}
  912. \end{figure}
  913. \section{Graph Coloring via Sudoku}
  914. We now come to the main event, mapping variables to registers (or to
  915. stack locations in the event that we run out of registers). We need
  916. to make sure not to map two variables to the same register if the two
  917. variables interfere with each other. In terms of the interference
  918. graph, this means we cannot map adjacent nodes to the same register.
  919. If we think of registers as colors, the register allocation problem
  920. becomes the widely-studied graph coloring
  921. problem~\citep{Balakrishnan:1996ve,Rosen:2002bh}.
  922. The reader may be more familiar with the graph coloring problem than he
  923. or she realizes; the popular game of Sudoku is an instance of the
  924. graph coloring problem. The following describes how to build a graph
  925. out of a Sudoku board.
  926. \begin{itemize}
  927. \item There is one node in the graph for each Sudoku square.
  928. \item There is an edge between two nodes if the corresponding squares
  929. are in the same row or column, or if the squares are in the same
  930. $3\times 3$ region.
  931. \item Choose nine colors to correspond to the numbers $1$ to $9$.
  932. \item Based on the initial assignment of numbers to squares in the
  933. Sudoku board, assign the corresponding colors to the corresponding
  934. nodes in the graph.
  935. \end{itemize}
  936. If you can color the remaining nodes in the graph with the nine
  937. colors, then you've also solved the corresponding game of Sudoku.
  938. Given that Sudoku is graph coloring, one can use Sudoku strategies to
  939. come up with an algorithm for allocating registers. For example, one
  940. of the basic techniques for Sudoku is Pencil Marks. The idea is that
  941. you use a process of elimination to determine what numbers still make
  942. sense for a square, and write down those numbers in the square
  943. (writing very small). At first, each number might be a
  944. possibility, but as the board fills up, more and more of the
  945. possibilities are crossed off (or erased). For example, if the number
  946. $1$ is assigned to a square, then by process of elimination, you can
  947. cross off the $1$ pencil mark from all the squares in the same row,
  948. column, and region. Many Sudoku computer games provide automatic
  949. support for Pencil Marks. This heuristic also reduces the degree of
  950. branching in the search tree.
  951. The Pencil Marks technique corresponds to the notion of color
  952. \emph{saturation} due to \cite{Brelaz:1979eu}. The
  953. saturation of a node, in Sudoku terms, is the number of possibilities
  954. that have been crossed off using the process of elimination mentioned
  955. above. In graph terminology, we have the following definition:
  956. \begin{equation*}
  957. \mathrm{saturation}(u) = |\{ c \;|\; \exists v. v \in \mathrm{Adj}(u)
  958. \text{ and } \mathrm{color}(v) = c \}|
  959. \end{equation*}
  960. where $\mathrm{Adj}(u)$ is the set of nodes adjacent to $u$ and
  961. the notation $|S|$ stands for the size of the set $S$.
  962. Using the Pencil Marks technique leads to a simple strategy for
  963. filling in numbers: if there is a square with only one possible number
  964. left, then write down that number! But what if there are no squares
  965. with only one possibility left? One brute-force approach is to just
  966. make a guess. If that guess ultimately leads to a solution, great. If
  967. not, backtrack to the guess and make a different guess. Of course,
  968. this is horribly time consuming. One standard way to reduce the amount
  969. of backtracking is to use the most-constrained-first heuristic. That
  970. is, when making a guess, always choose a square with the fewest
  971. possibilities left (the node with the highest saturation). The idea
  972. is that choosing highly constrained squares earlier rather than later
  973. is better because later there may not be any possibilities left.
  974. In some sense, register allocation is easier than Sudoku because we
  975. can always cheat and add more numbers by spilling variables to the
  976. stack. Also, we'd like to minimize the time needed to color the graph,
  977. and backtracking is expensive. Thus, it makes sense to keep the
  978. most-constrained-first heuristic but drop the backtracking in favor of
  979. greedy search (guess and just keep going).
  980. Figure~\ref{fig:satur-algo} gives the pseudo-code for this simple
  981. greedy algorithm for register allocation based on saturation and the
  982. most-constrained-first heuristic, which is roughly equivalent to the
  983. DSATUR algorithm of \cite{Brelaz:1979eu} (also known as
  984. saturation degree ordering
  985. (SDO)~\citep{Gebremedhin:1999fk,Omari:2006uq}). Just as in Sudoku,
  986. the algorithm represents colors with integers, with the first $k$
  987. colors corresponding to the $k$ registers in a given machine and the
  988. rest of the integers corresponding to stack locations.
  989. \begin{figure}[btp]
  990. \centering
  991. \begin{lstlisting}[basicstyle=\rmfamily,deletekeywords={for,from,with,is,not,in,find},morekeywords={while},columns=fullflexible]
  992. Algorithm: DSATUR
  993. Input: a graph @$G$@
  994. Output: an assignment @$\mathrm{color}[v]$@ for each node @$v \in G$@
  995. @$W \gets \mathit{vertices}(G)$@
  996. while @$W \neq \emptyset$@ do
  997. pick a node @$u$@ from @$W$@ with the highest saturation,
  998. breaking ties randomly
  999. find the lowest color @$c$@ that is not in @$\{ \mathrm{color}[v] \;|\; v \in \mathrm{Adj}(u)\}$@
  1000. @$\mathrm{color}[u] \gets c$@
  1001. @$W \gets W - \{u\}$@
  1002. \end{lstlisting}
  1003. \caption{Saturation-based greedy graph coloring algorithm.}
  1004. \label{fig:satur-algo}
  1005. \end{figure}
  1006. With this algorithm in hand, let us return to the running example and
  1007. consider how to color the interference graph in
  1008. Figure~\ref{fig:interfere}. Initially, all of the nodes are not yet
  1009. colored and they are unsaturated, so we annotate each of them with a
  1010. dash for their color and an empty set for the saturation.
  1011. \[
  1012. \xymatrix{
  1013. v:-,\{\} \ar@{-}[r] & w:-,\{\} \ar@{-}[r]\ar@{-}[d]\ar@{-}[dr] & x:-,\{\} \ar@{-}[dl]\\
  1014. & y:-,\{\} \ar@{-}[r] & z:-,\{\}
  1015. }
  1016. \]
  1017. We select a maximally saturated node and color it $0$. In this case we
  1018. have a 5-way tie, so we arbitrarily pick $y$. The color $0$ is no
  1019. longer available for $w$, $x$, and $z$ because they interfere with
  1020. $y$.
  1021. \[
  1022. \xymatrix{
  1023. v:-,\{\} \ar@{-}[r] & w:-,\{0\} \ar@{-}[r]\ar@{-}[d]\ar@{-}[dr] & x:-,\{0\} \ar@{-}[dl]\\
  1024. & y:0,\{\} \ar@{-}[r] & z:-,\{0\}
  1025. }
  1026. \]
  1027. Now we repeat the process, selecting another maximally saturated node.
  1028. This time there is a three-way tie between $w$, $x$, and $z$. We color
  1029. $w$ with $1$.
  1030. \[
  1031. \xymatrix{
  1032. v:-,\{1\} \ar@{-}[r] & w:1,\{0\} \ar@{-}[r]\ar@{-}[d]\ar@{-}[dr] & x:-,\{0,1\} \ar@{-}[dl]\\
  1033. & y:0,\{1\} \ar@{-}[r] & z:-,\{0,1\}
  1034. }
  1035. \]
  1036. The most saturated nodes are now $x$ and $z$. We color $x$ with the
  1037. next available color which is $2$.
  1038. \[
  1039. \xymatrix{
  1040. v:-,\{1\} \ar@{-}[r] & w:1,\{0,2\} \ar@{-}[r]\ar@{-}[d]\ar@{-}[dr] & x:2,\{0,1\} \ar@{-}[dl]\\
  1041. & y:0,\{1,2\} \ar@{-}[r] & z:-,\{0,1\}
  1042. }
  1043. \]
  1044. We have only two nodes left to color, $v$ and $z$, but $z$ is
  1045. more highly saturated, so we color $z$ with $2$.
  1046. \[
  1047. \xymatrix{
  1048. v:-,\{1\} \ar@{-}[r] & w:1,\{0,2\} \ar@{-}[r]\ar@{-}[d]\ar@{-}[dr] & x:2,\{0,1\} \ar@{-}[dl]\\
  1049. & y:0,\{1,2\} \ar@{-}[r] & z:2,\{0,1\}
  1050. }
  1051. \]
  1052. The last iteration of the coloring algorithm assigns color $0$ to $v$.
  1053. \[
  1054. \xymatrix{
  1055. v:0,\{1\} \ar@{-}[r] & w:1,\{0,2\} \ar@{-}[r]\ar@{-}[d]\ar@{-}[dr] & x:2,\{0,1\} \ar@{-}[dl]\\
  1056. & y:0,\{1,2\} \ar@{-}[r] & z:2,\{0,1\}
  1057. }
  1058. \]
  1059. With the coloring complete, we can finalize assignment of variables to
  1060. registers and stack locations. Recall that if we have $k$ registers,
  1061. we map the first $k$ colors to registers and the rest to stack
  1062. locations. Suppose for the moment that we just have one extra register
  1063. to use for register allocation, just \key{rbx}. Then the following is
  1064. the mapping of colors to registers and stack allocations.
  1065. \[
  1066. \{ 0 \mapsto \key{\%rbx}, \; 1 \mapsto \key{-8(\%rbp)}, \; 2 \mapsto \key{-16(\%rbp)}, \ldots \}
  1067. \]
  1068. Putting this together with the above coloring of the variables, we
  1069. arrive at the following assignment.
  1070. \[
  1071. \{ v \mapsto \key{\%rbx}, \;
  1072. w \mapsto \key{-8(\%rbp)}, \;
  1073. x \mapsto \key{-16(\%rbp)}, \;
  1074. y \mapsto \key{\%rbx}, \;
  1075. z\mapsto \key{-16(\%rbp)} \}
  1076. \]
  1077. Applying this assignment to our running example
  1078. (Figure~\ref{fig:reg-eg}) yields the following program.
  1079. % why frame size of 32? -JGS
  1080. \begin{lstlisting}
  1081. (program 32
  1082. (mov (int 1) (reg rbx))
  1083. (mov (int 46) (stack-loc -8))
  1084. (mov (reg rbx) (stack-loc -16))
  1085. (add (int 7) (stack-loc -16))
  1086. (mov (stack-loc -16) (reg rbx))
  1087. (add (int 4) (reg rbx))
  1088. (mov (stack-loc -16) (stack-loc -16))
  1089. (add (stack-loc -8) (stack-loc -16))
  1090. (mov (stack-loc -16) (reg rax))
  1091. (sub (reg rbx) (reg rax)))
  1092. \end{lstlisting}
  1093. This program is almost an x86 program. The remaining step is to apply
  1094. the patch instructions pass. In this example, the trivial move of
  1095. \key{-16(\%rbp)} to itself is deleted and the addition of
  1096. \key{-8(\%rbp)} to \key{-16(\%rbp)} is fixed by going through
  1097. \key{\%rax}. The following shows the portion of the program that
  1098. changed.
  1099. \begin{lstlisting}
  1100. (add (int 4) (reg rbx))
  1101. (mov (stack-loc -8) (reg rax))
  1102. (add (reg rax) (stack-loc -16))
  1103. \end{lstlisting}
  1104. An overview of all of the passes involved in register allocation is
  1105. shown in Figure~\ref{fig:reg-alloc-passes}.
  1106. \begin{figure}[tbp]
  1107. \[
  1108. \xymatrix{
  1109. C_0 \ar@/^/[r]^-{\textsf{select\_instr.}}
  1110. & \text{x86}^{*} \ar[d]^-{\textsf{uncover\_live}} \\
  1111. & \text{x86}^{*} \ar[d]^-{\textsf{build\_interference}} \\
  1112. & \text{x86}^{*} \ar[d]_-{\textsf{allocate\_register}} \\
  1113. & \text{x86}^{*} \ar@/^/[r]^-{\textsf{patch\_instr.}}
  1114. & \text{x86}
  1115. }
  1116. \]
  1117. \caption{Diagram of the passes for register allocation.}
  1118. \label{fig:reg-alloc-passes}
  1119. \end{figure}
  1120. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1121. \chapter{Booleans, Type Checking, and Control Flow}
  1122. \label{ch:bool-types}
  1123. \section{The $S_1$ Language}
  1124. \begin{figure}[htbp]
  1125. \centering
  1126. \fbox{
  1127. \begin{minipage}{0.85\textwidth}
  1128. \[
  1129. \begin{array}{lcl}
  1130. \Op &::=& \ldots \mid \key{and} \mid \key{or} \mid \key{not} \mid \key{eq?} \\
  1131. \Exp &::=& \ldots \mid \key{\#t} \mid \key{\#f} \mid
  1132. \IF{\Exp}{\Exp}{\Exp}
  1133. \end{array}
  1134. \]
  1135. \end{minipage}
  1136. }
  1137. \caption{The $S_1$ language, an extension of $S_0$
  1138. (Figure~\ref{fig:s0-syntax}).}
  1139. \label{fig:s1-syntax}
  1140. \end{figure}
  1141. \section{Type Checking $S_1$ Programs}
  1142. % T ::= Integer | Boolean
  1143. It is common practice to specify a type system by writing rules for
  1144. each kind of AST node. For example, the rule for \key{if} is:
  1145. \begin{quote}
  1146. For any expressions $e_1, e_2, e_3$ and any type $T$, if $e_1$ has
  1147. type \key{Boolean}, $e_2$ has type $T$, and $e_3$ has type $T$, then
  1148. $\IF{e_1}{e_2}{e_3}$ has type $T$.
  1149. \end{quote}
  1150. It is also common practice to write rules using a horizontal line,
  1151. with the conditions written above the line and the conclusion written
  1152. below the line.
  1153. \begin{equation*}
  1154. \inference{e_1 \text{ has type } \key{Boolean} &
  1155. e_2 \text{ has type } T & e_3 \text{ has type } T}
  1156. {\IF{e_1}{e_2}{e_3} \text{ has type } T}
  1157. \end{equation*}
  1158. Because the phrase ``has type'' is repeated so often in these type
  1159. checking rules, it is abbreviated to just a colon. So the above rule
  1160. is abbreviated to the following.
  1161. \begin{equation*}
  1162. \inference{e_1 : \key{Boolean} & e_2 : T & e_3 : T}
  1163. {\IF{e_1}{e_2}{e_3} : T}
  1164. \end{equation*}
  1165. The $\LET{x}{e_1}{e_2}$ construct poses an interesting challenge. The
  1166. variable $x$ is assigned the value of $e_1$ and then $x$ can be used
  1167. inside $e_2$. When we get to an occurrence of $x$ inside $e_2$, how do
  1168. we know what type the variable should be? The answer is that we need
  1169. a way to map from variable names to types. Such a mapping is called a
  1170. \emph{type environment} (aka. \emph{symbol table}). The capital Greek
  1171. letter gamma, written $\Gamma$, is used for referring to type
  1172. environments. The notation $\Gamma, x : T$ stands for
  1173. making a copy of the environment $\Gamma$ and then associating $T$
  1174. with the variable $x$ in the new environment. We write $\Gamma(x)$ to
  1175. lookup the associated type for $x$. The type checking rules for
  1176. \key{let} and variables are as follows.
  1177. \begin{equation*}
  1178. \inference{e_1 : T_1 \text{ in } \Gamma &
  1179. e_2 : T_2 \text{ in } \Gamma,x:T_1}
  1180. {\LET{x}{e_1}{e_2} : T_2 \text{ in } \Gamma}
  1181. \qquad
  1182. \inference{\Gamma(x) = T}
  1183. {x : T \text{ in } \Gamma}
  1184. \end{equation*}
  1185. Type checking has roots in logic, and logicians have a tradition of
  1186. writing the environment on the left-hand side and separating it from
  1187. the expression with a turn-stile ($\vdash$). The turn-stile does not
  1188. have any intrinsic meaning per se. It is punctuation that separates
  1189. the environment $\Gamma$ from the expression $e$. So the above typing
  1190. rules are written as follows.
  1191. \begin{equation*}
  1192. \inference{\Gamma \vdash e_1 : T_1 &
  1193. \Gamma,x:T_1 \vdash e_2 : T_2}
  1194. {\Gamma \vdash \LET{x}{e_1}{e_2} : T_2}
  1195. \qquad
  1196. \inference{\Gamma(x) = T}
  1197. {\Gamma \vdash x : T}
  1198. \end{equation*}
  1199. Overall, the statement $\Gamma \vdash e : T$ is an example of what is
  1200. called a \emph{judgment}. In particular, this judgment says, ``In
  1201. environment $\Gamma$, expression $e$ has type $T$.''
  1202. Figure~\ref{fig:S1-type-system} shows the type checking rules for
  1203. $S_1$.
  1204. \begin{figure}
  1205. \begin{gather*}
  1206. \inference{\Gamma(x) = T}
  1207. {\Gamma \vdash x : T}
  1208. \qquad
  1209. \inference{\Gamma \vdash e_1 : T_1 &
  1210. \Gamma,x:T_1 \vdash e_2 : T_2}
  1211. {\Gamma \vdash \LET{x}{e_1}{e_2} : T_2}
  1212. \\[2ex]
  1213. \inference{}{\Gamma \vdash n : \key{Integer}}
  1214. \quad
  1215. \inference{\Gamma \vdash e_i : T_i \ ^{\forall i \in 1\ldots n} & \Delta(\Op,T_1,\ldots,T_n) = T}
  1216. {\Gamma \vdash (\Op \; e_1 \ldots e_n) : T}
  1217. \\[2ex]
  1218. \inference{}{\Gamma \vdash \key{\#t} : \key{Boolean}}
  1219. \quad
  1220. \inference{}{\Gamma \vdash \key{\#f} : \key{Boolean}}
  1221. \quad
  1222. \inference{\Gamma \vdash e_1 : \key{Boolean} \\
  1223. \Gamma \vdash e_2 : T &
  1224. \Gamma \vdash e_3 : T}
  1225. {\Gamma \vdash \IF{e_1}{e_2}{e_3} : T}
  1226. \end{gather*}
  1227. \caption{Type System for $S_1$.}
  1228. \label{fig:S1-type-system}
  1229. \end{figure}
  1230. \begin{figure}
  1231. \begin{align*}
  1232. \Delta(\key{+},\key{Integer},\key{Integer}) &= \key{Integer} \\
  1233. \Delta(\key{-},\key{Integer},\key{Integer}) &= \key{Integer} \\
  1234. \Delta(\key{-},\key{Integer}) &= \key{Integer} \\
  1235. \Delta(\key{*},\key{Integer},\key{Integer}) &= \key{Integer} \\
  1236. \Delta(\key{read}) &= \key{Integer} \\
  1237. \Delta(\key{and},\key{Boolean},\key{Boolean}) &= \key{Boolean} \\
  1238. \Delta(\key{or},\key{Boolean},\key{Boolean}) &= \key{Boolean} \\
  1239. \Delta(\key{not},\key{Boolean}) &= \key{Boolean} \\
  1240. \Delta(\key{eq?},\key{Integer},\key{Integer}) &= \key{Boolean} \\
  1241. \Delta(\key{eq?},\key{Boolean},\key{Boolean}) &= \key{Boolean}
  1242. \end{align*}
\caption{Types for the primitive operators.}
\label{fig:s1-prim-op-types}
\end{figure}
  1245. \section{The $C_1$ Language}
  1246. \begin{figure}[htbp]
  1247. \[
  1248. \begin{array}{lcl}
  1249. \Arg &::=& \ldots \mid \key{\#t} \mid \key{\#f} \\
  1250. \Stmt &::=& \ldots \mid \IF{\Exp}{\Stmt^{*}}{\Stmt^{*}}
  1251. \end{array}
  1252. \]
  1253. \caption{The $C_1$ intermediate language, an extension of $C_0$
  1254. (Figure~\ref{fig:c0-syntax}).}
  1255. \label{fig:c1-syntax}
  1256. \end{figure}
  1257. \section{Flatten Expressions}
  1258. \section{Select Instructions}
  1259. \section{Register Allocation}
  1260. \section{Patch Instructions}
  1261. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1262. \chapter{Tuples and Heap Allocation}
  1263. \label{ch:tuples}
  1264. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1265. \chapter{Functions}
  1266. \label{ch:functions}
  1267. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1268. \chapter{Lexically Scoped Functions}
  1269. \label{ch:lambdas}
  1270. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1271. \chapter{Mutable Data}
  1272. \label{ch:mutable-data}
  1273. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1274. \chapter{The Dynamic Type}
  1275. \label{ch:type-dynamic}
  1276. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1277. \chapter{Parametric Polymorphism}
  1278. \label{ch:parametric-polymorphism}
  1279. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1280. \chapter{High-level Optimization}
  1281. \label{ch:high-level-optimization}
  1282. \bibliographystyle{plainnat}
  1283. \bibliography{all}
  1284. \end{document}
  1285. %% LocalWords: Dybvig Waddell Abdulaziz Ghuloum Dipanwita
  1286. %% LocalWords: Sarkar lcl Matz aa representable