% book.tex
  1. \documentclass[12pt]{book}
  2. \usepackage[T1]{fontenc}
  3. \usepackage[utf8]{inputenc}
  4. \usepackage{lmodern}
  5. \usepackage{hyperref}
  6. \usepackage{graphicx}
  7. \usepackage[english]{babel}
  8. \usepackage{listings}
  9. \usepackage{amsmath}
  10. \usepackage{amsthm}
  11. \usepackage{amssymb}
  12. \usepackage{natbib}
  13. \usepackage{stmaryrd}
  14. \usepackage{xypic}
  15. \usepackage{semantic}
  16. % Computer Modern is already the default. -Jeremy
  17. %\renewcommand{\ttdefault}{cmtt}
  18. \lstset{%
  19. language=Lisp,
  20. basicstyle=\ttfamily\small,
  21. escapechar=@,
  22. columns=fullflexible
  23. }
  24. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  25. % 'dedication' environment: To add a dedication paragraph at the start of book %
  26. % Source: http://www.tug.org/pipermail/texhax/2010-June/015184.html %
  27. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  28. \newenvironment{dedication}
  29. {
  30. \cleardoublepage
  31. \thispagestyle{empty}
  32. \vspace*{\stretch{1}}
  33. \hfill\begin{minipage}[t]{0.66\textwidth}
  34. \raggedright
  35. }
  36. {
  37. \end{minipage}
  38. \vspace*{\stretch{3}}
  39. \clearpage
  40. }
  41. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  42. % Chapter quote at the start of chapter %
  43. % Source: http://tex.stackexchange.com/a/53380 %
  44. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  45. \makeatletter
  46. \renewcommand{\@chapapp}{}% Not necessary...
  47. \newenvironment{chapquote}[2][2em]
  48. {\setlength{\@tempdima}{#1}%
  49. \def\chapquote@author{#2}%
  50. \parshape 1 \@tempdima \dimexpr\textwidth-2\@tempdima\relax%
  51. \itshape}
  52. {\par\normalfont\hfill--\ \chapquote@author\hspace*{\@tempdima}\par\bigskip}
  53. \makeatother
  54. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  55. \newcommand{\itm}[1]{\ensuremath{\mathit{#1}}}
  56. \newcommand{\Stmt}{\itm{stmt}}
  57. \newcommand{\Exp}{\itm{exp}}
  58. \newcommand{\Instr}{\itm{instr}}
  59. \newcommand{\Prog}{\itm{prog}}
  60. \newcommand{\Arg}{\itm{arg}}
  61. \newcommand{\Int}{\itm{int}}
  62. \newcommand{\Var}{\itm{var}}
  63. \newcommand{\Op}{\itm{op}}
  64. \newcommand{\key}[1]{\texttt{#1}}
  65. \newcommand{\READ}{(\key{read})}
  66. \newcommand{\UNIOP}[2]{(\key{#1}\,#2)}
  67. \newcommand{\BINOP}[3]{(\key{#1}\,#2\,#3)}
  68. \newcommand{\LET}[3]{(\key{let}\,([#1\;#2])\,#3)}
  69. \newcommand{\ASSIGN}[2]{(\key{assign}\,#1\;#2)}
  70. \newcommand{\RETURN}[1]{(\key{return}\,#1)}
  71. \newcommand{\INT}[1]{(\key{int}\;#1)}
  72. \newcommand{\REG}[1]{(\key{reg}\;#1)}
  73. \newcommand{\VAR}[1]{(\key{var}\;#1)}
  74. \newcommand{\STACKLOC}[1]{(\key{stack}\;#1)}
  75. \newcommand{\IF}[3]{(\key{if}\,#1\;#2\;#3)}
  76. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  77. \title{\Huge \textbf{Essentials of Compilation} \\
  78. \huge An Incremental Approach}
  79. \author{\textsc{Jeremy G. Siek} \\
  80. %\thanks{\url{http://homes.soic.indiana.edu/jsiek/}} \\
  81. Indiana University \\
  82. \\
  83. with contributions from: \\
  84. Carl Factora
  85. }
  86. \begin{document}
  87. \frontmatter
  88. \maketitle
  89. \begin{dedication}
  90. This book is dedicated to the programming language wonks at Indiana
  91. University.
  92. \end{dedication}
  93. \tableofcontents
  94. %\listoffigures
  95. %\listoftables
  96. \mainmatter
  97. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  98. \chapter*{Preface}
  99. Talk about nano-pass \citep{Sarkar:2004fk,Keep:2012aa} and incremental
  100. compilers \citep{Ghuloum:2006bh}.
  101. %\section*{Structure of book}
  102. % You might want to add short description about each chapter in this book.
  103. %\section*{About the companion website}
  104. %The website\footnote{\url{https://github.com/amberj/latex-book-template}} for %this file contains:
  105. %\begin{itemize}
  106. % \item A link to (freely downloadable) latest version of this document.
  107. % \item Link to download LaTeX source for this document.
  108. % \item Miscellaneous material (e.g. suggested readings etc).
  109. %\end{itemize}
  110. \section*{Acknowledgments}
  111. Need to give thanks to
  112. \begin{itemize}
  113. \item Kent Dybvig
  114. \item Daniel P. Friedman
  115. \item Abdulaziz Ghuloum
  116. \item Oscar Waddell
  117. \item Dipanwita Sarkar
  118. \item Ronald Garcia
  119. \item Bor-Yuh Evan Chang
  120. \end{itemize}
  121. %\mbox{}\\
  122. %\noindent Amber Jain \\
  123. %\noindent \url{http://amberj.devio.us/}
  124. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  125. \chapter{Preliminaries}
  126. \label{ch:trees-recur}
  127. In this chapter, we review the basic tools that are needed for
  128. implementing a compiler. We use abstract syntax trees (ASTs) in the
  129. form of S-expressions to represent programs (Section~\ref{sec:ast})
  130. and pattern matching to inspect an AST node
  131. (Section~\ref{sec:pattern-matching}). We use recursion to construct
  132. and deconstruct entire ASTs (Section~\ref{sec:recursion}).
  133. \section{Abstract Syntax Trees}
  134. \label{sec:ast}
  135. The primary data structure that is commonly used for representing
  136. programs is the \emph{abstract syntax tree} (AST). When considering
  137. some part of a program, a compiler needs to ask what kind of part it
  138. is and what sub-parts it has. For example, the program on the left is
  139. represented by the AST on the right.
  140. \begin{center}
  141. \begin{minipage}{0.4\textwidth}
  142. \begin{lstlisting}
  143. (+ 50 (- 8))
  144. \end{lstlisting}
  145. \end{minipage}
  146. \begin{minipage}{0.4\textwidth}
  147. \begin{equation}
  148. \xymatrix@=15pt{
  149. & *+[Fo]{+} \ar[dl]\ar[dr]& \\
  150. *+[Fo]{\tt 50} & & *+[Fo]{-} \ar[d] \\
  151. & & *+[Fo]{\tt 8}
  152. } \label{eq:arith-prog}
  153. \end{equation}
  154. \end{minipage}
  155. \end{center}
  156. We shall use the standard terminology for trees: each square above is
  157. called a \emph{node}. The arrows connect a node to its \emph{children}
  158. (which are also nodes). The top-most node is the \emph{root}. Every
  159. node except for the root has a \emph{parent} (the node it is the child
  160. of).
  161. When deciding how to compile the above program, we need to know that
  162. the root node is an addition and that it has two children: the integer
  163. \texttt{50} and the negation of \texttt{8}. The abstract syntax tree
  164. data structure directly supports these queries and hence is a good
  165. choice. In this book, we will often write down the textual
  166. representation of a program even when we really have in mind the AST,
  167. simply because the textual representation is easier to typeset. We
  168. recommend that, in your mind, you should always interpret programs as
  169. abstract syntax trees.
  170. \section{Grammars}
  171. \label{sec:grammar}
  172. A programming language can be thought of as a \emph{set} of programs.
  173. The set is typically infinite (one can always create larger and larger
  174. programs), so one cannot simply describe a language by listing all of
  175. the programs in the language. Instead we write down a set of rules, a
  176. \emph{grammar}, for building programs. We shall write our rules in a
  177. variant of Backus-Naur Form (BNF)~\citep{Backus:1960aa,Knuth:1964aa}.
  178. As an example, we describe a small language, named $\itm{arith}$, of
  179. integers and arithmetic operations. The first rule says that any
  180. integer is in the language:
  181. \begin{equation}
  182. \itm{arith} ::= \Int \label{eq:arith-int}
  183. \end{equation}
  184. Each rule has a left-hand-side and a right-hand-side. The way to read
  185. a rule is that if you have all the program parts on the
  186. right-hand-side, then you can create an AST node and categorize it
  187. according to the left-hand-side. (We do not define $\Int$ because the
  188. reader already knows what an integer is.)
  189. The second rule says that, given an $\itm{arith}$, you can build
  190. another $\itm{arith}$ by negating it.
  191. \begin{equation}
  192. \itm{arith} ::= (\key{-} \; \itm{arith}) \label{eq:arith-neg}
  193. \end{equation}
  194. By rule \eqref{eq:arith-int}, \texttt{8} is an $\itm{arith}$, then by
  195. rule \eqref{eq:arith-neg}, the following AST is an $\itm{arith}$.
  196. \begin{center}
  197. \begin{minipage}{0.25\textwidth}
  198. \begin{lstlisting}
  199. (- 8)
  200. \end{lstlisting}
  201. \end{minipage}
  202. \begin{minipage}{0.25\textwidth}
  203. \begin{equation}
  204. \xymatrix@=15pt{
  205. *+[Fo]{-} \ar[d] \\
  206. *+[Fo]{\tt 8}
  207. }
  208. \label{eq:arith-neg8}
  209. \end{equation}
  210. \end{minipage}
  211. \end{center}
  212. The third and last rule for the $\itm{arith}$ language is for addition:
  213. \begin{equation}
  214. \itm{arith} ::= (\key{+} \; \itm{arith} \; \itm{arith}) \label{eq:arith-add}
  215. \end{equation}
  216. Now we can see that the AST \eqref{eq:arith-prog} is in $\itm{arith}$.
  217. We know that \lstinline{50} is in $\itm{arith}$ by rule
  218. \eqref{eq:arith-int} and we have shown that \texttt{(- 8)} is in
  219. $\itm{arith}$, so we can apply rule \eqref{eq:arith-add} to show that
  220. \texttt{(+ 50 (- 8))} is in the $\itm{arith}$ language.
  221. If you have an AST for which the above three rules do not apply, then
  222. the AST is not in $\itm{arith}$. For example, the AST \texttt{(- 50
  223. (+ 8))} is not in $\itm{arith}$ because there are no rules for $+$
  224. with only one argument, nor for $-$ with two arguments. Whenever we
  225. define a language through a grammar, we implicitly mean for the
  226. language to be the smallest set of programs that are justified by the
  227. rules. That is, the language only includes those programs that the
  228. rules allow.
  229. It is common to have many rules with the same left-hand side, so the
  230. following vertical bar notation is used to gather several rules on one
  231. line. We refer to each clause between a vertical bar as an
  232. ``alternative''.
  233. \[
  234. \itm{arith} ::= \Int \mid (\key{-} \; \itm{arith}) \mid
  235. (\key{+} \; \itm{arith} \; \itm{arith})
  236. \]
  237. \section{S-Expressions}
  238. \label{sec:s-expr}
  239. Racket, as a descendant of Lisp~\citep{McCarthy:1960dz}, has
  240. particularly convenient support for creating and manipulating abstract
  241. syntax trees with its \emph{symbolic expression} feature, or
  242. S-expression for short. We can create an S-expression simply by
  243. writing a backquote followed by the textual representation of the
  244. AST. For example, an S-expression to represent the AST
  245. \eqref{eq:arith-prog} is created by the following Racket expression:
  246. \begin{center}
  247. \texttt{`(+ 50 (- 8))}
  248. \end{center}
  249. To build larger S-expressions one often needs to splice together
  250. several smaller S-expressions. Racket provides the comma operator to
  251. splice an S-expression into a larger one. For example, instead of
  252. creating the S-expression for AST \eqref{eq:arith-prog} all at once,
  253. we could have first created an S-expression for AST
  254. \eqref{eq:arith-neg8} and then spliced that into the addition
  255. S-expression.
  256. \begin{lstlisting}
  257. (define ast1.4 `(- 8))
  258. (define ast1.1 `(+ 50 ,ast1.4))
  259. \end{lstlisting}
  260. In general, the Racket expression that follows the comma (splice)
  261. can be any expression that computes an S-expression.
  262. \section{Pattern Matching}
  263. \label{sec:pattern-matching}
  264. As mentioned above, one of the operations that a compiler needs to
  265. perform on an AST is to access the children of a node. Racket
  266. provides the \texttt{match} form to access the parts of an
  267. S-expression. Consider the following example and the output on the
  268. right.
  269. \begin{center}
  270. \begin{minipage}{0.5\textwidth}
  271. \begin{lstlisting}
  272. (match ast1.1
  273. [`(,op ,child1 ,child2)
  274. (print op) (newline)
  275. (print child1) (newline)
  276. (print child2)])
  277. \end{lstlisting}
  278. \end{minipage}
  279. \vrule
  280. \begin{minipage}{0.25\textwidth}
  281. \begin{lstlisting}
  282. '+
  283. 50
  284. '(- 8)
  285. \end{lstlisting}
  286. \end{minipage}
  287. \end{center}
  288. The \texttt{match} form takes AST \eqref{eq:arith-prog} and binds its
  289. parts to the three variables \texttt{op}, \texttt{child1}, and
  290. \texttt{child2}. In general, a match clause consists of a
  291. \emph{pattern} and a \emph{body}. The pattern is a quoted S-expression
  292. that may contain pattern-variables (preceded by a comma). The body
  293. may contain any Racket code.
  294. A \texttt{match} form may contain several clauses, as in the following
  295. function \texttt{arith-kind} that recognizes which kind of AST node is
  296. represented by a given S-expression. The \texttt{match} proceeds
  297. through the clauses in order, checking whether the pattern can match
  298. the input S-expression. The body of the first clause that matches is
  299. executed. The output of \texttt{arith-kind} for several S-expressions
  300. is shown on the right. In the below \texttt{match}, we see another
  301. form of pattern: the \texttt{(? integer?)} tests the predicate
  302. \texttt{integer?} on the input S-expression.
  303. \begin{center}
  304. \begin{minipage}{0.5\textwidth}
  305. \begin{lstlisting}
  306. (define (arith-kind arith)
  307. (match arith
  308. [(? integer?) `int]
  309. [`(- ,c1) `neg]
  310. [`(+ ,c1 ,c2) `add]))
  311. (arith-kind `50)
  312. (arith-kind `(- 8))
  313. (arith-kind `(+ 50 (- 8)))
  314. \end{lstlisting}
  315. \end{minipage}
  316. \vrule
  317. \begin{minipage}{0.25\textwidth}
  318. \begin{lstlisting}
  319. 'int
  320. 'neg
  321. 'add
  322. \end{lstlisting}
  323. \end{minipage}
  324. \end{center}
  325. %% From this grammar, we have defined {\tt arith} by constraining its
  326. %% syntax. Effectively, we have defined {\tt arith} by first defining
  327. %% what a legal expression (or program) within the language is. To
  328. %% clarify further, we can think of {\tt arith} as a \textit{set} of
  329. %% expressions, where, under syntax constraints, \mbox{{\tt (+ 1 1)}} and
  330. %% {\tt -1} are inhabitants and {\tt (+ 3.2 3)} and {\tt (++ 2 2)} are
  331. %% not (see ~Figure\ref{fig:ast}).
  332. %% The relationship between a grammar and an AST is then similar to that
  333. %% of a set and an inhabitant. From this, every syntactically valid
  334. %% expression, under the constraints of a grammar, can be represented by
  335. %% an abstract syntax tree. This is because {\tt arith} is essentially a
  336. %% specification of a Tree-like data-structure. In this case, tree nodes
  337. %% are the arithmetic operators {\tt +} and {\tt -}, and the leaves are
  338. %% integer constants. From this, we can represent any expression of {\tt
  339. %% arith} using a \textit{syntax expression} (s-exp).
  340. %% \begin{figure}[htbp]
  341. %% \centering
  342. %% \fbox{
  343. %% \begin{minipage}{0.85\textwidth}
  344. %% \[
  345. %% \begin{array}{lcl}
  346. %% exp &::=& sexp \mid (sexp*) \mid (unquote \; sexp) \\
  347. %% sexp &::=& Val \mid Var \mid (quote \; exp) \mid (quasiquote \; exp)
  348. %% \end{array}
  349. %% \]
  350. %% \end{minipage}
  351. %% }
  352. %% \caption{\textit{s-exp} syntax: $Val$ and $Var$ are shorthand for Value and Variable.}
  353. %% \label{fig:sexp-syntax}
  354. %% \end{figure}
  355. %% For our purposes, we will treat s-exps equivalent to \textit{possibly
  356. %% deeply-nested lists}. For the sake of brevity, the symbols $single$
  357. %% $quote$ ('), $backquote$ (`), and $comma$ (,) are reader sugar for
  358. %% {\tt quote}, {\tt quasiquote}, and {\tt unquote}. We provide several
  359. %% examples of s-exps and functions that return s-exps below. We use the
  360. %% {\tt >} symbol to represent interaction with a Racket REPL.
  361. %% \begin{verbatim}
  362. %% (define 1plus1 `(1 + 1))
  363. %% (define (1plusX x) `(1 + ,x))
  364. %% (define (XplusY x y) `(,x + ,y))
  365. %% > 1plus1
  366. %% '(1 + 1)
  367. %% > (1plusX 1)
  368. %% '(1 + 1)
  369. %% > (XplusY 1 1)
  370. %% '(1 + 1)
  371. %% > `,1plus1
  372. %% '(1 + 1)
  373. %% \end{verbatim}
  374. %% In any expression wrapped with {\tt quasiquote} ({\tt `}), sub-expressions
  375. %% wrapped with an {\tt unquote} expression are evaluated before the entire
  376. %% expression is returned wrapped in a {\tt quote} expression.
  377. % \marginpar{\scriptsize Introduce s-expressions, quote, and quasi-quote, and comma in
  378. % this section. Make sure to include examples of ASTs. The description
  379. % here of grammars is incomplete. It doesn't really say what grammars are or what they do, it
  380. % just shows an example. I would recommend reading my blog post: a crash course on
  381. % notation in PL theory, especially the sections on Definition by Rules
  382. % and Language Syntax and Grammars. -JGS}
  383. % \marginpar{\scriptsize The lambda calculus is more complex of an example that what we really
  384. % need at this point. I think we can make due with just integers and arithmetic. -JGS}
  385. % \marginpar{\scriptsize Regarding de-Bruijnizing as an example... that strikes me
  386. % as something that may be foreign to many readers. The examples in this
  387. % first chapter should try to be simple and hopefully connect with things
  388. % that the reader is already familiar with. -JGS}
  389. % \begin{enumerate}
  390. % \item Syntax transformation
  391. % \item Some Racket examples (factorial?)
  392. % \end{enumerate}
  393. %% For our purposes, our compiler will take a Scheme-like expression and
  394. %% transform it to X86\_64 Assembly. Along the way, we transform each
  395. %% input expression into a handful of \textit{intermediary languages}
  396. %% (IL). A key tool for transforming one language into another is
  397. %% \textit{pattern matching}.
  398. %% Racket provides a built-in pattern-matcher, {\tt match}, that we can
  399. %% use to perform operations on s-exps. As a preliminary example, we
  400. %% include a familiar definition of factorial, first without using match.
  401. %% \begin{verbatim}
  402. %% (define (! n)
  403. %% (if (zero? n) 1
  404. %% (* n (! (sub1 n)))))
  405. %% \end{verbatim}
  406. %% In this form of factorial, we are simply conditioning (viz. {\tt zero?})
  407. %% on the inputted natural number, {\tt n}. If we rewrite factorial using
  408. %% {\tt match}, we can match on the actual value of {\tt n}.
  409. %% \begin{verbatim}
  410. %% (define (! n)
  411. %% (match n
  412. %% (0 1)
  413. %% (n (* n (! (sub1 n))))))
  414. %% \end{verbatim}
  415. %% In this definition of factorial, the first {\tt match} line (viz. {\tt (0 1)})
  416. %% can be read as "if {\tt n} is 0, then return 1." The second line matches on an
  417. %% arbitrary variable, {\tt n}, and does not place any constraints on it. We could
  418. %% have also written this line as {\tt (else (* n (! (sub1 n))))}, where {\tt n}
  419. %% is scoped by {\tt match}. Of course, we can also use {\tt match} to pattern
  420. %% match on more complex expressions.
  421. \section{Recursion}
  422. \label{sec:recursion}
  423. Programs are inherently recursive in that an $\itm{arith}$ AST is made
  424. up of smaller $\itm{arith}$ ASTs. Thus, the natural way to process an
  425. entire program is with a recursive function. As a first example of
  426. such a function, we define \texttt{arith?} below, which takes an
  427. arbitrary S-expression, {\tt sexp}, and determines whether or not {\tt
  428. sexp} is in {\tt arith}. Note that each match clause corresponds to
  429. one of the grammar rules.
  430. \begin{center}
  431. \begin{minipage}{0.7\textwidth}
  432. \begin{lstlisting}
  433. (define (arith? sexp)
  434. (match sexp
  435. [(? integer?) #t]
  436. [`(- ,e) (arith? e)]
  437. [`(+ ,e1 ,e2)
  438. (and (arith? e1) (arith? e2))]
  439. [else #f]))
  440. (arith? `(+ 50 (- 8)))
  441. (arith? `(- 50 (+ 8)))
  442. \end{lstlisting}
  443. \end{minipage}
  444. \vrule
  445. \begin{minipage}{0.25\textwidth}
  446. \begin{lstlisting}
  447. #t
  448. #f
  449. \end{lstlisting}
  450. \end{minipage}
  451. \end{center}
  452. UNDER CONSTRUCTION
  453. Here, {\tt \#:when} puts constraints on the value of matched expressions.
  454. In this case, we make sure that every sub-expression in \textit{op} position
  455. is either {\tt +} or {\tt -}. Otherwise, we return an error, signaling a
  456. non-{\tt arith} expression. As we mentioned earlier, every expression
  457. wrapped in an {\tt unquote} is evaluated first. When used in a LHS {\tt match}
  458. sub-expression, these expressions evaluate to the actual value of the matched
  459. expression (i.e., {\tt arith-exp}). Thus, {\tt `(,e1 ,op ,e2)} and
  460. {\tt `(e1 op e2)} are not equivalent.
  461. % \begin{enumerate}
  462. % \item \textit{What is a base case?}
  463. % \item Using on a language (lambda calculus ->
  464. % \end{enumerate}
  465. Before getting into more complex {\tt match} examples, we first introduce
  466. the concept of \textit{structural recursion}, which is the general name for
  467. recurring over Tree-like or \textit{possibly deeply-nested list} structures.
  468. The key to performing structural recursion, which from now on we refer to
  469. simply as recursion, is to have some form of specification for the structure
  470. we are recurring on. Luckily, we are already familiar with one: a BNF or grammar.
  471. For example, let's take the grammar for $S_0$, which we include below.
  472. Writing a recursive program that takes an arbitrary expression of $S_0$
  473. should handle each expression in the grammar. An example program that
  474. we can write is an $interpreter$. To keep our interpreter simple, we
  475. ignore the {\tt read} operator.
  476. \begin{figure}[htbp]
  477. \centering
  478. \fbox{
  479. \begin{minipage}{0.85\textwidth}
  480. \[
  481. \begin{array}{lcl}
  482. \Op &::=& \key{+} \mid \key{-} \mid \key{*} \mid \key{read} \\
  483. \Exp &::=& \Int \mid (\Op \; \Exp^{*}) \mid \Var \mid \LET{\Var}{\Exp}{\Exp}
  484. \end{array}
  485. \]
  486. \end{minipage}
  487. }
  488. \caption{The syntax of the $S_0$ language. The abbreviation \Op{} is
  489. short for operator, \Exp{} is short for expression, \Int{} for integer,
  490. and \Var{} for variable.}
  491. %\label{fig:s0-syntax}
  492. \end{figure}
  493. \begin{verbatim}
  494. \end{verbatim}
  495. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  496. \chapter{Integers and Variables}
  497. \label{ch:int-exp}
  498. %\begin{chapquote}{Author's name, \textit{Source of this quote}}
  499. %``This is a quote and I don't know who said this.''
  500. %\end{chapquote}
  501. \section{The $S_0$ Language}
  502. The $S_0$ language includes integers, operations on integers,
  503. (arithmetic and input), and variable definitions. The syntax of the
  504. $S_0$ language is defined by the grammar in
  505. Figure~\ref{fig:s0-syntax}. This language is rich enough to exhibit
  506. several compilation techniques but simple enough so that we can
  507. implement a compiler for it in two weeks of hard work. To give the
  508. reader a feeling for the scale of this first compiler, the instructor
  509. solution for the $S_0$ compiler consists of 6 recursive functions and
  510. a few small helper functions that together span 256 lines of code.
  511. \begin{figure}[btp]
  512. \centering
  513. \fbox{
  514. \begin{minipage}{0.85\textwidth}
  515. \[
  516. \begin{array}{lcl}
  517. \Op &::=& \key{+} \mid \key{-} \mid \key{*} \mid \key{read} \\
  518. \Exp &::=& \Int \mid (\Op \; \Exp^{*}) \mid \Var \mid \LET{\Var}{\Exp}{\Exp}
  519. \end{array}
  520. \]
  521. \end{minipage}
  522. }
  523. \caption{The syntax of the $S_0$ language. The abbreviation \Op{} is
  524. short for operator, \Exp{} is short for expression, \Int{} for integer,
  525. and \Var{} for variable.}
  526. \label{fig:s0-syntax}
  527. \end{figure}
  528. The result of evaluating an expression is a value. For $S_0$, values
  529. are integers. To make it straightforward to map these integers onto
  530. x86-64 assembly~\citep{Matz:2013aa}, we restrict the integers to just
  531. those representable with 64-bits, the range $-2^{63}$ to $2^{63}-1$.
  532. We will walk through some examples of $S_0$ programs, commenting on
  533. aspects of the language that will be relevant to compiling it. We
  534. start with one of the simplest $S_0$ programs; it adds two integers.
  535. \[
  536. \BINOP{+}{10}{32}
  537. \]
  538. The result is $42$, as you might have expected.
  539. %
  540. The next example demonstrates that expressions may be nested within
  541. each other, in this case nesting several additions and negations.
  542. \[
  543. \BINOP{+}{10}{ \UNIOP{-}{ \BINOP{+}{12}{20} } }
  544. \]
  545. What is the result of the above program?
  546. The \key{let} construct stores a value in a variable which can then be
  547. used within the body of the \key{let}. So the following program stores
  548. $32$ in $x$ and then computes $\BINOP{+}{10}{x}$, producing $42$.
  549. \[
  550. \LET{x}{ \BINOP{+}{12}{20} }{ \BINOP{+}{10}{x} }
  551. \]
  552. When there are multiple \key{let}'s for the same variable, the closest
  553. enclosing \key{let} is used. Consider the following program with two
  554. \key{let}'s that define variables named $x$.
  555. \[
  556. \LET{x}{32}{ \BINOP{+}{ \LET{x}{10}{x} }{ x } }
  557. \]
  558. For the purposes of showing which variable uses correspond to which
  559. definitions, the following shows the $x$'s annotated with subscripts
  560. to distinguish them.
  561. \[
  562. \LET{x_1}{32}{ \BINOP{+}{ \LET{x_2}{10}{x_2} }{ x_1 } }
  563. \]
  564. The \key{read} operation prompts the user of the program for an
  565. integer. Given an input of $10$, the following program produces $42$.
  566. \[
  567. \BINOP{+}{(\key{read})}{32}
  568. \]
  569. We include the \key{read} operation in $S_0$ to demonstrate that order
  570. of evaluation can make a difference. Given the input $52$ then $10$,
  571. the following produces $42$ (and not $-42$).
  572. \[
  573. \LET{x}{\READ}{ \LET{y}{\READ}{ \BINOP{-}{x}{y} } }
  574. \]
  575. The initializing expression is always evaluated before the body of the
  576. \key{let}, so in the above, the \key{read} for $x$ is performed before
  577. the \key{read} for $y$.
  578. %
  579. The behavior of the following program is somewhat subtle because
  580. Scheme does not specify an evaluation order for arguments of an
  581. operator such as $-$.
  582. \[
  583. \BINOP{-}{\READ}{\READ}
  584. \]
  585. Given the input $42$ then $10$, the above program can result in either
  586. $42$ or $-42$, depending on the whims of the Scheme implementation.
  587. The goal for this chapter is to implement a compiler that translates
  588. any program $p \in S_0$ into an x86-64 assembly program $p'$ such that
  589. the assembly program exhibits the same behavior on an x86 computer as
  590. the $S_0$ program running in a Scheme implementation.
  591. \[
  592. \xymatrix{
  593. p \in S_0 \ar[rr]^{\text{compile}} \ar[drr]_{\text{run in Scheme}\quad} && p' \in \text{x86-64} \ar[d]^{\quad\text{run on an x86 machine}}\\
  594. & & n \in \mathbb{Z}
  595. }
  596. \]
  597. In the next section we introduce enough of the x86-64 assembly
  598. language to compile $S_0$.
  599. \section{The x86-64 Assembly Language}
  600. An x86-64 program is a sequence of instructions. The instructions
  601. manipulate 16 variables called \emph{registers} and can also load and
  602. store values into \emph{memory}. Memory is a mapping of 64-bit
  603. addresses to 64-bit values. The syntax $n(r)$ is used to read the
  604. address $a$ stored in register $r$ and then offset it by $n$ bytes (8
  605. bits), producing the address $a + n$. The arithmetic instructions,
  606. such as $\key{addq}\,s\,d$, read from the source $s$ and destination
  607. argument $d$, apply the arithmetic operation, then stores the result
  608. in the destination $d$. In this case, computing $d \gets d + s$. The
  609. move instruction, $\key{movq}\,s\,d$ reads from $s$ and stores the
  610. result in $d$. The $\key{callq}\,\mathit{label}$ instruction executes
  611. the procedure specified by the label, which we shall use to implement
  612. \key{read}. Figure~\ref{fig:x86-a} defines the syntax for this subset
  613. of the x86-64 assembly language.
  614. \begin{figure}[tbp]
  615. \fbox{
  616. \begin{minipage}{0.96\textwidth}
  617. \[
  618. \begin{array}{lcl}
  619. \itm{register} &::=& \key{rsp} \mid \key{rbp} \mid \key{rax} \mid \key{rbx} \mid \key{rcx}
  620. \mid \key{rdx} \mid \key{rsi} \mid \key{rdi} \mid \\
  621. && \key{r8} \mid \key{r9} \mid \key{r10}
  622. \mid \key{r11} \mid \key{r12} \mid \key{r13}
  623. \mid \key{r14} \mid \key{r15} \\
  624. \Arg &::=& \key{\$}\Int \mid \key{\%}\itm{register} \mid \Int(\key{\%}\itm{register}) \\
  625. \Instr &::=& \key{addq} \; \Arg, \Arg \mid
  626. \key{subq} \; \Arg, \Arg \mid
  627. \key{imulq} \; \Arg,\Arg \mid
  628. \key{negq} \; \Arg \mid \\
  629. && \key{movq} \; \Arg, \Arg \mid
  630. \key{callq} \; \mathit{label} \mid
  631. \key{pushq}\;\Arg \mid \key{popq}\;\Arg \mid \key{retq} \\
  632. \Prog &::= & \key{.globl \_main}\\
  633. & & \key{\_main:} \; \Instr^{+}
  634. \end{array}
  635. \]
  636. \end{minipage}
  637. }
  638. \caption{A subset of the x86-64 assembly language.}
  639. \label{fig:x86-a}
  640. \end{figure}
  641. Figure~\ref{fig:p0-x86} depicts an x86-64 program that is equivalent
  642. to $\BINOP{+}{10}{32}$. The \key{globl} directive says that the
  643. \key{\_main} procedure is externally visible, which is necessary so
  644. that the operating system can call it. The label \key{\_main:}
  645. indicates the beginning of the \key{\_main} procedure. The
  646. instruction $\key{movq}\,\$10, \%\key{rax}$ puts $10$ into the
  647. register \key{rax}. The following instruction $\key{addq}\,\key{\$}32,
  648. \key{\%rax}$ adds $32$ to the $10$ in \key{rax} and puts the result,
  649. $42$, back into \key{rax}. The instruction \key{retq} finishes the
  650. \key{\_main} function by returning the integer in the \key{rax}
  651. register to the operating system.
  652. \begin{figure}[htbp]
  653. \centering
  654. \begin{minipage}{0.6\textwidth}
  655. \begin{lstlisting}
  656. .globl _main
  657. _main:
  658. movq $10, %rax
  659. addq $32, %rax
  660. retq
  661. \end{lstlisting}
  662. \end{minipage}
  663. \caption{A simple x86-64 program equivalent to $\BINOP{+}{10}{32}$.}
  664. \label{fig:p0-x86}
  665. \end{figure}
  666. The next example exhibits the use of memory. Figure~\ref{fig:p1-x86}
  667. lists an x86-64 program that is equivalent to $\BINOP{+}{52}{
  668. \UNIOP{-}{10} }$. To understand how this x86-64 program uses memory,
  669. we need to explain a region of memory called the
  670. \emph{procedure call stack} (\emph{stack} for short). The stack
  671. consists of a separate \emph{frame} for each procedure call. The
  672. memory layout for an individual frame is shown in
  673. Figure~\ref{fig:frame}. The register \key{rsp} is called the
  674. \emph{stack pointer} and points to the item at the top of the
  675. stack. The stack grows downward in memory, so we increase the size of
  676. the stack by subtracting from the stack pointer. The frame size is
  677. required to be a multiple of 16 bytes. The register \key{rbp} is the
  678. \emph{base pointer} which serves two purposes: 1) it saves the
  679. location of the stack pointer for the procedure that called the
  680. current one and 2) it is used to access variables associated with the
  681. current procedure. We number the variables from $1$ to $n$. Variable
  682. $1$ is stored at address $-8\key{(\%rbp)}$, variable $2$ at
  683. $-16\key{(\%rbp)}$, etc.
  684. \begin{figure}
  685. \centering
  686. \begin{minipage}{0.6\textwidth}
  687. \begin{lstlisting}
  688. .globl _main
  689. _main:
  690. pushq %rbp
  691. movq %rsp, %rbp
  692. subq $16, %rsp
  693. movq $10, -8(%rbp)
  694. negq -8(%rbp)
  695. movq $52, %rax
  696. addq -8(%rbp), %rax
  697. addq $16, %rsp
  698. popq %rbp
  699. retq
  700. \end{lstlisting}
  701. \end{minipage}
  702. \caption{An x86-64 program equivalent to $\BINOP{+}{52}{\UNIOP{-}{10} }$.}
  703. \label{fig:p1-x86}
  704. \end{figure}
  705. \begin{figure}
  706. \centering
  707. \begin{tabular}{|r|l|} \hline
  708. Position & Contents \\ \hline
  709. 8(\key{\%rbp}) & return address \\
  710. 0(\key{\%rbp}) & old \key{rbp} \\
  711. -8(\key{\%rbp}) & variable $1$ \\
  712. -16(\key{\%rbp}) & variable $2$ \\
  713. \ldots & \ldots \\
  714. 0(\key{\%rsp}) & variable $n$\\ \hline
  715. \end{tabular}
  716. \caption{Memory layout of a frame.}
  717. \label{fig:frame}
  718. \end{figure}
  719. Getting back to the program in Figure~\ref{fig:p1-x86}, the first
  720. three instructions are the typical prelude for a procedure. The
  721. instruction \key{pushq \%rbp} saves the base pointer for the procedure
  722. that called the current one onto the stack and subtracts $8$ from the
  723. stack pointer. The second instruction \key{movq \%rsp, \%rbp} changes
  724. the base pointer to the top of the stack. The instruction \key{subq
  725. \$16, \%rsp} moves the stack pointer down to make enough room for
  726. storing variables. This program just needs one variable ($8$ bytes)
  727. but because the frame size is required to be a multiple of 16 bytes,
  728. it rounds to 16 bytes.
  729. The next four instructions carry out the work of computing
  730. $\BINOP{+}{52}{\UNIOP{-}{10} }$. The first instruction \key{movq \$10,
  731. -8(\%rbp)} stores $10$ in variable $1$. The instruction \key{negq
  732. -8(\%rbp)} changes variable $1$ to $-10$. The \key{movq \$52, \%rax}
  733. places $52$ in the register \key{rax} and \key{addq -8(\%rbp), \%rax}
  734. adds the contents of variable $1$ to \key{rax}, at which point
  735. \key{rax} contains $42$.
  736. The last three instructions are the typical \emph{conclusion} of a
  737. procedure. The \key{addq \$16, \%rsp} instruction moves the stack
  738. pointer back to point at the old base pointer. The amount added here
  739. needs to match the amount that was subtracted in the prelude of the
  740. procedure. Then \key{popq \%rbp} returns the old base pointer to
  741. \key{rbp} and adds $8$ to the stack pointer. The \key{retq}
  742. instruction jumps back to the procedure that called this one and
  743. subtracts 8 from the stack pointer.
  744. The compiler will need a convenient representation for manipulating
  745. x86 programs, so we define an abstract syntax for x86 in
  746. Figure~\ref{fig:x86-ast-a}. The \itm{info} field of the \key{program}
  747. AST node is for storing auxiliary information that needs to be
  748. communicated from one pass to the next. The function \key{print-x86}
  749. provided in the supplemental code converts an x86 abstract syntax tree
  750. into the text representation for x86 (Figure~\ref{fig:x86-a}).
  751. \begin{figure}[tbp]
  752. \fbox{
  753. \begin{minipage}{0.96\textwidth}
  754. \vspace{-10pt}
  755. \[
  756. \begin{array}{lcl}
  757. \Arg &::=& \INT{\Int} \mid \REG{\itm{register}}
  758. \mid \STACKLOC{\Int} \\
  759. \Instr &::=& (\key{add} \; \Arg\; \Arg) \mid
  760. (\key{sub} \; \Arg\; \Arg) \mid
  761. (\key{imul} \; \Arg\;\Arg) \mid
  762. (\key{neg} \; \Arg) \mid \\
  763. && (\key{mov} \; \Arg\; \Arg) \mid
  764. (\key{call} \; \mathit{label}) \mid
  765. (\key{push}\;\Arg) \mid (\key{pop}\;\Arg) \mid (\key{ret}) \\
  766. \Prog &::= & (\key{program} \;\itm{info} \; \Instr^{+})
  767. \end{array}
  768. \]
  769. \end{minipage}
  770. }
  771. \caption{Abstract syntax for x86-64 assembly.}
  772. \label{fig:x86-ast-a}
  773. \end{figure}
  774. \section{From $S_0$ to x86-64 via $C_0$}
  775. \label{sec:plan-s0-x86}
  776. To compile one language to another it helps to focus on the
  777. differences between the two languages. It is these differences that
  778. the compiler will need to bridge. What are the differences between
  779. $S_0$ and x86-64 assembly? Here we list some of the most important
  780. differences.
  781. \begin{enumerate}
  782. \item x86-64 arithmetic instructions typically take two arguments and
  783. update the second argument in place. In contrast, $S_0$ arithmetic
  784. operations only read their arguments and produce a new value.
  785. \item An argument to an $S_0$ operator can be any expression, whereas
  786. x86-64 instructions restrict their arguments to integers, registers,
  787. and memory locations.
  788. \item An $S_0$ program can have any number of variables whereas x86-64
  789. has only 16 registers.
  790. \item Variables in $S_0$ can overshadow other variables with the same
  791. name. The registers and memory locations of x86-64 all have unique
  792. names.
  793. \end{enumerate}
  794. We ease the challenge of compiling from $S_0$ to x86 by breaking down
  795. the problem into several steps, dealing with the above differences one
  796. at a time. The main question then becomes: in what order do we tackle
  797. these differences? This is often one of the most challenging questions
  798. that a compiler writer must answer because some orderings may be much
  799. more difficult to implement than others. It is difficult to know ahead
  800. of time which orders will be better so often some trial-and-error is
  801. involved. However, we can try to plan ahead and choose the orderings
  802. based on what we find out.
  803. For example, to handle difference \#2 (nested expressions), we shall
  804. introduce new variables and pull apart the nested expressions into a
  805. sequence of assignment statements. To deal with difference \#3 we
  806. will be replacing variables with registers and/or stack
  807. locations. Thus, it makes sense to deal with \#2 before \#3 so that
  808. \#3 can replace both the original variables and the new ones. Next,
  809. consider where \#1 should fit in. Because it has to do with the format
  810. of x86 instructions, it makes more sense after we have flattened the
  811. nested expressions (\#2). Finally, when should we deal with \#4
  812. (variable overshadowing)? We shall solve this problem by renaming
  813. variables to make sure they have unique names. Recall that our plan
  814. for \#2 involves moving nested expressions, which could be problematic
  815. if it changes the shadowing of variables. However, if we deal with \#4
  816. first, then it will not be an issue. Thus, we arrive at the following
  817. ordering.
  818. \[
  819. \xymatrix{
  820. 4 \ar[r] & 2 \ar[r] & 1 \ar[r] & 3
  821. }
  822. \]
  823. We further simplify the translation from $S_0$ to x86 by identifying
  824. an intermediate language named $C_0$, roughly half-way between $S_0$
  825. and x86, to provide a rest stop along the way. The name $C_0$ comes
  826. from this language being vaguely similar to the $C$ language. The
  827. differences \#4 and \#1, regarding variables and nested expressions,
  828. are handled by the passes \textsf{uniquify} and \textsf{flatten} that
  829. bring us to $C_0$.
  830. \[\large
  831. \xymatrix@=50pt{
  832. S_0 \ar@/^/[r]^-{\textsf{uniquify}} &
  833. S_0 \ar@/^/[r]^-{\textsf{flatten}} &
  834. C_0
  835. }
  836. \]
  837. The syntax for $C_0$ is defined in Figure~\ref{fig:c0-syntax}. The
  838. $C_0$ language supports the same operators as $S_0$ but the arguments
  839. of operators are now restricted to just variables and integers. The
  840. \key{let} construct of $S_0$ is replaced by an assignment statement
  841. and there is a \key{return} construct to specify the return value of
  842. the program. A program consists of a sequence of statements that
  843. include at least one \key{return} statement.
  844. \begin{figure}[tbp]
  845. \fbox{
  846. \begin{minipage}{0.96\textwidth}
  847. \[
  848. \begin{array}{lcl}
  849. \Arg &::=& \Int \mid \Var \\
  850. \Exp &::=& \Arg \mid (\Op \; \Arg^{*})\\
  851. \Stmt &::=& \ASSIGN{\Var}{\Exp} \mid \RETURN{\Arg} \\
  852. \Prog & ::= & (\key{program}\;\itm{info}\;\Stmt^{+})
  853. \end{array}
  854. \]
  855. \end{minipage}
  856. }
  857. \caption{The $C_0$ intermediate language.}
  858. \label{fig:c0-syntax}
  859. \end{figure}
  860. To get from $C_0$ to x86-64 assembly requires three more steps, which
  861. we discuss below.
  862. \[\large
  863. \xymatrix@=50pt{
  864. C_0 \ar@/^/[r]^-{\textsf{select\_instr.}}
  865. & \text{x86}^{*} \ar@/^/[r]^-{\textsf{assign\_homes}}
  866. & \text{x86}^{*} \ar@/^/[r]^-{\textsf{patch\_instr.}}
  867. & \text{x86}
  868. }
  869. \]
  870. We handle difference \#1, concerning the format of arithmetic
  871. instructions, in the \textsf{select\_instructions} pass. The result
  872. of this pass produces programs consisting of x86-64 instructions that
  873. use variables.
  874. %
  875. As there are only 16 registers, we cannot always map variables to
  876. registers (difference \#3). Fortunately, the stack can grow quite large, so
  877. we can map variables to locations on the stack. This is handled in the
  878. \textsf{assign\_homes} pass. The topic of
  879. Chapter~\ref{ch:register-allocation} is implementing a smarter
  880. approach in which we make a best-effort to map variables to registers,
  881. resorting to the stack only when necessary.
  882. The final pass in our journey to x86 handles an idiosyncrasy of x86
  883. assembly. Many x86 instructions have two arguments but only one of the
  884. arguments may be a memory reference. Because we are mapping variables
  885. to stack locations, many of our generated instructions will violate
  886. this restriction. The purpose of the \textsf{patch\_instructions} pass
  887. is to fix this problem by replacing every bad instruction with a short
  888. sequence of instructions that use the \key{rax} register.
  889. \section{Uniquify Variables}
  890. The purpose of this pass is to make sure that each \key{let} uses a
  891. unique variable name. For example, the \textsf{uniquify} pass could
  892. translate
  893. \[
  894. \LET{x}{32}{ \BINOP{+}{ \LET{x}{10}{x} }{ x } }
  895. \]
  896. to
  897. \[
  898. \LET{x.1}{32}{ \BINOP{+}{ \LET{x.2}{10}{x.2} }{ x.1 } }
  899. \]
  900. We recommend implementing \textsf{uniquify} as a recursive function
  901. that mostly just copies the input program. However, when encountering
  902. a \key{let}, it should generate a unique name for the variable (the
  903. Racket function \key{gensym} is handy for this) and associate the old
  904. name with the new unique name in an association list. The
  905. \textsf{uniquify} function will need to access this association list
  906. when it gets to a variable reference, so we add another parameter to
  907. \textsf{uniquify} for the association list.
  908. \section{Flatten Expressions}
  909. The purpose of the \textsf{flatten} pass is to get rid of nested
  910. expressions, such as the $\UNIOP{-}{10}$ in the following program,
  911. without changing the behavior of the program.
  912. \[
  913. \BINOP{+}{52}{ \UNIOP{-}{10} }
  914. \]
  915. This can be accomplished by introducing a new variable, assigning the
  916. nested expression to the new variable, and then using the new variable
  917. in place of the nested expressions. For example, the above program is
  918. translated to the following one.
  919. \[
  920. \begin{array}{l}
  921. \ASSIGN{ \itm{x} }{ \UNIOP{-}{10} } \\
  922. \RETURN{ \BINOP{+}{52}{ \itm{x} } }
  923. \end{array}
  924. \]
  925. We recommend implementing \textsf{flatten} as a recursive function
  926. that returns two things, 1) the newly flattened expression, and 2) a
  927. list of assignment statements, one for each of the new variables
  928. introduced while flattening the expression.
  929. Take special care for programs such as the following that initialize
  930. variables with integers or other variables.
  931. \[
  932. \LET{a}{42}{ \LET{b}{a}{ b }}
  933. \]
  934. This program should be translated to
  935. \[
  936. \ASSIGN{a}{42} \;
  937. \ASSIGN{b}{a} \;
  938. \RETURN{b}
  939. \]
  940. and not the following, which could result from a naive implementation
  941. of \textsf{flatten}.
  942. \[
  943. \ASSIGN{x.1}{42}\;
  944. \ASSIGN{a}{x.1}\;
  945. \ASSIGN{x.2}{a}\;
  946. \ASSIGN{b}{x.2}\;
  947. \RETURN{b}
  948. \]
  949. \section{Select Instructions}
  950. In the \textsf{select\_instructions} pass we begin the work of
  951. translating from $C_0$ to x86. The target language of this pass is a
  952. pseudo-x86 language that still uses variables, so we add an AST node
  953. of the form $\VAR{\itm{var}}$. The \textsf{select\_instructions} pass
  954. deals with the differing format of arithmetic operations. For example,
  955. in $C_0$ an addition operation could take the following form:
  956. \[
  957. \ASSIGN{x}{ \BINOP{+}{10}{32} }
  958. \]
  959. To translate to x86, we need to express this addition using the
  960. \key{add} instruction that does an in-place update. So we first move
  961. $10$ to $x$ then perform the \key{add}.
  962. \[
  963. (\key{mov}\,\INT{10}\, \VAR{x})\; (\key{add} \;\INT{32}\; \VAR{x})
  964. \]
  965. There are some cases that require special care to avoid generating
  966. needlessly complicated code. If one of the arguments is the same as
  967. the left-hand side of the assignment, then there is no need for the
  968. extra move instruction. For example, the following
  969. \[
  970. \ASSIGN{x}{ \BINOP{+}{10}{x} }
  971. \quad\text{should translate to}\quad
  972. (\key{add} \; \INT{10}\; \VAR{x})
  973. \]
  974. Regarding the \RETURN{e} statement of $C_0$, we recommend treating it
  975. as an assignment to the \key{rax} register and let the procedure
  976. conclusion handle the transfer of control back to the calling
  977. procedure.
  978. \section{Assign Homes}
  979. As discussed in Section~\ref{sec:plan-s0-x86}, the
  980. \textsf{assign\_homes} pass places all of the variables on the stack.
  981. Consider again the example $S_0$ program $\BINOP{+}{52}{ \UNIOP{-}{10} }$,
  982. which after \textsf{select\_instructions} looks like the following.
  983. \[
  984. \begin{array}{l}
  985. (\key{mov}\;\INT{10}\; \VAR{x})\\
  986. (\key{neg}\; \VAR{x})\\
  987. (\key{mov}\; \INT{52}\; \REG{\itm{rax}})\\
  988. (\key{add}\; \VAR{x}\; \REG{\itm{rax}})
  989. \end{array}
  990. \]
  991. The one and only variable $x$ is assigned to stack location
  992. \key{-8(\%rbp)}, so the \textsf{assign\_homes} pass translates the
  993. above to
  994. \[
  995. \begin{array}{l}
  996. (\key{mov}\;\INT{10}\; \STACKLOC{{-}8})\\
  997. (\key{neg}\; \STACKLOC{{-}8})\\
  998. (\key{mov}\; \INT{52}\; \REG{\itm{rax}})\\
  999. (\key{add}\; \STACKLOC{{-}8}\; \REG{\itm{rax}})
  1000. \end{array}
  1001. \]
  1002. In the process of assigning stack locations to variables, it is
  1003. convenient to compute and store the size of the frame which will be
  1004. needed later to generate the procedure conclusion.
  1005. \section{Patch Instructions}
  1006. The purpose of this pass is to make sure that each instruction adheres
  1007. to the restrictions regarding which arguments can be memory
  1008. references. For most instructions, the rule is that at most one
  1009. argument may be a memory reference.
  1010. Consider again the following example.
  1011. \[
  1012. \LET{a}{42}{ \LET{b}{a}{ b }}
  1013. \]
  1014. After the \textsf{assign\_homes} pass, the above has been translated to
  1015. \[
  1016. \begin{array}{l}
  1017. (\key{mov} \;\INT{42}\; \STACKLOC{{-}8})\\
  1018. (\key{mov}\;\STACKLOC{{-}8}\; \STACKLOC{{-}16})\\
  1019. (\key{mov}\;\STACKLOC{{-}16}\; \REG{\itm{rax}})
  1020. \end{array}
  1021. \]
  1022. The second \key{mov} instruction is problematic because both arguments
  1023. are stack locations. We suggest fixing this problem by moving from the
  1024. source to \key{rax} and then from \key{rax} to the destination, as
  1025. follows.
  1026. \[
  1027. \begin{array}{l}
  1028. (\key{mov} \;\INT{42}\; \STACKLOC{{-}8})\\
  1029. (\key{mov}\;\STACKLOC{{-}8}\; \REG{\itm{rax}})\\
  1030. (\key{mov}\;\REG{\itm{rax}}\; \STACKLOC{{-}16})\\
  1031. (\key{mov}\;\STACKLOC{{-}16}\; \REG{\itm{rax}})
  1032. \end{array}
  1033. \]
  1034. The \key{imul} instruction is a special case because the destination
  1035. argument must be a register.
  1036. \section{Testing with Interpreters}
  1037. The typical way to test a compiler is to run the generated assembly
  1038. code on a diverse set of programs and check whether they behave as
  1039. expected. However, when a compiler is structured as ours is, with many
  1040. passes, when there is an error in the generated assembly code it can
  1041. be hard to determine which pass contains the source of the error. A
  1042. good way to isolate the error is to not only test the generated
  1043. assembly code but to also test the output of every pass. This requires
  1044. having interpreters for all the intermediate languages. Indeed, the
  1045. file \key{interp.rkt} in the supplemental code provides interpreters
  1046. for all the intermediate languages described in this book, starting
  1047. with interpreters for $S_0$, $C_0$, and x86 (in abstract syntax).
  1048. The file \key{run-tests.rkt} automates the process of running the
  1049. interpreters on the output programs of each pass and checking their
  1050. result.
  1051. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1052. \chapter{Register Allocation}
  1053. \label{ch:register-allocation}
  1054. In Chapter~\ref{ch:int-exp} we simplified the generation of x86
  1055. assembly by placing all variables on the stack. We can improve the
  1056. performance of the generated code considerably if we instead try to
  1057. place as many variables as possible into registers. The CPU can
  1058. access a register in a single cycle, whereas accessing the stack can
  1059. take from several cycles (to go to cache) to hundreds of cycles (to go
  1060. to main memory). Figure~\ref{fig:reg-eg} shows a program with four
  1061. variables that serves as a running example. We show the source program
  1062. and also the output of instruction selection. At that point the
  1063. program is almost x86 assembly but not quite; it still contains
  1064. variables instead of stack locations or registers.
  1065. \begin{figure}
  1066. \begin{minipage}{0.45\textwidth}
  1067. Source program:
  1068. \begin{lstlisting}
  1069. (let ([v 1])
  1070. (let ([w 46])
  1071. (let ([x (+ v 7)])
  1072. (let ([y (+ 4 x)])
  1073. (let ([z (+ x w)])
  1074. (- z y))))))
  1075. \end{lstlisting}
  1076. \end{minipage}
  1077. \begin{minipage}{0.45\textwidth}
  1078. After instruction selection:
  1079. \begin{lstlisting}
  1080. (program (v w x y z)
  1081. (mov (int 1) (var v))
  1082. (mov (int 46) (var w))
  1083. (mov (var v) (var x))
  1084. (add (int 7) (var x))
  1085. (mov (var x) (var y))
  1086. (add (int 4) (var y))
  1087. (mov (var x) (var z))
  1088. (add (var w) (var z))
  1089. (mov (var z) (reg rax))
  1090. (sub (var y) (reg rax)))
  1091. \end{lstlisting}
  1092. \end{minipage}
  1093. \caption{Running example for this chapter.}
  1094. \label{fig:reg-eg}
  1095. \end{figure}
  1096. The goal of register allocation is to fit as many variables into
  1097. registers as possible. It is often the case that we have more
  1098. variables than registers, so we can't naively map each variable to a
  1099. register. Fortunately, it is also common for different variables to be
  1100. needed during different periods of time, and in such cases the
  1101. variables can be mapped to the same register. Consider variables $x$
  1102. and $y$ in Figure~\ref{fig:reg-eg}. After the variable $x$ is moved
  1103. to $z$ it is no longer needed. Variable $y$, on the other hand, is
  1104. used only after this point, so $x$ and $y$ could share the same
  1105. register. The topic of the next section is how we compute where a
  1106. variable is needed.
  1107. \section{Liveness Analysis}
  1108. A variable is \emph{live} if the variable is used at some later point
  1109. in the program and there is not an intervening assignment to the
  1110. variable.
  1111. %
  1112. To understand the latter condition, consider the following code
  1113. fragment in which there are two writes to $b$. Are $a$ and
  1114. $b$ both live at the same time?
  1115. \begin{lstlisting}[numbers=left,numberstyle=\tiny]
  1116. (mov (int 5) (var a)) ; @$a \gets 5$@
  1117. (mov (int 30) (var b)) ; @$b \gets 30$@
  1118. (mov (var a) (var c)) ; @$c \gets a$@
  1119. (mov (int 10) (var b)) ; @$b \gets 10$@
  1120. (add (var b) (var c)) ; @$c \gets c + b$@
  1121. \end{lstlisting}
  1122. The answer is no because the value $30$ written to $b$ on line 2 is
  1123. never used. The variable $b$ is read on line 5 and there is an
  1124. intervening write to $b$ on line 4, so the read on line 5 receives the
  1125. value written on line 4, not line 2.
  1126. The live variables can be computed by traversing the instruction
  1127. sequence back to front (i.e., backwards in execution order). Let
  1128. $I_1,\ldots, I_n$ be the instruction sequence. We write
  1129. $L_{\mathsf{after}}(k)$ for the set of live variables after
  1130. instruction $I_k$ and $L_{\mathsf{before}}(k)$ for the set of live
  1131. variables before instruction $I_k$. The live variables after an
  1132. instruction are always the same as the live variables before the next
  1133. instruction.
  1134. \begin{equation*}
  1135. L_{\mathsf{after}}(k) = L_{\mathsf{before}}(k+1)
  1136. \end{equation*}
  1137. To start things off, there are no live variables after the last
  1138. instruction, so
  1139. \begin{equation*}
  1140. L_{\mathsf{after}}(n) = \emptyset
  1141. \end{equation*}
  1142. We then apply the following rule repeatedly, traversing the
  1143. instruction sequence back to front.
  1144. \begin{equation*}
  1145. L_{\mathtt{before}}(k) = (L_{\mathtt{after}}(k) - W(k)) \cup R(k),
  1146. \end{equation*}
  1147. where $W(k)$ are the variables written to by instruction $I_k$ and
  1148. $R(k)$ are the variables read by instruction $I_k$.
  1149. Figure~\ref{fig:live-eg} shows the results of live variables analysis
  1150. for the running example. Next to each instruction we write its
  1151. $L_{\mathtt{after}}$ set.
  1152. \begin{figure}[tbp]
  1153. \begin{lstlisting}
  1154. (program (v w x y z)
  1155. (mov (int 1) (var v)) @$\{ v \}$@
  1156. (mov (int 46) (var w)) @$\{ v, w \}$@
  1157. (mov (var v) (var x)) @$\{ w, x \}$@
  1158. (add (int 7) (var x)) @$\{ w, x \}$@
  1159. (mov (var x) (var y)) @$\{ w, x, y\}$@
  1160. (add (int 4) (var y)) @$\{ w, x, y \}$@
  1161. (mov (var x) (var z)) @$\{ w, y, z \}$@
  1162. (add (var w) (var z)) @$\{ y, z \}$@
  1163. (mov (var z) (reg rax)) @$\{ y \}$@
  1164. (sub (var y) (reg rax))) @$\{\}$@
  1165. \end{lstlisting}
  1166. \caption{Running example program annotated with live-after sets.}
  1167. \label{fig:live-eg}
  1168. \end{figure}
  1169. \section{Building the Interference Graph}
  1170. Based on the liveness analysis, we know the program regions where each
  1171. variable is needed. However, during register allocation, we need to
  1172. answer questions of the specific form: are variables $u$ and $v$ ever
  1173. live at the same time? (And therefore cannot be assigned to the same
  1174. register.) To make this question easier to answer, we create an
  1175. explicit data structure, an \emph{interference graph}. An
  1176. interference graph is an undirected graph that has an edge between two
  1177. variables if they are live at the same time, that is, if they
  1178. interfere with each other.
  1179. The most obvious way to compute the interference graph is to look at
  1180. the set of live variables between each statement in the program, and
  1181. add an edge to the graph for every pair of variables in the same set.
  1182. This approach is less than ideal for two reasons. First, it can be
  1183. rather expensive because it takes $O(n^2)$ time to look at every pair
  1184. in a set of $n$ live variables. Second, there is a special case in
  1185. which two variables that are live at the same time do not actually
  1186. interfere with each other: when they both contain the same value
  1187. because we have assigned one to the other.
  1188. A better way to compute the edges of the interference graph is given
  1189. by the following rules.
  1190. \begin{itemize}
  1191. \item If instruction $I_k$ is a move: (\key{mov} $s$\, $d$), then add
  1192. the edge $(d,v)$ for every $v \in L_{\mathsf{after}}(k)$ unless $v =
  1193. d$ or $v = s$.
  1194. \item If instruction $I_k$ is not a move but some other arithmetic
  1195. instruction such as (\key{add} $s$\, $d$), then add the edge $(d,v)$
  1196. for every $v \in L_{\mathsf{after}}(k)$ unless $v = d$.
  1197. \item If instruction $I_k$ is of the form (\key{call}
  1198. $\mathit{label}$), then add an edge $(r,v)$ for every caller-save
  1199. register $r$ and every variable $v \in L_{\mathsf{after}}(k)$.
  1200. \end{itemize}
  1201. Working from the top to bottom of Figure~\ref{fig:live-eg}, $v$
  1202. interferes with $w$; $x$ with $w$; $y$ with $w$ and $x$; and $z$ with
  1203. $w$ and $y$. The resulting interference graph is shown in
  1204. Figure~\ref{fig:interfere}.
  1205. \begin{figure}[tbp]
  1206. \large
  1207. \[
  1208. \xymatrix@=40pt{
  1209. v \ar@{-}[r] & w \ar@{-}[r]\ar@{-}[d]\ar@{-}[dr] & x \ar@{-}[dl]\\
  1210. & y \ar@{-}[r] & z
  1211. }
  1212. \]
  1213. \caption{Interference graph for the running example.}
  1214. \label{fig:interfere}
  1215. \end{figure}
  1216. \section{Graph Coloring via Sudoku}
  1217. We now come to the main event, mapping variables to registers (or to
  1218. stack locations in the event that we run out of registers). We need
  1219. to make sure not to map two variables to the same register if the two
  1220. variables interfere with each other. In terms of the interference
  1221. graph, this means we cannot map adjacent nodes to the same register.
  1222. If we think of registers as colors, the register allocation problem
  1223. becomes the widely-studied graph coloring
  1224. problem~\citep{Balakrishnan:1996ve,Rosen:2002bh}.
  1225. The reader may be more familiar with the graph coloring problem than he
  1226. or she realizes; the popular game of Sudoku is an instance of the
  1227. graph coloring problem. The following describes how to build a graph
  1228. out of a Sudoku board.
  1229. \begin{itemize}
  1230. \item There is one node in the graph for each Sudoku square.
  1231. \item There is an edge between two nodes if the corresponding squares
  1232. are in the same row or column, or if the squares are in the same
  1233. $3\times 3$ region.
  1234. \item Choose nine colors to correspond to the numbers $1$ to $9$.
  1235. \item Based on the initial assignment of numbers to squares in the
  1236. Sudoku board, assign the corresponding colors to the corresponding
  1237. nodes in the graph.
  1238. \end{itemize}
  1239. If you can color the remaining nodes in the graph with the nine
  1240. colors, then you've also solved the corresponding game of Sudoku.
  1241. Given that Sudoku is graph coloring, one can use Sudoku strategies to
  1242. come up with an algorithm for allocating registers. For example, one
  1243. of the basic techniques for Sudoku is Pencil Marks. The idea is that
  1244. you use a process of elimination to determine what numbers still make
  1245. sense for a square, and write down those numbers in the square
  1246. (writing very small). At first, each number might be a
  1247. possibility, but as the board fills up, more and more of the
  1248. possibilities are crossed off (or erased). For example, if the number
  1249. $1$ is assigned to a square, then by process of elimination, you can
  1250. cross off the $1$ pencil mark from all the squares in the same row,
  1251. column, and region. Many Sudoku computer games provide automatic
  1252. support for Pencil Marks. This heuristic also reduces the degree of
  1253. branching in the search tree.
  1254. The Pencil Marks technique corresponds to the notion of color
  1255. \emph{saturation} due to \cite{Brelaz:1979eu}. The
  1256. saturation of a node, in Sudoku terms, is the number of possibilities
  1257. that have been crossed off using the process of elimination mentioned
  1258. above. In graph terminology, we have the following definition:
  1259. \begin{equation*}
  1260. \mathrm{saturation}(u) = |\{ c \;|\; \exists v. v \in \mathrm{Adj}(u)
  1261. \text{ and } \mathrm{color}(v) = c \}|
  1262. \end{equation*}
  1263. where $\mathrm{Adj}(u)$ is the set of nodes adjacent to $u$ and
  1264. the notation $|S|$ stands for the size of the set $S$.
  1265. Using the Pencil Marks technique leads to a simple strategy for
  1266. filling in numbers: if there is a square with only one possible number
  1267. left, then write down that number! But what if there are no squares
  1268. with only one possibility left? One brute-force approach is to just
  1269. make a guess. If that guess ultimately leads to a solution, great. If
  1270. not, backtrack to the guess and make a different guess. Of course,
  1271. this is horribly time consuming. One standard way to reduce the amount
  1272. of backtracking is to use the most-constrained-first heuristic. That
  1273. is, when making a guess, always choose a square with the fewest
  1274. possibilities left (the node with the highest saturation). The idea
  1275. is that choosing highly constrained squares earlier rather than later
  1276. is better because later there may not be any possibilities left.
  1277. In some sense, register allocation is easier than Sudoku because we
  1278. can always cheat and add more numbers by spilling variables to the
  1279. stack. Also, we'd like to minimize the time needed to color the graph,
  1280. and backtracking is expensive. Thus, it makes sense to keep the
  1281. most-constrained-first heuristic but drop the backtracking in favor of
  1282. greedy search (guess and just keep going).
  1283. Figure~\ref{fig:satur-algo} gives the pseudo-code for this simple
  1284. greedy algorithm for register allocation based on saturation and the
  1285. most-constrained-first heuristic, which is roughly equivalent to the
  1286. DSATUR algorithm of \cite{Brelaz:1979eu} (also known as
  1287. saturation degree ordering
  1288. (SDO)~\citep{Gebremedhin:1999fk,Omari:2006uq}). Just as in Sudoku,
  1289. the algorithm represents colors with integers, with the first $k$
  1290. colors corresponding to the $k$ registers in a given machine and the
  1291. rest of the integers corresponding to stack locations.
  1292. \begin{figure}[btp]
  1293. \centering
  1294. \begin{lstlisting}[basicstyle=\rmfamily,deletekeywords={for,from,with,is,not,in,find},morekeywords={while},columns=fullflexible]
  1295. Algorithm: DSATUR
  1296. Input: a graph @$G$@
  1297. Output: an assignment @$\mathrm{color}[v]$@ for each node @$v \in G$@
  1298. @$W \gets \mathit{vertices}(G)$@
  1299. while @$W \neq \emptyset$@ do
  1300. pick a node @$u$@ from @$W$@ with the highest saturation,
  1301. breaking ties randomly
  1302. find the lowest color @$c$@ that is not in @$\{ \mathrm{color}[v] \;|\; v \in \mathrm{Adj}(u)\}$@
  1303. @$\mathrm{color}[u] \gets c$@
  1304. @$W \gets W - \{u\}$@
  1305. \end{lstlisting}
  1306. \caption{Saturation-based greedy graph coloring algorithm.}
  1307. \label{fig:satur-algo}
  1308. \end{figure}
  1309. With this algorithm in hand, let us return to the running example and
  1310. consider how to color the interference graph in
  1311. Figure~\ref{fig:interfere}. Initially, all of the nodes are not yet
  1312. colored and they are unsaturated, so we annotate each of them with a
  1313. dash for their color and an empty set for the saturation.
  1314. \[
  1315. \xymatrix{
  1316. v:-,\{\} \ar@{-}[r] & w:-,\{\} \ar@{-}[r]\ar@{-}[d]\ar@{-}[dr] & x:-,\{\} \ar@{-}[dl]\\
  1317. & y:-,\{\} \ar@{-}[r] & z:-,\{\}
  1318. }
  1319. \]
  1320. We select a maximally saturated node and color it $0$. In this case we
  1321. have a 5-way tie, so we arbitrarily pick $y$. The color $0$ is no
  1322. longer available for $w$, $x$, and $z$ because they interfere with
  1323. $y$.
  1324. \[
  1325. \xymatrix{
  1326. v:-,\{\} \ar@{-}[r] & w:-,\{0\} \ar@{-}[r]\ar@{-}[d]\ar@{-}[dr] & x:-,\{0\} \ar@{-}[dl]\\
  1327. & y:0,\{\} \ar@{-}[r] & z:-,\{0\}
  1328. }
  1329. \]
  1330. Now we repeat the process, selecting another maximally saturated node.
  1331. This time there is a three-way tie between $w$, $x$, and $z$. We color
  1332. $w$ with $1$.
  1333. \[
  1334. \xymatrix{
  1335. v:-,\{1\} \ar@{-}[r] & w:1,\{0\} \ar@{-}[r]\ar@{-}[d]\ar@{-}[dr] & x:-,\{0,1\} \ar@{-}[dl]\\
  1336. & y:0,\{1\} \ar@{-}[r] & z:-,\{0,1\}
  1337. }
  1338. \]
  1339. The most saturated nodes are now $x$ and $z$. We color $x$ with the
  1340. next available color, which is $2$.
  1341. \[
  1342. \xymatrix{
  1343. v:-,\{1\} \ar@{-}[r] & w:1,\{0,2\} \ar@{-}[r]\ar@{-}[d]\ar@{-}[dr] & x:2,\{0,1\} \ar@{-}[dl]\\
  1344. & y:0,\{1,2\} \ar@{-}[r] & z:-,\{0,1\}
  1345. }
  1346. \]
  1347. We have only two nodes left to color, $v$ and $z$, but $z$ is
  1348. more highly saturated, so we color $z$ with $2$.
  1349. \[
  1350. \xymatrix{
  1351. v:-,\{1\} \ar@{-}[r] & w:1,\{0,2\} \ar@{-}[r]\ar@{-}[d]\ar@{-}[dr] & x:2,\{0,1\} \ar@{-}[dl]\\
  1352. & y:0,\{1,2\} \ar@{-}[r] & z:2,\{0,1\}
  1353. }
  1354. \]
  1355. The last iteration of the coloring algorithm assigns color $0$ to $v$.
  1356. \[
  1357. \xymatrix{
  1358. v:0,\{1\} \ar@{-}[r] & w:1,\{0,2\} \ar@{-}[r]\ar@{-}[d]\ar@{-}[dr] & x:2,\{0,1\} \ar@{-}[dl]\\
  1359. & y:0,\{1,2\} \ar@{-}[r] & z:2,\{0,1\}
  1360. }
  1361. \]
  1362. With the coloring complete, we can finalize assignment of variables to
  1363. registers and stack locations. Recall that if we have $k$ registers,
  1364. we map the first $k$ colors to registers and the rest to stack
  1365. locations. Suppose for the moment that we just have one extra register
  1366. to use for register allocation, just \key{rbx}. Then the following is
  1367. the mapping of colors to registers and stack allocations.
  1368. \[
  1369. \{ 0 \mapsto \key{\%rbx}, \; 1 \mapsto \key{-8(\%rbp)}, \; 2 \mapsto \key{-16(\%rbp)}, \ldots \}
  1370. \]
  1371. Putting this together with the above coloring of the variables, we
  1372. arrive at the following assignment.
  1373. \[
  1374. \{ v \mapsto \key{\%rbx}, \;
  1375. w \mapsto \key{-8(\%rbp)}, \;
  1376. x \mapsto \key{-16(\%rbp)}, \;
  1377. y \mapsto \key{\%rbx}, \;
  1378. z\mapsto \key{-16(\%rbp)} \}
  1379. \]
  1380. Applying this assignment to our running example
  1381. (Figure~\ref{fig:reg-eg}) yields the following program.
  1382. % why frame size of 32? -JGS
  1383. \begin{lstlisting}
  1384. (program 32
  1385. (mov (int 1) (reg rbx))
  1386. (mov (int 46) (stack-loc -8))
  1387. (mov (reg rbx) (stack-loc -16))
  1388. (add (int 7) (stack-loc -16))
  1389. (mov (stack-loc -16) (reg rbx))
  1390. (add (int 4) (reg rbx))
  1391. (mov (stack-loc -16) (stack-loc -16))
  1392. (add (stack-loc -8) (stack-loc -16))
  1393. (mov (stack-loc -16) (reg rax))
  1394. (sub (reg rbx) (reg rax)))
  1395. \end{lstlisting}
  1396. This program is almost an x86 program. The remaining step is to apply
  1397. the patch instructions pass. In this example, the trivial move of
  1398. \key{-16(\%rbp)} to itself is deleted and the addition of
  1399. \key{-8(\%rbp)} to \key{-16(\%rbp)} is fixed by going through
  1400. \key{\%rax}. The following shows the portion of the program that
  1401. changed.
  1402. \begin{lstlisting}
  1403. (add (int 4) (reg rbx))
  1404. (mov (stack-loc -8) (reg rax))
  1405. (add (reg rax) (stack-loc -16))
  1406. \end{lstlisting}
  1407. An overview of all of the passes involved in register allocation is
  1408. shown in Figure~\ref{fig:reg-alloc-passes}.
  1409. \begin{figure}[tbp]
  1410. \[
  1411. \xymatrix{
  1412. C_0 \ar@/^/[r]^-{\textsf{select\_instr.}}
  1413. & \text{x86}^{*} \ar[d]^-{\textsf{uncover\_live}} \\
  1414. & \text{x86}^{*} \ar[d]^-{\textsf{build\_interference}} \\
  1415. & \text{x86}^{*} \ar[d]_-{\textsf{allocate\_register}} \\
  1416. & \text{x86}^{*} \ar@/^/[r]^-{\textsf{patch\_instr.}}
  1417. & \text{x86}
  1418. }
  1419. \]
  1420. \caption{Diagram of the passes for register allocation.}
  1421. \label{fig:reg-alloc-passes}
  1422. \end{figure}
  1423. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1424. \chapter{Booleans, Type Checking, and Control Flow}
  1425. \label{ch:bool-types}
  1426. \section{The $S_1$ Language}
  1427. \begin{figure}[htbp]
  1428. \centering
  1429. \fbox{
  1430. \begin{minipage}{0.85\textwidth}
  1431. \[
  1432. \begin{array}{lcl}
  1433. \Op &::=& \ldots \mid \key{and} \mid \key{or} \mid \key{not} \mid \key{eq?} \\
  1434. \Exp &::=& \ldots \mid \key{\#t} \mid \key{\#f} \mid
  1435. \IF{\Exp}{\Exp}{\Exp}
  1436. \end{array}
  1437. \]
  1438. \end{minipage}
  1439. }
  1440. \caption{The $S_1$ language, an extension of $S_0$
  1441. (Figure~\ref{fig:s0-syntax}).}
  1442. \label{fig:s1-syntax}
  1443. \end{figure}
  1444. \section{Type Checking $S_1$ Programs}
  1445. % T ::= Integer | Boolean
  1446. It is common practice to specify a type system by writing rules for
  1447. each kind of AST node. For example, the rule for \key{if} is:
  1448. \begin{quote}
  1449. For any expressions $e_1, e_2, e_3$ and any type $T$, if $e_1$ has
  1450. type \key{bool}, $e_2$ has type $T$, and $e_3$ has type $T$, then
  1451. $\IF{e_1}{e_2}{e_3}$ has type $T$.
  1452. \end{quote}
  1453. It is also common practice to write rules using a horizontal line,
  1454. with the conditions written above the line and the conclusion written
  1455. below the line.
  1456. \begin{equation*}
  1457. \inference{e_1 \text{ has type } \key{bool} &
  1458. e_2 \text{ has type } T & e_3 \text{ has type } T}
  1459. {\IF{e_1}{e_2}{e_3} \text{ has type } T}
  1460. \end{equation*}
  1461. Because the phrase ``has type'' is repeated so often in these type
  1462. checking rules, it is abbreviated to just a colon. So the above rule
  1463. is abbreviated to the following.
  1464. \begin{equation*}
  1465. \inference{e_1 : \key{bool} & e_2 : T & e_3 : T}
  1466. {\IF{e_1}{e_2}{e_3} : T}
  1467. \end{equation*}
  1468. The $\LET{x}{e_1}{e_2}$ construct poses an interesting challenge. The
  1469. variable $x$ is assigned the value of $e_1$ and then $x$ can be used
  1470. inside $e_2$. When we get to an occurrence of $x$ inside $e_2$, how do
  1471. we know what type the variable should be? The answer is that we need
  1472. a way to map from variable names to types. Such a mapping is called a
  1473. \emph{type environment} (aka. \emph{symbol table}). The capital Greek
  1474. letter gamma, written $\Gamma$, is used for referring to type
  1475. environments. The notation $\Gamma, x : T$ stands for
  1476. making a copy of the environment $\Gamma$ and then associating $T$
  1477. with the variable $x$ in the new environment. We write $\Gamma(x)$ to
  1478. lookup the associated type for $x$. The type checking rules for
  1479. \key{let} and variables are as follows.
  1480. \begin{equation*}
  1481. \inference{e_1 : T_1 \text{ in } \Gamma &
  1482. e_2 : T_2 \text{ in } \Gamma,x:T_1}
  1483. {\LET{x}{e_1}{e_2} : T_2 \text{ in } \Gamma}
  1484. \qquad
  1485. \inference{\Gamma(x) = T}
  1486. {x : T \text{ in } \Gamma}
  1487. \end{equation*}
  1488. Type checking has roots in logic, and logicians have a tradition of
  1489. writing the environment on the left-hand side and separating it from
  1490. the expression with a turn-stile ($\vdash$). The turn-stile does not
  1491. have any intrinsic meaning per se. It is punctuation that separates
  1492. the environment $\Gamma$ from the expression $e$. So the above typing
  1493. rules are written as follows.
  1494. \begin{equation*}
  1495. \inference{\Gamma \vdash e_1 : T_1 &
  1496. \Gamma,x:T_1 \vdash e_2 : T_2}
  1497. {\Gamma \vdash \LET{x}{e_1}{e_2} : T_2}
  1498. \qquad
  1499. \inference{\Gamma(x) = T}
  1500. {\Gamma \vdash x : T}
  1501. \end{equation*}
  1502. Overall, the statement $\Gamma \vdash e : T$ is an example of what is
  1503. called a \emph{judgment}. In particular, this judgment says, ``In
  1504. environment $\Gamma$, expression $e$ has type $T$.''
  1505. Figure~\ref{fig:S1-type-system} shows the type checking rules for
  1506. $S_1$.
  1507. \begin{figure}
  1508. \begin{gather*}
  1509. \inference{\Gamma(x) = T}
  1510. {\Gamma \vdash x : T}
  1511. \qquad
  1512. \inference{\Gamma \vdash e_1 : T_1 &
  1513. \Gamma,x:T_1 \vdash e_2 : T_2}
  1514. {\Gamma \vdash \LET{x}{e_1}{e_2} : T_2}
  1515. \\[2ex]
  1516. \inference{}{\Gamma \vdash n : \key{Integer}}
  1517. \quad
  1518. \inference{\Gamma \vdash e_i : T_i \ ^{\forall i \in 1\ldots n} & \Delta(\Op,T_1,\ldots,T_n) = T}
  1519. {\Gamma \vdash (\Op \; e_1 \ldots e_n) : T}
  1520. \\[2ex]
  1521. \inference{}{\Gamma \vdash \key{\#t} : \key{Boolean}}
  1522. \quad
  1523. \inference{}{\Gamma \vdash \key{\#f} : \key{Boolean}}
  1524. \quad
  1525. \inference{\Gamma \vdash e_1 : \key{Boolean} \\
  1526. \Gamma \vdash e_2 : T &
  1527. \Gamma \vdash e_3 : T}
  1528. {\Gamma \vdash \IF{e_1}{e_2}{e_3} : T}
  1529. \end{gather*}
  1530. \caption{Type System for $S_1$.}
  1531. \label{fig:S1-type-system}
  1532. \end{figure}
  1533. \begin{figure}
  1534. \begin{align*}
  1535. \Delta(\key{+},\key{Integer},\key{Integer}) &= \key{Integer} \\
  1536. \Delta(\key{-},\key{Integer},\key{Integer}) &= \key{Integer} \\
  1537. \Delta(\key{-},\key{Integer}) &= \key{Integer} \\
  1538. \Delta(\key{*},\key{Integer},\key{Integer}) &= \key{Integer} \\
  1539. \Delta(\key{read}) &= \key{Integer} \\
  1540. \Delta(\key{and},\key{Boolean},\key{Boolean}) &= \key{Boolean} \\
  1541. \Delta(\key{or},\key{Boolean},\key{Boolean}) &= \key{Boolean} \\
  1542. \Delta(\key{not},\key{Boolean}) &= \key{Boolean} \\
  1543. \Delta(\key{eq?},\key{Integer},\key{Integer}) &= \key{Boolean} \\
  1544. \Delta(\key{eq?},\key{Boolean},\key{Boolean}) &= \key{Boolean}
  1545. \end{align*}
  1546. \caption{Types for the primitive operators.}
  1547. \end{figure}
  1548. \section{The $C_1$ Language}
  1549. \begin{figure}[htbp]
  1550. \[
  1551. \begin{array}{lcl}
  1552. \Arg &::=& \ldots \mid \key{\#t} \mid \key{\#f} \\
  1553. \Stmt &::=& \ldots \mid \IF{\Exp}{\Stmt^{*}}{\Stmt^{*}}
  1554. \end{array}
  1555. \]
  1556. \caption{The $C_1$ intermediate language, an extension of $C_0$
  1557. (Figure~\ref{fig:c0-syntax}).}
  1558. \label{fig:c1-syntax}
  1559. \end{figure}
  1560. \section{Flatten Expressions}
  1561. \section{Select Instructions}
  1562. \section{Register Allocation}
  1563. \section{Patch Instructions}
  1564. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1565. \chapter{Tuples and Heap Allocation}
  1566. \label{ch:tuples}
  1567. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1568. \chapter{Garbage Collection}
  1569. \label{ch:gc}
  1570. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1571. \chapter{Functions}
  1572. \label{ch:functions}
  1573. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1574. \chapter{Lexically Scoped Functions}
  1575. \label{ch:lambdas}
  1576. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1577. \chapter{Mutable Data}
  1578. \label{ch:mutable-data}
  1579. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1580. \chapter{The Dynamic Type}
  1581. \label{ch:type-dynamic}
  1582. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1583. \chapter{Parametric Polymorphism}
  1584. \label{ch:parametric-polymorphism}
  1585. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1586. \chapter{High-level Optimization}
  1587. \label{ch:high-level-optimization}
  1588. \bibliographystyle{plainnat}
  1589. \bibliography{all}
  1590. \end{document}
  1591. %% LocalWords: Dybvig Waddell Abdulaziz Ghuloum Dipanwita
  1592. %% LocalWords: Sarkar lcl Matz aa representable