  1. \documentclass[11pt]{book}
  2. \usepackage[T1]{fontenc}
  3. \usepackage[utf8]{inputenc}
  4. \usepackage{lmodern}
  5. \usepackage{hyperref}
  6. \usepackage{graphicx}
  7. \usepackage[english]{babel}
  8. \usepackage{listings}
  9. \usepackage{amsmath}
  10. \usepackage{amsthm}
  11. \usepackage{amssymb}
  12. \usepackage{natbib}
  13. \usepackage{stmaryrd}
  14. \usepackage{xypic}
  15. \usepackage{semantic}
  16. \usepackage{wrapfig}
  17. %% For pictures
  18. \usepackage{tikz}
  19. \usetikzlibrary{arrows.meta}
  20. \tikzset{baseline=(current bounding box.center), >/.tip={Triangle[scale=1.4]}}
  21. % Computer Modern is already the default. -Jeremy
  22. %\renewcommand{\ttdefault}{cmtt}
  23. \lstset{%
  24. language=Lisp,
  25. basicstyle=\ttfamily\small,
  26. escapechar=@,
  27. columns=fullflexible
  28. }
  29. \newtheorem{theorem}{Theorem}
  30. \newtheorem{lemma}[theorem]{Lemma}
  31. \newtheorem{corollary}[theorem]{Corollary}
  32. \newtheorem{proposition}[theorem]{Proposition}
  33. \newtheorem{constraint}[theorem]{Constraint}
  34. \newtheorem{definition}[theorem]{Definition}
  35. \newtheorem{exercise}[theorem]{Exercise}
  36. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  37. % 'dedication' environment: To add a dedication paragraph at the start of book %
  38. % Source: http://www.tug.org/pipermail/texhax/2010-June/015184.html %
  39. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  40. \newenvironment{dedication}
  41. {
  42. \cleardoublepage
  43. \thispagestyle{empty}
  44. \vspace*{\stretch{1}}
  45. \hfill\begin{minipage}[t]{0.66\textwidth}
  46. \raggedright
  47. }
  48. {
  49. \end{minipage}
  50. \vspace*{\stretch{3}}
  51. \clearpage
  52. }
  53. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  54. % Chapter quote at the start of chapter %
  55. % Source: http://tex.stackexchange.com/a/53380 %
  56. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  57. \makeatletter
  58. \renewcommand{\@chapapp}{}% Not necessary...
  59. \newenvironment{chapquote}[2][2em]
  60. {\setlength{\@tempdima}{#1}%
  61. \def\chapquote@author{#2}%
  62. \parshape 1 \@tempdima \dimexpr\textwidth-2\@tempdima\relax%
  63. \itshape}
  64. {\par\normalfont\hfill--\ \chapquote@author\hspace*{\@tempdima}\par\bigskip}
  65. \makeatother
  66. \input{defs}
  67. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  68. \title{\Huge \textbf{Essentials of Compilation} \\
  69. \huge An Incremental Approach}
  70. \author{\textsc{Jeremy G. Siek} \\
  71. %\thanks{\url{http://homes.soic.indiana.edu/jsiek/}} \\
  72. Indiana University \\
  73. \\
  74. with contributions from: \\
  75. Carl Factora \\
  76. Cameron Swords
  77. }
  78. \begin{document}
  79. \frontmatter
  80. \maketitle
  81. \begin{dedication}
  82. This book is dedicated to the programming language wonks at Indiana
  83. University.
  84. \end{dedication}
  85. \tableofcontents
  86. %\listoffigures
  87. %\listoftables
  88. \mainmatter
  89. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  90. \chapter*{Preface}
  91. The tradition of compiler writing at Indiana University goes back to
  92. programming language research and courses taught by Daniel Friedman in
  93. the 1970's and 1980's. Dan had conducted research on lazy evaluation
  94. in the context of Lisp~\citep{McCarthy:1960dz} and then studied
continuations and macros in the context of
Scheme~\citep{Sussman:1975ab}, a dialect of Lisp. One of the students of
  97. those courses, Kent Dybvig, went on to build Chez
  98. Scheme~\citep{Dybvig:2006aa}, a production-quality and efficient
  99. compiler for Scheme. After completing his Ph.D. at the University of
  100. North Carolina, Kent returned to teach at Indiana University.
  101. Throughout the 1990's and early 2000's, Kent continued development of
  102. Chez Scheme and rotated with Dan in teaching the compiler course.
  103. Thanks to this collaboration between Dan and Kent, the compiler course
  104. evolved to incorporate novel pedagogical ideas while also including
  105. elements of effective real-world compilers. One of Dan's ideas was to
  106. split the compiler into many small passes over the input program and
  107. subsequent intermediate representations, so that the code for each
pass would be easy to understand in isolation. (In contrast, most
  109. compilers of the time were organized into only a few monolithic passes
  110. for reasons of compile-time efficiency.) Kent and his students,
  111. Dipanwita Sarkar and Andrew Keep, developed infrastructure to support
  112. this approach and evolved the course, first to use micro-sized passes
  113. and then into even smaller nano
  114. passes~\citep{Sarkar:2004fk,Keep:2012aa}. I took this compiler course
  115. in the early 2000's, as part of my Ph.D. studies at Indiana
  116. University. Needless to say, I enjoyed the course immensely.
  117. One of my classmates, Abdulaziz Ghuloum, observed that the
  118. front-to-back organization of the course made it difficult for
  119. students to understand the rationale for the compiler
  120. design. Abdulaziz proposed an incremental approach in which the
  121. students build the compiler in stages; they start by implementing a
  122. complete compiler for a very small subset of the input language, then
  123. in each subsequent stage they add a feature to the input language and
  124. add or modify passes to handle the new feature~\citep{Ghuloum:2006bh}.
  125. In this way, the students see how the language features motivate
  126. aspects of the compiler design.
  127. After graduating from Indiana University in 2005, I went on to teach
  128. at the University of Colorado. I adapted the nano pass and incremental
  129. approaches to compiling a subset of the Python
  130. language~\citep{Siek:2012ab}. Python and Scheme are quite different
  131. on the surface but there is a large overlap in the compiler techniques
  132. required for the two languages. Thus, I was able to teach much of the
  133. same content from the Indiana compiler course. I very much enjoyed
  134. teaching the course organized in this way, and even better, many of
  135. the students learned a lot and got excited about compilers. (No, I
  136. didn't do a quantitative study to support this claim.)
  137. It is now 2016 and I too have returned to teach at Indiana University.
  138. In my absence the compiler course had switched from the front-to-back
  139. organization to a back-to-front organization. Seeing how well the
  140. incremental approach worked at Colorado, I found this rather
  141. unsatisfactory and have proceeded to reorganize the course, porting
  142. and adapting the structure of the Colorado course back into the land
  143. of Scheme. Of course, in the meantime Scheme has been superseded by
  144. Racket (at least in Indiana), so the course is now about implementing,
  145. in Racket~\citep{plt-tr}, a subset of Racket.
  146. This is the textbook for the incremental version of the compiler
  147. course at Indiana University (Spring 2016) and it is the first attempt
  148. to create a textbook for the Indiana compiler course. With this book
I hope to make the Indiana compiler course available to people who
  150. have not had the chance to study here in person. Many of the compiler
  151. design decisions in this book are drawn from the assignment
  152. descriptions of \cite{Dybvig:2010aa}. I have captured what I think are
  153. the most important topics from \cite{Dybvig:2010aa} but have omitted
  154. topics that I think are less interesting conceptually and I have made
  155. simplifications to reduce complexity. In this way, this book leans
  156. more towards pedagogy than towards absolute efficiency. Also, the book
  157. differs in places where I saw the opportunity to make the topics more
  158. fun, such as in relating register allocation to Sudoku
  159. (Chapter~\ref{ch:register-allocation}).
  160. \section*{Prerequisites}
The material in this book is challenging but rewarding. It is meant
  162. to prepare students for a lifelong career in programming languages. I
  163. do not recommend this book for students who only want to dabble in
  164. programming languages. The book uses the Racket language both for the
  165. implementation of the compiler and for the language that is
  166. compiled. Thus, a student should be proficient with Racket (or Scheme)
  167. prior to reading this book. There are many other excellent resources
  168. for learning Racket and
  169. Scheme~\citep{Dybvig:1987aa,Abelson:1996uq,Friedman:1996aa,Felleisen:2001aa,Felleisen:2013aa,Flatt:2014aa}. It
  170. is helpful but not necessary for the student to have prior exposure to
  171. the x86 (or x86-64) assembly language, as one might get from a
  172. computer systems course~\citep{Bryant:2005aa,Bryant:2010aa}. This
  173. book will introduce the basics of the x86-64 assembly language.
  174. %\section*{Structure of book}
  175. % You might want to add short description about each chapter in this book.
  176. %\section*{About the companion website}
  177. %The website\footnote{\url{https://github.com/amberj/latex-book-template}} for %this file contains:
  178. %\begin{itemize}
  179. % \item A link to (freely downlodable) latest version of this document.
  180. % \item Link to download LaTeX source for this document.
  181. % \item Miscellaneous material (e.g. suggested readings etc).
  182. %\end{itemize}
  183. \section*{Acknowledgments}
I would like to thank
  185. \begin{itemize}
  186. \item Bor-Yuh Evan Chang
  187. \item Kent Dybvig
  188. \item Daniel P. Friedman
  189. \item Ronald Garcia
  190. \item Abdulaziz Ghuloum
  191. \item Ryan Newton
  192. \item Dipanwita Sarkar
  193. \item Andrew Keep
  194. \item Oscar Waddell
  195. \end{itemize}
  196. \mbox{}\\
  197. \noindent Jeremy G. Siek \\
  198. \noindent \url{http://homes.soic.indiana.edu/jsiek}
  199. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  200. \chapter{Preliminaries}
  201. \label{ch:trees-recur}
  202. In this chapter, we review the basic tools that are needed for
  203. implementing a compiler. We use abstract syntax trees (ASTs) in the
  204. form of S-expressions to represent programs (Section~\ref{sec:ast})
  205. and pattern matching to inspect individual nodes in an AST
  206. (Section~\ref{sec:pattern-matching}). We use recursion to construct
  207. and deconstruct entire ASTs (Section~\ref{sec:recursion}).
  208. \section{Abstract Syntax Trees}
  209. \label{sec:ast}
  210. The primary data structure that is commonly used for representing
  211. programs is the \emph{abstract syntax tree} (AST). When considering
  212. some part of a program, a compiler needs to ask what kind of part it
  213. is and what sub-parts it has. For example, the program on the left is
  214. represented by the AST on the right.
  215. \begin{center}
  216. \begin{minipage}{0.4\textwidth}
  217. \begin{lstlisting}
  218. (+ (read) (- 8))
  219. \end{lstlisting}
  220. \end{minipage}
  221. \begin{minipage}{0.4\textwidth}
  222. \begin{equation}
  223. \begin{tikzpicture}
  224. \node[draw, circle] (plus) at (0 , 0) {\key{+}};
  225. \node[draw, circle] (read) at (-1, -1.5) {{\footnotesize\key{read}}};
  226. \node[draw, circle] (minus) at (1 , -1.5) {$\key{-}$};
  227. \node[draw, circle] (8) at (1 , -3) {\key{8}};
  228. \draw[->] (plus) to (read);
  229. \draw[->] (plus) to (minus);
  230. \draw[->] (minus) to (8);
  231. \end{tikzpicture}
  232. \label{eq:arith-prog}
  233. \end{equation}
  234. \end{minipage}
  235. \end{center}
  236. We shall use the standard terminology for trees: each circle above is
  237. called a \emph{node}. The arrows connect a node to its \emph{children}
  238. (which are also nodes). The top-most node is the \emph{root}. Every
  239. node except for the root has a \emph{parent} (the node it is the child
  240. of). If a node has no children, it is a \emph{leaf} node. Otherwise
  241. it is an \emph{internal} node.
  242. When deciding how to compile the above program, we need to know that
  243. the root node operation is addition and that it has two children:
  244. \texttt{read} and a negation. The abstract syntax tree data structure
  245. directly supports these queries and hence is a good choice. In this
  246. book, we will often write down the textual representation of a program
  247. even when we really have in mind the AST because the textual
  248. representation is more concise. We recommend that, in your mind, you
always interpret programs as abstract syntax trees.
  250. \section{Grammars}
  251. \label{sec:grammar}
  252. A programming language can be thought of as a \emph{set} of programs.
  253. The set is typically infinite (one can always create larger and larger
  254. programs), so one cannot simply describe a language by listing all of
  255. the programs in the language. Instead we write down a set of rules, a
  256. \emph{grammar}, for building programs. We shall write our rules in a
  257. variant of Backus-Naur Form (BNF)~\citep{Backus:1960aa,Knuth:1964aa}.
  258. As an example, we describe a small language, named $R_0$, of
  259. integers and arithmetic operations. The first rule says that any
  260. integer is in the language:
  261. \begin{equation}
  262. R_0 ::= \Int \label{eq:arith-int}
  263. \end{equation}
  264. Each rule has a left-hand-side and a right-hand-side. The way to read
  265. a rule is that if you have all the program parts on the
right-hand-side, then you can create an AST node and categorize it
  267. according to the left-hand-side. (We do not define $\Int$ because the
  268. reader already knows what an integer is.) We make the simplifying
  269. design decision that all of the languages in this book only handle
machine-representable integers (those representable with 64 bits,
i.e., the range $-2^{63}$ to $2^{63}-1$), which corresponds to the
  272. \texttt{fixnum} datatype in Racket. A name such as $R_0$ that is
  273. defined by the grammar rules is a \emph{non-terminal}.
  274. The second rule for the $R_0$ language is the \texttt{read}
  275. operation that receives an input integer from the user of the program.
  276. \begin{equation}
  277. R_0 ::= (\key{read}) \label{eq:arith-read}
  278. \end{equation}
  279. The third rule says that, given an $R_0$ node, you can build another
  280. $R_0$ node by negating it.
  281. \begin{equation}
  282. R_0 ::= (\key{-} \; R_0) \label{eq:arith-neg}
  283. \end{equation}
  284. Symbols such as \key{-} in typewriter font are \emph{terminal} symbols
  285. and must literally appear in the program for the rule to be
  286. applicable.
  287. We can apply the rules to build ASTs in the $R_0$
  288. language. For example, by rule \eqref{eq:arith-int}, \texttt{8} is an
  289. $R_0$, then by rule \eqref{eq:arith-neg}, the following AST is
  290. an $R_0$.
  291. \begin{center}
  292. \begin{minipage}{0.25\textwidth}
  293. \begin{lstlisting}
  294. (- 8)
  295. \end{lstlisting}
  296. \end{minipage}
  297. \begin{minipage}{0.25\textwidth}
  298. \begin{equation}
  299. \begin{tikzpicture}
  300. \node[draw, circle] (minus) at (0, 0) {$\text{--}$};
  301. \node[draw, circle] (8) at (0, -1.2) {$8$};
  302. \draw[->] (minus) to (8);
  303. \end{tikzpicture}
  304. \label{eq:arith-neg8}
  305. \end{equation}
  306. \end{minipage}
  307. \end{center}
  308. The last rule for the $R_0$ language is for addition:
  309. \begin{equation}
  310. R_0 ::= (\key{+} \; R_0 \; R_0) \label{eq:arith-add}
  311. \end{equation}
  312. Now we can see that the AST \eqref{eq:arith-prog} is in $R_0$.
  313. We know that \lstinline{(read)} is in $R_0$ by rule
  314. \eqref{eq:arith-read} and we have shown that \texttt{(- 8)} is in
  315. $R_0$, so we can apply rule \eqref{eq:arith-add} to show that
  316. \texttt{(+ (read) (- 8))} is in the $R_0$ language.
  317. If you have an AST for which the above four rules do not apply, then
  318. the AST is not in $R_0$. For example, the AST \texttt{(-
  319. (read) (+ 8))} is not in $R_0$ because there are no rules
  320. for \key{+} with only one argument, nor for \key{-} with two
  321. arguments. Whenever we define a language with a grammar, we
  322. implicitly mean for the language to be the smallest set of programs
  323. that are justified by the rules. That is, the language only includes
  324. those programs that the rules allow.
  325. It is common to have many rules with the same left-hand side, so there
  326. is a vertical bar notation for gathering several rules, as shown in
Figure~\ref{fig:r0-syntax}. The clauses separated by vertical bars
are called ``alternatives''.
  329. \begin{figure}[tbp]
  330. \fbox{
  331. \begin{minipage}{\textwidth}
  332. \[
  333. R_0 ::= \Int \mid ({\tt \key{read}}) \mid (\key{-} \; R_0) \mid
  334. (\key{+} \; R_0 \; R_0)
  335. \]
  336. \end{minipage}
  337. }
  338. \caption{The syntax of the $R_0$ language.}
  339. \label{fig:r0-syntax}
  340. \end{figure}
  341. \section{S-Expressions}
  342. \label{sec:s-expr}
  343. Racket, as a descendant of Lisp, has
  344. convenient support for creating and manipulating abstract syntax trees
  345. with its \emph{symbolic expression} feature, or S-expression for
  346. short. We can create an S-expression simply by writing a backquote
  347. followed by the textual representation of the AST. (Technically
  348. speaking, this is called a \emph{quasiquote} in Racket.) For example,
  349. an S-expression to represent the AST \eqref{eq:arith-prog} is created
  350. by the following Racket expression:
  351. \begin{center}
  352. \texttt{`(+ (read) (- 8))}
  353. \end{center}
  354. To build larger S-expressions one often needs to splice together
  355. several smaller S-expressions. Racket provides the comma operator to
  356. splice an S-expression into a larger one. For example, instead of
  357. creating the S-expression for AST \eqref{eq:arith-prog} all at once,
  358. we could have first created an S-expression for AST
  359. \eqref{eq:arith-neg8} and then spliced that into the addition
  360. S-expression.
  361. \begin{lstlisting}
  362. (define ast1.4 `(- 8))
  363. (define ast1.1 `(+ (read) ,ast1.4))
  364. \end{lstlisting}
  365. In general, the Racket expression that follows the comma (splice)
  366. can be any expression that computes an S-expression.
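For example (a small sketch; the helper \texttt{make-neg} below is ours
and not part of $R_0$), the spliced expression can be a call to a
function that computes the sub-tree:
\begin{lstlisting}
(define (make-neg e) `(- ,e))           ; build a negation node
(define ast1.5 `(+ (read) ,(make-neg 8)))
;; ast1.5 is '(+ (read) (- 8))
\end{lstlisting}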
  367. \section{Pattern Matching}
  368. \label{sec:pattern-matching}
  369. As mentioned above, one of the operations that a compiler needs to
  370. perform on an AST is to access the children of a node. Racket
  371. provides the \texttt{match} form to access the parts of an
  372. S-expression. Consider the following example and the output on the
  373. right.
  374. \begin{center}
  375. \begin{minipage}{0.5\textwidth}
  376. \begin{lstlisting}
(match ast1.1
  [`(,op ,child1 ,child2)
   (print op) (newline)
   (print child1) (newline)
   (print child2)])
  382. \end{lstlisting}
  383. \end{minipage}
  384. \vrule
  385. \begin{minipage}{0.25\textwidth}
  386. \begin{lstlisting}
  387. '+
  388. '(read)
  389. '(- 8)
  390. \end{lstlisting}
  391. \end{minipage}
  392. \end{center}
  393. The \texttt{match} form takes AST \eqref{eq:arith-prog} and binds its
  394. parts to the three variables \texttt{op}, \texttt{child1}, and
  395. \texttt{child2}. In general, a match clause consists of a
  396. \emph{pattern} and a \emph{body}. The pattern is a quoted S-expression
  397. that may contain pattern-variables (preceded by a comma). The body
  398. may contain any Racket code.
  399. A \texttt{match} form may contain several clauses, as in the following
  400. function \texttt{leaf?} that recognizes when an $R_0$ node is
  401. a leaf. The \texttt{match} proceeds through the clauses in order,
  402. checking whether the pattern can match the input S-expression. The
  403. body of the first clause that matches is executed. The output of
\texttt{leaf?} for several S-expressions is shown on the right. The
\texttt{match} below also illustrates another form of pattern:
\texttt{(? fixnum?)} applies the predicate \texttt{fixnum?} to the input
  407. S-expression to see if it is a machine-representable integer.
  408. \begin{center}
  409. \begin{minipage}{0.5\textwidth}
  410. \begin{lstlisting}
(define (leaf? arith)
  (match arith
    [(? fixnum?) #t]
    [`(read) #t]
    [`(- ,c1) #f]
    [`(+ ,c1 ,c2) #f]))

(leaf? `(read))
(leaf? `(- 8))
(leaf? `(+ (read) (- 8)))
  420. \end{lstlisting}
  421. \end{minipage}
  422. \vrule
  423. \begin{minipage}{0.25\textwidth}
  424. \begin{lstlisting}
  425. #t
  426. #f
  427. #f
  428. \end{lstlisting}
  429. \end{minipage}
  430. \end{center}
  431. \section{Recursion}
  432. \label{sec:recursion}
  433. Programs are inherently recursive in that an $R_0$ AST is made
up of smaller $R_0$ ASTs. Thus, the natural way to process an
  435. entire program is with a recursive function. As a first example of
  436. such a function, we define \texttt{arith?} below, which takes an
  437. arbitrary S-expression, {\tt sexp}, and determines whether or not {\tt
sexp} is in $R_0$. Note that each match clause corresponds to
  439. one grammar rule for $R_0$ and the body of each clause makes a
  440. recursive call for each child node. This pattern of recursive function
  441. is so common that it has a name, \emph{structural recursion}. In
  442. general, when a recursive function is defined using a sequence of
  443. match clauses that correspond to a grammar, and each clause body makes
  444. a recursive call on each child node, then we say the function is
  445. defined by structural recursion.
  446. \begin{center}
  447. \begin{minipage}{0.7\textwidth}
  448. \begin{lstlisting}
(define (arith? sexp)
  (match sexp
    [(? fixnum?) #t]
    [`(read) #t]
    [`(- ,e) (arith? e)]
    [`(+ ,e1 ,e2)
     (and (arith? e1) (arith? e2))]
    [else #f]))

(arith? `(+ (read) (- 8)))
(arith? `(- (read) (+ 8)))
  459. \end{lstlisting}
  460. \end{minipage}
  461. \vrule
  462. \begin{minipage}{0.25\textwidth}
  463. \begin{lstlisting}
  464. #t
  465. #f
  466. \end{lstlisting}
  467. \end{minipage}
  468. \end{center}
  469. \section{Interpreters}
  470. \label{sec:interp-R0}
  471. The meaning, or semantics, of a program is typically defined in the
  472. specification of the language. For example, the Scheme language is
  473. defined in the report by \cite{SPERBER:2009aa}. The Racket language is
  474. defined in its reference manual~\citep{plt-tr}. In this book we use an
  475. interpreter to define the meaning of each language that we consider,
following Reynolds's advice in this
  477. regard~\citep{reynolds72:_def_interp}. Here we will warm up by writing
  478. an interpreter for the $R_0$ language, which will also serve
  479. as a second example of structural recursion. The \texttt{interp-R0}
  480. function is defined in Figure~\ref{fig:interp-R0}. The body of the
  481. function is a match on the input expression \texttt{e} and there is
  482. one clause per grammar rule for $R_0$. The clauses for
  483. internal AST nodes make recursive calls to \texttt{interp-R0} on
  484. each child node.
  485. \begin{figure}[tbp]
  486. \begin{lstlisting}
(define (interp-R0 e)
  (match e
    [(? fixnum?) e]
    [`(read)
     (define r (read))
     (cond [(fixnum? r) r]
           [else (error 'interp-R0 "expected an integer" r)])]
    [`(- ,e)
     (fx- 0 (interp-R0 e))]
    [`(+ ,e1 ,e2)
     (fx+ (interp-R0 e1) (interp-R0 e2))]))
  499. \end{lstlisting}
  500. \caption{Interpreter for the $R_0$ language.}
  501. \label{fig:interp-R0}
  502. \end{figure}
  503. Let us consider the result of interpreting some example $R_0$
  504. programs. The following program simply adds two integers.
  505. \[
  506. \BINOP{+}{10}{32}
  507. \]
The result is $42$, as you might expect.
  509. %
  510. The next example demonstrates that expressions may be nested within
  511. each other, in this case nesting several additions and negations.
  512. \[
  513. \BINOP{+}{10}{ \UNIOP{-}{ \BINOP{+}{12}{20} } }
  514. \]
  515. What is the result of the above program?
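We can check the answer by running the interpreter on this program,
written here in concrete syntax:
\begin{lstlisting}
(interp-R0 `(+ 10 (- (+ 12 20))))       ; evaluates to -22
\end{lstlisting}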
  516. If we interpret the AST \eqref{eq:arith-prog} and give it the input
  517. \texttt{50}
  518. \begin{lstlisting}
  519. (interp-R0 ast1.1)
  520. \end{lstlisting}
  521. we get the answer to life, the universe, and everything:
  522. \begin{lstlisting}
  523. 42
  524. \end{lstlisting}
  525. Moving on, the \key{read} operation prompts the user of the program
  526. for an integer. Given an input of $10$, the following program produces
  527. $42$.
  528. \[
  529. \BINOP{+}{(\key{read})}{32}
  530. \]
  531. We include the \key{read} operation in $R_1$ to demonstrate that order
of evaluation can make a difference.
The behavior of the following program is somewhat subtle because the
result depends on the order in which the arguments of an operator such
as $+$ are evaluated. (Racket specifies left-to-right evaluation of
arguments, although many languages leave the order unspecified.)
\[
\BINOP{+}{\READ}{\UNIOP{-}{\READ}}
\]
Given the input $52$ then $10$, the above program produces $42$; if the
arguments were instead evaluated from right to left, it would produce
$-42$.
  542. The job of a compiler is to translate programs in one language into
  543. programs in another language (typically but not always a language with
  544. a lower level of abstraction) in such a way that each output program
  545. behaves the same way as the input program. This idea is depicted in
  546. the following diagram. Suppose we have two languages, $\mathcal{L}_1$
  547. and $\mathcal{L}_2$, and an interpreter for each language. Suppose
  548. that the compiler translates program $P_1$ in language $\mathcal{L}_1$
  549. into program $P_2$ in language $\mathcal{L}_2$. Then interpreting
  550. $P_1$ and $P_2$ on the respective interpreters for the two languages,
  551. and given the same inputs $i$, should yield the same output $o$.
  552. \begin{equation} \label{eq:compile-correct}
  553. \begin{tikzpicture}[baseline=(current bounding box.center)]
  554. \node (p1) at (0, 0) {$P_1$};
  555. \node (p2) at (3, 0) {$P_2$};
\node (o) at (3, -2.5) {$o$};
  557. \path[->] (p1) edge [above] node {compile} (p2);
  558. \path[->] (p2) edge [right] node {$\mathcal{L}_2$-interp(i)} (o);
  559. \path[->] (p1) edge [left] node {$\mathcal{L}_1$-interp(i)} (o);
  560. \end{tikzpicture}
  561. \end{equation}
  562. In the next section we will see our first example of a compiler, which
is also another example of structural recursion.
  564. \section{Partial Evaluation}
  565. \label{sec:partial-evaluation}
  566. In this section we consider a compiler that translates $R_0$
  567. programs into $R_0$ programs that are more efficient, that is,
  568. this compiler is an optimizer. Our optimizer will accomplish this by
  569. trying to eagerly compute the parts of the program that do not depend
  570. on any inputs. For example, given the following program
  571. \begin{lstlisting}
  572. (+ (read) (- (+ 5 3)))
  573. \end{lstlisting}
  574. our compiler will translate it into the program
  575. \begin{lstlisting}
  576. (+ (read) -8)
  577. \end{lstlisting}
  578. Figure~\ref{fig:pe-arith} gives the code for a simple partial
  579. evaluator for the $R_0$ language. The output of the partial
  580. evaluator is an $R_0$ program, which we build up using a
  581. combination of quasiquotes and commas. (Though no quasiquote is
  582. necessary for integers.) In Figure~\ref{fig:pe-arith}, the normal
  583. structural recursion is captured in the main \texttt{pe-arith}
  584. function whereas the code for partially evaluating negation and
addition is factored out into two separate helper functions:
  586. \texttt{pe-neg} and \texttt{pe-add}. The input to these helper
  587. functions is the output of partially evaluating the children nodes.
  588. \begin{figure}[tbp]
  589. \begin{lstlisting}
(define (pe-neg r)
  (match r
    [(? fixnum?) (fx- 0 r)]
    [else `(- ,r)]))

(define (pe-add r1 r2)
  (match (list r1 r2)
    [`(,n1 ,n2) #:when (and (fixnum? n1) (fixnum? n2))
     (fx+ r1 r2)]
    [else `(+ ,r1 ,r2)]))

(define (pe-arith e)
  (match e
    [(? fixnum?) e]
    [`(read) `(read)]
    [`(- ,e1) (pe-neg (pe-arith e1))]
    [`(+ ,e1 ,e2) (pe-add (pe-arith e1) (pe-arith e2))]))
  605. \end{lstlisting}
  606. \caption{A partial evaluator for the $R_0$ language.}
  607. \label{fig:pe-arith}
  608. \end{figure}
  609. Our code for \texttt{pe-neg} and \texttt{pe-add} implements the simple
idea of checking whether the inputs are integers and, if they are,
performing the arithmetic. Otherwise, we use quasiquote to
  612. create an AST node for the appropriate operation (either negation or
  613. addition) and use comma to splice in the child nodes.
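For example, running the partial evaluator on the program from the
beginning of this section produces the optimized program shown there:
\begin{lstlisting}
(pe-arith `(+ (read) (- (+ 5 3))))      ; => '(+ (read) -8)
\end{lstlisting}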
  614. To gain some confidence that the partial evaluator is correct, we can
  615. test whether it produces programs that get the same result as the
  616. input program. That is, we can test whether it satisfies Diagram
  617. \eqref{eq:compile-correct}. The following code runs the partial
  618. evaluator on several examples and tests the output program. The
  619. \texttt{assert} function is defined in Appendix~\ref{appendix:utilities}.
  620. \begin{lstlisting}
(define (test-pe p)
  (assert "testing pe-arith"
          (equal? (interp-R0 p) (interp-R0 (pe-arith p)))))

(test-pe `(+ (read) (- (+ 5 3))))
(test-pe `(+ 1 (+ (read) 1)))
(test-pe `(- (+ (read) (- 5))))
  627. \end{lstlisting}
  628. \begin{exercise}
  629. \normalfont % I don't like the italics for exercises. -Jeremy
  630. We challenge the reader to improve on the simple partial evaluator in
  631. Figure~\ref{fig:pe-arith} by replacing the \texttt{pe-neg} and
  632. \texttt{pe-add} helper functions with functions that know more about
  633. arithmetic. For example, your partial evaluator should translate
  634. \begin{lstlisting}
  635. (+ 1 (+ (read) 1))
  636. \end{lstlisting}
  637. into
  638. \begin{lstlisting}
  639. (+ 2 (read))
  640. \end{lstlisting}
  641. To accomplish this, we recommend that your partial evaluator produce
  642. output that takes the form of the $\itm{residual}$ non-terminal in the
  643. following grammar.
  644. \[
  645. \begin{array}{lcl}
  646. \Exp &::=& (\key{read}) \mid (\key{-} \;(\key{read})) \mid (\key{+} \; \Exp \; \Exp)\\
  647. \itm{residual} &::=& \Int \mid (\key{+}\; \Int\; \Exp) \mid \Exp
  648. \end{array}
  649. \]
  650. \end{exercise}
  651. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  652. \chapter{Integers and Variables}
  653. \label{ch:int-exp}
  654. This chapter concerns the challenge of compiling a subset of Racket,
  655. which we name $R_1$, to x86-64 assembly code~\citep{Matz:2013aa}. The
  656. chapter begins with a description of the $R_1$ language
  657. (Section~\ref{sec:s0}) and then a description of x86-64
  658. (Section~\ref{sec:x86-64}). The x86-64 assembly language is quite
  659. large, so we only discuss what is needed for compiling $R_1$. We
  660. introduce more of x86-64 in later chapters. Once we have introduced
  661. $R_1$ and x86-64, we reflect on their differences and come up with a
plan for breaking down the translation from $R_1$ to x86-64 into a handful
  663. of steps (Section~\ref{sec:plan-s0-x86}). The rest of the sections in
this chapter give detailed hints regarding each step
  665. (Sections~\ref{sec:uniquify-s0} through \ref{sec:patch-s0}). We hope
  666. to give enough hints that the well-prepared reader can implement a
  667. compiler from $R_1$ to x86-64 while at the same time leaving room for
  668. some fun and creativity.
  669. \section{The $R_1$ Language}
  670. \label{sec:s0}
  671. The $R_1$ language extends the $R_0$ language
  672. (Figure~\ref{fig:r0-syntax}) with variable definitions. The syntax of
  673. the $R_1$ language is defined by the grammar in
  674. Figure~\ref{fig:r1-syntax}. This language is rich enough to exhibit
  675. several compilation techniques but simple enough so that the reader
  676. can implement a compiler for it in a couple weeks of part-time work.
  677. To give the reader a feeling for the scale of this first compiler, the
  678. instructor solution for the $R_1$ compiler consists of 6 recursive
  679. functions and a few small helper functions that together span 256
  680. lines of code.
  681. \begin{figure}[btp]
  682. \centering
  683. \fbox{
  684. \begin{minipage}{\textwidth}
  685. \[
  686. R_1 ::= \Int \mid ({\tt \key{read}}) \mid (\key{-} \; R_1) \mid
  687. (\key{+} \; R_1 \; R_1) \mid \Var \mid \LET{\Var}{R_1}{R_1}
  688. \]
  689. \end{minipage}
  690. }
  691. \caption{The syntax of the $R_1$ language.
  692. The non-terminal \Var{} may be any Racket identifier.}
  693. \label{fig:r1-syntax}
  694. \end{figure}
The \key{let} construct defines a variable for use within its body
  696. and initializes the variable with the value of an expression. So the
  697. following program initializes $x$ to $32$ and then evaluates the body
  698. $\BINOP{+}{10}{x}$, producing $42$.
  699. \[
  700. \LET{x}{ \BINOP{+}{12}{20} }{ \BINOP{+}{10}{x} }
  701. \]
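In the concrete syntax of $R_1$, which is simply a subset of Racket,
this program is written as follows:
\begin{lstlisting}
(let ([x (+ 12 20)])
  (+ 10 x))
\end{lstlisting}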
  702. When there are multiple \key{let}'s for the same variable, the closest
  703. enclosing \key{let} is used. That is, variable definitions overshadow
  704. prior definitions. Consider the following program with two \key{let}'s
  705. that define variables named $x$. Can you figure out the result?
  706. \[
  707. \LET{x}{32}{ \BINOP{+}{ \LET{x}{10}{x} }{ x } }
  708. \]
  709. For the purposes of showing which variable uses correspond to which
  710. definitions, the following shows the $x$'s annotated with subscripts
  711. to distinguish them. Double check that your answer for the above is
  712. the same as your answer for this annotated version of the program.
  713. \[
  714. \LET{x_1}{32}{ \BINOP{+}{ \LET{x_2}{10}{x_2} }{ x_1 } }
  715. \]
  716. The initializing expression is always evaluated before the body of the
  717. \key{let}, so in the following, the \key{read} for $x$ is performed
  718. before the \key{read} for $y$. Given the input $52$ then $10$, the
  719. following produces $42$ (and not $-42$).
  720. \[
  721. \LET{x}{\READ}{ \LET{y}{\READ}{ \BINOP{-}{x}{y} } }
  722. \]
  723. Figure~\ref{fig:interp-R1} shows the interpreter for the $R_1$
  724. language. It extends the interpreter for $R_0$ with two new
  725. \key{match} clauses for variables and for \key{let}. For \key{let},
  726. we will need a way to communicate the initializing value of a variable
  727. to all the uses of a variable. To accomplish this, we maintain a
  728. mapping from variables to values, which is traditionally called an
  729. \emph{environment}. For simplicity, here we use an association list to
  730. represent the environment. The \key{interp-R1} function takes the
  731. current environment, \key{env}, as an extra parameter. When the
  732. interpreter encounters a variable, it finds the corresponding value
  733. using the \key{lookup} function (Appendix~\ref{appendix:utilities}).
  734. When the interpreter encounters a \key{let}, it evaluates the
  735. initializing expression, extends the environment with the result bound
  736. to the variable, then evaluates the body of the let.
  737. \begin{figure}[tbp]
  738. \begin{lstlisting}
(define (interp-R1 env e)
  (match e
    [(? symbol?) (lookup e env)]
    [`(let ([,x ,e]) ,body)
     (define v (interp-R1 env e))
     (define new-env (cons (cons x v) env))
     (interp-R1 new-env body)]
    [(? fixnum?) e]
    [`(read)
     (define r (read))
     (cond [(fixnum? r) r]
           [else (error 'interp-R1 "expected an integer" r)])]
    [`(- ,e)
     (fx- 0 (interp-R1 env e))]
    [`(+ ,e1 ,e2)
     (fx+ (interp-R1 env e1) (interp-R1 env e2))]))
  756. \end{lstlisting}
  757. \caption{Interpreter for the $R_1$ language.}
  758. \label{fig:interp-R1}
  759. \end{figure}
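For example, starting from the empty environment (the empty association
list; \key{lookup} is from Appendix~\ref{appendix:utilities}), we can
run the interpreter on the first \key{let} example of this section:
\begin{lstlisting}
(interp-R1 '() `(let ([x (+ 12 20)]) (+ 10 x)))   ; evaluates to 42
\end{lstlisting}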
  760. The goal for this chapter is to implement a compiler that translates
any program $P_1$ in $R_1$ into an x86-64 assembly program $P_2$ such
  762. that the assembly program exhibits the same behavior on an x86
  763. computer as the $R_1$ program running in a Racket implementation.
  764. That is, they both output the same integer $n$.
  765. \[
  766. \begin{tikzpicture}[baseline=(current bounding box.center)]
  767. \node (p1) at (0, 0) {$P_1$};
  768. \node (p2) at (4, 0) {$P_2$};
  769. \node (o) at (4, -2) {$n$};
  770. \path[->] (p1) edge [above] node {\footnotesize compile} (p2);
  771. \path[->] (p1) edge [left] node {\footnotesize run in Racket} (o);
  772. \path[->] (p2) edge [right] node {\footnotesize run on an x86 machine} (o);
  773. \end{tikzpicture}
  774. \]
  775. In the next section we introduce enough of the x86-64 assembly
  776. language to compile $R_1$.
  777. \section{The x86-64 Assembly Language}
  778. \label{sec:x86-64}
An x86-64 program is a sequence of instructions. An instruction may
refer to integer constants (called \emph{immediate values}) and to
variables called \emph{registers}, and it may load and store values
in \emph{memory}. Memory is a mapping of 64-bit addresses to 64-bit
  783. values. Figure~\ref{fig:x86-a} defines the syntax for the subset of
  784. the x86-64 assembly language needed for this chapter. (We use the
  785. AT\&T syntax that is expected by the GNU assembler inside \key{gcc}.)
  786. An immediate value is written using the notation \key{\$}$n$ where $n$
  787. is an integer.
  788. %
  789. A register is written with a \key{\%} followed by the register name,
  790. such as \key{\%rax}.
  791. %
  792. An access to memory is specified using the syntax $n(\key{\%}r)$,
  793. which reads register $r$, obtaining address $a$, and then offsets the
address by $n$ bytes, producing the address $a + n$. The
  795. address is then used to either load or store to memory depending on
  796. whether it occurs as a source or destination argument of an
  797. instruction.
  798. An arithmetic instruction, such as $\key{addq}\,s\,d$, reads from the
  799. source argument $s$ and destination argument $d$, applies the
arithmetic operation, then writes the result to the destination $d$. In
  801. this case, computing $d \gets d + s$.
  802. %
The move instruction $\key{movq}\,s\,d$ reads from $s$ and stores the
  804. result in $d$.
  805. %
  806. The $\key{callq}\,\mathit{label}$ instruction executes the procedure
  807. specified by the label, which we shall use to implement
  808. \key{read}.
  809. \begin{figure}[tbp]
  810. \fbox{
  811. \begin{minipage}{0.96\textwidth}
  812. \[
  813. \begin{array}{lcl}
  814. \Reg &::=& \key{rsp} \mid \key{rbp} \mid \key{rax} \mid \key{rbx} \mid \key{rcx}
  815. \mid \key{rdx} \mid \key{rsi} \mid \key{rdi} \mid \\
  816. && \key{r8} \mid \key{r9} \mid \key{r10}
  817. \mid \key{r11} \mid \key{r12} \mid \key{r13}
  818. \mid \key{r14} \mid \key{r15} \\
  819. \Arg &::=& \key{\$}\Int \mid \key{\%}\Reg \mid \Int(\key{\%}\Reg) \\
  820. \Instr &::=& \key{addq} \; \Arg, \Arg \mid
  821. \key{subq} \; \Arg, \Arg \mid
  822. % \key{imulq} \; \Arg,\Arg \mid
  823. \key{negq} \; \Arg \mid \key{movq} \; \Arg, \Arg \mid \\
  824. && \key{callq} \; \mathit{label} \mid
  825. \key{pushq}\;\Arg \mid \key{popq}\;\Arg \mid \key{retq} \\
  826. \Prog &::= & \key{.globl \_main}\\
  827. & & \key{\_main:} \; \Instr^{+}
  828. \end{array}
  829. \]
  830. \end{minipage}
  831. }
  832. \caption{A subset of the x86-64 assembly language (AT\&T syntax).}
  833. \label{fig:x86-a}
  834. \end{figure}
  835. \begin{wrapfigure}{r}{2.25in}
  836. \begin{lstlisting}
    .globl _main
_main:
    movq    $10, %rax
    addq    $32, %rax
    retq
  842. \end{lstlisting}
  843. \caption{An x86-64 program equivalent to $\BINOP{+}{10}{32}$.}
  844. \label{fig:p0-x86}
  845. \end{wrapfigure}
  846. Figure~\ref{fig:p0-x86} depicts an x86-64 program that is equivalent to
  847. $\BINOP{+}{10}{32}$. The \key{globl} directive says that the
  848. \key{\_main} procedure is externally visible, which is necessary so
  849. that the operating system can call it. The label \key{\_main:}
indicates the beginning of the \key{\_main} procedure, which is where
the operating system starts executing this program. The instruction
  852. \lstinline{movq $10, %rax} puts $10$ into register \key{rax}. The
  853. following instruction \lstinline{addq $32, %rax} adds $32$ to the
  854. $10$ in \key{rax} and puts the result, $42$, back into
  855. \key{rax}. The instruction \key{retq} finishes the \key{\_main}
  856. function by returning the integer in the \key{rax} register to the
  857. operating system.
  858. \begin{wrapfigure}{r}{2.25in}
  859. \begin{lstlisting}
    .globl _main
_main:
    pushq   %rbp
    movq    %rsp, %rbp
    subq    $16, %rsp
    movq    $10, -8(%rbp)
    negq    -8(%rbp)
    movq    $52, %rax
    addq    -8(%rbp), %rax
    addq    $16, %rsp
    popq    %rbp
    retq
  872. \end{lstlisting}
  873. \caption{An x86-64 program equivalent to $\BINOP{+}{52}{\UNIOP{-}{10} }$.}
  874. \label{fig:p1-x86}
  875. \end{wrapfigure}
  876. The next example exhibits the use of memory. Figure~\ref{fig:p1-x86}
  877. lists an x86-64 program that is equivalent to $\BINOP{+}{52}{
  878. \UNIOP{-}{10} }$. To understand how this x86-64 program works, we
need to explain a region of memory called the \emph{procedure
  880. call stack} (or \emph{stack} for short). The stack consists of a
  881. separate \emph{frame} for each procedure call. The memory layout for
  882. an individual frame is shown in Figure~\ref{fig:frame}. The register
  883. \key{rsp} is called the \emph{stack pointer} and points to the item at
  884. the top of the stack. The stack grows downward in memory, so we
  885. increase the size of the stack by subtracting from the stack
  886. pointer. The frame size is required to be a multiple of 16 bytes. The
  887. register \key{rbp} is the \emph{base pointer} which serves two
  888. purposes: 1) it saves the location of the stack pointer for the
  889. procedure that called the current one and 2) it is used to access
  890. variables associated with the current procedure. We number the
  891. variables from $1$ to $n$. Variable $1$ is stored at address
  892. $-8\key{(\%rbp)}$, variable $2$ at $-16\key{(\%rbp)}$, etc.
  893. \begin{figure}[tbp]
  894. \centering
  895. \begin{tabular}{|r|l|} \hline
  896. Position & Contents \\ \hline
  897. 8(\key{\%rbp}) & return address \\
  898. 0(\key{\%rbp}) & old \key{rbp} \\
  899. -8(\key{\%rbp}) & variable $1$ \\
  900. -16(\key{\%rbp}) & variable $2$ \\
  901. \ldots & \ldots \\
  902. 0(\key{\%rsp}) & variable $n$\\ \hline
  903. \end{tabular}
  904. \caption{Memory layout of a frame.}
  905. \label{fig:frame}
  906. \end{figure}
  907. Getting back to the program in Figure~\ref{fig:p1-x86}, the first
  908. three instructions are the typical prelude for a procedure. The
  909. instruction \key{pushq \%rbp} saves the base pointer for the procedure
  910. that called the current one onto the stack and subtracts $8$ from the
  911. stack pointer. The second instruction \key{movq \%rsp, \%rbp} changes
  912. the base pointer to the top of the stack. The instruction \key{subq
  913. \$16, \%rsp} moves the stack pointer down to make enough room for
  914. storing variables. This program just needs one variable ($8$ bytes)
  915. but because the frame size is required to be a multiple of 16 bytes,
the allocation is rounded up to 16 bytes.
  917. The next four instructions carry out the work of computing
  918. $\BINOP{+}{52}{\UNIOP{-}{10} }$. The first instruction \key{movq \$10,
  919. -8(\%rbp)} stores $10$ in variable $1$. The instruction \key{negq
  920. -8(\%rbp)} changes variable $1$ to $-10$. The \key{movq \$52, \%rax}
  921. places $52$ in the register \key{rax} and \key{addq -8(\%rbp), \%rax}
  922. adds the contents of variable $1$ to \key{rax}, at which point
  923. \key{rax} contains $42$.
  924. The last three instructions are the typical \emph{conclusion} of a
  925. procedure. These instructions are necessary to get the state of the
  926. machine back to where it was before the current procedure was called.
  927. The \key{addq \$16, \%rsp} instruction moves the stack pointer back to
  928. point at the old base pointer. The amount added here needs to match
  929. the amount that was subtracted in the prelude of the procedure. Then
  930. \key{popq \%rbp} returns the old base pointer to \key{rbp} and adds
  931. $8$ to the stack pointer. The \key{retq} instruction jumps back to
the procedure that called this one and adds $8$ to the stack
  933. pointer.
  934. The compiler will need a convenient representation for manipulating
  935. x86 programs, so we define an abstract syntax for x86 in
  936. Figure~\ref{fig:x86-ast-a}. The \itm{info} field of the \key{program}
AST node is for storing auxiliary information that needs to be
  938. communicated from one step of the compiler to the next.
  939. \begin{figure}[tbp]
  940. \fbox{
  941. \begin{minipage}{\textwidth}
  942. \[
  943. \begin{array}{lcl}
  944. \Arg &::=& \INT{\Int} \mid \REG{\itm{register}}
  945. \mid \STACKLOC{\Int} \\
  946. \Instr &::=& (\key{addq} \; \Arg\; \Arg) \mid
  947. (\key{subq} \; \Arg\; \Arg) \mid
  948. % (\key{imulq} \; \Arg\;\Arg) \mid
  949. (\key{negq} \; \Arg) \mid (\key{movq} \; \Arg\; \Arg) \\
  950. &\mid& (\key{call} \; \mathit{label}) \mid
  951. (\key{pushq}\;\Arg) \mid
  952. (\key{popq}\;\Arg) \mid
  953. (\key{retq}) \\
  954. \Prog &::= & (\key{program} \;\itm{info} \; \Instr^{+})
  955. \end{array}
  956. \]
  957. \end{minipage}
  958. }
  959. \caption{Abstract syntax for x86-64 assembly.}
  960. \label{fig:x86-ast-a}
  961. \end{figure}
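As a rough illustration (a sketch only: the grammar fixes the shape of
the abstract syntax, but the concrete constructor spellings such as
\texttt{int} and \texttt{reg} below are our assumption), the program of
Figure~\ref{fig:p0-x86} could be represented as the following
S-expression, with an empty \itm{info} field:
\begin{lstlisting}
(program ()
  (movq (int 10) (reg rax))
  (addq (int 32) (reg rax))
  (retq))
\end{lstlisting}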
  962. \section{Planning the trip from $R_1$ to x86-64}
  963. \label{sec:plan-s0-x86}
  964. To compile one language to another it helps to focus on the
  965. differences between the two languages. It is these differences that
  966. the compiler will need to bridge. What are the differences between
$R_1$ and x86-64 assembly? Here we list some of the most important
  968. differences.
  969. \begin{enumerate}
  970. \item x86-64 arithmetic instructions typically take two arguments and
  971. update the second argument in place. In contrast, $R_1$ arithmetic
  972. operations only read their arguments and produce a new value.
  973. \item An argument to an $R_1$ operator can be any expression, whereas
  974. x86-64 instructions restrict their arguments to integers, registers,
  975. and memory locations.
  976. \item An $R_1$ program can have any number of variables whereas x86-64
  977. has only 16 registers.
  978. \item Variables in $R_1$ can overshadow other variables with the same
  979. name. The registers and memory locations of x86-64 all have unique
  980. names.
  981. \end{enumerate}
  982. We ease the challenge of compiling from $R_1$ to x86 by breaking down
  983. the problem into several steps, dealing with the above differences one
  984. at a time. The main question then becomes: in what order do we tackle
  985. these differences? This is often one of the most challenging questions
  986. that a compiler writer must answer because some orderings may be much
  987. more difficult to implement than others. It is difficult to know ahead
of time which orderings will be better, so some trial and error is
often involved. However, we can try to plan ahead and choose the orderings
  990. based on this planning.
  991. For example, to handle difference \#2 (nested expressions), we shall
  992. introduce new variables and pull apart the nested expressions into a
  993. sequence of assignment statements. To deal with difference \#3 we
  994. will be replacing variables with registers and/or stack
  995. locations. Thus, it makes sense to deal with \#2 before \#3 so that
  996. \#3 can replace both the original variables and the new ones. Next,
  997. consider where \#1 should fit in. Because it has to do with the format
  998. of x86 instructions, it makes more sense after we have flattened the
  999. nested expressions (\#2). Finally, when should we deal with \#4
  1000. (variable overshadowing)? We shall solve this problem by renaming
  1001. variables to make sure they have unique names. Recall that our plan
  1002. for \#2 involves moving nested expressions, which could be problematic
  1003. if it changes the shadowing of variables. However, if we deal with \#4
  1004. first, then it will not be an issue. Thus, we arrive at the following
  1005. ordering.
  1006. \[
  1007. \begin{tikzpicture}[baseline=(current bounding box.center)]
  1008. \foreach \i/\p in {4/1,2/2,1/3,3/4}
  1009. {
  1010. \node (\i) at (\p*1.5,0) {$\i$};
  1011. }
  1012. \foreach \x/\y in {4/2,2/1,1/3}
  1013. {
  1014. \draw[->] (\x) to (\y);
  1015. }
  1016. \end{tikzpicture}
  1017. \]
  1018. We further simplify the translation from $R_1$ to x86 by identifying
  1019. an intermediate language named $C_0$, roughly half-way between $R_1$
  1020. and x86, to provide a rest stop along the way. We name the language
  1021. $C_0$ because it is vaguely similar to the $C$
language~\citep{Kernighan:1988nx}. The differences \#4 and \#2,
  1023. regarding variables and nested expressions, will be handled by two
  1024. steps, \key{uniquify} and \key{flatten}, which bring us to
  1025. $C_0$.
  1026. \[
  1027. \begin{tikzpicture}[baseline=(current bounding box.center)]
  1028. \foreach \i/\p in {R_1/1,R_1/2,C_0/3}
  1029. {
  1030. \node (\p) at (\p*3,0) {\large $\i$};
  1031. }
  1032. \foreach \x/\y/\lbl in {1/2/uniquify,2/3/flatten}
  1033. {
  1034. \path[->,bend left=15] (\x) edge [above] node {\ttfamily\footnotesize \lbl} (\y);
  1035. }
  1036. \end{tikzpicture}
  1037. \]
  1038. Each of these steps in the compiler is implemented by a function,
  1039. typically a structurally recursive function that translates an input
  1040. AST into an output AST. We refer to such a function as a \emph{pass}
  1041. because it makes a pass over the AST.
  1042. The syntax for $C_0$ is defined in Figure~\ref{fig:c0-syntax}. The
  1043. $C_0$ language supports the same operators as $R_1$ but the arguments
  1044. of operators are now restricted to just variables and integers. The
  1045. \key{let} construct of $R_1$ is replaced by an assignment statement
  1046. and there is a \key{return} construct to specify the return value of
the program. A program consists of a sequence of statements that
includes at least one \key{return} statement.
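For example, a $C_0$ program that adds $10$ and $32$ and returns the
result can be written as the following s-expression. Here we assume that
the \itm{info} field holds the list of variables and that assignments and
returns are spelled \key{assign} and \key{return}; adapt the spelling to
whatever representation your compiler uses.
\begin{lstlisting}
(program (x)
  (assign x (+ 10 32))
  (return x))
\end{lstlisting}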
  1049. \begin{figure}[tbp]
  1050. \fbox{
  1051. \begin{minipage}{0.96\textwidth}
  1052. \[
  1053. \begin{array}{lcl}
  1054. \Arg &::=& \Int \mid \Var \\
  1055. \Exp &::=& \Arg \mid (\Op \; \Arg^{*})\\
  1056. \Stmt &::=& \ASSIGN{\Var}{\Exp} \mid \RETURN{\Arg} \\
  1057. \Prog & ::= & (\key{program}\;\itm{info}\;\Stmt^{+})
  1058. \end{array}
  1059. \]
  1060. \end{minipage}
  1061. }
  1062. \caption{The $C_0$ intermediate language.}
  1063. \label{fig:c0-syntax}
  1064. \end{figure}
  1065. To get from $C_0$ to x86-64 assembly it remains to handle difference
  1066. \#1 (the format of instructions) and difference \#3 (variables versus
  1067. registers). These two differences are intertwined, creating a bit of a
  1068. Gordian Knot. To handle difference \#3, we need to map some variables
  1069. to registers (there are only 16 registers) and the remaining variables
  1070. to locations on the stack (which is unbounded). To make good decisions
  1071. regarding this mapping, we need the program to be close to its final
  1072. form (in x86-64 assembly) so we know exactly when which variables are
  1073. used. However, the choice of x86-64 instruction depends on whether
  1074. the arguments are registers or stack locations, so we have a circular
  1075. dependency. We cut this knot by doing an optimistic selection of
  1076. instructions in the \key{select-instructions} pass, followed by the
  1077. \key{assign-homes} pass to map variables to registers or stack
  1078. locations, and conclude by finalizing the instruction selection in the
  1079. \key{patch-instructions} pass.
  1080. \[
  1081. \begin{tikzpicture}[baseline=(current bounding box.center)]
  1082. \node (1) at (0,0) {\large $C_0$};
  1083. \node (2) at (3,0) {\large $\text{x86}^{*}$};
  1084. \node (3) at (6,0) {\large $\text{x86}^{*}$};
  1085. \node (4) at (9,0) {\large $\text{x86}$};
  1086. \path[->,bend left=15] (1) edge [above] node {\ttfamily\footnotesize select-instr.} (2);
  1087. \path[->,bend left=15] (2) edge [above] node {\ttfamily\footnotesize assign-homes} (3);
  1088. \path[->,bend left=15] (3) edge [above] node {\ttfamily\footnotesize patch-instr.} (4);
  1089. \end{tikzpicture}
  1090. \]
  1091. The \key{select-instructions} pass is optimistic in the sense that it
treats variables as if they were all mapped to registers. The pass
generates a program that consists of x86-64 instructions but that
still uses variables, so it is an intermediate language that is
technically different from x86-64, which explains the asterisks in the
diagram above.
In this chapter we shall take the easy road to implementing
  1098. \key{assign-homes} and simply map all variables to stack locations.
  1099. The topic of Chapter~\ref{ch:register-allocation} is implementing a
smarter approach in which we make a best effort to map variables to
  1101. registers, resorting to the stack only when necessary.
  1102. %% \marginpar{\scriptsize I'm confused: shouldn't `select instructions' do this?
  1103. %% After all, that selects the x86-64 instructions. Even if it is separate,
  1104. %% if we perform `patching' before register allocation, we aren't forced to rely on
  1105. %% \key{rax} as much. This can ultimately make a more-performant result. --
  1106. %% Cam}
  1107. Once variables have been assigned to their homes, we can finalize the
instruction selection by dealing with an idiosyncrasy of x86
  1109. assembly. Many x86 instructions have two arguments but only one of the
  1110. arguments may be a memory reference (the stack is a part of memory).
  1111. Because some variables may get mapped to stack locations, some of our
  1112. generated instructions may violate this restriction. The purpose of
  1113. the \key{patch-instructions} pass is to fix this problem by replacing
  1114. every violating instruction with a short sequence of instructions that
  1115. use the \key{rax} register. Once we have implemented a good register
  1116. allocator (Chapter~\ref{ch:register-allocation}), the need to patch
  1117. instructions will be relatively rare.
  1118. \section{Uniquify Variables}
  1119. \label{sec:uniquify-s0}
  1120. The purpose of this pass is to make sure that each \key{let} uses a
  1121. unique variable name. For example, the \key{uniquify} pass could
  1122. translate
  1123. \[
  1124. \LET{x}{32}{ \BINOP{+}{ \LET{x}{10}{x} }{ x } }
  1125. \]
  1126. to
  1127. \[
  1128. \LET{x.1}{32}{ \BINOP{+}{ \LET{x.2}{10}{x.2} }{ x.1 } }
  1129. \]
  1130. We recommend implementing \key{uniquify} as a recursive function that
  1131. mostly just copies the input program. However, when encountering a
  1132. \key{let}, it should generate a unique name for the variable (the
  1133. Racket function \key{gensym} is handy for this) and associate the old
  1134. name with the new unique name in an association list. The
  1135. \key{uniquify} function will need to access this association list when
it gets to a variable reference, so we add another parameter to
  1137. \key{uniquify} for the association list. It is quite common for a
  1138. compiler pass to need a map to store extra information about
  1139. variables. Such maps are often called \emph{symbol tables}.
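For example, the following snippet sketches the mechanics of \key{gensym}
together with an association list; the names here are only for
illustration.
\begin{lstlisting}
(define alist '())                           ; an empty symbol table
(define new-x (gensym 'x))                   ; a fresh name such as x1234
(define alist2 (cons (cons 'x new-x) alist)) ; associate old name with new
(cdr (assq 'x alist2))                       ; => the fresh name for x
\end{lstlisting}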
  1140. The skeleton of the \key{uniquify} function is shown in
  1141. Figure~\ref{fig:uniquify-s0}. The function is curried so that it is
  1142. convenient to partially apply it to an association list and then apply
  1143. it to different expressions, as in the last clause for primitive
  1144. operations in Figure~\ref{fig:uniquify-s0}.
  1145. \begin{exercise}
  1146. \normalfont % I don't like the italics for exercises. -Jeremy
  1147. Complete the \key{uniquify} pass by filling in the blanks, that is,
  1148. implement the clauses for variables and for the \key{let} construct.
  1149. \end{exercise}
  1150. \begin{figure}[tbp]
  1151. \begin{lstlisting}
  1152. (define uniquify
  1153. (lambda (alist)
  1154. (lambda (e)
  1155. (match e
  1156. [(? symbol?) ___]
  1157. [(? integer?) e]
  1158. [`(let ([,x ,e]) ,body) ___]
  1159. [`(program ,info ,e)
  1160. `(program ,info ,((uniquify alist) e))]
  1161. [`(,op ,es ...)
  1162. `(,op ,@(map (uniquify alist) es))]
  1163. ))))
  1164. \end{lstlisting}
  1165. \caption{Skeleton for the \key{uniquify} pass.}
  1166. \label{fig:uniquify-s0}
  1167. \end{figure}
  1168. \begin{exercise}
  1169. \normalfont % I don't like the italics for exercises. -Jeremy
  1170. Test your \key{uniquify} pass by creating three example $R_1$ programs
  1171. and checking whether the output programs produce the same result as
  1172. the input programs. The $R_1$ programs should be designed to test the
  1173. most interesting parts of the \key{uniquify} pass, that is, the
  1174. programs should include \key{let} constructs, variables, and variables
that overshadow each other. The three programs should be in a
subdirectory named \key{tests} and they should have the same file name
  1177. except for a different integer at the end of the name, followed by the
  1178. ending \key{.scm}. Use the \key{interp-tests} function
  1179. (Appendix~\ref{appendix:utilities}) from \key{utilities.rkt} to test
  1180. your \key{uniquify} pass on the example programs.
  1181. %% You can use the interpreter \key{interpret-S0} defined in the
  1182. %% \key{interp.rkt} file. The entire sequence of tests should be a short
  1183. %% Racket program so you can re-run all the tests by running the Racket
  1184. %% program. We refer to this as the \emph{regression test} program.
  1185. \end{exercise}
  1186. \section{Flatten Expressions}
  1187. \label{sec:flatten-s0}
  1188. The \key{flatten} pass will transform $R_1$ programs into $C_0$
  1189. programs. In particular, the purpose of the \key{flatten} pass is to
  1190. get rid of nested expressions, such as the $\UNIOP{-}{10}$ in the
  1191. following program.
  1192. \[
  1193. \BINOP{+}{52}{ \UNIOP{-}{10} }
  1194. \]
  1195. This can be accomplished by introducing a new variable, assigning the
  1196. nested expression to the new variable, and then using the new variable
  1197. in place of the nested expressions. For example, the above program is
  1198. translated to the following one.
  1199. \[
  1200. \begin{array}{l}
  1201. \ASSIGN{ \itm{x} }{ \UNIOP{-}{10} } \\
  1202. \ASSIGN{ \itm{y} }{ \BINOP{+}{52}{ \itm{x} } } \\
  1203. \RETURN{ y }
  1204. \end{array}
  1205. \]
  1206. We recommend implementing \key{flatten} as a structurally recursive
function that returns two things: 1) the newly flattened expression,
  1208. and 2) a list of assignment statements, one for each of the new
  1209. variables introduced while flattening the expression. You can return
  1210. multiple things from a function using the \key{values} form and you
  1211. can receive multiple things from a function call using the
  1212. \key{define-values} form. If you are not familiar with these
  1213. constructs, the Racket documentation will be of help.
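If these forms are new to you, the following toy function returns two
values in the style we have in mind. The \key{flatten-neg} name and the
\key{(assign ...)} statement representation are only for illustration.
\begin{lstlisting}
(define (flatten-neg e)
  ;; return two things: a temporary variable and the statement
  ;; that computes its value
  (define tmp (gensym 'tmp))
  (values tmp `((assign ,tmp (- ,e)))))

(define-values (new-exp stmts) (flatten-neg 10))
;; new-exp is a fresh symbol such as tmp1234
;; stmts is ((assign tmp1234 (- 10)))
\end{lstlisting}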
  1214. Take special care for programs such as the following that initialize
  1215. variables with integers or other variables.
  1216. \[
  1217. \LET{a}{42}{ \LET{b}{a}{ b }}
  1218. \]
  1219. This program should be translated to
  1220. \[
  1221. \ASSIGN{a}{42} \;
  1222. \ASSIGN{b}{a} \;
  1223. \RETURN{b}
  1224. \]
  1225. and not the following, which could result from a naive implementation
  1226. of \key{flatten}.
  1227. \[
  1228. \ASSIGN{x.1}{42}\;
  1229. \ASSIGN{a}{x.1}\;
  1230. \ASSIGN{x.2}{a}\;
  1231. \ASSIGN{b}{x.2}\;
  1232. \RETURN{b}
  1233. \]
  1234. \begin{exercise}
  1235. \normalfont
  1236. Implement the \key{flatten} pass and test it on all of the example
  1237. programs that you created to test the \key{uniquify} pass and create
  1238. three new example programs that are designed to exercise all of the
  1239. interesting code in the \key{flatten} pass. Use the \key{interp-tests}
  1240. function (Appendix~\ref{appendix:utilities}) from \key{utilities.rkt} to
  1241. test your passes on the example programs.
  1242. \end{exercise}
  1243. \section{Select Instructions}
  1244. \label{sec:select-s0}
  1245. In the \key{select-instructions} pass we begin the work of
  1246. translating from $C_0$ to x86. The target language of this pass is a
  1247. pseudo-x86 language that still uses variables, so we add an AST node
  1248. of the form $\VAR{\itm{var}}$ to the x86 abstract syntax. The
  1249. \key{select-instructions} pass deals with the differing format of
  1250. arithmetic operations. For example, in $C_0$ an addition operation
  1251. could take the following form:
  1252. \[
  1253. \ASSIGN{x}{ \BINOP{+}{10}{32} }
  1254. \]
  1255. To translate to x86, we need to express this addition using the
\key{addq} instruction that does an in-place update. So we first move
  1257. $10$ to $x$ then perform the \key{addq}.
  1258. \[
(\key{movq}\;\INT{10}\; \VAR{x})\; (\key{addq} \;\INT{32}\; \VAR{x})
  1260. \]
  1261. There are some cases that require special care to avoid generating
  1262. needlessly complicated code. If one of the arguments is the same as
  1263. the left-hand side of the assignment, then there is no need for the
  1264. extra move instruction. For example, the following
  1265. \[
  1266. \ASSIGN{x}{ \BINOP{+}{10}{x} }
  1267. \quad\text{should translate to}\quad
  1268. (\key{addq} \; \INT{10}\; \VAR{x})
  1269. \]
  1270. Regarding the \RETURN{e} statement of $C_0$, we recommend treating it
as an assignment to the \key{rax} register and letting the procedure
  1272. conclusion handle the transfer of control back to the calling
  1273. procedure.
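The following sketch shows how a few of these cases might look in code.
The \key{(assign ...)} and \key{(return ...)} statement forms and the
\key{select-arg} helper are assumptions about the representation produced
by your \key{flatten} pass, not a fixed interface.
\begin{lstlisting}
;; Sketch of instruction selection for a few statement forms.
(define (select-arg a)
  (if (integer? a) `(int ,a) `(var ,a)))

(define (select-stmt stmt)
  (match stmt
    [`(assign ,x (+ ,a ,b))
     (cond [(equal? a x) `((addq ,(select-arg b) (var ,x)))]  ; x = x + b
           [(equal? b x) `((addq ,(select-arg a) (var ,x)))]  ; x = a + x
           [else `((movq ,(select-arg a) (var ,x))
                   (addq ,(select-arg b) (var ,x)))])]
    [`(assign ,x (- ,a))
     `((movq ,(select-arg a) (var ,x))
       (negq (var ,x)))]
    [`(return ,a)
     `((movq ,(select-arg a) (reg rax)))]))
\end{lstlisting}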
  1274. \section{Assign Homes}
  1275. \label{sec:assign-s0}
  1276. As discussed in Section~\ref{sec:plan-s0-x86}, the
  1277. \key{assign-homes} pass places all of the variables on the stack.
  1278. Consider again the example $R_1$ program $\BINOP{+}{52}{ \UNIOP{-}{10} }$,
  1279. which after \key{select-instructions} looks like the following.
  1280. \[
  1281. \begin{array}{l}
  1282. (\key{movq}\;\INT{10}\; \VAR{x})\\
  1283. (\key{negq}\; \VAR{x})\\
  1284. (\key{movq}\; \INT{52}\; \REG{\itm{rax}})\\
(\key{addq}\; \VAR{x}\; \REG{\itm{rax}})
  1286. \end{array}
  1287. \]
  1288. The one and only variable $x$ is assigned to stack location
  1289. \key{-8(\%rbp)}, so the \key{assign-homes} pass translates the
  1290. above to
  1291. \[
  1292. \begin{array}{l}
  1293. (\key{movq}\;\INT{10}\; \STACKLOC{{-}8})\\
  1294. (\key{negq}\; \STACKLOC{{-}8})\\
  1295. (\key{movq}\; \INT{52}\; \REG{\itm{rax}})\\
  1296. (\key{addq}\; \STACKLOC{{-}8}\; \REG{\itm{rax}})
  1297. \end{array}
  1298. \]
  1299. In the process of assigning stack locations to variables, it is
convenient to compute and store the size of the frame, which will be
  1301. needed later to generate the procedure conclusion.
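The following sketch shows one way to do this bookkeeping, assuming the
list of variables is available (for example, in the \itm{info} field).
Rounding the frame size up to a multiple of 16 bytes keeps the stack
pointer aligned as the x86-64 calling convention expects.
\begin{lstlisting}
;; Sketch: give each variable the next 8-byte slot below rbp and
;; compute the frame size, rounded up to a multiple of 16 bytes.
(define (make-homes vars)
  (for/list ([x vars] [i (in-naturals 1)])
    (cons x `(stack-loc ,(* -8 i)))))

(define (frame-size vars)
  (define bytes (* 8 (length vars)))
  (* 16 (ceiling (/ bytes 16))))

;; (make-homes '(x y))   ; => ((x stack-loc -8) (y stack-loc -16))
;; (frame-size '(x y))   ; => 16
\end{lstlisting}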
  1302. \section{Patch Instructions}
  1303. \label{sec:patch-s0}
  1304. The purpose of this pass is to make sure that each instruction adheres
  1305. to the restrictions regarding which arguments can be memory
  1306. references. For most instructions, the rule is that at most one
  1307. argument may be a memory reference.
  1308. Consider again the following example.
  1309. \[
  1310. \LET{a}{42}{ \LET{b}{a}{ b }}
  1311. \]
After the \key{assign-homes} pass, the above has been translated to
  1313. \[
  1314. \begin{array}{l}
  1315. (\key{movq} \;\INT{42}\; \STACKLOC{{-}8})\\
  1316. (\key{movq}\;\STACKLOC{{-}8}\; \STACKLOC{{-}16})\\
  1317. (\key{movq}\;\STACKLOC{{-}16}\; \REG{\itm{rax}})
  1318. \end{array}
  1319. \]
  1320. The second \key{movq} instruction is problematic because both arguments
  1321. are stack locations. We suggest fixing this problem by moving from the
  1322. source to \key{rax} and then from \key{rax} to the destination, as
  1323. follows.
  1324. \[
  1325. \begin{array}{l}
  1326. (\key{movq} \;\INT{42}\; \STACKLOC{{-}8})\\
  1327. (\key{movq}\;\STACKLOC{{-}8}\; \REG{\itm{rax}})\\
  1328. (\key{movq}\;\REG{\itm{rax}}\; \STACKLOC{{-}16})\\
  1329. (\key{movq}\;\STACKLOC{{-}16}\; \REG{\itm{rax}})
  1330. \end{array}
  1331. \]
  1332. %% The \key{imulq} instruction is a special case because the destination
  1333. %% argument must be a register.
  1334. \section{Print x86-64}
  1335. \label{sec:print-x86}
  1336. The last step of the compiler from $R_1$ to x86-64 is to convert the
  1337. x86-64 AST (defined in Figure~\ref{fig:x86-ast-a}) to the string
  1338. representation (defined in Figure~\ref{fig:x86-a}). The Racket
  1339. \key{format} and \key{string-append} functions are useful in this
  1340. regard. The main work that this step needs to perform is to create the
  1341. \key{\_main} function and the standard instructions for its prelude
  1342. and conclusion, as described in Section~\ref{sec:x86-64}. You need to
  1343. know the number of stack-allocated variables, which is convenient to
  1344. compute in the \key{assign-homes} pass (Section~\ref{sec:assign-s0})
  1345. and then store in the $\itm{info}$ field of the \key{program}.
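For instance, the instruction-printing half of this pass might look like
the following sketch; only a few instruction forms are shown, the prelude
and conclusion are left out, and the exact whitespace is a matter of
taste.
\begin{lstlisting}
;; Sketch: turn a few instruction forms into x86-64 assembly text.
(define (print-arg a)
  (match a
    [`(int ,n)       (format "$~a" n)]
    [`(reg ,r)       (format "%~a" r)]
    [`(stack-loc ,n) (format "~a(%rbp)" n)]))

(define (print-instr instr)
  (match instr
    [`(movq ,s ,d) (format "\tmovq\t~a, ~a\n" (print-arg s) (print-arg d))]
    [`(addq ,s ,d) (format "\taddq\t~a, ~a\n" (print-arg s) (print-arg d))]
    [`(negq ,d)    (format "\tnegq\t~a\n" (print-arg d))]
    [`(retq)       "\tretq\n"]))
\end{lstlisting}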
  1346. %% \section{Testing with Interpreters}
  1347. %% The typical way to test a compiler is to run the generated assembly
  1348. %% code on a diverse set of programs and check whether they behave as
  1349. %% expected. However, when a compiler is structured as our is, with many
  1350. %% passes, when there is an error in the generated assembly code it can
  1351. %% be hard to determine which pass contains the source of the error. A
  1352. %% good way to isolate the error is to not only test the generated
  1353. %% assembly code but to also test the output of every pass. This requires
  1354. %% having interpreters for all the intermediate languages. Indeed, the
  1355. %% file \key{interp.rkt} in the supplemental code provides interpreters
  1356. %% for all the intermediate languages described in this book, starting
  1357. %% with interpreters for $R_1$, $C_0$, and x86 (in abstract syntax).
  1358. %% The file \key{run-tests.rkt} automates the process of running the
  1359. %% interpreters on the output programs of each pass and checking their
  1360. %% result.
  1361. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1362. \chapter{Register Allocation}
  1363. \label{ch:register-allocation}
  1364. In Chapter~\ref{ch:int-exp} we simplified the generation of x86-64
  1365. assembly by placing all variables on the stack. We can improve the
  1366. performance of the generated code considerably if we instead try to
  1367. place as many variables as possible into registers. The CPU can
  1368. access a register in a single cycle, whereas accessing the stack can
  1369. take from several cycles (to go to cache) to hundreds of cycles (to go
  1370. to main memory). Figure~\ref{fig:reg-eg} shows a program with four
  1371. variables that serves as a running example. We show the source program
  1372. and also the output of instruction selection. At that point the
  1373. program is almost x86-64 assembly but not quite; it still contains
  1374. variables instead of stack locations or registers.
  1375. \begin{figure}
  1376. \begin{minipage}{0.45\textwidth}
  1377. Source program:
  1378. \begin{lstlisting}
  1379. (let ([v 1])
  1380. (let ([w 46])
  1381. (let ([x (+ v 7)])
  1382. (let ([y (+ 4 x)])
  1383. (let ([z (+ x w)])
  1384. (- z y))))))
  1385. \end{lstlisting}
  1386. \end{minipage}
  1387. \begin{minipage}{0.45\textwidth}
  1388. After instruction selection:
  1389. \begin{lstlisting}
  1390. (program (v w x y z)
  1391. (movq (int 1) (var v))
  1392. (movq (int 46) (var w))
  1393. (movq (var v) (var x))
  1394. (addq (int 7) (var x))
  1395. (movq (var x) (var y))
  1396. (addq (int 4) (var y))
  1397. (movq (var x) (var z))
  1398. (addq (var w) (var z))
  1399. (movq (var z) (reg rax))
  1400. (subq (var y) (reg rax)))
  1401. \end{lstlisting}
  1402. \end{minipage}
  1403. \caption{Running example for this chapter.}
  1404. \label{fig:reg-eg}
  1405. \end{figure}
  1406. The goal of register allocation is to fit as many variables into
  1407. registers as possible. It is often the case that we have more
  1408. variables than registers, so we can't naively map each variable to a
  1409. register. Fortunately, it is also common for different variables to be
  1410. needed during different periods of time, and in such cases the
  1411. variables can be mapped to the same register. Consider variables $x$
  1412. and $y$ in Figure~\ref{fig:reg-eg}. After the variable $x$ is moved
  1413. to $z$ it is no longer needed. Variable $y$, on the other hand, is
  1414. used only after this point, so $x$ and $y$ could share the same
  1415. register. The topic of the next section is how we compute where a
  1416. variable is needed.
  1417. \section{Liveness Analysis}
  1418. A variable is \emph{live} if the variable is used at some later point
  1419. in the program and there is not an intervening assignment to the
  1420. variable.
  1421. %
  1422. To understand the latter condition, consider the following code
  1423. fragment in which there are two writes to $b$. Are $a$ and
  1424. $b$ both live at the same time?
  1425. \begin{lstlisting}[numbers=left,numberstyle=\tiny]
  1426. (movq (int 5) (var a)) ; @$a \gets 5$@
  1427. (movq (int 30) (var b)) ; @$b \gets 30$@
(movq (var a) (var c)) ; @$c \gets a$@
  1429. (movq (int 10) (var b)) ; @$b \gets 10$@
  1430. (addq (var b) (var c)) ; @$c \gets c + b$@
  1431. \end{lstlisting}
  1432. The answer is no because the value $30$ written to $b$ on line 2 is
  1433. never used. The variable $b$ is read on line 5 and there is an
  1434. intervening write to $b$ on line 4, so the read on line 5 receives the
  1435. value written on line 4, not line 2.
  1436. The live variables can be computed by traversing the instruction
  1437. sequence back to front (i.e., backwards in execution order). Let
  1438. $I_1,\ldots, I_n$ be the instruction sequence. We write
  1439. $L_{\mathsf{after}}(k)$ for the set of live variables after
  1440. instruction $I_k$ and $L_{\mathsf{before}}(k)$ for the set of live
  1441. variables before instruction $I_k$. The live variables after an
  1442. instruction are always the same as the live variables before the next
  1443. instruction.
  1444. \begin{equation*}
  1445. L_{\mathsf{after}}(k) = L_{\mathsf{before}}(k+1)
  1446. \end{equation*}
  1447. To start things off, there are no live variables after the last
  1448. instruction, so
  1449. \begin{equation*}
  1450. L_{\mathsf{after}}(n) = \emptyset
  1451. \end{equation*}
  1452. We then apply the following rule repeatedly, traversing the
  1453. instruction sequence back to front.
  1454. \begin{equation*}
  1455. L_{\mathtt{before}}(k) = (L_{\mathtt{after}}(k) - W(k)) \cup R(k),
  1456. \end{equation*}
  1457. where $W(k)$ are the variables written to by instruction $I_k$ and
  1458. $R(k)$ are the variables read by instruction $I_k$.
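In code, this is a single backward fold over the instruction list. The
sketch below assumes helper functions \key{write-vars} and \key{read-vars}
(not shown, since they depend on your instruction representation) that
return the sets $W(k)$ and $R(k)$ as Racket sets.
\begin{lstlisting}
;; Sketch: compute the list of live-after sets, one per instruction,
;; by folding backwards over the instructions.
(define (uncover-live instrs)
  (define-values (live-after-sets _)
    (for/fold ([acc '()] [live-after (set)])
              ([instr (in-list (reverse instrs))])
      (let ([live-before
             (set-union (set-subtract live-after (write-vars instr))
                        (read-vars instr))])
        (values (cons live-after acc) live-before))))
  live-after-sets)
\end{lstlisting}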
  1459. Figure~\ref{fig:live-eg} shows the results of live variables analysis
  1460. for the running example. Next to each instruction we write its
  1461. $L_{\mathtt{after}}$ set.
  1462. \begin{figure}[tbp]
  1463. \begin{lstlisting}
  1464. (program (v w x y z)
  1465. (movq (int 1) (var v)) @$\{ v \}$@
  1466. (movq (int 46) (var w)) @$\{ v, w \}$@
  1467. (movq (var v) (var x)) @$\{ w, x \}$@
  1468. (addq (int 7) (var x)) @$\{ w, x \}$@
  1469. (movq (var x) (var y)) @$\{ w, x, y\}$@
  1470. (addq (int 4) (var y)) @$\{ w, x, y \}$@
  1471. (movq (var x) (var z)) @$\{ w, y, z \}$@
  1472. (addq (var w) (var z)) @$\{ y, z \}$@
  1473. (movq (var z) (reg rax)) @$\{ y \}$@
  1474. (subq (var y) (reg rax))) @$\{\}$@
  1475. \end{lstlisting}
  1476. \caption{Running example program annotated with live-after sets.}
  1477. \label{fig:live-eg}
  1478. \end{figure}
  1479. \section{Building the Interference Graph}
  1480. Based on the liveness analysis, we know the program regions where each
  1481. variable is needed. However, during register allocation, we need to
  1482. answer questions of the specific form: are variables $u$ and $v$ ever
  1483. live at the same time? (And therefore cannot be assigned to the same
  1484. register.) To make this question easier to answer, we create an
  1485. explicit data structure, an \emph{interference graph}. An
  1486. interference graph is an undirected graph that has an edge between two
  1487. variables if they are live at the same time, that is, if they
  1488. interfere with each other.
  1489. The most obvious way to compute the interference graph is to look at
  1490. the set of live variables between each statement in the program, and
  1491. add an edge to the graph for every pair of variables in the same set.
  1492. This approach is less than ideal for two reasons. First, it can be
  1493. rather expensive because it takes $O(n^2)$ time to look at every pair
  1494. in a set of $n$ live variables. Second, there is a special case in
  1495. which two variables that are live at the same time do not actually
  1496. interfere with each other: when they both contain the same value
  1497. because we have assigned one to the other.
A better way to compute the edges of the interference graph is given
by the following rules; a code sketch follows the list.
  1500. \begin{itemize}
  1501. \item If instruction $I_k$ is a move: (\key{movq} $s$\, $d$), then add
  1502. the edge $(d,v)$ for every $v \in L_{\mathsf{after}}(k)$ unless $v =
  1503. d$ or $v = s$.
  1504. \item If instruction $I_k$ is not a move but some other arithmetic
  1505. instruction such as (\key{addq} $s$\, $d$), then add the edge $(d,v)$
  1506. for every $v \in L_{\mathsf{after}}(k)$ unless $v = d$.
  1507. \item If instruction $I_k$ is of the form (\key{call}
  1508. $\mathit{label}$), then add an edge $(r,v)$ for every caller-save
  1509. register $r$ and every variable $v \in L_{\mathsf{after}}(k)$.
  1510. \end{itemize}
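The sketch below applies the first two rules, using a hash table from
each variable to the set of variables it interferes with as a stand-in
graph representation; the \key{negq} and \key{call} cases are omitted,
and only variable (not register) destinations are recorded.
\begin{lstlisting}
;; Sketch: build the interference graph from the live-after sets.
(define (build-interference instrs live-after-sets)
  (define graph (make-hash))
  (define (add-edge! u v)
    (unless (equal? u v)
      (hash-update! graph u (lambda (s) (set-add s v)) (set))
      (hash-update! graph v (lambda (s) (set-add s u)) (set))))
  (define (var-name a) (match a [`(var ,x) x] [_ #f]))
  (for ([instr (in-list instrs)]
        [live-after (in-list live-after-sets)])
    (match instr
      [`(movq ,s ,d)
       (let ([dv (var-name d)] [sv (var-name s)])
         (when dv
           (for ([v (in-set live-after)])
             (unless (or (equal? v dv) (equal? v sv))
               (add-edge! dv v)))))]
      [`(,op ,s ,d)
       (let ([dv (var-name d)])
         (when dv
           (for ([v (in-set live-after)])
             (unless (equal? v dv)
               (add-edge! dv v)))))]
      [_ (void)]))
  graph)
\end{lstlisting}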
Working from the top to bottom of Figure~\ref{fig:live-eg}, $v$
interferes with $w$, $x$ interferes with $w$, $y$ interferes with $w$
and $x$, and $z$ interferes with $w$ and $y$. The resulting
interference graph is shown in
  1514. Figure~\ref{fig:interfere}.
  1515. \begin{figure}[tbp]
  1516. \large
  1517. \[
  1518. \begin{tikzpicture}[baseline=(current bounding box.center)]
  1519. \node (v) at (0,0) {$v$};
  1520. \node (w) at (2,0) {$w$};
  1521. \node (x) at (4,0) {$x$};
  1522. \node (y) at (2,-2) {$y$};
  1523. \node (z) at (4,-2) {$z$};
  1524. \draw (v) to (w);
  1525. \foreach \i in {w,x,y}
  1526. {
  1527. \foreach \j in {w,x,y}
  1528. {
  1529. \draw (\i) to (\j);
  1530. }
  1531. }
  1532. \draw (z) to (w);
  1533. \draw (z) to (y);
  1534. \end{tikzpicture}
  1535. \]
  1536. \caption{Interference graph for the running example.}
  1537. \label{fig:interfere}
  1538. \end{figure}
  1539. \section{Graph Coloring via Sudoku}
  1540. We now come to the main event, mapping variables to registers (or to
  1541. stack locations in the event that we run out of registers). We need
  1542. to make sure not to map two variables to the same register if the two
  1543. variables interfere with each other. In terms of the interference
  1544. graph, this means we cannot map adjacent nodes to the same register.
  1545. If we think of registers as colors, the register allocation problem
  1546. becomes the widely-studied graph coloring
  1547. problem~\citep{Balakrishnan:1996ve,Rosen:2002bh}.
The reader may be more familiar with the graph coloring problem than
he or she realizes; the popular game of Sudoku is an instance of the
  1550. graph coloring problem. The following describes how to build a graph
  1551. out of a Sudoku board.
  1552. \begin{itemize}
  1553. \item There is one node in the graph for each Sudoku square.
  1554. \item There is an edge between two nodes if the corresponding squares
  1555. are in the same row or column, or if the squares are in the same
  1556. $3\times 3$ region.
  1557. \item Choose nine colors to correspond to the numbers $1$ to $9$.
  1558. \item Based on the initial assignment of numbers to squares in the
  1559. Sudoku board, assign the corresponding colors to the corresponding
  1560. nodes in the graph.
  1561. \end{itemize}
  1562. If you can color the remaining nodes in the graph with the nine
  1563. colors, then you've also solved the corresponding game of Sudoku.
  1564. Given that Sudoku is graph coloring, one can use Sudoku strategies to
  1565. come up with an algorithm for allocating registers. For example, one
  1566. of the basic techniques for Sudoku is Pencil Marks. The idea is that
  1567. you use a process of elimination to determine what numbers still make
  1568. sense for a square, and write down those numbers in the square
  1569. (writing very small). At first, each number might be a
  1570. possibility, but as the board fills up, more and more of the
  1571. possibilities are crossed off (or erased). For example, if the number
  1572. $1$ is assigned to a square, then by process of elimination, you can
  1573. cross off the $1$ pencil mark from all the squares in the same row,
  1574. column, and region. Many Sudoku computer games provide automatic
  1575. support for Pencil Marks. This heuristic also reduces the degree of
  1576. branching in the search tree.
  1577. The Pencil Marks technique corresponds to the notion of color
  1578. \emph{saturation} due to \cite{Brelaz:1979eu}. The
  1579. saturation of a node, in Sudoku terms, is the number of possibilities
  1580. that have been crossed off using the process of elimination mentioned
  1581. above. In graph terminology, we have the following definition:
  1582. \begin{equation*}
  1583. \mathrm{saturation}(u) = |\{ c \;|\; \exists v. v \in \mathrm{Adj}(u)
  1584. \text{ and } \mathrm{color}(v) = c \}|
  1585. \end{equation*}
  1586. where $\mathrm{Adj}(u)$ is the set of nodes adjacent to $u$ and
  1587. the notation $|S|$ stands for the size of the set $S$.
  1588. Using the Pencil Marks technique leads to a simple strategy for
  1589. filling in numbers: if there is a square with only one possible number
  1590. left, then write down that number! But what if there are no squares
  1591. with only one possibility left? One brute-force approach is to just
  1592. make a guess. If that guess ultimately leads to a solution, great. If
  1593. not, backtrack to the guess and make a different guess. Of course,
  1594. this is horribly time consuming. One standard way to reduce the amount
  1595. of backtracking is to use the most-constrained-first heuristic. That
  1596. is, when making a guess, always choose a square with the fewest
  1597. possibilities left (the node with the highest saturation). The idea
  1598. is that choosing highly constrained squares earlier rather than later
  1599. is better because later there may not be any possibilities left.
  1600. In some sense, register allocation is easier than Sudoku because we
  1601. can always cheat and add more numbers by spilling variables to the
  1602. stack. Also, we'd like to minimize the time needed to color the graph,
  1603. and backtracking is expensive. Thus, it makes sense to keep the
  1604. most-constrained-first heuristic but drop the backtracking in favor of
  1605. greedy search (guess and just keep going).
  1606. Figure~\ref{fig:satur-algo} gives the pseudo-code for this simple
  1607. greedy algorithm for register allocation based on saturation and the
  1608. most-constrained-first heuristic, which is roughly equivalent to the
  1609. DSATUR algorithm of \cite{Brelaz:1979eu} (also known as
  1610. saturation degree ordering
  1611. (SDO)~\citep{Gebremedhin:1999fk,Omari:2006uq}). Just as in Sudoku,
  1612. the algorithm represents colors with integers, with the first $k$
  1613. colors corresponding to the $k$ registers in a given machine and the
  1614. rest of the integers corresponding to stack locations.
  1615. \begin{figure}[btp]
  1616. \centering
  1617. \begin{lstlisting}[basicstyle=\rmfamily,deletekeywords={for,from,with,is,not,in,find},morekeywords={while},columns=fullflexible]
  1618. Algorithm: DSATUR
  1619. Input: a graph @$G$@
  1620. Output: an assignment @$\mathrm{color}[v]$@ for each node @$v \in G$@
  1621. @$W \gets \mathit{vertices}(G)$@
  1622. while @$W \neq \emptyset$@ do
  1623. pick a node @$u$@ from @$W$@ with the highest saturation,
  1624. breaking ties randomly
find the lowest color @$c$@ that is not in @$\{ \mathrm{color}[v] \;|\; v \in \mathrm{Adj}(u)\}$@
  1626. @$\mathrm{color}[u] \gets c$@
  1627. @$W \gets W - \{u\}$@
  1628. \end{lstlisting}
  1629. \caption{Saturation-based greedy graph coloring algorithm.}
  1630. \label{fig:satur-algo}
  1631. \end{figure}
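A Racket rendering of this loop might look like the following sketch,
reusing the hash-table graph from the interference sketch above; a linear
scan stands in for a proper priority queue, which keeps the code short at
some cost in efficiency.
\begin{lstlisting}
;; Sketch: saturation-based greedy coloring.
;; Returns a hash mapping each variable to a color (a natural number).
(define (color-graph graph vars)
  (define color (make-hash))
  (define (saturation u)
    (for/set ([v (in-set (hash-ref graph u (set)))]
              #:when (hash-has-key? color v))
      (hash-ref color v)))
  (define (lowest-color-not-in s)
    (let loop ([c 0])
      (if (set-member? s c) (loop (+ c 1)) c)))
  (let loop ([W (list->set vars)])
    (unless (set-empty? W)
      (let ([u (argmax (lambda (v) (set-count (saturation v)))
                       (set->list W))])
        (hash-set! color u (lowest-color-not-in (saturation u)))
        (loop (set-remove W u)))))
  color)
\end{lstlisting}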
  1632. With this algorithm in hand, let us return to the running example and
  1633. consider how to color the interference graph in
  1634. Figure~\ref{fig:interfere}. Initially, all of the nodes are not yet
  1635. colored and they are unsaturated, so we annotate each of them with a
  1636. dash for their color and an empty set for the saturation.
  1637. \[
  1638. \begin{tikzpicture}[baseline=(current bounding box.center)]
  1639. \node (v) at (0,0) {$v:-,\{\}$};
  1640. \node (w) at (3,0) {$w:-,\{\}$};
  1641. \node (x) at (6,0) {$x:-,\{\}$};
  1642. \node (y) at (3,-1.5) {$y:-,\{\}$};
  1643. \node (z) at (6,-1.5) {$z:-,\{\}$};
  1644. \draw (v) to (w);
  1645. \foreach \i in {w,x,y}
  1646. {
  1647. \foreach \j in {w,x,y}
  1648. {
  1649. \draw (\i) to (\j);
  1650. }
  1651. }
  1652. \draw (z) to (w);
  1653. \draw (z) to (y);
  1654. \end{tikzpicture}
  1655. \]
  1656. We select a maximally saturated node and color it $0$. In this case we
  1657. have a 5-way tie, so we arbitrarily pick $y$. The color $0$ is no
  1658. longer available for $w$, $x$, and $z$ because they interfere with
  1659. $y$.
  1660. \[
  1661. \begin{tikzpicture}[baseline=(current bounding box.center)]
  1662. \node (v) at (0,0) {$v:-,\{\}$};
  1663. \node (w) at (3,0) {$w:-,\{0\}$};
  1664. \node (x) at (6,0) {$x:-,\{0\}$};
  1665. \node (y) at (3,-1.5) {$y:0,\{\}$};
  1666. \node (z) at (6,-1.5) {$z:-,\{0\}$};
  1667. \draw (v) to (w);
  1668. \foreach \i in {w,x,y}
  1669. {
  1670. \foreach \j in {w,x,y}
  1671. {
  1672. \draw (\i) to (\j);
  1673. }
  1674. }
  1675. \draw (z) to (w);
  1676. \draw (z) to (y);
  1677. \end{tikzpicture}
  1678. \]
  1679. Now we repeat the process, selecting another maximally saturated node.
  1680. This time there is a three-way tie between $w$, $x$, and $z$. We color
  1681. $w$ with $1$.
  1682. \[
  1683. \begin{tikzpicture}[baseline=(current bounding box.center)]
  1684. \node (v) at (0,0) {$v:-,\{1\}$};
  1685. \node (w) at (3,0) {$w:1,\{0\}$};
  1686. \node (x) at (6,0) {$x:-,\{0,1\}$};
  1687. \node (y) at (3,-1.5) {$y:0,\{1\}$};
  1688. \node (z) at (6,-1.5) {$z:-,\{0,1\}$};
  1689. \draw (v) to (w);
  1690. \foreach \i in {w,x,y}
  1691. {
  1692. \foreach \j in {w,x,y}
  1693. {
  1694. \draw (\i) to (\j);
  1695. }
  1696. }
  1697. \draw (z) to (w);
  1698. \draw (z) to (y);
  1699. \end{tikzpicture}
  1700. \]
  1701. The most saturated nodes are now $x$ and $z$. We color $x$ with the
  1702. next available color which is $2$.
  1703. \[
  1704. \begin{tikzpicture}[baseline=(current bounding box.center)]
  1705. \node (v) at (0,0) {$v:-,\{1\}$};
  1706. \node (w) at (3,0) {$w:1,\{0,2\}$};
  1707. \node (x) at (6,0) {$x:2,\{0,1\}$};
  1708. \node (y) at (3,-1.5) {$y:0,\{1,2\}$};
  1709. \node (z) at (6,-1.5) {$z:-,\{0,1\}$};
  1710. \draw (v) to (w);
  1711. \foreach \i in {w,x,y}
  1712. {
  1713. \foreach \j in {w,x,y}
  1714. {
  1715. \draw (\i) to (\j);
  1716. }
  1717. }
  1718. \draw (z) to (w);
  1719. \draw (z) to (y);
  1720. \end{tikzpicture}
  1721. \]
  1722. We have only two nodes left to color, $v$ and $z$, but $z$ is
  1723. more highly saturated, so we color $z$ with $2$.
  1724. \[
  1725. \begin{tikzpicture}[baseline=(current bounding box.center)]
  1726. \node (v) at (0,0) {$v:-,\{1\}$};
  1727. \node (w) at (3,0) {$w:1,\{0,2\}$};
  1728. \node (x) at (6,0) {$x:2,\{0,1\}$};
  1729. \node (y) at (3,-1.5) {$y:0,\{1,2\}$};
  1730. \node (z) at (6,-1.5) {$z:2,\{0,1\}$};
  1731. \draw (v) to (w);
  1732. \foreach \i in {w,x,y}
  1733. {
  1734. \foreach \j in {w,x,y}
  1735. {
  1736. \draw (\i) to (\j);
  1737. }
  1738. }
  1739. \draw (z) to (w);
  1740. \draw (z) to (y);
  1741. \end{tikzpicture}
  1742. \]
  1743. The last iteration of the coloring algorithm assigns color $0$ to $v$.
  1744. \[
  1745. \begin{tikzpicture}[baseline=(current bounding box.center)]
  1746. \node (v) at (0,0) {$v:0,\{1\}$};
  1747. \node (w) at (3,0) {$w:1,\{0,2\}$};
  1748. \node (x) at (6,0) {$x:2,\{0,1\}$};
  1749. \node (y) at (3,-1.5) {$y:0,\{1,2\}$};
  1750. \node (z) at (6,-1.5) {$z:2,\{0,1\}$};
  1751. \draw (v) to (w);
  1752. \foreach \i in {w,x,y}
  1753. {
  1754. \foreach \j in {w,x,y}
  1755. {
  1756. \draw (\i) to (\j);
  1757. }
  1758. }
  1759. \draw (z) to (w);
  1760. \draw (z) to (y);
  1761. \end{tikzpicture}
  1762. \]
  1763. With the coloring complete, we can finalize assignment of variables to
  1764. registers and stack locations. Recall that if we have $k$ registers,
  1765. we map the first $k$ colors to registers and the rest to stack
  1766. locations.
Suppose for the moment that we have just one register to use for
register allocation, namely \key{rbx}. Then the following is
the mapping of colors to registers and stack locations.
  1770. \[
  1771. \{ 0 \mapsto \key{\%rbx}, \; 1 \mapsto \key{-8(\%rbp)}, \; 2 \mapsto \key{-16(\%rbp)}, \ldots \}
  1772. \]
  1773. Putting this together with the above coloring of the variables, we
  1774. arrive at the following assignment.
  1775. \[
  1776. \{ v \mapsto \key{\%rbx}, \;
  1777. w \mapsto \key{-8(\%rbp)}, \;
  1778. x \mapsto \key{-16(\%rbp)}, \;
  1779. y \mapsto \key{\%rbx}, \;
  1780. z\mapsto \key{-16(\%rbp)} \}
  1781. \]
  1782. Applying this assignment to our running example
  1783. (Figure~\ref{fig:reg-eg}) yields the following program.
  1784. % why frame size of 32? -JGS
  1785. \begin{lstlisting}
  1786. (program 32
  1787. (movq (int 1) (reg rbx))
  1788. (movq (int 46) (stack-loc -8))
  1789. (movq (reg rbx) (stack-loc -16))
  1790. (addq (int 7) (stack-loc -16))
(movq (stack-loc -16) (reg rbx))
  1792. (addq (int 4) (reg rbx))
  1793. (movq (stack-loc -16) (stack-loc -16))
  1794. (addq (stack-loc -8) (stack-loc -16))
  1795. (movq (stack-loc -16) (reg rax))
  1796. (subq (reg rbx) (reg rax)))
  1797. \end{lstlisting}
  1798. This program is almost an x86-64 program. The remaining step is to apply
  1799. the patch instructions pass. In this example, the trivial move of
  1800. \key{-16(\%rbp)} to itself is deleted and the addition of
  1801. \key{-8(\%rbp)} to \key{-16(\%rbp)} is fixed by going through
  1802. \key{\%rax}. The following shows the portion of the program that
  1803. changed.
  1804. \begin{lstlisting}
  1805. (addq (int 4) (reg rbx))
(movq (stack-loc -8) (reg rax))
  1807. (addq (reg rax) (stack-loc -16))
  1808. \end{lstlisting}
  1809. An overview of all of the passes involved in register allocation is
  1810. shown in Figure~\ref{fig:reg-alloc-passes}.
  1811. \begin{figure}[tbp]
  1812. \[
  1813. \begin{tikzpicture}[baseline=(current bounding box.center)]
  1814. \node (1) at (-3.5,0) {$C_0$};
  1815. \node (2) at (0,0) {$\text{x86-64}^{*}$};
  1816. \node (3) at (0,-1.5) {$\text{x86-64}^{*}$};
  1817. \node (4) at (0,-3) {$\text{x86-64}^{*}$};
  1818. \node (5) at (0,-4.5) {$\text{x86-64}^{*}$};
  1819. \node (6) at (3.5,-4.5) {$\text{x86-64}$};
  1820. \path[->] (1) edge [above] node {\ttfamily\scriptsize select-instr.} (2);
  1821. \path[->] (2) edge [right] node {\ttfamily\scriptsize uncover-live} (3);
  1822. \path[->] (3) edge [right] node {\ttfamily\scriptsize build-interference} (4);
  1823. \path[->] (4) edge [left] node {\ttfamily\scriptsize allocate-registers} (5);
  1824. \path[->] (5) edge [above] node {\ttfamily\scriptsize patch-instr.} (6);
  1825. \end{tikzpicture}
  1826. \]
  1827. \caption{Diagram of the passes for register allocation.}
  1828. \label{fig:reg-alloc-passes}
  1829. \end{figure}
  1830. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1831. \chapter{Booleans, Type Checking, and Control Flow}
  1832. \label{ch:bool-types}
  1833. \section{The $R_2$ Language}
  1834. \begin{figure}[htbp]
  1835. \centering
  1836. \fbox{
  1837. \begin{minipage}{0.85\textwidth}
  1838. \[
  1839. \begin{array}{lcl}
  1840. \Op &::=& \ldots \mid \key{and} \mid \key{or} \mid \key{not} \mid \key{eq?} \\
  1841. \Exp &::=& \ldots \mid \key{\#t} \mid \key{\#f} \mid
  1842. \IF{\Exp}{\Exp}{\Exp}
  1843. \end{array}
  1844. \]
  1845. \end{minipage}
  1846. }
  1847. \caption{The $R_2$ language, an extension of $R_1$
  1848. (Figure~\ref{fig:s0-syntax}).}
  1849. \label{fig:s2-syntax}
  1850. \end{figure}
  1851. \section{Type Checking $R_2$ Programs}
  1852. \marginpar{\scriptsize Type checking is a difficult thing to cover, I think, without having 522 as a prerequisite for this course. -- Cam}
  1853. % T ::= Integer | Boolean
  1854. It is common practice to specify a type system by writing rules for
  1855. each kind of AST node. For example, the rule for \key{if} is:
  1856. \begin{quote}
  1857. For any expressions $e_1, e_2, e_3$ and any type $T$, if $e_1$ has
type \key{Boolean}, $e_2$ has type $T$, and $e_3$ has type $T$, then
  1859. $\IF{e_1}{e_2}{e_3}$ has type $T$.
  1860. \end{quote}
  1861. It is also common practice to write rules using a horizontal line,
  1862. with the conditions written above the line and the conclusion written
  1863. below the line.
  1864. \begin{equation*}
\inference{e_1 \text{ has type } \key{Boolean} &
  1866. e_2 \text{ has type } T & e_3 \text{ has type } T}
  1867. {\IF{e_1}{e_2}{e_3} \text{ has type } T}
  1868. \end{equation*}
  1869. Because the phrase ``has type'' is repeated so often in these type
  1870. checking rules, it is abbreviated to just a colon. So the above rule
  1871. is abbreviated to the following.
  1872. \begin{equation*}
\inference{e_1 : \key{Boolean} & e_2 : T & e_3 : T}
  1874. {\IF{e_1}{e_2}{e_3} : T}
  1875. \end{equation*}
  1876. The $\LET{x}{e_1}{e_2}$ construct poses an interesting challenge. The
  1877. variable $x$ is assigned the value of $e_1$ and then $x$ can be used
  1878. inside $e_2$. When we get to an occurrence of $x$ inside $e_2$, how do
  1879. we know what type the variable should be? The answer is that we need
  1880. a way to map from variable names to types. Such a mapping is called a
  1881. \emph{type environment} (aka. \emph{symbol table}). The capital Greek
  1882. letter gamma, written $\Gamma$, is used for referring to type
environments. The notation $\Gamma, x : T$ stands for
  1884. making a copy of the environment $\Gamma$ and then associating $T$
  1885. with the variable $x$ in the new environment. We write $\Gamma(x)$ to
  1886. lookup the associated type for $x$. The type checking rules for
  1887. \key{let} and variables are as follows.
  1888. \begin{equation*}
  1889. \inference{e_1 : T_1 \text{ in } \Gamma &
  1890. e_2 : T_2 \text{ in } \Gamma,x:T_1}
  1891. {\LET{x}{e_1}{e_2} : T_2 \text{ in } \Gamma}
  1892. \qquad
  1893. \inference{\Gamma(x) = T}
  1894. {x : T \text{ in } \Gamma}
  1895. \end{equation*}
  1896. Type checking has roots in logic, and logicians have a tradition of
  1897. writing the environment on the left-hand side and separating it from
the expression with a turnstile ($\vdash$). The turnstile does not
  1899. have any intrinsic meaning per se. It is punctuation that separates
  1900. the environment $\Gamma$ from the expression $e$. So the above typing
  1901. rules are written as follows.
  1902. \begin{equation*}
  1903. \inference{\Gamma \vdash e_1 : T_1 &
  1904. \Gamma,x:T_1 \vdash e_2 : T_2}
  1905. {\Gamma \vdash \LET{x}{e_1}{e_2} : T_2}
  1906. \qquad
  1907. \inference{\Gamma(x) = T}
  1908. {\Gamma \vdash x : T}
  1909. \end{equation*}
  1910. Overall, the statement $\Gamma \vdash e : T$ is an example of what is
  1911. called a \emph{judgment}. In particular, this judgment says, ``In
  1912. environment $\Gamma$, expression $e$ has type $T$.''
  1913. Figure~\ref{fig:S1-type-system} shows the type checking rules for
  1914. $R_2$.
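Each rule corresponds to one clause of a recursive type checking
function. The sketch below uses an association list for $\Gamma$ and
assumes Booleans appear in the AST as the literals \key{\#t} and
\key{\#f}; the operator clause and proper error reporting are left out.
\begin{lstlisting}
;; Sketch: a type checker following the rules above.
(define (typecheck env e)
  (match e
    [(? symbol?) (cdr (assq e env))]           ; Gamma(x) = T
    [(? integer?) 'Integer]
    [#t 'Boolean]
    [#f 'Boolean]
    [`(let ([,x ,e1]) ,body)
     (let ([T1 (typecheck env e1)])
       (typecheck (cons (cons x T1) env) body))]
    [`(if ,cnd ,thn ,els)
     (unless (eq? (typecheck env cnd) 'Boolean)
       (error "if condition must be a Boolean"))
     (let ([T2 (typecheck env thn)]
           [T3 (typecheck env els)])
       (unless (equal? T2 T3)
         (error "branches of an if must have the same type"))
       T2)]))
\end{lstlisting}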
  1915. \begin{figure}
  1916. \begin{gather*}
  1917. \inference{\Gamma(x) = T}
  1918. {\Gamma \vdash x : T}
  1919. \qquad
  1920. \inference{\Gamma \vdash e_1 : T_1 &
  1921. \Gamma,x:T_1 \vdash e_2 : T_2}
  1922. {\Gamma \vdash \LET{x}{e_1}{e_2} : T_2}
  1923. \\[2ex]
  1924. \inference{}{\Gamma \vdash n : \key{Integer}}
  1925. \quad
  1926. \inference{\Gamma \vdash e_i : T_i \ ^{\forall i \in 1\ldots n} & \Delta(\Op,T_1,\ldots,T_n) = T}
  1927. {\Gamma \vdash (\Op \; e_1 \ldots e_n) : T}
  1928. \\[2ex]
  1929. \inference{}{\Gamma \vdash \key{\#t} : \key{Boolean}}
  1930. \quad
  1931. \inference{}{\Gamma \vdash \key{\#f} : \key{Boolean}}
  1932. \quad
\inference{\Gamma \vdash e_1 : \key{Boolean} \\
  1934. \Gamma \vdash e_2 : T &
  1935. \Gamma \vdash e_3 : T}
  1936. {\Gamma \vdash \IF{e_1}{e_2}{e_3} : T}
  1937. \end{gather*}
  1938. \caption{Type System for $R_2$.}
  1939. \label{fig:S1-type-system}
  1940. \end{figure}
  1941. \begin{figure}
  1942. \begin{align*}
  1943. \Delta(\key{+},\key{Integer},\key{Integer}) &= \key{Integer} \\
  1944. \Delta(\key{-},\key{Integer},\key{Integer}) &= \key{Integer} \\
  1945. \Delta(\key{-},\key{Integer}) &= \key{Integer} \\
  1946. \Delta(\key{*},\key{Integer},\key{Integer}) &= \key{Integer} \\
  1947. \Delta(\key{read}) &= \key{Integer} \\
  1948. \Delta(\key{and},\key{Boolean},\key{Boolean}) &= \key{Boolean} \\
  1949. \Delta(\key{or},\key{Boolean},\key{Boolean}) &= \key{Boolean} \\
  1950. \Delta(\key{not},\key{Boolean}) &= \key{Boolean} \\
  1951. \Delta(\key{eq?},\key{Integer},\key{Integer}) &= \key{Boolean} \\
  1952. \Delta(\key{eq?},\key{Boolean},\key{Boolean}) &= \key{Boolean}
  1953. \end{align*}
\caption{Types for the primitive operators.}
  1955. \end{figure}
  1956. \section{The $C_1$ Language}
  1957. \begin{figure}[htbp]
  1958. \[
  1959. \begin{array}{lcl}
  1960. \Arg &::=& \ldots \mid \key{\#t} \mid \key{\#f} \\
  1961. \Stmt &::=& \ldots \mid \IF{\Exp}{\Stmt^{*}}{\Stmt^{*}}
  1962. \end{array}
  1963. \]
  1964. \caption{The $C_1$ intermediate language, an extension of $C_0$
  1965. (Figure~\ref{fig:c0-syntax}).}
  1966. \label{fig:c1-syntax}
  1967. \end{figure}
  1968. \section{Flatten Expressions}
  1969. \section{Select Instructions}
  1970. \section{Register Allocation}
  1971. \section{Patch Instructions}
  1972. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1973. \chapter{Tuples and Heap Allocation}
  1974. \label{ch:tuples}
  1975. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1976. \chapter{Garbage Collection}
  1977. \label{ch:gc}
  1978. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1979. \chapter{Functions}
  1980. \label{ch:functions}
  1981. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1982. \chapter{Lexically Scoped Functions}
  1983. \label{ch:lambdas}
  1984. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1985. \chapter{Mutable Data}
  1986. \label{ch:mutable-data}
  1987. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1988. \chapter{The Dynamic Type}
  1989. \label{ch:type-dynamic}
  1990. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1991. \chapter{Parametric Polymorphism}
  1992. \label{ch:parametric-polymorphism}
  1993. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1994. \chapter{High-level Optimization}
  1995. \label{ch:high-level-optimization}
  1996. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1997. \chapter{Appendix}
  1998. \section{Interpreters}
  1999. \label{appendix:interp}
  2000. We provide several interpreters in the \key{interp.rkt} file. The
  2001. \key{interp-scheme} function takes an AST in one of the Racket-like
  2002. languages considered in this book ($R_1, R_2, \ldots$) and interprets
  2003. the program, returning the result value. The \key{interp-C} function
  2004. interprets an AST for a program in one of the C-like languages ($C_0,
  2005. C_1, \ldots$), and the \key{interp-x86} function interprets an AST for
  2006. an x86-64 program.
  2007. \section{Utility Functions}
  2008. \label{appendix:utilities}
The utility functions described in this section can be found in the
  2010. \key{utilities.rkt} file.
  2011. The \key{assert} function displays the error message \key{msg} if the
  2012. Boolean \key{bool} is false.
  2013. \begin{lstlisting}
  2014. (define (assert msg bool) ...)
  2015. \end{lstlisting}
  2016. The \key{lookup} function ...
The \key{interp-tests} function takes a compiler name (a string), a
description of the passes, a test family name (a string), and a list of
test numbers, and runs the compiler passes and the interpreters to
check whether the passes are correct. The description of the passes is a
  2021. list with one entry per pass. An entry is a list with three things: a
  2022. string giving the name of the pass, the function that implements the
  2023. pass (a translator from AST to AST), and a function that implements
  2024. the interpreter (a function from AST to result value). The
  2025. interpreters from Appendix~\ref{appendix:interp} make a good choice.
  2026. The \key{interp-tests} function assumes that the subdirectory
  2027. \key{tests} has a bunch of Scheme programs whose names all start with
  2028. the family name, followed by an underscore and then the test number,
  2029. ending in \key{.scm}. Also, for each Scheme program there is a file
with the same name except that it ends in \key{.in}, which provides
  2031. the input for the Scheme program.
  2032. \begin{lstlisting}
(define (interp-tests name passes test-family test-nums) ...)
  2034. \end{lstlisting}
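For example, testing just the \key{uniquify} pass on test files
\key{r1\_1.scm} through \key{r1\_3.scm} might look as follows; the shape
of the pass entry mirrors the description above and the curried call to
\key{uniquify} assumes the skeleton of Figure~\ref{fig:uniquify-s0}.
\begin{lstlisting}
(interp-tests "uniquify"
              (list (list "uniquify" (uniquify '()) interp-scheme))
              "r1"
              (list 1 2 3))
\end{lstlisting}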
The \key{compiler-tests} function takes a compiler name (a string), a
description of the passes (see the comment for \key{interp-tests}), a
test family name (a string), and a list of test numbers (see the
comment for \key{interp-tests}), and runs the compiler to generate x86-64 (a
  2039. \key{.s} file) and then runs gcc to generate machine code. It runs
  2040. the machine code and checks that the output is 42.
  2041. \begin{lstlisting}
  2042. (define (compiler-tests name passes test-family test-nums) ...)
  2043. \end{lstlisting}
The \key{compile-file} function takes a description of the compiler passes
  2045. (see the comment for \key{interp-tests}) and returns a function that,
  2046. given a program file name (a string ending in \key{.scm}), applies all
  2047. of the passes and writes the output to a file whose name is the same
as the program file name but with \key{.scm} replaced with \key{.s}.
  2049. \begin{lstlisting}
  2050. (define (compile-file passes)
  2051. (lambda (prog-file-name) ...))
  2052. \end{lstlisting}
  2053. \bibliographystyle{plainnat}
  2054. \bibliography{all}
  2055. \end{document}
  2056. %% LocalWords: Dybvig Waddell Abdulaziz Ghuloum Dipanwita
  2057. %% LocalWords: Sarkar lcl Matz aa representable