% book.tex
  1. \documentclass[12pt]{book}
  2. \usepackage[T1]{fontenc}
  3. \usepackage[utf8]{inputenc}
  4. \usepackage{lmodern}
  5. \usepackage{hyperref}
  6. \usepackage{graphicx}
  7. \usepackage[english]{babel}
  8. \usepackage{listings}
  9. \usepackage{amsmath}
  10. \usepackage{amsthm}
  11. \usepackage{amssymb}
  12. \usepackage{natbib}
  13. \usepackage{stmaryrd}
  14. \usepackage{xypic}
  15. \usepackage{semantic}
  16. \usepackage{wrapfig}
  17. % Computer Modern is already the default. -Jeremy
  18. %\renewcommand{\ttdefault}{cmtt}
% Configure the listings package: Lisp is the default language for all
% code listings, set in small typewriter type; '@' escapes back to
% LaTeX inside a listing; fullflexible columns avoid artificial
% letter-spacing of code.
  19. \lstset{%
  20. language=Lisp,
  21. basicstyle=\ttfamily\small,
  22. escapechar=@,
  23. columns=fullflexible
  24. }
% Theorem-like environments. All of them share the 'theorem' counter
% ([theorem]), so theorems, lemmas, definitions, exercises, etc. are
% numbered in one common sequence per chapter.
  25. \newtheorem{theorem}{Theorem}
  26. \newtheorem{lemma}[theorem]{Lemma}
  27. \newtheorem{corollary}[theorem]{Corollary}
  28. \newtheorem{proposition}[theorem]{Proposition}
  29. \newtheorem{constraint}[theorem]{Constraint}
  30. \newtheorem{definition}[theorem]{Definition}
  31. \newtheorem{exercise}[theorem]{Exercise}
  32. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  33. % 'dedication' environment: To add a dedication paragraph at the start of book %
  34. % Source: http://www.tug.org/pipermail/texhax/2010-June/015184.html %
  35. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% 'dedication' environment: typesets its contents on a fresh
% right-hand page with no header/footer. The \stretch{1}/\stretch{3}
% pair pushes the text about a quarter of the way down the page, and
% the text is set ragged-right in a box two-thirds of the text width,
% pushed to the right margin by \hfill.
  36. \newenvironment{dedication}
  37. {
  38. \cleardoublepage
  39. \thispagestyle{empty}
  40. \vspace*{\stretch{1}}
  41. \hfill\begin{minipage}[t]{0.66\textwidth}
  42. \raggedright
  43. }
  44. {
  45. \end{minipage}
  46. \vspace*{\stretch{3}}
  47. \clearpage
  48. }
  49. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  50. % Chapter quote at the start of chapter %
  51. % Source: http://tex.stackexchange.com/a/53380 %
  52. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  53. \makeatletter
% Empty \@chapapp removes the word "Chapter" from chapter headings.
  54. \renewcommand{\@chapapp}{}% Not necessary...
% 'chapquote' environment: an epigraph at the start of a chapter.
% Optional argument #1 (default 2em) is the indent on each side;
% mandatory argument #2 is the quote's author, typeset flush right
% after the italicized quote body.
  55. \newenvironment{chapquote}[2][2em]
  56. {\setlength{\@tempdima}{#1}%
  57. \def\chapquote@author{#2}%
% \parshape with one line spec: indent by #1 and narrow the measure
% by 2*#1, so the quote is inset from both margins.
  58. \parshape 1 \@tempdima \dimexpr\textwidth-2\@tempdima\relax%
  59. \itshape}
  60. {\par\normalfont\hfill--\ \chapquote@author\hspace*{\@tempdima}\par\bigskip}
  61. \makeatother
  62. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Notation helpers used throughout the book.
% \itm{x} typesets an italic math identifier (usable in text mode);
% \key{x} typesets a language keyword in typewriter type.
  63. \newcommand{\itm}[1]{\ensuremath{\mathit{#1}}}
% Metavariables for grammar nonterminals.
  64. \newcommand{\Stmt}{\itm{stmt}}
  65. \newcommand{\Exp}{\itm{exp}}
  66. \newcommand{\Instr}{\itm{instr}}
  67. \newcommand{\Prog}{\itm{prog}}
  68. \newcommand{\Arg}{\itm{arg}}
  69. \newcommand{\Int}{\itm{int}}
  70. \newcommand{\Var}{\itm{var}}
  71. \newcommand{\Op}{\itm{op}}
  72. \newcommand{\key}[1]{\texttt{#1}}
% S-expression forms for the intermediate-language constructs,
% e.g. \BINOP{+}{e1}{e2} typesets (+ e1 e2).
  73. \newcommand{\READ}{(\key{read})}
  74. \newcommand{\UNIOP}[2]{(\key{#1}\,#2)}
  75. \newcommand{\BINOP}[3]{(\key{#1}\,#2\,#3)}
  76. \newcommand{\LET}[3]{(\key{let}\,([#1\;#2])\,#3)}
  77. \newcommand{\ASSIGN}[2]{(\key{assign}\,#1\;#2)}
  78. \newcommand{\RETURN}[1]{(\key{return}\,#1)}
  79. \newcommand{\INT}[1]{(\key{int}\;#1)}
  80. \newcommand{\REG}[1]{(\key{reg}\;#1)}
  81. \newcommand{\VAR}[1]{(\key{var}\;#1)}
  82. \newcommand{\STACKLOC}[1]{(\key{stack}\;#1)}
  83. \newcommand{\IF}[3]{(\key{if}\,#1\;#2\;#3)}
  84. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  85. \title{\Huge \textbf{Essentials of Compilation} \\
  86. \huge An Incremental Approach}
  87. \author{\textsc{Jeremy G. Siek} \\
  88. %\thanks{\url{http://homes.soic.indiana.edu/jsiek/}} \\
  89. Indiana University \\
  90. \\
  91. with contributions from: \\
  92. Carl Factora
  93. }
  94. \begin{document}
  95. \frontmatter
  96. \maketitle
  97. \begin{dedication}
  98. This book is dedicated to the programming language wonks at Indiana
  99. University.
  100. \end{dedication}
  101. \tableofcontents
  102. %\listoffigures
  103. %\listoftables
  104. \mainmatter
  105. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  106. \chapter*{Preface}
  107. Talk about nano-pass \citep{Sarkar:2004fk,Keep:2012aa} and incremental
  108. compilers \citep{Ghuloum:2006bh}.
  109. Talk about pre-requisites.
  110. %\section*{Structure of book}
  111. % You might want to add short description about each chapter in this book.
  112. %\section*{About the companion website}
  113. %The website\footnote{\url{https://github.com/amberj/latex-book-template}} for %this file contains:
  114. %\begin{itemize}
  115. % \item A link to (freely downloadable) latest version of this document.
  116. % \item Link to download LaTeX source for this document.
  117. % \item Miscellaneous material (e.g. suggested readings etc).
  118. %\end{itemize}
  119. \section*{Acknowledgments}
  120. Need to give thanks to
  121. \begin{itemize}
  122. \item Kent Dybvig
  123. \item Daniel P. Friedman
  124. \item Abdulaziz Ghuloum
  125. \item Oscar Waddell
  126. \item Dipanwita Sarkar
  127. \item Ronald Garcia
  128. \item Bor-Yuh Evan Chang
  129. \end{itemize}
  130. %\mbox{}\\
  131. %\noindent Amber Jain \\
  132. %\noindent \url{http://amberj.devio.us/}
  133. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  134. \chapter{Preliminaries}
  135. \label{ch:trees-recur}
  136. In this chapter, we review the basic tools that are needed for
  137. implementing a compiler. We use abstract syntax trees (ASTs) in the
  138. form of S-expressions to represent programs (Section~\ref{sec:ast})
  139. and pattern matching to inspect an AST node
  140. (Section~\ref{sec:pattern-matching}). We use recursion to construct
  141. and deconstruct entire ASTs (Section~\ref{sec:recursion}).
  142. \section{Abstract Syntax Trees}
  143. \label{sec:ast}
  144. The primary data structure that is commonly used for representing
  145. programs is the \emph{abstract syntax tree} (AST). When considering
  146. some part of a program, a compiler needs to ask what kind of part it
  147. is and what sub-parts it has. For example, the program on the left is
  148. represented by the AST on the right.
  149. \begin{center}
  150. \begin{minipage}{0.4\textwidth}
  151. \begin{lstlisting}
  152. (+ (read) (- 8))
  153. \end{lstlisting}
  154. \end{minipage}
  155. \begin{minipage}{0.4\textwidth}
  156. \begin{equation}
  157. \xymatrix@=15pt{
  158. & *++[Fo]{+} \ar[dl]\ar[dr]& \\
  159. *+[Fo]{\tt read} & & *++[Fo]{-} \ar[d] \\
  160. & & *++[Fo]{\tt 8}
  161. } \label{eq:arith-prog}
  162. \end{equation}
  163. \end{minipage}
  164. \end{center}
  165. We shall use the standard terminology for trees: each square above is
  166. called a \emph{node}. The arrows connect a node to its \emph{children}
  167. (which are also nodes). The top-most node is the \emph{root}. Every
  168. node except for the root has a \emph{parent} (the node it is the child
  169. of). If a node has no children, it is a \emph{leaf} node. Otherwise
  170. it is an \emph{internal} node.
  171. When deciding how to compile the above program, we need to know that
  172. the root node is an addition and that it has two children: \texttt{read}
  173. and the negation of \texttt{8}. The abstract syntax tree data
  174. structure directly supports these queries and hence is a good
  175. choice. In this book, we will often write down the textual
  176. representation of a program even when we really have in mind the AST,
  177. simply because the textual representation is easier to typeset. We
  178. recommend that, in your mind, you should always interpret programs as
  179. abstract syntax trees.
  180. \section{Grammars}
  181. \label{sec:grammar}
  182. A programming language can be thought of as a \emph{set} of programs.
  183. The set is typically infinite (one can always create larger and larger
  184. programs), so one cannot simply describe a language by listing all of
  185. the programs in the language. Instead we write down a set of rules, a
  186. \emph{grammar}, for building programs. We shall write our rules in a
  187. variant of Backus-Naur Form (BNF)~\citep{Backus:1960aa,Knuth:1964aa}.
  188. As an example, we describe a small language, named $\itm{arith}$, of
  189. integers and arithmetic operations. The first rule says that any
  190. integer is in the language:
  191. \begin{equation}
  192. \itm{arith} ::= \Int \label{eq:arith-int}
  193. \end{equation}
  194. Each rule has a left-hand-side and a right-hand-side. The way to read
  195. a rule is that if you have all the program parts on the
  196. right-hand-side, then you can create an AST node and categorize it
  197. according to the left-hand-side. (We do not define $\Int$ because the
  198. reader already knows what an integer is.) A name such as $\itm{arith}$
  199. that is defined by the rules, is a \emph{non-terminal}.
  200. The second rule for the $\itm{arith}$ language is the \texttt{read}
  201. operation, which receives an input integer from the user of the program.
  202. \begin{equation}
  203. \itm{arith} ::= (\key{read}) \label{eq:arith-read}
  204. \end{equation}
  205. The third rule says that, given an $\itm{arith}$, you can build
  206. another $\itm{arith}$ by negating it.
  207. \begin{equation}
  208. \itm{arith} ::= (\key{-} \; \itm{arith}) \label{eq:arith-neg}
  209. \end{equation}
  210. Symbols such as \key{-} that play an auxiliary role in the abstract
  211. syntax are called \emph{terminal} symbols.
  212. By rule \eqref{eq:arith-int}, \texttt{8} is an $\itm{arith}$; then by
  213. rule \eqref{eq:arith-neg}, the following AST is an $\itm{arith}$.
  214. \begin{center}
  215. \begin{minipage}{0.25\textwidth}
  216. \begin{lstlisting}
  217. (- 8)
  218. \end{lstlisting}
  219. \end{minipage}
  220. \begin{minipage}{0.25\textwidth}
  221. \begin{equation}
  222. \xymatrix@=15pt{
  223. *+[Fo]{-} \ar[d] \\
  224. *+[Fo]{\tt 8}
  225. }
  226. \label{eq:arith-neg8}
  227. \end{equation}
  228. \end{minipage}
  229. \end{center}
  230. The last rule for the $\itm{arith}$ language is for addition:
  231. \begin{equation}
  232. \itm{arith} ::= (\key{+} \; \itm{arith} \; \itm{arith}) \label{eq:arith-add}
  233. \end{equation}
  234. Now we can see that the AST \eqref{eq:arith-prog} is in $\itm{arith}$.
  235. We know that \lstinline{(read)} is in $\itm{arith}$ by rule
  236. \eqref{eq:arith-read} and we have shown that \texttt{(- 8)} is in
  237. $\itm{arith}$, so we can apply rule \eqref{eq:arith-add} to show that
  238. \texttt{(+ (read) (- 8))} is in the $\itm{arith}$ language.
  239. If you have an AST for which the above four rules do not apply, then
  240. the AST is not in $\itm{arith}$. For example, the AST \texttt{(- (read)
  241. (+ 8))} is not in $\itm{arith}$ because there are no rules for $+$
  242. with only one argument, nor for $-$ with two arguments. Whenever we
  243. define a language through a grammar, we implicitly mean for the
  244. language to be the smallest set of programs that are justified by the
  245. rules. That is, the language only includes those programs that the
  246. rules allow.
  247. It is common to have many rules with the same left-hand side, so the
  248. following vertical bar notation is used to gather several rules on one
  249. line. We refer to each clause between a vertical bar as an
  250. ``alternative''.
  251. \[
  252. \itm{arith} ::= \Int \mid (\key{read}) \mid (\key{-} \; \itm{arith}) \mid
  253. (\key{+} \; \itm{arith} \; \itm{arith})
  254. \]
  255. \section{S-Expressions}
  256. \label{sec:s-expr}
  257. Racket, as a descendant of Lisp~\citep{McCarthy:1960dz}, has
  258. particularly convenient support for creating and manipulating abstract
  259. syntax trees with its \emph{symbolic expression} feature, or
  260. S-expression for short. We can create an S-expression simply by
  261. writing a backquote followed by the textual representation of the
  262. AST. (Technically speaking, this is called a \emph{quasiquote} in
  263. Racket.) For example, an S-expression to represent the AST
  264. \eqref{eq:arith-prog} is created by the following Racket expression:
  265. \begin{center}
  266. \texttt{`(+ (read) (- 8))}
  267. \end{center}
  268. To build larger S-expressions one often needs to splice together
  269. several smaller S-expressions. Racket provides the comma operator to
  270. splice an S-expression into a larger one. For example, instead of
  271. creating the S-expression for AST \eqref{eq:arith-prog} all at once,
  272. we could have first created an S-expression for AST
  273. \eqref{eq:arith-neg8} and then spliced that into the addition
  274. S-expression.
  275. \begin{lstlisting}
  276. (define ast1.4 `(- 8))
  277. (define ast1.1 `(+ (read) ,ast1.4))
  278. \end{lstlisting}
  279. In general, the Racket expression that follows the comma (splice)
  280. can be any expression that computes an S-expression.
  281. \section{Pattern Matching}
  282. \label{sec:pattern-matching}
  283. As mentioned above, one of the operations that a compiler needs to
  284. perform on an AST is to access the children of a node. Racket
  285. provides the \texttt{match} form to access the parts of an
  286. S-expression. Consider the following example and the output on the
  287. right.
  288. \begin{center}
  289. \begin{minipage}{0.5\textwidth}
  290. \begin{lstlisting}
  291. (match ast1.1
  292. [`(,op ,child1 ,child2)
  293. (print op) (newline)
  294. (print child1) (newline)
  295. (print child2)])
  296. \end{lstlisting}
  297. \end{minipage}
  298. \vrule
  299. \begin{minipage}{0.25\textwidth}
  300. \begin{lstlisting}
  301. '+
  302. '(read)
  303. '(- 8)
  304. \end{lstlisting}
  305. \end{minipage}
  306. \end{center}
  307. The \texttt{match} form takes AST \eqref{eq:arith-prog} and binds its
  308. parts to the three variables \texttt{op}, \texttt{child1}, and
  309. \texttt{child2}. In general, a match clause consists of a
  310. \emph{pattern} and a \emph{body}. The pattern is a quoted S-expression
  311. that may contain pattern-variables (preceded by a comma). The body
  312. may contain any Racket code.
  313. A \texttt{match} form may contain several clauses, as in the following
  314. function \texttt{leaf?} that recognizes when an $\itm{arith}$ node is
  315. a leaf. The \texttt{match} proceeds through the clauses in order,
  316. checking whether the pattern can match the input S-expression. The
  317. body of the first clause that matches is executed. The output of
  318. \texttt{leaf?} for several S-expressions is shown on the right. In the
  319. below \texttt{match}, we see another form of pattern: the \texttt{(?
  320. fixnum?)} applies the predicate \texttt{fixnum?} to the input
  321. S-expression to see if it is a machine-representable integer.
  322. \begin{center}
  323. \begin{minipage}{0.5\textwidth}
  324. \begin{lstlisting}
  325. (define (leaf? arith)
  326. (match arith
  327. [(? fixnum?) #t]
  328. [`(read) #t]
  329. [`(- ,c1) #f]
  330. [`(+ ,c1 ,c2) #f]))
  331. (leaf? `(read))
  332. (leaf? `(- 8))
  333. (leaf? `(+ (read) (- 8)))
  334. \end{lstlisting}
  335. \end{minipage}
  336. \vrule
  337. \begin{minipage}{0.25\textwidth}
  338. \begin{lstlisting}
  339. #t
  340. #f
  341. #f
  342. \end{lstlisting}
  343. \end{minipage}
  344. \end{center}
  345. %% From this grammar, we have defined {\tt arith} by constraining its
  346. %% syntax. Effectively, we have defined {\tt arith} by first defining
  347. %% what a legal expression (or program) within the language is. To
  348. %% clarify further, we can think of {\tt arith} as a \textit{set} of
  349. %% expressions, where, under syntax constraints, \mbox{{\tt (+ 1 1)}} and
  350. %% {\tt -1} are inhabitants and {\tt (+ 3.2 3)} and {\tt (++ 2 2)} are
  351. %% not (see ~Figure\ref{fig:ast}).
  352. %% The relationship between a grammar and an AST is then similar to that
  353. %% of a set and an inhabitant. From this, every syntaxically valid
  354. %% expression, under the constraints of a grammar, can be represented by
  355. %% an abstract syntax tree. This is because {\tt arith} is essentially a
  356. %% specification of a Tree-like data-structure. In this case, tree nodes
  357. %% are the arithmetic operators {\tt +} and {\tt -}, and the leaves are
  358. %% integer constants. From this, we can represent any expression of {\tt
  359. %% arith} using a \textit{syntax expression} (s-exp).
  360. %% \begin{figure}[htbp]
  361. %% \centering
  362. %% \fbox{
  363. %% \begin{minipage}{0.85\textwidth}
  364. %% \[
  365. %% \begin{array}{lcl}
  366. %% exp &::=& sexp \mid (sexp*) \mid (unquote \; sexp) \\
  367. %% sexp &::=& Val \mid Var \mid (quote \; exp) \mid (quasiquote \; exp)
  368. %% \end{array}
  369. %% \]
  370. %% \end{minipage}
  371. %% }
  372. %% \caption{\textit{s-exp} syntax: $Val$ and $Var$ are shorthand for Value and Variable.}
  373. %% \label{fig:sexp-syntax}
  374. %% \end{figure}
  375. %% For our purposes, we will treat s-exps equivalent to \textit{possibly
  376. %% deeply-nested lists}. For the sake of brevity, the symbols $single$
  377. %% $quote$ ('), $backquote$ (`), and $comma$ (,) are reader sugar for
  378. %% {\tt quote}, {\tt quasiquote}, and {\tt unquote}. We provide several
  379. %% examples of s-exps and functions that return s-exps below. We use the
  380. %% {\tt >} symbol to represent interaction with a Racket REPL.
  381. %% \begin{verbatim}
  382. %% (define 1plus1 `(1 + 1))
  383. %% (define (1plusX x) `(1 + ,x))
  384. %% (define (XplusY x y) `(,x + ,y))
  385. %% > 1plus1
  386. %% '(1 + 1)
  387. %% > (1plusX 1)
  388. %% '(1 + 1)
  389. %% > (XplusY 1 1)
  390. %% '(1 + 1)
  391. %% > `,1plus1
  392. %% '(1 + 1)
  393. %% \end{verbatim}
  394. %% In any expression wrapped with {\tt quasiquote} ({\tt `}), sub-expressions
  395. %% wrapped with an {\tt unquote} expression are evaluated before the entire
  396. %% expression is returned wrapped in a {\tt quote} expression.
  397. % \marginpar{\scriptsize Introduce s-expressions, quote, and quasi-quote, and comma in
  398. % this section. Make sure to include examples of ASTs. The description
  399. % here of grammars is incomplete. It doesn't really say what grammars are or what they do, it
  400. % just shows an example. I would recommend reading my blog post: a crash course on
  401. % notation in PL theory, especially the sections on Definition by Rules
  402. % and Language Syntax and Grammars. -JGS}
  403. % \marginpar{\scriptsize The lambda calculus is more complex of an example that what we really
  404. % need at this point. I think we can make due with just integers and arithmetic. -JGS}
  405. % \marginpar{\scriptsize Regarding de-Bruijnizing as an example... that strikes me
  406. % as something that may be foreign to many readers. The examples in this
  407. % first chapter should try to be simple and hopefully connect with things
  408. % that the reader is already familiar with. -JGS}
  409. % \begin{enumerate}
  410. % \item Syntax transformation
  411. % \item Some Racket examples (factorial?)
  412. % \end{enumerate}
  413. %% For our purposes, our compiler will take a Scheme-like expression and
  414. %% transform it to X86\_64 Assembly. Along the way, we transform each
  415. %% input expression into a handful of \textit{intermediary languages}
  416. %% (IL). A key tool for transforming one language into another is
  417. %% \textit{pattern matching}.
  418. %% Racket provides a built-in pattern-matcher, {\tt match}, that we can
  419. %% use to perform operations on s-exps. As a preliminary example, we
  420. %% include a familiar definition of factorial, first without using match.
  421. %% \begin{verbatim}
  422. %% (define (! n)
  423. %% (if (zero? n) 1
  424. %% (* n (! (sub1 n)))))
  425. %% \end{verbatim}
  426. %% In this form of factorial, we are simply conditioning (viz. {\tt zero?})
  427. %% on the inputted natural number, {\tt n}. If we rewrite factorial using
  428. %% {\tt match}, we can match on the actual value of {\tt n}.
  429. %% \begin{verbatim}
  430. %% (define (! n)
  431. %% (match n
  432. %% (0 1)
  433. %% (n (* n (! (sub1 n))))))
  434. %% \end{verbatim}
  435. %% In this definition of factorial, the first {\tt match} line (viz. {\tt (0 1)})
  436. %% can be read as "if {\tt n} is 0, then return 1." The second line matches on an
  437. %% arbitrary variable, {\tt n}, and does not place any constraints on it. We could
  438. %% have also written this line as {\tt (else (* n (! (sub1 n))))}, where {\tt n}
  439. %% is scoped by {\tt match}. Of course, we can also use {\tt match} to pattern
  440. %% match on more complex expressions.
  441. \section{Recursion}
  442. \label{sec:recursion}
  443. Programs are inherently recursive in that an $\itm{arith}$ AST is made
  444. up of smaller $\itm{arith}$ ASTs. Thus, the natural way to process an
  445. entire program is with a recursive function. As a first example of
  446. such a function, we define \texttt{arith?} below, which takes an
  447. arbitrary S-expression, {\tt sexp}, and determines whether or not {\tt
  448. sexp} is in {\tt arith}. Note that each match clause corresponds to
  449. one grammar rule for $\itm{arith}$ and the body of each clause makes a
  450. recursive call for each child node. This pattern of recursive function
  451. is so common that it has a name, \emph{structural recursion}. In
  452. general, when a recursive function is defined using a set of match
  453. clauses that correspond to a grammar, and each clause body makes a
  454. recursive call on each child node, then we say the function is defined
  455. by structural recursion.
  456. \begin{center}
  457. \begin{minipage}{0.7\textwidth}
  458. \begin{lstlisting}
  459. (define (arith? sexp)
  460. (match sexp
  461. [(? fixnum?) #t]
  462. [`(read) #t]
  463. [`(- ,e) (arith? e)]
  464. [`(+ ,e1 ,e2)
  465. (and (arith? e1) (arith? e2))]
  466. [else #f]))
  467. (arith? `(+ (read) (- 8)))
  468. (arith? `(- (read) (+ 8)))
  469. \end{lstlisting}
  470. \end{minipage}
  471. \vrule
  472. \begin{minipage}{0.25\textwidth}
  473. \begin{lstlisting}
  474. #t
  475. #f
  476. \end{lstlisting}
  477. \end{minipage}
  478. \end{center}
  479. %% Here, {\tt \#:when} puts constraints on the value of matched expressions.
  480. %% In this case, we make sure that every sub-expression in \textit{op} position
  481. %% is either {\tt +} or {\tt -}. Otherwise, we return an error, signaling a
  482. %% non-{\tt arith} expression. As we mentioned earlier, every expression
  483. %% wrapped in an {\tt unquote} is evaluated first. When used in a LHS {\tt match}
  484. %% sub-expression, these expressions evaluate to the actual value of the matched
  485. %% expression (i.e., {\tt arith-exp}). Thus, {\tt `(,e1 ,op ,e2)} and
  486. %% {\tt `(e1 op e2)} are not equivalent.
  487. % \begin{enumerate}
  488. % \item \textit{What is a base case?}
  489. % \item Using on a language (lambda calculus ->
  490. % \end{enumerate}
  491. %% Before getting into more complex {\tt match} examples, we first
  492. %% introduce the concept of \textit{structural recursion}, which is the
  493. %% general name for recurring over Tree-like or \textit{possibly
  494. %% deeply-nested list} structures. The key to performing structural
  495. %% recursion, which from now on we refer to simply as recursion, is to
  496. %% have some form of specification for the structure we are recurring
  497. %% on. Luckily, we are already familiar with one: a BNF or grammar.
  498. %% For example, let's take the grammar for $S_0$, which we include below.
  499. %% Writing a recursive program that takes an arbitrary expression of $S_0$
  500. %% should handle each expression in the grammar. An example program that
  501. %% we can write is an $interpreter$. To keep our interpreter simple, we
  502. %% ignore the {\tt read} operator.
  503. %% \begin{figure}[htbp]
  504. %% \centering
  505. %% \fbox{
  506. %% \begin{minipage}{0.85\textwidth}
  507. %% \[
  508. %% \begin{array}{lcl}
  509. %% \Op &::=& \key{+} \mid \key{-} \mid \key{*} \mid \key{read} \\
  510. %% \Exp &::=& \Int \mid (\Op \; \Exp^{*}) \mid \Var \mid \LET{\Var}{\Exp}{\Exp}
  511. %% \end{array}
  512. %% \]
  513. %% \end{minipage}
  514. %% }
  515. %% \caption{The syntax of the $S_0$ language. The abbreviation \Op{} is
  516. %% short for operator, \Exp{} is short for expression, \Int{} for integer,
  517. %% and \Var{} for variable.}
  518. %% %\label{fig:s0-syntax}
  519. %% \end{figure}
  520. %% \begin{verbatim}
  521. %% \end{verbatim}
  522. \section{Interpreter}
  523. \label{sec:interp-arith}
  524. The meaning, or semantics, of a program is typically defined in the
  525. specification of the language. For example, the Scheme language is
  526. defined in the report by \cite{SPERBER:2009aa}. The Racket language is
  527. defined in its reference manual~\citep{plt-tr}. In this book we use an
  528. interpreter to define the meaning of each language that we consider,
  529. following Reynold's advice in this
  530. regard~\citep{reynolds72:_def_interp}. Here we will warm up by writing
  531. an interpreter for the $\itm{arith}$ language, which will also serve
  532. as a second example of structural recursion. The \texttt{interp-arith}
  533. function is defined in Figure~\ref{fig:interp-arith}. The body of the
  534. function is a match on the input expression \texttt{e} and there is
  535. one clause per grammar rule for $\itm{arith}$. The clauses for
  536. internal AST nodes make recursive calls to \texttt{interp-arith} on
  537. each child node.
  538. \begin{figure}[tbp]
  539. \begin{lstlisting}
  540. (define (interp-arith e)
;; Interpreter for the arith language: takes an arith expression
;; (integer literal, (read), negation, or addition) and returns a fixnum.
  541. (match e
;; An integer literal evaluates to itself.
  542. [(? fixnum?) e]
  543. [`(read)
;; Prompt the user for input; only a fixnum is a legal response.
  544. (define r (read))
  545. (cond [(fixnum? r) r]
  546. [else (error 'interp-arith "expected an integer" r)])]
;; Negation: interpret the child, then subtract it from 0 with fixnum arithmetic.
  547. [`(- ,e)
  548. (fx- 0 (interp-arith e))]
;; Addition: interpret both children, then combine with fx+.
  549. [`(+ ,e1 ,e2)
  550. (fx+ (interp-arith e1) (interp-arith e2))]
  551. ))
  552. \end{lstlisting}
  553. \caption{Interpreter for the $\itm{arith}$ language.}
  554. \label{fig:interp-arith}
  555. \end{figure}
  556. We make the simplifying design decision that the $\itm{arith}$
  557. language (and all of the languages in this book) only handle
  558. machine-representable integers, that is, the \texttt{fixnum} datatype
  559. in Racket. Thus, we implement the arithmetic operations using the
  560. appropriate fixnum operators.
  561. If we interpret the AST \eqref{eq:arith-prog} and give it the input
  562. \texttt{50}
  563. \begin{lstlisting}
  564. (interp-arith ast1.1)
  565. \end{lstlisting}
  566. we get the answer to life, the universe, and everything
  567. \begin{lstlisting}
  568. 42
  569. \end{lstlisting}
  570. The job of a compiler is to translate programs in one language into
  571. programs in another language (typically but not always a language with
  572. a lower level of abstraction) in such a way that each output program
  573. behaves the same way as the input program. This idea is depicted in
  574. the following diagram. Suppose we have two languages, $\mathcal{L}_1$
  575. and $\mathcal{L}_2$, and an interpreter for each language. Suppose
  576. that the compiler translates program $P_1$ in language $\mathcal{L}_1$
  577. into program $P_2$ in language $\mathcal{L}_2$. Then interpreting
  578. $P_1$ and $P_2$ on the respective interpreters for the two languages,
  579. and given the same inputs $i$, should yield the same output $o$.
  580. \begin{equation} \label{eq:compile-correct}
  581. \xymatrix@=50pt{
  582. P_1 \ar[r]^{compile}\ar[dr]_{\mathcal{L}_1-interp(i)} & P_2 \ar[d]^{\mathcal{L}_2-interp(i)} \\
  583. & o
  584. }
  585. \end{equation}
  586. In the next section we will see our first example of a compiler, which
  587. is also another example of structural recursion.
  588. \section{Partial Evaluation}
  589. \label{sec:partial-evaluation}
  590. In this section we consider a compiler that translates $\itm{arith}$
  591. programs into $\itm{arith}$ programs that are more efficient, that is,
  592. this compiler is an optimizer. Our optimizer will accomplish this by
  593. trying to eagerly compute the parts of the program that do not depend
  594. on any inputs. For example, given the following program
  595. \begin{lstlisting}
  596. (+ (read) (- (+ 5 3)))
  597. \end{lstlisting}
  598. our compiler will translate it into the program
  599. \begin{lstlisting}
  600. (+ (read) -8)
  601. \end{lstlisting}
  602. Figure~\ref{fig:pe-arith} gives the code for a simple partial
  603. evaluator for the $\itm{arith}$ language. The output of the partial
  604. evaluator is an $\itm{arith}$ program, which we build up using a
  605. combination of quasiquotes and commas. (Though no quasiquote is
  606. necessary for integers.) In Figure~\ref{fig:pe-arith}, the normal
  607. structural recursion is captured in the main \texttt{pe-arith}
  608. function whereas the code for partially evaluating negation and
  609. addition is factored out into two separate helper functions:
  610. \texttt{pe-neg} and \texttt{pe-add}. The input to these helper
  611. functions is the output of partially evaluating the children nodes.
  612. \begin{figure}[tbp]
  613. \begin{lstlisting}
  614. (define (pe-neg r)
;; Partially evaluate negation; r is the already partially-evaluated child.
  615. (match r
;; Child reduced to an integer: fold the negation at compile time.
  616. [(? fixnum?) (fx- 0 r)]
;; Otherwise residualize: build a negation AST node around r.
  617. [else `(- ,r)]))
  618. (define (pe-add r1 r2)
;; Partially evaluate addition; r1 and r2 are partially-evaluated children.
  619. (match (list r1 r2)
;; Both children reduced to integers: fold the addition at compile time.
  620. [`(,n1 ,n2) #:when (and (fixnum? n1) (fixnum? n2))
  621. (fx+ r1 r2)]
;; Otherwise residualize an addition AST node.
  622. [else `(+ ,r1 ,r2)]))
  623. (define (pe-arith e)
;; Partial evaluator for arith: returns an equivalent arith program in
;; which every input-independent subexpression has been computed.
  624. (match e
  625. [(? fixnum?) e]
;; (read) depends on runtime input, so it must remain in the output.
  626. [`(read) `(read)]
;; Recur on children first, then let the helpers fold or residualize.
  627. [`(- ,e1) (pe-neg (pe-arith e1))]
  628. [`(+ ,e1 ,e2) (pe-add (pe-arith e1) (pe-arith e2))]))
  629. \end{lstlisting}
  630. \caption{A partial evaluator for the $\itm{arith}$ language.}
  631. \label{fig:pe-arith}
  632. \end{figure}
  633. Our code for \texttt{pe-neg} and \texttt{pe-add} implements the simple
  634. idea of checking whether the inputs are integers and if they are, to
  635. go ahead and perform the arithmetic. Otherwise, we use quasiquote to
  636. create an AST node for the appropriate operation (either negation or
  637. addition) and use comma to splice in the child nodes.
  638. To gain some confidence that the partial evaluator is correct, we can
  639. test whether it produces programs that get the same result as the
  640. input program. That is, we can test whether it satisfies Diagram
  641. \eqref{eq:compile-correct}. The following code runs the partial
  642. evaluator on several examples and tests the output program. The
  643. \texttt{assert} function is defined in Appendix~\ref{sec:utilities}.
  644. \begin{lstlisting}
  645. (define (test-pe p)
;; Check Diagram (eq:compile-correct) for one program p: partially
;; evaluating p must not change the result of interpreting it.
;; (Bug fix: the original signature took an extra, unused `pe`
;; parameter, so the one-argument calls below failed with an arity
;; error.)
  646. (assert "testing pe-arith"
  647. (equal? (interp-arith p) (interp-arith (pe-arith p)))))
  648. (test-pe `(+ (read) (- (+ 5 3))))
  649. (test-pe `(+ 1 (+ (read) 1)))
  650. (test-pe `(- (+ (read) (- 5))))
  651. \end{lstlisting}
  652. \begin{exercise}
  653. We challenge the reader to improve on the simple partial evaluator in
  654. Figure~\ref{fig:pe-arith} by replacing the \texttt{pe-neg} and
  655. \texttt{pe-add} helper functions with functions that know more about
  656. arithmetic. For example, your partial evaluator should translate
  657. \begin{lstlisting}
  658. (+ 1 (+ (read) 1))
  659. \end{lstlisting}
  660. into
  661. \begin{lstlisting}
  662. (+ 2 (read))
  663. \end{lstlisting}
  664. To accomplish this, we recommend that your partial evaluator produce
  665. output that takes the form of the $\itm{residual}$ non-terminal in the
  666. following grammar.
  667. \[
  668. \begin{array}{lcl}
  669. e &::=& (\key{read}) \mid (\key{-} \;(\key{read})) \mid (\key{+} \;e\; e)\\
  670. \itm{residual} &::=& \Int \mid (\key{+}\; \Int\; e) \mid e
  671. \end{array}
  672. \]
  673. \end{exercise}
  674. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  675. \chapter{Integers and Variables}
  676. \label{ch:int-exp}
  677. This chapter concerns the challenge of compiling a subset of Racket,
  678. which we name $S_0$, to x86-64 assembly code. The chapter begins with
  679. a description of the $S_0$ language (Section~\ref{sec:s0}) and then a
  680. description of x86-64 (Section~\ref{sec:x86-64}). The x86-64 assembly
  681. language is quite large, so we only discuss what is needed for
  682. compiling $S_0$. We will introduce more of x86-64 in later
  683. chapters. Once we have introduced $S_0$ and x86-64, we reflect on
  684. their differences and come up with a plan for a handful of steps that
  685. will take us from $S_0$ to x86-64 (Section~\ref{sec:plan-s0-x86}).
  686. The rest of the sections in this Chapter give detailed hints regarding
  687. what each step should do and how to organize your code
  688. (Sections~\ref{sec:uniquify-s0}, \ref{sec:flatten-s0},
  689. \ref{sec:select-s0} \ref{sec:assign-s0}, and \ref{sec:patch-s0}). We
  690. hope to give enough hints that the well-prepared reader can implement
  691. a compiler from $S_0$ to x86-64 while at the same time leaving room
  692. for some fun and creativity.
  693. \section{The $S_0$ Language}
  694. \label{sec:s0}
  695. The $S_0$ language includes integers, operations on integers
  696. (arithmetic and input), and variable definitions. The syntax of the
  697. $S_0$ language is defined by the grammar in
  698. Figure~\ref{fig:s0-syntax}. This language is rich enough to exhibit
  699. several compilation techniques but simple enough so that we can
  700. implement a compiler for it in two weeks of hard work. To give the
  701. reader a feeling for the scale of this first compiler, the instructor
  702. solution for the $S_0$ compiler consists of 6 recursive functions and
  703. a few small helper functions that together span 256 lines of code.
  704. \begin{figure}[btp]
  705. \centering
  706. \fbox{
  707. \begin{minipage}{0.85\textwidth}
  708. \[
  709. \begin{array}{lcl}
  710. \Op &::=& \key{+} \mid \key{-} \mid \key{*} \mid \key{read} \\
  711. \Exp &::=& \Int \mid (\Op \; \Exp^{*}) \mid \Var \mid \LET{\Var}{\Exp}{\Exp}
  712. \end{array}
  713. \]
  714. \end{minipage}
  715. }
  716. \caption{The syntax of the $S_0$ language. The abbreviation \Op{} is
  717. short for operator, \Exp{} is short for expression, \Int{} for integer,
  718. and \Var{} for variable.}
  719. \label{fig:s0-syntax}
  720. \end{figure}
  721. The result of evaluating an expression is a value. For $S_0$, values
  722. are integers. To make it straightforward to map these integers onto
  723. x86-64 assembly~\citep{Matz:2013aa}, we restrict the integers to just
  724. those representable with 64 bits, the range $-2^{63}$ to $2^{63}-1$
  725. (``fixnums'' in Racket parlance).
  726. We start with some examples of $S_0$ programs, commenting on aspects
  727. of the language that will be relevant to compiling it. We start with
  728. one of the simplest $S_0$ programs; it adds two integers.
  729. \[
  730. \BINOP{+}{10}{32}
  731. \]
  732. The result is $42$, as you might have expected.
  733. %
  734. The next example demonstrates that expressions may be nested within
  735. each other, in this case nesting several additions and negations.
  736. \[
  737. \BINOP{+}{10}{ \UNIOP{-}{ \BINOP{+}{12}{20} } }
  738. \]
  739. What is the result of the above program?
  740. The \key{let} construct defines a variable for use within its body
  741. and initializes the variable with the value of an expression. So the
  742. following program initializes $x$ to $32$ and then evaluates the body
  743. $\BINOP{+}{10}{x}$, producing $42$.
  744. \[
  745. \LET{x}{ \BINOP{+}{12}{20} }{ \BINOP{+}{10}{x} }
  746. \]
  747. When there are multiple \key{let}'s for the same variable, the closest
  748. enclosing \key{let} is used. That is, variable definitions overshadow
  749. prior definitions. Consider the following program with two \key{let}'s
  750. that define variables named $x$. Can you figure out the result?
  751. \[
  752. \LET{x}{32}{ \BINOP{+}{ \LET{x}{10}{x} }{ x } }
  753. \]
  754. For the purposes of showing which variable uses correspond to which
  755. definitions, the following shows the $x$'s annotated with subscripts
  756. to distinguish them. Double check that your answer for the above is
  757. the same as your answer for this annotated version of the program.
  758. \[
  759. \LET{x_1}{32}{ \BINOP{+}{ \LET{x_2}{10}{x_2} }{ x_1 } }
  760. \]
  761. Moving on, the \key{read} operation prompts the user of the program
  762. for an integer. Given an input of $10$, the following program produces
  763. $42$.
  764. \[
  765. \BINOP{+}{(\key{read})}{32}
  766. \]
  767. We include the \key{read} operation in $S_0$ to demonstrate that order
  768. of evaluation can make a difference. Given the input $52$ then $10$,
  769. the following produces $42$ (and not $-42$).
  770. \[
  771. \LET{x}{\READ}{ \LET{y}{\READ}{ \BINOP{-}{x}{y} } }
  772. \]
  773. The initializing expression is always evaluated before the body of the
  774. \key{let}, so in the above, the \key{read} for $x$ is performed before
  775. the \key{read} for $y$.
  776. %
  777. The behavior of the following program is somewhat subtle because
  778. Racket does not specify an evaluation order for arguments of an
  779. operator such as $-$.
  780. \[
  781. \BINOP{-}{\READ}{\READ}
  782. \]
  783. Given the input $42$ then $10$, the above program can result in either
  784. $42$ or $-42$, depending on the whims of the Racket implementation.
  785. The goal for this chapter is to implement a compiler that translates
  786. any program $P_1 \in S_0$ into a x86-64 assembly program $P_2$ such
  787. that the assembly program exhibits the same behavior on an x86
  788. computer as the $S_0$ program running in a Racket implementation.
  789. \[
  790. \xymatrix{
  791. P_1 \in S_0 \ar[rr]^{\text{compile}} \ar[drr]_{\text{run in Racket}\quad}
  792. && P_2 \in \text{x86-64} \ar[d]^{\quad\text{run on an x86 machine}}\\
  793. & & n \in \mathbb{Z}
  794. }
  795. \]
  796. In the next section we introduce enough of the x86-64 assembly
  797. language to compile $S_0$.
  798. \section{The x86-64 Assembly Language}
  799. \label{sec:x86-64}
  800. An x86-64 program is a sequence of instructions. The instructions may
  801. refer to integer constants (called \emph{immediate values}), variables
  802. called \emph{registers}, and instructions may load and store values
  803. into \emph{memory}. Memory is a mapping of 64-bit addresses to 64-bit
  804. values. Figure~\ref{fig:x86-a} defines the syntax for the subset of
  805. the x86-64 assembly language needed for this chapter.
  806. An immediate value is written using the notation \key{\$}$n$ where $n$
  807. is an integer.
  808. %
  809. A register is written with a \key{\%} followed by the register name,
  810. such as \key{\%rax}.
  811. %
  812. An access to memory is specified using the syntax $n(\key{\%}r)$,
  813. which reads register $r$, obtaining address $a$, and then offsets the
  814. address by $n$ bytes (a byte is 8 bits), producing the address $a + n$. The
  815. address is then used to either load or store to memory depending on
  816. whether it occurs as a source or destination argument of an
  817. instruction.
  818. An arithmetic instruction, such as $\key{addq}\,s\,d$, reads from the
  819. source argument $s$ and destination argument $d$, applies the
  820. arithmetic operation, then writes the result in the destination $d$. In
  821. this case, computing $d \gets d + s$.
  822. %
  823. The move instruction, $\key{movq}\,s\,d$ reads from $s$ and stores the
  824. result in $d$.
  825. %
  826. The $\key{callq}\,\mathit{label}$ instruction executes the procedure
  827. specified by the label, which we shall use to implement
  828. \key{read}.
  829. \begin{figure}[tbp]
  830. \fbox{
  831. \begin{minipage}{0.96\textwidth}
  832. \[
  833. \begin{array}{lcl}
  834. \itm{register} &::=& \key{rsp} \mid \key{rbp} \mid \key{rax} \mid \key{rbx} \mid \key{rcx}
  835. \mid \key{rdx} \mid \key{rsi} \mid \key{rdi} \mid \\
  836. && \key{r8} \mid \key{r9} \mid \key{r10}
  837. \mid \key{r11} \mid \key{r12} \mid \key{r13}
  838. \mid \key{r14} \mid \key{r15} \\
  839. \Arg &::=& \key{\$}\Int \mid \key{\%}\itm{register} \mid \Int(\key{\%}\itm{register}) \\
  840. \Instr &::=& \key{addq} \; \Arg, \Arg \mid
  841. \key{subq} \; \Arg, \Arg \mid
  842. \key{imulq} \; \Arg,\Arg \mid
  843. \key{negq} \; \Arg \mid \\
  844. && \key{movq} \; \Arg, \Arg \mid
  845. \key{callq} \; \mathit{label} \mid
  846. \key{pushq}\;\Arg \mid \key{popq}\;\Arg \mid \key{retq} \\
  847. \Prog &::= & \key{.globl \_main}\\
  848. & & \key{\_main:} \; \Instr^{+}
  849. \end{array}
  850. \]
  851. \end{minipage}
  852. }
  853. \caption{A subset of the x86-64 assembly language.}
  854. \label{fig:x86-a}
  855. \end{figure}
  856. \begin{wrapfigure}{r}{2.25in}
  857. \begin{lstlisting}
  858. .globl _main
  859. _main:
# Load the immediate 10 into the return-value register rax.
  860. movq $10, %rax
# Add the immediate 32 to rax, leaving 42 in rax.
  861. addq $32, %rax
# Return to the caller (the OS); the program's result is in rax.
  862. retq
  863. \end{lstlisting}
  864. \caption{An x86-64 program equivalent to $\BINOP{+}{10}{32}$.}
  865. \label{fig:p0-x86}
  866. \end{wrapfigure}
  867. Figure~\ref{fig:p0-x86} depicts an x86-64 program that is equivalent to
  868. $\BINOP{+}{10}{32}$. The \key{globl} directive says that the
  869. \key{\_main} procedure is externally visible, which is necessary so
  870. that the operating system can call it. The label \key{\_main:}
  871. indicates the beginning of the \key{\_main} procedure which is where
  872. the operating system starts executing this program. The instruction
  873. \lstinline{movq $10, %rax} puts $10$ into register \key{rax}. The
  874. following instruction \lstinline{addq $32, %rax} adds $32$ to the
  875. $10$ in \key{rax} and puts the result, $42$, back into
  876. \key{rax}. The instruction \key{retq} finishes the \key{\_main}
  877. function by returning the integer in the \key{rax} register to the
  878. operating system.
  879. \begin{wrapfigure}{r}{2.25in}
  880. \begin{lstlisting}
  881. .globl _main
  882. _main:
# Prelude: save the caller's base pointer and establish our frame.
  883. pushq %rbp
  884. movq %rsp, %rbp
# Reserve 16 bytes of stack space (frame sizes are multiples of 16).
  885. subq $16, %rsp
# variable 1 := 10
  886. movq $10, -8(%rbp)
# variable 1 := -10
  887. negq -8(%rbp)
# rax := 52
  888. movq $52, %rax
# rax := rax + variable 1 = 42
  889. addq -8(%rbp), %rax
# Conclusion: release the frame, restore the caller's rbp, and return.
  890. addq $16, %rsp
  891. popq %rbp
  892. retq
  893. \end{lstlisting}
  894. \caption{An x86-64 program equivalent to $\BINOP{+}{52}{\UNIOP{-}{10} }$.}
  895. \label{fig:p1-x86}
  896. \end{wrapfigure}
  897. The next example exhibits the use of memory. Figure~\ref{fig:p1-x86}
  898. lists an x86-64 program that is equivalent to $\BINOP{+}{52}{
  899. \UNIOP{-}{10} }$. To understand how this x86-64 program works, we
  900. need to explain a region of memory called the \emph{procedure
  901. call stack} (or \emph{stack} for short). The stack consists of a
  902. separate \emph{frame} for each procedure call. The memory layout for
  903. an individual frame is shown in Figure~\ref{fig:frame}. The register
  904. \key{rsp} is called the \emph{stack pointer} and points to the item at
  905. the top of the stack. The stack grows downward in memory, so we
  906. increase the size of the stack by subtracting from the stack
  907. pointer. The frame size is required to be a multiple of 16 bytes. The
  908. register \key{rbp} is the \emph{base pointer} which serves two
  909. purposes: 1) it saves the location of the stack pointer for the
  910. procedure that called the current one and 2) it is used to access
  911. variables associated with the current procedure. We number the
  912. variables from $1$ to $n$. Variable $1$ is stored at address
  913. $-8\key{(\%rbp)}$, variable $2$ at $-16\key{(\%rbp)}$, etc.
  914. \begin{figure}[tbp]
  915. \centering
  916. \begin{tabular}{|r|l|} \hline
  917. Position & Contents \\ \hline
  918. 8(\key{\%rbp}) & return address \\
  919. 0(\key{\%rbp}) & old \key{rbp} \\
  920. -8(\key{\%rbp}) & variable $1$ \\
  921. -16(\key{\%rbp}) & variable $2$ \\
  922. \ldots & \ldots \\
  923. 0(\key{\%rsp}) & variable $n$\\ \hline
  924. \end{tabular}
  925. \caption{Memory layout of a frame.}
  926. \label{fig:frame}
  927. \end{figure}
  928. Getting back to the program in Figure~\ref{fig:p1-x86}, the first
  929. three instructions are the typical prelude for a procedure. The
  930. instruction \key{pushq \%rbp} saves the base pointer for the procedure
  931. that called the current one onto the stack and subtracts $8$ from the
  932. stack pointer. The second instruction \key{movq \%rsp, \%rbp} changes
  933. the base pointer to the top of the stack. The instruction \key{subq
  934. \$16, \%rsp} moves the stack pointer down to make enough room for
  935. storing variables. This program just needs one variable ($8$ bytes)
  936. but because the frame size is required to be a multiple of 16 bytes,
  937. it rounds to 16 bytes.
  938. The next four instructions carry out the work of computing
  939. $\BINOP{+}{52}{\UNIOP{-}{10} }$. The first instruction \key{movq \$10,
  940. -8(\%rbp)} stores $10$ in variable $1$. The instruction \key{negq
  941. -8(\%rbp)} changes variable $1$ to $-10$. The \key{movq \$52, \%rax}
  942. places $52$ in the register \key{rax} and \key{addq -8(\%rbp), \%rax}
  943. adds the contents of variable $1$ to \key{rax}, at which point
  944. \key{rax} contains $42$.
  945. The last three instructions are the typical \emph{conclusion} of a
  946. procedure. These instructions are necessary to get the state of the
  947. machine back to where it was before the current procedure was called.
  948. The \key{addq \$16, \%rsp} instruction moves the stack pointer back to
  949. point at the old base pointer. The amount added here needs to match
  950. the amount that was subtracted in the prelude of the procedure. Then
  951. \key{popq \%rbp} returns the old base pointer to \key{rbp} and adds
  952. $8$ to the stack pointer. The \key{retq} instruction jumps back to
  953. the procedure that called this one and adds $8$ to the stack
  954. pointer.
  955. The compiler will need a convenient representation for manipulating
  956. x86 programs, so we define an abstract syntax for x86 in
  957. Figure~\ref{fig:x86-ast-a}. The \itm{info} field of the \key{program}
  958. AST node is for storing auxiliary information that needs to be
  959. communicated from one step of the compiler to the next. The function
  960. \key{print-x86} provided in the supplemental code converts an x86
  961. abstract syntax tree into the text representation for x86
  962. (Figure~\ref{fig:x86-a}).
  963. \begin{figure}[tbp]
  964. \fbox{
  965. \begin{minipage}{0.96\textwidth}
  966. \vspace{-10pt}
  967. \[
  968. \begin{array}{lcl}
  969. \Arg &::=& \INT{\Int} \mid \REG{\itm{register}}
  970. \mid \STACKLOC{\Int} \\
  971. \Instr &::=& (\key{addq} \; \Arg\; \Arg) \mid
  972. (\key{subq} \; \Arg\; \Arg) \mid
  973. (\key{imulq} \; \Arg\;\Arg) \mid
  974. (\key{negq} \; \Arg) \mid \\
  975. && (\key{movq} \; \Arg\; \Arg) \mid
  976. (\key{call} \; \mathit{label}) \mid
  977. (\key{pushq}\;\Arg) \mid (\key{popq}\;\Arg) \mid (\key{retq}) \\
  978. \Prog &::= & (\key{program} \;\itm{info} \; \Instr^{+})
  979. \end{array}
  980. \]
  981. \end{minipage}
  982. }
  983. \caption{Abstract syntax for x86-64 assembly.}
  984. \label{fig:x86-ast-a}
  985. \end{figure}
  986. \section{From $S_0$ to x86-64 via $C_0$}
  987. \label{sec:plan-s0-x86}
  988. To compile one language to another it helps to focus on the
  989. differences between the two languages. It is these differences that
  990. the compiler will need to bridge. What are the differences between
  991. $S_0$ and x86-64 assembly? Here we list some of the most important
  992. differences.
  993. \begin{enumerate}
  994. \item x86-64 arithmetic instructions typically take two arguments and
  995. update the second argument in place. In contrast, $S_0$ arithmetic
  996. operations only read their arguments and produce a new value.
  997. \item An argument to an $S_0$ operator can be any expression, whereas
  998. x86-64 instructions restrict their arguments to integers, registers,
  999. and memory locations.
  1000. \item An $S_0$ program can have any number of variables whereas x86-64
  1001. has only 16 registers.
  1002. \item Variables in $S_0$ can overshadow other variables with the same
  1003. name. The registers and memory locations of x86-64 all have unique
  1004. names.
  1005. \end{enumerate}
  1006. We ease the challenge of compiling from $S_0$ to x86 by breaking down
  1007. the problem into several steps, dealing with the above differences one
  1008. at a time. The main question then becomes: in what order do we tackle
  1009. these differences? This is often one of the most challenging questions
  1010. that a compiler writer must answer because some orderings may be much
  1011. more difficult to implement than others. It is difficult to know ahead
  1012. of time which orders will be better so often some trial-and-error is
  1013. involved. However, we can try to plan ahead and choose the orderings
  1014. based on this planning.
  1015. For example, to handle difference \#2 (nested expressions), we shall
  1016. introduce new variables and pull apart the nested expressions into a
  1017. sequence of assignment statements. To deal with difference \#3 we
  1018. will be replacing variables with registers and/or stack
  1019. locations. Thus, it makes sense to deal with \#2 before \#3 so that
  1020. \#3 can replace both the original variables and the new ones. Next,
  1021. consider where \#1 should fit in. Because it has to do with the format
  1022. of x86 instructions, it makes more sense after we have flattened the
  1023. nested expressions (\#2). Finally, when should we deal with \#4
  1024. (variable overshadowing)? We shall solve this problem by renaming
  1025. variables to make sure they have unique names. Recall that our plan
  1026. for \#2 involves moving nested expressions, which could be problematic
  1027. if it changes the shadowing of variables. However, if we deal with \#4
  1028. first, then it will not be an issue. Thus, we arrive at the following
  1029. ordering.
  1030. \[
  1031. \xymatrix{
  1032. 4 \ar[r] & 2 \ar[r] & 1 \ar[r] & 3
  1033. }
  1034. \]
  1035. We further simplify the translation from $S_0$ to x86 by identifying
  1036. an intermediate language named $C_0$, roughly half-way between $S_0$
  1037. and x86, to provide a rest stop along the way. We name the language
  1038. $C_0$ because it is vaguely similar to the $C$
  1039. language~\citep{Kernighan:1988nx}. The differences \#4 and \#2,
  1040. regarding variables and nested expressions, will be handled by two
  1041. steps, \key{uniquify} and \key{flatten}, which bring us to
  1042. $C_0$.
  1043. \[\large
  1044. \xymatrix@=50pt{
  1045. S_0 \ar@/^/[r]^-{\key{uniquify}} &
  1046. S_0 \ar@/^/[r]^-{\key{flatten}} &
  1047. C_0
  1048. }
  1049. \]
  1050. Each of these steps in the compiler is implemented by a function,
  1051. typically a structurally recursive function that translates an input
  1052. AST into an output AST. We refer to such a function as a \emph{pass}
  1053. because it makes a pass over the AST.
  1054. The syntax for $C_0$ is defined in Figure~\ref{fig:c0-syntax}. The
  1055. $C_0$ language supports the same operators as $S_0$ but the arguments
  1056. of operators are now restricted to just variables and integers. The
  1057. \key{let} construct of $S_0$ is replaced by an assignment statement
  1058. and there is a \key{return} construct to specify the return value of
  1059. the program. A program consists of a sequence of statements that
  1060. include at least one \key{return} statement.
  1061. \begin{figure}[tbp]
  1062. \fbox{
  1063. \begin{minipage}{0.96\textwidth}
  1064. \[
  1065. \begin{array}{lcl}
  1066. \Arg &::=& \Int \mid \Var \\
  1067. \Exp &::=& \Arg \mid (\Op \; \Arg^{*})\\
  1068. \Stmt &::=& \ASSIGN{\Var}{\Exp} \mid \RETURN{\Arg} \\
  1069. \Prog & ::= & (\key{program}\;\itm{info}\;\Stmt^{+})
  1070. \end{array}
  1071. \]
  1072. \end{minipage}
  1073. }
  1074. \caption{The $C_0$ intermediate language.}
  1075. \label{fig:c0-syntax}
  1076. \end{figure}
  1077. To get from $C_0$ to x86-64 assembly requires three more steps, which
  1078. we discuss below.
  1079. \[\large
  1080. \xymatrix@=50pt{
  1081. C_0 \ar@/^/[r]^-{\key{select\_instr.}}
  1082. & \text{x86}^{*} \ar@/^/[r]^-{\key{assign\_homes}}
  1083. & \text{x86}^{*} \ar@/^/[r]^-{\key{patch\_instr.}}
  1084. & \text{x86}
  1085. }
  1086. \]
  1087. We handle difference \#1, concerning the format of arithmetic
  1088. instructions, in the \key{select\_instructions} pass. The result
  1089. of this pass produces programs consisting of x86-64 instructions that
  1090. use variables.
  1091. %
  1092. As there are only 16 registers, we cannot always map variables to
  1093. registers (difference \#3). Fortunately, the stack can grow quite
  1094. large, so we can map variables to locations on the stack. This is
  1095. handled in the \key{assign\_homes} pass. The topic of
  1096. Chapter~\ref{ch:register-allocation} is implementing a smarter
  1097. approach in which we make a best-effort to map variables to registers,
  1098. resorting to the stack only when necessary.
  1099. The final pass in our journey to x86 handles an idiosyncrasy of x86
  1100. assembly. Many x86 instructions have two arguments but only one of the
  1101. arguments may be a memory reference. Because we are mapping variables
  1102. to stack locations, many of our generated instructions will violate
  1103. this restriction. The purpose of the \key{patch\_instructions} pass
  1104. is to fix this problem by replacing every violating instruction with a
  1105. short sequence of instructions that use the \key{rax} register.
  1106. \section{Uniquify Variables}
  1107. \label{sec:uniquify-s0}
  1108. The purpose of this pass is to make sure that each \key{let} uses a
  1109. unique variable name. For example, the \key{uniquify} pass could
  1110. translate
  1111. \[
  1112. \LET{x}{32}{ \BINOP{+}{ \LET{x}{10}{x} }{ x } }
  1113. \]
  1114. to
  1115. \[
  1116. \LET{x.1}{32}{ \BINOP{+}{ \LET{x.2}{10}{x.2} }{ x.1 } }
  1117. \]
  1118. We recommend implementing \key{uniquify} as a recursive function that
  1119. mostly just copies the input program. However, when encountering a
  1120. \key{let}, it should generate a unique name for the variable (the
  1121. Racket function \key{gensym} is handy for this) and associate the old
  1122. name with the new unique name in an association list. The
  1123. \key{uniquify} function will need to access this association list when
  1124. it gets to a variable reference, so we add another parameter to
  1125. \key{uniquify} for the association list. It is quite common for a
  1126. compiler pass to need a map to store extra information about
  1127. variables. Such maps are often called \emph{symbol tables}.
  1128. The skeleton of the \key{uniquify} function is shown in
  1129. Figure~\ref{fig:uniquify-s0}. The function is curried so that it is
  1130. convenient to partially apply it to an association list and then apply
  1131. it to different expressions, as in the last clause for primitive
  1132. operations in Figure~\ref{fig:uniquify-s0}.
  1133. \begin{exercise}
  1134. Complete the \key{uniquify} pass by filling in the blanks, that is,
  1135. implement the clauses for variables and for the \key{let} construct.
  1136. \end{exercise}
  1137. \begin{figure}[tbp]
  1138. \begin{lstlisting}
  1139. (define uniquify
;; Curried pass: ((uniquify alist) e) renames every let-bound variable in
;; e to a fresh name; alist maps original names to their fresh names.
  1140. (lambda (alist)
  1141. (lambda (e)
  1142. (match e
;; Variable reference: look up its fresh name in alist (exercise).
  1143. [(? symbol?) ___]
;; Integer literals need no renaming.
  1144. [(? integer?) e]
;; let: gensym a fresh name for x, extend alist, recur on e and body (exercise).
  1145. [`(let ([,x ,e]) ,body) ___]
  1146. [`(program ,info ,e)
  1147. `(program ,info ,((uniquify alist) e))]
;; Primitive operation: recur on each argument with the same alist.
  1148. [`(,op ,es ...)
  1149. `(,op ,@(map (uniquify alist) es))]
  1150. ))))
  1151. \end{lstlisting}
  1152. \caption{Skeleton for the \key{uniquify} pass.}
  1153. \label{fig:uniquify-s0}
  1154. \end{figure}
  1155. \begin{exercise}
  1156. Test your \key{uniquify} pass by creating three example $S_0$ programs
  1157. and checking whether the output programs produce the same result as
  1158. the input programs. The $S_0$ programs should be designed to test the
  1159. most interesting parts of the \key{uniquify} pass, that is, the
  1160. programs should include \key{let} constructs, variables, and variables
  1161. that shadow each other. The three programs should be in a
  1162. subdirectory named \key{tests} and they should have the same file name
  1163. except for a different integer at the end of the name, followed by the
  1164. ending \key{.scm}. Use the \key{interp-tests} function
  1165. (Appendix~\ref{sec:utilities}) from \key{utilities.rkt} to test your
  1166. \key{uniquify} pass on the example programs.
  1167. %% You can use the interpreter \key{interpret-S0} defined in the
  1168. %% \key{interp.rkt} file. The entire sequence of tests should be a short
  1169. %% Racket program so you can re-run all the tests by running the Racket
  1170. %% program. We refer to this as the \emph{regression test} program.
  1171. \end{exercise}
  1172. \section{Flatten Expressions}
  1173. \label{sec:flatten-s0}
  1174. The \key{flatten} pass will transform $S_0$ programs into $C_0$
  1175. programs. In particular, the purpose of the \key{flatten} pass is to
  1176. get rid of nested expressions, such as the $\UNIOP{-}{10}$ in the
  1177. following program.
  1178. \[
  1179. \BINOP{+}{52}{ \UNIOP{-}{10} }
  1180. \]
  1181. This can be accomplished by introducing a new variable, assigning the
  1182. nested expression to the new variable, and then using the new variable
  1183. in place of the nested expressions. For example, the above program is
  1184. translated to the following one.
  1185. \[
  1186. \begin{array}{l}
  1187. \ASSIGN{ \itm{x} }{ \UNIOP{-}{10} } \\
  1188. \ASSIGN{ \itm{y} }{ \BINOP{+}{52}{ \itm{x} } } \\
  1189. \RETURN{ y }
  1190. \end{array}
  1191. \]
  1192. We recommend implementing \key{flatten} as a structurally recursive
  1193. function that returns two things, 1) the newly flattened expression,
  1194. and 2) a list of assignment statements, one for each of the new
  1195. variables introduced while flattening the expression. You can return
  1196. multiple things from a function using the \key{values} form and you
  1197. can receive multiple things from a function call using the
  1198. \key{define-values} form. If you are not familiar with these
  1199. constructs, the Racket documentation will be of help.
  1200. Take special care for programs such as the following that initialize
  1201. variables with integers or other variables.
  1202. \[
  1203. \LET{a}{42}{ \LET{b}{a}{ b }}
  1204. \]
  1205. This program should be translated to
  1206. \[
  1207. \ASSIGN{a}{42} \;
  1208. \ASSIGN{b}{a} \;
  1209. \RETURN{b}
  1210. \]
  1211. and not the following, which could result from a naive implementation
  1212. of \key{flatten}.
  1213. \[
  1214. \ASSIGN{x.1}{42}\;
  1215. \ASSIGN{a}{x.1}\;
  1216. \ASSIGN{x.2}{a}\;
  1217. \ASSIGN{b}{x.2}\;
  1218. \RETURN{b}
  1219. \]
  1220. \begin{exercise}
  1221. Implement the \key{flatten} pass and test it on all of the example
  1222. programs that you created to test the \key{uniquify} pass and create
  1223. three new example programs that are designed to exercise all of the
  1224. interesting code in the \key{flatten} pass. Use the \key{interp-tests}
  1225. function (Appendix~\ref{sec:utilities}) from \key{utilities.rkt} to
  1226. test your passes on the example programs.
  1227. \end{exercise}
  1228. \section{Select Instructions}
  1229. \label{sec:select-s0}
  1230. In the \key{select\_instructions} pass we begin the work of
  1231. translating from $C_0$ to x86. The target language of this pass is a
  1232. pseudo-x86 language that still uses variables, so we add an AST node
  1233. of the form $\VAR{\itm{var}}$ to the x86 abstract syntax. The
  1234. \key{select\_instructions} pass deals with the differing format of
  1235. arithmetic operations. For example, in $C_0$ an addition operation
  1236. could take the following form:
  1237. \[
  1238. \ASSIGN{x}{ \BINOP{+}{10}{32} }
  1239. \]
  1240. To translate to x86, we need to express this addition using the
  1241. \key{addq} instruction that does an in-place update. So we first move
  1242. $10$ to $x$ then perform the \key{addq}.
  1243. \[
  1244. (\key{movq}\,\INT{10}\, \VAR{x})\; (\key{addq} \;\INT{32}\; \VAR{x})
  1245. \]
  1246. There are some cases that require special care to avoid generating
  1247. needlessly complicated code. If one of the arguments is the same as
  1248. the left-hand side of the assignment, then there is no need for the
  1249. extra move instruction. For example, the following
  1250. \[
  1251. \ASSIGN{x}{ \BINOP{+}{10}{x} }
  1252. \quad\text{should translate to}\quad
  1253. (\key{addq} \; \INT{10}\; \VAR{x})
  1254. \]
  1255. Regarding the \RETURN{e} statement of $C_0$, we recommend treating it
  1256. as an assignment to the \key{rax} register and let the procedure
  1257. conclusion handle the transfer of control back to the calling
  1258. procedure.
  1259. \section{Assign Homes}
  1260. \label{sec:assign-s0}
  1261. As discussed in Section~\ref{sec:plan-s0-x86}, the
  1262. \key{assign\_homes} pass places all of the variables on the stack.
  1263. Consider again the example $S_0$ program $\BINOP{+}{52}{ \UNIOP{-}{10} }$,
  1264. which after \key{select\_instructions} looks like the following.
  1265. \[
  1266. \begin{array}{l}
  1267. (\key{movq}\;\INT{10}\; \VAR{x})\\
  1268. (\key{negq}\; \VAR{x})\\
  1269. (\key{movq}\; \INT{52}\; \REG{\itm{rax}})\\
  1270. (\key{addq}\; \VAR{x}\; \REG{\itm{rax}})
  1271. \end{array}
  1272. \]
  1273. The one and only variable $x$ is assigned to stack location
  1274. \key{-8(\%rbp)}, so the \key{assign\_homes} pass translates the
  1275. above to
  1276. \[
  1277. \begin{array}{l}
  1278. (\key{movq}\;\INT{10}\; \STACKLOC{{-}8})\\
  1279. (\key{negq}\; \STACKLOC{{-}8})\\
  1280. (\key{movq}\; \INT{52}\; \REG{\itm{rax}})\\
  1281. (\key{addq}\; \STACKLOC{{-}8}\; \REG{\itm{rax}})
  1282. \end{array}
  1283. \]
  1284. In the process of assigning stack locations to variables, it is
  1285. convenient to compute and store the size of the frame which will be
  1286. needed later to generate the procedure conclusion.
  1287. \section{Patch Instructions}
  1288. \label{sec:patch-s0}
  1289. The purpose of this pass is to make sure that each instruction adheres
  1290. to the restrictions regarding which arguments can be memory
  1291. references. For most instructions, the rule is that at most one
  1292. argument may be a memory reference.
  1293. Consider again the following example.
  1294. \[
  1295. \LET{a}{42}{ \LET{b}{a}{ b }}
  1296. \]
  1297. After \key{assign\_homes} pass, the above has been translated to
  1298. \[
  1299. \begin{array}{l}
  1300. (\key{movq} \;\INT{42}\; \STACKLOC{{-}8})\\
  1301. (\key{movq}\;\STACKLOC{{-}8}\; \STACKLOC{{-}16})\\
  1302. (\key{movq}\;\STACKLOC{{-}16}\; \REG{\itm{rax}})
  1303. \end{array}
  1304. \]
  1305. The second \key{movq} instruction is problematic because both arguments
  1306. are stack locations. We suggest fixing this problem by moving from the
  1307. source to \key{rax} and then from \key{rax} to the destination, as
  1308. follows.
  1309. \[
  1310. \begin{array}{l}
  1311. (\key{movq} \;\INT{42}\; \STACKLOC{{-}8})\\
  1312. (\key{movq}\;\STACKLOC{{-}8}\; \REG{\itm{rax}})\\
  1313. (\key{movq}\;\REG{\itm{rax}}\; \STACKLOC{{-}16})\\
  1314. (\key{movq}\;\STACKLOC{{-}16}\; \REG{\itm{rax}})
  1315. \end{array}
  1316. \]
  1317. The \key{imulq} instruction is a special case because the destination
  1318. argument must be a register.
  1319. \section{Print x86}
  1320. \label{sec:print-x86}
  1321. [To do: talk about printing the AST to x86.]
  1322. %% \section{Testing with Interpreters}
  1323. %% The typical way to test a compiler is to run the generated assembly
  1324. %% code on a diverse set of programs and check whether they behave as
  1325. %% expected. However, when a compiler is structured as ours is, with many
  1326. %% passes, when there is an error in the generated assembly code it can
  1327. %% be hard to determine which pass contains the source of the error. A
  1328. %% good way to isolate the error is to not only test the generated
  1329. %% assembly code but to also test the output of every pass. This requires
  1330. %% having interpreters for all the intermediate languages. Indeed, the
  1331. %% file \key{interp.rkt} in the supplemental code provides interpreters
  1332. %% for all the intermediate languages described in this book, starting
  1333. %% with interpreters for $S_0$, $C_0$, and x86 (in abstract syntax).
  1334. %% The file \key{run-tests.rkt} automates the process of running the
  1335. %% interpreters on the output programs of each pass and checking their
  1336. %% result.
  1337. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1338. \chapter{Register Allocation}
  1339. \label{ch:register-allocation}
  1340. In Chapter~\ref{ch:int-exp} we simplified the generation of x86
  1341. assembly by placing all variables on the stack. We can improve the
  1342. performance of the generated code considerably if we instead try to
  1343. place as many variables as possible into registers. The CPU can
  1344. access a register in a single cycle, whereas accessing the stack can
  1345. take from several cycles (to go to cache) to hundreds of cycles (to go
  1346. to main memory). Figure~\ref{fig:reg-eg} shows a program with four
  1347. variables that serves as a running example. We show the source program
  1348. and also the output of instruction selection. At that point the
  1349. program is almost x86 assembly but not quite; it still contains
  1350. variables instead of stack locations or registers.
  1351. \begin{figure}
  1352. \begin{minipage}{0.45\textwidth}
  1353. Source program:
  1354. \begin{lstlisting}
  1355. (let ([v 1])
  1356. (let ([w 46])
  1357. (let ([x (+ v 7)])
  1358. (let ([y (+ 4 x)])
  1359. (let ([z (+ x w)])
  1360. (- z y))))))
  1361. \end{lstlisting}
  1362. \end{minipage}
  1363. \begin{minipage}{0.45\textwidth}
  1364. After instruction selection:
  1365. \begin{lstlisting}
  1366. (program (v w x y z)
  1367. (movq (int 1) (var v))
  1368. (movq (int 46) (var w))
  1369. (movq (var v) (var x))
  1370. (addq (int 7) (var x))
  1371. (movq (var x) (var y))
  1372. (addq (int 4) (var y))
  1373. (movq (var x) (var z))
  1374. (addq (var w) (var z))
  1375. (movq (var z) (reg rax))
  1376. (subq (var y) (reg rax)))
  1377. \end{lstlisting}
  1378. \end{minipage}
  1379. \caption{Running example for this chapter.}
  1380. \label{fig:reg-eg}
  1381. \end{figure}
  1382. The goal of register allocation is to fit as many variables into
  1383. registers as possible. It is often the case that we have more
  1384. variables than registers, so we can't naively map each variable to a
  1385. register. Fortunately, it is also common for different variables to be
  1386. needed during different periods of time, and in such cases the
  1387. variables can be mapped to the same register. Consider variables $x$
  1388. and $y$ in Figure~\ref{fig:reg-eg}. After the variable $x$ is moved
  1389. to $z$ it is no longer needed. Variable $y$, on the other hand, is
  1390. used only after this point, so $x$ and $y$ could share the same
  1391. register. The topic of the next section is how we compute where a
  1392. variable is needed.
  1393. \section{Liveness Analysis}
  1394. A variable is \emph{live} if the variable is used at some later point
  1395. in the program and there is not an intervening assignment to the
  1396. variable.
  1397. %
  1398. To understand the latter condition, consider the following code
  1399. fragment in which there are two writes to $b$. Are $a$ and
  1400. $b$ both live at the same time?
  1401. \begin{lstlisting}[numbers=left,numberstyle=\tiny]
  1402. (movq (int 5) (var a)) ; @$a \gets 5$@
  1403. (movq (int 30) (var b)) ; @$b \gets 30$@
  1404. (movq (var a) (var c)) ; @$c \gets a$@
  1405. (movq (int 10) (var b)) ; @$b \gets 10$@
  1406. (addq (var b) (var c)) ; @$c \gets c + b$@
  1407. \end{lstlisting}
  1408. The answer is no because the value $30$ written to $b$ on line 2 is
  1409. never used. The variable $b$ is read on line 5 and there is an
  1410. intervening write to $b$ on line 4, so the read on line 5 receives the
  1411. value written on line 4, not line 2.
  1412. The live variables can be computed by traversing the instruction
  1413. sequence back to front (i.e., backwards in execution order). Let
  1414. $I_1,\ldots, I_n$ be the instruction sequence. We write
  1415. $L_{\mathsf{after}}(k)$ for the set of live variables after
  1416. instruction $I_k$ and $L_{\mathsf{before}}(k)$ for the set of live
  1417. variables before instruction $I_k$. The live variables after an
  1418. instruction are always the same as the live variables before the next
  1419. instruction.
  1420. \begin{equation*}
  1421. L_{\mathsf{after}}(k) = L_{\mathsf{before}}(k+1)
  1422. \end{equation*}
  1423. To start things off, there are no live variables after the last
  1424. instruction, so
  1425. \begin{equation*}
  1426. L_{\mathsf{after}}(n) = \emptyset
  1427. \end{equation*}
  1428. We then apply the following rule repeatedly, traversing the
  1429. instruction sequence back to front.
  1430. \begin{equation*}
  1431. L_{\mathtt{before}}(k) = (L_{\mathtt{after}}(k) - W(k)) \cup R(k),
  1432. \end{equation*}
  1433. where $W(k)$ are the variables written to by instruction $I_k$ and
  1434. $R(k)$ are the variables read by instruction $I_k$.
  1435. Figure~\ref{fig:live-eg} shows the results of live variables analysis
  1436. for the running example. Next to each instruction we write its
  1437. $L_{\mathtt{after}}$ set.
  1438. \begin{figure}[tbp]
  1439. \begin{lstlisting}
  1440. (program (v w x y z)
  1441. (movq (int 1) (var v)) @$\{ v \}$@
  1442. (movq (int 46) (var w)) @$\{ v, w \}$@
  1443. (movq (var v) (var x)) @$\{ w, x \}$@
  1444. (addq (int 7) (var x)) @$\{ w, x \}$@
  1445. (movq (var x) (var y)) @$\{ w, x, y\}$@
  1446. (addq (int 4) (var y)) @$\{ w, x, y \}$@
  1447. (movq (var x) (var z)) @$\{ w, y, z \}$@
  1448. (addq (var w) (var z)) @$\{ y, z \}$@
  1449. (movq (var z) (reg rax)) @$\{ y \}$@
  1450. (subq (var y) (reg rax))) @$\{\}$@
  1451. \end{lstlisting}
  1452. \caption{Running example program annotated with live-after sets.}
  1453. \label{fig:live-eg}
  1454. \end{figure}
  1455. \section{Building the Interference Graph}
  1456. Based on the liveness analysis, we know the program regions where each
  1457. variable is needed. However, during register allocation, we need to
  1458. answer questions of the specific form: are variables $u$ and $v$ ever
  1459. live at the same time? (And therefore cannot be assigned to the same
  1460. register.) To make this question easier to answer, we create an
  1461. explicit data structure, an \emph{interference graph}. An
  1462. interference graph is an undirected graph that has an edge between two
  1463. variables if they are live at the same time, that is, if they
  1464. interfere with each other.
  1465. The most obvious way to compute the interference graph is to look at
  1466. the set of live variables between each statement in the program, and
  1467. add an edge to the graph for every pair of variables in the same set.
  1468. This approach is less than ideal for two reasons. First, it can be
  1469. rather expensive because it takes $O(n^2)$ time to look at every pair
  1470. in a set of $n$ live variables. Second, there is a special case in
  1471. which two variables that are live at the same time do not actually
  1472. interfere with each other: when they both contain the same value
  1473. because we have assigned one to the other.
  1474. A better way to compute the edges of the interference graph is given
  1475. by the following rules.
  1476. \begin{itemize}
  1477. \item If instruction $I_k$ is a move: (\key{movq} $s$\, $d$), then add
  1478. the edge $(d,v)$ for every $v \in L_{\mathsf{after}}(k)$ unless $v =
  1479. d$ or $v = s$.
  1480. \item If instruction $I_k$ is not a move but some other arithmetic
  1481. instruction such as (\key{addq} $s$\, $d$), then add the edge $(d,v)$
  1482. for every $v \in L_{\mathsf{after}}(k)$ unless $v = d$.
  1483. \item If instruction $I_k$ is of the form (\key{call}
  1484. $\mathit{label}$), then add an edge $(r,v)$ for every caller-save
  1485. register $r$ and every variable $v \in L_{\mathsf{after}}(k)$.
  1486. \end{itemize}
  1487. Working from the top to bottom of Figure~\ref{fig:live-eg}, $v$ interferes
  1488. with $w$, $x$ interferes with $w$, $y$ interferes with $w$ and $x$, and $z$
  1489. interferes with $w$ and $y$. The resulting interference graph is shown in
  1490. Figure~\ref{fig:interfere}.
  1491. \begin{figure}[tbp]
  1492. \large
  1493. \[
  1494. \xymatrix@=40pt{
  1495. v \ar@{-}[r] & w \ar@{-}[r]\ar@{-}[d]\ar@{-}[dr] & x \ar@{-}[dl]\\
  1496. & y \ar@{-}[r] & z
  1497. }
  1498. \]
  1499. \caption{Interference graph for the running example.}
  1500. \label{fig:interfere}
  1501. \end{figure}
  1502. \section{Graph Coloring via Sudoku}
  1503. We now come to the main event, mapping variables to registers (or to
  1504. stack locations in the event that we run out of registers). We need
  1505. to make sure not to map two variables to the same register if the two
  1506. variables interfere with each other. In terms of the interference
  1507. graph, this means we cannot map adjacent nodes to the same register.
  1508. If we think of registers as colors, the register allocation problem
  1509. becomes the widely-studied graph coloring
  1510. problem~\citep{Balakrishnan:1996ve,Rosen:2002bh}.
  1511. The reader may be more familiar with the graph coloring problem than he
  1512. or she realizes; the popular game of Sudoku is an instance of the
  1513. graph coloring problem. The following describes how to build a graph
  1514. out of a Sudoku board.
  1515. \begin{itemize}
  1516. \item There is one node in the graph for each Sudoku square.
  1517. \item There is an edge between two nodes if the corresponding squares
  1518. are in the same row or column, or if the squares are in the same
  1519. $3\times 3$ region.
  1520. \item Choose nine colors to correspond to the numbers $1$ to $9$.
  1521. \item Based on the initial assignment of numbers to squares in the
  1522. Sudoku board, assign the corresponding colors to the corresponding
  1523. nodes in the graph.
  1524. \end{itemize}
  1525. If you can color the remaining nodes in the graph with the nine
  1526. colors, then you've also solved the corresponding game of Sudoku.
  1527. Given that Sudoku is graph coloring, one can use Sudoku strategies to
  1528. come up with an algorithm for allocating registers. For example, one
  1529. of the basic techniques for Sudoku is Pencil Marks. The idea is that
  1530. you use a process of elimination to determine what numbers still make
  1531. sense for a square, and write down those numbers in the square
  1532. (writing very small). At first, each number might be a
  1533. possibility, but as the board fills up, more and more of the
  1534. possibilities are crossed off (or erased). For example, if the number
  1535. $1$ is assigned to a square, then by process of elimination, you can
  1536. cross off the $1$ pencil mark from all the squares in the same row,
  1537. column, and region. Many Sudoku computer games provide automatic
  1538. support for Pencil Marks. This heuristic also reduces the degree of
  1539. branching in the search tree.
  1540. The Pencil Marks technique corresponds to the notion of color
  1541. \emph{saturation} due to \cite{Brelaz:1979eu}. The
  1542. saturation of a node, in Sudoku terms, is the number of possibilities
  1543. that have been crossed off using the process of elimination mentioned
  1544. above. In graph terminology, we have the following definition:
  1545. \begin{equation*}
  1546. \mathrm{saturation}(u) = |\{ c \;|\; \exists v. v \in \mathrm{Adj}(u)
  1547. \text{ and } \mathrm{color}(v) = c \}|
  1548. \end{equation*}
  1549. where $\mathrm{Adj}(u)$ is the set of nodes adjacent to $u$ and
  1550. the notation $|S|$ stands for the size of the set $S$.
  1551. Using the Pencil Marks technique leads to a simple strategy for
  1552. filling in numbers: if there is a square with only one possible number
  1553. left, then write down that number! But what if there are no squares
  1554. with only one possibility left? One brute-force approach is to just
  1555. make a guess. If that guess ultimately leads to a solution, great. If
  1556. not, backtrack to the guess and make a different guess. Of course,
  1557. this is horribly time consuming. One standard way to reduce the amount
  1558. of backtracking is to use the most-constrained-first heuristic. That
  1559. is, when making a guess, always choose a square with the fewest
  1560. possibilities left (the node with the highest saturation). The idea
  1561. is that choosing highly constrained squares earlier rather than later
  1562. is better because later there may not be any possibilities left.
  1563. In some sense, register allocation is easier than Sudoku because we
  1564. can always cheat and add more numbers by spilling variables to the
  1565. stack. Also, we'd like to minimize the time needed to color the graph,
  1566. and backtracking is expensive. Thus, it makes sense to keep the
  1567. most-constrained-first heuristic but drop the backtracking in favor of
  1568. greedy search (guess and just keep going).
  1569. Figure~\ref{fig:satur-algo} gives the pseudo-code for this simple
  1570. greedy algorithm for register allocation based on saturation and the
  1571. most-constrained-first heuristic, which is roughly equivalent to the
  1572. DSATUR algorithm of \cite{Brelaz:1979eu} (also known as
  1573. saturation degree ordering
  1574. (SDO)~\citep{Gebremedhin:1999fk,Omari:2006uq}). Just as in Sudoku,
  1575. the algorithm represents colors with integers, with the first $k$
  1576. colors corresponding to the $k$ registers in a given machine and the
  1577. rest of the integers corresponding to stack locations.
  1578. \begin{figure}[btp]
  1579. \centering
  1580. \begin{lstlisting}[basicstyle=\rmfamily,deletekeywords={for,from,with,is,not,in,find},morekeywords={while},columns=fullflexible]
  1581. Algorithm: DSATUR
  1582. Input: a graph @$G$@
  1583. Output: an assignment @$\mathrm{color}[v]$@ for each node @$v \in G$@
  1584. @$W \gets \mathit{vertices}(G)$@
  1585. while @$W \neq \emptyset$@ do
  1586. pick a node @$u$@ from @$W$@ with the highest saturation,
  1587. breaking ties randomly
  1588. find the lowest color @$c$@ that is not in @$\{ \mathrm{color}[v] \;|\; v \in \mathrm{Adj}(u)\}$@
  1589. @$\mathrm{color}[u] \gets c$@
  1590. @$W \gets W - \{u\}$@
  1591. \end{lstlisting}
  1592. \caption{Saturation-based greedy graph coloring algorithm.}
  1593. \label{fig:satur-algo}
  1594. \end{figure}
  1595. With this algorithm in hand, let us return to the running example and
  1596. consider how to color the interference graph in
  1597. Figure~\ref{fig:interfere}. Initially, all of the nodes are not yet
  1598. colored and they are unsaturated, so we annotate each of them with a
  1599. dash for their color and an empty set for the saturation.
  1600. \[
  1601. \xymatrix{
  1602. v:-,\{\} \ar@{-}[r] & w:-,\{\} \ar@{-}[r]\ar@{-}[d]\ar@{-}[dr] & x:-,\{\} \ar@{-}[dl]\\
  1603. & y:-,\{\} \ar@{-}[r] & z:-,\{\}
  1604. }
  1605. \]
  1606. We select a maximally saturated node and color it $0$. In this case we
  1607. have a 5-way tie, so we arbitrarily pick $y$. The color $0$ is no
  1608. longer available for $w$, $x$, and $z$ because they interfere with
  1609. $y$.
  1610. \[
  1611. \xymatrix{
  1612. v:-,\{\} \ar@{-}[r] & w:-,\{0\} \ar@{-}[r]\ar@{-}[d]\ar@{-}[dr] & x:-,\{0\} \ar@{-}[dl]\\
  1613. & y:0,\{\} \ar@{-}[r] & z:-,\{0\}
  1614. }
  1615. \]
  1616. Now we repeat the process, selecting another maximally saturated node.
  1617. This time there is a three-way tie between $w$, $x$, and $z$. We color
  1618. $w$ with $1$.
  1619. \[
  1620. \xymatrix{
  1621. v:-,\{1\} \ar@{-}[r] & w:1,\{0\} \ar@{-}[r]\ar@{-}[d]\ar@{-}[dr] & x:-,\{0,1\} \ar@{-}[dl]\\
  1622. & y:0,\{1\} \ar@{-}[r] & z:-,\{0,1\}
  1623. }
  1624. \]
  1625. The most saturated nodes are now $x$ and $z$. We color $x$ with the
  1626. next available color which is $2$.
  1627. \[
  1628. \xymatrix{
  1629. v:-,\{1\} \ar@{-}[r] & w:1,\{0,2\} \ar@{-}[r]\ar@{-}[d]\ar@{-}[dr] & x:2,\{0,1\} \ar@{-}[dl]\\
  1630. & y:0,\{1,2\} \ar@{-}[r] & z:-,\{0,1\}
  1631. }
  1632. \]
  1633. We have only two nodes left to color, $v$ and $z$, but $z$ is
  1634. more highly saturated, so we color $z$ with $2$.
  1635. \[
  1636. \xymatrix{
  1637. v:-,\{1\} \ar@{-}[r] & w:1,\{0,2\} \ar@{-}[r]\ar@{-}[d]\ar@{-}[dr] & x:2,\{0,1\} \ar@{-}[dl]\\
  1638. & y:0,\{1,2\} \ar@{-}[r] & z:2,\{0,1\}
  1639. }
  1640. \]
  1641. The last iteration of the coloring algorithm assigns color $0$ to $v$.
  1642. \[
  1643. \xymatrix{
  1644. v:0,\{1\} \ar@{-}[r] & w:1,\{0,2\} \ar@{-}[r]\ar@{-}[d]\ar@{-}[dr] & x:2,\{0,1\} \ar@{-}[dl]\\
  1645. & y:0,\{1,2\} \ar@{-}[r] & z:2,\{0,1\}
  1646. }
  1647. \]
  1648. With the coloring complete, we can finalize assignment of variables to
  1649. registers and stack locations. Recall that if we have $k$ registers,
  1650. we map the first $k$ colors to registers and the rest to stack
  1651. locations. Suppose for the moment that we just have one extra register
  1652. to use for register allocation, just \key{rbx}. Then the following is
  1653. the mapping of colors to registers and stack allocations.
  1654. \[
  1655. \{ 0 \mapsto \key{\%rbx}, \; 1 \mapsto \key{-8(\%rbp)}, \; 2 \mapsto \key{-16(\%rbp)}, \ldots \}
  1656. \]
  1657. Putting this together with the above coloring of the variables, we
  1658. arrive at the following assignment.
  1659. \[
  1660. \{ v \mapsto \key{\%rbx}, \;
  1661. w \mapsto \key{-8(\%rbp)}, \;
  1662. x \mapsto \key{-16(\%rbp)}, \;
  1663. y \mapsto \key{\%rbx}, \;
  1664. z\mapsto \key{-16(\%rbp)} \}
  1665. \]
  1666. Applying this assignment to our running example
  1667. (Figure~\ref{fig:reg-eg}) yields the following program.
  1668. % why frame size of 32? -JGS
  1669. \begin{lstlisting}
  1670. (program 32
  1671. (movq (int 1) (reg rbx))
  1672. (movq (int 46) (stack-loc -8))
  1673. (movq (reg rbx) (stack-loc -16))
  1674. (addq (int 7) (stack-loc -16))
  1675. (movq (stack-loc -16) (reg rbx))
  1676. (addq (int 4) (reg rbx))
  1677. (movq (stack-loc -16) (stack-loc -16))
  1678. (addq (stack-loc -8) (stack-loc -16))
  1679. (movq (stack-loc -16) (reg rax))
  1680. (subq (reg rbx) (reg rax)))
  1681. \end{lstlisting}
  1682. This program is almost an x86 program. The remaining step is to apply
  1683. the patch instructions pass. In this example, the trivial move of
  1684. \key{-16(\%rbp)} to itself is deleted and the addition of
  1685. \key{-8(\%rbp)} to \key{-16(\%rbp)} is fixed by going through
  1686. \key{\%rax}. The following shows the portion of the program that
  1687. changed.
  1688. \begin{lstlisting}
  1689. (addq (int 4) (reg rbx))
  1690. (movq (stack-loc -8) (reg rax))
  1691. (addq (reg rax) (stack-loc -16))
  1692. \end{lstlisting}
  1693. An overview of all of the passes involved in register allocation is
  1694. shown in Figure~\ref{fig:reg-alloc-passes}.
  1695. \begin{figure}[tbp]
  1696. \[
  1697. \xymatrix{
  1698. C_0 \ar@/^/[r]^-{\key{select\_instr.}}
  1699. & \text{x86}^{*} \ar[d]^-{\key{uncover\_live}} \\
  1700. & \text{x86}^{*} \ar[d]^-{\key{build\_interference}} \\
  1701. & \text{x86}^{*} \ar[d]_-{\key{allocate\_register}} \\
  1702. & \text{x86}^{*} \ar@/^/[r]^-{\key{patch\_instr.}}
  1703. & \text{x86}
  1704. }
  1705. \]
  1706. \caption{Diagram of the passes for register allocation.}
  1707. \label{fig:reg-alloc-passes}
  1708. \end{figure}
  1709. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1710. \chapter{Booleans, Type Checking, and Control Flow}
  1711. \label{ch:bool-types}
  1712. \section{The $S_1$ Language}
  1713. \begin{figure}[htbp]
  1714. \centering
  1715. \fbox{
  1716. \begin{minipage}{0.85\textwidth}
  1717. \[
  1718. \begin{array}{lcl}
  1719. \Op &::=& \ldots \mid \key{and} \mid \key{or} \mid \key{not} \mid \key{eq?} \\
  1720. \Exp &::=& \ldots \mid \key{\#t} \mid \key{\#f} \mid
  1721. \IF{\Exp}{\Exp}{\Exp}
  1722. \end{array}
  1723. \]
  1724. \end{minipage}
  1725. }
  1726. \caption{The $S_1$ language, an extension of $S_0$
  1727. (Figure~\ref{fig:s0-syntax}).}
  1728. \label{fig:s1-syntax}
  1729. \end{figure}
  1730. \section{Type Checking $S_1$ Programs}
  1731. % T ::= Integer | Boolean
  1732. It is common practice to specify a type system by writing rules for
  1733. each kind of AST node. For example, the rule for \key{if} is:
  1734. \begin{quote}
  1735. For any expressions $e_1, e_2, e_3$ and any type $T$, if $e_1$ has
  1736. type \key{bool}, $e_2$ has type $T$, and $e_3$ has type $T$, then
  1737. $\IF{e_1}{e_2}{e_3}$ has type $T$.
  1738. \end{quote}
  1739. It is also common practice to write rules using a horizontal line,
  1740. with the conditions written above the line and the conclusion written
  1741. below the line.
  1742. \begin{equation*}
  1743. \inference{e_1 \text{ has type } \key{bool} &
  1744. e_2 \text{ has type } T & e_3 \text{ has type } T}
  1745. {\IF{e_1}{e_2}{e_3} \text{ has type } T}
  1746. \end{equation*}
  1747. Because the phrase ``has type'' is repeated so often in these type
  1748. checking rules, it is abbreviated to just a colon. So the above rule
  1749. is abbreviated to the following.
  1750. \begin{equation*}
  1751. \inference{e_1 : \key{bool} & e_2 : T & e_3 : T}
  1752. {\IF{e_1}{e_2}{e_3} : T}
  1753. \end{equation*}
  1754. The $\LET{x}{e_1}{e_2}$ construct poses an interesting challenge. The
  1755. variable $x$ is assigned the value of $e_1$ and then $x$ can be used
  1756. inside $e_2$. When we get to an occurrence of $x$ inside $e_2$, how do
  1757. we know what type the variable should be? The answer is that we need
  1758. a way to map from variable names to types. Such a mapping is called a
  1759. \emph{type environment} (aka. \emph{symbol table}). The capital Greek
  1760. letter gamma, written $\Gamma$, is used for referring to type
  1761. environments. The notation $\Gamma, x : T$ stands for
  1762. making a copy of the environment $\Gamma$ and then associating $T$
  1763. with the variable $x$ in the new environment. We write $\Gamma(x)$ to
  1764. lookup the associated type for $x$. The type checking rules for
  1765. \key{let} and variables are as follows.
  1766. \begin{equation*}
  1767. \inference{e_1 : T_1 \text{ in } \Gamma &
  1768. e_2 : T_2 \text{ in } \Gamma,x:T_1}
  1769. {\LET{x}{e_1}{e_2} : T_2 \text{ in } \Gamma}
  1770. \qquad
  1771. \inference{\Gamma(x) = T}
  1772. {x : T \text{ in } \Gamma}
  1773. \end{equation*}
  1774. Type checking has roots in logic, and logicians have a tradition of
  1775. writing the environment on the left-hand side and separating it from
  1776. the expression with a turn-stile ($\vdash$). The turn-stile does not
  1777. have any intrinsic meaning per se. It is punctuation that separates
  1778. the environment $\Gamma$ from the expression $e$. So the above typing
  1779. rules are written as follows.
  1780. \begin{equation*}
  1781. \inference{\Gamma \vdash e_1 : T_1 &
  1782. \Gamma,x:T_1 \vdash e_2 : T_2}
  1783. {\Gamma \vdash \LET{x}{e_1}{e_2} : T_2}
  1784. \qquad
  1785. \inference{\Gamma(x) = T}
  1786. {\Gamma \vdash x : T}
  1787. \end{equation*}
  1788. Overall, the statement $\Gamma \vdash e : T$ is an example of what is
  1789. called a \emph{judgment}. In particular, this judgment says, ``In
  1790. environment $\Gamma$, expression $e$ has type $T$.''
  1791. Figure~\ref{fig:S1-type-system} shows the type checking rules for
  1792. $S_1$.
  1793. \begin{figure}
  1794. \begin{gather*}
  1795. \inference{\Gamma(x) = T}
  1796. {\Gamma \vdash x : T}
  1797. \qquad
  1798. \inference{\Gamma \vdash e_1 : T_1 &
  1799. \Gamma,x:T_1 \vdash e_2 : T_2}
  1800. {\Gamma \vdash \LET{x}{e_1}{e_2} : T_2}
  1801. \\[2ex]
  1802. \inference{}{\Gamma \vdash n : \key{Integer}}
  1803. \quad
  1804. \inference{\Gamma \vdash e_i : T_i \ ^{\forall i \in 1\ldots n} & \Delta(\Op,T_1,\ldots,T_n) = T}
  1805. {\Gamma \vdash (\Op \; e_1 \ldots e_n) : T}
  1806. \\[2ex]
  1807. \inference{}{\Gamma \vdash \key{\#t} : \key{Boolean}}
  1808. \quad
  1809. \inference{}{\Gamma \vdash \key{\#f} : \key{Boolean}}
  1810. \quad
  1811. \inference{\Gamma \vdash e_1 : \key{Boolean} \\
  1812. \Gamma \vdash e_2 : T &
  1813. \Gamma \vdash e_3 : T}
  1814. {\Gamma \vdash \IF{e_1}{e_2}{e_3} : T}
  1815. \end{gather*}
  1816. \caption{Type System for $S_1$.}
  1817. \label{fig:S1-type-system}
  1818. \end{figure}
  1819. \begin{figure}
  1820. \begin{align*}
  1821. \Delta(\key{+},\key{Integer},\key{Integer}) &= \key{Integer} \\
  1822. \Delta(\key{-},\key{Integer},\key{Integer}) &= \key{Integer} \\
  1823. \Delta(\key{-},\key{Integer}) &= \key{Integer} \\
  1824. \Delta(\key{*},\key{Integer},\key{Integer}) &= \key{Integer} \\
  1825. \Delta(\key{read}) &= \key{Integer} \\
  1826. \Delta(\key{and},\key{Boolean},\key{Boolean}) &= \key{Boolean} \\
  1827. \Delta(\key{or},\key{Boolean},\key{Boolean}) &= \key{Boolean} \\
  1828. \Delta(\key{not},\key{Boolean}) &= \key{Boolean} \\
  1829. \Delta(\key{eq?},\key{Integer},\key{Integer}) &= \key{Boolean} \\
  1830. \Delta(\key{eq?},\key{Boolean},\key{Boolean}) &= \key{Boolean}
  1831. \end{align*}
  1832. \caption{Types for the primitive operators.}
  1833. \end{figure}
  1834. \section{The $C_1$ Language}
  1835. \begin{figure}[htbp]
  1836. \[
  1837. \begin{array}{lcl}
  1838. \Arg &::=& \ldots \mid \key{\#t} \mid \key{\#f} \\
  1839. \Stmt &::=& \ldots \mid \IF{\Exp}{\Stmt^{*}}{\Stmt^{*}}
  1840. \end{array}
  1841. \]
  1842. \caption{The $C_1$ intermediate language, an extension of $C_0$
  1843. (Figure~\ref{fig:c0-syntax}).}
  1844. \label{fig:c1-syntax}
  1845. \end{figure}
  1846. \section{Flatten Expressions}
  1847. \section{Select Instructions}
  1848. \section{Register Allocation}
  1849. \section{Patch Instructions}
  1850. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1851. \chapter{Tuples and Heap Allocation}
  1852. \label{ch:tuples}
  1853. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1854. \chapter{Garbage Collection}
  1855. \label{ch:gc}
  1856. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1857. \chapter{Functions}
  1858. \label{ch:functions}
  1859. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1860. \chapter{Lexically Scoped Functions}
  1861. \label{ch:lambdas}
  1862. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1863. \chapter{Mutable Data}
  1864. \label{ch:mutable-data}
  1865. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1866. \chapter{The Dynamic Type}
  1867. \label{ch:type-dynamic}
  1868. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1869. \chapter{Parametric Polymorphism}
  1870. \label{ch:parametric-polymorphism}
  1871. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1872. \chapter{High-level Optimization}
  1873. \label{ch:high-level-optimization}
  1874. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1875. \chapter{Appendix}
  1876. \section{Utility Functions}
  1877. \label{sec:utilities}
  1878. The utility functions described in this section can be found in the
  1879. \key{utilities.rkt} file.
  1880. The \key{assert} function displays the error message \key{msg} if the
  1881. Boolean \key{bool} is false.
  1882. \begin{lstlisting}
  1883. (define (assert msg bool) ...)
  1884. \end{lstlisting}
  1885. The interp-tests function takes a compiler name (a string), a
  1886. description of the passes, a test family name (a string), and a list of
  1887. test numbers, and runs the compiler passes and the interpreters to
  1888. check whether the passes are correct. The description of the passes is a
  1889. list with one entry per pass. An entry is a list with three things: a
  1890. string giving the name of the pass, the function that implements the
  1891. pass (a translator from AST to AST), and a function that implements
  1892. the interpreter (a function from AST to result value). This function
  1893. assumes that the subdirectory \key{tests} has a bunch of Scheme
  1894. programs whose names all start with the family name, followed by an
  1895. underscore and then the test number, ending in \key{.scm}. Also, for
  1896. each Scheme program there is a file with the same name except that
  1897. it ends with \key{.in} that provides the input for the Scheme program.
  1898. \begin{lstlisting}
  1899. (define (interp-tests name passes test-family test-nums) ...)
  1900. \end{lstlisting}
  1901. The compiler-tests function takes a compiler name (a string), a
  1902. description of the passes (see the comment for \key{interp-tests}), a
  1903. test family name (a string), and a list of test numbers (see the
  1904. comment for interp-tests), and runs the compiler to generate x86 (a
  1905. \key{.s} file) and then runs gcc to generate machine code. It runs
  1906. the machine code and checks that the output is 42.
  1907. \begin{lstlisting}
  1908. (define (compiler-tests name passes test-family test-nums) ...)
  1909. \end{lstlisting}
  1910. The compile-file function takes a description of the compiler passes
  1911. (see the comment for \key{interp-tests}) and returns a function that,
  1912. given a program file name (a string ending in \key{.scm}), applies all
  1913. of the passes and writes the output to a file whose name is the same
  1914. as the program file name but with \key{.scm} replaced with \key{.s}.
  1915. \begin{lstlisting}
  1916. (define (compile-file passes)
  1917. (lambda (prog-file-name) ...))
  1918. \end{lstlisting}
  1919. \bibliographystyle{plainnat}
  1920. \bibliography{all}
  1921. \end{document}
  1922. %% LocalWords: Dybvig Waddell Abdulaziz Ghuloum Dipanwita
  1923. %% LocalWords: Sarkar lcl Matz aa representable