
SCM Repository

[smlnj] Annotation of /sml/trunk/src/MLRISC/x86/mltree/x86.sml



Revision 1136

1 : jhr 1117 (* x86.sml
2 : monnier 247 *
3 :     * COPYRIGHT (c) 1998 Bell Laboratories.
4 : george 545 *
5 :     * This is a revised version that takes into account
6 :     * the extended x86 instruction set, and has better handling of
7 :     * non-standard types. I've factored out the integer/floating point
8 :     * comparison code and added optimizations for conditional moves.
9 :     * The latter generates SETcc and CMOVcc (Pentium Pro only) instructions.
10 :     * To avoid problems, I have tried to incorporate as many of
11 :     * Lal's original magic incantations as possible.
12 : monnier 247 *
13 : george 545 * Some changes:
14 :     *
15 :     * 1. REMU/REMS/REMT are now supported
16 :     * 2. COND is supported by generating SETcc and/or CMOVcc; this
17 :     * may require at least a Pentium II to work.
18 :     * 3. Division by a constant has been optimized. Division by
19 :     * a power of 2 generates SHRL or SARL.
20 :     * 4. Better addressing mode selection has been implemented. This should
21 :     * improve array indexing on SML/NJ.
22 :     * 5. Generate testl/testb instead of andl whenever appropriate. This
23 :     * is recommended by the Intel Optimization Guide and seems to improve
24 :     * boxity tests on SML/NJ.
25 : leunga 731 *
26 :     * More changes for floating point:
27 :     * A new mode is implemented which generates pseudo 3-address instructions
28 :     * for floating point. These instructions are register allocated the
29 :     * normal way, with the virtual registers mapped onto a set of pseudo
30 :     * %fp registers. These registers are then mapped onto the %st registers
31 :     * with a new postprocessing phase.
32 :     *
33 : george 545 * -- Allen
34 : monnier 247 *)
35 : george 545 local
36 :     val rewriteMemReg = true (* should we rewrite memRegs *)
37 : leunga 731 val enableFastFPMode = true (* set this to false to disable the mode *)
38 : george 545 in
39 :    
40 : monnier 247 functor X86
41 :     (structure X86Instr : X86INSTR
42 : leunga 797 structure MLTreeUtils : MLTREE_UTILS
43 : george 933 where T = X86Instr.T
44 : george 555 structure ExtensionComp : MLTREE_EXTENSION_COMP
45 : george 933 where I = X86Instr and T = X86Instr.T
46 : george 984 structure MLTreeStream : MLTREE_STREAM
47 :     where T = ExtensionComp.T
48 : george 545 datatype arch = Pentium | PentiumPro | PentiumII | PentiumIII
49 :     val arch : arch ref
50 : leunga 593 val cvti2f :
51 : leunga 815 {ty: X86Instr.T.ty,
52 :     src: X86Instr.operand,
53 :     (* source operand, guaranteed to be non-memory! *)
54 :     an: Annotations.annotations ref (* cluster annotations *)
55 :     } ->
56 : leunga 593 {instrs : X86Instr.instruction list,(* the instructions *)
57 :     tempMem: X86Instr.operand, (* temporary for CVTI2F *)
58 :     cleanup: X86Instr.instruction list (* cleanup code *)
59 :     }
60 : leunga 731 (* When the following flag is set, we allocate floating point registers
61 :     * directly on the floating point stack
62 :     *)
63 :     val fast_floating_point : bool ref
64 : george 545 ) : sig include MLTREECOMP
65 :     val rewriteMemReg : bool
66 :     end =
67 : monnier 247 struct
68 : leunga 775 structure I = X86Instr
69 :     structure T = I.T
70 : george 984 structure TS = ExtensionComp.TS
71 : george 545 structure C = I.C
72 :     structure Shuffle = Shuffle(I)
73 : monnier 247 structure W32 = Word32
74 : george 545 structure A = MLRiscAnnotations
75 : george 909 structure CFG = ExtensionComp.CFG
76 : george 889 structure CB = CellsBasis
77 : monnier 247
78 : george 984 type instrStream = (I.instruction,C.cellset,CFG.cfg) TS.stream
79 :     type mltreeStream = (T.stm,T.mlrisc list,CFG.cfg) TS.stream
80 : leunga 565
81 :     datatype kind = REAL | INTEGER
82 : george 545
83 :     structure Gen = MLTreeGen
84 :     (structure T = T
85 : jhr 1117 structure Cells = C
86 : george 545 val intTy = 32
87 :     val naturalWidths = [32]
88 :     datatype rep = SE | ZE | NEITHER
89 :     val rep = NEITHER
90 :     )
91 :    
92 : monnier 411 fun error msg = MLRiscErrorMsg.error("X86",msg)
93 : monnier 247
94 : george 545 (* Should we perform automatic MemReg translation?
95 :     * If this is on, we can avoid doing the RewritePseudo phase entirely.
96 :     *)
97 :     val rewriteMemReg = rewriteMemReg
98 : leunga 731
99 :     (* The following ranges are hardcoded *)
100 : leunga 744 fun isMemReg r = rewriteMemReg andalso
101 : george 889 let val r = CB.registerNum r
102 : leunga 744 in r >= 8 andalso r < 32
103 :     end
104 : leunga 731 fun isFMemReg r = if enableFastFPMode andalso !fast_floating_point
105 : george 889 then let val r = CB.registerNum r
106 : leunga 744 in r >= 8 andalso r < 32 end
107 : leunga 731 else true
108 : leunga 744 val isAnyFMemReg = List.exists (fn r =>
109 : george 889 let val r = CB.registerNum r
110 : leunga 744 in r >= 8 andalso r < 32 end
111 :     )
112 : monnier 247
113 : george 555 val ST0 = C.ST 0
114 :     val ST7 = C.ST 7
115 : leunga 797 val one = T.I.int_1
116 : george 555
117 : leunga 797 val opcodes8 = {INC=I.INCB,DEC=I.DECB,ADD=I.ADDB,SUB=I.SUBB,
118 :     NOT=I.NOTB,NEG=I.NEGB,
119 :     SHL=I.SHLB,SHR=I.SHRB,SAR=I.SARB,
120 :     OR=I.ORB,AND=I.ANDB,XOR=I.XORB}
121 :     val opcodes16 = {INC=I.INCW,DEC=I.DECW,ADD=I.ADDW,SUB=I.SUBW,
122 :     NOT=I.NOTW,NEG=I.NEGW,
123 :     SHL=I.SHLW,SHR=I.SHRW,SAR=I.SARW,
124 :     OR=I.ORW,AND=I.ANDW,XOR=I.XORW}
125 :     val opcodes32 = {INC=I.INCL,DEC=I.DECL,ADD=I.ADDL,SUB=I.SUBL,
126 :     NOT=I.NOTL,NEG=I.NEGL,
127 :     SHL=I.SHLL,SHR=I.SHRL,SAR=I.SARL,
128 :     OR=I.ORL,AND=I.ANDL,XOR=I.XORL}
129 :    
130 : george 545 (*
131 :     * The code generator
132 :     *)
133 : monnier 411 fun selectInstructions
134 : george 545 (instrStream as
135 : george 1003 TS.S.STREAM{emit=emitInstruction,defineLabel,entryLabel,pseudoOp,
136 :     annotation,getAnnotations,beginCluster,endCluster,exitBlock,comment,...}) =
137 :     let
138 :     val emit = emitInstruction o I.INSTR
139 :     exception EA
140 : monnier 411
141 : george 545 (* label where a trap is generated -- one per cluster *)
142 :     val trapLabel = ref (NONE: (I.instruction * Label.label) option)
143 : monnier 247
144 : leunga 731 (* flag floating point generation *)
145 :     val floatingPointUsed = ref false
146 :    
147 : george 545 (* effective address of an integer register *)
148 : leunga 731 fun IntReg r = if isMemReg r then I.MemReg r else I.Direct r
149 :     and RealReg r = if isFMemReg r then I.FDirect r else I.FPR r
150 : monnier 411
151 : george 545 (* Add an overflow trap *)
152 :     fun trap() =
153 : george 1136 let
154 :     val jmp =
155 : george 545 case !trapLabel of
156 : george 909 NONE => let val label = Label.label "trap" ()
157 : george 1136 val jmp =
158 :     I.ANNOTATION{i=I.jcc{cond=I.O,
159 :     opnd=I.ImmedLabel(T.LABEL label)},
160 :     a=MLRiscAnnotations.BRANCHPROB (Probability.unlikely)}
161 : george 545 in trapLabel := SOME(jmp, label); jmp end
162 :     | SOME(jmp, _) => jmp
163 : george 1003 in emitInstruction jmp end
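           (* A sketch of what trap() arranges (assuming the usual rendering
            * of I.JCC with cond=I.O): each ADDT/SUBT/MULT is followed by a
            * conditional jump on overflow to a single per-cluster label,
            * roughly
            *     addl  src, dst
            *     jo    trap        -- annotated BRANCHPROB unlikely
            * and the jump is cached in trapLabel, so the same instruction is
            * re-emitted at every overflow check in the cluster.
            *)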
164 : monnier 411
165 : george 545 val newReg = C.newReg
166 :     val newFreg = C.newFreg
167 : monnier 247
168 : leunga 731 fun fsize 32 = I.FP32
169 :     | fsize 64 = I.FP64
170 :     | fsize 80 = I.FP80
171 :     | fsize _ = error "fsize"
172 :    
173 : george 545 (* mark an expression with a list of annotations *)
174 : george 1009 fun mark'(i,[]) = emitInstruction(i)
175 : george 545 | mark'(i,a::an) = mark'(I.ANNOTATION{i=i,a=a},an)
176 : monnier 247
177 : george 545 (* annotate an expression and emit it *)
178 : george 1009 fun mark(i,an) = mark'(I.INSTR i,an)
179 : monnier 247
180 : george 1003 val emits = app emitInstruction
181 : leunga 731
182 : george 545 (* emit parallel copies for integers
183 :     * Translates parallel copies that involve memregs into
184 :     * individual copies.
185 :     *)
186 :     fun copy([], [], an) = ()
187 :     | copy(dst, src, an) =
188 :     let fun mvInstr{dst as I.MemReg rd, src as I.MemReg rs} =
189 : george 889 if CB.sameColor(rd,rs) then [] else
190 : george 545 let val tmpR = I.Direct(newReg())
191 : george 1003 in [I.move{mvOp=I.MOVL, src=src, dst=tmpR},
192 :     I.move{mvOp=I.MOVL, src=tmpR, dst=dst}]
193 : george 545 end
194 :     | mvInstr{dst=I.Direct rd, src=I.Direct rs} =
195 : george 889 if CB.sameColor(rd,rs) then []
196 : george 1009 else [I.COPY{k=CB.GP, sz=32, dst=[rd], src=[rs], tmp=NONE}]
197 : george 1003 | mvInstr{dst, src} = [I.move{mvOp=I.MOVL, src=src, dst=dst}]
198 : george 545 in
199 : leunga 731 emits (Shuffle.shuffle{mvInstr=mvInstr, ea=IntReg}
200 : leunga 744 {tmp=SOME(I.Direct(newReg())),
201 : george 545 dst=dst, src=src})
202 :     end
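           (* For illustration (a sketch, not the exact emitted text): since
            * x86 has no memory-to-memory move, a copy between two memRegs is
            * broken into two moves through a fresh temporary, roughly
            *     movl  <memReg rs>, tmpR
            *     movl  tmpR, <memReg rd>
            * while register-to-register copies of the same color are dropped
            * entirely.
            *)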
203 :    
204 :     (* conversions *)
205 :     val itow = Word.fromInt
206 :     val wtoi = Word.toInt
207 : george 761 fun toInt32 i = T.I.toInt32(32, i)
208 : george 545 val w32toi32 = Word32.toLargeIntX
209 :     val i32tow32 = Word32.fromLargeInt
210 : monnier 247
211 : george 545 (* One day, this is going to bite us when precision(LargeInt)>32 *)
212 :     fun wToInt32 w = Int32.fromLarge(Word32.toLargeIntX w)
213 : monnier 247
214 : george 545 (* some useful registers *)
215 :     val eax = I.Direct(C.eax)
216 :     val ecx = I.Direct(C.ecx)
217 :     val edx = I.Direct(C.edx)
218 : monnier 247
219 : leunga 775 fun immedLabel lab = I.ImmedLabel(T.LABEL lab)
220 : george 545
221 :     (* Is the expression zero? *)
222 : george 761 fun isZero(T.LI z) = T.I.isZero z
223 : george 545 | isZero(T.MARK(e,a)) = isZero e
224 :     | isZero _ = false
225 :     (* Does the expression set the zero bit?
226 :     * WARNING: we assume these things are not optimized out!
227 :     *)
228 :     fun setZeroBit(T.ANDB _) = true
229 :     | setZeroBit(T.ORB _) = true
230 :     | setZeroBit(T.XORB _) = true
231 :     | setZeroBit(T.SRA _) = true
232 :     | setZeroBit(T.SRL _) = true
233 :     | setZeroBit(T.SLL _) = true
234 : leunga 695 | setZeroBit(T.SUB _) = true
235 :     | setZeroBit(T.ADDT _) = true
236 :     | setZeroBit(T.SUBT _) = true
237 : george 545 | setZeroBit(T.MARK(e, _)) = setZeroBit e
238 :     | setZeroBit _ = false
239 : monnier 247
240 : leunga 695 fun setZeroBit2(T.ANDB _) = true
241 :     | setZeroBit2(T.ORB _) = true
242 :     | setZeroBit2(T.XORB _) = true
243 :     | setZeroBit2(T.SRA _) = true
244 :     | setZeroBit2(T.SRL _) = true
245 :     | setZeroBit2(T.SLL _) = true
246 :     | setZeroBit2(T.ADD(32, _, _)) = true (* can't use leal! *)
247 :     | setZeroBit2(T.SUB _) = true
248 :     | setZeroBit2(T.ADDT _) = true
249 :     | setZeroBit2(T.SUBT _) = true
250 :     | setZeroBit2(T.MARK(e, _)) = setZeroBit2 e
251 :     | setZeroBit2 _ = false
252 :    
253 : leunga 731 (* emit parallel copies for floating point
254 :     * Normal version.
255 :     *)
256 :     fun fcopy'(fty, [], [], _) = ()
257 :     | fcopy'(fty, dst as [_], src as [_], an) =
258 : george 1009 mark'(I.COPY{k=CB.FP, sz=fty, dst=dst,src=src,tmp=NONE}, an)
259 : leunga 731 | fcopy'(fty, dst, src, an) =
260 : george 1009 mark'(I.COPY{k=CB.FP, sz=fty, dst=dst,src=src,tmp=SOME(I.FDirect(newFreg()))}, an)
261 : monnier 247
262 : leunga 731 (* emit parallel copies for floating point.
263 :     * Fast version.
264 :     * Translates parallel copies that involve memregs into
265 :     * individual copies.
266 :     *)
267 :    
268 :     fun fcopy''(fty, [], [], _) = ()
269 :     | fcopy''(fty, dst, src, an) =
270 :     if true orelse isAnyFMemReg dst orelse isAnyFMemReg src then
271 :     let val fsize = fsize fty
272 : george 1003 fun mvInstr{dst, src} = [I.fmove{fsize=fsize, src=src, dst=dst}]
273 : leunga 731 in
274 :     emits (Shuffle.shuffle{mvInstr=mvInstr, ea=RealReg}
275 : leunga 744 {tmp=case dst of
276 : leunga 731 [_] => NONE
277 :     | _ => SOME(I.FPR(newReg())),
278 :     dst=dst, src=src})
279 :     end
280 :     else
281 : george 1009 mark'(I.COPY{k=CB.FP, sz=fty, dst=dst,
282 :     src=src,tmp=
283 : leunga 731 case dst of
284 :     [_] => NONE
285 :     | _ => SOME(I.FPR(newFreg()))}, an)
286 :    
287 :     fun fcopy x = if enableFastFPMode andalso !fast_floating_point
288 :     then fcopy'' x else fcopy' x
289 :    
290 : george 545 (* Translates MLTREE condition code to x86 condition code *)
291 :     fun cond T.LT = I.LT | cond T.LTU = I.B
292 :     | cond T.LE = I.LE | cond T.LEU = I.BE
293 :     | cond T.EQ = I.EQ | cond T.NE = I.NE
294 :     | cond T.GE = I.GE | cond T.GEU = I.AE
295 :     | cond T.GT = I.GT | cond T.GTU = I.A
296 : jhr 1119 | cond cc = error(concat["cond(", T.Basis.condToString cc, ")"])
297 : monnier 247
298 : leunga 815 fun zero dst = emit(I.BINARY{binOp=I.XORL, src=dst, dst=dst})
299 :    
300 : george 545 (* Move and annotate *)
301 :     fun move'(src as I.Direct s, dst as I.Direct d, an) =
302 : george 889 if CB.sameColor(s,d) then ()
303 : george 1009 else mark'(I.COPY{k=CB.GP, sz=32, dst=[d], src=[s], tmp=NONE}, an)
304 : leunga 815 | move'(I.Immed 0, dst as I.Direct d, an) =
305 :     mark(I.BINARY{binOp=I.XORL, src=dst, dst=dst}, an)
306 : george 545 | move'(src, dst, an) = mark(I.MOVE{mvOp=I.MOVL, src=src, dst=dst}, an)
307 : monnier 247
308 : george 545 (* Move only! *)
309 :     fun move(src, dst) = move'(src, dst, [])
310 : monnier 247
311 : george 545 val readonly = I.Region.readonly
312 : monnier 247
313 : george 545 (*
314 : george 761 * Compute an effective address.
315 : george 545 *)
316 : george 761 fun address(ea, mem) = let
317 : george 545 (* Keep building bigger and bigger effective address expressions
318 :     * The input is a list of trees
319 :     * b -- base
320 :     * i -- index
321 :     * s -- scale
322 :     * d -- immed displacement
323 :     *)
324 :     fun doEA([], b, i, s, d) = makeAddressingMode(b, i, s, d)
325 :     | doEA(t::trees, b, i, s, d) =
326 :     (case t of
327 : george 761 T.LI n => doEAImmed(trees, toInt32 n, b, i, s, d)
328 : leunga 775 | T.CONST _ => doEALabel(trees, t, b, i, s, d)
329 :     | T.LABEL _ => doEALabel(trees, t, b, i, s, d)
330 :     | T.LABEXP le => doEALabel(trees, le, b, i, s, d)
331 : george 545 | T.ADD(32, t1, t2 as T.REG(_,r)) =>
332 :     if isMemReg r then doEA(t2::t1::trees, b, i, s, d)
333 :     else doEA(t1::t2::trees, b, i, s, d)
334 :     | T.ADD(32, t1, t2) => doEA(t1::t2::trees, b, i, s, d)
335 :     | T.SUB(32, t1, T.LI n) =>
336 : george 761 doEA(t1::T.LI(T.I.NEG(32,n))::trees, b, i, s, d)
337 :     | T.SLL(32, t1, T.LI n) => let
338 :     val n = T.I.toInt(32, n)
339 :     in
340 :     case n
341 :     of 0 => displace(trees, t1, b, i, s, d)
342 :     | 1 => indexed(trees, t1, t, 1, b, i, s, d)
343 :     | 2 => indexed(trees, t1, t, 2, b, i, s, d)
344 :     | 3 => indexed(trees, t1, t, 3, b, i, s, d)
345 :     | _ => displace(trees, t, b, i, s, d)
346 :     end
347 : george 545 | t => displace(trees, t, b, i, s, d)
348 :     )
349 : monnier 247
350 : george 545 (* Add an immed constant *)
351 :     and doEAImmed(trees, 0, b, i, s, d) = doEA(trees, b, i, s, d)
352 :     | doEAImmed(trees, n, b, i, s, I.Immed m) =
353 : george 761 doEA(trees, b, i, s, I.Immed(n+m))
354 : george 545 | doEAImmed(trees, n, b, i, s, I.ImmedLabel le) =
355 : leunga 775 doEA(trees, b, i, s,
356 :     I.ImmedLabel(T.ADD(32,le,T.LI(T.I.fromInt32(32, n)))))
357 : george 545 | doEAImmed(trees, n, b, i, s, _) = error "doEAImmed"
358 : monnier 247
359 : george 545 (* Add a label expression *)
360 :     and doEALabel(trees, le, b, i, s, I.Immed 0) =
361 :     doEA(trees, b, i, s, I.ImmedLabel le)
362 :     | doEALabel(trees, le, b, i, s, I.Immed m) =
363 :     doEA(trees, b, i, s,
364 : leunga 775 I.ImmedLabel(T.ADD(32,le,T.LI(T.I.fromInt32(32, m))))
365 : george 545 handle Overflow => error "doEALabel: constant too large")
366 :     | doEALabel(trees, le, b, i, s, I.ImmedLabel le') =
367 : leunga 775 doEA(trees, b, i, s, I.ImmedLabel(T.ADD(32,le,le')))
368 : george 545 | doEALabel(trees, le, b, i, s, _) = error "doEALabel"
369 : monnier 247
370 : george 545 and makeAddressingMode(NONE, NONE, _, disp) = disp
371 :     | makeAddressingMode(SOME base, NONE, _, disp) =
372 :     I.Displace{base=base, disp=disp, mem=mem}
373 :     | makeAddressingMode(base, SOME index, scale, disp) =
374 : george 761 I.Indexed{base=base, index=index, scale=scale,
375 : george 545 disp=disp, mem=mem}
376 : monnier 247
377 :     (* generate code for the tree and ensure that the result is not in %esp *)
378 :     and exprNotEsp tree =
379 :     let val r = expr tree
380 : george 889 in if CB.sameColor(r, C.esp) then
381 : george 545 let val tmp = newReg()
382 :     in move(I.Direct r, I.Direct tmp); tmp end
383 :     else r
384 :     end
385 : monnier 247
386 : george 545 (* Add a base register *)
387 :     and displace(trees, t, NONE, i, s, d) = (* no base yet *)
388 :     doEA(trees, SOME(expr t), i, s, d)
389 :     | displace(trees, t, b as SOME base, NONE, _, d) = (* no index *)
390 :     (* make t the index, but make sure that it is not %esp! *)
391 :     let val i = expr t
392 : george 889 in if CB.sameColor(i, C.esp) then
393 : george 545 (* swap base and index *)
394 : george 889 if CB.sameColor(base, C.esp) then
395 : george 545 doEA(trees, SOME i, b, 0, d)
396 :     else (* base and index = %esp! *)
397 :     let val index = newReg()
398 :     in move(I.Direct i, I.Direct index);
399 :     doEA(trees, b, SOME index, 0, d)
400 :     end
401 :     else
402 :     doEA(trees, b, SOME i, 0, d)
403 :     end
404 :     | displace(trees, t, SOME base, i, s, d) = (* base and index *)
405 :     let val b = expr(T.ADD(32,T.REG(32,base),t))
406 :     in doEA(trees, SOME b, i, s, d) end
407 : monnier 247
408 : george 545 (* Add an indexed register *)
409 :     and indexed(trees, t, t0, scale, b, NONE, _, d) = (* no index yet *)
410 :     doEA(trees, b, SOME(exprNotEsp t), scale, d)
411 :     | indexed(trees, _, t0, _, NONE, i, s, d) = (* no base *)
412 :     doEA(trees, SOME(expr t0), i, s, d)
413 :     | indexed(trees, _, t0, _, SOME base, i, s, d) = (*base and index*)
414 :     let val b = expr(T.ADD(32, t0, T.REG(32, base)))
415 :     in doEA(trees, SOME b, i, s, d) end
416 :    
417 :     in case doEA([ea], NONE, NONE, 0, I.Immed 0) of
418 :     I.Immed _ => raise EA
419 :     | I.ImmedLabel le => I.LabelEA le
420 :     | ea => ea
421 :     end (* address *)
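           (* A small example of the addressing-mode folding above (the tree
            * and register names are illustrative):
            *     address(T.ADD(32, T.REG(32, base),
            *                       T.SLL(32, T.REG(32, idx), T.LI 2)), mem)
            * is reduced by doEA/displace/indexed to roughly
            *     I.Indexed{base=SOME b, index=i, scale=2,
            *               disp=I.Immed 0, mem=mem}
            * i.e. the base + index*4 form used for array indexing.
            *)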
422 : monnier 247
423 : george 545 (* reduce an expression into an operand *)
424 : george 761 and operand(T.LI i) = I.Immed(toInt32(i))
425 : leunga 775 | operand(x as (T.CONST _ | T.LABEL _)) = I.ImmedLabel x
426 :     | operand(T.LABEXP le) = I.ImmedLabel le
427 : george 545 | operand(T.REG(_,r)) = IntReg r
428 :     | operand(T.LOAD(32,ea,mem)) = address(ea, mem)
429 :     | operand(t) = I.Direct(expr t)
430 : monnier 247
431 : george 545 and moveToReg(opnd) =
432 :     let val dst = I.Direct(newReg())
433 :     in move(opnd, dst); dst
434 :     end
435 : monnier 247
436 : george 545 and reduceOpnd(I.Direct r) = r
437 :     | reduceOpnd opnd =
438 :     let val dst = newReg()
439 :     in move(opnd, I.Direct dst); dst
440 :     end
441 : monnier 247
442 : george 545 (* ensure that the operand is either an immed or register *)
443 :     and immedOrReg(opnd as I.Displace _) = moveToReg opnd
444 :     | immedOrReg(opnd as I.Indexed _) = moveToReg opnd
445 :     | immedOrReg(opnd as I.MemReg _) = moveToReg opnd
446 :     | immedOrReg(opnd as I.LabelEA _) = moveToReg opnd
447 :     | immedOrReg opnd = opnd
448 : monnier 247
449 : george 545 and isImmediate(I.Immed _) = true
450 :     | isImmediate(I.ImmedLabel _) = true
451 :     | isImmediate _ = false
452 : monnier 247
453 : george 545 and regOrMem opnd = if isImmediate opnd then moveToReg opnd else opnd
454 :    
455 :     and isMemOpnd opnd =
456 :     (case opnd of
457 :     I.Displace _ => true
458 :     | I.Indexed _ => true
459 :     | I.MemReg _ => true
460 :     | I.LabelEA _ => true
461 : george 555 | I.FDirect f => true
462 : george 545 | _ => false
463 :     )
464 :    
465 :     (*
466 :     * Compute an integer expression and put the result in
467 :     * the destination register rd.
468 :     *)
469 : george 889 and doExpr(exp, rd : CB.cell, an) =
470 : george 545 let val rdOpnd = IntReg rd
471 : monnier 247
472 : george 889 fun equalRd(I.Direct r) = CB.sameColor(r, rd)
473 :     | equalRd(I.MemReg r) = CB.sameColor(r, rd)
474 : george 545 | equalRd _ = false
475 : monnier 247
476 : george 545 (* Emit a binary operator. If the destination is
477 :     * a memReg, do something smarter.
478 :     *)
479 :     fun genBinary(binOp, opnd1, opnd2) =
480 :     if isMemReg rd andalso
481 :     (isMemOpnd opnd1 orelse isMemOpnd opnd2) orelse
482 :     equalRd(opnd2)
483 :     then
484 :     let val tmpR = newReg()
485 :     val tmp = I.Direct tmpR
486 :     in move(opnd1, tmp);
487 :     mark(I.BINARY{binOp=binOp, src=opnd2, dst=tmp}, an);
488 :     move(tmp, rdOpnd)
489 :     end
490 :     else
491 :     (move(opnd1, rdOpnd);
492 :     mark(I.BINARY{binOp=binOp, src=opnd2, dst=rdOpnd}, an)
493 :     )
494 : monnier 247
495 : george 545 (* Generate a binary operator; it may commute *)
496 :     fun binaryComm(binOp, e1, e2) =
497 :     let val (opnd1, opnd2) =
498 :     case (operand e1, operand e2) of
499 :     (x as I.Immed _, y) => (y, x)
500 :     | (x as I.ImmedLabel _, y) => (y, x)
501 :     | (x, y as I.Direct _) => (y, x)
502 :     | (x, y) => (x, y)
503 :     in genBinary(binOp, opnd1, opnd2)
504 :     end
505 :    
506 :     (* Generate a binary operator; non-commutative *)
507 :     fun binary(binOp, e1, e2) =
508 :     genBinary(binOp, operand e1, operand e2)
509 :    
510 :     (* Generate a unary operator *)
511 :     fun unary(unOp, e) =
512 :     let val opnd = operand e
513 :     in if isMemReg rd andalso isMemOpnd opnd then
514 :     let val tmp = I.Direct(newReg())
515 :     in move(opnd, tmp); move(tmp, rdOpnd)
516 :     end
517 :     else move(opnd, rdOpnd);
518 :     mark(I.UNARY{unOp=unOp, opnd=rdOpnd}, an)
519 :     end
520 :    
521 :     (* Generate shifts; the shift
522 :     * amount must be a constant or in %ecx *)
523 :     fun shift(opcode, e1, e2) =
524 :     let val (opnd1, opnd2) = (operand e1, operand e2)
525 :     in case opnd2 of
526 :     I.Immed _ => genBinary(opcode, opnd1, opnd2)
527 :     | _ =>
528 :     if equalRd(opnd2) then
529 :     let val tmpR = newReg()
530 :     val tmp = I.Direct tmpR
531 :     in move(opnd1, tmp);
532 :     move(opnd2, ecx);
533 :     mark(I.BINARY{binOp=opcode, src=ecx, dst=tmp},an);
534 :     move(tmp, rdOpnd)
535 :     end
536 :     else
537 :     (move(opnd1, rdOpnd);
538 :     move(opnd2, ecx);
539 :     mark(I.BINARY{binOp=opcode, src=ecx, dst=rdOpnd},an)
540 :     )
541 :     end
542 :    
543 :     (* Division or remainder: the dividend must be in the %edx:%eax pair *)
544 :     fun divrem(signed, overflow, e1, e2, resultReg) =
545 :     let val (opnd1, opnd2) = (operand e1, operand e2)
546 :     val _ = move(opnd1, eax)
547 : leunga 815 val oper = if signed then (emit(I.CDQ); I.IDIVL1)
548 :     else (zero edx; I.DIVL1)
549 : george 545 in mark(I.MULTDIV{multDivOp=oper, src=regOrMem opnd2},an);
550 :     move(resultReg, rdOpnd);
551 :     if overflow then trap() else ()
552 :     end
553 :    
554 :     (* Optimize the special case of division by a constant *)
555 : george 761 fun divide(signed, overflow, e1, e2 as T.LI n') = let
556 :     val n = toInt32 n'
557 :     val w = T.I.toWord32(32, n')
558 :     fun isPowerOf2 w = W32.andb((w - 0w1), w) = 0w0
559 : george 545 fun log2 n = (* n must be > 0!!! *)
560 :     let fun loop(0w1,pow) = pow
561 : george 761 | loop(w,pow) = loop(W32.>>(w, 0w1),pow+1)
562 : george 545 in loop(n,0) end
563 :     in if n > 1 andalso isPowerOf2 w then
564 : george 761 let val pow = T.LI(T.I.fromInt(32,log2 w))
565 : george 545 in if signed then
566 :     (* signed; simulate round towards zero *)
567 : george 909 let val label = Label.anon()
568 : george 545 val reg1 = expr e1
569 :     val opnd1 = I.Direct reg1
570 :     in if setZeroBit e1 then ()
571 :     else emit(I.CMPL{lsrc=opnd1, rsrc=I.Immed 0});
572 :     emit(I.JCC{cond=I.GE, opnd=immedLabel label});
573 :     emit(if n = 2 then
574 :     I.UNARY{unOp=I.INCL, opnd=opnd1}
575 :     else
576 :     I.BINARY{binOp=I.ADDL,
577 : george 761 src=I.Immed(n - 1),
578 : george 545 dst=opnd1});
579 :     defineLabel label;
580 :     shift(I.SARL, T.REG(32, reg1), pow)
581 :     end
582 :     else (* unsigned *)
583 :     shift(I.SHRL, e1, pow)
584 :     end
585 :     else
586 :     (* note the only way we can overflow is if
587 :     * n = 0 or n = -1
588 :     *)
589 :     divrem(signed, overflow andalso (n = ~1 orelse n = 0),
590 :     e1, e2, eax)
591 :     end
592 :     | divide(signed, overflow, e1, e2) =
593 :     divrem(signed, overflow, e1, e2, eax)
594 : monnier 247
595 : george 545 fun rem(signed, overflow, e1, e2) =
596 :     divrem(signed, overflow, e1, e2, edx)
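           (* A sketch of the signed power-of-two path above, for something
            * like T.DIVS(32, e1, T.LI 8) (register names are illustrative):
            *     cmpl  $0, r1        -- omitted when e1 itself set the flags
            *     jge   L
            *     addl  $7, r1        -- round towards zero for negative values
            * L:  sarl  $3, r1
            * The unsigned case is simply  shrl $3, r1.
            *)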
597 : leunga 815
598 :     (* Make sure the destination is a register *)
599 :     fun dstMustBeReg f =
600 :     if isMemReg rd then
601 :     let val tmpR = newReg()
602 :     val tmp = I.Direct(tmpR)
603 :     in f(tmpR, tmp); move(tmp, rdOpnd) end
604 :     else f(rd, rdOpnd)
605 :    
606 : george 545 (* unsigned integer multiplication *)
607 :     fun uMultiply(e1, e2) =
608 :     (* note e2 can never be (I.Direct edx) *)
609 :     (move(operand e1, eax);
610 : leunga 815 mark(I.MULTDIV{multDivOp=I.MULL1,
611 : george 545 src=regOrMem(operand e2)},an);
612 :     move(eax, rdOpnd)
613 :     )
614 :    
615 :     (* signed integer multiplication:
616 :     * The only forms that are allowed that also set the
617 :     * OF and CF flags are:
618 :     *
619 : leunga 815 * (dst) (src1) (src2)
620 : george 545 * imul r32, r32/m32, imm8
621 : leunga 815 * (dst) (src)
622 : george 545 * imul r32, imm8
623 :     * imul r32, imm32
624 : leunga 815 * imul r32, r32/m32
625 :     * Note: destination must be a register!
626 : george 545 *)
627 :     fun multiply(e1, e2) =
628 : leunga 815 dstMustBeReg(fn (rd, rdOpnd) =>
629 :     let fun doit(i1 as I.Immed _, i2 as I.Immed _) =
630 :     (move(i1, rdOpnd);
631 :     mark(I.BINARY{binOp=I.IMULL, dst=rdOpnd, src=i2},an))
632 :     | doit(rm, i2 as I.Immed _) = doit(i2, rm)
633 :     | doit(imm as I.Immed(i), rm) =
634 :     mark(I.MUL3{dst=rd, src1=rm, src2=i},an)
635 :     | doit(r1 as I.Direct _, r2 as I.Direct _) =
636 :     (move(r1, rdOpnd);
637 :     mark(I.BINARY{binOp=I.IMULL, dst=rdOpnd, src=r2},an))
638 :     | doit(r1 as I.Direct _, rm) =
639 :     (move(r1, rdOpnd);
640 :     mark(I.BINARY{binOp=I.IMULL, dst=rdOpnd, src=rm},an))
641 :     | doit(rm, r as I.Direct _) = doit(r, rm)
642 :     | doit(rm1, rm2) =
643 : george 545 if equalRd rm2 then
644 :     let val tmpR = newReg()
645 :     val tmp = I.Direct tmpR
646 :     in move(rm1, tmp);
647 : leunga 815 mark(I.BINARY{binOp=I.IMULL, dst=tmp, src=rm2},an);
648 :     move(tmp, rdOpnd)
649 : george 545 end
650 :     else
651 : leunga 815 (move(rm1, rdOpnd);
652 :     mark(I.BINARY{binOp=I.IMULL, dst=rdOpnd, src=rm2},an)
653 : george 545 )
654 :     val (opnd1, opnd2) = (operand e1, operand e2)
655 : leunga 815 in doit(opnd1, opnd2)
656 : george 545 end
657 : leunga 815 )
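           (* For example (a sketch of the cases above): MULS(32, e, LI 10)
            * with e not an immediate goes through the MUL3 case, i.e. the
            * three-operand form  imul $10, r/m32, r32 , which still sets
            * OF/CF so that MULT can trap on overflow; two non-immediate
            * operands fall back to the two-operand  imul r/m32, r32  via
            * I.BINARY with I.IMULL.
            *)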
658 : monnier 247
659 : george 545 (* Emit a load instruction; makes sure that the destination
660 :     * is a register
661 :     *)
662 :     fun genLoad(mvOp, ea, mem) =
663 :     dstMustBeReg(fn (_, dst) =>
664 :     mark(I.MOVE{mvOp=mvOp, src=address(ea, mem), dst=dst},an))
665 :    
666 :     (* Generate zero-extended loads *)
667 :     fun load8(ea, mem) = genLoad(I.MOVZBL, ea, mem)
668 :     fun load16(ea, mem) = genLoad(I.MOVZWL, ea, mem)
669 :     fun load8s(ea, mem) = genLoad(I.MOVSBL, ea, mem)
670 :     fun load16s(ea, mem) = genLoad(I.MOVSWL, ea, mem)
671 :     fun load32(ea, mem) = genLoad(I.MOVL, ea, mem)
672 :    
673 :     (* Generate sign-extended loads (load8s/load16s above) *)
674 :    
675 :     (* Generate setcc instruction:
676 :     * semantics: MV(rd, COND(_, T.CMP(ty, cc, t1, t2), yes, no))
677 : leunga 583 * Bug: if eax is either t1 or t2 then a problem will occur!!!
678 :     * Note that we have to use eax as the destination of the
679 :     * setcc because it only works on the registers
680 :     * %al, %bl, %cl, %dl and %[abcd]h. The last four registers
681 :     * are inaccessible in 32 bit mode.
682 : george 545 *)
683 :     fun setcc(ty, cc, t1, t2, yes, no) =
684 : leunga 583 let val (cc, yes, no) =
685 :     if yes > no then (cc, yes, no)
686 :     else (T.Basis.negateCond cc, no, yes)
687 : george 545 in (* Clear the destination first.
688 :     * This is because stupid SETcc
689 :     * only writes to the low order
690 :     * byte. That's Intel architecture, folks.
691 :     *)
692 : leunga 695 case (yes, no, cc) of
693 :     (1, 0, T.LT) =>
694 :     let val tmp = I.Direct(expr(T.SUB(32,t1,t2)))
695 :     in move(tmp, rdOpnd);
696 :     emit(I.BINARY{binOp=I.SHRL,src=I.Immed 31,dst=rdOpnd})
697 :     end
698 :     | (1, 0, T.GT) =>
699 :     let val tmp = I.Direct(expr(T.SUB(32,t1,t2)))
700 :     in emit(I.UNARY{unOp=I.NOTL,opnd=tmp});
701 :     move(tmp, rdOpnd);
702 :     emit(I.BINARY{binOp=I.SHRL,src=I.Immed 31,dst=rdOpnd})
703 :     end
704 :     | (1, 0, _) => (* normal case *)
705 : george 545 let val cc = cmp(true, ty, cc, t1, t2, [])
706 : leunga 583 in mark(I.SET{cond=cond cc, opnd=eax}, an);
707 : leunga 695 emit(I.BINARY{binOp=I.ANDL,src=I.Immed 255, dst=eax});
708 : leunga 583 move(eax, rdOpnd)
709 :     end
710 : leunga 695 | (C1, C2, _) =>
711 : george 545 (* general case;
712 : leunga 583 * from the Intel optimization guide p3-5
713 :     *)
714 : leunga 695 let val _ = zero eax;
715 :     val cc = cmp(true, ty, cc, t1, t2, [])
716 : leunga 583 in case C1-C2 of
717 :     D as (1 | 2 | 3 | 4 | 5 | 8 | 9) =>
718 :     let val (base,scale) =
719 :     case D of
720 :     1 => (NONE, 0)
721 :     | 2 => (NONE, 1)
722 :     | 3 => (SOME C.eax, 1)
723 :     | 4 => (NONE, 2)
724 :     | 5 => (SOME C.eax, 2)
725 :     | 8 => (NONE, 3)
726 :     | 9 => (SOME C.eax, 3)
727 :     val addr = I.Indexed{base=base,
728 :     index=C.eax,
729 :     scale=scale,
730 :     disp=I.Immed C2,
731 : george 545 mem=readonly}
732 : leunga 583 val tmpR = newReg()
733 :     val tmp = I.Direct tmpR
734 :     in emit(I.SET{cond=cond cc, opnd=eax});
735 :     mark(I.LEA{r32=tmpR, addr=addr}, an);
736 :     move(tmp, rdOpnd)
737 :     end
738 :     | D =>
739 :     (emit(I.SET{cond=cond(T.Basis.negateCond cc),
740 :     opnd=eax});
741 :     emit(I.UNARY{unOp=I.DECL, opnd=eax});
742 :     emit(I.BINARY{binOp=I.ANDL,
743 :     src=I.Immed D, dst=eax});
744 :     if C2 = 0 then
745 :     move(eax, rdOpnd)
746 :     else
747 :     let val tmpR = newReg()
748 :     val tmp = I.Direct tmpR
749 :     in mark(I.LEA{addr=
750 :     I.Displace{
751 :     base=C.eax,
752 :     disp=I.Immed C2,
753 :     mem=readonly},
754 :     r32=tmpR}, an);
755 :     move(tmp, rdOpnd)
756 :     end
757 :     )
758 :     end
759 : george 545 end (* setcc *)
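           (* A sketch of the general setcc case (the constants are
            * illustrative): for COND(32, CMP(...), LI 7, LI 4) we have C1=7,
            * C2=4, D=3, so the code above produces roughly
            *     xorl  %eax, %eax
            *     cmpl  ...
            *     set<cc> %al
            *     leal  4(%eax,%eax,2), tmp    -- tmp = 3*eax + 4, i.e. 7 or 4
            * For differences outside {1,2,3,4,5,8,9} it instead negates the
            * condition, decrements, masks with D and adds C2, computing the
            * same D*cc + C2 result without a scaled LEA.
            *)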
760 :    
761 :     (* Generate cmovcc instruction.
762 :     * on Pentium Pro and Pentium II only
763 :     *)
764 :     fun cmovcc(ty, cc, t1, t2, yes, no) =
765 :     let fun genCmov(dstR, _) =
766 :     let val _ = doExpr(no, dstR, []) (* false branch *)
767 :     val cc = cmp(true, ty, cc, t1, t2, []) (* compare *)
768 : leunga 1127 in mark(I.CMOV{cond=cond cc, src=regOrMem(operand yes),
769 :     dst=dstR}, an)
770 : george 545 end
771 :     in dstMustBeReg genCmov
772 :     end
773 :    
774 :     fun unknownExp exp = doExpr(Gen.compileRexp exp, rd, an)
775 : monnier 247
776 : leunga 606 (* Add n to rd *)
777 :     fun addN n =
778 :     let val n = operand n
779 :     val src = if isMemReg rd then immedOrReg n else n
780 :     in mark(I.BINARY{binOp=I.ADDL, src=src, dst=rdOpnd}, an) end
781 :    
782 : george 545 (* Generate addition *)
783 :     fun addition(e1, e2) =
784 : leunga 606 case e1 of
785 : george 889 T.REG(_,rs) => if CB.sameColor(rs,rd) then addN e2
786 : leunga 744 else addition1(e1,e2)
787 : leunga 606 | _ => addition1(e1,e2)
788 :     and addition1(e1, e2) =
789 :     case e2 of
790 : george 889 T.REG(_,rs) => if CB.sameColor(rs,rd) then addN e1
791 : leunga 744 else addition2(e1,e2)
792 : leunga 606 | _ => addition2(e1,e2)
793 :     and addition2(e1,e2) =
794 : george 545 (dstMustBeReg(fn (dstR, _) =>
795 :     mark(I.LEA{r32=dstR, addr=address(exp, readonly)}, an))
796 :     handle EA => binaryComm(I.ADDL, e1, e2))
797 : monnier 247
798 :    
799 : george 545 in case exp of
800 :     T.REG(_,rs) =>
801 :     if isMemReg rs andalso isMemReg rd then
802 :     let val tmp = I.Direct(newReg())
803 : leunga 731 in move'(I.MemReg rs, tmp, an);
804 : george 545 move'(tmp, rdOpnd, [])
805 :     end
806 :     else move'(IntReg rs, rdOpnd, an)
807 : george 761 | T.LI z => let
808 :     val n = toInt32 z
809 :     in
810 :     if n=0 then
811 :     (* As per Fermin's request, special optimization for rd := 0.
812 :     * Currently we don't bother with the size.
813 :     *)
814 :     if isMemReg rd then move'(I.Immed 0, rdOpnd, an)
815 :     else mark(I.BINARY{binOp=I.XORL, src=rdOpnd, dst=rdOpnd}, an)
816 :     else
817 :     move'(I.Immed(n), rdOpnd, an)
818 :     end
819 : leunga 775 | (T.CONST _ | T.LABEL _) =>
820 :     move'(I.ImmedLabel exp, rdOpnd, an)
821 :     | T.LABEXP le => move'(I.ImmedLabel le, rdOpnd, an)
822 : monnier 247
823 : george 545 (* 32-bit addition *)
824 : george 761 | T.ADD(32, e1, e2 as T.LI n) => let
825 :     val n = toInt32 n
826 :     in
827 :     case n
828 :     of 1 => unary(I.INCL, e1)
829 :     | ~1 => unary(I.DECL, e1)
830 :     | _ => addition(e1, e2)
831 :     end
832 :     | T.ADD(32, e1 as T.LI n, e2) => let
833 :     val n = toInt32 n
834 :     in
835 :     case n
836 :     of 1 => unary(I.INCL, e2)
837 :     | ~1 => unary(I.DECL, e2)
838 :     | _ => addition(e1, e2)
839 :     end
840 : george 545 | T.ADD(32, e1, e2) => addition(e1, e2)
841 : monnier 247
842 : leunga 695 (* 32-bit addition but set the flag!
843 :     * This is a stupid hack for now.
844 :     *)
845 : george 761 | T.ADD(0, e, e1 as T.LI n) => let
846 :     val n = T.I.toInt(32, n)
847 :     in
848 :     if n=1 then unary(I.INCL, e)
849 :     else if n = ~1 then unary(I.DECL, e)
850 :     else binaryComm(I.ADDL, e, e1)
851 :     end
852 :     | T.ADD(0, e1 as T.LI n, e) => let
853 :     val n = T.I.toInt(32, n)
854 :     in
855 :     if n=1 then unary(I.INCL, e)
856 :     else if n = ~1 then unary(I.DECL, e)
857 :     else binaryComm(I.ADDL, e1, e)
858 :     end
859 :     | T.ADD(0, e1, e2) => binaryComm(I.ADDL, e1, e2)
860 :    
861 : george 545 (* 32-bit subtraction *)
862 : george 761 | T.SUB(32, e1, e2 as T.LI n) => let
863 :     val n = toInt32 n
864 :     in
865 :     case n
866 :     of 0 => doExpr(e1, rd, an)
867 :     | 1 => unary(I.DECL, e1)
868 :     | ~1 => unary(I.INCL, e1)
869 :     | _ => binary(I.SUBL, e1, e2)
870 :     end
871 :     | T.SUB(32, e1 as T.LI n, e2) =>
872 :     if T.I.isZero n then unary(I.NEGL, e2)
873 :     else binary(I.SUBL, e1, e2)
874 : george 545 | T.SUB(32, e1, e2) => binary(I.SUBL, e1, e2)
875 : monnier 247
876 : george 545 | T.MULU(32, x, y) => uMultiply(x, y)
877 :     | T.DIVU(32, x, y) => divide(false, false, x, y)
878 :     | T.REMU(32, x, y) => rem(false, false, x, y)
879 : monnier 247
880 : george 545 | T.MULS(32, x, y) => multiply(x, y)
881 :     | T.DIVS(32, x, y) => divide(true, false, x, y)
882 :     | T.REMS(32, x, y) => rem(true, false, x, y)
883 : monnier 247
884 : george 545 | T.ADDT(32, x, y) => (binaryComm(I.ADDL, x, y); trap())
885 :     | T.SUBT(32, x, y) => (binary(I.SUBL, x, y); trap())
886 :     | T.MULT(32, x, y) => (multiply(x, y); trap())
887 :     | T.DIVT(32, x, y) => divide(true, true, x, y)
888 :     | T.REMT(32, x, y) => rem(true, true, x, y)
889 : monnier 247
890 : george 545 | T.ANDB(32, x, y) => binaryComm(I.ANDL, x, y)
891 :     | T.ORB(32, x, y) => binaryComm(I.ORL, x, y)
892 :     | T.XORB(32, x, y) => binaryComm(I.XORL, x, y)
893 :     | T.NOTB(32, x) => unary(I.NOTL, x)
894 : monnier 247
895 : george 545 | T.SRA(32, x, y) => shift(I.SARL, x, y)
896 :     | T.SRL(32, x, y) => shift(I.SHRL, x, y)
897 :     | T.SLL(32, x, y) => shift(I.SHLL, x, y)
898 : monnier 247
899 : george 545 | T.LOAD(8, ea, mem) => load8(ea, mem)
900 :     | T.LOAD(16, ea, mem) => load16(ea, mem)
901 :     | T.LOAD(32, ea, mem) => load32(ea, mem)
902 : monnier 498
903 : leunga 776 | T.SX(32,8,T.LOAD(8,ea,mem)) => load8s(ea, mem)
904 :     | T.SX(32,16,T.LOAD(16,ea,mem)) => load16s(ea, mem)
905 :     | T.ZX(32,8,T.LOAD(8,ea,mem)) => load8(ea, mem)
906 : leunga 779 | T.ZX(32,16,T.LOAD(16,ea,mem)) => load16(ea, mem)
907 : leunga 776
908 : leunga 1127 | T.COND(32, T.CMP(ty, cc, t1, t2), y as T.LI yes, n as T.LI no) =>
909 :     (case !arch of (* PentiumPro and higher has CMOVcc *)
910 :     Pentium => setcc(ty, cc, t1, t2, toInt32 yes, toInt32 no)
911 :     | _ => cmovcc(ty, cc, t1, t2, y, n)
912 :     )
913 : george 545 | T.COND(32, T.CMP(ty, cc, t1, t2), yes, no) =>
914 :     (case !arch of (* PentiumPro and higher has CMOVcc *)
915 :     Pentium => unknownExp exp
916 :     | _ => cmovcc(ty, cc, t1, t2, yes, no)
917 :     )
918 :     | T.LET(s,e) => (doStmt s; doExpr(e, rd, an))
919 :     | T.MARK(e, A.MARKREG f) => (f rd; doExpr(e, rd, an))
920 :     | T.MARK(e, a) => doExpr(e, rd, a::an)
921 :     | T.PRED(e,c) => doExpr(e, rd, A.CTRLUSE c::an)
922 : george 555 | T.REXT e =>
923 :     ExtensionComp.compileRext (reducer()) {e=e, rd=rd, an=an}
924 : george 545 (* simplify and try again *)
925 :     | exp => unknownExp exp
926 :     end (* doExpr *)
927 : monnier 247
928 : george 545 (* generate an expression and return its result register
929 :     * If rewritePseudo is on, the result is guaranteed to be in a
930 :     * non memReg register
931 :     *)
932 :     and expr(exp as T.REG(_, rd)) =
933 :     if isMemReg rd then genExpr exp else rd
934 :     | expr exp = genExpr exp
935 : monnier 247
936 : george 545 and genExpr exp =
937 :     let val rd = newReg() in doExpr(exp, rd, []); rd end
938 : monnier 247
939 : george 545 (* Compare an expression with zero.
940 :     * On the x86, TEST is superior to AND for doing the same thing,
941 :     * since it doesn't need to write out the result in a register.
942 :     *)
943 : leunga 695 and cmpWithZero(cc as (T.EQ | T.NE), e as T.ANDB(ty, a, b), an) =
944 : george 545 (case ty of
945 : leunga 695 8 => test(I.TESTB, a, b, an)
946 :     | 16 => test(I.TESTW, a, b, an)
947 :     | 32 => test(I.TESTL, a, b, an)
948 :     | _ => doExpr(e, newReg(), an);
949 :     cc)
950 :     | cmpWithZero(cc, e, an) =
951 :     let val e =
952 :     case e of (* hack to disable the lea optimization XXX *)
953 :     T.ADD(_, a, b) => T.ADD(0, a, b)
954 :     | e => e
955 :     in doExpr(e, newReg(), an); cc end
956 : monnier 247
957 : george 545 (* Emit a test.
958 :     * The available modes are
959 :     * r/m, r
960 :     * r/m, imm
961 :     * On selecting the right instruction: TESTL/TESTW/TESTB.
962 :     * When anding an operand with a constant
963 :     * that fits within 8 (or 16) bits, it is possible to use TESTB,
964 :     * (or TESTW) instead of TESTL. Because x86 is little endian,
965 :     * this works for memory operands too. However, with TESTB, it is
966 :     * not possible to use registers other than
967 :     * AL, CL, BL, DL, and AH, CH, BH, DH. So, the best way is to
968 :     * perform register allocation first, and if the operand registers
969 :     * are one of EAX, ECX, EBX, or EDX, replace the TESTL instruction
970 :     * by TESTB.
971 :     *)
972 : leunga 695 and test(testopcode, a, b, an) =
973 : george 545 let val (_, opnd1, opnd2) = commuteComparison(T.EQ, true, a, b)
974 :     (* translate r, r/m => r/m, r *)
975 :     val (opnd1, opnd2) =
976 :     if isMemOpnd opnd2 then (opnd2, opnd1) else (opnd1, opnd2)
977 : leunga 695 in mark(testopcode{lsrc=opnd1, rsrc=opnd2}, an)
978 : george 545 end
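           (* For example (sketch): a boxity test such as
            *     CMP(32, EQ, ANDB(32, e, LI 1), LI 0)
            * reaches cmpWithZero above and comes out as
            *     testl  $1, r/m
            * instead of an andl that would clobber a register with a result
            * nobody reads; 8- and 16-bit ANDs select TESTB and TESTW.
            *)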
979 : monnier 247
980 : leunga 815 (* %eflags <- src *)
981 :     and moveToEflags src =
982 : george 889 if CB.sameColor(src, C.eflags) then ()
983 : leunga 815 else (move(I.Direct src, eax); emit(I.LAHF))
984 :    
985 :     (* dst <- %eflags *)
986 :     and moveFromEflags dst =
987 : george 889 if CB.sameColor(dst, C.eflags) then ()
988 : leunga 815 else (emit(I.SAHF); move(eax, I.Direct dst))
989 :    
990 : george 545 (* generate a condition code expression
991 : leunga 744 * The zero is for setting the condition code!
992 :     * I have no idea why this is used.
993 :     *)
994 :     and doCCexpr(T.CMP(ty, cc, t1, t2), rd, an) =
995 : leunga 815 (cmp(false, ty, cc, t1, t2, an);
996 :     moveFromEflags rd
997 :     )
998 :     | doCCexpr(T.CC(cond,rs), rd, an) =
999 : george 889 if CB.sameColor(rs,C.eflags) orelse CB.sameColor(rd,C.eflags) then
1000 : leunga 815 (moveToEflags rs; moveFromEflags rd)
1001 : leunga 744 else
1002 : leunga 815 move'(I.Direct rs, I.Direct rd, an)
1003 : george 545 | doCCexpr(T.CCMARK(e,A.MARKREG f),rd,an) = (f rd; doCCexpr(e,rd,an))
1004 :     | doCCexpr(T.CCMARK(e,a), rd, an) = doCCexpr(e,rd,a::an)
1005 :     | doCCexpr(T.CCEXT e, cd, an) =
1006 : george 555 ExtensionComp.compileCCext (reducer()) {e=e, ccd=cd, an=an}
1007 : george 545 | doCCexpr _ = error "doCCexpr"
1008 : monnier 247
1009 : george 545 and ccExpr e = error "ccExpr"
1010 : monnier 247
1011 : george 545 (* generate a comparison and set the condition code;
1012 :     * return the actual cc used. If the flag swapable is true,
1013 :     * we can also reorder the operands.
1014 :     *)
1015 :     and cmp(swapable, ty, cc, t1, t2, an) =
1016 : leunga 695 (* == and <> can always be reordered *)
1017 :     let val swapable = swapable orelse cc = T.EQ orelse cc = T.NE
1018 :     in (* Sometimes the comparison is not necessary because
1019 :     * the bits are already set!
1020 :     *)
1021 :     if isZero t1 andalso setZeroBit2 t2 then
1022 :     if swapable then
1023 :     cmpWithZero(T.Basis.swapCond cc, t2, an)
1024 :     else (* can't reorder the comparison! *)
1025 :     genCmp(ty, false, cc, t1, t2, an)
1026 :     else if isZero t2 andalso setZeroBit2 t1 then
1027 :     cmpWithZero(cc, t1, an)
1028 :     else genCmp(ty, swapable, cc, t1, t2, an)
1029 :     end
1030 : monnier 247
1031 : george 545 (* Given a and b, the operands to a comparison (or test),
1032 :     * return the appropriate condition code and operands.
1033 :     * The available modes are:
1034 :     * r/m, imm
1035 :     * r/m, r
1036 :     * r, r/m
1037 :     *)
1038 :     and commuteComparison(cc, swapable, a, b) =
1039 :     let val (opnd1, opnd2) = (operand a, operand b)
1040 :     in (* Try to fold in the operands whenever possible *)
1041 :     case (isImmediate opnd1, isImmediate opnd2) of
1042 :     (true, true) => (cc, moveToReg opnd1, opnd2)
1043 :     | (true, false) =>
1044 :     if swapable then (T.Basis.swapCond cc, opnd2, opnd1)
1045 :     else (cc, moveToReg opnd1, opnd2)
1046 :     | (false, true) => (cc, opnd1, opnd2)
1047 :     | (false, false) =>
1048 :     (case (opnd1, opnd2) of
1049 :     (_, I.Direct _) => (cc, opnd1, opnd2)
1050 :     | (I.Direct _, _) => (cc, opnd1, opnd2)
1051 :     | (_, _) => (cc, moveToReg opnd1, opnd2)
1052 :     )
1053 :     end
1054 :    
1055 :     (* generate a real comparison; return the real cc used *)
1056 :     and genCmp(ty, swapable, cc, a, b, an) =
1057 :     let val (cc, opnd1, opnd2) = commuteComparison(cc, swapable, a, b)
1058 :     in mark(I.CMPL{lsrc=opnd1, rsrc=opnd2}, an); cc
1059 :     end
1060 : monnier 247
1061 : george 545 (* generate code for jumps *)
1062 : leunga 775 and jmp(lexp as T.LABEL lab, labs, an) =
1063 : george 545 mark(I.JMP(I.ImmedLabel lexp, [lab]), an)
1064 : leunga 775 | jmp(T.LABEXP le, labs, an) = mark(I.JMP(I.ImmedLabel le, labs), an)
1065 :     | jmp(ea, labs, an) = mark(I.JMP(operand ea, labs), an)
1066 : george 545
1067 :     (* convert mlrisc to cellset:
1068 :     *)
1069 :     and cellset mlrisc =
1070 : jhr 900 let val addCCReg = CB.CellSet.add
1071 : george 545 fun g([],acc) = acc
1072 :     | g(T.GPR(T.REG(_,r))::regs,acc) = g(regs,C.addReg(r,acc))
1073 :     | g(T.FPR(T.FREG(_,f))::regs,acc) = g(regs,C.addFreg(f,acc))
1074 :     | g(T.CCR(T.CC(_,cc))::regs,acc) = g(regs,addCCReg(cc,acc))
1075 :     | g(T.CCR(T.FCC(_,cc))::regs,acc) = g(regs,addCCReg(cc,acc))
1076 :     | g(_::regs, acc) = g(regs, acc)
1077 :     in g(mlrisc, C.empty) end
1078 :    
1079 :     (* generate code for calls *)
1080 : blume 839 and call(ea, flow, def, use, mem, cutsTo, an, pops) =
1081 : leunga 815 let fun return(set, []) = set
1082 :     | return(set, a::an) =
1083 :     case #peek A.RETURN_ARG a of
1084 : jhr 900 SOME r => return(CB.CellSet.add(r, set), an)
1085 : leunga 815 | NONE => return(set, an)
1086 : blume 839 in
1087 :     mark(I.CALL{opnd=operand ea,defs=cellset(def),uses=cellset(use),
1088 :     return=return(C.empty,an),cutsTo=cutsTo,mem=mem,
1089 :     pops=pops},an)
1090 : leunga 815 end
1091 : george 545
1092 : leunga 815 (* generate code for integer stores; first move data to %eax
1093 :     * This is mainly because we can't allocate to registers like
1094 :     * ah, dl, dx etc.
1095 :     *)
1096 :     and genStore(mvOp, ea, d, mem, an) =
1097 :     let val src =
1098 : george 545 case immedOrReg(operand d) of
1099 :     src as I.Direct r =>
1100 : george 889 if CB.sameColor(r,C.eax)
1101 : leunga 744 then src else (move(src, eax); eax)
1102 : george 545 | src => src
1103 : leunga 815 in mark(I.MOVE{mvOp=mvOp, src=src, dst=address(ea,mem)},an)
1104 : george 545 end
1105 : leunga 815
1106 :     (* generate code for 8-bit integer stores *)
1107 :     (* movb has to use %eax as source. Stupid x86! *)
1108 :     and store8(ea, d, mem, an) = genStore(I.MOVB, ea, d, mem, an)
1109 : blume 818 and store16(ea, d, mem, an) =
1110 :     mark(I.MOVE{mvOp=I.MOVW, src=immedOrReg(operand d), dst=address(ea, mem)}, an)
1111 : george 545 and store32(ea, d, mem, an) =
1112 :     move'(immedOrReg(operand d), address(ea, mem), an)
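           (* Sketch: an 8-bit store forces the value into %eax first, since
            * movb needs a byte-addressable source register and the allocator
            * here never hands one out, so store8 emits roughly
            *     movl  src, %eax
            *     movb  %al, dst
            * while 16- and 32-bit stores move the immediate or register
            * operand directly.
            *)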
1113 :    
1114 :     (* generate code for branching *)
1115 :     and branch(T.CMP(ty, cc, t1, t2), lab, an) =
1116 :     (* allow reordering of operands *)
1117 :     let val cc = cmp(true, ty, cc, t1, t2, [])
1118 :     in mark(I.JCC{cond=cond cc, opnd=immedLabel lab}, an) end
1119 :     | branch(T.FCMP(fty, fcc, t1, t2), lab, an) =
1120 :     fbranch(fty, fcc, t1, t2, lab, an)
1121 :     | branch(ccexp, lab, an) =
1122 : leunga 744 (doCCexpr(ccexp, C.eflags, []);
1123 : george 545 mark(I.JCC{cond=cond(Gen.condOf ccexp), opnd=immedLabel lab}, an)
1124 :     )
1125 :    
1126 :     (* generate code for floating point compare and branch *)
1127 :     and fbranch(fty, fcc, t1, t2, lab, an) =
1128 : leunga 731 let fun ignoreOrder (T.FREG _) = true
1129 :     | ignoreOrder (T.FLOAD _) = true
1130 :     | ignoreOrder (T.FMARK(e,_)) = ignoreOrder e
1131 :     | ignoreOrder _ = false
1132 :    
1133 :     fun compare'() = (* Sethi-Ullman style *)
1134 :     (if ignoreOrder t1 orelse ignoreOrder t2 then
1135 :     (reduceFexp(fty, t2, []); reduceFexp(fty, t1, []))
1136 :     else (reduceFexp(fty, t1, []); reduceFexp(fty, t2, []);
1137 :     emit(I.FXCH{opnd=C.ST(1)}));
1138 :     emit(I.FUCOMPP);
1139 :     fcc
1140 :     )
1141 :    
1142 :     fun compare''() =
1143 :     (* direct style *)
1144 :     (* Try to make lsrc the memory operand *)
1145 :     let val lsrc = foperand(fty, t1)
1146 :     val rsrc = foperand(fty, t2)
1147 :     val fsize = fsize fty
1148 :     fun cmp(lsrc, rsrc, fcc) =
1149 :     (emit(I.FCMP{fsize=fsize,lsrc=lsrc,rsrc=rsrc}); fcc)
1150 :     in case (lsrc, rsrc) of
1151 :     (I.FPR _, I.FPR _) => cmp(lsrc, rsrc, fcc)
1152 :     | (I.FPR _, mem) => cmp(mem,lsrc,T.Basis.swapFcond fcc)
1153 :     | (mem, I.FPR _) => cmp(lsrc, rsrc, fcc)
1154 :     | (lsrc, rsrc) => (* can't be both memory! *)
1155 :     let val ftmpR = newFreg()
1156 :     val ftmp = I.FPR ftmpR
1157 :     in emit(I.FMOVE{fsize=fsize,src=rsrc,dst=ftmp});
1158 :     cmp(lsrc, ftmp, fcc)
1159 :     end
1160 :     end
1161 :    
1162 :     fun compare() =
1163 :     if enableFastFPMode andalso !fast_floating_point
1164 :     then compare''() else compare'()
1165 :    
1166 : george 545 fun andil i = emit(I.BINARY{binOp=I.ANDL,src=I.Immed(i),dst=eax})
1167 : leunga 585 fun testil i = emit(I.TESTL{lsrc=eax,rsrc=I.Immed(i)})
1168 : george 545 fun xoril i = emit(I.BINARY{binOp=I.XORL,src=I.Immed(i),dst=eax})
1169 :     fun cmpil i = emit(I.CMPL{rsrc=I.Immed(i), lsrc=eax})
1170 :     fun j(cc, lab) = mark(I.JCC{cond=cc, opnd=immedLabel lab},an)
1171 :     fun sahf() = emit(I.SAHF)
1172 : leunga 731 fun branch(fcc) =
1173 : george 545 case fcc
1174 :     of T.== => (andil 0x4400; xoril 0x4000; j(I.EQ, lab))
1175 :     | T.?<> => (andil 0x4400; xoril 0x4000; j(I.NE, lab))
1176 :     | T.? => (sahf(); j(I.P,lab))
1177 :     | T.<=> => (sahf(); j(I.NP,lab))
1178 : leunga 585 | T.> => (testil 0x4500; j(I.EQ,lab))
1179 :     | T.?<= => (testil 0x4500; j(I.NE,lab))
1180 :     | T.>= => (testil 0x500; j(I.EQ,lab))
1181 :     | T.?< => (testil 0x500; j(I.NE,lab))
1182 : george 545 | T.< => (andil 0x4500; cmpil 0x100; j(I.EQ,lab))
1183 :     | T.?>= => (andil 0x4500; cmpil 0x100; j(I.NE,lab))
1184 :     | T.<= => (andil 0x4100; cmpil 0x100; j(I.EQ,lab);
1185 :     cmpil 0x4000; j(I.EQ,lab))
1186 : leunga 585 | T.?> => (sahf(); j(I.P,lab); testil 0x4100; j(I.EQ,lab))
1187 :     | T.<> => (testil 0x4400; j(I.EQ,lab))
1188 :     | T.?= => (testil 0x4400; j(I.NE,lab))
1189 : jhr 1119 | _ => error(concat[
1190 :     "fbranch(", T.Basis.fcondToString fcc, ")"
1191 :     ])
1192 : george 545 (*esac*)
1193 : leunga 731 val fcc = compare()
1194 :     in emit I.FNSTSW;
1195 :     branch(fcc)
1196 : monnier 411 end
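           (* The magic masks above test x87 condition-code bits after the
            * FNSTSW (a sketch of the standard status-word layout):
            *     C0 = 0x0100, C2 = 0x0400, C3 = 0x4000
            * so 0x4500 = C3|C2|C0 and 0x4400 = C3|C2. For instance T.> tests
            * 0x4500 and branches on EQ, i.e. "greater and ordered" holds
            * exactly when C0, C2 and C3 are all clear, while T.? only needs
            * the parity flag (C2) after SAHF.
            *)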
1197 : monnier 247
1198 : leunga 731 (*========================================================
1199 :     * Floating point code generation starts here.
1200 :     * Some generic fp routines first.
1201 :     *========================================================*)
1202 :    
1203 :     (* Can this tree be folded into the src operand of a floating point
1204 :     * operation?
1205 :     *)
1206 :     and foldableFexp(T.FREG _) = true
1207 :     | foldableFexp(T.FLOAD _) = true
1208 :     | foldableFexp(T.CVTI2F(_, (16 | 32), _)) = true
1209 :     | foldableFexp(T.CVTF2F(_, _, t)) = foldableFexp t
1210 :     | foldableFexp(T.FMARK(t, _)) = foldableFexp t
1211 :     | foldableFexp _ = false
1212 :    
1213 :     (* Move integer e of size ty into a memory location.
1214 :     * Returns a quadruple:
1215 :     * (INTEGER, return ty, effective address of memory location, cleanup code)
1216 :     *)
1217 :     and convertIntToFloat(ty, e) =
1218 :     let val opnd = operand e
1219 :     in if isMemOpnd opnd andalso (ty = 16 orelse ty = 32)
1220 :     then (INTEGER, ty, opnd, [])
1221 :     else
1222 : leunga 815 let val {instrs, tempMem, cleanup} =
1223 :     cvti2f{ty=ty, src=opnd, an=getAnnotations()}
1224 : leunga 731 in emits instrs;
1225 :     (INTEGER, 32, tempMem, cleanup)
1226 :     end
1227 :     end
1228 :    
1229 :     (*========================================================
1230 :     * Sethi-Ullman based floating point code generation as
1231 :     * implemented by Lal
1232 :     *========================================================*)
1233 :    
1234 : george 545 and fld(32, opnd) = I.FLDS opnd
1235 :     | fld(64, opnd) = I.FLDL opnd
1236 : george 555 | fld(80, opnd) = I.FLDT opnd
1237 : george 545 | fld _ = error "fld"
1238 :    
1239 : leunga 565 and fild(16, opnd) = I.FILD opnd
1240 :     | fild(32, opnd) = I.FILDL opnd
1241 :     | fild(64, opnd) = I.FILDLL opnd
1242 :     | fild _ = error "fild"
1243 :    
1244 :     and fxld(INTEGER, ty, opnd) = fild(ty, opnd)
1245 :     | fxld(REAL, fty, opnd) = fld(fty, opnd)
1246 :    
1247 : george 545 and fstp(32, opnd) = I.FSTPS opnd
1248 :     | fstp(64, opnd) = I.FSTPL opnd
1249 : george 555 | fstp(80, opnd) = I.FSTPT opnd
1250 : george 545 | fstp _ = error "fstp"
1251 :    
1252 :     (* generate code for floating point stores *)
1253 : leunga 731 and fstore'(fty, ea, d, mem, an) =
1254 : george 545 (case d of
1255 :     T.FREG(fty, fs) => emit(fld(fty, I.FDirect fs))
1256 :     | _ => reduceFexp(fty, d, []);
1257 :     mark(fstp(fty, address(ea, mem)), an)
1258 :     )
1259 :    
1260 : leunga 731 (* generate code for floating point loads *)
1261 :     and fload'(fty, ea, mem, fd, an) =
1262 :     let val ea = address(ea, mem)
1263 :     in mark(fld(fty, ea), an);
1264 : george 889 if CB.sameColor(fd,ST0) then ()
1265 : leunga 744 else emit(fstp(fty, I.FDirect fd))
1266 : leunga 731 end
1267 :    
1268 :     and fexpr' e = (reduceFexp(64, e, []); C.ST(0))
1269 : george 545
1270 :     (* generate floating point expression and put the result in fd *)
1271 : leunga 731 and doFexpr'(fty, T.FREG(_, fs), fd, an) =
1272 : george 889 (if CB.sameColor(fs,fd) then ()
1273 : george 1009 else mark'(I.COPY{k=CB.FP, sz=64, dst=[fd], src=[fs], tmp=NONE}, an)
1274 : george 545 )
1275 : leunga 731 | doFexpr'(_, T.FLOAD(fty, ea, mem), fd, an) =
1276 :     fload'(fty, ea, mem, fd, an)
1277 :     | doFexpr'(fty, T.FEXT fexp, fd, an) =
1278 :     (ExtensionComp.compileFext (reducer()) {e=fexp, fd=fd, an=an};
1279 : george 889 if CB.sameColor(fd,ST0) then () else emit(fstp(fty, I.FDirect fd))
1280 : leunga 731 )
1281 :     | doFexpr'(fty, e, fd, an) =
1282 : george 545 (reduceFexp(fty, e, []);
1283 : george 889 if CB.sameColor(fd,ST0) then ()
1284 : leunga 744 else mark(fstp(fty, I.FDirect fd), an)
1285 : george 545 )
1286 :    
1287 :     (*
1288 :     * Generate floating point expression using Sethi-Ullman's scheme:
1289 :     * This function evaluates a floating point expression,
1290 :     * and puts the result in %ST(0).
1291 :     *)
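           (* A small illustration of the Sethi-Ullman labelling used below
            * (a sketch): a leaf that must be loaded gets label 1, a foldable
            * operand gets 0, and a binary node gets max(n1,n2), or n1+1 when
            * both subtrees have equal labels. So FADD(FMUL(a,b), FMUL(c,d))
            * labels both FMULs 1 and the FADD 2: one product stays on the
            * fp stack while the other subtree is evaluated, and a temporary
            * is spilled only when both labels exceed 7.
            *)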
1292 :     and reduceFexp(fty, fexp, an) =
1293 : george 555 let val ST = I.ST(C.ST 0)
1294 :     val ST1 = I.ST(C.ST 1)
1295 : leunga 593 val cleanupCode = ref [] : I.instruction list ref
1296 : george 545
1297 : leunga 565 datatype su_tree =
1298 :     LEAF of int * T.fexp * ans
1299 :     | BINARY of int * T.fty * fbinop * su_tree * su_tree * ans
1300 :     | UNARY of int * T.fty * I.funOp * su_tree * ans
1301 :     and fbinop = FADD | FSUB | FMUL | FDIV
1302 :     | FIADD | FISUB | FIMUL | FIDIV
1303 :     withtype ans = Annotations.annotations
1304 : monnier 247
1305 : leunga 565 fun label(LEAF(n, _, _)) = n
1306 :     | label(BINARY(n, _, _, _, _, _)) = n
1307 :     | label(UNARY(n, _, _, _, _)) = n
1308 : george 545
1309 : leunga 565 fun annotate(LEAF(n, x, an), a) = LEAF(n,x,a::an)
1310 :     | annotate(BINARY(n,t,b,x,y,an), a) = BINARY(n,t,b,x,y,a::an)
1311 :     | annotate(UNARY(n,t,u,x,an), a) = UNARY(n,t,u,x,a::an)
1312 : george 545
1313 : leunga 565 (* Generate expression tree with Sethi-Ullman numbers *)
1314 :     fun su(e as T.FREG _) = LEAF(1, e, [])
1315 :     | su(e as T.FLOAD _) = LEAF(1, e, [])
1316 :     | su(e as T.CVTI2F _) = LEAF(1, e, [])
1317 :     | su(T.CVTF2F(_, _, t)) = su t
1318 :     | su(T.FMARK(t, a)) = annotate(su t, a)
1319 :     | su(T.FABS(fty, t)) = suUnary(fty, I.FABS, t)
1320 :     | su(T.FNEG(fty, t)) = suUnary(fty, I.FCHS, t)
1321 :     | su(T.FSQRT(fty, t)) = suUnary(fty, I.FSQRT, t)
1322 :     | su(T.FADD(fty, t1, t2)) = suComBinary(fty,FADD,FIADD,t1,t2)
1323 :     | su(T.FMUL(fty, t1, t2)) = suComBinary(fty,FMUL,FIMUL,t1,t2)
1324 :     | su(T.FSUB(fty, t1, t2)) = suBinary(fty,FSUB,FISUB,t1,t2)
1325 :     | su(T.FDIV(fty, t1, t2)) = suBinary(fty,FDIV,FIDIV,t1,t2)
1326 :     | su _ = error "su"
1327 :    
1328 :     (* Try to fold in the memory operand or integer conversion *)
1329 :     and suFold(e as T.FREG _) = (LEAF(0, e, []), false)
1330 :     | suFold(e as T.FLOAD _) = (LEAF(0, e, []), false)
1331 :     | suFold(e as T.CVTI2F(_,(16 | 32),_)) = (LEAF(0, e, []), true)
1332 :     | suFold(T.CVTF2F(_, _, t)) = suFold t
1333 :     | suFold(T.FMARK(t, a)) =
1334 :     let val (t, integer) = suFold t
1335 :     in (annotate(t, a), integer) end
1336 :     | suFold e = (su e, false)
1337 :    
1338 :     (* Form unary tree *)
1339 :     and suUnary(fty, funary, t) =
1340 :     let val t = su t
1341 :     in UNARY(label t, fty, funary, t, [])
1342 : george 545 end
1343 : leunga 565
1344 :     (* Form binary tree *)
1345 :     and suBinary(fty, binop, ibinop, t1, t2) =
1346 :     let val t1 = su t1
1347 :     val (t2, integer) = suFold t2
1348 :     val n1 = label t1
1349 :     val n2 = label t2
1350 :     val n = if n1=n2 then n1+1 else Int.max(n1,n2)
1351 :     val myOp = if integer then ibinop else binop
1352 :     in BINARY(n, fty, myOp, t1, t2, [])
1353 : george 545 end
1354 : george 555
1355 : leunga 565 (* Try to fold in the operand if possible.
1356 :     * This only applies to commutative operations.
1357 :     *)
1358 :     and suComBinary(fty, binop, ibinop, t1, t2) =
1359 : leunga 731 let val (t1, t2) = if foldableFexp t2
1360 :     then (t1, t2) else (t2, t1)
1361 : leunga 565 in suBinary(fty, binop, ibinop, t1, t2) end
1362 :    
1363 :     and sameTree(LEAF(_, T.FREG(t1,f1), []),
1364 : leunga 744 LEAF(_, T.FREG(t2,f2), [])) =
1365 : george 889 t1 = t2 andalso CB.sameColor(f1,f2)
1366 : leunga 565 | sameTree _ = false
1367 :    
1368 :     (* Traverse tree and generate code *)
1369 :     fun gencode(LEAF(_, t, an)) = mark(fxld(leafEA t), an)
1370 :     | gencode(BINARY(_, _, binop, x, t2 as LEAF(0, y, a1), a2)) =
1371 :     let val _ = gencode x
1372 :     val (_, fty, src) = leafEA y
1373 :     fun gen(code) = mark(code, a1 @ a2)
1374 :     fun binary(oper32, oper64) =
1375 :     if sameTree(x, t2) then
1376 :     gen(I.FBINARY{binOp=oper64, src=ST, dst=ST})
1377 : george 555 else
1378 :     let val oper =
1379 : leunga 565 if isMemOpnd src then
1380 :     case fty of
1381 :     32 => oper32
1382 :     | 64 => oper64
1383 :     | _ => error "gencode: BINARY"
1384 :     else oper64
1385 :     in gen(I.FBINARY{binOp=oper, src=src, dst=ST}) end
1386 :     fun ibinary(oper16, oper32) =
1387 :     let val oper = case fty of
1388 :     16 => oper16
1389 :     | 32 => oper32
1390 :     | _ => error "gencode: IBINARY"
1391 :     in gen(I.FIBINARY{binOp=oper, src=src}) end
1392 :     in case binop of
1393 :     FADD => binary(I.FADDS, I.FADDL)
1394 :     | FSUB => binary(I.FSUBS, I.FSUBL)
1395 :     | FMUL => binary(I.FMULS, I.FMULL)
1396 :     | FDIV => binary(I.FDIVS, I.FDIVL)
1397 :     | FIADD => ibinary(I.FIADDS, I.FIADDL)
1398 :     | FISUB => ibinary(I.FISUBS, I.FISUBL)
1399 :     | FIMUL => ibinary(I.FIMULS, I.FIMULL)
1400 :     | FIDIV => ibinary(I.FIDIVS, I.FIDIVL)
1401 :     end
1402 :     | gencode(BINARY(_, fty, binop, t1, t2, an)) =
1403 :     let fun doit(t1, t2, oper, operP, operRP) =
1404 :     let (* oper[P] => ST(1) := ST oper ST(1); [pop]
1405 :     * operR[P] => ST(1) := ST(1) oper ST; [pop]
1406 :     *)
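                             (* Example, using the semantics noted above:
                              * for binop = FSUB with label t1 < label t2 we
                              * emit t2 first, then t1, so %ST = t1 and
                              * %ST(1) = t2; FSUBP (the operP case) computes
                              * ST(1) := ST - ST(1) = t1 - t2 and pops,
                              * leaving the difference on top of the stack.
                              *)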
1407 :     val n1 = label t1
1408 :     val n2 = label t2
1409 :     in if n1 < n2 andalso n1 <= 7 then
1410 :     (gencode t2;
1411 :     gencode t1;
1412 :     mark(I.FBINARY{binOp=operP, src=ST, dst=ST1}, an))
1413 :     else if n2 <= n1 andalso n2 <= 7 then
1414 :     (gencode t1;
1415 :     gencode t2;
1416 :     mark(I.FBINARY{binOp=operRP, src=ST, dst=ST1}, an))
1417 :     else
1418 :     let (* both labels > 7 *)
1419 :     val fs = I.FDirect(newFreg())
1420 :     in gencode t2;
1421 :     emit(fstp(fty, fs));
1422 :     gencode t1;
1423 :     mark(I.FBINARY{binOp=oper, src=fs, dst=ST}, an)
1424 :     end
1425 :     end
1426 :     in case binop of
1427 :     FADD => doit(t1,t2,I.FADDL,I.FADDP,I.FADDP)
1428 :     | FMUL => doit(t1,t2,I.FMULL,I.FMULP,I.FMULP)
1429 :     | FSUB => doit(t1,t2,I.FSUBL,I.FSUBP,I.FSUBRP)
1430 :     | FDIV => doit(t1,t2,I.FDIVL,I.FDIVP,I.FDIVRP)
1431 : george 545 | _ => error "gencode.BINARY"
1432 :     end
1433 : leunga 565 | gencode(UNARY(_, _, unaryOp, su, an)) =
1434 :     (gencode(su); mark(I.FUNARY(unaryOp),an))
1435 :    
1436 :     (* Generate code for a leaf.
1437 :     * Returns the type and an effective address
1438 :     *)
1439 :     and leafEA(T.FREG(fty, f)) = (REAL, fty, I.FDirect f)
1440 :     | leafEA(T.FLOAD(fty, ea, mem)) = (REAL, fty, address(ea, mem))
1441 : leunga 593 | leafEA(T.CVTI2F(_, 32, t)) = int2real(32, t)
1442 :     | leafEA(T.CVTI2F(_, 16, t)) = int2real(16, t)
1443 :     | leafEA(T.CVTI2F(_, 8, t)) = int2real(8, t)
1444 : leunga 565 | leafEA _ = error "leafEA"
1445 :    
1446 : leunga 731 and int2real(ty, e) =
1447 :     let val (_, ty, ea, cleanup) = convertIntToFloat(ty, e)
1448 :     in cleanupCode := !cleanupCode @ cleanup;
1449 :     (INTEGER, ty, ea)
1450 : george 545 end
1451 : leunga 731
1452 :     in gencode(su fexp);
1453 :     emits(!cleanupCode)
1454 : george 545 end (*reduceFexp*)
1455 : leunga 731
1456 :     (*========================================================
1457 :     * This section generates 3-address style floating
1458 :     * point code.
1459 :     *========================================================*)
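    (* Illustrative sketch only (register and address names are schematic):
     * in this mode fd := T.FADD(64, T.FREG(64,a), T.FLOAD(64,ea,mem))
     * becomes a single
     *
     *   I.FBINOP{fsize=fsize 64, binOp=I.FADDL,
     *            lsrc=RealReg a, rsrc=address(ea,mem), dst=RealReg fd}
     *
     * i.e. one three-address instruction; no %st stack shuffling is
     * generated here.
     *)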
1460 :    
1461 :     and isize 16 = I.I16
1462 :     | isize 32 = I.I32
1463 :     | isize _ = error "isize"
1464 :    
1465 :     and fstore''(fty, ea, d, mem, an) =
1466 :     (floatingPointUsed := true;
1467 :     mark(I.FMOVE{fsize=fsize fty, dst=address(ea,mem),
1468 :     src=foperand(fty, d)},
1469 :     an)
1470 :     )
1471 :    
1472 :     and fload''(fty, ea, mem, d, an) =
1473 :     (floatingPointUsed := true;
1474 :     mark(I.FMOVE{fsize=fsize fty, src=address(ea,mem),
1475 :     dst=RealReg d}, an)
1476 :     )
1477 :    
1478 :     and fiload''(ity, ea, d, an) =
1479 :     (floatingPointUsed := true;
1480 :     mark(I.FILOAD{isize=isize ity, ea=ea, dst=RealReg d}, an)
1481 :     )
1482 :    
1483 :     and fexpr''(e as T.FREG(_,f)) =
1484 :     if isFMemReg f then transFexpr e else f
1485 :     | fexpr'' e = transFexpr e
1486 :    
1487 :     and transFexpr e =
1488 :     let val fd = newFreg() in doFexpr''(64, e, fd, []); fd end
1489 :    
1490 :     (*
1491 :     * Process a floating point operand. Put operand in register
1492 :     * when possible. The operand should match the given fty.
1493 :     *)
1494 :     and foperand(fty, e as T.FREG(fty', f)) =
1495 :     if fty = fty' then RealReg f else I.FPR(fexpr'' e)
1496 :     | foperand(fty, T.CVTF2F(_, _, e)) =
1497 :     foperand(fty, e) (* nop on the x86 *)
1498 :     | foperand(fty, e as T.FLOAD(fty', ea, mem)) =
1499 :     (* fold operand when the precision matches *)
1500 :     if fty = fty' then address(ea, mem) else I.FPR(fexpr'' e)
1501 :     | foperand(fty, e) = I.FPR(fexpr'' e)
1502 :    
1503 :     (*
1504 :     * Process a floating point operand.
1505 :     * Try to fold in a memory operand or conversion from an integer.
1506 :     *)
1507 :     and fioperand(T.FREG(fty,f)) = (REAL, fty, RealReg f, [])
1508 :     | fioperand(T.FLOAD(fty, ea, mem)) =
1509 :     (REAL, fty, address(ea, mem), [])
1510 :     | fioperand(T.CVTF2F(_, _, e)) = fioperand(e) (* nop on the x86 *)
1511 :     | fioperand(T.CVTI2F(_, ty, e)) = convertIntToFloat(ty, e)
1512 :     | fioperand(T.FMARK(e,an)) = fioperand(e) (* XXX *)
1513 :     | fioperand(e) = (REAL, 64, I.FPR(fexpr'' e), [])
1514 :    
1515 :     (* Generate a binary operator. Since the real binary operators
1516 :     * do not take memory as a destination, we also ensure this
1517 :     * does not happen.
1518 :     *)
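    (* Sketch: for fd := T.FSUB(64, T.FLOAD(64,ea,mem), b) the left operand
     * is a memory operand, so (binOp, lsrc, rsrc) is rewritten from
     * (I.FSUBL, FLOAD, b) to (I.FSUBRL, b, FLOAD); the reversed opcode
     * compensates for the swap, and the memory operand always ends up in
     * rsrc, where it can be folded.
     *)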
1519 :     and fbinop(targetFty,
1520 :     binOp, binOpR, ibinOp, ibinOpR, lsrc, rsrc, fd, an) =
1521 :     (* Put the mem operand in rsrc *)
1522 :     let val _ = floatingPointUsed := true;
1523 :     fun isMemOpnd(T.FREG(_, f)) = isFMemReg f
1524 :     | isMemOpnd(T.FLOAD _) = true
1525 :     | isMemOpnd(T.CVTI2F(_, (16 | 32), _)) = true
1526 :     | isMemOpnd(T.CVTF2F(_, _, t)) = isMemOpnd t
1527 :     | isMemOpnd(T.FMARK(t, _)) = isMemOpnd t
1528 :     | isMemOpnd _ = false
1529 :     val (binOp, ibinOp, lsrc, rsrc) =
1530 :     if isMemOpnd lsrc then (binOpR, ibinOpR, rsrc, lsrc)
1531 :     else (binOp, ibinOp, lsrc, rsrc)
1532 :     val lsrc = foperand(targetFty, lsrc)
1533 :     val (kind, fty, rsrc, code) = fioperand(rsrc)
1534 :     fun dstMustBeFreg f =
1535 :     if targetFty <> 64 then
1536 :     let val tmpR = newFreg()
1537 :     val tmp = I.FPR tmpR
1538 :     in mark(f tmp, an);
1539 :     emit(I.FMOVE{fsize=fsize targetFty,
1540 :     src=tmp, dst=RealReg fd})
1541 :     end
1542 :     else mark(f(RealReg fd), an)
1543 :     in case kind of
1544 :     REAL =>
1545 :     dstMustBeFreg(fn dst =>
1546 :     I.FBINOP{fsize=fsize fty, binOp=binOp,
1547 :     lsrc=lsrc, rsrc=rsrc, dst=dst})
1548 :     | INTEGER =>
1549 :     (dstMustBeFreg(fn dst =>
1550 :     I.FIBINOP{isize=isize fty, binOp=ibinOp,
1551 :     lsrc=lsrc, rsrc=rsrc, dst=dst});
1552 :     emits code
1553 :     )
1554 :     end
1555 : george 545
1556 : leunga 731 and funop(fty, unOp, src, fd, an) =
1557 :     let val src = foperand(fty, src)
1558 :     in mark(I.FUNOP{fsize=fsize fty,
1559 :     unOp=unOp, src=src, dst=RealReg fd},an)
1560 :     end
1561 :    
1562 :     and doFexpr''(fty, e, fd, an) =
1563 :     case e of
1564 : george 889 T.FREG(_,fs) => if CB.sameColor(fs,fd) then ()
1565 : leunga 731 else fcopy''(fty, [fd], [fs], an)
1566 :     (* Stupid x86 does everything as 80-bits internally. *)
1567 :    
1568 :     (* Binary operators *)
1569 :     | T.FADD(_, a, b) => fbinop(fty,
1570 :     I.FADDL, I.FADDL, I.FIADDL, I.FIADDL,
1571 :     a, b, fd, an)
1572 :     | T.FSUB(_, a, b) => fbinop(fty,
1573 :     I.FSUBL, I.FSUBRL, I.FISUBL, I.FISUBRL,
1574 :     a, b, fd, an)
1575 :     | T.FMUL(_, a, b) => fbinop(fty,
1576 :     I.FMULL, I.FMULL, I.FIMULL, I.FIMULL,
1577 :     a, b, fd, an)
1578 :     | T.FDIV(_, a, b) => fbinop(fty,
1579 :     I.FDIVL, I.FDIVRL, I.FIDIVL, I.FIDIVRL,
1580 :     a, b, fd, an)
1581 :    
1582 :     (* Unary operators *)
1583 :     | T.FNEG(_, a) => funop(fty, I.FCHS, a, fd, an)
1584 :     | T.FABS(_, a) => funop(fty, I.FABS, a, fd, an)
1585 :     | T.FSQRT(_, a) => funop(fty, I.FSQRT, a, fd, an)
1586 :    
1587 :     (* Load *)
1588 :     | T.FLOAD(fty,ea,mem) => fload''(fty, ea, mem, fd, an)
1589 :    
1590 :     (* Type conversions *)
1591 :     | T.CVTF2F(_, _, e) => doFexpr''(fty, e, fd, an)
1592 :     | T.CVTI2F(_, ty, e) =>
1593 :     let val (_, ty, ea, cleanup) = convertIntToFloat(ty, e)
1594 :     in fiload''(ty, ea, fd, an);
1595 :     emits cleanup
1596 :     end
1597 :    
1598 :     | T.FMARK(e,A.MARKREG f) => (f fd; doFexpr''(fty, e, fd, an))
1599 :     | T.FMARK(e, a) => doFexpr''(fty, e, fd, a::an)
1600 :     | T.FPRED(e, c) => doFexpr''(fty, e, fd, A.CTRLUSE c::an)
1601 :     | T.FEXT fexp =>
1602 :     ExtensionComp.compileFext (reducer()) {e=fexp, fd=fd, an=an}
1603 :     | _ => error("doFexpr''")
1604 :    
1605 :     (*========================================================
1606 :     * Tie the two styles of fp code generation together
1607 :     *========================================================*)
1608 :     and fstore(fty, ea, d, mem, an) =
1609 :     if enableFastFPMode andalso !fast_floating_point
1610 :     then fstore''(fty, ea, d, mem, an)
1611 :     else fstore'(fty, ea, d, mem, an)
1612 :     and fload(fty, ea, d, mem, an) =
1613 :     if enableFastFPMode andalso !fast_floating_point
1614 :     then fload''(fty, ea, d, mem, an)
1615 :     else fload'(fty, ea, d, mem, an)
1616 :     and fexpr e =
1617 :     if enableFastFPMode andalso !fast_floating_point
1618 :     then fexpr'' e else fexpr' e
1619 :     and doFexpr(fty, e, fd, an) =
1620 :     if enableFastFPMode andalso !fast_floating_point
1621 :     then doFexpr''(fty, e, fd, an)
1622 :     else doFexpr'(fty, e, fd, an)
1623 :    
1624 : leunga 797 (*================================================================
1625 :     * Optimizations for x := x op y
1626 :     * Special optimizations:
1627 :     * Generate a binary operator; the result must be in memory.
1628 :     * The source must not be in memory.
1629 :     *================================================================*)
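    (* Sketch of the intended selection (schematic, 32-bit case; the load
     * and the store must refer to the same address expression ea):
     *
     *   T.STORE(32, ea, T.ADD(32, T.LOAD(32,ea,mem), T.LI one), mem)
     *     --> I.UNARY {unOp=INC, opnd=address(ea,mem)}            x := x+1
     *   T.STORE(32, ea, T.ADD(32, T.LOAD(32,ea,mem), y), mem)
     *     --> I.BINARY{binOp=ADD, src=immedOrReg(operand y),
     *                  dst=address(ea,mem)}                       x := x+y
     *
     * where INC and ADD are the corresponding entries of opcodes32.
     *)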
1630 :     and binaryMem(binOp, src, dst, mem, an) =
1631 :     mark(I.BINARY{binOp=binOp, src=immedOrReg(operand src),
1632 :     dst=address(dst,mem)}, an)
1633 :     and unaryMem(unOp, opnd, mem, an) =
1634 :     mark(I.UNARY{unOp=unOp, opnd=address(opnd,mem)}, an)
1635 :    
1636 :     and isOne(T.LI n) = n = one
1637 :     | isOne _ = false
1638 :    
1639 :     (*
1640 :     * Perform optimizations based on recognizing
1641 :     * x := x op y or
1642 :     * x := y op x
1643 :     * first.
1644 :     *)
1645 :     and store(ty, ea, d, mem, an,
1646 :     {INC,DEC,ADD,SUB,NOT,NEG,SHL,SHR,SAR,OR,AND,XOR},
1647 :     doStore
1648 :     ) =
1649 :     let fun default() = doStore(ea, d, mem, an)
1650 :     fun binary1(t, t', unary, binary, ea', x) =
1651 :     if t = ty andalso t' = ty then
1652 :     if MLTreeUtils.eqRexp(ea, ea') then
1653 :     if isOne x then unaryMem(unary, ea, mem, an)
1654 :     else binaryMem(binary, x, ea, mem, an)
1655 :     else default()
1656 :     else default()
1657 :     fun unary(t,unOp, ea') =
1658 :     if t = ty andalso MLTreeUtils.eqRexp(ea, ea') then
1659 :     unaryMem(unOp, ea, mem, an)
1660 :     else default()
1661 :     fun binary(t,t',binOp,ea',x) =
1662 :     if t = ty andalso t' = ty andalso
1663 :     MLTreeUtils.eqRexp(ea, ea') then
1664 :     binaryMem(binOp, x, ea, mem, an)
1665 :     else default()
1666 :    
1667 :     fun binaryCom1(t,unOp,binOp,x,y) =
1668 :     if t = ty then
1669 :     let fun again() =
1670 :     case y of
1671 :     T.LOAD(ty',ea',_) =>
1672 :     if ty' = ty andalso MLTreeUtils.eqRexp(ea, ea') then
1673 :     if isOne x then unaryMem(unOp, ea, mem, an)
1674 :     else binaryMem(binOp,x,ea,mem,an)
1675 :     else default()
1676 :     | _ => default()
1677 :     in case x of
1678 :     T.LOAD(ty',ea',_) =>
1679 :     if ty' = ty andalso MLTreeUtils.eqRexp(ea, ea') then
1680 :     if isOne y then unaryMem(unOp, ea, mem, an)
1681 :     else binaryMem(binOp,y,ea,mem,an)
1682 :     else again()
1683 :     | _ => again()
1684 :     end
1685 :     else default()
1686 :    
1687 :     fun binaryCom(t,binOp,x,y) =
1688 :     if t = ty then
1689 :     let fun again() =
1690 :     case y of
1691 :     T.LOAD(ty',ea',_) =>
1692 :     if ty' = ty andalso MLTreeUtils.eqRexp(ea, ea') then
1693 :     binaryMem(binOp,x,ea,mem,an)
1694 :     else default()
1695 :     | _ => default()
1696 :     in case x of
1697 :     T.LOAD(ty',ea',_) =>
1698 :     if ty' = ty andalso MLTreeUtils.eqRexp(ea, ea') then
1699 :     binaryMem(binOp,y,ea,mem,an)
1700 :     else again()
1701 :     | _ => again()
1702 :     end
1703 :     else default()
1704 :    
1705 :     in case d of
1706 :     T.ADD(t,x,y) => binaryCom1(t,INC,ADD,x,y)
1707 :     | T.SUB(t,T.LOAD(t',ea',_),x) => binary1(t,t',DEC,SUB,ea',x)
1708 :     | T.ORB(t,x,y) => binaryCom(t,OR,x,y)
1709 :     | T.ANDB(t,x,y) => binaryCom(t,AND,x,y)
1710 :     | T.XORB(t,x,y) => binaryCom(t,XOR,x,y)
1711 :     | T.SLL(t,T.LOAD(t',ea',_),x) => binary(t,t',SHL,ea',x)
1712 :     | T.SRL(t,T.LOAD(t',ea',_),x) => binary(t,t',SHR,ea',x)
1713 :     | T.SRA(t,T.LOAD(t',ea',_),x) => binary(t,t',SAR,ea',x)
1714 :     | T.NEG(t,T.LOAD(t',ea',_)) => unary(t,NEG,ea')
1715 :     | T.NOTB(t,T.LOAD(t',ea',_)) => unary(t,NOT,ea')
1716 :     | _ => default()
1717 :     end (* store *)
1718 :    
1719 : george 545 (* generate code for a statement *)
1720 :     and stmt(T.MV(_, rd, e), an) = doExpr(e, rd, an)
1721 :     | stmt(T.FMV(fty, fd, e), an) = doFexpr(fty, e, fd, an)
1722 :     | stmt(T.CCMV(ccd, e), an) = doCCexpr(e, ccd, an)
1723 :     | stmt(T.COPY(_, dst, src), an) = copy(dst, src, an)
1724 :     | stmt(T.FCOPY(fty, dst, src), an) = fcopy(fty, dst, src, an)
1725 : leunga 744 | stmt(T.JMP(e, labs), an) = jmp(e, labs, an)
1726 : blume 839 | stmt(T.CALL{funct, targets, defs, uses, region, pops, ...}, an) =
1727 :     call(funct,targets,defs,uses,region,[],an, pops)
1728 :     | stmt(T.FLOW_TO(T.CALL{funct, targets, defs, uses, region, pops, ...},
1729 : leunga 796 cutTo), an) =
1730 : blume 839 call(funct,targets,defs,uses,region,cutTo,an, pops)
1731 : george 545 | stmt(T.RET _, an) = mark(I.RET NONE, an)
1732 : leunga 797 | stmt(T.STORE(8, ea, d, mem), an) =
1733 :     store(8, ea, d, mem, an, opcodes8, store8)
1734 :     | stmt(T.STORE(16, ea, d, mem), an) =
1735 :     store(16, ea, d, mem, an, opcodes16, store16)
1736 :     | stmt(T.STORE(32, ea, d, mem), an) =
1737 :     store(32, ea, d, mem, an, opcodes32, store32)
1738 :    
1739 : george 545 | stmt(T.FSTORE(fty, ea, d, mem), an) = fstore(fty, ea, d, mem, an)
1740 : leunga 744 | stmt(T.BCC(cc, lab), an) = branch(cc, lab, an)
1741 : george 545 | stmt(T.DEFINE l, _) = defineLabel l
1742 :     | stmt(T.ANNOTATION(s, a), an) = stmt(s, a::an)
1743 : george 555 | stmt(T.EXT s, an) =
1744 :     ExtensionComp.compileSext (reducer()) {stm=s, an=an}
1745 : george 545 | stmt(s, _) = doStmts(Gen.compileStm s)
1746 :    
1747 :     and doStmt s = stmt(s, [])
1748 :     and doStmts ss = app doStmt ss
1749 :    
1750 :     and beginCluster' _ =
1751 :     ((* Must be cleared by the client.
1752 :     * if rewriteMemReg then memRegsUsed := 0w0 else ();
1753 :     *)
1754 : leunga 731 floatingPointUsed := false;
1755 :     trapLabel := NONE;
1756 :     beginCluster 0
1757 :     )
1758 : george 545 and endCluster' a =
1759 : monnier 247 (case !trapLabel
1760 : monnier 411 of NONE => ()
1761 : george 545 | SOME(_, lab) => (defineLabel lab; emit(I.INTO))
1762 : monnier 411 (*esac*);
1763 : leunga 731 (* If floating point has been used, allocate an extra
1764 :     * register just in case we didn't use any explicit register
1765 :     *)
1766 :     if !floatingPointUsed then (newFreg(); ())
1767 :     else ();
1768 : george 545 endCluster(a)
1769 :     )
1770 :    
1771 :     and reducer() =
1772 : george 984 TS.REDUCER{reduceRexp = expr,
1773 : george 545 reduceFexp = fexpr,
1774 :     reduceCCexp = ccExpr,
1775 :     reduceStm = stmt,
1776 :     operand = operand,
1777 :     reduceOperand = reduceOpnd,
1778 :     addressOf = fn e => address(e, I.Region.memory), (*XXX*)
1779 : george 1009 emit = mark',
1780 : george 545 instrStream = instrStream,
1781 :     mltreeStream = self()
1782 :     }
1783 :    
1784 :     and self() =
1785 : george 984 TS.S.STREAM
1786 : leunga 815 { beginCluster = beginCluster',
1787 :     endCluster = endCluster',
1788 :     emit = doStmt,
1789 :     pseudoOp = pseudoOp,
1790 :     defineLabel = defineLabel,
1791 :     entryLabel = entryLabel,
1792 :     comment = comment,
1793 :     annotation = annotation,
1794 :     getAnnotations = getAnnotations,
1795 :     exitBlock = fn mlrisc => exitBlock(cellset mlrisc)
1796 : george 545 }
1797 :    
1798 :     in self()
1799 : monnier 247 end
1800 :    
1801 : george 545 end (* functor *)
1802 :    
1803 :     end (* local *)
