
SCM Repository

[smlnj] View of /sml/trunk/src/compiler/CodeGen/sparc/sparcPseudoInstrs.sml



Revision 1003
Fri Dec 7 02:45:32 2001 UTC by george
File size: 3749 byte(s)
Changed the representation of instructions from being fully abstract
to being partially concrete. That is to say:

  from
	type instruction

  to
	type instr				(* machine instruction *)

	datatype instruction =
	    LIVE of {regs: C.cellset, spilled: C.cellset}
          | KILL of {regs: C.cellset, spilled: C.cellset}
          | COPYXXX of {k: CB.cellkind, dst: CB.cell list, src: CB.cell list}
          | ANNOTATION of {i: instruction, a: Annotations.annotation}
          | INSTR of instr

This makes the handling of certain special instructions that appear on
all architectures easier and more uniform.

LIVE and KILL say that a list of registers is live or killed at the
program point where the instruction appears. No spill code is generated
when an element of the 'regs' field is spilled; the register is simply
moved to the 'spilled' field (which is present more for debugging than
anything else).

LIVE replaces the (now deprecated) DEFFREG instruction on the alpha.
We used to generate:

	DEFFREG f1
	f1 := f2 + f3
	trapb

but now generate:

	f1 := f2 + f3
	trapb
	LIVE {regs=[f1,f2,f3], spilled=[]}

Furthermore, the DEFFREG hack required that every floating point
instruction use all registers mentioned in the instruction. Therefore
f1 := f2 + f3 defines f1 and uses [f1,f2,f3]! This hack is no longer
required, resulting in a cleaner alpha implementation. (Hopefully, Intel
will not get rid of this architecture.)

COPYXXX is intended to replace the parallel COPY and FCOPY available on
all the architectures. This will further simplify the register allocator,
which must be aware of them for coalescing purposes, and will also
simplify certain aspects of the machine description that provide
callbacks related to parallel copies.

ANNOTATION should be obvious, and INSTR now represents the honest-to-God
machine instruction set!

The <arch>/instructions/<arch>Instr.sml files define certain utility
functions for making porting easier -- essentially converting upper case
to lower case. All machine instructions (of type instr) are in upper case,
and the lower case form generates an MLRISC instruction. For example on
the alpha we have:

  datatype instr =
     LDA of {r:cell, b:cell, d:operand}
   | ...

  val lda : {r:cell, b:cell, d:operand} -> instruction
    ...

where lda is just (INSTR o LDA), etc.

functor SparcPseudoInstrs
   (Instr : SPARCINSTR where Region=CPSRegions) : SPARC_PSEUDO_INSTR = 
struct
  structure I = Instr
  structure C = Instr.C

  type format1 =
       {r:CellsBasis.cell, i:I.operand, d:CellsBasis.cell} *
       (I.operand -> CellsBasis.cell) -> I.instruction list

  type format2 =
       {i:I.operand, d:CellsBasis.cell} *
       (I.operand -> CellsBasis.cell) -> I.instruction list
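
  (* Each expander below takes its operands together with a reduceOpnd
   * callback that forces an operand into a register, and returns the
   * instruction sequence implementing the pseudo-instruction. *)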

  fun error msg = MLRiscErrorMsg.impossible ("SparcPseudoInstrs."^msg)

  val delta = SparcSpec.framesize	(* initial value of %fp - %sp *)

  (* runtime system dependent; the numbers are relative to %sp but
   * we need offsets relative to %fp, hence the adjustment by delta *)
  val floatTmpOffset = I.IMMED (88 - delta)
  val umulOffset = I.IMMED (80 - delta)
  val smulOffset = I.IMMED (72 - delta)
  val udivOffset = I.IMMED (84 - delta)
  val sdivOffset = I.IMMED (76 - delta)
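
  (* Illustrative note (not in the original source): since %fp - %sp = delta,
   * an offset written as %fp + (88 - delta) addresses the same slot as
   * %sp + 88, which is how the runtime system defines it. *)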

  val stack = CPSRegions.stack

  val native = true  (* use native versions of the instructions? *)

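  (* Non-trapping unsigned multiply: a single UMUL, no overflow check. *)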
  fun umul_native({r, i, d}, reduceOpnd) =
      [I.arith{a=I.UMUL,r=r,i=i,d=d}]

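  (* TNE traps if the integer condition codes say "not equal"; TVS traps if
   * the overflow bit is set. Trap number 7 is presumably the software trap
   * that the runtime maps to the Overflow exception (see overflowtrap32
   * below). *)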
  val TNE = I.ticc{t=I.BNE,cc=I.ICC,r=C.r0,i=I.IMMED 7}
  val TVS = I.ticc{t=I.BVS,cc=I.ICC,r=C.r0,i=I.IMMED 7}

      (* Signed multiply with overflow check: SMUL leaves the high word of
       * the 64-bit product in the Y register, so the result overflows iff
       * Y != (d ~>> 31), i.e. iff the high word is not the sign extension
       * of the low word. *)
  fun smul_native({r, i, d}, reduceOpnd) =
      let val t1 = C.newReg()
          val t2 = C.newReg()
      in  [I.arith{a=I.SMUL,r=r,i=i,d=d},
           I.shift{s=I.SRA,r=d,i=I.IMMED 31,d=t1},
           I.rdy{d=t2},
           I.arith{a=I.SUBCC,r=t1,i=I.REG t2,d=C.r0},
           TNE
          ] 
      end
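
  (* UDIV divides the 64-bit value Y:r by the operand, so the Y register
   * (the high word of the dividend) must be cleared first; writing
   * %r0 xor %r0 via wry does that. *)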
  fun udiv_native({r,i,d},reduceOpnd) = 
      [I.wry{r=C.r0,i=I.REG C.r0},
       I.arith{a=I.UDIV,r=r,i=i,d=d}]

   (* Signed divide: the SRA by 31 sign-extends the dividend into Y.
    * SDIVCC may overflow when dividing MININT by ~1, hence the TVS trap. *)
  fun sdiv_native({r,i,d},reduceOpnd) = 
      let val t1 = C.newReg()
      in  [I.shift{s=I.SRA,r=r,i=I.IMMED 31,d=t1},
           I.wry{r=t1,i=I.REG C.r0},
           I.arith{a=I.SDIVCC,r=r,i=i,d=d},
           TVS
          ]
      end

  (* 
   * Registers %o2, %o3 are used to pass arguments to ml_mul and ml_div 
   * Result is returned in %o2.
   *)
  val r10 = C.GPReg 10
  val r11 = C.GPReg 11

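  (* Call a runtime routine whose address is kept in a stack slot: move the
   * operands into %o2/%o3 (r10/r11; the SPARC out registers %o0-%o7 are
   * r8-r15), load the routine's address at the given frame offset, jump
   * and link through it, then move the result back out of %o2. *)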
  fun callRoutine(offset,reduceOpnd,r,i,d) =   
  let val addr = C.newReg()
      val defs = C.addReg(r10,C.empty) 
      val uses = C.addReg(r10,C.addReg(r11,C.empty))
  in
      [I.copy{src=[r,reduceOpnd i],dst=[r10,r11],
                   tmp=SOME(I.Direct(C.newReg())),impl=ref NONE},
       I.load{l=I.LD,r=C.frameptrR,i=offset,d=addr,mem=stack},
       I.jmpl{r=addr,i=I.IMMED 0,d=C.linkReg,defs=defs,uses=uses,
              cutsTo=[],nop=true,mem=stack},
       I.copy{src=[r10],dst=[d],tmp=NONE,impl=ref NONE}
      ]
  end

  fun umul({r, i, d}, reduceOpnd) = callRoutine(umulOffset,reduceOpnd,r,i,d)
  fun smultrap({r, i, d}, reduceOpnd) = callRoutine(smulOffset,reduceOpnd,r,i,d)
  fun udiv({r, i, d}, reduceOpnd) = callRoutine(udivOffset,reduceOpnd,r,i,d)
  fun sdivtrap({r, i, d}, reduceOpnd) = callRoutine(sdivOffset,reduceOpnd,r,i,d)

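  (* SPARC V8 has no direct move between the integer and floating point
   * register files, so the conversion goes through memory: store the
   * integer to the float temporary slot, load it into an FP register,
   * and convert in place with FiTOd. *)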
  fun cvti2d({i, d}, reduceOpnd) = 
      [I.store{s=I.ST,r=C.frameptrR,i=floatTmpOffset,d=reduceOpnd i,mem=stack},
       I.fload{l=I.LDF,r=C.frameptrR,i=floatTmpOffset,d=d,mem=stack},
       I.fpop1{a=I.FiTOd,r=d,d=d}
      ]
  fun cvti2s _ = error "cvti2s"
  fun cvti2q _ = error "cvti2q"

     (* Select the native or runtime-call versions of the instructions *)
  val umul32 = if native then umul_native else umul
  fun smul32 _ = error "smul32"
  val smul32trap = if native then smul_native else smultrap
  val udiv32 = if native then udiv_native else udiv
  fun sdiv32 _ = error "sdiv32"
  val sdiv32trap = if native then sdiv_native else sdivtrap

  val overflowtrap32 = (* tvs 0x7 *)
                       [I.ticc{t=I.BVS,cc=I.ICC,r=C.r0,i=I.IMMED 7}]
  val overflowtrap64 = [] (* not needed *)


end

