[diderot] /branches/vis15/src/compiler/mid-to-low/ein-to-vec.sml
Revision 3646 - Tue Feb 2 14:09:56 2016 UTC by jhr
File size: 9218 byte(s)
working on merge
(*
* Helper functions used for vector operations.
* At this point every index is bound to an int and we are ready to
* emit Low-IL code, using Low-IL vector ops such as subVec, addVec,
* prodVec, etc.
*
* Pipeline: EIN -> scan -> iter -> here.
*)
structure VecToLow = struct
    local

    structure Ty = LowTypes
    structure Op = LowOps
    structure E = Ein
    structure H = Helper
    structure IMap = IntRedBlackMap

    in


(*
*
* Each EIN operator has a list of arguments.
* To reduce code size, each argument is grouped together with its
- EIN index binding (ix), e.g. [E.V 0, E.V 1],
- Low-IR variable (var_a),
- argument type (argTy), e.g. TensorTy[3,3],
- id (position in the parameter list),
- and operation type (opTy), i.e. the Low-IL operator IndexTensor or ProjectLast,
* in a datatype called ParamTy:  ParamTy = (id, var_a, argTy, ix, opTy).
*
* The EIN operator λA <A_ijk>_{ijk} (a)
* has a single argument a.
* Its paramTy is (id, a, argTy, ix, opTy) where id=0 and argTy=Ty.TensorTy[i,j,k].
*
* The opTy (operation type) determines whether the tensor argument A
* will be treated as a scalar or a vector argument in the next operation.
* If opTy=Proj k then the last index is projected to a vector:  tensor[k] result = A[i,j,:]
* If opTy=Indx then every index of the tensor is indexed:       real result = A[i,j,k]
*
* paramToOp: converts each paramTy into a Low-IR argument.
* ParamTy (id, a, argTy, [i,j], opTy) where opTy = Proj k creates a vector
*       => Op.ProjectLast (0, k, ixx, Ty.TensorTy[i,j,k])
*          A[i,j,:]
*
* ParamTy (id, a, argTy, [i,j,k], opTy) where opTy = Indx creates a scalar
*       => Op.IndexTensor (0, ixx, Ty.TensorTy[i,j,k])
*          A[i,j,k]
*----------------------------------------------------------------------
* Ex. 1
* Ty.TensorTy[i,j,k] out = λAB <A_ijk+B_ijk>_ijk (a,b) where argTy = Ty.TensorTy[i,j,k]
* Create a vector addition operation
* (1) The iter() function binds indices i and j in the mapp variable.
* (2) addV() is called with the paramTy list
    paramTy = [(0,a,argTy,[E.V 0,E.V 1],Proj k), (1,b,argTy,[E.V 0,E.V 1],Proj k)]
    paramToOp() creates
    Ty.TensorTy[k] x = Op.ProjectLast (0,k,ixx,argTy) (a)
    Ty.TensorTy[k] y = Op.ProjectLast (1,k,ixx,argTy) (b)
    where ixx = [find E.V i in mapp, find E.V j in mapp]
* (3) assignOP() is called with Op.addVec and
    creates Ty.TensorTy[k] t = Op.addVec (x,y)
* ===> Code produced
* Ty.TensorTy[k] x0= Op.ProjectLast (0,k, [0,0],argTy) (a)
* Ty.TensorTy[k] y0= Op.ProjectLast (1,k, [0,0],argTy) (b)
* Ty.TensorTy[k] out00=Op.addVec(k) (x0,y0)
* Ty.TensorTy[k] x1= Op.ProjectLast (0,k, [0,1],argTy) (a)
* Ty.TensorTy[k] y1= Op.ProjectLast (1,k, [0,1],argTy) (b)
* Ty.TensorTy[k] out01=Op.addVec(k) (x1,y1)
* Ty.TensorTy[i,j,k] out=CONS[CONS [out00,out01],..]
*
* Ex. 2
* Ty.TensorTy[i,j,k] out = λAB <A_i*B_jk>_ijk (a,b)
* Create a scalar-vector product (prodScaV) operation
* (1) The iter() function binds indices i and j in the mapp variable.
* (2) op2() is called with the paramTy list
    paramTy = [(0, a, Ty.TensorTy[i], [E.V i], Indx), (1, b, Ty.TensorTy[j,k], [E.V j], Proj k)]
    paramToOp() creates
    Ty.TensorTy[] x = Op.IndexTensor (0, [find E.V i in mapp], Ty.TensorTy[i]) (a)
    Ty.TensorTy[k] y = Op.ProjectLast (1, k, [find E.V j in mapp], Ty.TensorTy[j,k]) (b)
* (3) assignOP() is called with Op.prodScaV and
    creates Ty.TensorTy[k] t = Op.prodScaV (x,y)
* ===> Code produced
* Ty.TensorTy[] x00 = Op.IndexTensor (0, [0], Ty.TensorTy[i]) (a)
* Ty.TensorTy[k] y00 = Op.ProjectLast (1, k, [0], Ty.TensorTy[j,k]) (b)
* Ty.TensorTy[k] out00 = Op.prodScaV (x00,y00)
* Ty.TensorTy[k] y01 = Op.ProjectLast (1, k, [1], Ty.TensorTy[j,k]) (b)
* Ty.TensorTy[k] out01 = Op.prodScaV (x00,y01)
* Ty.TensorTy[] x10 = Op.IndexTensor (0, [1], Ty.TensorTy[i]) (a)
* Ty.TensorTy[k] out10 = Op.prodScaV (x10,y00)
* Ty.TensorTy[k] out11 = Op.prodScaV (x10,y01)
* Ty.TensorTy[i,j,k] out=CONS [CONS [out00,out01],CONS[out10,out11]]
*)
(*----------------------------------------------------------------------*)

    datatype opTy = Indx | Proj of int
    datatype paramTy  (* param id * argument var * argument type * EIN index binding * operation type *)
        = Param of int * LowIR.var * Ty.ty * E.mu list * opTy
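    (* Illustrative example (the name a is hypothetical): a 3x3 tensor argument a
    * at parameter position 0, indexed by [E.V 0] with its last index projected,
    * would be described as
    *   Param (0, a, Ty.TensorTy[3,3], [E.V 0], Proj 3)
    * i.e. the row A[i,:] is treated as a length-3 vector in the next operation.
    *)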

    fun insert (k, v) d = IMap.insert (d, k, v)
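    (* Usage sketch: a mapp binding EIN index E.V 0 to position 1 and E.V 1 to
    * position 0 could be built as
    *   val mapp = insert (1, 0) (insert (0, 1) IMap.empty)
    * so that the H.mapIndex lookups in paramToOp below (assuming H.mapIndex
    * looks each E.V index up in mapp) resolve E.V 0 to 1 and E.V 1 to 0.
    *)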

    (* paramToOp:
    * Does the transformation from a paramTy to Low-IR operators (IndexTensor and ProjectLast).
    * Arguments are avail-rhs (avail), an IntRedBlackMap from EIN indices to ints (mapp), and a paramTy.
    * Returns avail-rhs and the lhs Low-IR variable of the last operation created.
    * (A worked sketch follows the function definition below.)
    *)
    fun paramToOp(avail, mapp, paramTy)= case paramTy
        of Param(_, vA ,_ , [], Proj _) => (avail,vA)
        |  Param(_, vA, Ty.TensorTy[], _, Indx) =>  (avail, vA)
        |  Param(id,vA ,argTy, ix, opType) => let
            val ixx = List.map (fn e1 => H.mapIndex (e1, mapp)) ix (* resolve each EIN index to its int binding *)
            val str=String.concat (List.map Int.toString ixx)
            val (ty,opp,name)= (case opType
                of (Proj vecIX) => let
                    val vecTy = Ty.TensorTy [vecIX]
                    val opp = Op.ProjectLast (id, vecIX, ixx, argTy)
                    val name = String.concat["ProjLast_", str, "_"]
                    in (vecTy,opp,name) end
                | Indx => let
                    val opp = Op.IndexTensor (id, ixx, argTy)
                    val name = String.concat["Indx_",str, "_"]
                    in (Ty.TensorTy[], opp, name) end
            (* end case *))
            in
	      H.assignOP (avail, opp, [vA], name, ty)
            end
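    (* Worked sketch of paramToOp (the variable names a and x are hypothetical):
    * with mapp binding E.V 0 -> 1 and E.V 1 -> 0, the call
    *   paramToOp (avail, mapp, Param (0, a, Ty.TensorTy[i,j,k], [E.V 0, E.V 1], Proj k))
    * computes ixx = [1,0] and assigns
    *   Ty.TensorTy[k] x = Op.ProjectLast (0, k, [1,0], Ty.TensorTy[i,j,k]) (a)
    * returning (avail, x); with opTy = Indx and ix = [E.V 0, E.V 1, E.V 2] it
    * would instead emit an Op.IndexTensor producing a Ty.TensorTy[] scalar.
    *)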
    (*--------------------Vectorization Helper Functions--------------------*)
    (*
    * The rest of the functions create vector operations for tensor arguments (vecA, vecB, ..).
    * The arguments to the functions:
    - avail is avail-rhs
    - mapp is an IntRedBlackMap from ints to ints. It is used to bind EIN variable indices.
    - vecIX is an integer giving the length of the vector result
    - vecA, vecB are paramTy values as described above
        - They hold information about the tensor arguments in the operation.
        - They can represent an arbitrary-sized tensor (scalar, vector, matrix, ..).
        - They will be transformed to a scalar or a vector with paramToOp().
    * Other variable names:
    - vA, vB, vD are Low-IR variables
    * Each function passes its arguments (vecA, vecB) to paramToOp(),
    which updates avail with additional code and returns the last variable created (vA).
    * paramToOp() transforms arbitrary-sized tensors into vectors and scalars.
    * assignOP() then creates Low-IR vector operations of type Ty.TensorTy[vecIX].
    * addV() adds a list of vector arguments;
    * negV() multiplies a vector argument by a scalar to negate it.
    * The Low-IR vector operation used is the distinguishing factor between the functions.
    *)

    (* vector negation: multiplies the vector vB by vA, a Low-IR variable holding negative one *)
    fun negV (avail, mapp, (vA, vecIX, vec)) = let
        val  (avail, vB) =  paramToOp (avail, mapp, vec)
        in
	  H.assignOP (avail, Op.prodScaV vecIX, [vA, vB], "prodScaV", Ty.TensorTy [vecIX])
        end
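    (* Sketch (vNegOne is a hypothetical name): negV (avail, mapp, (vNegOne, 3, vec))
    * lowers vec to a length-3 vector vB via paramToOp and then emits
    *   Ty.TensorTy[3] t = Op.prodScaV 3 (vNegOne, vB)
    * where vNegOne is assumed to be a Low-IR variable already bound to -1.
    *)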

    (* binary vector operation between arguments vecA and vecB.
    * dstop refers to the destination vector operation (e.g. scaling, subtraction, product, ..)
    *)
    fun op2 (avail, mapp, (vecIX, dstop, vecA,vecB)) = let
        val  (avail, vA) = paramToOp (avail, mapp, vecA)
        val  (avail, vB) = paramToOp (avail, mapp, vecB)
        in
	  H.assignOP (avail, dstop, [vA, vB], "vectorOp", Ty.TensorTy [vecIX])
        end
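    (* Sketch: op2 (avail, mapp, (k, Op.subVec k, vecA, vecB)) lowers vecA and
    * vecB to length-k vectors vA and vB and then emits
    *   Ty.TensorTy[k] t = Op.subVec k (vA, vB)
    * (Op.subVec is one of the Low-IL vector ops named in the header comment;
    * its exact form is assumed here, by analogy with Op.addVec and Op.prodVec.)
    *)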

    (* vector addition: adds a list of vectors "rest" *)
    fun addV (avail, mapp, (vecIX, rest)) = let
        fun add (avail, [], rest) = (avail, List.rev rest)
          | add (avail, vecT ::es, rest) = let
            val  (avail, vT) = paramToOp (avail, mapp, vecT)
            in
                add (avail, es, vT::rest)
            end
        val  (avail, rest) = add (avail, rest, [])
        in
	  H.mkMultiple (avail, rest, Op.addVec vecIX, Ty.TensorTy [vecIX])
        end
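    (* Sketch: with rest = [vecA, vecB, vecC] (three paramTy arguments), addV
    * lowers each to a length-vecIX vector and hands the list to H.mkMultiple,
    * which is assumed here to chain Op.addVec vecIX over the vectors, yielding
    * a single Ty.TensorTy[vecIX] result.
    *)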

    (* dot product: takes the pointwise product of vecA and vecB, then sums the components *)
    fun dotV (avail, mapp, (vecIX, vecA, vecB)) = let
        val  (avail, vD) = op2 (avail, mapp, (vecIX, Op.prodVec vecIX, vecA, vecB))
        in
	  H.mkSumVec (avail, vecIX, [vD])
        end
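    (* Sketch: dotV (avail, mapp, (k, vecA, vecB)) emits
    *   Ty.TensorTy[k] vD = Op.prodVec k (vA, vB)
    * and then H.mkSumVec reduces vD to a scalar, presumably sum_i vA[i]*vB[i].
    *)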

    (* takes the sum of multiple dot products:
    * updates mapp with the summation-range argument (E.V v, lb, ub)
    * by binding E.V v to each value from lb (lower bound) to ub (upper bound),
    * takes the dot product of vecA and vecB with each updated mapp,
    * accumulates the results in "rest",
    * then adds up all the terms in "rest".
    * (An illustrative sketch follows the function.)
    *)
    fun sumDotV (avail, mapp, ((E.V v, lb, ub), vecIX, vecA,vecB)) = let
        val nextfnargs = (vecIX, vecA, vecB)
        fun sumI (avail, counter, 0, rest) = let
            val mapp = insert (v, 0)  counter
            val  (avail, vE) = dotV (avail, mapp, nextfnargs)
            in
	      H.mkMultiple (avail, vE::rest, Op.addSca, Ty.TensorTy [])
            end
        | sumI (avail, counter, sx, rest) = let
            val mapp = insert (v, (sx+lb))  counter
            val  (avail, vE) = dotV (avail, mapp, nextfnargs)
            in
                sumI (avail, counter, sx-1, vE::rest)
            end
        in
            sumI (avail, mapp, (ub-lb), [])
        end
    | sumDotV _ = raise Fail "Non-variable index in summation"
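    (* Sketch: sumDotV (avail, mapp, ((E.V 2, 0, 1), k, vecA, vecB)) unrolls the
    * summation over E.V 2: it calls dotV with mapp extended by 2 -> 1 and then
    * by 2 -> 0, collects the two scalar results, and uses H.mkMultiple with
    * Op.addSca to add them into a single Ty.TensorTy[] value.
    *)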

    end

end
