Home My Page Projects Code Snippets Project Openings diderot

# SCM Repository

[diderot] View of /trunk/src/compiler/basis/basis-vars.sml
 [diderot] / trunk / src / compiler / basis / basis-vars.sml

# View of /trunk/src/compiler/basis/basis-vars.sml

Sat Jun 23 12:02:18 2012 UTC (7 years, 1 month ago) by jhr
File size: 12902 byte(s)
ported changes from vis12 branch (C math functions)
(* basis-vars.sml
*
* COPYRIGHT (c) 2010 The Diderot Project (http://diderot-language.cs.uchicago.edu)
*
* This module defines the AST variables for the built-in operators and functions.
*)

structure BasisVars =
struct
local
structure N = BasisNames
structure Ty = Types
structure MV = MetaVar

(* infix constructor for function types: [argTys] --> resTy *)
fun --> (tys1, ty) = Ty.T_Fun(tys1, ty)
infix -->

(* dimension constants for 2-space and 3-space *)
val N2 = Ty.DimConst 2
val N3 = Ty.DimConst 3

(* short names for kinds; each is a generator that allocates a fresh meta
 * variable of the corresponding kind (type, differentiation, shape, dimension).
 *)
val TK : unit -> Ty.meta_var = Ty.TYPE o MV.newTyVar
fun DK () : Ty.meta_var = Ty.DIFF(MV.newDiffVar 0)
val SK : unit -> Ty.meta_var = Ty.SHAPE o MV.newShapeVar
val NK : unit -> Ty.meta_var = Ty.DIM o MV.newDimVar

(* a monomorphic type scheme (no bound meta variables) *)
fun ty t = ([], t)
(* a polymorphic type scheme: allocate one fresh meta variable per kind
 * generator in kinds and pass them to mkTy to build the scheme's body.
 *)
fun all (kinds, mkTy : Ty.meta_var list -> Ty.ty) = let
val tvs = List.map (fn mk => mk()) kinds
in
(tvs, mkTy tvs)
end
(* like all, but specialized to a single dimension meta variable *)
fun allNK mkTy = let
val tv = MV.newDimVar()
in
([Ty.DIM tv], mkTy tv)
end

(* shorthand constructors for field, tensor, and square-matrix types *)
fun field (k, d, dd) = Ty.T_Field{diff=k, dim=d, shape=dd}
fun tensor ds = Ty.T_Tensor(Ty.Shape ds)
fun matrix d = tensor[d,d]

(* create monomorphic and polymorphic basis variables *)
fun monoVar (name, ty) = Var.new (name, AST.BasisVar, ty)
fun polyVar (name, scheme) = Var.newPoly (name, AST.BasisVar, scheme)
in

(* TODO: I'm not sure how to extend + and - to fields, since the typing rules should allow
* two fields with different differentiation levels to be added.
*)

(* overloaded operators; the naming convention is to use the operator name followed
* by the argument type signature, where
*	i  -- int
*	b  -- bool
*	r  -- real (tensor[])
*	t  -- tensor[shape]
*	f  -- field#k(d)[shape]
*)

val t = Ty.T_Tensor(Ty.ShapeVar dd)
in
[t, t] --> t
end))
fn [Ty.DIFF k, Ty.DIM d, Ty.SHAPE dd] => let
val t = Ty.T_Field{diff = Ty.DiffVar(k, 0), dim = Ty.DimVar d, shape = Ty.ShapeVar dd}
in
[t, t] --> t
end))

(* subtraction is overloaded at integers, tensors, and fields *)
val sub_ii = monoVar(N.op_sub, [Ty.T_Int, Ty.T_Int] --> Ty.T_Int)
val sub_tt = polyVar(N.op_sub, all([SK],
      fn [Ty.SHAPE sv] => let
          val tt = Ty.T_Tensor(Ty.ShapeVar sv)
          in
            [tt, tt] --> tt
          end))
val sub_ff = polyVar(N.op_sub, all([DK,NK,SK],
      fn [Ty.DIFF k, Ty.DIM d, Ty.SHAPE dd] => let
          val ft = field(Ty.DiffVar(k, 0), Ty.DimVar d, Ty.ShapeVar dd)
          in
            [ft, ft] --> ft
          end))

(* note that we assume that operators are tested in the order defined here, so that mul_rr
 * takes precedence over mul_rt and mul_tr!
 *)
val mul_ii = monoVar(N.op_mul, [Ty.T_Int, Ty.T_Int] --> Ty.T_Int)
val mul_rr = monoVar(N.op_mul, [Ty.realTy, Ty.realTy] --> Ty.realTy)
(* scalar scaling of tensors, in both argument orders *)
val mul_rt = polyVar(N.op_mul, all([SK],
      fn [Ty.SHAPE sv] => let
          val tt = Ty.T_Tensor(Ty.ShapeVar sv)
          in
            [Ty.realTy, tt] --> tt
          end))
val mul_tr = polyVar(N.op_mul, all([SK],
      fn [Ty.SHAPE sv] => let
          val tt = Ty.T_Tensor(Ty.ShapeVar sv)
          in
            [tt, Ty.realTy] --> tt
          end))
(* scalar scaling of fields, in both argument orders *)
val mul_rf = polyVar(N.op_mul, all([DK,NK,SK],
      fn [Ty.DIFF k, Ty.DIM d, Ty.SHAPE dd] => let
          val ft = field(Ty.DiffVar(k, 0), Ty.DimVar d, Ty.ShapeVar dd)
          in
            [Ty.realTy, ft] --> ft
          end))
val mul_fr = polyVar(N.op_mul, all([DK,NK,SK],
      fn [Ty.DIFF k, Ty.DIM d, Ty.SHAPE dd] => let
          val ft = field(Ty.DiffVar(k, 0), Ty.DimVar d, Ty.ShapeVar dd)
          in
            [ft, Ty.realTy] --> ft
          end))

(* division is overloaded at int/int, real/real, tensor/real, and field/real *)
val div_ii = monoVar(N.op_div, [Ty.T_Int, Ty.T_Int] --> Ty.T_Int)
val div_rr = monoVar(N.op_div, [Ty.realTy, Ty.realTy] --> Ty.realTy)
val div_tr = polyVar(N.op_div, all([SK],
      fn [Ty.SHAPE sv] => let
          val tt = Ty.T_Tensor(Ty.ShapeVar sv)
          in
            [tt, Ty.realTy] --> tt
          end))
val div_fr = polyVar(N.op_div, all([DK,NK,SK],
      fn [Ty.DIFF k, Ty.DIM d, Ty.SHAPE dd] => let
          val ft = field(Ty.DiffVar(k, 0), Ty.DimVar d, Ty.ShapeVar dd)
          in
            [ft, Ty.realTy] --> ft
          end))

(* exponentiation; we distinguish between integer and real exponents to allow x^2 to be compiled
* as x*x.
*)
val exp_ri = monoVar(N.op_exp, [Ty.realTy, Ty.T_Int] --> Ty.realTy)  (* real ^ int *)
val exp_rr = monoVar(N.op_exp, [Ty.realTy, Ty.realTy] --> Ty.realTy)  (* real ^ real *)

(* convolution of an image with a kernel; both argument orders are accepted *)
val convolve_vk = polyVar (N.op_convolve, all([DK, NK, SK],
      fn [Ty.DIFF k, Ty.DIM d, Ty.SHAPE dd] => let
          val diff = Ty.DiffVar(k, 0)
          val dim = Ty.DimVar d
          val shp = Ty.ShapeVar dd
          in
            [Ty.T_Image{dim=dim, shape=shp}, Ty.T_Kernel diff]
              --> field(diff, dim, shp)
          end))
val convolve_kv = polyVar (N.op_convolve, all([DK, NK, SK],
      fn [Ty.DIFF k, Ty.DIM d, Ty.SHAPE dd] => let
          val diff = Ty.DiffVar(k, 0)
          val dim = Ty.DimVar d
          val shp = Ty.ShapeVar dd
          in
            [Ty.T_Kernel diff, Ty.T_Image{dim=dim, shape=shp}]
              --> field(diff, dim, shp)
          end))

(* ordering comparisons are overloaded at int and real *)
val lt_ii = monoVar(N.op_lt, [Ty.T_Int, Ty.T_Int] --> Ty.T_Bool)
val lt_rr = monoVar(N.op_lt, [Ty.realTy, Ty.realTy] --> Ty.T_Bool)
val lte_ii = monoVar(N.op_lte, [Ty.T_Int, Ty.T_Int] --> Ty.T_Bool)
val lte_rr = monoVar(N.op_lte, [Ty.realTy, Ty.realTy] --> Ty.T_Bool)
val gte_ii = monoVar(N.op_gte, [Ty.T_Int, Ty.T_Int] --> Ty.T_Bool)
val gte_rr = monoVar(N.op_gte, [Ty.realTy, Ty.realTy] --> Ty.T_Bool)
val gt_ii = monoVar(N.op_gt, [Ty.T_Int, Ty.T_Int] --> Ty.T_Bool)
val gt_rr = monoVar(N.op_gt, [Ty.realTy, Ty.realTy] --> Ty.T_Bool)

(* equality and inequality are overloaded at bool, int, string, and real *)
val equ_bb = monoVar(N.op_equ, [Ty.T_Bool, Ty.T_Bool] --> Ty.T_Bool)
val equ_ii = monoVar(N.op_equ, [Ty.T_Int, Ty.T_Int] --> Ty.T_Bool)
val equ_ss = monoVar(N.op_equ, [Ty.T_String, Ty.T_String] --> Ty.T_Bool)
val equ_rr = monoVar(N.op_equ, [Ty.realTy, Ty.realTy] --> Ty.T_Bool)
val neq_bb = monoVar(N.op_neq, [Ty.T_Bool, Ty.T_Bool] --> Ty.T_Bool)
val neq_ii = monoVar(N.op_neq, [Ty.T_Int, Ty.T_Int] --> Ty.T_Bool)
val neq_ss = monoVar(N.op_neq, [Ty.T_String, Ty.T_String] --> Ty.T_Bool)
val neq_rr = monoVar(N.op_neq, [Ty.realTy, Ty.realTy] --> Ty.T_Bool)

(* unary negation is overloaded at integers, tensors, and fields *)
val neg_i = monoVar(N.op_neg, [Ty.T_Int] --> Ty.T_Int)
val neg_t = polyVar(N.op_neg, all([SK],
      fn [Ty.SHAPE sv] => let
          val tt = Ty.T_Tensor(Ty.ShapeVar sv)
          in
            [tt] --> tt
          end))
val neg_f = polyVar(N.op_neg, all([DK, NK, SK],
      fn [Ty.DIFF k, Ty.DIM d, Ty.SHAPE dd] => let
          val ft = field(Ty.DiffVar(k, 0), Ty.DimVar d, Ty.ShapeVar dd)
          in
            [ft] --> ft
          end))

(* clamp is overloaded at scalars and vectors *)
val clamp_rrr = monoVar(N.fn_clamp, [Ty.realTy, Ty.realTy, Ty.realTy] --> Ty.realTy)
val clamp_vvv = polyVar (N.fn_clamp, allNK(fn dimVar => let
      val vecTy = tensor[Ty.DimVar dimVar]
      in
        [vecTy, vecTy, vecTy] --> vecTy
      end))

(* linear interpolation over tensors; three-argument and five-argument forms *)
val lerp3 = polyVar(N.fn_lerp, all([SK],
      fn [Ty.SHAPE sv] => let
          val tt = Ty.T_Tensor(Ty.ShapeVar sv)
          in
            [tt, tt, Ty.realTy] --> tt
          end))
val lerp5 = polyVar(N.fn_lerp, all([SK],
      fn [Ty.SHAPE sv] => let
          val tt = Ty.T_Tensor(Ty.ShapeVar sv)
          in
            [tt, tt, Ty.realTy, Ty.realTy, Ty.realTy] --> tt
          end))

(* Eigenvalues/vectors of a matrix; we only support this operation on 2x2 and 3x3 matrices, so
* it is overloaded at those two sizes rather than being polymorphic in the dimension.
*)
local
  (* eigenvalues are returned as a d-sequence of reals; eigenvectors as a
   * d-sequence of d-vectors
   *)
  fun evals d = monoVar (N.fn_evals, [matrix d] --> Ty.T_Sequence(Ty.realTy, d))
  fun evecs d = monoVar (N.fn_evecs, [matrix d] --> Ty.T_Sequence(tensor[d], d))
in
(* N2/N3 are the dimension constants bound above *)
val evals2x2 = evals N2
val evecs2x2 = evecs N2
val evals3x3 = evals N3
val evecs3x3 = evecs N3
end

(* C math functions: each function takes arity-many real arguments and
 * returns a real
 *)
val mathFns : (MathFuns.name * Var.var) list = let
      fun fnTy n = List.tabulate(MathFuns.arity n, fn _ => Ty.realTy) --> Ty.realTy
      in
        List.map (fn n => (n, monoVar(MathFuns.toAtom n, fnTy n))) MathFuns.allFuns
      end

(* pseudo-operator for probing a field at a point, yielding a tensor of the
 * field's shape
 *)
val op_probe = polyVar (N.op_at, all([DK, NK, SK],
      fn [Ty.DIFF k, Ty.DIM d, Ty.SHAPE dd] => let
          val diff = Ty.DiffVar(k, 0)
          val dim = Ty.DimVar d
          val shp = Ty.ShapeVar dd
          in
            [field(diff, dim, shp), tensor[dim]] --> Ty.T_Tensor shp
          end))

(* differentiation of scalar fields; the result gains one index (the d-vector
 * of partials) and loses one level of differentiability (DiffVar(k, ~1))
 *)
val op_D = polyVar (N.op_D, all([DK, NK],
      fn [Ty.DIFF k, Ty.DIM d] => let
          val dim = Ty.DimVar d
          in
            [field(Ty.DiffVar(k, 0), dim, Ty.Shape[])]
              --> field(Ty.DiffVar(k, ~1), dim, Ty.Shape[dim])
          end))
(* differentiation of higher-order tensor fields; the result's shape is
 * extended by the field's dimension, and differentiability drops by one
 *)
val op_Dotimes = polyVar (N.op_Dotimes, all([DK, NK, SK, NK],
      fn [Ty.DIFF k, Ty.DIM d, Ty.SHAPE dd, Ty.DIM d'] => let
          val diff0 = Ty.DiffVar(k, 0)
          val diffm1 = Ty.DiffVar(k, ~1)
          val dim = Ty.DimVar d
          val dim' = Ty.DimVar d'
          val shp = Ty.ShapeVar dd
          in
            [field(diff0, dim, Ty.ShapeExt(shp, dim'))]
              --> field(diffm1, dim, Ty.ShapeExt(Ty.ShapeExt(shp, dim'), dim))
          end))

(* norm of a tensor of any shape, yielding a real *)
val op_norm = polyVar (N.op_norm, all([SK],
fn [Ty.SHAPE dd] => [Ty.T_Tensor(Ty.ShapeVar dd)] --> Ty.realTy))

(* boolean negation *)
val op_not = monoVar (N.op_not, [Ty.T_Bool] --> Ty.T_Bool)

(* functions *)

(* cross product; defined only on 3-vectors *)
val op_cross = monoVar (N.op_cross, let
      val v3 = tensor[N3]
      in
        [v3, v3] --> v3
      end)

(* the inner product operator (including dot product) is treated as a special case in the
 * typechecker.  It is not included in the basis environment, but we define its type scheme
 * here.  There is an implicit constraint on its type to have the following scheme:
 *
 *     ALL[sigma1, d1, sigma2] . tensor[sigma1, d1] * tensor[d1, sigma2] -> tensor[sigma1, sigma2]
 *)
val op_inner = polyVar (N.op_dot, all([SK, SK, SK],
      fn [Ty.SHAPE s1, Ty.SHAPE s2, Ty.SHAPE s3] => let
          val argTy1 = Ty.T_Tensor(Ty.ShapeVar s1)
          val argTy2 = Ty.T_Tensor(Ty.ShapeVar s2)
          val resTy = Ty.T_Tensor(Ty.ShapeVar s3)
          in
            [argTy1, argTy2] --> resTy
          end))

(* inside test: takes a d-vector and a field over d-space and yields a bool
 * (presumably whether the point lies in the field's domain)
 *)
val fn_inside = polyVar (N.fn_inside, all([DK, NK, SK],
      fn [Ty.DIFF k, Ty.DIM d, Ty.SHAPE dd] => let
          val dim = Ty.DimVar d
          val fld = field(Ty.DiffVar(k, 0), dim, Ty.ShapeVar dd)
          in
            [Ty.T_Tensor(Ty.Shape[dim]), fld] --> Ty.T_Bool
          end))

fn [Ty.DIM d, Ty.SHAPE dd] => let
val d = Ty.DimVar d
val dd = Ty.ShapeVar dd
in
[Ty.T_String] --> Ty.T_Image{dim=d, shape=dd}
end))

(* binary max and min on reals *)
val fn_max = monoVar (N.fn_max, [Ty.realTy, Ty.realTy] --> Ty.realTy)
val fn_min = monoVar (N.fn_min, [Ty.realTy, Ty.realTy] --> Ty.realTy)

(* modulate: vector * vector -> vector, all of the same dimension *)
val fn_modulate = polyVar (N.fn_modulate, all([NK],
      fn [Ty.DIM d] => let
          val vecTy = tensor[Ty.DimVar d]
          in
            [vecTy, vecTy] --> vecTy
          end))

(* normalize: vector -> vector of the same dimension *)
val fn_normalize = polyVar (N.fn_normalize, all([NK],
      fn [Ty.DIM d] => let
          val vecTy = tensor[Ty.DimVar d]
          in
            [vecTy] --> vecTy
          end))

(* outer product: a d1-vector and a d2-vector yield a d1 x d2 matrix *)
val op_outer = polyVar (N.op_outer, all([NK, NK],
      fn [Ty.DIM d1, Ty.DIM d2] =>
        [tensor[Ty.DimVar d1], tensor[Ty.DimVar d2]]
          --> tensor[Ty.DimVar d1, Ty.DimVar d2]))

(* principal eigenvector of a square matrix (the "principle" spelling matches
 * the name in BasisNames)
 *)
val fn_principleEvec = polyVar (N.fn_principleEvec, all([NK],
      fn [Ty.DIM d] => [matrix(Ty.DimVar d)] --> tensor[Ty.DimVar d]))

(* trace of a square matrix *)
val fn_trace = polyVar (N.fn_trace, all([NK],
      fn [Ty.DIM dv] => let
          val mTy = matrix(Ty.DimVar dv)
          in
            [mTy] --> Ty.realTy
          end))

(* kernels; the DiffConst argument records each kernel's declared continuity level *)
(* FIXME: we should really get the continuity info from the kernels themselves *)
val kn_bspln3 = monoVar (N.kn_bspln3, Ty.T_Kernel(Ty.DiffConst 2))
val kn_bspln5 = monoVar (N.kn_bspln5, Ty.T_Kernel(Ty.DiffConst 4))
val kn_ctmr = monoVar (N.kn_ctmr, Ty.T_Kernel(Ty.DiffConst 1))
val kn_tent = monoVar (N.kn_tent, Ty.T_Kernel(Ty.DiffConst 0))
(* kernels with false claims of differentiability, for pedagogy *)
val kn_c1tent = monoVar (N.kn_c1tent, Ty.T_Kernel(Ty.DiffConst 1))
val kn_c2ctmr = monoVar (N.kn_c2ctmr, Ty.T_Kernel(Ty.DiffConst 2))

(***** internal variables: compiler-introduced operators, not user-visible *****)

(* integer to real conversion *)
val i2r = monoVar (Atom.atom "\$i2r", [Ty.T_Int] --> Ty.realTy)

(* identity matrix of any dimension *)
val identity = polyVar (Atom.atom "\$id", allNK (fn dv => [] --> matrix(Ty.DimVar dv)))

(* zero tensor of any shape *)
val zero = polyVar (Atom.atom "\$zero", all ([SK],
fn [Ty.SHAPE dd] => [] --> Ty.T_Tensor(Ty.ShapeVar dd)))

(* sequence subscript: a d-sequence of values of some type plus an int index
 * yields a value of that type
 *)
val subscript = polyVar (Atom.atom "\$sub", all ([TK, NK],
fn [Ty.TYPE tv, Ty.DIM d] =>
[Ty.T_Sequence(Ty.T_Var tv, Ty.DimVar d), Ty.T_Int] --> Ty.T_Var tv))
end (* local *)
end