diff --git a/base/boot.jl b/base/boot.jl
index 88d7ed85972fe..1775bc8312037 100644
--- a/base/boot.jl
+++ b/base/boot.jl
@@ -59,15 +59,19 @@
 # name::Symbol
 #end
 
-#type LambdaInfo
-# ast::Expr
-# sparams::Tuple
-# tfunc
-# name::Symbol
-# inferred
-# file::Symbol
-# line::Int
-# module::Module
+#type Method
+#end
+
+#type MethodInstance
+#end
+
+#type CodeInfo
+#end
+
+#type TypeMapLevel
+#end
+
+#type TypeMapEntry
 #end
 
 #abstract Ref{T}
@@ -119,7 +123,7 @@ export
     Tuple, Type, TypeConstructor, TypeName, TypeVar, Union, Void,
     SimpleVector, AbstractArray, DenseArray,
     # special objects
-    Function, LambdaInfo, Method, MethodTable, TypeMapEntry, TypeMapLevel,
+    Function, CodeInfo, Method, MethodTable, TypeMapEntry, TypeMapLevel,
     Module, Symbol, Task, Array, WeakRef, VecElement,
     # numeric types
     Number, Real, Integer, Bool, Ref, Ptr,
diff --git a/base/deepcopy.jl b/base/deepcopy.jl
index 77c7a9118c3d5..b95eb1d094390 100644
--- a/base/deepcopy.jl
+++ b/base/deepcopy.jl
@@ -7,7 +7,7 @@
 deepcopy(x) = deepcopy_internal(x, ObjectIdDict())::typeof(x)
 
-deepcopy_internal(x::Union{Symbol,LambdaInfo,GlobalRef,DataType,Union,Task},
+deepcopy_internal(x::Union{Symbol,Core.MethodInstance,Method,GlobalRef,DataType,Union,Task},
                   stackdict::ObjectIdDict) = x
 deepcopy_internal(x::Tuple, stackdict::ObjectIdDict) =
     ntuple(i->deepcopy_internal(x[i], stackdict), length(x))
diff --git a/base/deprecated.jl b/base/deprecated.jl
index 19a4707c5d47e..a9aeb4a304773 100644
--- a/base/deprecated.jl
+++ b/base/deprecated.jl
@@ -273,8 +273,6 @@ const call = @eval function(f, args...; kw...)
 end
 export call
 
-@deprecate_binding LambdaStaticData LambdaInfo
-
 # Changed issym to issymmetric. #15192
 @deprecate issym issymmetric
diff --git a/base/essentials.jl b/base/essentials.jl
index 75fa67251fa56..dbcf56f27ba75 100644
--- a/base/essentials.jl
+++ b/base/essentials.jl
@@ -1,5 +1,7 @@
 # This file is a part of Julia. License is MIT: http://julialang.org/license
 
+using Core: CodeInfo
+
 typealias Callable Union{Function,DataType}
 
 const Bottom = Union{}
diff --git a/base/inference.jl b/base/inference.jl
index a0c80da586308..e69229d0e8f27 100644
--- a/base/inference.jl
+++ b/base/inference.jl
@@ -1,6 +1,6 @@
 # This file is a part of Julia. License is MIT: http://julialang.org/license
 
-import Core: _apply, svec, apply_type, Builtin, IntrinsicFunction
+import Core: _apply, svec, apply_type, Builtin, IntrinsicFunction, MethodInstance
 
 #### parameters limiting potentially-infinite types ####
 const MAX_TYPEUNION_LEN = 3
@@ -48,7 +48,8 @@ type InferenceState
     currpc::LineNum
 
     # info on the state of inference and the linfo
-    linfo::LambdaInfo
+    linfo::MethodInstance # used here for the tuple (specTypes, env, Method)
+    src::CodeInfo
     nargs::Int
     stmt_types::Vector{Any}
     # return type
@@ -74,36 +75,35 @@ type InferenceState
     # optimization
     optimize::Bool
     inlining::Bool
-    needtree::Bool
+    cached::Bool
     inferred::Bool
 
-    function InferenceState(linfo::LambdaInfo, optimize::Bool, inlining::Bool, needtree::Bool)
-        @assert isa(linfo.code,Array{Any,1})
-        nslots = length(linfo.slotnames)
-        nl = label_counter(linfo.code)+1
+    # src is assumed to be a newly-allocated CodeInfo that can be modified in-place to contain intermediate results
+    function InferenceState(linfo::MethodInstance, src::CodeInfo, optimize::Bool, inlining::Bool, cached::Bool)
+        code = src.code::Array{Any,1}
+        nl = label_counter(code) + 1
+        toplevel = !isdefined(linfo, :def)
 
-        if isempty(linfo.sparam_vals) && !isempty(linfo.sparam_syms)
-            sp = svec(Any[ TypeVar(sym, Any, true) for sym in linfo.sparam_syms ]...)
+        if !toplevel && isempty(linfo.sparam_vals) && !isempty(linfo.def.sparam_syms)
+            sp = svec(Any[ TypeVar(sym, Any, true) for sym in linfo.def.sparam_syms ]...)
         else
             sp = linfo.sparam_vals
         end
 
-        if !isa(linfo.slottypes, Array)
-            linfo.slottypes = Any[ Any for i = 1:nslots ]
-        end
-        if !isa(linfo.ssavaluetypes, Array)
-            linfo.ssavaluetypes = Any[ NF for i = 1:(linfo.ssavaluetypes::Int) ]
-        end
+        nslots = length(src.slotnames)
+        src.slottypes = Any[ Any for i = 1:nslots ]
+        src.ssavaluetypes = Any[ NF for i = 1:(src.ssavaluetypes::Int) ]
 
-        n = length(linfo.code)
-        s = Any[ () for i=1:n ]
+        n = length(code)
+        s = Any[ () for i = 1:n ]
         # initial types
-        s[1] = Any[ VarState(Bottom,true) for i=1:nslots ]
+        s[1] = Any[ VarState(Bottom, true) for i = 1:nslots ]
 
         atypes = linfo.specTypes
-        la = linfo.nargs
+        nargs = toplevel ? 0 : linfo.def.nargs
+        la = nargs
         if la > 0
-            if linfo.isva
+            if linfo.def.isva
                 if atypes === Tuple
                     if la > 1
                         atypes = Tuple{Any[Any for i=1:la-1]..., Tuple.parameters[1]}
@@ -143,8 +143,8 @@ type InferenceState
             @assert la == 0 # wrong number of arguments
         end
 
-        ssavalue_uses = find_ssavalue_uses(linfo.code)
-        ssavalue_init = copy(linfo.ssavaluetypes)
+        ssavalue_uses = find_ssavalue_uses(code)
+        ssavalue_init = copy(src.ssavaluetypes::Vector{Any})
 
         # exception handlers
         cur_hand = ()
@@ -154,21 +154,44 @@ type InferenceState
         W = IntSet()
        push!(W, 1) #initial pc to visit
 
-        inmodule = isdefined(linfo, :def) ? linfo.def.module : current_module() # toplevel thunks are inferred in the current module
+        inmodule = toplevel ? current_module() : linfo.def.module # toplevel thunks are inferred in the current module
 
        frame = new(
            sp, nl, inmodule, 0,
-            linfo, la, s, Union{}, W, n,
+            linfo, src, nargs, s, Union{}, W, n,
            cur_hand, handler_at, n_handlers,
            ssavalue_uses, ssavalue_init,
            ObjectIdDict(), #Dict{InferenceState, Vector{LineNum}}(),
            Vector{Tuple{InferenceState, Vector{LineNum}}}(),
-            false, false, optimize, inlining, needtree, false)
+            false, false, optimize, inlining, cached, false)
        push!(active, frame)
        nactive[] += 1
        return frame
    end
 end
 
+# create copies of the CodeInfo definition, and any fields that type-inference might modify
+# TODO: post-inference see if we can swap back to the original arrays
+function get_source(li::MethodInstance)
+    src = ccall(:jl_copy_code_info, Ref{CodeInfo}, (Any,), li.def.source)
+    if isa(src.code, Array{UInt8,1})
+        src.code = ccall(:jl_uncompress_ast, Any, (Any, Any), li.def, src.code)
+    else
+        src.code = copy_exprargs(src.code)
+    end
+    src.slotnames = copy(src.slotnames)
+    src.slotflags = copy(src.slotflags)
+    return src
+end
+
+function get_staged(li::MethodInstance)
+    src = ccall(:jl_code_for_staged, Any, (Any,), li)::CodeInfo
+    if isa(src.code, Array{UInt8,1})
+        src.code = ccall(:jl_uncompress_ast, Any, (Any, Any), li.def, src.code)
+    end
+    return src
+end
+
+
 #### current global inference state ####
 
 const active = Vector{Any}() # set of all InferenceState objects being processed
@@ -998,7 +1021,7 @@ function pure_eval_call(f::ANY, argtypes::ANY, atype, vtypes, sv)
    meth = meth[1]::SimpleVector
    method = meth[3]::Method
    # TODO: check pure on the inferred thunk
-    if method.isstaged || !method.lambda_template.pure
+    if method.isstaged || !method.source.pure
        return false
    end
 
@@ -1130,7 +1153,7 @@ function abstract_eval(e::ANY, vtypes::VarTable, sv::InferenceState)
    if isa(e,QuoteNode)
        return abstract_eval_constant((e::QuoteNode).value)
    elseif isa(e,SSAValue)
-        return abstract_eval_ssavalue(e::SSAValue, sv.linfo)
+        return abstract_eval_ssavalue(e::SSAValue, sv.src)
    elseif isa(e,Slot)
        return vtypes[e.id].typ
    elseif isa(e,Symbol)
@@ -1214,8 +1237,8 @@ function abstract_eval_global(M::Module, s::Symbol)
    return Any
 end
 
-function abstract_eval_ssavalue(s::SSAValue, linfo::LambdaInfo)
-    typ = linfo.ssavaluetypes[s.id + 1]
+function abstract_eval_ssavalue(s::SSAValue, src::CodeInfo)
+    typ = src.ssavaluetypes[s.id + 1]
    if typ === NF
        return Bottom
    end
@@ -1435,66 +1458,70 @@ function find_ssavalue_uses(e::ANY, uses, line)
        end
    end
 end
 
-function newvar!(sv::InferenceState, typ)
-    id = length(sv.linfo.ssavaluetypes)
-    push!(sv.linfo.ssavaluetypes, typ)
+function newvar!(sv::InferenceState, typ::ANY)
+    id = length(sv.src.ssavaluetypes)
+    push!(sv.src.ssavaluetypes, typ)
    return SSAValue(id)
 end
 
-# create a specialized LambdaInfo from a method
-function specialize_method(method::Method, types::ANY, sp::SimpleVector, cached)
-    if cached
-        return ccall(:jl_specializations_get_linfo, Ref{LambdaInfo}, (Any, Any, Any, Cint), method, types, sp, true)
-    else
-        return ccall(:jl_get_specialized, Ref{LambdaInfo}, (Any, Any, Any), method, types, sp)
-    end
-end
-
-# create copies of any field that type-inference might modify
-function unshare_linfo!(li::LambdaInfo)
-    orig = li.def.lambda_template
-    if isa(li.code, Array{UInt8,1})
-        li.code = ccall(:jl_uncompress_ast, Any, (Any,Any), li, li.code)
-    elseif li.code === orig.code
-        li.code = copy_exprargs(orig.code)
-    end
-    if !li.def.isstaged
-        li.slotnames = copy(li.slotnames)
-        li.slotflags = copy(li.slotflags)
-    end
-    return li
+# create a specialized MethodInstance from a method
+function get_linfo(method::Method, types::ANY, sp::SimpleVector)
+    return ccall(:jl_specializations_get_linfo, Ref{MethodInstance}, (Any, Any, Any), method, types, sp)
 end
 
 inlining_enabled() = (JLOptions().can_inline == 1)
 coverage_enabled() = (JLOptions().code_coverage != 0)
 
-#### entry points for inferring a LambdaInfo given a type signature ####
+#### entry points for inferring a MethodInstance given a type signature ####
 
 function typeinf_edge(method::Method, atypes::ANY, sparams::SimpleVector, needtree::Bool, optimize::Bool, cached::Bool, caller)
    local code = nothing
    local frame = nothing
-    if isa(caller, LambdaInfo)
+    if isa(caller, MethodInstance)
        code = caller
-    elseif cached
+    elseif cached && !is(method.specializations, nothing)
        # check cached specializations
        # for an existing result stored there
-        if !is(method.specializations, nothing)
-            code = ccall(:jl_specializations_lookup, Any, (Any, Any), method, atypes)
-            if isa(code, Void)
-                # something completely new
-            elseif isa(code, LambdaInfo)
-                # something existing
-                if code.inferred && !(needtree && code.code === nothing)
-                    return (code, code.rettype, true)
-                end
+        code = ccall(:jl_specializations_lookup, Any, (Any, Any), method, atypes)
+        if isa(code, Void)
+            # something completely new
+        elseif isa(code, MethodInstance)
+            # something existing
+        else
+            # sometimes just a return type is stored here. if a full AST
+            # is not needed, we can return it.
+            typeassert(code, Type)
+            if !needtree
+                return (nothing, code, true)
+            end
+            cached = false # don't need to save the new result
+            code = nothing
+        end
+    end
+
+    if isa(code, MethodInstance) && isdefined(code, :inferred)
+        if code.jlcall_api == 2
+            if needtree
+                tree = ccall(:jl_new_code_info_uninit, Ref{CodeInfo}, ())
+                tree.code = Any[ Expr(:return, QuoteNode(code.inferred)) ]
+                tree.slotnames = Any[ compiler_temp_sym for i = 1:method.nargs ]
+                tree.slotflags = UInt8[ 0 for i = 1:method.nargs ]
+                tree.slottypes = nothing
+                tree.ssavaluetypes = 0
+                tree.inferred = true
+                tree.pure = true
+                tree.inlineable = true
            else
-                # sometimes just a return type is stored here. if a full AST
-                # is not needed, we can return it.
-                typeassert(code, Type)
-                if !needtree
-                    return (nothing, code, true)
-                end
-                code = nothing
+                tree = Const(code.inferred)
            end
+            return (tree, code.rettype, true)
+        elseif isa(code.inferred, CodeInfo)
+            if code.inferred.inferred
+                return (code.inferred, code.rettype, true)
+            end
+        elseif !needtree
+            return (nothing, code.rettype, true)
+        else
+            cached = false # don't need to save the new result
        end
    end
@@ -1531,26 +1558,19 @@ function typeinf_edge(method::Method, atypes::ANY, sparams::SimpleVector, needtr
        end
    end
 
-    if isa(code, LambdaInfo) && code.code !== nothing
+    if isa(code, MethodInstance)
        # reuse the existing code object
        linfo = code
-        @assert typeseq(linfo.specTypes, atypes) && !code.inferred
-    elseif method.isstaged
-        if !isleaftype(atypes)
+        @assert typeseq(linfo.specTypes, atypes)
+    else
+        if method.isstaged && !isleaftype(atypes)
            # don't call staged functions on abstract types.
            # (see issues #8504, #10230)
            # we can't guarantee that their type behavior is monotonic.
            # XXX: this test is wrong if Types (such as DataType) are present
            return (nothing, Any, false)
        end
-        try
-            # user code might throw errors – ignore them
-            linfo = specialize_method(method, atypes, sparams, cached)
-        catch
-            return (nothing, Any, false)
-        end
-    else
-        linfo = specialize_method(method, atypes, sparams, cached)
+        linfo = get_linfo(method, atypes, sparams)
    end
 
    ccall(:jl_typeinf_begin, Void, ())
@@ -1569,13 +1589,21 @@ function typeinf_edge(method::Method, atypes::ANY, sparams::SimpleVector, needtr
        end
        # TODO: this assertion seems iffy
        assert(frame !== nothing)
-        if needtree
-            frame.needtree = true
-        end
    else
+        # TODO: verify again here that linfo wasn't just inferred
        # inference not started yet, make a new frame for a new lambda
+        if method.isstaged
+            try
+                # user code might throw errors – ignore them
+                src = get_staged(linfo)
+            catch
+                return (nothing, Any, false)
+            end
+        else
+            src = get_source(linfo)
+        end
        linfo.inInference = true
-        frame = InferenceState(unshare_linfo!(linfo::LambdaInfo), optimize, inlining_enabled(), needtree)
+        frame = InferenceState(linfo::MethodInstance, src, optimize, inlining_enabled(), cached)
    end
    frame = frame::InferenceState
@@ -1597,7 +1625,7 @@ function typeinf_edge(method::Method, atypes::ANY, sparams::SimpleVector, needtr
    end
    typeinf_loop(frame)
    ccall(:jl_typeinf_end, Void, ())
-    return (frame.linfo, widenconst(frame.bestguess), frame.inferred)
+    return (frame.src, widenconst(frame.bestguess), frame.inferred)
 end
 
 function typeinf_edge(method::Method, atypes::ANY, sparams::SimpleVector, caller)
@@ -1613,44 +1641,23 @@ end
 function typeinf_uncached(method::Method, atypes::ANY, sparams::SimpleVector, optimize::Bool)
    return typeinf_edge(method, atypes, sparams, true, optimize, false, nothing)
 end
-function typeinf_ext(linfo::LambdaInfo)
+function typeinf_ext(linfo::MethodInstance)
    if isdefined(linfo, :def)
        # method lambda - infer this specialization via the method cache
-        if linfo.inferred && linfo.code !== nothing
-            return linfo
-        end
-        (code, _t, inferred) = typeinf_edge(linfo.def, linfo.specTypes, linfo.sparam_vals, true, true, true, linfo)
-        if inferred && code.inferred && linfo !== code
-            # This case occurs when the IR for a function has been deleted.
-            # `code` will be a newly-created LambdaInfo, and we need to copy its
-            # contents to the existing one to copy the info to the method cache.
-            linfo.inInference = true
-            linfo.code = code.code
-            linfo.slotnames = code.slotnames
-            linfo.slottypes = code.slottypes
-            linfo.slotflags = code.slotflags
-            linfo.ssavaluetypes = code.ssavaluetypes
-            linfo.pure = code.pure
-            linfo.inlineable = code.inlineable
-            linfo.propagate_inbounds = code.propagate_inbounds
-            ccall(:jl_set_lambda_rettype, Void, (Any, Any), linfo, code.rettype)
-            if code.jlcall_api == 2
-                linfo.constval = code.constval
-                linfo.jlcall_api = 2
-            end
-            linfo.inferred = true
-            linfo.inInference = false
+        if isdefined(linfo, :inferred) && isa(linfo.inferred, CodeInfo)
+            return linfo.inferred
        end
+        (code, typ, inferred) = typeinf_edge(linfo.def, linfo.specTypes, linfo.sparam_vals, true, true, true, linfo)
        return code
    else
        # toplevel lambda - infer directly
        linfo.inInference = true
        ccall(:jl_typeinf_begin, Void, ())
-        frame = InferenceState(linfo, true, inlining_enabled(), true)
+        frame = InferenceState(linfo, linfo.inferred, true, inlining_enabled(), true)
        typeinf_loop(frame)
        ccall(:jl_typeinf_end, Void, ())
        @assert frame.inferred # TODO: deal with this better
-        return linfo
+        return frame.src
    end
 end
 
@@ -1729,7 +1736,7 @@ function typeinf_frame(frame)
        delete!(W, pc)
        frame.currpc = pc
        frame.cur_hand = frame.handler_at[pc]
-        stmt = frame.linfo.code[pc]
+        stmt = frame.src.code[pc]
        changes = abstract_interpret(stmt, s[pc]::Array{Any,1}, frame)
        if changes === ()
            # this line threw an error and there is no need to continue
@@ -1751,9 +1758,9 @@ function typeinf_frame(frame)
            changes = changes::StateUpdate
            id = (changes.var::SSAValue).id + 1
            new = changes.vtype.typ
-            old = frame.linfo.ssavaluetypes[id]
-            if old===NF || !(new ⊑ old)
-                frame.linfo.ssavaluetypes[id] = tmerge(old, new)
+            old = frame.src.ssavaluetypes[id]
+            if old === NF || !(new ⊑ old)
+                frame.src.ssavaluetypes[id] = tmerge(old, new)
                for r in frame.ssavalue_uses[id]
                    if !is(s[r], ()) # s[r] === () => unreached statement
                        push!(W, r)
@@ -1885,32 +1892,30 @@ end
 
 #### finalize and record the result of running type inference ####
 
-function isinlineable(linfo::LambdaInfo)
+function isinlineable(m::Method, src::CodeInfo)
    inlineable = false
-    if isdefined(linfo, :def)
-        cost = 1000
-        if linfo.def.module === _topmod(linfo.def.module)
-            name = linfo.def.name
-            sig = linfo.def.sig
-            if ((name === :+ || name === :* || name === :min || name === :max) &&
-                sig == Tuple{sig.parameters[1],Any,Any,Any,Vararg{Any}})
-                inlineable = true
-            elseif (name === :next || name === :done || name === :unsafe_convert ||
-                    name === :cconvert)
-                cost ÷= 4
-            end
-        end
-        if !inlineable
-            body = Expr(:block)
-            body.args = linfo.code
-            inlineable = inline_worthy(body, cost)
-        end
+    cost = 1000
+    if m.module === _topmod(m.module)
+        name = m.name
+        sig = m.sig
+        if ((name === :+ || name === :* || name === :min || name === :max) &&
+            sig == Tuple{sig.parameters[1],Any,Any,Any,Vararg{Any}})
+            inlineable = true
+        elseif (name === :next || name === :done || name === :unsafe_convert ||
+                name === :cconvert)
+            cost ÷= 4
+        end
+    end
+    if !inlineable
+        body = Expr(:block)
+        body.args = src.code
+        inlineable = inline_worthy(body, cost)
    end
    return inlineable
 end
 
 # inference completed on `me`
-# update the LambdaInfo and notify the edges
+# update the MethodInstance and notify the edges
 function finish(me::InferenceState)
    for (i,_) in me.edges
        @assert (i::InferenceState).fixedpoint
@@ -1922,13 +1927,13 @@ function finish(me::InferenceState)
    @assert me.inworkq
 
    # annotate fulltree with type information
-    gt = me.linfo.ssavaluetypes
+    gt = me.src.ssavaluetypes
    for i = 1:length(gt)
        if gt[i] === NF
            gt[i] = Union{}
        end
    end
-    type_annotate!(me.linfo, me.stmt_types, me, me.nargs)
+    type_annotate!(me)
 
    do_coverage = coverage_enabled()
    force_noinline = false
@@ -1939,37 +1944,37 @@ function finish(me::InferenceState)
        # This (and `reindex_labels!`) needs to be run for `!me.optimize`
        # if we start to create `SSAValue` in type inference when not
        # optimizing and use unoptimized IR in codegen.
-        gotoifnot_elim_pass!(me.linfo, me)
-        inlining_pass!(me.linfo, me)
-        void_use_elim_pass!(me.linfo, me)
-        alloc_elim_pass!(me.linfo, me)
-        getfield_elim_pass!(me.linfo, me)
+        gotoifnot_elim_pass!(me)
+        inlining_pass!(me)
+        void_use_elim_pass!(me)
+        alloc_elim_pass!(me)
+        getfield_elim_pass!(me)
        # Clean up for `alloc_elim_pass!` and `getfield_elim_pass!`
-        void_use_elim_pass!(me.linfo, me)
-        meta_elim_pass!(me.linfo, me.linfo.code::Array{Any,1}, do_coverage)
+        void_use_elim_pass!(me)
+        meta_elim_pass!(me.src.code::Array{Any,1}, me.src.propagate_inbounds, do_coverage)
        # Pop metadata before label reindexing
-        force_noinline = popmeta!(me.linfo.code::Array{Any,1}, :noinline)[1]
-        reindex_labels!(me.linfo, me)
+        force_noinline = popmeta!(me.src.code::Array{Any,1}, :noinline)[1]
+        reindex_labels!(me)
    end
 
-    widen_all_consts!(me.linfo)
-
-    ispure = me.linfo.pure
-    ccall(:jl_set_lambda_rettype, Void, (Any, Any), me.linfo, widenconst(me.bestguess))
+    widen_all_consts!(me.src)
+    const_api = false
+    ispure = me.src.pure
+    inferred = me.src
 
    # Do not emit `jlcall_api == 2` if coverage is enabled so that we don't
    # need to add coverage support to the `jl_call_method_internal` fast path
    if !do_coverage && ((isa(me.bestguess,Const) && me.bestguess.val !== nothing) ||
                        (isType(me.bestguess) && !has_typevars(me.bestguess.parameters[1],true)))
-        if !ispure && length(me.linfo.code) < 10
+        if !ispure && length(me.src.code) < 10
            ispure = true
-            for stmt in me.linfo.code
-                if !statement_effect_free(stmt, me.linfo)
+            for stmt in me.src.code
+                if !statement_effect_free(stmt, me.src, me.mod)
                    ispure = false; break
                end
            end
            if ispure
-                for fl in me.linfo.slotflags
+                for fl in me.src.slotflags
                    if (fl & Slot_UsedUndef) != 0
                        ispure = false; break
                    end
@@ -1978,35 +1983,35 @@ function finish(me::InferenceState)
        end
        if ispure
            # use constant calling convention
-            setfield!(me.linfo, :constval,
-                      isa(me.bestguess,Const) ? me.bestguess.val : me.bestguess.parameters[1])
-            me.linfo.jlcall_api = 2
+            inferred = isa(me.bestguess,Const) ? me.bestguess.val : me.bestguess.parameters[1]
+            const_api = true
        end
-        me.linfo.pure = ispure
+        me.src.pure = ispure
    end
 
    # determine and cache inlineability
-    if !me.linfo.inlineable && !force_noinline
-        me.linfo.inlineable = me.linfo.jlcall_api==2 || isinlineable(me.linfo)
-    end
-
-    if !me.needtree
-        me.needtree = me.linfo.inlineable || ccall(:jl_is_cacheable_sig, Int32, (Any, Any, Any),
-            me.linfo.specTypes, me.linfo.def.sig, me.linfo.def) != 0
-    end
-
-    if me.needtree
-        if isdefined(me.linfo, :def)
-            # compress code for non-toplevel thunks
-            compressedtree = ccall(:jl_compress_ast, Any, (Any,Any), me.linfo, me.linfo.code)
-            me.linfo.code = compressedtree
+    if !me.src.inlineable && !force_noinline && isdefined(me.linfo, :def)
+        me.src.inlineable = const_api || isinlineable(me.linfo.def, me.src)
+    end
+
+    if me.cached
+        # TODO: check that mutating the lambda info is OK first?
+        if !const_api
+            if isdefined(me.linfo, :def)
+                keeptree = me.src.inlineable || ccall(:jl_is_cacheable_sig, Int32, (Any, Any, Any),
+                    me.linfo.specTypes, me.linfo.def.sig, me.linfo.def) != 0
+                if !keeptree
+                    inferred = nothing
+                else
+                    # compress code for non-toplevel thunks
+                    inferred.code = ccall(:jl_compress_ast, Any, (Any, Any), me.linfo.def, inferred.code)
+                end
+            end
        end
-    else
-        ccall(:jl_set_lambda_code_null, Void, (Any,), me.linfo)
-        me.linfo.inlineable = false
+        ccall(:jl_set_lambda_rettype, Void, (Any, Any, Any, Any), me.linfo, widenconst(me.bestguess), const_api, inferred)
    end
-    me.linfo.inferred = true
+    me.src.inferred = true
    me.linfo.inInference = false
 
    # finalize and record the linfo result
    me.inferred = true
@@ -2049,11 +2054,11 @@ function eval_annotate(e::ANY, vtypes::ANY, sv::InferenceState, undefs, pass)
            if s.undef
                undefs[id] = true
            end
-            record_slot_type!(id, vt, sv.linfo.slottypes)
+            record_slot_type!(id, vt, sv.src.slottypes)
            return e
        end
        # second pass: add type annotations where needed
-        return vt === sv.linfo.slottypes[id] ? e : TypedSlot(id, vt)
+        return vt === sv.src.slottypes[id] ? e : TypedSlot(id, vt)
    end
 
    if !isa(e,Expr)
@@ -2079,17 +2084,19 @@ function eval_annotate(e::ANY, vtypes::ANY, sv::InferenceState, undefs, pass)
 end
 
 # annotate types of all symbols in AST
-function type_annotate!(linfo::LambdaInfo, states::Array{Any,1}, sv::ANY, nargs)
+function type_annotate!(sv::InferenceState)
+    src = sv.src
+    states = sv.stmt_types
+    nargs = sv.nargs
    nslots = length(states[1])
-    nargs = linfo.nargs
    for i = 1:nargs
-        linfo.slottypes[i] = widenconst(states[1][i].typ)
+        src.slottypes[i] = widenconst(states[1][i].typ)
    end
    for i = nargs+1:nslots
-        linfo.slottypes[i] = Bottom
+        src.slottypes[i] = Bottom
    end
    undefs = fill(false, nslots)
-    body = linfo.code::Array{Any,1}
+    body = src.code::Array{Any,1}
    nexpr = length(body)
    i = 1
    optimize = sv.optimize::Bool
@@ -2103,7 +2110,7 @@ function type_annotate!(linfo::LambdaInfo, states::Array{Any,1}, sv::ANY, nargs)
                # record type of assigned slot by looking at the next statement.
                # this is needed in case the slot is never used (which makes eval_annotate miss it).
                id = expr.args[1].id
-                record_slot_type!(id, widenconst(states[i+1][id].typ), linfo.slottypes)
+                record_slot_type!(id, widenconst(states[i+1][id].typ), src.slottypes)
            end
        elseif optimize
            if ((isa(expr, Expr) && is_meta_expr(expr::Expr)) ||
@@ -2130,7 +2137,7 @@ function type_annotate!(linfo::LambdaInfo, states::Array{Any,1}, sv::ANY, nargs)
    # mark used-undef variables
    for i = 1:nslots
        if undefs[i]
-            linfo.slotflags[i] |= Slot_UsedUndef
+            src.slotflags[i] |= Slot_UsedUndef
        end
    end
    nothing
@@ -2144,16 +2151,16 @@ function _widen_all_consts(x::Expr)
    for i = 1:length(x.args)
        x.args[i] = _widen_all_consts(x.args[i])
    end
-    x
+    return x
 end
 
-function widen_all_consts!(linfo::LambdaInfo)
-    for i = 1:length(linfo.ssavaluetypes)
-        linfo.ssavaluetypes[i] = widenconst(linfo.ssavaluetypes[i])
+function widen_all_consts!(src::CodeInfo)
+    for i = 1:length(src.ssavaluetypes)
+        src.ssavaluetypes[i] = widenconst(src.ssavaluetypes[i])
    end
-    for i = 1:length(linfo.code)
-        linfo.code[i] = _widen_all_consts(linfo.code[i])
+    for i = 1:length(src.code)
+        src.code[i] = _widen_all_consts(src.code[i])
    end
-    linfo
+    return src
 end
 
 # replace slots 1:na with argexprs, static params with spvals, and increment
@@ -2211,17 +2218,16 @@ function occurs_more(e::ANY, pred, n)
    return 0
 end
 
-function exprtype(x::ANY, linfo::LambdaInfo)
+function exprtype(x::ANY, src::CodeInfo, mod::Module)
    if isa(x, Expr)
        return (x::Expr).typ
    elseif isa(x, SlotNumber)
-        return linfo.slottypes[x.id]
+        return src.slottypes[x.id]
    elseif isa(x, TypedSlot)
        return (x::Slot).typ
    elseif isa(x, SSAValue)
-        return abstract_eval_ssavalue(x::SSAValue, linfo)
+        return abstract_eval_ssavalue(x::SSAValue, src)
    elseif isa(x, Symbol)
-        mod = isdefined(linfo, :def) ? linfo.def.module : current_module()
        return abstract_eval_global(mod, x::Symbol)
    elseif isa(x, QuoteNode)
        return abstract_eval_constant((x::QuoteNode).value)
@@ -2256,23 +2262,23 @@ function is_pure_builtin(f::ANY)
    return false
 end
 
-function statement_effect_free(e::ANY, linfo::LambdaInfo)
+function statement_effect_free(e::ANY, src::CodeInfo, mod::Module)
    if isa(e, Expr)
        if e.head === :(=)
-            return !isa(e.args[1], GlobalRef) && effect_free(e.args[2], linfo, false)
+            return !isa(e.args[1], GlobalRef) && effect_free(e.args[2], src, mod, false)
        elseif e.head === :gotoifnot
-            return effect_free(e.args[1], linfo, false)
+            return effect_free(e.args[1], src, mod, false)
        end
    elseif isa(e, LabelNode) || isa(e, GotoNode)
        return true
    end
-    return effect_free(e, linfo, false)
+    return effect_free(e, src, mod, false)
 end
 
 # detect some important side-effect-free calls (allow_volatile=true)
 # and some affect-free calls (allow_volatile=false) -- affect_free means the call
 # cannot be affected by previous calls, except assignment nodes
-function effect_free(e::ANY, linfo::LambdaInfo, allow_volatile::Bool)
+function effect_free(e::ANY, src::CodeInfo, mod::Module, allow_volatile::Bool)
    if isa(e, GlobalRef)
        return (isdefined(e.mod, e.name) && (allow_volatile || isconst(e.mod, e.name)))
    elseif isa(e, Symbol)
@@ -2285,16 +2291,16 @@ function effect_free(e::ANY, linfo::LambdaInfo, allow_volatile::Bool)
        end
        ea = e.args
        if head === :call && !isa(e.args[1], SSAValue) && !isa(e.args[1], Slot)
-            if is_known_call_p(e, is_pure_builtin, linfo)
+            if is_known_call_p(e, is_pure_builtin, src, mod)
                if !allow_volatile
-                    if is_known_call(e, arrayref, linfo) || is_known_call(e, arraylen, linfo)
+                    if is_known_call(e, arrayref, src, mod) || is_known_call(e, arraylen, src, mod)
                        return false
-                    elseif is_known_call(e, getfield, linfo)
-                        et = exprtype(e,linfo)
+                    elseif is_known_call(e, getfield, src, mod)
+                        et = exprtype(e, src, mod)
                        if !isa(et,Const) && !(isType(et) && isleaftype(et))
                            # first argument must be immutable to ensure e is affect_free
                            a = ea[2]
-                            typ = widenconst(exprtype(a, linfo))
+                            typ = widenconst(exprtype(a, src, mod))
                            if !isa(typ, DataType) || typ.mutable || typ.abstract
                                return false
                            end
@@ -2308,7 +2314,7 @@ function effect_free(e::ANY, linfo::LambdaInfo, allow_volatile::Bool)
        elseif head === :new
            if !allow_volatile
                a = ea[1]
-                typ = widenconst(exprtype(a, linfo))
+                typ = widenconst(exprtype(a, src, mod))
                if !isType(typ) || !isa((typ::Type).parameters[1],DataType) || ((typ::Type).parameters[1]::DataType).mutable
                    return false
                end
@@ -2322,7 +2328,7 @@ function effect_free(e::ANY, linfo::LambdaInfo, allow_volatile::Bool)
            return false
        end
        for a in ea
-            if !effect_free(a, linfo, allow_volatile)
+            if !effect_free(a, src, mod, allow_volatile)
                return false
            end
        end
@@ -2335,12 +2341,12 @@ end
 
 #### post-inference optimizations ####
 
-function inline_as_constant(val::ANY, argexprs, linfo::LambdaInfo)
+function inline_as_constant(val::ANY, argexprs, sv::InferenceState)
    # check if any arguments aren't effect_free and need to be kept around
    stmts = Any[]
    for i = 1:length(argexprs)
        arg = argexprs[i]
-        if !effect_free(arg, linfo, false)
+        if !effect_free(arg, sv.src, sv.mod, false)
            push!(stmts, arg)
        end
    end
@@ -2362,7 +2368,7 @@ end
 # static parameters are ok if all the static parameter values are leaf types,
 # meaning they are fully known.
 # `ft` is the type of the function. `f` is the exact function if known, or else `nothing`.
-function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::InferenceState, enclosing::LambdaInfo)
+function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::InferenceState)
    argexprs = e.args
 
    if (is(f, typeassert) || ft ⊑ typeof(typeassert)) && length(atypes)==3
@@ -2387,7 +2393,7 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
         istopfunction(topmod, f, :typejoin) ||
         istopfunction(topmod, f, :promote_type))
        # XXX: compute effect_free for the actual arguments
-        if length(argexprs) < 2 || effect_free(argexprs[2], enclosing, true)
+        if length(argexprs) < 2 || effect_free(argexprs[2], sv.src, sv.mod, true)
            return (e.typ.parameters[1],())
        else
            return (e.typ.parameters[1], Any[argexprs[2]])
@@ -2395,11 +2401,11 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
        end
    end
    if istopfunction(topmod, f, :isbits) && length(atypes)==2 && isType(atypes[2]) &&
-        effect_free(argexprs[2], enclosing, true) && isleaftype(atypes[2].parameters[1])
+        effect_free(argexprs[2], sv.src, sv.mod, true) && isleaftype(atypes[2].parameters[1])
        return (isbits(atypes[2].parameters[1]),())
    end
    if is(f, Core.kwfunc) && length(argexprs) == 2 && isa(e.typ, Const)
-        if effect_free(argexprs[2], enclosing, true)
+        if effect_free(argexprs[2], sv.src, sv.mod, true)
            return (e.typ.val, ())
        else
            return (e.typ.val, Any[argexprs[2]])
@@ -2421,7 +2427,7 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
        local spec_hit = nothing
        local spec_miss = nothing
        local error_label = nothing
-        local linfo_var = add_slot!(enclosing, LambdaInfo, false)
+        local linfo_var = add_slot!(sv.src, MethodInstance, false)
        local ex = copy(e)
        local stmts = []
        local arg_hoisted = false
@@ -2429,7 +2435,7 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
            local ti = atypes[i]
            if arg_hoisted || isa(ti, Union)
                aei = ex.args[i]
-                if !effect_free(aei, enclosing, false)
+                if !effect_free(aei, sv.src, sv.mod, false)
                    arg_hoisted = true
                    newvar = newvar!(sv, ti)
                    insert!(stmts, 1, Expr(:(=), newvar, aei))
@@ -2488,7 +2494,7 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
        end
        local ret_var, merge
        if spec_miss !== nothing
-            ret_var = add_slot!(enclosing, ex.typ, false)
+            ret_var = add_slot!(sv.src, ex.typ, false)
            merge = genlabel(sv)
            push!(stmts, spec_miss)
            push!(stmts, Expr(:(=), ret_var, ex))
@@ -2533,15 +2539,15 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
    methsp = meth[2]
    method = meth[3]::Method
    # check whether call can be inlined to just a quoted constant value
-    if isa(f, widenconst(ft)) && !method.isstaged && (method.lambda_template.pure || f === return_type) &&
+    if isa(f, widenconst(ft)) && !method.isstaged && (method.source.pure || f === return_type) &&
        (isType(e.typ) || isa(e.typ,Const))
        if isType(e.typ)
            if !has_typevars(e.typ.parameters[1])
-                return inline_as_constant(e.typ.parameters[1], argexprs, enclosing)
+                return inline_as_constant(e.typ.parameters[1], argexprs, sv)
            end
        else
            assert(isa(e.typ,Const))
-            return inline_as_constant(e.typ.val, argexprs, enclosing)
+            return inline_as_constant(e.typ.val, argexprs, sv)
        end
    end
 
@@ -2550,10 +2556,10 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
        return invoke_NF()
    end
 
-    na = method.lambda_template.nargs
+    na = method.nargs
    # check for vararg function
    isva = false
-    if na > 0 && method.lambda_template.isva
+    if na > 0 && method.isva
        @assert length(argexprs) >= na-1
        # construct tuple-forming expression for argument tail
        vararg = mk_tuplecall(argexprs[na:end], sv)
@@ -2574,21 +2580,17 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
        isa(si, TypeVar) && return NF
    end
 
-    (linfo, ty, inferred) = typeinf(method, metharg, methsp, false)
-    if linfo === nothing || !inferred
+    (src, rettype, inferred) = typeinf(method, metharg, methsp, false)
+    if src === nothing || !inferred
        return invoke_NF()
    end
-    if linfo !== nothing && linfo.jlcall_api == 2
+    if isa(src, Const)
        # in this case function can be inlined to a constant
-        return inline_as_constant(linfo.constval, argexprs, enclosing)
-    elseif linfo !== nothing && !linfo.inlineable
-        return invoke_NF()
-    elseif linfo === nothing || linfo.code === nothing
-        (linfo, ty, inferred) = typeinf(method, metharg, methsp, true)
-    end
-    if linfo === nothing || !inferred || !linfo.inlineable || (ast = linfo.code) === nothing
+        return inline_as_constant(src.val, argexprs, sv)
+    elseif !isa(src, CodeInfo) || !src.inlineable
        return invoke_NF()
    end
+    ast = src.code
 
    spvals = Any[]
    for i = 1:length(methsp)
@@ -2605,7 +2607,7 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
    nm = length(methargs)
 
    if !isa(ast, Array{Any,1})
-        ast = ccall(:jl_uncompress_ast, Any, (Any,Any), linfo, ast)
+        ast = ccall(:jl_uncompress_ast, Any, (Any, Any), method, ast)
    else
        ast = copy_exprargs(ast)
    end
@@ -2613,7 +2615,7 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
 
    body = Expr(:block)
    body.args = ast
-    propagate_inbounds = linfo.propagate_inbounds
+    propagate_inbounds = src.propagate_inbounds
 
    # see if each argument occurs only once in the body expression
    stmts = Any[]
@@ -2623,7 +2625,7 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
    for i=na:-1:1 # stmts_free needs to be calculated in reverse-argument order
        #args_i = args[i]
        aei = argexprs[i]
-        aeitype = argtype = widenconst(exprtype(aei, enclosing))
+        aeitype = argtype = widenconst(exprtype(aei, sv.src, sv.mod))
 
        # ok for argument to occur more than once if the actual argument
        # is a symbol or constant, or is not affected by previous statements
@@ -2635,7 +2637,7 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
                if occ < 6
                    occ += occurs_more(b, x->(isa(x,Slot)&&x.id==i), 6)
                end
-                if occ > 0 && affect_free && !effect_free(b, linfo, true)
+                if occ > 0 && affect_free && !effect_free(b, src, method.module, true)
                    #TODO: we might be able to short-circuit this test better by memoizing effect_free(b) in the for loop over i
                    affect_free = false
                end
@@ -2643,9 +2645,9 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
                break
            end
        end
-        free = effect_free(aei, enclosing, true)
+        free = effect_free(aei, sv.src, sv.mod, true)
        if ((occ==0 && is(aeitype,Bottom)) || (occ > 1 && !inline_worthy(aei, occ*2000)) ||
-            (affect_free && !free) || (!affect_free && !effect_free(aei, enclosing, false)))
+            (affect_free && !free) || (!affect_free && !effect_free(aei, sv.src, sv.mod, false)))
            if occ != 0
                vnew = newvar!(sv, aeitype)
                argexprs[i] = vnew
@@ -2659,20 +2661,20 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
    end
 
    # re-number the SSAValues and copy their type-info to the new ast
-    ssavalue_types = linfo.ssavaluetypes
+    ssavalue_types = src.ssavaluetypes
    if !isempty(ssavalue_types)
-        incr = length(sv.linfo.ssavaluetypes)
+        incr = length(sv.src.ssavaluetypes)
        if incr != 0
            body = ssavalue_increment(body, incr)
        end
-        append!(sv.linfo.ssavaluetypes, ssavalue_types)
+        append!(sv.src.ssavaluetypes, ssavalue_types)
    end
 
    # ok, substitute argument expressions for argument names in the body
-    body = substitute!(body, na, argexprs, spvals, length(enclosing.slotnames)-na)
-    append!(enclosing.slotnames, linfo.slotnames[na+1:end])
-    append!(enclosing.slottypes, linfo.slottypes[na+1:end])
-    append!(enclosing.slotflags, linfo.slotflags[na+1:end])
+    body = substitute!(body, na, argexprs, spvals, length(sv.src.slotnames) - na)
+    append!(sv.src.slotnames, src.slotnames[na+1:end])
+    append!(sv.src.slottypes, src.slottypes[na+1:end])
+    append!(sv.src.slotflags, src.slotflags[na+1:end])
 
    # make labels / goto statements unique
    # relocate inlining information
@@ -2703,7 +2705,6 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
 
    # convert return statements into a series of goto's
    retstmt = genlabel(sv)
-    rettype = linfo.rettype
    local retval
    multiret = false
    lastexpr = pop!(body.args)
@@ -2723,7 +2724,7 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
            if a.head === :return
                if !multiret
                    # create slot first time
-                    retval = add_slot!(enclosing, rettype, false)
+                    retval = add_slot!(sv.src, rettype, false)
                end
                multiret = true
                unshift!(a.args, retval)
@@ -2758,35 +2759,33 @@ function inlineable(f::ANY, ft::ANY, e::Expr, atypes::Vector{Any}, sv::Inference
        return false
    end
    if do_coverage
-        line = if !isempty(stmts) && isa(stmts[1], LineNumberNode)
-            (shift!(stmts)::LineNumberNode).line
-        else
-            linfo.def.line
+        line = method.line
+        if !isempty(stmts) && isa(stmts[1], LineNumberNode)
+            line = (shift!(stmts)::LineNumberNode).line
        end
        # Check if we are switching module, which is necessary to catch user
        # code inlined into `Base` with `--code-coverage=user`.
        # Assume we are inlining directly into `enclosing` instead of another
        # function inlined in it
-        mod = linfo.def.module
+        mod = method.module
        if mod === sv.mod
-            unshift!(stmts, Expr(:meta, :push_loc, linfo.def.file,
-                                 linfo.def.name, line))
+            unshift!(stmts, Expr(:meta, :push_loc, method.file,
+                                 method.name, line))
        else
-            unshift!(stmts, Expr(:meta, :push_loc, linfo.def.file,
-                                 linfo.def.name, line, mod))
+            unshift!(stmts, Expr(:meta, :push_loc, method.file,
+                                 method.name, line, mod))
        end
        push!(stmts, Expr(:meta, :pop_loc))
    elseif !isempty(stmts)
        if all(inlining_ignore, stmts)
            empty!(stmts)
        else
-            line = if isa(stmts[1], LineNumberNode)
-                (shift!(stmts)::LineNumberNode).line
-            else
-                linfo.def.line
+            line::Int = method.line
+            if isa(stmts[1], LineNumberNode)
+                line = (shift!(stmts)::LineNumberNode).line
            end
-            unshift!(stmts, Expr(:meta, :push_loc, linfo.def.file,
-                                 linfo.def.name, line))
+            unshift!(stmts, Expr(:meta, :push_loc, method.file,
+                                 method.name, line))
            if isa(stmts[end], LineNumberNode)
                stmts[end] = Expr(:meta, :pop_loc)
            else
@@ -2872,22 +2871,22 @@ const top_tuple = GlobalRef(Core, :tuple)
 
 function mk_getfield(texpr, i, T)
    e = Expr(:call, top_getfield, texpr, i)
    e.typ = T
-    e
+    return e
 end
 
 function mk_tuplecall(args, sv::InferenceState)
    e = Expr(:call, top_tuple, args...)
-    e.typ = tuple_tfunc(Tuple{Any[widenconst(exprtype(x, sv.linfo)) for x in args]...})
+    e.typ = tuple_tfunc(Tuple{Any[widenconst(exprtype(x, sv.src, sv.mod)) for x in args]...})
    return e
 end
 
-function inlining_pass!(linfo::LambdaInfo, sv::InferenceState)
-    eargs = linfo.code
+function inlining_pass!(sv::InferenceState)
+    eargs = sv.src.code
    i = 1
    while i <= length(eargs)
        ei = eargs[i]
        if isa(ei, Expr)
-            res = inlining_pass(ei, sv, linfo)
+            res = inlining_pass(ei, sv)
            eargs[i] = res[1]
            if isa(res[2], Array)
                sts = res[2]::Array{Any,1}
@@ -2903,7 +2902,7 @@ end
 
 const corenumtype = Union{Int32, Int64, Float32, Float64}
 
-function inlining_pass(e::Expr, sv, linfo)
+function inlining_pass(e::Expr, sv::InferenceState)
    if e.head === :method
        # avoid running the inlining pass on function definitions
        return (e,())
@@ -2917,11 +2916,11 @@ function inlining_pass(e::Expr, sv, linfo)
    # don't inline first (global) arguments of ccall, as this needs to be evaluated
    # by the interpreter and inlining might put in something it can't handle,
    # like another ccall (or try to move the variables out into the function)
-    if is_known_call(e, Core.Intrinsics.ccall, linfo)
+    if is_known_call(e, Core.Intrinsics.ccall, sv.src, sv.mod)
        # 4 is rewritten to 2 below to handle the callee.
        i0 = 4
        isccall = true
-    elseif is_known_call(e, Core.Intrinsics.llvmcall, linfo)
+    elseif is_known_call(e, Core.Intrinsics.llvmcall, sv.src, sv.mod)
        i0 = 5
        isccall = false
    else
@@ -2945,10 +2944,10 @@ function inlining_pass(e::Expr, sv, linfo)
            else
                argloc = eargs
            end
-            res = inlining_pass(ei, sv, linfo)
+            res = inlining_pass(ei, sv)
            res1 = res[1]
-            if has_stmts && !effect_free(res1, linfo, false)
-                restype = exprtype(res1, linfo)
+            if has_stmts && !effect_free(res1, sv.src, sv.mod, false)
+                restype = exprtype(res1, sv.src, sv.mod)
                vnew = newvar!(sv, restype)
                argloc[i] = vnew
                unshift!(stmts, Expr(:(=), vnew, res1))
@@ -2961,7 +2960,7 @@ function inlining_pass(e::Expr, sv, linfo)
                prepend!(stmts,res2)
                if !has_stmts
                    for stmt in res2
-                        if !effect_free(stmt, linfo, true)
+                        if !effect_free(stmt, sv.src, sv.mod, true)
                            has_stmts = true
                        end
                    end
@@ -2982,7 +2981,7 @@ function inlining_pass(e::Expr, sv, linfo)
        end
    end
 
-    ft = exprtype(arg1, linfo)
+    ft = exprtype(arg1, sv.src, sv.mod)
    if isa(ft, Const)
        f = ft.val
    else
@@ -3000,7 +2999,7 @@ function inlining_pass(e::Expr, sv, linfo)
 
        a2 = e.args[3]
        if isa(a2, Symbol) || isa(a2, Slot) || isa(a2, SSAValue)
-            ta2 = exprtype(a2, linfo)
+            ta2 = exprtype(a2, sv.src, sv.mod)
            if isa(ta2, Const)
                a2 = ta2.val
            end
@@ -3012,13 +3011,13 @@ function inlining_pass(e::Expr, sv, linfo)
            a1 = e.args[2]
            basenumtype = Union{corenumtype, Main.Base.Complex64, Main.Base.Complex128, Main.Base.Rational}
            if isa(a1, basenumtype) || ((isa(a1, Symbol) || isa(a1, Slot) || isa(a1, SSAValue)) &&
-                                       exprtype(a1, linfo) ⊑ basenumtype)
+                                       exprtype(a1, sv.src, sv.mod) ⊑ basenumtype)
                if square
                    e.args = Any[GlobalRef(Main.Base,:*), a1, a1]
-                    res = inlining_pass(e, sv, linfo)
+                    res = inlining_pass(e, sv)
                else
                    e.args = Any[GlobalRef(Main.Base,:*), Expr(:call, GlobalRef(Main.Base,:*), a1, a1), a1]
-                    res = inlining_pass(e, sv, linfo)
+                    res = inlining_pass(e, sv)
                end
                if isa(res, Tuple)
                    if isa(res[2], Array) && !isempty(res[2])
@@ -3036,11 +3035,11 @@ function inlining_pass(e::Expr, sv, linfo)
    ata = Vector{Any}(length(e.args))
    ata[1] = ft
    for i = 2:length(e.args)
-        a = exprtype(e.args[i], linfo)
+        a = exprtype(e.args[i], sv.src, sv.mod)
        (a === Bottom || isvarargtype(a)) && return (e, stmts)
        ata[i] = a
    end
-    res = inlineable(f, ft, e, ata, sv, linfo)
+    res = inlineable(f, ft, e, ata, sv)
    if isa(res,Tuple)
        if isa(res[2],Array) && !isempty(res[2])
            append!(stmts,res[2])
@@ -3051,7 +3050,7 @@ function inlining_pass(e::Expr, sv, linfo)
    if !is(res,NF)
        # iteratively inline apply(f, tuple(...), tuple(...), ...) in order
        # to simplify long vararg lists as in multi-arg +
-        if isa(res,Expr) && is_known_call(res, _apply, linfo)
+        if isa(res,Expr) && is_known_call(res, _apply, sv.src, sv.mod)
            e = res::Expr
            f = _apply; ft = abstract_eval_constant(f)
        else
@@ -3064,14 +3063,14 @@ function inlining_pass(e::Expr, sv, linfo)
            newargs = Vector{Any}(na-2)
            for i = 3:na
                aarg = e.args[i]
-                t = widenconst(exprtype(aarg, linfo))
-                if isa(aarg,Expr) && (is_known_call(aarg, tuple, linfo) || is_known_call(aarg, svec, linfo))
+                t = widenconst(exprtype(aarg, sv.src, sv.mod))
+                if isa(aarg,Expr) && (is_known_call(aarg, tuple, sv.src, sv.mod) || is_known_call(aarg, svec, sv.src, sv.mod))
                    # apply(f,tuple(x,y,...)) => f(x,y,...)
                    newargs[i-2] = aarg.args[2:end]
                elseif isa(aarg, Tuple)
                    newargs[i-2] = Any[ QuoteNode(x) for x in aarg ]
                elseif isa(t, DataType) && t.name === Tuple.name && !isvatuple(t) &&
-                       effect_free(aarg, linfo, true) && length(t.parameters) <= MAX_TUPLE_SPLAT
+                       effect_free(aarg, sv.src, sv.mod, true) && length(t.parameters) <= MAX_TUPLE_SPLAT
                    # apply(f,t::(x,y)) => f(t[1],t[2])
                    tp = t.parameters
                    newargs[i-2] = Any[ mk_getfield(aarg,j,tp[j]) for j=1:length(tp) ]
@@ -3083,7 +3082,7 @@ function inlining_pass(e::Expr, sv, linfo)
            e.args = [Any[e.args[2]]; newargs...]
 
            # now try to inline the simplified call
-            ft = exprtype(e.args[1], linfo)
+            ft = exprtype(e.args[1], sv.src, sv.mod)
            if isa(ft,Const)
                f = ft.val
            else
@@ -3101,45 +3100,45 @@ end
 
 const compiler_temp_sym = Symbol("#temp#")
 
-function add_slot!(linfo::LambdaInfo, typ, is_sa, name=compiler_temp_sym)
-    id = length(linfo.slotnames)+1
-    push!(linfo.slotnames, name)
-    push!(linfo.slottypes, typ)
-    push!(linfo.slotflags, Slot_Assigned + is_sa * Slot_AssignedOnce)
+function add_slot!(src::CodeInfo, typ::ANY, is_sa::Bool, name::Symbol=compiler_temp_sym)
+    id = length(src.slotnames) + 1
+    push!(src.slotnames, name)
+    push!(src.slottypes, typ)
+    push!(src.slotflags, Slot_Assigned + is_sa * Slot_AssignedOnce)
    return SlotNumber(id)
 end
 
-function is_known_call(e::Expr, func::ANY, linfo::LambdaInfo)
+function is_known_call(e::Expr, func::ANY, src::CodeInfo, mod::Module)
    if e.head !== :call
        return false
    end
-    f = exprtype(e.args[1], linfo)
+    f = exprtype(e.args[1], src, mod)
    return isa(f, Const) && f.val === func
 end
 
-function is_known_call_p(e::Expr, pred::ANY, linfo::LambdaInfo)
+function is_known_call_p(e::Expr, pred::ANY, src::CodeInfo, mod::Module)
    if e.head !== :call
        return false
    end
-    f = exprtype(e.args[1], linfo)
+    f = exprtype(e.args[1], src, mod)
    return isa(f, Const) && pred(f.val)
 end
 
-function delete_var!(linfo, id, T)
+function delete_var!(src::CodeInfo, id, T)
    filter!(x->!(isa(x,Expr) && (x.head === :(=) || x.head === :const) &&
                 isa(x.args[1],T) && x.args[1].id == id),
-            linfo.code)
-    linfo
+            src.code)
+    return src
 end
 
-function slot_replace!(linfo::LambdaInfo, id, rhs, T)
-    for i = 1:length(linfo.code)
-        linfo.code[i] = _slot_replace!(linfo.code[i], id, rhs, T)
+function slot_replace!(src::CodeInfo, id, rhs, T)
+    for i = 1:length(src.code)
+        src.code[i] = _slot_replace!(src.code[i], id, rhs, T)
    end
-    linfo
+    return src
 end
 
-function _slot_replace!(e, id, rhs, T)
+function _slot_replace!(e, id, rhs, T::ANY)
    if isa(e,T) && e.id == id
        return rhs
    end
@@ -3154,17 +3153,18 @@ end
 
 occurs_undef(var::Int, expr, flags) = flags[var]&Slot_UsedUndef != 0 &&
    occurs_more(expr, e->(isa(e,Slot) && e.id==var), 0)>0
 
-is_argument(linfo, v) = isa(v,Slot) && v.id <= linfo.nargs
+is_argument(nargs::Int, v::Slot) = v.id <= nargs
 
 # remove all single-assigned vars v in "v = x" where x is an argument.
 # "sa" is the result of find_sa_vars
 # T: Slot or SSAValue
-function remove_redundant_temp_vars(linfo, sa, T)
-    flags = linfo.slotflags
-    ssavalue_types = linfo.ssavaluetypes
-    bexpr = Expr(:block); bexpr.args = linfo.code
-    for (v,init) in sa
-        if (isa(init, Slot) && is_argument(linfo, init::Slot))
+function remove_redundant_temp_vars(src::CodeInfo, nargs::Int, sa, T)
+    flags = src.slotflags
+    ssavalue_types = src.ssavaluetypes
+    bexpr = Expr(:block)
+    bexpr.args = src.code
+    for (v, init) in sa
+        if isa(init, Slot) && is_argument(nargs, init::Slot)
            # this transformation is not valid for vars used before def.
            # we need to preserve the point of assignment to know where to
            # throw errors (issue #4645).
@@ -3174,24 +3174,23 @@ function remove_redundant_temp_vars(linfo, sa, T)
                # (from inlining improved type inference information)
                # and this transformation would worsen the type information
                # everywhere later in the function
-                ityp = isa(init,TypedSlot) ? init.typ : linfo.slottypes[init.id]
-                if ityp ⊑ (T===SSAValue ? ssavalue_types[v+1] : linfo.slottypes[v])
-                    delete_var!(linfo, v, T)
-                    slot_replace!(linfo, v, init, T)
+                ityp = isa(init,TypedSlot) ? init.typ : src.slottypes[init.id]
+                if ityp ⊑ (T===SSAValue ? ssavalue_types[v+1] : src.slottypes[v])
+                    delete_var!(src, v, T)
+                    slot_replace!(src, v, init, T)
                end
            end
        end
    end
-    linfo
+    return src
 end
 
 # compute set of slots assigned once
-function find_sa_vars(linfo::LambdaInfo)
-    body = linfo.code
+function find_sa_vars(src::CodeInfo, nargs::Int)
+    body = src.code
    av = ObjectIdDict()
    av2 = ObjectIdDict()
    gss = ObjectIdDict()
-    nargs = linfo.nargs
    for i = 1:length(body)
        e = body[i]
        if isa(e,Expr) && is(e.head,:(=))
@@ -3210,15 +3209,15 @@ function find_sa_vars(linfo::LambdaInfo)
            end
        end
    end
-    filter!((id,_)->!haskey(av2,id), av)
-    av, gss
+    filter!((id, _) -> !haskey(av2, id), av)
+    return (av, gss)
 end
 
 symequal(x::SSAValue, y::SSAValue) = is(x.id,y.id)
 symequal(x::Slot    , y::Slot)     = is(x.id,y.id)
 symequal(x::ANY     , y::ANY)      = is(x,y)
 
-function occurs_outside_getfield(linfo::LambdaInfo, e::ANY, sym::ANY,
+function occurs_outside_getfield(e::ANY, sym::ANY,
                                 sv::InferenceState, field_count, field_names)
    if e===sym || (isa(e,Slot) && isa(sym,Slot) && (e::Slot).id == (sym::Slot).id)
        return true
@@ -3227,7 +3226,7 @@ function occurs_outside_getfield(linfo::LambdaInfo, e::ANY, sym::ANY,
        e = e::Expr
        head = e.head
        is_meta_expr_head(head) && return false
-        if is_known_call(e, getfield, linfo) && symequal(e.args[2],sym)
+        if is_known_call(e, getfield, sv.src, sv.mod) && symequal(e.args[2],sym)
            idx = e.args[3]
            if isa(idx,QuoteNode) && (idx.value in field_names)
                return false
@@ -3238,11 +3237,11 @@ function occurs_outside_getfield(linfo::LambdaInfo, e::ANY, sym::ANY,
            return true
        end
        if head === :(=)
-            return occurs_outside_getfield(linfo, e.args[2], sym, sv,
+            return occurs_outside_getfield(e.args[2], sym, sv,
                                           field_count, field_names)
        else
            if (head === :block && isa(sym, Slot) &&
-                linfo.slotflags[(sym::Slot).id] & Slot_UsedUndef == 0)
+                sv.src.slotflags[(sym::Slot).id] & Slot_UsedUndef == 0)
                ignore_void = true
            else
                ignore_void = false
@@ -3251,7 +3250,7 @@ function occurs_outside_getfield(linfo::LambdaInfo, e::ANY, sym::ANY,
            if ignore_void && isa(a, Slot) && (a::Slot).id == (sym::Slot).id
                continue
            end
-            if occurs_outside_getfield(linfo, a, sym, sv, field_count, field_names)
+            if occurs_outside_getfield(a, sym, sv, field_count, field_names)
                return true
            end
        end
@@ -3260,7 +3259,7 @@ function occurs_outside_getfield(linfo::LambdaInfo, e::ANY, sym::ANY,
    return false
 end
 
-function void_use_elim_pass!(linfo::LambdaInfo, sv)
+function void_use_elim_pass!(sv::InferenceState)
    # Remove top level SSAValue and slots that are `!usedUndef`.
    # Also remove some `nothing` while we are at it....
    not_void_use = function (ex::ANY)
@@ -3268,7 +3267,7 @@ function void_use_elim_pass!(linfo::LambdaInfo, sv)
            # Explicitly listed here for clarity
            return false
        elseif isa(ex, Slot)
-            return linfo.slotflags[(ex::Slot).id] & Slot_UsedUndef != 0
+            return sv.src.slotflags[(ex::Slot).id] & Slot_UsedUndef != 0
        elseif isa(ex, GlobalRef)
            ex = ex::GlobalRef
            return !isdefined(ex.mod, ex.name)
@@ -3279,11 +3278,11 @@ function void_use_elim_pass!(linfo::LambdaInfo, sv)
        end
        return false
    end
-    filter!(not_void_use, linfo.code::Array{Any,1})
-    return
+    filter!(not_void_use, sv.src.code::Array{Any,1})
+    nothing
 end
 
-function meta_elim_pass!(linfo::LambdaInfo, code::Array{Any,1}, do_coverage)
+function meta_elim_pass!(code::Array{Any,1}, propagate_inbounds::Bool, do_coverage::Bool)
    # 1. Remove place holders
    #
    # 2. If coverage is off, remove line number nodes that don't mark any
@@ -3325,7 +3324,7 @@ function meta_elim_pass!(code::Array{Any,1}, propagate_inbounds::Bool, do_covera
    # not too common.
    check_bounds = JLOptions().check_bounds
 
-    inbounds_stack = linfo.propagate_inbounds ? Bool[] : [false]
+    inbounds_stack = propagate_inbounds ? Bool[] : [false]
    # Whether the push is deleted (therefore if the pop has to be too)
    # Shared for `Expr(:boundscheck)` and `Expr(:inbounds)`
    bounds_elim_stack = Bool[]
@@ -3534,23 +3533,23 @@ function meta_elim_pass!(code::Array{Any,1}, propagate_inbounds::Bool, do_covera
            continue
        end
    end
-    filter!(x->x!==nothing, code)
+    return filter!(x -> x !== nothing, code)
 end
 
 # does the same job as alloc_elim_pass for allocations inline in getfields
 # TODO can probably be removed when we switch to a linear IR
-function getfield_elim_pass!(linfo::LambdaInfo, sv)
-    body = linfo.code
+function getfield_elim_pass!(sv::InferenceState)
+    body = sv.src.code
    for i = 1:length(body)
        body[i] = _getfield_elim_pass!(body[i], sv)
    end
 end
 
-function _getfield_elim_pass!(e::Expr, sv)
+function _getfield_elim_pass!(e::Expr, sv::InferenceState)
    for i = 1:length(e.args)
        e.args[i] = _getfield_elim_pass!(e.args[i], sv)
    end
-    if is_known_call(e, getfield, sv.linfo) && length(e.args)==3 &&
+    if is_known_call(e, getfield, sv.src, sv.mod) && length(e.args)==3 &&
        (isa(e.args[3],Int) || isa(e.args[3],QuoteNode))
        e1 = e.args[2]
        j = e.args[3]
@@ -3565,7 +3564,7 @@ function _getfield_elim_pass!(e::Expr, sv::InferenceState)
                ok = true
                for k = 2:length(e1.args)
                    k == j+1 && continue
-                    if !effect_free(e1.args[k], sv.linfo, true)
+                    if !effect_free(e1.args[k], sv.src, sv.mod, true)
                        ok = false; break
                    end
                end
@@ -3593,12 +3592,12 @@ _getfield_elim_pass!(e::ANY, sv) = e
 
 # check if e is a successful allocation of a struct
 # if it is, returns (n,f) such that it is always valid to call
 # getfield(..., 1 <= x <= n) or getfield(..., x in f) on the result
-function is_allocation(e :: ANY, sv::InferenceState)
+function is_allocation(e::ANY, sv::InferenceState)
    isa(e, Expr) || return false
-    if is_known_call(e, tuple, sv.linfo)
+    if is_known_call(e, tuple, sv.src, sv.mod)
        return (length(e.args)-1,())
    elseif e.head === :new
-        typ = widenconst(exprtype(e, sv.linfo))
+        typ = widenconst(exprtype(e, sv.src, sv.mod))
        if isleaftype(typ)
            @assert(isa(typ,DataType))
            nf = length(e.args)-1
@@ -3617,8 +3616,8 @@ function is_allocation(e::ANY, sv::InferenceState)
 end
 
 # Replace branches with constant conditions with unconditional branches
-function gotoifnot_elim_pass!(linfo::LambdaInfo, sv::InferenceState)
-    body = linfo.code
+function gotoifnot_elim_pass!(sv::InferenceState)
+    body = sv.src.code
    i = 1
    while i < length(body)
        expr = body[i]
@@ -3627,7 +3626,7 @@ function gotoifnot_elim_pass!(linfo::LambdaInfo, sv::InferenceState)
        expr = expr::Expr
        expr.head === :gotoifnot || continue
        cond = expr.args[1]
-        condt = exprtype(cond, linfo)
+        condt = exprtype(cond, sv.src, sv.mod)
        isa(condt, Const) || continue
        val = (condt::Const).val
        # Codegen should emit an unreachable if val is not a Bool so
@@ -3646,12 +3645,13 @@ end
 
 # eliminate allocation of unnecessary objects
 # that are only used as arguments to safe getfield calls
-function alloc_elim_pass!(linfo::LambdaInfo, sv::InferenceState)
-    body = linfo.code
-    bexpr = Expr(:block); bexpr.args = body
-    vs, gs = find_sa_vars(linfo)
-    remove_redundant_temp_vars(linfo, vs, Slot)
-    remove_redundant_temp_vars(linfo, gs, SSAValue)
+function alloc_elim_pass!(sv::InferenceState)
+    body = sv.src.code
+    bexpr = Expr(:block)
+    bexpr.args = body
+    vs, gs = find_sa_vars(sv.src, sv.nargs)
+    remove_redundant_temp_vars(sv.src, sv.nargs, vs, Slot)
+    remove_redundant_temp_vars(sv.src, sv.nargs, gs, SSAValue)
    i = 1
    while i < length(body)
        e = body[i]
@@ -3666,7 +3666,7 @@ function alloc_elim_pass!(sv::InferenceState)
            rhs = e.args[2]
            # Need to make sure LLVM can recognize this as LLVM ssa value too
            is_ssa = (isa(var, SSAValue) ||
-                      linfo.slotflags[(var::Slot).id] & Slot_UsedUndef == 0)
+                      sv.src.slotflags[(var::Slot).id] & Slot_UsedUndef == 0)
        else
            var = nothing
            rhs = e
@@ -3679,7 +3679,7 @@ function alloc_elim_pass!(sv::InferenceState)
        # This makes sure the value doesn't escape so we can elide
        # allocation of mutable types too
        if (var !== nothing &&
-            occurs_outside_getfield(linfo, bexpr, var, sv, nv, field_names))
+            occurs_outside_getfield(bexpr, var, sv, nv, field_names))
            i += 1
            continue
        end
@@ -3711,16 +3711,16 @@ function alloc_elim_pass!(sv::InferenceState)
                  isa(tupelt,QuoteNode) || isa(tupelt, SSAValue))
                vals[j] = tupelt
            else
-                elty = exprtype(tupelt, linfo)
+                elty = exprtype(tupelt, sv.src, sv.mod)
                if is_ssa
                    tmpv = newvar!(sv, elty)
                else
                    var = var::Slot
-                    tmpv = add_slot!(linfo, elty, false,
-                                     linfo.slotnames[var.id])
+                    tmpv = add_slot!(sv.src, elty, false,
+                                     sv.src.slotnames[var.id])
                    slot_id = tmpv.id
                    new_slots[j] = slot_id
-                    linfo.slotflags[slot_id] |= Slot_UsedUndef
+                    sv.src.slotflags[slot_id] |= Slot_UsedUndef
                end
                tmp = Expr(:(=), tmpv, tupelt)
                insert!(body, i+n_ins, tmp)
@@ -3728,7 +3728,7 @@ function alloc_elim_pass!(sv::InferenceState)
                n_ins += 1
            end
        end
-        replace_getfield!(linfo, bexpr, var, vals, field_names, sv)
+        replace_getfield!(bexpr, var, vals, field_names, sv)
        if !is_ssa
            i += replace_newvar_node!(body, (var::Slot).id,
                                      new_slots, i)
@@ -3789,13 +3789,13 @@ function delete_void_use!(body, var::Slot, i0)
            i += 1
        end
    end
-    ndel
+    return ndel
 end
 
-function replace_getfield!(linfo::LambdaInfo, e::Expr, tupname, vals, field_names, sv)
+function replace_getfield!(e::Expr, tupname, vals, field_names, sv::InferenceState)
    for i = 1:length(e.args)
        a = e.args[i]
-        if isa(a,Expr) && is_known_call(a, getfield, linfo) &&
+        if isa(a,Expr) && is_known_call(a, getfield, sv.src, sv.mod) &&
            symequal(a.args[2],tupname)
            idx = if isa(a.args[3], Int)
                a.args[3]
@@ -3809,30 +3809,30 @@ function replace_getfield!(linfo::LambdaInfo, e::Expr, tupname, vals, field_name
            # the tuple element expression that's replacing it.
            if isa(val,Slot)
                val = val::Slot
-                valtyp = isa(val,TypedSlot) ? val.typ : linfo.slottypes[val.id]
+                valtyp = isa(val,TypedSlot) ? val.typ : sv.src.slottypes[val.id]
                if a.typ ⊑ valtyp && !(valtyp ⊑ a.typ)
                    if isa(val,TypedSlot)
                        val = TypedSlot(val.id, a.typ)
                    end
-                    linfo.slottypes[val.id] = widenconst(a.typ)
+                    sv.src.slottypes[val.id] = widenconst(a.typ)
                end
            elseif isa(val,SSAValue)
                val = val::SSAValue
-                typ = exprtype(val, linfo)
+                typ = exprtype(val, sv.src, sv.mod)
                if a.typ ⊑ typ && !(typ ⊑ a.typ)
-                    sv.linfo.ssavaluetypes[val.id+1] = a.typ
+                    sv.src.ssavaluetypes[val.id+1] = a.typ
                end
            end
            e.args[i] = val
        elseif isa(a, Expr)
-            replace_getfield!(linfo, a::Expr, tupname, vals, field_names, sv)
+            replace_getfield!(a::Expr, tupname, vals, field_names, sv)
        end
    end
 end
 
 # fix label numbers to always equal the statement index of the label
-function reindex_labels!(linfo::LambdaInfo, sv::InferenceState)
-    body = linfo.code
+function reindex_labels!(sv::InferenceState)
+    body = sv.src.code
    mapping = zeros(Int, sv.label_counter)
    for i = 1:length(body)
        el = body[i]
diff --git a/base/interactiveutil.jl b/base/interactiveutil.jl
index 955aa20b33f10..2ea2a8b098085 100644
--- a/base/interactiveutil.jl
+++ b/base/interactiveutil.jl
@@ -316,22 +316,23 @@ See [Manual](:ref:`man-code-warntype`) for more information.
 """
 function code_warntype(io::IO, f, t::ANY)
    emph_io = IOContext(io, :TYPEEMPHASIZE => true)
-    for li in code_typed(f, t)
+    for (src, rettype) in code_typed(f, t)
        println(emph_io, "Variables:")
-        slotnames = lambdainfo_slotnames(li)
+        slotnames = sourceinfo_slotnames(src)
        for i = 1:length(slotnames)
            print(emph_io, "  ", slotnames[i])
-            if isa(li.slottypes,Array)
-                show_expr_type(emph_io, li.slottypes[i], true)
+            if isa(src.slottypes, Array)
+                show_expr_type(emph_io, src.slottypes[i], true)
            end
            print(emph_io, '\n')
        end
        print(emph_io, "\nBody:\n  ")
-        body = Expr(:body); body.args = uncompressed_ast(li)
-        body.typ = li.rettype
+        body = Expr(:body)
+        body.args = src.code
+        body.typ = rettype
        # Fix slot names and types in function body
-        show_unquoted(IOContext(IOContext(emph_io, :LAMBDAINFO => li),
-                                :LAMBDA_SLOTNAMES => slotnames),
+        show_unquoted(IOContext(IOContext(emph_io, :SOURCEINFO => src),
+                                :SOURCE_SLOTNAMES => slotnames),
                      body, 2)
        print(emph_io, '\n')
    end
@@ -709,12 +710,12 @@ whos(pat::Regex) = whos(STDOUT, current_module(), pat)
 #################################################################################
 
 """
    Base.summarysize(obj; exclude=Union{Module,DataType,TypeName}) -> Int
 
 Compute the amount of memory used by all unique objects reachable from the argument.
 Keyword argument `exclude` specifies a type of objects to exclude from the traversal.
""" -summarysize(obj; exclude = Union{Module,Function,DataType,TypeName}) = +summarysize(obj; exclude = Union{Module,DataType,TypeName}) = summarysize(obj, ObjectIdDict(), exclude) summarysize(obj::Symbol, seen, excl) = 0 diff --git a/base/linalg/ldlt.jl b/base/linalg/ldlt.jl index 15ea8de561301..8c2ee742ad580 100644 --- a/base/linalg/ldlt.jl +++ b/base/linalg/ldlt.jl @@ -88,4 +88,4 @@ convert(::Type{AbstractMatrix}, F::LDLt) = convert(SymTridiagonal, F) convert(::Type{AbstractArray}, F::LDLt) = convert(AbstractMatrix, F) convert(::Type{Matrix}, F::LDLt) = convert(Array, convert(AbstractArray, F)) convert(::Type{Array}, F::LDLt) = convert(Matrix, F) -full(F::LDLt) = convert(Array, F) \ No newline at end of file +full(F::LDLt) = convert(Array, F) diff --git a/base/linalg/lu.jl b/base/linalg/lu.jl index 5201decd46fc6..1416a12a2a189 100644 --- a/base/linalg/lu.jl +++ b/base/linalg/lu.jl @@ -477,4 +477,4 @@ convert{T}(::Type{AbstractMatrix}, F::Base.LinAlg.LU{T,Tridiagonal{T}}) = conver convert{T}(::Type{AbstractArray}, F::Base.LinAlg.LU{T,Tridiagonal{T}}) = convert(AbstractMatrix, F) convert{T}(::Type{Matrix}, F::Base.LinAlg.LU{T,Tridiagonal{T}}) = convert(Array, convert(AbstractArray, F)) convert{T}(::Type{Array}, F::Base.LinAlg.LU{T,Tridiagonal{T}}) = convert(Matrix, F) -full{T}(F::Base.LinAlg.LU{T,Tridiagonal{T}}) = convert(Array, F) \ No newline at end of file +full{T}(F::Base.LinAlg.LU{T,Tridiagonal{T}}) = convert(Array, F) diff --git a/base/linalg/svd.jl b/base/linalg/svd.jl index f5d7d1fa77726..f3596b6b3096a 100644 --- a/base/linalg/svd.jl +++ b/base/linalg/svd.jl @@ -241,4 +241,4 @@ convert(::Type{AbstractMatrix}, F::SVD) = (F.U * Diagonal(F.S)) * F.Vt convert(::Type{AbstractArray}, F::SVD) = convert(AbstractMatrix, F) convert(::Type{Matrix}, F::SVD) = convert(Array, convert(AbstractArray, F)) convert(::Type{Array}, F::SVD) = convert(Matrix, F) -full(F::SVD) = convert(Array, F) \ No newline at end of file +full(F::SVD) = convert(Array, F) diff --git a/base/methodshow.jl b/base/methodshow.jl index 06a24df9377d3..7eb1dc609c89a 100644 --- a/base/methodshow.jl +++ b/base/methodshow.jl @@ -2,7 +2,7 @@ # Method and method table pretty-printing -function argtype_decl(env, n, sig, i, nargs, isva) # -> (argname, argtype) +function argtype_decl(env, n, sig::DataType, i::Int, nargs, isva::Bool) # -> (argname, argtype) t = sig.parameters[i] if i == nargs && isva && !isvarargtype(t) t = Vararg{t,length(sig.parameters)-nargs+1} @@ -39,15 +39,19 @@ function arg_decl_parts(m::Method) else tv = Any[tv...] 
end - li = m.lambda_template - file, line = "", 0 - if li !== nothing && isdefined(li, :slotnames) - argnames = li.slotnames[1:li.nargs] - decls = Any[argtype_decl(:tvar_env => tv, argnames[i], m.sig, i, li.nargs, li.isva) - for i = 1:li.nargs] - if isdefined(li, :def) - file, line = li.def.file, li.def.line - end + if m.isstaged + src = m.unspecialized.inferred + elseif isdefined(m, :source) + src = m.source + else + src = nothing + end + file = m.file + line = m.line + if src !== nothing && src.slotnames !== nothing + argnames = src.slotnames[1:m.nargs] + decls = Any[argtype_decl(:tvar_env => tv, argnames[i], m.sig, i, m.nargs, m.isva) + for i = 1:m.nargs] else decls = Any[("", "") for i = 1:length(m.sig.parameters)] end @@ -59,7 +63,8 @@ function kwarg_decl(sig::ANY, kwtype::DataType) kwli = ccall(:jl_methtable_lookup, Any, (Any, Any), kwtype.name.mt, sig) if kwli !== nothing kwli = kwli::Method - kws = filter(x->!('#' in string(x)), kwli.lambda_template.slotnames[kwli.lambda_template.nargs+1:end]) + src = kwli.isstaged ? kwli.unspecialized.inferred : kwli.source + kws = filter(x->!('#' in string(x)), src.slotnames[kwli.nargs+1:end]) # ensure the kwarg... is always printed last. The order of the arguments are not # necessarily the same as defined in the function i = findfirst(x -> endswith(string(x), "..."), kws) diff --git a/base/reflection.jl b/base/reflection.jl index b3e86f5719197..d9cb9d33fc3ea 100644 --- a/base/reflection.jl +++ b/base/reflection.jl @@ -336,7 +336,13 @@ tt_cons(t::ANY, tup::ANY) = (@_pure_meta; Tuple{t, (isa(tup, Type) ? tup.paramet Returns an array of lowered ASTs for the methods matching the given generic function and type signature. """ -code_lowered(f, t::ANY=Tuple) = map(m -> (m::Method).lambda_template, methods(f, t)) +function code_lowered(f, t::ANY=Tuple) + asts = map(methods(f, t)) do m + m = m::Method + return uncompressed_ast(m, m.isstaged ? m.unspecialized.inferred : m.source) + end + return asts +end # low-level method lookup functions used by the compiler @@ -345,6 +351,7 @@ function _methods(f::ANY,t::ANY,lim) tt = isa(t,Type) ? Tuple{ft, t.parameters...} : Tuple{ft, t...} return _methods_by_ftype(tt, lim) end + function _methods_by_ftype(t::ANY, lim) tp = t.parameters::SimpleVector nu = 1 @@ -359,6 +366,7 @@ function _methods_by_ftype(t::ANY, lim) # XXX: the following can return incorrect answers that the above branch would have corrected return ccall(:jl_matching_methods, Any, (Any,Cint,Cint), t, lim, 0) end + function _methods(t::Array,i,lim::Integer,matching::Array{Any,1}) if i == 0 new = ccall(:jl_matching_methods, Any, (Any,Cint,Cint), Tuple{t...}, lim, 0) @@ -401,7 +409,7 @@ function MethodList(mt::MethodTable) visit(mt) do m push!(ms, m) end - MethodList(ms, mt) + return MethodList(ms, mt) end """ @@ -470,9 +478,14 @@ function length(mt::MethodTable) end isempty(mt::MethodTable) = (mt.defs === nothing) -uncompressed_ast(l::Method) = uncompressed_ast(l.lambda_template) -uncompressed_ast(l::LambdaInfo) = - isa(l.code,Array{UInt8,1}) ? 
ccall(:jl_uncompress_ast, Array{Any,1}, (Any,Any), l, l.code) : l.code +uncompressed_ast(m::Method) = uncompressed_ast(m, m.source) +function uncompressed_ast(m::Method, s::CodeInfo) + if isa(s.code, Array{UInt8,1}) + s = ccall(:jl_copy_code_info, Ref{CodeInfo}, (Any,), s) + s.code = ccall(:jl_uncompress_ast, Array{Any,1}, (Any, Any), m, s.code) + end + return s +end # Printing code representations in IR and assembly function _dump_function(f::ANY, t::ANY, native::Bool, wrapper::Bool, strip_ir_metadata::Bool, dump_module::Bool) @@ -480,21 +493,20 @@ function _dump_function(f::ANY, t::ANY, native::Bool, wrapper::Bool, strip_ir_me if isa(f, Core.Builtin) throw(ArgumentError("argument is not a generic function")) end - # get the LambdaInfo for the method match + # get the MethodInstance for the method match meth = which(f, t) t = to_tuple_type(t) ft = isa(f, Type) ? Type{f} : typeof(f) tt = Tuple{ft, t.parameters...} (ti, env) = ccall(:jl_match_method, Any, (Any, Any, Any), tt, meth.sig, meth.tvars)::SimpleVector - li = func_for_method_checked(meth, tt) - # try to infer it - (linfo, ty, inf) = Core.Inference.typeinf(li, ti, env, true) + meth = func_for_method_checked(meth, tt) + linfo = ccall(:jl_specializations_get_linfo, Ref{Core.MethodInstance}, (Any, Any, Any), meth, tt, env) # get the code for it return _dump_function(linfo, native, wrapper, strip_ir_metadata, dump_module) end -function _dump_function(linfo::LambdaInfo, native::Bool, wrapper::Bool, strip_ir_metadata::Bool, dump_module::Bool) +function _dump_function(linfo::Core.MethodInstance, native::Bool, wrapper::Bool, strip_ir_metadata::Bool, dump_module::Bool) if native llvmf = ccall(:jl_get_llvmf_decl, Ptr{Void}, (Any, Bool), linfo, wrapper) else @@ -540,7 +552,7 @@ code_native(io::IO, f::ANY, types::ANY=Tuple) = code_native(f::ANY, types::ANY=Tuple) = code_native(STDOUT, f, types) # give a decent error message if we try to instantiate a staged function on non-leaf types -function func_for_method_checked(m::Method, types) +function func_for_method_checked(m::Method, types::ANY) if m.isstaged && !isleaftype(types) error("cannot call @generated function `", m, "` ", "with abstract argument types: ", types) @@ -564,16 +576,16 @@ function code_typed(f::ANY, types::ANY=Tuple; optimize=true) types = to_tuple_type(types) asts = [] for x in _methods(f,types,-1) - linfo = func_for_method_checked(x[3], types) + meth = func_for_method_checked(x[3], types) if optimize - (li, ty, inf) = Core.Inference.typeinf(linfo, x[1], x[2], true) + (code, ty, inf) = Core.Inference.typeinf(meth, x[1], x[2], true) else - (li, ty, inf) = Core.Inference.typeinf_uncached(linfo, x[1], x[2], optimize=false) + (code, ty, inf) = Core.Inference.typeinf_uncached(meth, x[1], x[2], optimize=false) end inf || error("inference not successful") # Inference disabled - push!(asts, li) + push!(asts, uncompressed_ast(meth, code) => ty) end - asts + return asts end function return_types(f::ANY, types::ANY=Tuple) @@ -584,12 +596,12 @@ function return_types(f::ANY, types::ANY=Tuple) types = to_tuple_type(types) rt = [] for x in _methods(f,types,-1) - linfo = func_for_method_checked(x[3], types) - (_li, ty, inf) = Core.Inference.typeinf(linfo, x[1], x[2]) + meth = func_for_method_checked(x[3], types) + (code, ty, inf) = Core.Inference.typeinf(meth, x[1], x[2]) inf || error("inference not successful") # Inference disabled push!(rt, ty) end - rt + return rt end """ @@ -635,7 +647,7 @@ function which_module(m::Module, s::Symbol) if !isdefined(m, s) error("\"$s\" is not defined in 
module $m") end - binding_module(m, s) + return binding_module(m, s) end # function reflection @@ -646,7 +658,7 @@ Get the name of a generic `Function` as a symbol, or `:anonymous`. """ function_name(f::Function) = typeof(f).name.mt.name -functionloc(m::LambdaInfo) = functionloc(m.def) +functionloc(m::Core.MethodInstance) = functionloc(m.def) """ functionloc(m::Method) @@ -658,7 +670,7 @@ function functionloc(m::Method) if ln <= 0 error("could not determine location of method definition") end - (find_source_file(string(m.file)), ln) + return (find_source_file(string(m.file)), ln) end """ @@ -668,10 +680,10 @@ Returns a tuple `(filename,line)` giving the location of a generic `Function` de """ functionloc(f::ANY, types::ANY) = functionloc(which(f,types)) -function functionloc(f) +function functionloc(f::ANY) mt = methods(f) if isempty(mt) - if isa(f,Function) + if isa(f, Function) error("function has no definitions") else error("object is not callable") @@ -680,7 +692,7 @@ function functionloc(f) if length(mt) > 1 error("function has multiple methods; please specify a type signature") end - functionloc(first(mt)) + return functionloc(first(mt)) end """ @@ -696,12 +708,12 @@ function_module(f::Function) = datatype_module(typeof(f)) Determine the module containing a given definition of a generic function. """ -function function_module(f, types::ANY) +function function_module(f::ANY, types::ANY) m = methods(f, types) if isempty(m) error("no matching methods") end - first(m).module + return first(m).module end """ diff --git a/base/replutil.jl b/base/replutil.jl index 0c40305f80917..978b3946d3379 100644 --- a/base/replutil.jl +++ b/base/replutil.jl @@ -117,23 +117,6 @@ function show(io::IO, ::MIME"text/plain", f::Function) end end -function show(io::IO, ::MIME"text/plain", l::LambdaInfo) - show(io, l) - # Fix slot names and types in function body - ast = uncompressed_ast(l) - if ast !== nothing - println(io) - lambda_io = IOContext(io, :LAMBDAINFO => l) - if isdefined(l, :slotnames) - lambda_io = IOContext(lambda_io, :LAMBDA_SLOTNAMES => lambdainfo_slotnames(l)) - end - body = Expr(:body) - body.args = ast - body.typ = l.rettype - show(lambda_io, body) - end -end - function show(io::IO, ::MIME"text/plain", r::LinSpace) # show for linspace, e.g. 
# linspace(1,3,7) diff --git a/base/serialize.jl b/base/serialize.jl index fceb4e6cc9b49..039722a719421 100644 --- a/base/serialize.jl +++ b/base/serialize.jl @@ -29,7 +29,7 @@ const TAGS = Any[ #LongSymbol, LongTuple, LongExpr, Symbol, Tuple, Expr, # dummy entries, intentionally shadowed by earlier ones LineNumberNode, Slot, LabelNode, GotoNode, - QuoteNode, :reserved23 #=was TopNode=#, TypeVar, Core.Box, LambdaInfo, + QuoteNode, CodeInfo, TypeVar, Core.Box, Core.MethodInstance, Module, #=UndefRefTag=#Symbol, Task, String, Float16, SimpleVector, #=BackrefTag=#Symbol, Method, GlobalRef, @@ -81,7 +81,7 @@ const BACKREF_TAG = Int32(sertag(SimpleVector)+1) const EXPR_TAG = sertag(Expr) const LONGEXPR_TAG = Int32(sertag(Expr)+3) const MODULE_TAG = sertag(Module) -const LAMBDAINFO_TAG = sertag(LambdaInfo) +const METHODINSTANCE_TAG = sertag(Core.MethodInstance) const METHOD_TAG = sertag(Method) const TASK_TAG = sertag(Task) const DATATYPE_TAG = sertag(DataType) @@ -335,33 +335,27 @@ function serialize(s::AbstractSerializer, meth::Method) serialize(s, meth.line) serialize(s, meth.sig) serialize(s, meth.tvars) + serialize(s, meth.sparam_syms) serialize(s, meth.ambig) + serialize(s, meth.nargs) + serialize(s, meth.isva) serialize(s, meth.isstaged) - serialize(s, meth.lambda_template) + if meth.isstaged + serialize(s, uncompressed_ast(meth, meth.unspecialized.inferred)) + else + serialize(s, uncompressed_ast(meth, meth.source)) + end nothing end -function serialize(s::AbstractSerializer, linfo::LambdaInfo) +function serialize(s::AbstractSerializer, linfo::Core.MethodInstance) serialize_cycle(s, linfo) && return - writetag(s.io, LAMBDAINFO_TAG) - serialize(s, uncompressed_ast(linfo)) - serialize(s, linfo.slotnames) - serialize(s, linfo.slottypes) - serialize(s, linfo.slotflags) - serialize(s, linfo.ssavaluetypes) - serialize(s, linfo.sparam_syms) + isdefined(linfo, :def) && error("can only serialize toplevel MethodInstance objects") + writetag(s.io, METHODINSTANCE_TAG) + serialize(s, linfo.inferred) serialize(s, linfo.sparam_vals) serialize(s, linfo.rettype) serialize(s, linfo.specTypes) - serialize(s, linfo.inferred) - if isdefined(linfo, :def) - serialize(s, linfo.def) - else - writetag(s.io, UNDEFREF_TAG) - end - serialize(s, linfo.pure) - serialize(s, linfo.nargs) - serialize(s, linfo.isva) end function serialize(s::AbstractSerializer, t::Task) @@ -621,12 +615,15 @@ function deserialize(s::AbstractSerializer, ::Type{Method}) mod = deserialize(s)::Module name = deserialize(s)::Symbol file = deserialize(s)::Symbol - line = deserialize(s) + line = deserialize(s)::Int32 sig = deserialize(s) - tvars = deserialize(s) - ambig = deserialize(s) + tvars = deserialize(s)::Union{SimpleVector, TypeVar} + sparam_syms = deserialize(s)::SimpleVector + ambig = deserialize(s)::Union{Array{Any,1}, Void} + nargs = deserialize(s)::Int32 + isva = deserialize(s)::Bool isstaged = deserialize(s)::Bool - template = deserialize(s)::LambdaInfo + template = deserialize(s)::CodeInfo if makenew meth.module = mod meth.name = name @@ -634,35 +631,32 @@ function deserialize(s::AbstractSerializer, ::Type{Method}) meth.line = line meth.sig = sig meth.tvars = tvars + meth.sparam_syms = sparam_syms meth.ambig = ambig meth.isstaged = isstaged - meth.lambda_template = template - ccall(:jl_method_init_properties, Void, (Any,), meth) + meth.nargs = nargs + meth.isva = isva + # TODO: compress template + if isstaged + linfo = ccall(:jl_new_method_instance_uninit, Ref{Core.MethodInstance}, ()) + linfo.specTypes = Tuple + linfo.inferred = 
template + meth.unspecialized = linfo + else + meth.source = template + end known_object_data[lnumber] = meth end return meth end -function deserialize(s::AbstractSerializer, ::Type{LambdaInfo}) - linfo = ccall(:jl_new_lambda_info_uninit, Ref{LambdaInfo}, (Ptr{Void},), C_NULL) +function deserialize(s::AbstractSerializer, ::Type{Core.MethodInstance}) + linfo = ccall(:jl_new_method_instance_uninit, Ref{Core.MethodInstance}, (Ptr{Void},), C_NULL) deserialize_cycle(s, linfo) - linfo.code = deserialize(s) - linfo.slotnames = deserialize(s) - linfo.slottypes = deserialize(s) - linfo.slotflags = deserialize(s) - linfo.ssavaluetypes = deserialize(s) - linfo.sparam_syms = deserialize(s)::SimpleVector + linfo.inferred = deserialize(s)::CodeInfo linfo.sparam_vals = deserialize(s)::SimpleVector linfo.rettype = deserialize(s) linfo.specTypes = deserialize(s) - linfo.inferred = deserialize(s)::Bool - tag = Int32(read(s.io, UInt8)::UInt8) - if tag != UNDEFREF_TAG - linfo.def = handle_deserialize(s, tag)::Method - end - linfo.pure = deserialize(s)::Bool - linfo.nargs = deserialize(s) - linfo.isva = deserialize(s)::Bool return linfo end diff --git a/base/show.jl b/base/show.jl index 2a61334a17b3e..e9e6455e7f618 100644 --- a/base/show.jl +++ b/base/show.jl @@ -272,8 +272,8 @@ function show(io::IO, m::Module) end end -function lambdainfo_slotnames(l::LambdaInfo) - slotnames = l.slotnames +function sourceinfo_slotnames(src::CodeInfo) + slotnames = src.slotnames isa(slotnames, Array) || return String[] names = Dict{String,Int}() printnames = Vector{String}(length(slotnames)) @@ -292,20 +292,37 @@ function lambdainfo_slotnames(l::LambdaInfo) return printnames end -function show(io::IO, l::LambdaInfo) +function show(io::IO, l::Core.MethodInstance) if isdefined(l, :def) - if l === l.def.lambda_template - print(io, "LambdaInfo template for ") + if l.def.isstaged && l === l.def.unspecialized + print(io, "MethodInstance generator for ") show(io, l.def) else - print(io, "LambdaInfo for ") + print(io, "MethodInstance for ") show_lambda_types(io, l) end else - print(io, "Toplevel LambdaInfo thunk") + print(io, "Toplevel MethodInstance thunk") end end +function show(io::IO, src::CodeInfo) + # Fix slot names and types in function body + print(io, "CodeInfo(") + if isa(src.code, Array{Any,1}) + lambda_io = IOContext(io, :SOURCEINFO => src) + if src.slotnames !== nothing + lambda_io = IOContext(lambda_io, :SOURCE_SLOTNAMES => sourceinfo_slotnames(src)) + end + body = Expr(:body) + body.args = src.code + show(lambda_io, body) + else + print(io, "") + end + print(io, ")") +end + function show_delim_array(io::IO, itr::Union{AbstractArray,SimpleVector}, op, delim, cl, delim_one, i1=first(linearindices(itr)), l=last(linearindices(itr))) print(io, op) @@ -593,16 +610,16 @@ show_unquoted(io::IO, ex::GlobalRef, ::Int, ::Int) = print(io, ex.mod, '.', function show_unquoted(io::IO, ex::Slot, ::Int, ::Int) typ = isa(ex,TypedSlot) ? 
ex.typ : Any slotid = ex.id - li = get(io, :LAMBDAINFO, false) - if isa(li, LambdaInfo) - slottypes = (li::LambdaInfo).slottypes + src = get(io, :SOURCEINFO, false) + if isa(src, CodeInfo) + slottypes = (src::CodeInfo).slottypes if isa(slottypes, Array) && slotid <= length(slottypes::Array) slottype = slottypes[slotid] # The Slot in assignment can somehow have an Any type slottype <: typ && (typ = slottype) end end - slotnames = get(io, :LAMBDA_SLOTNAMES, false) + slotnames = get(io, :SOURCE_SLOTNAMES, false) if (isa(slotnames, Vector{String}) && slotid <= length(slotnames::Vector{String})) print(io, (slotnames::Vector{String})[slotid]) @@ -1009,7 +1026,7 @@ function show_unquoted(io::IO, ex::Expr, indent::Int, prec::Int) nothing end -function show_lambda_types(io::IO, li::LambdaInfo) +function show_lambda_types(io::IO, li::Core.MethodInstance) # print a method signature tuple for a lambda definition if li.specTypes === Tuple print(io, li.def.name, "(...)") diff --git a/base/stacktraces.jl b/base/stacktraces.jl index dbc64382e540a..7f92db5995b0b 100644 --- a/base/stacktraces.jl +++ b/base/stacktraces.jl @@ -17,9 +17,9 @@ Stack information representing execution context, with the following fields: The name of the function containing the execution context. -- `linfo::Nullable{LambdaInfo}` +- `linfo::Nullable{Core.MethodInstance}` - The LambdaInfo containing the execution context (if it could be found). + The MethodInstance containing the execution context (if it could be found). - `file::Symbol` @@ -49,8 +49,8 @@ immutable StackFrame # this type should be kept platform-agnostic so that profil file::Symbol "the line number in the file containing the execution context" line::Int - "the LambdaInfo containing the execution context (if it could be found)" - linfo::Nullable{LambdaInfo} + "the MethodInstance containing the execution context (if it could be found)" + linfo::Nullable{Core.MethodInstance} "true if the code is from C" from_c::Bool "true if the code is from an inlined frame" @@ -59,7 +59,7 @@ immutable StackFrame # this type should be kept platform-agnostic so that profil pointer::UInt64 # Large enough to be read losslessly on 32- and 64-bit machines. end -StackFrame(func, file, line) = StackFrame(func, file, line, Nullable{LambdaInfo}(), false, false, 0) +StackFrame(func, file, line) = StackFrame(func, file, line, Nullable{Core.MethodInstance}(), false, false, 0) """ StackTrace @@ -70,7 +70,7 @@ An alias for `Vector{StackFrame}` provided for convenience; returned by calls to typealias StackTrace Vector{StackFrame} const empty_sym = Symbol("") -const UNKNOWN = StackFrame(empty_sym, empty_sym, -1, Nullable{LambdaInfo}(), true, false, 0) # === lookup(C_NULL) +const UNKNOWN = StackFrame(empty_sym, empty_sym, -1, Nullable{Core.MethodInstance}(), true, false, 0) # === lookup(C_NULL) #= @@ -110,7 +110,7 @@ function deserialize(s::AbstractSerializer, ::Type{StackFrame}) from_c = read(s.io, Bool) inlined = read(s.io, Bool) pointer = read(s.io, UInt64) - return StackFrame(func, file, line, Nullable{LambdaInfo}(), from_c, inlined, pointer) + return StackFrame(func, file, line, Nullable{Core.MethodInstance}(), from_c, inlined, pointer) end @@ -123,12 +123,12 @@ inlined at that point, innermost function first. 
""" function lookup(pointer::Ptr{Void}) infos = ccall(:jl_lookup_code_address, Any, (Ptr{Void}, Cint), pointer - 1, false) - isempty(infos) && return [StackFrame(empty_sym, empty_sym, -1, Nullable{LambdaInfo}(), true, false, convert(UInt64, pointer))] + isempty(infos) && return [StackFrame(empty_sym, empty_sym, -1, Nullable{Core.MethodInstance}(), true, false, convert(UInt64, pointer))] res = Array{StackFrame}(length(infos)) for i in 1:length(infos) info = infos[i] @assert(length(info) == 7) - li = info[4] === nothing ? Nullable{LambdaInfo}() : Nullable{LambdaInfo}(info[4]) + li = info[4] === nothing ? Nullable{Core.MethodInstance}() : Nullable{Core.MethodInstance}(info[4]) res[i] = StackFrame(info[1], info[2], info[3], li, info[5], info[6], info[7]) end return res diff --git a/doc/devdocs/ast.rst b/doc/devdocs/ast.rst index 72712803a5fcc..a2673a0022d77 100644 --- a/doc/devdocs/ast.rst +++ b/doc/devdocs/ast.rst @@ -34,12 +34,12 @@ The following data types exist in lowered form: Both types have an integer-valued ``id`` field giving the slot index. Most slots have the same type at all uses, and so are represented with ``SlotNumber``. The types of these slots are found in the ``slottypes`` - field of their ``LambdaInfo`` object. + field of their ``MethodInstance`` object. Slots that require per-use type annotations are represented with ``TypedSlot``, which has a ``typ`` field. -``LambdaInfo`` - wraps the IR of each method. +``CodeInfo`` + wraps the IR of a method. ``LineNumberNode`` contains a single number, specifying the line number the next statement @@ -78,7 +78,7 @@ These symbols appear in the ``head`` field of ``Expr``\s in lowered form. ``args[2:end]`` are the arguments. ``invoke`` - function call (static dispatch). ``args[1]`` is the LambdaInfo to call, + function call (static dispatch). ``args[1]`` is the MethodInstance to call, ``args[2:end]`` are the arguments (including the function that is being called, at ``args[2]``). ``static_parameter`` @@ -117,11 +117,11 @@ These symbols appear in the ``head`` field of ``Expr``\s in lowered form. ``(::T)(x) = x``. ``args[2]`` - a ``SimpleVector`` of argument type data. ``args[2][1]`` is - a ``Tuple`` type of the argument types, and ``args[2][2]`` is a + a ``SimpleVector`` of the argument types, and ``args[2][2]`` is a ``SimpleVector`` of type variables corresponding to the method's static parameters. - ``args[3]`` - a ``LambdaInfo`` of the method itself. For "out of scope" + ``args[3]`` - a ``CodeInfo`` of the method itself. For "out of scope" method definitions (adding a method to a function that also has methods defined in different scopes) this is an expression that evaluates to a ``:lambda`` expression. @@ -183,12 +183,66 @@ These symbols appear in the ``head`` field of ``Expr``\s in lowered form. ``:pop_loc``: returns to the source location before the matching ``:push_loc``. -LambdaInfo -~~~~~~~~~~ -``sparam_syms`` - The names (symbols) of static parameters. +Method +~~~~~~ + +A unique'd container describing the shared metadata for a single (unspecialized) method. + +``name``, ``module``, ``file``, ``line``, ``sig`` - Metadata to uniquely identify the method + for the computer and the human + +``ambig`` - Cache of other methods that may be ambiguous with this one + +``specializations`` - Cache of all MethodInstance ever created for this Method, + used to ensure uniqueness. Uniqueness is required for efficiency, + especially for incremental precompile and tracking of method invalidation. 
+
+``source`` - The original source code (compressed)
+
+``roots`` - Pointers to non-AST things that have been interpolated into the AST,
+  required by compression of the AST, type-inference, or the generation of native code.
+
+``nargs``, ``isva``, ``called``, ``isstaged`` - Descriptive bit-fields for the source code of this Method.
+
+
+MethodInstance
+~~~~~~~~~~~~~~
+
+A unique'd container describing a single callable signature for a Method.
+See especially :ref:`devdocs-locks` for important details on how to modify these fields safely.

-``sparam_vals`` - The values of the static parameters (once known), indexed by ``sparam_syms``.
+``specTypes`` - The primary key for this MethodInstance.
+  Uniqueness is guaranteed through a ``def.specializations`` lookup.
+
+``def`` - The ``Method`` that this function describes a specialization of.
+  Or ``#undef``, if this is a top-level Lambda that is not part of a Method.
+
+``sparam_vals`` - The values of the static parameters in specTypes
+  indexed by ``def.sparam_syms``. For the ``MethodInstance`` at ``Method.unspecialized``,
+  this is the empty ``SimpleVector``. But for a runtime ``MethodInstance`` from the ``MethodTable`` cache,
+  this will always be defined and indexable.
+
+``rettype`` - The inferred return type for the ``specFunctionObject`` field,
+  which (in most cases) is also the computed return type for the function in general.
+
+``inferred`` - May contain a cache of the inferred source for this function,
+  or other information about the inference result, such as a constant return value
+  (when ``jlcall_api == 2``). It may also be set to ``nothing`` to simply indicate
+  that ``rettype`` is inferred.
+
+``fptr`` - The generic jlcall entry point
+
+``jlcall_api`` - The ABI to use when calling ``fptr``. Some significant ones include:
+  - 0 - not compiled yet
+  - 1 - JL_CALLABLE ``jl_value_t *(*)(jl_function_t *f, jl_value_t *args[nargs], uint32_t nargs)``
+  - 2 - constant (stored in ``inferred``)
+
+
+CodeInfo
+~~~~~~~~
+
+A temporary container for holding lowered source code.

``code`` - An ``Any`` array of statements, or a UInt8 array with a compressed representation of the code.
@@ -197,20 +251,26 @@ LambdaInfo

``slottypes`` - An array of types for the slots.

``slotflags`` - A UInt8 array of slot properties, represented as bit flags:
-  - 1 - captured (closed over)
  - 2 - assigned (only false if there are *no* assignment statements with this var on the left)
-  - 4 - assigned by an inner function
  - 8 - const (currently unused for local variables)
  - 16 - statically assigned once
  - 32 - might be used before assigned. This flag is only valid after type inference.

-``ssavaluetypes`` - Either an array or an Int giving the number of compiler-inserted
-  temporary locations in the function. If an array, specifies a type for each location.
+``ssavaluetypes`` - Either an array or an Int.
+  If an Int, it gives the number of compiler-inserted temporary locations in the function.
+  If an array, specifies a type for each location.
+
+Boolean properties:
+
+``inferred`` - Whether this has been produced by type inference
+
+``inlineable`` - Whether this should be inlined

-``nargs`` - The number of argument slots. The first ``nargs`` entries of the slots
-  arrays refer to arguments.
+``propagate_inbounds`` - Whether this should propagate ``@inbounds`` when inlined
+  for the purpose of eliding ``@boundscheck`` blocks

-``isva`` - A boolean indicating whether the function is variadic.
+``pure`` - Whether this is known to be a pure function of its arguments,
+  without respect to the state of the method caches or other mutable global state

Surface syntax AST
diff --git a/doc/devdocs/debuggingtips.rst b/doc/devdocs/debuggingtips.rst
index 153e8003399e4..7842e1960b38a 100644
--- a/doc/devdocs/debuggingtips.rst
+++ b/doc/devdocs/debuggingtips.rst
@@ -70,7 +70,7 @@ It's particularly helpful to back up to the ``jl_apply`` frame, from which you c

  (gdb) call jl_(args[0])

-Another useful frame is ``to_function(jl_lambda_info_t *li, bool cstyle)``. The ``jl_lambda_info_t*`` argument is a struct with a reference to the final AST sent into the compiler. However, the AST at this point will usually be compressed; to view the AST, call ``jl_uncompress_ast`` and then pass the result to ``jl_``::
+Another useful frame is ``to_function(jl_method_instance_t *li, bool cstyle)``. The ``jl_method_instance_t*`` argument is a struct with a reference to the final AST sent into the compiler. However, the AST at this point will usually be compressed; to view the AST, call ``jl_uncompress_ast`` and then pass the result to ``jl_``::

  #2 0x00007ffff7928bf7 in to_function (li=0x2812060, cstyle=false) at codegen.cpp:584
  584 abort();
diff --git a/doc/devdocs/eval.rst b/doc/devdocs/eval.rst
index 9d944838c0d28..3f07e159989b0 100644
--- a/doc/devdocs/eval.rst
+++ b/doc/devdocs/eval.rst
@@ -150,7 +150,7 @@ Codegen is the process of turning a Julia AST into native machine code.

The JIT environment is initialized by an early call to `jl_init_codegen in codegen.cpp `_.

-On demand, a Julia method is converted into a native function by the function :c:func:`emit_function(jl_lambda_info_t*) `.
+On demand, a Julia method is converted into a native function by the function :c:func:`emit_function(jl_method_instance_t*) `.
(note, when using the MCJIT (in LLVM v3.4+), each function must be JIT into a new module.)
This function recursively calls :c:func:`emit_expr` until the entire function has been emitted.
diff --git a/doc/devdocs/locks.rst b/doc/devdocs/locks.rst
index ea37560bd2eaf..1034128b95e8c 100644
--- a/doc/devdocs/locks.rst
+++ b/doc/devdocs/locks.rst
@@ -1,4 +1,4 @@
-.. currentmodule:: Base
+.. _devdocs-locks:

****************************************************
Proper maintenance and care of multi-threading locks
@@ -48,21 +48,11 @@ The following is a level 3 lock, which can only acquire level 1 or level 2 locks

  * Method->writelock

-  but note that this is violated by staged functions!
-
The following is a level 4 lock, which can only recurse to acquire level 1, 2, or 3 locks:

  * MethodTable->writelock

-  but note that this is violated by staged functions!
-
-The following is a proposed level 5 lock, which can only recurse to acquire locks at lower levels:
-
-  * staged
-
-  this theoretical lock would create a priority inversion from the `method->writelock` (level 3),
-  but only prohibiting running any staging function in parallel
-  (thus allowing temporary release of the MethodTable and Method locks)
+No Julia code may be called while holding a lock above this point.
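A minimal sketch of that discipline (illustrative only; it assumes Julia's ``JL_LOCK`` /
``JL_UNLOCK`` mutex macros and the ``writelock`` fields named above)::

    /* locks are taken in strictly decreasing level order, released in reverse */
    JL_LOCK(&mt->writelock);   /* MethodTable->writelock, level 4 */
    JL_LOCK(&m->writelock);    /* Method->writelock, level 3: lower level, so OK */
    /* taking any higher-level lock here (e.g. codegen) would violate the hierarchy */
    JL_UNLOCK(&m->writelock);
    JL_UNLOCK(&mt->writelock);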
The following is a level 6 lock, which can only recurse to acquire locks at lower levels:
@@ -74,6 +64,8 @@ The following is an almost root lock (level end-1), meaning only the root look m

   this one is perhaps one of the most tricky ones, since type-inference
   can be invoked from many points

+  currently the lock is merged with the codegen lock, since they call each other recursively
+
The following is the root lock, meaning no other lock shall be held when trying to acquire it:

  * toplevel
@@ -95,49 +87,70 @@ The following locks are broken:

   fix: create it

-* codegen
-  recursive (through ``static_eval``), but caller might also be holding locks (due to staged functions)
+Shared Global Data Structures
+-----------------------------
-  other issues?
+These data structures each need locks due to being shared mutable global state.
+It is the inverse list for the above lock priority list.
+This list does not include level 1 leaf resources due to their simplicity.
+  + def

-MethodTable modifications (def, cache, kwsorter type) : MethodTable->writelock
+  - These are set by ``jl_type_infer`` (while holding codegen lock):
-Type declarations : toplevel lock
+    + rettype
-Type application : typecache lock
+    + inferred
-Module serializer : toplevel lock
+    + these can also be reset; see ``jl_set_lambda_rettype`` for that logic,
+      as it needs to keep ``functionObjectsDecls`` in sync
+
+  - ``inInference`` flag:
+
+    + optimization to quickly avoid recursing into ``jl_type_infer`` while it is already running
+
+    + actual state (of setting ``inferred``, then ``fptr``) is protected by codegen lock
+
+  - Function pointers (``jlcall_api`` and ``fptr``, ``unspecialized_ducttape``):
+
+    + these transition once, from ``NULL`` to a value, while the codegen lock is held
+
+  - Code-generator cache (the contents of ``functionObjectsDecls``):
+
+    + these can transition multiple times, but only while the codegen lock is held
+
+    + it is valid to use an old version of this, or block for a new version of this,
+      so races are benign, as long as the code is careful not to reference other data
+      in the method instance (such as ``rettype``) and assume it is coordinated,
+      unless also holding the codegen lock
+
+  - ``compile_traced`` flag:
+
+    + unknown

-JIT : codegen lock
LLVMContext : codegen lock

Method : Method->writelock
+  - roots array (serializer and codegen)
  - invoke / specializations / tfunc modifications
diff --git a/doc/devdocs/object.rst b/doc/devdocs/object.rst
index 99137ea5be094..add317bfd2120 100644
--- a/doc/devdocs/object.rst
+++ b/doc/devdocs/object.rst
@@ -136,10 +136,10 @@ Symbols::

  jl_sym_t *jl_symbol(const char *str);

-Functions and LambdaInfo::
+Functions and MethodInstance::

  jl_function_t *jl_new_generic_function(jl_sym_t *name);
-  jl_lambda_info_t *jl_new_lambda_info(jl_value_t *ast, jl_tuple_t *sparams);
+  jl_method_instance_t *jl_new_method_instance(jl_value_t *ast, jl_tuple_t *sparams);

Arrays::
diff --git a/doc/devdocs/reflection.rst b/doc/devdocs/reflection.rst
index 22d053413e362..1e91c73a97a41 100644
--- a/doc/devdocs/reflection.rst
+++ b/doc/devdocs/reflection.rst
@@ -106,7 +106,7 @@ variable assignments:

  julia> expand( :(f() = 1) )
  :(begin
      $(Expr(:method, :f))
-      $(Expr(:method, :f, :((Core.svec)((Core.apply_type)(Tuple,(Core.Typeof)(f)),(Core.svec)())), Toplevel LambdaInfo thunk, false))
+      $(Expr(:method, :f, :((Core.svec)((Core.apply_type)(Tuple,(Core.Typeof)(f)),(Core.svec)())), Toplevel MethodInstance thunk, false))
      return f
  end)
diff --git a/doc/manual/modules.rst b/doc/manual/modules.rst
index 9ccc2276a6695..2f0799ecaad11 100644
--- a/doc/manual/modules.rst
+++ b/doc/manual/modules.rst
@@ -445,7 +445,7 @@ A few other points to be aware of:
   (this will be fixed in an upcoming release).

5. It is usually best to avoid capturing references to instances of internal metadata objects such as
-   Method, LambdaInfo, MethodTable, TypeMapLevel, TypeMapEntry
+   Method, MethodInstance, MethodTable, TypeMapLevel, TypeMapEntry
   and fields of those objects, as this can confuse the serializer
   and may not lead to the outcome you desire.
It is not necessarily an error to do this, diff --git a/doc/manual/stacktraces.rst b/doc/manual/stacktraces.rst index 32b14abef18a1..f55bbabf67144 100644 --- a/doc/manual/stacktraces.rst +++ b/doc/manual/stacktraces.rst @@ -86,7 +86,7 @@ Each :obj:`StackFrame` contains the function name, file name, line number, lambd 234 julia> top_frame.linfo - Nullable{LambdaInfo}(LambdaInfo for eval(::Module, ::Any)) + Nullable{MethodInstance}(MethodInstance for eval(::Module, ::Any)) julia> top_frame.inlined false diff --git a/doc/stdlib/base.rst b/doc/stdlib/base.rst index 9c551a29ec945..d6e9b08652cad 100644 --- a/doc/stdlib/base.rst +++ b/doc/stdlib/base.rst @@ -58,7 +58,7 @@ Getting Around The memory consumption estimate is an approximate lower bound on the size of the internal structure of the object. -.. function:: Base.summarysize(obj; exclude=Union{Module,Function,DataType,TypeName}) -> Int +.. function:: Base.summarysize(obj; exclude=Union{Module,DataType,TypeName}) -> Int .. Docstring generated from Julia source diff --git a/doc/stdlib/stacktraces.rst b/doc/stdlib/stacktraces.rst index 1cc87e89aac3d..f8af24f1d8627 100644 --- a/doc/stdlib/stacktraces.rst +++ b/doc/stdlib/stacktraces.rst @@ -17,9 +17,9 @@ * ``func::Symbol`` The name of the function containing the execution context. - * ``linfo::Nullable{LambdaInfo}`` + * ``linfo::Nullable{MethodInstance}`` - The LambdaInfo containing the execution context (if it could be found). + The MethodInstance containing the execution context (if it could be found). * ``file::Symbol`` The path to the file containing the execution context. diff --git a/src/alloc.c b/src/alloc.c index 7ff367c16aa00..61066a8a65809 100644 --- a/src/alloc.c +++ b/src/alloc.c @@ -42,7 +42,8 @@ jl_datatype_t *jl_method_type; jl_datatype_t *jl_methtable_type; jl_datatype_t *jl_typemap_entry_type; jl_datatype_t *jl_typemap_level_type; -jl_datatype_t *jl_lambda_info_type; +jl_datatype_t *jl_method_instance_type; +jl_datatype_t *jl_code_info_type; jl_datatype_t *jl_module_type; jl_datatype_t *jl_errorexception_type=NULL; jl_datatype_t *jl_argumenterror_type; @@ -267,12 +268,12 @@ JL_DLLEXPORT jl_value_t *jl_new_struct_uninit(jl_datatype_t *type) extern jl_value_t *jl_builtin_getfield; -jl_value_t *jl_resolve_globals(jl_value_t *expr, jl_lambda_info_t *lam) +jl_value_t *jl_resolve_globals(jl_value_t *expr, jl_module_t *module) { if (jl_is_symbol(expr)) { - if (lam->def->module == NULL) + if (module == NULL) return expr; - return jl_module_globalref(lam->def->module, (jl_sym_t*)expr); + return jl_module_globalref(module, (jl_sym_t*)expr); } else if (jl_is_expr(expr)) { jl_expr_t *e = (jl_expr_t*)expr; @@ -282,8 +283,8 @@ jl_value_t *jl_resolve_globals(jl_value_t *expr, jl_lambda_info_t *lam) e->head == boundscheck_sym || e->head == simdloop_sym) { } else { - if (e->head == call_sym && jl_expr_nargs(e) == 3 && jl_is_quotenode(jl_exprarg(e, 2)) && - lam->def->module != NULL) { + if (e->head == call_sym && jl_expr_nargs(e) == 3 && + jl_is_quotenode(jl_exprarg(e, 2)) && module != NULL) { // replace getfield(module_expr, :sym) with GlobalRef jl_value_t *s = jl_fieldref(jl_exprarg(e, 2), 0); jl_value_t *fe = jl_exprarg(e, 0); @@ -301,8 +302,8 @@ jl_value_t *jl_resolve_globals(jl_value_t *expr, jl_lambda_info_t *lam) me_mod = jl_globalref_mod(me); me_sym = jl_globalref_name(me); } - else if (jl_is_symbol(me) && jl_binding_resolved_p(lam->def->module, (jl_sym_t*)me)) { - me_mod = lam->def->module; + else if (jl_is_symbol(me) && jl_binding_resolved_p(module, (jl_sym_t*)me)) { + me_mod = module; 
me_sym = (jl_sym_t*)me; } if (me_mod && me_sym) { @@ -321,30 +322,33 @@ jl_value_t *jl_resolve_globals(jl_value_t *expr, jl_lambda_info_t *lam) if (e->head == method_sym || e->head == abstracttype_sym || e->head == compositetype_sym || e->head == bitstype_sym || e->head == module_sym) i++; - for(; i < jl_array_len(e->args); i++) { - jl_exprargset(e, i, jl_resolve_globals(jl_exprarg(e, i), lam)); + for (; i < jl_array_len(e->args); i++) { + // TODO: this should be making a copy, not mutating the source + jl_exprargset(e, i, jl_resolve_globals(jl_exprarg(e, i), module)); } } } return expr; } -// copy a :lambda Expr into its LambdaInfo representation -static void jl_lambda_info_set_ast(jl_lambda_info_t *li, jl_expr_t *ast) +// copy a :lambda Expr into its CodeInfo representation, +// including popping of known meta nodes +static void jl_code_info_set_ast(jl_code_info_t *li, jl_expr_t *ast) { assert(jl_is_expr(ast)); jl_expr_t *bodyex = (jl_expr_t*)jl_exprarg(ast, 2); assert(jl_is_expr(bodyex)); jl_array_t *body = bodyex->args; - li->code = (jl_value_t*)body; jl_gc_wb(li, li->code); + li->code = body; + jl_gc_wb(li, li->code); size_t j, n = jl_array_len(body); jl_value_t **bd = (jl_value_t**)jl_array_data((jl_array_t*)li->code); - for(j=0; j < n; j++) { + for (j = 0; j < n; j++) { jl_value_t *st = bd[j]; if (jl_is_expr(st) && ((jl_expr_t*)st)->head == meta_sym) { size_t k, ins = 0, na = jl_expr_nargs(st); jl_array_t *meta = ((jl_expr_t*)st)->args; - for(k=0; k < na; k++) { + for (k = 0; k < na; k++) { jl_value_t *ma = jl_array_ptr_ref(meta, k); if (ma == (jl_value_t*)pure_sym) li->pure = 1; @@ -358,7 +362,7 @@ static void jl_lambda_info_set_ast(jl_lambda_info_t *li, jl_expr_t *ast) if (ins == 0) bd[j] = jl_nothing; else - jl_array_del_end(meta, na-ins); + jl_array_del_end(meta, na - ins); } } jl_array_t *vinfo = (jl_array_t*)jl_exprarg(ast, 1); @@ -375,7 +379,7 @@ static void jl_lambda_info_set_ast(jl_lambda_info_t *li, jl_expr_t *ast) li->ssavaluetypes = jl_box_long(nssavalue); jl_gc_wb(li, li->ssavaluetypes); int i; - for(i=0; i < nslots; i++) { + for (i = 0; i < nslots; i++) { jl_value_t *vi = jl_array_ptr_ref(vis, i); jl_sym_t *name = (jl_sym_t*)jl_array_ptr_ref(vi, 0); assert(jl_is_symbol(name)); @@ -393,138 +397,138 @@ static void jl_lambda_info_set_ast(jl_lambda_info_t *li, jl_expr_t *ast) jl_array_ptr_set(li->slotnames, i, name); jl_array_uint8_set(li->slotflags, i, jl_unbox_long(jl_array_ptr_ref(vi, 2))); } - jl_array_t *sparams = (jl_array_t*)jl_array_ptr_ref(vinfo, 3); - assert(jl_is_array(sparams)); - li->sparam_syms = jl_alloc_svec_uninit(jl_array_len(sparams)); - jl_gc_wb(li, li->sparam_syms); - for(i=0; i < jl_array_len(sparams); i++) { - jl_svecset(li->sparam_syms, i, jl_array_ptr_ref(sparams, i)); - } - jl_array_t *args = (jl_array_t*)jl_exprarg(ast, 0); - size_t narg = jl_array_len(args); - li->nargs = narg; - li->isva = narg > 0 && jl_is_rest_arg(jl_array_ptr_ref(args, narg - 1)); } -JL_DLLEXPORT jl_lambda_info_t *jl_new_lambda_info_uninit(void) +JL_DLLEXPORT jl_method_instance_t *jl_new_method_instance_uninit(void) { jl_ptls_t ptls = jl_get_ptls_states(); - jl_lambda_info_t *li = - (jl_lambda_info_t*)jl_gc_alloc(ptls, sizeof(jl_lambda_info_t), - jl_lambda_info_type); - li->code = NULL; - li->slotnames = NULL; - li->slotflags = NULL; - li->slottypes = NULL; - li->ssavaluetypes = NULL; + jl_method_instance_t *li = + (jl_method_instance_t*)jl_gc_alloc(ptls, sizeof(jl_method_instance_t), + jl_method_instance_type); + li->inferred = NULL; li->rettype = (jl_value_t*)jl_any_type; 
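    // note: rettype defaults to Any here; type inference narrows it to a tighter result later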
- li->sparam_syms = jl_emptysvec; li->sparam_vals = jl_emptysvec; li->fptr = NULL; + li->unspecialized_ducttape = NULL; li->jlcall_api = 0; li->compile_traced = 0; li->functionObjectsDecls.functionObject = NULL; li->functionObjectsDecls.specFunctionObject = NULL; li->specTypes = NULL; - li->unspecialized_ducttape = NULL; - li->inferred = 0; li->inInference = 0; - li->inCompile = 0; li->def = NULL; - li->constval = NULL; - li->pure = 0; - li->inlineable = 0; - li->propagate_inbounds = 0; return li; } -JL_DLLEXPORT jl_lambda_info_t *jl_new_lambda_info_from_ast(jl_expr_t *ast) +JL_DLLEXPORT jl_code_info_t *jl_new_code_info_uninit(void) +{ + jl_ptls_t ptls = jl_get_ptls_states(); + jl_code_info_t *src = + (jl_code_info_t*)jl_gc_alloc(ptls, sizeof(jl_code_info_t), + jl_code_info_type); + src->code = NULL; + src->slotnames = NULL; + src->slotflags = NULL; + src->slottypes = NULL; + src->ssavaluetypes = NULL; + src->inferred = 0; + src->pure = 0; + src->inlineable = 0; + src->propagate_inbounds = 0; + return src; +} + +jl_code_info_t *jl_new_code_info_from_ast(jl_expr_t *ast) { - jl_lambda_info_t *li=NULL; - JL_GC_PUSH1(&li); - li = jl_new_lambda_info_uninit(); - jl_lambda_info_set_ast(li, ast); + jl_code_info_t *src = NULL; + JL_GC_PUSH1(&src); + src = jl_new_code_info_uninit(); + jl_code_info_set_ast(src, ast); JL_GC_POP(); - return li; + return src; } // invoke (compiling if necessary) the jlcall function pointer for a method template -STATIC_INLINE jl_value_t *jl_call_staged(jl_svec_t *sparam_vals, jl_lambda_info_t *meth, +STATIC_INLINE jl_value_t *jl_call_staged(jl_svec_t *sparam_vals, jl_method_instance_t *generator, jl_value_t **args, uint32_t nargs) { - if (__unlikely(meth->fptr == NULL)) { - jl_compile_linfo(meth); - jl_generate_fptr(meth); + jl_generic_fptr_t fptr; + fptr.fptr = generator->fptr; + fptr.jlcall_api = generator->jlcall_api; + if (__unlikely(fptr.fptr == NULL || fptr.jlcall_api == 0)) { + void *F = jl_compile_linfo(generator, (jl_code_info_t*)generator->inferred).functionObject; + fptr = jl_generate_fptr(generator, F); } - assert(jl_svec_len(meth->sparam_syms) == jl_svec_len(sparam_vals)); - if (__likely(meth->jlcall_api == 0)) - return meth->fptr(args[0], &args[1], nargs-1); + assert(jl_svec_len(generator->def->sparam_syms) == jl_svec_len(sparam_vals)); + if (fptr.jlcall_api == 1) + return fptr.fptr1(args[0], &args[1], nargs-1); + else if (fptr.jlcall_api == 3) + return fptr.fptr3(sparam_vals, args[0], &args[1], nargs-1); else - return ((jl_fptr_sparam_t)meth->fptr)(sparam_vals, args[0], &args[1], nargs-1); + abort(); // shouldn't have inferred any other calling convention } -static jl_lambda_info_t *jl_instantiate_staged(jl_method_t *generator, jl_tupletype_t *tt, jl_svec_t *env) +// return a newly allocated CodeInfo for the function signature +// effectively described by the tuple (specTypes, env, Method) inside linfo +JL_DLLEXPORT jl_code_info_t *jl_code_for_staged(jl_method_instance_t *linfo) { JL_TIMING(STAGED_FUNCTION); + jl_tupletype_t *tt = linfo->specTypes; + jl_svec_t *env = linfo->sparam_vals; size_t i, l; jl_expr_t *ex = NULL; jl_value_t *linenum = NULL; jl_svec_t *sparam_vals = env; - jl_lambda_info_t *func = generator->lambda_template; + jl_method_instance_t *generator = linfo->def->unspecialized; + assert(linfo != generator); + assert(linfo->def->isstaged); + jl_code_info_t *func = NULL; JL_GC_PUSH4(&ex, &linenum, &sparam_vals, &func); jl_ptls_t ptls = jl_get_ptls_states(); int last_in = ptls->in_pure_callback; - assert(jl_svec_len(func->sparam_syms) == 
jl_svec_len(sparam_vals)); + assert(jl_svec_len(linfo->def->sparam_syms) == jl_svec_len(sparam_vals)); JL_TRY { ptls->in_pure_callback = 1; ex = jl_exprn(lambda_sym, 2); - int nargs = func->nargs; + int nargs = linfo->def->nargs; jl_array_t *argnames = jl_alloc_vec_any(nargs); jl_array_ptr_set(ex->args, 0, argnames); for (i = 0; i < nargs; i++) - jl_array_ptr_set(argnames, i, jl_array_ptr_ref(func->slotnames, i)); + jl_array_ptr_set(argnames, i, jl_array_ptr_ref(((jl_code_info_t*)generator->inferred)->slotnames, i)); jl_expr_t *scopeblock = jl_exprn(jl_symbol("scope-block"), 1); jl_array_ptr_set(ex->args, 1, scopeblock); jl_expr_t *body = jl_exprn(jl_symbol("block"), 2); jl_array_ptr_set(((jl_expr_t*)jl_exprarg(ex,1))->args, 0, body); - linenum = jl_box_long(generator->line); + linenum = jl_box_long(linfo->def->line); jl_value_t *linenode = jl_new_struct(jl_linenumbernode_type, linenum); jl_array_ptr_set(body->args, 0, linenode); // invoke code generator assert(jl_nparams(tt) == jl_array_len(argnames) || - (func->isva && (jl_nparams(tt) >= jl_array_len(argnames) - 1))); + (linfo->def->isva && (jl_nparams(tt) >= jl_array_len(argnames) - 1))); jl_array_ptr_set(body->args, 1, - jl_call_staged(sparam_vals, func, jl_svec_data(tt->parameters), jl_nparams(tt))); + jl_call_staged(sparam_vals, generator, jl_svec_data(tt->parameters), jl_nparams(tt))); - if (func->sparam_syms != jl_emptysvec) { + if (linfo->def->sparam_syms != jl_emptysvec) { // mark this function as having the same static parameters as the generator - size_t i, nsp = jl_svec_len(func->sparam_syms); + size_t i, nsp = jl_svec_len(linfo->def->sparam_syms); jl_expr_t *newast = jl_exprn(jl_symbol("with-static-parameters"), nsp + 1); jl_exprarg(newast, 0) = (jl_value_t*)ex; // (with-static-parameters func_expr sp_1 sp_2 ...) 
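            // copy each static-parameter name from the method into the wrapper expression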
for (i = 0; i < nsp; i++) - jl_exprarg(newast, i+1) = jl_svecref(func->sparam_syms, i); + jl_exprarg(newast, i+1) = jl_svecref(linfo->def->sparam_syms, i); ex = newast; } // need to eval macros in the right module, but not give a warning for the `eval` call unless that results in a call to `eval` - func = (jl_lambda_info_t*)jl_toplevel_eval_in_warn(generator->module, (jl_value_t*)ex, 1); - - // finish marking this as a specialization of the generator - func->isva = generator->lambda_template->isva; - func->def = generator; - jl_gc_wb(func, generator); - func->sparam_vals = env; - jl_gc_wb(func, env); - func->specTypes = tt; - jl_gc_wb(func, tt); + func = (jl_code_info_t*)jl_toplevel_eval_in_warn(linfo->def->module, (jl_value_t*)ex, 1); + assert(jl_is_code_info(func)); jl_array_t *stmts = (jl_array_t*)func->code; - for(i = 0, l = jl_array_len(stmts); i < l; i++) { - jl_array_ptr_set(stmts, i, jl_resolve_globals(jl_array_ptr_ref(stmts, i), func)); + for (i = 0, l = jl_array_len(stmts); i < l; i++) { + jl_array_ptr_set(stmts, i, jl_resolve_globals(jl_array_ptr_ref(stmts, i), linfo->def->module)); } ptls->in_pure_callback = last_in; } @@ -536,77 +540,66 @@ static jl_lambda_info_t *jl_instantiate_staged(jl_method_t *generator, jl_tuplet return func; } -static jl_lambda_info_t *jl_copy_lambda(jl_lambda_info_t *linfo) +JL_DLLEXPORT jl_code_info_t *jl_copy_code_info(jl_code_info_t *src) { - assert(linfo->sparam_vals == jl_emptysvec); - jl_lambda_info_t *new_linfo = jl_new_lambda_info_uninit(); - new_linfo->code = linfo->code; - new_linfo->slotnames = linfo->slotnames; - new_linfo->slottypes = linfo->slottypes; - new_linfo->slotflags = linfo->slotflags; - new_linfo->ssavaluetypes = linfo->ssavaluetypes; - new_linfo->sparam_syms = linfo->sparam_syms; - new_linfo->sparam_vals = linfo->sparam_vals; - new_linfo->pure = linfo->pure; - new_linfo->inlineable = linfo->inlineable; - new_linfo->propagate_inbounds = linfo->propagate_inbounds; - new_linfo->nargs = linfo->nargs; - new_linfo->isva = linfo->isva; - new_linfo->rettype = linfo->rettype; - new_linfo->def = linfo->def; - new_linfo->constval = linfo->constval; - return new_linfo; + jl_ptls_t ptls = jl_get_ptls_states(); + jl_code_info_t *newsrc = + (jl_code_info_t*)jl_gc_alloc(ptls, sizeof(jl_code_info_t), + jl_code_info_type); + *newsrc = *src; + return newsrc; } // return a new lambda-info that has some extra static parameters merged in -JL_DLLEXPORT jl_lambda_info_t *jl_get_specialized(jl_method_t *m, jl_tupletype_t *types, jl_svec_t *sp, int allow_exec) +jl_method_instance_t *jl_get_specialized(jl_method_t *m, jl_tupletype_t *types, jl_svec_t *sp) { - jl_lambda_info_t *linfo = m->lambda_template; - jl_lambda_info_t *new_linfo; - assert(jl_svec_len(linfo->sparam_syms) == jl_svec_len(sp) || sp == jl_emptysvec); - - if (!m->isstaged) { - new_linfo = jl_copy_lambda(linfo); - new_linfo->specTypes = types; - new_linfo->def = m; - new_linfo->sparam_vals = sp; - } - else if (!allow_exec) { - new_linfo = jl_copy_lambda(linfo); - new_linfo->specTypes = types; - new_linfo->def = m; - new_linfo->sparam_vals = sp; - jl_set_lambda_code_null(new_linfo); - } - else { - new_linfo = jl_instantiate_staged(m, types, sp); - } + assert(jl_svec_len(m->sparam_syms) == jl_svec_len(sp) || sp == jl_emptysvec); + jl_method_instance_t *new_linfo = jl_new_method_instance_uninit(); + new_linfo->def = m; + new_linfo->specTypes = types; + new_linfo->sparam_vals = sp; return new_linfo; } -JL_DLLEXPORT void jl_method_init_properties(jl_method_t *m) +JL_DLLEXPORT void 
jl_method_set_source(jl_method_t *m, jl_code_info_t *src) { - jl_lambda_info_t *li = m->lambda_template; - size_t j, n = jl_array_len((jl_array_t*)li->code); - jl_value_t **body = (jl_value_t**)jl_array_data((jl_array_t*)li->code); - for(j=0; j < n; j++) { - jl_value_t *st = body[j]; + uint8_t j; + uint8_t called = 0; + for (j = 1; j < m->nargs && j <= 8; j++) { + jl_value_t *ai = jl_array_ptr_ref(src->slotnames, j); + if (ai == (jl_value_t*)unused_sym) + continue; + if (jl_array_uint8_ref(src->slotflags, j) & 64) + called |= (1 << (j - 1)); + } + m->called = called; + + assert(jl_typeis(src->code, jl_array_any_type)); + jl_array_t *stmts = (jl_array_t*)src->code; + size_t i, n = jl_array_len(stmts); + jl_array_t *copy = jl_alloc_vec_any(n); + JL_GC_PUSH1(©); + int set_lineno = 0; + for (i = 0; i < n; i++) { + jl_value_t *st = jl_array_ptr_ref(stmts, i); if (jl_is_expr(st) && ((jl_expr_t*)st)->head == line_sym) { - m->line = jl_unbox_long(jl_exprarg(st, 0)); - m->file = (jl_sym_t*)jl_exprarg(st, 1); - body[j] = jl_nothing; - break; + if (!set_lineno) { + m->line = jl_unbox_long(jl_exprarg(st, 0)); + m->file = (jl_sym_t*)jl_exprarg(st, 1); + st = jl_nothing; + set_lineno = 1; + } } + else { + st = jl_resolve_globals(st, m->module); + } + jl_array_ptr_set(copy, i, st); } - int i; - uint8_t called=0; - for(i=1; i < li->nargs && i <= 8; i++) { - jl_value_t *ai = jl_array_ptr_ref(li->slotnames,i); - if (ai == (jl_value_t*)unused_sym) continue; - if (jl_array_uint8_ref(li->slotflags,i)&64) - called |= (1<<(i-1)); - } - m->called = called; + copy = jl_compress_ast(m, copy); + m->source = jl_copy_code_info(src); + jl_gc_wb(m, m->source); + m->source->code = copy; + JL_GC_POP(); } JL_DLLEXPORT jl_method_t *jl_new_method_uninit(void) @@ -617,57 +610,61 @@ JL_DLLEXPORT jl_method_t *jl_new_method_uninit(void) m->specializations.unknown = jl_nothing; m->sig = NULL; m->tvars = NULL; + m->sparam_syms = NULL; m->ambig = jl_nothing; m->roots = NULL; m->module = ptls->current_module; - m->lambda_template = NULL; + m->source = NULL; + m->unspecialized = NULL; m->name = NULL; m->file = empty_sym; m->line = 0; m->called = 0xff; m->invokes.unknown = NULL; m->isstaged = 0; + m->isva = 0; + m->nargs = 0; m->needs_sparam_vals_ducttape = 2; m->traced = 0; JL_MUTEX_INIT(&m->writelock); return m; } -jl_method_t *jl_new_method(jl_lambda_info_t *definition, jl_sym_t *name, jl_tupletype_t *sig, jl_svec_t *tvars, int isstaged) +jl_method_t *jl_new_method(jl_code_info_t *definition, + jl_sym_t *name, + jl_tupletype_t *sig, + size_t nargs, + int isva, + jl_svec_t *tvars, + int isstaged) { - assert(definition->code); + size_t i, l = jl_svec_len(tvars); + jl_svec_t *sparam_syms = jl_alloc_svec_uninit(l); + for (i = 0; i < l; i++) { + jl_svecset(sparam_syms, i, ((jl_tvar_t*)jl_svecref(tvars, i))->name); + } + jl_value_t *root = (jl_value_t*)sparam_syms; + JL_GC_PUSH1(&root); + jl_method_t *m = jl_new_method_uninit(); m->isstaged = isstaged; m->name = name; m->sig = sig; + m->isva = isva; + m->nargs = nargs; if (jl_svec_len(tvars) == 1) tvars = (jl_svec_t*)jl_svecref(tvars, 0); m->tvars = tvars; - JL_GC_PUSH1(&m); - // the front end may add this lambda to multiple methods; make a copy if so - jl_method_t *oldm = definition->def; - int reused = oldm != NULL; - if (reused) - definition = jl_copy_lambda(definition); - - definition->specTypes = isstaged ? 
jl_anytuple_type : sig; - m->lambda_template = definition; - jl_gc_wb(m, definition); - definition->def = m; - jl_gc_wb(definition, m); - - if (reused) { - m->file = oldm->file; - m->line = oldm->line; - m->called = oldm->called; - } - else { - jl_array_t *stmts = (jl_array_t*)definition->code; - int i, l; - for(i = 0, l = jl_array_len(stmts); i < l; i++) { - jl_array_ptr_set(stmts, i, jl_resolve_globals(jl_array_ptr_ref(stmts, i), definition)); - } - jl_method_init_properties(m); + m->sparam_syms = sparam_syms; + root = (jl_value_t*)m; + jl_method_set_source(m, definition); + if (isstaged) { + // remove the code from `->source` (since generic source isn't present) + // and use the `->unspecialized` field to be the source generator + m->unspecialized = jl_get_specialized(m, jl_anytuple_type, jl_emptysvec); + jl_gc_wb(m, m->unspecialized); + m->unspecialized->inferred = (jl_value_t*)m->source; + m->source = NULL; } JL_GC_POP(); return m; diff --git a/src/anticodegen.c b/src/anticodegen.c index 948c2a81b08c3..552ae552e5911 100644 --- a/src/anticodegen.c +++ b/src/anticodegen.c @@ -25,7 +25,7 @@ JL_DLLEXPORT void *jl_LLVMCreateDisasm(const char *TripleName, void *DisInfo, in JL_DLLEXPORT size_t jl_LLVMDisasmInstruction(void *DC, uint8_t *Bytes, uint64_t BytesSize, uint64_t PC, char *OutString, size_t OutStringSize) UNAVAILABLE void jl_init_codegen(void) { } -void jl_fptr_to_llvm(jl_fptr_t fptr, jl_lambda_info_t *lam, int specsig) +void jl_fptr_to_llvm(jl_fptr_t fptr, jl_method_instance_t *lam, int specsig) { if (!specsig) lam->fptr = fptr; @@ -36,16 +36,16 @@ int jl_getFunctionInfo(jl_frame_t **frames, uintptr_t pointer, int skipC, int no return 0; } -void jl_register_fptrs(uint64_t sysimage_base, void **fptrs, jl_lambda_info_t **linfos, size_t n) +void jl_register_fptrs(uint64_t sysimage_base, void **fptrs, jl_method_instance_t **linfos, size_t n) { (void)sysimage_base; (void)fptrs; (void)linfos; (void)n; } -void jl_compile_linfo(jl_lambda_info_t *li) { } +void jl_compile_linfo(jl_method_instance_t *li) { } -jl_value_t *jl_interpret_call(jl_lambda_info_t *lam, jl_value_t **args, uint32_t nargs); -void jl_generate_fptr(jl_lambda_info_t *li) +jl_value_t *jl_interpret_call(jl_method_instance_t *lam, jl_value_t **args, uint32_t nargs); +void jl_generate_fptr(jl_method_instance_t *li) { li->fptr = (jl_fptr_t)&jl_interpret_call; - li->jlcall_api = 3; + li->jlcall_api = 4; } diff --git a/src/ast.c b/src/ast.c index 6f4df6823e664..8bcf0b31a48b4 100644 --- a/src/ast.c +++ b/src/ast.c @@ -141,7 +141,7 @@ value_t fl_invoke_julia_macro(fl_context_t *fl_ctx, value_t *args, uint32_t narg jl_ptls_t ptls = jl_get_ptls_states(); if (nargs < 1) argcount(fl_ctx, "invoke-julia-macro", nargs, 1); - jl_lambda_info_t *mfunc = NULL; + jl_method_instance_t *mfunc = NULL; jl_value_t **margs; // Reserve one more slot for the result JL_GC_PUSHARGS(margs, nargs + 1); @@ -515,7 +515,7 @@ static jl_value_t *scm_to_julia_(fl_context_t *fl_ctx, value_t e, int eo) e = cdr_(e); } if (sym == lambda_sym) - ex = (jl_value_t*)jl_new_lambda_info_from_ast((jl_expr_t*)ex); + ex = (jl_value_t*)jl_new_code_info_from_ast((jl_expr_t*)ex); JL_GC_POP(); if (sym == list_sym) return (jl_value_t*)((jl_expr_t*)ex)->args; @@ -813,13 +813,13 @@ JL_DLLEXPORT jl_value_t *jl_macroexpand(jl_value_t *expr) } // wrap expr in a thunk AST -jl_lambda_info_t *jl_wrap_expr(jl_value_t *expr) +jl_code_info_t *jl_wrap_expr(jl_value_t *expr) { // `(lambda () (() () () ()) ,expr) jl_expr_t *le=NULL, *bo=NULL; jl_value_t *vi=NULL; jl_value_t *mt = 
jl_an_empty_vec_any; - jl_lambda_info_t *li = NULL; - JL_GC_PUSH4(&le, &vi, &bo, &li); + jl_code_info_t *src = NULL; + JL_GC_PUSH4(&le, &vi, &bo, &src); le = jl_exprn(lambda_sym, 3); jl_array_ptr_set(le->args, 0, mt); vi = (jl_value_t*)jl_alloc_vec_any(4); @@ -836,26 +836,13 @@ jl_lambda_info_t *jl_wrap_expr(jl_value_t *expr) expr = (jl_value_t*)bo; } jl_array_ptr_set(le->args, 2, expr); - li = jl_new_lambda_info_from_ast(le); + src = jl_new_code_info_from_ast(le); JL_GC_POP(); - return li; + return src; } // syntax tree accessors -JL_DLLEXPORT int jl_is_rest_arg(jl_value_t *ex) -{ - if (!jl_is_expr(ex)) return 0; - if (((jl_expr_t*)ex)->head != colons_sym) return 0; - jl_expr_t *atype = (jl_expr_t*)jl_exprarg(ex,1); - if (!jl_is_expr(atype)) return 0; - if (((jl_expr_t*)atype)->head == dots_sym) - return 1; - if (atype->head != call_sym || jl_array_len(atype->args) < 3 || jl_array_len(atype->args) > 4) - return 0; - return ((jl_sym_t*)jl_exprarg(atype,1)) == vararg_sym; -} - JL_DLLEXPORT jl_value_t *jl_copy_ast(jl_value_t *expr) { if (expr == NULL) { diff --git a/src/builtins.c b/src/builtins.c index f93f27475a96f..ab42d879286fc 100644 --- a/src/builtins.c +++ b/src/builtins.c @@ -1187,7 +1187,8 @@ void jl_init_primitives(void) add_builtin("IntrinsicFunction", (jl_value_t*)jl_intrinsic_type); add_builtin("Function", (jl_value_t*)jl_function_type); add_builtin("Builtin", (jl_value_t*)jl_builtin_type); - add_builtin("LambdaInfo", (jl_value_t*)jl_lambda_info_type); + add_builtin("MethodInstance", (jl_value_t*)jl_method_instance_type); + add_builtin("CodeInfo", (jl_value_t*)jl_code_info_type); add_builtin("Ref", (jl_value_t*)jl_ref_type); add_builtin("Ptr", (jl_value_t*)jl_pointer_type); add_builtin("Task", (jl_value_t*)jl_task_type); @@ -1259,8 +1260,8 @@ static size_t jl_static_show_x_(JL_STREAM *out, jl_value_t *v, jl_datatype_t *vt n += jl_static_show_x(out, (jl_value_t*)m->module, depth); n += jl_printf(out, ".%s(...)", jl_symbol_name(m->name)); } - else if (vt == jl_lambda_info_type) { - jl_lambda_info_t *li = (jl_lambda_info_t*)v; + else if (vt == jl_method_instance_type) { + jl_method_instance_t *li = (jl_method_instance_t*)v; if (li->def) { n += jl_static_show_x(out, (jl_value_t*)li->def->module, depth); if (li->specTypes) { @@ -1274,7 +1275,7 @@ static size_t jl_static_show_x_(JL_STREAM *out, jl_value_t *v, jl_datatype_t *vt } else { n += jl_printf(out, " -> "); - n += jl_static_show_x(out, (jl_value_t*)li->code, depth); + n += jl_static_show_x(out, li->inferred, depth); } } else if (vt == jl_simplevector_type) { diff --git a/src/ccall.cpp b/src/ccall.cpp index f85e30081eb03..16b58bea717a9 100644 --- a/src/ccall.cpp +++ b/src/ccall.cpp @@ -251,6 +251,7 @@ static Value *emit_plt(FunctionType *functype, const AttributeSet &attrs, Function *plt = Function::Create(functype, GlobalVariable::ExternalLinkage, fname, M); + jl_init_function(plt); plt->setAttributes(attrs); if (cc != CallingConv::C) plt->setCallingConv(cc); @@ -628,7 +629,7 @@ static jl_value_t* try_eval(jl_value_t *ex, jl_codectx_t *ctx, const char *failu if (constant || jl_is_ssavalue(ex)) return constant; JL_TRY { - constant = jl_interpret_toplevel_expr_in(ctx->module, ex, ctx->linfo); + constant = jl_interpret_toplevel_expr_in(ctx->module, ex, ctx->source, ctx->linfo->sparam_vals); } JL_CATCH { if (compiletime) @@ -1041,10 +1042,13 @@ static jl_cgval_t emit_llvmcall(jl_value_t **args, size_t nargs, jl_codectx_t *c std::stringstream name; name << "jl_llvmcall" << llvmcallnumbering++; f->setName(name.str()); + 
jl_init_function(f); f = cast(prepare_call(function_proto(f))); } - else + else { + jl_init_function(f); f->setLinkage(GlobalValue::LinkOnceODRLinkage); + } // the actual call builder.CreateCall(prepare_call(gcroot_flush_func)); diff --git a/src/cgutils.cpp b/src/cgutils.cpp index 167f7b302f320..039188ef55776 100644 --- a/src/cgutils.cpp +++ b/src/cgutils.cpp @@ -88,7 +88,7 @@ static DIType julia_type_to_di(jl_value_t *jt, DIBuilder *dbuilder, bool isboxed if (jl_is_abstracttype(jt) || !jl_is_datatype(jt) || jl_is_array_type(jt) || jt == (jl_value_t*)jl_sym_type || jt == (jl_value_t*)jl_module_type || jt == (jl_value_t*)jl_simplevector_type || jt == (jl_value_t*)jl_datatype_type || - jt == (jl_value_t*)jl_lambda_info_type) + jt == (jl_value_t*)jl_method_instance_type) return jl_pvalue_dillvmt; if (jl_is_typector(jt) || jl_is_typevar(jt)) return jl_pvalue_dillvmt; @@ -246,8 +246,8 @@ static Value *literal_pointer_val(jl_value_t *p) // functions are prefixed with a - return julia_gv("-", m->name, m->module, p); } - if (jl_is_lambda_info(p)) { - jl_lambda_info_t *linfo = (jl_lambda_info_t*)p; + if (jl_is_method_instance(p)) { + jl_method_instance_t *linfo = (jl_method_instance_t*)p; // Type-inferred functions are also prefixed with a - if (linfo->def) return julia_gv("-", linfo->def->name, linfo->def->module, p); @@ -905,15 +905,15 @@ static inline jl_module_t *topmod(jl_codectx_t *ctx) static jl_value_t *expr_type(jl_value_t *e, jl_codectx_t *ctx) { if (jl_is_ssavalue(e)) { - if (jl_is_long(ctx->linfo->ssavaluetypes)) + if (jl_is_long(ctx->source->ssavaluetypes)) return (jl_value_t*)jl_any_type; int idx = ((jl_ssavalue_t*)e)->id; - assert(jl_is_array(ctx->linfo->ssavaluetypes)); - jl_array_t *ssavalue_types = (jl_array_t*)ctx->linfo->ssavaluetypes; + assert(jl_is_array(ctx->source->ssavaluetypes)); + jl_array_t *ssavalue_types = (jl_array_t*)ctx->source->ssavaluetypes; return jl_array_ptr_ref(ssavalue_types, idx); } if (jl_typeis(e, jl_slotnumber_type)) { - jl_array_t *slot_types = (jl_array_t*)ctx->linfo->slottypes; + jl_array_t *slot_types = (jl_array_t*)ctx->source->slottypes; if (!jl_is_array(slot_types)) return (jl_value_t*)jl_any_type; return jl_array_ptr_ref(slot_types, jl_slot_number(e)-1); @@ -1439,7 +1439,7 @@ static Value *call_with_unsigned(Function *ufunc, Value *v) return Call; } -static void jl_add_linfo_root(jl_lambda_info_t *li, jl_value_t *val); +static void jl_add_method_root(jl_method_instance_t *li, jl_value_t *val); static Value *as_value(Type *t, const jl_cgval_t &v) { @@ -1476,7 +1476,7 @@ static Value *boxed(const jl_cgval_t &vinfo, jl_codectx_t *ctx, bool gcrooted) if (Constant *c = dyn_cast(v)) { jl_value_t *s = static_constant_instance(c, jt); if (s) { - jl_add_linfo_root(ctx->linfo, s); + jl_add_method_root(ctx->linfo, s); return literal_pointer_val(s); } } diff --git a/src/codegen.cpp b/src/codegen.cpp index 5aa5cbaac1deb..79e7902857064 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -279,6 +279,18 @@ DICompositeType jl_di_func_null_sig; #endif #endif + +extern "C" +int32_t jl_jlcall_api(const void *function) +{ + // give the function an index in the constant lookup table + if (function == NULL) + return 0; + Function *F = (Function*)function; + return (F->getFunctionType() == jl_func_sig ? 
1 : 3); +} + + // constants static Value *V_null; static Type *NoopType; @@ -510,7 +522,9 @@ typedef struct { std::vector ssavalue_assigned; std::map *arrayvars; jl_module_t *module; - jl_lambda_info_t *linfo; + jl_method_instance_t *linfo; + jl_code_info_t *source; + jl_array_t *code; const char *name; StringRef file; Value *spvals_ptr; @@ -521,6 +535,7 @@ typedef struct { bool vaStack; // varargs stack-allocated bool sret; int nReqArgs; + int nargs; CallInst *ptlsStates; Value *signalPage; @@ -705,14 +720,14 @@ static bool store_unboxed_p(int s, jl_codectx_t *ctx) jl_varinfo_t &vi = ctx->slots[s]; // only store a variable unboxed if type inference has run, which // checks that the variable is not referenced undefined. - return (ctx->linfo->inferred && !vi.usedUndef && + return (ctx->source->inferred && !vi.usedUndef && // don't unbox vararg tuples s != ctx->vaSlot && store_unboxed_p(vi.value.typ)); } static jl_sym_t *slot_symbol(int s, jl_codectx_t *ctx) { - return (jl_sym_t*)jl_array_ptr_ref(ctx->linfo->slotnames, s); + return (jl_sym_t*)jl_array_ptr_ref(ctx->source->slotnames, s); } static Value *alloc_local(int s, jl_codectx_t *ctx) @@ -730,7 +745,7 @@ static Value *alloc_local(int s, jl_codectx_t *ctx) Value *lv = builder.CreateAlloca(vtype, 0, jl_symbol_name(slot_symbol(s,ctx))); vi.value = mark_julia_slot(lv, jt, tbaa_stack); // slot is not immutable if there are multiple assignments - vi.value.isimmutable &= (vi.isSA && s >= ctx->linfo->nargs); + vi.value.isimmutable &= (vi.isSA && s >= ctx->nargs); assert(vi.value.isboxed == false); return lv; } @@ -769,47 +784,85 @@ void jl_dump_compiles(void *s) // --- entry point --- //static int n_emit=0; -static std::unique_ptr emit_function(jl_lambda_info_t *lam, jl_llvm_functions_t *declarations); -void jl_add_linfo_in_flight(StringRef name, jl_lambda_info_t *linfo, const DataLayout &DL); +static std::unique_ptr emit_function(jl_method_instance_t *lam, jl_code_info_t *src, jl_llvm_functions_t *declarations); +void jl_add_linfo_in_flight(StringRef name, jl_method_instance_t *linfo, const DataLayout &DL); // this generates llvm code for the lambda info // and adds the result to the jitlayers // (and the shadow module), but doesn't yet compile // or generate object code for it -// objective: assign li->functionObject -extern "C" void jl_compile_linfo(jl_lambda_info_t *li) +extern "C" +jl_llvm_functions_t jl_compile_linfo(jl_method_instance_t *li, jl_code_info_t *src) { JL_TIMING(CODEGEN); - if (li->jlcall_api == 2) { - // delete code for functions reduced to a constant - jl_set_lambda_code_null(li); - return; - } - // grab the codegen lock and see if this needs to be compiled - if (li->functionObjectsDecls.functionObject != NULL) { - return; + assert(jl_is_method_instance(li)); + jl_llvm_functions_t decls = {}; + // Step 1. See if it is already compiled, + // Get the codegen lock, + // And get the source + if (!src) { + // Step 1a. 
If the caller didn't provide the source, + // try to infer it for ourself + // first see if it is already compiled + decls = li->functionObjectsDecls; + if (decls.functionObject != NULL || li->jlcall_api == 2) { + return decls; + } + JL_LOCK(&codegen_lock); + decls = li->functionObjectsDecls; + if (decls.functionObject != NULL || li->jlcall_api == 2) { + JL_UNLOCK(&codegen_lock); + return decls; + } + + // see if it is inferred + src = (jl_code_info_t*)li->inferred; + if (src) { + if (!jl_is_code_info(src)) { + src = jl_type_infer(li, 0); + } + if (!src || li->jlcall_api == 2) { + JL_UNLOCK(&codegen_lock); + return decls; + } + } + else { + // failed to compile + JL_UNLOCK(&codegen_lock); + return decls; + } } - JL_LOCK(&codegen_lock); - if (li->functionObjectsDecls.functionObject != NULL) { - JL_UNLOCK(&codegen_lock); - return; + else { + // similar to above, but never returns a NULL + // decl (unless compile fails), even if jlcall_api == 2 + decls = li->functionObjectsDecls; + if (decls.functionObject != NULL) { + return decls; + } + JL_LOCK(&codegen_lock); + decls = li->functionObjectsDecls; + if (decls.functionObject != NULL) { + JL_UNLOCK(&codegen_lock); + return decls; + } } - // setup global state - assert(!li->inInference); - li->inCompile = 1; + JL_GC_PUSH1(&src); + assert(jl_is_code_info(src)); + + // Step 2: setup global state BasicBlock *old = nested_compile ? builder.GetInsertBlock() : NULL; DebugLoc olddl = builder.getCurrentDebugLocation(); bool last_n_c = nested_compile; if (!nested_compile && dump_compiles_stream != NULL) last_time = jl_hrtime(); nested_compile = true; + + // Step 3. actually do the work of emitting the function std::unique_ptr m; Function *f = NULL, *specf = NULL; - // actually do the work of emitting the function JL_TRY { - m = emit_function(li, &li->functionObjectsDecls); - f = (Function*)li->functionObjectsDecls.functionObject; - specf = (Function*)li->functionObjectsDecls.specFunctionObject; + m = emit_function(li, src, &li->functionObjectsDecls); + decls = li->functionObjectsDecls; //n_emit++; } JL_CATCH { @@ -822,10 +875,13 @@ extern "C" void jl_compile_linfo(jl_lambda_info_t *li) builder.SetInsertPoint(old); builder.SetCurrentDebugLocation(olddl); } - li->inCompile = 0; JL_UNLOCK(&codegen_lock); // Might GC jl_rethrow_with_add("error compiling %s", jl_symbol_name(li->def ? li->def->name : anonymous_sym)); } + f = (Function*)decls.functionObject; + specf = (Function*)decls.specFunctionObject; + + // Step 4. Prepare debug info to receive this function // record that this function name came from this linfo, // so we can build a reverse mapping for debug-info. bool toplevel = li->def == NULL; @@ -842,28 +898,27 @@ extern "C" void jl_compile_linfo(jl_lambda_info_t *li) jl_add_linfo_in_flight((specf ? specf : f)->getName(), li, DL); } - // mark the pointer calling convention - li->jlcall_api = (f->getFunctionType() == jl_func_sig ? 0 : 1); - li->fptr = NULL; - - // success. add the result to the execution engine now + // Step 5. 
Add the result to the execution engine now jl_finalize_module(m.release(), !toplevel); - // if not inlineable, code won't be needed again - if (JL_DELETE_NON_INLINEABLE && jl_options.debug_level <= 1 && - li->def && li->inferred && !li->inlineable && - li != li->def->lambda_template && !imaging_mode) { - jl_set_lambda_code_null(li); + if (li->jlcall_api != 2) { + // if not inlineable, code won't be needed again + if (JL_DELETE_NON_INLINEABLE && jl_options.debug_level <= 1 && + li->def && li->inferred && jl_is_code_info(li->inferred) && + !((jl_code_info_t*)li->inferred)->inlineable && + li != li->def->unspecialized && !imaging_mode) { + li->inferred = jl_nothing; + } } - // done compiling: restore global state + // Step 6: Done compiling: Restore global state if (old != NULL) { builder.SetInsertPoint(old); builder.SetCurrentDebugLocation(olddl); } - li->inCompile = 0; nested_compile = last_n_c; JL_UNLOCK(&codegen_lock); // Might GC + if (dump_compiles_stream != NULL) { uint64_t this_time = jl_hrtime(); jl_printf(dump_compiles_stream, "%" PRIu64 "\t\"", this_time - last_time); @@ -871,6 +926,8 @@ extern "C" void jl_compile_linfo(jl_lambda_info_t *li) jl_printf(dump_compiles_stream, "\"\n"); last_time = this_time; } + JL_GC_POP(); + return decls; } #ifndef LLVM37 @@ -931,7 +988,7 @@ static uint64_t getAddressForFunction(llvm::Function *llvmf) llvm::raw_fd_ostream out(1,false); #endif #ifdef USE_MCJIT - jl_finalize_function(llvmf, NULL); + jl_finalize_function(llvmf); uint64_t ret = jl_ExecutionEngine->getFunctionAddress(llvmf->getName()); // delay executing trace callbacks until here to make sure there's no // recursive compilation. @@ -954,20 +1011,137 @@ uint64_t jl_get_llvm_fptr(llvm::Function *llvmf) return addr; } -// this assumes that jl_compile_linfo has already been called -// and forces compilation of the lambda info -extern "C" void jl_generate_fptr(jl_lambda_info_t *li) +static jl_method_instance_t *jl_get_unspecialized(jl_method_instance_t *method) { - if (li->jlcall_api == 2) return; + // one unspecialized version of a function can be shared among all cached specializations + jl_method_t *def = method->def; + if (def->needs_sparam_vals_ducttape == 2) { + if (def->isstaged) { + def->needs_sparam_vals_ducttape = 1; + } + else { + // determine if this needs an unspec version compiled for each + // sparam, or whether they can be shared + // TODO: remove this once runtime intrinsics are hooked up + int needs_sparam_vals_ducttape = 0; + if (method->sparam_vals != jl_emptysvec) { + jl_array_t *code = (jl_array_t*)def->source->code; + JL_GC_PUSH1(&code); + if (!jl_typeis(code, jl_array_any_type)) + code = jl_uncompress_ast(def, code); + size_t i, l = jl_array_len(code); + for (i = 0; i < l; i++) { + if (jl_has_intrinsics(method, jl_array_ptr_ref(code, i), def->module)) { + needs_sparam_vals_ducttape = 1; + break; + } + } + JL_GC_POP(); + } + def->needs_sparam_vals_ducttape = needs_sparam_vals_ducttape; + } + } + if (def->needs_sparam_vals_ducttape) { + return method; + } + if (def->unspecialized == NULL) { + JL_LOCK(&def->writelock); + if (def->unspecialized == NULL) { + // XXX: use computed env rather than empty svec + def->unspecialized = jl_get_specialized(def, def->sig, jl_emptysvec); + jl_gc_wb(def, def->unspecialized); + } + JL_UNLOCK(&def->writelock); + } + return def->unspecialized; +} + +// this compiles li and emits fptr +extern "C" +jl_generic_fptr_t jl_generate_fptr(jl_method_instance_t *li, void *_F) +{ + Function *F = (Function*)_F; + jl_generic_fptr_t fptr; + fptr.fptr = 
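// ---- editor's note ---------------------------------------------------
// jl_generate_fptr returns a jl_generic_fptr_t: an entry pointer plus a
// jlcall_api tag describing its calling convention.  The tag values, as
// used throughout this patch (jl_jlcall_api above, anticodegen.c, and the
// serializer in src/dump.c):
//   0  not yet assigned
//   1  standard jlcall signature (jl_func_sig)
//   2  constant-folded: no code, the result lives in li->inferred
//   3  jlcall taking the sparam environment (jl_func_sig_sparams)
//   4  interpreter fallback (jl_interpret_call, anticodegen.c)
// A self-contained description helper; the readings for tags 0 and 3 are
// this editor's interpretation of the patch, not upstream documentation:

static const char *jlcall_api_desc(int api)
{
    switch (api) {
    case 0:  return "unset: no entry point chosen yet";
    case 1:  return "jlcall: f(func, args, nargs)";
    case 2:  return "constant return stored in li->inferred";
    case 3:  return "jlcall with static-parameter environment";
    case 4:  return "interpreted via jl_interpret_call";
    default: return "unknown jlcall_api";
    }
}
// ----------------------------------------------------------------------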
li->fptr; + fptr.jlcall_api = li->jlcall_api; + if (fptr.fptr && fptr.jlcall_api) { + return fptr; + } + fptr.fptr = li->unspecialized_ducttape; + fptr.jlcall_api = 1; + if (!li->inferred && fptr.fptr) { + return fptr; + } JL_LOCK(&codegen_lock); - // objective: assign li->fptr - assert(li->functionObjectsDecls.functionObject); - assert(!li->inCompile); - if (li->fptr == NULL) { - li->fptr = (jl_fptr_t)getAddressForFunction((Function*)li->functionObjectsDecls.functionObject); - assert(li->fptr != NULL); + fptr.fptr = li->fptr; + fptr.jlcall_api = li->jlcall_api; + if (fptr.fptr && fptr.jlcall_api) { + JL_UNLOCK(&codegen_lock); + return fptr; + } + jl_method_instance_t *unspec = NULL; + if (li->def && !li->def->isstaged && li->def->unspecialized) { + unspec = li->def->unspecialized; + } + if (!F || !jl_can_finalize_function(F)) { + // can't compile F in the JIT right now, + // so instead compile an unspecialized version + // and return its fptr instead + if (!unspec) + unspec = jl_get_unspecialized(li); // get-or-create the unspecialized version to cache the result + jl_code_info_t *src = unspec->def->isstaged ? jl_code_for_staged(unspec) : unspec->def->source; + fptr.fptr = unspec->fptr; + fptr.jlcall_api = unspec->jlcall_api; + if (fptr.fptr && fptr.jlcall_api) { + JL_UNLOCK(&codegen_lock); + return fptr; + } + jl_llvm_functions_t decls = unspec->functionObjectsDecls; + if (unspec == li) { + // temporarily clear the decls so that it will compile our unspec version of src + unspec->functionObjectsDecls.functionObject = NULL; + unspec->functionObjectsDecls.specFunctionObject = NULL; + } + F = (Function*)jl_compile_linfo(unspec, src).functionObject; + if (unspec == li) { + unspec->functionObjectsDecls = decls; + } + assert(jl_can_finalize_function(F)); + } + assert(F); + fptr.fptr = (jl_fptr_t)getAddressForFunction(F); + fptr.jlcall_api = jl_jlcall_api(F); + assert(fptr.fptr != NULL); + // decide if the fptr should be cached somewhere also + if (li->functionObjectsDecls.functionObject == F) { + if (li->fptr) { + // don't change fptr as that leads to race conditions + // with the (not) simultaneous update to jlcall_api + } + else if (li->inferred || fptr.jlcall_api != 1) { + li->jlcall_api = fptr.jlcall_api; + li->fptr = fptr.fptr; + } + else { + li->unspecialized_ducttape = fptr.fptr; + } + } + else if (unspec) { + if (unspec->fptr) { + // don't change fptr as that leads to race conditions + // with the (not) simultaneous update to jlcall_api + } + else if (unspec == li) { + if (fptr.jlcall_api == 1) + li->unspecialized_ducttape = fptr.fptr; + } + else if (unspec->functionObjectsDecls.functionObject == F) { + unspec->jlcall_api = fptr.jlcall_api; + unspec->fptr = fptr.fptr; + } } JL_UNLOCK(&codegen_lock); // Might GC + return fptr; } static Function *jl_cfunction_object(jl_function_t *f, jl_value_t *rt, jl_tupletype_t *argt); @@ -1040,27 +1214,19 @@ void jl_extern_c(jl_function_t *f, jl_value_t *rt, jl_value_t *argt, char *name) // this is paired with jl_dump_function_ir and jl_dump_function_asm in particular ways: // misuse will leak memory or cause read-after-free extern "C" JL_DLLEXPORT -void *jl_get_llvmf_defn(jl_lambda_info_t *linfo, bool getwrapper) +void *jl_get_llvmf_defn(jl_method_instance_t *linfo, bool getwrapper) { - if (linfo->def && linfo->def->lambda_template->code == jl_nothing) { + if (linfo->def && linfo->def->source == NULL) { // not a generic function return NULL; } - jl_lambda_info_t *temp = NULL; - JL_GC_PUSH1(&temp); - if (linfo->code == jl_nothing && linfo->def) { - 
// re-infer if we've deleted the code - // first copy the linfo to avoid corrupting it and - // confusing the compiler about the - // validity of the code it already generated - temp = jl_get_specialized(linfo->def, linfo->specTypes, linfo->sparam_vals, 1); - jl_type_infer(temp, 0); - if (temp->code == jl_nothing || temp->inInference) { - // something went wrong: abort! - JL_GC_POP(); - return NULL; - } + jl_code_info_t *src = (jl_code_info_t*)linfo->inferred; + JL_GC_PUSH1(&src); + if (!src || !jl_is_code_info(src)) { + src = jl_type_infer(linfo, 0); + if (!src) + src = linfo->def->isstaged ? jl_code_for_staged(linfo) : linfo->def->source; } // Backup the info for the nested compile @@ -1073,7 +1239,7 @@ void *jl_get_llvmf_defn(jl_lambda_info_t *linfo, bool getwrapper) jl_llvm_functions_t declarations; std::unique_ptr m; JL_TRY { - m = emit_function(temp ? temp : linfo, &declarations); + m = emit_function(linfo, src, &declarations); } JL_CATCH { // something failed! @@ -1127,48 +1293,44 @@ void *jl_get_llvmf_defn(jl_lambda_info_t *linfo, bool getwrapper) extern "C" JL_DLLEXPORT -void *jl_get_llvmf_decl(jl_lambda_info_t *linfo, bool getwrapper) +void *jl_get_llvmf_decl(jl_method_instance_t *linfo, bool getwrapper) { - if (linfo->def && linfo->def->lambda_template->code == jl_nothing) { + if (linfo->def && linfo->def->source == NULL) { // not a generic function return NULL; } // compile this normally - linfo = jl_compile_for_dispatch(linfo); + jl_llvm_functions_t decls = jl_compile_for_dispatch(linfo); - if (linfo->jlcall_api == 2 && linfo->def) { + if (decls.functionObject == NULL && linfo->jlcall_api == 2 && linfo->def) { // normally we don't generate native code for these functions, so need an exception here // This leaks a bit of memory to cache native code that we'll never actually need - if (linfo->functionObjectsDecls.functionObject == NULL) { - jl_lambda_info_t *temp = NULL; - JL_GC_PUSH1(&temp); - temp = jl_get_specialized(linfo->def, linfo->specTypes, linfo->sparam_vals, 1); - jl_type_infer(temp, 0); - temp->jlcall_api = 0; - temp->constval = jl_nothing; - if (temp->code == jl_nothing || temp->inInference) { - JL_GC_POP(); - return NULL; + JL_LOCK(&codegen_lock); + decls = linfo->functionObjectsDecls; + if (decls.functionObject == NULL) { + jl_code_info_t *src = NULL; + src = jl_type_infer(linfo, 0); + if (!src) { + src = linfo->def->isstaged ? 
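// ---- editor's note ---------------------------------------------------
// With LambdaInfo gone, code that needs source for a MethodInstance must
// recover it: first a CodeInfo cached in li->inferred (the field may also
// hold other data, hence the jl_is_code_info check), then fresh type
// inference, and finally the method itself, via the staged generator or
// the stored def->source.  A sketch of that chain with hypothetical
// stand-in declarations (the real logic is inline above):

typedef struct code_info_ code_info_;          /* opaque stand-ins */
typedef struct method_instance_ method_instance_;

extern code_info_ *cached_code_info(method_instance_ *li); /* li->inferred, if a CodeInfo */
extern code_info_ *infer(method_instance_ *li);            /* jl_type_infer */
extern int         staged(method_instance_ *li);           /* li->def->isstaged */
extern code_info_ *generate(method_instance_ *li);         /* jl_code_for_staged */
extern code_info_ *stored(method_instance_ *li);           /* li->def->source */

static code_info_ *source_for(method_instance_ *li)
{
    code_info_ *src = cached_code_info(li);
    if (!src)
        src = infer(li);
    if (!src)
        src = staged(li) ? generate(li) : stored(li);
    return src;
}
// ----------------------------------------------------------------------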
jl_code_for_staged(linfo) : linfo->def->source; } - jl_compile_linfo(temp); - linfo->functionObjectsDecls = temp->functionObjectsDecls; - JL_GC_POP(); + decls = jl_compile_linfo(linfo, src); + linfo->functionObjectsDecls = decls; } - jl_set_lambda_code_null(linfo); + JL_UNLOCK(&codegen_lock); } - if (getwrapper || !linfo->functionObjectsDecls.specFunctionObject) - return linfo->functionObjectsDecls.functionObject; + if (getwrapper || !decls.specFunctionObject) + return decls.functionObject; else - return linfo->functionObjectsDecls.specFunctionObject; + return decls.specFunctionObject; } extern "C" JL_DLLEXPORT void *jl_get_llvmf(jl_tupletype_t *tt, bool getwrapper, bool getdeclarations) { // DEPRECATED - jl_lambda_info_t *linfo = NULL, *temp = NULL; + jl_method_instance_t *linfo = NULL, *temp = NULL; JL_GC_PUSH3(&linfo, &temp, &tt); if (tt != NULL) { linfo = jl_get_specialization1(tt); @@ -1591,7 +1753,7 @@ static jl_value_t *static_eval(jl_value_t *ex, jl_codectx_t *ctx, int sparams=tr } if (jl_is_quotenode(ex)) return jl_fieldref(ex, 0); - if (jl_is_lambda_info(ex)) + if (jl_is_method_instance(ex)) return NULL; jl_module_t *m = NULL; jl_sym_t *s = NULL; @@ -1771,8 +1933,7 @@ static void simple_escape_analysis(jl_value_t *expr, bool esc, jl_codectx_t *ctx jl_value_t *f = jl_exprarg(e,0); simple_escape_analysis(f, esc, ctx); if (expr_is_symbol(f)) { - if (is_constant(f, ctx, false)) { - jl_value_t *fv = jl_interpret_toplevel_expr_in(ctx->module, f, NULL); + if (jl_value_t *fv = static_eval(f, ctx, false)) { if (jl_typeis(fv, jl_intrinsic_type)) { esc = false; JL_I::intrinsic fi = (JL_I::intrinsic)jl_unbox_int32(fv); @@ -1875,7 +2036,7 @@ static Value *make_jlcall(ArrayRef args, jl_codectx_t *ctx) return largs; } -static void jl_add_linfo_root(jl_lambda_info_t *li, jl_value_t *val) +static void jl_add_method_root(jl_method_instance_t *li, jl_value_t *val) { if (jl_is_leaf_type(val) || jl_is_bool(val) || jl_is_symbol(val)) return; @@ -1898,7 +2059,7 @@ static void jl_add_linfo_root(jl_lambda_info_t *li, jl_value_t *val) } jl_array_ptr_1d_push(m->roots, val); } - JL_UNLOCK(&li->def->writelock); + JL_UNLOCK(&m->writelock); JL_GC_POP(); } @@ -2243,7 +2404,7 @@ static bool emit_builtin_call(jl_cgval_t *ret, jl_value_t *f, jl_value_t **args, JL_GC_POP(); return true; } - if (ctx->linfo->inferred) { + if (ctx->source->inferred) { rt1 = expr_type(expr, ctx); if (jl_is_tuple_type(rt1) && jl_is_leaf_type(rt1) && nargs == jl_datatype_nfields(rt1)) { *ret = emit_new_struct(rt1, nargs+1, args, ctx); @@ -2596,7 +2757,7 @@ static bool emit_builtin_call(jl_cgval_t *ret, jl_value_t *f, jl_value_t **args, if (ty!=NULL && jl_is_leaf_type(ty)) { if (jl_has_typevars(ty)) { // add root for types not cached. 
issue #7065 - jl_add_linfo_root(ctx->linfo, ty); + jl_add_method_root(ctx->linfo, ty); } *ret = mark_julia_const(ty); JL_GC_POP(); @@ -2637,15 +2798,16 @@ static Value *emit_jlcall(Value *theFptr, Value *theF, jl_value_t **args, return result; } -static jl_cgval_t emit_call_function_object(jl_lambda_info_t *li, const jl_cgval_t &theF, Value *theFptr, +static jl_cgval_t emit_call_function_object(jl_method_instance_t *li, const jl_cgval_t &theF, jl_llvm_functions_t decls, jl_value_t **args, size_t nargs, jl_value_t *callexpr, jl_codectx_t *ctx) { - if (li->functionObjectsDecls.specFunctionObject != NULL) { + Value *theFptr = (Value*)decls.functionObject; + if (decls.specFunctionObject != NULL) { // emit specialized call site jl_value_t *jlretty = li->rettype; bool retboxed; (void)julia_type_to_llvm(jlretty, &retboxed); - Function *cf = cast(prepare_call((Function*)li->functionObjectsDecls.specFunctionObject)); + Function *cf = cast(prepare_call((Function*)decls.specFunctionObject)); FunctionType *cft = cf->getFunctionType(); size_t nfargs = cft->getNumParams(); Value **argvals = (Value**) alloca(nfargs*sizeof(Value*)); @@ -2658,26 +2820,27 @@ static jl_cgval_t emit_call_function_object(jl_lambda_info_t *li, const jl_cgval idx++; } SmallVector gc_uses; - for(size_t i=0; i < nargs+1; i++) { + for (size_t i = 0; i < nargs + 1; i++) { jl_value_t *jt = jl_nth_slot_type(li->specTypes,i); bool isboxed; Type *et = julia_type_to_llvm(jt, &isboxed); if (type_is_ghost(et)) { // Still emit the expression in case it has side effects - if (i>0) emit_expr(args[i], ctx); + if (i > 0) + emit_expr(args[i], ctx); continue; } assert(idx < nfargs); Type *at = cft->getParamType(idx); if (isboxed) { assert(at == T_pjlvalue && et == T_pjlvalue); - jl_cgval_t origval = i==0 ? theF : emit_expr(args[i], ctx); + jl_cgval_t origval = i == 0 ? theF : emit_expr(args[i], ctx); argvals[idx] = boxed(origval, ctx); } else if (et->isAggregateType()) { // can lazy load on demand, no copy needed assert(at == PointerType::get(et, 0)); - jl_cgval_t arg = i==0 ? theF : emit_expr(args[i], ctx); + jl_cgval_t arg = i == 0 ? 
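// ---- editor's note ---------------------------------------------------
// The loop above picks one of four passing conventions per argument of a
// specialized ("specsig") call, in this order: ghost types get no slot at
// all (the expression is still emitted in case of side effects), boxed
// values pass as jl_value_t*, aggregates pass by pointer so fields can be
// loaded lazily, and the remaining isbits values pass by value.  A sketch
// of just the classification, with the checks in the order they run:

typedef enum {
    ARG_GHOST,      /* type_is_ghost(et): no runtime representation */
    ARG_BOXED,      /* isboxed: GC-managed jl_value_t* */
    ARG_AGGREGATE,  /* aggregate isbits: passed by pointer */
    ARG_IMMEDIATE   /* other isbits: passed by value */
} argclass_t;

static argclass_t classify_specsig_arg(int is_ghost, int is_boxed, int is_aggregate)
{
    if (is_ghost)     return ARG_GHOST;
    if (is_boxed)     return ARG_BOXED;
    if (is_aggregate) return ARG_AGGREGATE;
    return ARG_IMMEDIATE;
}
// ----------------------------------------------------------------------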
theF : emit_expr(args[i], ctx); assert(arg.ispointer()); argvals[idx] = data_pointer(arg, ctx, at); push_gc_use(gc_uses, arg); @@ -2711,30 +2874,23 @@ static jl_cgval_t emit_invoke(jl_expr_t *ex, jl_codectx_t *ctx) jl_cgval_t lival = emit_expr(args[0], ctx); if (lival.constant) { - jl_lambda_info_t *li = (jl_lambda_info_t*)lival.constant; - assert(jl_is_lambda_info(li)); + jl_method_instance_t *li = (jl_method_instance_t*)lival.constant; + assert(jl_is_method_instance(li)); + jl_llvm_functions_t decls = jl_compile_linfo(li, NULL); if (li->jlcall_api == 2) { - assert(li->constval); - return mark_julia_const(li->constval); - } - if (li->functionObjectsDecls.functionObject == NULL) { - assert(!li->inCompile); - if (li->code == jl_nothing && !li->inInference && li->inferred) { - // XXX: it was inferred in the past, so it's almost valid to re-infer it now - jl_type_infer(li, 0); - } - if (!li->inInference && li->inferred && li->code != jl_nothing) { - jl_compile_linfo(li); + assert(li->inferred); + return mark_julia_const(li->inferred); + } + if (decls.functionObject) { + int jlcall_api = jl_jlcall_api(decls.functionObject); + if (jlcall_api == 1) { + jl_cgval_t fval = emit_expr(args[1], ctx); + jl_cgval_t result = emit_call_function_object(li, fval, decls, &args[1], nargs - 1, (jl_value_t*)ex, ctx); + if (result.typ == jl_bottom_type) + CreateTrap(builder); + return result; } } - Value *theFptr = (Value*)li->functionObjectsDecls.functionObject; - if (theFptr && li->jlcall_api == 0) { - jl_cgval_t fval = emit_expr(args[1], ctx); - jl_cgval_t result = emit_call_function_object(li, fval, theFptr, &args[1], nargs - 1, (jl_value_t*)ex, ctx); - if (result.typ == jl_bottom_type) - CreateTrap(builder); - return result; - } } jl_cgval_t result = mark_julia_type(emit_jlcall(prepare_call(jlinvoke_func), boxed(lival, ctx, false), &args[1], nargs, ctx), @@ -2924,7 +3080,7 @@ static jl_cgval_t emit_local(jl_value_t *slotload, jl_codectx_t *ctx) if (vi.memloc) { Value *bp = vi.memloc; jl_value_t *typ; - if (ctx->linfo->inferred) { + if (ctx->source->inferred) { // use the better type from inference for this load typ = expr_type(slotload, ctx); if (jl_is_typevar(typ)) @@ -2973,7 +3129,7 @@ static void emit_assignment(jl_value_t *l, jl_value_t *r, jl_codectx_t *ctx) } if (slot.isboxed && slot.isimmutable) { // see if inference had a better type for the ssavalue than the expression (after inlining getfield on a Tuple) - jl_value_t *ssavalue_types = (jl_value_t*)ctx->linfo->ssavaluetypes; + jl_value_t *ssavalue_types = (jl_value_t*)ctx->source->ssavaluetypes; if (jl_is_array(ssavalue_types)) { jl_value_t *declType = jl_array_ptr_ref(ssavalue_types, idx); if (declType != slot.typ) { @@ -3175,7 +3331,7 @@ static jl_cgval_t emit_expr(jl_value_t *expr, jl_codectx_t *ctx) } } if (needroot && ctx->linfo->def) { // toplevel exprs and some integers are already rooted - jl_add_linfo_root(ctx->linfo, expr); + jl_add_method_root(ctx->linfo, expr); } return mark_julia_const(expr); } @@ -3193,7 +3349,7 @@ static jl_cgval_t emit_expr(jl_value_t *expr, jl_codectx_t *ctx) if (ctx->linfo->def) { // don't bother codegen constant-folding for toplevel jl_value_t *c = static_eval(expr, ctx, true, true); if (c) { - jl_add_linfo_root(ctx->linfo, c); + jl_add_method_root(ctx->linfo, c); return mark_julia_const(c); } } @@ -3361,8 +3517,9 @@ static Function *gen_cfun_wrapper(jl_function_t *ff, jl_value_t *jlrettype, jl_t Type *prt = NULL; int sret = 0; size_t nargs = jl_nparams(argt); - std::string err_msg = generate_func_sig(&crt, &prt, 
sret, fargt, fargt_isboxed, fargt_sig, fargt_vasig, inRegList, byRefList, attrs, - jlrettype, argt->parameters, nargs); + std::string err_msg = generate_func_sig(&crt, &prt, sret, fargt, fargt_isboxed, + fargt_sig, fargt_vasig, inRegList, byRefList, + attrs, jlrettype, argt->parameters, nargs); if (!err_msg.empty()) jl_error(err_msg.c_str()); if (fargt.size() + sret != fargt_sig.size()) @@ -3370,33 +3527,31 @@ static Function *gen_cfun_wrapper(jl_function_t *ff, jl_value_t *jlrettype, jl_t const char *name = "cfunction"; // try to look up this function for direct invoking - jl_lambda_info_t *lam = jl_get_specialization1((jl_tupletype_t*)sigt); + jl_method_instance_t *lam = jl_get_specialization1((jl_tupletype_t*)sigt); jl_value_t *astrt = (jl_value_t*)jl_any_type; // infer it first, if necessary - if (lam && lam->inInference) - lam = NULL; // TODO: use emit_invoke framework to dispatch these if (lam) { - if (lam->functionObjectsDecls.functionObject == NULL && lam->jlcall_api != 2) { - if (lam && (lam->code == jl_nothing || !lam->inferred)) - jl_type_infer(lam, 0); - if (lam && (lam->inInference || !lam->inferred)) - lam = NULL; // TODO: use emit_invoke framework to dispatch these - } - } - if (lam != NULL) { - jl_compile_linfo(lam); name = jl_symbol_name(lam->def->name); - astrt = lam->rettype; - if (astrt != (jl_value_t*)jl_bottom_type && - jl_type_intersection(astrt, declrt) == jl_bottom_type) { - // Do not warn if the function does not return since it is - // occasionally required by the C API (typically error callbacks) - // and doesn't capture the majority of the case when a function - // may throw. - jl_printf(JL_STDERR, "WARNING: cfunction: return type of %s does not match", name); + jl_code_info_t *src = NULL; + if (!lam->inferred) // TODO: this isn't ideal to be unconditionally calling type inference from here + src = jl_type_infer(lam, 0); + jl_compile_linfo(lam, src); + if (lam->jlcall_api != 2) { + if (lam->functionObjectsDecls.functionObject == NULL || + jl_jlcall_api(lam->functionObjectsDecls.functionObject) != 1) { + lam = NULL; // TODO: use emit_invoke framework to dispatch these + } } - if (!lam->functionObjectsDecls.functionObject && lam->jlcall_api != 2) { - jl_errorf("ERROR: cfunction: compiling %s failed", name); + if (lam) { + astrt = lam->rettype; + if (astrt != (jl_value_t*)jl_bottom_type && + jl_type_intersection(astrt, declrt) == jl_bottom_type) { + // Do not warn if the function does not return since it is + // occasionally required by the C API (typically error callbacks) + // and doesn't capture the majority of the case when a function + // may throw. + jl_printf(JL_STDERR, "WARNING: cfunction: return type of %s does not match", name); + } } } @@ -3408,6 +3563,7 @@ static Function *gen_cfun_wrapper(jl_function_t *ff, jl_value_t *jlrettype, jl_t Function *cw = Function::Create(FunctionType::get(sret ? T_void : prt, fargt_sig, false), GlobalVariable::ExternalLinkage, funcName.str(), M); + jl_init_function(cw); cw->setAttributes(attrs); #ifdef LLVM37 cw->addFnAttr("no-frame-pointer-elim", "true"); @@ -3605,7 +3761,7 @@ static Function *gen_cfun_wrapper(jl_function_t *ff, jl_value_t *jlrettype, jl_t retval = mark_julia_type(jlfunc_sret ? 
(Value*)builder.CreateLoad(result) : (Value*)call, retboxed, astrt, &ctx); } else if (lam->jlcall_api == 2) { - retval = mark_julia_const(lam->constval); + retval = mark_julia_const(lam->inferred); } else { assert(theFptr); @@ -3752,7 +3908,7 @@ static Function *jl_cfunction_object(jl_function_t *ff, jl_value_t *declrt, jl_t } // generate a julia-callable function that calls f (AKA lam) -static Function *gen_jlcall_wrapper(jl_lambda_info_t *lam, Function *f, bool sret, Module *M) +static Function *gen_jlcall_wrapper(jl_method_instance_t *lam, Function *f, bool sret, Module *M) { std::stringstream funcName; const std::string &fname = f->getName().str(); @@ -3764,6 +3920,7 @@ static Function *gen_jlcall_wrapper(jl_lambda_info_t *lam, Function *f, bool sre Function *w = Function::Create(jl_func_sig, GlobalVariable::ExternalLinkage, funcName.str(), M); + jl_init_function(w); #ifdef LLVM37 w->addFnAttr("no-frame-pointer-elim", "true"); #endif @@ -3784,7 +3941,7 @@ static Function *gen_jlcall_wrapper(jl_lambda_info_t *lam, Function *f, bool sre ctx.spvals_ptr = NULL; allocate_gc_frame(b0, &ctx); - size_t nargs = lam->nargs; + size_t nargs = lam->def->nargs; size_t nfargs = f->getFunctionType()->getNumParams(); Value **args = (Value**) alloca(nfargs*sizeof(Value*)); unsigned idx = 0; @@ -3831,16 +3988,16 @@ static Function *gen_jlcall_wrapper(jl_lambda_info_t *lam, Function *f, bool sre } // Compile to LLVM IR, using a specialized signature if applicable. -static std::unique_ptr emit_function(jl_lambda_info_t *lam, jl_llvm_functions_t *declarations) +static std::unique_ptr emit_function(jl_method_instance_t *lam, jl_code_info_t *src, jl_llvm_functions_t *declarations) { jl_ptls_t ptls = jl_get_ptls_states(); assert(declarations && "Capturing declarations is always required"); // step 1. unpack AST and allocate codegen context for this function - jl_array_t *code = (jl_array_t*)lam->code; + jl_array_t *code = (jl_array_t*)src->code; JL_GC_PUSH1(&code); if (!jl_typeis(code,jl_array_any_type)) - code = jl_uncompress_ast(lam, code); + code = jl_uncompress_ast(lam->def, code); //jl_static_show(JL_STDOUT, (jl_value_t*)ast); //jl_printf(JL_STDOUT, "\n"); std::map arrayvars; @@ -3849,31 +4006,30 @@ static std::unique_ptr emit_function(jl_lambda_info_t *lam, jl_llvm_func ctx.arrayvars = &arrayvars; ctx.module = lam->def ? lam->def->module : ptls->current_module; ctx.linfo = lam; + ctx.source = src; + ctx.code = code; ctx.name = jl_symbol_name(lam->def ? lam->def->name : anonymous_sym); ctx.funcName = ctx.name; ctx.vaSlot = -1; ctx.vaStack = false; ctx.spvals_ptr = NULL; + ctx.nargs = lam->def ? lam->def->nargs : 0; // step 2. process var-info lists to see what vars need boxing - int n_ssavalues = jl_is_long(lam->ssavaluetypes) ? jl_unbox_long(lam->ssavaluetypes) : jl_array_len(lam->ssavaluetypes); - size_t largslen = lam->nargs; - size_t vinfoslen = jl_array_dim0(lam->slotnames); + int n_ssavalues = jl_is_long(src->ssavaluetypes) ? 
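// ---- editor's note ---------------------------------------------------
// src->ssavaluetypes has two representations: before inference it is just
// the SSA-value count (a boxed integer); inference replaces it with an
// array holding one inferred type per SSA value.  Every consumer in this
// patch (here, and expr_type in cgutils.cpp) therefore branches on the
// representation first.  A sketch with hypothetical stand-in accessors:

#include <stddef.h>

extern int    boxed_is_long(void *v);  /* stand-in for jl_is_long */
extern long   unbox_long(void *v);     /* stand-in for jl_unbox_long */
extern size_t array_len(void *v);      /* stand-in for jl_array_len */

static size_t count_ssavalues(void *ssavaluetypes)
{
    if (boxed_is_long(ssavaluetypes))
        return (size_t)unbox_long(ssavaluetypes); /* count only: not yet inferred */
    return array_len(ssavaluetypes);              /* one entry per SSA value */
}
// ----------------------------------------------------------------------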
jl_unbox_long(src->ssavaluetypes) : jl_array_len(src->ssavaluetypes); + size_t vinfoslen = jl_array_dim0(src->slotnames); ctx.slots.resize(vinfoslen); - size_t nreq = largslen; + size_t nreq = ctx.nargs; int va = 0; - assert(lam->specTypes); // this could happen if the user tries to compile a generic-function - // without specializing (or unspecializing) it first - // compiling this would cause all specializations to inherit - // this code and could create an broken compile / function cache + assert(lam->specTypes); // the specTypes field should always be assigned - if (nreq > 0 && lam->isva) { + if (nreq > 0 && lam->def->isva) { nreq--; va = 1; - jl_sym_t *vn = (jl_sym_t*)jl_array_ptr_ref(lam->slotnames,largslen-1); + jl_sym_t *vn = (jl_sym_t*)jl_array_ptr_ref(src->slotnames, ctx.nargs - 1); if (vn != unused_sym) - ctx.vaSlot = largslen-1; + ctx.vaSlot = ctx.nargs - 1; } ctx.nReqArgs = nreq; @@ -3884,7 +4040,7 @@ static std::unique_ptr emit_function(jl_lambda_info_t *lam, jl_llvm_func // step 3. some variable analysis size_t i; for(i=0; i < nreq; i++) { - jl_sym_t *argname = (jl_sym_t*)jl_array_ptr_ref(lam->slotnames,i); + jl_sym_t *argname = (jl_sym_t*)jl_array_ptr_ref(src->slotnames,i); if (argname == unused_sym) continue; jl_varinfo_t &varinfo = ctx.slots[i]; varinfo.isArgument = true; @@ -3899,12 +4055,12 @@ static std::unique_ptr emit_function(jl_lambda_info_t *lam, jl_llvm_func for(i=0; i < vinfoslen; i++) { jl_varinfo_t &varinfo = ctx.slots[i]; - uint8_t flags = jl_array_uint8_ref(lam->slotflags, i); + uint8_t flags = jl_array_uint8_ref(src->slotflags, i); varinfo.escapes = false; varinfo.isSA = (jl_vinfo_sa(flags)!=0); - varinfo.usedUndef = (jl_vinfo_usedundef(flags)!=0) || (!varinfo.isArgument && !lam->inferred); + varinfo.usedUndef = (jl_vinfo_usedundef(flags)!=0) || (!varinfo.isArgument && !src->inferred); if (!varinfo.isArgument) { - jl_value_t *typ = jl_is_array(lam->slottypes) ? jl_array_ptr_ref(lam->slottypes,i) : (jl_value_t*)jl_any_type; + jl_value_t *typ = jl_is_array(src->slottypes) ? jl_array_ptr_ref(src->slottypes,i) : (jl_value_t*)jl_any_type; if (!jl_is_type(typ)) typ = (jl_value_t*)jl_any_type; varinfo.value = mark_julia_type((Value*)NULL, false, typ, &ctx); @@ -3926,13 +4082,13 @@ static std::unique_ptr emit_function(jl_lambda_info_t *lam, jl_llvm_func Function *f = NULL; bool specsig = false; - bool needsparams = jl_svec_len(lam->sparam_syms) != jl_svec_len(lam->sparam_vals); + bool needsparams = lam->def ? jl_svec_len(lam->def->sparam_syms) != jl_svec_len(lam->sparam_vals) : false; for (i = 0; !needsparams && i < jl_svec_len(lam->sparam_vals); i++) { jl_value_t *e = jl_svecref(lam->sparam_vals, i); if (jl_is_typevar(e)) needsparams = true; } - if (!va && !needsparams && lam->specTypes != jl_anytuple_type && lam->inferred) { + if (!va && ctx.nargs > 0 && !needsparams && lam->specTypes != jl_anytuple_type && src->inferred) { // not vararg, consider specialized signature for(size_t i=0; i < jl_nparams(lam->specTypes); i++) { if (isbits_spec(jl_tparam(lam->specTypes, i))) { // assumes !va @@ -3989,6 +4145,7 @@ static std::unique_ptr emit_function(jl_lambda_info_t *lam, jl_llvm_func f = Function::Create(FunctionType::get(rt, fsig, false), GlobalVariable::ExternalLinkage, funcName.str(), M); + jl_init_function(f); if (ctx.sret) { f->addAttribute(1, Attribute::StructRet); f->addAttribute(1, Attribute::NoAlias); @@ -4004,6 +4161,7 @@ static std::unique_ptr emit_function(jl_lambda_info_t *lam, jl_llvm_func f = Function::Create(needsparams ? 
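// ---- editor's note ---------------------------------------------------
// emit_function chooses between two entry conventions: a specialized
// native signature ("specsig"), or the generic boxed jlcall signature
// created here (jl_func_sig_sparams when typevars remain unfilled).  Per
// the test above, specsig requires no varargs, at least one argument, no
// free static parameters, a specTypes more precise than Tuple, inferred
// source, and, per the isbits_spec loop (truncated in this hunk), some
// argument that actually benefits from an unboxed representation.  A
// sketch of that predicate, reconstructed from this editor's reading:

static int use_specsig(int is_vararg, int nargs, int needsparams,
                       int spectypes_is_anytuple, int inferred,
                       int n_isbits_args)
{
    if (is_vararg || nargs == 0 || needsparams)
        return 0;
    if (spectypes_is_anytuple || !inferred)
        return 0;
    return n_isbits_args > 0;   /* some arg gains from an unboxed ABI */
}
// ----------------------------------------------------------------------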
jl_func_sig_sparams : jl_func_sig, GlobalVariable::ExternalLinkage, funcName.str(), M); + jl_init_function(f); #ifdef LLVM37 f->addFnAttr("no-frame-pointer-elim", "true"); #endif @@ -4110,8 +4268,8 @@ static std::unique_ptr emit_function(jl_lambda_info_t *lam, jl_llvm_func #else std::vector ditypes(0); #endif - for(size_t i=0; i < jl_nparams(lam->specTypes); i++) { // assumes !va - if (i < largslen && ctx.slots[i].value.isghost) + for (size_t i = 0; i < jl_nparams(lam->specTypes); i++) { // assumes !va + if (i < ctx.nargs && ctx.slots[i].value.isghost) continue; ditypes.push_back(julia_type_to_di(jl_tparam(lam->specTypes,i),&dbuilder,false)); } @@ -4159,7 +4317,7 @@ static std::unique_ptr emit_function(jl_lambda_info_t *lam, jl_llvm_func const bool AlwaysPreserve = true; // Go over all arguments and local variables and initialize their debug information for (i = 0; i < nreq; i++) { - jl_sym_t *argname = (jl_sym_t*)jl_array_ptr_ref(lam->slotnames, i); + jl_sym_t *argname = (jl_sym_t*)jl_array_ptr_ref(src->slotnames, i); if (argname == unused_sym) continue; jl_varinfo_t &varinfo = ctx.slots[i]; @@ -4171,7 +4329,7 @@ static std::unique_ptr emit_function(jl_lambda_info_t *lam, jl_llvm_func topfile, // File toplineno == -1 ? 0 : toplineno, // Line // Variable type - julia_type_to_di(varinfo.value.typ,&dbuilder,false), + julia_type_to_di(varinfo.value.typ, &dbuilder, false), AlwaysPreserve, // May be deleted if optimized out 0); // Flags (TODO: Do we need any) #else @@ -4181,7 +4339,7 @@ static std::unique_ptr emit_function(jl_lambda_info_t *lam, jl_llvm_func jl_symbol_name(argname), // Variable name topfile, // File toplineno == -1 ? 0 : toplineno, // Line (for now, use lineno of the function) - julia_type_to_di(varinfo.value.typ, &dbuilder,false), // Variable type + julia_type_to_di(varinfo.value.typ, &dbuilder, false), // Variable type AlwaysPreserve, // May be deleted if optimized out 0, // Flags (TODO: Do we need any) ctx.sret + i + 1); // Argument number (1-based) @@ -4212,7 +4370,7 @@ static std::unique_ptr emit_function(jl_lambda_info_t *lam, jl_llvm_func #endif } for (i = 0; i < vinfoslen; i++) { - jl_sym_t *s = (jl_sym_t*)jl_array_ptr_ref(lam->slotnames, i); + jl_sym_t *s = (jl_sym_t*)jl_array_ptr_ref(src->slotnames, i); jl_varinfo_t &varinfo = ctx.slots[i]; if (varinfo.isArgument || s == compiler_temp_sym || s == unused_sym) continue; @@ -4341,8 +4499,8 @@ static std::unique_ptr emit_function(jl_lambda_info_t *lam, jl_llvm_func Function::arg_iterator AI = f->arg_begin(); if (ctx.sret) AI++; // skip sret slot - for(i=0; i < nreq; i++) { - jl_sym_t *s = (jl_sym_t*)jl_array_ptr_ref(lam->slotnames,i); + for (i = 0; i < nreq; i++) { + jl_sym_t *s = (jl_sym_t*)jl_array_ptr_ref(src->slotnames, i); jl_value_t *argType = jl_nth_slot_type(lam->specTypes, i); bool isboxed; Type *llvmArgType = julia_type_to_llvm(argType, &isboxed); @@ -4943,7 +5101,7 @@ static Function *jlcall_func_to_llvm(const std::string &cname, jl_fptr_t addr, M return f; } -extern "C" void jl_fptr_to_llvm(jl_fptr_t fptr, jl_lambda_info_t *lam, int specsig) +extern "C" void jl_fptr_to_llvm(jl_fptr_t fptr, jl_method_instance_t *lam, int specsig) { if (imaging_mode) { if (!specsig) { @@ -4990,7 +5148,7 @@ extern "C" void jl_fptr_to_llvm(jl_fptr_t fptr, jl_lambda_info_t *lam, int specs add_named_global(f, fptr); } else { - if (lam->jlcall_api == 1) { // jl_func_sig_sparams -- don't bother emitting the FunctionObject (since can't be used right now) + if (lam->jlcall_api != 1) { // jl_func_sig_sparams -- don't bother emitting the 
FunctionObject (since can't be used right now) assert(lam->fptr == NULL); lam->fptr = fptr; } diff --git a/src/debuginfo.cpp b/src/debuginfo.cpp index 6d0ef5b01660c..3ec064ee0f182 100644 --- a/src/debuginfo.cpp +++ b/src/debuginfo.cpp @@ -86,7 +86,7 @@ struct FuncInfo { const Function *func; size_t lengthAdr; std::vector lines; - jl_lambda_info_t *linfo; + jl_method_instance_t *linfo; }; #else struct ObjectInfo { @@ -105,7 +105,7 @@ struct ObjectInfo { // Maintain a mapping of unrealized function names -> linfo objects // so that when we see it get emitted, we can add a link back to the linfo // that it came from (providing name, type signature, file info, etc.) -static StringMap linfo_in_flight; +static StringMap linfo_in_flight; static std::string mangle(const std::string &Name, const DataLayout &DL) { #if defined(USE_MCJIT) || defined(USE_ORCJIT) @@ -119,7 +119,7 @@ static std::string mangle(const std::string &Name, const DataLayout &DL) return Name; #endif } -void jl_add_linfo_in_flight(StringRef name, jl_lambda_info_t *linfo, const DataLayout &DL) +void jl_add_linfo_in_flight(StringRef name, jl_method_instance_t *linfo, const DataLayout &DL) { linfo_in_flight[mangle(name, DL)] = linfo; } @@ -216,14 +216,14 @@ struct strrefcomp { #endif extern "C" tracer_cb jl_linfo_tracer; -static std::vector triggered_linfos; +static std::vector triggered_linfos; void jl_callback_triggered_linfos(void) { if (triggered_linfos.empty()) return; if (jl_linfo_tracer) { - std::vector to_process(std::move(triggered_linfos)); - for (jl_lambda_info_t *linfo : to_process) + std::vector to_process(std::move(triggered_linfos)); + for (jl_method_instance_t *linfo : to_process) jl_call_tracer(jl_linfo_tracer, (jl_value_t*)linfo); } } @@ -234,7 +234,7 @@ class JuliaJITEventListener: public JITEventListener std::map info; #else std::map objectmap; - std::map, revcomp> linfomap; + std::map, revcomp> linfomap; #endif public: @@ -251,23 +251,28 @@ class JuliaJITEventListener: public JITEventListener int8_t gc_state = jl_gc_safe_enter(ptls); uv_rwlock_wrlock(&threadsafe); StringRef sName = F.getName(); - StringMap::iterator linfo_it = linfo_in_flight.find(sName); - jl_lambda_info_t *linfo = NULL; + StringMap::iterator linfo_it = linfo_in_flight.find(sName); + jl_method_instance_t *linfo = NULL; if (linfo_it != linfo_in_flight.end()) { linfo = linfo_it->second; linfo_in_flight.erase(linfo_it); - if (((Function*)linfo->functionObjectsDecls.functionObject)->getName().equals(sName)) - linfo->fptr = (jl_fptr_t)(uintptr_t)Code; + if (!linfo->fptr && linfo->functionObjectsDecls.functionObject && + ((Function*)linfo->functionObjectsDecls.functionObject)->getName().equals(sName)) { + int jlcall_api = jl_jlcall_api(&F); + if (linfo->inferred || jlcall_api != 1) { + linfo->jlcall_api = jlcall_api; + linfo->fptr = (jl_fptr_t)(uintptr_t)Code; + } + else { + linfo->unspecialized_ducttape = (jl_fptr_t)(uintptr_t)Code; + } + } } #if defined(_OS_WINDOWS_) create_PRUNTIME_FUNCTION((uint8_t*)Code, Size, F.getName(), (uint8_t*)Code, Size, NULL); #endif FuncInfo tmp = {&F, Size, Details.LineStarts, linfo}; info[(size_t)(Code)] = tmp; -#ifndef KEEP_BODIES - if (!jl_generating_output()) - const_cast(&F)->deleteBody(); -#endif uv_rwlock_wrunlock(&threadsafe); jl_gc_safe_leave(ptls, gc_state); } @@ -280,7 +285,7 @@ class JuliaJITEventListener: public JITEventListener #endif // ifndef USE_MCJIT #ifdef USE_MCJIT - jl_lambda_info_t *lookupLinfo(size_t pointer) + jl_method_instance_t *lookupLinfo(size_t pointer) { auto linfo = 
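// ---- editor's note ---------------------------------------------------
// lookupLinfo answers "which MethodInstance owns this instruction
// address?"  linfomap stores start -> (size, linfo) under the reversed
// comparator revcomp, so lower_bound(pointer) lands on the entry whose
// code could cover the address; the guard below rejects addresses past
// the end of that function's code.  The same idea in plain C over an
// array sorted ascending by start address:

#include <stddef.h>
#include <stdint.h>

typedef struct { uintptr_t start; size_t size; void *linfo; } code_range_t;

static void *lookup_linfo(const code_range_t *r, size_t n, uintptr_t addr)
{
    size_t lo = 0, hi = n;          /* find the last entry with start <= addr */
    while (lo < hi) {
        size_t mid = lo + (hi - lo) / 2;
        if (r[mid].start <= addr)
            lo = mid + 1;
        else
            hi = mid;
    }
    if (lo == 0)
        return NULL;                /* addr precedes all known code */
    const code_range_t *e = &r[lo - 1];
    return (addr < e->start + e->size) ? e->linfo : NULL;
}
// ----------------------------------------------------------------------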
linfomap.lower_bound(pointer); if (linfo != linfomap.end() && pointer < linfo->first + linfo->second.first) @@ -480,15 +485,24 @@ class JuliaJITEventListener: public JITEventListener (uint8_t*)(uintptr_t)Addr, (size_t)Size, sName, (uint8_t*)(uintptr_t)SectionLoadAddr, (size_t)SectionSize, UnwindData); #endif - StringMap::iterator linfo_it = linfo_in_flight.find(sName); - jl_lambda_info_t *linfo = NULL; + StringMap::iterator linfo_it = linfo_in_flight.find(sName); + jl_method_instance_t *linfo = NULL; if (linfo_it != linfo_in_flight.end()) { linfo = linfo_it->second; if (linfo->compile_traced) triggered_linfos.push_back(linfo); linfo_in_flight.erase(linfo_it); - if (((Function*)linfo->functionObjectsDecls.functionObject)->getName().equals(sName)) - linfo->fptr = (jl_fptr_t)(uintptr_t)Addr; + Function *F = (Function*)linfo->functionObjectsDecls.functionObject; + if (!linfo->fptr && F && F->getName().equals(sName)) { + int jlcall_api = jl_jlcall_api(F); + if (linfo->inferred || jlcall_api != 1) { + linfo->jlcall_api = jlcall_api; + linfo->fptr = (jl_fptr_t)(uintptr_t)Addr; + } + else { + linfo->unspecialized_ducttape = (jl_fptr_t)(uintptr_t)Addr; + } + } } if (linfo) linfomap[Addr] = std::make_pair(Size, linfo); @@ -556,13 +570,22 @@ class JuliaJITEventListener: public JITEventListener (uint8_t*)(uintptr_t)Addr, (size_t)Size, sName, (uint8_t*)(uintptr_t)SectionLoadAddr, (size_t)SectionSize, UnwindData); #endif - StringMap::iterator linfo_it = linfo_in_flight.find(sName); - jl_lambda_info_t *linfo = NULL; + StringMap::iterator linfo_it = linfo_in_flight.find(sName); + jl_method_instance_t *linfo = NULL; if (linfo_it != linfo_in_flight.end()) { linfo = linfo_it->second; linfo_in_flight.erase(linfo_it); - if (((Function*)linfo->functionObjectsDecls.functionObject)->getName().equals(sName)) - linfo->fptr = (jl_fptr_t)(uintptr_t)Addr; + Function *F = (Function*)linfo->functionObjectsDecls.functionObject; + if (!linfo->fptr && F && F->getName().equals(sName)) { + int jlcall_api = jl_jlcall_api(F); + if (linfo->inferred || jlcall_api != 1) { + linfo->jlcall_api = jlcall_api; + linfo->fptr = (jl_fptr_t)(uintptr_t)Addr; + } + else { + linfo->unspecialized_ducttape = (jl_fptr_t)(uintptr_t)Addr; + } + } } if (linfo) linfomap[Addr] = std::make_pair(Size, linfo); @@ -940,9 +963,9 @@ openDebugInfo(StringRef debuginfopath, const debug_link_info &info) static uint64_t jl_sysimage_base; static void **sysimg_fvars; -static jl_lambda_info_t **sysimg_fvars_linfo; +static jl_method_instance_t **sysimg_fvars_linfo; static size_t sysimg_fvars_n; -extern "C" void jl_register_fptrs(uint64_t sysimage_base, void **fptrs, jl_lambda_info_t **linfos, size_t n) +extern "C" void jl_register_fptrs(uint64_t sysimage_base, void **fptrs, jl_method_instance_t **linfos, size_t n) { jl_sysimage_base = (uintptr_t)sysimage_base; sysimg_fvars = fptrs; @@ -1540,12 +1563,12 @@ int jl_getFunctionInfo(jl_frame_t **frames_out, size_t pointer, int skipC, int n return jl_getDylibFunctionInfo(frames_out, pointer, skipC, noInline); } -extern "C" jl_lambda_info_t *jl_gdblookuplinfo(void *p) +extern "C" jl_method_instance_t *jl_gdblookuplinfo(void *p) { #ifndef USE_MCJIT std::map &info = jl_jit_events->getMap(); std::map::iterator it = info.lower_bound((size_t)p); - jl_lambda_info_t *li = NULL; + jl_method_instance_t *li = NULL; if (it != info.end() && (uintptr_t)(*it).first + (*it).second.lengthAdr >= (uintptr_t)p) li = (*it).second.linfo; uv_rwlock_rdunlock(&threadsafe); diff --git a/src/dump.c b/src/dump.c index 9c5a54674b6fa..9a8dba06a150a 
100644 --- a/src/dump.c +++ b/src/dump.c @@ -103,7 +103,7 @@ typedef enum _DUMP_MODES { MODE_INVALID = 0, // jl_uncompress_ast - // compressing / decompressing an AST Expr in a LambdaInfo + // compressing / decompressing an AST Expr in a MethodInstance MODE_AST, // jl_restore_system_image @@ -384,7 +384,7 @@ static void jl_deserialize_gv_others(jl_serializer_state *s) } static struct delayed_fptrs_t { - jl_lambda_info_t *li; + jl_method_instance_t *li; int32_t func; int32_t cfunc; } *delayed_fptrs = NULL; @@ -392,7 +392,7 @@ static size_t delayed_fptrs_n = 0; static size_t delayed_fptrs_max = 0; static size_t sysimg_fvars_max = 0; -static void jl_delayed_fptrs(jl_lambda_info_t *li, int32_t func, int32_t cfunc) +static void jl_delayed_fptrs(jl_method_instance_t *li, int32_t func, int32_t cfunc) { // can't restore the fptrs until after the system image is fully restored, // since it will try to decompress the function AST to determine the argument types @@ -416,7 +416,7 @@ static void jl_delayed_fptrs(jl_lambda_info_t *li, int32_t func, int32_t cfunc) } } -void jl_register_fptrs(uint64_t sysimage_base, void **fptrs, jl_lambda_info_t **linfos, size_t n); +void jl_register_fptrs(uint64_t sysimage_base, void **fptrs, jl_method_instance_t **linfos, size_t n); static void jl_update_all_fptrs(void) { @@ -428,9 +428,9 @@ static void jl_update_all_fptrs(void) sysimg_gvars = NULL; sysimg_fvars = NULL; size_t i; - jl_lambda_info_t **linfos = (jl_lambda_info_t**)malloc(sizeof(jl_lambda_info_t*) * sysimg_fvars_max); + jl_method_instance_t **linfos = (jl_method_instance_t**)malloc(sizeof(jl_method_instance_t*) * sysimg_fvars_max); for (i = 0; i < delayed_fptrs_n; i++) { - jl_lambda_info_t *li = delayed_fptrs[i].li; + jl_method_instance_t *li = delayed_fptrs[i].li; int32_t func = delayed_fptrs[i].func - 1; if (func >= 0) { jl_fptr_to_llvm((jl_fptr_t)fvars[func], li, 0); @@ -452,13 +452,13 @@ static void jl_update_all_fptrs(void) // --- serialize --- -static void jl_serialize_fptr(jl_serializer_state *s, void *fptr) +static uint16_t jl_fptr_id(void *fptr) { void **pbp = ptrhash_bp(&fptr_to_id, fptr); if (*pbp == HT_NOTFOUND || fptr == NULL) - write_uint16(s->s, 1); + return 1; else - write_uint16(s->s, *(intptr_t*)pbp); + return *(intptr_t*)pbp; } static int module_in_worklist(jl_module_t *mod) @@ -511,18 +511,6 @@ static int type_recursively_external(jl_datatype_t *dt) return 1; } -static int jl_prune_specializations(jl_typemap_entry_t *ml, void *closure) -{ - jl_value_t *ret = ml->func.value; - if (jl_is_lambda_info(ret) && - ((!jl_is_leaf_type((jl_value_t*)ml->sig) && !((jl_lambda_info_t*)ret)->inlineable) || - ((jl_lambda_info_t*)ret)->code == jl_nothing)) { - ml->func.value = ((jl_lambda_info_t*)ret)->rettype; - jl_gc_wb(ml, ml->func.value); - } - return 1; -} - static void jl_serialize_datatype(jl_serializer_state *s, jl_datatype_t *dt) { @@ -898,12 +886,6 @@ static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v) if (external) jl_error("support for serializing a direct reference to an external Method not implemented"); } - if (tf->unknown && tf->unknown != jl_nothing) { - // go through the t-func cache, replacing ASTs with just return - // types for abstract argument types. these ASTs are generally - // not needed (e.g. they don't get inlined). 
- jl_typemap_visitor(*tf, jl_prune_specializations, NULL); - } jl_serialize_value(s, tf->unknown); jl_serialize_value(s, (jl_value_t*)m->name); write_int8(s->s, m->isstaged); @@ -914,17 +896,20 @@ static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v) if (s->mode != MODE_MODULE_POSTWORK) jl_serialize_value(s, (jl_value_t*)m->ambig); write_int8(s->s, m->called); + write_int32(s->s, m->nargs); + write_int8(s->s, m->isva); jl_serialize_value(s, (jl_value_t*)m->module); + jl_serialize_value(s, (jl_value_t*)m->sparam_syms); jl_serialize_value(s, (jl_value_t*)m->roots); - jl_serialize_value(s, (jl_value_t*)m->lambda_template); + jl_serialize_value(s, (jl_value_t*)m->source); + jl_serialize_value(s, (jl_value_t*)m->unspecialized); jl_serialize_value(s, (jl_value_t*)m->invokes.unknown); write_int8(s->s, m->needs_sparam_vals_ducttape); } - else if (jl_is_lambda_info(v)) { - writetag(s->s, jl_lambda_info_type); - jl_lambda_info_t *li = (jl_lambda_info_t*)v; + else if (jl_is_method_instance(v)) { + writetag(s->s, jl_method_instance_type); + jl_method_instance_t *li = (jl_method_instance_t*)v; jl_serialize_value(s, (jl_value_t*)li->specTypes); - write_int8(s->s, li->inferred); if (s->mode == MODE_MODULE || s->mode == MODE_MODULE_POSTWORK) { int external = li->def && !module_in_worklist(li->def->module); write_uint8(s->s, external); @@ -936,29 +921,29 @@ static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v) return; } } - if (li->jlcall_api == 2) - jl_serialize_value(s, jl_nothing); - else - jl_serialize_value(s, li->code); - jl_serialize_value(s, li->slotnames); - jl_serialize_value(s, li->slottypes); - jl_serialize_value(s, li->slotflags); - jl_serialize_value(s, li->ssavaluetypes); + jl_serialize_value(s, li->inferred); jl_serialize_value(s, li->rettype); - jl_serialize_value(s, (jl_value_t*)li->sparam_syms); jl_serialize_value(s, (jl_value_t*)li->sparam_vals); - write_int8(s->s, li->pure); - write_int8(s->s, li->inlineable); - write_int8(s->s, li->propagate_inbounds); - write_int8(s->s, li->isva); - write_int32(s->s, li->nargs); jl_serialize_value(s, (jl_value_t*)li->def); - jl_serialize_value(s, li->constval); - jl_serialize_fptr(s, (void*)(uintptr_t)li->fptr); - // save functionObject pointers - write_int32(s->s, jl_assign_functionID(li->functionObjectsDecls.functionObject)); - write_int32(s->s, jl_assign_functionID(li->functionObjectsDecls.specFunctionObject)); - write_int8(s->s, li->jlcall_api); + uint16_t id = jl_fptr_id((void*)(uintptr_t)li->fptr); + if (li->jlcall_api == 2) { + write_int8(s->s, 2); + } + else if (id >= 2) { + write_int8(s->s, -li->jlcall_api); + write_uint16(s->s, id); + } + else if (li->functionObjectsDecls.functionObject) { + int jlcall_api = jl_jlcall_api(li->functionObjectsDecls.functionObject); + assert(jlcall_api); + // save functionObject pointers + write_int8(s->s, jlcall_api); + write_int32(s->s, jl_assign_functionID(li->functionObjectsDecls.functionObject)); + write_int32(s->s, jl_assign_functionID(li->functionObjectsDecls.specFunctionObject)); + } + else { + write_int8(s->s, 0); + } } else if (jl_typeis(v, jl_module_type)) { jl_serialize_module(s, (jl_module_t*)v); @@ -1550,12 +1535,21 @@ static jl_value_t *jl_deserialize_value_(jl_serializer_state *s, jl_value_t *vta m->ambig = jl_nothing; } m->called = read_int8(s->s); + m->nargs = read_int32(s->s); + m->isva = read_int8(s->s); m->module = (jl_module_t*)jl_deserialize_value(s, (jl_value_t**)&m->module); jl_gc_wb(m, m->module); + m->sparam_syms = (jl_svec_t*)jl_deserialize_value(s, 
(jl_value_t**)&m->sparam_syms); + jl_gc_wb(m, m->sparam_syms); m->roots = (jl_array_t*)jl_deserialize_value(s, (jl_value_t**)&m->roots); - if (m->roots) jl_gc_wb(m, m->roots); - m->lambda_template = (jl_lambda_info_t*)jl_deserialize_value(s, (jl_value_t**)&m->lambda_template); - if (m->lambda_template) jl_gc_wb(m, m->lambda_template); + if (m->roots) + jl_gc_wb(m, m->roots); + m->source = (jl_code_info_t*)jl_deserialize_value(s, (jl_value_t**)&m->source); + if (m->source) + jl_gc_wb(m, m->source); + m->unspecialized = (jl_method_instance_t*)jl_deserialize_value(s, (jl_value_t**)&m->unspecialized); + if (m->unspecialized) + jl_gc_wb(m, m->unspecialized); m->invokes.unknown = jl_deserialize_value(s, (jl_value_t**)&m->invokes); jl_gc_wb(m, m->invokes.unknown); m->needs_sparam_vals_ducttape = read_int8(s->s); @@ -1563,19 +1557,17 @@ static jl_value_t *jl_deserialize_value_(jl_serializer_state *s, jl_value_t *vta JL_MUTEX_INIT(&m->writelock); return (jl_value_t*)m; } - else if (vtag == (jl_value_t*)jl_lambda_info_type) { - jl_lambda_info_t *li = - (jl_lambda_info_t*)jl_gc_alloc(ptls, sizeof(jl_lambda_info_t), - jl_lambda_info_type); - memset(li, 0, sizeof(jl_lambda_info_t)); + else if (vtag == (jl_value_t*)jl_method_instance_type) { + jl_method_instance_t *li = + (jl_method_instance_t*)jl_gc_alloc(ptls, sizeof(jl_method_instance_t), + jl_method_instance_type); + memset(li, 0, sizeof(jl_method_instance_t)); uintptr_t pos = backref_list.len; if (usetable) arraylist_push(&backref_list, li); li->specTypes = (jl_tupletype_t*)jl_deserialize_value(s, (jl_value_t**)&li->specTypes); if (li->specTypes) jl_gc_wb(li, li->specTypes); - int inferred = read_int8(s->s); - li->inferred = inferred; if (s->mode == MODE_MODULE) { int external = read_uint8(s->s); @@ -1594,47 +1586,40 @@ static jl_value_t *jl_deserialize_value_(jl_serializer_state *s, jl_value_t *vta li = jl_method_lookup_by_type(mt, li->specTypes, 0, 0, 0); assert(li); backref_list.items[pos] = li; - // if it can be inferred but isn't, encourage codegen to infer it - if (inferred && !li->inferred) { - jl_set_lambda_code_null(li); - li->inferred = 1; - } return (jl_value_t*)li; } } - li->code = jl_deserialize_value(s, &li->code); jl_gc_wb(li, li->code); - li->slotnames = (jl_array_t*)jl_deserialize_value(s, (jl_value_t**)&li->slotnames); jl_gc_wb(li, li->slotnames); - li->slottypes = jl_deserialize_value(s, &li->slottypes); jl_gc_wb(li, li->slottypes); - li->slotflags = (jl_array_t*)jl_deserialize_value(s, (jl_value_t**)&li->slotflags); jl_gc_wb(li, li->slotflags); - li->ssavaluetypes = jl_deserialize_value(s, &li->ssavaluetypes); jl_gc_wb(li, li->ssavaluetypes); + li->inferred = jl_deserialize_value(s, &li->inferred); + jl_gc_wb(li, li->inferred); li->rettype = jl_deserialize_value(s, &li->rettype); jl_gc_wb(li, li->rettype); - li->sparam_syms = (jl_svec_t*)jl_deserialize_value(s, (jl_value_t**)&li->sparam_syms); - jl_gc_wb(li, li->sparam_syms); li->sparam_vals = (jl_svec_t*)jl_deserialize_value(s, (jl_value_t**)&li->sparam_vals); jl_gc_wb(li, li->sparam_vals); li->unspecialized_ducttape = NULL; - li->pure = read_int8(s->s); - li->inlineable = read_int8(s->s); - li->propagate_inbounds = read_int8(s->s); - li->isva = read_int8(s->s); - li->nargs = read_int32(s->s); li->def = (jl_method_t*)jl_deserialize_value(s, (jl_value_t**)&li->def); - if (li->def) jl_gc_wb(li, li->def); - li->constval = jl_deserialize_value(s, &li->constval); - if (li->constval) jl_gc_wb(li, li->constval); - li->fptr = NULL; + if (li->def) + jl_gc_wb(li, li->def); 
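// A sketch of the entry-point tag protocol shared by the serializer above and
// the deserializer that follows: tag 2 means jlcall_api == 2 (li->inferred is
// the constant return value), a negative tag carries -jlcall_api followed by a
// uint16 builtin-fptr id, a positive tag is followed by two int32 LLVM
// function ids, and tag 0 means nothing was compiled. read_i8/read_u16/read_i32
// and fptr_from_id/delay_fptrs are hypothetical stand-ins for the ios_t
// readers and fptr bookkeeping used here:
//
//   static void sketch_read_entrypoint(jl_method_instance_t *li) {
//       int8_t tag = read_i8();
//       if (tag == 2 || tag == 0) {            // constant return, or nothing compiled
//           li->fptr = NULL;
//           li->jlcall_api = tag;
//       }
//       else if (tag < 0) {                    // builtin: fptr recovered from its id
//           li->fptr = fptr_from_id(read_u16());
//           li->jlcall_api = -tag;
//       }
//       else {                                 // compiled: function ids resolved later
//           int32_t f = read_i32(), cf = read_i32();
//           delay_fptrs(li, f, cf);            // cf. jl_delayed_fptrs in the real reader
//           li->fptr = NULL;
//           li->jlcall_api = tag;
//       }
//   }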
li->functionObjectsDecls.functionObject = NULL; li->functionObjectsDecls.specFunctionObject = NULL; li->inInference = 0; - li->inCompile = 0; - li->fptr = jl_deserialize_fptr(s); - int32_t cfunc_llvm, func_llvm; - func_llvm = read_int32(s->s); - cfunc_llvm = read_int32(s->s); - jl_delayed_fptrs(li, func_llvm, cfunc_llvm); - li->jlcall_api = read_int8(s->s); + int8_t jlcall_api = read_int8(s->s); + if (jlcall_api == 2 || jlcall_api == 0) { + li->fptr = NULL; + li->jlcall_api = jlcall_api; + } + else if (jlcall_api < 0) { + li->fptr = jl_deserialize_fptr(s); + li->jlcall_api = -jlcall_api; + } + else { + int32_t cfunc_llvm, func_llvm; + func_llvm = read_int32(s->s); + cfunc_llvm = read_int32(s->s); + jl_delayed_fptrs(li, func_llvm, cfunc_llvm); + li->fptr = NULL; + li->jlcall_api = jlcall_api; + } li->compile_traced = 0; return (jl_value_t*)li; } @@ -2242,23 +2227,23 @@ JL_DLLEXPORT void jl_restore_system_image_data(const char *buf, size_t len) JL_SIGATOMIC_END(); } -JL_DLLEXPORT jl_array_t *jl_compress_ast(jl_lambda_info_t *li, jl_array_t *ast) +JL_DLLEXPORT jl_array_t *jl_compress_ast(jl_method_t *m, jl_array_t *ast) { JL_TIMING(AST_COMPRESS); - JL_LOCK(&li->def->writelock); // protect the roots array (Might GC) - assert(jl_is_lambda_info(li)); + JL_LOCK(&m->writelock); // protect the roots array (Might GC) + assert(jl_is_method(m)); assert(jl_is_array(ast)); ios_t dest; ios_mem(&dest, 0); int en = jl_gc_enable(0); // Might GC - if (li->def->roots == NULL) { - li->def->roots = jl_alloc_vec_any(0); - jl_gc_wb(li->def, li->def->roots); + if (m->roots == NULL) { + m->roots = jl_alloc_vec_any(0); + jl_gc_wb(m, m->roots); } jl_serializer_state s = { &dest, MODE_AST, - li->def->roots, li->def->module + m->roots, m->module }; size_t i, nstmts = jl_array_len(ast); assert(nstmts < INT32_MAX); @@ -2270,21 +2255,21 @@ JL_DLLEXPORT jl_array_t *jl_compress_ast(jl_lambda_info_t *li, jl_array_t *ast) //jl_printf(JL_STDERR, "%d bytes, %d values\n", dest.size, vals->length); jl_array_t *v = jl_takebuf_array(&dest); - if (jl_array_len(li->def->roots) == 0) { - li->def->roots = NULL; + if (jl_array_len(m->roots) == 0) { + m->roots = NULL; } JL_GC_PUSH1(&v); jl_gc_enable(en); - JL_UNLOCK(&li->def->writelock); // Might GC + JL_UNLOCK(&m->writelock); // Might GC JL_GC_POP(); return v; } -JL_DLLEXPORT jl_array_t *jl_uncompress_ast(jl_lambda_info_t *li, jl_array_t *data) +JL_DLLEXPORT jl_array_t *jl_uncompress_ast(jl_method_t *m, jl_array_t *data) { JL_TIMING(AST_UNCOMPRESS); - JL_LOCK(&li->def->writelock); // protect the roots array (Might GC) - assert(jl_is_lambda_info(li)); + JL_LOCK(&m->writelock); // protect the roots array (Might GC) + assert(jl_is_method(m)); assert(jl_is_array(data)); jl_array_t *bytes = (jl_array_t*)data; ios_t src; @@ -2294,7 +2279,7 @@ JL_DLLEXPORT jl_array_t *jl_uncompress_ast(jl_lambda_info_t *li, jl_array_t *dat int en = jl_gc_enable(0); // Might GC jl_serializer_state s = { &src, MODE_AST, - li->def->roots, li->def->module + m->roots, m->module }; size_t i, nstmts = read_int32(&src); @@ -2304,7 +2289,7 @@ JL_DLLEXPORT jl_array_t *jl_uncompress_ast(jl_lambda_info_t *li, jl_array_t *dat jl_array_ptr_set(ast, i, jl_deserialize_value(&s, NULL)); } jl_gc_enable(en); - JL_UNLOCK(&li->def->writelock); // Might GC + JL_UNLOCK(&m->writelock); // Might GC JL_GC_POP(); return ast; } @@ -2448,21 +2433,15 @@ static void jl_recache_types(void) int offs = (int)(intptr_t)flagref_list.items[i++]; jl_value_t *v, *o = loc ? 
*loc : (jl_value_t*)backref_list.items[offs]; jl_datatype_t *dt, *t; - if (jl_is_lambda_info(o)) { - // lookup the real LambdaInfo based on the placeholder specTypes - jl_lambda_info_t *li = (jl_lambda_info_t*)o; - int inferred = li->inferred; + if (jl_is_method_instance(o)) { + // lookup the real MethodInstance based on the placeholder specTypes + jl_method_instance_t *li = (jl_method_instance_t*)o; jl_datatype_t *argtypes = jl_recache_type(li->specTypes, i, NULL); jl_datatype_t *ftype = jl_first_argument_datatype((jl_value_t*)argtypes); jl_methtable_t *mt = ftype->name->mt; jl_set_typeof(li, (void*)(intptr_t)0x30); // invalidate the old value to help catch errors li = jl_method_lookup_by_type(mt, argtypes, 0, 0, 0); assert(li); - // if it can be inferred but isn't, encourage codegen to infer it - if (inferred && !li->inferred) { - jl_set_lambda_code_null(li); - li->inferred = 1; - } // update the backref list if (loc) *loc = (jl_value_t*)li; if (offs > 0) backref_list.items[offs] = li; @@ -2629,7 +2608,7 @@ void jl_init_serializer(void) (void*)LongExpr_tag, (void*)LiteralVal_tag, (void*)SmallInt64_tag, (void*)SmallDataType_tag, (void*)Int32_tag, (void*)Array1d_tag, (void*)Singleton_tag, - jl_module_type, jl_tvar_type, jl_lambda_info_type, jl_method_type, + jl_module_type, jl_tvar_type, jl_method_instance_type, jl_method_type, (void*)CommonSym_tag, (void*)NearbyGlobal_tag, jl_globalref_type, // everything above here represents a class of object rather than only a literal @@ -2679,7 +2658,7 @@ void jl_init_serializer(void) jl_type_type, jl_bottom_type, jl_ref_type, jl_pointer_type, jl_vararg_type, jl_abstractarray_type, jl_densearray_type, jl_void_type, jl_function_type, - jl_typector_type, jl_typename_type, jl_builtin_type, + jl_typector_type, jl_typename_type, jl_builtin_type, jl_code_info_type, jl_task_type, jl_uniontype_type, jl_typetype_type, jl_typetype_tvar, jl_ANY_flag, jl_array_any_type, jl_intrinsic_type, jl_abstractslot_type, jl_methtable_type, jl_typemap_level_type, jl_typemap_entry_type, @@ -2692,7 +2671,7 @@ void jl_init_serializer(void) jl_expr_type->name, jl_typename_type->name, jl_type_type->name, jl_methtable_type->name, jl_typemap_level_type->name, jl_typemap_entry_type->name, jl_tvar_type->name, jl_abstractarray_type->name, jl_vararg_type->name, - jl_densearray_type->name, jl_void_type->name, jl_lambda_info_type->name, jl_method_type->name, + jl_densearray_type->name, jl_void_type->name, jl_method_instance_type->name, jl_method_type->name, jl_module_type->name, jl_function_type->name, jl_typedslot_type->name, jl_abstractslot_type->name, jl_slotnumber_type->name, jl_typector_type->name, jl_intrinsic_type->name, jl_task_type->name, diff --git a/src/gf.c b/src/gf.c index b067083609e10..4c82b1aec4c69 100644 --- a/src/gf.c +++ b/src/gf.c @@ -24,7 +24,7 @@ extern "C" { #endif -JL_DLLEXPORT jl_value_t *jl_invoke(jl_lambda_info_t *meth, jl_value_t **args, uint32_t nargs) +JL_DLLEXPORT jl_value_t *jl_invoke(jl_method_instance_t *meth, jl_value_t **args, uint32_t nargs) { return jl_call_method_internal(meth, args, nargs); } @@ -49,20 +49,20 @@ JL_DLLEXPORT void jl_untrace_method(jl_method_t *m) m->traced = 0; } -JL_DLLEXPORT void jl_trace_linfo(jl_lambda_info_t *linfo) +JL_DLLEXPORT void jl_trace_linfo(jl_method_instance_t *linfo) { - assert(jl_is_lambda_info(linfo)); + assert(jl_is_method_instance(linfo)); linfo->compile_traced = 1; } -JL_DLLEXPORT void jl_untrace_linfo(jl_lambda_info_t *linfo) +JL_DLLEXPORT void jl_untrace_linfo(jl_method_instance_t *linfo) { - 
assert(jl_is_lambda_info(linfo)); + assert(jl_is_method_instance(linfo)); linfo->compile_traced = 0; } static tracer_cb jl_method_tracer = NULL; -JL_DLLEXPORT void jl_register_method_tracer(void (*callback)(jl_lambda_info_t *tracee)) +JL_DLLEXPORT void jl_register_method_tracer(void (*callback)(jl_method_instance_t *tracee)) { jl_method_tracer = (tracer_cb)callback; } @@ -74,7 +74,7 @@ JL_DLLEXPORT void jl_register_newmeth_tracer(void (*callback)(jl_method_t *trace } tracer_cb jl_linfo_tracer = NULL; -JL_DLLEXPORT void jl_register_linfo_tracer(void (*callback)(jl_lambda_info_t *tracee)) +JL_DLLEXPORT void jl_register_linfo_tracer(void (*callback)(jl_method_instance_t *tracee)) { jl_linfo_tracer = (tracer_cb)callback; } @@ -103,7 +103,7 @@ const struct jl_typemap_info method_defs = { 0, &jl_method_type }; const struct jl_typemap_info lambda_cache = { - 0, &jl_lambda_info_type + 0, &jl_method_instance_type }; const struct jl_typemap_info tfunc_cache = { 1, &jl_any_type @@ -116,16 +116,16 @@ static int8_t jl_cachearg_offset(jl_methtable_t *mt) /// ----- Insertion logic for special entries ----- /// -// get or create the LambdaInfo for a specialization -JL_DLLEXPORT jl_lambda_info_t *jl_specializations_get_linfo(jl_method_t *m, jl_tupletype_t *type, jl_svec_t *sparams, int allow_exec) +// get or create the MethodInstance for a specialization +JL_DLLEXPORT jl_method_instance_t *jl_specializations_get_linfo(jl_method_t *m, jl_tupletype_t *type, jl_svec_t *sparams) { JL_LOCK(&m->writelock); - jl_typemap_entry_t *sf = jl_typemap_assoc_by_type(m->specializations, type, NULL, 1, /*subtype*/0, /*offs*/0); - if (sf && jl_is_lambda_info(sf->func.value) && ((jl_lambda_info_t*)sf->func.value)->code != jl_nothing) { + jl_typemap_entry_t *sf = jl_typemap_assoc_by_type(m->specializations, type, NULL, 2, /*subtype*/0, /*offs*/0); + if (sf && jl_is_method_instance(sf->func.value)) { JL_UNLOCK(&m->writelock); - return (jl_lambda_info_t*)sf->func.value; + return (jl_method_instance_t*)sf->func.value; } - jl_lambda_info_t *li = jl_get_specialized(m, type, sparams, allow_exec); + jl_method_instance_t *li = jl_get_specialized(m, type, sparams); JL_GC_PUSH1(&li); // TODO: fuse lookup and insert steps jl_typemap_insert(&m->specializations, (jl_value_t*)m, type, jl_emptysvec, NULL, jl_emptysvec, (jl_value_t*)li, 0, &tfunc_cache, NULL); @@ -150,7 +150,7 @@ JL_DLLEXPORT jl_value_t *jl_methtable_lookup(jl_methtable_t *mt, jl_tupletype_t return sf->func.value; } -// ----- LambdaInfo specialization instantiation ----- // +// ----- MethodInstance specialization instantiation ----- // JL_DLLEXPORT jl_method_t *jl_new_method_uninit(void); static jl_function_t *jl_new_generic_function_with_supertype(jl_sym_t *name, jl_module_t *module, jl_datatype_t *st, int iskw); @@ -162,44 +162,43 @@ void jl_mk_builtin_func(jl_datatype_t *dt, const char *name, jl_fptr_t fptr) jl_set_const(jl_core_module, sname, f); dt = (jl_datatype_t*)jl_typeof(f); } - jl_lambda_info_t *li = jl_new_lambda_info_uninit(); + jl_method_instance_t *li = jl_new_method_instance_uninit(); li->fptr = fptr; - li->code = jl_nothing; - li->slottypes = jl_nothing; + li->jlcall_api = 1; li->specTypes = jl_anytuple_type; - li->ssavaluetypes = jl_box_long(0); - jl_gc_wb(li, li->ssavaluetypes); li->def = jl_new_method_uninit(); li->def->name = sname; li->def->module = jl_core_module; - li->def->lambda_template = li; + li->def->isva = 1; + li->def->nargs = 2; li->def->sig = jl_anytuple_type; li->def->tvars = jl_emptysvec; + li->def->sparam_syms = jl_emptysvec; 
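// A note on the shape just assembled in jl_mk_builtin_func: under the new
// split, a builtin is a hand-built Method + MethodInstance pair. The Method
// carries only metadata (a `(f, args...)` varargs signature, hence nargs = 2
// and isva = 1, with no source), while the MethodInstance carries the C entry
// point with jlcall_api = 1. A hypothetical registration in this style (the
// builtin name and C function below are illustrative, not taken from this
// patch):
//
//   jl_mk_builtin_func(NULL, "typeof", jl_f_typeof);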
jl_methtable_t *mt = dt->name->mt; jl_typemap_insert(&mt->cache, (jl_value_t*)mt, jl_anytuple_type, jl_emptysvec, NULL, jl_emptysvec, (jl_value_t*)li, 0, &lambda_cache, NULL); } -/* - run type inference on lambda "li" for given argument types. - if "li" has been inferred before but the IR was deleted, returns a - new LambdaInfo with the IR reconstituted. -*/ -jl_lambda_info_t *jl_type_infer(jl_lambda_info_t *li, int force) +// run type inference on lambda "li" for given argument types. +// returns the inferred source, and may cache the result in li +// if inference doesn't occur (or can't finish), returns NULL instead +jl_code_info_t *jl_type_infer(jl_method_instance_t *li, int force) { JL_TIMING(INFERENCE); + if (jl_typeinf_func == NULL) + return NULL; #ifdef ENABLE_INFERENCE - JL_LOCK(&codegen_lock); // use codegen lock to synchronize type-inference jl_module_t *mod = NULL; if (li->def != NULL) mod = li->def->module; static int inInference = 0; int lastIn = inInference; + jl_code_info_t *src = NULL; inInference = 1; - if (jl_typeinf_func != NULL && (force || + if (force || (mod != jl_gf_mtable(jl_typeinf_func)->module && - (mod != jl_core_module || !lastIn)))) { // avoid any potential recursion in calling jl_typeinf_func on itself + (mod != jl_core_module || !lastIn))) { // avoid any potential recursion in calling jl_typeinf_func on itself assert(li->inInference == 0); jl_value_t *fargs[2]; fargs[0] = (jl_value_t*)jl_typeinf_func; @@ -209,41 +208,44 @@ jl_lambda_info_t *jl_type_infer(jl_lambda_info_t *li, int force) jl_static_show_func_sig(JL_STDERR, (jl_value_t*)li->specTypes); jl_printf(JL_STDERR, "\n"); #endif - li = (jl_lambda_info_t*)jl_apply(fargs, 2); + src = (jl_code_info_t *)jl_apply(fargs, 2); + if (src == (void*)jl_nothing) + src = NULL; assert(li->def || li->inInference == 0); // if this is toplevel expr, make sure inference finished } inInference = lastIn; - JL_UNLOCK(&codegen_lock); // Might GC (li might be rooted?) 
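// The contract change in jl_type_infer here is worth spelling out: it now
// returns the inferred jl_code_info_t, or NULL when inference cannot run
// (no jl_typeinf_func loaded yet, the recursion guard tripped, or inference
// is compiled out), instead of returning a possibly-new LambdaInfo. A minimal
// caller sketch under that contract, assuming the jl_is_uninferred helper
// defined later in this file:
//
//   jl_code_info_t *src = NULL;
//   if (li->def && jl_is_uninferred(li) && !li->inInference)
//       src = jl_type_infer(li, 0);   // may also cache its result in li->inferred
//   if (li->jlcall_api != 2)          // a constant return (api 2) needs no codegen
//       jl_compile_linfo(li, src);    // with src == NULL, codegen falls back to the stored source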
#endif - return li; + return src; } -JL_DLLEXPORT void jl_set_lambda_rettype(jl_lambda_info_t *li, jl_value_t *rettype) +JL_DLLEXPORT void jl_set_lambda_rettype(jl_method_instance_t *li, jl_value_t *rettype, jl_value_t *const_api, jl_value_t *inferred) { // changing rettype changes the llvm signature, // so clear all of the llvm state at the same time assert(li->inInference); assert(!li->inferred || li->functionObjectsDecls.functionObject == NULL); // protect against some double-infer dataflow mistakes - assert(li->jlcall_api != 2); // protect against some double-infer dataflow mistakes - li->rettype = rettype; - jl_gc_wb(li, rettype); li->functionObjectsDecls.functionObject = NULL; li->functionObjectsDecls.specFunctionObject = NULL; - li->constval = NULL; + li->rettype = rettype; + jl_gc_wb(li, rettype); + li->inferred = inferred; + jl_gc_wb(li, inferred); + if (const_api == jl_true) + li->jlcall_api = 2; } -JL_DLLEXPORT void jl_set_lambda_code_null(jl_lambda_info_t *li) +static int jl_is_uninferred(jl_method_instance_t *li) { - li->code = jl_nothing; - li->ssavaluetypes = jl_box_long(jl_array_len(li->ssavaluetypes)); - jl_gc_wb(li, li->ssavaluetypes); - li->slotflags = NULL; - li->slotnames = NULL; + if (!li->inferred) + return 1; + if (jl_is_code_info(li->inferred) && !((jl_code_info_t*)li->inferred)->inferred) + return 1; + return 0; } static int get_spec_unspec_list(jl_typemap_entry_t *l, void *closure) { - if (jl_is_lambda_info(l->func.value) && !l->func.linfo->inferred) + if (jl_is_method_instance(l->func.value) && jl_is_uninferred(l->func.linfo)) jl_array_ptr_1d_push((jl_array_t*)closure, l->func.value); return 1; } @@ -297,8 +299,8 @@ JL_DLLEXPORT void jl_set_typeinf_func(jl_value_t *f) jl_reset_mt_caches(jl_main_module, unspec); size_t i, l; for (i = 0, l = jl_array_len(unspec); i < l; i++) { - jl_lambda_info_t *li = (jl_lambda_info_t*)jl_array_ptr_ref(unspec, i); - if (!li->inferred) + jl_method_instance_t *li = (jl_method_instance_t*)jl_array_ptr_ref(unspec, i); + if (jl_is_uninferred(li)) jl_type_infer(li, 1); } JL_GC_POP(); @@ -607,7 +609,7 @@ JL_DLLEXPORT int jl_is_cacheable_sig( return 1; } -static jl_lambda_info_t *cache_method(jl_methtable_t *mt, union jl_typemap_t *cache, jl_value_t *parent, +static jl_method_instance_t *cache_method(jl_methtable_t *mt, union jl_typemap_t *cache, jl_value_t *parent, jl_tupletype_t *type, // the specialized type signature for type lambda jl_tupletype_t *tt, // the original tupletype of the signature jl_typemap_entry_t *m, @@ -620,7 +622,7 @@ static jl_lambda_info_t *cache_method(jl_methtable_t *mt, union jl_typemap_t *ca jl_value_t *temp = NULL; jl_value_t *temp2 = NULL; jl_value_t *temp3 = NULL; - jl_lambda_info_t *newmeth = NULL; + jl_method_instance_t *newmeth = NULL; jl_svec_t *newparams = NULL; JL_GC_PUSH5(&temp, &temp2, &temp3, &newmeth, &newparams); @@ -758,7 +760,7 @@ static jl_lambda_info_t *cache_method(jl_methtable_t *mt, union jl_typemap_t *ca } // here we infer types and specialize the method - newmeth = jl_specializations_get_linfo(definition, type, sparams, allow_exec); + newmeth = jl_specializations_get_linfo(definition, type, sparams); if (cache_with_orig) { // if there is a need to cache with one of the original signatures, @@ -804,7 +806,7 @@ static jl_lambda_info_t *cache_method(jl_methtable_t *mt, union jl_typemap_t *ca return newmeth; } -static jl_lambda_info_t *jl_mt_assoc_by_type(jl_methtable_t *mt, jl_datatype_t *tt, int cache, int inexact, int allow_exec) +static jl_method_instance_t 
*jl_mt_assoc_by_type(jl_methtable_t *mt, jl_datatype_t *tt, int cache, int inexact, int allow_exec) { // caller must hold the mt->writelock jl_typemap_entry_t *entry = NULL; @@ -825,9 +827,9 @@ static jl_lambda_info_t *jl_mt_assoc_by_type(jl_methtable_t *mt, jl_datatype_t * return NULL; } sig = join_tsig(tt, entry->sig); - jl_lambda_info_t *nf; + jl_method_instance_t *nf; if (!cache) { - nf = jl_specializations_get_linfo(m, sig, env, allow_exec); + nf = jl_get_specialized(m, sig, env); // TODO: should be jl_specializations_get_linfo } else { nf = cache_method(mt, &mt->cache, (jl_value_t*)mt, sig, tt, entry, env, allow_exec); @@ -1143,7 +1145,7 @@ jl_tupletype_t *arg_type_tuple(jl_value_t **args, size_t nargs) return tt; } -jl_lambda_info_t *jl_method_lookup_by_type(jl_methtable_t *mt, jl_tupletype_t *types, +jl_method_instance_t *jl_method_lookup_by_type(jl_methtable_t *mt, jl_tupletype_t *types, int cache, int inexact, int allow_exec) { jl_typemap_entry_t *entry = jl_typemap_assoc_by_type(mt->cache, types, NULL, 0, 1, jl_cachearg_offset(mt)); @@ -1157,7 +1159,7 @@ jl_lambda_info_t *jl_method_lookup_by_type(jl_methtable_t *mt, jl_tupletype_t *t } if (jl_is_leaf_type((jl_value_t*)types)) cache = 1; - jl_lambda_info_t *sf = jl_mt_assoc_by_type(mt, types, cache, inexact, allow_exec); + jl_method_instance_t *sf = jl_mt_assoc_by_type(mt, types, cache, inexact, allow_exec); if (cache) { JL_UNLOCK(&mt->writelock); } @@ -1174,7 +1176,7 @@ JL_DLLEXPORT int jl_method_exists(jl_methtable_t *mt, jl_tupletype_t *types) return jl_method_lookup_by_type(mt, types, 0, 0, 1) != NULL; } -jl_lambda_info_t *jl_method_lookup(jl_methtable_t *mt, jl_value_t **args, size_t nargs, int cache) +jl_method_instance_t *jl_method_lookup(jl_methtable_t *mt, jl_value_t **args, size_t nargs, int cache) { jl_typemap_entry_t *entry = jl_typemap_assoc_exact(mt->cache, args, nargs, jl_cachearg_offset(mt)); if (entry) @@ -1187,7 +1189,7 @@ jl_lambda_info_t *jl_method_lookup(jl_methtable_t *mt, jl_value_t **args, size_t return entry->func.linfo; } jl_tupletype_t *tt = arg_type_tuple(args, nargs); - jl_lambda_info_t *sf = NULL; + jl_method_instance_t *sf = NULL; JL_GC_PUSH2(&tt, &sf); sf = jl_mt_assoc_by_type(mt, tt, cache, 0, 1); if (cache) { @@ -1202,70 +1204,31 @@ jl_lambda_info_t *jl_method_lookup(jl_methtable_t *mt, jl_value_t **args, size_t return sf; } -static jl_lambda_info_t *jl_get_unspecialized(jl_lambda_info_t *method) -{ - // one unspecialized version of a function can be shared among all cached specializations - jl_method_t *def = method->def; - if (method->unspecialized_ducttape != NULL) - return method->unspecialized_ducttape; - if (method->sparam_syms != jl_emptysvec) { - JL_LOCK(&def->writelock); - if (method->unspecialized_ducttape != NULL) { - JL_UNLOCK(&def->writelock); - return method->unspecialized_ducttape; - } - if (def->needs_sparam_vals_ducttape == 2) { - jl_array_t *code = (jl_array_t*)def->lambda_template->code; - JL_GC_PUSH1(&code); - if (!jl_typeis(code, jl_array_any_type)) - code = jl_uncompress_ast(def->lambda_template, code); - size_t i, l = jl_array_len(code); - def->needs_sparam_vals_ducttape = 0; - for (i = 0; i < l; i++) { - if (jl_has_intrinsics(method, jl_array_ptr_ref(code, i), def->module)) { - def->needs_sparam_vals_ducttape = 1; - break; - } - } - JL_GC_POP(); - } - if (def->needs_sparam_vals_ducttape) { - method->unspecialized_ducttape = jl_get_specialized(def, method->specTypes, method->sparam_vals, 1); - jl_gc_wb(method, method->unspecialized_ducttape); - 
method->unspecialized_ducttape->unspecialized_ducttape = method->unspecialized_ducttape; - } - JL_UNLOCK(&def->writelock); - if (method->unspecialized_ducttape != NULL) - return method->unspecialized_ducttape; - } - return def->lambda_template; -} - JL_DLLEXPORT jl_value_t *jl_matching_methods(jl_tupletype_t *types, int lim, int include_ambiguous); -jl_lambda_info_t *jl_compile_for_dispatch(jl_lambda_info_t *li) +jl_llvm_functions_t jl_compile_for_dispatch(jl_method_instance_t *li) { if (li->jlcall_api == 2) - return li; + return li->functionObjectsDecls; if (jl_options.compile_enabled == JL_OPTIONS_COMPILE_OFF || jl_options.compile_enabled == JL_OPTIONS_COMPILE_MIN) { // copy fptr from the template method definition jl_method_t *def = li->def; - if (def && !def->isstaged) { - if (def->lambda_template->jlcall_api == 2) { + if (def && !def->isstaged && def->unspecialized) { + if (def->unspecialized->jlcall_api == 2) { li->functionObjectsDecls.functionObject = NULL; li->functionObjectsDecls.specFunctionObject = NULL; + li->inferred = def->unspecialized->inferred; li->jlcall_api = 2; - li->constval = def->lambda_template->constval; - jl_gc_wb(li, li->constval); - return li; + jl_gc_wb(li, li->inferred); + return li->functionObjectsDecls; } - if (def->lambda_template->fptr) { + if (def->unspecialized->fptr) { li->functionObjectsDecls.functionObject = NULL; li->functionObjectsDecls.specFunctionObject = NULL; - li->fptr = def->lambda_template->fptr; - li->jlcall_api = def->lambda_template->jlcall_api; - return li; + li->jlcall_api = def->unspecialized->jlcall_api; + li->fptr = def->unspecialized->fptr; + return li->functionObjectsDecls; } } if (jl_options.compile_enabled == JL_OPTIONS_COMPILE_OFF) { @@ -1274,52 +1237,26 @@ jl_lambda_info_t *jl_compile_for_dispatch(jl_lambda_info_t *li) jl_printf(JL_STDERR, " sysimg may not have been built with --compile=all\n"); } } - if (li->functionObjectsDecls.functionObject != NULL) - return li; - if (li->def) { - JL_LOCK(&codegen_lock); - JL_LOCK(&li->def->writelock); - if (li->functionObjectsDecls.functionObject != NULL) { - JL_UNLOCK(&li->def->writelock); - JL_UNLOCK(&codegen_lock); - return li; - } - if (li->inInference || li->inCompile) { - // if inference is running on this function, get a copy - // of the function to be compiled without inference and run. - assert(li->def != NULL); - li = jl_get_unspecialized(li); - } - else if (li->jlcall_api != 2) { - if (li->code == jl_nothing || - (!li->inferred && li->def != NULL && jl_symbol_name(li->def->name)[0] != '@')) { - // don't bother with typeinf on macros or toplevel thunks - jl_type_infer(li, 0); - } - if (li->functionObjectsDecls.functionObject == NULL && li->jlcall_api != 2) { - if (li->inInference || li->inCompile || li->code == jl_nothing) { - // if inference is running on this function, get a copy - // of the function to be compiled without inference and run. 
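// The block removed here implemented the old fallback dance: when inference or
// codegen was already running on `li`, dispatch grabbed an unspecialized copy
// (via jl_get_unspecialized, deleted above) and compiled that instead. In the
// rewritten jl_compile_for_dispatch below, the same concern shrinks to a single
// uninferred check plus a jl_type_infer call, because the source template now
// lives on the Method and inferred results are cached on the MethodInstance,
// rather than both being mutated in place on one LambdaInfo.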
- assert(li->def != NULL); - li = jl_get_unspecialized(li); - } - } - } - } - assert(!li->inInference && !li->inCompile && - (li->code != jl_nothing || li->jlcall_api == 2)); - if (li->functionObjectsDecls.functionObject == NULL) { // check again, because jl_type_infer may have compiled it - jl_compile_linfo(li); - } - if (li->def) { - JL_UNLOCK(&li->def->writelock); - JL_UNLOCK(&codegen_lock); + jl_llvm_functions_t decls = li->functionObjectsDecls; + if (decls.functionObject != NULL || li->jlcall_api == 2) + return decls; + + jl_code_info_t *src = NULL; + if (li->def && jl_is_uninferred(li) && !li->inInference && + jl_symbol_name(li->def->name)[0] != '@') { + // don't bother with typeinf on macros or toplevel thunks + // but try to infer everything else + src = jl_type_infer(li, 0); } - return li; + // check again, because jl_type_infer may have compiled it + decls = li->functionObjectsDecls; + if (decls.functionObject != NULL || li->jlcall_api == 2) + return decls; + return jl_compile_linfo(li, src); } // compile-time method lookup -jl_lambda_info_t *jl_get_specialization1(jl_tupletype_t *types) +jl_method_instance_t *jl_get_specialization1(jl_tupletype_t *types) { JL_TIMING(METHOD_LOOKUP_COMPILE); assert(jl_nparams(types) > 0); @@ -1343,17 +1280,12 @@ jl_lambda_info_t *jl_get_specialization1(jl_tupletype_t *types) } jl_methtable_t *mt = ((jl_datatype_t*)jl_tparam0(types))->name->mt; - jl_lambda_info_t *sf = NULL; // most of the time sf is rooted in mt, but if the method is staged it may // not be the case + // TODO: the above should be false, but better safe than sorry? + jl_method_instance_t *sf = jl_method_lookup_by_type(mt, types, 1, 1, 1); JL_GC_PUSH1(&sf); - JL_TRY { - sf = jl_method_lookup_by_type(mt, types, 1, 1, 1); - } JL_CATCH { - sf = NULL; - } - if (sf == NULL || sf->code == NULL || - jl_has_call_ambiguities(types, sf->def)) { + if (sf != NULL && jl_has_call_ambiguities(types, sf->def)) { sf = NULL; } JL_GC_POP(); @@ -1362,25 +1294,20 @@ jl_lambda_info_t *jl_get_specialization1(jl_tupletype_t *types) JL_DLLEXPORT int jl_compile_hint(jl_tupletype_t *types) { - jl_lambda_info_t *li = jl_get_specialization1(types); + jl_method_instance_t *li = jl_get_specialization1(types); if (li == NULL) return 0; - if (li->functionObjectsDecls.functionObject == NULL && li->jlcall_api != 2) { - assert(!li->inCompile); - if (li->inInference) - return 0; - if (li->code == jl_nothing || !li->inferred) - jl_type_infer(li, 0); - if (li->inInference || li->code == jl_nothing) - return 0; - jl_compile_linfo(li); - } + jl_code_info_t *src = NULL; + if (jl_is_uninferred(li)) + src = jl_type_infer(li, 0); + if (li->jlcall_api != 2) + jl_compile_linfo(li, src); return 1; } JL_DLLEXPORT jl_value_t *jl_get_spec_lambda(jl_tupletype_t *types) { - jl_lambda_info_t *li = jl_get_specialization1(types); + jl_method_instance_t *li = jl_get_specialization1(types); return li ? 
(jl_value_t*)li : jl_nothing; } @@ -1581,61 +1508,41 @@ static void _compile_all_deq(jl_array_t *found) { int found_i, found_l = jl_array_len(found); jl_printf(JL_STDERR, "found %d uncompiled methods for compile-all\n", (int)found_l); - jl_lambda_info_t *linfo = NULL; - JL_GC_PUSH1(&linfo); + jl_method_instance_t *linfo = NULL; + jl_code_info_t *src = NULL; + JL_GC_PUSH2(&linfo, &src); for (found_i = 0; found_i < found_l; found_i++) { if (found_i % (1 + found_l / 300) == 0 || found_i == found_l - 1) // show 300 progress steps, to show progress without overwhelming log files jl_printf(JL_STDERR, " %d / %d\r", found_i + 1, found_l); jl_typemap_entry_t *ml = (jl_typemap_entry_t*)jl_array_ptr_ref(found, found_i); jl_method_t *m = ml->func.method; - jl_lambda_info_t *templ = m->lambda_template; - // type infer a copy of the template, to avoid modifying the template code itself - if (m->isstaged) - linfo = templ; - else - linfo = jl_specializations_get_linfo(m, ml->sig, jl_emptysvec, 1); - - if (linfo->jlcall_api == 2) { - if (linfo != templ) { - templ->jlcall_api = 2; - templ->constval = linfo->constval; - } - continue; + jl_method_instance_t *linfo = m->unspecialized; + if (!linfo) { + // XXX: use computed env rather than empty svec + linfo = jl_specializations_get_linfo(m, ml->sig, jl_emptysvec); + m->unspecialized = linfo; + jl_gc_wb(m, linfo); } // infer this function now, if necessary - if (!linfo->inferred || linfo->code == jl_nothing) - jl_type_infer(linfo, 1); - - if (linfo->jlcall_api == 2) { - if (linfo != templ) { - templ->jlcall_api = 2; - templ->constval = linfo->constval; - } + if (linfo->jlcall_api == 2) + continue; + src = jl_type_infer(linfo, 1); + if (linfo->jlcall_api == 2) continue; - } // keep track of whether all possible signatures have been cached (and thus whether it can skip trying to compile the template function) // this is necessary because many intrinsics try to call static_eval and thus are not compilable unspecialized int complete = _compile_all_union(ml->sig, ml->tvars); if (complete) { - if (templ->fptr == NULL) + if (linfo->fptr == NULL && linfo->functionObjectsDecls.functionObject == NULL) // indicate that this method doesn't need to be compiled, because it was fully covered above - templ->fptr = (jl_fptr_t)(uintptr_t)-1; + // TODO: do this some other way + linfo->fptr = (jl_fptr_t)(uintptr_t)-1; } else { - jl_compile_linfo(linfo); - assert(linfo->functionObjectsDecls.functionObject != NULL || linfo->jlcall_api == 2); - if (linfo != templ) { - // copy the function pointer back to the lambda_template - templ->functionObjectsDecls = linfo->functionObjectsDecls; - templ->jlcall_api = linfo->jlcall_api; - templ->constval = linfo->constval; - if (templ->constval) jl_gc_wb(templ, templ->constval); - templ->rettype = linfo->rettype; - jl_gc_wb(templ, templ->rettype); - templ->fptr = NULL; - } + jl_compile_linfo(linfo, src); + assert(linfo->functionObjectsDecls.functionObject != NULL); } } JL_GC_POP(); @@ -1647,9 +1554,10 @@ static int _compile_all_enq(jl_typemap_entry_t *ml, void *env) jl_array_t *found = (jl_array_t*)env; // method definition -- compile template field jl_method_t *m = ml->func.method; - if (m->lambda_template->functionObjectsDecls.functionObject == NULL && - m->lambda_template->jlcall_api != 2 && - m->lambda_template->fptr == NULL) { + if (!m->unspecialized || + (m->unspecialized->functionObjectsDecls.functionObject == NULL && + m->unspecialized->jlcall_api != 2 && + m->unspecialized->fptr == NULL)) { // found a lambda that still needs to be 
compiled jl_array_ptr_1d_push(found, (jl_value_t*)ml); } @@ -1659,7 +1567,7 @@ static int _compile_all_enq(jl_typemap_entry_t *ml, void *env) static void _compile_all_enq_module(jl_module_t *m, jl_array_t *found) { // scan through all types reachable from 'v' and - // record all jl_lambda_info_t objects and signatures in their method tables + // record all jl_method_instance_t objects and signatures in their method tables size_t i, sz = m->bindings.size; for(i=1; i < sz; i+=2) { if (m->bindings.table[i] != HT_NOTFOUND) { @@ -1690,7 +1598,7 @@ static void _compile_all_enq_module(jl_module_t *m, jl_array_t *found) static void jl_compile_all_defs(void) { // this "found" array will contain - // TypeMapEntries for Methods and LambdaInfos that need to be compiled + // TypeMapEntries for Methods and MethodInstances that need to be compiled jl_array_t *m = jl_alloc_vec_any(0); JL_GC_PUSH1(&m); while (1) { @@ -1706,7 +1614,7 @@ static void jl_compile_all_defs(void) static int _precompile_enq_tfunc(jl_typemap_entry_t *l, void *closure) { - if (jl_is_lambda_info(l->func.value) && + if (jl_is_method_instance(l->func.value) && l->func.linfo->functionObjectsDecls.functionObject == NULL && l->func.linfo->jlcall_api != 2) jl_array_ptr_1d_push((jl_array_t*)closure, (jl_value_t*)l->sig); @@ -1908,7 +1816,7 @@ JL_DLLEXPORT jl_value_t *jl_apply_generic(jl_value_t **args, uint32_t nargs) } } - jl_lambda_info_t *mfunc = NULL; + jl_method_instance_t *mfunc = NULL; if (entry) { mfunc = entry->func.linfo; } @@ -1986,7 +1894,7 @@ jl_value_t *jl_gf_invoke(jl_tupletype_t *types0, jl_value_t **args, size_t nargs // next look for or create a specialization of this definition. jl_method_t *method = entry->func.method; - jl_lambda_info_t *mfunc = NULL; + jl_method_instance_t *mfunc = NULL; jl_typemap_entry_t *tm = NULL; if (method->invokes.unknown != NULL) tm = jl_typemap_assoc_exact(method->invokes, args, nargs, jl_cachearg_offset(mt)); diff --git a/src/interpreter.c b/src/interpreter.c index 857807f0c4d1f..93ffdff932f72 100644 --- a/src/interpreter.c +++ b/src/interpreter.c @@ -15,7 +15,8 @@ extern "C" { #endif typedef struct { - jl_lambda_info_t *lam; + jl_code_info_t *src; + jl_module_t *module; jl_value_t **locals; jl_svec_t *sparam_vals; } interpreter_state; @@ -32,14 +33,18 @@ jl_value_t *jl_interpret_toplevel_expr(jl_value_t *e) } JL_DLLEXPORT jl_value_t *jl_interpret_toplevel_expr_in(jl_module_t *m, jl_value_t *e, - jl_lambda_info_t *lam) + jl_code_info_t *src, + jl_svec_t *sparam_vals) { jl_ptls_t ptls = jl_get_ptls_states(); jl_value_t *v=NULL; jl_module_t *last_m = ptls->current_module; jl_module_t *task_last_m = ptls->current_task->current_module; interpreter_state s; - s.lam = lam; s.locals = NULL; s.sparam_vals = NULL; + s.src = src; + s.module = m; + s.locals = NULL; + s.sparam_vals = sparam_vals; JL_TRY { ptls->current_task->current_module = ptls->current_module = m; @@ -75,8 +80,8 @@ static jl_value_t *do_invoke(jl_value_t **args, size_t nargs, interpreter_state size_t i; for (i = 1; i < nargs; i++) argv[i - 1] = eval(args[i], s); - jl_lambda_info_t *meth = (jl_lambda_info_t*)args[0]; - assert(jl_is_lambda_info(meth) && !meth->inInference); + jl_method_instance_t *meth = (jl_method_instance_t*)args[0]; + assert(jl_is_method_instance(meth) && !meth->inInference); jl_value_t *result = jl_call_method_internal(meth, argv, nargs - 1); JL_GC_POP(); return result; @@ -150,34 +155,34 @@ void jl_set_datatype_super(jl_datatype_t *tt, jl_value_t *super) jl_gc_wb(tt, tt->super); } -static int 
jl_linfo_nslots(jl_lambda_info_t *li) +static int jl_source_nslots(jl_code_info_t *src) { - return jl_array_len(li->slotflags); + return jl_array_len(src->slotflags); } -static int jl_linfo_nssavalues(jl_lambda_info_t *li) +static int jl_source_nssavalues(jl_code_info_t *src) { - return jl_is_long(li->ssavaluetypes) ? jl_unbox_long(li->ssavaluetypes) : jl_array_len(li->ssavaluetypes); + return jl_is_long(src->ssavaluetypes) ? jl_unbox_long(src->ssavaluetypes) : jl_array_len(src->ssavaluetypes); } static jl_value_t *eval(jl_value_t *e, interpreter_state *s) { jl_ptls_t ptls = jl_get_ptls_states(); - jl_lambda_info_t *lam = s==NULL ? NULL : s->lam; + jl_code_info_t *src = s==NULL ? NULL : s->src; if (jl_is_ssavalue(e)) { ssize_t id = ((jl_ssavalue_t*)e)->id; - if (id >= jl_linfo_nssavalues(lam) || id < 0 || s->locals == NULL) + if (id >= jl_source_nssavalues(src) || id < 0 || s->locals == NULL) jl_error("access to invalid SSAValue"); else - return s->locals[jl_linfo_nslots(lam) + id]; + return s->locals[jl_source_nslots(src) + id]; } if (jl_is_slot(e)) { ssize_t n = jl_slot_number(e); - if (n > jl_linfo_nslots(lam) || n < 1 || s->locals == NULL) + if (n > jl_source_nslots(src) || n < 1 || s->locals == NULL) jl_error("access to invalid slot number"); jl_value_t *v = s->locals[n-1]; if (v == NULL) - jl_undefined_var_error((jl_sym_t*)jl_array_ptr_ref(lam->slotnames,n-1)); + jl_undefined_var_error((jl_sym_t*)jl_array_ptr_ref(src->slotnames, n - 1)); return v; } if (jl_is_globalref(e)) { @@ -189,7 +194,7 @@ static jl_value_t *eval(jl_value_t *e, interpreter_state *s) } if (jl_is_quotenode(e)) return jl_fieldref(e,0); - jl_module_t *modu = (lam == NULL || lam->def == NULL) ? ptls->current_module : lam->def->module; + jl_module_t *modu = (s == NULL ? ptls->current_module : s->module); if (jl_is_symbol(e)) { // bare symbols appear in toplevel exprs not wrapped in `thunk` jl_value_t *v = jl_get_global(modu, (jl_sym_t*)e); if (v == NULL) @@ -222,10 +227,8 @@ static jl_value_t *eval(jl_value_t *e, interpreter_state *s) else if (ex->head == static_parameter_sym) { ssize_t n = jl_unbox_long(args[0]); assert(n > 0); - if (s->sparam_vals) - return jl_svecref(s->sparam_vals, n - 1); - if (n <= jl_svec_len(lam->sparam_vals)) { - jl_value_t *sp = jl_svecref(lam->sparam_vals, n - 1); + if (s->sparam_vals && n <= jl_svec_len(s->sparam_vals)) { + jl_value_t *sp = jl_svecref(s->sparam_vals, n - 1); if (!jl_is_typevar(sp)) return sp; } @@ -263,7 +266,7 @@ static jl_value_t *eval(jl_value_t *e, interpreter_state *s) JL_GC_PUSH2(&atypes, &meth); atypes = eval(args[1], s); meth = eval(args[2], s); - jl_method_def((jl_svec_t*)atypes, (jl_lambda_info_t*)meth, args[3]); + jl_method_def((jl_svec_t*)atypes, (jl_code_info_t*)meth, args[3]); JL_GC_POP(); return jl_nothing; } @@ -481,13 +484,13 @@ static jl_value_t *eval_body(jl_array_t *stmts, interpreter_state *s, int start, jl_value_t *rhs = eval(jl_exprarg(stmt,1), s); if (jl_is_ssavalue(sym)) { ssize_t genid = ((jl_ssavalue_t*)sym)->id; - if (genid >= jl_linfo_nssavalues(s->lam) || genid < 0) + if (genid >= jl_source_nssavalues(s->src) || genid < 0) jl_error("assignment to invalid GenSym location"); - s->locals[jl_linfo_nslots(s->lam) + genid] = rhs; + s->locals[jl_source_nslots(s->src) + genid] = rhs; } else if (jl_is_slot(sym)) { ssize_t n = jl_slot_number(sym); - assert(n <= jl_linfo_nslots(s->lam) && n > 0); + assert(n <= jl_source_nslots(s->src) && n > 0); s->locals[n-1] = rhs; } else { @@ -497,7 +500,7 @@ static jl_value_t *eval_body(jl_array_t *stmts, 
interpreter_state *s, int start, sym = (jl_value_t*)jl_globalref_name(sym); } else { - m = (s==NULL || s->lam==NULL || s->lam->def==NULL) ? ptls->current_module : s->lam->def->module; + m = (s == NULL ? ptls->current_module : s->module); } assert(jl_is_symbol(sym)); JL_GC_PUSH1(&rhs); @@ -555,7 +558,7 @@ static jl_value_t *eval_body(jl_array_t *stmts, interpreter_state *s, int start, jl_value_t *var = jl_fieldref(stmt,0); assert(jl_is_slot(var)); ssize_t n = jl_slot_number(var); - assert(n <= jl_linfo_nslots(s->lam) && n > 0); + assert(n <= jl_source_nslots(s->src) && n > 0); s->locals[n-1] = NULL; } else { @@ -567,29 +570,64 @@ static jl_value_t *eval_body(jl_array_t *stmts, interpreter_state *s, int start, return NULL; } -jl_value_t *jl_interpret_call(jl_lambda_info_t *lam, jl_value_t **args, uint32_t nargs, jl_svec_t *sparam_vals) +jl_value_t *jl_interpret_call(jl_method_instance_t *lam, jl_value_t **args, uint32_t nargs) { - jl_array_t *stmts = (jl_array_t*)lam->code; + if (lam->jlcall_api == 2) + return lam->inferred; + jl_code_info_t *src = (jl_code_info_t*)lam->inferred; + if (src == NULL || !jl_is_code_info(src)) { + if (lam->def->isstaged) { + src = jl_code_for_staged(lam); + lam->inferred = (jl_value_t*)src; + jl_gc_wb(lam, src); + } + else { + src = lam->def->source; + } + } + jl_array_t *stmts = src->code; + if (!jl_typeis(stmts, jl_array_any_type)) { + stmts = jl_uncompress_ast(lam->def, stmts); + src->code = stmts; + jl_gc_wb(src, stmts); + } assert(jl_typeis(stmts, jl_array_any_type)); jl_value_t **locals; - JL_GC_PUSHARGS(locals, jl_linfo_nslots(lam) + jl_linfo_nssavalues(lam)); + JL_GC_PUSHARGS(locals, jl_source_nslots(src) + jl_source_nssavalues(src) + 2); + locals[0] = (jl_value_t*)src; + locals[1] = (jl_value_t*)stmts; interpreter_state s; - s.lam = lam; s.locals = locals; s.sparam_vals = sparam_vals; + s.src = src; + s.module = lam->def->module; + s.locals = locals + 2; + s.sparam_vals = lam->sparam_vals; size_t i; - for(i=0; i < lam->nargs; i++) { - if (lam->isva && i == lam->nargs-1) - locals[i] = jl_f_tuple(NULL, &args[i], nargs-i); + for (i = 0; i < lam->def->nargs; i++) { + if (lam->def->isva && i == lam->def->nargs - 1) + s.locals[i] = jl_f_tuple(NULL, &args[i], nargs - i); else - locals[i] = args[i]; + s.locals[i] = args[i]; } - jl_value_t *r = eval_body(stmts, &s, 0, lam->nargs==0); + jl_value_t *r = eval_body(stmts, &s, 0, 0); JL_GC_POP(); return r; } -jl_value_t *jl_interpret_toplevel_thunk(jl_lambda_info_t *lam) +jl_value_t *jl_interpret_toplevel_thunk(jl_code_info_t *src) { - return jl_interpret_call(lam, NULL, 0, NULL); + jl_ptls_t ptls = jl_get_ptls_states(); + jl_array_t *stmts = src->code; + assert(jl_typeis(stmts, jl_array_any_type)); + jl_value_t **locals; + JL_GC_PUSHARGS(locals, jl_source_nslots(src) + jl_source_nssavalues(src)); + interpreter_state s; + s.src = src; + s.locals = locals; + s.module = ptls->current_module; + s.sparam_vals = jl_emptysvec; + jl_value_t *r = eval_body(stmts, &s, 0, 1); + JL_GC_POP(); + return r; } #ifdef __cplusplus diff --git a/src/jitlayers.cpp b/src/jitlayers.cpp index e30336385ba22..81010548828b7 100644 --- a/src/jitlayers.cpp +++ b/src/jitlayers.cpp @@ -59,6 +59,9 @@ namespace llvm { #include #include +#include +#include +#include using namespace llvm; @@ -743,61 +746,118 @@ static void jl_merge_module(Module *dest, std::unique_ptr src) // to finalizing a function, look up its name in the `module_for_fname` map of unfinalized functions // and merge it, plus any other modules it depends upon, into `collector` // 
then add `collector` to the execution engine -// -// in the old JIT, functions are finalized by adding them to the shadow module -// (which aliases the engine module), so this is unneeded -#ifdef USE_MCJIT static StringMap module_for_fname; +static void jl_merge_recursive(Module *m, Module *collector); + +#if defined(USE_MCJIT) || defined(USE_ORCJIT) +static void jl_add_to_ee(std::unique_ptr m) +{ +#if defined(_CPU_X86_64_) && defined(_OS_WINDOWS_) && defined(LLVM35) + // Add special values used by debuginfo to build the UnwindData table registration for Win64 + ArrayType *atype = ArrayType::get(T_uint32, 3); // want 4-byte alignment of 12-bytes of data + (new GlobalVariable(*m, atype, + false, GlobalVariable::InternalLinkage, + ConstantAggregateZero::get(atype), "__UnwindData"))->setSection(".text"); + (new GlobalVariable(*m, atype, + false, GlobalVariable::InternalLinkage, + ConstantAggregateZero::get(atype), "__catchjmp"))->setSection(".text"); +#endif + assert(jl_ExecutionEngine); +#if defined(LLVM36) + jl_ExecutionEngine->addModule(std::move(m)); +#else + jl_ExecutionEngine->addModule(m.release()); +#endif +} + +void jl_finalize_function(Function *F) +{ + std::unique_ptr m(module_for_fname.lookup(F->getName())); + if (m) { + jl_merge_recursive(m.get(), m.get()); + jl_add_to_ee(std::move(m)); + } +} +#else +static bool jl_try_finalize(Module *m) +{ + for (Module::iterator I = m->begin(), E = m->end(); I != E; ++I) { + Function *F = &*I; + if (F->isDeclaration() && !isIntrinsicFunction(F)) { + if (!jl_can_finalize_function(F)) + return false; + } + } + jl_merge_recursive(m, shadow_output); + jl_merge_module(shadow_output, std::unique_ptr(m)); + return true; +} +#endif + static void jl_finalize_function(const std::string &F, Module *collector) { std::unique_ptr m(module_for_fname.lookup(F)); if (m) { - // probably not many unresolved declarations, but be sure iterate over their Names, - // since the declarations may get destroyed by the jl_merge_module call. - // this is also why we copy the Name string, rather than save a StringRef - SmallVector to_finalize; - for (Module::iterator I = m->begin(), E = m->end(); I != E; ++I) { - Function *F = &*I; - if (!F->isDeclaration()) { - module_for_fname.erase(F->getName()); - } - else if (!isIntrinsicFunction(F)) { - to_finalize.push_back(F->getName().str()); - } - } + jl_merge_recursive(m.get(), collector); + jl_merge_module(collector, std::move(m)); + } +} - for (const auto F : to_finalize) { - jl_finalize_function(F, collector ? collector : m.get()); +static void jl_merge_recursive(Module *m, Module *collector) +{ + // probably not many unresolved declarations, but be sure to iterate over their Names, + // since the declarations may get destroyed by the jl_merge_module call. 
+ // this is also why we copy the Name string, rather than save a StringRef + SmallVector to_finalize; + for (Module::iterator I = m->begin(), E = m->end(); I != E; ++I) { + Function *F = &*I; + if (!F->isDeclaration()) { + module_for_fname.erase(F->getName()); } - - if (collector) { - jl_merge_module(collector, std::move(m)); + else if (!isIntrinsicFunction(F)) { + to_finalize.push_back(F->getName().str()); } - else { -#if defined(_CPU_X86_64_) && defined(_OS_WINDOWS_) && defined(LLVM35) - // Add special values used by debuginfo to build the UnwindData table registration for Win64 - ArrayType *atype = ArrayType::get(T_uint32, 3); // want 4-byte alignment of 12-bytes of data - (new GlobalVariable(*m, atype, - false, GlobalVariable::InternalLinkage, - ConstantAggregateZero::get(atype), "__UnwindData"))->setSection(".text"); - (new GlobalVariable(*m, atype, - false, GlobalVariable::InternalLinkage, - ConstantAggregateZero::get(atype), "__catchjmp"))->setSection(".text"); -#endif - assert(jl_ExecutionEngine); -#if defined(LLVM36) - jl_ExecutionEngine->addModule(std::move(m)); + } + + for (const auto F : to_finalize) { + jl_finalize_function(F, collector); + } +} + +// see if any of the functions needed by F are still WIP +static StringSet<> incomplete_fname; +static bool jl_can_finalize_function(StringRef F, SmallSet &known) +{ + if (incomplete_fname.find(F) != incomplete_fname.end()) + return false; + Module *M = module_for_fname.lookup(F); +#ifdef LLVM35 + if (M && known.insert(M).second) #else - jl_ExecutionEngine->addModule(m.release()); + if (M && known.insert(M)) #endif + { + for (Module::iterator I = M->begin(), E = M->end(); I != E; ++I) { + Function *F = &*I; + if (F->isDeclaration() && !isIntrinsicFunction(F)) { + if (!jl_can_finalize_function(F->getName(), known)) + return false; + } } } + return true; } -void jl_finalize_function(Function *F, Module *collector) +bool jl_can_finalize_function(Function *F) { - jl_finalize_function(F->getName().str(), collector); + SmallSet known; + return jl_can_finalize_function(F->getName(), known); +} + +// let the JIT know this function is a WIP +void jl_init_function(Function *F) +{ + incomplete_fname.insert(F->getName()); } -#endif // this takes ownership of a module after code emission is complete // and will add it to the execution engine when required (by jl_finalize_function) @@ -806,20 +866,30 @@ void jl_finalize_module(Module *m, bool shadow) #if !defined(USE_ORCJIT) jl_globalPM->run(*m); #endif -#ifdef USE_MCJIT // record the function names that are part of this Module // so it can be added to the JIT when needed for (Module::iterator I = m->begin(), E = m->end(); I != E; ++I) { Function *F = &*I; - if (!F->isDeclaration()) + if (!F->isDeclaration()) { + bool known = incomplete_fname.erase(F->getName()); + (void)known; // TODO: assert(known); // llvmcall gets this wrong module_for_fname[F->getName()] = m; + } } -#endif #if defined(USE_ORCJIT) || defined(USE_MCJIT) // in the newer JITs, the shadow module is separate from the execution module if (shadow) -#endif jl_add_to_shadow(m); +#else + bool changes = jl_try_finalize(m); + while (changes) { + // this definitely isn't the most efficient, but it's only for the old LLVM 3.3 JIT + changes = false; + for (StringMap::iterator MI = module_for_fname.begin(), ME = module_for_fname.end(); MI != ME; ++MI) { + changes |= jl_try_finalize(MI->second); + } + } +#endif } // helper function for adding a DLLImport (dlsym) address to the execution engine @@ -942,10 +1012,9 @@ void* 
jl_get_global(GlobalVariable *gv) } // clones the contents of the module `m` to the shadow_output collector -// in the old JIT, this is equivalent to also adding it to the execution engine +#if defined(USE_MCJIT) || defined(USE_ORCJIT) void jl_add_to_shadow(Module *m) { -#if defined(USE_MCJIT) || defined(USE_ORCJIT) #ifndef KEEP_BODIES if (!imaging_mode) return; @@ -959,12 +1028,9 @@ void jl_add_to_shadow(Module *m) addComdat(F); } } -#else - // on the old jit, the shadow_module is the same as the execution engine_module - std::unique_ptr clone(m); -#endif jl_merge_module(shadow_output, std::move(clone)); } +#endif #ifdef HAVE_CPUID extern "C" { diff --git a/src/jitlayers.h b/src/jitlayers.h index a4d41dc680f17..bffa7aa7e5022 100644 --- a/src/jitlayers.h +++ b/src/jitlayers.h @@ -65,7 +65,9 @@ GlobalVariable *jl_emit_sysimg_slot(Module *m, Type *typ, const char *name, void* jl_get_global(GlobalVariable *gv); GlobalVariable *jl_get_global_for(const char *cname, void *addr, Module *M); void jl_add_to_shadow(Module *m); -void jl_finalize_function(Function *F, Module *collector = NULL); +void jl_init_function(Function *f); +bool jl_can_finalize_function(Function *F); +void jl_finalize_function(Function *F); void jl_finalize_module(Module *m, bool shadow); // Connect Modules via prototypes, each owned by module `M` diff --git a/src/jltypes.c b/src/jltypes.c index bba5300c3f5ca..a5f6fa6bd82a3 100644 --- a/src/jltypes.c +++ b/src/jltypes.c @@ -3851,10 +3851,35 @@ void jl_init_types(void) jl_svecset(jl_typename_type->types, 1, jl_module_type); jl_svecset(jl_methtable_type->types, 5, jl_module_type); + jl_code_info_type = + jl_new_datatype(jl_symbol("CodeInfo"), + jl_any_type, jl_emptysvec, + jl_svec(9, + jl_symbol("code"), + jl_symbol("slottypes"), + jl_symbol("ssavaluetypes"), + jl_symbol("slotnames"), + jl_symbol("slotflags"), + jl_symbol("inferred"), + jl_symbol("inlineable"), + jl_symbol("propagate_inbounds"), + jl_symbol("pure")), + jl_svec(9, + jl_any_type, + jl_any_type, + jl_any_type, + jl_array_any_type, + jl_array_uint8_type, + jl_bool_type, + jl_bool_type, + jl_bool_type, + jl_bool_type), + 0, 1, 9); + jl_method_type = jl_new_datatype(jl_symbol("Method"), jl_any_type, jl_emptysvec, - jl_svec(15, + jl_svec(18, jl_symbol("name"), jl_symbol("module"), jl_symbol("file"), @@ -3863,14 +3888,17 @@ void jl_init_types(void) jl_symbol("tvars"), jl_symbol("ambig"), jl_symbol("specializations"), - jl_symbol("lambda_template"), + jl_symbol("sparam_syms"), + jl_symbol("source"), + jl_symbol("unspecialized"), jl_symbol("roots"), jl_symbol("invokes"), + jl_symbol("nargs"), jl_symbol("called"), + jl_symbol("isva"), jl_symbol("isstaged"), - jl_symbol("needs_sparam_vals_ducttape"), - jl_symbol("")), - jl_svec(15, + jl_symbol("needs_sparam_vals_ducttape")), + jl_svec(18, jl_sym_type, jl_module_type, jl_sym_type, @@ -3879,71 +3907,46 @@ void jl_init_types(void) jl_any_type, jl_any_type, // Union{Array, Void} jl_any_type, - jl_any_type, + jl_simplevector_type, + jl_code_info_type, + jl_any_type, // jl_method_instance_type jl_array_any_type, jl_any_type, jl_int32_type, + jl_int32_type, jl_bool_type, jl_bool_type, jl_bool_type), 0, 1, 9); - jl_lambda_info_type = - jl_new_datatype(jl_symbol("LambdaInfo"), + jl_method_instance_type = + jl_new_datatype(jl_symbol("MethodInstance"), jl_any_type, jl_emptysvec, - jl_svec(25, + jl_svec(12, + jl_symbol("specTypes"), jl_symbol("rettype"), - jl_symbol("sparam_syms"), jl_symbol("sparam_vals"), - jl_symbol("specTypes"), - jl_symbol("code"), - jl_symbol("slottypes"), - 
jl_symbol("ssavaluetypes"), - jl_symbol("slotnames"), - jl_symbol("slotflags"), - jl_symbol("unspecialized_ducttape"), - jl_symbol("def"), - jl_symbol("constval"), - jl_symbol("nargs"), - jl_symbol("isva"), jl_symbol("inferred"), - jl_symbol("pure"), - jl_symbol("inlineable"), - jl_symbol("propagate_inbounds"), + jl_symbol("def"), jl_symbol("inInference"), - jl_symbol("inCompile"), jl_symbol("jlcall_api"), jl_symbol(""), jl_symbol("fptr"), + jl_symbol("unspecialized_ducttape"), jl_symbol(""), jl_symbol("")), - jl_svec(25, - jl_any_type, - jl_simplevector_type, - jl_simplevector_type, + jl_svec(12, jl_any_type, jl_any_type, - jl_any_type, - jl_any_type, - jl_array_any_type, - jl_array_uint8_type, + jl_simplevector_type, jl_any_type, jl_method_type, - jl_any_type, - jl_int32_type, - jl_bool_type, - jl_bool_type, - jl_bool_type, - jl_bool_type, - jl_bool_type, - jl_bool_type, jl_bool_type, jl_uint8_type, jl_bool_type, - jl_any_type, - jl_any_type, jl_any_type), - 0, 1, 7); - jl_svecset(jl_lambda_info_type->types, 9, jl_lambda_info_type); - jl_svecset(jl_method_type->types, 8, jl_lambda_info_type); + jl_any_type, // void* + jl_any_type, // void* + jl_any_type, jl_any_type), // void*, void* + 0, 1, 4); jl_typector_type = jl_new_datatype(jl_symbol("TypeConstructor"), @@ -4004,9 +4007,11 @@ void jl_init_types(void) jl_svecset(jl_methtable_type->types, 6, jl_int32_type); // DWORD #endif jl_svecset(jl_methtable_type->types, 7, jl_int32_type); // uint32_t - jl_svecset(jl_lambda_info_type->types, 22, jl_voidpointer_type); - jl_svecset(jl_lambda_info_type->types, 23, jl_voidpointer_type); - jl_svecset(jl_lambda_info_type->types, 24, jl_voidpointer_type); + jl_svecset(jl_method_type->types, 10, jl_method_instance_type); + jl_svecset(jl_method_instance_type->types, 8, jl_voidpointer_type); + jl_svecset(jl_method_instance_type->types, 9, jl_voidpointer_type); + jl_svecset(jl_method_instance_type->types, 10, jl_voidpointer_type); + jl_svecset(jl_method_instance_type->types, 11, jl_voidpointer_type); jl_compute_field_offsets(jl_datatype_type); jl_compute_field_offsets(jl_typename_type); @@ -4019,7 +4024,7 @@ void jl_init_types(void) jl_compute_field_offsets(jl_gotonode_type); jl_compute_field_offsets(jl_quotenode_type); jl_compute_field_offsets(jl_module_type); - jl_compute_field_offsets(jl_lambda_info_type); + jl_compute_field_offsets(jl_method_instance_type); jl_compute_field_offsets(jl_typector_type); jl_compute_field_offsets(jl_simplevector_type); jl_compute_field_offsets(jl_sym_type); diff --git a/src/julia-syntax.scm b/src/julia-syntax.scm index bf467d1838d65..8d59cf7d14869 100644 --- a/src/julia-syntax.scm +++ b/src/julia-syntax.scm @@ -338,12 +338,12 @@ (renames (map cons names temps)) (mdef (if (null? sparams) - `(method ,name (call (core svec) (curly Tuple ,@(dots->vararg types)) (call (core svec))) + `(method ,name (call (core svec) (call (core svec) ,@(dots->vararg types)) (call (core svec))) ,body ,isstaged) `(method ,name (block ,@(map make-assignment temps (symbols->typevars names bounds #t)) - (call (core svec) (curly Tuple + (call (core svec) (call (core svec) ,@(dots->vararg (map (lambda (ty) (replace-vars ty renames)) @@ -2644,7 +2644,7 @@ f(x) = yt(x) (pattern-set (pattern-lambda (call (core (-/ Typeof)) name) (get namemap name __))) - (cdddr typapp))) + (cddr typapp))) (closure-type (if (null? 
type-sp) typ `(call (core apply_type) ,typ ,@type-sp))) @@ -2652,7 +2652,7 @@ f(x) = yt(x) (if iskw `(,(car types) ,(cadr types) ,closure-type ,@(cdddr types)) `(,closure-type ,@(cdr types))))) - `(call (core svec) (call (core apply_type) Tuple ,@newtypes) + `(call (core svec) (call (core svec) ,@newtypes) (call (core svec) ,@(append (cddr (cadddr te)) type-sp))))) ;; collect all toplevel-butlast expressions inside `e`, and return diff --git a/src/julia.h b/src/julia.h index 8294263d586dd..0c72dd0dd0703 100644 --- a/src/julia.h +++ b/src/julia.h @@ -168,7 +168,7 @@ STATIC_INLINE int jl_array_ndimwords(uint32_t ndims) } typedef struct _jl_datatype_t jl_tupletype_t; -struct _jl_lambda_info_t; +struct _jl_method_instance_t; // TypeMap is an implicitly defined type // that can consist of any of the following nodes: @@ -188,13 +188,37 @@ union jl_typemap_t { // This defines the default ABI used by compiled julia functions. typedef jl_value_t *(*jl_fptr_t)(jl_value_t*, jl_value_t**, uint32_t); typedef jl_value_t *(*jl_fptr_sparam_t)(jl_svec_t*, jl_value_t*, jl_value_t**, uint32_t); -typedef jl_value_t *(*jl_fptr_linfo_t)(struct _jl_lambda_info_t*, jl_value_t**, uint32_t, jl_svec_t*); +typedef jl_value_t *(*jl_fptr_linfo_t)(struct _jl_method_instance_t*, jl_value_t**, uint32_t, jl_svec_t*); + +typedef struct { + union { + jl_fptr_t fptr; + jl_fptr_t fptr1; + // constant fptr2; + jl_fptr_sparam_t fptr3; + jl_fptr_linfo_t fptr4; + }; + uint8_t jlcall_api; +} jl_generic_fptr_t; typedef struct _jl_llvm_functions_t { void *functionObject; // jlcall llvm Function void *specFunctionObject; // specialized llvm Function } jl_llvm_functions_t; +// This type describes a single function body +typedef struct _jl_code_info_t { + jl_array_t *code; // compressed uint8 array, or Any array of statements + jl_value_t *slottypes; // types of variable slots (or `nothing`) + jl_value_t *ssavaluetypes; // types of ssa values (or count of them) + jl_array_t *slotnames; // names of local variables + jl_array_t *slotflags; // local var bit flags + uint8_t inferred; + uint8_t inlineable; + uint8_t propagate_inbounds; + uint8_t pure; +} jl_code_info_t; + // This type describes a single method definition, and stores data // shared by the specializations of a function. typedef struct _jl_method_t { @@ -204,7 +228,7 @@ typedef struct _jl_method_t { jl_sym_t *file; int32_t line; - // method's type signature. partly redundant with lambda_template->specTypes + // method's type signature. redundant with TypeMapEntry->specTypes jl_tupletype_t *sig; // bound type variables (static parameters). redundant with TypeMapEntry->tvars jl_svec_t *tvars; @@ -214,64 +238,54 @@ typedef struct _jl_method_t { // table of all argument types for which we've inferred or compiled this code union jl_typemap_t specializations; - // the AST template (or, for isstaged, code for the generator) - struct _jl_lambda_info_t *lambda_template; - jl_array_t *roots; // pointers in generated code (shared to reduce memory) + // sparams are the symbols in the tvars vector + jl_svec_t *sparam_syms; + // the code AST template + jl_code_info_t *source; // null for builtins and staged functions + // unspecialized executable thunk (for isstaged, code for the generator), or null + struct _jl_method_instance_t *unspecialized; + jl_array_t *roots; // pointers in generated code (shared to reduce memory), or null // cache of specializations of this method for invoke(), i.e. 
 // This type describes a single method definition, and stores data
 // shared by the specializations of a function.
 typedef struct _jl_method_t {
@@ -204,7 +228,7 @@ typedef struct _jl_method_t {
     jl_sym_t *file;
     int32_t line;

-    // method's type signature. partly redundant with lambda_template->specTypes
+    // method's type signature. redundant with TypeMapEntry->specTypes
     jl_tupletype_t *sig;
     // bound type variables (static parameters). redundant with TypeMapEntry->tvars
     jl_svec_t *tvars;
@@ -214,64 +238,54 @@ typedef struct _jl_method_t {
     // table of all argument types for which we've inferred or compiled this code
     union jl_typemap_t specializations;

-    // the AST template (or, for isstaged, code for the generator)
-    struct _jl_lambda_info_t *lambda_template;
-    jl_array_t *roots;  // pointers in generated code (shared to reduce memory)
+    // sparams are the symbols in the tvars vector
+    jl_svec_t *sparam_syms;
+    // the code AST template
+    jl_code_info_t *source;  // null for builtins and staged functions
+    // unspecialized executable thunk (for isstaged, code for the generator), or null
+    struct _jl_method_instance_t *unspecialized;
+    jl_array_t *roots;  // pointers in generated code (shared to reduce memory), or null

     // cache of specializations of this method for invoke(), i.e.
     // cases where this method was called even though it was not necessarily
     // the most specific for the argument types.
     union jl_typemap_t invokes;

+    int32_t nargs;
     int32_t called;  // bit flags: whether each of the first 8 arguments is called
-    int8_t isstaged;
+    uint8_t isva;
+    uint8_t isstaged;
     // if there are intrinsic calls, sparams are probably required to compile successfully,
     // and so unspecialized will be created for each linfo instead of using linfo->def->template
     // 0 = no, 1 = yes, 2 = not yet known
     uint8_t needs_sparam_vals_ducttape;
-    uint8_t traced;
 // hidden fields:
+    uint8_t traced;
     // lock for modifications to the method
     jl_mutex_t writelock;
 } jl_method_t;

-// This holds data for a single executable function body:
-// code in Julia IR, static parameters, and (if it has been compiled)
-// a function pointer.
-typedef struct _jl_lambda_info_t {
+// This type caches the data for one specialization of a Method, identified by its specTypes signature
+typedef struct _jl_method_instance_t {
     JL_DATA_TYPE
-    jl_value_t *rettype;
-    jl_svec_t *sparam_syms;  // sparams is a vector of values indexed by symbols
-    jl_svec_t *sparam_vals;
     jl_tupletype_t *specTypes;  // argument types this was specialized for
-    jl_value_t *code;  // compressed uint8 array, or Any array of statements
-    jl_value_t *slottypes;
-    jl_value_t *ssavaluetypes;  // types of ssa values
-    jl_array_t *slotnames;  // names of local variables
-    jl_array_t *slotflags;  // local var bit flags
-    struct _jl_lambda_info_t *unspecialized_ducttape;  // if template can't be compiled due to intrinsics, an un-inferred executable copy may get stored here
-    jl_method_t *def;  // method this is specialized from, (null if this is a toplevel thunk)
-    jl_value_t *constval;  // value of the function if jlcall_api==2
-    int32_t nargs;
-    int8_t isva;
-    int8_t inferred;
-    int8_t pure;
-    int8_t inlineable;
-    int8_t propagate_inbounds;
-    int8_t inInference;  // flags to tell if inference is running on this function
-    int8_t inCompile;  // flag to tell if codegen is running on this function
-    int8_t jlcall_api;  // the c-abi for fptr; 0 = jl_fptr_t, 1 = jl_fptr_sparam_t, 2 = constval
-    int8_t compile_traced;  // if set will notify callback if this linfo is compiled
-    jl_fptr_t fptr;  // jlcall entry point
+    jl_value_t *rettype;  // return type for fptr
+    jl_svec_t *sparam_vals;  // the values for the tvars, indexed by def->sparam_syms
+    jl_value_t *inferred;  // inferred jl_code_info_t, or value of the function if jlcall_api == 2, or null
+    jl_method_t *def;  // method this is specialized from, null if this is a toplevel thunk
+    uint8_t inInference;  // flags to tell if inference is running on this function
+    uint8_t jlcall_api;  // the c-abi for fptr; 0 = not yet known, 1 = jl_fptr_t, 2 = constant (in ->inferred), 3 = jl_fptr_sparam_t, 4 = jl_fptr_linfo_t
+    uint8_t compile_traced;  // if set will notify callback if this linfo is compiled
+    jl_fptr_t fptr;  // jlcall entry point with api specified by jlcall_api
+    jl_fptr_t unspecialized_ducttape;  // if template can't be compiled due to intrinsics, an un-inferred fptr may get stored here, jlcall_api = 1
-// hidden fields:
     // On the old JIT, handles to all Functions generated for this linfo
     // For the new JITs, handles to declarations in the shadow module
     // with the same name as the generated functions for this linfo, suitable
     // for referencing in LLVM IR
     jl_llvm_functions_t functionObjectsDecls;
-} jl_lambda_info_t;
+} jl_method_instance_t;
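From the Julia side, the split reads as follows: `which` still returns the shared `Method`, while each specialization is a `Core.MethodInstance` whose `def` field points back at it. A hedged field tour, reusing the method-table cache access pattern that the `test/reflection.jl` hunks below rely on:

```julia
li = typeof(getfield).name.mt.cache.func::Core.MethodInstance
li.def          # the Method this instance specializes (absent for toplevel thunks)
li.specTypes    # the argument-type signature it was specialized for
li.sparam_vals  # static-parameter values, indexed by li.def.sparam_syms
li.inferred     # a CodeInfo once inferred, or the constant when jlcall_api == 2
```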

 // all values are callable as Functions
 typedef jl_value_t jl_function_t;

@@ -410,7 +424,7 @@ typedef struct _jl_typemap_entry_t {
     jl_svec_t *guardsigs;
     union {
         jl_value_t *value;
-        jl_lambda_info_t *linfo; // [nullable] for guard entries
+        jl_method_instance_t *linfo; // [nullable] for guard entries
         jl_method_t *method;
     } func;
     // memoized properties of sig:
@@ -484,7 +498,8 @@ extern JL_DLLEXPORT jl_datatype_t *jl_uniontype_type;
 extern JL_DLLEXPORT jl_datatype_t *jl_datatype_type;
 extern JL_DLLEXPORT jl_value_t *jl_bottom_type;
-extern JL_DLLEXPORT jl_datatype_t *jl_lambda_info_type;
+extern JL_DLLEXPORT jl_datatype_t *jl_method_instance_type;
+extern JL_DLLEXPORT jl_datatype_t *jl_code_info_type;
 extern JL_DLLEXPORT jl_datatype_t *jl_method_type;
 extern JL_DLLEXPORT jl_datatype_t *jl_module_type;
 extern JL_DLLEXPORT jl_datatype_t *jl_abstractarray_type;
@@ -841,7 +856,8 @@ static inline uint32_t jl_fielddesc_size(int8_t fielddesc_type)
 #define jl_is_quotenode(v)   jl_typeis(v,jl_quotenode_type)
 #define jl_is_newvarnode(v)  jl_typeis(v,jl_newvarnode_type)
 #define jl_is_linenode(v)    jl_typeis(v,jl_linenumbernode_type)
-#define jl_is_lambda_info(v) jl_typeis(v,jl_lambda_info_type)
+#define jl_is_method_instance(v) jl_typeis(v,jl_method_instance_type)
+#define jl_is_code_info(v)   jl_typeis(v,jl_code_info_type)
 #define jl_is_method(v)      jl_typeis(v,jl_method_type)
 #define jl_is_module(v)      jl_typeis(v,jl_module_type)
 #define jl_is_mtable(v)      jl_typeis(v,jl_methtable_type)
@@ -987,9 +1003,7 @@ JL_DLLEXPORT jl_value_t *jl_new_struct(jl_datatype_t *type, ...);
 JL_DLLEXPORT jl_value_t *jl_new_structv(jl_datatype_t *type, jl_value_t **args,
                                         uint32_t na);
 JL_DLLEXPORT jl_value_t *jl_new_struct_uninit(jl_datatype_t *type);
-JL_DLLEXPORT jl_lambda_info_t *jl_new_lambda_info_uninit(void);
-JL_DLLEXPORT jl_lambda_info_t *jl_new_lambda_info_from_ast(jl_expr_t *ast);
-JL_DLLEXPORT jl_method_t *jl_new_method(jl_lambda_info_t *definition, jl_sym_t *name, jl_tupletype_t *sig, jl_svec_t *tvars, int isstaged);
+JL_DLLEXPORT jl_method_instance_t *jl_new_method_instance_uninit(void);
 JL_DLLEXPORT jl_svec_t *jl_svec(size_t n, ...);
 JL_DLLEXPORT jl_svec_t *jl_svec1(void *a);
 JL_DLLEXPORT jl_svec_t *jl_svec2(void *a, void *b);
@@ -1007,7 +1021,9 @@ JL_DLLEXPORT jl_sym_t *jl_get_root_symbol(void);
 JL_DLLEXPORT jl_value_t *jl_generic_function_def(jl_sym_t *name, jl_value_t **bp,
                                                  jl_value_t *bp_owner,
                                                  jl_binding_t *bnd);
-JL_DLLEXPORT void jl_method_def(jl_svec_t *argdata, jl_lambda_info_t *f, jl_value_t *isstaged);
+JL_DLLEXPORT void jl_method_def(jl_svec_t *argdata, jl_code_info_t *f, jl_value_t *isstaged);
+JL_DLLEXPORT jl_code_info_t *jl_code_for_staged(jl_method_instance_t *linfo);
+JL_DLLEXPORT jl_code_info_t *jl_copy_code_info(jl_code_info_t *src);
 JL_DLLEXPORT jl_function_t *jl_get_kwsorter(jl_typename_t *tn);
 JL_DLLEXPORT jl_value_t *jl_box_bool(int8_t x);
 JL_DLLEXPORT jl_value_t *jl_box_int8(int8_t x);
@@ -1319,26 +1335,22 @@ JL_DLLEXPORT const char *jl_lookup_soname(const char *pfx, size_t n);
 JL_DLLEXPORT jl_value_t *jl_toplevel_eval(jl_value_t *v);
 JL_DLLEXPORT jl_value_t *jl_toplevel_eval_in(jl_module_t *m, jl_value_t *ex);
 JL_DLLEXPORT jl_value_t *jl_load(const char *fname);
-JL_DLLEXPORT jl_value_t *jl_interpret_toplevel_expr_in(jl_module_t *m, jl_value_t *e,
-                                                       jl_lambda_info_t *lam);
 JL_DLLEXPORT jl_module_t *jl_base_relative_to(jl_module_t *m);

 // tracing
 JL_DLLEXPORT void jl_trace_method(jl_method_t *m);
 JL_DLLEXPORT void jl_untrace_method(jl_method_t *m);
-JL_DLLEXPORT void jl_trace_linfo(jl_lambda_info_t *linfo);
-JL_DLLEXPORT void jl_untrace_linfo(jl_lambda_info_t *linfo);
-JL_DLLEXPORT void jl_register_linfo_tracer(void (*callback)(jl_lambda_info_t *tracee));
-JL_DLLEXPORT void jl_register_method_tracer(void (*callback)(jl_lambda_info_t *tracee));
+JL_DLLEXPORT void jl_trace_linfo(jl_method_instance_t *linfo);
+JL_DLLEXPORT void jl_untrace_linfo(jl_method_instance_t *linfo);
+JL_DLLEXPORT void jl_register_linfo_tracer(void (*callback)(jl_method_instance_t *tracee));
+JL_DLLEXPORT void jl_register_method_tracer(void (*callback)(jl_method_instance_t *tracee));
 JL_DLLEXPORT void jl_register_newmeth_tracer(void (*callback)(jl_method_t *tracee));

 // AST access
-JL_DLLEXPORT int jl_is_rest_arg(jl_value_t *ex);
-
 JL_DLLEXPORT jl_value_t *jl_copy_ast(jl_value_t *expr);

-JL_DLLEXPORT jl_array_t *jl_compress_ast(jl_lambda_info_t *li, jl_array_t *ast);
-JL_DLLEXPORT jl_array_t *jl_uncompress_ast(jl_lambda_info_t *li, jl_array_t *data);
+JL_DLLEXPORT jl_array_t *jl_compress_ast(jl_method_t *m, jl_array_t *ast);
+JL_DLLEXPORT jl_array_t *jl_uncompress_ast(jl_method_t *m, jl_array_t *data);

 JL_DLLEXPORT int jl_is_operator(char *sym);
 JL_DLLEXPORT int jl_operator_precedence(char *sym);
@@ -1356,7 +1368,7 @@ STATIC_INLINE int jl_vinfo_usedundef(uint8_t vi)
 // calling into julia ---------------------------------------------------------

 JL_DLLEXPORT jl_value_t *jl_apply_generic(jl_value_t **args, uint32_t nargs);
-JL_DLLEXPORT jl_value_t *jl_invoke(jl_lambda_info_t *meth, jl_value_t **args, uint32_t nargs);
+JL_DLLEXPORT jl_value_t *jl_invoke(jl_method_instance_t *meth, jl_value_t **args, uint32_t nargs);

 STATIC_INLINE jl_value_t *jl_apply(jl_value_t **args, uint32_t nargs)

diff --git a/src/julia_internal.h b/src/julia_internal.h
index 00225f51f7214..de3e6fd8b8c20 100644
--- a/src/julia_internal.h
+++ b/src/julia_internal.h
@@ -165,30 +165,61 @@ STATIC_INLINE void *jl_gc_alloc_buf(jl_ptls_t ptls, size_t sz)
     return jl_gc_alloc(ptls, sz, (void*)jl_buff_tag);
 }

-jl_lambda_info_t *jl_type_infer(jl_lambda_info_t *li, int force);
-void jl_generate_fptr(jl_lambda_info_t *li);
-void jl_compile_linfo(jl_lambda_info_t *li);
+jl_code_info_t *jl_type_infer(jl_method_instance_t *li, int force);
+jl_generic_fptr_t jl_generate_fptr(jl_method_instance_t *li, void *F);
+jl_llvm_functions_t jl_compile_linfo(jl_method_instance_t *li, jl_code_info_t *src);
+jl_llvm_functions_t jl_compile_for_dispatch(jl_method_instance_t *li);
 JL_DLLEXPORT int jl_compile_hint(jl_tupletype_t *types);
-jl_lambda_info_t *jl_compile_for_dispatch(jl_lambda_info_t *li);
-JL_DLLEXPORT void jl_set_lambda_code_null(jl_lambda_info_t *li);
+jl_code_info_t *jl_new_code_info_from_ast(jl_expr_t *ast);
+jl_method_t *jl_new_method(jl_code_info_t *definition,
+                           jl_sym_t *name,
+                           jl_tupletype_t *sig,
+                           size_t nargs,
+                           int isva,
+                           jl_svec_t *tvars,
+                           int isstaged);

 // invoke (compiling if necessary) the jlcall function pointer for a method
-STATIC_INLINE jl_value_t *jl_call_method_internal(jl_lambda_info_t *meth, jl_value_t **args, uint32_t nargs)
+STATIC_INLINE jl_value_t *jl_call_method_internal(jl_method_instance_t *meth, jl_value_t **args, uint32_t nargs)
 {
-    jl_lambda_info_t *mfptr = meth;
-    if (__unlikely(mfptr->fptr == NULL && mfptr->jlcall_api != 2)) {
-        mfptr = jl_compile_for_dispatch(mfptr);
-        if (!mfptr->fptr)
-            jl_generate_fptr(mfptr);
+    jl_generic_fptr_t fptr;
+    fptr.fptr = meth->fptr;
+    fptr.jlcall_api = meth->jlcall_api;
+    if (fptr.jlcall_api == 2)
+        return meth->inferred;
+    if (__unlikely(fptr.fptr == NULL || fptr.jlcall_api == 0)) {
+        // first see if it likely needs to be compiled
+        void *F = meth->functionObjectsDecls.functionObject;
+        if (!F) // ask codegen to try to turn it into llvm code
code + F = jl_compile_for_dispatch(meth).functionObject; + if (meth->jlcall_api == 2) + return meth->inferred; + // if it hasn't been inferred, try using the unspecialized meth cache instead + if (!meth->inferred) { + fptr.fptr = meth->unspecialized_ducttape; + fptr.jlcall_api = 1; + if (!fptr.fptr) { + if (meth->def && !meth->def->isstaged && meth->def->unspecialized) { + fptr.fptr = meth->def->unspecialized->fptr; + fptr.jlcall_api = meth->def->unspecialized->jlcall_api; + if (fptr.jlcall_api == 2) + return meth->def->unspecialized->inferred; + } + } + } + if (!fptr.fptr || fptr.jlcall_api == 0) { + // ask codegen to make the fptr + fptr = jl_generate_fptr(meth, F); + if (fptr.jlcall_api == 2) + return meth->inferred; + } } - if (mfptr->jlcall_api == 0) - return mfptr->fptr(args[0], &args[1], nargs-1); - else if (mfptr->jlcall_api == 1) - return ((jl_fptr_sparam_t)mfptr->fptr)(meth->sparam_vals, args[0], &args[1], nargs-1); - else if (mfptr->jlcall_api == 2) - return meth->constval; - else if (mfptr->jlcall_api == 3) - return ((jl_fptr_linfo_t)mfptr->fptr)(mfptr, &args[0], nargs, meth->sparam_vals); + if (fptr.jlcall_api == 1) + return fptr.fptr1(args[0], &args[1], nargs-1); + else if (fptr.jlcall_api == 3) + return fptr.fptr3(meth->sparam_vals, args[0], &args[1], nargs-1); + else if (fptr.jlcall_api == 4) + return fptr.fptr4(meth, &args[0], nargs, meth->sparam_vals); else abort(); } @@ -296,22 +327,25 @@ jl_value_t *jl_toplevel_eval_flex(jl_value_t *e, int fast, int expanded); jl_value_t *jl_toplevel_eval_in_warn(jl_module_t *m, jl_value_t *ex, int delay_warn); -jl_lambda_info_t *jl_wrap_expr(jl_value_t *expr); +jl_code_info_t *jl_wrap_expr(jl_value_t *expr); jl_value_t *jl_eval_global_var(jl_module_t *m, jl_sym_t *e); jl_value_t *jl_parse_eval_all(const char *fname, const char *content, size_t contentlen); -jl_value_t *jl_interpret_toplevel_thunk(jl_lambda_info_t *lam); +jl_value_t *jl_interpret_toplevel_thunk(jl_code_info_t *src); jl_value_t *jl_interpret_toplevel_expr(jl_value_t *e); +jl_value_t *jl_interpret_toplevel_expr_in(jl_module_t *m, jl_value_t *e, + jl_code_info_t *src, + jl_svec_t *sparam_vals); int jl_is_toplevel_only_expr(jl_value_t *e); jl_value_t *jl_call_scm_on_ast(const char *funcname, jl_value_t *expr); -jl_lambda_info_t *jl_method_lookup_by_type(jl_methtable_t *mt, jl_tupletype_t *types, +jl_method_instance_t *jl_method_lookup_by_type(jl_methtable_t *mt, jl_tupletype_t *types, int cache, int inexact, int allow_exec); -jl_lambda_info_t *jl_method_lookup(jl_methtable_t *mt, jl_value_t **args, size_t nargs, int cache); +jl_method_instance_t *jl_method_lookup(jl_methtable_t *mt, jl_value_t **args, size_t nargs, int cache); jl_value_t *jl_gf_invoke(jl_tupletype_t *types, jl_value_t **args, size_t nargs); jl_datatype_t *jl_first_argument_datatype(jl_value_t *argtypes); -int jl_has_intrinsics(jl_lambda_info_t *li, jl_value_t *v, jl_module_t *m); +int jl_has_intrinsics(jl_method_instance_t *li, jl_value_t *v, jl_module_t *m); jl_value_t *jl_nth_slot_type(jl_tupletype_t *sig, size_t i); void jl_compute_field_offsets(jl_datatype_t *st); @@ -416,17 +450,18 @@ static inline void jl_set_gc_and_wait(void) void jl_dump_native(const char *bc_fname, const char *obj_fname, const char *sysimg_data, size_t sysimg_len); int32_t jl_get_llvm_gv(jl_value_t *p); int32_t jl_assign_functionID(/*llvm::Function*/void *function); +int32_t jl_jlcall_api(/*llvm::Function*/const void *function); // the first argument to jl_idtable_rehash is used to return a value // make sure it is rooted if it is 
@@ -296,22 +327,25 @@ jl_value_t *jl_toplevel_eval_flex(jl_value_t *e, int fast, int expanded);
 jl_value_t *jl_toplevel_eval_in_warn(jl_module_t *m, jl_value_t *ex,
                                      int delay_warn);

-jl_lambda_info_t *jl_wrap_expr(jl_value_t *expr);
+jl_code_info_t *jl_wrap_expr(jl_value_t *expr);
 jl_value_t *jl_eval_global_var(jl_module_t *m, jl_sym_t *e);

 jl_value_t *jl_parse_eval_all(const char *fname,
                               const char *content, size_t contentlen);
-jl_value_t *jl_interpret_toplevel_thunk(jl_lambda_info_t *lam);
+jl_value_t *jl_interpret_toplevel_thunk(jl_code_info_t *src);
 jl_value_t *jl_interpret_toplevel_expr(jl_value_t *e);
+jl_value_t *jl_interpret_toplevel_expr_in(jl_module_t *m, jl_value_t *e,
+                                          jl_code_info_t *src,
+                                          jl_svec_t *sparam_vals);
 int jl_is_toplevel_only_expr(jl_value_t *e);
 jl_value_t *jl_call_scm_on_ast(const char *funcname, jl_value_t *expr);

-jl_lambda_info_t *jl_method_lookup_by_type(jl_methtable_t *mt, jl_tupletype_t *types,
+jl_method_instance_t *jl_method_lookup_by_type(jl_methtable_t *mt, jl_tupletype_t *types,
                                            int cache, int inexact, int allow_exec);
-jl_lambda_info_t *jl_method_lookup(jl_methtable_t *mt, jl_value_t **args, size_t nargs, int cache);
+jl_method_instance_t *jl_method_lookup(jl_methtable_t *mt, jl_value_t **args, size_t nargs, int cache);
 jl_value_t *jl_gf_invoke(jl_tupletype_t *types, jl_value_t **args, size_t nargs);

 jl_datatype_t *jl_first_argument_datatype(jl_value_t *argtypes);
-int jl_has_intrinsics(jl_lambda_info_t *li, jl_value_t *v, jl_module_t *m);
+int jl_has_intrinsics(jl_method_instance_t *li, jl_value_t *v, jl_module_t *m);

 jl_value_t *jl_nth_slot_type(jl_tupletype_t *sig, size_t i);
 void jl_compute_field_offsets(jl_datatype_t *st);
@@ -416,17 +450,18 @@ static inline void jl_set_gc_and_wait(void)
 void jl_dump_native(const char *bc_fname, const char *obj_fname, const char *sysimg_data, size_t sysimg_len);
 int32_t jl_get_llvm_gv(jl_value_t *p);
 int32_t jl_assign_functionID(/*llvm::Function*/void *function);
+int32_t jl_jlcall_api(/*llvm::Function*/const void *function);
 // the first argument to jl_idtable_rehash is used to return a value
 // make sure it is rooted if it is used after the function returns
 JL_DLLEXPORT jl_array_t *jl_idtable_rehash(jl_array_t *a, size_t newsz);

 JL_DLLEXPORT jl_methtable_t *jl_new_method_table(jl_sym_t *name, jl_module_t *module);
-jl_lambda_info_t *jl_get_specialization1(jl_tupletype_t *types);
+jl_method_instance_t *jl_get_specialization1(jl_tupletype_t *types);
 JL_DLLEXPORT int jl_has_call_ambiguities(jl_tupletype_t *types, jl_method_t *m);
-JL_DLLEXPORT jl_lambda_info_t *jl_get_specialized(jl_method_t *m, jl_tupletype_t *types, jl_svec_t *sp, int allow_exec);
+jl_method_instance_t *jl_get_specialized(jl_method_t *m, jl_tupletype_t *types, jl_svec_t *sp);

 uint32_t jl_module_next_counter(jl_module_t *m);
-void jl_fptr_to_llvm(jl_fptr_t fptr, jl_lambda_info_t *lam, int specsig);
+void jl_fptr_to_llvm(jl_fptr_t fptr, jl_method_instance_t *lam, int specsig);
 jl_tupletype_t *arg_type_tuple(jl_value_t **args, size_t nargs);

 int jl_has_meta(jl_array_t *body, jl_sym_t *sym);
@@ -436,7 +471,7 @@ typedef struct {
     char *func_name;
     char *file_name;
     int line;
-    jl_lambda_info_t *linfo;
+    jl_method_instance_t *linfo;
     int fromC;
     int inlined;
 } jl_frame_t;
@@ -480,7 +515,7 @@ void jl_critical_error(int sig, bt_context_t *context, uintptr_t *bt_data, size_
 JL_DLLEXPORT void jl_raise_debugger(void);
 int jl_getFunctionInfo(jl_frame_t **frames, uintptr_t pointer, int skipC, int noInline);
 JL_DLLEXPORT void jl_gdblookup(uintptr_t ip);
-jl_value_t *jl_uncompress_ast_(jl_lambda_info_t*, jl_value_t*, int);
+jl_value_t *jl_uncompress_ast_(jl_method_instance_t*, jl_value_t*, int);

 // *to is NULL or malloc'd pointer, from is allowed to be NULL
 STATIC_INLINE char *jl_copy_str(char **to, const char *from)
 {
diff --git a/src/toplevel.c b/src/toplevel.c
index 742d84779fbe2..afe4b85121c63 100644
--- a/src/toplevel.c
+++ b/src/toplevel.c
@@ -267,7 +267,7 @@ JL_DLLEXPORT jl_module_t *jl_base_relative_to(jl_module_t *m)
 // remove this once jl_has_intrinsics is deleted
 extern jl_value_t *jl_builtin_getfield;
 static jl_value_t *jl_static_eval(jl_value_t *ex, jl_module_t *mod,
-                                  jl_lambda_info_t *linfo, int sparams)
+                                  jl_method_instance_t *linfo, int sparams)
 {
     if (jl_is_symbol(ex)) {
         jl_sym_t *sym = (jl_sym_t*)ex;
@@ -281,7 +281,7 @@ static jl_value_t *jl_static_eval(jl_value_t *ex, jl_module_t *mod,
         return NULL;
     if (jl_is_quotenode(ex))
         return jl_fieldref(ex, 0);
-    if (jl_is_lambda_info(ex))
+    if (jl_is_method_instance(ex))
         return NULL;
     jl_module_t *m = NULL;
     jl_sym_t *s = NULL;
@@ -324,7 +324,7 @@ static jl_value_t *jl_static_eval(jl_value_t *ex, jl_module_t *mod,
 }

-int jl_has_intrinsics(jl_lambda_info_t *li, jl_value_t *v, jl_module_t *m)
+int jl_has_intrinsics(jl_method_instance_t *li, jl_value_t *v, jl_module_t *m)
 {
     if (!jl_is_expr(v)) return 0;
     jl_expr_t *e = (jl_expr_t*)v;
@@ -355,7 +355,7 @@ int jl_has_intrinsics(jl_lambda_info_t *li, jl_value_t *v, jl_module_t *m)

 // heuristic for whether a top-level input should be evaluated with
 // the compiler or the interpreter.
-static int jl_eval_with_compiler_p(jl_lambda_info_t *li, jl_array_t *body, int compileloops, jl_module_t *m)
+static int jl_eval_with_compiler_p(jl_code_info_t *src, jl_array_t *body, int compileloops, jl_module_t *m)
 {
     size_t i, maxlabl=0;
     // compile if there are backwards branches
@@ -388,7 +388,7 @@ static int jl_eval_with_compiler_p(jl_lambda_info_t *li, jl_array_t *body, int c
             }
         }
-        if (jl_has_intrinsics(li, stmt, m)) return 1;
+        if (jl_has_intrinsics(NULL, stmt, m)) return 1;
     }
     return 0;
 }
@@ -397,7 +397,8 @@ static int jl_eval_expr_with_compiler_p(jl_value_t *e, int compileloops, jl_modu
 {
     if (jl_is_expr(e) && ((jl_expr_t*)e)->head == body_sym)
         return jl_eval_with_compiler_p(NULL, ((jl_expr_t*)e)->args, compileloops, m);
-    if (jl_has_intrinsics(NULL, e, m)) return 1;
+    if (jl_has_intrinsics(NULL, e, m))
+        return 1;
     return 0;
 }
@@ -504,6 +505,14 @@ int jl_is_toplevel_only_expr(jl_value_t *e)
             ((jl_expr_t*)e)->head == toplevel_sym);
 }

+static jl_method_instance_t *jl_new_thunk(jl_code_info_t *src)
+{
+    jl_method_instance_t *li = jl_new_method_instance_uninit();
+    li->inferred = (jl_value_t*)src;
+    li->specTypes = (jl_tupletype_t*)jl_typeof(jl_emptytuple);
+    return li;
+}
+
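`jl_new_thunk` has a direct Julia-side analogue, used verbatim by the updated `test/stacktraces.jl` below: allocate an empty `MethodInstance`, stash the thunk's `CodeInfo` in `inferred`, and give it the empty signature (a toplevel thunk also has no `def`). A sketch:

```julia
src = expand(quote let x = 1 end end).args[1]::CodeInfo
li = ccall(:jl_new_method_instance_uninit, Ref{Core.MethodInstance}, ())
li.inferred = src       # the thunk's code lives in the `inferred` slot
li.specTypes = Tuple{}  # what jl_typeof(jl_emptytuple) denotes in jl_new_thunk
```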
 jl_value_t *jl_toplevel_eval_flex(jl_value_t *e, int fast, int expanded)
 {
     jl_ptls_t ptls = jl_get_ptls_states();
@@ -575,9 +584,9 @@ jl_value_t *jl_toplevel_eval_flex(jl_value_t *e, int fast, int expanded)
         return jl_nothing;
     }

-    jl_value_t *thunk=NULL;
+    jl_value_t *thunk = NULL;
     jl_value_t *result;
-    jl_lambda_info_t *thk=NULL;
+    jl_code_info_t *thk = NULL;
     int ewc = 0;
     JL_GC_PUSH3(&thunk, &thk, &ex);
@@ -598,8 +607,8 @@ jl_value_t *jl_toplevel_eval_flex(jl_value_t *e, int fast, int expanded)
     }

     if (head == thunk_sym) {
-        thk = (jl_lambda_info_t*)jl_exprarg(ex,0);
-        assert(jl_is_lambda_info(thk));
+        thk = (jl_code_info_t*)jl_exprarg(ex,0);
+        assert(jl_is_code_info(thk));
         assert(jl_typeis(thk->code, jl_array_any_type));
         ewc = jl_eval_with_compiler_p(thk, (jl_array_t*)thk->code, fast, ptls->current_module);
     }
@@ -623,11 +632,11 @@ jl_value_t *jl_toplevel_eval_flex(jl_value_t *e, int fast, int expanded)
         }
     }

-    thk->specTypes = (jl_tupletype_t*)jl_typeof(jl_emptytuple); // no gc_wb needed
     if (ewc) {
-        jl_type_infer(thk, 0);
-        jl_value_t *dummy_f_arg=NULL;
-        result = jl_call_method_internal(thk, &dummy_f_arg, 1);
+        jl_method_instance_t *li = jl_new_thunk(thk);
+        jl_type_infer(li, 0);
+        jl_value_t *dummy_f_arg = NULL;
+        result = jl_call_method_internal(li, &dummy_f_arg, 1);
     }
     else {
         result = jl_interpret_toplevel_thunk(thk);
@@ -699,14 +708,16 @@ void print_func_loc(JL_STREAM *s, jl_method_t *m);

 void jl_check_static_parameter_conflicts(jl_method_t *m, jl_svec_t *t)
 {
-    jl_lambda_info_t *li = m->lambda_template;
-    size_t nvars = jl_array_len(li->slotnames);
-
-    for(size_t i=0; i < jl_svec_len(t); i++) {
-        for(size_t j=0; j < nvars; j++) {
-            jl_value_t *tv = jl_svecref(t,i);
+    jl_code_info_t *src = m->isstaged ? (jl_code_info_t*)m->unspecialized->inferred : m->source;
+    size_t nvars = jl_array_len(src->slotnames);
+
+    size_t i, n = jl_svec_len(t);
+    for (i = 0; i < n; i++) {
+        jl_value_t *tv = jl_svecref(t, i);
+        size_t j;
+        for (j = 0; j < nvars; j++) {
             if (jl_is_typevar(tv)) {
-                if ((jl_sym_t*)jl_array_ptr_ref(li->slotnames, j) == ((jl_tvar_t*)tv)->name) {
+                if ((jl_sym_t*)jl_array_ptr_ref(src->slotnames, j) == ((jl_tvar_t*)tv)->name) {
                     jl_printf(JL_STDERR,
                               "WARNING: local variable %s conflicts with a static parameter in %s",
                               jl_symbol_name(((jl_tvar_t*)tv)->name),
@@ -786,31 +797,35 @@ jl_datatype_t *jl_first_argument_datatype(jl_value_t *argtypes)
 }

 extern tracer_cb jl_newmeth_tracer;
-JL_DLLEXPORT void jl_method_def(jl_svec_t *argdata, jl_lambda_info_t *f, jl_value_t *isstaged)
+JL_DLLEXPORT void jl_method_def(jl_svec_t *argdata,
+                                jl_code_info_t *f,
+                                jl_value_t *isstaged)
 {
-    // argdata is svec({types...}, svec(typevars...))
-    jl_tupletype_t *argtypes = (jl_tupletype_t*)jl_svecref(argdata,0);
-    jl_svec_t *tvars = (jl_svec_t*)jl_svecref(argdata,1);
+    // argdata is svec(svec(types...), svec(typevars...))
+    jl_svec_t *atypes = (jl_svec_t*)jl_svecref(argdata, 0);
+    jl_svec_t *tvars = (jl_svec_t*)jl_svecref(argdata, 1);
+    size_t nargs = jl_svec_len(atypes);
+    int isva = jl_is_vararg_type(jl_svecref(atypes, nargs - 1));
+    assert(jl_is_svec(atypes));
+    assert(nargs > 0);
+    assert(jl_is_svec(tvars));
+    if (!jl_is_type(jl_svecref(atypes, 0)) || (isva && nargs == 1))
+        jl_error("function type in method definition is not a type");
     jl_methtable_t *mt;
     jl_sym_t *name;
     jl_method_t *m = NULL;
-    JL_GC_PUSH2(&f, &m);
+    jl_tupletype_t *argtype = jl_apply_tuple_type(atypes);
+    JL_GC_PUSH3(&f, &m, &argtype);

-    if (!jl_is_lambda_info(f)) {
+    if (!jl_is_code_info(f)) {
         // this occurs when there is a closure being added to an out-of-scope function
         // the user should only do this at the toplevel
         // the result is that the closure variables get interpolated directly into the AST
-        f = jl_new_lambda_info_from_ast((jl_expr_t*)f);
+        f = jl_new_code_info_from_ast((jl_expr_t*)f);
     }
-    assert(jl_is_lambda_info(f));
-    assert(jl_is_tuple_type(argtypes));
-    assert(jl_is_svec(tvars));
-    assert(jl_nparams(argtypes)>0);
-
-    if (jl_is_tuple_type(argtypes) && jl_nparams(argtypes) > 0 && !jl_is_type(jl_tparam0(argtypes)))
-        jl_error("function type in method definition is not a type");
-    jl_datatype_t *ftype = jl_first_argument_datatype((jl_value_t*)argtypes);
+    assert(jl_is_code_info(f));
+    jl_datatype_t *ftype = jl_first_argument_datatype((jl_value_t*)argtype);
     if (ftype == NULL ||
         !(jl_is_type_type((jl_value_t*)ftype) ||
           (jl_is_datatype(ftype) &&
@@ -823,13 +838,12 @@ JL_DLLEXPORT void jl_method_def(jl_svec_t *argdata, jl_lambda_info_t *f, jl_valu
     if (jl_subtype((jl_value_t*)ftype, (jl_value_t*)jl_builtin_type, 0))
         jl_error("cannot add methods to a builtin function");

-    m = jl_new_method(f, name, argtypes, tvars, isstaged == jl_true);
-    f = m->lambda_template; // because jl_new_method makes a copy
+    m = jl_new_method(f, name, argtype, nargs, isva, tvars, isstaged == jl_true);
     jl_check_static_parameter_conflicts(m, tvars);

-    size_t i, na = jl_nparams(argtypes);
+    size_t i, na = jl_nparams(argtype);
     for (i = 0; i < na; i++) {
-        jl_value_t *elt = jl_tparam(argtypes, i);
+        jl_value_t *elt = jl_tparam(argtype, i);
         if (!jl_is_type(elt) && !jl_is_typevar(elt)) {
             jl_sym_t *argname = (jl_sym_t*)jl_array_ptr_ref(f->slotnames, i);
             if (argname == unused_sym)
@@ -854,7 +868,7 @@ JL_DLLEXPORT void jl_method_def(jl_svec_t *argdata, jl_lambda_info_t *f, jl_valu
         jl_value_t *tv = jl_svecref(tvars,i);
         if (!jl_is_typevar(tv))
             jl_type_error_rt(jl_symbol_name(name), "method definition", (jl_value_t*)jl_tvar_type, tv);
-        if (!ishidden && !type_contains((jl_value_t*)argtypes, tv)) {
+        if (!ishidden && !type_contains((jl_value_t*)argtype, tv)) {
             jl_printf(JL_STDERR, "WARNING: static parameter %s does not occur in signature for %s",
                       jl_symbol_name(((jl_tvar_t*)tv)->name),
                       jl_symbol_name(name));
@@ -866,11 +880,6 @@ JL_DLLEXPORT void jl_method_def(jl_svec_t *argdata, jl_lambda_info_t *f, jl_valu
     jl_method_table_insert(mt, m, NULL);
     if (jl_newmeth_tracer)
         jl_call_tracer(jl_newmeth_tracer, (jl_value_t*)m);
-
-    if (jl_boot_file_loaded && f->code && jl_typeis(f->code, jl_array_any_type)) {
-        f->code = (jl_value_t*)jl_compress_ast(f, (jl_array_t*)f->code);
-        jl_gc_wb(f, f->code);
-    }
     JL_GC_POP();
 }
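Worth spelling out, since it spans the Scheme and C hunks above: lowering now passes `jl_method_def` an `argdata` of the form `svec(svec(types...), svec(typevars...))`, and the C side rebuilds the `Tuple` signature itself with `jl_apply_tuple_type`. A hedged sketch of the two layouts for a hypothetical one-argument method, using `typeof(identity)` as a stand-in for the function type:

```julia
# old convention: argdata = svec(Tuple{typeof(f), Int}, svec())
# new convention: a flat svec of argument types; no Tuple type yet
atypes  = Core.svec(typeof(identity), Int)
tvars   = Core.svec()               # no static parameters in this example
argdata = Core.svec(atypes, tvars)  # what lowering now hands to jl_method_def
```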
diff --git a/test/compile.jl b/test/compile.jl
index 6098ced8e8337..4e377126138ac 100644
--- a/test/compile.jl
+++ b/test/compile.jl
@@ -87,8 +87,12 @@ try
           (::Type{Vector{NominalValue{T, T}}}){T}() = 4
           (::Type{Vector{NominalValue{Int, Int}}})() = 5

-          #const some_method = @which Base.include("string") // FIXME: support for serializing a direct reference to an external Method not implemented
-          const some_linfo = @code_typed Base.include("string")
+          let some_method = @which Base.include("string")
+              # global const some_method // FIXME: support for serializing a direct reference to an external Method not implemented
+              global const some_linfo =
+                  ccall(:jl_specializations_get_linfo, Ref{MethodInstance}, (Any, Any, Any),
+                        some_method, Tuple{typeof(Base.include), String}, Core.svec())
+          end
          end
          """)
 @test_throws ErrorException Core.kwfunc(Base.nothing) # make sure `nothing` didn't have a kwfunc (which would invalidate the attempted test)
@@ -150,8 +154,11 @@ try
                            Val{3},
                            Val{nothing}},
                       0:25)
-
-        @test Foo.some_linfo === @code_typed Base.include("string")
+        some_method = @which Base.include("string")
+        some_linfo =
+            ccall(:jl_specializations_get_linfo, Ref{MethodInstance}, (Any, Any, Any),
+                  some_method, Tuple{typeof(Base.include), String}, Core.svec())
+        @test Foo.some_linfo::Core.MethodInstance === some_linfo
     end

     Baz_file = joinpath(dir, "Baz.jl")
diff --git a/test/core.jl b/test/core.jl
index 87140d30fba2c..e920d418106fd 100644
--- a/test/core.jl
+++ b/test/core.jl
@@ -4067,10 +4067,10 @@ function count_expr_push(ex::Expr, head::Symbol, counter)
     return false
 end

-function metadata_matches(ast::LambdaInfo)
+function metadata_matches(ast::CodeInfo)
     inbounds_cnt = Ref(0)
     boundscheck_cnt = Ref(0)
-    for ex in Base.uncompressed_ast(ast)
+    for ex in ast.code::Array{Any,1}
         if isa(ex, Expr)
             ex = ex::Expr
             count_expr_push(ex, :inbounds, inbounds_cnt)
@@ -4082,7 +4082,7 @@ function metadata_matches(ast::LambdaInfo)
 end

 function test_metadata_matches(f::ANY, tt::ANY)
-    metadata_matches(code_typed(f, tt)[1])
+    metadata_matches(code_typed(f, tt)[1][1])
 end

 function f1()
@@ -4558,3 +4558,15 @@ function f18173()
     successflag = false
 end
 @test f18173() == false
+
+let _true = Ref(true), f, g, h
+    @noinline f() = ccall((:time, "error_library_doesnt_exist\0"), Void, ()) # some expression that throws an error in codegen
+    @noinline g() = _true[] ? 0 : h()
+    @noinline h() = (g(); f())
+    @test_throws ErrorException @code_native h() # due to a failure to compile f()
+    @test g() == 0
+end
+
+fVararg(x) = Vararg{x}
+gVararg(a::fVararg(Int)) = length(a)
+@test gVararg(1,2,3,4,5) == 5
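The test updates above all track one user-visible change: `code_typed` now returns a vector of `(CodeInfo, return type)` pairs rather than bare `LambdaInfo`s, so helpers index `[1][1]` for the code and `[1][2]` for the inferred return type. For instance:

```julia
result = code_typed(+, Tuple{Int, Int})[1]
src = result[1]       # ::CodeInfo, with src.code::Array{Any,1}
rettype = result[2]   # the inferred return type, Int here
```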
diff --git a/test/inference.jl b/test/inference.jl
index 7c607548b84a0..3ad7016fc04ea 100644
--- a/test/inference.jl
+++ b/test/inference.jl
@@ -189,7 +189,8 @@ end
 end

 let ast12474 = code_typed(f12474, Tuple{Float64})
-    @test all(isleaftype, ast12474[1].slottypes)
+    @test isleaftype(ast12474[1][2])
+    @test all(isleaftype, ast12474[1][1].slottypes)
 end

@@ -202,7 +203,7 @@ end
 @eval f15259(x,y) = (a = $(Expr(:new, :A15259, :x, :y)); (a.x, a.y, getfield(a,1), getfield(a, 2)))
 @test isempty(filter(x -> isa(x,Expr) && x.head === :(=) &&
                           isa(x.args[2], Expr) && x.args[2].head === :new,
-                     code_typed(f15259, (Any,Int))[1].code))
+                     code_typed(f15259, (Any,Int))[1][1].code))
 @test f15259(1,2) == (1,2,1,2)
 # check that error cases are still correct
 @eval g15259(x,y) = (a = $(Expr(:new, :A15259, :x, :y)); a.z)
diff --git a/test/inline.jl b/test/inline.jl
index 3b569a56a8fe0..780897441145a 100644
--- a/test/inline.jl
+++ b/test/inline.jl
@@ -19,9 +19,11 @@ end
 Helper to test that every slot is in range after inlining.
 """
 function test_inlined_symbols(func, argtypes)
-    linfo = code_typed(func, argtypes)[1]
-    nl = length(linfo.slottypes)
-    ast = Expr(:body); ast.args = Base.uncompressed_ast(linfo)
+    src, rettype = code_typed(func, argtypes)[1]
+    nl = length(src.slottypes)
+    ast = Expr(:body)
+    ast.args = src.code
+    ast.typ = rettype
     walk(ast) do e
         if isa(e, Slot)
             @test 1 <= e.id <= nl
diff --git a/test/meta.jl b/test/meta.jl
index dfa7c98c980fa..4f68fbbc13a73 100644
--- a/test/meta.jl
+++ b/test/meta.jl
@@ -62,7 +62,7 @@ asts = code_lowered(dummy, Tuple{})
 ast = asts[1]

 body = Expr(:block)
-body.args = Base.uncompressed_ast(ast)
+body.args = ast.code

 @test popmeta!(body, :test) == (true, [42])
 @test popmeta!(body, :nonexistent) == (false, [])
@@ -110,7 +110,7 @@ asts = code_lowered(dummy_multi, Tuple{})
 ast = asts[1]

 body = Expr(:block)
-body.args = Base.uncompressed_ast(ast)
+body.args = ast.code

 @test popmeta!(body, :test) == (true, [40])
 @test popmeta!(body, :test) == (true, [41])
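Lowered code moves the same way: `code_lowered` yields `CodeInfo` objects whose `code` field already holds the uncompressed statement list, so the old `Base.uncompressed_ast` round-trip disappears. A small sketch in the style of the `test/meta.jl` hunks above:

```julia
dummy() = 42
ast = code_lowered(dummy, Tuple{})[1]   # ::CodeInfo
body = Expr(:block)
body.args = ast.code   # previously Base.uncompressed_ast(ast)
```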
diff --git a/test/parse.jl b/test/parse.jl
index 09bb1f73b121a..9e49b05984f51 100644
--- a/test/parse.jl
+++ b/test/parse.jl
@@ -674,10 +674,11 @@ end
 # Issue #16578 (Lowering) mismatch between push_loc and pop_loc
 module TestMeta_16578
 using Base.Test
-function get_expr_list(ex)
-    if isa(ex, LambdaInfo)
-        return Base.uncompressed_ast(ex)
-    elseif ex.head == :thunk
+function get_expr_list(ex::CodeInfo)
+    return ex.code::Array{Any,1}
+end
+function get_expr_list(ex::Expr)
+    if ex.head == :thunk
         return get_expr_list(ex.args[1])
     else
         return ex.args
@@ -767,8 +768,8 @@ end
     end
 end

-f1_exprs = get_expr_list(@code_typed f1(1))
-f2_exprs = get_expr_list(@code_typed f2(1))
+f1_exprs = get_expr_list(@code_typed(f1(1))[1])
+f2_exprs = get_expr_list(@code_typed(f2(1))[1])

 @test Meta.isexpr(f1_exprs[end], :return)
 @test is_pop_loc(f2_exprs[end - 1])
diff --git a/test/reflection.jl b/test/reflection.jl
index 382fab91f3555..d1a1825dcf18e 100644
--- a/test/reflection.jl
+++ b/test/reflection.jl
@@ -9,6 +9,7 @@ using Base.Test

 function test_ast_reflection(freflect, f, types)
     @test !isempty(freflect(f, types))
+    nothing
 end

 function test_bin_reflection(freflect, f, types)
@@ -16,11 +17,13 @@ function test_bin_reflection(freflect, f, types)
     freflect(iob, f, types)
     str = takebuf_string(iob)
     @test !isempty(str)
+    nothing
 end

 function test_code_reflection(freflect, f, types, tester)
     tester(freflect, f, types)
     tester(freflect, f, (types.parameters...))
+    nothing
 end

 function test_code_reflections(tester, freflect)
@@ -45,6 +48,7 @@ mktemp() do f, io
     redirect_stdout(io)
     @test try @code_native map(abs, rand(3)); true; catch false; end
     redirect_stdout(OLDSTDOUT)
+    nothing
 end

 end # module ReflectionTest
@@ -57,7 +61,7 @@ function warntype_hastag(f, types, tag)
     iob = IOBuffer()
     code_warntype(iob, f, types)
     str = takebuf_string(iob)
-    !isempty(search(str, tag))
+    return !isempty(search(str, tag))
 end

 pos_stable(x) = x > 0 ? x : zero(x)
@@ -372,10 +376,10 @@ end
 used_dup_var_tested15714 = false
 used_unique_var_tested15714 = false
 function test_typed_ast_printing(f::ANY, types::ANY, must_used_vars)
-    li = code_typed(f, types)[1]
+    src, rettype = code_typed(f, types)[1]
     dupnames = Set()
     slotnames = Set()
-    for name in li.slotnames
+    for name in src.slotnames
         if name in slotnames
             push!(dupnames, name)
         else
@@ -387,17 +391,15 @@ function test_typed_ast_printing(f::ANY, types::ANY, must_used_vars)
         @test name in slotnames
     end
     for str in (sprint(code_warntype, f, types),
-                stringmime("text/plain", li))
-        # Test to make sure the clearing of file path below works
-        @test string(li.def.file) == @__FILE__
+                stringmime("text/plain", src))
         for var in must_used_vars
             @test contains(str, string(var))
         end
         @test !contains(str, "Any")
         @test !contains(str, "ANY")
         # Check that we are not printing the bare slot numbers
-        for i in 1:length(li.slotnames)
-            name = li.slotnames[i]
+        for i in 1:length(src.slotnames)
+            name = src.slotnames[i]
             if name in dupnames
                 @test contains(str, "_$i")
                 if name in must_used_vars
@@ -411,12 +413,12 @@ function test_typed_ast_printing(f::ANY, types::ANY, must_used_vars)
             end
         end
     end
-    # Make sure printing an AST outside LambdaInfo still works.
-    str = sprint(show, Base.uncompressed_ast(li))
+    # Make sure printing an AST outside CodeInfo still works.
+    str = sprint(show, src.code)
     # Check that we are printing the slot numbers when we don't have the context
     # Use the variable names that we know should be present in the optimized AST
-    for i in 2:length(li.slotnames)
-        name = li.slotnames[i]
+    for i in 2:length(src.slotnames)
+        name = src.slotnames[i]
         if name in must_used_vars
             @test contains(str, "_$i")
         end
@@ -429,20 +431,21 @@ test_typed_ast_printing(g15714, Tuple{Vector{Float32}},
 @test used_dup_var_tested15714
 @test used_unique_var_tested15714

-let li = typeof(getfield).name.mt.cache.func::LambdaInfo,
+let li = typeof(getfield).name.mt.cache.func::Core.MethodInstance,
     lrepr = string(li),
     mrepr = string(li.def),
-    lmime = stringmime("text/plain", li)
+    lmime = stringmime("text/plain", li),
+    mmime = stringmime("text/plain", li.def)

-    @test lrepr == "LambdaInfo template for getfield(...)"
-    @test mrepr == "getfield(...)"
+    @test lrepr == lmime == "MethodInstance for getfield(...)"
+    @test mrepr == mmime == "getfield(...)"
 end

 # Linfo Tracing test
 tracefoo(x, y) = x+y
 didtrace = false
-tracer(x::Ptr{Void}) = (@test isa(unsafe_pointer_to_objref(x), LambdaInfo); global didtrace = true; nothing)
+tracer(x::Ptr{Void}) = (@test isa(unsafe_pointer_to_objref(x), Core.MethodInstance); global didtrace = true; nothing)
 ccall(:jl_register_method_tracer, Void, (Ptr{Void},), cfunction(tracer, Void, (Ptr{Void},)))
 meth = which(tracefoo,Tuple{Any,Any})
 ccall(:jl_trace_method, Void, (Any,), meth)
@@ -538,8 +541,10 @@ end

 let a = @code_typed 1 + 1
     b = @code_lowered 1 + 1
-    @test isa(a, LambdaInfo)
-    @test isa(b, LambdaInfo)
+    @test isa(a, Pair{CodeInfo, DataType})
+    @test isa(b, CodeInfo)
+    @test isa(a[1].code, Array{Any,1})
+    @test isa(b.code, Array{Any,1})

     function thing(a::Array, b::Real)
         println("thing")
diff --git a/test/stacktraces.jl b/test/stacktraces.jl
index 2ba19b597b3de..8f2400662f0f0 100644
--- a/test/stacktraces.jl
+++ b/test/stacktraces.jl
@@ -99,12 +99,17 @@ for (frame, func, inlined) in zip(trace, [g,h,f], (can_inline, can_inline, false
     end
 end

-let li = expand(quote let x = 1 end end).args[1]::LambdaInfo,
-    sf = StackFrame(:a, :b, 3, li, false, false, 0),
+let src = expand(quote let x = 1 end end).args[1]::CodeInfo,
+    li = ccall(:jl_new_method_instance_uninit, Ref{Core.MethodInstance}, ()),
+    sf
+
+    li.inferred = src
+    li.specTypes = Tuple{}
+    sf = StackFrame(:a, :b, 3, li, false, false, 0)
     repr = string(sf)
-    @test repr == " in Toplevel LambdaInfo thunk at b:3"
+    @test repr == " in Toplevel MethodInstance thunk at b:3"
 end

-let li = typeof(getfield).name.mt.cache.func::LambdaInfo,
+let li = typeof(getfield).name.mt.cache.func::Core.MethodInstance,
     sf = StackFrame(:a, :b, 3, li, false, false, 0),
     repr = string(sf)
     @test repr == " in getfield(...) at b:3"
diff --git a/test/staged.jl b/test/staged.jl
index f74d3b2244673..621d866711738 100644
--- a/test/staged.jl
+++ b/test/staged.jl
@@ -170,7 +170,7 @@ let gf_err, tsk = @async nothing # create a Task for yield to try to run
     end
     @test_throws ErrorException gf_err()
     @test_throws ErrorException gf_err()
-    @test gf_err_ref[] == 2
+    @test gf_err_ref[] == 4
 end

 gf_err_ref[] = 0
@@ -186,7 +186,7 @@ let gf_err2
     @test_throws ErrorException gf_err2(code_typed)
     @test_throws ErrorException gf_err2(code_llvm)
     @test_throws ErrorException gf_err2(code_native)
-    @test gf_err_ref[] == 6
+    @test gf_err_ref[] == 12
     @test gf_err2(code_lowered) === nothing
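A closing recap of the printable forms these last test hunks assert: a `MethodInstance` shows as `MethodInstance for ...`, its `def` prints as the bare method signature, and `@code_typed` now yields a `Pair`. Sketch:

```julia
li = typeof(getfield).name.mt.cache.func::Core.MethodInstance
string(li)      # "MethodInstance for getfield(...)"
string(li.def)  # "getfield(...)"

a = @code_typed 1 + 1
isa(a, Pair{CodeInfo, DataType})  # true; a[1].code holds the statements
```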