promote_op() fixes #16995

Merged 3 commits on Jul 2, 2016
4 changes: 0 additions & 4 deletions base/bool.jl
@@ -71,7 +71,3 @@ fld(x::Bool, y::Bool) = div(x,y)
cld(x::Bool, y::Bool) = div(x,y)
rem(x::Bool, y::Bool) = y ? false : throw(DivideError())
mod(x::Bool, y::Bool) = rem(x,y)

promote_op(op, ::Type{Bool}, ::Type{Bool}) = typeof(op(true, true))
promote_op(::typeof(^), ::Type{Bool}, ::Type{Bool}) = Bool
promote_op{T<:Integer}(::typeof(^), ::Type{Bool}, ::Type{T}) = Bool
13 changes: 1 addition & 12 deletions base/complex.jl
@@ -26,17 +26,6 @@ promote_rule{T<:Real,S<:Real}(::Type{Complex{T}}, ::Type{S}) =
promote_rule{T<:Real,S<:Real}(::Type{Complex{T}}, ::Type{Complex{S}}) =
Complex{promote_type(T,S)}

promote_op{T<:Real,S<:Real}(op, ::Type{Complex{T}}, ::Type{Complex{S}}) =
Complex{promote_op(op,T,S)}
promote_op{T<:Real,S<:Real}(op, ::Type{Complex{T}}, ::Type{S}) =
Complex{promote_op(op,T,S)}
promote_op{T<:Real,S<:Real}(op, ::Type{T}, ::Type{Complex{S}}) =
Complex{promote_op(op,T,S)}
promote_op{T<:Integer,S<:Integer}(::typeof(^), ::Type{T}, ::Type{Complex{S}}) =
Complex{Float64}
promote_op{T<:Integer,S<:Integer}(::typeof(.^), ::Type{T}, ::Type{Complex{S}}) =
Complex{Float64}

widen{T}(::Type{Complex{T}}) = Complex{widen(T)}

real(z::Complex) = z.re
@@ -461,7 +450,7 @@ function ^{T<:AbstractFloat}(z::Complex{T}, p::Complex{T})
if p==2 #square
zr, zi = reim(z)
x = (zr-zi)*(zr+zi)
y = 2zr*zi
y = T(2)*zr*zi
Member:
Why is this necessary? It seems like this will be slower, e.g. for Complex{BigFloat}.

Member Author:
Because of the type instability it creates for Float16:

julia> typeof(2*Float16(1.0))
Float32

Maybe better to write it as T(2*zr*zi)?

(I wonder whether that promotion exception for Float16 is really a good idea...)
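
For reference, a minimal sketch (an assumed REPL session under the Float16 promotion rules discussed here, not part of the PR) contrasting the spellings for Complex{Float16}:

julia> zr, zi = Float16(1.5), Float16(0.25);

julia> typeof(2zr * zi)               # the Int literal promotes Float16 to Float32
Float32

julia> typeof(Float16(2) * zr * zi)   # the T(2)*zr*zi form stays in Float16
Float16

julia> typeof(Float16(2zr * zi))      # the T(2*zr*zi) alternative also stays in Float16
Float16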

Member:
Yes, I think T(2*zr*zi) seems like a better way to do it.

Member Author:
See #17259.

if isnan(x)
if isinf(y)
x = copysign(zero(T),zr)
1 change: 1 addition & 0 deletions base/gmp.jl
@@ -428,6 +428,7 @@ end
^(x::BigInt , y::Bool ) = y ? x : one(x)
^(x::BigInt , y::Integer) = bigint_pow(x, y)
^(x::Integer, y::BigInt ) = bigint_pow(BigInt(x), y)
^(x::Bool , y::BigInt ) = Base.power_by_squaring(x, y)

function powermod(x::BigInt, p::BigInt, m::BigInt)
r = BigInt()
22 changes: 11 additions & 11 deletions base/hashing2.jl
@@ -96,49 +96,49 @@ Special values:
decompose(x::Integer) = x, 0, 1
decompose(x::Rational) = num(x), 0, den(x)

function decompose(x::Float16)
function decompose(x::Float16)::NTuple{3,Int}
isnan(x) && return 0, 0, 0
isinf(x) && return ifelse(x < 0, -1, 1), 0, 0
n = reinterpret(UInt16, x)
s = (n & 0x03ff) % Int16
e = (n & 0x7c00 >> 10) % Int
s |= Int16(e != 0) << 10
d = ifelse(signbit(x), -1, 1)
Int(s), Int(e - 25 + (e == 0)), d
s, e - 25 + (e == 0), d
end

function decompose(x::Float32)
function decompose(x::Float32)::NTuple{3,Int}
isnan(x) && return 0, 0, 0
isinf(x) && return ifelse(x < 0, -1, 1), 0, 0
n = reinterpret(UInt32, x)
s = (n & 0x007fffff) % Int32
e = (n & 0x7f800000 >> 23) % Int
s |= Int32(e != 0) << 23
d = ifelse(signbit(x), -1, 1)
Int(s), Int(e - 150 + (e == 0)), d
s, e - 150 + (e == 0), d
end

function decompose(x::Float64)
function decompose(x::Float64)::Tuple{Int64, Int, Int}
isnan(x) && return 0, 0, 0
isinf(x) && return ifelse(x < 0, -1, 1), 0, 0
n = reinterpret(UInt64, x)
s = (n & 0x000fffffffffffff) % Int64
e = (n & 0x7ff0000000000000 >> 52) % Int
s |= Int64(e != 0) << 52
d = ifelse(signbit(x), -1, 1)
s, Int(e - 1075 + (e == 0)), d
s, e - 1075 + (e == 0), d
end

function decompose(x::BigFloat)
isnan(x) && return big(0), 0, 0
isinf(x) && return big(x.sign), 0, 0
x == 0 && return big(0), 0, Int(x.sign)
function decompose(x::BigFloat)::Tuple{BigInt, Int, Int}
isnan(x) && return 0, 0, 0
isinf(x) && return x.sign, 0, 0
x == 0 && return 0, 0, x.sign
s = BigInt()
s.size = cld(x.prec, 8*sizeof(GMP.Limb)) # limbs
b = s.size * sizeof(GMP.Limb) # bytes
ccall((:__gmpz_realloc2, :libgmp), Void, (Ptr{BigInt}, Culong), &s, 8b) # bits
ccall(:memcpy, Ptr{Void}, (Ptr{Void}, Ptr{Void}, Csize_t), s.d, x.d, b) # bytes
s, Int(x.exp - 8b), Int(x.sign)
s, x.exp - 8b, x.sign
end

## streamlined hashing for smallish rational types ##
2 changes: 0 additions & 2 deletions base/int.jl
@@ -305,8 +305,6 @@ promote_rule{T<:BitSigned64}(::Type{UInt64}, ::Type{T}) = UInt64
promote_rule{T<:Union{UInt32, UInt64}}(::Type{T}, ::Type{Int128}) = Int128
promote_rule{T<:BitSigned}(::Type{UInt128}, ::Type{T}) = UInt128

promote_op{R<:Integer,S<:Integer}(op, ::Type{R}, ::Type{S}) = typeof(op(one(R), one(S)))

## traits ##

typemin(::Type{Int8 }) = Int8(-128)
2 changes: 1 addition & 1 deletion base/intfuncs.jl
@@ -186,7 +186,7 @@ ndigits0z(x::Integer) = ndigits0z(unsigned(abs(x)))

const ndigits_max_mul = Core.sizeof(Int) == 4 ? 69000000 : 290000000000000000

function ndigits0znb(n::Int, b::Int)
function ndigits0znb(n::Signed, b::Int)
d = 0
while n != 0
n = cld(n,b)
7 changes: 7 additions & 0 deletions base/irrationals.jl
@@ -10,6 +10,13 @@ promote_rule{s}(::Type{Irrational{s}}, ::Type{Float32}) = Float32
promote_rule{s,t}(::Type{Irrational{s}}, ::Type{Irrational{t}}) = Float64
promote_rule{s,T<:Number}(::Type{Irrational{s}}, ::Type{T}) = promote_type(Float64,T)

promote_op{S<:Irrational,T<:Irrational}(op::Any, ::Type{S}, ::Type{T}) =
promote_op(op, Float64, Float64)
promote_op{S<:Irrational,T<:Number}(op::Any, ::Type{S}, ::Type{T}) =
promote_op(op, Float64, T)
promote_op{S<:Irrational,T<:Number}(op::Any, ::Type{T}, ::Type{S}) =
promote_op(op, T, Float64)

convert(::Type{AbstractFloat}, x::Irrational) = Float64(x)
convert(::Type{Float16}, x::Irrational) = Float16(Float32(x))
convert{T<:Real}(::Type{Complex{T}}, x::Irrational) = convert(Complex{T}, convert(T,x))
12 changes: 12 additions & 0 deletions base/number.jl
@@ -63,4 +63,16 @@ zero{T<:Number}(::Type{T}) = convert(T,0)
one(x::Number) = oftype(x,1)
one{T<:Number}(::Type{T}) = convert(T,1)

promote_op{R,S<:Number}(::Type{R}, ::Type{S}) = (@_pure_meta; R) # to fix ambiguities
function promote_op{T<:Number}(op, ::Type{T})
S = typeof(op(one(T)))
# preserve the most general (abstract) type when possible
return isleaftype(T) ? S : typejoin(S, T)
end
function promote_op{R<:Number,S<:Number}(op, ::Type{R}, ::Type{S})
Contributor (@pabloferz, Jun 30, 2016):
Maybe we can write

x = R <: Irrational ? 1.0 : one(R)
y = S <: Irrational ? 1.0 : one(S)
T = typeof(op(x, y))

and remove the ones for Irrational above? (and restore the comparison ones)

Member Author:
Yeah, I thought about that possibility too. In general, though, I think it's considered more Julian to dispatch on types.

That solution would be quite appealing to avoid ambiguities with the methods for comparison operators, but we still need to take care of ambiguities with the Nullable method. So I'd rather get rid of the comparison-operators method altogether. That fits with @JeffBezanson's request. (But currently it fails the tests; I'm not sure why yet.)

Contributor (@pabloferz, Jun 30, 2016):
I know dispatching on types is more Julian. The problem here, if I understand Jeff right, is that any method that ignores op, i.e. promote_op(op, ...), should preferably return Any, and that we should have as few such methods as possible (but also as few as possible of the ones that do take op into account). But maybe we should still follow the Julian way everywhere.

The methods that take Number types as arguments (other than op) are a special case, since we use them in other places besides broadcast. The methods with Nullable types are probably also important on their own.

The comparison methods are failing because there is no isless for Complex and other numerical types. I'm not sure why the comparison methods can't be inferred with this definition; that's why I had the comparison ones.

Member Author:
> I'm not sure why the comparison methods can't be inferred with this definition, that's why I had the comparison ones.

I suspect there's a type instability somewhere. These tests already caught one in a corner case.

Member Author:
Found a minimal reproducer: Base.Test.@inferred <(one(Rational{Int32}), one(Float64)) (it only happens on 32-bit).

T = typeof(op(one(R), one(S)))
# preserve the most general (abstract) type when possible
return isleaftype(R) && isleaftype(S) ? T : typejoin(R, S, T)
end

factorial(x::Number) = gamma(x + 1) # fallback for x not Integer
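
To illustrate the intent of the new Number fallback above, here is a rough sketch (an assumed REPL session, not part of the PR) of how leaf and abstract element types behave under it:

julia> Base.promote_op(+, Int, Float64)    # both leaf types: typeof(1 + 1.0)
Float64

julia> Base.promote_op(+, Real, Real)      # abstract inputs: typejoin(Real, Real, Int)
Real

julia> Base.promote_op(*, Real, Float64)   # the abstract input is preserved
Real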
1 change: 1 addition & 0 deletions base/pkg/resolve/fieldvalue.jl
@@ -42,6 +42,7 @@ Base.typemin(::Type{FieldValue}) = (x=typemin(Int); y=typemin(VersionWeight); Fi

Base.:-(a::FieldValue, b::FieldValue) = FieldValue(a.l0-b.l0, a.l1-b.l1, a.l2-b.l2, a.l3-b.l3, a.l4-b.l4)
Base.:+(a::FieldValue, b::FieldValue) = FieldValue(a.l0+b.l0, a.l1+b.l1, a.l2+b.l2, a.l3+b.l3, a.l4+b.l4)
Base.promote_op(::Union{typeof(+), typeof(-)}, ::Type{FieldValue}, ::Type{FieldValue}) = FieldValue

function Base.isless(a::FieldValue, b::FieldValue)
a.l0 < b.l0 && return true
4 changes: 1 addition & 3 deletions base/promotion.jl
@@ -222,10 +222,8 @@ minmax(x::Real, y::Real) = minmax(promote(x, y)...)
# for the multiplication of two types,
# promote_op{R<:MyType,S<:MyType}(::typeof(*), ::Type{R}, ::Type{S}) = MyType{multype(R,S)}
promote_op(::Any) = (@_pure_meta; Bottom)
promote_op(::Any, T) = (@_pure_meta; T)
promote_op(::Any, ::Any, ::Any...) = (@_pure_meta; Any)
promote_op{T}(::Type{T}, ::Any) = (@_pure_meta; T)
Contributor:
I believe this is still useful for doing things like Float64.([1,2,3]).

Member Author:
Seems to work without it. :-)

Member:
A potential issue to be aware of here is that, e.g., Real(1+0im) returns an Int, so we don't necessarily want to say the result is Real.

Member Author:
Why not? Sounds like a case where you might want to store non-Int values in the resulting array:

julia> x = Real.([1+0im])
1-element Array{Real,1}:
 1

julia> x[1] = 1.5
1.5

Contributor (@pabloferz, Jun 30, 2016):
promote_op{T}(::Type{T}, ::Any) = (@_pure_meta; T) was added in #16179. Without it, things like Float32.([1,2,3]) will return Any and fall back to the new, slow broadcast algorithm. I think it's convenient to keep it. Of course, I'll leave it for you to consider.

Member Author:
I'm fine with adding it back, but then where should we place the limit? It's annoying that the fallback is slow in any case.

Contributor:
I just noticed that you have promote_op{R,S<:Number}(::Type{R}, ::Type{S}) = (@_pure_meta; R) above, so that will get picked instead if you remove this. So I guess there is no problem leaving this out.

Member:
The fallback is not necessarily that slow: in cases we can infer, there is no significant slowdown at all. However, there is a chance we will lose some SIMD optimizations (though I haven't analyzed it in detail), so we should keep using the old approach for +, -, *, etc.

I'm also OK with adding back this definition.

Member Author:
I've added it back; it's an important special case and it doesn't create any issues.
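
As a quick illustration (assumed REPL session, not part of the PR) of what keeping that definition buys, constructor-style broadcasts retain a concrete element type instead of widening to Any:

julia> Base.promote_op(Float64, Int)   # handled by promote_op{T}(::Type{T}, ::Any)
Float64

julia> Float64.([1, 2, 3])
3-element Array{Float64,1}:
 1.0
 2.0
 3.0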

promote_op{R,S}(::Any, ::Type{R}, ::Type{S}) = (@_pure_meta; promote_type(R, S))
promote_op(op, T, S, U, V...) = (@_pure_meta; promote_op(op, T, promote_op(op, S, U, V...)))

## catch-alls to prevent infinite recursion when definitions are missing ##
Member Author:
I've removed this method when squashing, since it doesn't make sense to use the output type as an input type now that the fallback to promote_type has been removed. I've replaced it with promote_op(::Any, ::Any, ::Any...) above.

Contributor (@pabloferz, Jul 2, 2016):
Makes sense 👍. Isn't this just missing promote_op(::Any, ::Any) = Any? Never mind, I didn't see the splatting above.

Member Author:
No, because ::Any... can also match zero arguments. This is an annoying detail with the current varargs behavior.
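
A small check of that point (assumed REPL session; the method name f is just for illustration):

julia> f(::Any, ::Any, ::Any...) = :matched;

julia> f(1, 2)        # the ::Any... tail matches zero trailing arguments
:matched

julia> f(1, 2, 3, 4)
:matched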


7 changes: 2 additions & 5 deletions test/arrayops.jl
@@ -1408,7 +1408,7 @@ b = rand(6,7)
# return type declarations (promote_op)
module RetTypeDecl
using Base.Test
import Base: +, *, .*, zero
import Base: +, *, .*, convert

immutable MeterUnits{T,P} <: Number
val::T
@@ -1422,11 +1422,8 @@ module RetTypeDecl
(*){T,pow}(x::Int, y::MeterUnits{T,pow}) = MeterUnits{typeof(x*one(T)),pow}(x*y.val)
(*){T}(x::MeterUnits{T,1}, y::MeterUnits{T,1}) = MeterUnits{T,2}(x.val*y.val)
(.*){T}(x::MeterUnits{T,1}, y::MeterUnits{T,1}) = MeterUnits{T,2}(x.val*y.val)
zero{T,pow}(x::MeterUnits{T,pow}) = MeterUnits{T,pow}(zero(T))

Base.promote_op{R,S}(::typeof(+), ::Type{MeterUnits{R,1}}, ::Type{MeterUnits{S,1}}) = MeterUnits{promote_type(R,S),1}
convert{T,pow}(::Type{MeterUnits{T,pow}}, y::Real) = MeterUnits{T,pow}(convert(T,y))
Base.promote_op{R,S}(::typeof(*), ::Type{MeterUnits{R,1}}, ::Type{MeterUnits{S,1}}) = MeterUnits{promote_type(R,S),2}
Base.promote_op{R,S}(::typeof(.*), ::Type{MeterUnits{R,1}}, ::Type{MeterUnits{S,1}}) = MeterUnits{promote_type(R,S),2}

@test @inferred(m+[m,m]) == [m+m,m+m]
@test @inferred([m,m]+m) == [m+m,m+m]
15 changes: 14 additions & 1 deletion test/broadcast.jl
@@ -165,7 +165,7 @@ m = [1:2;]'
@test @inferred([0,1.2].+reshape([0,-2],1,1,2)) == reshape([0 -2; 1.2 -0.8],2,1,2)
rt = Base.return_types(.+, Tuple{Array{Float64, 3}, Array{Int, 1}})
@test length(rt) == 1 && rt[1] == Array{Float64, 3}
rt = Base.return_types(broadcast, Tuple{Function, Array{Float64, 3}, Array{Int, 1}})
rt = Base.return_types(broadcast, Tuple{typeof(.+), Array{Float64, 3}, Array{Int, 3}})
@test length(rt) == 1 && rt[1] == Array{Float64, 3}
rt = Base.return_types(broadcast!, Tuple{Function, Array{Float64, 3}, Array{Float64, 3}, Array{Int, 1}})
@test length(rt) == 1 && rt[1] == Array{Float64, 3}
@@ -200,3 +200,16 @@ end
# issue #4883
@test isa(broadcast(tuple, [1 2 3], ["a", "b", "c"]), Matrix{Tuple{Int,String}})
@test isa(broadcast((x,y)->(x==1?1.0:x,y), [1 2 3], ["a", "b", "c"]), Matrix{Tuple{Real,String}})
let a = length.(["foo", "bar"])
@test isa(a, Vector{Int})
@test a == [3, 3]
end
let a = sin.([1, 2])
@test isa(a, Vector{Float64})
@test a ≈ [0.8414709848078965, 0.9092974268256817]
end

# PR 16988
@test Base.promote_op(+, Bool) === Int
@test isa(broadcast(+, true), Array{Int,0})
@test Base.promote_op(Float64, Bool) === Float64
3 changes: 3 additions & 0 deletions test/linalg/dense.jl
@@ -361,6 +361,9 @@ let
@test S*T == [z z; 0 0]
end

# similar issue for Array{Real}
@test Real[1 2] * Real[1.5; 2.0] == [5.5]

# Matrix exponential
for elty in (Float32, Float64, Complex64, Complex128)
A1 = convert(Matrix{elty}, [4 2 0; 1 4 1; 1 1 4])
50 changes: 50 additions & 0 deletions test/numbers.jl
@@ -2762,3 +2762,53 @@ testmi(typemax(UInt32)-UInt32(1000):typemax(UInt32), map(UInt32, 1:100))

# issue #16282
@test_throws MethodError 3 // 4.5im

# PR #16995
let types = (Base.BitInteger_types..., BigInt, Bool,
Rational{Int}, Rational{BigInt},
Float16, Float32, Float64, BigFloat,
Complex{Int}, Complex{UInt}, Complex32, Complex64, Complex128)
for S in types
for op in (+, -)
T = @inferred Base.promote_op(op, S)
t = @inferred op(one(S))
@test T === typeof(t)
end
end

@test @inferred(Base.promote_op(!, Bool)) === Bool

for R in types, S in types
for op in (+, -, *, /, ^)
T = @inferred Base.promote_op(op, R, S)
t = @inferred op(one(R), one(S))
@test T === typeof(t)
end
end
end

let types = (Base.BitInteger_types..., BigInt, Bool,
Rational{Int}, Rational{BigInt},
Float16, Float32, Float64, BigFloat)
for S in types, T in types
for op in (<, >, <=, >=, (==))
@test @inferred(Base.promote_op(op, S, T)) === Bool
end
end
end

let types = (Base.BitInteger_types..., BigInt, Bool)
for S in types
T = @inferred Base.promote_op(~, S)
t = @inferred ~one(S)
@test T === typeof(t)
end

for S in types, T in types
for op in (&, |, <<, >>, (>>>), %, ÷)
T = @inferred Base.promote_op(op, S, T)
t = @inferred op(one(S), one(T))
@test T === typeof(t)
end
end
end