
Commit e286a84

added functions for easily comparing Dual vs. GradientNumber performance and codegen
1 parent 788c210

File tree

2 files changed: +53 -11

benchmarks/benchmarks.jl

+10 -11

@@ -5,7 +5,15 @@ using JLD
 ##################
 # Test functions #
 ##################
-sqr(i) = i*i
+function rosenbrock(x)
+    a = one(eltype(x))
+    b = 100 * a
+    result = zero(eltype(x))
+    for i in 1:length(x)-1
+        result += (a - x[i])^2 + b*(x[i+1] - x[i]^2)^2
+    end
+    return result
+end
 
 function ackley(x)
     a, b, c = 20.0, -0.2, 2.0*π
@@ -14,21 +22,12 @@ function ackley(x)
     sum_cos = sum_sqrs
     for i in x
         sum_cos += cos(c*i)
-        sum_sqrs += sqr(i)
+        sum_sqrs += i^2
     end
     return (-a * exp(b * sqrt(len_recip*sum_sqrs)) -
             exp(len_recip*sum_cos) + a + e)
 end
 
-function rosenbrock(x)
-    a, b = 100.0, 1.0
-    result = zero(eltype(x))
-    for i in 1:length(x)-1
-        result += sqr(b - x[i]) + a*sqr(x[i+1] - sqr(x[i]))
-    end
-    return result
-end
-
 self_weighted_logit(x) = inv(1.0 + exp(-dot(x, x)))
 
 #############################
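
Note that the rewritten rosenbrock is also generic over the input's element type: the old version hard-coded Float64 constants (a, b = 100.0, 1.0), while the new one derives a and b from one(eltype(x)), presumably so all arithmetic stays within the Dual/GradientNumber type being benchmarked. A minimal sketch (not part of the commit) of what that enables, using DualNumbers' dual and epsilon:

using DualNumbers

x = rand(5)
v = rand(5)              # direction for a directional derivative
dx = map(dual, x, v)     # seed each number's epsilon with the direction
rosenbrock(x)            # ordinary Float64 evaluation
epsilon(rosenbrock(dx))  # directional derivative, i.e. dot(grad f(x), v)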

benchmarks/gradnums_vs_duals.jl

+43 -0

@@ -0,0 +1,43 @@
+using ForwardDiff.GradientNumber
+using DualNumbers
+
+# cmp_codegen(f) should report fairly similar code for
+# Duals and GradientNumbers, possibly with a few extra
+# allocations due to the Partials layer
+#
+# cmp_times(f) should report similar times for Duals
+# and GradientNumbers (within a few percent difference)
+
+function cmp_codegen(f)
+    x = rand(3);
+    dx = map(dual, x);
+    ndx = map(i->GradientNumber(i, zero(i)), x);
+
+    info("@code_llvm $(f)(::Vector{Dual{Float64}}):")
+    @code_llvm f(dx)
+
+    println()
+
+    info("@code_llvm $(f)(::Vector{GradientNumber{1,Float64,Tuple{Float64}}}):")
+    @code_llvm f(ndx)
+end
+
+function cmp_times(f, xlen, repeat=5)
+    x = rand(xlen);
+    dx = map(dual, x);
+    ndx = map(i->GradientNumber(i, zero(i)), x);
+
+    min_time_dual = Inf
+    min_time_gradnum = Inf
+
+    for i in 1:(repeat+1) # +1 for warm-up
+        gc()
+        this_time_dual = @elapsed f(dx)
+        this_time_gradnum = @elapsed f(ndx)
+        min_time_dual = min(this_time_dual, min_time_dual)
+        min_time_gradnum = min(this_time_gradnum, min_time_gradnum)
+    end
+
+    println("Time for $(f)(::Vector{Dual{Float64}}): $min_time_dual seconds")
+    println("Time for $(f)(::Vector{GradientNumber{1,Float64,Tuple{Float64}}}): $min_time_gradnum seconds")
+end
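
A hypothetical usage sketch (not part of the commit): cmp_codegen and cmp_times come from the diff above, and the include paths assume the repository root as the working directory:

include("benchmarks/benchmarks.jl")         # defines rosenbrock, ackley, ...
include("benchmarks/gradnums_vs_duals.jl")

cmp_codegen(rosenbrock)        # dump LLVM IR for both number types
cmp_times(rosenbrock, 100000)  # best-of-5 timings for both number types

Reporting the minimum over repeated runs, with one warm-up iteration and a gc() at the top of each iteration, is a standard way to keep JIT compilation and garbage collection out of the measured times.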
