Skip to content

Commit 32a8eaa

Browse files
committed
benchmarks: Update for DArray changes
1 parent 8afc692 commit 32a8eaa

File tree

1 file changed: +16 −13 lines changed

benchmarks/suites/nmf.jl

+16 −13
Original file line numberDiff line numberDiff line change
@@ -55,40 +55,43 @@ function nmf_suite(ctx; method, accels)
5555
nw = length(workers())
5656
nsuite = BenchmarkGroup()
5757
while nw > 0
58-
opts = if accel == "cuda"
58+
scope = if accel == "cuda"
59+
error("Not implemented")
5960
Dagger.Sch.SchedulerOptions(;proctypes=[
6061
DaggerGPU.CuArrayDeviceProc
6162
])
6263
elseif accel == "amdgpu"
64+
error("Not implemented")
6365
Dagger.Sch.SchedulerOptions(;proctypes=[
6466
DaggerGPU.ROCArrayProc
6567
])
6668
elseif accel == "cpu"
67-
Dagger.Sch.SchedulerOptions()
69+
scope = Dagger.scope(;workers=workers()[1:nw])
6870
else
6971
error("Unknown accelerator $accel")
7072
end
7173
#bsz = ncol ÷ length(workers())
7274
bsz = ncol ÷ 64
7375
nsuite["Workers: $nw"] = @benchmarkable begin
74-
_ctx = Context($ctx, workers()[1:$nw])
75-
compute(_ctx, nnmf($X[], $W[], $H[]); options=$opts)
76+
Dagger.with_options(;scope=$scope) do
77+
fetch(nnmf($X[], $W[], $H[]))
78+
end
7679
end setup=begin
7780
_nw, _scale = $nw, $scale
7881
@info "Starting $_nw worker Dagger NNMF (scale by $_scale)"
7982
if $accel == "cuda"
8083
# FIXME: Allocate with CUDA.rand if possible
81-
$X[] = Dagger.mapchunks(CUDA.cu, compute(rand(Blocks($bsz, $bsz), Float32, $nrow, $ncol); options=$opts))
82-
$W[] = Dagger.mapchunks(CUDA.cu, compute(rand(Blocks($bsz, $bsz), Float32, $nrow, $nfeatures); options=$opts))
83-
$H[] = Dagger.mapchunks(CUDA.cu, compute(rand(Blocks($bsz, $bsz), Float32, $nfeatures, $ncol); options=$opts))
84+
$X[] = Dagger.mapchunks(CuArray, rand(Blocks($bsz, $bsz), Float32, $nrow, $ncol))
85+
$W[] = Dagger.mapchunks(CuArray, rand(Blocks($bsz, $bsz), Float32, $nrow, $nfeatures))
86+
$H[] = Dagger.mapchunks(CuArray, rand(Blocks($bsz, $bsz), Float32, $nfeatures, $ncol))
8487
elseif $accel == "amdgpu"
85-
$X[] = Dagger.mapchunks(ROCArray, compute(rand(Blocks($bsz, $bsz), Float32, $nrow, $ncol); options=$opts))
86-
$W[] = Dagger.mapchunks(ROCArray, compute(rand(Blocks($bsz, $bsz), Float32, $nrow, $nfeatures); options=$opts))
87-
$H[] = Dagger.mapchunks(ROCArray, compute(rand(Blocks($bsz, $bsz), Float32, $nfeatures, $ncol); options=$opts))
88+
$X[] = Dagger.mapchunks(ROCArray, rand(Blocks($bsz, $bsz), Float32, $nrow, $ncol))
89+
$W[] = Dagger.mapchunks(ROCArray, rand(Blocks($bsz, $bsz), Float32, $nrow, $nfeatures))
90+
$H[] = Dagger.mapchunks(ROCArray, rand(Blocks($bsz, $bsz), Float32, $nfeatures, $ncol))
8891
elseif $accel == "cpu"
89-
$X[] = compute(rand(Blocks($bsz, $bsz), Float32, $nrow, $ncol); options=$opts)
90-
$W[] = compute(rand(Blocks($bsz, $bsz), Float32, $nrow, $nfeatures); options=$opts)
91-
$H[] = compute(rand(Blocks($bsz, $bsz), Float32, $nfeatures, $ncol); options=$opts)
92+
$X[] = rand(Blocks($bsz, $bsz), Float32, $nrow, $ncol)
93+
$W[] = rand(Blocks($bsz, $bsz), Float32, $nrow, $nfeatures)
94+
$H[] = rand(Blocks($bsz, $bsz), Float32, $nfeatures, $ncol)
9295
end
9396
end teardown=begin
9497
if render != "" && !live

0 commit comments

Comments (0)