# ANN.jl
#
# A fully connected feed-forward neural network with sigmoid activations,
# trained by minibatch gradient descent on top of the ArrayFire library.

# Hyperparameters controlling a training run.
immutable ANNTrainOptions
    alpha::Float32      # learning rate
    maxEpochs::Int      # upper bound on training epochs
    batchSize::Int      # samples per minibatch
    maxError::Float32   # validation error below which training stops
end
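#=
A minimal usage sketch for the options type; the values below are
illustrative, not tuned:

    opts = ANNTrainOptions(0.1f0,  # alpha
                           250,    # maxEpochs
                           100,    # batchSize
                           0.5f0)  # maxError
=#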
# The network itself: signal[i] caches the activations of layer i during a
# forward pass, and weights[i] maps layer i (plus a bias input) to layer i+1.
type ANN{B<:Backend}
    af::ArrayFire{B}
    numLayers::Int
    signal::Vector{AFArray{B}}
    weights::Vector{AFArray{B}}
end
# Build a network with the given layer sizes. Weights are drawn uniformly
# from [-range/2, range/2); each weight matrix has an extra row for the
# bias input prepended by addBias.
function ANN{B}(af::ArrayFire{B}, layers::Vector{Int}, range = 0.05f0)
    numLayers = length(layers)
    signal = Vector{AFArray{B}}()
    weights = Vector{AFArray{B}}()
    for i in 1:numLayers
        push!(signal, array(af))
        if i != numLayers
            w = randu(af, Float32, layers[i] + 1, layers[i + 1]) .* range .- (range / 2.0f0)
            push!(weights, w)
        end
    end
    ANN(af, numLayers, signal, weights)
end
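#=
Construction sketch, assuming `af` is an already-initialized ArrayFire
handle from this wrapper (obtaining one is backend-specific and not shown
here). This builds a 784-100-10 network, e.g. for MNIST digits:

    ann = ANN(af, [784, 100, 10])   # default weight range 0.05f0
=#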
# Derivative of the logistic sigmoid, written in terms of its output:
# sigma'(x) = sigma(x) * (1 - sigma(x)).
function deriv(out)
    out .* (1.0f0 .- out)
end
# Prepend a column of ones so the bias term folds into the weight matrix.
function addBias(ann::ANN, input)
    joinArrays(1, constant(ann.af, 1.0f0, dims(input, 0)), input)
end
# Error metric between network output and target: the square root of the
# summed squared error, divided by the number of elements.
function calculateError(out, pred)
    diff = out .- pred
    sq = diff .* diff
    sqrt(sum(Float32, sq)) / elements(sq)
end
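#=
For reference, the same metric on plain Julia arrays (a sketch that does not
depend on ArrayFire). Note that it is sqrt(sum of squares) / N rather than
the usual RMS sqrt(mean of squares):

    function calculateErrorRef(out, pred)
        sq = (out .- pred) .^ 2
        sqrt(sum(sq)) / length(sq)
    end
=#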
# Run `input` through the network, caching each layer's activations in
# `signal` for later use by backPropagate.
function forwardPropagate(ann::ANN, input)
    ann.signal[1][] = input
    for i in 1:ann.numLayers - 1
        scope!(ann.af) do this
            inVec = addBias(ann, ann.signal[i])
            outVec = matmul(inVec, ann.weights[i])
            ann.signal[i + 1][] = sigmoid(outVec)
        end
    end
end
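#=
The same pass on plain Julia matrices, as a reference sketch (rows are
samples; each weight matrix carries the bias weights in its first row):

    sigmoidRef(x) = 1.0f0 ./ (1.0f0 .+ exp(-x))

    function forwardRef(X, Ws)
        signal = Any[X]
        for W in Ws
            Xb = hcat(ones(Float32, size(X, 1)), X)   # prepend bias column
            X = sigmoidRef(Xb * W)
            push!(signal, X)
        end
        signal
    end
=#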
# One gradient-descent step: propagate the output error backward through the
# layers and update each weight matrix in place.
backPropagate(ann::ANN, target, alpha::Float32) = scope!(ann.af) do this
    outVec = ann.signal[ann.numLayers]
    err = outVec .- target
    m = dims(target, 0)  # number of samples in the batch
    for i in (ann.numLayers - 1):-1:1
        scope!(ann.af) do this
            inVec = addBias(ann, ann.signal[i])
            delta = transpose(deriv(outVec) .* err)
            # Adjust weights; grad already carries the -alpha factor
            grad = (-alpha .* matmul(delta, inVec)) ./ m
            addAssign!(ann.weights[i], transpose(grad))
            # Input to the current layer is the output of the previous one
            outVec = ann.signal[i]
            allErr = matmulTT(delta, ann.weights[i])
            # Drop the bias column's error and propagate the rest backward
            err[] = allErr[:, seq(ann.af, 1, dims(outVec, 1))]
        end
    end
end
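#=
One layer of the update above on plain Julia matrices, as a reference sketch.
`A` is the layer input with bias column (m x (in+1)), `O` the layer output
(m x out), `E` the error arriving at the output (m x out), and `W` the
weights ((in+1) x out). As in the ArrayFire version above, the backward
error is computed from the already-updated weights:

    function backpropLayerRef(W, A, O, E, alpha)
        delta = transpose(O .* (1.0f0 .- O) .* E)           # out x m
        W = W .- transpose(alpha .* (delta * A)) ./ size(A, 1)
        Eprev = (transpose(delta) * transpose(W))[:, 2:end] # drop bias column
        W, Eprev
    end
=#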
# Forward-propagate `input` and return the output layer's activations.
function predict(ann::ANN, input)
    forwardPropagate(ann, input)
    ann.signal[ann.numLayers][]
end
# Minibatch gradient descent. All batches but the last are used for
# training; the last batch serves as a validation set, and training stops
# once its error drops below options.maxError.
function train(ann::ANN, input, target, options::ANNTrainOptions)
    af = ann.af
    numSamples = dims(input, 0)
    numBatches = Int(floor(numSamples / options.batchSize))
    err = 0.0f0
    allSec = 0.0f0
    sync(af)
    for i in 1:options.maxEpochs
        scope!(af) do this
            sec = @elapsed begin
                for j in 0:numBatches - 2
                    scope!(af) do this
                        startPos = j * options.batchSize
                        endPos = startPos + options.batchSize - 1
                        x = input[seq(af, startPos, endPos), :]
                        y = target[seq(af, startPos, endPos), :]
                        forwardPropagate(ann, x)
                        backPropagate(ann, y, options.alpha)
                    end
                end
                # Validate with the last batch
                startPos = (numBatches - 1) * options.batchSize
                endPos = numSamples - 1
                outVec = predict(ann, input[seq(af, startPos, endPos), :])
                err = calculateError(outVec, target[seq(af, startPos, endPos), :])
            end
            allSec += sec
            if i % 10 == 0
                println("Epoch: $i, Error: $err, Duration: $allSec seconds")
                allSec = 0.0f0
            end
        end
        # Check whether the convergence criterion has been met
        if err < options.maxError
            println("Converged on Epoch: $i")
            break
        end
    end
    err
end
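#=
End-to-end sketch (hypothetical; assumes `af`, `input`, and `target` are set
up through this wrapper, with one sample per row of `input` and one-hot rows
in `target`):

    ann  = ANN(af, [784, 100, 10])
    opts = ANNTrainOptions(0.1f0, 250, 100, 0.5f0)
    finalErr = train(ann, input, target, opts)
    probs = predict(ann, input)   # per-sample output activations
=#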