💣 Machine learning which might blow up in your face 💣

Spelling: decend/decent -> descend/descent

+29 -29
+2 -2
bench/bench-lstm.hs
··· 27 27 , bench "backwards-60" $ nf (nfT3 . uncurry4 (testRun60' layer60)) (rec60, input40, rec60, rec60) 28 28 , bench "backwards-512" $ nf (nfT3 . uncurry4 (testRun512' layer512)) (rec512, input40, rec512, rec512) 29 29 ] 30 - , bgroup "update" [ bench "matrix-60x60" $ nf (uncurry3 (decendVector 1 1 1)) (upIn60, upIn60, upIn60) 31 - , bench "matrix-512x512" $ nf (uncurry3 (decendVector 1 1 1)) (upIn512, upIn512, upIn512) 30 + , bgroup "update" [ bench "matrix-60x60" $ nf (uncurry3 (descendVector 1 1 1)) (upIn60, upIn60, upIn60) 31 + , bench "matrix-512x512" $ nf (uncurry3 (descendVector 1 1 1)) (upIn512, upIn512, upIn512) 32 32 ] 33 33 , bgroup "train" [ bench "one-time-step" $ whnf (nfT2 . trainRecurrent lp lstm 0) [(input40, Just input40)] 34 34 , bench "ten-time-steps" $ whnf (nfT2 . trainRecurrent lp lstm 0) $ replicate 10 (input40, Just input40)
+2 -2
cbits/gradient_decent.c cbits/gradient_descent.c
··· 1 - #include "gradient_decent.h" 1 + #include "gradient_descent.h" 2 2 3 - void decend_cpu(int len, double rate, double momentum, double regulariser, 3 + void descend_cpu(int len, double rate, double momentum, double regulariser, 4 4 const double* weights, 5 5 const double* gradient, 6 6 const double* last,
+1 -1
cbits/gradient_decent.h cbits/gradient_descent.h
··· 1 1 #include <stdio.h> 2 2 #include <stdint.h> 3 3 4 - void decend_cpu(int len, double rate, double momentum, double regulariser, 4 + void descend_cpu(int len, double rate, double momentum, double regulariser, 5 5 const double* weights, 6 6 const double* gradient, 7 7 const double* last,
+5 -5
grenade.cabal
··· 17 17 Grenade provides an API for composing layers of a neural network 18 18 into a sequence parallel graph in a type safe manner; running 19 19 networks with reverse automatic differentiation to calculate their 20 - gradients; and applying gradient decent for learning. 20 + gradients; and applying gradient descent for learning. 21 21 . 22 22 Documentation and examples are available on github 23 23 <https://github.com/HuwCampbell/grenade>. ··· 26 26 README.md 27 27 cbits/im2col.h 28 28 cbits/im2col.c 29 - cbits/gradient_decent.h 30 - cbits/gradient_decent.c 29 + cbits/gradient_descent.h 30 + cbits/gradient_descent.c 31 31 cbits/pad.h 32 32 cbits/pad.c 33 33 ··· 108 108 Grenade.Utils.OneHot 109 109 110 110 includes: cbits/im2col.h 111 - cbits/gradient_decent.h 111 + cbits/gradient_descent.h 112 112 cbits/pad.h 113 113 c-sources: cbits/im2col.c 114 - cbits/gradient_decent.c 114 + cbits/gradient_descent.c 115 115 cbits/pad.c 116 116 117 117 cc-options: -std=c99 -O3 -msse4.2 -Wall -Werror -DCABAL=1
+1 -1
src/Grenade/Core/Network.hs
··· 139 139 = (GNil, o) 140 140 141 141 142 - -- | Apply one step of stochastic gradient decent across the network. 142 + -- | Apply one step of stochastic gradient descent across the network. 143 143 applyUpdate :: LearningParameters 144 144 -> Network layers shapes 145 145 -> Gradients layers
+1 -1
src/Grenade/Layers/Convolution.hs
··· 131 131 ) => UpdateLayer (Convolution channels filters kernelRows kernelColumns strideRows strideColumns) where 132 132 type Gradient (Convolution channels filters kernelRows kernelCols strideRows strideCols) = (Convolution' channels filters kernelRows kernelCols strideRows strideCols) 133 133 runUpdate LearningParameters {..} (Convolution oldKernel oldMomentum) (Convolution' kernelGradient) = 134 - let (newKernel, newMomentum) = decendMatrix learningRate learningMomentum learningRegulariser oldKernel kernelGradient oldMomentum 134 + let (newKernel, newMomentum) = descendMatrix learningRate learningMomentum learningRegulariser oldKernel kernelGradient oldMomentum 135 135 in Convolution newKernel newMomentum 136 136 137 137 createRandom = randomConvolution
+1 -1
src/Grenade/Layers/Deconvolution.hs
··· 130 130 ) => UpdateLayer (Deconvolution channels filters kernelRows kernelColumns strideRows strideColumns) where 131 131 type Gradient (Deconvolution channels filters kernelRows kernelCols strideRows strideCols) = (Deconvolution' channels filters kernelRows kernelCols strideRows strideCols) 132 132 runUpdate LearningParameters {..} (Deconvolution oldKernel oldMomentum) (Deconvolution' kernelGradient) = 133 - let (newKernel, newMomentum) = decendMatrix learningRate learningMomentum learningRegulariser oldKernel kernelGradient oldMomentum 133 + let (newKernel, newMomentum) = descendMatrix learningRate learningMomentum learningRegulariser oldKernel kernelGradient oldMomentum 134 134 in Deconvolution newKernel newMomentum 135 135 136 136 createRandom = randomDeconvolution
+2 -2
src/Grenade/Layers/FullyConnected.hs
··· 39 39 type Gradient (FullyConnected i o) = (FullyConnected' i o) 40 40 41 41 runUpdate LearningParameters {..} (FullyConnected (FullyConnected' oldBias oldActivations) (FullyConnected' oldBiasMomentum oldMomentum)) (FullyConnected' biasGradient activationGradient) = 42 - let (newBias, newBiasMomentum) = decendVector learningRate learningMomentum learningRegulariser oldBias biasGradient oldBiasMomentum 43 - (newActivations, newMomentum) = decendMatrix learningRate learningMomentum learningRegulariser oldActivations activationGradient oldMomentum 42 + let (newBias, newBiasMomentum) = descendVector learningRate learningMomentum learningRegulariser oldBias biasGradient oldBiasMomentum 43 + (newActivations, newMomentum) = descendMatrix learningRate learningMomentum learningRegulariser oldActivations activationGradient oldMomentum 44 44 in FullyConnected (FullyConnected' newBias newActivations) (FullyConnected' newBiasMomentum newMomentum) 45 45 46 46 createRandom = randomFullyConnected
+12 -12
src/Grenade/Layers/Internal/Update.hs
··· 1 1 {-# LANGUAGE ForeignFunctionInterface #-} 2 2 module Grenade.Layers.Internal.Update ( 3 - decendMatrix 4 - , decendVector 3 + descendMatrix 4 + , descendVector 5 5 ) where 6 6 7 7 import Data.Maybe ( fromJust ) ··· 17 17 18 18 import System.IO.Unsafe ( unsafePerformIO ) 19 19 20 - decendMatrix :: (KnownNat rows, KnownNat columns) => Double -> Double -> Double -> L rows columns -> L rows columns -> L rows columns -> (L rows columns, L rows columns) 21 - decendMatrix rate momentum regulariser weights gradient lastUpdate = 20 + descendMatrix :: (KnownNat rows, KnownNat columns) => Double -> Double -> Double -> L rows columns -> L rows columns -> L rows columns -> (L rows columns, L rows columns) 21 + descendMatrix rate momentum regulariser weights gradient lastUpdate = 22 22 let (rows, cols) = size weights 23 23 len = rows * cols 24 24 -- Most gradients come in in ColumnMajor, ··· 29 29 weights' = flatten . tr . extract $ weights 30 30 gradient' = flatten . tr . extract $ gradient 31 31 lastUpdate' = flatten . tr . extract $ lastUpdate 32 - (vw, vm) = decendUnsafe len rate momentum regulariser weights' gradient' lastUpdate' 32 + (vw, vm) = descendUnsafe len rate momentum regulariser weights' gradient' lastUpdate' 33 33 34 34 -- Note that it's ColumnMajor, as we did a transpose before 35 35 -- using the internal vectors. ··· 37 37 mm = U.matrixFromVector U.ColumnMajor rows cols vm 38 38 in (fromJust . create $ mw, fromJust . create $ mm) 39 39 40 - decendVector :: (KnownNat r) => Double -> Double -> Double -> R r -> R r -> R r -> (R r, R r) 41 - decendVector rate momentum regulariser weights gradient lastUpdate = 40 + descendVector :: (KnownNat r) => Double -> Double -> Double -> R r -> R r -> R r -> (R r, R r) 41 + descendVector rate momentum regulariser weights gradient lastUpdate = 42 42 let len = size weights 43 43 weights' = extract weights 44 44 gradient' = extract gradient 45 45 lastUpdate' = extract lastUpdate 46 - (vw, vm) = decendUnsafe len rate momentum regulariser weights' gradient' lastUpdate' 46 + (vw, vm) = descendUnsafe len rate momentum regulariser weights' gradient' lastUpdate' 47 47 in (fromJust $ create vw, fromJust $ create vm) 48 48 49 - decendUnsafe :: Int -> Double -> Double -> Double -> Vector Double -> Vector Double -> Vector Double -> (Vector Double, Vector Double) 50 - decendUnsafe len rate momentum regulariser weights gradient lastUpdate = 49 + descendUnsafe :: Int -> Double -> Double -> Double -> Vector Double -> Vector Double -> Vector Double -> (Vector Double, Vector Double) 50 + descendUnsafe len rate momentum regulariser weights gradient lastUpdate = 51 51 unsafePerformIO $ do 52 52 outWPtr <- mallocForeignPtrArray len 53 53 outMPtr <- mallocForeignPtrArray len ··· 60 60 withForeignPtr lPtr $ \lPtr' -> 61 61 withForeignPtr outWPtr $ \outWPtr' -> 62 62 withForeignPtr outMPtr $ \outMPtr' -> 63 - decend_cpu len rate momentum regulariser wPtr' gPtr' lPtr' outWPtr' outMPtr' 63 + descend_cpu len rate momentum regulariser wPtr' gPtr' lPtr' outWPtr' outMPtr' 64 64 65 65 return (U.unsafeFromForeignPtr0 outWPtr len, U.unsafeFromForeignPtr0 outMPtr len) 66 66 67 67 foreign import ccall unsafe 68 - decend_cpu 68 + descend_cpu 69 69 :: Int -> Double -> Double -> Double -> Ptr Double -> Ptr Double -> Ptr Double -> Ptr Double -> Ptr Double -> IO ()
+2 -2
src/Grenade/Recurrent/Layers/LSTM.hs
··· 87 87 -- Utility function for updating with the momentum, gradients, and weights. 88 88 u :: forall x ix out. (KnownNat ix, KnownNat out) => (x -> (L out ix)) -> x -> x -> x -> ((L out ix), (L out ix)) 89 89 u e (e -> weights) (e -> momentum) (e -> gradient) = 90 - decendMatrix learningRate learningMomentum learningRegulariser weights gradient momentum 90 + descendMatrix learningRate learningMomentum learningRegulariser weights gradient momentum 91 91 92 92 v :: forall x ix. (KnownNat ix) => (x -> (R ix)) -> x -> x -> x -> ((R ix), (R ix)) 93 93 v e (e -> weights) (e -> momentum) (e -> gradient) = 94 - decendVector learningRate learningMomentum learningRegulariser weights gradient momentum 94 + descendVector learningRate learningMomentum learningRegulariser weights gradient momentum 95 95 96 96 -- There's a lot of updates here, so to try and minimise the number of data copies 97 97 -- we'll create a mutable bucket for each.