This is the place where people can discuss massiv and possibly other array libraries in Haskell.
-- | Simulate pendulum data by explicit Euler steps, adding Gaussian
-- observation noise to the angle component only.  Returns an (n x 2)
-- array whose rows are (angle, angular velocity), with row 0 holding the
-- initial condition (0.01, 0.0).
simulatedDataCreate :: forall r m . (Mutable r Ix2 Double, PrimMonad m, MonadRandom m) =>
  Double -> Double -> Int -> m (Array r Ix2 Double)
simulatedDataCreate g deltaT n = createArrayS_ (Sz2 n 2) initialise
  where
    initialise marr = do
      _ <- write marr (Ix2 0 0) 0.01
      _ <- write marr (Ix2 0 1) 0.00
      -- Step i writes row (i + 1); the last valid target row is n - 1,
      -- so the source rows must stop at n - 2.  (The original iterated
      -- over [0 .. n - 1], writing one row past the end of the array.)
      Prelude.mapM_ (step marr) [0 .. n - 2]

    -- | Advance row i to row (i + 1) with one Euler step.
    step :: MArray (PrimState m) r Ix2 Double -> Int -> m ()
    step marr i = do
      mx1 <- read marr (Ix2 i 0)
      mx2 <- read marr (Ix2 i 1)
      case (,) <$> mx1 <*> mx2 of
        Nothing -> error "simulatedDataCreate: read out of bounds"
        Just (y1Prev, y2Prev) -> do
          let x1New = y1Prev + y2Prev * deltaT
              x2New = y2Prev - g * sin y1Prev * deltaT
          -- Observation noise on the angle only.
          eta <- sample (R.Normal 0.0 0.001)
          _ <- write marr (Ix2 (i + 1) 0) (x1New + eta)
          _ <- write marr (Ix2 (i + 1) 1) x2New
          pure ()
-- | Euler-discretised pendulum simulation into a primitive (bigT x 2)
-- array.  Row i holds the state after i + 1 Euler steps from (0.01, 0.0);
-- NOTE(review): unlike 'simulatedDataCreate', the initial state itself is
-- never stored — confirm that is intended.
simulatedDataPrim :: PrimMonad m => Double -> Double -> Ix1 -> m (Array P Ix2 Double)
simulatedDataPrim g deltaT bigT =
  createArrayS_ (Sz (bigT :. 2)) $ \ma ->
    foldlM_ (eulerStep ma) (0.01, 0.00) (0 ..: bigT)
  where
    -- One Euler step: write the new state into row i, thread it onward.
    eulerStep ma st@(theta, omega) i = do
      let theta' = theta + omega * deltaT
          omega' = omega - g * sin theta * deltaT
      write_ ma (i :. 0) theta'
      write_ ma (i :. 1) omega'
      -- Debug tracing of the incoming state, kept from the original.
      trace (show st) (pure (theta', omega'))
random-source
to random
for random-fu
:wink: I might have a draft PR either later on tonight or tomorrow night
rvar
on MonadPrompt
needs looking at
MonadPrompt
doesn't even have a github repo
Let me know if I can help
If you can finish up the rough edges, that will be great:
stratified resampling is where the cumulative sums of the weights are formed, the unit interval is divided into equal pieces, and a uniform is sampled on each piece, which means that points with large weights are sampled at least once and those with small weights at most once - see here for more info: https://xianblog.wordpress.com/2012/03/13/resampling-and-gpu-parallelism/
Here's a julia implementation (which itself is based off a matlab implementation)
# Stratified resampling: form the cumulative sums of the weights, divide
# [0, 1) into N equal strata, draw one uniform position per stratum, and
# map each position to the first index whose cumulative sum exceeds it.
# NOTE(review): assumes the weights sum to (at least) the largest position;
# if floating-point rounding leaves cumulative_sum[end] below positions[N],
# the while loop runs j past the end of cumulative_sum — TODO confirm.
function resample_stratified( weights )
    N = length(weights)
    # make N subdivisions, and choose a random position within each one
    positions = (rand(N) + collect(range(0, N - 1, length = N))) / N
    indexes = zeros(Int64, N)
    cumulative_sum = cumsum(weights)
    # i walks the positions, j walks the cumulative sums; both only ever
    # advance, so the whole pass is O(N).
    i, j = 1, 1
    while i <= N
        if positions[i] < cumulative_sum[j]
            indexes[i] = j
            i += 1
        else
            j += 1
        end
    end
    return indexes
end
and here is my massiv translation
-- | Stratified resampling: divide [0, 1) into N equal strata, draw one
-- uniform position per stratum, and map each position to the first index
-- whose cumulative weight exceeds it.  Assumes non-negative weights that
-- sum to (approximately) 1.  Returns, for each stratum, the chosen
-- particle index.
resample_stratified :: forall g m r . (StatefulGen g m, PrimMonad m, Manifest r Ix1 Double) =>
  Array r Ix1 Double -> g -> m (Array P Int Int)
resample_stratified weights stateGen = do
  ps <- positions
  cs <- cumulativeSum
  -- Both the position pointer i and the cumulative-sum pointer advance
  -- monotonically (the fold threads the last chosen index), so the whole
  -- pass is O(N).
  let pick ma j i = do
        let go k
              -- Clamp at the last index: floating-point rounding can
              -- leave the final cumulative sum just below the final
              -- position, which would otherwise run off the end of cs.
              | k >= bigN - 1 || (ps ! i) < (cs ! k) = do
                  _ <- write ma i k
                  return k
              | otherwise = go (k + 1)
        go j
  createArrayS_ (Sz bigN) $ \ma -> foldlM_ (pick ma) 0 (0 ..: bigN)
  where
    bigN = elemsCount weights

    -- | Running sums of the weights.
    cumulativeSum :: m (Array P Int Double)
    cumulativeSum = createArrayS_ (Sz bigN) $ \ma ->
      foldlM_ (f ma) 0.0 (0 ..: bigN)
      where
        f ma s i = do
          let t = s + weights ! i
          _ <- write ma i t
          return t

    -- | One uniform draw per stratum: (i + eps) / N with eps in [0, 1).
    positions :: m (Array P Int Double)
    positions = createArrayS_ (Sz bigN) $ \ma ->
      foldlM_ (f ma) 0.0 (0 ..: bigN)
      where
        f ma _ i = do
          epsilon <- sampleFrom stateGen (Uniform 0.0 1.0)
          let t = (epsilon + fromIntegral i) / fromIntegral bigN
          _ <- write ma i t
          return t
In both the Julia and the massiv versions it is not clear what is going on, so I should probably re-write it to be as clear as possible in Haskell without massiv before trying to optimise things.
Thoughts?
This works fine
-- | One particle-filter step without process noise: resample the particle
-- columns according to the weights, then advance every particle by one
-- Euler step of the pendulum dynamics.
pfOneStep' :: (PrimMonad m, MonadThrow m, StatefulGen g m, MonadReader g m) =>
  Array D Ix2 Double -> Array P Int Double -> m (Array DL Ix2 Double)
pfOneStep' x_pf wn = do
  let nParticles = elemsCount wn
  idxs <- resample_stratified' wn
  -- Reorder the particle columns by the resampled indices.
  let resampled = backpermute' (Sz (2 :. nParticles)) (\(i :. j) -> i :. (idxs ! j)) x_pf
      thetaPrev = resampled !> 0
      omegaPrev = resampled !> 1
      thetaNew  = thetaPrev !+! omegaPrev .* deltaT
      omegaNew  = omegaPrev !-! M.map (\theta -> g * sin theta * deltaT) thetaPrev
  stackSlicesM (Dim 2) [thetaNew, omegaNew]
but now I want to add some gaussian noise
-- | One particle-filter step with Gaussian process noise added to the
-- angle component after resampling.
pfOneStep :: (PrimMonad m, MonadThrow m, StatefulGen g m, MonadReader g m) =>
  Array D Ix2 Double -> Array P Int Double -> m (Array DL Ix2 Double)
pfOneStep x_pf wn = do
  let bigN = elemsCount wn
  is <- resample_stratified' wn
  etaS :: Array DS Ix1 (LA.Vector Double) <- M.sreplicateM (Sz bigN) $ sample (Normal (LA.vector [0.0, 0.0]) bigQH)
  -- DS arrays are streams, not Source, so they cannot be mapped over
  -- directly (this was the "Could not deduce (Source DS ...)" error);
  -- materialise them first.  LA.Vector is neither Prim nor Unbox, so the
  -- boxed representation B is required.
  let eta  = computeAs B etaS
      eta1 = M.map (LA.! 0) eta
  -- NOTE(review): only the first noise component is used — y2New gets no
  -- noise even though the sampled vectors are 2-dimensional.  Confirm
  -- this is intended.
  let y_pf = backpermute' (Sz (2 :. bigN)) (\(i :. j) -> i :. (is ! j)) x_pf
      y1Prev = y_pf !> 0
      y2Prev = y_pf !> 1
      y1New = y1Prev !+! y2Prev .* deltaT !+! eta1
      y2New = y2Prev !-! M.map (\x -> g * sin x * deltaT) y1Prev
  stackSlicesM (Dim 2) [y1New, y2New]
But I can't do this — the compiler reports: Could not deduce (Source DS Ix1 (LA.Vector Double)) arising from a use of ‘M.map’
sampleNormal :: forall g m . (PrimMonad m, StatefulGen g m, MonadReader g m) =>
Array P Ix1 Double -> Array P Ix2 Double -> m (Array P Ix1 Double)
sampleNormal mu sigma = ...
P
array to a DS
array
massiv
-- | Draw a single sample from a multivariate normal whose mean and
-- covariance are given as massiv arrays, converting through hmatrix for
-- the sampling and back to a primitive massiv vector.
sampleNormal :: forall g m . (PrimMonad m, StatefulGen g m, MonadReader g m) =>
  Array P Ix1 Double -> Array P Ix2 Double -> m (Array P Ix1 Double)
sampleNormal mu sigma = do
  let meanVec = LA.fromList (toList mu)
      covMat  = LA.sym (LA.fromLists (toLists sigma))
  draw <- sample (Normal meanVec covMat)
  pure (fromList Seq (LA.toList draw))
x_pfs = particles <! k
. However it is not exactly the same, as you will definitely notice. x_pfs[:, :, :, k]
is mutation of an existing array, and I haven't implemented anything like that yet. However, writing a manual loop that iterates over the desired indices and assigns values to the mutable array at those indices is of course possible.
mapM_ (\ix@(i :> j :. k) -> write ma (i :> j :> k :. l) (particles ! ix)) ((1 :> 0 :. 0) ..: (3 :> bigN :. bigT))
λ> bigN = 5
λ> bigT = 6
λ> ((1 :> 0 :. 0) ..: (3 :> bigN :. bigT))
Array D Seq (Sz (2 :> 5 :. 6))
[ [ [ 1 :> 0 :. 0, 1 :> 0 :. 1, 1 :> 0 :. 2, 1 :> 0 :. 3, 1 :> 0 :. 4, 1 :> 0 :. 5 ]
, [ 1 :> 1 :. 0, 1 :> 1 :. 1, 1 :> 1 :. 2, 1 :> 1 :. 3, 1 :> 1 :. 4, 1 :> 1 :. 5 ]
, [ 1 :> 2 :. 0, 1 :> 2 :. 1, 1 :> 2 :. 2, 1 :> 2 :. 3, 1 :> 2 :. 4, 1 :> 2 :. 5 ]
, [ 1 :> 3 :. 0, 1 :> 3 :. 1, 1 :> 3 :. 2, 1 :> 3 :. 3, 1 :> 3 :. 4, 1 :> 3 :. 5 ]
, [ 1 :> 4 :. 0, 1 :> 4 :. 1, 1 :> 4 :. 2, 1 :> 4 :. 3, 1 :> 4 :. 4, 1 :> 4 :. 5 ]
]
, [ [ 2 :> 0 :. 0, 2 :> 0 :. 1, 2 :> 0 :. 2, 2 :> 0 :. 3, 2 :> 0 :. 4, 2 :> 0 :. 5 ]
, [ 2 :> 1 :. 0, 2 :> 1 :. 1, 2 :> 1 :. 2, 2 :> 1 :. 3, 2 :> 1 :. 4, 2 :> 1 :. 5 ]
, [ 2 :> 2 :. 0, 2 :> 2 :. 1, 2 :> 2 :. 2, 2 :> 2 :. 3, 2 :> 2 :. 4, 2 :> 2 :. 5 ]
, [ 2 :> 3 :. 0, 2 :> 3 :. 1, 2 :> 3 :. 2, 2 :> 3 :. 3, 2 :> 3 :. 4, 2 :> 3 :. 5 ]
, [ 2 :> 4 :. 0, 2 :> 4 :. 1, 2 :> 4 :. 2, 2 :> 4 :. 3, 2 :> 4 :. 4, 2 :> 4 :. 5 ]
]
]
λ>
...
instead would look closer to what you had: mapM_ (\(i :> j :. k) -> write ma (i :> j :> k :. l) (particles ! (i :> j :. k))) (1 :> 0 :. 0 ... 2 :> (bigN - 1) :. (bigT - 1))
..:
for that matter