% Ground truth: pick a random negative mu and generate data from the
% analytic solution of x''(t) = mu*x(t), namely x(t) = cos(sqrt(-mu)*t).
muActual = -rand;
x = @(t) cos(sqrt(-muActual)*t);

% Sample one full period of the solution as training data.
batchSize = 100;
maxT = 2*pi/sqrt(-muActual);
t = dlarray(linspace(0,maxT,batchSize),"CB");
xactual = x(t);   % inherits the "CB" format from t
% The network maps time t to the predicted state x(t).
layers = [
    featureInputLayer(1)
    fullyConnectedLayer(100)
    tanhLayer
    fullyConnectedLayer(100)
    tanhLayer
    fullyConnectedLayer(1)];

% Learnable parameters: the network weights plus the unknown ODE
% coefficient mu (initial guess -0.5).
params.net = dlnetwork(layers);
params.mu = dlarray(-0.5);
% Training hyperparameters.
numEpochs = 5000;
lr = 1e-3;

% Separate Adam states for the network and for mu; each gets its own
% adamupdate call in the loop below.
avgG.net = [];   avgG.mu = [];
avgSqG.net = []; avgSqG.mu = [];

% Accelerate the loss function and clear any previously cached traces.
lossFcn = dlaccelerate(@modelLoss);
clearCache(lossFcn);
for i = 1:numEpochs
    [loss,grad] = dlfeval(lossFcn,t,xactual,params);
    % Update the network weights and the mu estimate with their own Adam states.
    [params.net,avgG.net,avgSqG.net] = adamupdate(params.net,grad.net,avgG.net,avgSqG.net,i,lr);
    [params.mu,avgG.mu,avgSqG.mu] = adamupdate(params.mu,grad.mu,avgG.mu,avgSqG.mu,i,lr);
    if mod(i,1000) == 0
        fprintf("Epoch: %d, Predicted mu: %.3f, Actual mu: %.3f\n",i,extractdata(params.mu),muActual);
    end
end
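
% Optional sanity check: a minimal sketch (assumes the training loop above
% has completed) that compares the trained network's prediction with the
% analytic solution and reports the recovered parameter.
xpred = predict(params.net,t);
figure
plot(extractdata(t),extractdata(xactual),"-", ...
    extractdata(t),extractdata(xpred),"--")
legend("Analytic x(t)","PINN prediction")
xlabel("t")
ylabel("x(t)")
title(sprintf("Recovered mu = %.3f (actual %.3f)", ...
    extractdata(params.mu),muActual))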
function [loss,grad] = modelLoss(t,xtarget,params)
% Physics-informed loss: ODE residual of x'' = mu*x plus a data-fit term.
xpred = forward(params.net,t);

% First and second time derivatives of the prediction. Both dlgradient
% calls need EnableHigherDerivatives=true because their outputs are
% differentiated again when computing the parameter gradients below.
dxdt = dlgradient(sum(xpred),t,EnableHigherDerivatives=true);
d2xdt2 = dlgradient(sum(dxdt),t,EnableHigherDerivatives=true);

% Residual of the ODE x'' - mu*x = 0, plus a fit to the observed data.
odeResidual = d2xdt2 - params.mu*xpred;
odeLoss = mean(odeResidual.^2);
dataLoss = l2loss(xpred,xtarget);
loss = odeLoss + dataLoss;

% Gradients with respect to both the network weights and mu.
[grad.net,grad.mu] = dlgradient(loss,params.net.Learnables,params.mu);
end
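
% How the pieces fit together: the ODE being identified is x''(t) = mu*x(t).
% For mu < 0, its solution with x(0) = 1 and x'(0) = 0 is
% x(t) = cos(sqrt(-mu)*t), which is exactly how xactual is generated above,
% and maxT = 2*pi/sqrt(-mu) covers one full period. modelLoss combines the
% mean squared ODE residual, which is small only when the network output and
% the current mu estimate are mutually consistent with the ODE, with the
% data-fit term l2loss(xpred,xtarget), which anchors the network to the
% observed trajectory; minimizing their sum over both the network weights
% and mu recovers the unknown parameter.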