Greetings:
I was loosely adapting Fortran code into Julia, which solves the standard cake eating problem using spline interpolation and numerical maximization. Whereas the original (compiled) Fortran code finishes in second, the Julia code takes many minutes.
As such, it seems I am inadvertently introducing something very inefficient. The example is based on the textbook exercises in https://www.ce-fortran.com by Hans Fehr and Fabian Kindermann, specifically Chapter 8 Exercise 5. To be clear, there are differences in the numerical maximization step. However, the parameterization, size of grid, and convergence criterion should be identical.
I attach the Julia code below.
using PyPlot
using Plots
using BenchmarkTools
using LaTeXStrings
using Parameters, QuantEcon
using Dierckx, ArgParse
using LinearAlgebra, Optim, Interpolations
using Printf
using StatsBase
using Optim: converged, maximum, maximizer, minimizer, iterations # some extra functions
#import Base.Threads.@threads
using ProfileView
#Threads.nthreads()
@with_kw struct Para
# model parameters
γ::Float64 = 0.5
egam::Float64 = 1-1/γ
β::Float64 = 0.95
a0::Float64 = 100
# numerical parameters
σ::Float64 = 1e-6
itermax::Int64 = 2000
T::Int64 = 200
NA::Int64 = 1000
a = range(0., 100., length=NA)
end
function TV(a_prime, ia, V, para)
# iteration on value function
@unpack egam, β, a = para
c = max(a[ia]-a_prime, 1e-10)
# interpolating function for value function
V_temp = CubicSplineInterpolation(a, (egam*V).^(1.0/egam))(a_prime)
V_fun = max(V_temp, 1e-10)^(egam)/egam
# calculate negative of RHS
val = -(c^egam/egam + β*V_fun)
return val
end
function f(ia, V, para)
# numerical maximization
@unpack a = para
result = optimize(x ->TV(x, ia, V, para), 0.0, a[ia], Brent())
return -result.minimum, result.minimizer
end
function update(a, V, para)
c = similar(V)
V_new = similar(V)
@inbounds for ia in eachindex(a)
#a_prime = a[ia] - c[ia]
#result = optimize(x ->TV(x, ia, V, para), 0.0, a[ia], Brent())
#result = optimize(f, [a_prime], LBFGS())
# optimal consumption and value functions
#V_new[ia] = -minimum(result)
V_new[ia], a_prime = f(ia, V, para)
c[ia] = a[ia] - a_prime
end
return c, V_new
end
function value_function_iter(para)
@unpack γ, egam, β, a0, σ, itermax, T, NA, a = para
#initial consumption guess
#c = collect(a)./2
c = collect(a./2)
# initial value function based on consumption guess
V = c.^egam/egam./(1-β)
#V = zero(c)
conv_level = 1.0
iter = 1
while (iter < itermax) && (conv_level > σ)
#f(ia) = optimize(x ->TV(x, ia, V, para), 0.0, a[ia], Brent())
# calculate optimal decision for every grid point
#Threads.@threads for ia in eachindex(a)
c, V_new = update(a, V, para)
# get convergence level
crit = abs.(V_new-V)./max.(abs.(V), 1e-10)
crit = filter(!isnan, crit)
conv_level = maximum(crit)
println("Iteration=$(iter); convergence level=$(conv_level)")
# update value function and iteration count
V = V_new
iter += 1
end
if conv_level > σ
println("Failed to converge")
end
return c, V
end
function main()
para = Para(NA=1000)
c, V = value_function_iter(para)
end
c, V = main()
# smaller sample for profiling
para = Para(NA=100)
value_function_iter($para)
ProfileView.@profview value_function_iter(para)
#@btime TV($a_prime, $ia, $V, $para)
function simulate_consumption(c; a0=100, T=200)
# Policy function
c_pol(x) = Spline1D(a, c)(x)
a_t = zeros(T)
c_t = zeros(T)
a_t[1] = a0
c_t[1] = c_pol(a0)
for i in 2:T
a_t[i] = a_t[i-1] - c_t[i-1]
c_t[i] = c_pol(a_t[i])
end
return a_t, c_t
end
a_t, c_t = simulate_consumption(c)
t = range(1, T, length=T)
fig, ax = subplots(1, 1, figsize=(10, 5))
ax.plot(t, c_t, label="c")
ax.set_title("Consumption path")
ax.legend()
display(fig)
PyPlot.savefig("consumption_path")
fig, ax = subplots(nrows=1, ncols=2, figsize=(20, 5))
ax[1].plot(a, c, label="policy function c(a)")
ax[2].plot(a[2:end], V[2:end], label="value function V(a)")
ax[1].legend()
ax[2].legend()
tight_layout()
display(fig)