3 changes: 3 additions & 0 deletions src/StructuralEquationModels.jl
@@ -4,6 +4,7 @@ using LinearAlgebra,
Optim,
NLSolversBase,
Statistics,
StatsBase,
SparseArrays,
Symbolics,
NLopt,
@@ -18,6 +19,8 @@ using LinearAlgebra,
import DataFrames: DataFrame
export StenoGraphs, @StenoGraph, meld

const SEM = StructuralEquationModels
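# Sketch of intended use (editor assumption, not from the PR): the alias lets
# internal code and tests write e.g. SEM.start_fabin3(model) instead of
# StructuralEquationModels.start_fabin3(model).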

# type hierarchy
include("types.jl")
include("objective_gradient_hessian.jl")
9 changes: 3 additions & 6 deletions src/additional_functions/helper.jl
@@ -10,7 +10,7 @@ function neumann_series(mat::SparseMatrixCSC)
return inverse
end

#=
#=
function make_onelement_array(A)
isa(A, Array) ? nothing : (A = [A])
return A
@@ -108,11 +108,8 @@ function sparse_outer_mul!(C, A, B::Vector, ind) #computes A*S*B -> C, where ind
end

function cov_and_mean(rows; corrected = false)
data = transpose(reduce(hcat, rows))
size(rows, 1) > 1 ? obs_cov = Statistics.cov(data; corrected = corrected) :
obs_cov = reshape([0.0], 1, 1)
obs_mean = vec(Statistics.mean(data, dims = 1))
return obs_cov, obs_mean
obs_mean, obs_cov = StatsBase.mean_and_cov(reduce(hcat, rows), 2, corrected = corrected)
return obs_cov, vec(obs_mean)
end
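# Sketch of the StatsBase call above (hypothetical data, not from the PR):
#   X = reduce(hcat, [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])    # 2 × 3, observations as columns
#   m, C = StatsBase.mean_and_cov(X, 2, corrected = false)
#   vec(m) == [3.0, 4.0]                                       # per-variable means
#   C ≈ Statistics.cov(permutedims(X), corrected = false)      # 2 × 2 covariance matrix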

function duplication_matrix(nobs)
46 changes: 32 additions & 14 deletions src/additional_functions/parameters.jl
@@ -1,5 +1,14 @@
function fill_A_S_M(A, S, M, A_indices, S_indices, M_indices, parameters)
for (iA, iS, par) in zip(A_indices, S_indices, parameters)
# fill A, S, and M matrices with the parameter values according to the parameters map
function fill_A_S_M!(
A::AbstractMatrix,
S::AbstractMatrix,
M::Union{AbstractVector, Nothing},
A_indices::AbstractArrayParamsMap,
S_indices::AbstractArrayParamsMap,
M_indices::Union{AbstractArrayParamsMap, Nothing},
parameters::AbstractVector,
)
@inbounds for (iA, iS, par) in zip(A_indices, S_indices, parameters)
for index_A in iA
A[index_A] = par
end
@@ -10,22 +19,28 @@ function fill_A_S_M(A, S, M, A_indices, S_indices, M_indices, parameters)
end

if !isnothing(M)
for (iM, par) in zip(M_indices, parameters)
@inbounds for (iM, par) in zip(M_indices, parameters)
for index_M in iM
M[index_M] = par
end
end
end
end
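# Sketch with hypothetical index maps (not from the PR): for θ = [0.5, 1.2],
# A_indices = [[3], Int[]] and S_indices = [Int[], [1, 4]],
#   fill_A_S_M!(A, S, nothing, A_indices, S_indices, nothing, θ)
# writes A[3] = 0.5 and S[1] = S[4] = 1.2, leaving every other entry of A and S untouched.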

function get_parameter_indices(parameters, M; linear = true, kwargs...)
M_indices = [findall(x -> (x == par), M) for par in parameters]

if linear
M_indices = cartesian2linear.(M_indices, [M])
# build the map from the index of the parameter to the linear indices
# of this parameter's occurrences in M
# returns an ArrayParamsMap object
function array_parameters_map(parameters::AbstractVector, M::AbstractArray)
params_index = Dict(param => i for (i, param) in enumerate(parameters))
T = Base.eltype(eachindex(M))
res = [Vector{T}() for _ in eachindex(parameters)]
for (i, val) in enumerate(M)
par_ind = get(params_index, val, nothing)
if !isnothing(par_ind)
push!(res[par_ind], i)
end
end

return M_indices
return res
end
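# Sketch of the mapping (hypothetical input, not from the PR): for
# parameters = [:a, :b] and M = [:a 0; :b :a], array_parameters_map returns
# [[1, 4], [2]]: parameter :a occurs at linear (column-major) indices 1 and 4
# of M, parameter :b at index 2. Unlike the old get_parameter_indices, this is
# a single pass over M rather than one findall per parameter.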

function eachindex_lower(M; linear_indices = false, kwargs...)
@@ -49,9 +64,6 @@ function linear2cartesian(ind_lin, dims)
return ind_cart
end

cartesian2linear(ind_cart, A::AbstractArray) = cartesian2linear(ind_cart, size(A))
linear2cartesian(ind_linear, A::AbstractArray) = linear2cartesian(ind_linear, size(A))

function set_constants!(M, M_pre)
for index in eachindex(M)
δ = tryparse(Float64, string(M[index]))
@@ -85,12 +97,18 @@ function get_matrix_derivative(M_indices, parameters, n_long)
return ∇M
end

function fill_matrix(M, M_indices, parameters)
# fill M with parameters
function fill_matrix!(
M::AbstractMatrix,
M_indices::AbstractArrayParamsMap,
parameters::AbstractVector,
)
for (iM, par) in zip(M_indices, parameters)
for index_M in iM
M[index_M] = par
end
end
return M
end

function get_partition(A_indices, S_indices)
6 changes: 3 additions & 3 deletions src/additional_functions/start_val/start_fabin3.jl
@@ -1,6 +1,6 @@
"""
start_fabin3(model)

Return a vector of FABIN 3 starting values (see Hägglund 1982).
Not available for ensemble models.
"""
@@ -58,8 +58,8 @@ function start_fabin3(ram_matrices::RAMMatrices, Σ, μ)
if !isnothing(M)
in_M = length.(M_ind) .!= 0
in_any = in_A .| in_S .| in_M
else
in_any = in_A .| in_S
else
in_any = in_A .| in_S
end

if !all(in_any)
2 changes: 1 addition & 1 deletion src/diff/Empty.jl
@@ -34,5 +34,5 @@ update_observed(optimizer::SemOptimizerEmpty, observed::SemObserved; kwargs...)
############################################################################################

function Base.show(io::IO, struct_inst::SemOptimizerEmpty)
StructuralEquationModels.print_type_name(io, struct_inst)
print_type_name(io, struct_inst)
end
2 changes: 1 addition & 1 deletion src/frontend/fit/fitmeasures/n_obs.jl
@@ -13,4 +13,4 @@ n_obs(sem_fit::SemFit) = n_obs(sem_fit.model)

n_obs(model::AbstractSemSingle) = n_obs(model.observed)

n_obs(model::SemEnsemble) = sum(n_obs.(model.sems))
n_obs(model::SemEnsemble) = sum(n_obs, model.sems)
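# Editor note: sum(n_obs, model.sems) applies n_obs inside the reduction,
# avoiding the temporary array allocated by the broadcast n_obs.(model.sems).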
8 changes: 4 additions & 4 deletions src/frontend/fit/standard_errors/hessian.jl
@@ -4,9 +4,9 @@
Return hessian based standard errors.

# Arguments
- `hessian`: how to compute the hessian. Options are
- `hessian`: how to compute the hessian. Options are
- `:analytic`: (only if an analytic hessian for the model can be computed)
- `:finitediff`: for finite difference approximation
- `:finitediff`: for finite difference approximation
"""
function se_hessian(sem_fit::SemFit; hessian = :finitediff)
c = H_scaling(sem_fit.model)
@@ -17,7 +17,7 @@ function se_hessian(sem_fit::SemFit; hessian = :finitediff)
hessian!(H, sem_fit.model, sem_fit.solution)
elseif hessian == :finitediff
H = FiniteDiff.finite_difference_hessian(
x -> objective!(sem_fit.model, x),
Base.Fix1(objective!, sem_fit.model),
sem_fit.solution,
)
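# editor note: Base.Fix1(objective!, sem_fit.model) is the callable
# equivalent of x -> objective!(sem_fit.model, x)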
elseif hessian == :optimizer
@@ -33,7 +33,7 @@ function se_hessian(sem_fit::SemFit; hessian = :finitediff)
),
)
else
throw(ArgumentError("I dont know how to compute `$hessian` standard-errors"))
throw(ArgumentError("I don't know how to compute `$hessian` standard-errors"))
end

invH = c * inv(H)
2 changes: 1 addition & 1 deletion src/frontend/specification/EnsembleParameterTable.jl
@@ -27,7 +27,7 @@ function Dict(partable::EnsembleParameterTable)
end

#= function DataFrame(
partable::ParameterTable;
partable::ParameterTable;
columns = nothing)
if isnothing(columns) columns = keys(partable.columns) end
out = DataFrame([key => partable.columns[key] for key in columns])
12 changes: 6 additions & 6 deletions src/frontend/specification/ParameterTable.jl
@@ -224,9 +224,9 @@ update_partable!(partable::AbstractParameterTable, sem_fit::SemFit, vec, column)
# update estimates -------------------------------------------------------------------------
"""
update_estimate!(
partable::AbstractParameterTable,
partable::AbstractParameterTable,
sem_fit::SemFit)

Write parameter estimates from `sem_fit` to the `:estimate` column of `partable`
"""
update_estimate!(partable::AbstractParameterTable, sem_fit::SemFit) =
@@ -236,7 +236,7 @@ update_estimate!(partable::AbstractParameterTable, sem_fit::SemFit) =
"""
update_start!(partable::AbstractParameterTable, sem_fit::SemFit)
update_start!(partable::AbstractParameterTable, model::AbstractSem, start_val; kwargs...)

Write starting values from `sem_fit` or `start_val` to the `:estimate` column of `partable`.

# Arguments
@@ -262,10 +262,10 @@ end
# update partable standard errors ----------------------------------------------------------
"""
update_se_hessian!(
partable::AbstractParameterTable,
sem_fit::SemFit;
partable::AbstractParameterTable,
sem_fit::SemFit;
hessian = :finitediff)

Write hessian standard errors computed for `sem_fit` to the `:se` column of `partable`

# Arguments
34 changes: 19 additions & 15 deletions src/frontend/specification/RAMMatrices.jl
@@ -2,11 +2,15 @@
### Type
############################################################################################

# map from parameter index to linear indices of matrix/vector positions where it occurs
AbstractArrayParamsMap = AbstractVector{<:AbstractVector{<:Integer}}
ArrayParamsMap = Vector{Vector{Int}}

struct RAMMatrices
A_ind::Any
S_ind::Any
F_ind::Any
M_ind::Any
A_ind::ArrayParamsMap
S_ind::ArrayParamsMap
F_ind::Vector{Int}
M_ind::Union{ArrayParamsMap, Nothing}
parameters::Any
colnames::Any
constants::Any
@@ -18,9 +22,9 @@ end
############################################################################################

function RAMMatrices(; A, S, F, M = nothing, parameters, colnames)
A_indices = get_parameter_indices(parameters, A)
S_indices = get_parameter_indices(parameters, S)
isnothing(M) ? M_indices = nothing : M_indices = get_parameter_indices(parameters, M)
A_indices = array_parameters_map(parameters, A)
S_indices = array_parameters_map(parameters, S)
M_indices = !isnothing(M) ? array_parameters_map(parameters, M) : nothing
F_indices = findall([any(isone.(col)) for col in eachcol(F)])
constants = get_RAMConstants(A, S, M)
return RAMMatrices(
@@ -50,7 +54,7 @@ end
import Base.==

function ==(c1::RAMConstant, c2::RAMConstant)
res = ((c1.matrix == c2.matrix) & (c1.index == c2.index) & (c1.value == c2.value))
res = ((c1.matrix == c2.matrix) && (c1.index == c2.index) && (c1.value == c2.value))
return res
end
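# editor note: && short-circuits, so the remaining comparisons are skipped as soon
# as one field differs; the previous bitwise & evaluated every comparison.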

Expand Down Expand Up @@ -425,13 +429,13 @@ end

function ==(mat1::RAMMatrices, mat2::RAMMatrices)
res = (
(mat1.A_ind == mat2.A_ind) &
(mat1.S_ind == mat2.S_ind) &
(mat1.F_ind == mat2.F_ind) &
(mat1.M_ind == mat2.M_ind) &
(mat1.parameters == mat2.parameters) &
(mat1.colnames == mat2.colnames) &
(mat1.size_F == mat2.size_F) &
(mat1.A_ind == mat2.A_ind) &&
(mat1.S_ind == mat2.S_ind) &&
(mat1.F_ind == mat2.F_ind) &&
(mat1.M_ind == mat2.M_ind) &&
(mat1.parameters == mat2.parameters) &&
(mat1.colnames == mat2.colnames) &&
(mat1.size_F == mat2.size_F) &&
(mat1.constants == mat2.constants)
)
return res
12 changes: 6 additions & 6 deletions src/frontend/specification/Sem.jl
@@ -9,11 +9,11 @@ function Sem(;
optimizer::D = SemOptimizerOptim,
kwargs...,
) where {O, I, L, D}
kwargs = Dict{Symbol, Any}(kwargs...)
kwdict = Dict{Symbol, Any}(kwargs...)

set_field_type_kwargs!(kwargs, observed, imply, loss, optimizer, O, I, D)
set_field_type_kwargs!(kwdict, observed, imply, loss, optimizer, O, I, D)

observed, imply, loss, optimizer = get_fields!(kwargs, observed, imply, loss, optimizer)
observed, imply, loss, optimizer = get_fields!(kwdict, observed, imply, loss, optimizer)

sem = Sem(observed, imply, loss, optimizer)

@@ -27,11 +27,11 @@ function SemFiniteDiff(;
optimizer::D = SemOptimizerOptim,
kwargs...,
) where {O, I, L, D}
kwargs = Dict{Symbol, Any}(kwargs...)
kwdict = Dict{Symbol, Any}(kwargs...)

set_field_type_kwargs!(kwargs, observed, imply, loss, optimizer, O, I, D)
set_field_type_kwargs!(kwdict, observed, imply, loss, optimizer, O, I, D)

observed, imply, loss, optimizer = get_fields!(kwargs, observed, imply, loss, optimizer)
observed, imply, loss, optimizer = get_fields!(kwdict, observed, imply, loss, optimizer)

sem = SemFiniteDiff(observed, imply, loss, optimizer)

6 changes: 3 additions & 3 deletions src/imply/RAM/generic.jl
@@ -222,7 +222,7 @@ gradient!(imply::RAM, par, model::AbstractSemSingle) =

# objective and gradient
function objective!(imply::RAM, parameters, model, has_meanstructure::Val{T}) where {T}
fill_A_S_M(
fill_A_S_M!(
imply.A,
imply.S,
imply.M,
@@ -250,7 +250,7 @@ function gradient!(
model::AbstractSemSingle,
has_meanstructure::Val{T},
) where {T}
fill_A_S_M(
fill_A_S_M!(
imply.A,
imply.S,
imply.M,
@@ -346,7 +346,7 @@ function check_acyclic(A_pre, n_par, A_indices)
A_rand = copy(A_pre)
randpar = rand(n_par)

fill_matrix(A_rand, A_indices, randpar)
fill_matrix!(A_rand, A_indices, randpar)

# check if the model is acyclic
acyclic = isone(det(I - A_rand))
2 changes: 1 addition & 1 deletion src/imply/RAM/symbolic.jl
@@ -112,7 +112,7 @@ function RAMSymbolic(;
F[CartesianIndex.(1:n_var, ram_matrices.F_ind)] .= 1.0

set_RAMConstants!(A, S, M, ram_matrices.constants)
fill_A_S_M(A, S, M, ram_matrices.A_ind, ram_matrices.S_ind, ram_matrices.M_ind, par)
fill_A_S_M!(A, S, M, ram_matrices.A_ind, ram_matrices.S_ind, ram_matrices.M_ind, par)

A, S, F = sparse(A), sparse(S), sparse(F)

3 changes: 1 addition & 2 deletions src/imply/empty.jl
@@ -36,11 +36,10 @@ end

function ImplyEmpty(; specification, kwargs...)
ram_matrices = RAMMatrices(specification)
identifier = StructuralEquationModels.identifier(ram_matrices)

n_par = length(ram_matrices.parameters)

return ImplyEmpty(identifier, n_par)
return ImplyEmpty(identifier(ram_matrices), n_par)
end

############################################################################################