library(hmcdm)
N = dim(Design_array)[1]
J = nrow(Q_matrix)
K = ncol(Q_matrix)
L = dim(Design_array)[3]
# Skill-specific transition (learning) probabilities
tau <- numeric(K)
for(k in 1:K){
  tau[k] <- runif(1, .2, .6)
}
# Attribute hierarchy matrix (all zeros: no prerequisite structure)
R = matrix(0, K, K)
# Initial alphas
p_mastery <- c(.5, .5, .4, .4)
Alphas_0 <- matrix(0, N, K)
for(i in 1:N){
  for(k in 1:K){
    prereqs <- which(R[k,] == 1)
    if(length(prereqs) == 0){
      Alphas_0[i,k] <- rbinom(1, 1, p_mastery[k])
    }
    if(length(prereqs) > 0){
      Alphas_0[i,k] <- prod(Alphas_0[i,prereqs]) * rbinom(1, 1, p_mastery[k])
    }
  }
}
# Simulate attribute trajectories under the independent-transition model
Alphas <- sim_alphas(model = "indept", taus = tau, N = N, L = L, R = R, alpha0 = Alphas_0)
table(rowSums(Alphas[,,5]) - rowSums(Alphas[,,1])) # used to see how much transition has taken place
#>
#>   0   1   2   3   4
#>  22  95 125  81  27
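As an optional extra check (not part of the original simulation code), the per-skill mastery proportions at each time point can be read directly off the simulated attribute array:

# K x L matrix: proportion of learners who have mastered each skill at each time point
apply(Alphas, c(2, 3), mean)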
Smats <- matrix(runif(J*K, .1, .3), J, K)
Gmats <- matrix(runif(J*K, .1, .3), J, K)
# Simulate rRUM parameters
r_stars <- Gmats / (1 - Smats)
pi_stars <- apply((1 - Smats)^Q_matrix, 1, prod)

Y_sim <- sim_hmcdm(model = "rRUM", Alphas, Q_matrix, Design_array,
                   r_stars = r_stars, pi_stars = pi_stars)
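Before fitting, a quick optional sanity check (not in the original code) is to inspect the structure of the simulated responses returned by sim_hmcdm():

str(Y_sim)  # dimensions and type of the simulated response data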
output_rRUM_indept = hmcdm(Y_sim, Q_matrix, "rRUM_indept", Design_array,
                           100, 30, R = R)
#> 0
output_rRUM_indept
#>
#> Model: rRUM_indept
#>
#> Sample Size: 350
#> Number of Items: 50
#> Number of Time Points: 5
#>
#> Chain Length: 100, burn-in: 50
summary(output_rRUM_indept)
#>
#> Model: rRUM_indept
#>
#> Item Parameters:
#> r_stars1_EAP r_stars2_EAP r_stars3_EAP r_stars4_EAP pi_stars_EAP
#>       0.1993      0.51490       0.6957       0.6744       0.8504
#>       0.5519      0.43958       0.5587       0.6092       0.7719
#>       0.6596      0.51894       0.6409       0.3554       0.7504
#>       0.6738      0.68623       0.1715       0.6188       0.8434
#>       0.3924      0.09994       0.5141       0.6946       0.8155
#> ... 45 more items
#>
#> Transition Parameters:
#> taus_EAP
#> τ1 0.3851
#> τ2 0.6149
#> τ3 0.5582
#> τ4 0.3437
#>
#> Class Probabilities:
#> pis_EAP
#> 0000 0.11906
#> 0001 0.03655
#> 0010 0.09150
#> 0011 0.05177
#> 0100 0.07980
#> ... 11 more classes
#>
#> Deviance Information Criterion (DIC): 22933.92
#>
#> Posterior Predictive P-value (PPP):
#> M1: 0.5064
#> M2: 0.49
#> total scores: 0.6145
a <- summary(output_rRUM_indept)
head(a$r_stars_EAP)
#>           [,1]       [,2]      [,3]      [,4]
#> [1,] 0.1993376 0.51490431 0.6956522 0.6743655
#> [2,] 0.5519450 0.43958144 0.5586875 0.6092017
#> [3,] 0.6595621 0.51893944 0.6409174 0.3554333
#> [4,] 0.6738479 0.68622934 0.1714936 0.6188168
#> [5,] 0.3923977 0.09993945 0.5140721 0.6946310
#> [6,] 0.6193002 0.24105679 0.3053849 0.5611629
(cor_pistars <- cor(as.vector(pi_stars), as.vector(a$pi_stars_EAP)))
#> [1] 0.9637738
(cor_rstars <- cor(as.vector(r_stars*Q_matrix), as.vector(a$r_stars_EAP*Q_matrix)))
#> [1] 0.9231634
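A scatter plot of generating values against their EAP estimates gives a visual counterpart to these correlations (an optional sketch, not part of the original analysis); points near the identity line indicate good recovery:

plot(as.vector(pi_stars), as.vector(a$pi_stars_EAP),
     xlab = "True pi_star", ylab = "Estimated pi_star (EAP)")
abline(0, 1, lty = 2)  # identity line for reference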
# Attribute-wise agreement rate (AAR) between true and estimated alphas at each time point
AAR_vec <- numeric(L)
for(t in 1:L){
  AAR_vec[t] <- mean(Alphas[,,t] == a$Alphas_est[,,t])
}
AAR_vec
#> [1] 0.8578571 0.8857143 0.9307143 0.9671429 0.9714286
# Pattern-wise agreement rate (PAR): proportion of learners whose full attribute pattern is recovered
PAR_vec <- numeric(L)
for(t in 1:L){
  PAR_vec[t] <- mean(rowSums((Alphas[,,t] - a$Alphas_est[,,t])^2) == 0)
}
PAR_vec
#> [1] 0.5285714 0.6314286 0.7628571 0.8800000 0.8942857
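Plotting both agreement rates across the L time points (an optional sketch, not part of the original code) shows how attribute classification accuracy improves as responses accumulate:

matplot(1:L, cbind(AAR_vec, PAR_vec), type = "b", pch = 1:2, col = 1:2,
        xlab = "Time point", ylab = "Agreement rate", ylim = c(0, 1))
legend("bottomright", legend = c("AAR", "PAR"), pch = 1:2, col = 1:2, lty = 1:2)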
a$DIC
#>              Transition Response_Time Response    Joint    Total
#> D_bar          2064.491            NA 18249.38 1861.215 22175.09
#> D(theta_bar)   1981.126            NA 17570.21 1864.915 21416.26
#> DIC            2147.855            NA 18928.55 1857.516 22933.92
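If only the overall fit index is needed, the total DIC can be extracted from the returned table; this assumes a$DIC keeps the row and column names printed above (worth confirming with str(a$DIC)):

a$DIC["DIC", "Total"]  # overall DIC, assuming dimnames match the printed table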
head(a$PPP_total_scores)
#>      [,1] [,2] [,3] [,4] [,5]
#> [1,] 0.02 0.38 0.12 0.82 0.62
#> [2,] 0.70 0.82 0.54 0.44 0.92
#> [3,] 0.78 0.26 0.68 0.54 0.96
#> [4,] 0.82 0.66 0.92 0.82 0.10
#> [5,] 0.78 0.58 0.92 0.02 0.80
#> [6,] 0.70 0.82 1.00 0.94 0.32
head(a$PPP_item_means)
#> [1] 0.44 0.52 0.50 0.58 0.40 0.50
head(a$PPP_item_ORs)
#> [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10] [,11] [,12] [,13] [,14]
#> [1,] NA 0.58 0.72 0.90 0.80 0.88 0.70 0.76 0.52 0.50 0.30 0.40 0.08 0.32
#> [2,] NA NA 0.16 0.12 0.82 0.34 0.46 0.96 0.40 0.54 0.64 0.88 0.12 0.76
#> [3,] NA NA NA 0.94 0.82 0.92 0.86 0.82 0.80 0.82 0.22 0.42 0.24 0.74
#> [4,] NA NA NA NA 0.64 0.96 0.76 0.82 0.40 0.74 0.10 0.86 0.22 0.52
#> [5,] NA NA NA NA NA 0.56 0.80 0.80 0.70 0.64 0.78 0.86 0.10 0.60
#> [6,] NA NA NA NA NA NA 0.76 1.00 0.86 0.40 0.02 0.22 0.22 0.28
#> [,15] [,16] [,17] [,18] [,19] [,20] [,21] [,22] [,23] [,24] [,25] [,26]
#> [1,] 0.40 0.78 0.38 0.08 0.90 0.64 0.46 0.70 0.14 0.44 0.32 0.64
#> [2,] 0.30 0.70 0.22 0.18 0.14 0.56 0.70 0.28 0.56 0.44 0.64 0.42
#> [3,] 0.10 0.54 0.52 0.40 0.36 0.76 0.38 0.72 0.10 0.04 0.34 0.22
#> [4,] 0.88 0.96 0.82 0.08 0.84 0.50 0.48 0.58 0.58 0.22 0.06 0.54
#> [5,] 0.64 0.10 0.88 0.08 0.50 0.88 0.82 0.88 0.12 0.74 0.54 0.86
#> [6,] 0.92 0.32 0.86 0.06 0.34 0.42 0.58 0.88 0.68 0.04 0.58 0.28
#> [,27] [,28] [,29] [,30] [,31] [,32] [,33] [,34] [,35] [,36] [,37] [,38]
#> [1,] 0.96 0.86 0.68 0.62 0.44 0.94 0.56 0.78 0.30 0.04 0.56 0.80
#> [2,] 0.16 0.68 0.50 0.50 0.66 0.40 0.44 0.42 0.78 0.98 0.68 0.62
#> [3,] 0.74 0.70 0.40 0.24 0.18 0.18 0.66 0.74 0.50 0.56 0.32 0.28
#> [4,] 0.40 0.08 0.36 0.46 0.14 0.62 0.20 0.88 0.14 0.50 0.06 0.32
#> [5,] 0.50 0.84 0.88 0.36 0.34 0.92 0.20 0.84 0.92 0.26 0.24 0.78
#> [6,] 0.70 0.32 0.02 0.88 0.00 0.60 0.04 0.40 0.46 0.18 0.36 0.38
#> [,39] [,40] [,41] [,42] [,43] [,44] [,45] [,46] [,47] [,48] [,49] [,50]
#> [1,] 0.72 0.26 1.00 1.00 0.26 0.70 1.00 0.28 0.68 0.86 0.96 0.92
#> [2,] 0.12 0.86 0.74 0.58 0.50 0.34 0.84 0.96 0.44 0.94 0.30 0.88
#> [3,] 0.46 0.10 0.66 0.30 0.04 0.34 0.50 0.42 0.66 0.04 1.00 0.14
#> [4,] 0.62 0.06 0.62 0.88 0.46 0.98 0.60 0.58 0.56 0.86 0.12 0.64
#> [5,] 0.58 0.80 0.74 0.86 0.38 0.50 0.60 0.78 0.30 0.58 0.80 0.88
#> [6,] 0.06 0.72 0.50 0.90 0.14 0.30 0.16 0.66 0.48 0.64 0.32 0.10
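Because the item-pair odds-ratio PPP matrix is large, a compact optional summary (not part of the original output) is the share of extreme values, which would flag pairwise misfit:

# Proportion of item pairs with extreme PPP values; NA entries (diagonal and lower triangle) are skipped
mean(a$PPP_item_ORs < 0.025 | a$PPP_item_ORs > 0.975, na.rm = TRUE)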