@nirgrahamuk
Thank you very much for your comment. Please find the whole code below.
Do you know how to upload a CSV file here?
### DCC estimation
## package
library(tidyverse)
library(tibble)
library(lubridate)
library(quantmod)
library(PerformanceAnalytics)
library(rugarch)
library(xdcclarge)
library(nloptr)
## Clear Variables
rm(list = ls())
# Matrix power (via eigendecomposition)
matpow = function(x, pow){
  y = eigen(x)
  y$vectors %*% diag( (y$values)^pow ) %*% solve(y$vectors)
}
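# Optional sanity check of matpow (illustrative only, not part of the estimation):
# the matrix square root of a diagonal matrix should recover the element-wise
# square roots, e.g.
# all.equal(matpow(diag(c(4, 9)), 1/2), diag(c(2, 3)))  # should be TRUE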
###########
#Read Data#
###########
## Futures Data
futures = read_csv("Futures20210411.csv", col_types = cols(US = col_double(), EU = col_double()))
## Create xts Data
date = unlist(futures[,1]) %>% as.Date()
data = futures[, 2:4]
x.data = xts(data, order.by = date)
## logreturn
return = diff(log(x.data[,1:3])) %>%
  na.omit()
return = return[-which(is.infinite(return[,3])), ] # exclude infinite values that appear in the EU log returns for some reason
## Exclude Outliers
outlier_index = apply(return, 2, function(x) abs(x) > mean(x) + 3 * sd(x)) %>%
  rowSums()
outlier_index = outlier_index != 0
return = return[!outlier_index, ]
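# Optional check (illustrative): how many observations were flagged as outliers
# sum(outlier_index)
# nrow(return)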
####################
#1st stage estimate#
####################
# mean adjusted return
rt = apply(return, 2, function(x) x - mean(x))
# Quasi log-likelihood objective (proportional to the negative log-likelihood, so it is minimized below)
t = length(rt[,1])
n = 3 #number of assets
QLL = function(para, x, t, n){
  # Parameters
  omega = rep(0, n)
  a = rep(0, n)
  b = rep(0, n)
  omega[1:n] = para[1:n]
  a[1:n] = para[(n + 1):(2 * n)]
  b[1:n] = para[(2*n + 1):(3 * n)]
  # Quasi log-likelihood (QLL) initialization
  loglik = 0
  # Start of the loop
  for (j in 1:n){
    h = var(x[, j])
    for (i in 2:t){
      h = omega[j] + a[j] * x[i-1, j]^2 + b[j] * h
      loglik = loglik + log(h) + x[i, j]^2 / h
    }
  }
  return(loglik)
}
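# Optional check (illustrative): the objective should return a finite value at
# the starting parameters before it is passed to nloptr.
# QLL(para = c(rep(0.2, n), rep(0.5, n), rep(0.2, n)), x = rt, t = t, n = n)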
## Minimize the objective (equivalent to maximizing the quasi log-likelihood)
# initial value
para.garch = c(rep(0.2,n), rep(0.5,n), rep(0.2,n))
# Constraint
cons.garch = function(para, x, t, n){
  cons = c(para[n+1] + para[2*n+1] - 1, para[n+2] + para[2*n+2] - 1, para[n+3] + para[2*n+3] - 1)
  return(cons) # a + b < 1 for each asset (nloptr treats these as g(x) <= 0 constraints)
}
lb.garch = rep(0, 9) # omega, a, b >= 0
ub.garch = rep(Inf, 9)
# Algorithm
opts = list("algorithm" = "NLOPT_LN_COBYLA",
            "xtol_rel" = 1.0e-4,
            "maxeval" = 1000)
# Maximization with constraint
mlef.garch = nloptr(x0 = para.garch,
                    eval_f = QLL,
                    lb = lb.garch,
                    ub = ub.garch,
                    eval_g_ineq = cons.garch,
                    opts = opts,
                    x = rt,
                    t = t,
                    n = n)
mlef.garch
garch.e = mlef.garch$solution
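# For readability (illustrative; these names are not used below), the stacked
# solution vector can be split into the per-asset GARCH parameters:
# omega.hat = garch.e[1:n]
# a.hat     = garch.e[(n + 1):(2 * n)]
# b.hat     = garch.e[(2 * n + 1):(3 * n)]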
# Conditional variance based on the model
garch.ht = function(para, x, n){
  # para is stacked as (omega_1..n, a_1..n, b_1..n)
  ht = matrix(0, nrow = length(x[,1]), ncol = n)
  ut = matrix(0, nrow = length(x[,1]), ncol = n)
  for (j in 1:n){
    ht[1, j] = var(x[, j])
    ut[, j] = x[, j]
    for (i in 2:length(x[, j])){
      ht[i, j] = para[j] + para[n + j] * ut[i-1, j]^2 + para[2*n + j] * ht[i-1, j]
    }
  }
  return(ht)
}
# Check the fit of GARCH
ht = garch.ht(para = garch.e, x = rt, n = n)
ut.AU = rt[,1]
ht.AU = ht[,1]
vt.AU = ut.AU/sqrt(ht.AU)
plot(vt.AU, type="l")
acf(vt.AU)
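# An additional diagnostic (illustrative): a Ljung-Box test on the squared
# standardized residuals; a large p-value suggests no remaining ARCH effects.
Box.test(vt.AU^2, lag = 10, type = "Ljung-Box")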
#####################################
#Preparation for 2nd step estimation#
#####################################
p = 3 # number of parameters per asset (omega, a, b)
ht = matrix(rep(0, n*t), t, n)
for (j in 1:n){
  ht[1, j] = var(rt[, j]) # seed with the sample variance (not sqrt(var)), consistent with garch.ht() above
  ut = rt[, j]
  omega = garch.e[j]
  a = garch.e[j + p]
  b = garch.e[j + p*2]
  for (i in 2:length(rt[, j])){
    ht[i, j] = omega + a * ut[i-1]^2 + b * ht[i-1, j]
  }
}
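# Optional consistency check (illustrative): this recursion should reproduce the
# conditional variances returned by garch.ht() above.
# all.equal(ht, garch.ht(para = garch.e, x = rt, n = n))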
sigma = sqrt(ht)
fun.S = function(x, sigma, t){
  # note: ht is read from the global environment rather than passed as an argument
  sum.S = 0
  for (i in 1:t){
    rt = c(x[i,1], x[i,2], x[i,3])
    Qt.star = diag( c(ht[i,1], ht[i,2], ht[i,3]) )
    Dt = diag( c(sigma[i,1], sigma[i,2], sigma[i,3]) )
    et = solve(Dt) %*% rt
    sum.S = sum.S + matpow(Qt.star, 1/2) %*% et %*% t(et) %*% matpow(Qt.star, 1/2)
  }
  S = sum.S/t
  return(S)
}
S = fun.S(x = rt, sigma = sigma, t = t)
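# Optional check (illustrative): the target matrix S should be symmetric and
# positive definite before it enters the cDCC recursion.
# isSymmetric(S)
# eigen(S, only.values = TRUE)$values  # all eigenvalues should be positive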
####################
#Estimation of cDCC#
####################
# Define the cDCC quasi log-likelihood (negative; to be minimized)
QLL.cDCC = function(para, sigma, r, t, S){
  # parameters
  alpha = para[1]
  beta = para[2]
  A = diag( sqrt(alpha), 3, 3)
  B = diag( sqrt(beta), 3, 3)
  # initial values
  loglik = 0
  Qt = S
  for (i in 2:t){
    rt = c( r[i, 1], r[i, 2], r[i, 3] )                             # r_t
    rt.L = c( r[i-1, 1], r[i-1, 2], r[i-1, 3] )                     # r_{t-1}
    Dt = diag( c(sigma[i, 1], sigma[i, 2], sigma[i, 3]) )           # D_t
    Dt.L = diag( c(sigma[i-1, 1], sigma[i-1, 2], sigma[i-1, 3]) )   # D_{t-1}
    Qt.star.L = diag( diag(Qt) )                                    # Q*_{t-1}
    et.L = solve(Dt.L) %*% rt.L                                     # epsilon_{t-1}
    Qt = S - A %*% S %*% t(A) - B %*% S %*% t(B) +
      A %*% ( matpow(Qt.star.L, 1/2) %*% et.L %*% t(et.L) %*% matpow(Qt.star.L, 1/2) ) %*% t(A) +
      B %*% Qt %*% t(B)
    Qt.star = diag( diag(Qt) )                                      # Q*_t
    Rt = matpow(Qt.star, -1/2) %*% Qt %*% matpow(Qt.star, -1/2)     # R_t
    loglik = loglik + log(det(Rt)) + t(rt) %*% solve(Dt) %*% solve(Rt) %*% solve(Dt) %*% rt
  }
  return(loglik)
}
QLL.cDCC(para = c(0.2, 0.7), sigma = sigma, r = rt, t = t, S = S) # check that the log-likelihood function works
# initial value
para.cDCC = c(0.2, 0.7)
# constraints for constrOptim (ui %*% theta - ci >= 0): alpha + beta <= 1, alpha >= 0, beta >= 0
resta = rbind(c(-1, -1), diag(2))
restb = c(-1, 0, 0)
# Gradient (forward-difference numerical approximation)
grad.lik = function(para, sigma, r, t, S) {
  d = 1e-05
  npara = length(para)
  id = d * diag(npara)
  para1 = para + id[, 1]
  para2 = para + id[, 2]
  lik0 = QLL.cDCC(para = c(para[1], para[2]), sigma, r, t, S)
  lik1 = QLL.cDCC(para = c(para1[1], para1[2]), sigma, r, t, S)
  lik2 = QLL.cDCC(para = c(para2[1], para2[2]), sigma, r, t, S)
  c( sum( (lik1 - lik0) / d ), sum( (lik2 - lik0) / d ) )
}
grad.lik(para = para.cDCC, sigma = sigma, r = rt, t = t, S = S)
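# Optional cross-check (illustrative, requires the numDeriv package, which is
# not loaded above): compare the forward-difference gradient with numDeriv's.
# numDeriv::grad(QLL.cDCC, para.cDCC, sigma = sigma, r = rt, t = t, S = S)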
# Optimization
constrOptim(theta = para.cDCC, f = QLL.cDCC, grad = grad.lik,
            ui = resta, ci = restb, mu = 1e-05,
            outer.iterations = 20, outer.eps = 0.01,
            control = list(maxit = 10, reltol = 1e-05),
            sigma = sigma, r = rt, t = t, S = S)
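# To reuse the estimates (illustrative), the call above can be assigned to an
# object, e.g. fit.cDCC = constrOptim(...); then fit.cDCC$par holds the
# estimated (alpha, beta) and fit.cDCC$convergence should be 0 on success.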