#########################################################################
# Code Examples for Basic Garch Simulation, ARCH Testing, GARCH Fitting
# (S-PLUS / S+FinMetrics script: relies on the finmetrics module and the
# ford.s daily-return series that it provides.)
#########################################################################
module(finmetrics)
# Ford Daily Returns for 2000 days.
class(ford.s)
ford.s@title
start(ford.s)
end(ford.s)
nrow(ford.s)
# A look at the data
plot(ford.s, reference.grid = FALSE)
# Now a look at the ACF and the ACF of the SQUARES, side by side.
par(mfrow = c(1, 2))
# Assign to tmp only to suppress printing of the acf object;
# the plots are the point here.
tmp <- acf(ford.s, lag.max = 12)
tmp <- acf(ford.s^2, lag.max = 12)
par(mfrow = c(1, 1))
# We see that the SQUARES have more lags of significance.
# To some extent this is a "cheat" since it is not clear
# that the squared data will conform to the assumption that
# one needs to set significance levels on the estimated correlations.
# Still the picture is suggestive.
# Even without the validity of the confidence intervals, we have some
# pretty suggestive evidence that Ford Returns are NOT INDEPENDENT.
# Thus, another instance of the Black Scholes (AKA Samuelson) model
# does not hold.
#######################################
# Simulation of an ARCH(1) model:
#   sigma.t^2 = a + a1 * e(t-1)^2   with a = 0.01, a1 = 0.8.
# n = 250 observations; rseed fixed so the run is reproducible.
sim.arch1 <- simulate.garch(model = list(a.value = 0.01, arch = 0.8),
                            n = 250, rseed = 196)
# Now lets look at what it holds...
names(sim.arch1)
# And look at plots of these components
par(mfrow = c(2, 1))
tsplot(sim.arch1$et, main = "Simulated ARCH(1) errors", ylab = "e(t)")
tsplot(sim.arch1$sigma.t, main = "Simulated ARCH(1) volatility", ylab = "sigma(t)")
par(mfrow = c(1, 1))
# And a summary
summaryStats(sim.arch1$et)
# Kurtosis is a little high, but not brutal. Also, we might ask
# what sample kurtosis even means here. By construction, these guys are not independent.
# On the other hand, the simulation function returns both the error term and the conditional standard
# deviations. In theory (i.e. if the model was exactly appropriate) the ratios
# et/sigma.t would be perfectly independent standard normals. Let's look.
ratio <- sim.arch1$et / sim.arch1$sigma.t
normalTest(ratio, method = "sw")
normalTest(ratio, method = "jb")
# Golly, for once we find that jb does not reject the normality hypothesis.
# Well, after all, this is simulated data. We just got back what we baked into the cake.
#####################################
# Back to the real data and "testing for arch".
# More precisely, we test the null hypothesis 0 = a_1 = ... = a_p
# (no ARCH effect) where p = lag.n, via an LM-type test on the
# squared returns.
archTest(ford.s, lag.n=12)
#We get a brutally small p-value, so it looks like there is an arch effect.
# Let's estimate a univariate GARCH model for the series.
# What makes the model "Generalized" is that sigma.t is
# modeled by lags in sigma.t as well as lags in epsilon.t.
# We'll try one of each: a GARCH(1,1) with a constant mean.
ford.mod11 <- garch(ford.s ~ 1, ~garch(1, 1))
class(ford.mod11)
# Lets Look
ford.mod11
# and check the names...
names(ford.mod11)
# Tidier to use the summary "method"
summary(ford.mod11)
# This has some weird inferences...
# Shapiro-Wilk is nonsignificant but Jarque-Bera is highly significant.
# This "may" mean that the residuals are "normal in the middle" but
# exhibit skewness or kurtosis. Also, the JB test is VERY sensitive
# to an outlier in the series.
##############################################################
# Now lets look at some individual components of the fitted model.
ford.mod11$asymp.sd               # implied unconditional (asymptotic) sd
coef(ford.mod11)                  # estimated coefficients
vcov(ford.mod11)                  # standard covariance of the estimates
vcov(ford.mod11, method = "qmle") # robust (quasi-MLE) covariance
residuals(ford.mod11, standardize = TRUE)[1:5]
sigma.t(ford.mod11)[1:5]          # fitted conditional sd series
# garch diagnostics: residual autocorrelation and remaining ARCH effect
summary(ford.mod11)
autocorTest(residuals(ford.mod11, standardize = TRUE), lag.n = 12)
autocorTest(residuals(ford.mod11, standardize = TRUE)^2, lag.n = 12)
archTest(residuals(ford.mod11, standardize = TRUE), lag.n = 12)
# And a bucket of potential plots....
plot(ford.mod11)
#################################################################################
# Now lets look at the idea of combining the garch errors with a model for the
# mean: here an AR(1) mean equation with GARCH(1,1) errors.
ford.meanmodel <- garch(ford.s ~ ar(1), ~garch(1, 1))
summary(ford.meanmodel)
# Now explore this model... replacing ar(1) with ar(2), ma(2), arma(1,1).
# Do the changes in the coefficients make sense? Consider size, p-value, signs, etc.
# How would you choose among these models?
# How would you summarize what you learn from this exploration?