### Selected R commands and output used in Chapter 2
> copper <- scan(file="copper1800t1996.txt") # load data into R
> tdx <- c(1800:1996) # years
> par(mfcol=c(2,1))
> plot(tdx,copper,xlab='year',ylab='price',type='l')
> acf(copper)
> y <- copper[2:197]; x <- copper[1:196]
> m1 <- loess(y~x) ## local smoothing
> sx <- sort(x,index=T) ## sorting the threshold variable
> par(mfcol=c(1,1))
> ix <- sx$ix ## index for order-statistics
> plot(x,y,xlab='x(t-1)',ylab='x(t)')
> lines(x[ix],m1$fitted[ix],col="red")
####
> copper <- scan("copper1800t1996.txt")
> m0 <- ar(copper,method="mle")
> m0$order
[1] 12
> m1 <- arima(copper,order=c(12,0,0))
> m1
Call: arima(x = copper, order = c(12, 0, 0))
Coefficients:
         ar1      ar2     ar3     ar4      ar5     ar6      ar7     ar8
      0.8999  -0.1981  0.0399  0.0459  -0.0855  0.1931  -0.0691  0.1418
s.e.  0.0689   0.0930  0.0938  0.0936   0.0936  0.0945   0.0948  0.0949
         ar9     ar10    ar11     ar12  intercept
     -0.0416  -0.0518  0.2037  -0.2324     0.1695
s.e.  0.0950   0.0954  0.0963   0.0706     0.3360
sigma^2 estimated as 0.5542: log likelihood = -222.47, aic = 472.94
> c1 <- c(NA,NA,0,0,0,NA,0,0,0,0,NA,NA,0)
> m1a <- arima(copper,order=c(12,0,0),fixed=c1)
> m1a
Call: arima(x = copper, order = c(12, 0, 0), fixed = c1)
Coefficients:
         ar1      ar2  ar3  ar4  ar5     ar6  ar7  ar8  ar9  ar10    ar11
      0.8984  -0.1596    0    0    0  0.1559    0    0    0     0  0.1671
s.e.  0.0674   0.0696    0    0    0  0.0456    0    0    0     0  0.0704
         ar12  intercept
      -0.2166          0
s.e.   0.0696          0
sigma^2 estimated as 0.5666: log likelihood = -224.57, aic = 461.13
> require(forecast) ### For automatic selection of ARIMA models.
> auto.arima(copper)
Series: copper
ARIMA(3,0,2) with zero mean
Coefficients:
          ar1     ar2     ar3     ma1     ma2
      -0.5852  0.3070  0.6622  1.5709  0.9630
s.e.   0.0710  0.0962  0.0850  0.0564  0.0265
sigma^2 estimated as 0.5769: log likelihood=-226.74
AIC=465.48  AICc=465.92  BIC=485.18
> m2 <- arima(copper,order=c(3,0,2),include.mean=F)
> m2
Call: arima(x = copper, order = c(3, 0, 2), include.mean = F)
Coefficients:
          ar1     ar2     ar3     ma1     ma2
      -0.5852  0.3070  0.6622  1.5709  0.9630
s.e.   0.0710  0.0962  0.0850  0.0564  0.0265
sigma^2 estimated as 0.5769: log likelihood = -226.74, aic = 465.48
> Box.test(m2$residuals,lag=10,type='Ljung')
        Box-Ljung test
data:  m2$residuals
X-squared = 9.5266, df = 10, p-value = 0.483
> Box.test(m1a$residuals,lag=10,type='Ljung')
        Box-Ljung test
data:  m1a$residuals
X-squared = 5.334, df = 10, p-value = 0.8678
> pv <- 1-pchisq(5.334,5) ## Compute p-value, adjusting df.
> pv
[1] 0.3764918
> pv <- 1-pchisq(9.53,5)
> pv
[1] 0.08970192
> require(TSA) # Load TSA package
> m3 <- tar(copper,12,12,1)
> m3$thd
0.9459653
> m3$qr1$coefficients
intercept-copper      lag1-copper
       0.0809485        0.9519411
> m3$qr2$coefficients
intercept-copper      lag1-copper      lag2-copper
       0.8258597        0.7184221       -0.3023816
> m4 <- tar(copper,2,2,1) ### Lower the AR orders to 2.
> m4$thd
0.9459653
> m4$qr1$coefficients
intercept-copper      lag1-copper
      0.07536253       0.92867483
> m4$qr2$coefficients
intercept-copper      lag1-copper      lag2-copper
       0.7303377        0.7336741       -0.2881161
> m4$AIC
427.8
> m4$rms1
0.3628672
> m4$rms2
1.303754
> Box.test(m4$residuals,lag=10,type='Ljung')
        Box-Ljung test
data:  m4$residuals
X-squared = 9.4695, df = 10, p-value = 0.4882
> pv <- 1 - pchisq(9.47,5)
> pv
[1] 0.09172323
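
The pchisq() calls above recompute the Ljung-Box p-values after reducing the degrees of freedom by the number of estimated coefficients (five in the fitted models). A minimal sketch, not part of the book's code, that automates this for an unconstrained, non-seasonal arima() fit; the helper name adj.LB.pvalue is made up, and for constrained fits such as m1a one should count only the freely estimated coefficients.

## Hedged sketch: Ljung-Box p-value with df reduced by the number of
## estimated ARMA coefficients of an (unconstrained, non-seasonal) arima() fit.
adj.LB.pvalue <- function(model, lag=10){
  npar <- sum(model$arma[1:2])                    # p + q of the fit
  stat <- Box.test(residuals(model), lag=lag, type="Ljung")$statistic
  1 - pchisq(stat, df=lag-npar)
}
adj.LB.pvalue(m2, lag=10)   # roughly reproduces 1-pchisq(9.53,5) above
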
####
> pr <- scan(file="m-precipitationLondon.txt")
> Keenan.test(pr,4)
$test.stat
[1] 29.23289
$p.value
[1] 3.099153e-07
> require(NTS)
> thr.test(pr,4,d=1)
SETAR model is entertained
Threshold nonlinearity test for (p,d):  4 1
F-ratio and p-value:  0.3664116 0.8702775
....
> thr.test(pr,4,d=4)
SETAR model is entertained
Threshold nonlinearity test for (p,d):  4 4
F-ratio and p-value:  2.091034 0.07409199
> ### TAR modeling
> m5 <- tar(pr,4,4,d=4)
> names(m5)
 [1] "dxy1"         "dxy2"         "p1"           "q1"           "d"
 [6] "qr1"          "qr2"          "x.regime1"    "y.regime1"    "x.regime2"
[11] "y.regime2"    "thd"          "thdindex"     "qr1"          "qr2"
[16] "i1"           "i2"           "x"            "m"            "rss1"
[21] "rss2"         "n1"           "n2"           "std.res"      "p1"
[26] "p2"           "rms1"         "rms2"         "is.constant1" "is.constant2"
[31] "residuals"    "AIC"          "aic.no.thd"   "y"            "like"
[36] "method"
> m5$thd
71.5
> m5$qr1$coefficients
intercept-pr
    98.39298
> m5$qr2$coefficients
intercept-pr      lag1-pr      lag2-pr      lag3-pr
 41.27491106   0.19071082  -0.04809674   0.26076648
> m5$rms1; m5$rms2
1826.553;  823.3881
> m5$AIC
1313
> m5$n1
[1] 57
> m5$n2
[1] 75
> Box.test(m5$residuals,lag=12,type='Ljung')
        Box-Ljung test
data:  m5$residuals
X-squared = 3.1796, df = 12, p-value = 0.9941
###
set.seed(51)
n <- 300; np1 <- n+1
St <- c(rep(1,51),rep(2,20),rep(1,30),rep(2,30),rep(1,70),rep(2,10),
        rep(1,50),rep(2,20),rep(1,20))
zt <- runif(np1,min=0,max=1) ## regressor
at <- rnorm(np1) ## epsilon(t)
x <- at[1] ### set initial value
for (i in 2:np1){
  tmp = 0
  if(St[i]==1){
    tmp = 2.0 + 0.8*x[i-1] + 1.5*at[i]
  } else{
    tmp = -2.0 + 0.6*zt[i] - 0.8*x[i-1] + at[i]
  }
  x <- c(x,tmp)
}
xt <- x[-1]; zt <- zt[-1]; St <- St[-1] ## Remove the initial value
> x <- cbind(xt,rep(1,300),zt) # Start model fitting
> colnames(x) <- c("xt","cnst","zt")
> X <- data.frame(x)
> m1 <- lm(xt~-1+cnst+zt,data=X)
> summary(m1)
Call: lm(formula = xt ~ -1 + cnst + zt, data = X)
Coefficients:
     Estimate Std. Error t value Pr(>|t|)
cnst   6.7610     0.6269  10.784   <2e-16 ***
zt    -0.5134     1.1343  -0.453    0.651
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Residual standard error: 5.785 on 298 degrees of freedom
> require(MSwM) # Load the package
> m2 <- msmFit(m1,k=2,p=1,sw=c(T,T,T,T))
> summary(m2)
Markov Switching Model
Call: msmFit(object = m1, k = 2, sw = c(T, T, T, T), p = 1)
      AIC      BIC    logLik
 1101.089 1157.494 -544.5443
Coefficients:
Regime 1
---------
        Estimate Std. Error t value  Pr(>|t|)
cnst(S)   2.0030     0.3374  5.9366  2.91e-09 ***
zt(S)    -0.4008     0.3574 -1.1214    0.2621
xt_1(S)   0.8265     0.0300 27.5500 < 2.2e-16 ***
---
Residual standard error: 1.542166
Regime 2
---------
        Estimate Std. Error  t value Pr(>|t|)
cnst(S)  -1.7467     0.1953  -8.9437   <2e-16 ***
zt(S)     0.3449     0.3454   0.9986    0.318
xt_1(S)  -0.8043     0.0215 -37.4093   <2e-16 ***
---
Residual standard error: 0.9059516
Transition probabilities:
           Regime 1  Regime 2
Regime 1 0.98171408 0.0517276
Regime 2 0.01828592 0.9482724
> plotDiag(m2) # Residual ACF and PACF
> par(mfcol=c(2,1))
> plotDiag(m2,which=1) # Residual plot
> plotDiag(m2,which=2) # Q-Q plot of residuals
> slotNames(m2)
> slotNames(m2@Fit)
> dim(m2@Fit@filtProb)
> par(mfrow=c(2,1))
> plot(m2@Fit@filtProb[,1],type='l',main='Filtered Probability Regime 1',ylab='prob',xlab='time')
> plot(m2@Fit@smoProb[,1],type='l',main='Smoothed Probability Regime 1',ylab='prob',xlab='time')
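
Because the simulated states St are retained above, one quick check (not in the book's code) is to classify each time point into the regime with the larger smoothed probability and cross-tabulate against St. A minimal sketch; note that msmFit may label the regimes in the opposite order and may drop initial observations used as lags, so the series are aligned defensively.

## Hedged sketch: compare estimated regimes with the simulated states St.
est.state <- apply(m2@Fit@smoProb, 1, which.max)
nn <- min(length(est.state), length(St))   # align lengths defensively
table(tail(est.state, nn), tail(St, nn))   # labels 1/2 may be swapped between runs
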
####
> da=read.table("GDPC1.txt",header=T)
> gdp=diff(log(da[,4]))
> length(gdp)
[1] 273
> x <- cbind(gdp,rep(1,length(gdp)))
> colnames(x) <- c("gdp","cnst")
> X <- data.frame(x)
> m1 <- lm(gdp~-1+cnst,data=X) # Basic model for the expected growth
> summary(m1)
Call: lm(formula = gdp ~ -1 + cnst, data = X)
Coefficients:
      Estimate Std. Error t value Pr(>|t|)
cnst 0.0078125  0.0005784   13.51   <2e-16 ***
---
> require(MSwM)
> m2 <- msmFit(m1,k=3,p=3,sw=c(T,T,T,T,T))
> summary(m2)
Markov Switching Model
Call: msmFit(object = m1, k = 3, sw = c(T, T, T, T, T), p = 3)
       AIC       BIC   logLik
 -1839.701 -1729.339 931.8506
Coefficients:
Regime 1
---------
         Estimate Std. Error t value  Pr(>|t|)
cnst(S)    0.0052     0.0013  4.0000 6.334e-05 ***
gdp_1(S)   0.3176     0.0873  3.6380 0.0002748 ***
gdp_2(S)   0.0908     0.0940  0.9660 0.3340442
gdp_3(S)  -0.0691     0.0934 -0.7398 0.4594214
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Residual standard error: 0.011107
Regime 2
---------
         Estimate Std. Error t value  Pr(>|t|)
cnst(S)    0.0081     0.0008 10.1250 < 2.2e-16 ***
gdp_1(S)   0.5686     0.0899  6.3248 2.536e-10 ***
gdp_2(S)  -0.0096     0.0594 -0.1616    0.8716
gdp_3(S)  -0.0825     0.0652 -1.2653    0.2058
---
Residual standard error: 0.002533775
Regime 3
---------
         Estimate Std. Error t value  Pr(>|t|)
cnst(S)    0.0002     0.0008  0.2500    0.8026
gdp_1(S)   0.6197     0.0937  6.6137 3.748e-11 ***
gdp_2(S)   0.3003     0.0687  4.3712 1.236e-05 ***
gdp_3(S)  -0.2649     0.0607 -4.3641 1.276e-05 ***
---
Residual standard error: 0.003278711
Transition probabilities:
           Regime 1    Regime 2   Regime 3
Regime 1 0.95122395 0.086451577 0.03303734
Regime 2 0.02950221 0.007717656 0.56273477
Regime 3 0.01927384 0.905830767 0.40422789
> plotDiag(m2) ## Residual ACF and PACF
> par(mfcol=c(2,1))
> plotDiag(m2,which=1) # Residual plot
> plotDiag(m2,which=2) # Residual Q-Q plot
> plotProb(m2,which=1) # State probabilities (Not shown)
> plotProb(m2,which=2) # State-1 probabilities (Not shown)
> plotProb(m2,which=4) # State-3 probabilities (Not shown)
> par(mfrow=c(3,1))
> plot(m2@Fit@filtProb[,1],type='l',main='Filtered Probability Regime 1',ylab='prob',xlab='time')
> plot(m2@Fit@filtProb[,2],type='l',main='Filtered Probability Regime 2',ylab='prob',xlab='time')
> plot(m2@Fit@filtProb[,3],type='l',main='Filtered Probability Regime 3',ylab='prob',xlab='time') # Figure 2.19 upper three
> plot(m2@Fit@smoProb[,1],type='l',main='Smoothed Probability Regime 1',ylab='prob',xlab='time')
> plot(m2@Fit@smoProb[,2],type='l',main='Smoothed Probability Regime 2',ylab='prob',xlab='time')
> plot(m2@Fit@smoProb[,3],type='l',main='Smoothed Probability Regime 3',ylab='prob',xlab='time') # Figure 2.19 lower three
> p1 = c(1, -0.6197, -.3003, .2649)
> s1 <- polyroot(p1)
> s1
[1]  1.394760+0.57827i -1.655885-0.00000i  1.394760-0.57827i
> Mod(s1)
[1] 1.509885 1.655885 1.509885
> kk=2*pi/acos(1.3948/1.5099)
> kk
[1] 15.98833
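
The last two commands convert the pair of complex characteristic roots into the average length of the implied business cycle. A minimal sketch, not in the book's code, that does the same computation directly from the polyroot() output (for a complex root a + bi, the cycle length is 2*pi/acos(a/|root|)).

## Hedged sketch: average cycle length implied by a complex characteristic root.
cycle.length <- function(root) 2*pi/acos(Re(root)/Mod(root))
cycle.length(s1[1])   # about 15.99 quarters, matching kk above
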
####
> X <- cbind(gdp[4:273],rep(1,270),gdp[3:272],gdp[2:271],gdp[1:270])
> colnames(X) <- c("gdp","cnst","gdpl1","gdpl2","gdpl3")
> X <- data.frame(X)
> m1 <- lm(gdp~-1+.,data=X) ## Linear AR(3) fit
> summary(m1)
Call: lm(formula = gdp ~ -1 + ., data = X)
Coefficients:
        Estimate Std. Error t value Pr(>|t|)
cnst   0.0048627  0.0008053   6.038 5.22e-09 ***
gdpl1  0.3465507  0.0608520   5.695 3.26e-08 ***
gdpl2  0.1284203  0.0639292   2.009   0.0456 *
gdpl3 -0.0958892  0.0607570  -1.578   0.1157
---
Residual standard error: 0.008842 on 266 degrees of freedom
> m2 <- msmFit(m1,k=3,sw=c(T,T,T,T,T))
> summary(m2)
Markov Switching Model
Call: msmFit(object = m1, k = 3, sw = c(T, T, T, T, T))
       AIC       BIC   logLik
 -1840.463 -1730.101 932.2315
Coefficients:
Regime 1
---------
         Estimate Std. Error t value  Pr(>|t|)
cnst(S)    0.0082     0.0008 10.2500 < 2.2e-16 ***
gdpl1(S)   0.5855     0.1283  4.5635 5.031e-06 ***
gdpl2(S)   0.0068     0.0862  0.0789    0.9371
gdpl3(S)  -0.1048     0.1129 -0.9283    0.3533
---
Residual standard error: 0.002392347
Regime 2
---------
         Estimate Std. Error t value  Pr(>|t|)
cnst(S)    0.0052     0.0013  4.0000 6.334e-05 ***
gdpl1(S)   0.3272     0.0866  3.7783 0.0001579 ***
gdpl2(S)   0.0951     0.0909  1.0462 0.2954687
gdpl3(S)  -0.0749     0.0886 -0.8454 0.3978875
---
Residual standard error: 0.01110063
Regime 3
---------
         Estimate Std. Error t value  Pr(>|t|)
cnst(S)    0.0002     0.0009  0.2222   0.82416
gdpl1(S)   0.6197     0.0860  7.2058 5.771e-13 ***
gdpl2(S)   0.2634     0.1303  2.0215   0.04323 *
gdpl3(S)  -0.2378     0.0934 -2.5460   0.01090 *
---
Residual standard error: 0.003247124
Transition probabilities:
            Regime 1   Regime 2   Regime 3
Regime 1 0.003303693 0.02108811 0.53566037
Regime 2 0.064955893 0.95537940 0.04115438
Regime 3 0.931740414 0.02353249 0.42318526
####
> da <- read.table("INDPRO.txt",header=T)
> ip <- da[,2]
> dip <- diff(ip)
> tdx <- c(1:nrow(da))/12+1919
> par(mfcol=c(2,1))
> plot(tdx,ip,xlab="year",ylab='ip',type='l')
> plot(tdx[-1],dip,xlab="year",ylab='diff(ip)',type='l')
> m4 <- arima(dip,order=c(4,0,2),seasonal=list(order=c(1,0,1),period=12))
> m4
Call: arima(x = dip, order = c(4,0,2), seasonal = list(order = c(1,0,1), period = 12))
Coefficients:
          ar1     ar2     ar3     ar4     ma1      ma2    sar1     sma1
      -0.3982  0.4951  0.1925  0.1813  0.6016  -0.2501  0.4517  -0.6033
s.e.   0.1210  0.1040  0.0625  0.0420  0.1228   0.1251  0.0949   0.0836
      intercept
         0.0848
s.e.     0.0199
sigma^2 estimated as 0.1341: log likelihood = -483.16, aic = 986.32
> wt <- dip[13:1165]-0.4517*dip[1:1153]
> f1 <- rep(0,12)
> f1[12] <- .6033
> adip <- filter(wt,f1,method="r",ini=rep(0,12))
> plot(tdx[-c(1:13)],adip,xlab='year',ylab='adj-dip',type='l')
> acf(adip,lag=24)
> require(tsDyn)
> n5 <- lstar(adip,m=8,mL=8,mH=8,thDelay=4)
Optimization algorithm converged
Optimized values fixed for regime 2: gamma = 2.7217, th = -0.48605; SSE = 147.069
> summary(n5)
Non linear autoregressive model
LSTAR model
Coefficients:
Low regime:
   const.L     phiL.1     phiL.2     phiL.3     phiL.4     phiL.5     phiL.6
 0.3115624  0.6870931 -0.4545742  0.2301197  0.2654436  0.2460963 -0.2653893
    phiL.7     phiL.8
-0.2528939  0.0281774
High regime:
   const.H     phiH.1     phiH.2     phiH.3     phiH.4     phiH.5     phiH.6
-0.3474566 -0.6539442  0.7211033 -0.0691959 -0.2485699 -0.1878756  0.2913649
    phiH.7     phiH.8
 0.3911484  0.0579544
Smoothing parameter: gamma = 2.722
Threshold Variable: Z(t) = + (0)X(t) + (0)X(t-1) + (0)X(t-2) + (0)X(t-3) + (1)X(t-4)
                           + (0)X(t-5) + (0)X(t-6) + (0)X(t-7)
Value: -0.486
Fit: residuals variance = 0.1276, AIC = -2334, MAPE = 339.8%
Coefficient(s):
         Estimate  Std. Error  t value  Pr(>|z|)
const.L  0.311562    0.249085   1.2508  0.210996
phiL.1   0.687093    0.284827   2.4123  0.015851 *
phiL.2  -0.454574    0.364479  -1.2472  0.212328
phiL.3   0.230120    0.132841   1.7323  0.083222 .
phiL.4   0.265444    0.087511   3.0333  0.002419 **
phiL.5   0.246096    0.181475   1.3561  0.175072
phiL.6  -0.252894    0.112054  -2.2569  0.024015 *
phiL.7  -0.265389    0.117760  -2.2536  0.024219 *
phiL.8   0.028177    0.082941   0.3397  0.734060
const.H -0.347457    0.312239  -1.1128  0.265799
phiH.1  -0.653944    0.330864  -1.9765  0.048101 *
phiH.2   0.721103    0.396752   1.8175  0.069138 .
phiH.3  -0.069196    0.163429  -0.4234  0.672003
phiH.4  -0.248570    0.136231  -1.8246  0.068058 .
phiH.5  -0.187876    0.185756  -1.0114  0.311820
phiH.6   0.391148    0.131696   2.9701  0.002977 **
phiH.7   0.291365    0.129243   2.2544  0.024171 *
phiH.8   0.057954    0.110205   0.5259  0.598974
gamma    2.721685    1.622500   1.6775  0.093452 .
th      -0.486046    0.306333  -1.5867  0.112590
---
Non-linearity test of full-order LSTAR model against full-order AR model
F = 6.8874 ; p-value = 7.0957e-09
> predict(n5,n.ahead=5)
Time Series:
Start = 1166
End = 1170
Frequency = 1
[1] 0.030077216 0.014749998 0.113773221 0.003575012 0.139365484
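
In the LSTAR fit n5 the two AR regimes are mixed by a logistic weight of the threshold variable z = x(t-4). A minimal sketch, not part of the book's code, that plots this transition function using the reported estimates gamma = 2.722 and th = -0.486; the logistic form 1/(1+exp(-gamma*(z-th))) is the standard LSTAR transition, and the plot is only illustrative.

## Hedged sketch: logistic transition weight implied by the lstar() estimates.
G <- function(z, gamma=2.722, th=-0.486) 1/(1 + exp(-gamma*(z - th)))
curve(G(x), from=-3, to=3, xlab='z = x(t-4)', ylab='weight of high regime')
abline(v=-0.486, lty=2)   # estimated threshold
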
####
da=read.table("INDPRO.txt",header=T)
xt <- diff(da$VALUE)
wt <- xt[13:length(xt)]-0.4517*xt[1:(length(xt)-12)]
f1 <- rep(0,12); f1[12]=0.6033
yt <- filter(wt,f1,method="r",ini=rep(0,12))
ist <- 9; nT <- length(yt)
y <- yt[ist:nT]; X <- NULL
for (i in 1:8){
  X <- cbind(X,yt[(ist-i):(nT-i)])}
colnames(X) <- paste("lag",1:8,sep="")
Z <- model.matrix(y~lag1*(lag2+lag3+lag4+lag6+lag7+lag8),data=data.frame(X))
m1 <- lm(y~-1+Z)
summary(m1)
Call: lm(formula = y ~ -1 + Z)
Coefficients:
              Estimate Std. Error t value Pr(>|t|)
Z(Intercept)   0.05058    0.01258   4.021 6.18e-05 ***
Zlag1          0.22153    0.03157   7.016 3.92e-12 ***
Zlag2          0.11574    0.03137   3.689 0.000236 ***
Zlag3          0.18077    0.02997   6.032 2.19e-09 ***
Zlag4          0.04270    0.03127   1.366 0.172360
Zlag6          0.06628    0.03021   2.193 0.028476 *
Zlag7         -0.07221    0.02979  -2.424 0.015520 *
Zlag8          0.08205    0.02975   2.758 0.005917 **
Zlag1:lag2     0.09464    0.03879   2.440 0.014856 *
Zlag1:lag3    -0.12200    0.04488  -2.719 0.006658 **
Zlag1:lag4    -0.15602    0.03090  -5.050 5.15e-07 ***
Zlag1:lag6     0.05693    0.05841   0.975 0.329944
Zlag1:lag7     0.15932    0.04988   3.194 0.001441 **
Zlag1:lag8    -0.19915    0.05522  -3.607 0.000324 ***
---
Residual standard error: 0.3613 on 1131 degrees of freedom
Multiple R-squared: 0.2855, Adjusted R-squared: 0.2767
F-statistic: 32.28 on 14 and 1131 DF, p-value: < 2.2e-16
Box.test(m1$residuals,lag=24,type="Ljung")
        Box-Ljung test
data:  m1$residuals
X-squared = 31.201, df = 24, p-value = 0.1481
### Model comparison
> m1f <- cvlm(y,Z,subsize=1000,iter=300)
Average RMSE:  0.1405556
Average MAE:  0.2469571
> m1f <- cvlm(y,Z,subsize=1000,iter=500)
Average RMSE:  0.1389267
Average MAE:  0.2464725
> m1f <- cvlm(y,Z,subsize=1000,iter=1000)
Average RMSE:  0.1386875
Average MAE:  0.2470499
##
Z <- model.matrix(y~lag6+lag8+lag4*(lag1+lag2+lag3+lag7),data=data.frame(X))
m2 <- lm(y~-1+Z)
summary(m2)
Call: lm(formula = y ~ -1 + Z)
Coefficients:
             Estimate Std. Error t value Pr(>|t|)
Z(Intercept)  0.05250    0.01259   4.170 3.28e-05 ***
Zlag6         0.06115    0.02981   2.052 0.040421 *
Zlag8         0.05909    0.02902   2.036 0.041960 *
Zlag4         0.06998    0.03162   2.213 0.027104 *
Zlag1         0.17990    0.02899   6.206 7.62e-10 ***
Zlag2         0.12606    0.02940   4.287 1.96e-05 ***
Zlag3         0.16464    0.03155   5.218 2.15e-07 ***
Zlag7        -0.05460    0.03031  -1.801 0.071906 .
Zlag4:lag1   -0.24395    0.03566  -6.841 1.29e-11 ***
Zlag4:lag2    0.15998    0.04772   3.353 0.000827 ***
Zlag4:lag3   -0.11864    0.03743  -3.169 0.001569 **
Zlag4:lag7    0.12208    0.03289   3.712 0.000216 ***
---
Residual standard error: 0.3618 on 1133 degrees of freedom
Multiple R-squared: 0.2821, Adjusted R-squared: 0.2745
Box.test(m2$residuals,lag=24,type="Ljung")
        Box-Ljung test
data:  m2$residuals
X-squared = 29.079, df = 24, p-value = 0.2172
> m2f <- cvlm(y,Z,subsize=1000,iter=300)
Average RMSE:  0.145679
Average MAE:  0.2506342
> m2f <- cvlm(y,Z,subsize=1000,iter=500)
Average RMSE:  0.1438586
Average MAE:  0.2492642
> m2f <- cvlm(y,Z,subsize=1000,iter=1000)
Average RMSE:  0.1423353
Average MAE:  0.2491681
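
The cvlm() calls above appear to use a helper supplied with the chapter's scripts; its definition is not shown here. As a rough illustration of the idea only, the following hedged sketch fits the linear model on random subsamples of size subsize and averages the out-of-sample errors; the function name, the error definitions, and the output format are assumptions, not cvlm's actual code.

## Hedged sketch: random-subsample cross-validation for a linear model y ~ Z.
cv.sketch <- function(y, Z, subsize=1000, iter=300){
  mse <- mae <- numeric(iter)
  for (it in 1:iter){
    idx <- sample(length(y), subsize)               # training rows
    fit <- lm.fit(Z[idx,,drop=FALSE], y[idx])
    err <- y[-idx] - Z[-idx,,drop=FALSE] %*% fit$coefficients
    mse[it] <- mean(err^2); mae[it] <- mean(abs(err))
  }
  c(avg.MSE=mean(mse), avg.MAE=mean(mae))
}
cv.sketch(y, Z, subsize=1000, iter=300)   # comparable in spirit to cvlm()
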
####
> require(NTS); require(dlm)
> da <- read.table("aa-3rv.txt") ## Example 7
> xt <- log(da[,2])
> m1 <- tvAR(xt,lags=NULL)
par: -1.467084 -5.219068
> sqrt(exp(m1$par))
[1] 0.48020505 0.07356883
> m1k <- tvARFiSm(xt,lags=NULL,par=m1$par)
> fil <- m1k$filter; sm <- m1k$smooth
> plot(xt,xlab='time',ylab='log(rv)',type='l')
> lines(fil$m,col="blue"); lines(sm$s,col="red")
###
> adip <- scan("m-adip.txt")
> mk <- tvAR(adip,lags=c(1:4))
par: -2.419878 -7.369506 -2.779889 -10.71095 -10.26296 -6.325027
> sqrt(exp(mk$par))
[1] 0.298215 0.025103 0.24909 0.00472 0.00591 0.04232
> mkk <- tvARFiSm(adip,lags=c(1:4),par=mk$par)
> names(mkk)
[1] "filter" "smooth"
> filter <- mkk$filter; smooth <- mkk$smooth
> names(filter)
[1] "y"   "mod" "m"   "U.C" "D.C" "a"   "U.R" "D.R" "f"
> resi <- adip[5:1153]-filter$f
> m <- filter$m; s <- smooth$s
> range(m)
[1] -1.265166  2.750762
> range(s)
[1] -0.7726883  1.5985201
> tdx <- c(2:1154)/12+1919
> tdx <- tdx[-c(1:3)]
> par(mfcol=c(2,1))
> plot(tdx,m[,1],xlab='year',ylab='coefs',ylim=c(-1.3,2.8),pch="*",cex=0.5)
> lines(tdx,m[,2],lty=1)
> plot(tdx,m[,3],xlab='year',ylab='coefs',ylim=c(-1.3,2.8),pch="*",cex=0.5)
> lines(tdx,m[,4])
> lines(tdx,m[,5],lty=2)
> plot(tdx,s[,1],xlab='year',ylab='coefs',ylim=c(-0.8,1.61),pch="*",cex=0.5)
> lines(tdx,s[,2],lty=1)
> plot(tdx,s[,3],xlab='year',ylab='coefs',ylim=c(-0.8,1.61),pch="*",cex=0.5)
> lines(tdx,s[,4])
> lines(tdx,s[,5],lty=2)
####
> mm <- rcAR(adip,lags=c(1:4)) # NTS package
Sample mean:  0.1166405
Estimates:
       est se.est   tratio
X1  0.2216 0.0379   5.8515
X2  0.1653 0.0377   4.3801
X3  0.1811 0.0354   5.1228
X4  0.0441 0.0327   1.3476
   -1.3136 0.1563  -8.4028
   -1.5962 0.2426  -6.5794
   -2.1289 0.3376  -6.3050
   -2.7627 0.4419  -6.2512
   -3.1882 0.1098 -29.0300
> names(mm)
[1] "par"        "se.est"     "residuals"  "sresiduals"
> exp(mm$par[5:9])
0.26885490 0.20265937 0.11896990 0.06312199 0.04124666
> Box.test(mm$sresiduals^2,lag=12,type='Ljung')
        Box-Ljung test
data:  mm$sresiduals^2
X-squared = 6.616, df = 12, p-value = 0.8819
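
tvAR() and tvARFiSm() from the NTS package estimate time-varying AR coefficients by Kalman filtering and smoothing built on the dlm package. As a rough illustration of the underlying state-space idea only, here is a hedged sketch of a time-varying AR(1) with intercept written as a dynamic regression directly in dlm; the variance parameterization and starting values are my choices, not the NTS implementation.

## Hedged sketch: time-varying AR(1) for adip as a dlm dynamic regression,
## with the intercept and AR(1) coefficient following random walks.
require(dlm)
y  <- adip[-1]; x1 <- adip[-length(adip)]        # regress y(t) on y(t-1)
build <- function(par)                            # par = log variances
  dlmModReg(x1, addInt=TRUE, dV=exp(par[1]), dW=exp(par[2:3]))
fit <- dlmMLE(y, parm=c(0,-5,-5), build=build)
sm  <- dlmSmooth(y, build(fit$par))
coef.path <- dropFirst(sm$s)   # smoothed intercept and AR(1) coefficient paths
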