
#importlib.reload(forecasting)
#model=forecasting.ses(y)
#model.fit()


import pandas as pd
import numpy as np
from scipy import stats
from scipy.optimize import minimize
from numba import jit
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
import statsmodels.api as sm

def exponentialsmoothing(y, alpha):
    # Simple exponential smoothing recursion: the level is a weighted average of
    # the previous observation and the previous level.
    n=len(y)
    l=np.zeros(n)
    for i in range(1,n):
        l[i]=alpha*y[i-1]+(1-alpha)*(l[i-1])
    return l

class ses:
    # Simple exponential smoothing, estimated by minimising the in-sample MSE.
    def __init__(self, y):
        assert type(y)==pd.core.series.Series, 'Input must be a pandas series'
        self.time=y.index
        self.y=y.values
        self.n=len(self.y)

    def smooth(self):
        return exponentialsmoothing(self.y, self.alpha)

    def mse(self, alpha):
        x=exponentialsmoothing(self.y, alpha)
        return np.mean(np.power(self.y-x,2))

    def loss(self, alpha):
        # Concentrated Gaussian log-likelihood (up to a constant), to be minimised.
        return (1/2)*np.log(self.mse(alpha))

    def fit(self, start=0.1):
        result=minimize(self.loss, start, bounds=((0,1),), tol=1e-6, method='L-BFGS-B')
        self.alpha=float(result.x[0])
        self.sigma2=self.mse(self.alpha)
        self.se=np.sqrt(np.diag((1/self.n)*result.hess_inv.todense()))
        self.fitted=self.smooth()
        self.resid=self.y-self.fitted

    def forecast(self, h):
        # Point forecasts are flat at the last smoothed level.
        return np.repeat(self.smooth()[-1], h)

    def intervalforecast(self, h, level=.95):
        # Normal prediction intervals with h-step variance sigma2*(1+(h-1)*alpha^2).
        fitted=self.smooth()
        sigma2=self.sigma2
        forecast=np.repeat(fitted[-1], h)
        crit=stats.norm.ppf(1-(1-level)/2)
        ci=np.zeros((h,2))
        lvar=0.0
        for i in range(h):
            var=lvar+sigma2
            ci[i,0], ci[i,1]= forecast[i]-crit*np.sqrt(var), forecast[i]+crit*np.sqrt(var)
            lvar+=(self.alpha**2)*(sigma2)
        return ci

    def summary(self):
        N=self.n
        mse=self.sigma2
        loglik=-(N/2)*(1+np.log(2*np.pi))-(N/2)*np.log(mse)
        aic=-2*loglik+2*3
        bic=-2*loglik+np.log(N)*3
        print(' Simple exponential smoothing\n')
        print(' Smoothing parameter:')
        print(' alpha            {0:.3f} ({1:.3f}) \n'.format(self.alpha, self.se[0]))
        print(' In-sample fit:')
        print(' MSE              {0:.3f}'.format(mse))
        print(' Log-likelihood   {0:.3f}'.format(loglik))
        print(' AIC              {0:.3f}'.format(aic))
        print(' BIC              {0:.3f}'.format(bic))
        self.aic=aic
        self.bic=bic
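
# A minimal usage sketch for the ses class (not part of the original module); it
# assumes `y` is a pandas Series loaded elsewhere in the project:
# model = ses(y)
# model.fit()                                         # estimates alpha by minimising the in-sample MSE
# model.summary()                                     # prints alpha, MSE, log-likelihood, AIC and BIC
# point = model.forecast(h=12)                        # flat point forecasts
# bands = model.intervalforecast(h=12, level=0.95)    # (12, 2) array of interval bounds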

def holtsmooth(y, alpha, beta, phi):
    # Holt's linear (optionally damped) trend recursions for the level l and trend b.
    n=len(y)
    l=np.zeros(n)
    b=np.zeros(n)
    for i in range(1,n):
        l[i]=alpha*y[i-1]+(1-alpha)*(l[i-1]+phi*b[i-1])
        b[i]=beta*(l[i]-l[i-1])+(1-beta)*(phi*b[i-1])
    return l, b

class holt:
    # Holt's trend-corrected exponential smoothing, with an optional damped trend.
    def __init__(self, y, damped=False):
        assert type(y)==pd.core.series.Series, 'Input must be a pandas series'
        self.time=y.index
        self.y=y.values
        self.n=len(self.y)
        self.damped=damped

    def smooth(self):
        if self.damped:
            l, b = holtsmooth(self.y, self.alpha, self.beta, self.phi)
        else:
            l, b = holtsmooth(self.y, self.alpha, self.beta, 1.0)
        return l+b

    def mse(self, theta):
        if self.damped:
            l, b = holtsmooth(self.y, theta[0], theta[1], theta[2])
        else:
            l, b = holtsmooth(self.y, theta[0], theta[1], 1.0)
        return np.mean(np.power(self.y-l-b,2))

    def loss(self, theta):
        mse=self.mse(theta)
        return (1/2)*np.log(mse)

    def fit(self, start=np.array([0.3,0.05])):
        if self.damped==False:
            result=minimize(self.loss, start, bounds=((0,1),(0,1)), tol=1e-6, method='L-BFGS-B')
        else:
            if len(start)==2:
                start=np.array([0.1,0.1, 0.98])
            result=minimize(self.loss, start, bounds=((0,1),(0,1),(0,1)), tol=1e-6, method='L-BFGS-B')
            self.phi=float(result.x[2])
        self.alpha=float(result.x[0])
        self.beta=float(result.x[1])
        self.params=result.x
        self.sigma2=self.mse(self.params)
        self.se=np.sqrt(np.diag((1/self.n)*result.hess_inv.todense()))
        self.fitted=self.smooth()
        self.resid=self.y-self.fitted

    def forecast(self, h):
        if self.damped:
            l, b = holtsmooth(self.y, self.alpha, self.beta, self.phi)
            prediction=np.zeros(h)
            yhat=l[-1]
            for i in range(h):
                yhat+=np.power(self.phi, i+1)*b[-1]
                prediction[i]=yhat
            return prediction
        l, b = holtsmooth(self.y, self.alpha, self.beta, 1.0)
        return l[-1]+(1+np.arange(h))*b[-1]

    def forecastvariance(self, h):
        # h-step forecast error variance sigma2*(1 + sum_{j<h} c_j^2), where aux
        # accumulates c_j/alpha = 1 + j*beta (with the trend increments discounted
        # by powers of phi under damping).
        sigma2=self.sigma2
        result=np.zeros(h)
        var=sigma2
        aux=1.0
        for i in range(h):
            result[i]=var
            if self.damped:
                aux+=(self.phi**(1+i))*(self.beta)
            else:
                aux+=self.beta
            var+=np.power(self.alpha*aux,2)*sigma2
        return result

    def intervalforecast(self, h, level=.95):
        sigma2=self.sigma2
        crit=stats.norm.ppf(1-(1-level)/2)
        forecast=np.reshape(self.forecast(h), (h,1))
        var=np.reshape(self.forecastvariance(h), (h,1))
        return np.hstack((forecast-crit*np.sqrt(var), forecast+crit*np.sqrt(var)))

    def summary(self):
        N=self.n
        mse=self.sigma2
        loglik=-(N/2)*(1+np.log(2*np.pi))-(N/2)*np.log(mse)
        if self.damped:
            aic=-2*loglik+2*4
            bic=-2*loglik+np.log(N)*4
        else:
            aic=-2*loglik+2*3
            bic=-2*loglik+np.log(N)*3
        if self.damped:
            print(' Holt exponential smoothing (damped trend)\n')
        else:
            print(' Holt (trend corrected) exponential smoothing\n')
        print(' Smoothing parameters: ')
        print(' alpha (level)    {0:.3f} ({1:.3f})'.format(self.alpha, self.se[0]))
        print(' beta (trend)     {0:.3f} ({1:.3f})'.format(self.beta, self.se[1]))
        if self.damped:
            print(' phi (damping)    {0:.3f} ({1:.3f})'.format(self.phi, self.se[2]))
        print(' \n In-sample fit:')
        print(' MSE              {0:.3f}'.format(mse))
        print(' Log-likelihood   {0:.3f}'.format(loglik))
        print(' AIC              {0:.3f}'.format(aic))
        print(' BIC              {0:.3f}'.format(bic))
        self.aic=aic
        self.bic=bic
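
# A minimal usage sketch for the holt class (illustrative only; as above, `y` is
# assumed to be a pandas Series):
# model = holt(y, damped=True)
# model.fit()                                         # estimates alpha, beta and (if damped) phi
# model.summary()
# point = model.forecast(h=12)
# bands = model.intervalforecast(h=12, level=0.95)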

def ahw(y, alpha, beta, delta, phi, m):
    # Additive Holt-Winters recursions: level l, trend b and seasonal S with period m.
    n=len(y)
    l=np.zeros(n)
    b=np.zeros(n)
    S=np.zeros(n)
    l[:m]=np.mean(y[:m])
    S[:m]=y[:m]-l[0]
    for i in range(m,n):
        l[i]=alpha*(y[i-1]-S[i-1])+(1-alpha)*(l[i-1]+phi*b[i-1])
        b[i]=beta*(l[i]-l[i-1])+(1-beta)*(phi*b[i-1])
        S[i]=delta*(y[i-m]-l[i-m+1])+(1-delta)*S[i-m]
    return l, b, S

def mhw(y, alpha, beta, delta, phi, m):
    # Multiplicative Holt-Winters recursions: the seasonal factors enter multiplicatively.
    n=len(y)
    l=np.zeros(n)
    b=np.zeros(n)
    S=np.zeros(n)
    l[:m]=np.mean(y[:m])
    S[:m]=y[:m]/l[0]
    for i in range(m,n):
        l[i]=alpha*(y[i-1]/S[i-1])+(1-alpha)*(l[i-1]+phi*b[i-1])
        b[i]=beta*(l[i]-l[i-1])+(1-beta)*(phi*b[i-1])
        S[i]=delta*(y[i-m]/l[i-m+1])+(1-delta)*S[i-m]
    return l, b, S

class holtwinters:
    # Holt-Winters (triple) exponential smoothing with additive or multiplicative
    # seasonality and an optional damped trend.
    def __init__(self, y, additive=True, damped=False, m=12):
        assert type(y)==pd.core.series.Series, 'Input must be a pandas series'
        self.time=y.index
        self.y=y.values
        self.n=len(self.y)
        self.additive=additive
        self.damped=damped
        self.m=m

    def smooth(self):
        if self.damped:
            if self.additive:
                l, b, S = ahw(self.y, self.alpha, self.beta, self.delta, self.phi, self.m)
            else:
                l, b, S = mhw(self.y, self.alpha, self.beta, self.delta, self.phi, self.m)
        else:
            if self.additive:
                l, b, S = ahw(self.y, self.alpha, self.beta, self.delta, 1.0, self.m)
            else:
                l, b, S = mhw(self.y, self.alpha, self.beta, self.delta, 1.0, self.m)
        if self.additive:
            return l+b+S
        return (l+b)*S

    def mse(self, theta):
        if self.damped:
            if self.additive:
                l, b, S = ahw(self.y, theta[0], theta[1], theta[2], theta[3], self.m)
            else:
                l, b, S = mhw(self.y, theta[0], theta[1], theta[2], theta[3], self.m)
        else:
            if self.additive:
                l, b, S = ahw(self.y, theta[0], theta[1], theta[2], 1.0, self.m)
            else:
                l, b, S = mhw(self.y, theta[0], theta[1], theta[2], 1.0, self.m)
        # The first m observations only initialise the seasonal factors, so they are
        # excluded from the in-sample MSE.
        if self.additive:
            return np.mean(np.power(self.y-l-b-S,2)[self.m:])
        return np.mean(np.power(self.y-(l+b)*S,2)[self.m:])

    def loss(self, theta):
        mse=self.mse(theta)
        return (1/2)*np.log(mse)

    def fit(self, start=np.array([0.1,0.1, 0.05])):
        if self.damped==False:
            result=minimize(self.loss, start, bounds=((0,1),(0,1),(0,1)), tol=1e-6, method='L-BFGS-B')
        else:
            if len(start)==3:
                start=np.array([0.1, 0.1, 0.05, 0.98])
            result=minimize(self.loss, start, bounds=((0,1),(0,1),(0,1),(0,1)), tol=1e-6, method='L-BFGS-B')
            self.phi=float(result.x[3])
        self.alpha=float(result.x[0])
        self.beta=float(result.x[1])
        self.delta=float(result.x[2])
        self.params=result.x
        # Degrees-of-freedom adjusted error variance.
        self.sigma2=self.mse(self.params)*((len(self.y)-self.m)/(len(self.y)-self.m-len(self.params)))
        self.se=np.sqrt(np.diag((1/self.n)*result.hess_inv.todense()))
        self.fitted=self.smooth()
        self.resid=self.y-self.fitted

    def forecast(self, h):
        if self.damped:
            if self.additive:
                l, b, S = ahw(self.y, self.alpha, self.beta, self.delta, self.phi, self.m)
            else:
                l, b, S = mhw(self.y, self.alpha, self.beta, self.delta, self.phi, self.m)
            phi=self.phi
        else:
            if self.additive:
                l, b, S = ahw(self.y, self.alpha, self.beta, self.delta, 1.0, self.m)
            else:
                l, b, S = mhw(self.y, self.alpha, self.beta, self.delta, 1.0, self.m)
            phi=1.0
        yhat=l[-1]
        b=b[-1]
        S=S[-self.m:]
        prediction=np.zeros(h)
        for i in range(h):
            yhat+=np.power(phi, i+1)*b
            if self.additive:
                prediction[i]=yhat+S[i%self.m]
            else:
                prediction[i]=yhat*S[i%self.m]
        return prediction

    def forecastvariance(self, h):
        if self.additive==False:
            assert h<=self.m, 'Forecast variance not available for h>m in the multiplicative model'
        # aux accumulates the coefficient alpha*(1+j*beta) (phi-discounted under
        # damping); an additional delta*(1-alpha) term enters at seasonal lags.
        sigma2=self.sigma2
        result=np.zeros(h)
        var=sigma2
        aux=self.alpha
        for i in range(h):
            result[i]=var
            if self.damped:
                aux+=self.alpha*(self.phi**(1+i))*(self.beta)
            else:
                aux+=self.alpha*self.beta
            if (i>0) and (i%self.m==0):
                var+=np.power(aux+self.delta*(1-self.alpha),2)*sigma2
            else:
                var+=np.power(aux,2)*sigma2
        return result

    def intervalforecast(self, h, level=.95):
        sigma2=self.sigma2
        crit=stats.norm.ppf(1-(1-level)/2)
        forecast=np.reshape(self.forecast(h), (h,1))
        var=np.reshape(self.forecastvariance(h), (h,1))
        return np.hstack((forecast-crit*np.sqrt(var), forecast+crit*np.sqrt(var)))

    def summary(self):
        N=self.n
        mse=self.sigma2
        loglik=-(N/2)*(1+np.log(2*np.pi))-(N/2)*np.log(mse)
        if self.damped:
            aic=-2*loglik+2*5
            bic=-2*loglik+np.log(N)*5
        else:
            aic=-2*loglik+2*4
            bic=-2*loglik+np.log(N)*4
        if self.damped:
            if self.additive:
                print(' Additive Holt-Winters exponential smoothing (damped trend)\n')
            else:
                print(' Multiplicative Holt-Winters exponential smoothing (damped trend)\n')
        else:
            if self.additive:
                print(' Additive Holt-Winters exponential smoothing\n')
            else:
                print(' Multiplicative Holt-Winters exponential smoothing\n')
        print(' Smoothing parameters:')
        print(' alpha (level)    {0:.3f} ({1:.3f})'.format(self.alpha, self.se[0]))
        print(' beta (trend)     {0:.3f} ({1:.3f})'.format(self.beta, self.se[1]))
        print(' delta (seasonal) {0:.3f} ({1:.3f})'.format(self.delta, self.se[2]))
        if self.damped:
            print(' phi (damping)    {0:.3f} ({1:.3f})'.format(self.phi, self.se[3]))
        print(' \n In-sample fit:')
        print(' MSE              {0:.3f}'.format(mse))
        print(' RMSE             {0:.3f}'.format(np.sqrt(mse)))
        print(' Log-likelihood   {0:.3f}'.format(loglik))
        print(' AIC              {0:.3f}'.format(aic))
        print(' BIC              {0:.3f}'.format(bic))
        self.aic=aic
        self.bic=bic
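
# A minimal usage sketch for the holtwinters class (illustrative; assumes `y` is a
# monthly pandas Series, so the seasonal period is m=12):
# model = holtwinters(y, additive=False, damped=False, m=12)
# model.fit()
# model.summary()
# point = model.forecast(h=24)
# bands = model.intervalforecast(h=24, level=0.95)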

def histogram(series):
    fig, ax= plt.subplots(figsize=(8,5))
    sns.distplot(series, ax=ax, hist_kws={'alpha': 0.8, 'edgecolor':'black', 'color': '#1F77B4'},
        kde_kws={'color': 'black', 'alpha': 0.7})
    sns.despine()
    return fig, ax

def qq_plot(residuals):
    fig, ax = plt.subplots(figsize=(8,5))
    pp = sm.ProbPlot(residuals, fit=True)
    qq = pp.qqplot(color='#1F77B4', alpha=0.8, ax=ax)
    a=ax.get_xlim()[0]
    b=ax.get_xlim()[1]
    ax.plot([a,b],[a,b], color='black', alpha=0.6)
    ax.set_xlim(a,b)
    ax.set_title('Normal Q-Q plot for the residuals', fontsize=12)
    return fig, ax

def plot_components_x13(results, label=''):
    colours=['#D62728', '#FF7F0E', '#2CA02C', '#1F77B4']
    fig, ax = plt.subplots(2,2, figsize=(12,8))
    ax[0,0].plot(results.observed, color=colours[0], alpha=0.95)
    ax[0,0].set(ylabel=label, title='Observed')
    ax[0,1].plot(results.trend, color=colours[1], alpha=0.95)
    ax[0,1].set(title='Trend')
    ax[1,0].plot(results.observed/results.seasadj, color=colours[2], alpha=0.95)
    ax[1,0].set(ylabel=label, title='Seasonal')
    ax[1,1].plot(results.irregular, color=colours[3], alpha=0.95)
    ax[1,1].set(title='Irregular')
    fig.suptitle('Time series decomposition (X-13 ARIMA-SEATS)', fontsize=13.5)
    fig.tight_layout()
    fig.subplots_adjust(top=0.9)
    return fig, ax

def fanchart(y, forecast, intv1, intv2, intv3):
    assert type(y)==pd.core.series.Series, 'The time series must be a pandas series'
    assert type(forecast)==pd.core.series.Series, 'The forecast must be a pandas series'

    # Prepend the last observation so that the forecast fan connects to the data.
    last=y.iloc[-1:]
    extended=pd.concat([last, forecast])

    with sns.axes_style('ticks'):
        fig, ax= plt.subplots(figsize=(8,5))
        y.plot(color='#D62728')
        extended.plot(color='black', alpha=0.4, label='Point forecast')
        ax.fill_between(extended.index, pd.concat([last, intv3.iloc[:,0]]), pd.concat([last, intv3.iloc[:,1]]), facecolor='#FAB8A4', lw=0)
        ax.fill_between(extended.index, pd.concat([last, intv2.iloc[:,0]]), pd.concat([last, intv2.iloc[:,1]]), facecolor='#F58671', lw=0)
        ax.fill_between(extended.index, pd.concat([last, intv1.iloc[:,0]]), pd.concat([last, intv1.iloc[:,1]]), facecolor='#F15749', lw=0)
        hold = ax.get_ylim()
        ax.fill_betweenx(ax.get_ylim(), extended.index[0], extended.index[-1], facecolor='grey', alpha=0.15)
        ax.set_ylim(hold)
    return fig, ax
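
# A sketch of how the fan chart is typically assembled (illustrative only; the
# forecast index `future` below is a hypothetical example, assuming `y` has a
# monthly PeriodIndex):
# model = holtwinters(y, m=12); model.fit()
# future = pd.period_range(y.index[-1]+1, periods=12, freq='M')
# point = pd.Series(model.forecast(12), index=future)
# intv1 = pd.DataFrame(model.intervalforecast(12, level=0.50), index=future)
# intv2 = pd.DataFrame(model.intervalforecast(12, level=0.80), index=future)
# intv3 = pd.DataFrame(model.intervalforecast(12, level=0.95), index=future)
# fig, ax = fanchart(y, point, intv1, intv2, intv3)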

def sarimaforecast(y, model, h=1, m=12):
    # Recovers level forecasts from a model fitted to the first- and seasonally-
    # differenced series (1-L)(1-L^m)y, by inverting the differencing recursively.
    n=len(y)
    x=np.zeros((n+h))
    x[:n]=y

    # Point forecasts of the differenced series (first element of the results'
    # forecast output).
    forecast_diff=model.forecast(steps=h)[0]

    for i in range(h):
        x[n+i]=x[n+i-1]+x[n+i-m]-x[n+i-m-1]+forecast_diff[i]

    return x[-h:]
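
# A usage sketch for sarimaforecast (illustrative only). It assumes `model` is a
# fitted statsmodels results object for the differenced series (1-L)(1-L^12)y whose
# .forecast(steps=h) returns the point forecasts as its first element (the legacy
# ARMA/ARIMA API); adjust the model call to your statsmodels version.
# diff = (y - y.shift(1) - y.shift(12) + y.shift(13)).dropna()
# arma = sm.tsa.ARMA(diff, order=(1, 1)).fit()
# yhat = sarimaforecast(y, arma, h=12, m=12)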
