From 0412a23ecf0c4353d22025d4526fa391d7789128 Mon Sep 17 00:00:00 2001 From: gjunjie Date: Thu, 2 Apr 2026 21:24:10 -0400 Subject: [PATCH 1/8] Add FIAPARCH volatility model Implement the Fractionally Integrated Asymmetric Power ARCH model, extending FIGARCH with an asymmetry parameter (gamma) and a flexible power parameter (delta). Includes Python and Cython recursions, FIAPARCHUpdater for ARCH-in-mean support, simulation and forecasting, arch_model integration via vol="fiaparch", and comprehensive tests. Made-with: Cursor --- arch/tests/univariate/test_mean.py | 4 + arch/tests/univariate/test_volatility.py | 256 ++++++++++++ arch/univariate/__init__.py | 2 + arch/univariate/mean.py | 7 +- arch/univariate/recursions.pyi | 26 ++ arch/univariate/recursions.pyx | 155 +++++++ arch/univariate/recursions_python.py | 144 +++++++ arch/univariate/volatility.py | 496 +++++++++++++++++++++++ 8 files changed, 1089 insertions(+), 1 deletion(-) diff --git a/arch/tests/univariate/test_mean.py b/arch/tests/univariate/test_mean.py index 46024bcaed..81668b7c48 100644 --- a/arch/tests/univariate/test_mean.py +++ b/arch/tests/univariate/test_mean.py @@ -46,6 +46,7 @@ APARCH, ARCH, EGARCH, + FIAPARCH, FIGARCH, GARCH, HARCH, @@ -685,6 +686,9 @@ def test_arch_model(self): am = arch_model(self.y, vol="figarch") assert isinstance(am.volatility, FIGARCH) + am = arch_model(self.y, vol="fiaparch") + assert isinstance(am.volatility, FIAPARCH) + am = arch_model(self.y, vol="aparch") assert isinstance(am.volatility, APARCH) diff --git a/arch/tests/univariate/test_volatility.py b/arch/tests/univariate/test_volatility.py index 1807fed1c4..6c10f95ebe 100644 --- a/arch/tests/univariate/test_volatility.py +++ b/arch/tests/univariate/test_volatility.py @@ -20,6 +20,7 @@ APARCH, ARCH, EGARCH, + FIAPARCH, FIGARCH, GARCH, HARCH, @@ -1833,3 +1834,258 @@ def test_figarch_weights(): lam_cy = rec.figarch_weights(params, 1, 1, 1000) assert_allclose(lam_py, lam_nb) assert_allclose(lam_py, lam_cy) + + +def 
test_fiaparch(setup): + trunc_lag = 750 + fiaparch = FIAPARCH(truncation=trunc_lag) + + sv = fiaparch.starting_values(setup.resids) + assert_equal(sv.shape[0], fiaparch.num_params) + + bounds = fiaparch.bounds(setup.resids) + assert len(bounds) == fiaparch.num_params + assert bounds[0][0] == 0.0 # omega lower + assert bounds[1] == (0.0, 0.5) # phi + assert bounds[3][0] == 0.0 # beta lower + assert bounds[4] == (-0.9997, 0.9997) # gamma + assert bounds[5] == (0.05, 4.0) # delta + + backcast = fiaparch.backcast(setup.resids) + var_bounds = fiaparch.variance_bounds(setup.resids) + # omega, phi, d, beta, gamma, delta + parameters = np.array([1.0, 0.2, 0.4, 0.2, -0.3, 1.5]) + fiaparch.compute_variance( + parameters, setup.resids, setup.sigma2, backcast, var_bounds + ) + + cond_var_direct = np.zeros_like(setup.sigma2) + sigma_delta_direct = np.zeros_like(setup.sigma2) + fig_params = parameters[:4] # omega, phi, d, beta + gamma = parameters[4] + delta = parameters[5] + recpy.fiaparch_recursion_python( + fig_params, + setup.resids, + np.abs(setup.resids), + cond_var_direct, + sigma_delta_direct, + 1, + 1, + setup.t, + trunc_lag, + backcast, + var_bounds, + gamma, + delta, + ) + assert_allclose(setup.sigma2, cond_var_direct) + + a, b = fiaparch.constraints() + # Full model: omega, phi, d, beta, gamma, delta => 6 params + # FIGARCH constraints: 7 rows for (omega, phi, d, beta) + # gamma: 2 rows (> -0.9997, < 0.9997) + # delta: 2 rows (> 0.05, < 4) + assert a.shape == (11, 6) + assert b.shape == (11,) + # omega > 0 + assert a[0, 0] == 1 + # gamma constraints + assert a[7, 4] == 1.0 # gamma > -0.9997 + assert a[8, 4] == -1.0 # gamma < 0.9997 + # delta constraints + assert a[9, 5] == 1.0 # delta > 0.05 + assert a[10, 5] == -1.0 # delta < 4.0 + + state = setup.rng.get_state() + rng = Normal(seed=RandomState()) + rng.generator.set_state(state) + sim_data = fiaparch.simulate(parameters, setup.t, rng.simulate([])) + setup.rng.set_state(state) + lam = 
rec.figarch_weights(fig_params[1:], 1, 1, trunc_lag) + lam_rev = lam[::-1] + omega_tilde = fig_params[0] / (1 - fig_params[-1]) + persistence = np.sum(lam) + initial_value = omega_tilde + if persistence < 1: + initial_value /= 1 - persistence + e = setup.rng.standard_normal(trunc_lag + setup.t + 500) + sigma2 = np.zeros(trunc_lag + setup.t + 500) + data = np.zeros(trunc_lag + setup.t + 500) + sigma_delta_sim = np.zeros(trunc_lag + setup.t + 500) + sigma_delta_sim[:trunc_lag] = initial_value + sigma2[:trunc_lag] = initial_value ** (2.0 / delta) + data[:trunc_lag] = np.sqrt(sigma2[:trunc_lag]) * e[:trunc_lag] + + for t in range(trunc_lag, trunc_lag + setup.t + 500): + fshocks = np.empty(trunc_lag) + for i in range(trunc_lag): + shock = abs(data[t - 1 - i]) - gamma * data[t - 1 - i] + fshocks[i] = shock**delta + sigma_delta_sim[t] = omega_tilde + lam_rev.dot(fshocks[::-1]) + sigma2[t] = sigma_delta_sim[t] ** (2.0 / delta) + data[t] = e[t] * np.sqrt(sigma2[t]) + data = data[trunc_lag + 500:] + sigma2 = sigma2[trunc_lag + 500:] + assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2)) + assert_almost_equal(data / sim_data[0], np.ones_like(data)) + + names = fiaparch.parameter_names() + names_target = ["omega", "phi", "d", "beta", "gamma", "delta"] + assert_equal(names, names_target) + + assert isinstance(fiaparch.__str__(), str) + txt = fiaparch.__repr__() + assert str(hex(id(fiaparch))) in txt + + assert_equal(fiaparch.name, "FIAPARCH") + assert_equal(fiaparch.num_params, 6) + assert_equal(fiaparch.truncation, trunc_lag) + + +def test_fiaparch_updater_matches_recursion(setup): + fiaparch = FIAPARCH(truncation=300) + parameters = np.array([0.1, 0.2, 0.4, 0.2, 0.8, 1.3]) + backcast = fiaparch.backcast(setup.resids) + var_bounds = fiaparch.variance_bounds(setup.resids) + + sigma2 = np.zeros_like(setup.sigma2) + fiaparch.compute_variance(parameters, setup.resids, sigma2, backcast, var_bounds) + + updater = fiaparch.volatility_updater + 
updater.initialize_update(parameters, backcast, setup.t) + sigma2_updater = np.zeros_like(setup.sigma2) + for t in range(setup.t): + updater._update_tester(t, parameters, setup.resids, sigma2_updater, var_bounds) + + assert_allclose(sigma2_updater, sigma2) + + +def test_fiaparch_no_phi(setup): + trunc_lag = 333 + fiaparch = FIAPARCH(p=0, truncation=trunc_lag) + + sv = fiaparch.starting_values(setup.resids) + assert_equal(sv.shape[0], fiaparch.num_params) + + bounds = fiaparch.bounds(setup.resids) + # omega, d, beta, gamma, delta + assert len(bounds) == fiaparch.num_params + + a, b = fiaparch.constraints() + # No phi => FIGARCH block shrinks (5 rows), gamma (2 rows), delta (2 rows) = 9 + assert a.shape[1] == 5 + assert a.shape[0] == 9 + + +def test_fiaparch_no_beta(setup): + fiaparch = FIAPARCH(q=0) + + sv = fiaparch.starting_values(setup.resids) + assert_equal(sv.shape[0], fiaparch.num_params) + + bounds = fiaparch.bounds(setup.resids) + # omega, phi, d, gamma, delta + assert len(bounds) == fiaparch.num_params + + a, b = fiaparch.constraints() + # No beta => FIGARCH block shrinks (5 rows), gamma (2 rows), delta (2 rows) = 9 + assert a.shape[1] == 5 + assert a.shape[0] == 9 + + +def test_fiaparch_no_asym(setup): + fiaparch = FIAPARCH(o=0) + + sv = fiaparch.starting_values(setup.resids) + assert_equal(sv.shape[0], fiaparch.num_params) + + bounds = fiaparch.bounds(setup.resids) + # omega, phi, d, beta, delta (no gamma) + assert len(bounds) == fiaparch.num_params + assert fiaparch.num_params == 5 + + names = fiaparch.parameter_names() + assert "gamma" not in names + + assert_equal(fiaparch.name, "FI Power ARCH") + + +def test_fiaparch_fixed_delta(setup): + delta_val = 1.5 + fiaparch = FIAPARCH(delta=delta_val) + + sv = fiaparch.starting_values(setup.resids) + assert_equal(sv.shape[0], fiaparch.num_params) + assert fiaparch.num_params == 5 # omega, phi, d, beta, gamma + + bounds = fiaparch.bounds(setup.resids) + assert len(bounds) == 5 + # No delta bounds + for b in 
bounds: + assert b != (0.05, 4.0) + + names = fiaparch.parameter_names() + assert "delta" not in names + assert_equal(fiaparch.delta, delta_val) + + a, b = fiaparch.constraints() + # FIGARCH (7 rows) + gamma (2 rows) + no delta => 9 + assert a.shape == (9, 5) + + backcast = fiaparch.backcast(setup.resids) + var_bounds = fiaparch.variance_bounds(setup.resids) + parameters = np.array([1.0, 0.2, 0.4, 0.2, -0.3]) + fiaparch.compute_variance( + parameters, setup.resids, setup.sigma2, backcast, var_bounds + ) + + cond_var_direct = np.zeros_like(setup.sigma2) + sigma_delta_direct = np.zeros_like(setup.sigma2) + fig_params = parameters[:4] + gamma = parameters[4] + recpy.fiaparch_recursion_python( + fig_params, + setup.resids, + np.abs(setup.resids), + cond_var_direct, + sigma_delta_direct, + 1, + 1, + setup.t, + 1000, + backcast, + var_bounds, + gamma, + delta_val, + ) + assert_allclose(setup.sigma2, cond_var_direct) + + +def test_fiaparch_errors(setup): + with pytest.raises(ValueError, match=r"truncation must be a positive integer"): + FIAPARCH(truncation=-1) + with pytest.raises(ValueError, match=r"p and q must be either 0 or 1"): + FIAPARCH(p=2) + with pytest.raises(ValueError, match=r"p and q must be either 0 or 1"): + FIAPARCH(q=-1) + with pytest.raises(ValueError, match=r"o must be either 0 or 1"): + FIAPARCH(o=2) + with pytest.raises(ValueError, match=r"delta must be between 0.05 and 4"): + FIAPARCH(delta=0.0) + with pytest.raises(ValueError, match=r"delta must be between 0.05 and 4"): + FIAPARCH(delta=4.1) + with pytest.raises(TypeError, match=r"delta must be convertible"): + FIAPARCH(delta="a") + + +@pytest.mark.parametrize("p", [0, 1]) +@pytest.mark.parametrize("o", [0, 1]) +@pytest.mark.parametrize("q", [0, 1]) +def test_fiaparch_str(setup, p, o, q): + fiaparch = FIAPARCH(p=p, o=o, q=q) + s = str(fiaparch).lower() + assert "arch" in s + assert f"q: {q}" in s + assert f"p: {p}" in s + assert f"o: {o}" in s diff --git a/arch/univariate/__init__.py 
b/arch/univariate/__init__.py index 0f30e5980c..0e76085b85 100644 --- a/arch/univariate/__init__.py +++ b/arch/univariate/__init__.py @@ -21,6 +21,7 @@ APARCH, ARCH, EGARCH, + FIAPARCH, FIGARCH, GARCH, HARCH, @@ -42,6 +43,7 @@ "ARCH", "ARX", "EGARCH", + "FIAPARCH", "FIGARCH", "GARCH", "HARCH", diff --git a/arch/univariate/mean.py b/arch/univariate/mean.py index 8748666236..9c0567aea7 100644 --- a/arch/univariate/mean.py +++ b/arch/univariate/mean.py @@ -63,6 +63,7 @@ APARCH, ARCH, EGARCH, + FIAPARCH, FIGARCH, GARCH, HARCH, @@ -1994,6 +1995,7 @@ def arch_model( known_vol = ( "arch", "figarch", + "fiaparch", "aparch", "garch", "harch", @@ -2035,7 +2037,7 @@ def arch_model( else: # mean == "zero" am = ZeroMean(y, hold_back=hold_back, rescale=rescale) - if vol in ("arch", "garch", "figarch", "egarch", "aparch") and not isinstance( + if vol in ("arch", "garch", "figarch", "fiaparch", "egarch", "aparch") and not isinstance( p, int ): raise TypeError( @@ -2056,6 +2058,9 @@ def arch_model( elif vol_model == "egarch": assert isinstance(p, int) v = EGARCH(p=p, o=o, q=q) + elif vol_model == "fiaparch": + assert isinstance(p, int) + v = FIAPARCH(p=p, o=o, q=q) elif vol_model == "aparch": assert isinstance(p, int) v = APARCH(p=p, o=o, q=q) diff --git a/arch/univariate/recursions.pyi b/arch/univariate/recursions.pyi index f04b5d99c9..7337740a3b 100644 --- a/arch/univariate/recursions.pyi +++ b/arch/univariate/recursions.pyi @@ -69,6 +69,21 @@ def figarch_recursion( backcast: float, var_bounds: Float64Array2D, ) -> Float64Array: ... +def fiaparch_recursion( + parameters: Float64Array1D, + resids: Float64Array1D, + abs_resids: Float64Array1D, + sigma2: Float64Array1D, + sigma_delta: Float64Array1D, + p: int, + q: int, + nobs: int, + trunc_lag: int, + backcast: float, + var_bounds: Float64Array2D, + gamma: float, + delta: float, +) -> Float64Array: ... 
def aparch_recursion( parameters: Float64Array1D, resids: Float64Array1D, @@ -129,6 +144,17 @@ class EWMAUpdater(VolatilityUpdater): class FIGARCHUpdater(VolatilityUpdater): def __init__(self, p: int, q: int, power: float, truncation: int) -> None: ... +class FIAPARCHUpdater(VolatilityUpdater): + def __init__( + self, + p: int, + q: int, + o: int, + truncation: int, + est_delta: bool, + delta: float, + ) -> None: ... + class HARCHUpdater(VolatilityUpdater): def __init__(self, lags: Int32Array) -> None: ... diff --git a/arch/univariate/recursions.pyx b/arch/univariate/recursions.pyx index 702c71414e..f1d3e536a0 100644 --- a/arch/univariate/recursions.pyx +++ b/arch/univariate/recursions.pyx @@ -526,6 +526,42 @@ def figarch_recursion(const double[::1] parameters, return np.asarray(sigma2) +def fiaparch_recursion(const double[::1] parameters, + const double[::1] resids, + const double[::1] abs_resids, + double[::1] sigma2, + double[::1] sigma_delta, + int p, + int q, + int nobs, + int trunc_lag, + double backcast, + double[:, ::1] var_bounds, + double gamma, + double delta): + cdef Py_ssize_t t, i + cdef double bc_weight, omega, beta, omega_tilde, shock + cdef double [::1] lam + + omega = parameters[0] + beta = parameters[1 + p + q] if q else 0.0 + omega_tilde = omega / (1 - beta) + lam = _figarch_weights(parameters[1:], p, q, trunc_lag) + for t in range(nobs): + bc_weight = 0.0 + for i in range(t, trunc_lag): + bc_weight += lam[i] + sigma_delta[t] = omega_tilde + bc_weight * backcast + for i in range(min(t, trunc_lag)): + shock = abs_resids[t - i - 1] - gamma * resids[t - i - 1] + sigma_delta[t] += lam[i] * (shock ** delta) + sigma2[t] = sigma_delta[t] ** (2.0 / delta) + bounds_check(&sigma2[t], &var_bounds[t, 0]) + sigma_delta[t] = sigma2[t] ** (delta / 2.0) + + return np.asarray(sigma2) + + def aparch_recursion(const double[::1] parameters, const double[::1] resids, const double[::1] abs_resids, @@ -985,6 +1021,125 @@ cdef class FIGARCHUpdater(VolatilityUpdater): 
bounds_check(&sigma2[t], &var_bounds[t, 0]) +cdef class FIAPARCHUpdater(VolatilityUpdater): + cdef: + int p, q, o, truncation, est_delta + double delta + double[::1] lam + double[::1] _resids + double[::1] _abs_resids + double[::1] _sigma_delta + double backcast + + def __init__( + self, + int p, + int q, + int o, + int truncation, + bint est_delta, + double delta + ): + self.p = p + self.q = q + self.o = o + self.truncation = truncation + self.est_delta = est_delta + self.delta = delta + self.lam = np.empty(truncation) + self._resids = np.empty(0) + self._abs_resids = np.empty(0) + self._sigma_delta = np.empty(0) + + def __setstate__(self, state): + cdef Py_ssize_t i + cdef double[::1] temp + self.backcast = state[0] + self.delta = state[1] + temp = state[2] + assert self.lam.shape[0] == temp.shape[0] + for i in range(self.truncation): + self.lam[i] = temp[i] + temp = state[3] + self._resids = np.empty(temp.shape[0]) + for i in range(temp.shape[0]): + self._resids[i] = temp[i] + temp = state[4] + self._abs_resids = np.empty(temp.shape[0]) + for i in range(temp.shape[0]): + self._abs_resids[i] = temp[i] + temp = state[5] + self._sigma_delta = np.empty(temp.shape[0]) + for i in range(temp.shape[0]): + self._sigma_delta[i] = temp[i] + + def __reduce__(self): + return ( + FIAPARCHUpdater, + ( + self.p, + self.q, + self.o, + self.truncation, + bool(self.est_delta), + self.delta, + ), + ( + self.backcast, + self.delta, + np.asarray(self.lam), + np.asarray(self._resids), + np.asarray(self._abs_resids), + np.asarray(self._sigma_delta), + ) + ) + + def initialize_update( + self, + const double[::1] parameters, + object backcast, + Py_ssize_t nobs + ): + self.lam = _figarch_weights(parameters[1:], self.p, self.q, self.truncation) + self.backcast = backcast + if self._resids.shape[0] < nobs: + self._resids = np.empty(nobs) + self._abs_resids = np.empty(nobs) + self._sigma_delta = np.empty(nobs) + + cdef void update(self, + Py_ssize_t t, + const double[::1] parameters, + 
const double[::1] resids, + double[::1] sigma2, + const double[:, ::1] var_bounds + ): + cdef Py_ssize_t i + cdef double bc_weight, omega, beta, omega_tilde, shock, gamma, delta + cdef int p = self.p, q = self.q, o = self.o, trunc_lag = self.truncation + + omega = parameters[0] + beta = parameters[1 + p + q] if q else 0.0 + omega_tilde = omega / (1 - beta) + gamma = parameters[2 + p + q] if o else 0.0 + delta = parameters[parameters.shape[0] - 1] if self.est_delta else self.delta + + if t > 0: + self._resids[t-1] = resids[t-1] + self._abs_resids[t-1] = fabs(resids[t-1]) + + bc_weight = 0.0 + for i in range(t, trunc_lag): + bc_weight += self.lam[i] + self._sigma_delta[t] = omega_tilde + bc_weight * self.backcast + for i in range(min(t, trunc_lag)): + shock = self._abs_resids[t - i - 1] - gamma * self._resids[t - i - 1] + self._sigma_delta[t] += self.lam[i] * (shock ** delta) + sigma2[t] = self._sigma_delta[t] ** (2.0 / delta) + bounds_check(&sigma2[t], &var_bounds[t, 0]) + self._sigma_delta[t] = sigma2[t] ** (delta / 2.0) + + cdef class RiskMetrics2006Updater(VolatilityUpdater): cdef: int kmax diff --git a/arch/univariate/recursions_python.py b/arch/univariate/recursions_python.py index d181f35fbd..5e7c90ec1e 100644 --- a/arch/univariate/recursions_python.py +++ b/arch/univariate/recursions_python.py @@ -567,6 +567,81 @@ def figarch_recursion_python( figarch_recursion = jit(figarch_recursion_python, nopython=True) +def fiaparch_recursion_python( + parameters: Float64Array1D, + resids: Float64Array1D, + abs_resids: Float64Array1D, + sigma2: Float64Array1D, + sigma_delta: Float64Array1D, + p: int, + q: int, + nobs: int, + trunc_lag: int, + backcast: float, + var_bounds: Float64Array2D, + gamma: float, + delta: float, +) -> Float64Array: + """ + Parameters + ---------- + parameters : ndarray + Model parameters of the form (omega, phi, d, beta) where omega is the + intercept, d is the fractional integration coefficient and phi and beta + are parameters of the volatility 
process. + resids : ndarray + Residuals to use in the recursion. + abs_resids : ndarray + Absolute value of residuals. + sigma2 : ndarray + Conditional variances with same shape as resids + sigma_delta : ndarray + Conditional variance to the power delta with same shape as resids + p : int + 0 or 1 to indicate whether the model contains phi + q : int + 0 or 1 to indicate whether the model contains beta + nobs : int + Length of resids + trunc_lag : int + Truncation lag for the ARCH approximations + backcast : float + Value to use when initializing the recursion (in sigma^delta scale) + var_bounds : ndarray + nobs by 2-element array of upper and lower bounds for conditional + variances for each time period + gamma : float + Asymmetry parameter + delta : float + Power parameter + + Returns + ------- + sigma2 : ndarray + Conditional variances + """ + omega = parameters[0] + beta = parameters[1 + p + q] if q else 0.0 + omega_tilde = omega / (1 - beta) + lam = figarch_weights(parameters[1:], p, q, trunc_lag) + for t in range(nobs): + bc_weight = 0.0 + for i in range(t, trunc_lag): + bc_weight += lam[i] + sigma_delta[t] = omega_tilde + bc_weight * backcast + for i in range(min(t, trunc_lag)): + shock = abs_resids[t - i - 1] - gamma * resids[t - i - 1] + sigma_delta[t] += lam[i] * (shock**delta) + sigma2[t] = sigma_delta[t] ** (2.0 / delta) + sigma2[t] = bounds_check(sigma2[t], var_bounds[t]) + sigma_delta[t] = sigma2[t] ** (delta / 2.0) + + return sigma2 + + +fiaparch_recursion = jit(fiaparch_recursion_python, nopython=True) + + def aparch_recursion_python( parameters: Float64Array1D, resids: Float64Array1D, @@ -987,6 +1062,75 @@ def update( sigma2[t] = bounds_check(sigma2[t], var_bounds[t]) +class FIAPARCHUpdater(VolatilityUpdater, metaclass=AbstractDocStringInheritor): + def __init__( + self, + p: int, + q: int, + o: int, + truncation: int, + est_delta: bool, + delta: float, + ) -> None: + self.p = p + self.q = q + self.o = o + self.truncation = truncation + 
self.est_delta = est_delta + self.delta = delta + self.lam = np.empty(0) + self.resids = np.empty(0) + self.abs_resids = np.empty(0) + self.sigma_delta = np.empty(0) + + def initialize_update( + self, + parameters: Float64Array1D, + backcast: float | Float64Array1D, + nobs: int, + ) -> None: + self.lam = figarch_weights(parameters[1:], self.p, self.q, self.truncation) + self.backcast = backcast + if self.resids.shape[0] < nobs: + self.resids = np.empty(nobs) + self.abs_resids = np.empty(nobs) + self.sigma_delta = np.empty(nobs) + + def update( + self, + t: int, + parameters: Float64Array1D, + resids: Float64Array1D, + sigma2: Float64Array1D, + var_bounds: Float64Array2D, + ) -> None: + p = self.p + q = self.q + o = self.o + trunc_lag = self.truncation + gamma = parameters[2 + p + q] if o else 0.0 + delta = parameters[-1] if self.est_delta else self.delta + + omega = parameters[0] + beta = parameters[1 + p + q] if q else 0.0 + omega_tilde = omega / (1 - beta) + + if t > 0: + self.resids[t - 1] = resids[t - 1] + self.abs_resids[t - 1] = np.abs(resids[t - 1]) + + bc_weight = 0.0 + for i in range(t, trunc_lag): + bc_weight += self.lam[i] + self.sigma_delta[t] = omega_tilde + bc_weight * self.backcast + for i in range(min(t, trunc_lag)): + shock = self.abs_resids[t - i - 1] - gamma * self.resids[t - i - 1] + self.sigma_delta[t] += self.lam[i] * (shock**delta) + sigma2[t] = self.sigma_delta[t] ** (2.0 / delta) + sigma2[t] = bounds_check(sigma2[t], var_bounds[t]) + self.sigma_delta[t] = sigma2[t] ** (delta / 2.0) + + class RiskMetrics2006Updater(VolatilityUpdater, metaclass=AbstractDocStringInheritor): def __init__( self, diff --git a/arch/univariate/volatility.py b/arch/univariate/volatility.py index e7fd65a22b..d7a0aac117 100644 --- a/arch/univariate/volatility.py +++ b/arch/univariate/volatility.py @@ -42,6 +42,7 @@ __all__ = [ "ARCH", "EGARCH", + "FIAPARCH", "FIGARCH", "GARCH", "HARCH", @@ -3389,6 +3390,501 @@ def _simulation_forecast( return 
VarianceForecast(forecasts, paths, shocks) +class FIAPARCH(VolatilityProcess, metaclass=AbstractDocStringInheritor): + r""" + Fractionally Integrated Asymmetric Power ARCH (FIAPARCH) volatility process + + Parameters + ---------- + p : {0, 1} + Order of the symmetric innovation + o : {0, 1} + Order of the asymmetric innovation + q : {0, 1} + Order of the lagged (transformed) conditional variance + delta : float, optional + Value to use for a fixed delta in the model. If not provided, + the value of delta is jointly estimated with other model parameters. + User provided delta is restricted to lie in (0.05, 4.0). + truncation : int, optional + Truncation point to use in ARCH(:math:`\infty`) representation. + Default is 1000. + + Examples + -------- + >>> from arch.univariate import FIAPARCH + + Standard FIAPARCH + + >>> fiaparch = FIAPARCH() + + Without asymmetry (reduces to FIGARCH-like) + + >>> fi = FIAPARCH(o=0) + + Fixed power parameter + + >>> fiaparch = FIAPARCH(delta=1.0) + + Notes + ----- + In this class of processes, the variance dynamics are + + .. math:: + + \sigma_t^{\delta} = \omega + + [1-\beta L - (1-\phi L)(1-L)^d] + (|\epsilon_{t}| - \gamma \epsilon_{t})^{\delta} + + \beta \sigma_{t-1}^{\delta} + + where ``L`` is the lag operator, ``d`` is the fractional differencing + parameter, and ``gamma`` controls asymmetry. The model is estimated + using the ARCH(:math:`\infty`) representation, + + .. math:: + + \sigma_t^{\delta} = (1-\beta)^{-1} \omega + + \sum_{i=1}^{\infty} \lambda_i + (|\epsilon_{t-i}| - \gamma \epsilon_{t-i})^{\delta} + + The weights :math:`\lambda_i` are identical to those used in FIGARCH. 
+ """ + + def __init__( + self, + p: int = 1, + o: int = 1, + q: int = 1, + delta: float | None = None, + truncation: int = 1000, + ) -> None: + super().__init__() + self.p: int = int(p) + self.o: int = int(o) + self.q: int = int(q) + self._truncation = int(truncation) + self._est_delta = delta is None + self._delta = float(np.nan) + if not self._est_delta: + try: + assert delta is not None + self._delta = float(delta) + except (ValueError, TypeError) as exc: + raise TypeError("delta must be convertible to a float.") from exc + if not 0.05 < delta < 4: + raise ValueError("delta must be between 0.05 and 4") + self._delta = delta + if p < 0 or q < 0 or p > 1 or q > 1: + raise ValueError("p and q must be either 0 or 1.") + if o < 0 or o > 1: + raise ValueError("o must be either 0 or 1.") + if self._truncation <= 0: + raise ValueError("truncation must be a positive integer") + self._num_params = 2 + p + q + o + int(self._est_delta) + self._name = self._generate_name() + delta_init = 2.0 if self._est_delta else self._delta + self._volatility_updater = rec.FIAPARCHUpdater( + p, q, o, self._truncation, self._est_delta, delta_init + ) + self._sigma_delta = np.empty(0) + + @property + def truncation(self) -> int: + """Truncation lag for the ARCH-infinity approximation""" + return self._truncation + + @property + def delta(self) -> float: + """The value of delta in the model. 
NaN if delta is estimated.""" + return self._delta + + def __str__(self) -> str: + descr = self.name + "\n" + descr += f" p: {self.p}, o: {self.o}, q: {self.q}" + if not self._est_delta: + descr += f", delta: {self._delta:0.1f}" + descr += f", truncation: {self.truncation}\n" + return descr + + def _generate_name(self) -> str: + if self.o > 0: + return "FIAPARCH" + else: + return "FI Power ARCH" + + def bounds(self, resids: ArrayLike1D) -> list[tuple[float, float]]: + eps_half = np.sqrt(np.finfo(np.double).eps) + delta = 2.0 if self._est_delta else self._delta + v = max( + float(np.mean(np.absolute(resids) ** delta)), + float(np.mean(resids**2)), + ) + + bounds = [(0.0, 10.0 * float(v))] + bounds.extend([(0.0, 0.5)] * self.p) # phi + bounds.extend([(0.0, 1.0 - eps_half)]) # d + bounds.extend([(0.0, 1.0 - eps_half)] * self.q) # beta + bounds.extend([(-0.9997, 0.9997)] * self.o) # gamma + if self._est_delta: + bounds.append((0.05, 4.0)) + + return bounds + + def constraints(self) -> tuple[Float64Array, Float64Array]: + # omega > 0 + # 0 <= phi <= (1-d)/2 (if p) + # 0 <= d <= 1 + # 0 <= beta <= d+phi (if q) + # -1 < gamma < 1 (if o) + # 0.05 < delta < 4 (if est_delta) + n_figarch = 1 + self.p + 1 + self.q # omega, [phi], d, [beta] + n_gamma = self.o + n_delta = int(self._est_delta) + n_params = n_figarch + n_gamma + n_delta + + # FIGARCH constraints first (identical to FIGARCH.constraints()) + a_fig = np.array( + [ + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, -2, -1, 0], + [0, 0, 1, 0], + [0, 0, -1, 0], + [0, 0, 0, 1], + [0, 1, 1, -1], + ] + ) + b_fig = np.array([0, 0, -1, 0, -1, 0, 0]) + if not self.q: + a_fig = a_fig[:-2, :-1] + b_fig = b_fig[:-2] + if not self.p: + a_fig = np.delete(a_fig, (1,), axis=1) + a_fig = np.delete(a_fig, (1, 2), axis=0) + b_fig = np.delete(b_fig, (1, 2)) + + n_fig_constraints = a_fig.shape[0] + n_gamma_constraints = 2 * n_gamma # gamma > -0.9997, gamma < 0.9997 + n_delta_constraints = 2 * n_delta # delta > 0.05, delta < 4 + n_total_constraints = 
n_fig_constraints + n_gamma_constraints + n_delta_constraints + + a = np.zeros((n_total_constraints, n_params)) + b = np.zeros(n_total_constraints) + + # FIGARCH block + a[:n_fig_constraints, :n_figarch] = a_fig + b[:n_fig_constraints] = b_fig + + row = n_fig_constraints + col = n_figarch + + # Gamma constraints: gamma > -0.9997, gamma < 0.9997 + for i in range(n_gamma): + a[row, col + i] = 1.0 + b[row] = -0.9997 + row += 1 + a[row, col + i] = -1.0 + b[row] = -0.9997 + row += 1 + col += n_gamma + + # Delta constraints: delta > 0.05, delta < 4 + for i in range(n_delta): + a[row, col + i] = 1.0 + b[row] = 0.05 + row += 1 + a[row, col + i] = -1.0 + b[row] = -4.0 + row += 1 + + return a, b + + def compute_variance( + self, + parameters: Float64Array1D, + resids: ArrayLike1D, + sigma2: Float64Array1D, + backcast: float | Float64Array1D, + var_bounds: Float64Array2D, + ) -> Float64Array1D: + nobs = resids.shape[0] + abs_resids = np.absolute(resids) + if self._sigma_delta.shape[0] != nobs: + self._sigma_delta = np.empty(nobs) + sigma_delta = self._sigma_delta + + p, o, q = self.p, self.o, self.q + truncation = self.truncation + + gamma = parameters[1 + p + 1 + q] if o else 0.0 + if self._est_delta: + delta = parameters[-1] + else: + delta = self._delta + + fig_params = parameters[: 1 + p + 1 + q] + + rec.fiaparch_recursion( + fig_params, + resids, + abs_resids, + sigma2, + sigma_delta, + p, + q, + nobs, + truncation, + backcast, + var_bounds, + gamma, + delta, + ) + + return sigma2 + + def backcast_transform( + self, backcast: float | Float64Array1D + ) -> float | Float64Array1D: + backcast = super().backcast_transform(backcast) + delta = 2.0 if self._est_delta else self._delta + _backcast = np.sqrt(backcast) ** delta + if np.isscalar(_backcast): + return float(cast("np.float64", _backcast)) + else: + return to_array_1d(_backcast) + + def backcast(self, resids: ArrayLike1D) -> float | Float64Array1D: + delta = 2.0 if self._est_delta else self._delta + tau = min(75, 
resids.shape[0]) + w = 0.94 ** np.arange(tau) + w = w / sum(w) + backcast = float( + np.sum((np.absolute(resids[:tau]) ** delta) * w) + ) + return backcast + + def simulate( + self, + parameters: Sequence[int | float] | ArrayLike1D, + nobs: int, + rng: RNGType, + burn: int = 500, + initial_value: float | Float64Array | None = None, + ) -> tuple[Float64Array, Float64Array]: + parameters = ensure1d(parameters, "parameters", False) + truncation = self.truncation + p, o, q = self.p, self.o, self.q + + fig_params = parameters[: 1 + p + 1 + q] + gamma = float(parameters[1 + p + 1 + q]) if o else 0.0 + if self._est_delta: + delta = float(parameters[-1]) + else: + delta = self._delta + + lam = rec.figarch_weights(fig_params[1:], p, q, truncation) + lam_rev = lam[::-1] + errors = rng(truncation + nobs + burn) + + beta = float(fig_params[-1]) if q else 0.0 + + if initial_value is None: + persistence = np.sum(lam) + initial_value = float(fig_params[0]) + if beta < 1: + initial_value /= 1 - beta + if persistence < 1: + initial_value /= 1 - persistence + if persistence >= 1.0 or beta >= 1.0: + warn(initial_value_warning, InitialValueWarning, stacklevel=2) + assert initial_value is not None + sigma2 = np.empty(truncation + nobs + burn) + data = np.empty(truncation + nobs + burn) + sigma_delta_arr = np.empty(truncation + nobs + burn) + + sigma_delta_arr[:truncation] = initial_value + sigma2[:truncation] = initial_value ** (2.0 / delta) + data[:truncation] = np.sqrt(sigma2[:truncation]) * errors[:truncation] + + omega = float(fig_params[0]) + if beta < 1: + omega_tilde = omega / (1 - beta) + else: + warn( + "beta >= 1.0, using omega as intercept since long-run variance " + "is ill-defined.", + ValueWarning, + stacklevel=2, + ) + omega_tilde = omega + + for t in range(truncation, truncation + nobs + burn): + fshocks = np.empty(truncation) + for i in range(truncation): + shock = abs(data[t - 1 - i]) - gamma * data[t - 1 - i] + fshocks[i] = shock**delta + sigma_delta_arr[t] = 
omega_tilde + lam_rev.dot(fshocks[::-1]) + sigma2[t] = sigma_delta_arr[t] ** (2.0 / delta) + data[t] = errors[t] * np.sqrt(sigma2[t]) + + return data[truncation + burn :], sigma2[truncation + burn :] + + def starting_values(self, resids: ArrayLike1D) -> Float64Array1D: + truncation = self.truncation + p, o, q = self.p, self.o, self.q + ds = [0.2, 0.5, 0.7] + phi_ratio = [0.2, 0.5, 0.8] if p else [0] + beta_ratio = [0.1, 0.5, 0.9] if q else [0] + gammas = [-0.5, 0.0, 0.5] if o else [0.0] + deltas = [0.5, 1.2, 2.0] if self._est_delta else [self._delta] + + all_starting_vals = [] + for d in ds: + for pr in phi_ratio: + phi = (1 - d) / 2 * pr + for br in beta_ratio: + beta = (d + phi) * br + for gam in gammas: + for delt in deltas: + target = np.mean(np.absolute(resids) ** delt) + scale = np.mean(resids**2) / (target ** (2.0 / delt)) + target *= scale ** (delt / 2) + temp = [phi, d, beta] + lam = rec.figarch_weights( + np.array(temp), 1, 1, truncation + ) + omega = (1 - beta) * target * (1 - np.sum(lam)) + sv = [omega] + if p: + sv.append(phi) + sv.append(d) + if q: + sv.append(beta) + if o: + sv.append(gam) + if self._est_delta: + sv.append(delt) + all_starting_vals.append(tuple(sv)) + + distinct_svs = list(set(all_starting_vals)) + starting_vals = np.array(distinct_svs) + + var_bounds = self.variance_bounds(resids) + backcast = self.backcast(resids) + llfs = np.zeros(len(starting_vals)) + for i, sv in enumerate(starting_vals): + llfs[i] = self._gaussian_loglikelihood(sv, resids, backcast, var_bounds) + loc = np.argmax(llfs) + + return starting_vals[int(loc)] + + def parameter_names(self) -> list[str]: + names = ["omega"] + if self.p: + names += ["phi"] + names += ["d"] + if self.q: + names += ["beta"] + if self.o: + names += ["gamma"] + if self._est_delta: + names += ["delta"] + return names + + def variance_bounds( + self, resids: ArrayLike1D, power: float = 2.0 + ) -> Float64Array2D: + return super().variance_bounds(resids, power) + + def _check_forecasting_method( 
+        self, method: ForecastingMethod, horizon: int
+    ) -> None:
+        if horizon == 1:
+            return
+        if method == "analytic":
+            raise ValueError("Analytic forecasts not available for horizon > 1")
+        return
+
+    def _analytic_forecast(
+        self,
+        parameters: Float64Array1D,
+        resids: ArrayLike1D,
+        backcast: float | Float64Array1D,
+        var_bounds: Float64Array2D,
+        start: int,
+        horizon: int,
+    ) -> VarianceForecast:
+        _, forecasts = self._one_step_forecast(
+            parameters, to_array_1d(resids), backcast, var_bounds, horizon, start
+        )
+
+        return VarianceForecast(forecasts)
+
+    def _simulation_forecast(
+        self,
+        parameters: Float64Array1D,
+        resids: ArrayLike1D,
+        backcast: float | Float64Array1D,
+        var_bounds: Float64Array2D,
+        start: int,
+        horizon: int,
+        simulations: int,
+        rng: RNGType,
+    ) -> VarianceForecast:
+        sigma2, forecasts = self._one_step_forecast(
+            parameters, to_array_1d(resids), backcast, var_bounds, horizon, start
+        )
+        t = resids.shape[0]
+        paths = np.empty((t - start, simulations, horizon))
+        shocks = np.empty((t - start, simulations, horizon))
+
+        p, o, q = self.p, self.o, self.q
+        truncation = self.truncation
+
+        gamma = float(parameters[1 + p + 1 + q]) if o else 0.0  # gamma follows [omega, phi?, d, beta?]
+        if self._est_delta:
+            delta = float(parameters[-1])  # estimated delta is always the final parameter
+        else:
+            delta = self._delta
+
+        fig_params = parameters[: 1 + p + 1 + q]
+        lam = rec.figarch_weights(fig_params[1:], p, q, truncation)
+        lam_rev = lam[::-1]  # reversed so a contiguous slice of the history aligns with the weights
+        omega = float(fig_params[0])
+        beta = float(fig_params[-1]) if q else 0.0
+        omega_tilde = omega / (1 - beta)  # intercept of the truncated ARCH(inf) representation
+
+        abs_resids_full = np.absolute(resids)
+        fshocks_hist = np.empty(resids.shape[0])
+        for i in range(resids.shape[0]):
+            fshocks_hist[i] = (abs_resids_full[i] - gamma * resids[i]) ** delta  # asymmetric power shock
+
+        fpath = np.empty((simulations, truncation + horizon))  # rolling delta-power shock buffer per path
+
+        for i in range(start, t):
+            std_shocks = rng((simulations, horizon))
+            available = i + 1 - max(0, i - truncation + 1)  # = min(i + 1, truncation) observed shocks
+            fpath[:, truncation - available : truncation] = fshocks_hist[
+                max(0, i + 1 - truncation) : i + 1
+            ]
+            if
available < truncation: + fpath[:, : (truncation - available)] = backcast + for h in range(horizon): + lagged_fshocks = fpath[:, h : truncation + h] + temp_sigma_delta = omega_tilde + lagged_fshocks.dot(lam_rev) + sigma2_h = temp_sigma_delta ** (2.0 / delta) + path_loc = i - start + shocks[path_loc, :, h] = std_shocks[:, h] * np.sqrt(sigma2_h) + paths[path_loc, :, h] = sigma2_h + forecasts[path_loc, h] = sigma2_h.mean() + new_shocks = np.absolute(shocks[path_loc, :, h]) + fpath[:, truncation + h] = ( + new_shocks - gamma * shocks[path_loc, :, h] + ) ** delta + + return VarianceForecast(forecasts, paths, shocks) + + class APARCH(VolatilityProcess, metaclass=AbstractDocStringInheritor): r""" Asymmetric Power ARCH (APARCH) volatility process From 7beb3c550c4ebcd206406309283f4d15480a35d8 Mon Sep 17 00:00:00 2001 From: gjunjie Date: Thu, 2 Apr 2026 21:35:15 -0400 Subject: [PATCH 2/8] Remove unused variables in FIAPARCH forecast and tests Made-with: Cursor --- arch/tests/univariate/test_volatility.py | 2 +- arch/univariate/volatility.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/tests/univariate/test_volatility.py b/arch/tests/univariate/test_volatility.py index 6c10f95ebe..5496457521 100644 --- a/arch/tests/univariate/test_volatility.py +++ b/arch/tests/univariate/test_volatility.py @@ -1972,7 +1972,7 @@ def test_fiaparch_no_phi(setup): # omega, d, beta, gamma, delta assert len(bounds) == fiaparch.num_params - a, b = fiaparch.constraints() + a, _ = fiaparch.constraints() # No phi => FIGARCH block shrinks (5 rows), gamma (2 rows), delta (2 rows) = 9 assert a.shape[1] == 5 assert a.shape[0] == 9 diff --git a/arch/univariate/volatility.py b/arch/univariate/volatility.py index d7a0aac117..b30db01604 100644 --- a/arch/univariate/volatility.py +++ b/arch/univariate/volatility.py @@ -3831,7 +3831,7 @@ def _simulation_forecast( simulations: int, rng: RNGType, ) -> VarianceForecast: - sigma2, forecasts = self._one_step_forecast( + _, forecasts = 
self._one_step_forecast( parameters, to_array_1d(resids), backcast, var_bounds, horizon, start ) t = resids.shape[0] From 50b715fce95758f0d9bad650f10d89a66686229f Mon Sep 17 00:00:00 2001 From: gjunjie Date: Fri, 3 Apr 2026 10:27:25 -0400 Subject: [PATCH 3/8] Add FIAPARCH to arch_model and fix bugs, expand tests - Add FIAPARCH to arch_model vol parameter type hint and docstring - Fix delta assignment bug in FIAPARCH.__init__ (used raw delta instead of self._delta for range check, then reassigned) - Fix arch_model using `vol` instead of `vol_model` for p type check - Parametrize updater-vs-recursion test over o and delta - Add tests for no-beta/no-asym compute_variance, simulation, backcast_transform, fixed-delta str, and high-persistence warnings - Include FIAPARCH in forecast test volatilities list Made-with: Cursor --- arch/tests/univariate/test_forecast.py | 2 + arch/tests/univariate/test_mean.py | 5 ++ .../univariate/test_variance_forecasting.py | 80 +++++++++++++++++ arch/tests/univariate/test_volatility.py | 86 +++++++++++++++++-- arch/univariate/mean.py | 9 +- arch/univariate/volatility.py | 3 +- 6 files changed, 175 insertions(+), 10 deletions(-) diff --git a/arch/tests/univariate/test_forecast.py b/arch/tests/univariate/test_forecast.py index 0935a5c53f..961f152b25 100644 --- a/arch/tests/univariate/test_forecast.py +++ b/arch/tests/univariate/test_forecast.py @@ -14,6 +14,7 @@ APARCH, ARX, EGARCH, + FIAPARCH, FIGARCH, GARCH, HARCH, @@ -42,6 +43,7 @@ ConstantVariance(), GARCH(), FIGARCH(), + FIAPARCH(), EWMAVariance(lam=0.94), MIDASHyperbolic(), HARCH(lags=[1, 5, 22]), diff --git a/arch/tests/univariate/test_mean.py b/arch/tests/univariate/test_mean.py index 81668b7c48..e9b0caae81 100644 --- a/arch/tests/univariate/test_mean.py +++ b/arch/tests/univariate/test_mean.py @@ -1358,6 +1358,11 @@ def test_invalid_vol_dist(): ConstantMean(SP500, distribution="Skew-t") +def test_fiaparch_non_int_p(): + with pytest.raises(TypeError, match=r"p must be a scalar 
int"): + arch_model(SP500, vol="fiaparch", p=[1, 2]) + + def test_param_cov(): mod = ConstantMean(SP500) res = mod.fit(disp="off") diff --git a/arch/tests/univariate/test_variance_forecasting.py b/arch/tests/univariate/test_variance_forecasting.py index 6c661bc817..814ab1e581 100644 --- a/arch/tests/univariate/test_variance_forecasting.py +++ b/arch/tests/univariate/test_variance_forecasting.py @@ -16,6 +16,7 @@ from arch.univariate.volatility import ( APARCH, EGARCH, + FIAPARCH, FIGARCH, GARCH, HARCH, @@ -1930,6 +1931,85 @@ def test_aparch_simulation_smoke(self, o, delta): method="analytic", ) + def test_fiaparch_one_step(self): + trunc = 50 + vol = FIAPARCH(truncation=trunc) + resids = self.resid + backcast = vol.backcast(resids) + var_bounds = vol.variance_bounds(resids) + params = np.array([0.1, 0.2, 0.4, 0.2, -0.3, 1.5]) + sigma2 = np.empty_like(resids) + vol.compute_variance(params, resids, sigma2, backcast, var_bounds) + forecast = vol.forecast( + params, resids, backcast, var_bounds, horizon=1, start=0 + ) + assert_allclose(sigma2[1:], forecast.forecasts[:-1, 0]) + + delta = 1.5 + vol_fixed = FIAPARCH(truncation=trunc, delta=delta) + params_fixed = np.array([0.1, 0.2, 0.4, 0.2, -0.3]) + sigma2_f = np.empty_like(resids) + vol_fixed.compute_variance( + params_fixed, resids, sigma2_f, backcast, var_bounds + ) + forecast_f = vol_fixed.forecast( + params_fixed, resids, backcast, var_bounds, horizon=1, start=0 + ) + assert_allclose(sigma2_f[1:], forecast_f.forecasts[:-1, 0]) + + with pytest.raises( + ValueError, match=r"Analytic forecasts not available for horizon" + ): + vol.forecast( + params, resids, backcast, var_bounds, horizon=2, method="analytic" + ) + + @pytest.mark.parametrize("o", [0, 1]) + @pytest.mark.parametrize("delta", [None, 1.5]) + def test_fiaparch_simulation_smoke(self, o, delta): + dist = Normal(seed=self.rng) + rng = dist.simulate([]) + trunc = 50 + vol = FIAPARCH(o=o, delta=delta, truncation=trunc) + resids = self.resid + backcast = 
vol.backcast(resids) + var_bounds = vol.variance_bounds(resids) + params = [0.1, 0.2, 0.4, 0.2] + if o == 0: + params = [0.1, 0.2, 0.4, 0.2] + else: + params = [0.1, 0.2, 0.4, 0.2, -0.3] + if delta is None: + params = np.array(params + [1.5]) + else: + params = np.array(params) + sigma2 = np.empty_like(resids) + vol.compute_variance(params, resids, sigma2, backcast, var_bounds) + forecast = vol.forecast( + params, + resids, + backcast, + var_bounds, + horizon=10, + start=0, + method="simulation", + rng=rng, + simulations=100, + ) + assert_allclose(sigma2[1:], forecast.forecasts[:-1, 0]) + assert forecast.forecast_paths is not None + assert forecast.shocks is not None + with pytest.raises(ValueError, match=r"Analytic forecasts not"): + vol.forecast( + params, + resids, + backcast, + var_bounds, + horizon=10, + start=0, + method="analytic", + ) + def test_midas_analytical(self): vol = MIDASHyperbolic() resids = self.resid diff --git a/arch/tests/univariate/test_volatility.py b/arch/tests/univariate/test_volatility.py index 5496457521..d4ebec69cd 100644 --- a/arch/tests/univariate/test_volatility.py +++ b/arch/tests/univariate/test_volatility.py @@ -1943,9 +1943,16 @@ def test_fiaparch(setup): assert_equal(fiaparch.truncation, trunc_lag) -def test_fiaparch_updater_matches_recursion(setup): - fiaparch = FIAPARCH(truncation=300) - parameters = np.array([0.1, 0.2, 0.4, 0.2, 0.8, 1.3]) +@pytest.mark.parametrize("o", [0, 1]) +@pytest.mark.parametrize("delta", [None, 1.5]) +def test_fiaparch_updater_matches_recursion(setup, o, delta): + fiaparch = FIAPARCH(truncation=300, o=o, delta=delta) + params = [0.1, 0.2, 0.4, 0.2] + if o: + params.append(0.8) + if delta is None: + params.append(1.3) + parameters = np.array(params) backcast = fiaparch.backcast(setup.resids) var_bounds = fiaparch.variance_bounds(setup.resids) @@ -1988,11 +1995,19 @@ def test_fiaparch_no_beta(setup): # omega, phi, d, gamma, delta assert len(bounds) == fiaparch.num_params - a, b = fiaparch.constraints() 
+ a, _ = fiaparch.constraints() # No beta => FIGARCH block shrinks (5 rows), gamma (2 rows), delta (2 rows) = 9 assert a.shape[1] == 5 assert a.shape[0] == 9 + backcast = fiaparch.backcast(setup.resids) + var_bounds = fiaparch.variance_bounds(setup.resids) + # omega, phi, d, gamma, delta (no beta) + parameters = np.array([1.0, 0.2, 0.4, -0.3, 1.5]) + sigma2 = np.zeros_like(setup.sigma2) + fiaparch.compute_variance(parameters, setup.resids, sigma2, backcast, var_bounds) + assert np.all(np.isfinite(sigma2)) + def test_fiaparch_no_asym(setup): fiaparch = FIAPARCH(o=0) @@ -2010,6 +2025,14 @@ def test_fiaparch_no_asym(setup): assert_equal(fiaparch.name, "FI Power ARCH") + backcast = fiaparch.backcast(setup.resids) + var_bounds = fiaparch.variance_bounds(setup.resids) + # omega, phi, d, beta, delta (no gamma) + parameters = np.array([1.0, 0.2, 0.4, 0.2, 1.5]) + sigma2 = np.zeros_like(setup.sigma2) + fiaparch.compute_variance(parameters, setup.resids, sigma2, backcast, var_bounds) + assert np.all(np.isfinite(sigma2)) + def test_fiaparch_fixed_delta(setup): delta_val = 1.5 @@ -2029,7 +2052,7 @@ def test_fiaparch_fixed_delta(setup): assert "delta" not in names assert_equal(fiaparch.delta, delta_val) - a, b = fiaparch.constraints() + a, _ = fiaparch.constraints() # FIGARCH (7 rows) + gamma (2 rows) + no delta => 9 assert a.shape == (9, 5) @@ -2061,6 +2084,59 @@ def test_fiaparch_fixed_delta(setup): ) assert_allclose(setup.sigma2, cond_var_direct) + rng = Normal(seed=RandomState()) + sim_data = fiaparch.simulate(parameters, setup.t, rng.simulate([])) + assert sim_data[0].shape[0] == setup.t + assert sim_data[1].shape[0] == setup.t + + +def test_fiaparch_str_fixed_delta(setup): + fiaparch = FIAPARCH(delta=1.5) + s = str(fiaparch) + assert "delta: 1.5" in s + + +def test_fiaparch_simulate_initial_value(setup): + fiaparch = FIAPARCH(truncation=100) + rng = Normal(seed=RandomState()) + parameters = np.array([1.0, 0.2, 0.4, 0.2, -0.3, 1.5]) + sim_data = fiaparch.simulate( + 
parameters, setup.t, rng.simulate([]), initial_value=5.0 + ) + assert sim_data[0].shape[0] == setup.t + assert sim_data[1].shape[0] == setup.t + + +def test_fiaparch_simulate_high_persistence(setup): + fiaparch = FIAPARCH(truncation=100) + rng = Normal(seed=RandomState()) + parameters = np.array([0.1, 0.2, 1.01, 0.2, -0.3, 1.5]) + with pytest.warns(InitialValueWarning, match=r"Parameters are not consistent"): + fiaparch.simulate(parameters, 20, rng.simulate([])) + + +def test_fiaparch_simulate_high_beta(setup): + fiaparch = FIAPARCH(truncation=100) + rng = Normal(seed=RandomState()) + parameters = np.array([0.1, 0.0, 0.4, 1.005, -0.3, 1.5]) + with pytest.warns(InitialValueWarning, match=r"Parameters are not consistent"): + with pytest.warns(ValueWarning, match=r"beta >= 1"): + fiaparch.simulate(parameters, 20, rng.simulate([])) + + +def test_fiaparch_backcast_transform(setup): + fiaparch = FIAPARCH() + backcast = fiaparch.backcast(setup.resids) + result = fiaparch.backcast_transform(backcast) + assert np.isscalar(result) + delta = 2.0 + expected = float(np.sqrt(backcast) ** delta) + assert_allclose(result, expected) + + result_arr = fiaparch.backcast_transform(np.array([backcast, backcast])) + assert result_arr.shape == (2,) + assert_allclose(result_arr, np.array([expected, expected])) + def test_fiaparch_errors(setup): with pytest.raises(ValueError, match=r"truncation must be a positive integer"): diff --git a/arch/univariate/mean.py b/arch/univariate/mean.py index 9c0567aea7..78d898503f 100644 --- a/arch/univariate/mean.py +++ b/arch/univariate/mean.py @@ -1892,7 +1892,9 @@ def arch_model( "Constant", "Zero", "LS", "AR", "ARX", "HAR", "HARX", "constant", "zero" ] = "Constant", lags: int | list[int] | Int32Array | Int64Array | None = 0, - vol: Literal["GARCH", "ARCH", "EGARCH", "FIGARCH", "APARCH", "HARCH"] = "GARCH", + vol: Literal[ + "GARCH", "ARCH", "EGARCH", "FIGARCH", "FIAPARCH", "APARCH", "HARCH" + ] = "GARCH", p: int | list[int] = 1, o: int = 0, q: int = 1, 
@@ -1928,7 +1930,8 @@ def arch_model( integers specifying lag locations. vol : str, optional Name of the volatility model. Currently supported options are: - 'GARCH' (default), 'ARCH', 'EGARCH', 'FIGARCH', 'APARCH' and 'HARCH' + 'GARCH' (default), 'ARCH', 'EGARCH', 'FIGARCH', 'FIAPARCH', 'APARCH' + and 'HARCH' p : int, optional Lag order of the symmetric innovation o : int, optional @@ -2037,7 +2040,7 @@ def arch_model( else: # mean == "zero" am = ZeroMean(y, hold_back=hold_back, rescale=rescale) - if vol in ("arch", "garch", "figarch", "fiaparch", "egarch", "aparch") and not isinstance( + if vol_model in ("arch", "garch", "figarch", "fiaparch", "egarch", "aparch") and not isinstance( p, int ): raise TypeError( diff --git a/arch/univariate/volatility.py b/arch/univariate/volatility.py index b30db01604..938e9d9378 100644 --- a/arch/univariate/volatility.py +++ b/arch/univariate/volatility.py @@ -3471,9 +3471,8 @@ def __init__( self._delta = float(delta) except (ValueError, TypeError) as exc: raise TypeError("delta must be convertible to a float.") from exc - if not 0.05 < delta < 4: + if not 0.05 < self._delta < 4: raise ValueError("delta must be between 0.05 and 4") - self._delta = delta if p < 0 or q < 0 or p > 1 or q > 1: raise ValueError("p and q must be either 0 or 1.") if o < 0 or o > 1: From 11d18abdd6bea2ff708a40f6bffe0d28ab40672a Mon Sep 17 00:00:00 2001 From: gjunjie Date: Fri, 3 Apr 2026 13:44:59 -0400 Subject: [PATCH 4/8] Fix arch_model vol check and expand FIAPARCH reduced-order tests Fix bug using vol_model instead of vol in arch_model type check. Add tests for FIAPARCH with reduced p, o, q combinations including forecasting, simulation, parameter names, and backcast_transform. 
Made-with: Cursor --- arch/tests/univariate/test_mean.py | 12 +++ .../univariate/test_variance_forecasting.py | 56 +++++++++++++ arch/tests/univariate/test_volatility.py | 84 +++++++++++++++++++ arch/univariate/mean.py | 2 +- 4 files changed, 153 insertions(+), 1 deletion(-) diff --git a/arch/tests/univariate/test_mean.py b/arch/tests/univariate/test_mean.py index e9b0caae81..004eda15ee 100644 --- a/arch/tests/univariate/test_mean.py +++ b/arch/tests/univariate/test_mean.py @@ -1363,6 +1363,18 @@ def test_fiaparch_non_int_p(): arch_model(SP500, vol="fiaparch", p=[1, 2]) +def test_fiaparch_arch_model_kwargs(): + am = arch_model(SP500, vol="fiaparch", o=0) + assert isinstance(am.volatility, FIAPARCH) + assert am.volatility.o == 0 + assert am.volatility.name == "FI Power ARCH" + + am2 = arch_model(SP500, vol="FIAPARCH", p=0, q=0) + assert isinstance(am2.volatility, FIAPARCH) + assert am2.volatility.p == 0 + assert am2.volatility.q == 0 + + def test_param_cov(): mod = ConstantMean(SP500) res = mod.fit(disp="off") diff --git a/arch/tests/univariate/test_variance_forecasting.py b/arch/tests/univariate/test_variance_forecasting.py index 814ab1e581..34a15be220 100644 --- a/arch/tests/univariate/test_variance_forecasting.py +++ b/arch/tests/univariate/test_variance_forecasting.py @@ -2010,6 +2010,62 @@ def test_fiaparch_simulation_smoke(self, o, delta): method="analytic", ) + @pytest.mark.parametrize("p,q", [(0, 1), (1, 0), (0, 0)]) + def test_fiaparch_one_step_reduced(self, p, q): + trunc = 50 + vol = FIAPARCH(p=p, q=q, truncation=trunc) + resids = self.resid + backcast = vol.backcast(resids) + var_bounds = vol.variance_bounds(resids) + params = [0.1] + if p: + params.append(0.2) + params.append(0.4) + if q: + params.append(0.2) + params.extend([-0.3, 1.5]) + params = np.array(params) + sigma2 = np.empty_like(resids) + vol.compute_variance(params, resids, sigma2, backcast, var_bounds) + forecast = vol.forecast( + params, resids, backcast, var_bounds, horizon=1, start=0 + ) 
+ assert_allclose(sigma2[1:], forecast.forecasts[:-1, 0]) + + @pytest.mark.parametrize("p,q", [(0, 1), (1, 0), (0, 0)]) + def test_fiaparch_simulation_reduced(self, p, q): + dist = Normal(seed=self.rng) + rng = dist.simulate([]) + trunc = 50 + vol = FIAPARCH(p=p, q=q, truncation=trunc) + resids = self.resid + backcast = vol.backcast(resids) + var_bounds = vol.variance_bounds(resids) + params = [0.1] + if p: + params.append(0.2) + params.append(0.4) + if q: + params.append(0.2) + params.extend([-0.3, 1.5]) + params = np.array(params) + sigma2 = np.empty_like(resids) + vol.compute_variance(params, resids, sigma2, backcast, var_bounds) + forecast = vol.forecast( + params, + resids, + backcast, + var_bounds, + horizon=10, + start=0, + method="simulation", + rng=rng, + simulations=100, + ) + assert_allclose(sigma2[1:], forecast.forecasts[:-1, 0]) + assert forecast.forecast_paths is not None + assert forecast.shocks is not None + def test_midas_analytical(self): vol = MIDASHyperbolic() resids = self.resid diff --git a/arch/tests/univariate/test_volatility.py b/arch/tests/univariate/test_volatility.py index d4ebec69cd..faa777dd36 100644 --- a/arch/tests/univariate/test_volatility.py +++ b/arch/tests/univariate/test_volatility.py @@ -1979,11 +1979,23 @@ def test_fiaparch_no_phi(setup): # omega, d, beta, gamma, delta assert len(bounds) == fiaparch.num_params + names = fiaparch.parameter_names() + assert "phi" not in names + assert names == ["omega", "d", "beta", "gamma", "delta"] + a, _ = fiaparch.constraints() # No phi => FIGARCH block shrinks (5 rows), gamma (2 rows), delta (2 rows) = 9 assert a.shape[1] == 5 assert a.shape[0] == 9 + backcast = fiaparch.backcast(setup.resids) + var_bounds = fiaparch.variance_bounds(setup.resids) + # omega, d, beta, gamma, delta (no phi) + parameters = np.array([1.0, 0.4, 0.2, -0.3, 1.5]) + sigma2 = np.zeros_like(setup.sigma2) + fiaparch.compute_variance(parameters, setup.resids, sigma2, backcast, var_bounds) + assert 
np.all(np.isfinite(sigma2)) + def test_fiaparch_no_beta(setup): fiaparch = FIAPARCH(q=0) @@ -1995,6 +2007,10 @@ def test_fiaparch_no_beta(setup): # omega, phi, d, gamma, delta assert len(bounds) == fiaparch.num_params + names = fiaparch.parameter_names() + assert "beta" not in names + assert names == ["omega", "phi", "d", "gamma", "delta"] + a, _ = fiaparch.constraints() # No beta => FIGARCH block shrinks (5 rows), gamma (2 rows), delta (2 rows) = 9 assert a.shape[1] == 5 @@ -2022,6 +2038,7 @@ def test_fiaparch_no_asym(setup): names = fiaparch.parameter_names() assert "gamma" not in names + assert names == ["omega", "phi", "d", "beta", "delta"] assert_equal(fiaparch.name, "FI Power ARCH") @@ -2124,6 +2141,30 @@ def test_fiaparch_simulate_high_beta(setup): fiaparch.simulate(parameters, 20, rng.simulate([])) +@pytest.mark.parametrize( + "p,o,q", + [(0, 1, 1), (1, 0, 1), (1, 1, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (0, 0, 0)], +) +def test_fiaparch_simulate_reduced(setup, p, o, q): + fiaparch = FIAPARCH(p=p, o=o, q=q, truncation=100) + rng = Normal(seed=RandomState()) + params = [1.0] + if p: + params.append(0.2) + params.append(0.4) + if q: + params.append(0.2) + if o: + params.append(-0.3) + params.append(1.5) + parameters = np.array(params) + sim_data = fiaparch.simulate(parameters, setup.t, rng.simulate([])) + assert sim_data[0].shape[0] == setup.t + assert sim_data[1].shape[0] == setup.t + assert np.all(np.isfinite(sim_data[0])) + assert np.all(np.isfinite(sim_data[1])) + + def test_fiaparch_backcast_transform(setup): fiaparch = FIAPARCH() backcast = fiaparch.backcast(setup.resids) @@ -2137,6 +2178,48 @@ def test_fiaparch_backcast_transform(setup): assert result_arr.shape == (2,) assert_allclose(result_arr, np.array([expected, expected])) + fiaparch_fd = FIAPARCH(delta=1.5) + backcast_fd = fiaparch_fd.backcast(setup.resids) + result_fd = fiaparch_fd.backcast_transform(backcast_fd) + assert np.isscalar(result_fd) + expected_fd = float(np.sqrt(backcast_fd) ** 1.5) + 
assert_allclose(result_fd, expected_fd) + + result_fd_arr = fiaparch_fd.backcast_transform( + np.array([backcast_fd, backcast_fd]) + ) + assert result_fd_arr.shape == (2,) + assert_allclose(result_fd_arr, np.array([expected_fd, expected_fd])) + + +def test_fiaparch_minimal(setup): + fiaparch = FIAPARCH(p=0, q=0) + + assert fiaparch.num_params == 4 # omega, d, gamma, delta + + names = fiaparch.parameter_names() + assert names == ["omega", "d", "gamma", "delta"] + + bounds = fiaparch.bounds(setup.resids) + assert len(bounds) == 4 + + a, b = fiaparch.constraints() + # FIGARCH block: omega > 0, d > 0, d < 1 => 3 rows + # gamma: 2 rows, delta: 2 rows => total 7 + assert a.shape == (7, 4) + assert b.shape == (7,) + + sv = fiaparch.starting_values(setup.resids) + assert sv.shape[0] == 4 + + backcast = fiaparch.backcast(setup.resids) + var_bounds = fiaparch.variance_bounds(setup.resids) + # omega, d, gamma, delta + parameters = np.array([1.0, 0.4, -0.3, 1.5]) + sigma2 = np.zeros_like(setup.sigma2) + fiaparch.compute_variance(parameters, setup.resids, sigma2, backcast, var_bounds) + assert np.all(np.isfinite(sigma2)) + def test_fiaparch_errors(setup): with pytest.raises(ValueError, match=r"truncation must be a positive integer"): @@ -2165,3 +2248,4 @@ def test_fiaparch_str(setup, p, o, q): assert f"q: {q}" in s assert f"p: {p}" in s assert f"o: {o}" in s + assert "delta:" not in s diff --git a/arch/univariate/mean.py b/arch/univariate/mean.py index 78d898503f..0a2fffc41b 100644 --- a/arch/univariate/mean.py +++ b/arch/univariate/mean.py @@ -2040,7 +2040,7 @@ def arch_model( else: # mean == "zero" am = ZeroMean(y, hold_back=hold_back, rescale=rescale) - if vol_model in ("arch", "garch", "figarch", "fiaparch", "egarch", "aparch") and not isinstance( + if vol in ("arch", "garch", "figarch", "fiaparch", "egarch", "aparch") and not isinstance( p, int ): raise TypeError( From 7ce242bca2d9b3ba166d0d71d4afefb5cd84ac50 Mon Sep 17 00:00:00 2001 From: gjunjie Date: Fri, 3 Apr 2026 
14:29:38 -0400 Subject: [PATCH 5/8] Expand FIAPARCH test coverage for edge cases and partial branches Cover the two partial-coverage lines flagged by Codecov (q=0 branch in fiaparch_recursion_python, persistence >= 1 simulation path) and exercise additional edge cases: p=0/q=0 recursion, updater buffer reuse, and compute_variance sigma_delta reuse. Made-with: Cursor --- arch/tests/univariate/test_volatility.py | 72 ++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/arch/tests/univariate/test_volatility.py b/arch/tests/univariate/test_volatility.py index faa777dd36..2fa9793c67 100644 --- a/arch/tests/univariate/test_volatility.py +++ b/arch/tests/univariate/test_volatility.py @@ -1858,6 +1858,12 @@ def test_fiaparch(setup): fiaparch.compute_variance( parameters, setup.resids, setup.sigma2, backcast, var_bounds ) + # Call again to exercise the sigma_delta buffer reuse path + sigma2_second = np.zeros_like(setup.sigma2) + fiaparch.compute_variance( + parameters, setup.resids, sigma2_second, backcast, var_bounds + ) + assert_allclose(setup.sigma2, sigma2_second) cond_var_direct = np.zeros_like(setup.sigma2) sigma_delta_direct = np.zeros_like(setup.sigma2) @@ -1967,6 +1973,13 @@ def test_fiaparch_updater_matches_recursion(setup, o, delta): assert_allclose(sigma2_updater, sigma2) + # Second initialize_update to exercise the buffer-already-allocated branch + updater.initialize_update(parameters, backcast, setup.t) + sigma2_updater2 = np.zeros_like(setup.sigma2) + for t in range(setup.t): + updater._update_tester(t, parameters, setup.resids, sigma2_updater2, var_bounds) + assert_allclose(sigma2_updater2, sigma2) + def test_fiaparch_no_phi(setup): trunc_lag = 333 @@ -2024,6 +2037,28 @@ def test_fiaparch_no_beta(setup): fiaparch.compute_variance(parameters, setup.resids, sigma2, backcast, var_bounds) assert np.all(np.isfinite(sigma2)) + cond_var_direct = np.zeros_like(setup.sigma2) + sigma_delta_direct = np.zeros_like(setup.sigma2) + fig_params = 
parameters[:3] # omega, phi, d (no beta) + gamma = parameters[3] + delta = parameters[4] + recpy.fiaparch_recursion_python( + fig_params, + setup.resids, + np.abs(setup.resids), + cond_var_direct, + sigma_delta_direct, + 1, + 0, + setup.t, + 1000, + backcast, + var_bounds, + gamma, + delta, + ) + assert_allclose(sigma2, cond_var_direct) + def test_fiaparch_no_asym(setup): fiaparch = FIAPARCH(o=0) @@ -2141,6 +2176,21 @@ def test_fiaparch_simulate_high_beta(setup): fiaparch.simulate(parameters, 20, rng.simulate([])) +def test_fiaparch_simulate_persistence_ge_1(setup): + fiaparch = FIAPARCH(truncation=100) + rng = Normal(seed=RandomState()) + # d > 1 pushes persistence >= 1 + parameters = np.array([0.1, 0.2, 1.01, 0.2, -0.3, 1.5]) + fig_params = parameters[:4] + lam = rec.figarch_weights(fig_params[1:], 1, 1, 100) + persistence = np.sum(lam) + assert persistence >= 1 + with pytest.warns(InitialValueWarning): + sim_data = fiaparch.simulate(parameters, 20, rng.simulate([])) + assert sim_data[0].shape[0] == 20 + assert sim_data[1].shape[0] == 20 + + @pytest.mark.parametrize( "p,o,q", [(0, 1, 1), (1, 0, 1), (1, 1, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (0, 0, 0)], @@ -2220,6 +2270,28 @@ def test_fiaparch_minimal(setup): fiaparch.compute_variance(parameters, setup.resids, sigma2, backcast, var_bounds) assert np.all(np.isfinite(sigma2)) + cond_var_direct = np.zeros_like(setup.sigma2) + sigma_delta_direct = np.zeros_like(setup.sigma2) + fig_params = parameters[:2] # omega, d (no phi, no beta) + gamma = parameters[2] + delta = parameters[3] + recpy.fiaparch_recursion_python( + fig_params, + setup.resids, + np.abs(setup.resids), + cond_var_direct, + sigma_delta_direct, + 0, + 0, + setup.t, + 1000, + backcast, + var_bounds, + gamma, + delta, + ) + assert_allclose(sigma2, cond_var_direct) + def test_fiaparch_errors(setup): with pytest.raises(ValueError, match=r"truncation must be a positive integer"): From 08a4b4f9125c5322de572902b8f306a9a5600a95 Mon Sep 17 00:00:00 2001 From: 
gjunjie Date: Sat, 4 Apr 2026 10:37:52 -0400 Subject: [PATCH 6/8] Remove partial branch in test_fiaparch to fix coverage The `if persistence < 1` guard is unnecessary since the test parameters always yield persistence < 1. The persistence >= 1 case is already covered by dedicated tests. Made-with: Cursor --- arch/tests/univariate/test_volatility.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/tests/univariate/test_volatility.py b/arch/tests/univariate/test_volatility.py index 2fa9793c67..6b1e19f8f1 100644 --- a/arch/tests/univariate/test_volatility.py +++ b/arch/tests/univariate/test_volatility.py @@ -1912,9 +1912,7 @@ def test_fiaparch(setup): lam_rev = lam[::-1] omega_tilde = fig_params[0] / (1 - fig_params[-1]) persistence = np.sum(lam) - initial_value = omega_tilde - if persistence < 1: - initial_value /= 1 - persistence + initial_value = omega_tilde / (1 - persistence) e = setup.rng.standard_normal(trunc_lag + setup.t + 500) sigma2 = np.zeros(trunc_lag + setup.t + 500) data = np.zeros(trunc_lag + setup.t + 500) From 2641b262c5263a01711d3eee162d2314bc26f457 Mon Sep 17 00:00:00 2001 From: gjunjie Date: Sat, 4 Apr 2026 11:30:34 -0400 Subject: [PATCH 7/8] Expand FIAPARCH updater tests and add reduce/setstate coverage Made-with: Cursor --- arch/tests/univariate/test_volatility.py | 42 ++++++++++++++++++++++-- 1 file changed, 39 insertions(+), 3 deletions(-) diff --git a/arch/tests/univariate/test_volatility.py b/arch/tests/univariate/test_volatility.py index 6b1e19f8f1..6a8fd23fbc 100644 --- a/arch/tests/univariate/test_volatility.py +++ b/arch/tests/univariate/test_volatility.py @@ -1947,11 +1947,18 @@ def test_fiaparch(setup): assert_equal(fiaparch.truncation, trunc_lag) +@pytest.mark.parametrize("p", [0, 1]) @pytest.mark.parametrize("o", [0, 1]) +@pytest.mark.parametrize("q", [0, 1]) @pytest.mark.parametrize("delta", [None, 1.5]) -def test_fiaparch_updater_matches_recursion(setup, o, delta): - fiaparch = FIAPARCH(truncation=300, o=o, 
delta=delta) - params = [0.1, 0.2, 0.4, 0.2] +def test_fiaparch_updater_matches_recursion(setup, p, o, q, delta): + fiaparch = FIAPARCH(p=p, o=o, q=q, truncation=300, delta=delta) + params = [0.1] + if p: + params.append(0.2) + params.append(0.4) + if q: + params.append(0.2) if o: params.append(0.8) if delta is None: @@ -2319,3 +2326,32 @@ def test_fiaparch_str(setup, p, o, q): assert f"p: {p}" in s assert f"o: {o}" in s assert "delta:" not in s + + +@pytest.mark.parametrize("delta", [None, 1.5]) +def test_fiaparch_updater_reduce_setstate(setup, delta): + fiaparch = FIAPARCH(truncation=300, delta=delta) + params = [0.1, 0.2, 0.4, 0.2, -0.3] + if delta is None: + params.append(1.5) + parameters = np.array(params) + backcast = fiaparch.backcast(setup.resids) + var_bounds = fiaparch.variance_bounds(setup.resids) + + sigma2 = np.zeros_like(setup.sigma2) + fiaparch.compute_variance(parameters, setup.resids, sigma2, backcast, var_bounds) + + updater = fiaparch.volatility_updater + updater.initialize_update(parameters, backcast, setup.t) + sigma2_pre = np.zeros_like(setup.sigma2) + for t in range(setup.t): + updater._update_tester(t, parameters, setup.resids, sigma2_pre, var_bounds) + + cls, init_args, state = updater.__reduce__() + restored = cls(*init_args) + restored.__setstate__(state) + sigma2_post = np.zeros_like(setup.sigma2) + for t in range(setup.t): + restored._update_tester(t, parameters, setup.resids, sigma2_post, var_bounds) + + assert_allclose(sigma2_post, sigma2_pre) From 450fbad859c684b4bfb816df772413f7033ca787 Mon Sep 17 00:00:00 2001 From: gjunjie Date: Sat, 4 Apr 2026 11:56:13 -0400 Subject: [PATCH 8/8] Add __reduce__ and __setstate__ to pure-Python FIAPARCHUpdater Matches the Cython updater so pickle round-tripping works when the extension is not built (e.g. ARCH_NO_BINARY CI), fixing test_fiaparch_updater_reduce_setstate. 
Made-with: Cursor
---
 arch/univariate/recursions_python.py | 31 ++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/arch/univariate/recursions_python.py b/arch/univariate/recursions_python.py
index 5e7c90ec1e..0d93da0f98 100644
--- a/arch/univariate/recursions_python.py
+++ b/arch/univariate/recursions_python.py
@@ -1083,6 +1083,37 @@ def __init__(
         self.abs_resids = np.empty(0)
         self.sigma_delta = np.empty(0)
 
+    def __setstate__(self, state: tuple) -> None:
+        self.backcast = state[0]
+        self.delta = state[1]
+        lam = np.asarray(state[2], dtype=np.float64)
+        if lam.shape[0] != self.truncation: raise ValueError("state lam length does not match truncation")  # raise, not assert: survives python -O
+        self.lam = lam.copy()
+        self.resids = np.asarray(state[3], dtype=np.float64).copy()
+        self.abs_resids = np.asarray(state[4], dtype=np.float64).copy()
+        self.sigma_delta = np.asarray(state[5], dtype=np.float64).copy()
+
+    def __reduce__(self) -> tuple:
+        return (
+            FIAPARCHUpdater,
+            (  # NOTE(review): confirm this matches __init__'s (p, q, o, ...) order
+                self.p,
+                self.q,
+                self.o,
+                self.truncation,
+                bool(self.est_delta),
+                float(self.delta),
+            ),
+            (  # state tuple consumed positionally by __setstate__ above
+                self.backcast,
+                self.delta,
+                np.asarray(self.lam),
+                np.asarray(self.resids),
+                np.asarray(self.abs_resids),
+                np.asarray(self.sigma_delta),
+            ),
+        )
+
     def initialize_update(
         self,
         parameters: Float64Array1D,