Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion autofit/non_linear/analysis/visualize.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
from autofit.mapper.prior_model.abstract import AbstractPriorModel
from autofit.non_linear.paths.database import DatabasePaths
from autofit.non_linear.paths.null import NullPaths
from autofit.non_linear.test_mode import is_test_mode

class Visualizer:

Expand Down Expand Up @@ -40,7 +41,7 @@ def should_visualize(
A bool determining whether visualization should be performed or not.
"""

if os.environ.get("PYAUTOFIT_TEST_MODE") == "1":
if is_test_mode():
return False

if isinstance(paths, DatabasePaths) or isinstance(paths, NullPaths):
Expand Down
3 changes: 2 additions & 1 deletion autofit/non_linear/fitness.py
Original file line number Diff line number Diff line change
Expand Up @@ -456,7 +456,8 @@ def check_log_likelihood(self, fitness):
"""
import numpy as np

if os.environ.get("PYAUTOFIT_TEST_MODE") == "1":
from autofit.non_linear.test_mode import is_test_mode
if is_test_mode():
return

if not conf.instance["general"]["test"]["check_likelihood_function"]:
Expand Down
6 changes: 4 additions & 2 deletions autofit/non_linear/initializer.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
import numpy as np

from autofit import exc
from autofit.non_linear.test_mode import is_test_mode
from autofit.non_linear.paths.abstract import AbstractPaths
from autofit.mapper.prior.abstract import Prior
from autofit.mapper.prior_model.abstract import AbstractPriorModel
Expand Down Expand Up @@ -61,7 +62,7 @@ def samples_from_model(
of free dimensions of the model.
"""

if os.environ.get("PYAUTOFIT_TEST_MODE") == "1" and test_mode_samples:
if is_test_mode() and test_mode_samples:
return self.samples_in_test_mode(total_points=total_points, model=model)

if n_cores == 1:
Expand Down Expand Up @@ -219,7 +220,8 @@ def samples_in_test_mode(self, total_points: int, model: AbstractPriorModel):
"""

logger.warning(
f"TEST MODE ON: SAMPLES BEING ASSIGNED ABRITRARY LARGE LIKELIHOODS"
"TEST MODE 1 (reduced iterations): Initial samples assigned "
"arbitrary large likelihoods to accelerate sampler convergence."
)

unit_parameter_lists = []
Expand Down
6 changes: 4 additions & 2 deletions autofit/non_linear/plot/plot_util.py
Original file line number Diff line number Diff line change
@@ -1,17 +1,19 @@
import os
import logging
import os
from functools import wraps
from pathlib import Path

import numpy as np

from autofit.non_linear.test_mode import is_test_mode

logger = logging.getLogger(__name__)


def skip_in_test_mode(func):
@wraps(func)
def wrapper(*args, **kwargs):
if os.environ.get("PYAUTOFIT_TEST_MODE") == "1":
if is_test_mode():
return
return func(*args, **kwargs)

Expand Down
3 changes: 2 additions & 1 deletion autofit/non_linear/samples/samples.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
from autoconf.class_path import get_class_path
from autofit import exc
from autofit.mapper.model import ModelInstance
from autofit.non_linear.test_mode import is_test_mode
from autofit.mapper.prior_model.abstract import AbstractPriorModel
from autofit.non_linear.samples.sample import Sample

Expand Down Expand Up @@ -378,7 +379,7 @@ def samples_above_weight_threshold_from(
if weight_threshold is None:
weight_threshold = conf.instance["output"]["samples_weight_threshold"]

if os.environ.get("PYAUTOFIT_TEST_MODE") == "1":
if is_test_mode():
weight_threshold = None

if weight_threshold is None:
Expand Down
129 changes: 126 additions & 3 deletions autofit/non_linear/search/abstract_search.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@
from autofit.graphical.expectation_propagation import AbstractFactorOptimiser

from autofit.non_linear.fitness import get_timeout_seconds
from autofit.non_linear.test_mode import is_test_mode, test_mode_level

logger = logging.getLogger(__name__)

Expand Down Expand Up @@ -650,6 +651,14 @@ def start_resume_fit(self, analysis: Analysis, model: AbstractPriorModel) -> Res
):
self.timer.start()

mode = test_mode_level()
if mode >= 2:
return self._fit_bypass_test_mode(
model=model,
analysis=analysis,
call_likelihood=(mode == 2),
)

model.freeze()
search_internal, fitness = self._fit(
model=model,
Expand Down Expand Up @@ -770,14 +779,125 @@ def post_fit_output(self, search_internal):
if not conf.instance["output"]["search_internal"]:
self.logger.info("Removing search internal folder.")
self.paths.remove_search_internal()
else:
elif search_internal is not None:
self.output_search_internal(search_internal=search_internal)

if not self.disable_output:
self.logger.info("Removing all files except for .zip file")

self.paths.zip_remove()

def _fit_bypass_test_mode(
    self,
    model: AbstractPriorModel,
    analysis: Analysis,
    call_likelihood: bool = True,
):
    """
    Bypass the sampler entirely in test mode (levels 2 and 3).

    Generates fake samples and writes all expected output files so that
    downstream code sees a complete result folder.

    Parameters
    ----------
    model
        The model being fitted.
    analysis
        The analysis object with the log likelihood function.
    call_likelihood
        If True (mode 2), call the likelihood function once to verify it
        works. If False (mode 3), skip the likelihood call entirely.

    Returns
    -------
    The result object produced by ``analysis.make_result`` from the fake
    samples.
    """
    from autofit.non_linear.samples.pdf import SamplesPDF

    # The caller sets call_likelihood from the test mode level, so the
    # warning is derived from it rather than re-reading the environment
    # (which could in principle change between the two reads).
    if call_likelihood:
        logger.warning(
            "TEST MODE 2 (bypass + likelihood): Skipping sampler, "
            "calling likelihood function once to verify it works."
        )
    else:
        logger.warning(
            "TEST MODE 3 (full bypass): Skipping sampler and likelihood "
            "entirely for maximum speed. No likelihood verification."
        )

    model.freeze()

    try:
        # Evaluate the model at the midpoint of the unit hypercube
        # (the prior medians) to obtain one representative parameter set.
        unit_vector = [0.5] * model.prior_count
        parameter_vector = [
            float(v)
            for v in model.vector_from_unit_vector(unit_vector=unit_vector)
        ]

        # Mode 3 never calls the likelihood; a large negative sentinel is
        # used instead so downstream weight / maximum-likelihood logic
        # still has a value to work with.
        log_likelihood = -1.0e99
        if call_likelihood:
            instance = model.instance_from_vector(vector=parameter_vector)
            log_likelihood = float(analysis.log_likelihood_function(instance))

        sample_list = self._build_fake_samples(
            model=model,
            parameter_vector=parameter_vector,
            log_likelihood=log_likelihood,
        )

        samples = SamplesPDF(
            model=model,
            sample_list=sample_list,
            samples_info={
                "total_iterations": 1,
                "time": 0.0,
            },
        )

        samples_summary = samples.summary()
        self.paths.save_samples_summary(samples_summary=samples_summary)
        self.paths.save_samples(samples=samples)

        result = analysis.make_result(
            samples_summary=samples_summary,
            paths=self.paths,
            samples=samples,
            search_internal=None,
        )

        analysis.save_results(paths=self.paths, result=result)
        analysis.save_results_combined(paths=self.paths, result=result)
    finally:
        # Never leave the model frozen if the likelihood call or any of
        # the output writes raise, so the caller can retry or inspect it.
        model.unfreeze()

    self.paths.completed()

    return result

@staticmethod
def _build_fake_samples(model, parameter_vector, log_likelihood):
    """
    Build a minimal list of fake Sample objects for test mode bypass.

    Two samples are produced: the "best" sample at the supplied parameter
    vector and a second sample whose parameters are slightly offset with a
    worse likelihood, which is enough for SamplesPDF methods such as
    median_pdf to operate correctly.
    """
    from autofit.non_linear.samples.sample import Sample

    # Offset each parameter by 0.1%; an exactly-zero parameter is nudged
    # to a small absolute value instead, since scaling zero does nothing.
    offset_vector = [
        0.001 if value == 0.0 else value * 1.001
        for value in parameter_vector
    ]

    return Sample.from_lists(
        model=model,
        parameter_lists=[parameter_vector, offset_vector],
        log_likelihood_list=[log_likelihood, log_likelihood - 1.0],
        log_prior_list=[0.0, 0.0],
        weight_list=[1.0, 0.5],
    )

@abstractmethod
def _fit(self, model: AbstractPriorModel, analysis: Analysis):
pass
Expand Down Expand Up @@ -815,8 +935,11 @@ def config_dict_run(self) -> Dict:
except KeyError:
pass

if os.environ.get("PYAUTOFIT_TEST_MODE") == "1":
logger.warning(f"TEST MODE ON: SEARCH WILL SKIP SAMPLING\n\n")
if is_test_mode():
logger.warning(
"TEST MODE 1 (reduced iterations): Sampler will run with "
"minimal iterations for faster completion."
)

config_dict = self.config_dict_test_mode_from(config_dict=config_dict)

Expand Down
4 changes: 3 additions & 1 deletion autofit/non_linear/search/mcmc/auto_correlations.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,8 @@

from typing import Optional

from autofit.non_linear.test_mode import is_test_mode

class AutoCorrelationsSettings:

def __init__(
Expand Down Expand Up @@ -47,7 +49,7 @@ def update_via_config(self, config):
self.check_for_convergence = self.check_for_convergence if self.check_for_convergence is not None else config_dict["check_for_convergence"]
self.check_size = self.check_size or config_dict["check_size"]

if os.environ.get("PYAUTOFIT_TEST_MODE") == "1":
if is_test_mode():
self.check_size = 1

self.required_length = self.required_length or config_dict["required_length"]
Expand Down
3 changes: 2 additions & 1 deletion autofit/non_linear/search/mcmc/emcee/search.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
from autofit.non_linear.search.mcmc.abstract_mcmc import AbstractMCMC
from autofit.non_linear.search.mcmc.auto_correlations import AutoCorrelationsSettings
from autofit.non_linear.search.mcmc.auto_correlations import AutoCorrelations
from autofit.non_linear.test_mode import is_test_mode
from autofit.non_linear.samples.sample import Sample
from autofit.non_linear.samples.mcmc import SamplesMCMC

Expand Down Expand Up @@ -266,7 +267,7 @@ def samples_via_internal_from(self, model, search_internal=None):

search_internal = search_internal or self.backend

if os.environ.get("PYAUTOFIT_TEST_MODE") == "1":
if is_test_mode():
samples_after_burn_in = search_internal.get_chain(
discard=5, thin=5, flat=True
)
Expand Down
3 changes: 2 additions & 1 deletion autofit/non_linear/search/mcmc/zeus/search.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
from autofit.non_linear.search.mcmc.auto_correlations import AutoCorrelationsSettings
from autofit.non_linear.search.mcmc.auto_correlations import AutoCorrelations
from autofit.non_linear.samples.sample import Sample
from autofit.non_linear.test_mode import is_test_mode
from autofit.non_linear.samples.mcmc import SamplesMCMC


Expand Down Expand Up @@ -285,7 +286,7 @@ def samples_via_internal_from(self, model, search_internal=None):

search_internal = search_internal or self.paths.load_search_internal()

if os.environ.get("PYAUTOFIT_TEST_MODE") == "1":
if is_test_mode():

samples_after_burn_in = search_internal.get_chain(
discard=5, thin=5, flat=True
Expand Down
20 changes: 20 additions & 0 deletions autofit/non_linear/test_mode.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
import os


def test_mode_level() -> int:
    """
    Return the current test mode level, read from the PYAUTOFIT_TEST_MODE
    environment variable.

    0 = off (normal operation)
    1 = reduce sampler iterations to minimum (existing behavior)
    2 = bypass sampler entirely, call likelihood once
    3 = bypass sampler entirely, skip likelihood call

    An unset or non-integer value (e.g. a legacy "true") is treated as 0,
    matching the historical behavior where only the exact string "1"
    enabled test mode, and ensuring a malformed environment variable
    cannot crash a fit with a ValueError.
    """
    try:
        return int(os.environ.get("PYAUTOFIT_TEST_MODE", "0"))
    except ValueError:
        return 0


def is_test_mode() -> bool:
    """
    Return True if any test mode (level 1, 2 or 3) is active.
    """
    return test_mode_level() > 0
Loading