debeir.training.hparm_tuning.optuna_rank
from functools import partial

import joblib
import optuna
from debeir.training.hparm_tuning.trainer import Trainer
from optuna.integration.wandb import WeightsAndBiasesCallback
from optuna.trial import TrialState


def objective(trainer: Trainer, trial: optuna.Trial):
    # Load the dataset through the trainer and run a single training trial,
    # returning the validation metric that the study optimizes.
    dataset = trainer.dataset_loading_fn()

    train_dataset = dataset["train"]
    val_dataset = dataset["val"]

    return trainer.fit(trial, train_dataset, val_dataset)


def run_optuna_with_wandb(trainer, n_trials=100, n_jobs=1, maximize_objective=True, save_study_path=".",
                          wandb_kwargs=None):
    """
    Partially initialize the objective function with the given trainer, then optimize
    its hyperparameters with the optuna library, logging each trial to Weights & Biases.

    :param trainer: object exposing fit and dataset_loading_fn (see objective)
    :param n_trials: number of optuna trials to run
    :param n_jobs: number of trials to run in parallel
    :param maximize_objective: maximize the objective if True, otherwise minimize it
    :param save_study_path: path prefix for the pickled study (".pkl" is appended)
    :param wandb_kwargs: keyword arguments forwarded to WeightsAndBiasesCallback
    :return: the completed optuna.Study
    """
    assert hasattr(trainer, "fit")

    if wandb_kwargs is None:
        wandb_kwargs = {"project": "temp"}

    wandbc = WeightsAndBiasesCallback(wandb_kwargs=wandb_kwargs)
    study = optuna.create_study(direction="maximize" if maximize_objective else "minimize")
    obj = partial(objective, trainer)

    try:
        study.optimize(obj, n_trials=n_trials, n_jobs=n_jobs, callbacks=[wandbc])
    except (Exception, KeyboardInterrupt):
        # Swallow failures so the trials collected so far are still saved below.
        pass
    finally:
        joblib.dump(study, save_study_path + ".pkl")

    return study


def print_optuna_stats(study: optuna.Study):
    # Summarize the study: trial counts by state, then the best trial.
    pruned_trials = study.get_trials(deepcopy=False, states=[TrialState.PRUNED])
    complete_trials = study.get_trials(deepcopy=False, states=[TrialState.COMPLETE])

    print("Study statistics: ")
    print("  Number of finished trials: ", len(study.trials))
    print("  Number of pruned trials: ", len(pruned_trials))
    print("  Number of complete trials: ", len(complete_trials))

    print("Best trial:")
    trial = study.best_trial

    print("  Value: ", trial.value)

    print("  Params: ")
    for key, value in trial.params.items():
        print("    {}: {}".format(key, value))
def objective(trainer: Trainer, trial: optuna.Trial)

Loads the dataset via the trainer's dataset_loading_fn, then fits the trainer on the
"train" and "val" splits for a single trial and returns the resulting metric.
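Note that run_optuna_with_wandb only asserts that the trainer has a fit method, so any
object providing fit and dataset_loading_fn satisfies this contract. A minimal
hypothetical sketch (MyTrainer, its datasets, and the sampled hyperparameters are
illustrative, not part of debeir):

import optuna

class MyTrainer:  # hypothetical stand-in; debeir's Trainer defines the real contract
    def dataset_loading_fn(self):
        # objective() expects "train" and "val" keys in the returned mapping.
        return {"train": [...], "val": [...]}  # placeholder datasets

    def fit(self, trial: optuna.Trial, train_dataset, val_dataset):
        # Sample hyperparameters from the trial, train, and return the
        # validation metric that the study maximizes (or minimizes).
        lr = trial.suggest_float("lr", 1e-5, 1e-2, log=True)
        epochs = trial.suggest_int("epochs", 1, 10)
        score = 1.0 / (1.0 + abs(lr - 1e-3)) + 0.01 * epochs  # dummy metric for illustration
        return score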
def run_optuna_with_wandb(trainer, n_trials=100, n_jobs=1, maximize_objective=True, save_study_path=".", wandb_kwargs=None)
Partially initializes the objective function with the given trainer, then optimizes its
hyperparameters with the optuna library, logging each trial to Weights & Biases.

Parameters
- trainer: object exposing fit and dataset_loading_fn (see objective)
- n_trials: number of optuna trials to run
- n_jobs: number of trials to run in parallel
- maximize_objective: maximize the objective if True, otherwise minimize it
- save_study_path: path prefix for the pickled study (".pkl" is appended)
- wandb_kwargs: keyword arguments forwarded to WeightsAndBiasesCallback; defaults to {"project": "temp"}

Returns
- the completed optuna.Study, also pickled to save_study_path + ".pkl"
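A usage sketch under the assumptions above: MyTrainer is the hypothetical trainer from
the objective section, and the project name and study path are illustrative:

trainer = MyTrainer()
study = run_optuna_with_wandb(
    trainer,
    n_trials=50,
    n_jobs=1,
    maximize_objective=True,
    save_study_path="./hparam_study",  # study is pickled to ./hparam_study.pkl
    wandb_kwargs={"project": "debeir-hparam-search"},  # illustrative project name
)
print_optuna_stats(study)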
def print_optuna_stats(study: optuna.Study)

Prints summary statistics for a study: the number of finished, pruned, and complete
trials, followed by the best trial's value and its sampled parameters.
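Because run_optuna_with_wandb pickles the study with joblib, a finished study can be
reloaded later and summarized; a short sketch, assuming the illustrative path from above:

import joblib

study = joblib.load("./hparam_study.pkl")  # path produced by save_study_path="./hparam_study"
print_optuna_stats(study)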