'''Main transfer script.'''
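# Example invocations via Hydra's override syntax (a sketch: the override keys
# mirror the config fields read in run() below and are assumptions about
# conf/transfer.yaml, which is not shown here):
#   python transfer.py exp.name=my_run dataset.name=<dataset> gpus=0
#   python transfer.py exp.name=my_run test=true   # run evaluation instead of fitting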
import hydra


@hydra.main(config_path='conf', config_name='transfer', version_base='1.1')
def run(config):
    # Deferred imports for faster tab completion.
    import os

    import flatten_dict
    import pytorch_lightning as pl

    from src.datasets.catalog import TRANSFER_DATASETS
    from src.systems import transfer

    pl.seed_everything(config.trainer.seed)

    # Saving checkpoints and logging with wandb.
    flat_config = flatten_dict.flatten(config, reducer='dot')
    save_dir = os.path.join(config.exp.base_dir, config.exp.name)
    wandb_logger = pl.loggers.WandbLogger(project='domain-agnostic', name=config.exp.name)
    wandb_logger.log_hyperparams(flat_config)
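    # For illustration: flatten_dict with reducer='dot' turns nested keys into
    # dotted ones, e.g. {'trainer': {'seed': 0}} -> {'trainer.seed': 0}, so
    # wandb displays each hyperparameter as a flat key.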

    # Check every validation epoch, and save best model based on maximizing the transfer/val_metric.
    ckpt_callback = pl.callbacks.ModelCheckpoint(
        dirpath=save_dir,
        filename='best_model_{epoch}',
        monitor='transfer/val_metric',
        save_top_k=1,
        mode='max',
    )
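    # Note (Lightning 1.x behavior): with the default auto_insert_metric_name,
    # the filename above resolves to e.g. 'best_model_epoch=7.ckpt'.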

    assert config.dataset.name in TRANSFER_DATASETS, f'{config.dataset.name} not one of {TRANSFER_DATASETS}.'

    # PyTorch Lightning Trainer.
    trainer = pl.Trainer(
        default_root_dir=save_dir,
        logger=wandb_logger,
        gpus=str(config.gpus),
        max_epochs=config.trainer.max_epochs,
        min_epochs=config.trainer.max_epochs,
        val_check_interval=config.trainer.val_check_interval,
        limit_val_batches=config.trainer.limit_val_batches,
        callbacks=[ckpt_callback],
        # weights_summary=config.trainer.weights_summary,
        precision=config.trainer.precision,
        # accelerator="ddp"
    )
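    # Two flags above worth noting: str(config.gpus) makes Lightning 1.x parse
    # the value as comma-separated device indices (e.g. '0' selects GPU 0,
    # whereas the int 0 would mean CPU), and min_epochs == max_epochs pins
    # training to exactly max_epochs, with no early stopping.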

    system = transfer.TransferSystem(config)

    if not config.test:
        trainer.fit(system)
    else:
        trainer.test(system)


if __name__ == '__main__':
    run()