diff --git a/train.py b/train.py
index 9f869cfd..91d8dfe0 100644
--- a/train.py
+++ b/train.py
@@ -59,7 +59,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
     cuda = device.type != 'cpu'
     init_seeds(2 + rank)
     with open(opt.data) as f:
-        data_dict = yaml.load(f, Loader=yaml.FullLoader)  # data dict
+        data_dict = yaml.load(f, Loader=yaml.SafeLoader)  # data dict
     with torch_distributed_zero_first(rank):
         check_dataset(data_dict)  # check
     train_path = data_dict['train']
@@ -476,7 +476,7 @@ if __name__ == '__main__':
         assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
         apriori = opt.global_rank, opt.local_rank
         with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
-            opt = argparse.Namespace(**yaml.load(f, Loader=yaml.FullLoader))  # replace
+            opt = argparse.Namespace(**yaml.load(f, Loader=yaml.SafeLoader))  # replace
         opt.cfg, opt.weights, opt.resume, opt.global_rank, opt.local_rank = '', ckpt, True, *apriori  # reinstate
         logger.info('Resuming training from %s' % ckpt)
     else:
@@ -500,7 +500,7 @@ if __name__ == '__main__':
 
     # Hyperparameters
     with open(opt.hyp) as f:
-        hyp = yaml.load(f, Loader=yaml.FullLoader)  # load hyps
+        hyp = yaml.load(f, Loader=yaml.SafeLoader)  # load hyps
 
     # Train
     logger.info(opt)
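
Why SafeLoader: yaml.FullLoader resolves the full YAML language, including Python-specific tags, and has had code-execution bypasses reported against it (e.g. CVE-2020-14343, fixed in PyYAML 5.4), while yaml.SafeLoader only constructs standard YAML types (str, int, float, list, dict, ...), which is all these config files need. A minimal sketch of the difference, illustrative only and assuming PyYAML is installed (the payload string is hypothetical):

import yaml

# A tag that asks the loader to build an arbitrary Python object.
payload = "!!python/object/apply:os.system ['echo pwned']"

try:
    yaml.load(payload, Loader=yaml.SafeLoader)
except yaml.constructor.ConstructorError as err:
    # SafeLoader has no constructor registered for python/* tags, so it refuses.
    print("rejected:", err.problem)

# yaml.safe_load(f) is equivalent shorthand for yaml.load(f, Loader=yaml.SafeLoader).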