Global code reformat and optimize imports
This commit is contained in: parent 64ff05c499, commit bf34ae007f
@@ -6,13 +6,13 @@
 # Download labels from Google Drive, accepting presented query
 filename="coco2014labels.zip"
 fileid="1s6-CmF5_SElM28r52P1OUrCcuXZN-SFo"
-curl -c ./cookie -s -L "https://drive.google.com/uc?export=download&id=${fileid}" > /dev/null
-curl -Lb ./cookie "https://drive.google.com/uc?export=download&confirm=`awk '/download/ {print $NF}' ./cookie`&id=${fileid}" -o ${filename}
+curl -c ./cookie -s -L "https://drive.google.com/uc?export=download&id=${fileid}" >/dev/null
+curl -Lb ./cookie "https://drive.google.com/uc?export=download&confirm=$(awk '/download/ {print $NF}' ./cookie)&id=${fileid}" -o ${filename}
 rm ./cookie

 # Unzip labels
 unzip -q ${filename} # for coco.zip
 # tar -xzf ${filename} # for coco.tar.gz
 rm ${filename}

 # Download and unzip images
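Note: the two curl calls above are Google Drive's large-file confirmation handshake. The first request stores the confirm token in a cookie jar; the second replays it to download the archive. Below is a minimal Python sketch of the same flow, assuming the third-party requests package; it is an illustration, not part of this commit, and the token lookup by cookie prefix is an assumption about Drive's behaviour.

import requests

fileid = "1s6-CmF5_SElM28r52P1OUrCcuXZN-SFo"
filename = "coco2014labels.zip"
url = "https://drive.google.com/uc?export=download"

with requests.Session() as s:
    # First request: Drive replies with a confirmation token instead of the file
    r = s.get(url, params={"id": fileid}, stream=True)
    token = next((v for k, v in r.cookies.items() if k.startswith("download_warning")), None)

    # Second request: replay the token (if any) to fetch the actual payload
    params = {"id": fileid, "confirm": token} if token else {"id": fileid}
    r = s.get(url, params=params, stream=True)
    with open(filename, "wb") as f:
        for chunk in r.iter_content(chunk_size=1 << 20):
            f.write(chunk)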

@@ -6,13 +6,13 @@
 # Download labels from Google Drive, accepting presented query
 filename="coco2017labels.zip"
 fileid="1cXZR_ckHki6nddOmcysCuuJFM--T-Q6L"
-curl -c ./cookie -s -L "https://drive.google.com/uc?export=download&id=${fileid}" > /dev/null
-curl -Lb ./cookie "https://drive.google.com/uc?export=download&confirm=`awk '/download/ {print $NF}' ./cookie`&id=${fileid}" -o ${filename}
+curl -c ./cookie -s -L "https://drive.google.com/uc?export=download&id=${fileid}" >/dev/null
+curl -Lb ./cookie "https://drive.google.com/uc?export=download&confirm=$(awk '/download/ {print $NF}' ./cookie)&id=${fileid}" -o ${filename}
 rm ./cookie

 # Unzip labels
 unzip -q ${filename} # for coco.zip
 # tar -xzf ${filename} # for coco.tar.gz
 rm ${filename}

 # Download and unzip images

train.py (28 changed lines)

@@ -149,15 +149,15 @@ def train(hyp):
     elif len(weights) > 0:  # darknet format
         # possible weights are '*.weights', 'yolov3-tiny.conv.15', 'darknet53.conv.74' etc.
         load_darknet_weights(model, weights)

     if opt.freeze_layers:
         output_layer_indices = [idx - 1 for idx, module in enumerate(model.module_list) if isinstance(module, YOLOLayer)]
         freeze_layer_indices = [x for x in range(len(model.module_list)) if
                                 (x not in output_layer_indices) and
                                 (x - 1 not in output_layer_indices)]
         for idx in freeze_layer_indices:
             for parameter in model.module_list[idx].parameters():
                 parameter.requires_grad_(False)

     # Mixed precision training https://github.com/NVIDIA/apex
     if mixed_precision:
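The --freeze-layers branch keeps only the output convolutions trainable: it records the module index immediately before each YOLOLayer (the conv feeding that detection head) and disables gradients for every other module. A self-contained sketch of that index arithmetic on a toy module list follows; DummyYOLOLayer and the layer sizes are illustrative stand-ins, not the repo's classes.

import torch.nn as nn

class DummyYOLOLayer(nn.Module):
    # Hypothetical stand-in for the repo's YOLOLayer marker class
    def forward(self, x):
        return x

module_list = nn.ModuleList([
    nn.Conv2d(3, 16, 3), nn.Conv2d(16, 255, 1), DummyYOLOLayer(),
    nn.Conv2d(255, 32, 3), nn.Conv2d(32, 255, 1), DummyYOLOLayer(),
])

# Indices of the convolutions feeding each YOLO head (idx - 1)
output_layer_indices = [idx - 1 for idx, m in enumerate(module_list) if isinstance(m, DummyYOLOLayer)]

# Freeze everything that is neither an output conv nor a YOLO layer itself
freeze_layer_indices = [x for x in range(len(module_list))
                        if (x not in output_layer_indices) and (x - 1 not in output_layer_indices)]
for idx in freeze_layer_indices:
    for p in module_list[idx].parameters():
        p.requires_grad_(False)

print(output_layer_indices)  # [1, 4]
print(freeze_layer_indices)  # [0, 3]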
@@ -356,10 +356,10 @@ def train(hyp):
         if save:
             with open(results_file, 'r') as f:  # create checkpoint
                 ckpt = {'epoch': epoch,
                         'best_fitness': best_fitness,
                         'training_results': f.read(),
                         'model': ema.ema.module.state_dict() if hasattr(model, 'module') else ema.ema.state_dict(),
                         'optimizer': None if final_epoch else optimizer.state_dict()}

             # Save last, best and delete
             torch.save(ckpt, last)
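The checkpoint above stores the EMA weights rather than the raw model's, embeds the accumulated results file as text, and drops the optimizer state on the final epoch so the published weights stay small. A minimal, self-contained sketch of saving and reloading that layout; the Linear model and SGD optimizer are placeholders, not the repo's Darknet model.

import torch

model = torch.nn.Linear(10, 10)               # placeholder for the Darknet model
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
epoch, final_epoch, best_fitness = 5, False, 0.42

ckpt = {'epoch': epoch,
        'best_fitness': best_fitness,
        'training_results': 'results.txt contents would go here',
        'model': model.state_dict(),           # the repo stores the EMA's state_dict here
        'optimizer': None if final_epoch else optimizer.state_dict()}
torch.save(ckpt, 'last.pt')

# Resume: restore weights, optimizer state (absent on the final epoch) and counters
ckpt = torch.load('last.pt')
model.load_state_dict(ckpt['model'])
if ckpt['optimizer'] is not None:
    optimizer.load_state_dict(ckpt['optimizer'])
start_epoch = ckpt['epoch'] + 1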
@@ -409,7 +409,7 @@ if __name__ == '__main__':
     parser.add_argument('--device', default='', help='device id (i.e. 0 or 0,1 or cpu)')
     parser.add_argument('--adam', action='store_true', help='use adam optimizer')
     parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
     parser.add_argument('--freeze-layers', action='store_true', help='Freeze non-output layers')
     opt = parser.parse_args()
     opt.weights = last if opt.resume and not opt.weights else opt.weights
     check_git_status()
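The opt.weights line falls back to the most recent checkpoint when --resume is passed without an explicit --weights path. A small argparse sketch of that fallback; the 'weights/last.pt' path and the simulated command line are illustrative only.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--resume', action='store_true', help='resume training from last checkpoint')
parser.add_argument('--weights', type=str, default='', help='initial weights path')
parser.add_argument('--freeze-layers', action='store_true', help='Freeze non-output layers')
opt = parser.parse_args(['--resume'])          # simulate `python train.py --resume`

last = 'weights/last.pt'                       # illustrative checkpoint path
opt.weights = last if opt.resume and not opt.weights else opt.weights
print(opt.weights)                             # weights/last.pt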

@@ -12,8 +12,7 @@ while true; do
 python3 train.py --data coco2014.data --img-size 512 608 --epochs 27 --batch 8 --accum 8 --evolve --weights '' --bucket ult/coco/sppa_512 --device $1 --cfg yolov3-sppa.cfg --multi
 done

 # coco epoch times --img-size 416 608 --epochs 27 --batch 16 --accum 4
 # 36:34 2080ti
 # 21:58 V100
 # 63:00 T4

@@ -29,8 +29,7 @@ docker kill $(docker ps -a -q --filter ancestor=$t)
 sudo -s
 t=ultralytics/yolov3:evolve
 # docker kill $(docker ps -a -q --filter ancestor=$t)
-for i in 0 1 6 7
-do
+for i in 0 1 6 7; do
 docker pull $t && docker run --gpus all -d --ipc=host -v "$(pwd)"/coco:/usr/src/coco $t bash utils/evolve.sh $i
 sleep 30
 done