Are you sure you want to delete this task? Once this task is deleted, it cannot be recovered.
JiahongX aeca31b596 | 2 years ago | |
---|---|---|
image_based_person_reID_sample | 2 years ago | |
person_search_sample | 2 years ago | |
vehicle_reID_sample | 2 years ago | |
video_based_person_reID_sample | 2 years ago | |
readme.md | 2 years ago |
Name | 名称 |
---|---|
vehicle reid | 车辆重识别 |
image-based person reid | 基于图像的行人重识别 |
person search | 行人搜索 |
video-based person reid | 基于跟踪序列的行人重识别 |
为保证评测脚本正确运行,评测框架保留了 bk_ 前缀:请避免以 bk_ 为开头命名文件或子目录。配置文件 re_ID.yaml 需放置于 /dataset 路径下,且务必保留以下部分:
# scene: type of re-ID
# four types in total:
# 1. vehicle reid
# 2. image_based person reid
# 3. person search
# 4. video-based person reid
scene: 2 # for image_based person re_ID
attr_name:
attr_value:
input:
dataset:
data_dir: /dataset/benchmark_person_reID # for image_based person re_ID
# data_dir: /dataset/benchmark_vehicle_reID # for vehicle reid
train.py
训练脚本务必保留以下部分
#bk_utils, bk_config 模块由评测脚本提供
from bk_utils.model_complexity import compute_model_complexity
from bk_utils.util import save_network, get_stream_logger
from bk_config.mainconfig import OUTPUT_RESULT_DIR
from torchvision import datasets
def train(config_file_path: str, logger):
    """Train a re-ID model according to the YAML config at *config_file_path*.

    Documentation-template excerpt: several sections (remaining config
    parsing, model/transform construction, the training loop itself) are
    elided below as ``'''...'''`` placeholders, so names such as
    ``data_transforms``, ``batchsize``, ``model``, ``input_h``/``input_w``,
    ``best_model_wts``, ``save_path``, ``name``, ``time_elapsed`` and
    ``num_epochs`` come from the omitted parts.

    :param config_file_path: path to the re_ID.yaml configuration file.
    :param logger: logger used to record metrics the evaluation harness reads.
    """
    # Parse the YAML config file.
    with open(config_file_path, encoding='utf-8') as f:
        opts = yaml.load(f, Loader=yaml.SafeLoader)
    # Record the dataset name (last path component of data_dir) and re-ID sub-type.
    data_dir = opts['input']['dataset']['data_dir']
    data_name = data_dir.split('/')[-1]
    logger.info("dataset name: %s"%(data_name))
    # (elided) Parse the remaining variables from the config file.
    '''
    解析其他配置文件中的变量
    '''
    # (elided) Build the model and the train/val data transforms.
    '''
    加载模型,transform
    '''
    # Load the datasets: ImageFolder over the train and validation splits.
    # NOTE(review): assumes the dataset follows the Market-1501-style layout
    # with a 'bounding_box_train' and a 'val' sub-directory — confirm per dataset.
    image_datasets = {}
    image_datasets['train'] =datasets.ImageFolder(os.path.join(data_dir, 'bounding_box_train'), data_transforms['train'])
    image_datasets['val'] =datasets.ImageFolder(os.path.join(data_dir, 'val'), data_transforms['val'])
    dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batchsize,
                                                  shuffle=True, num_workers=8, pin_memory=True) # 8 workers may work faster
                   for x in ['train', 'val']}
    dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
    # Count model parameters and compute FLOPs for a single (1, 3, H, W) input;
    # both figures are logged so the harness can record model complexity.
    params, FLOPs = compute_model_complexity(model, (1,3,input_h,input_w), verbose=False, only_conv_linear=True)
    logger.info('number of params (M): %.2f'%(params/1e6))
    logger.info('FLOPs (G): %.2f'%(FLOPs/1e9))
    # (elided) Training loop.
    '''
    训练
    '''
    # Persist the best model weights under the harness-provided save path.
    model.load_state_dict(best_model_wts)
    save_network(model, save_path, name, 'last')
    # Log total training time and total number of epochs.
    logger.info('total train time: %.2f minutes'%(time_elapsed / 60))
    logger.info('total train epochs: %d epochs'%num_epochs)
test.py
测试脚本务必保留以下部分
import yaml
import pickle
from bk_config.mainconfig import OUTPUT_RESULT_DIR
from torchvision import datasets
def test(config_file_path:str, logger):
    """Evaluate a trained re-ID model and dump the results the harness scores.

    Documentation-template excerpt: the feature-extraction section is elided
    as a ``'''...'''`` placeholder, so names such as ``dist``,
    ``query_feature``, ``gallery_label``, ``gallery_cam``, ``query_label``
    and ``query_cam`` come from the omitted part.

    :param config_file_path: path to the re_ID.yaml configuration file.
    :param logger: logger used to record the forward-pass timing.
    """
    # Parse the YAML config file.
    with open(config_file_path, encoding='utf-8') as f:
        opts = yaml.load(f, Loader=yaml.SafeLoader)
    data_dir = opts['input']['dataset']['data_dir']
    # Trained-model filename follows the "trained_<name>_last.pth" convention
    # used by save_network() in the training script.
    name = "trained_" + opts['input']['config']['name']
    trained_model_name = name + "_last.pth"
    save_path = OUTPUT_RESULT_DIR
    since = time.time()
    with torch.no_grad():
        # (elided) Feature extraction over query and gallery sets.
        '''
        特征提取
        '''
    # Log total forward (inference) time in minutes.
    process_time = time.time() - since
    logger.info('total forward time: %.2f minutes'%(process_time/60))
    return_dict = {}
    # Feature-distance matrix, dist.shape = (len(query), len(gallery)).
    return_dict['dist'] = dist.numpy()
    # One sample feature vector, kept for sanity checking by the harness.
    return_dict['feature_example'] = query_feature[0].numpy()
    # Gallery identity (vehicle/person) and camera IDs.
    return_dict['gallery_label'] = gallery_label
    return_dict['gallery_cam'] = gallery_cam
    # Query identity and camera IDs.
    return_dict['query_label'] = query_label
    return_dict['query_cam'] = query_cam
    # NOTE(review): assumes OUTPUT_RESULT_DIR ends with a path separator —
    # confirm; otherwise the pickle is written beside, not inside, the
    # result directory. Protocol 4 keeps the file readable on Python >= 3.4.
    pickle.dump(return_dict, open(OUTPUT_RESULT_DIR+'test_result.pkl', 'wb'), protocol=4)
No Description
Jupyter Notebook Python reStructuredText Batchfile Makefile
Dear OpenI User
Thank you for your continuous support to the OpenI Qizhi Community AI Collaboration Platform. In order to protect your usage rights and ensure network security, we updated the OpenI Qizhi Community AI Collaboration Platform Usage Agreement in January 2024. The updated agreement specifies that users are prohibited from using intranet penetration tools. After you click "Agree and continue", you can continue to use our services. Thank you for your cooperation and understanding.
For more agreement content, please refer to the 《OpenI Qizhi Community AI Collaboration Platform Usage Agreement》