I just changed the training source folder, and now I cannot train — it shows this error:
Code: Select all
05/23/2023 13:54:05 MainProcess _training config _check_exists DEBUG Config file exists: 'C:\Users\adama\faceswap\config\train.ini'
05/23/2023 13:54:05 MainProcess _training config _load_config VERBOSE Loading config: 'C:\Users\adama\faceswap\config\train.ini'
05/23/2023 13:54:05 MainProcess _training config _validate_config DEBUG Validating config
05/23/2023 13:54:05 MainProcess _training config _check_config_change DEBUG Default config has not changed
05/23/2023 13:54:05 MainProcess _training config _check_config_choices DEBUG Checking config choices
05/23/2023 13:54:05 MainProcess _training config _parse_list DEBUG Processed raw option 'keras_encoder' to list ['keras_encoder'] for section 'model.phaze_a', option 'freeze_layers'
05/23/2023 13:54:05 MainProcess _training config _parse_list DEBUG Processed raw option 'encoder' to list ['encoder'] for section 'model.phaze_a', option 'load_layers'
05/23/2023 13:54:05 MainProcess _training config _check_config_choices DEBUG Checked config choices
05/23/2023 13:54:05 MainProcess _training config _validate_config DEBUG Validated config
05/23/2023 13:54:05 MainProcess _training config _handle_config DEBUG Handled config
05/23/2023 13:54:05 MainProcess _training config __init__ DEBUG Initialized: Config
05/23/2023 13:54:05 MainProcess _training config get DEBUG Getting config item: (section: 'global', option: 'learning_rate')
05/23/2023 13:54:05 MainProcess _training config get DEBUG Returning item: (type: <class 'float'>, value: 3e-05)
05/23/2023 13:54:05 MainProcess _training config get DEBUG Getting config item: (section: 'global', option: 'epsilon_exponent')
05/23/2023 13:54:05 MainProcess _training config get DEBUG Returning item: (type: <class 'int'>, value: -3)
05/23/2023 13:54:05 MainProcess _training config get DEBUG Getting config item: (section: 'global', option: 'autoclip')
05/23/2023 13:54:05 MainProcess _training config get DEBUG Returning item: (type: <class 'bool'>, value: False)
05/23/2023 13:54:05 MainProcess _training config get DEBUG Getting config item: (section: 'global', option: 'allow_growth')
05/23/2023 13:54:05 MainProcess _training config get DEBUG Returning item: (type: <class 'bool'>, value: False)
05/23/2023 13:54:05 MainProcess _training config get DEBUG Getting config item: (section: 'global', option: 'mixed_precision')
05/23/2023 13:54:05 MainProcess _training config get DEBUG Returning item: (type: <class 'bool'>, value: True)
05/23/2023 13:54:05 MainProcess _training config get DEBUG Getting config item: (section: 'global', option: 'nan_protection')
05/23/2023 13:54:05 MainProcess _training config get DEBUG Returning item: (type: <class 'bool'>, value: True)
05/23/2023 13:54:05 MainProcess _training config get DEBUG Getting config item: (section: 'global', option: 'convert_batchsize')
05/23/2023 13:54:05 MainProcess _training config get DEBUG Returning item: (type: <class 'int'>, value: 16)
05/23/2023 13:54:05 MainProcess _training config get DEBUG Getting config item: (section: 'global.loss', option: 'loss_function')
05/23/2023 13:54:05 MainProcess _training config get DEBUG Returning item: (type: <class 'str'>, value: ms_ssim)
05/23/2023 13:54:05 MainProcess _training config get DEBUG Getting config item: (section: 'global.loss', option: 'loss_function_2')
05/23/2023 13:54:05 MainProcess _training config get DEBUG Returning item: (type: <class 'str'>, value: mae)
05/23/2023 13:54:05 MainProcess _training config get DEBUG Getting config item: (section: 'global.loss', option: 'loss_weight_2')
05/23/2023 13:54:05 MainProcess _training config get DEBUG Returning item: (type: <class 'int'>, value: 25)
05/23/2023 13:54:05 MainProcess _training config get DEBUG Getting config item: (section: 'global.loss', option: 'loss_function_3')
05/23/2023 13:54:05 MainProcess _training config get DEBUG Returning item: (type: <class 'str'>, value: lpips_alex)
05/23/2023 13:54:05 MainProcess _training config get DEBUG Getting config item: (section: 'global.loss', option: 'loss_weight_3')
05/23/2023 13:54:05 MainProcess _training config get DEBUG Returning item: (type: <class 'int'>, value: 5)
05/23/2023 13:54:05 MainProcess _training config get DEBUG Getting config item: (section: 'global.loss', option: 'loss_function_4')
05/23/2023 13:54:05 MainProcess _training config get DEBUG Returning item: (type: <class 'str'>, value: ffl)
05/23/2023 13:54:05 MainProcess _training config get DEBUG Getting config item: (section: 'global.loss', option: 'loss_weight_4')
05/23/2023 13:54:05 MainProcess _training config get DEBUG Returning item: (type: <class 'int'>, value: 100)
05/23/2023 13:54:05 MainProcess _training config get DEBUG Getting config item: (section: 'global.loss', option: 'mask_loss_function')
05/23/2023 13:54:05 MainProcess _training config get DEBUG Returning item: (type: <class 'str'>, value: mse)
05/23/2023 13:54:05 MainProcess _training config get DEBUG Getting config item: (section: 'global.loss', option: 'eye_multiplier')
05/23/2023 13:54:05 MainProcess _training config get DEBUG Returning item: (type: <class 'int'>, value: 3)
05/23/2023 13:54:05 MainProcess _training config get DEBUG Getting config item: (section: 'global.loss', option: 'mouth_multiplier')
05/23/2023 13:54:05 MainProcess _training config get DEBUG Returning item: (type: <class 'int'>, value: 2)
05/23/2023 13:54:05 MainProcess _training config get DEBUG Getting config item: (section: 'model.phaze_a', option: 'fc_dropout')
05/23/2023 13:54:05 MainProcess _training config get DEBUG Returning item: (type: <class 'float'>, value: 0.0)
05/23/2023 13:54:05 MainProcess _training config get DEBUG Getting config item: (section: 'model.phaze_a', option: 'fc_gblock_dropout')
05/23/2023 13:54:05 MainProcess _training config get DEBUG Returning item: (type: <class 'float'>, value: 0.0)
05/23/2023 13:54:05 MainProcess _training config get DEBUG Getting config item: (section: 'model.phaze_a', option: 'freeze_layers')
05/23/2023 13:54:05 MainProcess _training config _parse_list DEBUG Processed raw option 'keras_encoder' to list ['keras_encoder'] for section 'model.phaze_a', option 'freeze_layers'
05/23/2023 13:54:05 MainProcess _training config get DEBUG Returning item: (type: <class 'list'>, value: ['keras_encoder'])
05/23/2023 13:54:05 MainProcess _training config changeable_items DEBUG Alterable for existing models: {'learning_rate': 3e-05, 'epsilon_exponent': -3, 'autoclip': False, 'allow_growth': False, 'mixed_precision': True, 'nan_protection': True, 'convert_batchsize': 16, 'loss_function': 'ms_ssim', 'loss_function_2': 'mae', 'loss_weight_2': 25, 'loss_function_3': 'lpips_alex', 'loss_weight_3': 5, 'loss_function_4': 'ffl', 'loss_weight_4': 100, 'mask_loss_function': 'mse', 'eye_multiplier': 3, 'mouth_multiplier': 2, 'fc_dropout': 0.0, 'fc_gblock_dropout': 0.0, 'freeze_layers': ['keras_encoder']}
05/23/2023 13:54:05 MainProcess _training model __init__ DEBUG Initializing State: (model_dir: 'D:\facewarp\Model Alice & Li V2_L\Model Alice & Li V2_L', model_name: 'phaze_a', config_changeable_items: '{'learning_rate': 3e-05, 'epsilon_exponent': -3, 'autoclip': False, 'allow_growth': False, 'mixed_precision': True, 'nan_protection': True, 'convert_batchsize': 16, 'loss_function': 'ms_ssim', 'loss_function_2': 'mae', 'loss_weight_2': 25, 'loss_function_3': 'lpips_alex', 'loss_weight_3': 5, 'loss_function_4': 'ffl', 'loss_weight_4': 100, 'mask_loss_function': 'mse', 'eye_multiplier': 3, 'mouth_multiplier': 2, 'fc_dropout': 0.0, 'fc_gblock_dropout': 0.0, 'freeze_layers': ['keras_encoder']}', no_logs: False
05/23/2023 13:54:05 MainProcess _training serializer get_serializer DEBUG <lib.serializer._JSONSerializer object at 0x0000021AEA344460>
05/23/2023 13:54:05 MainProcess _training model _load DEBUG Loading State
05/23/2023 13:54:05 MainProcess _training serializer load DEBUG filename: D:\facewarp\Model Alice & Li V2_L\Model Alice & Li V2_L\phaze_a_state.json
05/23/2023 13:54:05 MainProcess _training serializer load DEBUG stored data type: <class 'bytes'>
05/23/2023 13:54:05 MainProcess _training serializer unmarshal DEBUG data type: <class 'bytes'>
05/23/2023 13:54:05 MainProcess _training serializer unmarshal DEBUG returned data type: <class 'dict'>
05/23/2023 13:54:05 MainProcess _training serializer load DEBUG data type: <class 'dict'>
05/23/2023 13:54:05 MainProcess _training model _load DEBUG Loaded state: {'name': 'phaze_a', 'sessions': {'1': {'timestamp': 1683966772.4561841, 'no_logs': False, 'loss_names': ['total', 'face_a', 'face_b'], 'batchsize': 8, 'iterations': 2055, 'config': {'learning_rate': 3e-05, 'epsilon_exponent': -3, 'autoclip': False, 'allow_growth': False, 'mixed_precision': True, 'nan_protection': True, 'convert_batchsize': 16, 'loss_function': 'ssim', 'loss_function_2': 'mae', 'loss_weight_2': 100, 'loss_function_3': 'ffl', 'loss_weight_3': 50, 'loss_function_4': None, 'loss_weight_4': 0, 'mask_loss_function': 'mae', 'eye_multiplier': 3, 'mouth_multiplier': 2, 'fc_dropout': 0.0, 'fc_gblock_dropout': 0.0, 'freeze_layers': ['keras_encoder']}}, '2': {'timestamp': 1683969323.2565155, 'no_logs': False, 'loss_names': ['total', 'face_a', 'face_b'], 'batchsize': 8, 'iterations': 206801, 'config': {'learning_rate': 3e-05, 'epsilon_exponent': -3, 'autoclip': False, 'allow_growth': False, 'mixed_precision': True, 'nan_protection': True, 'convert_batchsize': 16, 'loss_function': 'ms_ssim', 'loss_function_2': 'mae', 'loss_weight_2': 100, 'loss_function_3': 'ffl', 'loss_weight_3': 50, 'loss_function_4': None, 'loss_weight_4': 0, 'mask_loss_function': 'mae', 'eye_multiplier': 3, 'mouth_multiplier': 2, 'fc_dropout': 0.0, 'fc_gblock_dropout': 0.0, 'freeze_layers': ['keras_encoder']}}, '3': {'timestamp': 1684218367.0934753, 'no_logs': False, 'loss_names': ['total', 'face_a', 'face_b'], 'batchsize': 8, 'iterations': 771, 'config': {'learning_rate': 3e-05, 'epsilon_exponent': -3, 'autoclip': False, 'allow_growth': False, 'mixed_precision': True, 'nan_protection': True, 'convert_batchsize': 16, 'loss_function': 'ms_ssim', 'loss_function_2': 'mae', 'loss_weight_2': 100, 'loss_function_3': 'ffl', 'loss_weight_3': 50, 'loss_function_4': None, 'loss_weight_4': 0, 'mask_loss_function': 'mae', 'eye_multiplier': 3, 'mouth_multiplier': 2, 'fc_dropout': 0.0, 'fc_gblock_dropout': 0.0, 'freeze_layers': 
['keras_encoder']}}, '4': {'timestamp': 1684220514.234757, 'no_logs': False, 'loss_names': ['total', 'face_a', 'face_b'], 'batchsize': 6, 'iterations': 56765, 'config': {'learning_rate': 3e-05, 'epsilon_exponent': -4, 'autoclip': False, 'allow_growth': False, 'mixed_precision': True, 'nan_protection': True, 'convert_batchsize': 16, 'loss_function': 'ms_ssim', 'loss_function_2': 'mae', 'loss_weight_2': 25, 'loss_function_3': 'lpips_vgg16', 'loss_weight_3': 5, 'loss_function_4': 'ffl', 'loss_weight_4': 100, 'mask_loss_function': 'mse', 'eye_multiplier': 3, 'mouth_multiplier': 2, 'fc_dropout': 0.0, 'fc_gblock_dropout': 0.0, 'freeze_layers': ['keras_encoder']}}, '5': {'timestamp': 1684301890.4861472, 'no_logs': False, 'loss_names': ['total', 'face_a', 'face_b'], 'batchsize': 6, 'iterations': 369538, 'config': {'learning_rate': 3e-05, 'epsilon_exponent': -4, 'autoclip': False, 'allow_growth': False, 'mixed_precision': True, 'nan_protection': True, 'convert_batchsize': 16, 'loss_function': 'ms_ssim', 'loss_function_2': 'mae', 'loss_weight_2': 25, 'loss_function_3': 'lpips_vgg16', 'loss_weight_3': 5, 'loss_function_4': 'ffl', 'loss_weight_4': 100, 'mask_loss_function': 'mse', 'eye_multiplier': 3, 'mouth_multiplier': 2, 'fc_dropout': 0.0, 'fc_gblock_dropout': 0.0, 'freeze_layers': ['keras_encoder']}}}, 'lowest_avg_loss': {'a': 0.02910966745018959, 'b': 0.02204980169609189}, 'iterations': 635930, 'mixed_precision_layers': [], 'config': {'centering': 'face', 'coverage': 87.5, 'optimizer': 'adam', 'learning_rate': 3e-05, 'epsilon_exponent': -4, 'autoclip': False, 'allow_growth': False, 'mixed_precision': True, 'nan_protection': True, 'convert_batchsize': 16, 'loss_function': 'ms_ssim', 'loss_function_2': 'mae', 'loss_weight_2': 25, 'loss_function_3': 'lpips_vgg16', 'loss_weight_3': 5, 'loss_function_4': 'ffl', 'loss_weight_4': 100, 'mask_loss_function': 'mse', 'eye_multiplier': 3, 'mouth_multiplier': 2, 'penalized_mask_loss': True, 'mask_type': 'extended', 'mask_blur_kernel': 
3, 'mask_threshold': 4, 'learn_mask': False, 'output_size': 256, 'shared_fc': None, 'enable_gblock': True, 'split_fc': True, 'split_gblock': False, 'split_decoders': False, 'enc_architecture': 'efficientnet_v2_l', 'enc_scaling': 60, 'enc_load_weights': True, 'bottleneck_type': 'dense', 'bottleneck_norm': None, 'bottleneck_size': 512, 'bottleneck_in_encoder': True, 'fc_depth': 1, 'fc_min_filters': 1280, 'fc_max_filters': 1280, 'fc_dimensions': 8, 'fc_filter_slope': -0.5, 'fc_dropout': 0.0, 'fc_upsampler': 'upsample2d', 'fc_upsamples': 1, 'fc_upsample_filters': 1280, 'fc_gblock_depth': 3, 'fc_gblock_min_nodes': 512, 'fc_gblock_max_nodes': 512, 'fc_gblock_filter_slope': -0.5, 'fc_gblock_dropout': 0.0, 'dec_upscale_method': 'resize_images', 'dec_upscales_in_fc': 0, 'dec_norm': None, 'dec_min_filters': 160, 'dec_max_filters': 640, 'dec_slope_mode': 'full', 'dec_filter_slope': -0.33, 'dec_res_blocks': 1, 'dec_output_kernel': 3, 'dec_gaussian': True, 'dec_skip_last_residual': False, 'freeze_layers': ['efficientnetv2-l'], 'load_layers': ['encoder'], 'fs_original_depth': 4, 'fs_original_min_filters': 128, 'fs_original_max_filters': 1024, 'fs_original_use_alt': False, 'mobilenet_width': 1.0, 'mobilenet_depth': 1, 'mobilenet_dropout': 0.001, 'mobilenet_minimalistic': False}}
05/23/2023 13:54:05 MainProcess _training model _update_legacy_config DEBUG Checking for legacy state file update
05/23/2023 13:54:05 MainProcess _training model _update_legacy_config DEBUG Legacy item 'dssim_loss' not in config. Skipping update
05/23/2023 13:54:05 MainProcess _training model _update_legacy_config DEBUG Legacy item 'l2_reg_term' not in config. Skipping update
05/23/2023 13:54:05 MainProcess _training model _update_legacy_config DEBUG Legacy item 'clipnorm' not in config. Skipping update
05/23/2023 13:54:05 MainProcess _training model _update_legacy_config DEBUG State file updated for legacy config: False
05/23/2023 13:54:05 MainProcess _training model _update_changed_config_items INFO Config item: 'epsilon_exponent' has been updated from '-4' to '-3'
05/23/2023 13:54:05 MainProcess _training model _update_changed_config_items INFO Config item: 'loss_function_3' has been updated from 'lpips_vgg16' to 'lpips_alex'
05/23/2023 13:54:05 MainProcess _training model _update_changed_config_items INFO Config item: 'freeze_layers' has been updated from '['efficientnetv2-l']' to '['keras_encoder']'
05/23/2023 13:54:05 MainProcess _training model _replace_config DEBUG Replacing config. Old config: {'centering': 'face', 'coverage': 87.5, 'optimizer': 'adam', 'learning_rate': 3e-05, 'epsilon_exponent': -3, 'autoclip': False, 'allow_growth': False, 'mixed_precision': True, 'nan_protection': True, 'convert_batchsize': 16, 'loss_function': 'ms_ssim', 'loss_function_2': 'mae', 'loss_weight_2': 25, 'loss_function_3': 'lpips_alex', 'loss_weight_3': 5, 'loss_function_4': 'ffl', 'loss_weight_4': 100, 'mask_loss_function': 'mse', 'eye_multiplier': 3, 'mouth_multiplier': 2, 'penalized_mask_loss': True, 'mask_type': 'extended', 'mask_blur_kernel': 3, 'mask_threshold': 4, 'learn_mask': False, 'output_size': 256, 'shared_fc': None, 'enable_gblock': True, 'split_fc': True, 'split_gblock': False, 'split_decoders': False, 'enc_architecture': 'efficientnet_v2_l', 'enc_scaling': 60, 'enc_load_weights': True, 'bottleneck_type': 'dense', 'bottleneck_norm': None, 'bottleneck_size': 512, 'bottleneck_in_encoder': True, 'fc_depth': 1, 'fc_min_filters': 1280, 'fc_max_filters': 1280, 'fc_dimensions': 8, 'fc_filter_slope': -0.5, 'fc_dropout': 0.0, 'fc_upsampler': 'upsample2d', 'fc_upsamples': 1, 'fc_upsample_filters': 1280, 'fc_gblock_depth': 3, 'fc_gblock_min_nodes': 512, 'fc_gblock_max_nodes': 512, 'fc_gblock_filter_slope': -0.5, 'fc_gblock_dropout': 0.0, 'dec_upscale_method': 'resize_images', 'dec_upscales_in_fc': 0, 'dec_norm': None, 'dec_min_filters': 160, 'dec_max_filters': 640, 'dec_slope_mode': 'full', 'dec_filter_slope': -0.33, 'dec_res_blocks': 1, 'dec_output_kernel': 3, 'dec_gaussian': True, 'dec_skip_last_residual': False, 'freeze_layers': ['keras_encoder'], 'load_layers': ['encoder'], 'fs_original_depth': 4, 'fs_original_min_filters': 128, 'fs_original_max_filters': 1024, 'fs_original_use_alt': False, 'mobilenet_width': 1.0, 'mobilenet_depth': 1, 'mobilenet_dropout': 0.001, 'mobilenet_minimalistic': False}
05/23/2023 13:54:05 MainProcess _training model _replace_config DEBUG Replaced config. New config: {'centering': 'face', 'coverage': 87.5, 'optimizer': 'adam', 'learning_rate': 3e-05, 'epsilon_exponent': -3, 'autoclip': False, 'allow_growth': False, 'mixed_precision': True, 'nan_protection': True, 'convert_batchsize': 16, 'loss_function': 'ms_ssim', 'loss_function_2': 'mae', 'loss_weight_2': 25, 'loss_function_3': 'lpips_alex', 'loss_weight_3': 5, 'loss_function_4': 'ffl', 'loss_weight_4': 100, 'mask_loss_function': 'mse', 'eye_multiplier': 3, 'mouth_multiplier': 2, 'penalized_mask_loss': True, 'mask_type': 'extended', 'mask_blur_kernel': 3, 'mask_threshold': 4, 'learn_mask': False, 'output_size': 256, 'shared_fc': None, 'enable_gblock': True, 'split_fc': True, 'split_gblock': False, 'split_decoders': False, 'enc_architecture': 'efficientnet_v2_l', 'enc_scaling': 60, 'enc_load_weights': True, 'bottleneck_type': 'dense', 'bottleneck_norm': None, 'bottleneck_size': 512, 'bottleneck_in_encoder': True, 'fc_depth': 1, 'fc_min_filters': 1280, 'fc_max_filters': 1280, 'fc_dimensions': 8, 'fc_filter_slope': -0.5, 'fc_dropout': 0.0, 'fc_upsampler': 'upsample2d', 'fc_upsamples': 1, 'fc_upsample_filters': 1280, 'fc_gblock_depth': 3, 'fc_gblock_min_nodes': 512, 'fc_gblock_max_nodes': 512, 'fc_gblock_filter_slope': -0.5, 'fc_gblock_dropout': 0.0, 'dec_upscale_method': 'resize_images', 'dec_upscales_in_fc': 0, 'dec_norm': None, 'dec_min_filters': 160, 'dec_max_filters': 640, 'dec_slope_mode': 'full', 'dec_filter_slope': -0.33, 'dec_res_blocks': 1, 'dec_output_kernel': 3, 'dec_gaussian': True, 'dec_skip_last_residual': False, 'freeze_layers': ['keras_encoder'], 'load_layers': ['encoder'], 'fs_original_depth': 4, 'fs_original_min_filters': 128, 'fs_original_max_filters': 1024, 'fs_original_use_alt': False, 'mobilenet_width': 1.0, 'mobilenet_depth': 1, 'mobilenet_dropout': 0.001, 'mobilenet_minimalistic': False}
05/23/2023 13:54:05 MainProcess _training model _replace_config INFO Using configuration saved in state file
05/23/2023 13:54:05 MainProcess _training model _new_session_id DEBUG 6
05/23/2023 13:54:05 MainProcess _training model _create_new_session DEBUG Creating new session. id: 6
05/23/2023 13:54:05 MainProcess _training model __init__ DEBUG Initialized State:
05/23/2023 13:54:05 MainProcess _training settings __init__ DEBUG Initializing Settings: (arguments: Namespace(func=<bound method ScriptExecutor.execute_script of <lib.cli.launcher.ScriptExecutor object at 0x0000021ADE1B00D0>>, exclude_gpus=None, configfile=None, loglevel='INFO', logfile=None, redirect_gui=True, colab=False, input_a='D:\\facewarp\\Face Alice Finall', input_b='D:\\facewarp\\Face Li', model_dir='D:\\facewarp\\Model Alice & Li V2_L\\Model Alice & Li V2_L', load_weights=None, trainer='phaze-a', summary=False, freeze_weights=False, batch_size=8, iterations=2000000, distribution_strategy='default', save_interval=250, snapshot_interval=25000, timelapse_input_a='D:\\facewarp\\Face Alice Finall', timelapse_input_b='D:\\facewarp\\Face Li', timelapse_output='D:\\facewarp\\Timeline', preview=False, write_image=False, no_logs=False, warp_to_landmarks=False, no_flip=False, no_augment_color=False, no_warp=True), mixed_precision: True, allow_growth: False, is_predict: False)
05/23/2023 13:54:05 MainProcess _training settings _set_tf_settings DEBUG Not setting any specific Tensorflow settings
05/23/2023 13:54:05 MainProcess _training settings _set_keras_mixed_precision DEBUG use_mixed_precision: True
05/23/2023 13:54:05 MainProcess _training device_compatibility_check _log_device_compatibility_check INFO Mixed precision compatibility check (mixed_float16): OK\nYour GPU will likely run quickly with dtype policy mixed_float16 as it has compute capability of at least 7.0. Your GPU: NVIDIA GeForce RTX 3090, compute capability 8.6
05/23/2023 13:54:05 MainProcess _training settings _set_keras_mixed_precision DEBUG Enabled mixed precision. (Compute dtype: float16, variable_dtype: float32)
05/23/2023 13:54:05 MainProcess _training settings __init__ INFO Enabling Mixed Precision Training.
05/23/2023 13:54:05 MainProcess _training settings _get_strategy DEBUG Using strategy: <tensorflow.python.distribute.distribute_lib._DefaultDistributionStrategy object at 0x0000021AEB378CD0>
05/23/2023 13:54:05 MainProcess _training settings __init__ DEBUG Initialized Settings
05/23/2023 13:54:05 MainProcess _training settings __init__ DEBUG Initializing Loss: (color_order: bgr)
05/23/2023 13:54:05 MainProcess _training settings _get_mask_channels DEBUG uses_masks: (True, True, True), mask_channels: [3, 4, 5]
05/23/2023 13:54:05 MainProcess _training settings __init__ DEBUG Initialized: Loss
05/23/2023 13:54:05 MainProcess _training model __init__ DEBUG Initialized ModelBase (Model)
05/23/2023 13:54:05 MainProcess _training phaze_a _select_freeze_layers DEBUG Substituting 'keras_encoder' for 'efficientnet_v2_l'
05/23/2023 13:54:05 MainProcess _training phaze_a _get_input_shape DEBUG Encoder input set to: (288, 288, 3)
05/23/2023 13:54:05 MainProcess _training settings strategy_scope DEBUG Using strategy scope: <tensorflow.python.distribute.distribute_lib._DefaultDistributionContext object at 0x0000021AEA166080>
05/23/2023 13:54:05 MainProcess _training io _load DEBUG Loading model: D:\facewarp\Model Alice & Li V2_L\Model Alice & Li V2_L\phaze_a.h5
05/23/2023 13:54:05 MainProcess _training multithreading run DEBUG Error in thread (_training): Unable to open file (bad object header version number)
05/23/2023 13:54:05 MainProcess MainThread train _monitor DEBUG Thread error detected
05/23/2023 13:54:05 MainProcess MainThread train _monitor DEBUG Closed Monitor
05/23/2023 13:54:05 MainProcess MainThread train _end_thread DEBUG Ending Training thread
05/23/2023 13:54:05 MainProcess MainThread train _end_thread CRITICAL Error caught! Exiting...
05/23/2023 13:54:05 MainProcess MainThread multithreading join DEBUG Joining Threads: '_training'
05/23/2023 13:54:05 MainProcess MainThread multithreading join DEBUG Joining Thread: '_training'
05/23/2023 13:54:05 MainProcess MainThread multithreading join ERROR Caught exception in thread: '_training'
Traceback (most recent call last):
File "C:\Users\adama\faceswap\lib\cli\launcher.py", line 230, in execute_script
process.process()
File "C:\Users\adama\faceswap\scripts\train.py", line 213, in process
self._end_thread(thread, err)
File "C:\Users\adama\faceswap\scripts\train.py", line 253, in _end_thread
thread.join()
File "C:\Users\adama\faceswap\lib\multithreading.py", line 220, in join
raise thread.err[1].with_traceback(thread.err[2])
File "C:\Users\adama\faceswap\lib\multithreading.py", line 96, in run
self._target(*self._args, **self._kwargs)
File "C:\Users\adama\faceswap\scripts\train.py", line 275, in _training
raise err
File "C:\Users\adama\faceswap\scripts\train.py", line 263, in _training
model = self._load_model()
File "C:\Users\adama\faceswap\scripts\train.py", line 291, in _load_model
model.build()
File "C:\Users\adama\faceswap\plugins\train\model\phaze_a.py", line 220, in build
model = self._io._load() # pylint:disable=protected-access
File "C:\Users\adama\faceswap\plugins\train\model\_base\io.py", line 152, in _load
model = load_model(self._filename, compile=False)
File "C:\Users\adama\MiniConda3\envs\faceswap\lib\site-packages\keras\utils\traceback_utils.py", line 70, in error_handler
raise e.with_traceback(filtered_tb) from None
File "C:\Users\adama\MiniConda3\envs\faceswap\lib\site-packages\h5py\_hl\files.py", line 567, in __init__
fid = make_fid(name, mode, userblock_size, fapl, fcpl, swmr=swmr)
File "C:\Users\adama\MiniConda3\envs\faceswap\lib\site-packages\h5py\_hl\files.py", line 231, in make_fid
fid = h5f.open(name, flags, fapl=fapl)
File "h5py\_objects.pyx", line 54, in h5py._objects.with_phil.wrapper
File "h5py\_objects.pyx", line 55, in h5py._objects.with_phil.wrapper
File "h5py\h5f.pyx", line 106, in h5py.h5f.open
OSError: Unable to open file (bad object header version number)
============ System Information ============
backend: nvidia
encoding: cp950
git_branch: master
git_commits: e2ad3e2 add ru locale (#1311)
gpu_cuda: No global version found. Check Conda packages for Conda Cuda
gpu_cudnn: No global version found. Check Conda packages for Conda cuDNN
gpu_devices: GPU_0: NVIDIA GeForce RTX 3090
gpu_devices_active: GPU_0
gpu_driver: 531.79
gpu_vram: GPU_0: 24576MB (23701MB free)
os_machine: AMD64
os_platform: Windows-10-10.0.19045-SP0
os_release: 10
py_command: C:\Users\adama\faceswap\faceswap.py train -A D:/facewarp/Face Alice Finall -B D:/facewarp/Face Li -m D:/facewarp/Model Alice & Li V2_L/Model Alice & Li V2_L -t phaze-a -bs 8 -it 2000000 -D default -s 250 -ss 25000 -tia D:/facewarp/Face Alice Finall -tib D:/facewarp/Face Li -to D:/facewarp/Timeline -nw -L INFO -gui
py_conda_version: conda 23.3.1
py_implementation: CPython
py_version: 3.9.16
py_virtual_env: True
sys_cores: 24
sys_processor: AMD64 Family 23 Model 113 Stepping 0, AuthenticAMD
sys_ram: Total: 32677MB, Available: 27160MB, Used: 5516MB, Free: 27160MB
=============== Pip Packages ===============
absl-py @ file:///C:/b/abs_5babsu7y5x/croot/absl-py_1666362945682/work
astunparse==1.6.3
cachetools==5.3.0
certifi==2023.5.7
charset-normalizer==3.1.0
cloudpickle @ file:///C:/b/abs_3796yxesic/croot/cloudpickle_1683040098851/work
colorama @ file:///C:/b/abs_a9ozq0l032/croot/colorama_1672387194846/work
cycler @ file:///tmp/build/80754af9/cycler_1637851556182/work
decorator @ file:///opt/conda/conda-bld/decorator_1643638310831/work
dm-tree @ file:///C:/b/abs_10z0iy5knj/croot/dm-tree_1671027465819/work
fastcluster @ file:///D:/bld/fastcluster_1649783471014/work
ffmpy==0.3.0
flatbuffers==23.5.8
fonttools==4.25.0
gast==0.4.0
google-auth==2.17.3
google-auth-oauthlib==0.4.6
google-pasta==0.2.0
grpcio==1.54.0
h5py==3.8.0
idna==3.4
imageio @ file:///C:/b/abs_27kq2gy1us/croot/imageio_1677879918708/work
imageio-ffmpeg @ file:///home/conda/feedstock_root/build_artifacts/imageio-ffmpeg_1673483481485/work
importlib-metadata==6.6.0
joblib @ file:///home/conda/feedstock_root/build_artifacts/joblib_1663332044897/work
keras==2.10.0
Keras-Preprocessing==1.1.2
kiwisolver @ file:///C:/b/abs_88mdhvtahm/croot/kiwisolver_1672387921783/work
libclang==16.0.0
Markdown==3.4.3
MarkupSafe==2.1.2
matplotlib @ file:///C:/b/abs_ae02atcfur/croot/matplotlib-suite_1667356722968/work
mkl-fft==1.3.6
mkl-random @ file:///C:/Users/dev-admin/mkl/mkl_random_1682977971003/work
mkl-service==2.4.0
munkres==1.1.4
numexpr @ file:///C:/b/abs_afm0oewmmt/croot/numexpr_1683221839116/work
numpy @ file:///C:/Users/dev-admin/mkl/numpy_and_numpy_base_1682981337988/work
nvidia-ml-py==11.525.112
oauthlib==3.2.2
opencv-python==4.7.0.72
opt-einsum==3.3.0
packaging @ file:///C:/b/abs_ed_kb9w6g4/croot/packaging_1678965418855/work
Pillow==9.4.0
ply==3.11
protobuf==3.19.6
psutil @ file:///C:/Windows/Temp/abs_b2c2fd7f-9fd5-4756-95ea-8aed74d0039flsd9qufz/croots/recipe/psutil_1656431277748/work
pyasn1==0.5.0
pyasn1-modules==0.3.0
pyparsing @ file:///C:/Users/BUILDE~1/AppData/Local/Temp/abs_7f_7lba6rl/croots/recipe/pyparsing_1661452540662/work
PyQt5==5.15.7
PyQt5-sip @ file:///C:/Windows/Temp/abs_d7gmd2jg8i/croots/recipe/pyqt-split_1659273064801/work/pyqt_sip
python-dateutil @ file:///tmp/build/80754af9/python-dateutil_1626374649649/work
pywin32==305.1
pywinpty @ file:///C:/ci_310/pywinpty_1644230983541/work/target/wheels/pywinpty-2.0.2-cp39-none-win_amd64.whl
requests==2.30.0
requests-oauthlib==1.3.1
rsa==4.9
scikit-learn @ file:///C:/b/abs_a0emltxu06/croot/scikit-learn_1680198772090/work
scipy==1.9.3
sip @ file:///C:/Windows/Temp/abs_b8fxd17m2u/croots/recipe/sip_1659012372737/work
six @ file:///tmp/build/80754af9/six_1644875935023/work
tensorboard==2.10.1
tensorboard-data-server==0.6.1
tensorboard-plugin-wit==1.8.1
tensorflow-estimator==2.10.0
tensorflow-gpu==2.10.1
tensorflow-io-gcs-filesystem==0.31.0
tensorflow-probability @ file:///tmp/build/80754af9/tensorflow-probability_1633017132682/work
termcolor==2.3.0
threadpoolctl @ file:///home/conda/feedstock_root/build_artifacts/threadpoolctl_1643647933166/work
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tornado @ file:///C:/ci/tornado_1662458743919/work
tqdm @ file:///C:/b/abs_f76j9hg7pv/croot/tqdm_1679561871187/work
typing_extensions @ file:///C:/b/abs_a1bb332wcs/croot/typing_extensions_1681939523095/work
urllib3==2.0.2
Werkzeug==2.3.4
wrapt==1.15.0
zipp==3.15.0
============== Conda Packages ==============
# packages in environment at C:\Users\adama\MiniConda3\envs\faceswap:
#
# Name Version Build Channel
absl-py 1.3.0 py39haa95532_0
astunparse 1.6.3 pypi_0 pypi
blas 1.0 mkl
brotli 1.0.9 h2bbff1b_7
brotli-bin 1.0.9 h2bbff1b_7
ca-certificates 2023.5.7 h56e8100_0 conda-forge
cachetools 5.3.0 pypi_0 pypi
certifi 2023.5.7 pypi_0 pypi
charset-normalizer 3.1.0 pypi_0 pypi
cloudpickle 2.2.1 py39haa95532_0
colorama 0.4.6 py39haa95532_0
cudatoolkit 11.2.2 h933977f_10 conda-forge
cudnn 8.1.0.77 h3e0f4f4_0 conda-forge
cycler 0.11.0 pyhd3eb1b0_0
decorator 5.1.1 pyhd3eb1b0_0
dm-tree 0.1.7 py39hd77b12b_1
fastcluster 1.2.6 py39h2e25243_1 conda-forge
ffmpeg 4.3.1 ha925a31_0 conda-forge
ffmpy 0.3.0 pypi_0 pypi
flatbuffers 23.5.8 pypi_0 pypi
fonttools 4.25.0 pyhd3eb1b0_0
freetype 2.12.1 ha860e81_0
gast 0.4.0 pypi_0 pypi
giflib 5.2.1 h8cc25b3_3
git 2.34.1 haa95532_0
glib 2.69.1 h5dc1a3c_2
google-auth 2.17.3 pypi_0 pypi
google-auth-oauthlib 0.4.6 pypi_0 pypi
google-pasta 0.2.0 pypi_0 pypi
grpcio 1.54.0 pypi_0 pypi
gst-plugins-base 1.18.5 h9e645db_0
gstreamer 1.18.5 hd78058f_0
h5py 3.8.0 pypi_0 pypi
icc_rt 2022.1.0 h6049295_2
icu 58.2 ha925a31_3
idna 3.4 pypi_0 pypi
imageio 2.26.0 py39haa95532_0
imageio-ffmpeg 0.4.8 pyhd8ed1ab_0 conda-forge
importlib-metadata 6.6.0 pypi_0 pypi
intel-openmp 2023.1.0 h59b6b97_46319
joblib 1.2.0 pyhd8ed1ab_0 conda-forge
jpeg 9e h2bbff1b_1
keras 2.10.0 pypi_0 pypi
keras-preprocessing 1.1.2 pypi_0 pypi
kiwisolver 1.4.4 py39hd77b12b_0
krb5 1.19.4 h5b6d351_0
lerc 3.0 hd77b12b_0
libbrotlicommon 1.0.9 h2bbff1b_7
libbrotlidec 1.0.9 h2bbff1b_7
libbrotlienc 1.0.9 h2bbff1b_7
libclang 16.0.0 pypi_0 pypi
libclang13 14.0.6 default_h8e68704_1
libdeflate 1.17 h2bbff1b_0
libffi 3.4.4 hd77b12b_0
libiconv 1.16 h2bbff1b_2
libogg 1.3.5 h2bbff1b_1
libpng 1.6.39 h8cc25b3_0
libtiff 4.5.0 h6c2663c_2
libvorbis 1.3.7 he774522_0
libwebp 1.2.4 hbc33d0d_1
libwebp-base 1.2.4 h2bbff1b_1
libxml2 2.10.3 h0ad7f3c_0
libxslt 1.1.37 h2bbff1b_0
lz4-c 1.9.4 h2bbff1b_0
markdown 3.4.3 pypi_0 pypi
markupsafe 2.1.2 pypi_0 pypi
matplotlib 3.5.3 py39haa95532_0
matplotlib-base 3.5.3 py39hd77b12b_0
mkl 2023.1.0 h8bd8f75_46356
mkl-service 2.4.0 py39h2bbff1b_1
mkl_fft 1.3.6 py39hf11a4ad_1
mkl_random 1.2.2 py39hf11a4ad_1
munkres 1.1.4 py_0
numexpr 2.8.4 py39h7b80656_1
numpy 1.23.5 py39h6917f2d_1
numpy-base 1.23.5 py39h46c4fa8_1
nvidia-ml-py 11.525.112 pypi_0 pypi
oauthlib 3.2.2 pypi_0 pypi
opencv-python 4.7.0.72 pypi_0 pypi
openssl 1.1.1t h2bbff1b_0
opt-einsum 3.3.0 pypi_0 pypi
packaging 23.0 py39haa95532_0
pcre 8.45 hd77b12b_0
pillow 9.4.0 py39hd77b12b_0
pip 23.0.1 py39haa95532_0
ply 3.11 py39haa95532_0
protobuf 3.19.6 pypi_0 pypi
psutil 5.9.0 py39h2bbff1b_0
pyasn1 0.5.0 pypi_0 pypi
pyasn1-modules 0.3.0 pypi_0 pypi
pyparsing 3.0.9 py39haa95532_0
pyqt 5.15.7 py39hd77b12b_0
pyqt5-sip 12.11.0 py39hd77b12b_0
python 3.9.16 h6244533_2
python-dateutil 2.8.2 pyhd3eb1b0_0
python_abi 3.9 2_cp39 conda-forge
pywin32 305 py39h2bbff1b_0
pywinpty 2.0.2 py39h5da7b33_0
qt-main 5.15.2 he8e5bd7_8
qt-webengine 5.15.9 hb9a9bb5_5
qtwebkit 5.212 h2bbfb41_5
requests 2.30.0 pypi_0 pypi
requests-oauthlib 1.3.1 pypi_0 pypi
rsa 4.9 pypi_0 pypi
scikit-learn 1.2.2 py39hd77b12b_0
scipy 1.9.3 py39hdcfc7df_2
setuptools 66.0.0 py39haa95532_0
sip 6.6.2 py39hd77b12b_0
six 1.16.0 pyhd3eb1b0_1
sqlite 3.41.2 h2bbff1b_0
tbb 2021.8.0 h59b6b97_0
tensorboard 2.10.1 pypi_0 pypi
tensorboard-data-server 0.6.1 pypi_0 pypi
tensorboard-plugin-wit 1.8.1 pypi_0 pypi
tensorflow-estimator 2.10.0 pypi_0 pypi
tensorflow-gpu 2.10.1 pypi_0 pypi
tensorflow-io-gcs-filesystem 0.31.0 pypi_0 pypi
tensorflow-probability 0.14.0 pyhd3eb1b0_0
termcolor 2.3.0 pypi_0 pypi
threadpoolctl 3.1.0 pyh8a188c0_0 conda-forge
tk 8.6.12 h2bbff1b_0
toml 0.10.2 pyhd3eb1b0_0
tornado 6.2 py39h2bbff1b_0
tqdm 4.65.0 py39hd4e2768_0
typing-extensions 4.5.0 py39haa95532_0
typing_extensions 4.5.0 py39haa95532_0
tzdata 2023c h04d1e81_0
urllib3 2.0.2 pypi_0 pypi
vc 14.2 h21ff451_1
vs2015_runtime 14.27.29016 h5e58377_2
werkzeug 2.3.4 pypi_0 pypi
wheel 0.38.4 py39haa95532_0
winpty 0.4.3 4
wrapt 1.15.0 pypi_0 pypi
xz 5.4.2 h8cc25b3_0
zipp 3.15.0 pypi_0 pypi
zlib 1.2.13 h8cc25b3_0
zstd 1.5.5 hd43e919_0
================= Configs ==================
--------- .faceswap ---------
backend: nvidia
--------- convert.ini ---------
[color.color_transfer]
clip: True
preserve_paper: True
[color.manual_balance]
colorspace: HSV
balance_1: 0.0
balance_2: 0.0
balance_3: 0.0
contrast: 0.0
brightness: 0.0
[color.match_hist]
threshold: 99.0
[mask.mask_blend]
type: normalized
kernel_size: 3
passes: 4
threshold: 4
erosion: 0.0
erosion_top: 0.0
erosion_bottom: 0.0
erosion_left: 0.0
erosion_right: 0.0
[scaling.sharpen]
method: none
amount: 150
radius: 0.3
threshold: 5.0
[writer.ffmpeg]
container: mp4
codec: libx264
crf: 23
preset: medium
tune: none
profile: auto
level: auto
skip_mux: False
[writer.gif]
fps: 25
loop: 0
palettesize: 256
subrectangles: False
[writer.opencv]
format: png
draw_transparent: False
separate_mask: False
jpg_quality: 75
png_compress_level: 3
[writer.pillow]
format: png
draw_transparent: False
separate_mask: False
optimize: False
gif_interlace: True
jpg_quality: 75
png_compress_level: 3
tif_compression: tiff_deflate
--------- extract.ini ---------
[global]
allow_growth: False
aligner_min_scale: 0.07
aligner_max_scale: 2.0
aligner_distance: 22.5
aligner_roll: 45.0
aligner_features: True
filter_refeed: True
save_filtered: False
realign_refeeds: True
filter_realign: True
[align.fan]
batch-size: 64
[detect.cv2_dnn]
confidence: 50
[detect.mtcnn]
minsize: 20
scalefactor: 0.709
batch-size: 8
cpu: True
threshold_1: 0.6
threshold_2: 0.7
threshold_3: 0.7
[detect.s3fd]
confidence: 70
batch-size: 20
[mask.bisenet_fp]
batch-size: 8
cpu: False
weights: faceswap
include_ears: False
include_hair: False
include_glasses: True
[mask.custom]
batch-size: 8
centering: face
fill: False
[mask.unet_dfl]
batch-size: 32
[mask.vgg_clear]
batch-size: 6
[mask.vgg_obstructed]
batch-size: 2
[recognition.vgg_face2]
batch-size: 32
cpu: False
--------- gui.ini ---------
[global]
fullscreen: False
tab: extract
options_panel_width: 30
console_panel_height: 20
icon_size: 14
font: default
font_size: 9
autosave_last_session: always
timeout: 120
auto_load_model_stats: True
--------- train.ini ---------
[global]
centering: face
coverage: 87.5
icnr_init: False
conv_aware_init: False
optimizer: adam
learning_rate: 3e-05
epsilon_exponent: -3
autoclip: False
reflect_padding: False
allow_growth: False
mixed_precision: True
nan_protection: True
convert_batchsize: 16
[global.loss]
loss_function: ms_ssim
loss_function_2: mae
loss_weight_2: 25
loss_function_3: lpips_alex
loss_weight_3: 5
loss_function_4: ffl
loss_weight_4: 100
mask_loss_function: mse
eye_multiplier: 3
mouth_multiplier: 2
penalized_mask_loss: True
mask_type: extended
mask_blur_kernel: 3
mask_threshold: 4
learn_mask: False
[model.dfaker]
output_size: 128
[model.dfl_h128]
lowmem: False
[model.dfl_sae]
input_size: 128
architecture: df
autoencoder_dims: 0
encoder_dims: 42
decoder_dims: 21
multiscale_decoder: False
[model.dlight]
features: best
details: good
output_size: 256
[model.original]
lowmem: False
[model.phaze_a]
output_size: 256
shared_fc: None
enable_gblock: True
split_fc: True
split_gblock: False
split_decoders: False
enc_architecture: efficientnet_v2_l
enc_scaling: 60
enc_load_weights: True
bottleneck_type: dense
bottleneck_norm: None
bottleneck_size: 512
bottleneck_in_encoder: True
fc_depth: 1
fc_min_filters: 1280
fc_max_filters: 1280
fc_dimensions: 8
fc_filter_slope: -0.5
fc_dropout: 0.0
fc_upsampler: upsample2d
fc_upsamples: 1
fc_upsample_filters: 1280
fc_gblock_depth: 3
fc_gblock_min_nodes: 512
fc_gblock_max_nodes: 512
fc_gblock_filter_slope: -0.5
fc_gblock_dropout: 0.0
dec_upscale_method: resize_images
dec_upscales_in_fc: 0
dec_norm: None
dec_min_filters: 160
dec_max_filters: 640
dec_slope_mode: full
dec_filter_slope: -0.33
dec_res_blocks: 1
dec_output_kernel: 3
dec_gaussian: True
dec_skip_last_residual: False
freeze_layers: keras_encoder
load_layers: encoder
fs_original_depth: 4
fs_original_min_filters: 128
fs_original_max_filters: 1024
fs_original_use_alt: False
mobilenet_width: 1.0
mobilenet_depth: 1
mobilenet_dropout: 0.001
mobilenet_minimalistic: False
[model.realface]
input_size: 64
output_size: 128
dense_nodes: 1536
complexity_encoder: 128
complexity_decoder: 512
[model.unbalanced]
input_size: 128
lowmem: False
nodes: 1024
complexity_encoder: 128
complexity_decoder_a: 384
complexity_decoder_b: 512
[model.villain]
lowmem: False
[trainer.original]
preview_images: 14
mask_opacity: 30
mask_color: #ff0000
zoom_amount: 5
rotation_range: 10
shift_range: 5
flip_chance: 50
color_lightness: 30
color_ab: 8
color_clahe_chance: 50
color_clahe_max_size: 4