Code:
02/02/2024 23:02:43 CRITICAL An unexpected crash has occurred. Crash report written to 'C:\Users\90363\faceswap\crash_report.2024.02.02.230238914277.log'. You MUST provide this file if seeking assistance. Please verify you are running the latest version of faceswap before reporting
02/02/2024 23:00:58 MainProcess _training _base _overlay_foreground DEBUG Overlayed foreground. Shape: (14, 72, 72, 3)
02/02/2024 23:00:58 MainProcess _training _base _overlay_foreground DEBUG Overlayed foreground. Shape: (14, 72, 72, 3)
02/02/2024 23:00:58 MainProcess _training _base _get_headers DEBUG side: 'a', width: 72
02/02/2024 23:00:58 MainProcess _training _base _get_headers DEBUG height: 16, total_width: 216
02/02/2024 23:00:58 MainProcess _training _base _get_headers DEBUG texts: ['Original (A)', 'Original > Original', 'Original > Swap'], text_sizes: [(41, 6), (66, 6), (58, 6)], text_x: [15, 75, 151], text_y: 11
02/02/2024 23:00:58 MainProcess _training _base _get_headers DEBUG header_box.shape: (16, 216, 3)
02/02/2024 23:00:58 MainProcess _training _base _to_full_frame DEBUG side: 'b', number of sample arrays: 3, prediction.shapes: [(14, 64, 64, 3), (14, 64, 64, 3)])
02/02/2024 23:00:58 MainProcess _training _base _process_full DEBUG full_size: 74, prediction_size: 64, color: (0.0, 0.0, 1.0)
02/02/2024 23:00:58 MainProcess _training _base _resize_sample DEBUG Resizing sample: (side: 'b', sample.shape: (14, 74, 74, 3), target_size: 72, scale: 0.972972972972973)
02/02/2024 23:00:58 MainProcess _training _base _resize_sample DEBUG Resized sample: (side: 'b' shape: (14, 72, 72, 3))
02/02/2024 23:00:58 MainProcess _training _base _process_full DEBUG Overlayed background. Shape: (14, 72, 72, 3)
02/02/2024 23:00:58 MainProcess _training _base _compile_masked DEBUG masked shapes: [(14, 64, 64, 3), (14, 64, 64, 3), (14, 64, 64, 3)]
02/02/2024 23:00:58 MainProcess _training _base _overlay_foreground DEBUG Overlayed foreground. Shape: (14, 72, 72, 3)
02/02/2024 23:00:58 MainProcess _training _base _overlay_foreground DEBUG Overlayed foreground. Shape: (14, 72, 72, 3)
02/02/2024 23:00:58 MainProcess _training _base _overlay_foreground DEBUG Overlayed foreground. Shape: (14, 72, 72, 3)
02/02/2024 23:00:58 MainProcess _training _base _get_headers DEBUG side: 'b', width: 72
02/02/2024 23:00:58 MainProcess _training _base _get_headers DEBUG height: 16, total_width: 216
02/02/2024 23:00:58 MainProcess _training _base _get_headers DEBUG texts: ['Swap (B)', 'Swap > Swap', 'Swap > Original'], text_sizes: [(34, 6), (50, 6), (58, 6)], text_x: [19, 83, 151], text_y: 11
02/02/2024 23:00:58 MainProcess _training _base _get_headers DEBUG header_box.shape: (16, 216, 3)
02/02/2024 23:00:58 MainProcess _training _base _duplicate_headers DEBUG side: a header.shape: (16, 216, 3)
02/02/2024 23:00:58 MainProcess _training _base _duplicate_headers DEBUG side: b header.shape: (16, 216, 3)
02/02/2024 23:00:58 MainProcess _training _base _stack_images DEBUG Stack images
02/02/2024 23:00:58 MainProcess _training _base get_transpose_axes DEBUG Even number of images to stack
02/02/2024 23:00:58 MainProcess _training _base _stack_images DEBUG Stacked images
02/02/2024 23:00:58 MainProcess _training _base _compile_preview DEBUG Compiled sample
02/02/2024 23:00:58 MainProcess _training _base output_timelapse DEBUG Created time-lapse: 'D:\D\1706886058.jpg'
02/02/2024 23:00:58 MainProcess _training train _run_training_cycle DEBUG Saving (save_iterations: True, save_now: False) Iteration: (iteration: 1)
02/02/2024 23:00:58 MainProcess _training io save DEBUG Backing up and saving models
02/02/2024 23:00:58 MainProcess _training io _get_save_averages DEBUG Getting save averages
02/02/2024 23:00:58 MainProcess _training io _get_save_averages DEBUG Average losses since last save: [0.3055972158908844, 0.30632156133651733]
02/02/2024 23:00:58 MainProcess _training io _should_backup DEBUG Set initial save iteration loss average for 'a': 0.3055972158908844
02/02/2024 23:00:58 MainProcess _training io _should_backup DEBUG Set initial save iteration loss average for 'b': 0.30632156133651733
02/02/2024 23:00:58 MainProcess _training io _should_backup DEBUG Updated lowest historical save iteration averages from: {'a': 0.3055972158908844, 'b': 0.30632156133651733} to: {'a': 0.3055972158908844, 'b': 0.30632156133651733}
02/02/2024 23:00:58 MainProcess _training io _should_backup DEBUG Should backup: True
02/02/2024 23:00:58 MainProcess _training attrs create DEBUG Creating converter from 5 to 3
02/02/2024 23:00:59 MainProcess _training model save DEBUG Saving State
02/02/2024 23:00:59 MainProcess _training serializer save DEBUG filename: D:\C\original_state.json, data type: <class 'dict'>
02/02/2024 23:00:59 MainProcess _training serializer _check_extension DEBUG Original filename: 'D:\C\original_state.json', final filename: 'D:\C\original_state.json'
02/02/2024 23:00:59 MainProcess _training serializer marshal DEBUG data type: <class 'dict'>
02/02/2024 23:00:59 MainProcess _training serializer marshal DEBUG returned data type: <class 'bytes'>
02/02/2024 23:00:59 MainProcess _training model save DEBUG Saved State
02/02/2024 23:00:59 MainProcess _training io save INFO [Saved model] - Average loss since last save: face_a: 0.30560, face_b: 0.30632
02/02/2024 23:00:59 MainProcess _training generator generate_preview DEBUG Generating preview (is_timelapse: False)
02/02/2024 23:00:59 MainProcess _training generator generate_preview DEBUG Generated samples: is_timelapse: False, images: {'feed': {'a': (14, 64, 64, 3), 'b': (14, 64, 64, 3)}, 'samples': {'a': (14, 74, 74, 3), 'b': (14, 74, 74, 3)}, 'sides': {'a': (14, 64, 64, 1), 'b': (14, 64, 64, 1)}}
02/02/2024 23:00:59 MainProcess _training generator compile_sample DEBUG Compiling samples: (side: 'a', samples: 14)
02/02/2024 23:00:59 MainProcess _training generator compile_sample DEBUG Compiling samples: (side: 'b', samples: 14)
02/02/2024 23:00:59 MainProcess _training generator compile_sample DEBUG Compiled Samples: {'a': [(14, 64, 64, 3), (14, 74, 74, 3), (14, 64, 64, 1)], 'b': [(14, 64, 64, 3), (14, 74, 74, 3), (14, 64, 64, 1)]}
02/02/2024 23:00:59 MainProcess _training _base show_sample DEBUG Showing sample
02/02/2024 23:00:59 MainProcess _training _base _get_predictions DEBUG Getting Predictions
02/02/2024 23:01:00 MainProcess _training _base _get_predictions DEBUG Returning predictions: {'a_a': (14, 64, 64, 3), 'b_b': (14, 64, 64, 3), 'a_b': (14, 64, 64, 3), 'b_a': (14, 64, 64, 3)}
02/02/2024 23:01:00 MainProcess _training _base _to_full_frame DEBUG side: 'a', number of sample arrays: 3, prediction.shapes: [(14, 64, 64, 3), (14, 64, 64, 3)])
02/02/2024 23:01:00 MainProcess _training _base _process_full DEBUG full_size: 74, prediction_size: 64, color: (0.0, 0.0, 1.0)
02/02/2024 23:01:00 MainProcess _training _base _resize_sample DEBUG Resizing sample: (side: 'a', sample.shape: (14, 74, 74, 3), target_size: 72, scale: 0.972972972972973)
02/02/2024 23:01:00 MainProcess _training _base _resize_sample DEBUG Resized sample: (side: 'a' shape: (14, 72, 72, 3))
02/02/2024 23:01:00 MainProcess _training _base _process_full DEBUG Overlayed background. Shape: (14, 72, 72, 3)
02/02/2024 23:01:00 MainProcess _training _base _compile_masked DEBUG masked shapes: [(14, 64, 64, 3), (14, 64, 64, 3), (14, 64, 64, 3)]
02/02/2024 23:01:00 MainProcess _training _base _overlay_foreground DEBUG Overlayed foreground. Shape: (14, 72, 72, 3)
02/02/2024 23:01:00 MainProcess _training _base _overlay_foreground DEBUG Overlayed foreground. Shape: (14, 72, 72, 3)
02/02/2024 23:01:00 MainProcess _training _base _overlay_foreground DEBUG Overlayed foreground. Shape: (14, 72, 72, 3)
02/02/2024 23:01:00 MainProcess _training _base _get_headers DEBUG side: 'a', width: 72
02/02/2024 23:01:00 MainProcess _training _base _get_headers DEBUG height: 16, total_width: 216
02/02/2024 23:01:00 MainProcess _training _base _get_headers DEBUG texts: ['Original (A)', 'Original > Original', 'Original > Swap'], text_sizes: [(41, 6), (66, 6), (58, 6)], text_x: [15, 75, 151], text_y: 11
02/02/2024 23:01:00 MainProcess _training _base _get_headers DEBUG header_box.shape: (16, 216, 3)
02/02/2024 23:01:00 MainProcess _training _base _to_full_frame DEBUG side: 'b', number of sample arrays: 3, prediction.shapes: [(14, 64, 64, 3), (14, 64, 64, 3)])
02/02/2024 23:01:00 MainProcess _training _base _process_full DEBUG full_size: 74, prediction_size: 64, color: (0.0, 0.0, 1.0)
02/02/2024 23:01:00 MainProcess _training _base _resize_sample DEBUG Resizing sample: (side: 'b', sample.shape: (14, 74, 74, 3), target_size: 72, scale: 0.972972972972973)
02/02/2024 23:01:00 MainProcess _training _base _resize_sample DEBUG Resized sample: (side: 'b' shape: (14, 72, 72, 3))
02/02/2024 23:01:00 MainProcess _training _base _process_full DEBUG Overlayed background. Shape: (14, 72, 72, 3)
02/02/2024 23:01:00 MainProcess _training _base _compile_masked DEBUG masked shapes: [(14, 64, 64, 3), (14, 64, 64, 3), (14, 64, 64, 3)]
02/02/2024 23:01:00 MainProcess _training _base _overlay_foreground DEBUG Overlayed foreground. Shape: (14, 72, 72, 3)
02/02/2024 23:01:00 MainProcess _training _base _overlay_foreground DEBUG Overlayed foreground. Shape: (14, 72, 72, 3)
02/02/2024 23:01:00 MainProcess _training _base _overlay_foreground DEBUG Overlayed foreground. Shape: (14, 72, 72, 3)
02/02/2024 23:01:00 MainProcess _training _base _get_headers DEBUG side: 'b', width: 72
02/02/2024 23:01:00 MainProcess _training _base _get_headers DEBUG height: 16, total_width: 216
02/02/2024 23:01:00 MainProcess _training _base _get_headers DEBUG texts: ['Swap (B)', 'Swap > Swap', 'Swap > Original'], text_sizes: [(34, 6), (50, 6), (58, 6)], text_x: [19, 83, 151], text_y: 11
02/02/2024 23:01:00 MainProcess _training _base _get_headers DEBUG header_box.shape: (16, 216, 3)
02/02/2024 23:01:00 MainProcess _training _base _duplicate_headers DEBUG side: a header.shape: (16, 216, 3)
02/02/2024 23:01:00 MainProcess _training _base _duplicate_headers DEBUG side: b header.shape: (16, 216, 3)
02/02/2024 23:01:00 MainProcess _training _base _stack_images DEBUG Stack images
02/02/2024 23:01:00 MainProcess _training _base get_transpose_axes DEBUG Even number of images to stack
02/02/2024 23:01:00 MainProcess _training _base _stack_images DEBUG Stacked images
02/02/2024 23:01:00 MainProcess _training _base _compile_preview DEBUG Compiled sample
02/02/2024 23:01:00 MainProcess _training train _show DEBUG Updating preview: (name: Training - 'S': Save Now. 'R': Refresh Preview. 'M': Toggle Mask. 'F': Toggle Screen Fit-Actual Size. 'ENTER': Save and Quit)
02/02/2024 23:01:00 MainProcess _training train _show DEBUG Generating preview for GUI
02/02/2024 23:01:00 MainProcess _training train _show DEBUG Generated preview for GUI: 'C:\Users\90363\faceswap\lib\gui\.cache\preview\.gui_training_preview.png'
02/02/2024 23:01:00 MainProcess _training train _show DEBUG Updated preview: (name: Training - 'S': Save Now. 'R': Refresh Preview. 'M': Toggle Mask. 'F': Toggle Screen Fit-Actual Size. 'ENTER': Save and Quit)
02/02/2024 23:01:00 MainProcess _training train _run_training_cycle INFO [Preview Updated]
02/02/2024 23:02:24 MainProcess _run cache cache_metadata VERBOSE Cache filled: 'D:\A'
02/02/2024 23:02:38 MainProcess _training _base output_timelapse DEBUG Outputting time-lapse
02/02/2024 23:02:38 MainProcess _training _base output_timelapse DEBUG Getting time-lapse samples
02/02/2024 23:02:38 MainProcess _training generator generate_preview DEBUG Generating preview (is_timelapse: True)
02/02/2024 23:02:38 MainProcess _training multithreading check_and_raise_error DEBUG Thread error caught: [(<class 'TypeError'>, TypeError("'NoneType' object is not subscriptable"), <traceback object at 0x000001D716E3E580>)]
02/02/2024 23:02:38 MainProcess _training multithreading run DEBUG Error in thread (_training): 'NoneType' object is not subscriptable
02/02/2024 23:02:38 MainProcess MainThread train _monitor DEBUG Thread error detected
02/02/2024 23:02:38 MainProcess MainThread train _monitor DEBUG Closed Monitor
02/02/2024 23:02:38 MainProcess MainThread train _end_thread DEBUG Ending Training thread
02/02/2024 23:02:38 MainProcess MainThread train _end_thread CRITICAL Error caught! Exiting...
02/02/2024 23:02:38 MainProcess MainThread multithreading join DEBUG Joining Threads: '_training'
02/02/2024 23:02:38 MainProcess MainThread multithreading join DEBUG Joining Thread: '_training'
02/02/2024 23:02:38 MainProcess MainThread multithreading join ERROR Caught exception in thread: '_training'
Traceback (most recent call last):
File "C:\Users\90363\faceswap\lib\cli\launcher.py", line 225, in execute_script
process.process()
File "C:\Users\90363\faceswap\scripts\train.py", line 209, in process
self._end_thread(thread, err)
File "C:\Users\90363\faceswap\scripts\train.py", line 249, in _end_thread
thread.join()
File "C:\Users\90363\faceswap\lib\multithreading.py", line 224, in join
raise thread.err[1].with_traceback(thread.err[2])
File "C:\Users\90363\faceswap\lib\multithreading.py", line 100, in run
self._target(*self._args, **self._kwargs)
File "C:\Users\90363\faceswap\scripts\train.py", line 274, in _training
raise err
File "C:\Users\90363\faceswap\scripts\train.py", line 264, in _training
self._run_training_cycle(model, trainer)
File "C:\Users\90363\faceswap\scripts\train.py", line 352, in _run_training_cycle
trainer.train_one_step(viewer, timelapse)
File "C:\Users\90363\faceswap\plugins\train\trainer\_base.py", line 267, in train_one_step
self._update_viewers(viewer, timelapse_kwargs)
File "C:\Users\90363\faceswap\plugins\train\trainer\_base.py", line 373, in _update_viewers
self._timelapse.output_timelapse(timelapse_kwargs)
File "C:\Users\90363\faceswap\plugins\train\trainer\_base.py", line 881, in output_timelapse
self._samples.images = self._feeder.generate_preview(is_timelapse=True)
File "C:\Users\90363\faceswap\lib\training\generator.py", line 877, in generate_preview
side_feed, side_samples = next(iterator[side])
File "C:\Users\90363\faceswap\lib\multithreading.py", line 296, in iterator
self.check_and_raise_error()
File "C:\Users\90363\faceswap\lib\multithreading.py", line 173, in check_and_raise_error
raise error[1].with_traceback(error[2])
File "C:\Users\90363\faceswap\lib\multithreading.py", line 100, in run
self._target(*self._args, **self._kwargs)
File "C:\Users\90363\faceswap\lib\multithreading.py", line 279, in _run
for item in self.generator(*self._gen_args, **self._gen_kwargs):
File "C:\Users\90363\faceswap\lib\training\generator.py", line 217, in _minibatch
retval = self._process_batch(img_paths)
File "C:\Users\90363\faceswap\lib\training\generator.py", line 330, in _process_batch
raw_faces, detected_faces = self._get_images_with_meta(filenames)
File "C:\Users\90363\faceswap\lib\training\generator.py", line 241, in _get_images_with_meta
raw_faces = self._face_cache.cache_metadata(filenames)
File "C:\Users\90363\faceswap\lib\training\cache.py", line 246, in cache_metadata
self._validate_version(meta, filename)
File "C:\Users\90363\faceswap\lib\training\cache.py", line 306, in _validate_version
alignment_version = png_meta["source"]["alignments_version"]
TypeError: 'NoneType' object is not subscriptable
============ System Information ============
backend: nvidia
encoding: cp936
git_branch: master
git_commits: dea021c bugfix - setup.py - Install xorg-libxft for Linux users - Force tensorflow-cpu from pip
gpu_cuda: No global version found. Check Conda packages for Conda Cuda
gpu_cudnn: No global version found. Check Conda packages for Conda cuDNN
gpu_devices: GPU_0: GeForce GTX 1650
gpu_devices_active: GPU_0
gpu_driver: 462.30
gpu_vram: GPU_0: 4096MB (1075MB free)
os_machine: AMD64
os_platform: Windows-10-10.0.22621-SP0
os_release: 10
py_command: C:\Users\90363\faceswap\faceswap.py train -A D:/A -B D:/B -m D:/C -t original -bs 14 -it 1000000 -D default -s 250 -ss 25000 -tia D:/A -tib D:/B -to D:/D -L INFO -gui
py_conda_version: conda 24.1.0
py_implementation: CPython
py_version: 3.10.13
py_virtual_env: True
sys_cores: 8
sys_processor: Intel64 Family 6 Model 158 Stepping 10, GenuineIntel
sys_ram: Total: 8072MB, Available: 1413MB, Used: 6658MB, Free: 1413MB
=============== Pip Packages ===============
absl-py==2.1.0
astunparse==1.6.3
cachetools==5.3.2
certifi==2024.2.2
charset-normalizer==3.3.2
colorama @ file:///C:/b/abs_a9ozq0l032/croot/colorama_1672387194846/work
contourpy @ file:///C:/b/abs_853rfy8zse/croot/contourpy_1700583617587/work
cycler @ file:///tmp/build/80754af9/cycler_1637851556182/work
fastcluster @ file:///D:/bld/fastcluster_1695650232190/work
ffmpy @ file:///home/conda/feedstock_root/build_artifacts/ffmpy_1659474992694/work
flatbuffers==23.5.26
fonttools==4.25.0
gast==0.4.0
google-auth==2.27.0
google-auth-oauthlib==0.4.6
google-pasta==0.2.0
grpcio==1.60.1
h5py==3.10.0
idna==3.6
imageio @ file:///C:/b/abs_3eijmwdodc/croot/imageio_1695996500830/work
imageio-ffmpeg==0.4.9
joblib @ file:///C:/b/abs_1anqjntpan/croot/joblib_1685113317150/work
keras==2.10.0
Keras-Preprocessing==1.1.2
kiwisolver @ file:///C:/b/abs_88mdhvtahm/croot/kiwisolver_1672387921783/work
libclang==16.0.6
Markdown==3.5.2
MarkupSafe==2.1.4
matplotlib @ file:///C:/b/abs_e26vnvd5s1/croot/matplotlib-suite_1698692153288/work
mkl-fft @ file:///C:/b/abs_19i1y8ykas/croot/mkl_fft_1695058226480/work
mkl-random @ file:///C:/b/abs_edwkj1_o69/croot/mkl_random_1695059866750/work
mkl-service==2.4.0
munkres==1.1.4
numexpr @ file:///C:/b/abs_5fucrty5dc/croot/numexpr_1696515448831/work
numpy @ file:///C:/b/abs_16b2j7ad8n/croot/numpy_and_numpy_base_1704311752418/work/dist/numpy-1.26.3-cp310-cp310-win_amd64.whl#sha256=e84057072c37569bd0e11652dc2a75980d4d360f2391adf6a29a2fb1622d20ff
nvidia-ml-py @ file:///home/conda/feedstock_root/build_artifacts/nvidia-ml-py_1698947663801/work
oauthlib==3.2.2
opencv-python==4.9.0.80
opt-einsum==3.3.0
packaging @ file:///C:/b/abs_28t5mcoltc/croot/packaging_1693575224052/work
Pillow @ file:///C:/b/abs_153xikw91n/croot/pillow_1695134603563/work
ply==3.11
protobuf==3.19.6
psutil @ file:///C:/Windows/Temp/abs_b2c2fd7f-9fd5-4756-95ea-8aed74d0039flsd9qufz/croots/recipe/psutil_1656431277748/work
pyasn1==0.5.1
pyasn1-modules==0.3.0
pyparsing @ file:///C:/Users/BUILDE~1/AppData/Local/Temp/abs_7f_7lba6rl/croots/recipe/pyparsing_1661452540662/work
PyQt5==5.15.10
PyQt5-sip @ file:///C:/b/abs_c0pi2mimq3/croot/pyqt-split_1698769125270/work/pyqt_sip
python-dateutil @ file:///tmp/build/80754af9/python-dateutil_1626374649649/work
pywin32==306
pywinpty @ file:///C:/ci_310/pywinpty_1644230983541/work/target/wheels/pywinpty-2.0.2-cp310-none-win_amd64.whl
requests==2.31.0
requests-oauthlib==1.3.1
rsa==4.9
scikit-learn @ file:///C:/b/abs_daon7wm2p4/croot/scikit-learn_1694788586973/work
scipy==1.11.4
sip @ file:///C:/b/abs_edevan3fce/croot/sip_1698675983372/work
six @ file:///tmp/build/80754af9/six_1644875935023/work
tensorboard==2.10.1
tensorboard-data-server==0.6.1
tensorboard-plugin-wit==1.8.1
tensorflow==2.10.1
tensorflow-estimator==2.10.0
tensorflow-io-gcs-filesystem==0.31.0
termcolor==2.4.0
threadpoolctl @ file:///Users/ktietz/demo/mc3/conda-bld/threadpoolctl_1629802263681/work
tomli @ file:///C:/Windows/TEMP/abs_ac109f85-a7b3-4b4d-bcfd-52622eceddf0hy332ojo/croots/recipe/tomli_1657175513137/work
tornado @ file:///C:/b/abs_0cbrstidzg/croot/tornado_1696937003724/work
tqdm @ file:///C:/b/abs_f76j9hg7pv/croot/tqdm_1679561871187/work
typing_extensions==4.9.0
urllib3==2.2.0
Werkzeug==3.0.1
wrapt==1.16.0
============== Conda Packages ==============
# packages in environment at C:\Users\90363\MiniConda3\envs\faceswap:
#
# Name Version Build Channel
absl-py 2.1.0 pypi_0 pypi
aom 3.7.1 h63175ca_0 conda-forge
astunparse 1.6.3 pypi_0 pypi
blas 1.0 mkl
brotli 1.0.9 h2bbff1b_7
brotli-bin 1.0.9 h2bbff1b_7
bzip2 1.0.8 he774522_0
ca-certificates 2023.12.12 haa95532_0
cachetools 5.3.2 pypi_0 pypi
certifi 2024.2.2 pypi_0 pypi
charset-normalizer 3.3.2 pypi_0 pypi
colorama 0.4.6 py310haa95532_0
contourpy 1.2.0 py310h59b6b97_0
cudatoolkit 11.2.2 h7d7167e_12 conda-forge
cudnn 8.1.0.77 h3e0f4f4_0 conda-forge
cycler 0.11.0 pyhd3eb1b0_0
dav1d 1.2.1 hcfcfb64_0 conda-forge
expat 2.5.0 h63175ca_1 conda-forge
fastcluster 1.2.6 py310hecd3228_3 conda-forge
ffmpeg 6.1.0 gpl_h0859920_103 conda-forge
ffmpy 0.3.0 pyhb6f538c_0 conda-forge
flatbuffers 23.5.26 pypi_0 pypi
font-ttf-dejavu-sans-mono 2.37 hab24e00_0 conda-forge
font-ttf-inconsolata 3.000 h77eed37_0 conda-forge
font-ttf-source-code-pro 2.038 h77eed37_0 conda-forge
font-ttf-ubuntu 0.83 h77eed37_1 conda-forge
fontconfig 2.14.2 hbde0cde_0 conda-forge
fonts-conda-ecosystem 1 0 conda-forge
fonts-conda-forge 1 0 conda-forge
fonttools 4.25.0 pyhd3eb1b0_0
freetype 2.12.1 ha860e81_0
gast 0.4.0 pypi_0 pypi
giflib 5.2.1 h8cc25b3_3
git 2.40.1 haa95532_1
google-auth 2.27.0 pypi_0 pypi
google-auth-oauthlib 0.4.6 pypi_0 pypi
google-pasta 0.2.0 pypi_0 pypi
grpcio 1.60.1 pypi_0 pypi
h5py 3.10.0 pypi_0 pypi
icc_rt 2022.1.0 h6049295_2
icu 73.1 h6c2663c_0
idna 3.6 pypi_0 pypi
imageio 2.31.4 py310haa95532_0
imageio-ffmpeg 0.4.9 pypi_0 pypi
intel-openmp 2023.1.0 h59b6b97_46320
joblib 1.2.0 py310haa95532_0
jpeg 9e h2bbff1b_1
keras 2.10.0 pypi_0 pypi
keras-preprocessing 1.1.2 pypi_0 pypi
kiwisolver 1.4.4 py310hd77b12b_0
krb5 1.20.1 h5b6d351_0
lerc 3.0 hd77b12b_0
libbrotlicommon 1.0.9 h2bbff1b_7
libbrotlidec 1.0.9 h2bbff1b_7
libbrotlienc 1.0.9 h2bbff1b_7
libclang 16.0.6 pypi_0 pypi
libclang13 14.0.6 default_h8e68704_1
libdeflate 1.17 h2bbff1b_1
libexpat 2.5.0 h63175ca_1 conda-forge
libffi 3.4.4 hd77b12b_0
libiconv 1.17 hcfcfb64_2 conda-forge
libopus 1.3.1 h8ffe710_1 conda-forge
libpng 1.6.39 h8cc25b3_0
libpq 12.17 h906ac69_0
libtiff 4.5.1 hd77b12b_0
libwebp 1.3.2 hbc33d0d_0
libwebp-base 1.3.2 h2bbff1b_0
libxml2 2.12.4 hc3477c8_1 conda-forge
libzlib 1.2.13 hcfcfb64_5 conda-forge
libzlib-wapi 1.2.13 hcfcfb64_5 conda-forge
lz4-c 1.9.4 h2bbff1b_0
markdown 3.5.2 pypi_0 pypi
markupsafe 2.1.4 pypi_0 pypi
matplotlib 3.8.0 py310haa95532_0
matplotlib-base 3.8.0 py310h4ed8f06_0
mkl 2023.1.0 h6b88ed4_46358
mkl-service 2.4.0 py310h2bbff1b_1
mkl_fft 1.3.8 py310h2bbff1b_0
mkl_random 1.2.4 py310h59b6b97_0
munkres 1.1.4 py_0
numexpr 2.8.7 py310h2cd9be0_0
numpy 1.26.3 py310h055cbcc_0
numpy-base 1.26.3 py310h65a83cf_0
nvidia-ml-py 12.535.133 pyhd8ed1ab_0 conda-forge
oauthlib 3.2.2 pypi_0 pypi
opencv-python 4.9.0.80 pypi_0 pypi
openh264 2.4.0 h63175ca_0 conda-forge
openssl 3.2.1 hcfcfb64_0 conda-forge
opt-einsum 3.3.0 pypi_0 pypi
packaging 23.1 py310haa95532_0
pillow 9.4.0 py310hd77b12b_1
pip 23.3.1 py310haa95532_0
ply 3.11 py310haa95532_0
protobuf 3.19.6 pypi_0 pypi
psutil 5.9.0 py310h2bbff1b_0
pyasn1 0.5.1 pypi_0 pypi
pyasn1-modules 0.3.0 pypi_0 pypi
pyparsing 3.0.9 py310haa95532_0
pyqt 5.15.10 py310hd77b12b_0
pyqt5-sip 12.13.0 py310h2bbff1b_0
python 3.10.13 he1021f5_0
python-dateutil 2.8.2 pyhd3eb1b0_0
python_abi 3.10 2_cp310 conda-forge
pywin32 306 pypi_0 pypi
pywinpty 2.0.2 py310h5da7b33_0
qt-main 5.15.2 h19c9488_10
requests 2.31.0 pypi_0 pypi
requests-oauthlib 1.3.1 pypi_0 pypi
rsa 4.9 pypi_0 pypi
scikit-learn 1.3.0 py310h4ed8f06_1
scipy 1.11.4 py310h309d312_0
setuptools 68.2.2 py310haa95532_0
sip 6.7.12 py310hd77b12b_0
six 1.16.0 pyhd3eb1b0_1
sqlite 3.41.2 h2bbff1b_0
svt-av1 1.7.0 h63175ca_0 conda-forge
tbb 2021.8.0 h59b6b97_0
tensorboard 2.10.1 pypi_0 pypi
tensorboard-data-server 0.6.1 pypi_0 pypi
tensorboard-plugin-wit 1.8.1 pypi_0 pypi
tensorflow 2.10.1 pypi_0 pypi
tensorflow-estimator 2.10.0 pypi_0 pypi
tensorflow-io-gcs-filesystem 0.31.0 pypi_0 pypi
termcolor 2.4.0 pypi_0 pypi
threadpoolctl 2.2.0 pyh0d69192_0
tk 8.6.12 h2bbff1b_0
tomli 2.0.1 py310haa95532_0
tornado 6.3.3 py310h2bbff1b_0
tqdm 4.65.0 py310h9909e9c_0
typing-extensions 4.9.0 pypi_0 pypi
tzdata 2023d h04d1e81_0
ucrt 10.0.22621.0 h57928b3_0 conda-forge
urllib3 2.2.0 pypi_0 pypi
vc 14.2 h21ff451_1
vc14_runtime 14.38.33130 h82b7239_18 conda-forge
vs2015_runtime 14.38.33130 hcb4865c_18 conda-forge
werkzeug 3.0.1 pypi_0 pypi
wheel 0.41.2 py310haa95532_0
winpty 0.4.3 4
wrapt 1.16.0 pypi_0 pypi
x264 1!164.3095 h8ffe710_2 conda-forge
x265 3.5 h2d74725_3 conda-forge
xz 5.4.5 h8cc25b3_0
zlib 1.2.13 hcfcfb64_5 conda-forge
zlib-wapi 1.2.13 hcfcfb64_5 conda-forge
zstd 1.5.5 hd43e919_0
=============== State File =================
{
"name": "original",
"sessions": {
"1": {
"timestamp": 1706886043.0229857,
"no_logs": false,
"loss_names": [
"total",
"face_a",
"face_b"
],
"batchsize": 14,
"iterations": 1,
"config": {
"learning_rate": 5e-05,
"epsilon_exponent": -7,
"save_optimizer": "exit",
"autoclip": false,
"allow_growth": false,
"mixed_precision": false,
"nan_protection": true,
"convert_batchsize": 16,
"loss_function": "ssim",
"loss_function_2": "mse",
"loss_weight_2": 100,
"loss_function_3": null,
"loss_weight_3": 0,
"loss_function_4": null,
"loss_weight_4": 0,
"mask_loss_function": "mse",
"eye_multiplier": 3,
"mouth_multiplier": 2
}
}
},
"lowest_avg_loss": {
"a": 0.3055972158908844,
"b": 0.30632156133651733
},
"iterations": 1,
"mixed_precision_layers": [
"conv_128_0_conv2d",
"conv_128_0_leakyrelu",
"conv_256_0_conv2d",
"conv_256_0_leakyrelu",
"conv_512_0_conv2d",
"conv_512_0_leakyrelu",
"conv_1024_0_conv2d",
"conv_1024_0_leakyrelu",
"flatten",
"dense",
"dense_1",
"reshape",
"upscale_512_0_conv2d_conv2d",
"upscale_512_0_conv2d_leakyrelu",
"upscale_512_0_pixelshuffler",
"upscale_256_0_conv2d_conv2d",
"upscale_256_0_conv2d_leakyrelu",
"upscale_256_0_pixelshuffler",
"upscale_128_0_conv2d_conv2d",
"upscale_128_0_conv2d_leakyrelu",
"upscale_128_0_pixelshuffler",
"upscale_64_0_conv2d_conv2d",
"upscale_64_0_conv2d_leakyrelu",
"upscale_64_0_pixelshuffler",
"face_out_a_0_conv2d",
"upscale_256_1_conv2d_conv2d",
"upscale_256_1_conv2d_leakyrelu",
"upscale_256_1_pixelshuffler",
"upscale_128_1_conv2d_conv2d",
"upscale_128_1_conv2d_leakyrelu",
"upscale_128_1_pixelshuffler",
"upscale_64_1_conv2d_conv2d",
"upscale_64_1_conv2d_leakyrelu",
"upscale_64_1_pixelshuffler",
"face_out_b_0_conv2d"
],
"config": {
"centering": "face",
"coverage": 87.5,
"optimizer": "adam",
"learning_rate": 5e-05,
"epsilon_exponent": -7,
"save_optimizer": "exit",
"lr_finder_iterations": 1000,
"lr_finder_mode": "set",
"lr_finder_strength": "default",
"autoclip": false,
"allow_growth": false,
"mixed_precision": false,
"nan_protection": true,
"convert_batchsize": 16,
"loss_function": "ssim",
"loss_function_2": "mse",
"loss_weight_2": 100,
"loss_function_3": null,
"loss_weight_3": 0,
"loss_function_4": null,
"loss_weight_4": 0,
"mask_loss_function": "mse",
"eye_multiplier": 3,
"mouth_multiplier": 2,
"penalized_mask_loss": true,
"mask_type": "extended",
"mask_blur_kernel": 3,
"mask_threshold": 4,
"learn_mask": false,
"lowmem": false
}
}
================= Configs ==================
--------- .faceswap ---------
backend: nvidia
--------- convert.ini ---------
[color.color_transfer]
clip: True
preserve_paper: True
[color.manual_balance]
colorspace: HSV
balance_1: 0.0
balance_2: 0.0
balance_3: 0.0
contrast: 0.0
brightness: 0.0
[color.match_hist]
threshold: 99.0
[mask.mask_blend]
type: normalized
kernel_size: 3
passes: 4
threshold: 4
erosion: 0.0
erosion_top: 0.0
erosion_bottom: 0.0
erosion_left: 0.0
erosion_right: 0.0
[scaling.sharpen]
method: none
amount: 150
radius: 0.3
threshold: 5.0
[writer.ffmpeg]
container: mp4
codec: libx264
crf: 23
preset: medium
tune: none
profile: auto
level: auto
skip_mux: False
[writer.gif]
fps: 25
loop: 0
palettesize: 256
subrectangles: False
[writer.opencv]
format: png
draw_transparent: False
separate_mask: False
jpg_quality: 75
png_compress_level: 3
[writer.patch]
start_index: 0
index_offset: 0
number_padding: 6
include_filename: True
face_index_location: before
origin: bottom-left
empty_frames: blank
json_output: False
separate_mask: False
bit_depth: 16
format: png
png_compress_level: 3
tiff_compression_method: lzw
[writer.pillow]
format: png
draw_transparent: False
separate_mask: False
optimize: False
gif_interlace: True
jpg_quality: 75
png_compress_level: 3
tif_compression: tiff_deflate
--------- extract.ini ---------
[global]
allow_growth: False
aligner_min_scale: 0.07
aligner_max_scale: 2.0
aligner_distance: 22.5
aligner_roll: 45.0
aligner_features: True
filter_refeed: True
save_filtered: False
realign_refeeds: True
filter_realign: True
[align.fan]
batch-size: 12
[detect.cv2_dnn]
confidence: 50
[detect.mtcnn]
minsize: 20
scalefactor: 0.709
batch-size: 8
cpu: True
threshold_1: 0.6
threshold_2: 0.7
threshold_3: 0.7
[detect.s3fd]
confidence: 70
batch-size: 4
[mask.bisenet_fp]
batch-size: 8
cpu: False
weights: faceswap
include_ears: False
include_hair: False
include_glasses: True
[mask.custom]
batch-size: 8
centering: face
fill: False
[mask.unet_dfl]
batch-size: 8
[mask.vgg_clear]
batch-size: 6
[mask.vgg_obstructed]
batch-size: 2
[recognition.vgg_face2]
batch-size: 16
cpu: False
--------- gui.ini ---------
[global]
fullscreen: False
tab: extract
options_panel_width: 30
console_panel_height: 20
icon_size: 14
font: default
font_size: 9
autosave_last_session: prompt
timeout: 120
auto_load_model_stats: True
--------- train.ini ---------
[global]
centering: face
coverage: 87.5
icnr_init: False
conv_aware_init: False
optimizer: adam
learning_rate: 5e-05
epsilon_exponent: -7
save_optimizer: exit
lr_finder_iterations: 1000
lr_finder_mode: set
lr_finder_strength: default
autoclip: False
reflect_padding: False
allow_growth: False
mixed_precision: False
nan_protection: True
convert_batchsize: 16
[global.loss]
loss_function: ssim
loss_function_2: mse
loss_weight_2: 100
loss_function_3: none
loss_weight_3: 0
loss_function_4: none
loss_weight_4: 0
mask_loss_function: mse
eye_multiplier: 3
mouth_multiplier: 2
penalized_mask_loss: True
mask_type: extended
mask_blur_kernel: 3
mask_threshold: 4
learn_mask: False
[model.dfaker]
output_size: 128
[model.dfl_h128]
lowmem: False
[model.dfl_sae]
input_size: 128
architecture: df
autoencoder_dims: 0
encoder_dims: 42
decoder_dims: 21
multiscale_decoder: False
[model.dlight]
features: best
details: good
output_size: 256
[model.original]
lowmem: False
[model.phaze_a]
output_size: 128
shared_fc: none
enable_gblock: True
split_fc: True
split_gblock: False
split_decoders: False
enc_architecture: fs_original
enc_scaling: 7
enc_load_weights: True
bottleneck_type: dense
bottleneck_norm: none
bottleneck_size: 1024
bottleneck_in_encoder: True
fc_depth: 1
fc_min_filters: 1024
fc_max_filters: 1024
fc_dimensions: 4
fc_filter_slope: -0.5
fc_dropout: 0.0
fc_upsampler: upsample2d
fc_upsamples: 1
fc_upsample_filters: 512
fc_gblock_depth: 3
fc_gblock_min_nodes: 512
fc_gblock_max_nodes: 512
fc_gblock_filter_slope: -0.5
fc_gblock_dropout: 0.0
dec_upscale_method: subpixel
dec_upscales_in_fc: 0
dec_norm: none
dec_min_filters: 64
dec_max_filters: 512
dec_slope_mode: full
dec_filter_slope: -0.45
dec_res_blocks: 1
dec_output_kernel: 5
dec_gaussian: True
dec_skip_last_residual: True
freeze_layers: keras_encoder
load_layers: encoder
fs_original_depth: 4
fs_original_min_filters: 128
fs_original_max_filters: 1024
fs_original_use_alt: False
mobilenet_width: 1.0
mobilenet_depth: 1
mobilenet_dropout: 0.001
mobilenet_minimalistic: False
[model.realface]
input_size: 64
output_size: 128
dense_nodes: 1536
complexity_encoder: 128
complexity_decoder: 512
[model.unbalanced]
input_size: 128
lowmem: False
nodes: 1024
complexity_encoder: 128
complexity_decoder_a: 384
complexity_decoder_b: 512
[model.villain]
lowmem: False
[trainer.original]
preview_images: 14
mask_opacity: 30
mask_color: #ff0000
zoom_amount: 5
rotation_range: 10
shift_range: 5
flip_chance: 50
color_lightness: 30
color_ab: 8
color_clahe_chance: 50
color_clahe_max_size: 4
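
A note on the traceback in the report above: the crash fires while a time-lapse preview is being generated, in lib\training\cache.py (line 306) at `alignment_version = png_meta["source"]["alignments_version"]`. The TypeError ('NoneType' object is not subscriptable) means `png_meta` is None, i.e. the metadata read from at least one of the images being fed for the time-lapse (-tia D:/A, -tib D:/B in the command line above) came back empty. A common cause is that a folder contains images which were not written by faceswap's extract step, or whose embedded PNG metadata has since been stripped (re-saved, converted to JPG, etc.), so there is no alignment data for the loader to validate.

The following is a hypothetical stand-alone check, not part of faceswap: it lists the PNG text chunks of the first few files in one of those folders so you can see whether any embedded metadata is present at all. It only relies on Pillow, which is already installed in this environment, and the folder path is taken from the command line above.

Code:

# diagnostic sketch - hypothetical helper, not part of faceswap
# Lists the PNG text chunks of the first few images in a face folder, to show
# whether any embedded metadata (such as faceswap's alignment data) survives.
from pathlib import Path

from PIL import Image  # Pillow is present in the faceswap environment

FACES_DIR = Path(r"D:/A")  # the folder passed to -A / -tia

files = sorted(p for p in FACES_DIR.iterdir() if p.is_file())
for img_path in files[:5]:
    if img_path.suffix.lower() != ".png":
        # non-PNG files cannot carry PNG text-chunk metadata at all
        print(f"{img_path.name}: not a PNG, no embedded metadata possible")
        continue
    with Image.open(img_path) as img:
        chunks = dict(getattr(img, "text", {}))  # tEXt/iTXt chunks, if any
    print(f"{img_path.name}: {sorted(chunks) or 'no text chunks found'}")

If the files show no text chunks at all (or the folders turn out to hold full frames rather than extracted faces), that would explain the empty `png_meta`; in that case, re-extracting the faces with faceswap and pointing -tia/-tib at the freshly extracted folders is the usual way to rule this out.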