@echo off

:: Activate the virtual environment
call venv\Scripts\activate.bat

:: Put your training command from 'Print training command' here
accelerate launch --num_cpu_threads_per_process=2 "./sdxl_train_network.py" "--network_train_unet_only" ^
  --lr_scheduler_type "CosineAnnealingWarmRestarts" --bucket_no_upscale --bucket_reso_steps=128 ^
  --cache_latents --cache_latents_to_disk --cache_text_encoder_outputs --caption_extension=".txt" ^
  --enable_bucket --min_bucket_reso=512 --max_bucket_reso=2048 --gradient_checkpointing ^
  --learning_rate="1.0" --lr_scheduler="cosine" --lr_scheduler_args "T_0=2000" "T_mult=1" "eta_min=1e-1" ^
  --lr_scheduler_num_cycles="1" --max_data_loader_n_workers="16" --max_grad_norm="1" ^
  --resolution="832,1216" --max_token_length=225 --max_train_steps="2000" --mem_eff_attn ^
  --min_snr_gamma=5 --mixed_precision="bf16" --network_alpha="1" ^
  --network_args "preset=full" "conv_dim=128" "conv_alpha=1" "rank_dropout=0" "module_dropout=0" "use_tucker=False" "use_scalar=False" "rank_dropout_scale=False" "algo=locon" "train_norm=False" ^
  --network_dim=128 --network_module=lycoris.kohya --no_half_vae ^
  --multires_noise_iterations="6" --multires_noise_discount="0.3" ^
  --optimizer_args weight_decay=0.01 decouple=True d0=0.0001 use_bias_correction=True --optimizer_type="Prodigy" ^
  --output_dir="G:/STABLE_DIFFUSION/SD_Training/Projects/Output" --output_name="artist_takku_portrait_PonyXL_D128-A1_LoCon" ^
  --persistent_data_loader_workers ^
  --pretrained_model_name_or_path="G:/STABLE_DIFFUSION/SD_NEXT/automatic/models/Stable-diffusion/ponyDiffusionV6XL_v6StartWithThisOne.safetensors" ^
  --save_model_as=safetensors --save_precision="bf16" --scale_weight_norms="1" --seed="1337" ^
  --text_encoder_lr=1.0 --train_batch_size="1" ^
  --train_data_dir="G:/STABLE_DIFFUSION/SD_Training/Projects/uno-makoto/images_portrait" ^
  --unet_lr=1.0 --xformers

echo Done Training!
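For reference, here is a rough Python sketch of what the optimizer and scheduler flags above correspond to, assuming the prodigyopt package and PyTorch's built-in CosineAnnealingWarmRestarts. The model, loss, and training loop are placeholders for illustration only, not the actual sd-scripts internals.

# Minimal sketch (not kohya-ss code) of the optimizer/scheduler setup implied by the flags above.
import torch
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts
from prodigyopt import Prodigy  # assumption: the prodigyopt package backs --optimizer_type="Prodigy"

model = torch.nn.Linear(8, 8)  # stand-in for the LoCon parameters being trained

# --optimizer_type="Prodigy" --optimizer_args weight_decay=0.01 decouple=True d0=0.0001 use_bias_correction=True
# Prodigy adapts the step size on its own, which is why learning_rate/unet_lr are left at 1.0.
optimizer = Prodigy(
    model.parameters(),
    lr=1.0,
    weight_decay=0.01,
    decouple=True,
    d0=1e-4,
    use_bias_correction=True,
)

# --lr_scheduler_type "CosineAnnealingWarmRestarts" --lr_scheduler_args "T_0=2000" "T_mult=1" "eta_min=1e-1"
# With T_0 equal to max_train_steps (2000), the run ends right as the first restart would occur,
# so this is effectively a single cosine decay; eta_min=0.1 floors the LR instead of letting it reach zero.
scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=2000, T_mult=1, eta_min=1e-1)

for step in range(2000):
    optimizer.zero_grad()
    loss = model(torch.randn(4, 8)).pow(2).mean()  # dummy loss just to drive the loop
    loss.backward()
    optimizer.step()
    scheduler.step()

The design choice here is that Prodigy estimates its own effective learning rate, so the base rates stay at 1.0 and the scheduler only shapes how that estimate is scaled over the 2000 steps.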