Use high float32 matmul precision
henryruhs committed Feb 26, 2025
1 parent 5a0d549 commit 338b2e2
Showing 2 changed files with 6 additions and 0 deletions.
embedding_converter/src/training.py (3 additions, 0 deletions)

@@ -118,6 +118,9 @@ def train() -> None:
     dataset_file_pattern = CONFIG.get('training.dataset', 'file_pattern')
     output_resume_path = CONFIG.get('training.output', 'resume_path')
 
+    if torch.cuda.is_available():
+        torch.set_float32_matmul_precision('high')
+
     dataset = StaticDataset(dataset_file_pattern)
     training_loader, validation_loader = create_loaders(dataset)
     embedding_converter_trainer = EmbeddingConverterTrainer()
face_swapper/src/training.py (3 additions, 0 deletions)

@@ -194,6 +194,9 @@ def train() -> None:
     dataset_batch_ratio = CONFIG.getfloat('training.dataset', 'batch_ratio')
     output_resume_path = CONFIG.get('training.output', 'resume_path')
 
+    if torch.cuda.is_available():
+        torch.set_float32_matmul_precision('high')
+
     dataset = DynamicDataset(dataset_file_pattern, dataset_batch_ratio)
     training_loader, validation_loader = create_loaders(dataset)
     face_swapper_trainer = FaceSwapperTrainer()
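
For background, PyTorch's torch.set_float32_matmul_precision('high') allows float32 matrix multiplications to use TF32 tensor cores on NVIDIA Ampere and newer GPUs, trading a small amount of mantissa precision for throughput; the CUDA guard keeps CPU-only runs unaffected. A minimal standalone sketch of the same pattern (illustrative only, not part of the commit):

import torch

# Guarded the same way as in the commit: the setting only matters when CUDA is present.
if torch.cuda.is_available():
    # 'high' permits float32 matmuls to run at TF32 precision on supported GPUs.
    # The default 'highest' keeps full float32; 'medium' allows even lower precision.
    torch.set_float32_matmul_precision('high')

device = 'cuda' if torch.cuda.is_available() else 'cpu'
a = torch.randn(1024, 1024, device=device)
b = torch.randn(1024, 1024, device=device)
c = a @ b  # eligible for TF32 tensor cores under the 'high' setting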
