Commit: changes

harisreedhar committed Mar 4, 2025
1 parent 6520e9b commit 4f050da
Showing 6 changed files with 19 additions and 5 deletions.
1 change: 1 addition & 0 deletions face_swapper/README.md
@@ -31,6 +31,7 @@ file_pattern = .datasets/vggface2/**/*.jpg
 warp_template = vgg_face_hq_to_arcface_128_v2
 batch_mode = equal
 batch_ratio = 0.2
+resolution = 256
 ```

 ```
1 change: 1 addition & 0 deletions face_swapper/config.ini
@@ -26,6 +26,7 @@ num_filters =
 num_layers =
 num_discriminators =
 kernel_size =
+resolution =

 [training.losses]
 adversarial_weight =
5 changes: 3 additions & 2 deletions face_swapper/src/dataset.py
@@ -12,11 +12,12 @@


 class DynamicDataset(Dataset[Tensor]):
-    def __init__(self, file_pattern : str, warp_template : WarpTemplate, batch_mode : BatchMode, batch_ratio : float) -> None:
+    def __init__(self, file_pattern : str, warp_template : WarpTemplate, batch_mode : BatchMode, batch_ratio : float, resolution : int) -> None:
         self.file_paths = glob.glob(file_pattern)
         self.warp_template = warp_template
         self.batch_mode = batch_mode
         self.batch_ratio = batch_ratio
+        self.resolution = resolution
         self.transforms = self.compose_transforms()

     def __getitem__(self, index : int) -> Batch:
@@ -38,7 +39,7 @@ def compose_transforms(self) -> transforms:
         [
             AugmentTransform(),
             transforms.ToPILImage(),
-            transforms.Resize((256, 256), interpolation = transforms.InterpolationMode.BICUBIC),
+            transforms.Resize((self.resolution, self.resolution), interpolation = transforms.InterpolationMode.BICUBIC),
             transforms.ToTensor(),
             WarpTransform(self.warp_template),
             transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
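For reference, a minimal standalone sketch of what the resolution-aware pipeline does, assuming a plain torchvision stack (the repository's AugmentTransform and WarpTransform are omitted here, so this is illustrative only):

import torch
from torchvision import transforms

def compose_transforms(resolution : int) -> transforms.Compose:
    # bicubic resize to the configured resolution, then [-1, 1] normalization
    return transforms.Compose(
    [
        transforms.ToPILImage(),
        transforms.Resize((resolution, resolution), interpolation = transforms.InterpolationMode.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

image = torch.randint(0, 255, (512, 512, 3), dtype = torch.uint8).numpy()  # stand-in for a dataset sample
sample = compose_transforms(256)(image)
print(sample.shape)  # torch.Size([3, 256, 256])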
8 changes: 7 additions & 1 deletion face_swapper/src/models/loss.py
@@ -169,7 +169,13 @@ def forward(self, target_tensor : Tensor, output_tensor : Tensor) -> Tuple[Tenso
         return gaze_loss, weighted_gaze_loss

     def detect_gaze(self, input_tensor : Tensor) -> Gaze:
-        crop_tensor = input_tensor[:, :, 60: 224, 16: 205]
+        resolution = CONFIG.getint('training.dataset', 'resolution')
+        scale_factor = resolution / 256
+        y_min = int(60 * scale_factor)
+        y_max = int(224 * scale_factor)
+        x_min = int(16 * scale_factor)
+        x_max = int(205 * scale_factor)
+        crop_tensor = input_tensor[:, :, y_min: y_max, x_min: x_max]
         crop_tensor = (crop_tensor + 1) * 0.5
         crop_tensor = transforms.Normalize(mean = [ 0.485, 0.456, 0.406 ], std = [ 0.229, 0.224, 0.225 ])(crop_tensor)
         crop_tensor = nn.functional.interpolate(crop_tensor, size = (448, 448), mode = 'bicubic')
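As a quick check on the scaled gaze crop (a sketch, assuming the original 60:224, 16:205 box was tuned for 256x256 inputs): the bounds are multiplied by resolution / 256, so they stay identical at 256 and double at 512. The helper name below is illustrative, not part of the repository.

def scale_gaze_crop(resolution : int) -> tuple:
    # reproduces the arithmetic from detect_gaze for a given training resolution
    scale_factor = resolution / 256
    y_min, y_max = int(60 * scale_factor), int(224 * scale_factor)
    x_min, x_max = int(16 * scale_factor), int(205 * scale_factor)
    return y_min, y_max, x_min, x_max

print(scale_gaze_crop(256))  # (60, 224, 16, 205)
print(scale_gaze_crop(512))  # (120, 448, 32, 410)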
6 changes: 5 additions & 1 deletion face_swapper/src/networks/aad.py
@@ -28,8 +28,9 @@ def forward(self, source_embedding : Embedding, target_attributes : Attributes)
         temp_tensors = self.pixel_shuffle_up_sample(source_embedding)

         for index, layer in enumerate(self.layers[:-1]):
+            temp_shape = target_attributes[index + 1].shape[2:]
             temp_tensor = layer(temp_tensors, target_attributes[index], source_embedding)
-            temp_tensors = nn.functional.interpolate(temp_tensor, scale_factor = 2, mode = 'bilinear', align_corners = False)
+            temp_tensors = nn.functional.interpolate(temp_tensor, temp_shape, mode = 'bilinear', align_corners = False)

         temp_tensors = self.layers[-1](temp_tensors, target_attributes[-1], source_embedding)
         output_tensor = torch.tanh(temp_tensors)
@@ -113,6 +114,9 @@ def __init__(self, input_channels : int, attribute_channels : int, identity_chan
     def forward(self, input_tensor : Tensor, attribute_embedding : Embedding, identity_embedding : Embedding) -> Tensor:
         temp_tensor = self.instance_norm(input_tensor)

+        if attribute_embedding.shape[2:] != temp_tensor.shape[2:]:
+            attribute_embedding = nn.functional.interpolate(attribute_embedding, size = temp_tensor.shape[2:], mode = 'bilinear')
+
         attribute_scale = self.conv1(attribute_embedding)
         attribute_shift = self.conv2(attribute_embedding)
         attribute_modulation = attribute_scale * temp_tensor + attribute_shift
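A small sketch of what replacing the fixed scale_factor = 2 upscale changes (tensor shapes here are made up for illustration): interpolating to the next attribute map's spatial size keeps the decoder consistent with attribute pyramids whose levels are not exact powers of two apart.

import torch
import torch.nn as nn

temp_tensor = torch.randn(1, 64, 10, 10)
attribute_map = torch.randn(1, 64, 24, 24)  # next pyramid level, not exactly 2x larger

fixed = nn.functional.interpolate(temp_tensor, scale_factor = 2, mode = 'bilinear', align_corners = False)
matched = nn.functional.interpolate(temp_tensor, attribute_map.shape[2:], mode = 'bilinear', align_corners = False)

print(fixed.shape)    # torch.Size([1, 64, 20, 20]) -> mismatches the 24x24 attribute map
print(matched.shape)  # torch.Size([1, 64, 24, 24]) -> matches, so no extra resize is needed downstream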
3 changes: 2 additions & 1 deletion face_swapper/src/training.py
@@ -200,12 +200,13 @@ def train() -> None:
     dataset_warp_template = cast(WarpTemplate, CONFIG.get('training.dataset', 'warp_template'))
     dataset_batch_mode = cast(BatchMode, CONFIG.get('training.dataset', 'batch_mode'))
     dataset_batch_ratio = CONFIG.getfloat('training.dataset', 'batch_ratio')
+    dataset_resolution = CONFIG.getint('training.dataset', 'resolution')
     output_resume_path = CONFIG.get('training.output', 'resume_path')

     if torch.cuda.is_available():
         torch.set_float32_matmul_precision('high')

-    dataset = DynamicDataset(dataset_file_pattern, dataset_warp_template, dataset_batch_mode, dataset_batch_ratio)
+    dataset = DynamicDataset(dataset_file_pattern, dataset_warp_template, dataset_batch_mode, dataset_batch_ratio, dataset_resolution)
     training_loader, validation_loader = create_loaders(dataset)
     face_swapper_trainer = FaceSwapperTrainer()
     trainer = create_trainer()
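One practical note on the wiring above, sketched with the standard library rather than the repository's CONFIG object: getint is called without a fallback, so the new resolution key has to be present and filled in before training starts.

import configparser

config = configparser.ConfigParser()
config.read_string('[training.dataset]\nresolution = 256\n')

# a missing key raises configparser.NoOptionError, an empty value raises ValueError
resolution = config.getint('training.dataset', 'resolution')
print(resolution)  # 256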
