forked from w-okada/voice-changer
Merge pull request w-okada#343 from nadare881/voras_beta
VoRAS beta + addition of mixed-precision inference
Showing 11 changed files with 1,544 additions and 30 deletions.
@@ -0,0 +1,39 @@
import torch
from torch import device

from const import EnumInferenceTypes
from voice_changer.RVC.inferencer.Inferencer import Inferencer
from voice_changer.RVC.deviceManager.DeviceManager import DeviceManager
from .voras_beta.models import Synthesizer


class VoRASInferencer(Inferencer):
    def loadModel(self, file: str, gpu: device):
        super().setProps(EnumInferenceTypes.pyTorchVoRASbeta, file, False, gpu)

        dev = DeviceManager.get_instance().getDevice(gpu)
        self.isHalf = False  # DeviceManager.get_instance().halfPrecisionAvailable(gpu)

        # Load the checkpoint on CPU first, then move the model to the target device.
        cpt = torch.load(file, map_location="cpu")
        model = Synthesizer(**cpt["params"])

        model.eval()
        model.load_state_dict(cpt["weight"], strict=False)
        model.remove_weight_norm()
        model.change_speaker(0)

        model = model.to(dev)

        self.model = model
        print("load model complete")
        return self

    def infer(
        self,
        feats: torch.Tensor,
        pitch_length: torch.Tensor,
        pitch: torch.Tensor,
        pitchf: torch.Tensor,
        sid: torch.Tensor,
    ) -> torch.Tensor:
        return self.model.infer(feats, pitch_length, pitch, pitchf, sid)
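A minimal sketch of how this class would be driven, assuming the surrounding pipeline has already produced the feature and pitch tensors and that Inferencer's default constructor takes no arguments (the checkpoint path and device id are invented for illustration):

inferencer = VoRASInferencer()
inferencer.loadModel("voras_beta.pth", gpu=0)  # hypothetical checkpoint path
# feats, pitch_length, pitch, pitchf, sid are prepared by the caller
audio = inferencer.infer(feats, pitch_length, pitch, pitchf, sid)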
server/voice_changer/RVC/inferencer/voras_beta/commons.py (165 additions, 0 deletions)
@@ -0,0 +1,165 @@
import math

import torch
from torch.nn import functional as F


def init_weights(m, mean=0.0, std=0.01):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        m.weight.data.normal_(mean, std)


def get_padding(kernel_size, dilation=1):
    return int((kernel_size * dilation - dilation) / 2)


def convert_pad_shape(pad_shape):
    # Reverse the pad spec and flatten it into the layout F.pad expects.
    l = pad_shape[::-1]
    pad_shape = [item for sublist in l for item in sublist]
    return pad_shape


def kl_divergence(m_p, logs_p, m_q, logs_q):
    """KL(P||Q) between diagonal Gaussians, parameterized by means and log standard deviations."""
    kl = (logs_q - logs_p) - 0.5
    kl += (
        0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
    )
    return kl
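For reference, kl_divergence computes, elementwise per dimension, the closed-form KL between diagonal Gaussians P = N(m_p, sigma_p^2) and Q = N(m_q, sigma_q^2), where logs_p and logs_q are the log standard deviations:

\mathrm{KL}(P \,\|\, Q) = (\log\sigma_q - \log\sigma_p) - \tfrac{1}{2} + \frac{\sigma_p^2 + (m_p - m_q)^2}{2\sigma_q^2}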
def rand_gumbel(shape):
    """Sample from the Gumbel distribution, protect from overflows."""
    uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
    return -torch.log(-torch.log(uniform_samples))


def rand_gumbel_like(x):
    g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
    return g


def slice_segments(x, ids_str, segment_size=4):
    # Gather one fixed-size window per batch element along the time axis.
    ret = torch.zeros_like(x[:, :, :segment_size])
    for i in range(x.size(0)):
        idx_str = ids_str[i]
        idx_end = idx_str + segment_size
        r = x[i, :, idx_str:idx_end]
        ret[i, :, :r.size(1)] = r
    return ret


def slice_segments2(x, ids_str, segment_size=4):
    # Same as slice_segments, but for 2-D tensors [batch, time].
    ret = torch.zeros_like(x[:, :segment_size])
    for i in range(x.size(0)):
        idx_str = ids_str[i]
        idx_end = idx_str + segment_size
        r = x[i, idx_str:idx_end]
        ret[i, :r.size(0)] = r
    return ret


def rand_slice_segments(x, x_lengths, segment_size=4, ids_str=None):
    b, d, t = x.size()
    if ids_str is None:
        ids_str = torch.zeros([b]).to(device=x.device, dtype=x_lengths.dtype)
    ids_str_max = torch.maximum(
        torch.zeros_like(x_lengths).to(device=x_lengths.device, dtype=x_lengths.dtype),
        x_lengths - segment_size + 1 - ids_str,
    )
    ids_str += (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
    ret = slice_segments(x, ids_str, segment_size)
    return ret, ids_str
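A minimal sketch of how rand_slice_segments behaves (the tensor shapes are illustrative, not taken from the training code): it draws a random start frame per batch element, bounded by that element's valid length, and returns the fixed-length windows together with the chosen offsets.

# Illustrative only: 2 batch items, 4 channels, 10 frames.
x = torch.randn(2, 4, 10)
x_lengths = torch.tensor([10, 7])
segments, ids_str = rand_slice_segments(x, x_lengths, segment_size=4)
print(segments.shape)  # torch.Size([2, 4, 4])
print(ids_str)         # per-item start frames, each in [0, length - 4]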
def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
    position = torch.arange(length, dtype=torch.float)
    num_timescales = channels // 2
    log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
        num_timescales - 1
    )
    inv_timescales = min_timescale * torch.exp(
        torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
    )
    scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
    signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
    signal = F.pad(signal, [0, 0, 0, channels % 2])
    signal = signal.view(1, channels, length)
    return signal
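This is the sinusoidal timing signal from "Attention Is All You Need", laid out with all sine channels first and all cosine channels second rather than interleaved. As computed here, with N = channels // 2 timescales, channel i oscillates at

\omega_i = \text{min\_timescale} \cdot \left(\frac{\text{max\_timescale}}{\text{min\_timescale}}\right)^{-i/(N-1)}, \qquad \text{signal}(t) = [\sin(\omega_i t);\ \cos(\omega_i t)]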
def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
    b, channels, length = x.size()
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return x + signal.to(dtype=x.dtype, device=x.device)


def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
    b, channels, length = x.size()
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)


def subsequent_mask(length):
    mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
    return mask


@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
    n_channels_int = n_channels[0]
    in_act = input_a + input_b
    t_act = torch.tanh(in_act[:, :n_channels_int, :])
    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
    acts = t_act * s_act
    return acts
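fused_add_tanh_sigmoid_multiply is the WaveNet-style gated activation: of the 2n channels in the summed inputs a + b, the first n drive a tanh branch and the rest drive a sigmoid gate,

\text{acts} = \tanh\big((a + b)_{1:n}\big) \odot \sigma\big((a + b)_{n+1:2n}\big)

The @torch.jit.script decorator compiles the function to TorchScript, so the elementwise ops can be fused into fewer kernels.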
def convert_pad_shape(pad_shape):
    # Note: duplicates the convert_pad_shape defined earlier in this file.
    l = pad_shape[::-1]
    pad_shape = [item for sublist in l for item in sublist]
    return pad_shape


def shift_1d(x):
    x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
    return x


def sequence_mask(length, max_length=None):
    if max_length is None:
        max_length = length.max()
    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)
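A quick illustration of sequence_mask (the lengths are made up): it turns per-item lengths into a boolean padding mask.

lengths = torch.tensor([3, 1])
print(sequence_mask(lengths))
# tensor([[ True,  True,  True],
#         [ True, False, False]])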
def generate_path(duration, mask):
    """
    duration: [b, 1, t_x]
    mask: [b, 1, t_y, t_x]
    """
    b, _, t_y, t_x = mask.shape
    cum_duration = torch.cumsum(duration, -1)

    cum_duration_flat = cum_duration.view(b * t_x)
    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
    path = path.view(b, t_x, t_y)
    # Differencing the cumulative masks leaves a one-hot alignment path per source step.
    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
    path = path.unsqueeze(1).transpose(2, 3) * mask
    return path


def clip_grad_value_(parameters, clip_value, norm_type=2):
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    parameters = list(filter(lambda p: p.grad is not None, parameters))
    norm_type = float(norm_type)
    if clip_value is not None:
        clip_value = float(clip_value)

    total_norm = 0
    for p in parameters:
        param_norm = p.grad.data.norm(norm_type)
        total_norm += param_norm.item() ** norm_type
        if clip_value is not None:
            p.grad.data.clamp_(min=-clip_value, max=clip_value)
    total_norm = total_norm ** (1.0 / norm_type)
    return total_norm
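A minimal sketch of where clip_grad_value_ would sit in a training step (model, optimizer, and loss are placeholders, not part of this PR):

loss.backward()
# Clamp each gradient element to [-1.0, 1.0]; the return value is the
# gradient norm measured before clipping, which is handy for logging.
grad_norm = clip_grad_value_(model.parameters(), clip_value=1.0)
optimizer.step()
optimizer.zero_grad()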
@@ -0,0 +1,61 @@
from typing import Any, Dict, List, Literal, Optional

from pydantic import BaseModel


class TrainConfigTrain(BaseModel):
    log_interval: int
    seed: int
    epochs: int
    learning_rate: float
    betas: List[float]
    eps: float
    batch_size: int
    fp16_run: bool
    lr_decay: float
    segment_size: int
    init_lr_ratio: int
    warmup_epochs: int
    c_mel: int
    c_kl: float


class TrainConfigData(BaseModel):
    max_wav_value: float
    sampling_rate: int
    filter_length: int
    hop_length: int
    win_length: int
    n_mel_channels: int
    mel_fmin: float
    mel_fmax: Any


class TrainConfigModel(BaseModel):
    emb_channels: int
    inter_channels: int
    n_layers: int
    upsample_rates: List[int]
    use_spectral_norm: bool
    gin_channels: int
    spk_embed_dim: int


class TrainConfig(BaseModel):
    version: Literal["voras"] = "voras"
    train: TrainConfigTrain
    data: TrainConfigData
    model: TrainConfigModel


class DatasetMetaItem(BaseModel):
    gt_wav: str
    co256: str
    f0: Optional[str]
    f0nsf: Optional[str]
    speaker_id: int


class DatasetMetadata(BaseModel):
    files: Dict[str, DatasetMetaItem]
    # mute: DatasetMetaItem
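A minimal sketch of loading this schema with pydantic (the file name and its contents are assumptions for illustration): pydantic validates the nested dictionaries and raises a ValidationError if a required field is missing or mistyped.

import json

with open("voras_config.json") as f:  # hypothetical config path
    config = TrainConfig(**json.load(f))
print(config.version)           # "voras"
print(config.train.batch_size)  # whatever the JSON supplied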