# adapted from https://gist.github.com/SuperShinyEyes/dcc68a08ff8b615442e3bc6a9b55a354
import torch
import torch.nn as nn
import torch.nn.functional as F
class FBetaLoss(nn.Module):
    '''Differentiable F-beta loss (1 - soft F-beta score). Works with GPU tensors.

    The original implementation was written by Michal Haltuf on Kaggle.

    Returns
    -------
    torch.Tensor
        Scalar (0-dim). `epsilon <= val <= 1 - epsilon`.

    Reference
    ---------
    - https://www.kaggle.com/rejpalcz/best-loss-function-for-f1-score-metric
    - https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html#sklearn.metrics.f1_score
    - https://discuss.pytorch.org/t/calculating-precision-recall-and-f1-score-in-case-of-multi-label-classification/28265/6
    - http://www.ryanzhang.info/python/writing-your-own-loss-function-module-for-pytorch/
    '''
    def __init__(self, beta=1, epsilon=1e-7):
        super().__init__()
        self.beta = beta        # weight of recall relative to precision
        self.epsilon = epsilon  # numerical stability; also keeps the loss away from exact 0/1
    def forward(self, y_pred, y_true):
        assert y_pred.ndim == 2  # (batch, num_classes) logits
        assert y_true.ndim == 1  # (batch,) integer class labels
        y_true = F.one_hot(y_true, num_classes=y_pred.shape[1]).to(torch.float32)
        y_pred = F.softmax(y_pred, dim=1)

        # Soft confusion-matrix counts, accumulated per class over the batch.
        tp = (y_true * y_pred).sum(dim=0)
        fp = ((1 - y_true) * y_pred).sum(dim=0)
        fn = (y_true * (1 - y_pred)).sum(dim=0)

        precision = tp / (tp + fp + self.epsilon)
        recall = tp / (tp + fn + self.epsilon)

        # F-beta = (1 + beta^2) * P * R / (beta^2 * P + R)
        fbeta = (1 + self.beta**2) * (precision * recall) / (self.beta**2 * precision + recall + self.epsilon)
        fbeta = fbeta.clamp(min=self.epsilon, max=1 - self.epsilon)
        return 1 - fbeta.mean()
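
# A minimal usage sketch, not part of the original file: the tensor shapes and
# the beta value below are assumptions for illustration only.
if __name__ == "__main__":
    torch.manual_seed(0)
    logits = torch.randn(8, 2, requires_grad=True)  # hypothetical (batch, num_classes) scores
    labels = torch.randint(0, 2, (8,))              # hypothetical integer class labels
    criterion = FBetaLoss(beta=2)                   # beta > 1 weights recall over precision
    loss = criterion(logits, labels)
    loss.backward()                                 # the soft F-beta loss is differentiable
    print(loss.item())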