losses.py
import numpy as np
import keras.backend as K


def dice(y_true, y_pred):
    """Soft Dice score between two tensors, computed per column."""
    sum_p = K.sum(y_pred, axis=0)
    sum_r = K.sum(y_true, axis=0)
    sum_pr = K.sum(y_true * y_pred, axis=0)
    dice_numerator = 2 * sum_pr
    dice_denominator = sum_r + sum_p
    dice_score = (dice_numerator + K.epsilon()) / (dice_denominator + K.epsilon())
    return dice_score
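
# A note on the smoothing above: because K.epsilon() is added to both the
# numerator and the denominator, dice() evaluates to ~1 when prediction and
# ground truth are both empty, so tumor-free patches are not penalised.
# For example (assuming the TensorFlow backend, so K.eval can evaluate
# constants):
#
#     empty = K.constant([[0.], [0.]])
#     K.eval(dice(empty, empty))   # -> ~1.0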

def dice_whole_metric(y_true, y_pred):
    """Dice for the whole tumor (all tumor channels merged)."""
    y_true_f = K.reshape(y_true, shape=(-1, 4))
    y_pred_f = K.reshape(y_pred, shape=(-1, 4))
    y_whole = K.sum(y_true_f[:, 1:], axis=1)
    p_whole = K.sum(y_pred_f[:, 1:], axis=1)
    return dice(y_whole, p_whole)

def dice_en_metric(y_true, y_pred):
    """Dice for the enhancing tumor region (last channel)."""
    y_true_f = K.reshape(y_true, shape=(-1, 4))
    y_pred_f = K.reshape(y_pred, shape=(-1, 4))
    y_enh = y_true_f[:, -1]
    p_enh = y_pred_f[:, -1]
    return dice(y_enh, p_enh)

def dice_core_metric(y_true, y_pred):
    """Dice for the tumor core region (channels 1 and 3)."""
    y_true_f = K.reshape(y_true, shape=(-1, 4))
    y_pred_f = K.reshape(y_pred, shape=(-1, 4))
    # NumPy-style fancy indexing (y_true_f[:, [1, 3]]) is not supported on
    # TensorFlow tensors; summing the two channel slices directly is
    # equivalent to the tf.gather workaround sketched here previously.
    y_core = y_true_f[:, 1] + y_true_f[:, 3]
    p_core = y_pred_f[:, 1] + y_pred_f[:, 3]
    return dice(y_core, p_core)
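
# Region/channel mapping used by the three metrics above, inferred from the
# slicing and from the weight-order comment in weighted_log_loss below:
# channel 0 = normal, 1 = necrotic, 2 = edema, 3 = enhancing. Hence
# whole tumor = channels 1+2+3, tumor core = channels 1+3, and enhancing
# tumor = channel 3, matching the standard BraTS evaluation regions.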

def weighted_log_loss(y_true, y_pred):
    """Class-weighted categorical cross-entropy."""
    # Scale predictions so that the class probabilities of each voxel sum to 1.
    y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
    # Clip to prevent NaNs and Infs from log(0).
    y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
    # Weights are assigned in this order: normal, necrotic, edema, enhancing.
    # A constant suffices here; K.variable would allocate a new variable on
    # every call.
    weights = K.constant(np.array([1, 5, 2, 4], dtype="float32"))
    loss = y_true * K.log(y_pred) * weights
    loss = K.mean(-K.sum(loss, axis=-1))
    return loss
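
# An aside on the weighting: with weights [1, 5, 2, 4], a confidently wrong
# prediction on a necrotic voxel costs five times as much as the same mistake
# on a normal-tissue voxel, which presumably counteracts the heavy class
# imbalance between healthy and tumorous voxels.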

def gen_dice_loss(y_true, y_pred):
    """
    Sum of two losses: generalised Dice loss and weighted cross-entropy.

    The generalised Dice score follows Sudre et al.
    (https://arxiv.org/pdf/1707.03237):
        GDL = 1 - 2 * sum_l w_l * sum_i r_li * p_li
                  / sum_l w_l * sum_i (r_li + p_li)
    with w_l = 1 / (sum_i r_li)^2, so each class is weighted by the inverse
    square of its ground-truth volume.
    """
    y_true_f = K.reshape(y_true, shape=(-1, 4))
    y_pred_f = K.reshape(y_pred, shape=(-1, 4))
    sum_p = K.sum(y_pred_f, axis=-2)
    sum_r = K.sum(y_true_f, axis=-2)
    sum_pr = K.sum(y_true_f * y_pred_f, axis=-2)
    weights = K.pow(K.square(sum_r) + K.epsilon(), -1)
    generalised_dice_numerator = 2 * K.sum(weights * sum_pr)
    generalised_dice_denominator = K.sum(weights * (sum_r + sum_p))
    generalised_dice_score = generalised_dice_numerator / generalised_dice_denominator
    GDL = 1 - generalised_dice_score
    return GDL + weighted_log_loss(y_true, y_pred)
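
# Minimal sanity check and usage sketch (illustrative only, not part of the
# original file). Assumes the TensorFlow backend, so K.constant / K.eval run
# eagerly; the two "voxels" below are one-hot over the four classes.
if __name__ == "__main__":
    y_true = K.constant([[0., 1., 0., 0.],
                         [0., 0., 0., 1.]])
    y_pred = K.constant([[0.05, 0.90, 0.03, 0.02],
                         [0.05, 0.05, 0.05, 0.85]])
    print("dice whole:", K.eval(dice_whole_metric(y_true, y_pred)))
    print("dice core :", K.eval(dice_core_metric(y_true, y_pred)))
    print("dice enh  :", K.eval(dice_en_metric(y_true, y_pred)))
    print("total loss:", K.eval(gen_dice_loss(y_true, y_pred)))
    # In training these would typically be wired into a (hypothetical) Keras
    # model as:
    #   model.compile(optimizer="adam", loss=gen_dice_loss,
    #                 metrics=[dice_whole_metric, dice_core_metric, dice_en_metric])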