Eren Golge 2019-05-29 00:37:41 +02:00
Parent 9ff7151d77
Commit f774db0241
1 changed file with 3 additions and 3 deletions


@@ -207,9 +207,9 @@ class Attention(nn.Module):
         _, n = prev_alpha.max(1)
         val, n2 = alpha.max(1)
         for b in range(alignment.shape[0]):
-            alpha[b, n + 2:] = 0
-            alpha[b, :(n - 1)] = 0  # ignore all previous states to prevent repetition.
-            alpha[b, (n - 2)] = 0.01 * val  # smoothing factor for the prev step
+            alpha[b, n[b] + 2:] = 0
+            alpha[b, :(n[b] - 1)] = 0  # ignore all previous states to prevent repetition.
+            alpha[b, (n[b] - 2)] = 0.01 * val[b]  # smoothing factor for the prev step
         # compute attention weights
         self.alpha = alpha / alpha.sum(dim=1).unsqueeze(1)
         # compute context
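
The fix addresses a batching bug in this windowing step: prev_alpha.max(1) and alpha.max(1) return one index and one value per batch item, so inside the loop over the batch each must be selected with n[b] and val[b]; the old code indexed with the whole tensor n, applying every item's peak position to every row at once. A minimal standalone sketch of the corrected logic (the function name and tensor shapes are illustrative assumptions, not the module's actual API):

import torch

def window_attention(alpha, prev_alpha):
    # alpha, prev_alpha: (batch, seq_len) attention energies (assumed shapes).
    _, n = prev_alpha.max(1)   # n: (batch,) peak position of the previous step
    val, n2 = alpha.max(1)     # val: (batch,) peak value of the current step
    for b in range(alpha.shape[0]):
        alpha[b, n[b] + 2:] = 0                # block jumps far past the previous peak
        alpha[b, :(n[b] - 1)] = 0              # ignore all previous states to prevent repetition
        alpha[b, (n[b] - 2)] = 0.01 * val[b]   # smoothing factor for the prev step
    # renormalize so each row is a valid attention distribution
    return alpha / alpha.sum(dim=1).unsqueeze(1)

# Example: batch of 2 items over a 6-step encoder sequence
alpha = torch.rand(2, 6)
prev_alpha = torch.rand(2, 6)
print(window_attention(alpha, prev_alpha).sum(dim=1))  # each row sums to 1.0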