Update modeling_phi.py

Commit 85d00b03fe (parent 1a4c7ae2ef)
@@ -362,7 +362,10 @@ class PhiAttention(nn.Module):
         key_states = repeat_kv(key_states, self.num_key_value_groups)
         value_states = repeat_kv(value_states, self.num_key_value_groups)

-        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
+        # Queries and keys upcast to fp32 is required by Phi-2 to avoid overflow
+        attn_weights = torch.matmul(
+            query_states.to(torch.float32), key_states.to(torch.float32).transpose(2, 3)
+        ) / math.sqrt(self.head_dim)

         if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
             raise ValueError(
|
Loading…
x
Reference in New Issue
Block a user