Commit b6ae554

Fix formatting
Signed-off-by: intervitens <[email protected]>
Parent commit: 7ecf64c

File tree

1 file changed: +2 -3

vllm/model_executor/models/glm4.py

Lines changed: 2 additions & 3 deletions
@@ -111,7 +111,6 @@ def __init__(self,
             rope_scaling=rope_scaling,
             partial_rotary_factor=partial_rotary_factor,
             is_neox_style=False,
-
         )
         self.attn = Attention(self.num_heads,
                               self.head_dim,
@@ -198,11 +197,11 @@ def forward(
             hidden_states=hidden_states,
         )
 
-
         hidden_states = self.post_self_attn_layernorm(hidden_states)
 
         # Fully Connected
-        hidden_states, residual = self.post_attention_layernorm(hidden_states, residual)
+        hidden_states, residual = self.post_attention_layernorm(
+            hidden_states, residual)
         hidden_states = self.mlp(hidden_states)
         hidden_states = self.post_mlp_layernorm(hidden_states)
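
For context, below is a minimal sketch of the "sandwich" layernorm pattern the second hunk touches. This is not vLLM's implementation: the class name Glm4LayerSketch is hypothetical, attention and MLP are stubbed with nn.Identity, plain LayerNorm stands in for RMSNorm, and vLLM's fused post_attention_layernorm(x, residual) (which adds the residual and norms in one call) is written out as two steps.

import torch
from torch import nn


class Glm4LayerSketch(nn.Module):
    """Illustrative sketch only; not the vLLM glm4.py implementation."""

    def __init__(self, hidden_size: int):
        super().__init__()
        # GLM-4 uses RMSNorm; LayerNorm stands in here for simplicity.
        self.input_layernorm = nn.LayerNorm(hidden_size)
        self.post_self_attn_layernorm = nn.LayerNorm(hidden_size)
        self.post_attention_layernorm = nn.LayerNorm(hidden_size)
        self.post_mlp_layernorm = nn.LayerNorm(hidden_size)
        self.self_attn = nn.Identity()  # placeholder for attention
        self.mlp = nn.Identity()        # placeholder for the MLP

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states = self.self_attn(hidden_states)
        # Norm applied to the attention output, as in the diff above.
        hidden_states = self.post_self_attn_layernorm(hidden_states)

        # Fully Connected: vLLM's post_attention_layernorm(x, residual)
        # fuses the residual add with the norm; written out as two steps.
        residual = residual + hidden_states
        hidden_states = self.post_attention_layernorm(residual)
        hidden_states = self.mlp(hidden_states)
        hidden_states = self.post_mlp_layernorm(hidden_states)
        return residual + hidden_states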
