Mirror of https://github.com/NVIDIA/TensorRT-LLM.git (synced 2026-01-14 06:27:45 +08:00)
[5305318] fix: Fix the accuracy issue when reduce_fusion is enabled for GEMMA model. (#5801)
Signed-off-by: Yukun He <23156053+hyukn@users.noreply.github.com>
Parent: b01d1c28f7
Commit: e104f8bbb5
@@ -157,10 +157,10 @@ class GemmaDecoderLayer(Module):
                 if default_net().plugin_config.reduce_fusion else
                 AllReduceFusionOp.NONE,
                 residual=residual,
-                norm_weight=self.post_layernorm.weight.value,
-                norm_pre_residual_weight=self.pre_feedforward_layernorm.weight.
-                value if self.config.inter_layernorms else None,
-                eps=self.post_layernorm.eps))
+                norm_weight=self.pre_feedforward_layernorm.weight.value,
+                norm_pre_residual_weight=self.post_layernorm.weight.value
+                if self.config.inter_layernorms else None,
+                eps=self.pre_feedforward_layernorm.eps))
 
         if use_cache:
             attention_output, presents = attention_output
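Context: the diff swaps which RMSNorm weight is passed as norm_weight versus norm_pre_residual_weight (and takes eps from pre_feedforward_layernorm) when the fused reduce path is enabled. Based on the argument names, the fused all-reduce is expected to apply the pre-residual norm (post_layernorm) to the attention output, add the residual, then apply the post-residual norm (pre_feedforward_layernorm) that feeds the MLP; passing the weights in the opposite order would explain the accuracy regression. Below is a rough numerical sketch of that unfused reference, assuming Gemma-style RMSNorm that scales by (1 + weight). The helper names are illustrative only and are not TensorRT-LLM APIs, and the sketch ignores the actual tensor-parallel all-reduce that the fused kernel also performs.

import numpy as np

def rms_norm(x, weight, eps):
    # Gemma-style RMSNorm: normalize by RMS, scale by (1 + weight).
    var = np.mean(x * x, axis=-1, keepdims=True)
    return x / np.sqrt(var + eps) * (1.0 + weight)

def fused_prepost_norm_reference(attn_out, residual,
                                 norm_pre_residual_weight,  # post_layernorm.weight
                                 norm_weight,               # pre_feedforward_layernorm.weight
                                 eps):
    # 1) pre-residual norm on the (all-reduced) attention output
    h = rms_norm(attn_out, norm_pre_residual_weight, eps)
    # 2) residual add
    h = h + residual
    # 3) post-residual norm whose output feeds the MLP
    return rms_norm(h, norm_weight, eps)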