
Commit 4ee34f6

Make style 2
1 parent afbebe6 commit 4ee34f6

File tree

1 file changed: +3 -3 lines changed


backends/gaudi/server/text_generation_server/models/vlm_causal_lm.py

Lines changed: 3 additions & 3 deletions
@@ -232,7 @@ def __init__(
         self.prefilling = prefilling
 
     @property
-    def token_idx(self):
+    def token_idx(self): # noqa: F811
         if self.prefilling:
             # no right padding for prefill
             token_idx_scalar = self.attention_mask.shape[-1] - 1
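
The only change above is the added `# noqa: F811` comment. F811 is pyflakes' "redefinition of unused name" warning; the hunk doesn't show the earlier binding of `token_idx`, but presumably the name is already defined above in the class or module, and the comment marks the redefinition as intentional for flake8. A minimal, hypothetical sketch of the pattern (not the TGI source):

# Hypothetical example: flake8 reports F811 on the second definition,
# because it rebinds `token_idx` before the first binding is ever used.
class Batch:
    def token_idx(self):
        return 0

    @property
    def token_idx(self):  # noqa: F811 -- intentional redefinition
        return 1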
@@ -1534,8 @@ def warmup(
 
         except Exception:
             raise RuntimeError(
-                f"Not enough memory to handle following prefill and decode warmup."
-                f"You need to decrease `--max-batch-prefill-tokens`"
+                "Not enough memory to handle following prefill and decode warmup."
+                "You need to decrease `--max-batch-prefill-tokens`"
             )
 
         mem_stats = get_hpu_memory_stats(self.device)
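
The second hunk drops the `f` prefix from two literals that contain no `{}` placeholders, which flake8 flags as F541 ("f-string is missing placeholders"). Behavior is unchanged: adjacent string literals concatenate implicitly either way, so the message still joins "warmup." and "You" with no space between them. A standalone sketch of the rule (not the TGI source):

# F541 fires on an f-string that interpolates nothing; the fix is to
# drop the useless prefix. Adjacent literals concatenate implicitly.
limit = 4096

flagged = f"no placeholders here"   # F541: the f prefix does nothing
fine = f"limit is {limit}"          # OK: actually interpolates
fixed = "no placeholders here"      # the commit's style fix

joined = (
    "first part."
    "second part"                   # -> "first part.second part"
)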

0 commit comments