GeeeekExplorer
2025-06-13 13:07:33 +08:00
parent 135d1b38a2
commit 59aa3ff57c
4 changed files with 4 additions and 4 deletions

View File

@@ -4,7 +4,7 @@ A lightweight vLLM implementation built from scratch.
## Key Features
- * 🚀 **Fase offline inference** - Comparable inference speeds to vLLM
+ * 🚀 **Fast offline inference** - Comparable inference speeds to vLLM
* 📖 **Readable codebase** - Clean implementation under 1,200 lines of Python code
* ⚡ **Optimization Suite** - Prefix caching, Torch compilation, CUDA graph, etc.

View File

@@ -24,4 +24,4 @@ llm.generate(prompt_token_ids, sampling_params)
t = (time.time() - t)
total_tokens = sum(sp.max_tokens for sp in sampling_params)
throughput = total_tokens / t
-print(f"Total: {total_tokens}tok, Time: {t:.2f}s, Throughput: {throughput: .2f}tok/s")
+print(f"Total: {total_tokens}tok, Time: {t:.2f}s, Throughput: {throughput:.2f}tok/s")
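The fix here is subtle: in a Python format spec, the space in `{throughput: .2f}` is the sign-aware space flag, which pads non-negative numbers with a leading blank and so produced a double space in the output. A quick illustration:

```python
throughput = 1234.5678
print(f"Throughput: {throughput: .2f}tok/s")  # 'Throughput:  1234.57tok/s' (space flag reserves room for a sign)
print(f"Throughput: {throughput:.2f}tok/s")   # 'Throughput: 1234.57tok/s'
```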

View File

@@ -86,7 +86,7 @@ class BlockManager:
            seq.block_table.append(block_id)

    def deallocate(self, seq: Sequence):
-        for block_id in seq.block_table:
+        for block_id in reversed(seq.block_table):
            block = self.blocks[block_id]
            block.ref_count -= 1
            if block.ref_count == 0:
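Iterating the block table in reverse releases a sequence's tail blocks first. Assuming free block IDs are kept in a FIFO queue and reused oldest-first (the deque-based free list and helper shapes below are illustrative assumptions, not the repo's exact code), the prefix blocks then land at the back of the queue, are recycled last, and so stay available longest for prefix-cache reuse:

```python
from collections import deque
from dataclasses import dataclass, field

@dataclass
class Block:
    block_id: int
    ref_count: int = 0

@dataclass
class Sequence:
    num_blocks: int
    block_table: list[int] = field(default_factory=list)

class BlockManager:
    def __init__(self, num_blocks: int):
        self.blocks = [Block(i) for i in range(num_blocks)]
        self.free_block_ids = deque(range(num_blocks))  # FIFO: oldest-freed IDs reused first

    def allocate(self, seq: Sequence):
        for _ in range(seq.num_blocks):
            block_id = self.free_block_ids.popleft()
            self.blocks[block_id].ref_count += 1
            seq.block_table.append(block_id)

    def deallocate(self, seq: Sequence):
        # Freeing tail-first pushes the tail blocks into the queue first,
        # so the prefix blocks are recycled last and stay cached longest.
        for block_id in reversed(seq.block_table):
            block = self.blocks[block_id]
            block.ref_count -= 1
            if block.ref_count == 0:
                self.free_block_ids.append(block_id)
        seq.block_table.clear()

manager = BlockManager(num_blocks=8)
seq = Sequence(num_blocks=3)
manager.allocate(seq)          # seq gets blocks [0, 1, 2]
manager.deallocate(seq)        # freed as 2, 1, 0
print(manager.free_block_ids)  # deque([3, 4, 5, 6, 7, 2, 1, 0])
```

With the old forward iteration the queue would end in `0, 1, 2`, so the shared prefix block 0 would be the first of the three to be overwritten.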

View File

@@ -10,7 +10,7 @@ license = "MIT"
license-files = ["LICENSE"]
readme = "README.md"
description = "a mimic VLLM implementation from scratch"
-requires-python = ">=3.9,<3.13"
+requires-python = ">=3.10,<3.13"
dependencies = [
"torch>=2.4.0",
"triton>=3.0.0",