better
@@ -4,7 +4,7 @@ A lightweight vLLM implementation built from scratch.
 
 ## Key Features
 
-* 🚀 **Fase offline inference** - Comparable inference speeds to vLLM
+* 🚀 **Fast offline inference** - Comparable inference speeds to vLLM
 * 📖 **Readable codebase** - Clean implementation under 1,200 lines of Python code
 * ⚡ **Optimization Suite** - Prefix caching, Torch compilation, CUDA graph, etc
 
@@ -86,7 +86,7 @@ class BlockManager:
             seq.block_table.append(block_id)
 
     def deallocate(self, seq: Sequence):
-        for block_id in seq.block_table:
+        for block_id in reversed(seq.block_table):
             block = self.blocks[block_id]
             block.ref_count -= 1
             if block.ref_count == 0:
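To make the `deallocate` change easier to read in isolation, here is a minimal, runnable sketch of the reference-counted block pool around it. The `Block` fields, `BlockManager.blocks`, and `Sequence.block_table` shapes follow the diff context; the constructors, the `free_block_ids` deque, and the `allocate` path are assumptions added for illustration, not the project's exact code.

```python
from collections import deque


class Block:
    def __init__(self, block_id: int):
        self.block_id = block_id
        self.ref_count = 0


class Sequence:
    def __init__(self):
        self.block_table: list[int] = []  # KV-cache block ids, prefix first


class BlockManager:
    def __init__(self, num_blocks: int):
        self.blocks = [Block(i) for i in range(num_blocks)]
        self.free_block_ids: deque[int] = deque(range(num_blocks))

    def allocate(self, seq: Sequence):
        # Hypothetical allocation path: take a free block, pin it, record it.
        block_id = self.free_block_ids.popleft()
        self.blocks[block_id].ref_count += 1
        seq.block_table.append(block_id)

    def deallocate(self, seq: Sequence):
        # The hunk above: walk the table back to front, so tail blocks
        # (unique to this sequence) return to the free list before prefix
        # blocks (more likely shared or reusable via prefix caching).
        for block_id in reversed(seq.block_table):
            block = self.blocks[block_id]
            block.ref_count -= 1
            if block.ref_count == 0:
                self.free_block_ids.append(block_id)
        seq.block_table.clear()
```

With a FIFO free list like the deque above, freeing in reverse keeps a sequence's prefix blocks farther from the head of the queue, so they are recycled last and any prefix-cache entries pointing at them stay valid longer. That rationale is inferred from the one-line change, not stated in the commit.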
@@ -10,7 +10,7 @@ license = "MIT"
 license-files = ["LICENSE"]
 readme = "README.md"
 description = "a mimic VLLM implementation from scratch"
-requires-python = ">=3.9,<3.13"
+requires-python = ">=3.10,<3.13"
 dependencies = [
     "torch>=2.4.0",
     "triton>=3.0.0",
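The only functional change in this hunk is the Python floor moving from 3.9 to 3.10. The commit does not say why; one plausible reason, purely an assumption here, is that the codebase uses 3.10-only syntax such as PEP 604 unions in annotations or `match` statements:

```python
# Both constructs require Python >= 3.10; whether this project actually
# relies on them is an assumption, not something the diff states.

def last_block(block_table: list[int]) -> int | None:
    # "int | None" (PEP 604) raises TypeError at import time on 3.9
    # unless `from __future__ import annotations` is in effect.
    match block_table:  # `match` (PEP 634) is a SyntaxError before 3.10
        case [*_, last]:
            return last
        case []:
            return None
```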