init commit
118
nanovllm/engine/block_manager.py
Normal file
@@ -0,0 +1,118 @@
from collections import deque

import xxhash
import numpy as np

from nanovllm.engine.sequence import Sequence


def compute_hash(token_ids: list[int], prefix: int = -1):
    # Chain the previous block's hash into this one, so a block's hash
    # identifies the entire token prefix, not just its own 256 tokens.
    h = xxhash.xxh64()
    if prefix != -1:
        h.update(prefix.to_bytes(8))
    h.update(np.array(token_ids).tobytes())
    return h.intdigest()


class Block:
    """A fixed-size KV-cache block, reference counted for prefix sharing."""

    def __init__(self, block_id):
        self.block_id = block_id
        self.ref_count = 0
        self.hash = -1
        self.token_ids = []

    def update(self, hash: int, token_ids: list[int]):
        assert hash != -1
        assert len(token_ids) == 256
        self.hash = hash
        self.token_ids = token_ids

    def reset(self):
        self.ref_count = 1
        self.hash = -1
        self.token_ids = []

    def __repr__(self):
        return f"{(self.block_id, self.ref_count, self.hash)}"


class BlockManager:

    def __init__(self, num_blocks: int, block_size: int = 256):
        assert block_size == 256
        self.block_size = block_size
        self.blocks: list[Block] = [Block(i) for i in range(num_blocks)]
        self.hash_to_block_id: dict[int, int] = dict()
        self.free_block_ids: deque[int] = deque(range(num_blocks))
        self.used_block_ids: set[int] = set()

    def _allocate_block(self, block_id: int):
        block = self.blocks[block_id]
        assert block.ref_count == 0
        block.reset()
        self.free_block_ids.remove(block_id)
        self.used_block_ids.add(block_id)
        return self.blocks[block_id]

    def _deallocate_block(self, block_id: int):
        assert self.blocks[block_id].ref_count == 0
        self.used_block_ids.remove(block_id)
        self.free_block_ids.append(block_id)

    def can_allocate(self, seq: Sequence):
        return seq.num_blocks <= len(self.free_block_ids)

    def allocate(self, seq: Sequence):
        assert not seq.block_table
        h = -1
        cache_miss = False
        for i in range(seq.num_blocks):
            token_ids = seq.block(i, self.block_size)
            # Only full blocks are hashed; a partial last block never hits the cache.
            h = compute_hash(token_ids, h) if len(token_ids) == self.block_size else -1
            block_id = self.hash_to_block_id.get(h, -1)
            if block_id == -1 or self.blocks[block_id].token_ids != token_ids:
                cache_miss = True
            if cache_miss:
                block_id = self.free_block_ids[0]
                block = self._allocate_block(block_id)
            else:
                # Prefix-cache hit: reuse the existing block and bump its ref count.
                seq.num_cached_tokens += self.block_size
                if block_id in self.used_block_ids:
                    block = self.blocks[block_id]
                    block.ref_count += 1
                else:
                    block = self._allocate_block(block_id)
            if h != -1:
                block.update(h, token_ids)
                self.hash_to_block_id[h] = block_id
            seq.block_table.append(block_id)

    def deallocate(self, seq: Sequence):
        for block_id in seq.block_table:
            block = self.blocks[block_id]
            block.ref_count -= 1
            if block.ref_count == 0:
                self._deallocate_block(block_id)
        seq.num_cached_tokens = 0
        seq.block_table.clear()

    def can_append(self):
        return len(self.free_block_ids) >= 1

    def may_append(self, seq: Sequence):
        # Called once per decode step to keep block bookkeeping in sync with
        # the token appended by the previous step.
        block_table = seq.block_table
        last_block = self.blocks[block_table[-1]]
        if len(seq) % self.block_size == 1:
            # The previous block filled up (and was hashed); the newest token
            # starts a fresh block.
            assert last_block.hash != -1
            block_id = self.free_block_ids[0]
            self._allocate_block(block_id)
            block_table.append(block_id)
        elif len(seq) % self.block_size == 0:
            # The newest token filled the current block exactly; hash it so it
            # becomes shareable via the prefix cache.
            assert last_block.hash == -1
            token_ids = seq.last_block(self.block_size)
            prefix = self.blocks[block_table[-2]].hash if len(block_table) > 1 else -1
            h = compute_hash(token_ids, prefix)
            last_block.update(h, token_ids)
            self.hash_to_block_id[h] = last_block.block_id
        else:
            # Still filling a partial block; nothing to do.
            assert last_block.hash == -1
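For context, a minimal sketch (not part of the commit) of how the chained block hashes above turn shared prompt prefixes into cache hits. The token ids are made-up toy data, and the snippet assumes the nanovllm package from this commit is importable:

from nanovllm.engine.block_manager import compute_hash

BLOCK = 256
prompt_a = list(range(2 * BLOCK))                                # two full toy blocks
prompt_b = list(range(BLOCK)) + list(range(1000, 1000 + BLOCK))  # same first block, different second

h_a0 = compute_hash(prompt_a[:BLOCK])
h_b0 = compute_hash(prompt_b[:BLOCK])
assert h_a0 == h_b0   # identical first blocks hash identically -> the block can be reused

h_a1 = compute_hash(prompt_a[BLOCK:], prefix=h_a0)
h_b1 = compute_hash(prompt_b[BLOCK:], prefix=h_b0)
assert h_a1 != h_b1   # chaining the prefix hash makes a later block identify the whole prefix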
66
nanovllm/engine/llm_engine.py
Normal file
@@ -0,0 +1,66 @@
from collections import defaultdict

from tqdm.auto import tqdm
from transformers import AutoConfig, AutoTokenizer

from nanovllm.config import Config
from nanovllm.sampling_params import SamplingParams
from nanovllm.engine.sequence import Sequence
from nanovllm.engine.scheduler import Scheduler
from nanovllm.engine.model_runner import ModelRunner


class LLMEngine:

    def __init__(self, model, **kwargs):
        config = Config(model)
        # Any keyword argument that matches a Config field overrides the default.
        for k, v in kwargs.items():
            if hasattr(config, k):
                setattr(config, k, v)
        config.hf_config = AutoConfig.from_pretrained(config.model)
        config.max_model_len = min(config.max_model_len, config.hf_config.max_position_embeddings)
        self.tokenizer = AutoTokenizer.from_pretrained(config.model, use_fast=True)
        config.eos = self.tokenizer.eos_token_id
        self.model_runner = ModelRunner(config)
        self.scheduler = Scheduler(config)

    def add_request(self, prompt: str | list[int], sampling_params: SamplingParams):
        if isinstance(prompt, str):
            prompt = self.tokenizer.encode(prompt)
        seq = Sequence(prompt, sampling_params)
        self.scheduler.add(seq)

    def step(self):
        seqs, is_prefill = self.scheduler.schedule()
        token_ids = self.model_runner.run(seqs, is_prefill)
        finished = self.scheduler.postprocess(seqs, token_ids)
        return [(seq.seq_id, token_id, finish) for seq, token_id, finish in zip(seqs, token_ids, finished)]

    def is_finished(self):
        return self.scheduler.is_finished()

    def generate(
        self,
        prompts: list[str] | list[list[int]],
        sampling_params: SamplingParams | list[SamplingParams],
        use_tqdm: bool = True,
    ) -> list[str]:
        if use_tqdm:
            pbar = tqdm(total=len(prompts), desc="Processed prompts")
        if not isinstance(sampling_params, list):
            # A single SamplingParams is broadcast to every prompt.
            sampling_params = [sampling_params] * len(prompts)
        for prompt, sp in zip(prompts, sampling_params):
            self.add_request(prompt, sp)
        outputs = defaultdict(list)
        while not self.is_finished():
            output = self.step()
            for seq_id, token_id, finish in output:
                outputs[seq_id].append(token_id)
                if use_tqdm and finish:
                    pbar.update(1)
        outputs = [outputs[seq_id] for seq_id in sorted(outputs)]
        outputs = [self.tokenizer.decode(token_ids) for token_ids in outputs]
        if use_tqdm:
            pbar.close()
        return outputs
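For context, a hypothetical end-to-end driver (not part of the commit) for the engine above. The checkpoint path is a placeholder, and the SamplingParams keyword arguments are assumed from how its fields are read in sequence.py, since its definition is not in this diff:

from nanovllm.engine.llm_engine import LLMEngine
from nanovllm.sampling_params import SamplingParams

engine = LLMEngine("Qwen/Qwen3-0.6B", enforce_eager=False)   # placeholder model path
params = SamplingParams(temperature=0.8, max_tokens=64)      # assumed constructor kwargs
outputs = engine.generate(["Hello, my name is"], params)
print(outputs[0])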
198
nanovllm/engine/model_runner.py
Normal file
@@ -0,0 +1,198 @@
import torch

from nanovllm.config import Config
from nanovllm.engine.sequence import Sequence
from nanovllm.utils.context import set_context, get_context, reset_context
from nanovllm.utils.memory import get_gpu_memory
from nanovllm.models.qwen3 import Qwen3ForCausalLM
from nanovllm.layers.sampler import Sampler


class ModelRunner:

    def __init__(self, config: Config):
        self.config = config
        hf_config = config.hf_config
        self.block_size = config.kvcache_block_size
        self.enforce_eager = config.enforce_eager

        default_dtype = torch.get_default_dtype()
        torch.set_default_dtype(hf_config.torch_dtype)
        torch.set_default_device("cuda")
        self.model = Qwen3ForCausalLM(hf_config)
        self.model.load_weights(config.model)
        self.sampler = Sampler()
        self.allocate_kv_cache(config.gpu_memory_utilization)
        if not self.enforce_eager:
            self.capture_model()
        torch.set_default_device("cpu")
        torch.set_default_dtype(default_dtype)

    def allocate_kv_cache(self, gpu_memory_utilization):
        config = self.config
        hf_config = config.hf_config
        total, used, _ = get_gpu_memory()
        free = total * gpu_memory_utilization - used
        # Bytes per 256-token block: K and V, for every layer and KV head.
        block_bytes = 2 * hf_config.num_hidden_layers * self.block_size * hf_config.num_key_value_heads * hf_config.head_dim * hf_config.torch_dtype.itemsize
        config.num_kvcache_blocks = int(free * 1e6) // block_bytes
        self.kv_cache = torch.zeros(2, hf_config.num_hidden_layers, config.num_kvcache_blocks, self.block_size, hf_config.num_key_value_heads, hf_config.head_dim)
        # Hand each attention layer a view into its slice of the shared cache.
        layer_id = 0
        for module in self.model.modules():
            if hasattr(module, "k_cache") and hasattr(module, "v_cache"):
                module.k_cache = self.kv_cache[0, layer_id]
                module.v_cache = self.kv_cache[1, layer_id]
                layer_id += 1

    def prepare_block_tables(self, seqs: list[Sequence]):
        # Pad every block table to the same length so they stack into one tensor.
        max_len = max(len(seq.block_table) for seq in seqs)
        block_tables = [
            seq.block_table + [-1] * (max_len - len(seq.block_table))
            for seq in seqs
        ]
        block_tables = torch.tensor(block_tables, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
        return block_tables

    def prepare_prefill(self, seqs: list[Sequence]):
        input_ids = []
        positions = []
        cu_seqlens_q = [0]
        cu_seqlens_k = [0]
        max_seqlen_q = 0
        max_seqlen_k = 0
        slot_mapping = []
        context_lens = None
        block_tables = None
        for seq in seqs:
            seqlen = len(seq)
            # Only uncached tokens are fed to the model; cached ones are served from the KV cache.
            input_ids.extend(seq[seq.num_cached_tokens:])
            positions.extend(list(range(seq.num_cached_tokens, len(seq))))
            seqlen_q = seqlen - seq.num_cached_tokens
            seqlen_k = seqlen
            cu_seqlens_q.append(cu_seqlens_q[-1] + seqlen_q)
            cu_seqlens_k.append(cu_seqlens_k[-1] + seqlen_k)
            max_seqlen_q = max(seqlen_q, max_seqlen_q)
            max_seqlen_k = max(seqlen_k, max_seqlen_k)
            # Map each uncached token to its physical slot in the KV cache.
            for i in range(seq.num_cached_blocks, seq.num_blocks):
                start = seq.block_table[i] * self.block_size
                if i != seq.num_blocks - 1:
                    end = start + self.block_size
                else:
                    end = start + len(seq.last_block())
                slot_mapping.extend(list(range(start, end)))
        assert len(input_ids) == len(slot_mapping)
        assert len(input_ids) == cu_seqlens_q[-1]
        if cu_seqlens_k[-1] > cu_seqlens_q[-1]:  # prefix cache
            context_lens = torch.tensor([len(seq) for seq in seqs], dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
            block_tables = self.prepare_block_tables(seqs)
        input_ids = torch.tensor(input_ids, dtype=torch.int64, pin_memory=True).cuda(non_blocking=True)
        positions = torch.tensor(positions, dtype=torch.int64, pin_memory=True).cuda(non_blocking=True)
        cu_seqlens_q = torch.tensor(cu_seqlens_q, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
        cu_seqlens_k = torch.tensor(cu_seqlens_k, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
        slot_mapping = torch.tensor(slot_mapping, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
        set_context(True, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, slot_mapping, context_lens, block_tables)
        return input_ids, positions

    def prepare_decode(self, seqs: list[Sequence]):
        input_ids = []
        positions = []
        slot_mapping = []
        context_lens = []
        for seq in seqs:
            # The input is the token appended last step; it sits at index len(seq) - 1,
            # and its KV goes into the last occupied slot of the last block.
            input_ids.append(seq.last_token)
            positions.append(len(seq) - 1)
            context_lens.append(len(seq))
            slot_mapping.append(seq.block_table[-1] * self.block_size + len(seq.last_block()) - 1)
        input_ids = torch.tensor(input_ids, dtype=torch.int64, pin_memory=True).cuda(non_blocking=True)
        positions = torch.tensor(positions, dtype=torch.int64, pin_memory=True).cuda(non_blocking=True)
        slot_mapping = torch.tensor(slot_mapping, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
        context_lens = torch.tensor(context_lens, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
        block_tables = self.prepare_block_tables(seqs)
        set_context(False, slot_mapping=slot_mapping, context_lens=context_lens, block_tables=block_tables)
        return input_ids, positions

    def prepare_sample(self, seqs: list[Sequence]):
        temperatures = []
        for seq in seqs:
            temperatures.append(seq.temperature)
        temperatures = torch.tensor(temperatures, dtype=torch.float32, pin_memory=True).cuda(non_blocking=True)
        return temperatures

    @torch.inference_mode()
    def run_model(self, input_ids: torch.Tensor, positions: torch.Tensor, is_prefill):
        if is_prefill or self.enforce_eager or input_ids.size(0) > 256:
            # Fall back to eager execution for prefill, enforce_eager, or oversized batches.
            return self.model.compute_logits(self.model(input_ids, positions))
        else:
            # Decode path: replay the smallest captured CUDA graph that fits the batch.
            bs = input_ids.size(0)
            context = get_context()
            self.reset_graph_vars()
            graph = self.graphs[next(x for x in self.graph_bs if x >= bs)]
            graph_vars = self.graph_vars
            graph_vars["input_ids"][:bs] = input_ids
            graph_vars["positions"][:bs] = positions
            graph_vars["slot_mapping"][:bs] = context.slot_mapping
            graph_vars["context_lens"][:bs] = context.context_lens
            graph_vars["block_tables"][:bs, :context.block_tables.size(1)] = context.block_tables
            graph.replay()
            return self.model.compute_logits(graph_vars["outputs"][:bs])

    def reset_graph_vars(self):
        graph_vars = self.graph_vars
        graph_vars["input_ids"].zero_()
        graph_vars["positions"].zero_()
        graph_vars["slot_mapping"].zero_()
        graph_vars["context_lens"].zero_()
        graph_vars["block_tables"].zero_()

    def run(self, seqs: list[Sequence], is_prefill: bool) -> list[int]:
        input_ids, positions = self.prepare_prefill(seqs) if is_prefill else self.prepare_decode(seqs)
        temperatures = self.prepare_sample(seqs)
        logits = self.run_model(input_ids, positions, is_prefill)
        token_ids = self.sampler(logits, temperatures).tolist()
        reset_context()
        return token_ids

    @torch.inference_mode()
    def capture_model(self):
        # Freeze the CUDA RNG state so graph capture is deterministic; restored below.
        get_rng_state = torch.cuda.get_rng_state
        set_rng_state = torch.cuda.set_rng_state
        rng_state = torch.cuda.get_rng_state()
        torch.cuda.get_rng_state = lambda: rng_state
        torch.cuda.set_rng_state = lambda _: None

        config = self.config
        hf_config = config.hf_config
        max_bs = min(self.config.max_num_seqs, 256)
        max_num_blocks = (config.max_model_len + self.block_size - 1) // self.block_size
        input_ids = torch.zeros(max_bs, dtype=torch.int64)
        positions = torch.zeros(max_bs, dtype=torch.int64)
        slot_mapping = torch.zeros(max_bs, dtype=torch.int32)
        context_lens = torch.zeros(max_bs, dtype=torch.int32)
        block_tables = torch.zeros(max_bs, max_num_blocks, dtype=torch.int32)
        outputs = torch.zeros(max_bs, hf_config.hidden_size)
        self.graph_bs = [1, 2, 4, 8, 16] + list(range(32, max_bs + 1, 32))
        self.graphs = {}
        self.graph_pool = None

        # Capture the largest batch size first so all graphs share one memory pool.
        for bs in reversed(self.graph_bs):
            graph = torch.cuda.CUDAGraph()
            set_context(False, slot_mapping=slot_mapping[:bs], context_lens=context_lens[:bs], block_tables=block_tables[:bs])
            outputs[:bs] = self.model(input_ids[:bs], positions[:bs])    # warmup
            with torch.cuda.graph(graph, self.graph_pool):
                outputs[:bs] = self.model(input_ids[:bs], positions[:bs])    # capture
            if self.graph_pool is None:
                self.graph_pool = graph.pool()
            self.graphs[bs] = graph
            torch.cuda.synchronize()
            reset_context()

        self.graph_vars = dict(
            input_ids=input_ids,
            positions=positions,
            slot_mapping=slot_mapping,
            context_lens=context_lens,
            block_tables=block_tables,
            outputs=outputs,
        )

        torch.cuda.get_rng_state = get_rng_state
        torch.cuda.set_rng_state = set_rng_state
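For context, the KV-cache sizing arithmetic from allocate_kv_cache above, worked through with hypothetical Qwen3-0.6B-like values (28 layers, 8 KV heads, head dim 128, bf16); the config numbers are assumptions, and get_gpu_memory appears to report megabytes given the 1e6 scaling:

num_layers, num_kv_heads, head_dim, dtype_bytes = 28, 8, 128, 2   # assumed config values
block_size = 256
block_bytes = 2 * num_layers * block_size * num_kv_heads * head_dim * dtype_bytes
print(block_bytes)                           # 29360128 bytes, ~28 MiB per 256-token block
free_mb = 8_000                              # say ~8 GB of headroom left after the weights
num_blocks = int(free_mb * 1e6) // block_bytes
print(num_blocks, num_blocks * block_size)   # 272 blocks -> 69632 cacheable tokens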
84
nanovllm/engine/scheduler.py
Normal file
@@ -0,0 +1,84 @@
from collections import deque

from nanovllm.config import Config
from nanovllm.engine.sequence import Sequence, SequenceStatus
from nanovllm.engine.block_manager import BlockManager


class Scheduler:

    def __init__(self, config: Config):
        self.max_num_seqs = config.max_num_seqs
        self.max_num_batched_tokens = config.max_num_batched_tokens
        self.eos = config.eos
        self.block_manager = BlockManager(config.num_kvcache_blocks, config.kvcache_block_size)
        self.waiting: deque[Sequence] = deque()
        self.running: deque[Sequence] = deque()
        self.num_finished = 0
        self.num_tokens = 0

    def is_finished(self):
        return not self.waiting and not self.running

    def add(self, seq: Sequence):
        self.waiting.append(seq)

    def schedule(self) -> tuple[list[Sequence], bool]:
        # prefill: admit waiting sequences while the token budget and free blocks allow
        scheduled_seqs = []
        num_seqs = 0
        num_batched_tokens = 0
        while self.waiting and num_seqs < self.max_num_seqs:
            seq = self.waiting[0]
            if num_batched_tokens + len(seq) > self.max_num_batched_tokens or not self.block_manager.can_allocate(seq):
                break
            num_seqs += 1
            self.block_manager.allocate(seq)
            num_batched_tokens += len(seq) - seq.num_cached_tokens
            seq.status = SequenceStatus.RUNNING
            self.waiting.popleft()
            self.running.append(seq)
            scheduled_seqs.append(seq)
        if scheduled_seqs:
            return scheduled_seqs, True

        # decode: advance running sequences, preempting from the back when blocks run out
        # self.running = deque(sorted(self.running))
        while self.running and num_seqs < self.max_num_seqs:
            seq = self.running.popleft()
            while not self.block_manager.can_append():
                if self.running:
                    self.preempt(self.running.pop())
                else:
                    self.preempt(seq)
                    break
            else:
                num_seqs += 1
                self.block_manager.may_append(seq)
                scheduled_seqs.append(seq)
        running = deque(scheduled_seqs)
        running.extend(self.running)
        self.running = running
        if scheduled_seqs:
            return scheduled_seqs, False

    def preempt(self, seq: Sequence):
        seq.status = SequenceStatus.WAITING
        self.block_manager.deallocate(seq)
        self.waiting.appendleft(seq)
        return True

    def postprocess(self, seqs: list[Sequence], token_ids: list[int]) -> list[bool]:
        self.num_tokens += len(token_ids)
        finished = []
        for seq, token_id in zip(seqs, token_ids):
            seq.append_token(token_id)
            # Finish on EOS (unless ignore_eos is set) or once max_tokens is reached.
            if (not seq.ignore_eos and token_id == self.eos) or seq.num_completion_tokens == seq.max_tokens:
                seq.status = SequenceStatus.FINISHED
                self.block_manager.deallocate(seq)
                self.running.remove(seq)
                self.num_finished += 1
                finished.append(True)
            else:
                finished.append(False)
        return finished
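For context, the prefill admission rule from schedule() above, rewritten as a standalone predicate with toy numbers (an illustration, not the scheduler's API):

def can_prefill(prompt_len, num_batched_tokens, max_num_batched_tokens, free_blocks, block_size=256):
    # Mirrors the check in schedule(): token budget first, then free KV-cache blocks.
    needs_blocks = (prompt_len + block_size - 1) // block_size
    return (num_batched_tokens + prompt_len <= max_num_batched_tokens
            and needs_blocks <= free_blocks)

print(can_prefill(3000, 0, 8192, free_blocks=20))      # True: 12 blocks needed, budget fine
print(can_prefill(3000, 6000, 8192, free_blocks=20))   # False: 9000 tokens exceed the 8192 budget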
73
nanovllm/engine/sequence.py
Normal file
@@ -0,0 +1,73 @@
from copy import copy
from enum import Enum, auto
from itertools import count

from nanovllm.sampling_params import SamplingParams


class SequenceStatus(Enum):
    WAITING = auto()
    RUNNING = auto()
    FINISHED = auto()


class Sequence:
    counter = count()

    def __init__(self, token_ids: list[int], sampling_params: SamplingParams):
        self.seq_id = next(Sequence.counter)
        self.status = SequenceStatus.WAITING
        self.token_ids = copy(token_ids)
        self.num_prompt_tokens = len(token_ids)
        self._num_cached_tokens = 0
        self.block_table = []
        self.temperature = sampling_params.temperature
        self.max_tokens = sampling_params.max_tokens
        self.ignore_eos = sampling_params.ignore_eos

    def __len__(self):
        return len(self.token_ids)

    def __lt__(self, other):
        return self.seq_id < other.seq_id

    def __getitem__(self, key):
        return self.token_ids[key]

    @property
    def num_completion_tokens(self):
        return len(self.token_ids) - self.num_prompt_tokens

    @property
    def num_cached_tokens(self):
        return self._num_cached_tokens

    @num_cached_tokens.setter
    def num_cached_tokens(self, num_cached_tokens):
        # Cached tokens always come in whole 256-token blocks.
        assert num_cached_tokens % 256 == 0
        self._num_cached_tokens = num_cached_tokens

    @property
    def num_cached_blocks(self):
        return self.num_cached_tokens // 256

    @property
    def num_blocks(self):
        return (len(self.token_ids) + 255) // 256

    @property
    def last_token(self):
        return self.token_ids[-1]

    def block(self, i, block_size=256):
        return self.token_ids[i*block_size: (i+1)*block_size]

    def last_block(self, block_size=256):
        # Tokens in the final (possibly partial) block.
        n = self.num_blocks
        t = len(self) + block_size - self.num_blocks * block_size
        x = self.token_ids[(n-1)*block_size:]
        assert len(x) == t
        return x

    def append_token(self, token_id: int):
        self.token_ids.append(token_id)
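For context, the block bookkeeping arithmetic behind num_blocks and last_block above, for a made-up 600-token sequence with the fixed 256-token blocks:

num_tokens, block_size = 600, 256
num_blocks = (num_tokens + 255) // 256                               # 3 blocks: 256 + 256 + 88
last_block_len = num_tokens + block_size - num_blocks * block_size   # 88 tokens in the partial block
print(num_blocks, last_block_len)                                    # 3 88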