From ec3c60d96fa9d756212c98388e533f064e2bea09 Mon Sep 17 00:00:00 2001
From: GeeeekExplorer <2651904866@qq.com>
Date: Thu, 12 Jun 2025 09:47:09 +0800
Subject: [PATCH] update bench

---
 bench.py                         | 25 ++++++++++++++++---------
 example.py                       |  4 ++--
 nanovllm/engine/block_manager.py |  2 +-
 3 files changed, 19 insertions(+), 12 deletions(-)

diff --git a/bench.py b/bench.py
index 0c4825e..4b4b4a3 100644
--- a/bench.py
+++ b/bench.py
@@ -1,20 +1,27 @@
 import os
 import time
-import torch
+from random import randint, seed
 from nanovllm import LLM, SamplingParams
+# from vllm import LLM, SamplingParams
 
 
-batch_size = 256
-seq_len = 1024
-max_tokens = 512
+seed(0)
+num_seqs = 256
+max_input_len = 1024
+max_output_len = 1024
 
 path = os.path.expanduser("~/huggingface/Qwen3-0.6B/")
-llm = LLM(path, enforce_eager=False)
+llm = LLM(path, enforce_eager=False, max_model_len=4096)
 
-prompt_token_ids = torch.randint(0, 10240, (batch_size, seq_len)).tolist()
-sampling_params = SamplingParams(temperature=0.6, ignore_eos=True, max_tokens=max_tokens)
+prompt_token_ids = [[randint(0, 10000) for _ in range(randint(100, max_input_len))] for _ in range(num_seqs)]
+sampling_params = [SamplingParams(temperature=0.6, ignore_eos=True, max_tokens=randint(100, max_output_len)) for _ in range(num_seqs)]
+# uncomment the following line for vllm
+# prompt_token_ids = [dict(prompt_token_ids=p) for p in prompt_token_ids]
 
+llm.generate(["Benchmark: "], SamplingParams())  # warmup
 t = time.time()
 llm.generate(prompt_token_ids, sampling_params)
-throughput = batch_size * max_tokens / (time.time() - t)
-print(f"Throughput: {throughput: .2f}")
+t = time.time() - t
+total_tokens = sum(sp.max_tokens for sp in sampling_params)
+throughput = total_tokens / t
+print(f"Total: {total_tokens}, Time: {t:.2f}s, Throughput: {throughput:.2f}")
diff --git a/example.py b/example.py
index face5eb..7e46349 100644
--- a/example.py
+++ b/example.py
@@ -25,5 +25,5 @@ outputs = llm.generate(prompts, sampling_params)
 
 for prompt, output in zip(prompts, outputs):
     print("\n")
-    print(f"Prompt: {prompt}")
-    print(f"Completion: {output['text']}")
+    print(f"Prompt: {prompt!r}")
+    print(f"Completion: {output['text']!r}")
diff --git a/nanovllm/engine/block_manager.py b/nanovllm/engine/block_manager.py
index fde8b37..fef6645 100644
--- a/nanovllm/engine/block_manager.py
+++ b/nanovllm/engine/block_manager.py
@@ -8,7 +8,7 @@ from nanovllm.engine.sequence import Sequence
 def compute_hash(token_ids: list[int], prefix: int = -1):
     h = xxhash.xxh64()
     if prefix != -1:
-        h.update(prefix.to_bytes(8))
+        h.update(prefix.to_bytes(8, "little"))
     h.update(np.array(token_ids).tobytes())
     return h.intdigest()
 
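
Note on the new throughput accounting in bench.py: the old formula
batch_size * max_tokens was exact only because every sequence shared one
token budget; with per-sequence budgets it no longer applies. Because each
SamplingParams is created with ignore_eos=True, a sequence never stops
early at an EOS token, so it decodes exactly max_tokens tokens and the
timed token count is known up front as sum(sp.max_tokens for sp in
sampling_params). The untimed warmup call keeps one-time costs such as
CUDA graph capture out of the measured region. A minimal sketch of the
accounting, assuming only the SamplingParams constructor shown in the
patch:

    from random import randint, seed

    from nanovllm import SamplingParams

    seed(0)  # fixed seed: every run benchmarks the same workload
    params = [
        SamplingParams(temperature=0.6, ignore_eos=True,
                       max_tokens=randint(100, 1024))
        for _ in range(4)
    ]
    # ignore_eos=True pins the stop condition to the token budget, so the
    # total decoded-token count is exact rather than an estimate.
    total_tokens = sum(p.max_tokens for p in params)
    print(total_tokens)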
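
Note on the block_manager.py change: int.to_bytes only gained default
arguments (length=1, byteorder="big") in Python 3.11, so the bare
prefix.to_bytes(8) raises TypeError on Python 3.10 and earlier, and on
3.11+ it silently falls back to big-endian. Passing "little" explicitly
lets compute_hash run on older interpreters and pins the byte order of
the prefix-cache hash input instead of relying on a version-dependent
default. A minimal sketch of the difference, runnable on any Python 3:

    prefix = 0x1234

    # Explicit byte order: deterministic on every Python version.
    little = prefix.to_bytes(8, "little")
    big = prefix.to_bytes(8, "big")
    assert little != big  # the hash input depends on the byte order

    try:
        # Python < 3.11: byteorder is required, so this raises TypeError.
        # Python >= 3.11: byteorder defaults to "big".
        implicit = prefix.to_bytes(8)
        assert implicit == big
    except TypeError:
        print("byteorder is mandatory before Python 3.11")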