use spawn
Changed files: bench.py, example.py, nanovllm/engine/llm_engine.py
(The scripts' top-level code was re-indented into main(); those whitespace-only context changes are not marked in the hunks below.)

bench.py
@@ -5,6 +5,7 @@ from nanovllm import LLM, SamplingParams
 # from vllm import LLM, SamplingParams
 
 
+def main():
     seed(0)
     num_seqs = 256
     max_input_len = 1024
@@ -25,3 +26,7 @@ t = (time.time() - t)
     total_tokens = sum(sp.max_tokens for sp in sampling_params)
     throughput = total_tokens / t
     print(f"Total: {total_tokens}tok, Time: {t:.2f}s, Throughput: {throughput:.2f}tok/s")
+
+
+if __name__ == "__main__":
+    main()
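Why the scripts gain a main() wrapper and an `if __name__ == "__main__":` guard: with the spawn start method, each worker starts a fresh interpreter and re-imports the parent's __main__ module, so any unguarded top-level code in bench.py would run again in every worker. A minimal sketch of the pattern (illustrative only, not part of this commit):

# spawn_guard_sketch.py -- illustrative sketch, not part of this commit
import multiprocessing as mp

def worker():
    print("worker runs in a fresh interpreter")

# Without this guard, each spawned child re-imports this module as __main__,
# reaches the Process(...) call again, and raises a RuntimeError about the
# bootstrapping phase; under "fork" the unguarded version would appear to work.
if __name__ == "__main__":
    ctx = mp.get_context("spawn")
    p = ctx.Process(target=worker)
    p.start()
    p.join()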
example.py
@@ -3,9 +3,10 @@ from nanovllm import LLM, SamplingParams
 from transformers import AutoTokenizer
 
 
+def main():
     path = os.path.expanduser("~/huggingface/Qwen3-0.6B/")
     tokenizer = AutoTokenizer.from_pretrained(path)
-    llm = LLM(path, enforce_eager=True)
+    llm = LLM(path, enforce_eager=True, tensor_parallel_size=1)
 
     sampling_params = SamplingParams(temperature=0.6, max_tokens=256)
     prompts = [
@@ -27,3 +28,7 @@ for prompt, output in zip(prompts, outputs):
     print("\n")
     print(f"Prompt: {prompt!r}")
     print(f"Completion: {output['text']!r}")
+
+
+if __name__ == "__main__":
+    main()
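The same guard lands in example.py (file name inferred from the hunk contents), and tensor_parallel_size=1 is now passed explicitly. The likely motivation for switching the engine to spawn at all: once the parent process has initialized CUDA, a forked child cannot use it; PyTorch fails with "Cannot re-initialize CUDA in forked subprocess" and recommends the spawn start method. A hedged sketch of that constraint (illustrative only; requires a CUDA device):

# cuda_fork_sketch.py -- illustrative only; requires a CUDA-capable machine
import multiprocessing as mp
import torch

def gpu_worker():
    # Under "spawn" this child is a clean interpreter and may initialize CUDA
    # itself; under "fork" it would inherit the parent's CUDA state and torch
    # would raise "Cannot re-initialize CUDA in forked subprocess".
    x = torch.ones(1, device="cuda")
    print(x.item())

if __name__ == "__main__":
    torch.ones(1, device="cuda")       # parent initializes CUDA first
    ctx = mp.get_context("spawn")      # "fork" here would break the child
    p = ctx.Process(target=gpu_worker)
    p.start()
    p.join()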
nanovllm/engine/llm_engine.py
@@ -20,9 +20,10 @@ class LLMEngine:
         config = Config(model, **config_kwargs)
         self.ps = []
         self.events = []
+        ctx = mp.get_context("spawn")
         for i in range(1, config.tensor_parallel_size):
-            event = mp.Event()
-            process = mp.Process(target=ModelRunner, args=(config, i, event))
+            event = ctx.Event()
+            process = ctx.Process(target=ModelRunner, args=(config, i, event))
             process.start()
             self.ps.append(process)
             self.events.append(event)
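On the engine side, the module-level mp.Event/mp.Process calls are replaced with primitives created from a dedicated spawn context. multiprocessing.get_context("spawn") returns a context object whose Process, Event, Queue, etc. all use the spawn start method without changing the global default (unlike mp.set_start_method), and synchronization primitives should come from the same context as the processes they are shared with. A minimal sketch of the pattern (the diff passes each ModelRunner an Event but does not show how it is used, so the worker below just sets it as a started-up signal):

# spawn_context_sketch.py -- minimal sketch of the pattern in this commit
import multiprocessing as mp

def worker(rank, event):
    print(f"rank {rank} up")
    event.set()  # illustrative handshake back to the parent

if __name__ == "__main__":
    ctx = mp.get_context("spawn")  # local context; global start method untouched
    procs, events = [], []
    for rank in range(1, 3):       # ranks 1..2; rank 0 stays in this process
        event = ctx.Event()        # Event from the same context as the Process
        p = ctx.Process(target=worker, args=(rank, event))
        p.start()
        procs.append(p)
        events.append(event)
    for e in events:
        e.wait()
    for p in procs:
        p.join()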