use spawn

cheunglei
2025-06-17 22:48:44 +08:00
parent 7e42fa6f63
commit b5ace32982
3 changed files with 52 additions and 41 deletions


@@ -5,6 +5,7 @@ from nanovllm import LLM, SamplingParams
 # from vllm import LLM, SamplingParams
+def main():
     seed(0)
     num_seqs = 256
     max_input_len = 1024
@@ -25,3 +26,7 @@ t = (time.time() - t)
     total_tokens = sum(sp.max_tokens for sp in sampling_params)
     throughput = total_tokens / t
     print(f"Total: {total_tokens}tok, Time: {t:.2f}s, Throughput: {throughput:.2f}tok/s")
+
+
+if __name__ == "__main__":
+    main()
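Why the guard is needed: the engine (last file below) now launches its workers with the "spawn" start method, and a spawned child boots a fresh interpreter that re-imports the parent's __main__ module. Any top-level statements in the benchmark (seeding, building the LLM, timing the run) would therefore execute again in every worker; moving them into main() behind if __name__ == "__main__" keeps the child's import side-effect free. A minimal self-contained sketch of the pattern (illustrative, not from the commit):

import multiprocessing as mp

def worker(rank):
    print(f"worker {rank} ready")

def main():
    ctx = mp.get_context("spawn")
    procs = [ctx.Process(target=worker, args=(i,)) for i in range(2)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()

# Without this guard, each spawned child would re-execute main() while
# importing __main__ and try to spawn workers of its own.
if __name__ == "__main__":
    main()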


@@ -3,9 +3,10 @@ from nanovllm import LLM, SamplingParams
 from transformers import AutoTokenizer
+def main():
     path = os.path.expanduser("~/huggingface/Qwen3-0.6B/")
     tokenizer = AutoTokenizer.from_pretrained(path)
-    llm = LLM(path, enforce_eager=True)
+    llm = LLM(path, enforce_eager=True, tensor_parallel_size=1)
     sampling_params = SamplingParams(temperature=0.6, max_tokens=256)
     prompts = [
@@ -27,3 +28,7 @@ for prompt, output in zip(prompts, outputs):
         print("\n")
         print(f"Prompt: {prompt!r}")
         print(f"Completion: {output['text']!r}")
+
+
+if __name__ == "__main__":
+    main()
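The example now spells out tensor_parallel_size=1; any value above 1 makes LLMEngine (next file) spawn one extra ModelRunner process per additional rank. A hedged sketch of the multi-GPU variant, assuming an LLM.generate(prompts, sampling_params) call that returns dicts with a "text" key, as the example's output loop suggests; the parallel size and prompt here are illustrative, not from the commit:

import os
from nanovllm import LLM, SamplingParams

def main():
    path = os.path.expanduser("~/huggingface/Qwen3-0.6B/")
    # tensor_parallel_size=2 makes the engine spawn one extra rank;
    # illustrative only -- requires two GPUs.
    llm = LLM(path, enforce_eager=True, tensor_parallel_size=2)
    sampling_params = SamplingParams(temperature=0.6, max_tokens=32)
    outputs = llm.generate(["introduce yourself"], sampling_params)
    print(outputs[0]["text"])

# Required for spawn: each child re-imports this module.
if __name__ == "__main__":
    main()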


@@ -20,9 +20,10 @@ class LLMEngine:
         config = Config(model, **config_kwargs)
         self.ps = []
         self.events = []
+        ctx = mp.get_context("spawn")
         for i in range(1, config.tensor_parallel_size):
-            event = mp.Event()
-            process = mp.Process(target=ModelRunner, args=(config, i, event))
+            event = ctx.Event()
+            process = ctx.Process(target=ModelRunner, args=(config, i, event))
             process.start()
             self.ps.append(process)
             self.events.append(event)
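Two details of this change are worth spelling out. First, "spawn" rather than the Linux default "fork": by the time extra ranks start, the parent has typically already initialized CUDA, and a CUDA context does not survive fork(), whereas a spawned child begins in a clean interpreter. Second, mp.get_context("spawn") scopes the start method to this engine; the alternative, mp.set_start_method("spawn"), mutates process-wide state and raises if called a second time. The Event is accordingly also taken from the same context, since per the multiprocessing docs objects tied to one context may not be compatible with processes from another. A minimal sketch of the same pattern, independent of nano-vllm:

import multiprocessing as mp

def child(ready):
    # Fresh interpreter: no CUDA state inherited from the parent,
    # which fork would copy into the child and corrupt.
    ready.set()

def main():
    # get_context keeps the choice local; unlike
    # mp.set_start_method("spawn"), it leaves global state alone
    # and can coexist with other libraries' choices.
    ctx = mp.get_context("spawn")
    ready = ctx.Event()  # primitive taken from the same context
    p = ctx.Process(target=child, args=(ready,))
    p.start()
    ready.wait()
    p.join()

if __name__ == "__main__":
    main()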