QwenLM/vllm-gptq: A high-throughput and memory-efficient inference and serving engine for LLMs
First, install FastChat (the vLLM worker additionally requires a working vLLM installation, such as the one built from this repository):

pip install fschat

Then start the controller, a vLLM worker for your model, and the OpenAI-compatible API server, each in its own terminal:

python -m fastchat.serve.controller
python -m fastchat.serve.vllm_worker --model-path $model_path --tensor-parallel-size 1 --trust-remote-code
python -m fastchat.serve.openai_api_server --host localhost --port 8000
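Once all three processes are up, you can sanity-check the server by listing the registered models (assuming the default host and port from above):

curl http://localhost:8000/v1/models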
The client example below uses the pre-1.0 OpenAI Python API, so pin the package accordingly (note the double equals sign in the version specifier):

pip install --upgrade "openai==0.28"
import openai
# To authenticate properly, use a valid key listed in the server's --api-keys flag.
# If that flag is not set, the value of `api_key` is ignored.
openai.api_key = "EMPTY"
openai.api_base = "http://localhost:8000/v1"
model = "qwen"
call_args = {
    'temperature': 1.0,
    'top_p': 1.0,
    'top_k': -1,  # -1 disables top-k sampling (a vLLM extension forwarded by FastChat)
    'max_tokens': 2048,  # maximum number of generated (output) tokens
    'presence_penalty': 1.0,
    'frequency_penalty': 0.0,
}
# create a chat completion
completion = openai.ChatCompletion.create(
    model=model,
    messages=[{"role": "user", "content": "Hello! What is your name?"}],
    **call_args
)
# print the completion
print(completion.choices[0].message.content)
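The same endpoint also supports token-by-token streaming with the pre-1.0 client. A minimal sketch reusing the configuration above (stream=True and the delta-based chunk format are the standard 0.28 API, not anything specific to this repository):

# stream the reply incrementally instead of waiting for the full completion
stream = openai.ChatCompletion.create(
    model=model,
    messages=[{"role": "user", "content": "Hello! What is your name?"}],
    stream=True,
    **call_args
)
for chunk in stream:
    # each chunk carries an incremental delta; "content" may be absent in the final chunk
    print(chunk.choices[0].delta.get("content", ""), end="", flush=True)
print()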
To accept requests from other machines, bind the API server to the machine's address (IP below is a placeholder) instead of localhost:

python -m fastchat.serve.openai_api_server --host IP --port 8000
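Remote clients then only need a different api_base; everything else in the example above stays the same (IP again stands for the server machine's address):

import openai

openai.api_key = "EMPTY"
openai.api_base = "http://IP:8000/v1"  # point the client at the remote server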