Advertisement
Qwen VL Max is a visual understanding model with a 7,500-token context length. It delivers strong performance across a broad range of complex tasks.
from openai import OpenAI

# Minimal chat-completion example: the OpenAI SDK pointed at OpenRouter,
# targeting the Qwen VL Max model. Replace "YOUR_API_KEY" with a real key.
openrouter_client = OpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key="YOUR_API_KEY",
)

# A single-turn conversation: one user message.
chat_messages = [
    {"role": "user", "content": "Hello!"},
]

completion = openrouter_client.chat.completions.create(
    model="qwen/qwen-vl-max",
    messages=chat_messages,
)

# Print the assistant's reply from the first (and only) choice.
print(completion.choices[0].message.content)