commit f6c7c65d6c
parent 75ae2c5fd5
Date: 2025-08-27 22:21:14 +08:00
6 changed files with 176 additions and 1 deletion

View File

@@ -1,2 +1,10 @@
openai
dashscope
langchain
langchain-deepseek
langchain-community
llama-index
llama-index-core
llama-index-llms-dashscope
llama-index-indices-managed-dashscope
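
Every example in this commit reads DASHSCOPE_API_KEY from the environment. A minimal fail-fast guard, sketched as an assumed convenience rather than a committed file:

import os
import sys

# Exit with a clear message if the DashScope key is missing.
if not os.environ.get("DASHSCOPE_API_KEY"):
    sys.exit("Set DASHSCOPE_API_KEY before running these examples.")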

View File

@@ -0,0 +1,15 @@
from langchain_deepseek import ChatDeepSeek
import os

# DeepSeek served through DashScope's OpenAI-compatible endpoint.
chatLLM = ChatDeepSeek(
    api_key=os.getenv("DASHSCOPE_API_KEY"),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    model="deepseek-chat",  # Replace the model name as needed. Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
    # other params...
)
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Who are you?"},
]
response = chatLLM.invoke(messages)
print(response.model_dump_json())
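
A hedged extension (not part of the commit): the same chatLLM also supports LangChain's streaming interface, which yields message chunks instead of one full reply.

# Stream the reply and print deltas as they arrive.
for chunk in chatLLM.stream(messages):
    print(chunk.content, end="", flush=True)
print()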

test/langchain/qwen.py Normal file
View File

@@ -0,0 +1,15 @@
import os

# Reuse the DashScope key for the deepseek provider if no dedicated key is set.
if not os.environ.get("DEEPSEEK_API_KEY"):
    os.environ["DEEPSEEK_API_KEY"] = os.environ["DASHSCOPE_API_KEY"]

from langchain.chat_models import init_chat_model

# Point the deepseek provider at DashScope's OpenAI-compatible endpoint so the
# DashScope key and Qwen model names resolve (extra kwargs pass through to ChatDeepSeek).
model = init_chat_model(
    "qwen-max",
    model_provider="deepseek",
    api_base="https://dashscope.aliyuncs.com/compatible-mode/v1",
)
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Who are you?"},
]
response = model.invoke(messages)
print(response.model_dump_json())
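
For parity with the async LlamaIndex test later in this commit, a minimal async sketch (an assumed addition, not committed); LangChain chat models also expose ainvoke:

import asyncio

async def main():
    # Async variant of the same invocation, reusing model and messages above.
    response = await model.ainvoke(messages)
    print(response.model_dump_json())

asyncio.run(main())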

View File

@@ -0,0 +1,52 @@
{
"cells": [
{
"metadata": {},
"cell_type": "raw",
"source": [
"from llama_index.llms.dashscope import DashScope\n",
"from llama_index.llms.openai import OpenAI\n",
"\n",
"llm = DashScope(model_name=\"qwen-max\") # 设置检索引擎生成回答时调用的大模型。"
],
"id": "a5d3b9e1d4e6588f"
},
{
"cell_type": "code",
"id": "initial_id",
"metadata": {
"collapsed": true,
"jupyter": {
"is_executing": true
}
},
"source": [
"response = llm.complete(\"William Shakespeare is \")\n",
"print(response)"
],
"outputs": [],
"execution_count": null
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,41 @@
from llama_index.core.base.llms.types import ChatMessage
from llama_index.llms.dashscope import DashScope
import asyncio

llm = DashScope(model_name="qwen-max")  # Set the LLM used to generate responses.

def test1():
    # Synchronous completion.
    response = llm.complete("William Shakespeare is ")
    print(response)

async def test2():
    # Asynchronous completion.
    response = await llm.acomplete("William Shakespeare is ")
    print(response)

def test3():
    # Streaming completion, printing each chunk object.
    response = llm.stream_complete("William Shakespeare is ")
    for chunk in response:
        print(chunk)

def test4():
    # Streaming completion, printing only the incremental deltas.
    handle = llm.stream_complete("William Shakespeare is ")
    for token in handle:
        print(token.delta, end="", flush=True)

def test5():
    # Multi-turn chat interface.
    messages = [
        ChatMessage(role="system", content="You are a helpful assistant."),
        ChatMessage(role="user", content="Tell me a joke."),
    ]
    chat_response = llm.chat(messages)
    print(chat_response)

if __name__ == '__main__':
    # test1()
    # asyncio.run(test2())
    # test3()
    # test4()
    test5()
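
A hedged companion to test5 (not part of the commit); it assumes the DashScope integration also implements LlamaIndex's stream_chat interface, whose responses carry incremental deltas:

def test6():
    # Streaming chat; assumes DashScope implements stream_chat.
    messages = [
        ChatMessage(role="system", content="You are a helpful assistant."),
        ChatMessage(role="user", content="Tell me a joke."),
    ]
    for r in llm.stream_chat(messages):
        print(r.delta, end="", flush=True)
    print()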

View File

@@ -0,0 +1,44 @@
{
"cells": [
{
"metadata": {},
"cell_type": "raw",
"source": "print(\"hello\")",
"id": "a5d3b9e1d4e6588f"
},
{
"cell_type": "code",
"id": "initial_id",
"metadata": {
"collapsed": true,
"jupyter": {
"is_executing": true
}
},
"source": "",
"outputs": [],
"execution_count": null
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 5
}