save all
This commit is contained in:
23
test/compatible/deepseek.py
Normal file
23
test/compatible/deepseek.py
Normal file
@@ -0,0 +1,23 @@
|
||||
"""Call DeepSeek-R1 through DashScope's OpenAI-compatible endpoint.

Prints the model's reasoning trace first, then its final answer.
"""
import os

from openai import OpenAI

# If the DASHSCOPE_API_KEY environment variable is not configured, replace the
# getenv call below with api_key="sk-xxx".
# How to obtain an API key:
# https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key
llm = OpenAI(
    api_key=os.getenv("DASHSCOPE_API_KEY"),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)

chat_response = llm.chat.completions.create(
    model="deepseek-r1",  # deepseek-r1 is an example; swap in any supported model name.
    messages=[{'role': 'user', 'content': '9.9和9.11谁大'}],
)

answer = chat_response.choices[0].message

# The reasoning trace is exposed through the model-specific reasoning_content field.
print("思考过程:")
print(answer.reasoning_content)

# The final answer lives in the standard content field.
print("最终答案:")
print(answer.content)
20
test/dashscope/deepseek.py
Normal file
20
test/dashscope/deepseek.py
Normal file
@@ -0,0 +1,20 @@
|
||||
"""Query DeepSeek-R1 via the native DashScope SDK and print its reasoning."""
import os

import dashscope

prompt_messages = [{'role': 'user', 'content': '你是谁?'}]

# If the DASHSCOPE_API_KEY environment variable is not configured, replace the
# getenv call below with api_key="sk-xxx".
response = dashscope.Generation.call(
    api_key=os.getenv('DASHSCOPE_API_KEY'),
    model="deepseek-r1",  # deepseek-r1 is an example; swap in any supported model name.
    messages=prompt_messages,
    # result_format must not be set to "text" for this model.
    result_format='message',
)

reply = response.output.choices[0].message
print("=" * 20 + "思考过程" + "=" * 20)
print(reply.reasoning_content)
print("=" * 20 + "最终答案" + "=" * 20)
print(reply.content)
15
test/dashscope/qwen.py
Normal file
15
test/dashscope/qwen.py
Normal file
@@ -0,0 +1,15 @@
|
||||
"""Minimal qwen-plus chat call through the DashScope SDK."""
import os

import dashscope

chat_history = [
    {'role': 'system', 'content': 'You are a helpful assistant.'},
    {'role': 'user', 'content': '你是谁?'},
]

# If the DASHSCOPE_API_KEY environment variable is not configured, replace the
# getenv call below with api_key="sk-xxx".
reply = dashscope.Generation.call(
    api_key=os.getenv('DASHSCOPE_API_KEY'),
    # qwen-plus is an example; full model list:
    # https://help.aliyun.com/zh/model-studio/getting-started/models
    model="qwen-plus",
    messages=chat_history,
    result_format='message',
)
print(reply)
14
test/langchain/compatible/qwen.py
Normal file
14
test/langchain/compatible/qwen.py
Normal file
@@ -0,0 +1,14 @@
|
||||
"""Chat with qwen-plus via LangChain's ChatOpenAI against the compatible-mode endpoint."""
import os

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    api_key=os.getenv("DASHSCOPE_API_KEY"),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    # qwen-plus is an example; full model list:
    # https://help.aliyun.com/zh/model-studio/getting-started/models
    model="qwen-plus",
)

conversation = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "你是谁?"},
]
result = llm.invoke(conversation)
print(result.model_dump_json())
11
test/langchain/dashscope/qwen.py
Normal file
11
test/langchain/dashscope/qwen.py
Normal file
@@ -0,0 +1,11 @@
|
||||
"""Stream a qwen-max reply through LangChain's ChatTongyi wrapper."""
from langchain_community.chat_models.tongyi import ChatTongyi
from langchain_core.messages import HumanMessage

llm = ChatTongyi(
    # qwen-max is an example; full model list:
    # https://help.aliyun.com/zh/model-studio/getting-started/models
    model="qwen-max",
    streaming=True,
)

# Consume the reply chunk by chunk instead of waiting for the full message.
# NOTE(review): streaming=True appears both in the constructor and here —
# likely redundant; kept to preserve behavior exactly.
for chunk in llm.stream([HumanMessage(content="hi")], streaming=True):
    print("chat resp:", chunk.content)
15
test/langchain/deepseek.py
Normal file
15
test/langchain/deepseek.py
Normal file
@@ -0,0 +1,15 @@
|
||||
"""Initialise a DeepSeek chat model via LangChain's provider-agnostic factory."""
import getpass
import os

# Fall back to the DashScope key when no (non-empty) DeepSeek key is configured.
# NOTE(review): os.environ['DASHSCOPE_API_KEY'] raises KeyError when that
# variable is also unset — confirm that is the intended failure mode.
if not os.environ.get("DEEPSEEK_API_KEY"):
    os.environ["DEEPSEEK_API_KEY"] = os.environ['DASHSCOPE_API_KEY']

from langchain.chat_models import init_chat_model

chat = init_chat_model("deepseek-chat", model_provider="deepseek")

dialogue = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "你是谁?"},
]
result = chat.invoke(dialogue)
print(result.model_dump_json())
25
test/llamaindex/dashscope_parse.py
Normal file
25
test/llamaindex/dashscope_parse.py
Normal file
@@ -0,0 +1,25 @@
|
||||
"""Two ways to parse documents with DashScopeParse (llama-index DashScope reader)."""
import os

from llama_index.readers.dashscope.base import DashScopeParse
from llama_index.readers.dashscope.utils import ResultType

# The workspace ID decides which workspace the parsed output lands in during
# the "create knowledge base" step.
os.environ['DASHSCOPE_WORKSPACE_ID'] = "<Your Workspace id, Default workspace is empty.>"

# Option 1: parse one or more explicitly listed files.
file = [
    # Files to parse here; pdf, doc and docx are supported.
]
# Parse the listed files.
doc_parser = DashScopeParse(result_type=ResultType.DASHSCOPE_DOCMIND)
documents = doc_parser.load_data(file_path=file)

# Option 2: parse every file of the registered types inside a folder.
from llama_index.core import SimpleDirectoryReader

dir_parser = DashScopeParse(result_type=ResultType.DASHSCOPE_DOCMIND)
# Map each supported file extension onto the parser that handles it.
file_extractor = {".pdf": dir_parser, '.doc': dir_parser, '.docx': dir_parser}
# Read the folder, extracting and parsing the files' contents.
documents = SimpleDirectoryReader(
    "your_folder", file_extractor=file_extractor
).load_data(num_workers=1)
7
test/llamaindex/data/test1.py
Normal file
7
test/llamaindex/data/test1.py
Normal file
@@ -0,0 +1,7 @@
|
||||
"""Build an in-memory vector index over ./data and run one sample query."""
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader

corpus = SimpleDirectoryReader("data").load_data()
vector_index = VectorStoreIndex.from_documents(corpus)
engine = vector_index.as_query_engine()
answer = engine.query("Some question about the data should go here")
print(answer)
2
test/llamaindex/env
Normal file
2
test/llamaindex/env
Normal file
@@ -0,0 +1,2 @@
|
||||
# MacOS/Linux
# SECURITY(review): a real-looking API key is hard-coded and committed below.
# Rotate this key and load it from an untracked .env file or a secret store
# instead of version control.
export OPENAI_API_KEY=sk-e2a05bbcfac84e53b73f98acef15a009
7
test/llamaindex/test1.py
Normal file
7
test/llamaindex/test1.py
Normal file
@@ -0,0 +1,7 @@
|
||||
"""Build an in-memory vector index over ./data and run one sample query."""
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader

corpus = SimpleDirectoryReader("data").load_data()
vector_index = VectorStoreIndex.from_documents(corpus)
engine = vector_index.as_query_engine()
answer = engine.query("Some question about the data should go here")
print(answer)
34
test/test_pydantic/test.py
Normal file
34
test/test_pydantic/test.py
Normal file
@@ -0,0 +1,34 @@
|
||||
"""Pydantic demos: explicit field annotations, and env-var driven settings."""
import os

from pydantic import BaseModel


# Correct usage: every field carries an explicit type annotation.
class User(BaseModel):
    id: int  # explicit int annotation
    name: str  # explicit str annotation
    is_active: bool = True  # annotated field with a default value

# Incorrect usage, kept for reference: fields without annotations.
# class BadUser(BaseModel):
#     id = 1         # missing annotation — Pydantic 2.9+ raises an error
#     name = "John"  # missing annotation — Pydantic 2.9+ raises an error

# Seed the environment so the settings demo below has something to pick up.
os.environ['app_port'] = '8888'

# Settings demo: environment variables map onto fields automatically.
from pydantic_settings import BaseSettings, SettingsConfigDict


class AppConfig(BaseSettings):
    host: str = "localhost"
    port: int = 8000

    model_config = SettingsConfigDict(
        env_prefix="APP_",  # environment-variable prefix
        case_sensitive=False,  # match variable names case-insensitively
    )


# 'app_port' set above matches APP_PORT case-insensitively, so the default
# of 8000 is overridden.
config = AppConfig()
print(config.port)  # prints 8888 (not the default 8000)
Reference in New Issue
Block a user