
Streaming Text Generation

RavenSaaS supports streaming content generation with most mainstream AI text models.

First, configure the corresponding model as described in the Text Generation chapter.
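
The streaming route shown below reads three environment variables. A minimal sketch of the corresponding entries (placeholder values; the file name .env.local and the base URL are assumptions here, use whatever your Text Generation setup already defines):

.env.local
# Used by the "openai" provider branch; OPENAI_BASE_URL is optional and can
# point at any OpenAI-compatible gateway.
OPENAI_API_KEY="your-openai-api-key"
OPENAI_BASE_URL="https://api.openai.com/v1"

# Used by the "deepseek" branch; requests are routed through OpenRouter.
DEEPSEEK_API_KEY="your-openrouter-api-key"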

Then implement a streaming generation API that returns the text stream in a non-blocking way.

API reference code

app/api/gen-text-stream/route.ts
import { streamText } from "ai";
import { createOpenAI } from "@ai-sdk/openai";
import { respErr } from "@/lib/response";

type ProviderOptions = {
  openai?: {
    maxTokens?: number;
    temperature?: number;
    [key: string]: unknown;
  };
  deepseek?: {
    maxTokens?: number;
    temperature?: number;
    [key: string]: unknown;
  };
};

// Default provider-specific options merged into each request.
const defaultProviderOptions: ProviderOptions = {
  openai: {
    maxTokens: 500,
    temperature: 0.7,
  },
  deepseek: {
    maxTokens: 500,
    temperature: 0.7,
  },
};

export async function POST(req: Request) {
  try {
    const { role, prompt, model, provider = "openai", maxTokens = 500 } = await req.json();
    if (!prompt || !model) {
      return respErr("invalid params");
    }

    let textModel;
    let providerOptions = {};

    switch (provider) {
      case "openai": {
        if (!process.env.OPENAI_API_KEY) {
          return respErr("invalid OPENAI_API_KEY");
        }

        const customOpenAI = createOpenAI({
          baseURL: process.env.OPENAI_BASE_URL,
          apiKey: process.env.OPENAI_API_KEY,
        });

        textModel = customOpenAI.chat(model);
        providerOptions = {
          openai: { ...defaultProviderOptions.openai, maxTokens },
        };
        break;
      }

      case "deepseek": {
        if (!process.env.DEEPSEEK_API_KEY) {
          return respErr("invalid DEEPSEEK_API_KEY");
        }

        // DeepSeek is accessed through OpenRouter's OpenAI-compatible endpoint.
        const deepseekOpenAI = createOpenAI({
          baseURL: "https://openrouter.ai/api/v1",
          apiKey: process.env.DEEPSEEK_API_KEY,
        });

        // Map the UI model name to the OpenRouter model id.
        const deepseekModel = model === "deepseek-v3 0324" ? "deepseek/deepseek-chat" : model;
        textModel = deepseekOpenAI.chat(deepseekModel);
        // The model is created with the OpenAI-compatible provider, so the
        // provider options still go under the "openai" key.
        providerOptions = {
          openai: { ...defaultProviderOptions.deepseek, maxTokens },
        };
        break;
      }

      default:
        return respErr(`invalid provider: ${provider}`);
    }

    // Optional system prompt (role) followed by the user prompt.
    const messages: Array<{ role: "system" | "user"; content: string }> = [];
    if (role) {
      messages.push({ role: "system", content: role });
    }
    messages.push({ role: "user", content: prompt });

    const result = await streamText({
      model: textModel,
      messages: messages,
      providerOptions,
      onFinish: ({ text }) => {
        // Log the full generated text once the stream completes.
        console.log("Text generation result:", text);
      },
    });

    // Return the generated text as a streaming (non-blocking) response.
    return result.toTextStreamResponse();
  } catch (error: unknown) {
    console.error("Text stream generation error:", error);

    const errorMessage = error instanceof Error ? error.message : String(error);

    // Map common upstream errors to appropriate status codes.
    if (errorMessage.includes("API key")) {
      return respErr("invalid API key", 401);
    } else if (errorMessage.includes("rate limit")) {
      return respErr("API rate limit exceeded", 429);
    } else if (errorMessage.includes("timeout")) {
      return respErr("request timeout", 504);
    } else {
      return respErr("text stream generation failed: " + errorMessage, 500);
    }
  }
}
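
On the client side, the stream returned by toTextStreamResponse() can be consumed with the standard fetch and ReadableStream APIs. A minimal sketch, assuming the route lives at /api/gen-text-stream as above; the helper name streamGeneratedText and the model name gpt-4o-mini are only illustrative:

// Minimal browser-side sketch of consuming the streaming endpoint.
// The request body fields (prompt, model, provider, maxTokens) match the route above.
export async function streamGeneratedText(
  prompt: string,
  onChunk: (text: string) => void
) {
  const response = await fetch("/api/gen-text-stream", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      prompt,
      model: "gpt-4o-mini", // example model name
      provider: "openai",
      maxTokens: 500,
    }),
  });

  if (!response.ok || !response.body) {
    throw new Error(`request failed: ${response.status}`);
  }

  // Read the text stream chunk by chunk as it arrives (non-blocking).
  const reader = response.body.getReader();
  const decoder = new TextDecoder();

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    onChunk(decoder.decode(value, { stream: true }));
  }
}

Each decoded chunk can be appended to component state so the generated text renders progressively as it arrives.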

References

AI SDK Generate Text documentation

AI SDK Provider documentation