Skip to content

Commit

Permalink
Switch openai to openai-edge (#66)
Browse files Browse the repository at this point in the history
[Axios doesn't appear to support streaming in the browser](axios/axios#479), and since we want AI.JSX to be able to run in browser, that's a no-go for us.

[Loom showing streaming in the browser](https://www.loom.com/share/5bf174e2217b433fbf7901ece310774f)

We still don't stream the UI demos (e.g. recipe builder) because filling in UI pieces bit-by-bit could be worse than buffering.
  • Loading branch information
NickHeiner committed Jun 18, 2023
1 parent e9f2038 commit 3a0b5e9
Show file tree
Hide file tree
Showing 13 changed files with 104 additions and 45 deletions.
4 changes: 3 additions & 1 deletion .github/workflows/tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -23,5 +23,7 @@ jobs:
# node-version: ${{ matrix.node-version }}
# - run: yarn set version 4
- run: ~/.volta/bin/volta list
- run: ~/.volta/bin/yarn install --immutable
# I added this to suppress this warning:
# YN0018: typescript@patch:typescript@npm%3A5.1.3#optional!builtin<compat/typescript>::version=5.1.3&hash=5da071: The remote archive doesn't match the expected checksum
- run: YARN_CHECKSUM_BEHAVIOR=ignore ~/.volta/bin/yarn install --immutable
- run: ~/.volta/bin/yarn test
3 changes: 2 additions & 1 deletion packages/ai-jsx/.eslintrc.cjs
Original file line number Diff line number Diff line change
Expand Up @@ -15,9 +15,10 @@ module.exports = {
},

rules: {
// Disable eslint rules to let their TS equivalents take over.
'no-unused-vars': 'off',
'@typescript-eslint/no-unused-vars': ['warn', { ignoreRestSiblings: true, argsIgnorePattern: '^_' }],

'no-undef': 'off',
'no-magic-numbers': 'off',
'@typescript-eslint/no-magic-numbers': 'off',

Expand Down
1 change: 1 addition & 0 deletions packages/ai-jsx/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -125,6 +125,7 @@
"react": "^16.8.0 || ^17.0.0 || ^18.0.0"
},
"dependencies": {
"@nick.heiner/openai-edge": "1.0.1-7",
"axios": "^1.4.0",
"cli-highlight": "^2.1.11",
"cli-spinners": "^2.9.0",
Expand Down
70 changes: 40 additions & 30 deletions packages/ai-jsx/src/lib/openai.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -11,17 +11,18 @@ import {
FunctionResponse,
} from '../core/completion.js';
import { ImageGenPropsWithChildren } from '../core/image-gen.js';
// openai-edge hasn't updated its types to support the new function types yet,
// so we'll import the types from openai until it does.
import { ChatCompletionFunctions, ChatCompletionResponseMessage, ChatCompletionRequestMessage } from 'openai';
import {
ChatCompletionFunctions,
ChatCompletionRequestMessage,
ChatCompletionResponseMessage,
Configuration,
CreateChatCompletionResponse,
CreateCompletionResponse,
OpenAIApi,
CreateImageRequestSizeEnum,
CreateImageRequestResponseFormatEnum,
} from 'openai';
ResponseTypes,
} from '@nick.heiner/openai-edge';
import * as LLMx from '../index.js';
import { PropsOfComponent, Node } from '../index.js';
import GPT3Tokenizer from 'gpt3-tokenizer';
Expand All @@ -44,7 +45,9 @@ type ChatOrCompletionModelOrBoth =
| { chatModel: ValidChatModel; completionModel?: ValidCompletionModel }
| { chatModel?: ValidChatModel; completionModel: ValidCompletionModel };

const openAiClientContext = LLMx.createContext<OpenAIApi>(
const decoder = new TextDecoder();

export const openAiClientContext = LLMx.createContext<OpenAIApi>(
new OpenAIApi(
new Configuration({
apiKey: process.env.OPENAI_API_KEY,
Expand Down Expand Up @@ -90,15 +93,15 @@ export function OpenAI({
* - https://github.com/openai/openai-cookbook/blob/970d8261fbf6206718fe205e88e37f4745f9cf76/examples/How_to_stream_completions.ipynb
* @param iterable A byte stream from an OpenAI SSE response.
*/
async function* openAiEventsToJson<T>(iterable: AsyncIterable<Buffer>): AsyncGenerator<T> {
async function* openAiEventsToJson<T>(iterable: AsyncIterable<String>): AsyncGenerator<T> {
const SSE_PREFIX = 'data: ';
const SSE_TERMINATOR = '\n\n';
const SSE_FINAL_EVENT = '[DONE]';

let bufferedContent = '';

for await (const chunk of iterable) {
const textToParse = bufferedContent + chunk.toString('utf8');
const textToParse = bufferedContent + chunk;
const eventsWithExtra = textToParse.split(SSE_TERMINATOR);

// Any content not terminated by a "\n\n" will be buffered for the next chunk.
Expand Down Expand Up @@ -136,12 +139,11 @@ function logitBiasOfTokens(tokens: Record<string, number>) {
}

type OpenAIMethod = 'createCompletion' | 'createChatCompletion' | 'createImage';
type AxiosResponse<M> = M extends OpenAIMethod ? Awaited<ReturnType<InstanceType<typeof OpenAIApi>[M]>> : never;

export class OpenAIError<M extends OpenAIMethod> extends HttpError {
readonly errorResponse: Record<string, any> | null;

constructor(response: AxiosResponse<M>, method: M, responseText: string) {
constructor(response: Response, method: M, responseText: string) {
let errorResponse = null as Record<string, any> | null;
let responseSuffix = '';
try {
Expand All @@ -159,20 +161,28 @@ export class OpenAIError<M extends OpenAIMethod> extends HttpError {
`OpenAI ${method} request failed with status code ${response.status}${responseSuffix}\n\nFor more information, see https://platform.openai.com/docs/guides/error-codes/api-errors`,
response.status,
responseText,
response.headers
Object.fromEntries(response.headers.entries())
);
this.errorResponse = errorResponse;
}
}

async function checkOpenAIResponse<M extends OpenAIMethod>(response: AxiosResponse<M>, logger: Logger, method: M) {
if (response.status < 200 || response.status >= 300) {
const responseData = [] as string[];
for await (const body of response.data as unknown as AsyncIterable<Buffer>) {
responseData.push(body.toString('utf8'));
/**
 * Adapts a fetch `ReadableStream` reader into an async iterable of decoded text chunks,
 * so OpenAI SSE responses can be consumed with `for await`. Reads until the stream
 * reports `done`, decoding each binary chunk with the module-level TextDecoder
 * (UTF-8 by default).
 */
async function* asyncIteratorOfFetchStream(reader: ReturnType<NonNullable<Response['body']>['getReader']>) {
  while (true) {
    const { done, value } =
      // I don't know why the types fail here, but the code works.
      // @ts-expect-error
      await reader.read();
    if (done) {
      // Stream exhausted; end the generator without yielding a final value.
      return;
    }
    // NOTE(review): decode() without { stream: true } assumes a chunk never splits a
    // multi-byte UTF-8 sequence mid-character — TODO confirm against real responses.
    yield decoder.decode(value);
  }
}

throw new OpenAIError(response, method, responseData.join(''));
async function checkOpenAIResponse<M extends OpenAIMethod>(response: Response, logger: Logger, method: M) {
if (response.status < 200 || response.status >= 300 || !response.body) {
throw new OpenAIError(response, method, await response.text());
} else {
logger.debug({ statusCode: response.status, headers: response.headers }, `${method} succeeded`);
}
Expand All @@ -196,18 +206,16 @@ export async function* OpenAICompletionModel(
};
logger.debug({ completionRequest }, 'Calling createCompletion');

const completionResponse = await openai.createCompletion(completionRequest, {
responseType: 'stream',
validateStatus: () => true,
});
const completionResponse = await openai.createCompletion(completionRequest);

await checkOpenAIResponse(completionResponse, logger, 'createCompletion');

let resultSoFar = '';

for await (const event of openAiEventsToJson<CreateCompletionResponse>(
completionResponse.data as unknown as AsyncIterable<Buffer>
)) {
// checkOpenAIResponse will throw if completionResponse.body is null, so we know it's not null here.
const responseIterator = asyncIteratorOfFetchStream(completionResponse.body!.getReader());

for await (const event of openAiEventsToJson<CreateCompletionResponse>(responseIterator)) {
logger.trace({ event }, 'Got createCompletion event');
resultSoFar += event.choices[0].text;
yield resultSoFar;
Expand Down Expand Up @@ -312,10 +320,11 @@ export async function* OpenAIChatModel(
};

logger.debug({ chatCompletionRequest }, 'Calling createChatCompletion');
const chatResponse = await openai.createChatCompletion(chatCompletionRequest, {
responseType: 'stream',
validateStatus: () => true,
});
const chatResponse = await openai.createChatCompletion(
// We can remove this once openai-edge updates to reflect the new chat function types.
// @ts-expect-error
chatCompletionRequest
);

await checkOpenAIResponse(chatResponse, logger, 'createChatCompletion');

Expand All @@ -329,7 +338,7 @@ export async function* OpenAIChatModel(
const currentMessage = { content: undefined, function_call: undefined } as Partial<ChatCompletionResponseMessage>;
let finishReason: string | undefined = undefined;
for await (const deltaMessage of openAiEventsToJson<ChatCompletionDelta>(
chatResponse.data as unknown as AsyncIterable<Buffer>
asyncIteratorOfFetchStream(chatResponse.body!.getReader())
)) {
logger.trace({ deltaMessage }, 'Got delta message');
finishReason = finishReason ?? deltaMessage.choices[0].finish_reason;
Expand Down Expand Up @@ -408,11 +417,12 @@ export async function DalleImageGen(
const response = await openai.createImage(imageRequest);

if (response.status < 200 || response.status >= 300) {
throw new OpenAIError(response, 'createImage', JSON.stringify(response.data));
throw new OpenAIError(response, 'createImage', await response.text());
} else {
logger.debug({ statusCode: response.status, headers: response.headers }, 'createImage succeeded');
}

// return all image URLs as a newline-separated string
return _.map(response.data.data, 'url').join('\n');
const responseJson = (await response.json()) as ResponseTypes['createImage'];
return _.map(responseJson.data, 'url').join('\n');
}
4 changes: 4 additions & 0 deletions packages/ai-jsx/tsconfig-base.json
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,10 @@
"jsxFactory": "LLMx.createElement",
"jsxFragmentFactory": "LLMx.Fragment",
"module": "esnext",
// This is a potential footgun. We want our code to be isomorphic (run in Node and in the browser).
// By enabling the "DOM" lib, we are allowing ourselves to use DOM APIs in our code,
// which could break in Node if we're not careful.
"lib": ["DOM", "DOM.Iterable"],
"moduleResolution": "nodenext",
"esModuleInterop": true,
"resolveJsonModule": true,
Expand Down
2 changes: 1 addition & 1 deletion packages/create-react-app-demo/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
"@testing-library/jest-dom": "^5.16.5",
"@testing-library/react": "^13.4.0",
"@testing-library/user-event": "^13.5.0",
"ai-jsx": "^0.2.0-4",
"ai-jsx": "0.2.0-4",
"babel-jest": "^27.4.2",
"babel-loader": "^8.2.3",
"babel-plugin-named-asset-import": "^0.3.8",
Expand Down
5 changes: 5 additions & 0 deletions packages/create-react-app-demo/src/App.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ import './App.css';
import React from 'react';
import { createBrowserRouter, RouterProvider } from 'react-router-dom';
import RootLayout from './layout.tsx';
import BasicCompletion from './basic-completion.tsx';
import { ChooseYourOwnAdventure } from './choose-your-adventure/index.tsx';
import RecipeWrapper from './recipe/page.tsx';
import { BasicChat } from './basic-chat/index.tsx';
Expand All @@ -16,6 +17,10 @@ const router = createBrowserRouter([
path: '',
element: <ChooseYourOwnAdventure />,
},
{
path: '/basic-completion',
element: <BasicCompletion />,
},
{
path: '/basic-chat',
element: <BasicChat />,
Expand Down
1 change: 1 addition & 0 deletions packages/create-react-app-demo/src/NavBar.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ export default function NavBar() {
'Basic Chat': '/basic-chat',
'Docs Chat': '/docs-chat',
'JIT UI: React': '/recipe',
'Basic Completion': '/basic-completion',
};

return (
Expand Down
2 changes: 1 addition & 1 deletion packages/create-react-app-demo/src/ResultContainer.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ export default function ResultContainer({
<h3 className="text-base font-semibold leading-6 text-gray-900">{title}</h3>
<p>{description}</p>
</div>
<div className="bg-white px-4 py-5 sm:px-6">{children}</div>
<div className="whitespace-pre-wrap bg-white px-4 py-5 sm:px-6">{children}</div>
</div>
</div>
</div>
Expand Down
31 changes: 31 additions & 0 deletions packages/create-react-app-demo/src/basic-completion.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
/** @jsx AI.createElement */
/** @jsxFrag AI.Fragment */
import * as AI from 'ai-jsx/react';
import ResultContainer from './ResultContainer.tsx';
import InputPrompt from './InputPrompt.tsx';
import { ChatCompletion, UserMessage } from 'ai-jsx/core/completion';
import { useState } from 'react';

/**
 * Demo page rendering two streaming AI completions for a user-supplied topic:
 * a poem about the topic, and a list of ten facts about it.
 */
export default function BasicCompletion() {
  // The topic the user wants the AI to write about; seeded with a fun default.
  const [query, setQuery] = useState('wild weasels');

  return (
    <>
      <InputPrompt label="Give me a topic" value={query} setValue={setQuery} />
      {/* Title fixed: this completion writes a poem, not a recipe (copy-paste from the recipe demo). */}
      <ResultContainer title={`AI writes a poem about ${query}`}>
        <AI.jsx>
          <ChatCompletion temperature={1}>
            <UserMessage>Write me a poem about {query}</UserMessage>
          </ChatCompletion>
        </AI.jsx>
      </ResultContainer>
      <ResultContainer title={`AI lists ten facts about ${query}`}>
        <AI.jsx>
          <ChatCompletion temperature={1}>
            <UserMessage>Give me ten facts about {query}</UserMessage>
          </ChatCompletion>
        </AI.jsx>
      </ResultContainer>
    </>
  );
}
2 changes: 2 additions & 0 deletions packages/docs/.gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,8 @@ data
.docusaurus
.cache-loader

docs/api/

# Misc
.DS_Store
.env.local
Expand Down
1 change: 1 addition & 0 deletions packages/examples/src/simple-completion.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -12,3 +12,4 @@ function App() {
}

showInspector(<App />);
// console.log(await LLMx.createRenderContext().render(<App />));
23 changes: 12 additions & 11 deletions yarn.lock
Original file line number Diff line number Diff line change
Expand Up @@ -3804,6 +3804,13 @@ __metadata:
languageName: node
linkType: hard

"@nick.heiner/openai-edge@npm:1.0.1-7":
version: 1.0.1-7
resolution: "@nick.heiner/openai-edge@npm:1.0.1-7"
checksum: 3062303f4cf8cd7da7d79adb4d3981e35ec4f832644f7f8ee611d2ade3e0a0687d4e0e2e9dcc621ee914c849ffdfc88e5dbc595e33b78735a4fbfc3c1171200c
languageName: node
linkType: hard

"@nick.heiner/wandb-fork@npm:0.5.2-5":
version: 0.5.2-5
resolution: "@nick.heiner/wandb-fork@npm:0.5.2-5"
Expand Down Expand Up @@ -7472,10 +7479,11 @@ __metadata:
languageName: unknown
linkType: soft

"ai-jsx@npm:^0.2.0-1, ai-jsx@npm:^0.2.0-4, ai-jsx@workspace:packages/ai-jsx":
"ai-jsx@npm:0.2.0-4, ai-jsx@npm:^0.2.0-1, ai-jsx@npm:^0.2.0-4, ai-jsx@workspace:packages/ai-jsx":
version: 0.0.0-use.local
resolution: "ai-jsx@workspace:packages/ai-jsx"
dependencies:
"@nick.heiner/openai-edge": "npm:1.0.1-7"
"@tsconfig/node16": "npm:^1.0.4"
"@tsconfig/node18": "npm:^2.0.1"
"@types/eslint": "npm:^8"
Expand Down Expand Up @@ -8688,14 +8696,7 @@ __metadata:
languageName: node
linkType: hard

"caniuse-lite@npm:^1.0.0, caniuse-lite@npm:^1.0.30001406, caniuse-lite@npm:^1.0.30001464":
version: 1.0.30001502
resolution: "caniuse-lite@npm:1.0.30001502"
checksum: 3dc7ce2d32dc43a90f1f76e4966017f573e374211d8962fd062b5bf994ada8c155ad9fe3b7b33f69dff46446acc71e207b47b7474fa5694f802f4301aba3f07e
languageName: node
linkType: hard

"caniuse-lite@npm:^1.0.30001502":
"caniuse-lite@npm:^1.0.0, caniuse-lite@npm:^1.0.30001406, caniuse-lite@npm:^1.0.30001464, caniuse-lite@npm:^1.0.30001502":
version: 1.0.30001503
resolution: "caniuse-lite@npm:1.0.30001503"
checksum: 9db53cb56a20a5c34685199e1df1c265d0a5f977d481f0c61c347e1758c1cc685002c22c0a0bf472e5449def46145402f5365ba98c29eef9cc118b884beab5cb
Expand Down Expand Up @@ -9480,7 +9481,7 @@ __metadata:
"@types/react-dom": "npm:^18.2.5"
"@typescript-eslint/eslint-plugin": "npm:^5.59.11"
"@typescript-eslint/parser": "npm:^5.59.11"
ai-jsx: "npm:^0.2.0-4"
ai-jsx: "npm:0.2.0-4"
babel-jest: "npm:^27.4.2"
babel-loader: "npm:^8.2.3"
babel-plugin-named-asset-import: "npm:^0.3.8"
Expand Down Expand Up @@ -22346,7 +22347,7 @@ __metadata:
bin:
tsc: bin/tsc
tsserver: bin/tsserver
checksum: ef6d5eaa31049b41bf1ddf091806aed923e65ea3de951bb27e05bc65911c3bf373b3c2bcc9204d2d2dbdca7d2e06c6a645824580638a8c2c18e2e5155cfc33b7
checksum: e4412cb44926481c10ef68507740f830237594258d11432b11de90f86d4ecce0a28f46edcc96dda7f70095a2044a967af0eaa33dabdfe056bba125d329ff269d
languageName: node
linkType: hard

Expand Down

2 comments on commit 3a0b5e9

@vercel
Copy link

@vercel vercel bot commented on 3a0b5e9 Jun 18, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Successfully deployed to the following URLs:

ai-jsx-create-react-app-demo – ./packages/create-react-app-demo

ai-jsx-create-react-app-demo-fixie-ai.vercel.app
ai-jsx-create-react-app-demo.vercel.app
ai-jsx-create-react-app-demo-git-main-fixie-ai.vercel.app

@vercel
Copy link

@vercel vercel bot commented on 3a0b5e9 Jun 18, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Successfully deployed to the following URLs:

ai-jsx-docs – ./packages/docs

ai-jsx-docs-git-main-fixie-ai.vercel.app
docs.ai-jsx.com
ai-jsx-docs-fixie-ai.vercel.app
ai-jsx-docs.vercel.app

Please sign in to comment.