 */

 /*
- * This example demostrates the usage of Instance Principal. In order to run this example, this code
- * must be in an Oracle Cloud instance. The Instance Principal will utiltize internal resources to
- * create an authentication provider. Refer to:
- * https://docs.cloud.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfrominstances.htm
- * for more details.
+ * This example demonstrates the parsing of an SSE response. In order to run this example, this code
+ * must be in an Oracle Cloud instance. Any API that returns Content-Type: text/event-stream can be handled this way.
 */

 import * as genai from "oci-generativeaiinference";
 import common = require("oci-common");

 (async () => {
   const region = "us-chicago-1";
-  const provider = new common.ConfigFileAuthenticationDetailsProvider();
+  const provider = new common.SessionAuthDetailProvider();
   provider.setRegion(region);

   const client = new genai.GenerativeAiInferenceClient({
@@ -60,19 +57,28 @@ import common = require("oci-common");
   const response = await client.generateText(req_body);
   console.log(
     "Response: " +
-      (response.generateTextResult.inferenceResponse as genai.models.CohereLlmInferenceResponse)
-        .generatedTexts[0].text
+      ((<genai.responses.GenerateTextResponse>response).generateTextResult
+        .inferenceResponse as genai.models.CohereLlmInferenceResponse).generatedTexts[0].text
   );

   // Attempt to generate text as SSE stream (throws error)
   try {
     inference_request.isStream = true;
     const responseStream = await client.generateText(req_body);
-    console.log(
-      "Response: " +
-        (responseStream.generateTextResult
-          .inferenceResponse as genai.models.CohereLlmInferenceResponse).generatedTexts[0].text
-    );
+
+    // Accumulate the text chunk carried by each SSE "data:" event line
+    let streamData = "";
+    const lines = String(responseStream).split("\n");
+
+    lines.forEach(line => {
+      // Blank lines are event separators / keep-alives; only "data:" lines carry a payload
+      if (line.startsWith("data:")) {
+        // Strip the "data:" prefix (5 chars), then parse the event's JSON payload
+        const data = JSON.parse(line.substring(5).trim());
+        streamData += data.text;
+      }
+    });
+    console.log("Stream Response: ", streamData);
   } catch (e) {
     console.log(e);
   }
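
A note on the parsing approach above: splitting the buffered body on single newlines and keying off the "data:" prefix is enough for the simple payloads this example receives, but the SSE format also allows ":"-prefixed comment/keep-alive lines, "event:" name fields, and multi-line "data:" fields that belong to a single event. The following standalone parser is a minimal sketch covering those cases; SseEvent and parseSseBody are hypothetical names, not part of oci-common or oci-generativeaiinference, and it assumes the whole event-stream body has already been buffered into a string, as in the example.

// Minimal SSE body parser sketch. Assumes the complete text/event-stream body
// is already buffered in memory; a production parser would consume network
// chunks incrementally instead.
interface SseEvent {
  event?: string;
  data: string;
}

function parseSseBody(body: string): SseEvent[] {
  const events: SseEvent[] = [];
  let eventName: string | undefined;
  let dataLines: string[] = [];

  for (const rawLine of body.split("\n")) {
    const line = rawLine.replace(/\r$/, ""); // tolerate CRLF framing
    if (line === "") {
      // A blank line terminates the current event
      if (dataLines.length > 0) {
        events.push({ event: eventName, data: dataLines.join("\n") });
      }
      eventName = undefined;
      dataLines = [];
    } else if (line.startsWith(":")) {
      // Comment / keep-alive line; ignore
    } else if (line.startsWith("event:")) {
      eventName = line.substring(6).trim();
    } else if (line.startsWith("data:")) {
      // Per the SSE spec, multiple data lines in one event are joined with "\n"
      dataLines.push(line.substring(5).trim());
    }
  }
  // Flush a final event if the stream ended without a trailing blank line
  if (dataLines.length > 0) {
    events.push({ event: eventName, data: dataLines.join("\n") });
  }
  return events;
}

// Usage against a buffered body like the one in the example:
// const text = parseSseBody(String(responseStream))
//   .map(evt => JSON.parse(evt.data).text)
//   .join("");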
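Separately, the double cast in the non-streaming branch ((<genai.responses.GenerateTextResponse>response).generateTextResult.inferenceResponse as genai.models.CohereLlmInferenceResponse) can be avoided with a property-based type guard, letting the compiler narrow the union itself. This is a sketch under the assumption that the SDK's models namespace exports an LlmInferenceResponse union and that only its Cohere variant carries generatedTexts; isCohereResponse is a hypothetical helper, not an SDK API.

import * as genai from "oci-generativeaiinference";

// Hypothetical type guard: narrow the inference response union by probing for
// the Cohere-specific "generatedTexts" property instead of double-casting.
function isCohereResponse(
  r: genai.models.LlmInferenceResponse
): r is genai.models.CohereLlmInferenceResponse {
  return "generatedTexts" in r;
}

// Usage with the response from the example above:
// if (isCohereResponse(response.generateTextResult.inferenceResponse)) {
//   console.log(
//     "Response: " + response.generateTextResult.inferenceResponse.generatedTexts[0].text
//   );
// }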