-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathgenerate.ts
107 lines (93 loc) · 3.08 KB
/
generate.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { APIResource } from '../resource';
import * as Core from '../core';
export class Generate extends APIResource {
  /**
   * Generate a response using Contextual's Grounded Language Model (GLM), an LLM
   * engineered specifically to prioritize faithfulness to in-context retrievals
   * over parametric knowledge, reducing hallucinations in Retrieval-Augmented
   * Generation and agentic use cases.
   *
   * The total request cannot exceed 32,000 tokens. See more details and code
   * examples in
   * [our blog post](https://contextual.ai/blog/introducing-grounded-language-model/).
   * Email [[email protected]](mailto:[email protected]) with any
   * feedback or questions.
   *
   * @param body - The generation request parameters (knowledge, messages, model, …).
   * @param options - Optional per-request overrides (headers, timeout, etc.).
   * @returns A promise resolving to the model's generated response.
   */
  create(body: GenerateCreateParams, options?: Core.RequestOptions): Core.APIPromise<GenerateCreateResponse> {
    // Merge the body with any caller-supplied request options and POST to /generate.
    const request = { body, ...options };
    return this._client.post('/generate', request);
  }
}
/**
* /generate result object.
*/
/**
 * /generate result object.
 *
 * Shape of the response body returned by `Generate.create`.
 */
export interface GenerateCreateResponse {
  /**
   * The model's response to the last user message.
   */
  response: string;
}
/**
 * Request body for `Generate.create` (the `/generate` endpoint).
 */
export interface GenerateCreateParams {
  /**
   * The knowledge sources the model can use when generating a response.
   */
  knowledge: Array<string>;

  /**
   * List of messages in the conversation so far. The last message must be from the
   * user.
   */
  messages: Array<GenerateCreateParams.Message>;

  /**
   * The version of the Contextual's GLM to use. Currently, we just have "v1".
   */
  model: string;

  /**
   * Flag to indicate whether the model should avoid providing additional commentary
   * in responses. Commentary is conversational in nature and does not contain
   * verifiable claims; therefore, commentary is not strictly grounded in available
   * context. However, commentary may provide useful context which improves the
   * helpfulness of responses.
   */
  avoid_commentary?: boolean;

  /**
   * The maximum number of tokens that the model can generate in the response.
   */
  max_new_tokens?: number;

  /**
   * Instructions that the model follows when generating responses. Note that we do
   * not guarantee that the model follows these instructions exactly.
   */
  system_prompt?: string;

  /**
   * The sampling temperature, which affects the randomness in the response. Note
   * that higher temperature values can reduce groundedness.
   */
  temperature?: number;

  /**
   * A parameter for nucleus sampling, an alternative to temperature which also
   * affects the randomness of the response. Note that higher top_p values can reduce
   * groundedness.
   */
  top_p?: number;
}
export namespace GenerateCreateParams {
  /**
   * Message object for a message received in the /generate request.
   *
   * Represents one turn of the conversation; per the parent interface's contract,
   * the last message in the list must have role 'user'.
   */
  export interface Message {
    /**
     * Content of the message
     */
    content: string;

    /**
     * Role of the sender
     */
    role: 'user' | 'assistant';
  }
}
// Type-only re-exports merged onto the `Generate` class, so callers can refer to
// `Generate.GenerateCreateResponse` / `Generate.GenerateCreateParams` without
// separate imports. `declare` + `export type` means this is fully erased at runtime.
export declare namespace Generate {
  export {
    type GenerateCreateResponse as GenerateCreateResponse,
    type GenerateCreateParams as GenerateCreateParams,
  };
}