Initialize module and dependencies
This commit is contained in:
4
vendor/github.com/sashabaranov/go-openai/.codecov.yml
generated
vendored
Normal file
4
vendor/github.com/sashabaranov/go-openai/.codecov.yml
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
coverage:
|
||||
ignore:
|
||||
- "examples/**"
|
||||
- "internal/test/**"
|
||||
22
vendor/github.com/sashabaranov/go-openai/.gitignore
generated
vendored
Normal file
22
vendor/github.com/sashabaranov/go-openai/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
*.exe~
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
# Test binary, built with `go test -c`
|
||||
*.test
|
||||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
|
||||
# Dependency directories (remove the comment below to include it)
|
||||
# vendor/
|
||||
|
||||
# Auth token for tests
|
||||
.openai-token
|
||||
.idea
|
||||
|
||||
# Generated by tests
|
||||
test.mp3
|
||||
168
vendor/github.com/sashabaranov/go-openai/.golangci.yml
generated
vendored
Normal file
168
vendor/github.com/sashabaranov/go-openai/.golangci.yml
generated
vendored
Normal file
@@ -0,0 +1,168 @@
|
||||
version: "2"
|
||||
linters:
|
||||
default: none
|
||||
enable:
|
||||
- asciicheck
|
||||
- bidichk
|
||||
- bodyclose
|
||||
- contextcheck
|
||||
- cyclop
|
||||
- dupl
|
||||
- durationcheck
|
||||
- errcheck
|
||||
- errname
|
||||
- errorlint
|
||||
- exhaustive
|
||||
- forbidigo
|
||||
- funlen
|
||||
- gochecknoinits
|
||||
- gocognit
|
||||
- goconst
|
||||
- gocritic
|
||||
- gocyclo
|
||||
- godot
|
||||
- gomoddirectives
|
||||
- gomodguard
|
||||
- goprintffuncname
|
||||
- gosec
|
||||
- govet
|
||||
- ineffassign
|
||||
- lll
|
||||
- makezero
|
||||
- mnd
|
||||
- nestif
|
||||
- nilerr
|
||||
- nilnil
|
||||
- nolintlint
|
||||
- nosprintfhostport
|
||||
- predeclared
|
||||
- promlinter
|
||||
- revive
|
||||
- rowserrcheck
|
||||
- sqlclosecheck
|
||||
- staticcheck
|
||||
- testpackage
|
||||
- tparallel
|
||||
- unconvert
|
||||
- unparam
|
||||
- unused
|
||||
- usetesting
|
||||
- wastedassign
|
||||
- whitespace
|
||||
settings:
|
||||
cyclop:
|
||||
max-complexity: 30
|
||||
package-average: 10
|
||||
errcheck:
|
||||
check-type-assertions: true
|
||||
funlen:
|
||||
lines: 100
|
||||
statements: 50
|
||||
gocognit:
|
||||
min-complexity: 20
|
||||
gocritic:
|
||||
settings:
|
||||
captLocal:
|
||||
paramsOnly: false
|
||||
underef:
|
||||
skipRecvDeref: false
|
||||
gomodguard:
|
||||
blocked:
|
||||
modules:
|
||||
- github.com/golang/protobuf:
|
||||
recommendations:
|
||||
- google.golang.org/protobuf
|
||||
reason: see https://developers.google.com/protocol-buffers/docs/reference/go/faq#modules
|
||||
- github.com/satori/go.uuid:
|
||||
recommendations:
|
||||
- github.com/google/uuid
|
||||
reason: satori's package is not maintained
|
||||
- github.com/gofrs/uuid:
|
||||
recommendations:
|
||||
- github.com/google/uuid
|
||||
reason: 'see recommendation from dev-infra team: https://confluence.gtforge.com/x/gQI6Aw'
|
||||
govet:
|
||||
disable:
|
||||
- fieldalignment
|
||||
enable-all: true
|
||||
settings:
|
||||
shadow:
|
||||
strict: true
|
||||
mnd:
|
||||
ignored-functions:
|
||||
- os.Chmod
|
||||
- os.Mkdir
|
||||
- os.MkdirAll
|
||||
- os.OpenFile
|
||||
- os.WriteFile
|
||||
- prometheus.ExponentialBuckets
|
||||
- prometheus.ExponentialBucketsRange
|
||||
- prometheus.LinearBuckets
|
||||
- strconv.FormatFloat
|
||||
- strconv.FormatInt
|
||||
- strconv.FormatUint
|
||||
- strconv.ParseFloat
|
||||
- strconv.ParseInt
|
||||
- strconv.ParseUint
|
||||
nakedret:
|
||||
max-func-lines: 0
|
||||
nolintlint:
|
||||
require-explanation: true
|
||||
require-specific: true
|
||||
allow-no-explanation:
|
||||
- funlen
|
||||
- gocognit
|
||||
- lll
|
||||
rowserrcheck:
|
||||
packages:
|
||||
- github.com/jmoiron/sqlx
|
||||
exclusions:
|
||||
generated: lax
|
||||
presets:
|
||||
- comments
|
||||
- common-false-positives
|
||||
- legacy
|
||||
- std-error-handling
|
||||
rules:
|
||||
- linters:
|
||||
- forbidigo
|
||||
- mnd
|
||||
- revive
|
||||
path : ^examples/.*\.go$
|
||||
- linters:
|
||||
- lll
|
||||
source: ^//\s*go:generate\s
|
||||
- linters:
|
||||
- godot
|
||||
source: (noinspection|TODO)
|
||||
- linters:
|
||||
- gocritic
|
||||
source: //noinspection
|
||||
- linters:
|
||||
- errorlint
|
||||
source: ^\s+if _, ok := err\.\([^.]+\.InternalError\); ok {
|
||||
- linters:
|
||||
- bodyclose
|
||||
- dupl
|
||||
- funlen
|
||||
- goconst
|
||||
- gosec
|
||||
- noctx
|
||||
- wrapcheck
|
||||
- staticcheck
|
||||
path: _test\.go
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
issues:
|
||||
max-same-issues: 50
|
||||
formatters:
|
||||
enable:
|
||||
- goimports
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
88
vendor/github.com/sashabaranov/go-openai/CONTRIBUTING.md
generated
vendored
Normal file
88
vendor/github.com/sashabaranov/go-openai/CONTRIBUTING.md
generated
vendored
Normal file
@@ -0,0 +1,88 @@
|
||||
# Contributing Guidelines
|
||||
|
||||
## Overview
|
||||
Thank you for your interest in contributing to the "Go OpenAI" project! By following this guideline, we hope to ensure that your contributions are made smoothly and efficiently. The Go OpenAI project is licensed under the [Apache 2.0 License](https://github.com/sashabaranov/go-openai/blob/master/LICENSE), and we welcome contributions through GitHub pull requests.
|
||||
|
||||
## Reporting Bugs
|
||||
If you discover a bug, first check the [GitHub Issues page](https://github.com/sashabaranov/go-openai/issues) to see if the issue has already been reported. If you're reporting a new issue, please use the "Bug report" template and provide detailed information about the problem, including steps to reproduce it.
|
||||
|
||||
## Suggesting Features
|
||||
If you want to suggest a new feature or improvement, first check the [GitHub Issues page](https://github.com/sashabaranov/go-openai/issues) to ensure a similar suggestion hasn't already been made. Use the "Feature request" template to provide a detailed description of your suggestion.
|
||||
|
||||
## Reporting Vulnerabilities
|
||||
If you identify a security concern, please use the "Report a security vulnerability" template on the [GitHub Issues page](https://github.com/sashabaranov/go-openai/issues) to share the details. This report will only be viewable to repository maintainers. You will be credited if the advisory is published.
|
||||
|
||||
## Questions for Users
|
||||
If you have questions, please utilize [StackOverflow](https://stackoverflow.com/) or the [GitHub Discussions page](https://github.com/sashabaranov/go-openai/discussions).
|
||||
|
||||
## Contributing Code
|
||||
There might already be a similar pull requests submitted! Please search for [pull requests](https://github.com/sashabaranov/go-openai/pulls) before creating one.
|
||||
|
||||
### Requirements for Merging a Pull Request
|
||||
|
||||
The requirements to accept a pull request are as follows:
|
||||
|
||||
- Features not provided by the OpenAI API will not be accepted.
|
||||
- The functionality of the feature must match that of the official OpenAI API.
|
||||
- All pull requests should be written in Go according to common conventions, formatted with `goimports`, and free of warnings from tools like `golangci-lint`.
|
||||
- Include tests and ensure all tests pass.
|
||||
- Maintain test coverage without any reduction.
|
||||
- All pull requests require approval from at least one Go OpenAI maintainer.
|
||||
|
||||
**Note:**
|
||||
The merging method for pull requests in this repository is squash merge.
|
||||
|
||||
### Creating a Pull Request
|
||||
- Fork the repository.
|
||||
- Create a new branch and commit your changes.
|
||||
- Push that branch to GitHub.
|
||||
- Start a new Pull Request on GitHub. (Please use the pull request template to provide detailed information.)
|
||||
|
||||
**Note:**
|
||||
If your changes introduce breaking changes, please prefix your pull request title with "[BREAKING_CHANGES]".
|
||||
|
||||
### Code Style
|
||||
In this project, we adhere to the standard coding style of Go. Your code should maintain consistency with the rest of the codebase. To achieve this, please format your code using tools like `goimports` and resolve any syntax or style issues with `golangci-lint`.
|
||||
|
||||
**Run goimports:**
|
||||
```
|
||||
go install golang.org/x/tools/cmd/goimports@latest
|
||||
```
|
||||
|
||||
```
|
||||
goimports -w .
|
||||
```
|
||||
|
||||
**Run golangci-lint:**
|
||||
```
|
||||
go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
|
||||
```
|
||||
|
||||
```
|
||||
golangci-lint run --out-format=github-actions
|
||||
```
|
||||
|
||||
### Unit Test
|
||||
Please create or update tests relevant to your changes. Ensure all tests run successfully to verify that your modifications do not adversely affect other functionalities.
|
||||
|
||||
**Run test:**
|
||||
```
|
||||
go test -v ./...
|
||||
```
|
||||
|
||||
### Integration Test
|
||||
Integration tests are requested against the production version of the OpenAI API. These tests will verify that the library is properly coded against the actual behavior of the API, and will fail upon any incompatible change in the API.
|
||||
|
||||
**Notes:**
|
||||
These tests send real network traffic to the OpenAI API and may reach rate limits. Temporary network problems may also cause the test to fail.
|
||||
|
||||
**Run integration test:**
|
||||
```
|
||||
OPENAI_TOKEN=XXX go test -v -tags=integration ./api_integration_test.go
|
||||
```
|
||||
|
||||
If the `OPENAI_TOKEN` environment variable is not available, integration tests will be skipped.
|
||||
|
||||
---
|
||||
|
||||
We wholeheartedly welcome your active participation. Let's build an amazing project together!
|
||||
201
vendor/github.com/sashabaranov/go-openai/LICENSE
generated
vendored
Normal file
201
vendor/github.com/sashabaranov/go-openai/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
913
vendor/github.com/sashabaranov/go-openai/README.md
generated
vendored
Normal file
913
vendor/github.com/sashabaranov/go-openai/README.md
generated
vendored
Normal file
@@ -0,0 +1,913 @@
|
||||
# Go OpenAI
|
||||
[](https://pkg.go.dev/github.com/sashabaranov/go-openai)
|
||||
[](https://goreportcard.com/report/github.com/sashabaranov/go-openai)
|
||||
[](https://codecov.io/gh/sashabaranov/go-openai)
|
||||
|
||||
This library provides unofficial Go clients for [OpenAI API](https://platform.openai.com/). We support:
|
||||
|
||||
* ChatGPT 4o, o1
|
||||
* GPT-3, GPT-4
|
||||
* DALL·E 2, DALL·E 3, GPT Image 1
|
||||
* Whisper
|
||||
|
||||
## Installation
|
||||
|
||||
```
|
||||
go get github.com/sashabaranov/go-openai
|
||||
```
|
||||
Currently, go-openai requires Go version 1.18 or greater.
|
||||
|
||||
|
||||
## Usage
|
||||
|
||||
### ChatGPT example usage:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
openai "github.com/sashabaranov/go-openai"
|
||||
)
|
||||
|
||||
func main() {
|
||||
client := openai.NewClient("your token")
|
||||
resp, err := client.CreateChatCompletion(
|
||||
context.Background(),
|
||||
openai.ChatCompletionRequest{
|
||||
Model: openai.GPT3Dot5Turbo,
|
||||
Messages: []openai.ChatCompletionMessage{
|
||||
{
|
||||
Role: openai.ChatMessageRoleUser,
|
||||
Content: "Hello!",
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("ChatCompletion error: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Println(resp.Choices[0].Message.Content)
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
### Getting an OpenAI API Key:
|
||||
|
||||
1. Visit the OpenAI website at [https://platform.openai.com/account/api-keys](https://platform.openai.com/account/api-keys).
|
||||
2. If you don't have an account, click on "Sign Up" to create one. If you do, click "Log In".
|
||||
3. Once logged in, navigate to your API key management page.
|
||||
4. Click on "Create new secret key".
|
||||
5. Enter a name for your new key, then click "Create secret key".
|
||||
6. Your new API key will be displayed. Use this key to interact with the OpenAI API.
|
||||
|
||||
**Note:** Your API key is sensitive information. Do not share it with anyone.
|
||||
|
||||
### Other examples:
|
||||
|
||||
<details>
|
||||
<summary>ChatGPT streaming completion</summary>
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
openai "github.com/sashabaranov/go-openai"
|
||||
)
|
||||
|
||||
func main() {
|
||||
c := openai.NewClient("your token")
|
||||
ctx := context.Background()
|
||||
|
||||
req := openai.ChatCompletionRequest{
|
||||
Model: openai.GPT3Dot5Turbo,
|
||||
MaxTokens: 20,
|
||||
Messages: []openai.ChatCompletionMessage{
|
||||
{
|
||||
Role: openai.ChatMessageRoleUser,
|
||||
Content: "Lorem ipsum",
|
||||
},
|
||||
},
|
||||
Stream: true,
|
||||
}
|
||||
stream, err := c.CreateChatCompletionStream(ctx, req)
|
||||
if err != nil {
|
||||
fmt.Printf("ChatCompletionStream error: %v\n", err)
|
||||
return
|
||||
}
|
||||
defer stream.Close()
|
||||
|
||||
fmt.Printf("Stream response: ")
|
||||
for {
|
||||
response, err := stream.Recv()
|
||||
if errors.Is(err, io.EOF) {
|
||||
fmt.Println("\nStream finished")
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("\nStream error: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf(response.Choices[0].Delta.Content)
|
||||
}
|
||||
}
|
||||
```
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>GPT-3 completion</summary>
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
openai "github.com/sashabaranov/go-openai"
|
||||
)
|
||||
|
||||
func main() {
|
||||
c := openai.NewClient("your token")
|
||||
ctx := context.Background()
|
||||
|
||||
req := openai.CompletionRequest{
|
||||
Model: openai.GPT3Babbage002,
|
||||
MaxTokens: 5,
|
||||
Prompt: "Lorem ipsum",
|
||||
}
|
||||
resp, err := c.CreateCompletion(ctx, req)
|
||||
if err != nil {
|
||||
fmt.Printf("Completion error: %v\n", err)
|
||||
return
|
||||
}
|
||||
fmt.Println(resp.Choices[0].Text)
|
||||
}
|
||||
```
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>GPT-3 streaming completion</summary>
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
openai "github.com/sashabaranov/go-openai"
|
||||
)
|
||||
|
||||
func main() {
|
||||
c := openai.NewClient("your token")
|
||||
ctx := context.Background()
|
||||
|
||||
req := openai.CompletionRequest{
|
||||
Model: openai.GPT3Babbage002,
|
||||
MaxTokens: 5,
|
||||
Prompt: "Lorem ipsum",
|
||||
Stream: true,
|
||||
}
|
||||
stream, err := c.CreateCompletionStream(ctx, req)
|
||||
if err != nil {
|
||||
fmt.Printf("CompletionStream error: %v\n", err)
|
||||
return
|
||||
}
|
||||
defer stream.Close()
|
||||
|
||||
for {
|
||||
response, err := stream.Recv()
|
||||
if errors.Is(err, io.EOF) {
|
||||
fmt.Println("Stream finished")
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("Stream error: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
fmt.Printf("Stream response: %v\n", response)
|
||||
}
|
||||
}
|
||||
```
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>Audio Speech-To-Text</summary>
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
openai "github.com/sashabaranov/go-openai"
|
||||
)
|
||||
|
||||
func main() {
|
||||
c := openai.NewClient("your token")
|
||||
ctx := context.Background()
|
||||
|
||||
req := openai.AudioRequest{
|
||||
Model: openai.Whisper1,
|
||||
FilePath: "recording.mp3",
|
||||
}
|
||||
resp, err := c.CreateTranscription(ctx, req)
|
||||
if err != nil {
|
||||
fmt.Printf("Transcription error: %v\n", err)
|
||||
return
|
||||
}
|
||||
fmt.Println(resp.Text)
|
||||
}
|
||||
```
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>Audio Captions</summary>
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
openai "github.com/sashabaranov/go-openai"
|
||||
)
|
||||
|
||||
func main() {
|
||||
c := openai.NewClient(os.Getenv("OPENAI_KEY"))
|
||||
|
||||
req := openai.AudioRequest{
|
||||
Model: openai.Whisper1,
|
||||
FilePath: os.Args[1],
|
||||
Format: openai.AudioResponseFormatSRT,
|
||||
}
|
||||
resp, err := c.CreateTranscription(context.Background(), req)
|
||||
if err != nil {
|
||||
fmt.Printf("Transcription error: %v\n", err)
|
||||
return
|
||||
}
|
||||
f, err := os.Create(os.Args[1] + ".srt")
|
||||
if err != nil {
|
||||
fmt.Printf("Could not open file: %v\n", err)
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
if _, err := f.WriteString(resp.Text); err != nil {
|
||||
fmt.Printf("Error writing to file: %v\n", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
```
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>DALL-E 2 image generation</summary>
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
openai "github.com/sashabaranov/go-openai"
|
||||
"image/png"
|
||||
"os"
|
||||
)
|
||||
|
||||
func main() {
|
||||
c := openai.NewClient("your token")
|
||||
ctx := context.Background()
|
||||
|
||||
// Sample image by link
|
||||
reqUrl := openai.ImageRequest{
|
||||
Prompt: "Parrot on a skateboard performs a trick, cartoon style, natural light, high detail",
|
||||
Size: openai.CreateImageSize256x256,
|
||||
ResponseFormat: openai.CreateImageResponseFormatURL,
|
||||
N: 1,
|
||||
}
|
||||
|
||||
respUrl, err := c.CreateImage(ctx, reqUrl)
|
||||
if err != nil {
|
||||
fmt.Printf("Image creation error: %v\n", err)
|
||||
return
|
||||
}
|
||||
fmt.Println(respUrl.Data[0].URL)
|
||||
|
||||
// Example image as base64
|
||||
reqBase64 := openai.ImageRequest{
|
||||
Prompt: "Portrait of a humanoid parrot in a classic costume, high detail, realistic light, unreal engine",
|
||||
Size: openai.CreateImageSize256x256,
|
||||
ResponseFormat: openai.CreateImageResponseFormatB64JSON,
|
||||
N: 1,
|
||||
}
|
||||
|
||||
respBase64, err := c.CreateImage(ctx, reqBase64)
|
||||
if err != nil {
|
||||
fmt.Printf("Image creation error: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
imgBytes, err := base64.StdEncoding.DecodeString(respBase64.Data[0].B64JSON)
|
||||
if err != nil {
|
||||
fmt.Printf("Base64 decode error: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
r := bytes.NewReader(imgBytes)
|
||||
imgData, err := png.Decode(r)
|
||||
if err != nil {
|
||||
fmt.Printf("PNG decode error: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
file, err := os.Create("example.png")
|
||||
if err != nil {
|
||||
fmt.Printf("File creation error: %v\n", err)
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
if err := png.Encode(file, imgData); err != nil {
|
||||
fmt.Printf("PNG encode error: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Println("The image was saved as example.png")
|
||||
}
|
||||
|
||||
```
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>GPT Image 1 image generation</summary>
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
openai "github.com/sashabaranov/go-openai"
|
||||
)
|
||||
|
||||
func main() {
|
||||
c := openai.NewClient("your token")
|
||||
ctx := context.Background()
|
||||
|
||||
req := openai.ImageRequest{
|
||||
Prompt: "Parrot on a skateboard performing a trick. Large bold text \"SKATE MASTER\" banner at the bottom of the image. Cartoon style, natural light, high detail, 1:1 aspect ratio.",
|
||||
Background: openai.CreateImageBackgroundOpaque,
|
||||
Model: openai.CreateImageModelGptImage1,
|
||||
Size: openai.CreateImageSize1024x1024,
|
||||
N: 1,
|
||||
Quality: openai.CreateImageQualityLow,
|
||||
OutputCompression: 100,
|
||||
OutputFormat: openai.CreateImageOutputFormatJPEG,
|
||||
// Moderation: openai.CreateImageModerationLow,
|
||||
// User: "",
|
||||
}
|
||||
|
||||
resp, err := c.CreateImage(ctx, req)
|
||||
if err != nil {
|
||||
fmt.Printf("Image creation Image generation with GPT Image 1error: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Println("Image Base64:", resp.Data[0].B64JSON)
|
||||
|
||||
// Decode the base64 data
|
||||
imgBytes, err := base64.StdEncoding.DecodeString(resp.Data[0].B64JSON)
|
||||
if err != nil {
|
||||
fmt.Printf("Base64 decode error: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Write image to file
|
||||
outputPath := "generated_image.jpg"
|
||||
err = os.WriteFile(outputPath, imgBytes, 0644)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to write image file: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf("The image was saved as %s\n", outputPath)
|
||||
}
|
||||
```
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>Configuring proxy</summary>
|
||||
|
||||
```go
|
||||
config := openai.DefaultConfig("token")
|
||||
proxyUrl, err := url.Parse("http://localhost:{port}")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
transport := &http.Transport{
|
||||
Proxy: http.ProxyURL(proxyUrl),
|
||||
}
|
||||
config.HTTPClient = &http.Client{
|
||||
Transport: transport,
|
||||
}
|
||||
|
||||
c := openai.NewClientWithConfig(config)
|
||||
```
|
||||
|
||||
See also: https://pkg.go.dev/github.com/sashabaranov/go-openai#ClientConfig
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>ChatGPT support context</summary>
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/sashabaranov/go-openai"
|
||||
)
|
||||
|
||||
func main() {
|
||||
client := openai.NewClient("your token")
|
||||
messages := make([]openai.ChatCompletionMessage, 0)
|
||||
reader := bufio.NewReader(os.Stdin)
|
||||
fmt.Println("Conversation")
|
||||
fmt.Println("---------------------")
|
||||
|
||||
for {
|
||||
fmt.Print("-> ")
|
||||
text, _ := reader.ReadString('\n')
|
||||
// convert CRLF to LF
|
||||
text = strings.Replace(text, "\n", "", -1)
|
||||
messages = append(messages, openai.ChatCompletionMessage{
|
||||
Role: openai.ChatMessageRoleUser,
|
||||
Content: text,
|
||||
})
|
||||
|
||||
resp, err := client.CreateChatCompletion(
|
||||
context.Background(),
|
||||
openai.ChatCompletionRequest{
|
||||
Model: openai.GPT3Dot5Turbo,
|
||||
Messages: messages,
|
||||
},
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("ChatCompletion error: %v\n", err)
|
||||
continue
|
||||
}
|
||||
|
||||
content := resp.Choices[0].Message.Content
|
||||
messages = append(messages, openai.ChatCompletionMessage{
|
||||
Role: openai.ChatMessageRoleAssistant,
|
||||
Content: content,
|
||||
})
|
||||
fmt.Println(content)
|
||||
}
|
||||
}
|
||||
```
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>Azure OpenAI ChatGPT</summary>
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
openai "github.com/sashabaranov/go-openai"
|
||||
)
|
||||
|
||||
func main() {
|
||||
config := openai.DefaultAzureConfig("your Azure OpenAI Key", "https://your Azure OpenAI Endpoint")
|
||||
// If you use a deployment name different from the model name, you can customize the AzureModelMapperFunc function
|
||||
// config.AzureModelMapperFunc = func(model string) string {
|
||||
// azureModelMapping := map[string]string{
|
||||
// "gpt-3.5-turbo": "your gpt-3.5-turbo deployment name",
|
||||
// }
|
||||
// return azureModelMapping[model]
|
||||
// }
|
||||
|
||||
client := openai.NewClientWithConfig(config)
|
||||
resp, err := client.CreateChatCompletion(
|
||||
context.Background(),
|
||||
openai.ChatCompletionRequest{
|
||||
Model: openai.GPT3Dot5Turbo,
|
||||
Messages: []openai.ChatCompletionMessage{
|
||||
{
|
||||
Role: openai.ChatMessageRoleUser,
|
||||
Content: "Hello Azure OpenAI!",
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
fmt.Printf("ChatCompletion error: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Println(resp.Choices[0].Message.Content)
|
||||
}
|
||||
|
||||
```
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>Embedding Semantic Similarity</summary>
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
openai "github.com/sashabaranov/go-openai"
|
||||
|
||||
)
|
||||
|
||||
func main() {
|
||||
client := openai.NewClient("your-token")
|
||||
|
||||
// Create an EmbeddingRequest for the user query
|
||||
queryReq := openai.EmbeddingRequest{
|
||||
Input: []string{"How many chucks would a woodchuck chuck"},
|
||||
Model: openai.AdaEmbeddingV2,
|
||||
}
|
||||
|
||||
// Create an embedding for the user query
|
||||
queryResponse, err := client.CreateEmbeddings(context.Background(), queryReq)
|
||||
if err != nil {
|
||||
log.Fatal("Error creating query embedding:", err)
|
||||
}
|
||||
|
||||
// Create an EmbeddingRequest for the target text
|
||||
targetReq := openai.EmbeddingRequest{
|
||||
Input: []string{"How many chucks would a woodchuck chuck if the woodchuck could chuck wood"},
|
||||
Model: openai.AdaEmbeddingV2,
|
||||
}
|
||||
|
||||
// Create an embedding for the target text
|
||||
targetResponse, err := client.CreateEmbeddings(context.Background(), targetReq)
|
||||
if err != nil {
|
||||
log.Fatal("Error creating target embedding:", err)
|
||||
}
|
||||
|
||||
// Now that we have the embeddings for the user query and the target text, we
|
||||
// can calculate their similarity.
|
||||
queryEmbedding := queryResponse.Data[0]
|
||||
targetEmbedding := targetResponse.Data[0]
|
||||
|
||||
similarity, err := queryEmbedding.DotProduct(&targetEmbedding)
|
||||
if err != nil {
|
||||
log.Fatal("Error calculating dot product:", err)
|
||||
}
|
||||
|
||||
log.Printf("The similarity score between the query and the target is %f", similarity)
|
||||
}
|
||||
|
||||
```
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>Azure OpenAI Embeddings</summary>
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
openai "github.com/sashabaranov/go-openai"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
config := openai.DefaultAzureConfig("your Azure OpenAI Key", "https://your Azure OpenAI Endpoint")
|
||||
config.APIVersion = "2023-05-15" // optional update to latest API version
|
||||
|
||||
//If you use a deployment name different from the model name, you can customize the AzureModelMapperFunc function
|
||||
//config.AzureModelMapperFunc = func(model string) string {
|
||||
// azureModelMapping := map[string]string{
|
||||
// "gpt-3.5-turbo":"your gpt-3.5-turbo deployment name",
|
||||
// }
|
||||
// return azureModelMapping[model]
|
||||
//}
|
||||
|
||||
input := "Text to vectorize"
|
||||
|
||||
client := openai.NewClientWithConfig(config)
|
||||
resp, err := client.CreateEmbeddings(
|
||||
context.Background(),
|
||||
openai.EmbeddingRequest{
|
||||
Input: []string{input},
|
||||
Model: openai.AdaEmbeddingV2,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("CreateEmbeddings error: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
vectors := resp.Data[0].Embedding // []float32 with 1536 dimensions
|
||||
|
||||
fmt.Println(vectors[:10], "...", vectors[len(vectors)-10:])
|
||||
}
|
||||
```
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>JSON Schema for function calling</summary>
|
||||
|
||||
It is now possible for chat completion to choose to call a function for more information ([see developer docs here](https://platform.openai.com/docs/guides/gpt/function-calling)).
|
||||
|
||||
In order to describe the type of functions that can be called, a JSON schema must be provided. Many JSON schema libraries exist and are more advanced than what we can offer in this library, however we have included a simple `jsonschema` package for those who want to use this feature without formatting their own JSON schema payload.
|
||||
|
||||
The developer documents give this JSON schema definition as an example:
|
||||
|
||||
```json
|
||||
{
|
||||
"name":"get_current_weather",
|
||||
"description":"Get the current weather in a given location",
|
||||
"parameters":{
|
||||
"type":"object",
|
||||
"properties":{
|
||||
"location":{
|
||||
"type":"string",
|
||||
"description":"The city and state, e.g. San Francisco, CA"
|
||||
},
|
||||
"unit":{
|
||||
"type":"string",
|
||||
"enum":[
|
||||
"celsius",
|
||||
"fahrenheit"
|
||||
]
|
||||
}
|
||||
},
|
||||
"required":[
|
||||
"location"
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Using the `jsonschema` package, this schema could be created using structs as such:
|
||||
|
||||
```go
|
||||
FunctionDefinition{
|
||||
Name: "get_current_weather",
|
||||
Parameters: jsonschema.Definition{
|
||||
Type: jsonschema.Object,
|
||||
Properties: map[string]jsonschema.Definition{
|
||||
"location": {
|
||||
Type: jsonschema.String,
|
||||
Description: "The city and state, e.g. San Francisco, CA",
|
||||
},
|
||||
"unit": {
|
||||
Type: jsonschema.String,
|
||||
Enum: []string{"celsius", "fahrenheit"},
|
||||
},
|
||||
},
|
||||
Required: []string{"location"},
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
The `Parameters` field of a `FunctionDefinition` can accept either of the above styles, or even a nested struct from another library (as long as it can be marshalled into JSON).
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>Error handling</summary>
|
||||
|
||||
Open-AI maintains clear documentation on how to [handle API errors](https://platform.openai.com/docs/guides/error-codes/api-errors)
|
||||
|
||||
example:
|
||||
```
|
||||
e := &openai.APIError{}
|
||||
if errors.As(err, &e) {
|
||||
switch e.HTTPStatusCode {
|
||||
case 401:
|
||||
// invalid auth or key (do not retry)
|
||||
case 429:
|
||||
// rate limiting or engine overload (wait and retry)
|
||||
case 500:
|
||||
// openai server error (retry)
|
||||
default:
|
||||
// unhandled
|
||||
}
|
||||
}
|
||||
|
||||
```
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>Fine Tune Model</summary>
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/sashabaranov/go-openai"
|
||||
)
|
||||
|
||||
func main() {
|
||||
client := openai.NewClient("your token")
|
||||
ctx := context.Background()
|
||||
|
||||
// create a .jsonl file with your training data for conversational model
|
||||
// {"prompt": "<prompt text>", "completion": "<ideal generated text>"}
|
||||
// {"prompt": "<prompt text>", "completion": "<ideal generated text>"}
|
||||
// {"prompt": "<prompt text>", "completion": "<ideal generated text>"}
|
||||
|
||||
// chat models are trained using the following file format:
|
||||
// {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the capital of France?"}, {"role": "assistant", "content": "Paris, as if everyone doesn't know that already."}]}
|
||||
// {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "Who wrote 'Romeo and Juliet'?"}, {"role": "assistant", "content": "Oh, just some guy named William Shakespeare. Ever heard of him?"}]}
|
||||
// {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "Around 384,400 kilometers. Give or take a few, like that really matters."}]}
|
||||
|
||||
// you can use openai cli tool to validate the data
|
||||
// For more info - https://platform.openai.com/docs/guides/fine-tuning
|
||||
|
||||
file, err := client.CreateFile(ctx, openai.FileRequest{
|
||||
FilePath: "training_prepared.jsonl",
|
||||
Purpose: "fine-tune",
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Printf("Upload JSONL file error: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
// create a fine tuning job
|
||||
// Streams events until the job is done (this often takes minutes, but can take hours if there are many jobs in the queue or your dataset is large)
|
||||
// use below get method to know the status of your model
|
||||
fineTuningJob, err := client.CreateFineTuningJob(ctx, openai.FineTuningJobRequest{
|
||||
TrainingFile: file.ID,
|
||||
Model: "davinci-002", // gpt-3.5-turbo-0613, babbage-002.
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Printf("Creating new fine tune model error: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
fineTuningJob, err = client.RetrieveFineTuningJob(ctx, fineTuningJob.ID)
|
||||
if err != nil {
|
||||
fmt.Printf("Getting fine tune model error: %v\n", err)
|
||||
return
|
||||
}
|
||||
fmt.Println(fineTuningJob.FineTunedModel)
|
||||
|
||||
// once the status of fineTuningJob is `succeeded`, you can use your fine tune model in Completion Request or Chat Completion Request
|
||||
|
||||
// resp, err := client.CreateCompletion(ctx, openai.CompletionRequest{
|
||||
// Model: fineTuningJob.FineTunedModel,
|
||||
// Prompt: "your prompt",
|
||||
// })
|
||||
// if err != nil {
|
||||
// fmt.Printf("Create completion error %v\n", err)
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// fmt.Println(resp.Choices[0].Text)
|
||||
}
|
||||
```
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>Structured Outputs</summary>
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/sashabaranov/go-openai"
|
||||
"github.com/sashabaranov/go-openai/jsonschema"
|
||||
)
|
||||
|
||||
func main() {
|
||||
client := openai.NewClient("your token")
|
||||
ctx := context.Background()
|
||||
|
||||
type Result struct {
|
||||
Steps []struct {
|
||||
Explanation string `json:"explanation"`
|
||||
Output string `json:"output"`
|
||||
} `json:"steps"`
|
||||
FinalAnswer string `json:"final_answer"`
|
||||
}
|
||||
var result Result
|
||||
schema, err := jsonschema.GenerateSchemaForType(result)
|
||||
if err != nil {
|
||||
log.Fatalf("GenerateSchemaForType error: %v", err)
|
||||
}
|
||||
resp, err := client.CreateChatCompletion(ctx, openai.ChatCompletionRequest{
|
||||
Model: openai.GPT4oMini,
|
||||
Messages: []openai.ChatCompletionMessage{
|
||||
{
|
||||
Role: openai.ChatMessageRoleSystem,
|
||||
Content: "You are a helpful math tutor. Guide the user through the solution step by step.",
|
||||
},
|
||||
{
|
||||
Role: openai.ChatMessageRoleUser,
|
||||
Content: "how can I solve 8x + 7 = -23",
|
||||
},
|
||||
},
|
||||
ResponseFormat: &openai.ChatCompletionResponseFormat{
|
||||
Type: openai.ChatCompletionResponseFormatTypeJSONSchema,
|
||||
JSONSchema: &openai.ChatCompletionResponseFormatJSONSchema{
|
||||
Name: "math_reasoning",
|
||||
Schema: schema,
|
||||
Strict: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("CreateChatCompletion error: %v", err)
|
||||
}
|
||||
err = schema.Unmarshal(resp.Choices[0].Message.Content, &result)
|
||||
if err != nil {
|
||||
log.Fatalf("Unmarshal schema error: %v", err)
|
||||
}
|
||||
fmt.Println(result)
|
||||
}
|
||||
```
|
||||
</details>
|
||||
See the `examples/` folder for more.
|
||||
|
||||
## Frequently Asked Questions
|
||||
|
||||
### Why don't we get the same answer when specifying a temperature field of 0 and asking the same question?
|
||||
|
||||
Even when specifying a temperature field of 0, it doesn't guarantee that you'll always get the same response. Several factors come into play.
|
||||
|
||||
1. Go OpenAI Behavior: When you specify a temperature field of 0 in Go OpenAI, the omitempty tag causes that field to be removed from the request. Consequently, the OpenAI API applies the default value of 1.
|
||||
2. Token Count for Input/Output: If there's a large number of tokens in the input and output, setting the temperature to 0 can still result in non-deterministic behavior. In particular, when using around 32k tokens, the likelihood of non-deterministic behavior becomes highest even with a temperature of 0.
|
||||
|
||||
Due to the factors mentioned above, different answers may be returned even for the same question.
|
||||
|
||||
**Workarounds:**
|
||||
1. As of November 2023, use [the new `seed` parameter](https://platform.openai.com/docs/guides/text-generation/reproducible-outputs) in conjunction with the `system_fingerprint` response field, alongside Temperature management.
|
||||
2. Try using `math.SmallestNonzeroFloat32`: By specifying `math.SmallestNonzeroFloat32` in the temperature field instead of 0, you can mimic the behavior of setting it to 0.
|
||||
3. Limiting Token Count: By limiting the number of tokens in the input and output and especially avoiding large requests close to 32k tokens, you can reduce the risk of non-deterministic behavior.
|
||||
|
||||
By adopting these strategies, you can expect more consistent results.
|
||||
|
||||
**Related Issues:**
|
||||
[omitempty option of request struct will generate incorrect request when parameter is 0.](https://github.com/sashabaranov/go-openai/issues/9)
|
||||
|
||||
### Does Go OpenAI provide a method to count tokens?
|
||||
|
||||
No, Go OpenAI does not offer a feature to count tokens, and there are no plans to provide such a feature in the future. However, if there's a way to implement a token counting feature with zero dependencies, it might be possible to merge that feature into Go OpenAI. Otherwise, it would be more appropriate to implement it in a dedicated library or repository.
|
||||
|
||||
For counting tokens, you might find the following links helpful:
|
||||
- [Counting Tokens For Chat API Calls](https://github.com/pkoukk/tiktoken-go#counting-tokens-for-chat-api-calls)
|
||||
- [How to count tokens with tiktoken](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb)
|
||||
|
||||
**Related Issues:**
|
||||
[Is it possible to join the implementation of GPT3 Tokenizer](https://github.com/sashabaranov/go-openai/issues/62)
|
||||
|
||||
## Contributing
|
||||
|
||||
By following [Contributing Guidelines](https://github.com/sashabaranov/go-openai/blob/master/CONTRIBUTING.md), we hope to ensure that your contributions are made smoothly and efficiently.
|
||||
|
||||
## Thank you
|
||||
|
||||
We want to take a moment to express our deepest gratitude to the [contributors](https://github.com/sashabaranov/go-openai/graphs/contributors) and sponsors of this project:
|
||||
- [Carson Kahn](https://carsonkahn.com) of [Spindle AI](https://spindleai.com)
|
||||
|
||||
To all of you: thank you. You've helped us achieve more than we ever imagined possible. Can't wait to see where we go next, together!
|
||||
325
vendor/github.com/sashabaranov/go-openai/assistant.go
generated
vendored
Normal file
325
vendor/github.com/sashabaranov/go-openai/assistant.go
generated
vendored
Normal file
@@ -0,0 +1,325 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
const (
|
||||
assistantsSuffix = "/assistants"
|
||||
assistantsFilesSuffix = "/files"
|
||||
)
|
||||
|
||||
type Assistant struct {
|
||||
ID string `json:"id"`
|
||||
Object string `json:"object"`
|
||||
CreatedAt int64 `json:"created_at"`
|
||||
Name *string `json:"name,omitempty"`
|
||||
Description *string `json:"description,omitempty"`
|
||||
Model string `json:"model"`
|
||||
Instructions *string `json:"instructions,omitempty"`
|
||||
Tools []AssistantTool `json:"tools"`
|
||||
ToolResources *AssistantToolResource `json:"tool_resources,omitempty"`
|
||||
FileIDs []string `json:"file_ids,omitempty"` // Deprecated in v2
|
||||
Metadata map[string]any `json:"metadata,omitempty"`
|
||||
Temperature *float32 `json:"temperature,omitempty"`
|
||||
TopP *float32 `json:"top_p,omitempty"`
|
||||
ResponseFormat any `json:"response_format,omitempty"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
type AssistantToolType string
|
||||
|
||||
const (
|
||||
AssistantToolTypeCodeInterpreter AssistantToolType = "code_interpreter"
|
||||
AssistantToolTypeRetrieval AssistantToolType = "retrieval"
|
||||
AssistantToolTypeFunction AssistantToolType = "function"
|
||||
AssistantToolTypeFileSearch AssistantToolType = "file_search"
|
||||
)
|
||||
|
||||
type AssistantTool struct {
|
||||
Type AssistantToolType `json:"type"`
|
||||
Function *FunctionDefinition `json:"function,omitempty"`
|
||||
}
|
||||
|
||||
type AssistantToolFileSearch struct {
|
||||
VectorStoreIDs []string `json:"vector_store_ids"`
|
||||
}
|
||||
|
||||
type AssistantToolCodeInterpreter struct {
|
||||
FileIDs []string `json:"file_ids"`
|
||||
}
|
||||
|
||||
type AssistantToolResource struct {
|
||||
FileSearch *AssistantToolFileSearch `json:"file_search,omitempty"`
|
||||
CodeInterpreter *AssistantToolCodeInterpreter `json:"code_interpreter,omitempty"`
|
||||
}
|
||||
|
||||
// AssistantRequest provides the assistant request parameters.
|
||||
// When modifying the tools the API functions as the following:
|
||||
// If Tools is undefined, no changes are made to the Assistant's tools.
|
||||
// If Tools is empty slice it will effectively delete all of the Assistant's tools.
|
||||
// If Tools is populated, it will replace all of the existing Assistant's tools with the provided tools.
|
||||
type AssistantRequest struct {
|
||||
Model string `json:"model"`
|
||||
Name *string `json:"name,omitempty"`
|
||||
Description *string `json:"description,omitempty"`
|
||||
Instructions *string `json:"instructions,omitempty"`
|
||||
Tools []AssistantTool `json:"-"`
|
||||
FileIDs []string `json:"file_ids,omitempty"`
|
||||
Metadata map[string]any `json:"metadata,omitempty"`
|
||||
ToolResources *AssistantToolResource `json:"tool_resources,omitempty"`
|
||||
ResponseFormat any `json:"response_format,omitempty"`
|
||||
Temperature *float32 `json:"temperature,omitempty"`
|
||||
TopP *float32 `json:"top_p,omitempty"`
|
||||
}
|
||||
|
||||
// MarshalJSON provides a custom marshaller for the assistant request to handle the API use cases
|
||||
// If Tools is nil, the field is omitted from the JSON.
|
||||
// If Tools is an empty slice, it's included in the JSON as an empty array ([]).
|
||||
// If Tools is populated, it's included in the JSON with the elements.
|
||||
func (a AssistantRequest) MarshalJSON() ([]byte, error) {
|
||||
type Alias AssistantRequest
|
||||
assistantAlias := &struct {
|
||||
Tools *[]AssistantTool `json:"tools,omitempty"`
|
||||
*Alias
|
||||
}{
|
||||
Alias: (*Alias)(&a),
|
||||
}
|
||||
|
||||
if a.Tools != nil {
|
||||
assistantAlias.Tools = &a.Tools
|
||||
}
|
||||
|
||||
return json.Marshal(assistantAlias)
|
||||
}
|
||||
|
||||
// AssistantsList is a list of assistants.
|
||||
type AssistantsList struct {
|
||||
Assistants []Assistant `json:"data"`
|
||||
LastID *string `json:"last_id"`
|
||||
FirstID *string `json:"first_id"`
|
||||
HasMore bool `json:"has_more"`
|
||||
httpHeader
|
||||
}
|
||||
|
||||
type AssistantDeleteResponse struct {
|
||||
ID string `json:"id"`
|
||||
Object string `json:"object"`
|
||||
Deleted bool `json:"deleted"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
type AssistantFile struct {
|
||||
ID string `json:"id"`
|
||||
Object string `json:"object"`
|
||||
CreatedAt int64 `json:"created_at"`
|
||||
AssistantID string `json:"assistant_id"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
type AssistantFileRequest struct {
|
||||
FileID string `json:"file_id"`
|
||||
}
|
||||
|
||||
type AssistantFilesList struct {
|
||||
AssistantFiles []AssistantFile `json:"data"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
// CreateAssistant creates a new assistant.
|
||||
func (c *Client) CreateAssistant(ctx context.Context, request AssistantRequest) (response Assistant, err error) {
|
||||
req, err := c.newRequest(ctx, http.MethodPost, c.fullURL(assistantsSuffix), withBody(request),
|
||||
withBetaAssistantVersion(c.config.AssistantVersion))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
|
||||
|
||||
// RetrieveAssistant retrieves an assistant.
|
||||
func (c *Client) RetrieveAssistant(
|
||||
ctx context.Context,
|
||||
assistantID string,
|
||||
) (response Assistant, err error) {
|
||||
urlSuffix := fmt.Sprintf("%s/%s", assistantsSuffix, assistantID)
|
||||
req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix),
|
||||
withBetaAssistantVersion(c.config.AssistantVersion))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
|
||||
|
||||
// ModifyAssistant modifies an assistant.
|
||||
func (c *Client) ModifyAssistant(
|
||||
ctx context.Context,
|
||||
assistantID string,
|
||||
request AssistantRequest,
|
||||
) (response Assistant, err error) {
|
||||
urlSuffix := fmt.Sprintf("%s/%s", assistantsSuffix, assistantID)
|
||||
req, err := c.newRequest(ctx, http.MethodPost, c.fullURL(urlSuffix), withBody(request),
|
||||
withBetaAssistantVersion(c.config.AssistantVersion))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteAssistant deletes an assistant.
|
||||
func (c *Client) DeleteAssistant(
|
||||
ctx context.Context,
|
||||
assistantID string,
|
||||
) (response AssistantDeleteResponse, err error) {
|
||||
urlSuffix := fmt.Sprintf("%s/%s", assistantsSuffix, assistantID)
|
||||
req, err := c.newRequest(ctx, http.MethodDelete, c.fullURL(urlSuffix),
|
||||
withBetaAssistantVersion(c.config.AssistantVersion))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
|
||||
|
||||
// ListAssistants lists the currently available assistants.
|
||||
func (c *Client) ListAssistants(
|
||||
ctx context.Context,
|
||||
limit *int,
|
||||
order *string,
|
||||
after *string,
|
||||
before *string,
|
||||
) (response AssistantsList, err error) {
|
||||
urlValues := url.Values{}
|
||||
if limit != nil {
|
||||
urlValues.Add("limit", fmt.Sprintf("%d", *limit))
|
||||
}
|
||||
if order != nil {
|
||||
urlValues.Add("order", *order)
|
||||
}
|
||||
if after != nil {
|
||||
urlValues.Add("after", *after)
|
||||
}
|
||||
if before != nil {
|
||||
urlValues.Add("before", *before)
|
||||
}
|
||||
|
||||
encodedValues := ""
|
||||
if len(urlValues) > 0 {
|
||||
encodedValues = "?" + urlValues.Encode()
|
||||
}
|
||||
|
||||
urlSuffix := fmt.Sprintf("%s%s", assistantsSuffix, encodedValues)
|
||||
req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix),
|
||||
withBetaAssistantVersion(c.config.AssistantVersion))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
|
||||
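// Example (illustrative sketch, not part of the upstream source): paging through
// assistants with ListAssistants. The limit, order, after and before arguments
// are optional, so nil can be passed for any of them; here "after" carries the
// last ID of the previous page until HasMore reports false.
func exampleListAllAssistants(ctx context.Context, c *Client) ([]Assistant, error) {
	var all []Assistant
	limit := 20
	var after *string

	for {
		page, err := c.ListAssistants(ctx, &limit, nil, after, nil)
		if err != nil {
			return nil, err
		}
		all = append(all, page.Assistants...)
		if !page.HasMore || page.LastID == nil {
			return all, nil
		}
		after = page.LastID
	}
}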
|
||||
// CreateAssistantFile creates a new assistant file.
|
||||
func (c *Client) CreateAssistantFile(
|
||||
ctx context.Context,
|
||||
assistantID string,
|
||||
request AssistantFileRequest,
|
||||
) (response AssistantFile, err error) {
|
||||
urlSuffix := fmt.Sprintf("%s/%s%s", assistantsSuffix, assistantID, assistantsFilesSuffix)
|
||||
req, err := c.newRequest(ctx, http.MethodPost, c.fullURL(urlSuffix),
|
||||
withBody(request),
|
||||
withBetaAssistantVersion(c.config.AssistantVersion))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
|
||||
|
||||
// RetrieveAssistantFile retrieves an assistant file.
|
||||
func (c *Client) RetrieveAssistantFile(
|
||||
ctx context.Context,
|
||||
assistantID string,
|
||||
fileID string,
|
||||
) (response AssistantFile, err error) {
|
||||
urlSuffix := fmt.Sprintf("%s/%s%s/%s", assistantsSuffix, assistantID, assistantsFilesSuffix, fileID)
|
||||
req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix),
|
||||
withBetaAssistantVersion(c.config.AssistantVersion))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteAssistantFile deletes an existing file.
|
||||
func (c *Client) DeleteAssistantFile(
|
||||
ctx context.Context,
|
||||
assistantID string,
|
||||
fileID string,
|
||||
) (err error) {
|
||||
urlSuffix := fmt.Sprintf("%s/%s%s/%s", assistantsSuffix, assistantID, assistantsFilesSuffix, fileID)
|
||||
req, err := c.newRequest(ctx, http.MethodDelete, c.fullURL(urlSuffix),
|
||||
withBetaAssistantVersion(c.config.AssistantVersion))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, nil)
|
||||
return
|
||||
}
|
||||
|
||||
// ListAssistantFiles lists the currently available files for an assistant.
|
||||
func (c *Client) ListAssistantFiles(
|
||||
ctx context.Context,
|
||||
assistantID string,
|
||||
limit *int,
|
||||
order *string,
|
||||
after *string,
|
||||
before *string,
|
||||
) (response AssistantFilesList, err error) {
|
||||
urlValues := url.Values{}
|
||||
if limit != nil {
|
||||
urlValues.Add("limit", fmt.Sprintf("%d", *limit))
|
||||
}
|
||||
if order != nil {
|
||||
urlValues.Add("order", *order)
|
||||
}
|
||||
if after != nil {
|
||||
urlValues.Add("after", *after)
|
||||
}
|
||||
if before != nil {
|
||||
urlValues.Add("before", *before)
|
||||
}
|
||||
|
||||
encodedValues := ""
|
||||
if len(urlValues) > 0 {
|
||||
encodedValues = "?" + urlValues.Encode()
|
||||
}
|
||||
|
||||
urlSuffix := fmt.Sprintf("%s/%s%s%s", assistantsSuffix, assistantID, assistantsFilesSuffix, encodedValues)
|
||||
req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix),
|
||||
withBetaAssistantVersion(c.config.AssistantVersion))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
|
||||
234
vendor/github.com/sashabaranov/go-openai/audio.go
generated
vendored
Normal file
@@ -0,0 +1,234 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
utils "github.com/sashabaranov/go-openai/internal"
|
||||
)
|
||||
|
||||
// Whisper defines the models provided by OpenAI for processing audio.
|
||||
const (
|
||||
Whisper1 = "whisper-1"
|
||||
)
|
||||
|
||||
// Response formats; Whisper uses AudioResponseFormatJSON by default.
|
||||
type AudioResponseFormat string
|
||||
|
||||
const (
|
||||
AudioResponseFormatJSON AudioResponseFormat = "json"
|
||||
AudioResponseFormatText AudioResponseFormat = "text"
|
||||
AudioResponseFormatSRT AudioResponseFormat = "srt"
|
||||
AudioResponseFormatVerboseJSON AudioResponseFormat = "verbose_json"
|
||||
AudioResponseFormatVTT AudioResponseFormat = "vtt"
|
||||
)
|
||||
|
||||
type TranscriptionTimestampGranularity string
|
||||
|
||||
const (
|
||||
TranscriptionTimestampGranularityWord TranscriptionTimestampGranularity = "word"
|
||||
TranscriptionTimestampGranularitySegment TranscriptionTimestampGranularity = "segment"
|
||||
)
|
||||
|
||||
// AudioRequest represents a request structure for audio API.
|
||||
type AudioRequest struct {
|
||||
Model string
|
||||
|
||||
// FilePath is either an existing file in your filesystem or a filename representing the contents of Reader.
|
||||
FilePath string
|
||||
|
||||
// Reader is an optional io.Reader when you do not want to use an existing file.
|
||||
Reader io.Reader
|
||||
|
||||
Prompt string
|
||||
Temperature float32
|
||||
Language string // Only for transcription.
|
||||
Format AudioResponseFormat
|
||||
TimestampGranularities []TranscriptionTimestampGranularity // Only for transcription.
|
||||
}
|
||||
|
||||
// AudioResponse represents a response structure for audio API.
|
||||
type AudioResponse struct {
|
||||
Task string `json:"task"`
|
||||
Language string `json:"language"`
|
||||
Duration float64 `json:"duration"`
|
||||
Segments []struct {
|
||||
ID int `json:"id"`
|
||||
Seek int `json:"seek"`
|
||||
Start float64 `json:"start"`
|
||||
End float64 `json:"end"`
|
||||
Text string `json:"text"`
|
||||
Tokens []int `json:"tokens"`
|
||||
Temperature float64 `json:"temperature"`
|
||||
AvgLogprob float64 `json:"avg_logprob"`
|
||||
CompressionRatio float64 `json:"compression_ratio"`
|
||||
NoSpeechProb float64 `json:"no_speech_prob"`
|
||||
Transient bool `json:"transient"`
|
||||
} `json:"segments"`
|
||||
Words []struct {
|
||||
Word string `json:"word"`
|
||||
Start float64 `json:"start"`
|
||||
End float64 `json:"end"`
|
||||
} `json:"words"`
|
||||
Text string `json:"text"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
type audioTextResponse struct {
|
||||
Text string `json:"text"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
func (r *audioTextResponse) ToAudioResponse() AudioResponse {
|
||||
return AudioResponse{
|
||||
Text: r.Text,
|
||||
httpHeader: r.httpHeader,
|
||||
}
|
||||
}
|
||||
|
||||
// CreateTranscription — API call to create a transcription. Returns transcribed text.
|
||||
func (c *Client) CreateTranscription(
|
||||
ctx context.Context,
|
||||
request AudioRequest,
|
||||
) (response AudioResponse, err error) {
|
||||
return c.callAudioAPI(ctx, request, "transcriptions")
|
||||
}
|
||||
|
||||
// CreateTranslation — API call to translate audio into English.
|
||||
func (c *Client) CreateTranslation(
|
||||
ctx context.Context,
|
||||
request AudioRequest,
|
||||
) (response AudioResponse, err error) {
|
||||
return c.callAudioAPI(ctx, request, "translations")
|
||||
}
|
||||
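// Example (illustrative sketch, not part of the upstream source): a minimal
// transcription request for a file on disk, asking for verbose JSON with
// word-level timestamps. The file path is a placeholder.
func exampleTranscribeFile(ctx context.Context, c *Client) (AudioResponse, error) {
	req := AudioRequest{
		Model:    Whisper1,
		FilePath: "recording.mp3",
		Format:   AudioResponseFormatVerboseJSON,
		TimestampGranularities: []TranscriptionTimestampGranularity{
			TranscriptionTimestampGranularityWord,
		},
	}
	return c.CreateTranscription(ctx, req)
}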
|
||||
// callAudioAPI — API call to an audio endpoint.
|
||||
func (c *Client) callAudioAPI(
|
||||
ctx context.Context,
|
||||
request AudioRequest,
|
||||
endpointSuffix string,
|
||||
) (response AudioResponse, err error) {
|
||||
var formBody bytes.Buffer
|
||||
builder := c.createFormBuilder(&formBody)
|
||||
|
||||
if err = audioMultipartForm(request, builder); err != nil {
|
||||
return AudioResponse{}, err
|
||||
}
|
||||
|
||||
urlSuffix := fmt.Sprintf("/audio/%s", endpointSuffix)
|
||||
req, err := c.newRequest(
|
||||
ctx,
|
||||
http.MethodPost,
|
||||
c.fullURL(urlSuffix, withModel(request.Model)),
|
||||
withBody(&formBody),
|
||||
withContentType(builder.FormDataContentType()),
|
||||
)
|
||||
if err != nil {
|
||||
return AudioResponse{}, err
|
||||
}
|
||||
|
||||
if request.HasJSONResponse() {
|
||||
err = c.sendRequest(req, &response)
|
||||
} else {
|
||||
var textResponse audioTextResponse
|
||||
err = c.sendRequest(req, &textResponse)
|
||||
response = textResponse.ToAudioResponse()
|
||||
}
|
||||
if err != nil {
|
||||
return AudioResponse{}, err
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// HasJSONResponse returns true if the response format is JSON.
|
||||
func (r AudioRequest) HasJSONResponse() bool {
|
||||
return r.Format == "" || r.Format == AudioResponseFormatJSON || r.Format == AudioResponseFormatVerboseJSON
|
||||
}
|
||||
|
||||
// audioMultipartForm creates a form with audio file contents and the name of the model to use for
|
||||
// audio processing.
|
||||
func audioMultipartForm(request AudioRequest, b utils.FormBuilder) error {
|
||||
err := createFileField(request, b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = b.WriteField("model", request.Model)
|
||||
if err != nil {
|
||||
return fmt.Errorf("writing model name: %w", err)
|
||||
}
|
||||
|
||||
// Create a form field for the prompt (if provided)
|
||||
if request.Prompt != "" {
|
||||
err = b.WriteField("prompt", request.Prompt)
|
||||
if err != nil {
|
||||
return fmt.Errorf("writing prompt: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create a form field for the format (if provided)
|
||||
if request.Format != "" {
|
||||
err = b.WriteField("response_format", string(request.Format))
|
||||
if err != nil {
|
||||
return fmt.Errorf("writing format: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create a form field for the temperature (if provided)
|
||||
if request.Temperature != 0 {
|
||||
err = b.WriteField("temperature", fmt.Sprintf("%.2f", request.Temperature))
|
||||
if err != nil {
|
||||
return fmt.Errorf("writing temperature: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create a form field for the language (if provided)
|
||||
if request.Language != "" {
|
||||
err = b.WriteField("language", request.Language)
|
||||
if err != nil {
|
||||
return fmt.Errorf("writing language: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(request.TimestampGranularities) > 0 {
|
||||
for _, tg := range request.TimestampGranularities {
|
||||
err = b.WriteField("timestamp_granularities[]", string(tg))
|
||||
if err != nil {
|
||||
return fmt.Errorf("writing timestamp_granularities[]: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Close the multipart writer
|
||||
return b.Close()
|
||||
}
|
||||
|
||||
// createFileField creates the "file" form field from either an existing file or by using the reader.
|
||||
func createFileField(request AudioRequest, b utils.FormBuilder) error {
|
||||
if request.Reader != nil {
|
||||
err := b.CreateFormFileReader("file", request.Reader, request.FilePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating form using reader: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
f, err := os.Open(request.FilePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("opening audio file: %w", err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
err = b.CreateFormFile("file", f)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating form file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
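// Example (illustrative sketch, not part of the upstream source): when the audio
// bytes are already in memory, Reader supplies the content and FilePath only
// names the multipart form file, as createFileField above shows.
func exampleTranscribeFromMemory(ctx context.Context, c *Client, audio []byte) (AudioResponse, error) {
	req := AudioRequest{
		Model:    Whisper1,
		Reader:   bytes.NewReader(audio),
		FilePath: "audio.wav", // used only as the form file name
	}
	return c.CreateTranscription(ctx, req)
}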
271
vendor/github.com/sashabaranov/go-openai/batch.go
generated
vendored
Normal file
@@ -0,0 +1,271 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
const batchesSuffix = "/batches"
|
||||
|
||||
type BatchEndpoint string
|
||||
|
||||
const (
|
||||
BatchEndpointChatCompletions BatchEndpoint = "/v1/chat/completions"
|
||||
BatchEndpointCompletions BatchEndpoint = "/v1/completions"
|
||||
BatchEndpointEmbeddings BatchEndpoint = "/v1/embeddings"
|
||||
)
|
||||
|
||||
type BatchLineItem interface {
|
||||
MarshalBatchLineItem() []byte
|
||||
}
|
||||
|
||||
type BatchChatCompletionRequest struct {
|
||||
CustomID string `json:"custom_id"`
|
||||
Body ChatCompletionRequest `json:"body"`
|
||||
Method string `json:"method"`
|
||||
URL BatchEndpoint `json:"url"`
|
||||
}
|
||||
|
||||
func (r BatchChatCompletionRequest) MarshalBatchLineItem() []byte {
|
||||
marshal, _ := json.Marshal(r)
|
||||
return marshal
|
||||
}
|
||||
|
||||
type BatchCompletionRequest struct {
|
||||
CustomID string `json:"custom_id"`
|
||||
Body CompletionRequest `json:"body"`
|
||||
Method string `json:"method"`
|
||||
URL BatchEndpoint `json:"url"`
|
||||
}
|
||||
|
||||
func (r BatchCompletionRequest) MarshalBatchLineItem() []byte {
|
||||
marshal, _ := json.Marshal(r)
|
||||
return marshal
|
||||
}
|
||||
|
||||
type BatchEmbeddingRequest struct {
|
||||
CustomID string `json:"custom_id"`
|
||||
Body EmbeddingRequest `json:"body"`
|
||||
Method string `json:"method"`
|
||||
URL BatchEndpoint `json:"url"`
|
||||
}
|
||||
|
||||
func (r BatchEmbeddingRequest) MarshalBatchLineItem() []byte {
|
||||
marshal, _ := json.Marshal(r)
|
||||
return marshal
|
||||
}
|
||||
|
||||
type Batch struct {
|
||||
ID string `json:"id"`
|
||||
Object string `json:"object"`
|
||||
Endpoint BatchEndpoint `json:"endpoint"`
|
||||
Errors *struct {
|
||||
Object string `json:"object,omitempty"`
|
||||
Data []struct {
|
||||
Code string `json:"code,omitempty"`
|
||||
Message string `json:"message,omitempty"`
|
||||
Param *string `json:"param,omitempty"`
|
||||
Line *int `json:"line,omitempty"`
|
||||
} `json:"data"`
|
||||
} `json:"errors"`
|
||||
InputFileID string `json:"input_file_id"`
|
||||
CompletionWindow string `json:"completion_window"`
|
||||
Status string `json:"status"`
|
||||
OutputFileID *string `json:"output_file_id"`
|
||||
ErrorFileID *string `json:"error_file_id"`
|
||||
CreatedAt int `json:"created_at"`
|
||||
InProgressAt *int `json:"in_progress_at"`
|
||||
ExpiresAt *int `json:"expires_at"`
|
||||
FinalizingAt *int `json:"finalizing_at"`
|
||||
CompletedAt *int `json:"completed_at"`
|
||||
FailedAt *int `json:"failed_at"`
|
||||
ExpiredAt *int `json:"expired_at"`
|
||||
CancellingAt *int `json:"cancelling_at"`
|
||||
CancelledAt *int `json:"cancelled_at"`
|
||||
RequestCounts BatchRequestCounts `json:"request_counts"`
|
||||
Metadata map[string]any `json:"metadata"`
|
||||
}
|
||||
|
||||
type BatchRequestCounts struct {
|
||||
Total int `json:"total"`
|
||||
Completed int `json:"completed"`
|
||||
Failed int `json:"failed"`
|
||||
}
|
||||
|
||||
type CreateBatchRequest struct {
|
||||
InputFileID string `json:"input_file_id"`
|
||||
Endpoint BatchEndpoint `json:"endpoint"`
|
||||
CompletionWindow string `json:"completion_window"`
|
||||
Metadata map[string]any `json:"metadata"`
|
||||
}
|
||||
|
||||
type BatchResponse struct {
|
||||
httpHeader
|
||||
Batch
|
||||
}
|
||||
|
||||
// CreateBatch — API call to create a batch.
|
||||
func (c *Client) CreateBatch(
|
||||
ctx context.Context,
|
||||
request CreateBatchRequest,
|
||||
) (response BatchResponse, err error) {
|
||||
if request.CompletionWindow == "" {
|
||||
request.CompletionWindow = "24h"
|
||||
}
|
||||
|
||||
req, err := c.newRequest(ctx, http.MethodPost, c.fullURL(batchesSuffix), withBody(request))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
|
||||
|
||||
type UploadBatchFileRequest struct {
|
||||
FileName string
|
||||
Lines []BatchLineItem
|
||||
}
|
||||
|
||||
func (r *UploadBatchFileRequest) MarshalJSONL() []byte {
|
||||
buff := bytes.Buffer{}
|
||||
for i, line := range r.Lines {
|
||||
if i != 0 {
|
||||
buff.Write([]byte("\n"))
|
||||
}
|
||||
buff.Write(line.MarshalBatchLineItem())
|
||||
}
|
||||
return buff.Bytes()
|
||||
}
|
||||
|
||||
func (r *UploadBatchFileRequest) AddChatCompletion(customerID string, body ChatCompletionRequest) {
|
||||
r.Lines = append(r.Lines, BatchChatCompletionRequest{
|
||||
CustomID: customerID,
|
||||
Body: body,
|
||||
Method: "POST",
|
||||
URL: BatchEndpointChatCompletions,
|
||||
})
|
||||
}
|
||||
|
||||
func (r *UploadBatchFileRequest) AddCompletion(customerID string, body CompletionRequest) {
|
||||
r.Lines = append(r.Lines, BatchCompletionRequest{
|
||||
CustomID: customerID,
|
||||
Body: body,
|
||||
Method: "POST",
|
||||
URL: BatchEndpointCompletions,
|
||||
})
|
||||
}
|
||||
|
||||
func (r *UploadBatchFileRequest) AddEmbedding(customerID string, body EmbeddingRequest) {
|
||||
r.Lines = append(r.Lines, BatchEmbeddingRequest{
|
||||
CustomID: customerID,
|
||||
Body: body,
|
||||
Method: "POST",
|
||||
URL: BatchEndpointEmbeddings,
|
||||
})
|
||||
}
|
||||
|
||||
// UploadBatchFile — uploads a batch input file.
|
||||
func (c *Client) UploadBatchFile(ctx context.Context, request UploadBatchFileRequest) (File, error) {
|
||||
if request.FileName == "" {
|
||||
request.FileName = "@batchinput.jsonl"
|
||||
}
|
||||
return c.CreateFileBytes(ctx, FileBytesRequest{
|
||||
Name: request.FileName,
|
||||
Bytes: request.MarshalJSONL(),
|
||||
Purpose: PurposeBatch,
|
||||
})
|
||||
}
|
||||
|
||||
type CreateBatchWithUploadFileRequest struct {
|
||||
Endpoint BatchEndpoint `json:"endpoint"`
|
||||
CompletionWindow string `json:"completion_window"`
|
||||
Metadata map[string]any `json:"metadata"`
|
||||
UploadBatchFileRequest
|
||||
}
|
||||
|
||||
// CreateBatchWithUploadFile — API call to create a batch from an uploaded file.
|
||||
func (c *Client) CreateBatchWithUploadFile(
|
||||
ctx context.Context,
|
||||
request CreateBatchWithUploadFileRequest,
|
||||
) (response BatchResponse, err error) {
|
||||
var file File
|
||||
file, err = c.UploadBatchFile(ctx, UploadBatchFileRequest{
|
||||
FileName: request.FileName,
|
||||
Lines: request.Lines,
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return c.CreateBatch(ctx, CreateBatchRequest{
|
||||
InputFileID: file.ID,
|
||||
Endpoint: request.Endpoint,
|
||||
CompletionWindow: request.CompletionWindow,
|
||||
Metadata: request.Metadata,
|
||||
})
|
||||
}
|
||||
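// Example (illustrative sketch, not part of the upstream source): building a
// batch input file line by line and submitting it in one call. The custom IDs,
// file name and model name are placeholders.
func exampleSubmitChatBatch(ctx context.Context, c *Client) (BatchResponse, error) {
	upload := UploadBatchFileRequest{FileName: "nightly.jsonl"}
	upload.AddChatCompletion("job-1", ChatCompletionRequest{
		Model:    "gpt-4o-mini",
		Messages: []ChatCompletionMessage{{Role: ChatMessageRoleUser, Content: "Summarise yesterday's report."}},
	})
	upload.AddChatCompletion("job-2", ChatCompletionRequest{
		Model:    "gpt-4o-mini",
		Messages: []ChatCompletionMessage{{Role: ChatMessageRoleUser, Content: "Draft a status update."}},
	})

	return c.CreateBatchWithUploadFile(ctx, CreateBatchWithUploadFileRequest{
		Endpoint:               BatchEndpointChatCompletions,
		UploadBatchFileRequest: upload,
		// CompletionWindow is left empty and defaults to "24h" in CreateBatch.
	})
}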
|
||||
// RetrieveBatch — API call to retrieve a batch.
|
||||
func (c *Client) RetrieveBatch(
|
||||
ctx context.Context,
|
||||
batchID string,
|
||||
) (response BatchResponse, err error) {
|
||||
urlSuffix := fmt.Sprintf("%s/%s", batchesSuffix, batchID)
|
||||
req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
|
||||
|
||||
// CancelBatch — API call to cancel a batch.
|
||||
func (c *Client) CancelBatch(
|
||||
ctx context.Context,
|
||||
batchID string,
|
||||
) (response BatchResponse, err error) {
|
||||
urlSuffix := fmt.Sprintf("%s/%s/cancel", batchesSuffix, batchID)
|
||||
req, err := c.newRequest(ctx, http.MethodPost, c.fullURL(urlSuffix))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
|
||||
|
||||
type ListBatchResponse struct {
|
||||
httpHeader
|
||||
Object string `json:"object"`
|
||||
Data []Batch `json:"data"`
|
||||
FirstID string `json:"first_id"`
|
||||
LastID string `json:"last_id"`
|
||||
HasMore bool `json:"has_more"`
|
||||
}
|
||||
|
||||
// ListBatch — API call to list batches.
|
||||
func (c *Client) ListBatch(ctx context.Context, after *string, limit *int) (response ListBatchResponse, err error) {
|
||||
urlValues := url.Values{}
|
||||
if limit != nil {
|
||||
urlValues.Add("limit", fmt.Sprintf("%d", *limit))
|
||||
}
|
||||
if after != nil {
|
||||
urlValues.Add("after", *after)
|
||||
}
|
||||
encodedValues := ""
|
||||
if len(urlValues) > 0 {
|
||||
encodedValues = "?" + urlValues.Encode()
|
||||
}
|
||||
|
||||
urlSuffix := fmt.Sprintf("%s%s", batchesSuffix, encodedValues)
|
||||
req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
|
||||
498
vendor/github.com/sashabaranov/go-openai/chat.go
generated
vendored
Normal file
@@ -0,0 +1,498 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/http"
|
||||
|
||||
"github.com/sashabaranov/go-openai/jsonschema"
|
||||
)
|
||||
|
||||
// Chat message role defined by the OpenAI API.
|
||||
const (
|
||||
ChatMessageRoleSystem = "system"
|
||||
ChatMessageRoleUser = "user"
|
||||
ChatMessageRoleAssistant = "assistant"
|
||||
ChatMessageRoleFunction = "function"
|
||||
ChatMessageRoleTool = "tool"
|
||||
ChatMessageRoleDeveloper = "developer"
|
||||
)
|
||||
|
||||
const chatCompletionsSuffix = "/chat/completions"
|
||||
|
||||
var (
|
||||
ErrChatCompletionInvalidModel = errors.New("this model is not supported with this method, please use CreateCompletion client method instead") //nolint:lll
|
||||
ErrChatCompletionStreamNotSupported = errors.New("streaming is not supported with this method, please use CreateChatCompletionStream") //nolint:lll
|
||||
ErrContentFieldsMisused = errors.New("can't use both Content and MultiContent properties simultaneously")
|
||||
)
|
||||
|
||||
type Hate struct {
|
||||
Filtered bool `json:"filtered"`
|
||||
Severity string `json:"severity,omitempty"`
|
||||
}
|
||||
type SelfHarm struct {
|
||||
Filtered bool `json:"filtered"`
|
||||
Severity string `json:"severity,omitempty"`
|
||||
}
|
||||
type Sexual struct {
|
||||
Filtered bool `json:"filtered"`
|
||||
Severity string `json:"severity,omitempty"`
|
||||
}
|
||||
type Violence struct {
|
||||
Filtered bool `json:"filtered"`
|
||||
Severity string `json:"severity,omitempty"`
|
||||
}
|
||||
|
||||
type JailBreak struct {
|
||||
Filtered bool `json:"filtered"`
|
||||
Detected bool `json:"detected"`
|
||||
}
|
||||
|
||||
type Profanity struct {
|
||||
Filtered bool `json:"filtered"`
|
||||
Detected bool `json:"detected"`
|
||||
}
|
||||
|
||||
type ContentFilterResults struct {
|
||||
Hate Hate `json:"hate,omitempty"`
|
||||
SelfHarm SelfHarm `json:"self_harm,omitempty"`
|
||||
Sexual Sexual `json:"sexual,omitempty"`
|
||||
Violence Violence `json:"violence,omitempty"`
|
||||
JailBreak JailBreak `json:"jailbreak,omitempty"`
|
||||
Profanity Profanity `json:"profanity,omitempty"`
|
||||
}
|
||||
|
||||
type PromptAnnotation struct {
|
||||
PromptIndex int `json:"prompt_index,omitempty"`
|
||||
ContentFilterResults ContentFilterResults `json:"content_filter_results,omitempty"`
|
||||
}
|
||||
|
||||
type ImageURLDetail string
|
||||
|
||||
const (
|
||||
ImageURLDetailHigh ImageURLDetail = "high"
|
||||
ImageURLDetailLow ImageURLDetail = "low"
|
||||
ImageURLDetailAuto ImageURLDetail = "auto"
|
||||
)
|
||||
|
||||
type ChatMessageImageURL struct {
|
||||
URL string `json:"url,omitempty"`
|
||||
Detail ImageURLDetail `json:"detail,omitempty"`
|
||||
}
|
||||
|
||||
type ChatMessagePartType string
|
||||
|
||||
const (
|
||||
ChatMessagePartTypeText ChatMessagePartType = "text"
|
||||
ChatMessagePartTypeImageURL ChatMessagePartType = "image_url"
|
||||
)
|
||||
|
||||
type ChatMessagePart struct {
|
||||
Type ChatMessagePartType `json:"type,omitempty"`
|
||||
Text string `json:"text,omitempty"`
|
||||
ImageURL *ChatMessageImageURL `json:"image_url,omitempty"`
|
||||
}
|
||||
|
||||
type ChatCompletionMessage struct {
|
||||
Role string `json:"role"`
|
||||
Content string `json:"content,omitempty"`
|
||||
Refusal string `json:"refusal,omitempty"`
|
||||
MultiContent []ChatMessagePart
|
||||
|
||||
// This property isn't in the official documentation, but it's in
|
||||
// the documentation for the official library for python:
|
||||
// - https://github.com/openai/openai-python/blob/main/chatml.md
|
||||
// - https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
|
||||
Name string `json:"name,omitempty"`
|
||||
|
||||
// This property is used for the "reasoning" feature supported by deepseek-reasoner
|
||||
// which is not in the official documentation.
|
||||
// the doc from deepseek:
|
||||
// - https://api-docs.deepseek.com/api/create-chat-completion#responses
|
||||
ReasoningContent string `json:"reasoning_content,omitempty"`
|
||||
|
||||
FunctionCall *FunctionCall `json:"function_call,omitempty"`
|
||||
|
||||
// For Role=assistant prompts this may be set to the tool calls generated by the model, such as function calls.
|
||||
ToolCalls []ToolCall `json:"tool_calls,omitempty"`
|
||||
|
||||
// For Role=tool prompts this should be set to the ID given in the assistant's prior request to call a tool.
|
||||
ToolCallID string `json:"tool_call_id,omitempty"`
|
||||
}
|
||||
|
||||
func (m ChatCompletionMessage) MarshalJSON() ([]byte, error) {
|
||||
if m.Content != "" && m.MultiContent != nil {
|
||||
return nil, ErrContentFieldsMisused
|
||||
}
|
||||
if len(m.MultiContent) > 0 {
|
||||
msg := struct {
|
||||
Role string `json:"role"`
|
||||
Content string `json:"-"`
|
||||
Refusal string `json:"refusal,omitempty"`
|
||||
MultiContent []ChatMessagePart `json:"content,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
ReasoningContent string `json:"reasoning_content,omitempty"`
|
||||
FunctionCall *FunctionCall `json:"function_call,omitempty"`
|
||||
ToolCalls []ToolCall `json:"tool_calls,omitempty"`
|
||||
ToolCallID string `json:"tool_call_id,omitempty"`
|
||||
}(m)
|
||||
return json.Marshal(msg)
|
||||
}
|
||||
|
||||
msg := struct {
|
||||
Role string `json:"role"`
|
||||
Content string `json:"content,omitempty"`
|
||||
Refusal string `json:"refusal,omitempty"`
|
||||
MultiContent []ChatMessagePart `json:"-"`
|
||||
Name string `json:"name,omitempty"`
|
||||
ReasoningContent string `json:"reasoning_content,omitempty"`
|
||||
FunctionCall *FunctionCall `json:"function_call,omitempty"`
|
||||
ToolCalls []ToolCall `json:"tool_calls,omitempty"`
|
||||
ToolCallID string `json:"tool_call_id,omitempty"`
|
||||
}(m)
|
||||
return json.Marshal(msg)
|
||||
}
|
||||
|
||||
func (m *ChatCompletionMessage) UnmarshalJSON(bs []byte) error {
|
||||
msg := struct {
|
||||
Role string `json:"role"`
|
||||
Content string `json:"content"`
|
||||
Refusal string `json:"refusal,omitempty"`
|
||||
MultiContent []ChatMessagePart
|
||||
Name string `json:"name,omitempty"`
|
||||
ReasoningContent string `json:"reasoning_content,omitempty"`
|
||||
FunctionCall *FunctionCall `json:"function_call,omitempty"`
|
||||
ToolCalls []ToolCall `json:"tool_calls,omitempty"`
|
||||
ToolCallID string `json:"tool_call_id,omitempty"`
|
||||
}{}
|
||||
|
||||
if err := json.Unmarshal(bs, &msg); err == nil {
|
||||
*m = ChatCompletionMessage(msg)
|
||||
return nil
|
||||
}
|
||||
multiMsg := struct {
|
||||
Role string `json:"role"`
|
||||
Content string
|
||||
Refusal string `json:"refusal,omitempty"`
|
||||
MultiContent []ChatMessagePart `json:"content"`
|
||||
Name string `json:"name,omitempty"`
|
||||
ReasoningContent string `json:"reasoning_content,omitempty"`
|
||||
FunctionCall *FunctionCall `json:"function_call,omitempty"`
|
||||
ToolCalls []ToolCall `json:"tool_calls,omitempty"`
|
||||
ToolCallID string `json:"tool_call_id,omitempty"`
|
||||
}{}
|
||||
if err := json.Unmarshal(bs, &multiMsg); err != nil {
|
||||
return err
|
||||
}
|
||||
*m = ChatCompletionMessage(multiMsg)
|
||||
return nil
|
||||
}
|
||||
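// Example (illustrative sketch, not part of the upstream source): Content and
// MultiContent are mutually exclusive; a vision-style message sets MultiContent
// with text and image_url parts, which the marshaller above emits under the
// "content" key. The image URL is a placeholder.
func exampleVisionMessage() ChatCompletionMessage {
	return ChatCompletionMessage{
		Role: ChatMessageRoleUser,
		MultiContent: []ChatMessagePart{
			{Type: ChatMessagePartTypeText, Text: "What is shown in this image?"},
			{
				Type: ChatMessagePartTypeImageURL,
				ImageURL: &ChatMessageImageURL{
					URL:    "https://example.com/chart.png",
					Detail: ImageURLDetailLow,
				},
			},
		},
	}
}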
|
||||
type ToolCall struct {
|
||||
// Index is not nil only in chat completion chunk object
|
||||
Index *int `json:"index,omitempty"`
|
||||
ID string `json:"id,omitempty"`
|
||||
Type ToolType `json:"type"`
|
||||
Function FunctionCall `json:"function"`
|
||||
}
|
||||
|
||||
type FunctionCall struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
// call function with arguments in JSON format
|
||||
Arguments string `json:"arguments,omitempty"`
|
||||
}
|
||||
|
||||
type ChatCompletionResponseFormatType string
|
||||
|
||||
const (
|
||||
ChatCompletionResponseFormatTypeJSONObject ChatCompletionResponseFormatType = "json_object"
|
||||
ChatCompletionResponseFormatTypeJSONSchema ChatCompletionResponseFormatType = "json_schema"
|
||||
ChatCompletionResponseFormatTypeText ChatCompletionResponseFormatType = "text"
|
||||
)
|
||||
|
||||
type ChatCompletionResponseFormat struct {
|
||||
Type ChatCompletionResponseFormatType `json:"type,omitempty"`
|
||||
JSONSchema *ChatCompletionResponseFormatJSONSchema `json:"json_schema,omitempty"`
|
||||
}
|
||||
|
||||
type ChatCompletionResponseFormatJSONSchema struct {
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description,omitempty"`
|
||||
Schema json.Marshaler `json:"schema"`
|
||||
Strict bool `json:"strict"`
|
||||
}
|
||||
|
||||
func (r *ChatCompletionResponseFormatJSONSchema) UnmarshalJSON(data []byte) error {
|
||||
type rawJSONSchema struct {
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description,omitempty"`
|
||||
Schema json.RawMessage `json:"schema"`
|
||||
Strict bool `json:"strict"`
|
||||
}
|
||||
var raw rawJSONSchema
|
||||
if err := json.Unmarshal(data, &raw); err != nil {
|
||||
return err
|
||||
}
|
||||
r.Name = raw.Name
|
||||
r.Description = raw.Description
|
||||
r.Strict = raw.Strict
|
||||
if len(raw.Schema) > 0 && string(raw.Schema) != "null" {
|
||||
var d jsonschema.Definition
|
||||
err := json.Unmarshal(raw.Schema, &d)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Schema = &d
|
||||
}
|
||||
return nil
|
||||
}
|
||||
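// Example (illustrative sketch, not part of the upstream source): requesting
// structured output via a JSON schema. json.RawMessage satisfies json.Marshaler,
// so a hand-written schema can be assigned to Schema directly; the jsonschema
// package's Definition type is an alternative. The schema name and fields are
// placeholders.
func exampleJSONSchemaResponseFormat() *ChatCompletionResponseFormat {
	schema := json.RawMessage(`{
		"type": "object",
		"properties": {
			"city":   {"type": "string"},
			"temp_c": {"type": "number"}
		},
		"required": ["city", "temp_c"],
		"additionalProperties": false
	}`)
	return &ChatCompletionResponseFormat{
		Type: ChatCompletionResponseFormatTypeJSONSchema,
		JSONSchema: &ChatCompletionResponseFormatJSONSchema{
			Name:   "weather_report",
			Schema: schema,
			Strict: true,
		},
	}
}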
|
||||
// ChatCompletionRequestExtensions contains third-party OpenAI API extensions
|
||||
// (e.g., vendor-specific implementations like vLLM).
|
||||
type ChatCompletionRequestExtensions struct {
|
||||
// GuidedChoice is a vLLM-specific extension that restricts the model's output
|
||||
// to one of the predefined string choices provided in this field. This feature
|
||||
// is used to constrain the model's responses to a controlled set of options,
|
||||
// ensuring predictable and consistent outputs in scenarios where specific
|
||||
// choices are required.
|
||||
GuidedChoice []string `json:"guided_choice,omitempty"`
|
||||
}
|
||||
|
||||
// ChatCompletionRequest represents a request structure for chat completion API.
|
||||
type ChatCompletionRequest struct {
|
||||
Model string `json:"model"`
|
||||
Messages []ChatCompletionMessage `json:"messages"`
|
||||
// MaxTokens The maximum number of tokens that can be generated in the chat completion.
|
||||
// This value can be used to control costs for text generated via API.
|
||||
// Deprecated: use MaxCompletionTokens. Not compatible with o1-series models.
|
||||
// refs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens
|
||||
MaxTokens int `json:"max_tokens,omitempty"`
|
||||
// MaxCompletionTokens An upper bound for the number of tokens that can be generated for a completion,
|
||||
// including visible output tokens and reasoning tokens https://platform.openai.com/docs/guides/reasoning
|
||||
MaxCompletionTokens int `json:"max_completion_tokens,omitempty"`
|
||||
Temperature float32 `json:"temperature,omitempty"`
|
||||
TopP float32 `json:"top_p,omitempty"`
|
||||
N int `json:"n,omitempty"`
|
||||
Stream bool `json:"stream,omitempty"`
|
||||
Stop []string `json:"stop,omitempty"`
|
||||
PresencePenalty float32 `json:"presence_penalty,omitempty"`
|
||||
ResponseFormat *ChatCompletionResponseFormat `json:"response_format,omitempty"`
|
||||
Seed *int `json:"seed,omitempty"`
|
||||
FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`
|
||||
	// LogitBias must be keyed by token ID (as assigned by the tokenizer), not by word string.
|
||||
// incorrect: `"logit_bias":{"You": 6}`, correct: `"logit_bias":{"1639": 6}`
|
||||
// refs: https://platform.openai.com/docs/api-reference/chat/create#chat/create-logit_bias
|
||||
LogitBias map[string]int `json:"logit_bias,omitempty"`
|
||||
// LogProbs indicates whether to return log probabilities of the output tokens or not.
|
||||
// If true, returns the log probabilities of each output token returned in the content of message.
|
||||
// This option is currently not available on the gpt-4-vision-preview model.
|
||||
LogProbs bool `json:"logprobs,omitempty"`
|
||||
// TopLogProbs is an integer between 0 and 5 specifying the number of most likely tokens to return at each
|
||||
// token position, each with an associated log probability.
|
||||
// logprobs must be set to true if this parameter is used.
|
||||
TopLogProbs int `json:"top_logprobs,omitempty"`
|
||||
User string `json:"user,omitempty"`
|
||||
// Deprecated: use Tools instead.
|
||||
Functions []FunctionDefinition `json:"functions,omitempty"`
|
||||
// Deprecated: use ToolChoice instead.
|
||||
FunctionCall any `json:"function_call,omitempty"`
|
||||
Tools []Tool `json:"tools,omitempty"`
|
||||
	// This can be either a string or a ToolChoice object.
|
||||
ToolChoice any `json:"tool_choice,omitempty"`
|
||||
// Options for streaming response. Only set this when you set stream: true.
|
||||
StreamOptions *StreamOptions `json:"stream_options,omitempty"`
|
||||
	// Disable the default behavior of parallel tool calls by setting it to false.
|
||||
ParallelToolCalls any `json:"parallel_tool_calls,omitempty"`
|
||||
// Store can be set to true to store the output of this completion request for use in distillations and evals.
|
||||
// https://platform.openai.com/docs/api-reference/chat/create#chat-create-store
|
||||
Store bool `json:"store,omitempty"`
|
||||
// Controls effort on reasoning for reasoning models. It can be set to "low", "medium", or "high".
|
||||
ReasoningEffort string `json:"reasoning_effort,omitempty"`
|
||||
// Metadata to store with the completion.
|
||||
Metadata map[string]string `json:"metadata,omitempty"`
|
||||
// Configuration for a predicted output.
|
||||
Prediction *Prediction `json:"prediction,omitempty"`
|
||||
// ChatTemplateKwargs provides a way to add non-standard parameters to the request body.
|
||||
// Additional kwargs to pass to the template renderer. Will be accessible by the chat template.
|
||||
// Such as think mode for qwen3. "chat_template_kwargs": {"enable_thinking": false}
|
||||
// https://qwen.readthedocs.io/en/latest/deployment/vllm.html#thinking-non-thinking-modes
|
||||
ChatTemplateKwargs map[string]any `json:"chat_template_kwargs,omitempty"`
|
||||
// Specifies the latency tier to use for processing the request.
|
||||
ServiceTier ServiceTier `json:"service_tier,omitempty"`
|
||||
// Verbosity determines how many output tokens are generated. Lowering the number of
|
||||
// tokens reduces overall latency. It can be set to "low", "medium", or "high".
|
||||
// Note: This field is only confirmed to work with gpt-5, gpt-5-mini and gpt-5-nano.
|
||||
// Also, it is not in the API reference of chat completion at the time of writing,
|
||||
// though it is supported by the API.
|
||||
Verbosity string `json:"verbosity,omitempty"`
|
||||
// A stable identifier used to help detect users of your application that may be violating OpenAI's usage policies.
|
||||
// The IDs should be a string that uniquely identifies each user.
|
||||
// We recommend hashing their username or email address, in order to avoid sending us any identifying information.
|
||||
// https://platform.openai.com/docs/api-reference/chat/create#chat_create-safety_identifier
|
||||
SafetyIdentifier string `json:"safety_identifier,omitempty"`
|
||||
// Embedded struct for non-OpenAI extensions
|
||||
ChatCompletionRequestExtensions
|
||||
}
|
||||
|
||||
type StreamOptions struct {
|
||||
// If set, an additional chunk will be streamed before the data: [DONE] message.
|
||||
// The usage field on this chunk shows the token usage statistics for the entire request,
|
||||
// and the choices field will always be an empty array.
|
||||
// All other chunks will also include a usage field, but with a null value.
|
||||
IncludeUsage bool `json:"include_usage,omitempty"`
|
||||
}
|
||||
|
||||
type ToolType string
|
||||
|
||||
const (
|
||||
ToolTypeFunction ToolType = "function"
|
||||
)
|
||||
|
||||
type Tool struct {
|
||||
Type ToolType `json:"type"`
|
||||
Function *FunctionDefinition `json:"function,omitempty"`
|
||||
}
|
||||
|
||||
type ToolChoice struct {
|
||||
Type ToolType `json:"type"`
|
||||
Function ToolFunction `json:"function,omitempty"`
|
||||
}
|
||||
|
||||
type ToolFunction struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type FunctionDefinition struct {
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description,omitempty"`
|
||||
Strict bool `json:"strict,omitempty"`
|
||||
// Parameters is an object describing the function.
|
||||
// You can pass json.RawMessage to describe the schema,
|
||||
// or you can pass in a struct which serializes to the proper JSON schema.
|
||||
// The jsonschema package is provided for convenience, but you should
|
||||
// consider another specialized library if you require more complex schemas.
|
||||
Parameters any `json:"parameters"`
|
||||
}
|
||||
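// Example (illustrative sketch, not part of the upstream source): declaring a
// single function tool. Parameters accepts anything that serialises to a JSON
// schema; a raw schema is used here to stay self-contained. The function name
// and schema fields are placeholders.
func exampleWeatherTool() Tool {
	return Tool{
		Type: ToolTypeFunction,
		Function: &FunctionDefinition{
			Name:        "get_current_weather",
			Description: "Get the current weather for a city.",
			Parameters: json.RawMessage(`{
				"type": "object",
				"properties": {"city": {"type": "string"}},
				"required": ["city"]
			}`),
		},
	}
}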
|
||||
// Deprecated: use FunctionDefinition instead.
|
||||
type FunctionDefine = FunctionDefinition
|
||||
|
||||
type TopLogProbs struct {
|
||||
Token string `json:"token"`
|
||||
LogProb float64 `json:"logprob"`
|
||||
Bytes []byte `json:"bytes,omitempty"`
|
||||
}
|
||||
|
||||
// LogProb represents the probability information for a token.
|
||||
type LogProb struct {
|
||||
Token string `json:"token"`
|
||||
LogProb float64 `json:"logprob"`
|
||||
Bytes []byte `json:"bytes,omitempty"` // Omitting the field if it is null
|
||||
// TopLogProbs is a list of the most likely tokens and their log probability, at this token position.
|
||||
// In rare cases, there may be fewer than the number of requested top_logprobs returned.
|
||||
TopLogProbs []TopLogProbs `json:"top_logprobs"`
|
||||
}
|
||||
|
||||
// LogProbs is the top-level structure containing the log probability information.
|
||||
type LogProbs struct {
|
||||
// Content is a list of message content tokens with log probability information.
|
||||
Content []LogProb `json:"content"`
|
||||
}
|
||||
|
||||
type Prediction struct {
|
||||
Content string `json:"content"`
|
||||
Type string `json:"type"`
|
||||
}
|
||||
|
||||
type FinishReason string
|
||||
|
||||
const (
|
||||
FinishReasonStop FinishReason = "stop"
|
||||
FinishReasonLength FinishReason = "length"
|
||||
FinishReasonFunctionCall FinishReason = "function_call"
|
||||
FinishReasonToolCalls FinishReason = "tool_calls"
|
||||
FinishReasonContentFilter FinishReason = "content_filter"
|
||||
FinishReasonNull FinishReason = "null"
|
||||
)
|
||||
|
||||
type ServiceTier string
|
||||
|
||||
const (
|
||||
ServiceTierAuto ServiceTier = "auto"
|
||||
ServiceTierDefault ServiceTier = "default"
|
||||
ServiceTierFlex ServiceTier = "flex"
|
||||
ServiceTierPriority ServiceTier = "priority"
|
||||
)
|
||||
|
||||
func (r FinishReason) MarshalJSON() ([]byte, error) {
|
||||
if r == FinishReasonNull || r == "" {
|
||||
return []byte("null"), nil
|
||||
}
|
||||
return []byte(`"` + string(r) + `"`), nil // best effort to not break future API changes
|
||||
}
|
||||
|
||||
type ChatCompletionChoice struct {
|
||||
Index int `json:"index"`
|
||||
Message ChatCompletionMessage `json:"message"`
|
||||
// FinishReason
|
||||
// stop: API returned complete message,
|
||||
// or a message terminated by one of the stop sequences provided via the stop parameter
|
||||
// length: Incomplete model output due to max_tokens parameter or token limit
|
||||
// function_call: The model decided to call a function
|
||||
// content_filter: Omitted content due to a flag from our content filters
|
||||
// null: API response still in progress or incomplete
|
||||
FinishReason FinishReason `json:"finish_reason"`
|
||||
LogProbs *LogProbs `json:"logprobs,omitempty"`
|
||||
ContentFilterResults ContentFilterResults `json:"content_filter_results,omitempty"`
|
||||
}
|
||||
|
||||
// ChatCompletionResponse represents a response structure for chat completion API.
|
||||
type ChatCompletionResponse struct {
|
||||
ID string `json:"id"`
|
||||
Object string `json:"object"`
|
||||
Created int64 `json:"created"`
|
||||
Model string `json:"model"`
|
||||
Choices []ChatCompletionChoice `json:"choices"`
|
||||
Usage Usage `json:"usage"`
|
||||
SystemFingerprint string `json:"system_fingerprint"`
|
||||
PromptFilterResults []PromptFilterResult `json:"prompt_filter_results,omitempty"`
|
||||
ServiceTier ServiceTier `json:"service_tier,omitempty"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
// CreateChatCompletion — API call to Create a completion for the chat message.
|
||||
func (c *Client) CreateChatCompletion(
|
||||
ctx context.Context,
|
||||
request ChatCompletionRequest,
|
||||
) (response ChatCompletionResponse, err error) {
|
||||
if request.Stream {
|
||||
err = ErrChatCompletionStreamNotSupported
|
||||
return
|
||||
}
|
||||
|
||||
urlSuffix := chatCompletionsSuffix
|
||||
if !checkEndpointSupportsModel(urlSuffix, request.Model) {
|
||||
err = ErrChatCompletionInvalidModel
|
||||
return
|
||||
}
|
||||
|
||||
reasoningValidator := NewReasoningValidator()
|
||||
if err = reasoningValidator.Validate(request); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
req, err := c.newRequest(
|
||||
ctx,
|
||||
http.MethodPost,
|
||||
c.fullURL(urlSuffix, withModel(request.Model)),
|
||||
withBody(request),
|
||||
)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
|
||||
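// Example (illustrative sketch, not part of the upstream source): a minimal
// non-streaming chat completion call. The model name is a placeholder, and the
// empty-choices check is only defensive.
func exampleAskOnce(ctx context.Context, c *Client, question string) (string, error) {
	resp, err := c.CreateChatCompletion(ctx, ChatCompletionRequest{
		Model:    "gpt-4o-mini",
		Messages: []ChatCompletionMessage{{Role: ChatMessageRoleUser, Content: question}},
	})
	if err != nil {
		return "", err
	}
	if len(resp.Choices) == 0 {
		return "", errors.New("no choices returned")
	}
	return resp.Choices[0].Message.Content, nil
}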
112
vendor/github.com/sashabaranov/go-openai/chat_stream.go
generated
vendored
Normal file
@@ -0,0 +1,112 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
type ChatCompletionStreamChoiceDelta struct {
|
||||
Content string `json:"content,omitempty"`
|
||||
Role string `json:"role,omitempty"`
|
||||
FunctionCall *FunctionCall `json:"function_call,omitempty"`
|
||||
ToolCalls []ToolCall `json:"tool_calls,omitempty"`
|
||||
Refusal string `json:"refusal,omitempty"`
|
||||
|
||||
// This property is used for the "reasoning" feature supported by deepseek-reasoner
|
||||
// which is not in the official documentation.
|
||||
// the doc from deepseek:
|
||||
// - https://api-docs.deepseek.com/api/create-chat-completion#responses
|
||||
ReasoningContent string `json:"reasoning_content,omitempty"`
|
||||
}
|
||||
|
||||
type ChatCompletionStreamChoiceLogprobs struct {
|
||||
Content []ChatCompletionTokenLogprob `json:"content,omitempty"`
|
||||
Refusal []ChatCompletionTokenLogprob `json:"refusal,omitempty"`
|
||||
}
|
||||
|
||||
type ChatCompletionTokenLogprob struct {
|
||||
Token string `json:"token"`
|
||||
Bytes []int64 `json:"bytes,omitempty"`
|
||||
Logprob float64 `json:"logprob,omitempty"`
|
||||
TopLogprobs []ChatCompletionTokenLogprobTopLogprob `json:"top_logprobs"`
|
||||
}
|
||||
|
||||
type ChatCompletionTokenLogprobTopLogprob struct {
|
||||
Token string `json:"token"`
|
||||
Bytes []int64 `json:"bytes"`
|
||||
Logprob float64 `json:"logprob"`
|
||||
}
|
||||
|
||||
type ChatCompletionStreamChoice struct {
|
||||
Index int `json:"index"`
|
||||
Delta ChatCompletionStreamChoiceDelta `json:"delta"`
|
||||
Logprobs *ChatCompletionStreamChoiceLogprobs `json:"logprobs,omitempty"`
|
||||
FinishReason FinishReason `json:"finish_reason"`
|
||||
ContentFilterResults ContentFilterResults `json:"content_filter_results,omitempty"`
|
||||
}
|
||||
|
||||
type PromptFilterResult struct {
|
||||
Index int `json:"index"`
|
||||
ContentFilterResults ContentFilterResults `json:"content_filter_results,omitempty"`
|
||||
}
|
||||
|
||||
type ChatCompletionStreamResponse struct {
|
||||
ID string `json:"id"`
|
||||
Object string `json:"object"`
|
||||
Created int64 `json:"created"`
|
||||
Model string `json:"model"`
|
||||
Choices []ChatCompletionStreamChoice `json:"choices"`
|
||||
SystemFingerprint string `json:"system_fingerprint"`
|
||||
PromptAnnotations []PromptAnnotation `json:"prompt_annotations,omitempty"`
|
||||
PromptFilterResults []PromptFilterResult `json:"prompt_filter_results,omitempty"`
|
||||
// An optional field that will only be present when you set stream_options: {"include_usage": true} in your request.
|
||||
// When present, it contains a null value except for the last chunk which contains the token usage statistics
|
||||
// for the entire request.
|
||||
Usage *Usage `json:"usage,omitempty"`
|
||||
}
|
||||
|
||||
// ChatCompletionStream
|
||||
// Note: Perhaps it is more elegant to abstract Stream using generics.
|
||||
type ChatCompletionStream struct {
|
||||
*streamReader[ChatCompletionStreamResponse]
|
||||
}
|
||||
|
||||
// CreateChatCompletionStream — API call to create a chat completion w/ streaming
|
||||
// support. It sets whether to stream back partial progress. If set, tokens will be
|
||||
// sent as data-only server-sent events as they become available, with the
|
||||
// stream terminated by a data: [DONE] message.
|
||||
func (c *Client) CreateChatCompletionStream(
|
||||
ctx context.Context,
|
||||
request ChatCompletionRequest,
|
||||
) (stream *ChatCompletionStream, err error) {
|
||||
urlSuffix := chatCompletionsSuffix
|
||||
if !checkEndpointSupportsModel(urlSuffix, request.Model) {
|
||||
err = ErrChatCompletionInvalidModel
|
||||
return
|
||||
}
|
||||
|
||||
request.Stream = true
|
||||
reasoningValidator := NewReasoningValidator()
|
||||
if err = reasoningValidator.Validate(request); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
req, err := c.newRequest(
|
||||
ctx,
|
||||
http.MethodPost,
|
||||
c.fullURL(urlSuffix, withModel(request.Model)),
|
||||
withBody(request),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := sendRequestStream[ChatCompletionStreamResponse](c, req)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
stream = &ChatCompletionStream{
|
||||
streamReader: resp,
|
||||
}
|
||||
return
|
||||
}
|
||||
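// Example (illustrative sketch, not part of the upstream source): consuming a
// streamed completion. Recv and Close come from the embedded streamReader
// defined elsewhere in this package, and io.EOF marks the normal end of the
// stream. Assumes "errors", "fmt" and "io" are imported; the model name is a
// placeholder.
func exampleStreamToStdout(ctx context.Context, c *Client, prompt string) error {
	stream, err := c.CreateChatCompletionStream(ctx, ChatCompletionRequest{
		Model:    "gpt-4o-mini",
		Messages: []ChatCompletionMessage{{Role: ChatMessageRoleUser, Content: prompt}},
	})
	if err != nil {
		return err
	}
	defer stream.Close()

	for {
		chunk, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			return nil
		}
		if err != nil {
			return err
		}
		if len(chunk.Choices) > 0 {
			fmt.Print(chunk.Choices[0].Delta.Content)
		}
	}
}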
341
vendor/github.com/sashabaranov/go-openai/client.go
generated
vendored
Normal file
@@ -0,0 +1,341 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
utils "github.com/sashabaranov/go-openai/internal"
|
||||
)
|
||||
|
||||
// Client is the OpenAI API client.
|
||||
type Client struct {
|
||||
config ClientConfig
|
||||
|
||||
requestBuilder utils.RequestBuilder
|
||||
createFormBuilder func(io.Writer) utils.FormBuilder
|
||||
}
|
||||
|
||||
type Response interface {
|
||||
SetHeader(http.Header)
|
||||
}
|
||||
|
||||
type httpHeader http.Header
|
||||
|
||||
func (h *httpHeader) SetHeader(header http.Header) {
|
||||
*h = httpHeader(header)
|
||||
}
|
||||
|
||||
func (h *httpHeader) Header() http.Header {
|
||||
return http.Header(*h)
|
||||
}
|
||||
|
||||
func (h *httpHeader) GetRateLimitHeaders() RateLimitHeaders {
|
||||
return newRateLimitHeaders(h.Header())
|
||||
}
|
||||
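// Example (illustrative sketch, not part of the upstream source): every typed
// response embeds httpHeader, so the raw response headers and the parsed
// rate-limit headers are available after a call. "x-request-id" is shown as a
// typical header name, not a guaranteed one.
func exampleInspectResponseHeaders(resp ChatCompletionResponse) (string, RateLimitHeaders) {
	return resp.Header().Get("x-request-id"), resp.GetRateLimitHeaders()
}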
|
||||
type RawResponse struct {
|
||||
io.ReadCloser
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
// NewClient creates new OpenAI API client.
|
||||
func NewClient(authToken string) *Client {
|
||||
config := DefaultConfig(authToken)
|
||||
return NewClientWithConfig(config)
|
||||
}
|
||||
|
||||
// NewClientWithConfig creates new OpenAI API client for specified config.
|
||||
func NewClientWithConfig(config ClientConfig) *Client {
|
||||
return &Client{
|
||||
config: config,
|
||||
requestBuilder: utils.NewRequestBuilder(),
|
||||
createFormBuilder: func(body io.Writer) utils.FormBuilder {
|
||||
return utils.NewFormBuilder(body)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// NewOrgClient creates new OpenAI API client for specified Organization ID.
|
||||
//
|
||||
// Deprecated: Please use NewClientWithConfig.
|
||||
func NewOrgClient(authToken, org string) *Client {
|
||||
config := DefaultConfig(authToken)
|
||||
config.OrgID = org
|
||||
return NewClientWithConfig(config)
|
||||
}
|
||||
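// Example (illustrative sketch, not part of the upstream source): pointing the
// client at an OpenAI-compatible endpoint by adjusting the config returned by
// DefaultConfig. BaseURL and HTTPClient are referenced elsewhere in this file as
// config fields; the URL and timeout are placeholders. Assumes "time" is imported.
func exampleCustomEndpointClient(authToken string) *Client {
	config := DefaultConfig(authToken)
	config.BaseURL = "http://localhost:8000/v1" // e.g. a local vLLM or proxy deployment
	config.HTTPClient = &http.Client{Timeout: 60 * time.Second}
	return NewClientWithConfig(config)
}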
|
||||
type requestOptions struct {
|
||||
body any
|
||||
header http.Header
|
||||
}
|
||||
|
||||
type requestOption func(*requestOptions)
|
||||
|
||||
func withBody(body any) requestOption {
|
||||
return func(args *requestOptions) {
|
||||
args.body = body
|
||||
}
|
||||
}
|
||||
|
||||
func withExtraBody(extraBody map[string]any) requestOption {
|
||||
return func(args *requestOptions) {
|
||||
// Assert that args.body is a map[string]any.
|
||||
bodyMap, ok := args.body.(map[string]any)
|
||||
if ok {
|
||||
			// If args.body is a map[string]any, merge the extraBody fields into it;
|
||||
			// otherwise only the fields in the request struct are kept.
|
||||
for key, value := range extraBody {
|
||||
bodyMap[key] = value
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func withContentType(contentType string) requestOption {
|
||||
return func(args *requestOptions) {
|
||||
args.header.Set("Content-Type", contentType)
|
||||
}
|
||||
}
|
||||
|
||||
func withBetaAssistantVersion(version string) requestOption {
|
||||
return func(args *requestOptions) {
|
||||
args.header.Set("OpenAI-Beta", fmt.Sprintf("assistants=%s", version))
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) newRequest(ctx context.Context, method, url string, setters ...requestOption) (*http.Request, error) {
|
||||
// Default Options
|
||||
args := &requestOptions{
|
||||
body: nil,
|
||||
header: make(http.Header),
|
||||
}
|
||||
for _, setter := range setters {
|
||||
setter(args)
|
||||
}
|
||||
req, err := c.requestBuilder.Build(ctx, method, url, args.body, args.header)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.setCommonHeaders(req)
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func (c *Client) sendRequest(req *http.Request, v Response) error {
|
||||
req.Header.Set("Accept", "application/json")
|
||||
|
||||
	// Check whether Content-Type is already set; the Upload Files API requires
|
||||
// Content-Type == multipart/form-data
|
||||
contentType := req.Header.Get("Content-Type")
|
||||
if contentType == "" {
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
}
|
||||
|
||||
res, err := c.config.HTTPClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer res.Body.Close()
|
||||
|
||||
if v != nil {
|
||||
v.SetHeader(res.Header)
|
||||
}
|
||||
|
||||
if isFailureStatusCode(res) {
|
||||
return c.handleErrorResp(res)
|
||||
}
|
||||
|
||||
return decodeResponse(res.Body, v)
|
||||
}
|
||||
|
||||
func (c *Client) sendRequestRaw(req *http.Request) (response RawResponse, err error) {
|
||||
resp, err := c.config.HTTPClient.Do(req) //nolint:bodyclose // body should be closed by outer function
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if isFailureStatusCode(resp) {
|
||||
err = c.handleErrorResp(resp)
|
||||
return
|
||||
}
|
||||
|
||||
response.SetHeader(resp.Header)
|
||||
response.ReadCloser = resp.Body
|
||||
return
|
||||
}
|
||||
|
||||
func sendRequestStream[T streamable](client *Client, req *http.Request) (*streamReader[T], error) {
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Accept", "text/event-stream")
|
||||
req.Header.Set("Cache-Control", "no-cache")
|
||||
req.Header.Set("Connection", "keep-alive")
|
||||
|
||||
resp, err := client.config.HTTPClient.Do(req) //nolint:bodyclose // body is closed in stream.Close()
|
||||
if err != nil {
|
||||
return new(streamReader[T]), err
|
||||
}
|
||||
if isFailureStatusCode(resp) {
|
||||
return new(streamReader[T]), client.handleErrorResp(resp)
|
||||
}
|
||||
return &streamReader[T]{
|
||||
emptyMessagesLimit: client.config.EmptyMessagesLimit,
|
||||
reader: bufio.NewReader(resp.Body),
|
||||
response: resp,
|
||||
errAccumulator: utils.NewErrorAccumulator(),
|
||||
unmarshaler: &utils.JSONUnmarshaler{},
|
||||
httpHeader: httpHeader(resp.Header),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *Client) setCommonHeaders(req *http.Request) {
|
||||
// https://learn.microsoft.com/en-us/azure/cognitive-services/openai/reference#authentication
|
||||
switch c.config.APIType {
|
||||
case APITypeAzure, APITypeCloudflareAzure:
|
||||
// Azure API Key authentication
|
||||
req.Header.Set(AzureAPIKeyHeader, c.config.authToken)
|
||||
case APITypeAnthropic:
|
||||
// https://docs.anthropic.com/en/api/versioning
|
||||
req.Header.Set("anthropic-version", c.config.APIVersion)
|
||||
case APITypeOpenAI, APITypeAzureAD:
|
||||
fallthrough
|
||||
default:
|
||||
if c.config.authToken != "" {
|
||||
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.config.authToken))
|
||||
}
|
||||
}
|
||||
|
||||
if c.config.OrgID != "" {
|
||||
req.Header.Set("OpenAI-Organization", c.config.OrgID)
|
||||
}
|
||||
}
|
||||
|
||||
func isFailureStatusCode(resp *http.Response) bool {
|
||||
return resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusBadRequest
|
||||
}
|
||||
|
||||
func decodeResponse(body io.Reader, v any) error {
|
||||
if v == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch o := v.(type) {
|
||||
case *string:
|
||||
return decodeString(body, o)
|
||||
case *audioTextResponse:
|
||||
return decodeString(body, &o.Text)
|
||||
default:
|
||||
return json.NewDecoder(body).Decode(v)
|
||||
}
|
||||
}
|
||||
|
||||
func decodeString(body io.Reader, output *string) error {
|
||||
b, err := io.ReadAll(body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*output = string(b)
|
||||
return nil
|
||||
}
|
||||
|
||||
type fullURLOptions struct {
|
||||
model string
|
||||
}
|
||||
|
||||
type fullURLOption func(*fullURLOptions)
|
||||
|
||||
func withModel(model string) fullURLOption {
|
||||
return func(args *fullURLOptions) {
|
||||
args.model = model
|
||||
}
|
||||
}
|
||||
|
||||
var azureDeploymentsEndpoints = []string{
|
||||
"/completions",
|
||||
"/embeddings",
|
||||
"/chat/completions",
|
||||
"/audio/transcriptions",
|
||||
"/audio/translations",
|
||||
"/audio/speech",
|
||||
"/images/generations",
|
||||
}
|
||||
|
||||
// fullURL returns full URL for request.
|
||||
func (c *Client) fullURL(suffix string, setters ...fullURLOption) string {
|
||||
baseURL := strings.TrimRight(c.config.BaseURL, "/")
|
||||
args := fullURLOptions{}
|
||||
for _, setter := range setters {
|
||||
setter(&args)
|
||||
}
|
||||
|
||||
if c.config.APIType == APITypeAzure || c.config.APIType == APITypeAzureAD {
|
||||
baseURL = c.baseURLWithAzureDeployment(baseURL, suffix, args.model)
|
||||
}
|
||||
|
||||
if c.config.APIVersion != "" {
|
||||
suffix = c.suffixWithAPIVersion(suffix)
|
||||
}
|
||||
return fmt.Sprintf("%s%s", baseURL, suffix)
|
||||
}
|
||||
|
||||
func (c *Client) suffixWithAPIVersion(suffix string) string {
|
||||
parsedSuffix, err := url.Parse(suffix)
|
||||
if err != nil {
|
||||
panic("failed to parse url suffix")
|
||||
}
|
||||
query := parsedSuffix.Query()
|
||||
query.Add("api-version", c.config.APIVersion)
|
||||
return fmt.Sprintf("%s?%s", parsedSuffix.Path, query.Encode())
|
||||
}
|
||||
|
||||
func (c *Client) baseURLWithAzureDeployment(baseURL, suffix, model string) (newBaseURL string) {
|
||||
baseURL = fmt.Sprintf("%s/%s", strings.TrimRight(baseURL, "/"), azureAPIPrefix)
|
||||
if containsSubstr(azureDeploymentsEndpoints, suffix) {
|
||||
azureDeploymentName := c.config.GetAzureDeploymentByModel(model)
|
||||
if azureDeploymentName == "" {
|
||||
azureDeploymentName = "UNKNOWN"
|
||||
}
|
||||
baseURL = fmt.Sprintf("%s/%s/%s", baseURL, azureDeploymentsPrefix, azureDeploymentName)
|
||||
}
|
||||
return baseURL
|
||||
}
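As a sketch of what the two helpers above assemble for an Azure deployment, with placeholder values:

package main

import "fmt"

func main() {
	// Placeholder Azure OpenAI values for illustration.
	base := "https://example.openai.azure.com" // ClientConfig.BaseURL
	deployment := "gpt-4o"                     // result of GetAzureDeploymentByModel
	suffix := "/chat/completions"              // one of azureDeploymentsEndpoints
	apiVersion := "2023-05-15"                 // ClientConfig.APIVersion

	fmt.Printf("%s/openai/deployments/%s%s?api-version=%s\n",
		base, deployment, suffix, apiVersion)
	// https://example.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-05-15
}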
|
||||
|
||||
func (c *Client) handleErrorResp(resp *http.Response) error {
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error, reading response body: %w", err)
|
||||
}
|
||||
var errRes ErrorResponse
|
||||
err = json.Unmarshal(body, &errRes)
|
||||
if err != nil || errRes.Error == nil {
|
||||
reqErr := &RequestError{
|
||||
HTTPStatus: resp.Status,
|
||||
HTTPStatusCode: resp.StatusCode,
|
||||
Err: err,
|
||||
Body: body,
|
||||
}
|
||||
if errRes.Error != nil {
|
||||
reqErr.Err = errRes.Error
|
||||
}
|
||||
return reqErr
|
||||
}
|
||||
|
||||
errRes.Error.HTTPStatus = resp.Status
|
||||
errRes.Error.HTTPStatusCode = resp.StatusCode
|
||||
return errRes.Error
|
||||
}
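Callers usually distinguish the two error types built here with errors.As. A minimal sketch, assuming a client, context, and request already exist (CreateChatCompletion is defined elsewhere in this package):

resp, err := client.CreateChatCompletion(ctx, request)
if err != nil {
	var apiErr *openai.APIError
	var reqErr *openai.RequestError
	switch {
	case errors.As(err, &apiErr):
		// The API returned a parseable error body.
		fmt.Println("API error:", apiErr.HTTPStatusCode, apiErr.Message)
	case errors.As(err, &reqErr):
		// The body could not be parsed as an ErrorResponse.
		fmt.Println("request error:", reqErr.HTTPStatusCode, string(reqErr.Body))
	default:
		fmt.Println(err)
	}
	return
}
_ = resp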
|
||||
|
||||
func containsSubstr(s []string, e string) bool {
|
||||
for _, v := range s {
|
||||
if strings.Contains(e, v) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
26
vendor/github.com/sashabaranov/go-openai/common.go
generated
vendored
Normal file
26
vendor/github.com/sashabaranov/go-openai/common.go
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
package openai
|
||||
|
||||
// common.go defines common types used throughout the OpenAI API.
|
||||
|
||||
// Usage Represents the total token usage per request to OpenAI.
|
||||
type Usage struct {
|
||||
PromptTokens int `json:"prompt_tokens"`
|
||||
CompletionTokens int `json:"completion_tokens"`
|
||||
TotalTokens int `json:"total_tokens"`
|
||||
PromptTokensDetails *PromptTokensDetails `json:"prompt_tokens_details"`
|
||||
CompletionTokensDetails *CompletionTokensDetails `json:"completion_tokens_details"`
|
||||
}
|
||||
|
||||
// CompletionTokensDetails Breakdown of tokens used in a completion.
|
||||
type CompletionTokensDetails struct {
|
||||
AudioTokens int `json:"audio_tokens"`
|
||||
ReasoningTokens int `json:"reasoning_tokens"`
|
||||
AcceptedPredictionTokens int `json:"accepted_prediction_tokens"`
|
||||
RejectedPredictionTokens int `json:"rejected_prediction_tokens"`
|
||||
}
|
||||
|
||||
// PromptTokensDetails Breakdown of tokens used in the prompt.
|
||||
type PromptTokensDetails struct {
|
||||
AudioTokens int `json:"audio_tokens"`
|
||||
CachedTokens int `json:"cached_tokens"`
|
||||
}
|
||||
295
vendor/github.com/sashabaranov/go-openai/completion.go
generated
vendored
Normal file
295
vendor/github.com/sashabaranov/go-openai/completion.go
generated
vendored
Normal file
@@ -0,0 +1,295 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// GPT3 Defines the models provided by OpenAI to use when generating
|
||||
// completions from OpenAI.
|
||||
// GPT3 Models are designed for text-based tasks. For code-specific
|
||||
// tasks, please refer to the Codex series of models.
|
||||
const (
|
||||
O1Mini = "o1-mini"
|
||||
O1Mini20240912 = "o1-mini-2024-09-12"
|
||||
O1Preview = "o1-preview"
|
||||
O1Preview20240912 = "o1-preview-2024-09-12"
|
||||
O1 = "o1"
|
||||
O120241217 = "o1-2024-12-17"
|
||||
O3 = "o3"
|
||||
O320250416 = "o3-2025-04-16"
|
||||
O3Mini = "o3-mini"
|
||||
O3Mini20250131 = "o3-mini-2025-01-31"
|
||||
O4Mini = "o4-mini"
|
||||
O4Mini20250416 = "o4-mini-2025-04-16"
|
||||
GPT432K0613 = "gpt-4-32k-0613"
|
||||
GPT432K0314 = "gpt-4-32k-0314"
|
||||
GPT432K = "gpt-4-32k"
|
||||
GPT40613 = "gpt-4-0613"
|
||||
GPT40314 = "gpt-4-0314"
|
||||
GPT4o = "gpt-4o"
|
||||
GPT4o20240513 = "gpt-4o-2024-05-13"
|
||||
GPT4o20240806 = "gpt-4o-2024-08-06"
|
||||
GPT4o20241120 = "gpt-4o-2024-11-20"
|
||||
GPT4oLatest = "chatgpt-4o-latest"
|
||||
GPT4oMini = "gpt-4o-mini"
|
||||
GPT4oMini20240718 = "gpt-4o-mini-2024-07-18"
|
||||
GPT4Turbo = "gpt-4-turbo"
|
||||
GPT4Turbo20240409 = "gpt-4-turbo-2024-04-09"
|
||||
GPT4Turbo0125 = "gpt-4-0125-preview"
|
||||
GPT4Turbo1106 = "gpt-4-1106-preview"
|
||||
GPT4TurboPreview = "gpt-4-turbo-preview"
|
||||
GPT4VisionPreview = "gpt-4-vision-preview"
|
||||
GPT4 = "gpt-4"
|
||||
GPT4Dot1 = "gpt-4.1"
|
||||
GPT4Dot120250414 = "gpt-4.1-2025-04-14"
|
||||
GPT4Dot1Mini = "gpt-4.1-mini"
|
||||
GPT4Dot1Mini20250414 = "gpt-4.1-mini-2025-04-14"
|
||||
GPT4Dot1Nano = "gpt-4.1-nano"
|
||||
GPT4Dot1Nano20250414 = "gpt-4.1-nano-2025-04-14"
|
||||
GPT4Dot5Preview = "gpt-4.5-preview"
|
||||
GPT4Dot5Preview20250227 = "gpt-4.5-preview-2025-02-27"
|
||||
GPT5 = "gpt-5"
|
||||
GPT5Mini = "gpt-5-mini"
|
||||
GPT5Nano = "gpt-5-nano"
|
||||
GPT5ChatLatest = "gpt-5-chat-latest"
|
||||
GPT3Dot5Turbo0125 = "gpt-3.5-turbo-0125"
|
||||
GPT3Dot5Turbo1106 = "gpt-3.5-turbo-1106"
|
||||
GPT3Dot5Turbo0613 = "gpt-3.5-turbo-0613"
|
||||
GPT3Dot5Turbo0301 = "gpt-3.5-turbo-0301"
|
||||
GPT3Dot5Turbo16K = "gpt-3.5-turbo-16k"
|
||||
GPT3Dot5Turbo16K0613 = "gpt-3.5-turbo-16k-0613"
|
||||
GPT3Dot5Turbo = "gpt-3.5-turbo"
|
||||
GPT3Dot5TurboInstruct = "gpt-3.5-turbo-instruct"
|
||||
// Deprecated: Model is shutdown. Use gpt-3.5-turbo-instruct instead.
|
||||
GPT3TextDavinci003 = "text-davinci-003"
|
||||
// Deprecated: Model is shutdown. Use gpt-3.5-turbo-instruct instead.
|
||||
GPT3TextDavinci002 = "text-davinci-002"
|
||||
// Deprecated: Model is shutdown. Use gpt-3.5-turbo-instruct instead.
|
||||
GPT3TextCurie001 = "text-curie-001"
|
||||
// Deprecated: Model is shutdown. Use gpt-3.5-turbo-instruct instead.
|
||||
GPT3TextBabbage001 = "text-babbage-001"
|
||||
// Deprecated: Model is shutdown. Use gpt-3.5-turbo-instruct instead.
|
||||
GPT3TextAda001 = "text-ada-001"
|
||||
// Deprecated: Model is shutdown. Use gpt-3.5-turbo-instruct instead.
|
||||
GPT3TextDavinci001 = "text-davinci-001"
|
||||
// Deprecated: Model is shutdown. Use gpt-3.5-turbo-instruct instead.
|
||||
GPT3DavinciInstructBeta = "davinci-instruct-beta"
|
||||
// Deprecated: Model is shutdown. Use davinci-002 instead.
|
||||
GPT3Davinci = "davinci"
|
||||
GPT3Davinci002 = "davinci-002"
|
||||
// Deprecated: Model is shutdown. Use gpt-3.5-turbo-instruct instead.
|
||||
GPT3CurieInstructBeta = "curie-instruct-beta"
|
||||
GPT3Curie = "curie"
|
||||
GPT3Curie002 = "curie-002"
|
||||
// Deprecated: Model is shutdown. Use babbage-002 instead.
|
||||
GPT3Ada = "ada"
|
||||
GPT3Ada002 = "ada-002"
|
||||
// Deprecated: Model is shutdown. Use babbage-002 instead.
|
||||
GPT3Babbage = "babbage"
|
||||
GPT3Babbage002 = "babbage-002"
|
||||
)
|
||||
|
||||
// Codex Defines the models provided by OpenAI.
|
||||
// These models are designed for code-specific tasks, and use
|
||||
// a different tokenizer which optimizes for whitespace.
|
||||
const (
|
||||
CodexCodeDavinci002 = "code-davinci-002"
|
||||
CodexCodeCushman001 = "code-cushman-001"
|
||||
CodexCodeDavinci001 = "code-davinci-001"
|
||||
)
|
||||
|
||||
var disabledModelsForEndpoints = map[string]map[string]bool{
|
||||
"/completions": {
|
||||
O1Mini: true,
|
||||
O1Mini20240912: true,
|
||||
O1Preview: true,
|
||||
O1Preview20240912: true,
|
||||
O3Mini: true,
|
||||
O3Mini20250131: true,
|
||||
O4Mini: true,
|
||||
O4Mini20250416: true,
|
||||
O3: true,
|
||||
O320250416: true,
|
||||
GPT3Dot5Turbo: true,
|
||||
GPT3Dot5Turbo0301: true,
|
||||
GPT3Dot5Turbo0613: true,
|
||||
GPT3Dot5Turbo1106: true,
|
||||
GPT3Dot5Turbo0125: true,
|
||||
GPT3Dot5Turbo16K: true,
|
||||
GPT3Dot5Turbo16K0613: true,
|
||||
GPT4: true,
|
||||
GPT4Dot5Preview: true,
|
||||
GPT4Dot5Preview20250227: true,
|
||||
GPT4o: true,
|
||||
GPT4o20240513: true,
|
||||
GPT4o20240806: true,
|
||||
GPT4o20241120: true,
|
||||
GPT4oLatest: true,
|
||||
GPT4oMini: true,
|
||||
GPT4oMini20240718: true,
|
||||
GPT4TurboPreview: true,
|
||||
GPT4VisionPreview: true,
|
||||
GPT4Turbo1106: true,
|
||||
GPT4Turbo0125: true,
|
||||
GPT4Turbo: true,
|
||||
GPT4Turbo20240409: true,
|
||||
GPT40314: true,
|
||||
GPT40613: true,
|
||||
GPT432K: true,
|
||||
GPT432K0314: true,
|
||||
GPT432K0613: true,
|
||||
O1: true,
|
||||
GPT4Dot1: true,
|
||||
GPT4Dot120250414: true,
|
||||
GPT4Dot1Mini: true,
|
||||
GPT4Dot1Mini20250414: true,
|
||||
GPT4Dot1Nano: true,
|
||||
GPT4Dot1Nano20250414: true,
|
||||
GPT5: true,
|
||||
GPT5Mini: true,
|
||||
GPT5Nano: true,
|
||||
GPT5ChatLatest: true,
|
||||
},
|
||||
chatCompletionsSuffix: {
|
||||
CodexCodeDavinci002: true,
|
||||
CodexCodeCushman001: true,
|
||||
CodexCodeDavinci001: true,
|
||||
GPT3TextDavinci003: true,
|
||||
GPT3TextDavinci002: true,
|
||||
GPT3TextCurie001: true,
|
||||
GPT3TextBabbage001: true,
|
||||
GPT3TextAda001: true,
|
||||
GPT3TextDavinci001: true,
|
||||
GPT3DavinciInstructBeta: true,
|
||||
GPT3Davinci: true,
|
||||
GPT3CurieInstructBeta: true,
|
||||
GPT3Curie: true,
|
||||
GPT3Ada: true,
|
||||
GPT3Babbage: true,
|
||||
},
|
||||
}
|
||||
|
||||
func checkEndpointSupportsModel(endpoint, model string) bool {
|
||||
return !disabledModelsForEndpoints[endpoint][model]
|
||||
}
|
||||
|
||||
func checkPromptType(prompt any) bool {
|
||||
_, isString := prompt.(string)
|
||||
_, isStringSlice := prompt.([]string)
|
||||
if isString || isStringSlice {
|
||||
return true
|
||||
}
|
||||
|
||||
// check whether the prompt is a []string hidden under []any
|
||||
slice, isSlice := prompt.([]any)
|
||||
if !isSlice {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, item := range slice {
|
||||
_, itemIsString := item.(string)
|
||||
if !itemIsString {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true // all items in the slice are strings, so it is a []string
|
||||
}
|
||||
|
||||
// CompletionRequest represents a request structure for completion API.
|
||||
type CompletionRequest struct {
|
||||
Model string `json:"model"`
|
||||
Prompt any `json:"prompt,omitempty"`
|
||||
BestOf int `json:"best_of,omitempty"`
|
||||
Echo bool `json:"echo,omitempty"`
|
||||
FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`
|
||||
// LogitBias must be keyed by token ID strings (the token's ID in the tokenizer), not by word strings.
|
||||
// incorrect: `"logit_bias":{"You": 6}`, correct: `"logit_bias":{"1639": 6}`
|
||||
// refs: https://platform.openai.com/docs/api-reference/completions/create#completions/create-logit_bias
|
||||
LogitBias map[string]int `json:"logit_bias,omitempty"`
|
||||
// Store can be set to true to store the output of this completion request for use in distillations and evals.
|
||||
// https://platform.openai.com/docs/api-reference/chat/create#chat-create-store
|
||||
Store bool `json:"store,omitempty"`
|
||||
// Metadata to store with the completion.
|
||||
Metadata map[string]string `json:"metadata,omitempty"`
|
||||
LogProbs int `json:"logprobs,omitempty"`
|
||||
MaxTokens int `json:"max_tokens,omitempty"`
|
||||
N int `json:"n,omitempty"`
|
||||
PresencePenalty float32 `json:"presence_penalty,omitempty"`
|
||||
Seed *int `json:"seed,omitempty"`
|
||||
Stop []string `json:"stop,omitempty"`
|
||||
Stream bool `json:"stream,omitempty"`
|
||||
Suffix string `json:"suffix,omitempty"`
|
||||
Temperature float32 `json:"temperature,omitempty"`
|
||||
TopP float32 `json:"top_p,omitempty"`
|
||||
User string `json:"user,omitempty"`
|
||||
// Options for streaming response. Only set this when you set stream: true.
|
||||
StreamOptions *StreamOptions `json:"stream_options,omitempty"`
|
||||
}
|
||||
|
||||
// CompletionChoice represents one of possible completions.
|
||||
type CompletionChoice struct {
|
||||
Text string `json:"text"`
|
||||
Index int `json:"index"`
|
||||
FinishReason string `json:"finish_reason"`
|
||||
LogProbs LogprobResult `json:"logprobs"`
|
||||
}
|
||||
|
||||
// LogprobResult represents logprob result of Choice.
|
||||
type LogprobResult struct {
|
||||
Tokens []string `json:"tokens"`
|
||||
TokenLogprobs []float32 `json:"token_logprobs"`
|
||||
TopLogprobs []map[string]float32 `json:"top_logprobs"`
|
||||
TextOffset []int `json:"text_offset"`
|
||||
}
|
||||
|
||||
// CompletionResponse represents a response structure for completion API.
|
||||
type CompletionResponse struct {
|
||||
ID string `json:"id"`
|
||||
Object string `json:"object"`
|
||||
Created int64 `json:"created"`
|
||||
Model string `json:"model"`
|
||||
Choices []CompletionChoice `json:"choices"`
|
||||
Usage *Usage `json:"usage,omitempty"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
// CreateCompletion — API call to create a completion. This is the main endpoint of the API. Returns new text as well
|
||||
// as, if requested, the probabilities over each alternative token at each position.
|
||||
//
|
||||
// If using a fine-tuned model, simply provide the model's ID in the CompletionRequest object,
|
||||
// and the server will use the model's parameters to generate the completion.
|
||||
func (c *Client) CreateCompletion(
|
||||
ctx context.Context,
|
||||
request CompletionRequest,
|
||||
) (response CompletionResponse, err error) {
|
||||
if request.Stream {
|
||||
err = ErrCompletionStreamNotSupported
|
||||
return
|
||||
}
|
||||
|
||||
urlSuffix := "/completions"
|
||||
if !checkEndpointSupportsModel(urlSuffix, request.Model) {
|
||||
err = ErrCompletionUnsupportedModel
|
||||
return
|
||||
}
|
||||
|
||||
if !checkPromptType(request.Prompt) {
|
||||
err = ErrCompletionRequestPromptTypeNotSupported
|
||||
return
|
||||
}
|
||||
|
||||
req, err := c.newRequest(
|
||||
ctx,
|
||||
http.MethodPost,
|
||||
c.fullURL(urlSuffix, withModel(request.Model)),
|
||||
withBody(request),
|
||||
)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
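A minimal end-to-end sketch of calling the endpoint above; the API key is a placeholder and NewClient is defined elsewhere in this package.

package main

import (
	"context"
	"fmt"
	"log"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("YOUR_API_KEY") // placeholder key
	resp, err := client.CreateCompletion(context.Background(), openai.CompletionRequest{
		Model:     openai.GPT3Dot5TurboInstruct, // chat-only models are rejected, see disabledModelsForEndpoints
		Prompt:    "Write a haiku about the Go programming language.",
		MaxTokens: 64,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Choices[0].Text)
}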
|
||||
109
vendor/github.com/sashabaranov/go-openai/config.go
generated
vendored
Normal file
109
vendor/github.com/sashabaranov/go-openai/config.go
generated
vendored
Normal file
@@ -0,0 +1,109 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
const (
|
||||
openaiAPIURLv1 = "https://api.openai.com/v1"
|
||||
defaultEmptyMessagesLimit uint = 300
|
||||
|
||||
azureAPIPrefix = "openai"
|
||||
azureDeploymentsPrefix = "deployments"
|
||||
|
||||
AnthropicAPIVersion = "2023-06-01"
|
||||
)
|
||||
|
||||
type APIType string
|
||||
|
||||
const (
|
||||
APITypeOpenAI APIType = "OPEN_AI"
|
||||
APITypeAzure APIType = "AZURE"
|
||||
APITypeAzureAD APIType = "AZURE_AD"
|
||||
APITypeCloudflareAzure APIType = "CLOUDFLARE_AZURE"
|
||||
APITypeAnthropic APIType = "ANTHROPIC"
|
||||
)
|
||||
|
||||
const AzureAPIKeyHeader = "api-key"
|
||||
|
||||
const defaultAssistantVersion = "v2" // upgrade to v2 to support vector store
|
||||
|
||||
type HTTPDoer interface {
|
||||
Do(req *http.Request) (*http.Response, error)
|
||||
}
|
||||
|
||||
// ClientConfig is a configuration of a client.
|
||||
type ClientConfig struct {
|
||||
authToken string
|
||||
|
||||
BaseURL string
|
||||
OrgID string
|
||||
APIType APIType
|
||||
APIVersion string // required when APIType is APITypeAzure or APITypeAzureAD or APITypeAnthropic
|
||||
AssistantVersion string
|
||||
AzureModelMapperFunc func(model string) string // maps a model name to its Azure deployment name
|
||||
HTTPClient HTTPDoer
|
||||
|
||||
EmptyMessagesLimit uint
|
||||
}
|
||||
|
||||
func DefaultConfig(authToken string) ClientConfig {
|
||||
return ClientConfig{
|
||||
authToken: authToken,
|
||||
BaseURL: openaiAPIURLv1,
|
||||
APIType: APITypeOpenAI,
|
||||
AssistantVersion: defaultAssistantVersion,
|
||||
OrgID: "",
|
||||
|
||||
HTTPClient: &http.Client{},
|
||||
|
||||
EmptyMessagesLimit: defaultEmptyMessagesLimit,
|
||||
}
|
||||
}
|
||||
|
||||
func DefaultAzureConfig(apiKey, baseURL string) ClientConfig {
|
||||
return ClientConfig{
|
||||
authToken: apiKey,
|
||||
BaseURL: baseURL,
|
||||
OrgID: "",
|
||||
APIType: APITypeAzure,
|
||||
APIVersion: "2023-05-15",
|
||||
AzureModelMapperFunc: func(model string) string {
|
||||
return regexp.MustCompile(`[.:]`).ReplaceAllString(model, "")
|
||||
},
|
||||
|
||||
HTTPClient: &http.Client{},
|
||||
|
||||
EmptyMessagesLimit: defaultEmptyMessagesLimit,
|
||||
}
|
||||
}
|
||||
|
||||
func DefaultAnthropicConfig(apiKey, baseURL string) ClientConfig {
|
||||
if baseURL == "" {
|
||||
baseURL = "https://api.anthropic.com/v1"
|
||||
}
|
||||
return ClientConfig{
|
||||
authToken: apiKey,
|
||||
BaseURL: baseURL,
|
||||
OrgID: "",
|
||||
APIType: APITypeAnthropic,
|
||||
APIVersion: AnthropicAPIVersion,
|
||||
|
||||
HTTPClient: &http.Client{},
|
||||
|
||||
EmptyMessagesLimit: defaultEmptyMessagesLimit,
|
||||
}
|
||||
}
|
||||
|
||||
func (ClientConfig) String() string {
|
||||
return "<OpenAI API ClientConfig>"
|
||||
}
|
||||
|
||||
func (c ClientConfig) GetAzureDeploymentByModel(model string) string {
|
||||
if c.AzureModelMapperFunc != nil {
|
||||
return c.AzureModelMapperFunc(model)
|
||||
}
|
||||
|
||||
return model
|
||||
}
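A sketch of wiring the Azure configuration above into a client, with placeholder endpoint, key, and deployment names; NewClientWithConfig is defined elsewhere in this package.

config := openai.DefaultAzureConfig("YOUR_AZURE_KEY", "https://example.openai.azure.com")
config.AzureModelMapperFunc = func(model string) string {
	// Map OpenAI model names to your own deployment names (placeholders).
	deployments := map[string]string{
		openai.GPT4o:     "my-gpt4o-deployment",
		openai.GPT4oMini: "my-gpt4o-mini-deployment",
	}
	if d, ok := deployments[model]; ok {
		return d
	}
	return model
}
client := openai.NewClientWithConfig(config)
_ = client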
|
||||
53
vendor/github.com/sashabaranov/go-openai/edits.go
generated
vendored
Normal file
53
vendor/github.com/sashabaranov/go-openai/edits.go
generated
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// EditsRequest represents a request structure for Edits API.
|
||||
type EditsRequest struct {
|
||||
Model *string `json:"model,omitempty"`
|
||||
Input string `json:"input,omitempty"`
|
||||
Instruction string `json:"instruction,omitempty"`
|
||||
N int `json:"n,omitempty"`
|
||||
Temperature float32 `json:"temperature,omitempty"`
|
||||
TopP float32 `json:"top_p,omitempty"`
|
||||
}
|
||||
|
||||
// EditsChoice represents one of possible edits.
|
||||
type EditsChoice struct {
|
||||
Text string `json:"text"`
|
||||
Index int `json:"index"`
|
||||
}
|
||||
|
||||
// EditsResponse represents a response structure for Edits API.
|
||||
type EditsResponse struct {
|
||||
Object string `json:"object"`
|
||||
Created int64 `json:"created"`
|
||||
Usage Usage `json:"usage"`
|
||||
Choices []EditsChoice `json:"choices"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
// Edits Perform an API call to the Edits endpoint.
|
||||
/* Deprecated: Users of the Edits API and its associated models (e.g., text-davinci-edit-001 or code-davinci-edit-001)
|
||||
will need to migrate to GPT-3.5 Turbo by January 4, 2024.
|
||||
You can use CreateChatCompletion or CreateChatCompletionStream instead.
|
||||
*/
|
||||
func (c *Client) Edits(ctx context.Context, request EditsRequest) (response EditsResponse, err error) {
|
||||
req, err := c.newRequest(
|
||||
ctx,
|
||||
http.MethodPost,
|
||||
c.fullURL("/edits", withModel(fmt.Sprint(request.Model))),
|
||||
withBody(request),
|
||||
)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
|
||||
297
vendor/github.com/sashabaranov/go-openai/embeddings.go
generated
vendored
Normal file
297
vendor/github.com/sashabaranov/go-openai/embeddings.go
generated
vendored
Normal file
@@ -0,0 +1,297 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"math"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
var ErrVectorLengthMismatch = errors.New("vector length mismatch")
|
||||
|
||||
// EmbeddingModel enumerates the models which can be used
|
||||
// to generate Embedding vectors.
|
||||
type EmbeddingModel string
|
||||
|
||||
const (
|
||||
// Deprecated: The following block is shut down. Use text-embedding-ada-002 instead.
|
||||
AdaSimilarity EmbeddingModel = "text-similarity-ada-001"
|
||||
BabbageSimilarity EmbeddingModel = "text-similarity-babbage-001"
|
||||
CurieSimilarity EmbeddingModel = "text-similarity-curie-001"
|
||||
DavinciSimilarity EmbeddingModel = "text-similarity-davinci-001"
|
||||
AdaSearchDocument EmbeddingModel = "text-search-ada-doc-001"
|
||||
AdaSearchQuery EmbeddingModel = "text-search-ada-query-001"
|
||||
BabbageSearchDocument EmbeddingModel = "text-search-babbage-doc-001"
|
||||
BabbageSearchQuery EmbeddingModel = "text-search-babbage-query-001"
|
||||
CurieSearchDocument EmbeddingModel = "text-search-curie-doc-001"
|
||||
CurieSearchQuery EmbeddingModel = "text-search-curie-query-001"
|
||||
DavinciSearchDocument EmbeddingModel = "text-search-davinci-doc-001"
|
||||
DavinciSearchQuery EmbeddingModel = "text-search-davinci-query-001"
|
||||
AdaCodeSearchCode EmbeddingModel = "code-search-ada-code-001"
|
||||
AdaCodeSearchText EmbeddingModel = "code-search-ada-text-001"
|
||||
BabbageCodeSearchCode EmbeddingModel = "code-search-babbage-code-001"
|
||||
BabbageCodeSearchText EmbeddingModel = "code-search-babbage-text-001"
|
||||
|
||||
AdaEmbeddingV2 EmbeddingModel = "text-embedding-ada-002"
|
||||
SmallEmbedding3 EmbeddingModel = "text-embedding-3-small"
|
||||
LargeEmbedding3 EmbeddingModel = "text-embedding-3-large"
|
||||
)
|
||||
|
||||
// Embedding is a special format of data representation that can be easily utilized by machine
|
||||
// learning models and algorithms. The embedding is an information dense representation of the
|
||||
// semantic meaning of a piece of text. Each embedding is a vector of floating point numbers,
|
||||
// such that the distance between two embeddings in the vector space is correlated with semantic similarity
|
||||
// between two inputs in the original format. For example, if two texts are similar,
|
||||
// then their vector representations should also be similar.
|
||||
type Embedding struct {
|
||||
Object string `json:"object"`
|
||||
Embedding []float32 `json:"embedding"`
|
||||
Index int `json:"index"`
|
||||
}
|
||||
|
||||
// DotProduct calculates the dot product of the embedding vector with another
|
||||
// embedding vector. Both vectors must have the same length; otherwise, an
|
||||
// ErrVectorLengthMismatch is returned. The method returns the calculated dot
|
||||
// product as a float32 value.
|
||||
func (e *Embedding) DotProduct(other *Embedding) (float32, error) {
|
||||
if len(e.Embedding) != len(other.Embedding) {
|
||||
return 0, ErrVectorLengthMismatch
|
||||
}
|
||||
|
||||
var dotProduct float32
|
||||
for i := range e.Embedding {
|
||||
dotProduct += e.Embedding[i] * other.Embedding[i]
|
||||
}
|
||||
|
||||
return dotProduct, nil
|
||||
}
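DotProduct composes naturally into cosine similarity. A small caller-side sketch, assuming the standard math package and the usual import of this package as openai:

func cosineSimilarity(a, b *openai.Embedding) (float64, error) {
	dot, err := a.DotProduct(b)
	if err != nil {
		return 0, err
	}
	na, _ := a.DotProduct(a) // a vector always matches its own length
	nb, _ := b.DotProduct(b)
	return float64(dot) / (math.Sqrt(float64(na)) * math.Sqrt(float64(nb))), nil
}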
|
||||
|
||||
// EmbeddingResponse is the response from a Create embeddings request.
|
||||
type EmbeddingResponse struct {
|
||||
Object string `json:"object"`
|
||||
Data []Embedding `json:"data"`
|
||||
Model EmbeddingModel `json:"model"`
|
||||
Usage Usage `json:"usage"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
type base64String string
|
||||
|
||||
func (b base64String) Decode() ([]float32, error) {
|
||||
decodedData, err := base64.StdEncoding.DecodeString(string(b))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
const sizeOfFloat32 = 4
|
||||
floats := make([]float32, len(decodedData)/sizeOfFloat32)
|
||||
for i := 0; i < len(floats); i++ {
|
||||
floats[i] = math.Float32frombits(binary.LittleEndian.Uint32(decodedData[i*4 : (i+1)*4]))
|
||||
}
|
||||
|
||||
return floats, nil
|
||||
}
|
||||
|
||||
// Base64Embedding is a container for base64 encoded embeddings.
|
||||
type Base64Embedding struct {
|
||||
Object string `json:"object"`
|
||||
Embedding base64String `json:"embedding"`
|
||||
Index int `json:"index"`
|
||||
}
|
||||
|
||||
// EmbeddingResponseBase64 is the response from a Create embeddings request with base64 encoding format.
|
||||
type EmbeddingResponseBase64 struct {
|
||||
Object string `json:"object"`
|
||||
Data []Base64Embedding `json:"data"`
|
||||
Model EmbeddingModel `json:"model"`
|
||||
Usage Usage `json:"usage"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
// ToEmbeddingResponse converts an EmbeddingResponseBase64 to an EmbeddingResponse.
|
||||
func (r *EmbeddingResponseBase64) ToEmbeddingResponse() (EmbeddingResponse, error) {
|
||||
data := make([]Embedding, len(r.Data))
|
||||
|
||||
for i, base64Embedding := range r.Data {
|
||||
embedding, err := base64Embedding.Embedding.Decode()
|
||||
if err != nil {
|
||||
return EmbeddingResponse{}, err
|
||||
}
|
||||
|
||||
data[i] = Embedding{
|
||||
Object: base64Embedding.Object,
|
||||
Embedding: embedding,
|
||||
Index: base64Embedding.Index,
|
||||
}
|
||||
}
|
||||
|
||||
return EmbeddingResponse{
|
||||
Object: r.Object,
|
||||
Model: r.Model,
|
||||
Data: data,
|
||||
Usage: r.Usage,
|
||||
}, nil
|
||||
}
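The base64 format decoded above is float32 values packed little-endian and then base64-encoded; a standalone sketch of producing and reading that encoding:

package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
	"math"
)

func main() {
	vals := []float32{0.25, -1.5, 3.0}

	// Encode: pack each float32 little-endian, then base64 the bytes.
	buf := make([]byte, 4*len(vals))
	for i, v := range vals {
		binary.LittleEndian.PutUint32(buf[i*4:], math.Float32bits(v))
	}
	encoded := base64.StdEncoding.EncodeToString(buf)

	// Decode: the inverse, mirroring base64String.Decode above.
	raw, _ := base64.StdEncoding.DecodeString(encoded)
	out := make([]float32, len(raw)/4)
	for i := range out {
		out[i] = math.Float32frombits(binary.LittleEndian.Uint32(raw[i*4:]))
	}
	fmt.Println(encoded, out) // AACAPgAAwL8AAEBA [0.25 -1.5 3]
}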
|
||||
|
||||
type EmbeddingRequestConverter interface {
|
||||
// Needs to be of type EmbeddingRequestStrings or EmbeddingRequestTokens
|
||||
Convert() EmbeddingRequest
|
||||
}
|
||||
|
||||
// EmbeddingEncodingFormat is the format of the embeddings data.
|
||||
// Currently, only "float" and "base64" are supported, however, "base64" is not officially documented.
|
||||
// If not specified OpenAI will use "float".
|
||||
type EmbeddingEncodingFormat string
|
||||
|
||||
const (
|
||||
EmbeddingEncodingFormatFloat EmbeddingEncodingFormat = "float"
|
||||
EmbeddingEncodingFormatBase64 EmbeddingEncodingFormat = "base64"
|
||||
)
|
||||
|
||||
type EmbeddingRequest struct {
|
||||
Input any `json:"input"`
|
||||
Model EmbeddingModel `json:"model"`
|
||||
User string `json:"user,omitempty"`
|
||||
EncodingFormat EmbeddingEncodingFormat `json:"encoding_format,omitempty"`
|
||||
// Dimensions The number of dimensions the resulting output embeddings should have.
|
||||
// Only supported in text-embedding-3 and later models.
|
||||
Dimensions int `json:"dimensions,omitempty"`
|
||||
// The ExtraBody field allows for the inclusion of arbitrary key-value pairs
|
||||
// in the request body that may not be explicitly defined in this struct.
|
||||
ExtraBody map[string]any `json:"extra_body,omitempty"`
|
||||
}
|
||||
|
||||
func (r EmbeddingRequest) Convert() EmbeddingRequest {
|
||||
return r
|
||||
}
|
||||
|
||||
// EmbeddingRequestStrings is the input to a create embeddings request with a slice of strings.
|
||||
type EmbeddingRequestStrings struct {
|
||||
// Input is a slice of strings for which you want to generate an Embedding vector.
|
||||
// Each input must not exceed 8192 tokens in length.
|
||||
// OpenAI suggests replacing newlines (\n) in your input with a single space, as they
|
||||
// have observed inferior results when newlines are present.
|
||||
// E.g.
|
||||
// "The food was delicious and the waiter..."
|
||||
Input []string `json:"input"`
|
||||
// ID of the model to use. You can use the List models API to see all of your available models,
|
||||
// or see our Model overview for descriptions of them.
|
||||
Model EmbeddingModel `json:"model"`
|
||||
// A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse.
|
||||
User string `json:"user"`
|
||||
// EmbeddingEncodingFormat is the format of the embeddings data.
|
||||
// Currently, only "float" and "base64" are supported, however, "base64" is not officially documented.
|
||||
// If not specified OpenAI will use "float".
|
||||
EncodingFormat EmbeddingEncodingFormat `json:"encoding_format,omitempty"`
|
||||
// Dimensions The number of dimensions the resulting output embeddings should have.
|
||||
// Only supported in text-embedding-3 and later models.
|
||||
Dimensions int `json:"dimensions,omitempty"`
|
||||
// The ExtraBody field allows for the inclusion of arbitrary key-value pairs
|
||||
// in the request body that may not be explicitly defined in this struct.
|
||||
ExtraBody map[string]any `json:"extra_body,omitempty"`
|
||||
}
|
||||
|
||||
func (r EmbeddingRequestStrings) Convert() EmbeddingRequest {
|
||||
return EmbeddingRequest{
|
||||
Input: r.Input,
|
||||
Model: r.Model,
|
||||
User: r.User,
|
||||
EncodingFormat: r.EncodingFormat,
|
||||
Dimensions: r.Dimensions,
|
||||
ExtraBody: r.ExtraBody,
|
||||
}
|
||||
}
|
||||
|
||||
type EmbeddingRequestTokens struct {
|
||||
// Input is a slice of slices of ints ([][]int) for which you want to generate an Embedding vector.
|
||||
// Each input must not exceed 8192 tokens in length.
|
||||
// OpenAI suggests replacing newlines (\n) in your input with a single space, as they
|
||||
// have observed inferior results when newlines are present.
|
||||
// E.g.
|
||||
// "The food was delicious and the waiter..."
|
||||
Input [][]int `json:"input"`
|
||||
// ID of the model to use. You can use the List models API to see all of your available models,
|
||||
// or see our Model overview for descriptions of them.
|
||||
Model EmbeddingModel `json:"model"`
|
||||
// A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse.
|
||||
User string `json:"user"`
|
||||
// EmbeddingEncodingFormat is the format of the embeddings data.
|
||||
// Currently, only "float" and "base64" are supported, however, "base64" is not officially documented.
|
||||
// If not specified OpenAI will use "float".
|
||||
EncodingFormat EmbeddingEncodingFormat `json:"encoding_format,omitempty"`
|
||||
// Dimensions The number of dimensions the resulting output embeddings should have.
|
||||
// Only supported in text-embedding-3 and later models.
|
||||
Dimensions int `json:"dimensions,omitempty"`
|
||||
// The ExtraBody field allows for the inclusion of arbitrary key-value pairs
|
||||
// in the request body that may not be explicitly defined in this struct.
|
||||
ExtraBody map[string]any `json:"extra_body,omitempty"`
|
||||
}
|
||||
|
||||
func (r EmbeddingRequestTokens) Convert() EmbeddingRequest {
|
||||
return EmbeddingRequest{
|
||||
Input: r.Input,
|
||||
Model: r.Model,
|
||||
User: r.User,
|
||||
EncodingFormat: r.EncodingFormat,
|
||||
Dimensions: r.Dimensions,
|
||||
ExtraBody: r.ExtraBody,
|
||||
}
|
||||
}
|
||||
|
||||
// CreateEmbeddings returns an EmbeddingResponse which will contain an Embedding for every item in |body.Input|.
|
||||
// https://beta.openai.com/docs/api-reference/embeddings/create
|
||||
//
|
||||
// Body should be of type EmbeddingRequestStrings for embedding strings or EmbeddingRequestTokens
|
||||
// for embedding groups of text already converted to tokens.
|
||||
func (c *Client) CreateEmbeddings(
|
||||
ctx context.Context,
|
||||
conv EmbeddingRequestConverter,
|
||||
) (res EmbeddingResponse, err error) {
|
||||
baseReq := conv.Convert()
|
||||
|
||||
// The body map is used to dynamically construct the request payload for the embedding API.
|
||||
// Instead of relying on a fixed struct, the body map allows for flexible inclusion of fields
|
||||
// based on their presence, avoiding unnecessary or empty fields in the request.
|
||||
extraBody := baseReq.ExtraBody
|
||||
baseReq.ExtraBody = nil
|
||||
|
||||
// Serialize baseReq to JSON
|
||||
jsonData, err := json.Marshal(baseReq)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Deserialize JSON to map[string]any
|
||||
var body map[string]any
|
||||
_ = json.Unmarshal(jsonData, &body)
|
||||
|
||||
req, err := c.newRequest(
|
||||
ctx,
|
||||
http.MethodPost,
|
||||
c.fullURL("/embeddings", withModel(string(baseReq.Model))),
|
||||
withBody(body), // Main request body.
|
||||
withExtraBody(extraBody), // Merge ExtraBody fields.
|
||||
)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if baseReq.EncodingFormat != EmbeddingEncodingFormatBase64 {
|
||||
err = c.sendRequest(req, &res)
|
||||
return
|
||||
}
|
||||
|
||||
base64Response := &EmbeddingResponseBase64{}
|
||||
err = c.sendRequest(req, base64Response)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
res, err = base64Response.ToEmbeddingResponse()
|
||||
return
|
||||
}
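A short sketch of requesting embeddings for a few strings, assuming a client built as in the earlier configuration examples:

resp, err := client.CreateEmbeddings(context.Background(), openai.EmbeddingRequestStrings{
	Input: []string{
		"The food was delicious and the waiter was friendly.",
		"The service was slow and the food was cold.",
	},
	Model: openai.SmallEmbedding3,
})
if err != nil {
	log.Fatal(err)
}
for _, e := range resp.Data {
	fmt.Println(e.Index, len(e.Embedding)) // input index and vector length
}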
|
||||
52
vendor/github.com/sashabaranov/go-openai/engines.go
generated
vendored
Normal file
52
vendor/github.com/sashabaranov/go-openai/engines.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// Engine struct represents an engine from the OpenAI API.
|
||||
type Engine struct {
|
||||
ID string `json:"id"`
|
||||
Object string `json:"object"`
|
||||
Owner string `json:"owner"`
|
||||
Ready bool `json:"ready"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
// EnginesList is a list of engines.
|
||||
type EnginesList struct {
|
||||
Engines []Engine `json:"data"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
// ListEngines Lists the currently available engines, and provides basic
|
||||
// information about each option such as the owner and availability.
|
||||
func (c *Client) ListEngines(ctx context.Context) (engines EnginesList, err error) {
|
||||
req, err := c.newRequest(ctx, http.MethodGet, c.fullURL("/engines"))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &engines)
|
||||
return
|
||||
}
|
||||
|
||||
// GetEngine Retrieves an engine instance, providing basic information about
|
||||
// the engine such as the owner and availability.
|
||||
func (c *Client) GetEngine(
|
||||
ctx context.Context,
|
||||
engineID string,
|
||||
) (engine Engine, err error) {
|
||||
urlSuffix := fmt.Sprintf("/engines/%s", engineID)
|
||||
req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &engine)
|
||||
return
|
||||
}
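A brief usage sketch, assuming an already configured client:

engines, err := client.ListEngines(context.Background())
if err != nil {
	log.Fatal(err)
}
for _, e := range engines.Engines {
	fmt.Println(e.ID, e.Owner, e.Ready)
}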
|
||||
115
vendor/github.com/sashabaranov/go-openai/error.go
generated
vendored
Normal file
115
vendor/github.com/sashabaranov/go-openai/error.go
generated
vendored
Normal file
@@ -0,0 +1,115 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// APIError provides error information returned by the OpenAI API.
|
||||
// InnerError struct is only valid for Azure OpenAI Service.
|
||||
type APIError struct {
|
||||
Code any `json:"code,omitempty"`
|
||||
Message string `json:"message"`
|
||||
Param *string `json:"param,omitempty"`
|
||||
Type string `json:"type"`
|
||||
HTTPStatus string `json:"-"`
|
||||
HTTPStatusCode int `json:"-"`
|
||||
InnerError *InnerError `json:"innererror,omitempty"`
|
||||
}
|
||||
|
||||
// InnerError Azure Content filtering. Only valid for Azure OpenAI Service.
|
||||
type InnerError struct {
|
||||
Code string `json:"code,omitempty"`
|
||||
ContentFilterResults ContentFilterResults `json:"content_filter_result,omitempty"`
|
||||
}
|
||||
|
||||
// RequestError provides information about generic request errors.
|
||||
type RequestError struct {
|
||||
HTTPStatus string
|
||||
HTTPStatusCode int
|
||||
Err error
|
||||
Body []byte
|
||||
}
|
||||
|
||||
type ErrorResponse struct {
|
||||
Error *APIError `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
func (e *APIError) Error() string {
|
||||
if e.HTTPStatusCode > 0 {
|
||||
return fmt.Sprintf("error, status code: %d, status: %s, message: %s", e.HTTPStatusCode, e.HTTPStatus, e.Message)
|
||||
}
|
||||
|
||||
return e.Message
|
||||
}
|
||||
|
||||
func (e *APIError) UnmarshalJSON(data []byte) (err error) {
|
||||
var rawMap map[string]json.RawMessage
|
||||
err = json.Unmarshal(data, &rawMap)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = json.Unmarshal(rawMap["message"], &e.Message)
|
||||
if err != nil {
|
||||
// If the parameter field of a function call is invalid as a JSON schema
|
||||
// refs: https://github.com/sashabaranov/go-openai/issues/381
|
||||
var messages []string
|
||||
err = json.Unmarshal(rawMap["message"], &messages)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
e.Message = strings.Join(messages, ", ")
|
||||
}
|
||||
|
||||
// optional fields for azure openai
|
||||
// refs: https://github.com/sashabaranov/go-openai/issues/343
|
||||
if _, ok := rawMap["type"]; ok {
|
||||
err = json.Unmarshal(rawMap["type"], &e.Type)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if _, ok := rawMap["innererror"]; ok {
|
||||
err = json.Unmarshal(rawMap["innererror"], &e.InnerError)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// optional fields
|
||||
if _, ok := rawMap["param"]; ok {
|
||||
err = json.Unmarshal(rawMap["param"], &e.Param)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if _, ok := rawMap["code"]; !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
// if the api returned a number, we need to force an integer
|
||||
// since the json package defaults to float64
|
||||
var intCode int
|
||||
err = json.Unmarshal(rawMap["code"], &intCode)
|
||||
if err == nil {
|
||||
e.Code = intCode
|
||||
return nil
|
||||
}
|
||||
|
||||
return json.Unmarshal(rawMap["code"], &e.Code)
|
||||
}
|
||||
|
||||
func (e *RequestError) Error() string {
|
||||
return fmt.Sprintf(
|
||||
"error, status code: %d, status: %s, message: %s, body: %s",
|
||||
e.HTTPStatusCode, e.HTTPStatus, e.Err, e.Body,
|
||||
)
|
||||
}
|
||||
|
||||
func (e *RequestError) Unwrap() error {
|
||||
return e.Err
|
||||
}
|
||||
171
vendor/github.com/sashabaranov/go-openai/files.go
generated
vendored
Normal file
171
vendor/github.com/sashabaranov/go-openai/files.go
generated
vendored
Normal file
@@ -0,0 +1,171 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
)
|
||||
|
||||
type FileRequest struct {
|
||||
FileName string `json:"file"`
|
||||
FilePath string `json:"-"`
|
||||
Purpose string `json:"purpose"`
|
||||
}
|
||||
|
||||
// PurposeType represents the purpose of the file when uploading.
|
||||
type PurposeType string
|
||||
|
||||
const (
|
||||
PurposeFineTune PurposeType = "fine-tune"
|
||||
PurposeFineTuneResults PurposeType = "fine-tune-results"
|
||||
PurposeAssistants PurposeType = "assistants"
|
||||
PurposeAssistantsOutput PurposeType = "assistants_output"
|
||||
PurposeBatch PurposeType = "batch"
|
||||
)
|
||||
|
||||
// FileBytesRequest represents a file upload request.
|
||||
type FileBytesRequest struct {
|
||||
// the name of the uploaded file in OpenAI
|
||||
Name string
|
||||
// the bytes of the file
|
||||
Bytes []byte
|
||||
// the purpose of the file
|
||||
Purpose PurposeType
|
||||
}
|
||||
|
||||
// File struct represents an OpenAI file.
|
||||
type File struct {
|
||||
Bytes int `json:"bytes"`
|
||||
CreatedAt int64 `json:"created_at"`
|
||||
ID string `json:"id"`
|
||||
FileName string `json:"filename"`
|
||||
Object string `json:"object"`
|
||||
Status string `json:"status"`
|
||||
Purpose string `json:"purpose"`
|
||||
StatusDetails string `json:"status_details"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
// FilesList is a list of files that belong to the user or organization.
|
||||
type FilesList struct {
|
||||
Files []File `json:"data"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
// CreateFileBytes uploads bytes directly to OpenAI without requiring a local file.
|
||||
func (c *Client) CreateFileBytes(ctx context.Context, request FileBytesRequest) (file File, err error) {
|
||||
var b bytes.Buffer
|
||||
reader := bytes.NewReader(request.Bytes)
|
||||
builder := c.createFormBuilder(&b)
|
||||
|
||||
err = builder.WriteField("purpose", string(request.Purpose))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = builder.CreateFormFileReader("file", reader, request.Name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = builder.Close()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
req, err := c.newRequest(ctx, http.MethodPost, c.fullURL("/files"),
|
||||
withBody(&b), withContentType(builder.FormDataContentType()))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &file)
|
||||
return
|
||||
}
|
||||
|
||||
// CreateFile uploads a jsonl file to OpenAI.
|
||||
// FilePath must be a local file path.
|
||||
func (c *Client) CreateFile(ctx context.Context, request FileRequest) (file File, err error) {
|
||||
var b bytes.Buffer
|
||||
builder := c.createFormBuilder(&b)
|
||||
|
||||
err = builder.WriteField("purpose", request.Purpose)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
fileData, err := os.Open(request.FilePath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer fileData.Close()
|
||||
|
||||
err = builder.CreateFormFile("file", fileData)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = builder.Close()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
req, err := c.newRequest(ctx, http.MethodPost, c.fullURL("/files"),
|
||||
withBody(&b), withContentType(builder.FormDataContentType()))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &file)
|
||||
return
|
||||
}
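A sketch of uploading a local JSONL training file with the helper above; the path is a placeholder:

file, err := client.CreateFile(context.Background(), openai.FileRequest{
	FilePath: "data/training.jsonl",          // placeholder local path
	Purpose:  string(openai.PurposeFineTune), // "fine-tune"
})
if err != nil {
	log.Fatal(err)
}
fmt.Println("uploaded:", file.ID, file.Status)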
|
||||
|
||||
// DeleteFile deletes an existing file.
|
||||
func (c *Client) DeleteFile(ctx context.Context, fileID string) (err error) {
|
||||
req, err := c.newRequest(ctx, http.MethodDelete, c.fullURL("/files/"+fileID))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, nil)
|
||||
return
|
||||
}
|
||||
|
||||
// ListFiles Lists the currently available files,
|
||||
// and provides basic information about each file such as the file name and purpose.
|
||||
func (c *Client) ListFiles(ctx context.Context) (files FilesList, err error) {
|
||||
req, err := c.newRequest(ctx, http.MethodGet, c.fullURL("/files"))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &files)
|
||||
return
|
||||
}
|
||||
|
||||
// GetFile Retrieves a file instance, providing basic information about the file
|
||||
// such as the file name and purpose.
|
||||
func (c *Client) GetFile(ctx context.Context, fileID string) (file File, err error) {
|
||||
urlSuffix := fmt.Sprintf("/files/%s", fileID)
|
||||
req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &file)
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Client) GetFileContent(ctx context.Context, fileID string) (content RawResponse, err error) {
|
||||
urlSuffix := fmt.Sprintf("/files/%s/content", fileID)
|
||||
req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return c.sendRequestRaw(req)
|
||||
}
|
||||
178
vendor/github.com/sashabaranov/go-openai/fine_tunes.go
generated
vendored
Normal file
178
vendor/github.com/sashabaranov/go-openai/fine_tunes.go
generated
vendored
Normal file
@@ -0,0 +1,178 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API.
|
||||
// This API will be officially deprecated on January 4th, 2024.
|
||||
// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.
|
||||
type FineTuneRequest struct {
|
||||
TrainingFile string `json:"training_file"`
|
||||
ValidationFile string `json:"validation_file,omitempty"`
|
||||
Model string `json:"model,omitempty"`
|
||||
Epochs int `json:"n_epochs,omitempty"`
|
||||
BatchSize int `json:"batch_size,omitempty"`
|
||||
LearningRateMultiplier float32 `json:"learning_rate_multiplier,omitempty"`
|
||||
PromptLossRate float32 `json:"prompt_loss_rate,omitempty"`
|
||||
ComputeClassificationMetrics bool `json:"compute_classification_metrics,omitempty"`
|
||||
ClassificationClasses int `json:"classification_n_classes,omitempty"`
|
||||
ClassificationPositiveClass string `json:"classification_positive_class,omitempty"`
|
||||
ClassificationBetas []float32 `json:"classification_betas,omitempty"`
|
||||
Suffix string `json:"suffix,omitempty"`
|
||||
}
|
||||
|
||||
// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API.
|
||||
// This API will be officially deprecated on January 4th, 2024.
|
||||
// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.
|
||||
type FineTune struct {
|
||||
ID string `json:"id"`
|
||||
Object string `json:"object"`
|
||||
Model string `json:"model"`
|
||||
CreatedAt int64 `json:"created_at"`
|
||||
FineTuneEventList []FineTuneEvent `json:"events,omitempty"`
|
||||
FineTunedModel string `json:"fine_tuned_model"`
|
||||
HyperParams FineTuneHyperParams `json:"hyperparams"`
|
||||
OrganizationID string `json:"organization_id"`
|
||||
ResultFiles []File `json:"result_files"`
|
||||
Status string `json:"status"`
|
||||
ValidationFiles []File `json:"validation_files"`
|
||||
TrainingFiles []File `json:"training_files"`
|
||||
UpdatedAt int64 `json:"updated_at"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API.
|
||||
// This API will be officially deprecated on January 4th, 2024.
|
||||
// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.
|
||||
type FineTuneEvent struct {
|
||||
Object string `json:"object"`
|
||||
CreatedAt int64 `json:"created_at"`
|
||||
Level string `json:"level"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API.
|
||||
// This API will be officially deprecated on January 4th, 2024.
|
||||
// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.
|
||||
type FineTuneHyperParams struct {
|
||||
BatchSize int `json:"batch_size"`
|
||||
LearningRateMultiplier float64 `json:"learning_rate_multiplier"`
|
||||
Epochs int `json:"n_epochs"`
|
||||
PromptLossWeight float64 `json:"prompt_loss_weight"`
|
||||
}
|
||||
|
||||
// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API.
|
||||
// This API will be officially deprecated on January 4th, 2024.
|
||||
// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.
|
||||
type FineTuneList struct {
|
||||
Object string `json:"object"`
|
||||
Data []FineTune `json:"data"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API.
|
||||
// This API will be officially deprecated on January 4th, 2024.
|
||||
// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.
|
||||
type FineTuneEventList struct {
|
||||
Object string `json:"object"`
|
||||
Data []FineTuneEvent `json:"data"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API.
|
||||
// This API will be officially deprecated on January 4th, 2024.
|
||||
// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.
|
||||
type FineTuneDeleteResponse struct {
|
||||
ID string `json:"id"`
|
||||
Object string `json:"object"`
|
||||
Deleted bool `json:"deleted"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API.
|
||||
// This API will be officially deprecated on January 4th, 2024.
|
||||
// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.
|
||||
func (c *Client) CreateFineTune(ctx context.Context, request FineTuneRequest) (response FineTune, err error) {
|
||||
urlSuffix := "/fine-tunes"
|
||||
req, err := c.newRequest(ctx, http.MethodPost, c.fullURL(urlSuffix), withBody(request))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
|
||||
|
||||
// CancelFineTune cancels a fine-tune job.
|
||||
// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API.
|
||||
// This API will be officially deprecated on January 4th, 2024.
|
||||
// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.
|
||||
func (c *Client) CancelFineTune(ctx context.Context, fineTuneID string) (response FineTune, err error) {
|
||||
req, err := c.newRequest(ctx, http.MethodPost, c.fullURL("/fine-tunes/"+fineTuneID+"/cancel")) //nolint:lll //this method is deprecated
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
|
||||
|
||||
// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API.
|
||||
// This API will be officially deprecated on January 4th, 2024.
|
||||
// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.
|
||||
func (c *Client) ListFineTunes(ctx context.Context) (response FineTuneList, err error) {
|
||||
req, err := c.newRequest(ctx, http.MethodGet, c.fullURL("/fine-tunes"))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
|
||||
|
||||
// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API.
|
||||
// This API will be officially deprecated on January 4th, 2024.
|
||||
// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.
|
||||
func (c *Client) GetFineTune(ctx context.Context, fineTuneID string) (response FineTune, err error) {
|
||||
urlSuffix := fmt.Sprintf("/fine-tunes/%s", fineTuneID)
|
||||
req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
|
||||
|
||||
// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API.
|
||||
// This API will be officially deprecated on January 4th, 2024.
|
||||
// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.
|
||||
func (c *Client) DeleteFineTune(ctx context.Context, fineTuneID string) (response FineTuneDeleteResponse, err error) {
|
||||
req, err := c.newRequest(ctx, http.MethodDelete, c.fullURL("/fine-tunes/"+fineTuneID))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
|
||||
|
||||
// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API.
|
||||
// This API will be officially deprecated on January 4th, 2024.
|
||||
// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.
|
||||
func (c *Client) ListFineTuneEvents(ctx context.Context, fineTuneID string) (response FineTuneEventList, err error) {
|
||||
req, err := c.newRequest(ctx, http.MethodGet, c.fullURL("/fine-tunes/"+fineTuneID+"/events"))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
|
||||
159
vendor/github.com/sashabaranov/go-openai/fine_tuning_job.go
generated
vendored
Normal file
159
vendor/github.com/sashabaranov/go-openai/fine_tuning_job.go
generated
vendored
Normal file
@@ -0,0 +1,159 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
type FineTuningJob struct {
|
||||
ID string `json:"id"`
|
||||
Object string `json:"object"`
|
||||
CreatedAt int64 `json:"created_at"`
|
||||
FinishedAt int64 `json:"finished_at"`
|
||||
Model string `json:"model"`
|
||||
FineTunedModel string `json:"fine_tuned_model,omitempty"`
|
||||
OrganizationID string `json:"organization_id"`
|
||||
Status string `json:"status"`
|
||||
Hyperparameters Hyperparameters `json:"hyperparameters"`
|
||||
TrainingFile string `json:"training_file"`
|
||||
ValidationFile string `json:"validation_file,omitempty"`
|
||||
ResultFiles []string `json:"result_files"`
|
||||
TrainedTokens int `json:"trained_tokens"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
type Hyperparameters struct {
|
||||
Epochs any `json:"n_epochs,omitempty"`
|
||||
LearningRateMultiplier any `json:"learning_rate_multiplier,omitempty"`
|
||||
BatchSize any `json:"batch_size,omitempty"`
|
||||
}
|
||||
|
||||
type FineTuningJobRequest struct {
|
||||
TrainingFile string `json:"training_file"`
|
||||
ValidationFile string `json:"validation_file,omitempty"`
|
||||
Model string `json:"model,omitempty"`
|
||||
Hyperparameters *Hyperparameters `json:"hyperparameters,omitempty"`
|
||||
Suffix string `json:"suffix,omitempty"`
|
||||
}
|
||||
|
||||
type FineTuningJobEventList struct {
|
||||
Object string `json:"object"`
|
||||
Data []FineTuneEvent `json:"data"`
|
||||
HasMore bool `json:"has_more"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
type FineTuningJobEvent struct {
|
||||
Object string `json:"object"`
|
||||
ID string `json:"id"`
|
||||
CreatedAt int `json:"created_at"`
|
||||
Level string `json:"level"`
|
||||
Message string `json:"message"`
|
||||
Data any `json:"data"`
|
||||
Type string `json:"type"`
|
||||
}
|
||||
|
||||
// CreateFineTuningJob creates a fine-tuning job.
|
||||
func (c *Client) CreateFineTuningJob(
|
||||
ctx context.Context,
|
||||
request FineTuningJobRequest,
|
||||
) (response FineTuningJob, err error) {
|
||||
urlSuffix := "/fine_tuning/jobs"
|
||||
req, err := c.newRequest(ctx, http.MethodPost, c.fullURL(urlSuffix), withBody(request))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
|
||||
|
||||
// CancelFineTuningJob cancels a fine-tuning job.
|
||||
func (c *Client) CancelFineTuningJob(ctx context.Context, fineTuningJobID string) (response FineTuningJob, err error) {
|
||||
req, err := c.newRequest(ctx, http.MethodPost, c.fullURL("/fine_tuning/jobs/"+fineTuningJobID+"/cancel"))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
|
||||
|
||||
// RetrieveFineTuningJob retrieves a fine-tuning job.
|
||||
func (c *Client) RetrieveFineTuningJob(
|
||||
ctx context.Context,
|
||||
fineTuningJobID string,
|
||||
) (response FineTuningJob, err error) {
|
||||
urlSuffix := fmt.Sprintf("/fine_tuning/jobs/%s", fineTuningJobID)
|
||||
req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
|
||||
|
||||
type listFineTuningJobEventsParameters struct {
|
||||
after *string
|
||||
limit *int
|
||||
}
|
||||
|
||||
type ListFineTuningJobEventsParameter func(*listFineTuningJobEventsParameters)
|
||||
|
||||
func ListFineTuningJobEventsWithAfter(after string) ListFineTuningJobEventsParameter {
|
||||
return func(args *listFineTuningJobEventsParameters) {
|
||||
args.after = &after
|
||||
}
|
||||
}
|
||||
|
||||
func ListFineTuningJobEventsWithLimit(limit int) ListFineTuningJobEventsParameter {
|
||||
return func(args *listFineTuningJobEventsParameters) {
|
||||
args.limit = &limit
|
||||
}
|
||||
}
|
||||
|
||||
// ListFineTuningJobEvents lists fine-tuning job events.
|
||||
func (c *Client) ListFineTuningJobEvents(
|
||||
ctx context.Context,
|
||||
fineTuningJobID string,
|
||||
setters ...ListFineTuningJobEventsParameter,
|
||||
) (response FineTuningJobEventList, err error) {
|
||||
parameters := &listFineTuningJobEventsParameters{
|
||||
after: nil,
|
||||
limit: nil,
|
||||
}
|
||||
|
||||
for _, setter := range setters {
|
||||
setter(parameters)
|
||||
}
|
||||
|
||||
urlValues := url.Values{}
|
||||
if parameters.after != nil {
|
||||
urlValues.Add("after", *parameters.after)
|
||||
}
|
||||
if parameters.limit != nil {
|
||||
urlValues.Add("limit", fmt.Sprintf("%d", *parameters.limit))
|
||||
}
|
||||
|
||||
encodedValues := ""
|
||||
if len(urlValues) > 0 {
|
||||
encodedValues = "?" + urlValues.Encode()
|
||||
}
|
||||
|
||||
req, err := c.newRequest(
|
||||
ctx,
|
||||
http.MethodGet,
|
||||
c.fullURL("/fine_tuning/jobs/"+fineTuningJobID+"/events"+encodedValues),
|
||||
)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
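A sketch of creating a fine-tuning job from an uploaded file and paging through its events; the file ID and base model are placeholders:

job, err := client.CreateFineTuningJob(context.Background(), openai.FineTuningJobRequest{
	TrainingFile: "file-abc123",            // placeholder ID returned by CreateFile
	Model:        openai.GPT4oMini20240718, // placeholder base model
})
if err != nil {
	log.Fatal(err)
}

events, err := client.ListFineTuningJobEvents(context.Background(), job.ID,
	openai.ListFineTuningJobEventsWithLimit(20),
)
if err != nil {
	log.Fatal(err)
}
for _, e := range events.Data {
	fmt.Println(e.Level, e.Message)
}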
|
||||
289
vendor/github.com/sashabaranov/go-openai/image.go
generated
vendored
Normal file
289
vendor/github.com/sashabaranov/go-openai/image.go
generated
vendored
Normal file
@@ -0,0 +1,289 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Image sizes defined by the OpenAI API.
|
||||
const (
|
||||
CreateImageSize256x256 = "256x256"
|
||||
CreateImageSize512x512 = "512x512"
|
||||
CreateImageSize1024x1024 = "1024x1024"
|
||||
|
||||
// dall-e-3 supported only.
|
||||
CreateImageSize1792x1024 = "1792x1024"
|
||||
CreateImageSize1024x1792 = "1024x1792"
|
||||
|
||||
// gpt-image-1 supported only.
|
||||
CreateImageSize1536x1024 = "1536x1024" // Landscape
|
||||
CreateImageSize1024x1536 = "1024x1536" // Portrait
|
||||
)
|
||||
|
||||
const (
|
||||
// dall-e-2 and dall-e-3 only.
|
||||
CreateImageResponseFormatB64JSON = "b64_json"
|
||||
CreateImageResponseFormatURL = "url"
|
||||
)
|
||||
|
||||
const (
|
||||
CreateImageModelDallE2 = "dall-e-2"
|
||||
CreateImageModelDallE3 = "dall-e-3"
|
||||
CreateImageModelGptImage1 = "gpt-image-1"
|
||||
)
|
||||
|
||||
const (
|
||||
CreateImageQualityHD = "hd"
|
||||
CreateImageQualityStandard = "standard"
|
||||
|
||||
// gpt-image-1 only.
|
||||
CreateImageQualityHigh = "high"
|
||||
CreateImageQualityMedium = "medium"
|
||||
CreateImageQualityLow = "low"
|
||||
)
|
||||
|
||||
const (
|
||||
// dall-e-3 only.
|
||||
CreateImageStyleVivid = "vivid"
|
||||
CreateImageStyleNatural = "natural"
|
||||
)
|
||||
|
||||
const (
|
||||
// gpt-image-1 only.
|
||||
CreateImageBackgroundTransparent = "transparent"
|
||||
CreateImageBackgroundOpaque = "opaque"
|
||||
)
|
||||
|
||||
const (
|
||||
// gpt-image-1 only.
|
||||
CreateImageModerationLow = "low"
|
||||
)
|
||||
|
||||
const (
|
||||
// gpt-image-1 only.
|
||||
CreateImageOutputFormatPNG = "png"
|
||||
CreateImageOutputFormatJPEG = "jpeg"
|
||||
CreateImageOutputFormatWEBP = "webp"
|
||||
)
|
||||
|
||||
// ImageRequest represents the request structure for the image API.
|
||||
type ImageRequest struct {
|
||||
Prompt string `json:"prompt,omitempty"`
|
||||
Model string `json:"model,omitempty"`
|
||||
N int `json:"n,omitempty"`
|
||||
Quality string `json:"quality,omitempty"`
|
||||
Size string `json:"size,omitempty"`
|
||||
Style string `json:"style,omitempty"`
|
||||
ResponseFormat string `json:"response_format,omitempty"`
|
||||
User string `json:"user,omitempty"`
|
||||
Background string `json:"background,omitempty"`
|
||||
Moderation string `json:"moderation,omitempty"`
|
||||
OutputCompression int `json:"output_compression,omitempty"`
|
||||
OutputFormat string `json:"output_format,omitempty"`
|
||||
}
|
||||
|
||||
// ImageResponse represents a response structure for image API.
|
||||
type ImageResponse struct {
|
||||
Created int64 `json:"created,omitempty"`
|
||||
Data []ImageResponseDataInner `json:"data,omitempty"`
|
||||
Usage ImageResponseUsage `json:"usage,omitempty"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
// ImageResponseInputTokensDetails represents the token breakdown for input tokens.
|
||||
type ImageResponseInputTokensDetails struct {
|
||||
TextTokens int `json:"text_tokens,omitempty"`
|
||||
ImageTokens int `json:"image_tokens,omitempty"`
|
||||
}
|
||||
|
||||
// ImageResponseUsage represents the token usage information for image API.
|
||||
type ImageResponseUsage struct {
|
||||
TotalTokens int `json:"total_tokens,omitempty"`
|
||||
InputTokens int `json:"input_tokens,omitempty"`
|
||||
OutputTokens int `json:"output_tokens,omitempty"`
|
||||
InputTokensDetails ImageResponseInputTokensDetails `json:"input_tokens_details,omitempty"`
|
||||
}
|
||||
|
||||
// ImageResponseDataInner represents a response data structure for image API.
|
||||
type ImageResponseDataInner struct {
|
||||
URL string `json:"url,omitempty"`
|
||||
B64JSON string `json:"b64_json,omitempty"`
|
||||
RevisedPrompt string `json:"revised_prompt,omitempty"`
|
||||
}
|
||||
|
||||
// CreateImage - API call to create an image. This is the main endpoint of the DALL-E API.
|
||||
func (c *Client) CreateImage(ctx context.Context, request ImageRequest) (response ImageResponse, err error) {
|
||||
urlSuffix := "/images/generations"
|
||||
req, err := c.newRequest(
|
||||
ctx,
|
||||
http.MethodPost,
|
||||
c.fullURL(urlSuffix, withModel(request.Model)),
|
||||
withBody(request),
|
||||
)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
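A minimal usage sketch for CreateImage. The size, model, and response-format constants come from this file; the API key is a placeholder and NewClient is assumed from the rest of the library:

package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your-api-key") // placeholder key

	resp, err := client.CreateImage(context.Background(), openai.ImageRequest{
		Prompt:         "a watercolor painting of a lighthouse at dawn",
		Model:          openai.CreateImageModelDallE3,
		N:              1,
		Size:           openai.CreateImageSize1024x1024,
		ResponseFormat: openai.CreateImageResponseFormatURL,
	})
	if err != nil {
		fmt.Println("image generation error:", err)
		return
	}
	if len(resp.Data) > 0 {
		fmt.Println(resp.Data[0].URL)
	}
}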
|
||||
|
||||
// WrapReader wraps an io.Reader with filename and Content-type.
|
||||
func WrapReader(rdr io.Reader, filename string, contentType string) io.Reader {
|
||||
return file{rdr, filename, contentType}
|
||||
}
|
||||
|
||||
type file struct {
|
||||
io.Reader
|
||||
name string
|
||||
contentType string
|
||||
}
|
||||
|
||||
func (f file) Name() string {
|
||||
if f.name != "" {
|
||||
return f.name
|
||||
} else if named, ok := f.Reader.(interface{ Name() string }); ok {
|
||||
return named.Name()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (f file) ContentType() string {
|
||||
return f.contentType
|
||||
}
|
||||
|
||||
// ImageEditRequest represents the request structure for the image API.
|
||||
// Use WrapReader to wrap an io.Reader with filename and Content-type.
|
||||
type ImageEditRequest struct {
|
||||
Image io.Reader `json:"image,omitempty"`
|
||||
Mask io.Reader `json:"mask,omitempty"`
|
||||
Prompt string `json:"prompt,omitempty"`
|
||||
Model string `json:"model,omitempty"`
|
||||
N int `json:"n,omitempty"`
|
||||
Size string `json:"size,omitempty"`
|
||||
ResponseFormat string `json:"response_format,omitempty"`
|
||||
Quality string `json:"quality,omitempty"`
|
||||
User string `json:"user,omitempty"`
|
||||
}
|
||||
|
||||
// CreateEditImage - API call to edit an image using a prompt and an optional mask.
|
||||
func (c *Client) CreateEditImage(ctx context.Context, request ImageEditRequest) (response ImageResponse, err error) {
|
||||
body := &bytes.Buffer{}
|
||||
builder := c.createFormBuilder(body)
|
||||
|
||||
// image, filename verification can be postponed
|
||||
err = builder.CreateFormFileReader("image", request.Image, "")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// mask is optional
|
||||
if request.Mask != nil {
|
||||
// filename verification can be postponed
|
||||
err = builder.CreateFormFileReader("mask", request.Mask, "")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
err = builder.WriteField("prompt", request.Prompt)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = builder.WriteField("n", strconv.Itoa(request.N))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = builder.WriteField("size", request.Size)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = builder.WriteField("response_format", request.ResponseFormat)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = builder.Close()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
req, err := c.newRequest(
|
||||
ctx,
|
||||
http.MethodPost,
|
||||
c.fullURL("/images/edits", withModel(request.Model)),
|
||||
withBody(body),
|
||||
withContentType(builder.FormDataContentType()),
|
||||
)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
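A minimal usage sketch for CreateEditImage, using WrapReader (defined above) to attach the filename and content type the multipart form needs. The local file path and API key are placeholders, and NewClient is assumed from the rest of the library:

package main

import (
	"context"
	"fmt"
	"os"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your-api-key") // placeholder key

	img, err := os.Open("input.png") // hypothetical local file
	if err != nil {
		fmt.Println("open error:", err)
		return
	}
	defer img.Close()

	resp, err := client.CreateEditImage(context.Background(), openai.ImageEditRequest{
		// WrapReader supplies the filename and content type for the form file part.
		Image:          openai.WrapReader(img, "input.png", "image/png"),
		Prompt:         "add a small red boat on the water",
		N:              1,
		Size:           openai.CreateImageSize1024x1024,
		ResponseFormat: openai.CreateImageResponseFormatURL,
	})
	if err != nil {
		fmt.Println("image edit error:", err)
		return
	}
	if len(resp.Data) > 0 {
		fmt.Println(resp.Data[0].URL)
	}
}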
|
||||
|
||||
// ImageVariRequest represents the request structure for the image API.
|
||||
// Use WrapReader to wrap an io.Reader with filename and Content-type.
|
||||
type ImageVariRequest struct {
|
||||
Image io.Reader `json:"image,omitempty"`
|
||||
Model string `json:"model,omitempty"`
|
||||
N int `json:"n,omitempty"`
|
||||
Size string `json:"size,omitempty"`
|
||||
ResponseFormat string `json:"response_format,omitempty"`
|
||||
User string `json:"user,omitempty"`
|
||||
}
|
||||
|
||||
// CreateVariImage - API call to create an image variation. This is the main endpoint of the DALL-E API.
|
||||
// Use the abbreviation "vari" (for "variation") because ci-lint enforces a single-line length limit.
|
||||
func (c *Client) CreateVariImage(ctx context.Context, request ImageVariRequest) (response ImageResponse, err error) {
|
||||
body := &bytes.Buffer{}
|
||||
builder := c.createFormBuilder(body)
|
||||
|
||||
// image, filename verification can be postponed
|
||||
err = builder.CreateFormFileReader("image", request.Image, "")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = builder.WriteField("n", strconv.Itoa(request.N))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = builder.WriteField("size", request.Size)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = builder.WriteField("response_format", request.ResponseFormat)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = builder.Close()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
req, err := c.newRequest(
|
||||
ctx,
|
||||
http.MethodPost,
|
||||
c.fullURL("/images/variations", withModel(request.Model)),
|
||||
withBody(body),
|
||||
withContentType(builder.FormDataContentType()),
|
||||
)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
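A minimal usage sketch for CreateVariImage, following the same pattern as the edit endpoint. The file path and API key are placeholders, and NewClient is assumed from the rest of the library:

package main

import (
	"context"
	"fmt"
	"os"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your-api-key") // placeholder key

	img, err := os.Open("input.png") // hypothetical local file
	if err != nil {
		fmt.Println("open error:", err)
		return
	}
	defer img.Close()

	resp, err := client.CreateVariImage(context.Background(), openai.ImageVariRequest{
		Image:          openai.WrapReader(img, "input.png", "image/png"),
		N:              2,
		Size:           openai.CreateImageSize512x512,
		ResponseFormat: openai.CreateImageResponseFormatURL,
	})
	if err != nil {
		fmt.Println("image variation error:", err)
		return
	}
	for _, d := range resp.Data {
		fmt.Println(d.URL)
	}
}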
|
||||
44
vendor/github.com/sashabaranov/go-openai/internal/error_accumulator.go
generated
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
type ErrorAccumulator interface {
|
||||
Write(p []byte) error
|
||||
Bytes() []byte
|
||||
}
|
||||
|
||||
type errorBuffer interface {
|
||||
io.Writer
|
||||
Len() int
|
||||
Bytes() []byte
|
||||
}
|
||||
|
||||
type DefaultErrorAccumulator struct {
|
||||
Buffer errorBuffer
|
||||
}
|
||||
|
||||
func NewErrorAccumulator() ErrorAccumulator {
|
||||
return &DefaultErrorAccumulator{
|
||||
Buffer: &bytes.Buffer{},
|
||||
}
|
||||
}
|
||||
|
||||
func (e *DefaultErrorAccumulator) Write(p []byte) error {
|
||||
_, err := e.Buffer.Write(p)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error accumulator write error, %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *DefaultErrorAccumulator) Bytes() (errBytes []byte) {
|
||||
if e.Buffer.Len() == 0 {
|
||||
return
|
||||
}
|
||||
errBytes = e.Buffer.Bytes()
|
||||
return
|
||||
}
|
||||
112
vendor/github.com/sashabaranov/go-openai/internal/form_builder.go
generated
vendored
Normal file
@@ -0,0 +1,112 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"net/textproto"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type FormBuilder interface {
|
||||
CreateFormFile(fieldname string, file *os.File) error
|
||||
CreateFormFileReader(fieldname string, r io.Reader, filename string) error
|
||||
WriteField(fieldname, value string) error
|
||||
Close() error
|
||||
FormDataContentType() string
|
||||
}
|
||||
|
||||
type DefaultFormBuilder struct {
|
||||
writer *multipart.Writer
|
||||
}
|
||||
|
||||
func NewFormBuilder(body io.Writer) *DefaultFormBuilder {
|
||||
return &DefaultFormBuilder{
|
||||
writer: multipart.NewWriter(body),
|
||||
}
|
||||
}
|
||||
|
||||
func (fb *DefaultFormBuilder) CreateFormFile(fieldname string, file *os.File) error {
|
||||
return fb.createFormFile(fieldname, file, file.Name())
|
||||
}
|
||||
|
||||
var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")
|
||||
|
||||
func escapeQuotes(s string) string {
|
||||
return quoteEscaper.Replace(s)
|
||||
}
|
||||
|
||||
// CreateFormFileReader creates a form field with a file reader.
|
||||
// The filename in Content-Disposition is required.
|
||||
func (fb *DefaultFormBuilder) CreateFormFileReader(fieldname string, r io.Reader, filename string) error {
|
||||
if filename == "" {
|
||||
if f, ok := r.(interface{ Name() string }); ok {
|
||||
filename = f.Name()
|
||||
}
|
||||
}
|
||||
var contentType string
|
||||
if f, ok := r.(interface{ ContentType() string }); ok {
|
||||
contentType = f.ContentType()
|
||||
}
|
||||
|
||||
h := make(textproto.MIMEHeader)
|
||||
h.Set(
|
||||
"Content-Disposition",
|
||||
fmt.Sprintf(
|
||||
`form-data; name="%s"; filename="%s"`,
|
||||
escapeQuotes(fieldname),
|
||||
escapeQuotes(filepath.Base(filename)),
|
||||
),
|
||||
)
|
||||
// content type is optional, but it can be set
|
||||
if contentType != "" {
|
||||
h.Set("Content-Type", contentType)
|
||||
}
|
||||
|
||||
fieldWriter, err := fb.writer.CreatePart(h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = io.Copy(fieldWriter, r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fb *DefaultFormBuilder) createFormFile(fieldname string, r io.Reader, filename string) error {
|
||||
if filename == "" {
|
||||
return fmt.Errorf("filename cannot be empty")
|
||||
}
|
||||
|
||||
fieldWriter, err := fb.writer.CreateFormFile(fieldname, filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = io.Copy(fieldWriter, r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fb *DefaultFormBuilder) WriteField(fieldname, value string) error {
|
||||
if fieldname == "" {
|
||||
return fmt.Errorf("fieldname cannot be empty")
|
||||
}
|
||||
return fb.writer.WriteField(fieldname, value)
|
||||
}
|
||||
|
||||
func (fb *DefaultFormBuilder) Close() error {
|
||||
return fb.writer.Close()
|
||||
}
|
||||
|
||||
func (fb *DefaultFormBuilder) FormDataContentType() string {
|
||||
return fb.writer.FormDataContentType()
|
||||
}
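DefaultFormBuilder above is a thin wrapper around the standard library's mime/multipart writer. The standalone sketch below shows the same pattern with the standard library directly; the field names and in-memory image bytes are illustrative only:

package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
)

func main() {
	body := &bytes.Buffer{}
	w := multipart.NewWriter(body)

	// Equivalent of CreateFormFileReader: a file part copied from any reader or byte slice.
	part, err := w.CreateFormFile("image", "input.png")
	if err != nil {
		panic(err)
	}
	if _, err := part.Write([]byte("...image bytes...")); err != nil {
		panic(err)
	}

	// Equivalent of WriteField: a plain text field.
	if err := w.WriteField("prompt", "add a small red boat"); err != nil {
		panic(err)
	}

	// Close finalizes the multipart boundary; the content type goes in the request header.
	if err := w.Close(); err != nil {
		panic(err)
	}
	fmt.Println(w.FormDataContentType())
	fmt.Println(body.Len() > 0) // true
}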
|
||||
15
vendor/github.com/sashabaranov/go-openai/internal/marshaller.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
type Marshaller interface {
|
||||
Marshal(value any) ([]byte, error)
|
||||
}
|
||||
|
||||
type JSONMarshaller struct{}
|
||||
|
||||
func (jm *JSONMarshaller) Marshal(value any) ([]byte, error) {
|
||||
return json.Marshal(value)
|
||||
}
|
||||
52
vendor/github.com/sashabaranov/go-openai/internal/request_builder.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
type RequestBuilder interface {
|
||||
Build(ctx context.Context, method, url string, body any, header http.Header) (*http.Request, error)
|
||||
}
|
||||
|
||||
type HTTPRequestBuilder struct {
|
||||
marshaller Marshaller
|
||||
}
|
||||
|
||||
func NewRequestBuilder() *HTTPRequestBuilder {
|
||||
return &HTTPRequestBuilder{
|
||||
marshaller: &JSONMarshaller{},
|
||||
}
|
||||
}
|
||||
|
||||
func (b *HTTPRequestBuilder) Build(
|
||||
ctx context.Context,
|
||||
method string,
|
||||
url string,
|
||||
body any,
|
||||
header http.Header,
|
||||
) (req *http.Request, err error) {
|
||||
var bodyReader io.Reader
|
||||
if body != nil {
|
||||
if v, ok := body.(io.Reader); ok {
|
||||
bodyReader = v
|
||||
} else {
|
||||
var reqBytes []byte
|
||||
reqBytes, err = b.marshaller.Marshal(body)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
bodyReader = bytes.NewBuffer(reqBytes)
|
||||
}
|
||||
}
|
||||
req, err = http.NewRequestWithContext(ctx, method, url, bodyReader)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if header != nil {
|
||||
req.Header = header
|
||||
}
|
||||
return
|
||||
}
|
||||
15
vendor/github.com/sashabaranov/go-openai/internal/unmarshaler.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
type Unmarshaler interface {
|
||||
Unmarshal(data []byte, v any) error
|
||||
}
|
||||
|
||||
type JSONUnmarshaler struct{}
|
||||
|
||||
func (jm *JSONUnmarshaler) Unmarshal(data []byte, v any) error {
|
||||
return json.Unmarshal(data, v)
|
||||
}
|
||||
235
vendor/github.com/sashabaranov/go-openai/jsonschema/json.go
generated
vendored
Normal file
@@ -0,0 +1,235 @@
|
||||
// Package jsonschema provides very simple functionality for representing a JSON schema as a
|
||||
// (nested) struct. This struct can be used with the chat completion "function call" feature.
|
||||
// For more complicated schemas, it is recommended to use a dedicated JSON schema library
|
||||
// and/or pass in the schema in []byte format.
|
||||
package jsonschema
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type DataType string
|
||||
|
||||
const (
|
||||
Object DataType = "object"
|
||||
Number DataType = "number"
|
||||
Integer DataType = "integer"
|
||||
String DataType = "string"
|
||||
Array DataType = "array"
|
||||
Null DataType = "null"
|
||||
Boolean DataType = "boolean"
|
||||
)
|
||||
|
||||
// Definition is a struct for describing a JSON Schema.
|
||||
// It is fairly limited, and you may have better luck using a third-party library.
|
||||
type Definition struct {
|
||||
// Type specifies the data type of the schema.
|
||||
Type DataType `json:"type,omitempty"`
|
||||
// Description is the description of the schema.
|
||||
Description string `json:"description,omitempty"`
|
||||
// Enum is used to restrict a value to a fixed set of values. It must be an array with at least
|
||||
// one element, where each element is unique. You will probably only use this with strings.
|
||||
Enum []string `json:"enum,omitempty"`
|
||||
// Properties describes the properties of an object, if the schema type is Object.
|
||||
Properties map[string]Definition `json:"properties,omitempty"`
|
||||
// Required specifies which properties are required, if the schema type is Object.
|
||||
Required []string `json:"required,omitempty"`
|
||||
// Items specifies which data type an array contains, if the schema type is Array.
|
||||
Items *Definition `json:"items,omitempty"`
|
||||
// AdditionalProperties is used to control the handling of properties in an object
|
||||
// that are not explicitly defined in the properties section of the schema. example:
|
||||
// additionalProperties: true
|
||||
// additionalProperties: false
|
||||
// additionalProperties: jsonschema.Definition{Type: jsonschema.String}
|
||||
AdditionalProperties any `json:"additionalProperties,omitempty"`
|
||||
// Whether the schema is nullable or not.
|
||||
Nullable bool `json:"nullable,omitempty"`
|
||||
|
||||
// Ref Reference to a definition in $defs or external schema.
|
||||
Ref string `json:"$ref,omitempty"`
|
||||
// Defs A map of reusable schema definitions.
|
||||
Defs map[string]Definition `json:"$defs,omitempty"`
|
||||
}
|
||||
|
||||
func (d *Definition) MarshalJSON() ([]byte, error) {
|
||||
if d.Properties == nil {
|
||||
d.Properties = make(map[string]Definition)
|
||||
}
|
||||
type Alias Definition
|
||||
return json.Marshal(struct {
|
||||
Alias
|
||||
}{
|
||||
Alias: (Alias)(*d),
|
||||
})
|
||||
}
|
||||
|
||||
func (d *Definition) Unmarshal(content string, v any) error {
|
||||
return VerifySchemaAndUnmarshal(*d, []byte(content), v)
|
||||
}
|
||||
|
||||
func GenerateSchemaForType(v any) (*Definition, error) {
|
||||
var defs = make(map[string]Definition)
|
||||
def, err := reflectSchema(reflect.TypeOf(v), defs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// If the schema has a root $ref, resolve it by:
|
||||
// 1. Extracting the key from the $ref.
|
||||
// 2. Detaching the referenced definition from $defs.
|
||||
// 3. Checking for self-references in the detached definition.
|
||||
// - If a self-reference is found, restore the original $defs structure.
|
||||
// 4. Flattening the referenced definition into the root schema.
|
||||
// 5. Clearing the $ref field in the root schema.
|
||||
if def.Ref != "" {
|
||||
origRef := def.Ref
|
||||
key := strings.TrimPrefix(origRef, "#/$defs/")
|
||||
if root, ok := defs[key]; ok {
|
||||
delete(defs, key)
|
||||
root.Defs = defs
|
||||
if containsRef(root, origRef) {
|
||||
root.Defs = nil
|
||||
defs[key] = root
|
||||
}
|
||||
*def = root
|
||||
}
|
||||
def.Ref = ""
|
||||
}
|
||||
def.Defs = defs
|
||||
return def, nil
|
||||
}
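A minimal usage sketch for GenerateSchemaForType. The struct and its tags are hypothetical; the json, description, enum, and required tags are the ones read by reflectSchemaObject above:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/sashabaranov/go-openai/jsonschema"
)

// WeatherQuery is a hypothetical argument struct for a function-calling tool.
type WeatherQuery struct {
	Location string `json:"location" description:"City and country" required:"true"`
	Unit     string `json:"unit,omitempty" enum:"celsius,fahrenheit"`
}

func main() {
	schema, err := jsonschema.GenerateSchemaForType(WeatherQuery{})
	if err != nil {
		fmt.Println("schema error:", err)
		return
	}
	out, _ := json.MarshalIndent(schema, "", "  ")
	fmt.Println(string(out))
}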
|
||||
|
||||
func reflectSchema(t reflect.Type, defs map[string]Definition) (*Definition, error) {
|
||||
var d Definition
|
||||
switch t.Kind() {
|
||||
case reflect.String:
|
||||
d.Type = String
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
d.Type = Integer
|
||||
case reflect.Float32, reflect.Float64:
|
||||
d.Type = Number
|
||||
case reflect.Bool:
|
||||
d.Type = Boolean
|
||||
case reflect.Slice, reflect.Array:
|
||||
d.Type = Array
|
||||
items, err := reflectSchema(t.Elem(), defs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
d.Items = items
|
||||
case reflect.Struct:
|
||||
if t.Name() != "" {
|
||||
if _, ok := defs[t.Name()]; !ok {
|
||||
defs[t.Name()] = Definition{}
|
||||
object, err := reflectSchemaObject(t, defs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defs[t.Name()] = *object
|
||||
}
|
||||
return &Definition{Ref: "#/$defs/" + t.Name()}, nil
|
||||
}
|
||||
d.Type = Object
|
||||
d.AdditionalProperties = false
|
||||
object, err := reflectSchemaObject(t, defs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
d = *object
|
||||
case reflect.Ptr:
|
||||
definition, err := reflectSchema(t.Elem(), defs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
d = *definition
|
||||
case reflect.Invalid, reflect.Uintptr, reflect.Complex64, reflect.Complex128,
|
||||
reflect.Chan, reflect.Func, reflect.Interface, reflect.Map,
|
||||
reflect.UnsafePointer:
|
||||
return nil, fmt.Errorf("unsupported type: %s", t.Kind().String())
|
||||
default:
|
||||
}
|
||||
return &d, nil
|
||||
}
|
||||
|
||||
func reflectSchemaObject(t reflect.Type, defs map[string]Definition) (*Definition, error) {
|
||||
var d = Definition{
|
||||
Type: Object,
|
||||
AdditionalProperties: false,
|
||||
}
|
||||
properties := make(map[string]Definition)
|
||||
var requiredFields []string
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
field := t.Field(i)
|
||||
if !field.IsExported() {
|
||||
continue
|
||||
}
|
||||
jsonTag := field.Tag.Get("json")
|
||||
var required = true
|
||||
switch {
|
||||
case jsonTag == "-":
|
||||
continue
|
||||
case jsonTag == "":
|
||||
jsonTag = field.Name
|
||||
case strings.HasSuffix(jsonTag, ",omitempty"):
|
||||
jsonTag = strings.TrimSuffix(jsonTag, ",omitempty")
|
||||
required = false
|
||||
}
|
||||
|
||||
item, err := reflectSchema(field.Type, defs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
description := field.Tag.Get("description")
|
||||
if description != "" {
|
||||
item.Description = description
|
||||
}
|
||||
enum := field.Tag.Get("enum")
|
||||
if enum != "" {
|
||||
item.Enum = strings.Split(enum, ",")
|
||||
}
|
||||
|
||||
if n := field.Tag.Get("nullable"); n != "" {
|
||||
nullable, _ := strconv.ParseBool(n)
|
||||
item.Nullable = nullable
|
||||
}
|
||||
|
||||
properties[jsonTag] = *item
|
||||
|
||||
if s := field.Tag.Get("required"); s != "" {
|
||||
required, _ = strconv.ParseBool(s)
|
||||
}
|
||||
if required {
|
||||
requiredFields = append(requiredFields, jsonTag)
|
||||
}
|
||||
}
|
||||
d.Required = requiredFields
|
||||
d.Properties = properties
|
||||
return &d, nil
|
||||
}
|
||||
|
||||
func containsRef(def Definition, targetRef string) bool {
|
||||
if def.Ref == targetRef {
|
||||
return true
|
||||
}
|
||||
|
||||
for _, d := range def.Defs {
|
||||
if containsRef(d, targetRef) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
for _, prop := range def.Properties {
|
||||
if containsRef(prop, targetRef) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
if def.Items != nil && containsRef(*def.Items, targetRef) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
140
vendor/github.com/sashabaranov/go-openai/jsonschema/validate.go
generated
vendored
Normal file
@@ -0,0 +1,140 @@
|
||||
package jsonschema
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
)
|
||||
|
||||
func CollectDefs(def Definition) map[string]Definition {
|
||||
result := make(map[string]Definition)
|
||||
collectDefsRecursive(def, result, "#")
|
||||
return result
|
||||
}
|
||||
|
||||
func collectDefsRecursive(def Definition, result map[string]Definition, prefix string) {
|
||||
for k, v := range def.Defs {
|
||||
path := prefix + "/$defs/" + k
|
||||
result[path] = v
|
||||
collectDefsRecursive(v, result, path)
|
||||
}
|
||||
for k, sub := range def.Properties {
|
||||
collectDefsRecursive(sub, result, prefix+"/properties/"+k)
|
||||
}
|
||||
if def.Items != nil {
|
||||
collectDefsRecursive(*def.Items, result, prefix)
|
||||
}
|
||||
}
|
||||
|
||||
func VerifySchemaAndUnmarshal(schema Definition, content []byte, v any) error {
|
||||
var data any
|
||||
err := json.Unmarshal(content, &data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !Validate(schema, data, WithDefs(CollectDefs(schema))) {
|
||||
return errors.New("data validation failed against the provided schema")
|
||||
}
|
||||
return json.Unmarshal(content, &v)
|
||||
}
|
||||
|
||||
type validateArgs struct {
|
||||
Defs map[string]Definition
|
||||
}
|
||||
|
||||
type ValidateOption func(*validateArgs)
|
||||
|
||||
func WithDefs(defs map[string]Definition) ValidateOption {
|
||||
return func(option *validateArgs) {
|
||||
option.Defs = defs
|
||||
}
|
||||
}
|
||||
|
||||
func Validate(schema Definition, data any, opts ...ValidateOption) bool {
|
||||
args := validateArgs{}
|
||||
for _, opt := range opts {
|
||||
opt(&args)
|
||||
}
|
||||
if len(opts) == 0 {
|
||||
args.Defs = CollectDefs(schema)
|
||||
}
|
||||
switch schema.Type {
|
||||
case Object:
|
||||
return validateObject(schema, data, args.Defs)
|
||||
case Array:
|
||||
return validateArray(schema, data, args.Defs)
|
||||
case String:
|
||||
v, ok := data.(string)
|
||||
if ok && len(schema.Enum) > 0 {
|
||||
return contains(schema.Enum, v)
|
||||
}
|
||||
return ok
|
||||
case Number: // float64 and int
|
||||
_, ok := data.(float64)
|
||||
if !ok {
|
||||
_, ok = data.(int)
|
||||
}
|
||||
return ok
|
||||
case Boolean:
|
||||
_, ok := data.(bool)
|
||||
return ok
|
||||
case Integer:
|
||||
// Golang unmarshals all numbers as float64, so we need to check if the float64 is an integer
|
||||
if num, ok := data.(float64); ok {
|
||||
return num == float64(int64(num))
|
||||
}
|
||||
_, ok := data.(int)
|
||||
return ok
|
||||
case Null:
|
||||
return data == nil
|
||||
default:
|
||||
if schema.Ref != "" && args.Defs != nil {
|
||||
if v, ok := args.Defs[schema.Ref]; ok {
|
||||
return Validate(v, data, WithDefs(args.Defs))
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func validateObject(schema Definition, data any, defs map[string]Definition) bool {
|
||||
dataMap, ok := data.(map[string]any)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
for _, field := range schema.Required {
|
||||
if _, exists := dataMap[field]; !exists {
|
||||
return false
|
||||
}
|
||||
}
|
||||
for key, valueSchema := range schema.Properties {
|
||||
value, exists := dataMap[key]
|
||||
if exists && !Validate(valueSchema, value, WithDefs(defs)) {
|
||||
return false
|
||||
} else if !exists && contains(schema.Required, key) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func validateArray(schema Definition, data any, defs map[string]Definition) bool {
|
||||
dataArray, ok := data.([]any)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
for _, item := range dataArray {
|
||||
if !Validate(*schema.Items, item, WithDefs(defs)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func contains[S ~[]E, E comparable](s S, v E) bool {
|
||||
for i := range s {
|
||||
if v == s[i] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
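A minimal sketch of VerifySchemaAndUnmarshal, which validates raw JSON against a Definition before decoding it. The schema and payloads here are made up for illustration:

package main

import (
	"fmt"

	"github.com/sashabaranov/go-openai/jsonschema"
)

func main() {
	// A hand-written schema: an object with a required string property.
	schema := jsonschema.Definition{
		Type: jsonschema.Object,
		Properties: map[string]jsonschema.Definition{
			"name": {Type: jsonschema.String},
		},
		Required: []string{"name"},
	}

	var parsed struct {
		Name string `json:"name"`
	}
	// A conforming payload validates and is decoded into the target struct.
	err := jsonschema.VerifySchemaAndUnmarshal(schema, []byte(`{"name":"Ada"}`), &parsed)
	fmt.Println(parsed.Name, err) // Ada <nil>

	// A payload missing the required field fails validation.
	err = jsonschema.VerifySchemaAndUnmarshal(schema, []byte(`{}`), &parsed)
	fmt.Println(err != nil) // true
}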
|
||||
224
vendor/github.com/sashabaranov/go-openai/messages.go
generated
vendored
Normal file
@@ -0,0 +1,224 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
const (
|
||||
messagesSuffix = "messages"
|
||||
)
|
||||
|
||||
type Message struct {
|
||||
ID string `json:"id"`
|
||||
Object string `json:"object"`
|
||||
CreatedAt int `json:"created_at"`
|
||||
ThreadID string `json:"thread_id"`
|
||||
Role string `json:"role"`
|
||||
Content []MessageContent `json:"content"`
|
||||
FileIds []string `json:"file_ids"` //nolint:revive //backwards-compatibility
|
||||
AssistantID *string `json:"assistant_id,omitempty"`
|
||||
RunID *string `json:"run_id,omitempty"`
|
||||
Metadata map[string]any `json:"metadata"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
type MessagesList struct {
|
||||
Messages []Message `json:"data"`
|
||||
|
||||
Object string `json:"object"`
|
||||
FirstID *string `json:"first_id"`
|
||||
LastID *string `json:"last_id"`
|
||||
HasMore bool `json:"has_more"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
type MessageContent struct {
|
||||
Type string `json:"type"`
|
||||
Text *MessageText `json:"text,omitempty"`
|
||||
ImageFile *ImageFile `json:"image_file,omitempty"`
|
||||
ImageURL *ImageURL `json:"image_url,omitempty"`
|
||||
}
|
||||
type MessageText struct {
|
||||
Value string `json:"value"`
|
||||
Annotations []any `json:"annotations"`
|
||||
}
|
||||
|
||||
type ImageFile struct {
|
||||
FileID string `json:"file_id"`
|
||||
}
|
||||
|
||||
type ImageURL struct {
|
||||
URL string `json:"url"`
|
||||
Detail string `json:"detail"`
|
||||
}
|
||||
|
||||
type MessageRequest struct {
|
||||
Role string `json:"role"`
|
||||
Content string `json:"content"`
|
||||
FileIds []string `json:"file_ids,omitempty"` //nolint:revive // backwards-compatibility
|
||||
Metadata map[string]any `json:"metadata,omitempty"`
|
||||
Attachments []ThreadAttachment `json:"attachments,omitempty"`
|
||||
}
|
||||
|
||||
type MessageFile struct {
|
||||
ID string `json:"id"`
|
||||
Object string `json:"object"`
|
||||
CreatedAt int `json:"created_at"`
|
||||
MessageID string `json:"message_id"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
type MessageFilesList struct {
|
||||
MessageFiles []MessageFile `json:"data"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
type MessageDeletionStatus struct {
|
||||
ID string `json:"id"`
|
||||
Object string `json:"object"`
|
||||
Deleted bool `json:"deleted"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
// CreateMessage creates a new message.
|
||||
func (c *Client) CreateMessage(ctx context.Context, threadID string, request MessageRequest) (msg Message, err error) {
|
||||
urlSuffix := fmt.Sprintf("/threads/%s/%s", threadID, messagesSuffix)
|
||||
req, err := c.newRequest(ctx, http.MethodPost, c.fullURL(urlSuffix), withBody(request),
|
||||
withBetaAssistantVersion(c.config.AssistantVersion))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &msg)
|
||||
return
|
||||
}
|
||||
|
||||
// ListMessage fetches all messages in the thread.
|
||||
func (c *Client) ListMessage(ctx context.Context, threadID string,
|
||||
limit *int,
|
||||
order *string,
|
||||
after *string,
|
||||
before *string,
|
||||
runID *string,
|
||||
) (messages MessagesList, err error) {
|
||||
urlValues := url.Values{}
|
||||
if limit != nil {
|
||||
urlValues.Add("limit", fmt.Sprintf("%d", *limit))
|
||||
}
|
||||
if order != nil {
|
||||
urlValues.Add("order", *order)
|
||||
}
|
||||
if after != nil {
|
||||
urlValues.Add("after", *after)
|
||||
}
|
||||
if before != nil {
|
||||
urlValues.Add("before", *before)
|
||||
}
|
||||
if runID != nil {
|
||||
urlValues.Add("run_id", *runID)
|
||||
}
|
||||
|
||||
encodedValues := ""
|
||||
if len(urlValues) > 0 {
|
||||
encodedValues = "?" + urlValues.Encode()
|
||||
}
|
||||
|
||||
urlSuffix := fmt.Sprintf("/threads/%s/%s%s", threadID, messagesSuffix, encodedValues)
|
||||
req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix),
|
||||
withBetaAssistantVersion(c.config.AssistantVersion))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &messages)
|
||||
return
|
||||
}
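A minimal usage sketch for CreateMessage and ListMessage. The thread ID and API key are placeholders, and NewClient is assumed from the rest of the library; note that all of ListMessage's filter arguments are optional pointers:

package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your-api-key") // placeholder key
	ctx := context.Background()
	threadID := "thread_abc123" // hypothetical thread ID

	// Add a user message to the thread.
	_, err := client.CreateMessage(ctx, threadID, openai.MessageRequest{
		Role:    "user",
		Content: "Summarize the attached report in three bullet points.",
	})
	if err != nil {
		fmt.Println("create message error:", err)
		return
	}

	// List the five most recent messages; unused filters are passed as nil.
	limit := 5
	order := "desc"
	msgs, err := client.ListMessage(ctx, threadID, &limit, &order, nil, nil, nil)
	if err != nil {
		fmt.Println("list messages error:", err)
		return
	}
	for _, m := range msgs.Messages {
		fmt.Println(m.Role, m.ID)
	}
}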
|
||||
|
||||
// RetrieveMessage retrieves a Message.
|
||||
func (c *Client) RetrieveMessage(
|
||||
ctx context.Context,
|
||||
threadID, messageID string,
|
||||
) (msg Message, err error) {
|
||||
urlSuffix := fmt.Sprintf("/threads/%s/%s/%s", threadID, messagesSuffix, messageID)
|
||||
req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix),
|
||||
withBetaAssistantVersion(c.config.AssistantVersion))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &msg)
|
||||
return
|
||||
}
|
||||
|
||||
// ModifyMessage modifies a message.
|
||||
func (c *Client) ModifyMessage(
|
||||
ctx context.Context,
|
||||
threadID, messageID string,
|
||||
metadata map[string]string,
|
||||
) (msg Message, err error) {
|
||||
urlSuffix := fmt.Sprintf("/threads/%s/%s/%s", threadID, messagesSuffix, messageID)
|
||||
req, err := c.newRequest(ctx, http.MethodPost, c.fullURL(urlSuffix),
|
||||
withBody(map[string]any{"metadata": metadata}), withBetaAssistantVersion(c.config.AssistantVersion))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &msg)
|
||||
return
|
||||
}
|
||||
|
||||
// RetrieveMessageFile fetches a message file.
|
||||
func (c *Client) RetrieveMessageFile(
|
||||
ctx context.Context,
|
||||
threadID, messageID, fileID string,
|
||||
) (file MessageFile, err error) {
|
||||
urlSuffix := fmt.Sprintf("/threads/%s/%s/%s/files/%s", threadID, messagesSuffix, messageID, fileID)
|
||||
req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix),
|
||||
withBetaAssistantVersion(c.config.AssistantVersion))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &file)
|
||||
return
|
||||
}
|
||||
|
||||
// ListMessageFiles fetches all files attached to a message.
|
||||
func (c *Client) ListMessageFiles(
|
||||
ctx context.Context,
|
||||
threadID, messageID string,
|
||||
) (files MessageFilesList, err error) {
|
||||
urlSuffix := fmt.Sprintf("/threads/%s/%s/%s/files", threadID, messagesSuffix, messageID)
|
||||
req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix),
|
||||
withBetaAssistantVersion(c.config.AssistantVersion))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &files)
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteMessage deletes a message.
|
||||
func (c *Client) DeleteMessage(
|
||||
ctx context.Context,
|
||||
threadID, messageID string,
|
||||
) (status MessageDeletionStatus, err error) {
|
||||
urlSuffix := fmt.Sprintf("/threads/%s/%s/%s", threadID, messagesSuffix, messageID)
|
||||
req, err := c.newRequest(ctx, http.MethodDelete, c.fullURL(urlSuffix),
|
||||
withBetaAssistantVersion(c.config.AssistantVersion))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &status)
|
||||
return
|
||||
}
|
||||
90
vendor/github.com/sashabaranov/go-openai/models.go
generated
vendored
Normal file
@@ -0,0 +1,90 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// Model struct represents an OpenAI model.
|
||||
type Model struct {
|
||||
CreatedAt int64 `json:"created"`
|
||||
ID string `json:"id"`
|
||||
Object string `json:"object"`
|
||||
OwnedBy string `json:"owned_by"`
|
||||
Permission []Permission `json:"permission"`
|
||||
Root string `json:"root"`
|
||||
Parent string `json:"parent"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
// Permission struct represents an OpenAI permission.
|
||||
type Permission struct {
|
||||
CreatedAt int64 `json:"created"`
|
||||
ID string `json:"id"`
|
||||
Object string `json:"object"`
|
||||
AllowCreateEngine bool `json:"allow_create_engine"`
|
||||
AllowSampling bool `json:"allow_sampling"`
|
||||
AllowLogprobs bool `json:"allow_logprobs"`
|
||||
AllowSearchIndices bool `json:"allow_search_indices"`
|
||||
AllowView bool `json:"allow_view"`
|
||||
AllowFineTuning bool `json:"allow_fine_tuning"`
|
||||
Organization string `json:"organization"`
|
||||
Group interface{} `json:"group"`
|
||||
IsBlocking bool `json:"is_blocking"`
|
||||
}
|
||||
|
||||
// FineTuneModelDeleteResponse represents the deletion status of a fine-tuned model.
|
||||
type FineTuneModelDeleteResponse struct {
|
||||
ID string `json:"id"`
|
||||
Object string `json:"object"`
|
||||
Deleted bool `json:"deleted"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
// ModelsList is a list of models, including those that belong to the user or organization.
|
||||
type ModelsList struct {
|
||||
Models []Model `json:"data"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
// ListModels lists the currently available models,
|
||||
// and provides basic information about each model such as the model id and parent.
|
||||
func (c *Client) ListModels(ctx context.Context) (models ModelsList, err error) {
|
||||
req, err := c.newRequest(ctx, http.MethodGet, c.fullURL("/models"))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &models)
|
||||
return
|
||||
}
|
||||
|
||||
// GetModel retrieves a model instance, providing basic information about
|
||||
// the model such as the owner and permissioning.
|
||||
func (c *Client) GetModel(ctx context.Context, modelID string) (model Model, err error) {
|
||||
urlSuffix := fmt.Sprintf("/models/%s", modelID)
|
||||
req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &model)
|
||||
return
|
||||
}
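A minimal usage sketch for ListModels and GetModel. The API key is a placeholder, NewClient is assumed from the rest of the library, and the model ID is just an example:

package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your-api-key") // placeholder key
	ctx := context.Background()

	models, err := client.ListModels(ctx)
	if err != nil {
		fmt.Println("list models error:", err)
		return
	}
	for _, m := range models.Models {
		fmt.Println(m.ID, "owned by", m.OwnedBy)
	}

	// Look up a single model by ID.
	model, err := client.GetModel(ctx, "gpt-3.5-turbo")
	if err != nil {
		fmt.Println("get model error:", err)
		return
	}
	fmt.Println(model.ID, model.OwnedBy)
}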
|
||||
|
||||
// DeleteFineTuneModel deletes a fine-tune model. You must have the Owner
|
||||
// role in your organization to delete a model.
|
||||
func (c *Client) DeleteFineTuneModel(ctx context.Context, modelID string) (
|
||||
response FineTuneModelDeleteResponse, err error) {
|
||||
req, err := c.newRequest(ctx, http.MethodDelete, c.fullURL("/models/"+modelID))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
|
||||
107
vendor/github.com/sashabaranov/go-openai/moderation.go
generated
vendored
Normal file
@@ -0,0 +1,107 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// The moderation endpoint is a tool you can use to check whether content complies with OpenAI's usage policies.
|
||||
// Developers can thus identify content that the usage policies prohibit and take action, for instance by filtering it.
|
||||
|
||||
// The default is text-moderation-latest which will be automatically upgraded over time.
|
||||
// This ensures you are always using our most accurate model.
|
||||
// If you use text-moderation-stable, we will provide advanced notice before updating the model.
|
||||
// Accuracy of text-moderation-stable may be slightly lower than for text-moderation-latest.
|
||||
const (
|
||||
ModerationOmniLatest = "omni-moderation-latest"
|
||||
ModerationOmni20240926 = "omni-moderation-2024-09-26"
|
||||
ModerationTextStable = "text-moderation-stable"
|
||||
ModerationTextLatest = "text-moderation-latest"
|
||||
// Deprecated: use ModerationTextStable and ModerationTextLatest instead.
|
||||
ModerationText001 = "text-moderation-001"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrModerationInvalidModel = errors.New("this model is not supported with moderation, please use text-moderation-stable or text-moderation-latest instead") //nolint:lll
|
||||
)
|
||||
|
||||
var validModerationModel = map[string]struct{}{
|
||||
ModerationOmniLatest: {},
|
||||
ModerationOmni20240926: {},
|
||||
ModerationTextStable: {},
|
||||
ModerationTextLatest: {},
|
||||
}
|
||||
|
||||
// ModerationRequest represents a request structure for moderation API.
|
||||
type ModerationRequest struct {
|
||||
Input string `json:"input,omitempty"`
|
||||
Model string `json:"model,omitempty"`
|
||||
}
|
||||
|
||||
// Result represents one of possible moderation results.
|
||||
type Result struct {
|
||||
Categories ResultCategories `json:"categories"`
|
||||
CategoryScores ResultCategoryScores `json:"category_scores"`
|
||||
Flagged bool `json:"flagged"`
|
||||
}
|
||||
|
||||
// ResultCategories represents Categories of Result.
|
||||
type ResultCategories struct {
|
||||
Hate bool `json:"hate"`
|
||||
HateThreatening bool `json:"hate/threatening"`
|
||||
Harassment bool `json:"harassment"`
|
||||
HarassmentThreatening bool `json:"harassment/threatening"`
|
||||
SelfHarm bool `json:"self-harm"`
|
||||
SelfHarmIntent bool `json:"self-harm/intent"`
|
||||
SelfHarmInstructions bool `json:"self-harm/instructions"`
|
||||
Sexual bool `json:"sexual"`
|
||||
SexualMinors bool `json:"sexual/minors"`
|
||||
Violence bool `json:"violence"`
|
||||
ViolenceGraphic bool `json:"violence/graphic"`
|
||||
}
|
||||
|
||||
// ResultCategoryScores represents CategoryScores of Result.
|
||||
type ResultCategoryScores struct {
|
||||
Hate float32 `json:"hate"`
|
||||
HateThreatening float32 `json:"hate/threatening"`
|
||||
Harassment float32 `json:"harassment"`
|
||||
HarassmentThreatening float32 `json:"harassment/threatening"`
|
||||
SelfHarm float32 `json:"self-harm"`
|
||||
SelfHarmIntent float32 `json:"self-harm/intent"`
|
||||
SelfHarmInstructions float32 `json:"self-harm/instructions"`
|
||||
Sexual float32 `json:"sexual"`
|
||||
SexualMinors float32 `json:"sexual/minors"`
|
||||
Violence float32 `json:"violence"`
|
||||
ViolenceGraphic float32 `json:"violence/graphic"`
|
||||
}
|
||||
|
||||
// ModerationResponse represents a response structure for moderation API.
|
||||
type ModerationResponse struct {
|
||||
ID string `json:"id"`
|
||||
Model string `json:"model"`
|
||||
Results []Result `json:"results"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
// Moderations performs a moderation API call over a string.
|
||||
// Input can be an array or slice but a string will reduce the complexity.
|
||||
func (c *Client) Moderations(ctx context.Context, request ModerationRequest) (response ModerationResponse, err error) {
|
||||
if _, ok := validModerationModel[request.Model]; len(request.Model) > 0 && !ok {
|
||||
err = ErrModerationInvalidModel
|
||||
return
|
||||
}
|
||||
req, err := c.newRequest(
|
||||
ctx,
|
||||
http.MethodPost,
|
||||
c.fullURL("/moderations", withModel(request.Model)),
|
||||
withBody(&request),
|
||||
)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.sendRequest(req, &response)
|
||||
return
|
||||
}
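A minimal usage sketch for Moderations, using one of the model constants defined above. The input text and API key are placeholders, and NewClient is assumed from the rest of the library:

package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your-api-key") // placeholder key

	resp, err := client.Moderations(context.Background(), openai.ModerationRequest{
		Input: "some user-provided text to screen",
		Model: openai.ModerationOmniLatest,
	})
	if err != nil {
		fmt.Println("moderation error:", err)
		return
	}
	for _, r := range resp.Results {
		fmt.Println("flagged:", r.Flagged, "violence score:", r.CategoryScores.Violence)
	}
}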
|
||||
43
vendor/github.com/sashabaranov/go-openai/ratelimit.go
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// RateLimitHeaders struct represents OpenAI rate limit headers.
|
||||
type RateLimitHeaders struct {
|
||||
LimitRequests int `json:"x-ratelimit-limit-requests"`
|
||||
LimitTokens int `json:"x-ratelimit-limit-tokens"`
|
||||
RemainingRequests int `json:"x-ratelimit-remaining-requests"`
|
||||
RemainingTokens int `json:"x-ratelimit-remaining-tokens"`
|
||||
ResetRequests ResetTime `json:"x-ratelimit-reset-requests"`
|
||||
ResetTokens ResetTime `json:"x-ratelimit-reset-tokens"`
|
||||
}
|
||||
|
||||
type ResetTime string
|
||||
|
||||
func (r ResetTime) String() string {
|
||||
return string(r)
|
||||
}
|
||||
|
||||
func (r ResetTime) Time() time.Time {
|
||||
d, _ := time.ParseDuration(string(r))
|
||||
return time.Now().Add(d)
|
||||
}
|
||||
|
||||
func newRateLimitHeaders(h http.Header) RateLimitHeaders {
|
||||
limitReq, _ := strconv.Atoi(h.Get("x-ratelimit-limit-requests"))
|
||||
limitTokens, _ := strconv.Atoi(h.Get("x-ratelimit-limit-tokens"))
|
||||
remainingReq, _ := strconv.Atoi(h.Get("x-ratelimit-remaining-requests"))
|
||||
remainingTokens, _ := strconv.Atoi(h.Get("x-ratelimit-remaining-tokens"))
|
||||
return RateLimitHeaders{
|
||||
LimitRequests: limitReq,
|
||||
LimitTokens: limitTokens,
|
||||
RemainingRequests: remainingReq,
|
||||
RemainingTokens: remainingTokens,
|
||||
ResetRequests: ResetTime(h.Get("x-ratelimit-reset-requests")),
|
||||
ResetTokens: ResetTime(h.Get("x-ratelimit-reset-tokens")),
|
||||
}
|
||||
}
|
||||
82
vendor/github.com/sashabaranov/go-openai/reasoning_validator.go
generated
vendored
Normal file
@@ -0,0 +1,82 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
// Deprecated: use ErrReasoningModelMaxTokensDeprecated instead.
|
||||
ErrO1MaxTokensDeprecated = errors.New("this model is not supported MaxTokens, please use MaxCompletionTokens") //nolint:lll
|
||||
ErrCompletionUnsupportedModel = errors.New("this model is not supported with this method, please use CreateChatCompletion client method instead") //nolint:lll
|
||||
ErrCompletionStreamNotSupported = errors.New("streaming is not supported with this method, please use CreateCompletionStream") //nolint:lll
|
||||
ErrCompletionRequestPromptTypeNotSupported = errors.New("the type of CompletionRequest.Prompt only supports string and []string") //nolint:lll
|
||||
)
|
||||
|
||||
var (
|
||||
ErrO1BetaLimitationsMessageTypes = errors.New("this model has beta-limitations, user and assistant messages only, system messages are not supported") //nolint:lll
|
||||
ErrO1BetaLimitationsTools = errors.New("this model has beta-limitations, tools, function calling, and response format parameters are not supported") //nolint:lll
|
||||
// Deprecated: use ErrReasoningModelLimitations* instead.
|
||||
ErrO1BetaLimitationsLogprobs = errors.New("this model has beta-limitations, logprobs not supported") //nolint:lll
|
||||
ErrO1BetaLimitationsOther = errors.New("this model has beta-limitations, temperature, top_p and n are fixed at 1, while presence_penalty and frequency_penalty are fixed at 0") //nolint:lll
|
||||
)
|
||||
|
||||
var (
|
||||
//nolint:lll
|
||||
ErrReasoningModelMaxTokensDeprecated = errors.New("this model is not supported MaxTokens, please use MaxCompletionTokens")
|
||||
ErrReasoningModelLimitationsLogprobs = errors.New("this model has beta-limitations, logprobs not supported") //nolint:lll
|
||||
ErrReasoningModelLimitationsOther = errors.New("this model has beta-limitations, temperature, top_p and n are fixed at 1, while presence_penalty and frequency_penalty are fixed at 0") //nolint:lll
|
||||
)
|
||||
|
||||
// ReasoningValidator handles validation for reasoning model requests.
|
||||
type ReasoningValidator struct{}
|
||||
|
||||
// NewReasoningValidator creates a new validator for reasoning models.
|
||||
func NewReasoningValidator() *ReasoningValidator {
|
||||
return &ReasoningValidator{}
|
||||
}
|
||||
|
||||
// Validate performs all validation checks for reasoning models.
|
||||
func (v *ReasoningValidator) Validate(request ChatCompletionRequest) error {
|
||||
o1Series := strings.HasPrefix(request.Model, "o1")
|
||||
o3Series := strings.HasPrefix(request.Model, "o3")
|
||||
o4Series := strings.HasPrefix(request.Model, "o4")
|
||||
gpt5Series := strings.HasPrefix(request.Model, "gpt-5")
|
||||
|
||||
if !o1Series && !o3Series && !o4Series && !gpt5Series {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := v.validateReasoningModelParams(request); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateReasoningModelParams checks reasoning model parameters.
|
||||
func (v *ReasoningValidator) validateReasoningModelParams(request ChatCompletionRequest) error {
|
||||
if request.MaxTokens > 0 {
|
||||
return ErrReasoningModelMaxTokensDeprecated
|
||||
}
|
||||
if request.LogProbs {
|
||||
return ErrReasoningModelLimitationsLogprobs
|
||||
}
|
||||
if request.Temperature > 0 && request.Temperature != 1 {
|
||||
return ErrReasoningModelLimitationsOther
|
||||
}
|
||||
if request.TopP > 0 && request.TopP != 1 {
|
||||
return ErrReasoningModelLimitationsOther
|
||||
}
|
||||
if request.N > 0 && request.N != 1 {
|
||||
return ErrReasoningModelLimitationsOther
|
||||
}
|
||||
if request.PresencePenalty > 0 {
|
||||
return ErrReasoningModelLimitationsOther
|
||||
}
|
||||
if request.FrequencyPenalty > 0 {
|
||||
return ErrReasoningModelLimitationsOther
|
||||
}
|
||||
|
||||
return nil
|
||||
}
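A minimal sketch of the validator in use. ChatCompletionRequest and its MaxCompletionTokens field come from elsewhere in the library and are assumed here; the model names are examples:

package main

import (
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	validator := openai.NewReasoningValidator()

	// MaxTokens is rejected for reasoning models; MaxCompletionTokens should be used instead.
	bad := openai.ChatCompletionRequest{
		Model:     "o1-mini",
		MaxTokens: 100,
	}
	fmt.Println(validator.Validate(bad)) // ErrReasoningModelMaxTokensDeprecated

	good := openai.ChatCompletionRequest{
		Model:               "o1-mini",
		MaxCompletionTokens: 100,
	}
	fmt.Println(validator.Validate(good)) // <nil>
}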
|
||||
454
vendor/github.com/sashabaranov/go-openai/run.go
generated
vendored
Normal file
@@ -0,0 +1,454 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
type Run struct {
|
||||
ID string `json:"id"`
|
||||
Object string `json:"object"`
|
||||
CreatedAt int64 `json:"created_at"`
|
||||
ThreadID string `json:"thread_id"`
|
||||
AssistantID string `json:"assistant_id"`
|
||||
Status RunStatus `json:"status"`
|
||||
RequiredAction *RunRequiredAction `json:"required_action,omitempty"`
|
||||
LastError *RunLastError `json:"last_error,omitempty"`
|
||||
ExpiresAt int64 `json:"expires_at"`
|
||||
StartedAt *int64 `json:"started_at,omitempty"`
|
||||
CancelledAt *int64 `json:"cancelled_at,omitempty"`
|
||||
FailedAt *int64 `json:"failed_at,omitempty"`
|
||||
CompletedAt *int64 `json:"completed_at,omitempty"`
|
||||
Model string `json:"model"`
|
||||
Instructions string `json:"instructions,omitempty"`
|
||||
Tools []Tool `json:"tools"`
|
||||
FileIDS []string `json:"file_ids"` //nolint:revive // backwards-compatibility
|
||||
Metadata map[string]any `json:"metadata"`
|
||||
Usage Usage `json:"usage,omitempty"`
|
||||
|
||||
Temperature *float32 `json:"temperature,omitempty"`
|
||||
// The maximum number of prompt tokens that may be used over the course of the run.
|
||||
// If the run exceeds the number of prompt tokens specified, the run will end with status 'incomplete'.
|
||||
MaxPromptTokens int `json:"max_prompt_tokens,omitempty"`
|
||||
// The maximum number of completion tokens that may be used over the course of the run.
|
||||
// If the run exceeds the number of completion tokens specified, the run will end with status 'incomplete'.
|
||||
MaxCompletionTokens int `json:"max_completion_tokens,omitempty"`
|
||||
// ThreadTruncationStrategy defines the truncation strategy to use for the thread.
|
||||
TruncationStrategy *ThreadTruncationStrategy `json:"truncation_strategy,omitempty"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
type RunStatus string
|
||||
|
||||
const (
|
||||
RunStatusQueued RunStatus = "queued"
|
||||
RunStatusInProgress RunStatus = "in_progress"
|
||||
RunStatusRequiresAction RunStatus = "requires_action"
|
||||
RunStatusCancelling RunStatus = "cancelling"
|
||||
RunStatusFailed RunStatus = "failed"
|
||||
RunStatusCompleted RunStatus = "completed"
|
||||
RunStatusIncomplete RunStatus = "incomplete"
|
||||
RunStatusExpired RunStatus = "expired"
|
||||
RunStatusCancelled RunStatus = "cancelled"
|
||||
)
|
||||
|
||||
type RunRequiredAction struct {
|
||||
Type RequiredActionType `json:"type"`
|
||||
SubmitToolOutputs *SubmitToolOutputs `json:"submit_tool_outputs,omitempty"`
|
||||
}
|
||||
|
||||
type RequiredActionType string
|
||||
|
||||
const (
|
||||
RequiredActionTypeSubmitToolOutputs RequiredActionType = "submit_tool_outputs"
|
||||
)
|
||||
|
||||
type SubmitToolOutputs struct {
|
||||
ToolCalls []ToolCall `json:"tool_calls"`
|
||||
}
|
||||
|
||||
type RunLastError struct {
|
||||
Code RunError `json:"code"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
type RunError string
|
||||
|
||||
const (
|
||||
RunErrorServerError RunError = "server_error"
|
||||
RunErrorRateLimitExceeded RunError = "rate_limit_exceeded"
|
||||
)
|
||||
|
||||
type RunRequest struct {
|
||||
AssistantID string `json:"assistant_id"`
|
||||
Model string `json:"model,omitempty"`
|
||||
Instructions string `json:"instructions,omitempty"`
|
||||
AdditionalInstructions string `json:"additional_instructions,omitempty"`
|
||||
AdditionalMessages []ThreadMessage `json:"additional_messages,omitempty"`
|
||||
Tools []Tool `json:"tools,omitempty"`
|
||||
Metadata map[string]any `json:"metadata,omitempty"`
|
||||
|
||||
// Sampling temperature between 0 and 2. Higher values like 0.8 are more random.
|
||||
// Lower values are more focused and deterministic.
|
||||
Temperature *float32 `json:"temperature,omitempty"`
|
||||
TopP *float32 `json:"top_p,omitempty"`
|
||||
|
||||
// The maximum number of prompt tokens that may be used over the course of the run.
|
||||
// If the run exceeds the number of prompt tokens specified, the run will end with status 'incomplete'.
|
||||
MaxPromptTokens int `json:"max_prompt_tokens,omitempty"`
|
||||
|
||||
// The maximum number of completion tokens that may be used over the course of the run.
|
||||
// If the run exceeds the number of completion tokens specified, the run will end with status 'incomplete'.
|
||||
MaxCompletionTokens int `json:"max_completion_tokens,omitempty"`
|
||||
|
||||
// ThreadTruncationStrategy defines the truncation strategy to use for the thread.
|
||||
TruncationStrategy *ThreadTruncationStrategy `json:"truncation_strategy,omitempty"`
|
||||
|
||||
// This can be either a string or a ToolChoice object.
|
||||
ToolChoice any `json:"tool_choice,omitempty"`
|
||||
// This can be either a string or a ResponseFormat object.
|
||||
ResponseFormat any `json:"response_format,omitempty"`
|
||||
// Disable the default behavior of parallel tool calls by setting it: false.
|
||||
ParallelToolCalls any `json:"parallel_tool_calls,omitempty"`
|
||||
}
|
||||
|
||||
// ThreadTruncationStrategy defines the truncation strategy to use for the thread.
|
||||
// https://platform.openai.com/docs/assistants/how-it-works/truncation-strategy.
|
||||
type ThreadTruncationStrategy struct {
|
||||
// default 'auto'.
|
||||
Type TruncationStrategy `json:"type,omitempty"`
|
||||
// this field should be set if the truncation strategy is set to LastMessages.
|
||||
LastMessages *int `json:"last_messages,omitempty"`
|
||||
}
|
||||
|
||||
// TruncationStrategy defines the existing truncation strategies existing for thread management in an assistant.
|
||||
type TruncationStrategy string
|
||||
|
||||
const (
|
||||
// TruncationStrategyAuto messages in the middle of the thread will be dropped to fit the context length of the model.
|
||||
TruncationStrategyAuto = TruncationStrategy("auto")
|
||||
// TruncationStrategyLastMessages the thread will be truncated to the n most recent messages in the thread.
|
||||
TruncationStrategyLastMessages = TruncationStrategy("last_messages")
|
||||
)
|
||||
|
||||
// ReponseFormat specifies the format the model must output.
|
||||
// https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-response_format.
|
||||
// Type can either be text or json_object.
|
||||
type ReponseFormat struct {
|
||||
Type string `json:"type"`
|
||||
}
|
||||
|
||||
type RunModifyRequest struct {
|
||||
Metadata map[string]any `json:"metadata,omitempty"`
|
||||
}
|
||||
|
||||
// RunList is a list of runs.
|
||||
type RunList struct {
|
||||
Runs []Run `json:"data"`
|
||||
|
||||
httpHeader
|
||||
}
|
||||
|
||||
type SubmitToolOutputsRequest struct {
|
||||
ToolOutputs []ToolOutput `json:"tool_outputs"`
|
||||
}
|
||||
|
||||
type ToolOutput struct {
|
||||
ToolCallID string `json:"tool_call_id"`
|
||||
Output any `json:"output"`
|
||||
}
|
||||
|
||||
type CreateThreadAndRunRequest struct {
|
||||
RunRequest
|
||||
Thread ThreadRequest `json:"thread"`
|
||||
}
|
||||
type RunStep struct {
	ID          string         `json:"id"`
	Object      string         `json:"object"`
	CreatedAt   int64          `json:"created_at"`
	AssistantID string         `json:"assistant_id"`
	ThreadID    string         `json:"thread_id"`
	RunID       string         `json:"run_id"`
	Type        RunStepType    `json:"type"`
	Status      RunStepStatus  `json:"status"`
	StepDetails StepDetails    `json:"step_details"`
	LastError   *RunLastError  `json:"last_error,omitempty"`
	ExpiredAt   *int64         `json:"expired_at,omitempty"`
	CancelledAt *int64         `json:"cancelled_at,omitempty"`
	FailedAt    *int64         `json:"failed_at,omitempty"`
	CompletedAt *int64         `json:"completed_at,omitempty"`
	Metadata    map[string]any `json:"metadata"`

	httpHeader
}

type RunStepStatus string

const (
	RunStepStatusInProgress RunStepStatus = "in_progress"
	RunStepStatusCancelling RunStepStatus = "cancelled"
	RunStepStatusFailed     RunStepStatus = "failed"
	RunStepStatusCompleted  RunStepStatus = "completed"
	RunStepStatusExpired    RunStepStatus = "expired"
)

type RunStepType string

const (
	RunStepTypeMessageCreation RunStepType = "message_creation"
	RunStepTypeToolCalls       RunStepType = "tool_calls"
)

type StepDetails struct {
	Type            RunStepType                 `json:"type"`
	MessageCreation *StepDetailsMessageCreation `json:"message_creation,omitempty"`
	ToolCalls       []ToolCall                  `json:"tool_calls,omitempty"`
}

type StepDetailsMessageCreation struct {
	MessageID string `json:"message_id"`
}

// RunStepList is a list of steps.
type RunStepList struct {
	RunSteps []RunStep `json:"data"`

	FirstID string `json:"first_id"`
	LastID  string `json:"last_id"`
	HasMore bool   `json:"has_more"`

	httpHeader
}

type Pagination struct {
	Limit  *int
	Order  *string
	After  *string
	Before *string
}
// CreateRun creates a new run.
func (c *Client) CreateRun(
	ctx context.Context,
	threadID string,
	request RunRequest,
) (response Run, err error) {
	urlSuffix := fmt.Sprintf("/threads/%s/runs", threadID)
	req, err := c.newRequest(
		ctx,
		http.MethodPost,
		c.fullURL(urlSuffix),
		withBody(request),
		withBetaAssistantVersion(c.config.AssistantVersion))
	if err != nil {
		return
	}

	err = c.sendRequest(req, &response)
	return
}

// RetrieveRun retrieves a run.
func (c *Client) RetrieveRun(
	ctx context.Context,
	threadID string,
	runID string,
) (response Run, err error) {
	urlSuffix := fmt.Sprintf("/threads/%s/runs/%s", threadID, runID)
	req, err := c.newRequest(
		ctx,
		http.MethodGet,
		c.fullURL(urlSuffix),
		withBetaAssistantVersion(c.config.AssistantVersion))
	if err != nil {
		return
	}

	err = c.sendRequest(req, &response)
	return
}

// ModifyRun modifies a run.
func (c *Client) ModifyRun(
	ctx context.Context,
	threadID string,
	runID string,
	request RunModifyRequest,
) (response Run, err error) {
	urlSuffix := fmt.Sprintf("/threads/%s/runs/%s", threadID, runID)
	req, err := c.newRequest(
		ctx,
		http.MethodPost,
		c.fullURL(urlSuffix),
		withBody(request),
		withBetaAssistantVersion(c.config.AssistantVersion))
	if err != nil {
		return
	}

	err = c.sendRequest(req, &response)
	return
}

// ListRuns lists runs.
func (c *Client) ListRuns(
	ctx context.Context,
	threadID string,
	pagination Pagination,
) (response RunList, err error) {
	urlValues := url.Values{}
	if pagination.Limit != nil {
		urlValues.Add("limit", fmt.Sprintf("%d", *pagination.Limit))
	}
	if pagination.Order != nil {
		urlValues.Add("order", *pagination.Order)
	}
	if pagination.After != nil {
		urlValues.Add("after", *pagination.After)
	}
	if pagination.Before != nil {
		urlValues.Add("before", *pagination.Before)
	}

	encodedValues := ""
	if len(urlValues) > 0 {
		encodedValues = "?" + urlValues.Encode()
	}

	urlSuffix := fmt.Sprintf("/threads/%s/runs%s", threadID, encodedValues)
	req, err := c.newRequest(
		ctx,
		http.MethodGet,
		c.fullURL(urlSuffix),
		withBetaAssistantVersion(c.config.AssistantVersion))
	if err != nil {
		return
	}

	err = c.sendRequest(req, &response)
	return
}
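For illustration only (not part of the vendored source): a minimal sketch of starting a run and polling it with the functions above. It assumes the Run.Status field and the RunStatus constants (RunStatusQueued, RunStatusInProgress) declared in the earlier, unshown part of run.go, plus openai.NewClient from the client package; the API key and IDs are placeholders.

package main

import (
	"context"
	"fmt"
	"time"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	ctx := context.Background()
	client := openai.NewClient("your-api-key") // hypothetical key

	// Start a run on an existing thread (IDs are placeholders).
	run, err := client.CreateRun(ctx, "thread_abc", openai.RunRequest{AssistantID: "asst_123"})
	if err != nil {
		panic(err)
	}

	// Poll with RetrieveRun until the run leaves the queued/in_progress states.
	for run.Status == openai.RunStatusQueued || run.Status == openai.RunStatusInProgress {
		time.Sleep(time.Second)
		run, err = client.RetrieveRun(ctx, "thread_abc", run.ID)
		if err != nil {
			panic(err)
		}
	}
	fmt.Println("final status:", run.Status)
}
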
// SubmitToolOutputs submits tool outputs.
func (c *Client) SubmitToolOutputs(
	ctx context.Context,
	threadID string,
	runID string,
	request SubmitToolOutputsRequest) (response Run, err error) {
	urlSuffix := fmt.Sprintf("/threads/%s/runs/%s/submit_tool_outputs", threadID, runID)
	req, err := c.newRequest(
		ctx,
		http.MethodPost,
		c.fullURL(urlSuffix),
		withBody(request),
		withBetaAssistantVersion(c.config.AssistantVersion))
	if err != nil {
		return
	}

	err = c.sendRequest(req, &response)
	return
}

// CancelRun cancels a run.
func (c *Client) CancelRun(
	ctx context.Context,
	threadID string,
	runID string) (response Run, err error) {
	urlSuffix := fmt.Sprintf("/threads/%s/runs/%s/cancel", threadID, runID)
	req, err := c.newRequest(
		ctx,
		http.MethodPost,
		c.fullURL(urlSuffix),
		withBetaAssistantVersion(c.config.AssistantVersion))
	if err != nil {
		return
	}

	err = c.sendRequest(req, &response)
	return
}

// CreateThreadAndRun creates a thread and runs it in a single request.
func (c *Client) CreateThreadAndRun(
	ctx context.Context,
	request CreateThreadAndRunRequest) (response Run, err error) {
	urlSuffix := "/threads/runs"
	req, err := c.newRequest(
		ctx,
		http.MethodPost,
		c.fullURL(urlSuffix),
		withBody(request),
		withBetaAssistantVersion(c.config.AssistantVersion))
	if err != nil {
		return
	}

	err = c.sendRequest(req, &response)
	return
}

// RetrieveRunStep retrieves a run step.
func (c *Client) RetrieveRunStep(
	ctx context.Context,
	threadID string,
	runID string,
	stepID string,
) (response RunStep, err error) {
	urlSuffix := fmt.Sprintf("/threads/%s/runs/%s/steps/%s", threadID, runID, stepID)
	req, err := c.newRequest(
		ctx,
		http.MethodGet,
		c.fullURL(urlSuffix),
		withBetaAssistantVersion(c.config.AssistantVersion))
	if err != nil {
		return
	}

	err = c.sendRequest(req, &response)
	return
}

// ListRunSteps lists run steps.
func (c *Client) ListRunSteps(
	ctx context.Context,
	threadID string,
	runID string,
	pagination Pagination,
) (response RunStepList, err error) {
	urlValues := url.Values{}
	if pagination.Limit != nil {
		urlValues.Add("limit", fmt.Sprintf("%d", *pagination.Limit))
	}
	if pagination.Order != nil {
		urlValues.Add("order", *pagination.Order)
	}
	if pagination.After != nil {
		urlValues.Add("after", *pagination.After)
	}
	if pagination.Before != nil {
		urlValues.Add("before", *pagination.Before)
	}

	encodedValues := ""
	if len(urlValues) > 0 {
		encodedValues = "?" + urlValues.Encode()
	}

	urlSuffix := fmt.Sprintf("/threads/%s/runs/%s/steps%s", threadID, runID, encodedValues)
	req, err := c.newRequest(
		ctx,
		http.MethodGet,
		c.fullURL(urlSuffix),
		withBetaAssistantVersion(c.config.AssistantVersion))
	if err != nil {
		return
	}

	err = c.sendRequest(req, &response)
	return
}
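For illustration only (not part of the vendored source): a minimal sketch of the tool-output loop served by SubmitToolOutputs. It assumes the Run.Status/RequiredAction fields and RunStatusRequiresAction constant from the earlier, unshown part of run.go; IDs, the API key, and the "42" output are placeholders.

package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

// answerToolCalls is a stand-in for real tool execution.
func answerToolCalls(run openai.Run) []openai.ToolOutput {
	outs := make([]openai.ToolOutput, 0)
	for _, call := range run.RequiredAction.SubmitToolOutputs.ToolCalls {
		outs = append(outs, openai.ToolOutput{ToolCallID: call.ID, Output: "42"})
	}
	return outs
}

func main() {
	ctx := context.Background()
	client := openai.NewClient("your-api-key")   // hypothetical key
	threadID, runID := "thread_abc", "run_abc"   // placeholders

	run, err := client.RetrieveRun(ctx, threadID, runID)
	if err != nil {
		panic(err)
	}

	// When a run pauses with requires_action, answer its tool calls and resume it.
	if run.Status == openai.RunStatusRequiresAction {
		run, err = client.SubmitToolOutputs(ctx, threadID, runID, openai.SubmitToolOutputsRequest{
			ToolOutputs: answerToolCalls(run),
		})
		if err != nil {
			panic(err)
		}
	}
	fmt.Println("status after submitting outputs:", run.Status)
}
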
65
vendor/github.com/sashabaranov/go-openai/speech.go
generated
vendored
Normal file
@@ -0,0 +1,65 @@
package openai

import (
	"context"
	"net/http"
)

type SpeechModel string

const (
	TTSModel1         SpeechModel = "tts-1"
	TTSModel1HD       SpeechModel = "tts-1-hd"
	TTSModelCanary    SpeechModel = "canary-tts"
	TTSModelGPT4oMini SpeechModel = "gpt-4o-mini-tts"
)

type SpeechVoice string

const (
	VoiceAlloy   SpeechVoice = "alloy"
	VoiceAsh     SpeechVoice = "ash"
	VoiceBallad  SpeechVoice = "ballad"
	VoiceCoral   SpeechVoice = "coral"
	VoiceEcho    SpeechVoice = "echo"
	VoiceFable   SpeechVoice = "fable"
	VoiceOnyx    SpeechVoice = "onyx"
	VoiceNova    SpeechVoice = "nova"
	VoiceShimmer SpeechVoice = "shimmer"
	VoiceVerse   SpeechVoice = "verse"
)

type SpeechResponseFormat string

const (
	SpeechResponseFormatMp3  SpeechResponseFormat = "mp3"
	SpeechResponseFormatOpus SpeechResponseFormat = "opus"
	SpeechResponseFormatAac  SpeechResponseFormat = "aac"
	SpeechResponseFormatFlac SpeechResponseFormat = "flac"
	SpeechResponseFormatWav  SpeechResponseFormat = "wav"
	SpeechResponseFormatPcm  SpeechResponseFormat = "pcm"
)

type CreateSpeechRequest struct {
	Model          SpeechModel          `json:"model"`
	Input          string               `json:"input"`
	Voice          SpeechVoice          `json:"voice"`
	Instructions   string               `json:"instructions,omitempty"`    // Optional. Doesn't work with tts-1 or tts-1-hd.
	ResponseFormat SpeechResponseFormat `json:"response_format,omitempty"` // Optional, defaults to mp3.
	Speed          float64              `json:"speed,omitempty"`           // Optional, defaults to 1.0.
}

func (c *Client) CreateSpeech(ctx context.Context, request CreateSpeechRequest) (response RawResponse, err error) {
	req, err := c.newRequest(
		ctx,
		http.MethodPost,
		c.fullURL("/audio/speech", withModel(string(request.Model))),
		withBody(request),
		withContentType("application/json"),
	)
	if err != nil {
		return
	}

	return c.sendRequestRaw(req)
}
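For illustration only (not part of the vendored source): a minimal sketch of calling CreateSpeech and writing the audio to disk. It assumes RawResponse wraps the HTTP body as an io.ReadCloser (as implied by sendRequestRaw) and that openai.NewClient is available; the key and file name are placeholders.

package main

import (
	"context"
	"io"
	"os"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your-api-key") // hypothetical key

	resp, err := client.CreateSpeech(context.Background(), openai.CreateSpeechRequest{
		Model:          openai.TTSModel1,
		Input:          "Hello from go-openai.",
		Voice:          openai.VoiceAlloy,
		ResponseFormat: openai.SpeechResponseFormatMp3,
	})
	if err != nil {
		panic(err)
	}
	defer resp.Close()

	// The raw response streams the encoded audio; copy it straight to a file.
	out, err := os.Create("speech.mp3")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, resp); err != nil {
		panic(err)
	}
}
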
55
vendor/github.com/sashabaranov/go-openai/stream.go
generated
vendored
Normal file
@@ -0,0 +1,55 @@
package openai

import (
	"context"
	"errors"
	"net/http"
)

var (
	ErrTooManyEmptyStreamMessages = errors.New("stream has sent too many empty messages")
)

type CompletionStream struct {
	*streamReader[CompletionResponse]
}

// CreateCompletionStream — API call to create a completion w/ streaming
// support. It sets whether to stream back partial progress. If set, tokens will be
// sent as data-only server-sent events as they become available, with the
// stream terminated by a data: [DONE] message.
func (c *Client) CreateCompletionStream(
	ctx context.Context,
	request CompletionRequest,
) (stream *CompletionStream, err error) {
	urlSuffix := "/completions"
	if !checkEndpointSupportsModel(urlSuffix, request.Model) {
		err = ErrCompletionUnsupportedModel
		return
	}

	if !checkPromptType(request.Prompt) {
		err = ErrCompletionRequestPromptTypeNotSupported
		return
	}

	request.Stream = true
	req, err := c.newRequest(
		ctx,
		http.MethodPost,
		c.fullURL(urlSuffix, withModel(request.Model)),
		withBody(request),
	)
	if err != nil {
		return nil, err
	}

	resp, err := sendRequestStream[CompletionResponse](c, req)
	if err != nil {
		return
	}
	stream = &CompletionStream{
		streamReader: resp,
	}
	return
}
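For illustration only (not part of the vendored source): a minimal sketch of consuming a CompletionStream. It assumes the CompletionRequest/CompletionResponse types, the GPT3Dot5TurboInstruct model constant from completion.go elsewhere in this commit, and openai.NewClient; the key is a placeholder.

package main

import (
	"context"
	"errors"
	"fmt"
	"io"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your-api-key") // hypothetical key

	stream, err := client.CreateCompletionStream(context.Background(), openai.CompletionRequest{
		Model:     openai.GPT3Dot5TurboInstruct, // any completions-capable model
		Prompt:    "Write a haiku about Go.",
		MaxTokens: 64,
	})
	if err != nil {
		panic(err)
	}
	defer stream.Close()

	// Recv returns one decoded SSE chunk at a time and io.EOF after "data: [DONE]".
	for {
		resp, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Print(resp.Choices[0].Text)
	}
	fmt.Println()
}
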
119
vendor/github.com/sashabaranov/go-openai/stream_reader.go
generated
vendored
Normal file
@@ -0,0 +1,119 @@
package openai

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"net/http"
	"regexp"

	utils "github.com/sashabaranov/go-openai/internal"
)

var (
	headerData  = regexp.MustCompile(`^data:\s*`)
	errorPrefix = regexp.MustCompile(`^data:\s*{"error":`)
)

type streamable interface {
	ChatCompletionStreamResponse | CompletionResponse
}

type streamReader[T streamable] struct {
	emptyMessagesLimit uint
	isFinished         bool

	reader         *bufio.Reader
	response       *http.Response
	errAccumulator utils.ErrorAccumulator
	unmarshaler    utils.Unmarshaler

	httpHeader
}

func (stream *streamReader[T]) Recv() (response T, err error) {
	rawLine, err := stream.RecvRaw()
	if err != nil {
		return
	}

	err = stream.unmarshaler.Unmarshal(rawLine, &response)
	if err != nil {
		return
	}
	return response, nil
}

func (stream *streamReader[T]) RecvRaw() ([]byte, error) {
	if stream.isFinished {
		return nil, io.EOF
	}

	return stream.processLines()
}

//nolint:gocognit
func (stream *streamReader[T]) processLines() ([]byte, error) {
	var (
		emptyMessagesCount uint
		hasErrorPrefix     bool
	)

	for {
		rawLine, readErr := stream.reader.ReadBytes('\n')
		if readErr != nil || hasErrorPrefix {
			respErr := stream.unmarshalError()
			if respErr != nil {
				return nil, fmt.Errorf("error, %w", respErr.Error)
			}
			return nil, readErr
		}

		noSpaceLine := bytes.TrimSpace(rawLine)
		if errorPrefix.Match(noSpaceLine) {
			hasErrorPrefix = true
		}
		if !headerData.Match(noSpaceLine) || hasErrorPrefix {
			if hasErrorPrefix {
				noSpaceLine = headerData.ReplaceAll(noSpaceLine, nil)
			}
			writeErr := stream.errAccumulator.Write(noSpaceLine)
			if writeErr != nil {
				return nil, writeErr
			}
			emptyMessagesCount++
			if emptyMessagesCount > stream.emptyMessagesLimit {
				return nil, ErrTooManyEmptyStreamMessages
			}

			continue
		}

		noPrefixLine := headerData.ReplaceAll(noSpaceLine, nil)
		if string(noPrefixLine) == "[DONE]" {
			stream.isFinished = true
			return nil, io.EOF
		}

		return noPrefixLine, nil
	}
}

func (stream *streamReader[T]) unmarshalError() (errResp *ErrorResponse) {
	errBytes := stream.errAccumulator.Bytes()
	if len(errBytes) == 0 {
		return
	}

	err := stream.unmarshaler.Unmarshal(errBytes, &errResp)
	if err != nil {
		errResp = nil
	}

	return
}

func (stream *streamReader[T]) Close() error {
	return stream.response.Body.Close()
}
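For illustration only (not part of the vendored source): a tiny self-contained sketch of the SSE framing that processLines handles, using the same two regular expressions declared above. The sample wire lines are made up; the point is which lines carry a decodable payload, which trigger error accumulation, and which terminate the stream.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Mirrors headerData and errorPrefix from stream_reader.go:
	// payload lines start with "data: ", a `data: {"error": ...` line switches the
	// reader into error-accumulation mode, and "data: [DONE]" ends the stream.
	headerData := regexp.MustCompile(`^data:\s*`)
	errorPrefix := regexp.MustCompile(`^data:\s*{"error":`)

	samples := []string{
		`data: {"id":"cmpl-1","choices":[{"text":"Hi"}]}`, // decoded and returned by Recv
		`data: {"error":{"message":"rate limited"}}`,      // accumulated, surfaced as an error
		`data: [DONE]`,                                    // terminates the stream with io.EOF
		`: keep-alive comment`,                            // ignored (counts toward emptyMessagesLimit)
	}
	for _, line := range samples {
		fmt.Printf("payload=%q isData=%v isError=%v\n",
			headerData.ReplaceAllString(line, ""),
			headerData.MatchString(line),
			errorPrefix.MatchString(line))
	}
}
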
171
vendor/github.com/sashabaranov/go-openai/thread.go
generated
vendored
Normal file
@@ -0,0 +1,171 @@
package openai

import (
	"context"
	"net/http"
)

const (
	threadsSuffix = "/threads"
)

type Thread struct {
	ID            string         `json:"id"`
	Object        string         `json:"object"`
	CreatedAt     int64          `json:"created_at"`
	Metadata      map[string]any `json:"metadata"`
	ToolResources ToolResources  `json:"tool_resources,omitempty"`

	httpHeader
}

type ThreadRequest struct {
	Messages      []ThreadMessage       `json:"messages,omitempty"`
	Metadata      map[string]any        `json:"metadata,omitempty"`
	ToolResources *ToolResourcesRequest `json:"tool_resources,omitempty"`
}

type ToolResources struct {
	CodeInterpreter *CodeInterpreterToolResources `json:"code_interpreter,omitempty"`
	FileSearch      *FileSearchToolResources      `json:"file_search,omitempty"`
}

type CodeInterpreterToolResources struct {
	FileIDs []string `json:"file_ids,omitempty"`
}

type FileSearchToolResources struct {
	VectorStoreIDs []string `json:"vector_store_ids,omitempty"`
}

type ToolResourcesRequest struct {
	CodeInterpreter *CodeInterpreterToolResourcesRequest `json:"code_interpreter,omitempty"`
	FileSearch      *FileSearchToolResourcesRequest      `json:"file_search,omitempty"`
}

type CodeInterpreterToolResourcesRequest struct {
	FileIDs []string `json:"file_ids,omitempty"`
}

type FileSearchToolResourcesRequest struct {
	VectorStoreIDs []string                   `json:"vector_store_ids,omitempty"`
	VectorStores   []VectorStoreToolResources `json:"vector_stores,omitempty"`
}

type VectorStoreToolResources struct {
	FileIDs          []string          `json:"file_ids,omitempty"`
	ChunkingStrategy *ChunkingStrategy `json:"chunking_strategy,omitempty"`
	Metadata         map[string]any    `json:"metadata,omitempty"`
}

type ChunkingStrategy struct {
	Type   ChunkingStrategyType    `json:"type"`
	Static *StaticChunkingStrategy `json:"static,omitempty"`
}

type StaticChunkingStrategy struct {
	MaxChunkSizeTokens int `json:"max_chunk_size_tokens"`
	ChunkOverlapTokens int `json:"chunk_overlap_tokens"`
}

type ChunkingStrategyType string

const (
	ChunkingStrategyTypeAuto   ChunkingStrategyType = "auto"
	ChunkingStrategyTypeStatic ChunkingStrategyType = "static"
)

type ModifyThreadRequest struct {
	Metadata      map[string]any `json:"metadata"`
	ToolResources *ToolResources `json:"tool_resources,omitempty"`
}

type ThreadMessageRole string

const (
	ThreadMessageRoleAssistant ThreadMessageRole = "assistant"
	ThreadMessageRoleUser      ThreadMessageRole = "user"
)

type ThreadMessage struct {
	Role        ThreadMessageRole  `json:"role"`
	Content     string             `json:"content"`
	FileIDs     []string           `json:"file_ids,omitempty"`
	Attachments []ThreadAttachment `json:"attachments,omitempty"`
	Metadata    map[string]any     `json:"metadata,omitempty"`
}

type ThreadAttachment struct {
	FileID string                 `json:"file_id"`
	Tools  []ThreadAttachmentTool `json:"tools"`
}

type ThreadAttachmentTool struct {
	Type string `json:"type"`
}

type ThreadDeleteResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Deleted bool   `json:"deleted"`

	httpHeader
}

// CreateThread creates a new thread.
func (c *Client) CreateThread(ctx context.Context, request ThreadRequest) (response Thread, err error) {
	req, err := c.newRequest(ctx, http.MethodPost, c.fullURL(threadsSuffix), withBody(request),
		withBetaAssistantVersion(c.config.AssistantVersion))
	if err != nil {
		return
	}

	err = c.sendRequest(req, &response)
	return
}

// RetrieveThread retrieves a thread.
func (c *Client) RetrieveThread(ctx context.Context, threadID string) (response Thread, err error) {
	urlSuffix := threadsSuffix + "/" + threadID
	req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix),
		withBetaAssistantVersion(c.config.AssistantVersion))
	if err != nil {
		return
	}

	err = c.sendRequest(req, &response)
	return
}

// ModifyThread modifies a thread.
func (c *Client) ModifyThread(
	ctx context.Context,
	threadID string,
	request ModifyThreadRequest,
) (response Thread, err error) {
	urlSuffix := threadsSuffix + "/" + threadID
	req, err := c.newRequest(ctx, http.MethodPost, c.fullURL(urlSuffix), withBody(request),
		withBetaAssistantVersion(c.config.AssistantVersion))
	if err != nil {
		return
	}

	err = c.sendRequest(req, &response)
	return
}

// DeleteThread deletes a thread.
func (c *Client) DeleteThread(
	ctx context.Context,
	threadID string,
) (response ThreadDeleteResponse, err error) {
	urlSuffix := threadsSuffix + "/" + threadID
	req, err := c.newRequest(ctx, http.MethodDelete, c.fullURL(urlSuffix),
		withBetaAssistantVersion(c.config.AssistantVersion))
	if err != nil {
		return
	}

	err = c.sendRequest(req, &response)
	return
}
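For illustration only (not part of the vendored source): a minimal sketch of creating a thread with an initial user message and a file_search tool resource, then cleaning it up. It uses only types from thread.go above plus openai.NewClient; the key and vector store ID are placeholders.

package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	ctx := context.Background()
	client := openai.NewClient("your-api-key") // hypothetical key

	thread, err := client.CreateThread(ctx, openai.ThreadRequest{
		Messages: []openai.ThreadMessage{{
			Role:    openai.ThreadMessageRoleUser,
			Content: "Summarise the attached report.",
		}},
		// Expose an existing vector store to the file_search tool (ID is a placeholder).
		ToolResources: &openai.ToolResourcesRequest{
			FileSearch: &openai.FileSearchToolResourcesRequest{
				VectorStoreIDs: []string{"vs_123"},
			},
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("created thread:", thread.ID)

	// Remove the thread again once it is no longer needed.
	if _, err := client.DeleteThread(ctx, thread.ID); err != nil {
		panic(err)
	}
}
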
348
vendor/github.com/sashabaranov/go-openai/vector_store.go
generated
vendored
Normal file
@@ -0,0 +1,348 @@
package openai

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
)

const (
	vectorStoresSuffix            = "/vector_stores"
	vectorStoresFilesSuffix       = "/files"
	vectorStoresFileBatchesSuffix = "/file_batches"
)

type VectorStoreFileCount struct {
	InProgress int `json:"in_progress"`
	Completed  int `json:"completed"`
	Failed     int `json:"failed"`
	Cancelled  int `json:"cancelled"`
	Total      int `json:"total"`
}

type VectorStore struct {
	ID           string               `json:"id"`
	Object       string               `json:"object"`
	CreatedAt    int64                `json:"created_at"`
	Name         string               `json:"name"`
	UsageBytes   int                  `json:"usage_bytes"`
	FileCounts   VectorStoreFileCount `json:"file_counts"`
	Status       string               `json:"status"`
	ExpiresAfter *VectorStoreExpires  `json:"expires_after"`
	ExpiresAt    *int                 `json:"expires_at"`
	Metadata     map[string]any       `json:"metadata"`

	httpHeader
}

type VectorStoreExpires struct {
	Anchor string `json:"anchor"`
	Days   int    `json:"days"`
}

// VectorStoreRequest provides the vector store request parameters.
type VectorStoreRequest struct {
	Name         string              `json:"name,omitempty"`
	FileIDs      []string            `json:"file_ids,omitempty"`
	ExpiresAfter *VectorStoreExpires `json:"expires_after,omitempty"`
	Metadata     map[string]any      `json:"metadata,omitempty"`
}

// VectorStoresList is a list of vector stores.
type VectorStoresList struct {
	VectorStores []VectorStore `json:"data"`
	LastID       *string       `json:"last_id"`
	FirstID      *string       `json:"first_id"`
	HasMore      bool          `json:"has_more"`
	httpHeader
}

type VectorStoreDeleteResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Deleted bool   `json:"deleted"`

	httpHeader
}

type VectorStoreFile struct {
	ID            string `json:"id"`
	Object        string `json:"object"`
	CreatedAt     int64  `json:"created_at"`
	VectorStoreID string `json:"vector_store_id"`
	UsageBytes    int    `json:"usage_bytes"`
	Status        string `json:"status"`

	httpHeader
}

type VectorStoreFileRequest struct {
	FileID string `json:"file_id"`
}

type VectorStoreFilesList struct {
	VectorStoreFiles []VectorStoreFile `json:"data"`
	FirstID          *string           `json:"first_id"`
	LastID           *string           `json:"last_id"`
	HasMore          bool              `json:"has_more"`

	httpHeader
}

type VectorStoreFileBatch struct {
	ID            string               `json:"id"`
	Object        string               `json:"object"`
	CreatedAt     int64                `json:"created_at"`
	VectorStoreID string               `json:"vector_store_id"`
	Status        string               `json:"status"`
	FileCounts    VectorStoreFileCount `json:"file_counts"`

	httpHeader
}

type VectorStoreFileBatchRequest struct {
	FileIDs []string `json:"file_ids"`
}

// CreateVectorStore creates a new vector store.
func (c *Client) CreateVectorStore(ctx context.Context, request VectorStoreRequest) (response VectorStore, err error) {
	req, _ := c.newRequest(
		ctx,
		http.MethodPost,
		c.fullURL(vectorStoresSuffix),
		withBody(request),
		withBetaAssistantVersion(c.config.AssistantVersion),
	)

	err = c.sendRequest(req, &response)
	return
}

// RetrieveVectorStore retrieves a vector store.
func (c *Client) RetrieveVectorStore(
	ctx context.Context,
	vectorStoreID string,
) (response VectorStore, err error) {
	urlSuffix := fmt.Sprintf("%s/%s", vectorStoresSuffix, vectorStoreID)
	req, _ := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix),
		withBetaAssistantVersion(c.config.AssistantVersion))

	err = c.sendRequest(req, &response)
	return
}

// ModifyVectorStore modifies a vector store.
func (c *Client) ModifyVectorStore(
	ctx context.Context,
	vectorStoreID string,
	request VectorStoreRequest,
) (response VectorStore, err error) {
	urlSuffix := fmt.Sprintf("%s/%s", vectorStoresSuffix, vectorStoreID)
	req, _ := c.newRequest(ctx, http.MethodPost, c.fullURL(urlSuffix), withBody(request),
		withBetaAssistantVersion(c.config.AssistantVersion))

	err = c.sendRequest(req, &response)
	return
}

// DeleteVectorStore deletes a vector store.
func (c *Client) DeleteVectorStore(
	ctx context.Context,
	vectorStoreID string,
) (response VectorStoreDeleteResponse, err error) {
	urlSuffix := fmt.Sprintf("%s/%s", vectorStoresSuffix, vectorStoreID)
	req, _ := c.newRequest(ctx, http.MethodDelete, c.fullURL(urlSuffix),
		withBetaAssistantVersion(c.config.AssistantVersion))

	err = c.sendRequest(req, &response)
	return
}

// ListVectorStores lists the currently available vector stores.
func (c *Client) ListVectorStores(
	ctx context.Context,
	pagination Pagination,
) (response VectorStoresList, err error) {
	urlValues := url.Values{}

	if pagination.After != nil {
		urlValues.Add("after", *pagination.After)
	}
	if pagination.Order != nil {
		urlValues.Add("order", *pagination.Order)
	}
	if pagination.Limit != nil {
		urlValues.Add("limit", fmt.Sprintf("%d", *pagination.Limit))
	}
	if pagination.Before != nil {
		urlValues.Add("before", *pagination.Before)
	}

	encodedValues := ""
	if len(urlValues) > 0 {
		encodedValues = "?" + urlValues.Encode()
	}

	urlSuffix := fmt.Sprintf("%s%s", vectorStoresSuffix, encodedValues)
	req, _ := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix),
		withBetaAssistantVersion(c.config.AssistantVersion))

	err = c.sendRequest(req, &response)
	return
}

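For illustration only (not part of the vendored source): a minimal sketch of creating a vector store and paging through the existing ones with the functions above. It assumes openai.NewClient; the key is a placeholder, and "last_active_at" is the anchor value the API documents for expiry (an assumption, not taken from this file).

package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	ctx := context.Background()
	client := openai.NewClient("your-api-key") // hypothetical key

	// Create a store that the service expires after 7 idle days.
	vs, err := client.CreateVectorStore(ctx, openai.VectorStoreRequest{
		Name:         "kb-docs",
		ExpiresAfter: &openai.VectorStoreExpires{Anchor: "last_active_at", Days: 7},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("created:", vs.ID, "status:", vs.Status)

	// Page through existing stores, 20 at a time.
	limit := 20
	list, err := client.ListVectorStores(ctx, openai.Pagination{Limit: &limit})
	if err != nil {
		panic(err)
	}
	for _, store := range list.VectorStores {
		fmt.Println(store.ID, store.Name)
	}
}
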
// CreateVectorStoreFile creates a new vector store file.
func (c *Client) CreateVectorStoreFile(
	ctx context.Context,
	vectorStoreID string,
	request VectorStoreFileRequest,
) (response VectorStoreFile, err error) {
	urlSuffix := fmt.Sprintf("%s/%s%s", vectorStoresSuffix, vectorStoreID, vectorStoresFilesSuffix)
	req, _ := c.newRequest(ctx, http.MethodPost, c.fullURL(urlSuffix),
		withBody(request),
		withBetaAssistantVersion(c.config.AssistantVersion))

	err = c.sendRequest(req, &response)
	return
}

// RetrieveVectorStoreFile retrieves a vector store file.
func (c *Client) RetrieveVectorStoreFile(
	ctx context.Context,
	vectorStoreID string,
	fileID string,
) (response VectorStoreFile, err error) {
	urlSuffix := fmt.Sprintf("%s/%s%s/%s", vectorStoresSuffix, vectorStoreID, vectorStoresFilesSuffix, fileID)
	req, _ := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix),
		withBetaAssistantVersion(c.config.AssistantVersion))

	err = c.sendRequest(req, &response)
	return
}

// DeleteVectorStoreFile deletes an existing file.
func (c *Client) DeleteVectorStoreFile(
	ctx context.Context,
	vectorStoreID string,
	fileID string,
) (err error) {
	urlSuffix := fmt.Sprintf("%s/%s%s/%s", vectorStoresSuffix, vectorStoreID, vectorStoresFilesSuffix, fileID)
	req, _ := c.newRequest(ctx, http.MethodDelete, c.fullURL(urlSuffix),
		withBetaAssistantVersion(c.config.AssistantVersion))

	err = c.sendRequest(req, nil)
	return
}

// ListVectorStoreFiles lists the currently available files for a vector store.
func (c *Client) ListVectorStoreFiles(
	ctx context.Context,
	vectorStoreID string,
	pagination Pagination,
) (response VectorStoreFilesList, err error) {
	urlValues := url.Values{}
	if pagination.After != nil {
		urlValues.Add("after", *pagination.After)
	}
	if pagination.Limit != nil {
		urlValues.Add("limit", fmt.Sprintf("%d", *pagination.Limit))
	}
	if pagination.Before != nil {
		urlValues.Add("before", *pagination.Before)
	}
	if pagination.Order != nil {
		urlValues.Add("order", *pagination.Order)
	}

	encodedValues := ""
	if len(urlValues) > 0 {
		encodedValues = "?" + urlValues.Encode()
	}

	urlSuffix := fmt.Sprintf("%s/%s%s%s", vectorStoresSuffix, vectorStoreID, vectorStoresFilesSuffix, encodedValues)
	req, _ := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix),
		withBetaAssistantVersion(c.config.AssistantVersion))

	err = c.sendRequest(req, &response)
	return
}

// CreateVectorStoreFileBatch creates a new vector store file batch.
func (c *Client) CreateVectorStoreFileBatch(
	ctx context.Context,
	vectorStoreID string,
	request VectorStoreFileBatchRequest,
) (response VectorStoreFileBatch, err error) {
	urlSuffix := fmt.Sprintf("%s/%s%s", vectorStoresSuffix, vectorStoreID, vectorStoresFileBatchesSuffix)
	req, _ := c.newRequest(ctx, http.MethodPost, c.fullURL(urlSuffix),
		withBody(request),
		withBetaAssistantVersion(c.config.AssistantVersion))

	err = c.sendRequest(req, &response)
	return
}

// RetrieveVectorStoreFileBatch retrieves a vector store file batch.
func (c *Client) RetrieveVectorStoreFileBatch(
	ctx context.Context,
	vectorStoreID string,
	batchID string,
) (response VectorStoreFileBatch, err error) {
	urlSuffix := fmt.Sprintf("%s/%s%s/%s", vectorStoresSuffix, vectorStoreID, vectorStoresFileBatchesSuffix, batchID)
	req, _ := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix),
		withBetaAssistantVersion(c.config.AssistantVersion))

	err = c.sendRequest(req, &response)
	return
}

// CancelVectorStoreFileBatch cancels a vector store file batch.
func (c *Client) CancelVectorStoreFileBatch(
	ctx context.Context,
	vectorStoreID string,
	batchID string,
) (response VectorStoreFileBatch, err error) {
	urlSuffix := fmt.Sprintf("%s/%s%s/%s%s", vectorStoresSuffix,
		vectorStoreID, vectorStoresFileBatchesSuffix, batchID, "/cancel")
	req, _ := c.newRequest(ctx, http.MethodPost, c.fullURL(urlSuffix),
		withBetaAssistantVersion(c.config.AssistantVersion))

	err = c.sendRequest(req, &response)
	return
}

// ListVectorStoreFilesInBatch lists the currently available files for a vector store file batch.
func (c *Client) ListVectorStoreFilesInBatch(
	ctx context.Context,
	vectorStoreID string,
	batchID string,
	pagination Pagination,
) (response VectorStoreFilesList, err error) {
	urlValues := url.Values{}
	if pagination.After != nil {
		urlValues.Add("after", *pagination.After)
	}
	if pagination.Limit != nil {
		urlValues.Add("limit", fmt.Sprintf("%d", *pagination.Limit))
	}
	if pagination.Before != nil {
		urlValues.Add("before", *pagination.Before)
	}
	if pagination.Order != nil {
		urlValues.Add("order", *pagination.Order)
	}

	encodedValues := ""
	if len(urlValues) > 0 {
		encodedValues = "?" + urlValues.Encode()
	}

	urlSuffix := fmt.Sprintf("%s/%s%s/%s%s%s", vectorStoresSuffix,
		vectorStoreID, vectorStoresFileBatchesSuffix, batchID, "/files", encodedValues)
	req, _ := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix),
		withBetaAssistantVersion(c.config.AssistantVersion))

	err = c.sendRequest(req, &response)
	return
}
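For illustration only (not part of the vendored source): a minimal sketch of attaching files to a vector store in one batch and polling the batch until indexing finishes. It assumes openai.NewClient and that the API reports batch Status as plain strings such as "in_progress" (an assumption; the field is an untyped string in the struct above). IDs and the key are placeholders.

package main

import (
	"context"
	"fmt"
	"time"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	ctx := context.Background()
	client := openai.NewClient("your-api-key") // hypothetical key
	vectorStoreID := "vs_123"                  // placeholder

	// Attach several previously uploaded files in one batch.
	batch, err := client.CreateVectorStoreFileBatch(ctx, vectorStoreID, openai.VectorStoreFileBatchRequest{
		FileIDs: []string{"file_1", "file_2"},
	})
	if err != nil {
		panic(err)
	}

	// Poll until the batch leaves the in_progress state.
	for batch.Status == "in_progress" {
		time.Sleep(2 * time.Second)
		batch, err = client.RetrieveVectorStoreFileBatch(ctx, vectorStoreID, batch.ID)
		if err != nil {
			panic(err)
		}
	}
	fmt.Printf("batch %s finished: %+v\n", batch.ID, batch.FileCounts)
}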