// Choice represents a completion choice generated by the model.
type Choice struct {
	Index        int     `json:"index"`              // Index of the choice in the list of choices.
	Message      Message `json:"message"`            // The message generated by the model.
	Logprobs     any     `json:"logprobs,omitempty"` // Log probabilities of the tokens, if available. Changed to any as of April 21, 2025, because the logprobs field is sometimes a float64 and sometimes a Logprobs struct.
	FinishReason string  `json:"finish_reason"`      // Reason why the completion finished.
}
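// Because logprobs can arrive as either a bare number or an object,
// callers have to inspect the decoded value at runtime. A minimal
// sketch, assuming a Logprobs struct is defined elsewhere in this
// package and "encoding/json" is imported:
func parseLogprobs(raw any) (*Logprobs, float64, bool) {
	switch v := raw.(type) {
	case float64:
		// Plain number: return it directly.
		return nil, v, true
	case map[string]any:
		// Object: re-marshal and decode into the struct form.
		b, err := json.Marshal(v)
		if err != nil {
			return nil, 0, false
		}
		var lp Logprobs
		if err := json.Unmarshal(b, &lp); err != nil {
			return nil, 0, false
		}
		return &lp, 0, true
	default:
		return nil, 0, false
	}
}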
// A Part is one part of a [Document]. This may be plain text or it
// may be a URL (possibly a "data:" URL with embedded data).
type Part struct {
	Kind         PartKind       `json:"kind,omitempty"`
	ContentType  string         `json:"contentType,omitempty"`  // valid for kind==blob
	Text         string         `json:"text,omitempty"`         // valid for kind∈{text,blob}
	ToolRequest  *ToolRequest   `json:"toolRequest,omitempty"`  // valid for kind==partToolRequest
	ToolResponse *ToolResponse  `json:"toolResponse,omitempty"` // valid for kind==partToolResponse
	Custom       map[string]any `json:"custom,omitempty"`       // valid for plugin-specific custom parts
	Metadata     map[string]any `json:"metadata,omitempty"`     // valid for all kinds
}
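// A minimal sketch of building Parts by hand. The PartKind constant
// names used here (PartText, PartToolResponse) are assumptions; use
// whatever constants accompany the PartKind type in this package.
func examplePartUsage(resp *ToolResponse) []*Part {
	return []*Part{
		{Kind: PartText, Text: "Hello, world"},
		{Kind: PartToolResponse, ToolResponse: resp},
	}
}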
// Ollama has two API endpoints, one with a chat interface and another with a
// generate (single-prompt) interface. That's why we have multiple request
// types for the Ollama API below.
/*
TODO: Support optional, advanced parameters:
format: the format to return a response in. Currently the only accepted value is json
options: additional model parameters listed in the documentation for the Modelfile such as temperature
system: system message (overrides what is defined in the Modelfile)
template: the prompt template to use (overrides what is defined in the Modelfile)
context: the context parameter returned from a previous request to /generate, this can be used to keep a short conversational memory
stream: if false the response will be returned as a single response object, rather than a stream of objects
raw: if true no formatting will be applied to the prompt. You may choose to use the raw parameter if you are specifying a full templated prompt in your request to the API
keep_alive: controls how long the model will stay loaded into memory following the request (default: 5m)
*/
type ollamaChatRequest struct {
	Messages []*ollamaMessage `json:"messages"`
	Images   []string         `json:"images,omitempty"`
	Model    string           `json:"model"`
	Stream   bool             `json:"stream"`
	Format   string           `json:"format,omitempty"`
	Tools    []ollamaTool     `json:"tools,omitempty"`
}
type ollamaModelRequest struct {
	System string   `json:"system,omitempty"`
	Images []string `json:"images,omitempty"`
	Model  string   `json:"model"`
	Prompt string   `json:"prompt"`
	Stream bool     `json:"stream"`
	Format string   `json:"format,omitempty"`
}
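// A minimal sketch of issuing a non-streaming call to each endpoint
// ("/api/chat" and "/api/generate", per the public Ollama API). Assumes
// "bytes", "encoding/json", and "net/http" are imported, and that
// ollamaMessage (defined elsewhere in this file) has Role and Content
// string fields; the server address and model name are illustrative.
func exampleOllamaRequests(serverAddr string) error {
	// Chat endpoint: takes a running list of messages.
	chatBody, err := json.Marshal(ollamaChatRequest{
		Model:    "llama3",
		Stream:   false,
		Messages: []*ollamaMessage{{Role: "user", Content: "Hi!"}},
	})
	if err != nil {
		return err
	}
	chatResp, err := http.Post(serverAddr+"/api/chat", "application/json", bytes.NewReader(chatBody))
	if err != nil {
		return err
	}
	defer chatResp.Body.Close()

	// Generate endpoint: takes a single prompt string.
	genBody, err := json.Marshal(ollamaModelRequest{
		Model:  "llama3",
		Prompt: "Why is the sky blue?",
		Stream: false,
	})
	if err != nil {
		return err
	}
	genResp, err := http.Post(serverAddr+"/api/generate", "application/json", bytes.NewReader(genBody))
	if err != nil {
		return err
	}
	defer genResp.Body.Close()
	return nil
}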
// Tool definition from Ollama API
type ollamaTool struct {
	Type     string         `json:"type"`
	Function ollamaFunction `json:"function"`
}
// Function definition for Ollama API
type ollamaFunction struct {
	Name        string         `json:"name"`
	Description string         `json:"description"`
	Parameters  map[string]any `json:"parameters"`
}
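// A minimal sketch of declaring a tool the model may call. The
// Parameters map holds a JSON-Schema-style description of the tool's
// arguments; the tool name and fields here are illustrative.
func exampleWeatherTool() ollamaTool {
	return ollamaTool{
		Type: "function",
		Function: ollamaFunction{
			Name:        "get_weather",
			Description: "Look up the current weather for a city.",
			Parameters: map[string]any{
				"type": "object",
				"properties": map[string]any{
					"city": map[string]any{"type": "string"},
				},
				"required": []string{"city"},
			},
		},
	}
}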
// Tool Call from Ollama API
type ollamaToolCall struct {
	Function ollamaFunctionCall `json:"function"`
}
// Function Call for Ollama API
type ollamaFunctionCall struct {
	Name      string `json:"name"`
	Arguments any    `json:"arguments"`
}
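// Because Arguments is declared as any, a tool call decoded with
// json.Unmarshal typically carries its arguments as a map[string]any
// (Ollama sends them as a JSON object). A minimal sketch of extracting
// them:
func exampleToolCallArgs(call ollamaToolCall) (map[string]any, bool) {
	args, ok := call.Function.Arguments.(map[string]any)
	return args, ok
}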
// TODO: Add optional parameters (images, format, options, etc.) based on your use case