Skip to main content
POST
/
ai
/
chat
/
responses
chat responses
curl --request POST \
  --url https://ai.pmock.com/api/ai/chat/responses \
  --header 'Content-Type: application/json' \
  --data '
{
  "model": "gpt-5.1",
  "input": "<unknown>",
  "instructions": "<string>",
  "stream": false,
  "temperature": 123,
  "maxOutputTokens": 123,
  "topP": 123,
  "tools": [
    "<unknown>"
  ],
  "toolChoice": "<unknown>",
  "text": "<unknown>"
}
'
{}

Maven

<dependency>
    <groupId>com.openai</groupId>
    <artifactId>openai-java</artifactId>
    <version>x.x.x</version> <!-- replace x.x.x with the latest openai-java release -->
</dependency>

Non-Streaming Request Example

@Test
void chatV1ResponsesClientTest() {
    // Client pointed at the pmock gateway; the SDK appends the /responses path itself.
    OpenAIClient client = OpenAIOkHttpClient.builder()
            .apiKey(apiKey) // Your access key
            .baseUrl("https://ai.pmock.com/api/ai/chat")
            .build();

    // Minimal non-streaming request: model + system instructions + user input.
    ResponseCreateParams params = ResponseCreateParams.builder()
            .model("gpt-5.2")
            .instructions("You are a helpful assistant.")
            .input("Hello!")
            .build();

    com.openai.models.responses.Response response = client.responses().create(params);
    // orElseThrow() instead of unchecked get(): fails with a clear NoSuchElementException
    // if the first output item is not a message (e.g. a tool call).
    // NOTE(review): outputText() may itself be an Optional in some SDK versions — confirm
    // against the pinned openai-java release whether a further unwrap/.text() is needed.
    System.out.println(response.output().get(0).message().orElseThrow().content().get(0).outputText());
}

Stream Response

@Test
void streamChatV1ResponsesClientTest() {
    // Client targeting the pmock gateway base URL.
    OpenAIClient client = OpenAIOkHttpClient.builder()
            .apiKey(apiKey) // Your access key
            .baseUrl("https://ai.pmock.com/api/ai/chat")
            .build();

    // Same request shape as the non-streaming call; only the invocation differs.
    ResponseCreateParams request = ResponseCreateParams.builder()
            .model("gpt-5.2")
            .instructions("You are a helpful assistant.")
            .input("Hello!")
            .build();

    // try-with-resources guarantees the event stream is closed when the pipeline ends.
    try (StreamResponse<ResponseStreamEvent> streaming = client.responses().createStreaming(request)) {
        streaming.stream()
                .forEach(chunk -> chunk.outputTextDelta()
                        .ifPresent(event -> System.out.print(event.delta())));
    }
}

Body

application/json
model
enum<string>
required

model

Available options:
gpt-5.1,
gpt-5.1-all,
gpt-5.1-thinking,
gpt-5.1-thinking-all,
gpt-5.2,
gemini-3-pro,
gemini-2.5-pro,
gemini-2.5-flash,
gemini-3-flash,
gemini-3.1-pro,
claude-opus-4-6,
claude-haiku-4-5-20251001,
claude-sonnet-4-6,
gpt-5.4,
gpt-5.3,
gpt-5.3-codex
input
any
required

The input to the model — e.g. a plain string, as in the examples above (field type is unconstrained)

instructions
string

instructions

stream
boolean
default:false

Whether to stream the response incrementally (see the streaming example above)

temperature
number<double>

Sampling temperature (valid range 0–2)

maxOutputTokens
integer<int32>

Maximum number of output tokens to generate

topP
number<double>

Top-p (nucleus sampling) parameter

tools
any[]

Tool List (Function Calling)

toolChoice
any

Tool selection strategy

text
any

Text

Response

200 - */*

OK

The response is of type object.