Skip to main content
POST
/
ai
/
openai
/
chat
/
completions
OpenAI-compatible chat completions endpoint
curl --request POST \
  --url https://ai.pmock.com/api/ai/openai/chat/completions \
  --header 'Content-Type: application/json' \
  --data '
{
  "messages": [
    {
      "content": "<unknown>",
      "role": "user",
      "name": "<string>"
    }
  ],
  "model": "gpt-5.1",
  "stream": false,
  "temperature": 123,
  "maxTokens": 123,
  "maxCompletionTokens": 123,
  "topP": 123,
  "frequencyPenalty": 123,
  "presencePenalty": 123,
  "stop": "<unknown>",
  "n": 123,
  "seed": 123,
  "user": "<string>",
  "tools": [
    "<unknown>"
  ],
  "toolChoice": "<unknown>",
  "responseFormat": "<unknown>",
  "logprobs": true,
  "topLogprobs": 123,
  "logitBias": "<unknown>",
  "parallelToolCalls": true
}
'
{}

Normal Request Example

// Credentials: authentication travels in a custom header; the SDK's apiKey
// field is unused for auth but should still be populated.
String API_KEY = "ak";
String API_SECRET = "sk";

// Build a client pointed at the OpenAI-compatible base URL.
OpenAIClient openAiClient = OpenAIOkHttpClient.builder()
        .apiKey(API_KEY) // It can be any value, as we do not use this authentication, but we recommend filling in API_KEY
        .headers(Headers.builder().put("wuinai-access-key", API_KEY).build())
        .baseUrl("https://ai.pmock.com/api/ai/openai")
        .build();

// Assemble the chat request: system prompt plus one user message.
ChatCompletionCreateParams requestParams = ChatCompletionCreateParams.builder()
        .model("gpt-5.1")
        .addSystemMessage("You are a helpful assistant.")
        .addUserMessage("Windows")
        // signature is the per-request authentication token (see Introduction)
        .additionalHeaders(Headers.builder().put("wuinai-access-token", signature).build())
        .build();

// Send the request and print the content of the first choice.
ChatCompletion completion = openAiClient.chat().completions().create(requestParams);
System.out.println(completion.choices().get(0).message().content());

Stream Request Example

String API_KEY = "ak";
String API_SECRET = "sk";

// Client setup: authentication is carried by the custom access-key header,
// not by the SDK's apiKey field.
OpenAIClient client = OpenAIOkHttpClient.builder()
        .apiKey(API_KEY) // It can be any value, as we do not use this authentication, but we recommend filling in API_KEY
        .headers(Headers.builder().put("wuinai-access-key", API_KEY).build())
        .baseUrl("https://ai.pmock.com/api/ai/openai")
        .build();

ChatCompletionCreateParams streamParams = ChatCompletionCreateParams.builder()
        .model("gpt-5.1")
        .addSystemMessage("You are a helpful assistant.")
        .addUserMessage("Hi")
        .additionalHeaders(Headers.builder()
                .put("wuinai-access-token", signature).build()) // signature is the authentication of Introduction
        .build();

// StreamResponse is AutoCloseable: try-with-resources closes it on every path.
// (The previous version closed it only in the catch block, leaking the
// underlying HTTP connection whenever the stream completed successfully.)
try (StreamResponse<ChatCompletionChunk> streaming =
        client.chat().completions().createStreaming(streamParams)) {
    // Print each incremental delta as it arrives; absent content prints nothing.
    streaming.stream().forEach(chunk ->
            System.out.print(chunk.choices().get(0).delta().content().orElse("")));
    System.out.println("流式响应成功完成!");
} catch (Exception e) {
    System.err.println("流式请求失败: " + e.getMessage());
}

Maven

<dependency>
    <groupId>com.openai</groupId>
    <artifactId>openai-java</artifactId>
    <version>x.x.x</version>
</dependency>

Normal Response

{
	"id": "chatcmpl-xxx",
	"object": "chat.completion",
	"created": 1768810259,
	"model": "gpt-5.1",
	"choices": [
		{
			"index": 0,
			"message": {
				"role": "assistant",
				"content": "Hey! How's it going?"
			},
			"finish_reason": "stop"
		}
	],
	"usage": {
		"prompt_tokens": 0,
		"completion_tokens": 0,
		"total_tokens": 0,
		"prompt_tokens_details": {
			"text_tokens": 0
		},
		"completion_tokens_details": {
			"content_tokens": 0
		}
	}
}

Stream Response

data: {"created":1768823313,"model":"gpt-5.1","id":"chatcmpl-xxx","choices":[{"delta":{"content":"?"},"index":0}],"object":"chat.completion.chunk"}
data: {"created":1768823313,"model":"gpt-5.1","id":"chatcmpl-xxx","choices":[{"delta":{},"index":0}],"object":"chat.completion.chunk"}
data: {"created":1768823313,"model":"gpt-5.1","id":"chatcmpl-xxx","choices":[{"delta":{},"finish_reason":"stop","index":0}],"object":"chat.completion.chunk"}
data: {"created":1768823316,"usage":{"completion_tokens":0,"completion_tokens_details":{},"prompt_tokens":0,"prompt_tokens_details":{},"total_tokens":0},"model":"gpt-5.1","id":"chatcmpl-xxx","choices":[{"delta":{},"index":0}],"object":"chat.completion.chunk"}
data: [DONE]

Body

application/json
messages
object[]
required

messages

Minimum array length: 1
model
enum<string>
required

model

Available options:
gpt-5.1,
gpt-5.1-all,
gpt-5.1-thinking,
gpt-5.1-thinking-all,
gpt-5.2,
gemini-3-pro,
gemini-2.5-pro,
gemini-2.5-flash,
gemini-3-flash,
claude-4.5
stream
boolean
default:false

Enable streaming output

temperature
number<double>

Temperature (0-2)

maxTokens
integer<int32>

Maximum token count

maxCompletionTokens
integer<int32>

Maximum number of completion tokens

topP
number<double>

top P

frequencyPenalty
number<double>

Frequency penalty (-2.0 to 2.0)

presencePenalty
number<double>

Presence penalty (-2.0 to 2.0)

stop
any

Stop sequence

n
integer<int32>

Number of completions to generate

seed
integer<int32>

random seed

user
string

user

tools
any[]

Tool List (Function Calling)

toolChoice
any

Tool selection strategy

responseFormat
any

Response format

logprobs
boolean

logprobs

topLogprobs
integer<int32>

top logprobs

logitBias
any

logit bias

parallelToolCalls
boolean

parallel tool calls

Response

OK

The response is of type object.