Skip to content

Serve

run_server()

Run the PITA API server using command-line arguments.

This function provides backward compatibility for running the server via

python -m pita.api.serve

For programmatic use, prefer start_server() instead.

Source code in pita/api/serve.py
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
def run_server():
    """
    Run the PITA API server using command-line arguments.

    This function provides backward compatibility for running the server via:
        python -m pita.api.serve

    For programmatic use, prefer start_server() instead.
    """
    # Defaults come from the environment-aware config so CLI flags are optional.
    defaults = get_default_config()

    arg_parser = argparse.ArgumentParser(description="PITA API Server")
    arg_parser.add_argument(
        "--model",
        type=str,
        default=defaults["model"],
        help="Model name or path",
    )
    arg_parser.add_argument(
        "--engine",
        type=str,
        default=defaults["engine"],
        choices=["vllm", "llama_cpp"],
        help="Inference engine",
    )
    arg_parser.add_argument(
        "--tokenizer",
        type=str,
        default=defaults["tokenizer"],
        help="Tokenizer path (optional)",
    )
    arg_parser.add_argument(
        "--port",
        type=int,
        default=defaults["port"],
        help="Port number",
    )
    arg_parser.add_argument(
        "--host",
        type=str,
        default=defaults["host"],
        help="Host address",
    )

    cli = arg_parser.parse_args()

    # Delegate to the programmatic entry point with the parsed values.
    start_server(
        model=cli.model,
        engine=cli.engine,
        tokenizer=cli.tokenizer,
        port=cli.port,
        host=cli.host,
    )

start_server(model: Optional[str] = None, engine: Optional[str] = None, tokenizer: Optional[str] = None, port: Optional[int] = None, host: Optional[str] = None)

Start the PITA API server with the specified configuration.

This function is the main entry point for starting the server programmatically or via the CLI. Parameters default to environment variables if not specified.

Parameters:

Name Type Description Default
model Optional[str]

Model name or path (default: PITA_MODEL env or 'Qwen/Qwen2.5-0.5B-Instruct')

None
engine Optional[str]

Inference engine - 'vllm' or 'llama_cpp' (default: PITA_ENGINE env or 'vllm')

None
tokenizer Optional[str]

Tokenizer path (default: PITA_TOKENIZER env or None)

None
port Optional[int]

Port number (default: PITA_PORT env or 8001)

None
host Optional[str]

Host address (default: PITA_HOST env or '0.0.0.0')

None
Source code in pita/api/serve.py
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
def start_server(
    model: Optional[str] = None,
    engine: Optional[str] = None,
    tokenizer: Optional[str] = None,
    port: Optional[int] = None,
    host: Optional[str] = None
):
    """
    Start the PITA API server with the specified configuration.

    This function is the main entry point for starting the server programmatically
    or via the CLI. Parameters default to environment variables if not specified.

    Args:
        model: Model name or path (default: PITA_MODEL env or 'Qwen/Qwen2.5-0.5B-Instruct')
        engine: Inference engine - 'vllm' or 'llama_cpp' (default: PITA_ENGINE env or 'vllm')
        tokenizer: Tokenizer path (default: PITA_TOKENIZER env or None)
        port: Port number (default: PITA_PORT env or 8001)
        host: Host address (default: PITA_HOST env or '0.0.0.0')
    """
    fallback = get_default_config()

    # Explicit arguments win; anything left as None falls back to the
    # environment-derived defaults. An `is None` test (not truthiness) is
    # used so falsy-but-valid values like port 0 or "" are respected.
    supplied = {
        "model": model,
        "engine": engine,
        "tokenizer": tokenizer,
        "port": port,
        "host": host,
    }
    config = {
        key: (fallback[key] if value is None else value)
        for key, value in supplied.items()
    }

    # Build a dedicated app instance bound to this configuration,
    # then hand it to uvicorn to serve.
    server_app = create_app(config)
    uvicorn.run(server_app, host=config["host"], port=config["port"])