AI provider

Interact with LLM providers or extend the framework to implement new ones.

With Neuron you can switch between LLM providers with just one line of code, without any impact on your agent implementation.
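For example, moving an agent from Anthropic to OpenAI (both shown below) only requires changing the return value of the provider() method:

public function provider(): AIProviderInterface
{
    // Before: return new Anthropic(key: 'ANTHROPIC_API_KEY', model: 'ANTHROPIC_MODEL');
    return new OpenAI(
        key: 'OPENAI_API_KEY',
        model: 'OPENAI_MODEL',
    );
}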

Anthropic

namespace App\Neuron;

use NeuronAI\Agent;
use NeuronAI\Chat\Messages\UserMessage;
use NeuronAI\Providers\AIProviderInterface;
use NeuronAI\Providers\Anthropic\Anthropic;
use NeuronAI\Observability\AgentMonitoring;

class MyAgent extends Agent
{
    public function provider(): AIProviderInterface
    {
        return new Anthropic(
            key: 'ANTHROPIC_API_KEY',
            model: 'ANTHROPIC_MODEL',
        );
    }
}

// The Inspector instance in your application - https://inspector.dev/
$inspector = new \Inspector\Inspector(
    new \Inspector\Configuration('INSPECTOR_INGESTION_KEY')
);

echo MyAgent::make()
    ->observe(new AgentMonitoring($inspector))
    ->chat(new UserMessage("Hi!"));
// Hi, how can I help you today?

OpenAI

namespace App\Neuron;

use NeuronAI\Agent;
use NeuronAI\Chat\Messages\UserMessage;
use NeuronAI\Providers\AIProviderInterface;
use NeuronAI\Providers\OpenAI\OpenAI;
use NeuronAI\Observability\AgentMonitoring;

class MyAgent extends Agent
{
    public function provider(): AIProviderInterface
    {
        return new OpenAI(
            key: 'OPENAI_API_KEY',
            model: 'OPENAI_MODEL',
        );
    }
}

// The Inspector instance in your application - https://inspector.dev/
$inspector = new \Inspector\Inspector(
    new \Inspector\Configuration('INSPECTOR_INGESTION_KEY')
);

echo MyAgent::make()
    ->observe(new AgentMonitoring($inspector))
    ->chat(new UserMessage("Hi!"));
// Hi, how can I help you today?

AzureOpenAI

This provider allows you to connect to OpenAI models hosted on the Azure cloud platform.

namespace App\Neuron;

use NeuronAI\Agent;
use NeuronAI\Chat\Messages\UserMessage;
use NeuronAI\Providers\AIProviderInterface;
use NeuronAI\Providers\AzureOpenAI;
use NeuronAI\Observability\AgentMonitoring;

class MyAgent extends Agent
{
    public function provider(): AIProviderInterface
    {
        return new AzureOpenAI(
            key: 'AZURE_API_KEY',
            endpoint: 'AZURE_ENDPOINT',
            model: 'OPENAI_MODEL',
            version: 'AZURE_API_VERSION'
        );
    }
}

// The Inspector instance in your application - https://inspector.dev/
$inspector = new \Inspector\Inspector(
    new \Inspector\Configuration('INSPECTOR_INGESTION_KEY')
);

echo MyAgent::make()
    ->observe(new AgentMonitoring($inspector))
    ->chat(new UserMessage("Hi!"));
// Hi, how can I help you today?

Ollama

Ollama does not support tools in combination with streaming.

namespace App\Neuron;

use NeuronAI\Agent;
use NeuronAI\Chat\Messages\UserMessage;
use NeuronAI\Providers\AIProviderInterface;
use NeuronAI\Providers\Ollama\Ollama;
use NeuronAI\Observability\AgentMonitoring;

class MyAgent extends Agent
{
    public function provider(): AIProviderInterface
    {
        return new Ollama(
            url: 'OLLAMA_URL',
            model: 'OLLAMA_MODEL',
        );
    }
}

// The Inspector instance in your application - https://inspector.dev/
$inspector = new \Inspector\Inspector(
    new \Inspector\Configuration('INSPECTOR_INGESTION_KEY')
);

echo MyAgent::make()
    ->observe(new AgentMonitoring($inspector))
    ->chat(new UserMessage("Hi!"));
// Hi, how can I help you today?

Gemini

namespace App\Neuron;

use NeuronAI\Agent;
use NeuronAI\Chat\Messages\UserMessage;
use NeuronAI\Providers\AIProviderInterface;
use NeuronAI\Providers\Gemini\Gemini;

class MyAgent extends Agent
{
    public function provider(): AIProviderInterface
    {
        return new Gemini(
            key: 'GEMINI_API_KEY',
            model: 'GEMINI_MODEL',
        );
    }
}

// The Inspector instance in your application - https://inspector.dev/
$inspector = new \Inspector\Inspector(
    new \Inspector\Configuration('INSPECTOR_INGESTION_KEY')
);

echo MyAgent::make()
    ->observe(new AgentMonitoring($inspector))
    ->chat(new UserMessage("Hi!"));
// Hi, how can I help you today?

Gemini does not support tools in combination with structured output.

Implement OpenAI compatible providers

If you want to interact with an LLM provider that supports the same API format as OpenAI, you can easily create a dedicated class in a few lines of code by extending our OpenAI provider:

namespace App\Neuron\Providers;

use NeuronAI\Providers\OpenAI\OpenAI;

class TogetherAI extends OpenAI
{
    protected string $baseUri = "https://api.together.xyz";
}

That's it.

You can now use this new provider in your Agent implementation:

use App\Neuron\Providers\TogetherAI;
use NeuronAI\Agent;
use NeuronAI\Providers\AIProviderInterface;

class MyAgent extends Agent
{
    public function provider(): AIProviderInterface
    {
        return new TogetherAI(
            key: 'TOGETHER_API_KEY',
            model: 'TOGETHER_MODEL',
        );
    }
}

Implement a new provider

If you want to create a new provider, you have to implement the AIProviderInterface:

namespace NeuronAI\Providers;

use NeuronAI\Chat\Messages\Message;
use NeuronAI\Tools\ToolInterface;
use NeuronAI\Providers\MessageMapperInterface;

interface AIProviderInterface
{
    /**
     * Send predefined instruction to the LLM.
     */
    public function systemPrompt(?string $prompt): AIProviderInterface;

    /**
     * Set the tools to be exposed to the LLM.
     *
     * @param array<ToolInterface> $tools
     */
    public function setTools(array $tools): AIProviderInterface;
    
    /**
     * The component responsible for mapping the NeuronAI Message to the AI provider format.
     */
    public function messageMapper(): MessageMapperInterface;

    /**
     * Send a prompt to the AI agent.
     */
    public function chat(array $messages): Message;
    
    /**
     * Yield the LLM response.
     */
    public function stream(array|string $messages, callable $executeToolsCallback): \Generator;
    
    /**
     * Schema validated response.
     */
    public function structured(string $class, Message|array $messages, int $maxRetry = 1): mixed;
}

The chat method should contain the call to the underlying LLM. If the provider doesn't support tools and function calls, you can implement the related methods with a placeholder, as in the sketch below.
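A minimal sketch of such a placeholder (the exception message is illustrative, not part of the framework):

public function setTools(array $tools): AIProviderInterface
{
    // Reject tools explicitly instead of silently ignoring them,
    // so misconfigured agents fail fast.
    if (!empty($tools)) {
        throw new \Exception('This provider does not support tools.');
    }
    return $this;
}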

This is the basic template for a new AI provider implementation:

namespace App\Neuron\Providers;

use GuzzleHttp\Client;
use GuzzleHttp\RequestOptions;
use NeuronAI\Chat\Messages\AssistantMessage;
use NeuronAI\Chat\Messages\Message;
use NeuronAI\Providers\AIProviderInterface;
use NeuronAI\Providers\HandleWithTools;
use NeuronAI\Providers\MessageMapperInterface;

class MyAIProvider implements AIProviderInterface
{
    use HandleWithTools;

    /**
     * The http client.
     *
     * @var Client
     */
    protected Client $client;

    /**
     * System instructions.
     *
     * @var ?string
     */
    protected ?string $system = null;

    /**
     * The component responsible for mapping the NeuronAI Message to the AI provider format.
     *
     * @var MessageMapperInterface
     */
    protected MessageMapperInterface $messageMapper;

    public function __construct(
        protected string $key,
        protected string $model
    ) {
        $this->client = new Client([
            // The trailing slash matters: Guzzle resolves relative URIs
            // like 'chat' against the base_uri path.
            'base_uri' => 'https://api.provider.com/v1/',
            'headers' => [
                'Content-Type' => 'application/json',
                'Authorization' => "Bearer {$this->key}",
            ]
        ]);
    }

    /**
     * @inheritDoc
     */
    public function systemPrompt(?string $prompt): AIProviderInterface
    {
        $this->system = $prompt;
        return $this;
    }

    public function messageMapper(): MessageMapperInterface
    {
        if (!isset($this->messageMapper)) {
            // Your concrete mapper for this provider (see the sketch below).
            $this->messageMapper = new MessageMapper();
        }
        return $this->messageMapper;
    }

    /**
     * @inheritDoc
     */
    public function chat(array $messages): Message
    {
        $result = $this->client->post('chat', [
            RequestOptions::JSON => [
                'model' => $this->model,
                'messages' => \array_map(function (Message $message) {
                    return $message->jsonSerialize();
                }, $messages)
            ]
        ])->getBody()->getContents();

        $result = \json_decode($result, true);

        return new AssistantMessage($result['content']);
    }

    /**
     * @inheritDoc
     */
    public function stream(array|string $messages, callable $executeToolsCallback): \Generator
    {
        // Placeholder: implement streaming against the provider API,
        // or throw if the provider doesn't support it.
        throw new \Exception('Streaming is not supported by this provider.');
    }

    /**
     * @inheritDoc
     */
    public function structured(string $class, Message|array $messages, int $maxRetry = 1): mixed
    {
        // Placeholder: implement schema-validated output,
        // or throw if the provider doesn't support it.
        throw new \Exception('Structured output is not supported by this provider.');
    }
}
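The template above references a MessageMapper class you have to provide. A minimal sketch, assuming MessageMapperInterface requires a single map() method (check the interface in the framework source before relying on this):

namespace App\Neuron\Providers;

use NeuronAI\Chat\Messages\Message;
use NeuronAI\Providers\MessageMapperInterface;

class MessageMapper implements MessageMapperInterface
{
    /**
     * Convert NeuronAI messages into the provider wire format.
     * The map() signature is an assumption; adjust it to the
     * actual MessageMapperInterface contract.
     */
    public function map(array $messages): array
    {
        return \array_map(
            fn (Message $message) => $message->jsonSerialize(),
            $messages
        );
    }
}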

After creating your own implementation, you can use it in the agent:

namespace App\Neuron;

use App\Neuron\Providers\MyAIProvider;
use NeuronAI\Agent;
use NeuronAI\Providers\AIProviderInterface;

class MyAgent extends Agent
{
    public function provider(): AIProviderInterface
    {
        return new MyAIProvider(
            key: 'PROVIDER_API_KEY',
            model: 'PROVIDER_MODEL',
        );
    }
}

We strongly recommend submitting new provider implementations via a PR on the official repository, or through the other support channels. A new implementation can receive an important boost in its development from the community.
