Django + ModelRiver

Add AI capabilities to your Django application: REST API views, Celery background tasks, and Django Channels for real-time streaming.

Overview

Django is Python's batteries-included web framework. Adding ModelRiver AI capabilities to Django views, REST APIs, and background tasks requires just a few lines of configuration.


Quick start

Install dependencies

Bash
pip install django openai djangorestframework python-dotenv
# the Celery and Django Channels sections below also need:
pip install celery channels

Configuration

PYTHON
# settings.py
import os

MODELRIVER_API_KEY = os.environ.get("MODELRIVER_API_KEY", "")
MODELRIVER_BASE_URL = "https://api.modelriver.com/v1"
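
The quick start installs python-dotenv but never loads it. If you keep the API key in a local .env file, load it near the top of settings.py before the settings above read the environment (a minimal sketch; the variable name matches the settings above):

PYTHON
# settings.py — optional: load a local .env file during development so
# MODELRIVER_API_KEY is in os.environ before the settings above read it
from dotenv import load_dotenv

load_dotenv()  # searches for a .env file in the working directory and its parents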

AI client service

PYTHON
# ai/client.py
from openai import OpenAI
from django.conf import settings

def get_client():
    # OpenAI-compatible client pointed at the ModelRiver gateway
    return OpenAI(
        base_url=settings.MODELRIVER_BASE_URL,
        api_key=settings.MODELRIVER_API_KEY,
    )

def chat(workflow: str, messages: list, **kwargs) -> str:
    # Run a chat completion against a ModelRiver workflow and return the text
    client = get_client()
    response = client.chat.completions.create(
        model=workflow,
        messages=messages,
        **kwargs,
    )
    return response.choices[0].message.content

Django views

PYTHON
# views.py
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
import json
from .client import chat

@csrf_exempt
@require_POST
def chat_view(request):
    body = json.loads(request.body)
    message = body.get("message", "")

    response = chat(
        workflow="my-chat-workflow",
        messages=[{"role": "user", "content": message}],
    )

    return JsonResponse({"content": response})
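
The view still needs a route before it can be called. A minimal urls.py sketch (the "api/chat/" path and the route name are assumptions; adjust them to your project):

PYTHON
# urls.py — hypothetical wiring for chat_view; the path is an assumption
from django.urls import path
from . import views

urlpatterns = [
    path("api/chat/", views.chat_view, name="chat"),
]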

Django REST Framework

PYTHON
# serializers.py
from rest_framework import serializers

class ChatSerializer(serializers.Serializer):
    message = serializers.CharField(max_length=4000)
    workflow = serializers.CharField(default="my-chat-workflow")

class ChatResponseSerializer(serializers.Serializer):
    content = serializers.CharField()
    tokens = serializers.IntegerField()

# views.py
from rest_framework.views import APIView
from rest_framework.response import Response
from openai import OpenAI
from django.conf import settings
from .serializers import ChatSerializer

class ChatAPIView(APIView):
    def post(self, request):
        serializer = ChatSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)

        client = OpenAI(
            base_url=settings.MODELRIVER_BASE_URL,
            api_key=settings.MODELRIVER_API_KEY,
        )

        response = client.chat.completions.create(
            model=serializer.validated_data["workflow"],
            messages=[{"role": "user", "content": serializer.validated_data["message"]}],
        )

        return Response({
            "content": response.choices[0].message.content,
            "tokens": response.usage.total_tokens,
        })
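
ChatResponseSerializer is declared above but never used in the view. If you want the documented response shape enforced at runtime, one option (a sketch, not the only approach) is a small helper that the final return in ChatAPIView.post can call; the helper name is hypothetical:

PYTHON
# Hypothetical helper: validate the outgoing payload against
# ChatResponseSerializer so the documented shape is enforced at runtime
from rest_framework.response import Response
from .serializers import ChatResponseSerializer

def build_chat_response(content: str, tokens: int) -> Response:
    out = ChatResponseSerializer(data={"content": content, "tokens": tokens})
    out.is_valid(raise_exception=True)
    return Response(out.data)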

Celery background tasks

For heavy AI processing, use Celery to avoid blocking web workers:

PYTHON
# tasks.py
from celery import shared_task
from openai import OpenAI
from django.conf import settings

@shared_task
def summarise_document(doc_id: int):
    # Imported inside the task so the module can load before Django apps are ready
    from myapp.models import Document

    doc = Document.objects.get(id=doc_id)

    client = OpenAI(
        base_url=settings.MODELRIVER_BASE_URL,
        api_key=settings.MODELRIVER_API_KEY,
    )

    response = client.chat.completions.create(
        model="my-summary-workflow",
        messages=[
            {"role": "system", "content": "Summarise the following document concisely."},
            {"role": "user", "content": doc.content},
        ],
    )

    doc.summary = response.choices[0].message.content
    doc.save()
    return doc.summary

# Usage in views.py (imports as in the Django views section above)
@csrf_exempt
@require_POST
def summarise_view(request, doc_id):
    summarise_document.delay(doc_id)
    return JsonResponse({"status": "processing"})
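
@shared_task assumes a Celery app is already configured for the project. If you don't have one yet, this is the standard Django bootstrap (a sketch; "myproject" is a placeholder for your own project package, and the broker URL comes from your settings):

PYTHON
# myproject/celery.py — standard Celery-for-Django bootstrap;
# "myproject" is a placeholder for your project package
import os
from celery import Celery

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myproject.settings")

app = Celery("myproject")
app.config_from_object("django.conf:settings", namespace="CELERY")
app.autodiscover_tasks()  # picks up tasks.py in each installed app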

Streaming with Django Channels

PYTHON
# consumers.py
import json
from channels.generic.websocket import AsyncWebsocketConsumer
from openai import AsyncOpenAI
from django.conf import settings

class ChatConsumer(AsyncWebsocketConsumer):
    async def connect(self):
        await self.accept()
        self.messages = []
        self.client = AsyncOpenAI(
            base_url=settings.MODELRIVER_BASE_URL,
            api_key=settings.MODELRIVER_API_KEY,
        )

    async def receive(self, text_data=None, bytes_data=None):
        data = json.loads(text_data)
        self.messages.append({"role": "user", "content": data["message"]})

        stream = await self.client.chat.completions.create(
            model="my-chat-workflow",
            messages=self.messages,
            stream=True,
        )

        full_response = ""
        async for chunk in stream:
            content = chunk.choices[0].delta.content
            if content:
                full_response += content
                await self.send(json.dumps({"type": "chunk", "content": content}))

        self.messages.append({"role": "assistant", "content": full_response})
        await self.send(json.dumps({"type": "done"}))
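
The consumer also needs ASGI routing before WebSocket connections can reach it. A minimal sketch ("myproject", "myapp", and the "ws/chat/" path are assumptions):

PYTHON
# asgi.py — routes WebSocket traffic to ChatConsumer; "myproject",
# "myapp", and the "ws/chat/" path are assumptions
import os
from django.core.asgi import get_asgi_application

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myproject.settings")
django_asgi_app = get_asgi_application()  # initialise Django before importing consumers

from channels.routing import ProtocolTypeRouter, URLRouter
from django.urls import path
from myapp.consumers import ChatConsumer

application = ProtocolTypeRouter({
    "http": django_asgi_app,
    "websocket": URLRouter([
        path("ws/chat/", ChatConsumer.as_asgi()),
    ]),
})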

Best practices

  1. Use Celery for heavy processing: Summarisation, embeddings, and analysis should run asynchronously so web workers stay free
  2. Create a reusable AI service: Centralise ModelRiver configuration in one module, as ai/client.py does above
  3. Add error handling: Catch openai.APIError and return user-friendly error responses (see the sketch after this list)
  4. Use Django Channels for streaming: Standard synchronous views can't stream server-sent events efficiently
  5. Monitor in Request Logs: Track per-view costs in Observability
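
For item 3, a minimal sketch of catching openai.APIError in the plain Django view from earlier (the 502 status and the message wording are choices for this sketch, not requirements):

PYTHON
# views.py — chat_view with error handling around the ModelRiver call;
# the 502 status and message wording are choices, not requirements
import json
import openai
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from .client import chat

@csrf_exempt
@require_POST
def chat_view(request):
    body = json.loads(request.body)
    message = body.get("message", "")

    try:
        response = chat(
            workflow="my-chat-workflow",
            messages=[{"role": "user", "content": message}],
        )
    except openai.APIError:
        return JsonResponse({"error": "The AI service is currently unavailable."}, status=502)

    return JsonResponse({"content": response})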

Next steps