Overview
Django is Python's batteries-included web framework. Adding ModelRiver AI capabilities to Django views, REST APIs, and background tasks requires just a few lines of configuration.
Quick start
Install dependencies
Bash
pip install django openai djangorestframework python-dotenv
Configuration
PYTHON
# settings.py
import os

MODELRIVER_API_KEY = os.environ.get("MODELRIVER_API_KEY", "")
MODELRIVER_BASE_URL = "https://api.modelriver.com/v1"
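Since python-dotenv is in the install list above, you can load a local .env file before these settings read the environment. A minimal sketch, assuming the .env file sits next to manage.py:
PYTHON
# settings.py (top of file, before the settings above)
from dotenv import load_dotenv

# Populate os.environ from a .env file containing, e.g.:
#   MODELRIVER_API_KEY=your-key-here
load_dotenv()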
AI client service
PYTHON
# ai/client.py
from openai import OpenAI
from django.conf import settings

def get_client():
    return OpenAI(
        base_url=settings.MODELRIVER_BASE_URL,
        api_key=settings.MODELRIVER_API_KEY,
    )

def chat(workflow: str, messages: list, **kwargs) -> str:
    client = get_client()
    response = client.chat.completions.create(
        model=workflow,
        messages=messages,
        **kwargs,
    )
    return response.choices[0].message.content
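To sanity-check the helper before wiring up any views, a quick session inside `python manage.py shell` works; the workflow name below is a placeholder for one configured in your ModelRiver dashboard:
PYTHON
# Run inside `python manage.py shell` so Django settings are loaded.
from ai.client import chat

reply = chat("my-chat-workflow", [{"role": "user", "content": "Hello"}])
print(reply)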
Django views
PYTHON
# views.py
import json

from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST

from .client import chat

@csrf_exempt
@require_POST
def chat_view(request):
    body = json.loads(request.body)
    message = body.get("message", "")

    response = chat(
        workflow="my-chat-workflow",
        messages=[{"role": "user", "content": message}],
    )

    return JsonResponse({"content": response})
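The view still needs a route. A minimal URLconf sketch; the api/chat/ path is an assumption to adapt to your project:
PYTHON
# urls.py
from django.urls import path

from .views import chat_view

urlpatterns = [
    # POST /api/chat/ with {"message": "..."} in the request body
    path("api/chat/", chat_view, name="chat"),
]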
Django REST Framework
PYTHON
# serializers.py
from rest_framework import serializers

class ChatSerializer(serializers.Serializer):
    message = serializers.CharField(max_length=4000)
    workflow = serializers.CharField(default="my-chat-workflow")

class ChatResponseSerializer(serializers.Serializer):
    content = serializers.CharField()
    tokens = serializers.IntegerField()

# views.py
from rest_framework.views import APIView
from rest_framework.response import Response
from openai import OpenAI
from django.conf import settings

from .serializers import ChatSerializer

class ChatAPIView(APIView):
    def post(self, request):
        serializer = ChatSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)

        client = OpenAI(
            base_url=settings.MODELRIVER_BASE_URL,
            api_key=settings.MODELRIVER_API_KEY,
        )

        response = client.chat.completions.create(
            model=serializer.validated_data["workflow"],
            messages=[{"role": "user", "content": serializer.validated_data["message"]}],
        )

        return Response({
            "content": response.choices[0].message.content,
            "tokens": response.usage.total_tokens,
        })
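These views assume Django REST Framework is registered with the project. A sketch of the relevant setting; the "ai" app name is an assumption matching the module layout used above:
PYTHON
# settings.py
INSTALLED_APPS = [
    # ... the default django.contrib apps ...
    "rest_framework",  # required for DRF views and serializers
    "ai",              # hypothetical app holding the client, serializers, and views
]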
Celery background tasks
For heavy AI processing, use Celery to avoid blocking web workers:
PYTHON
# tasks.py
from celery import shared_task
from openai import OpenAI
from django.conf import settings

@shared_task
def summarise_document(doc_id: int):
    # Imported inside the task to avoid loading models before Django's app registry is ready
    from myapp.models import Document

    doc = Document.objects.get(id=doc_id)

    client = OpenAI(
        base_url=settings.MODELRIVER_BASE_URL,
        api_key=settings.MODELRIVER_API_KEY,
    )

    response = client.chat.completions.create(
        model="my-summary-workflow",
        messages=[
            {"role": "system", "content": "Summarise the following document concisely."},
            {"role": "user", "content": doc.content},
        ],
    )

    doc.summary = response.choices[0].message.content
    doc.save()
    return doc.summary

# views.py -- enqueue the task and return immediately
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST

from .tasks import summarise_document

@csrf_exempt
@require_POST
def summarise_view(request, doc_id):
    summarise_document.delay(doc_id)
    return JsonResponse({"status": "processing"})
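@shared_task requires a Celery app wired to Django. A minimal sketch of the standard wiring; "myproject" is a placeholder for your project package:
PYTHON
# myproject/celery.py
import os

from celery import Celery

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myproject.settings")

app = Celery("myproject")
app.config_from_object("django.conf:settings", namespace="CELERY")
app.autodiscover_tasks()  # picks up tasks.py in each installed app
Run a worker alongside the web process with celery -A myproject worker, with a broker (such as Redis) configured via CELERY_BROKER_URL.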
Streaming with Django Channels
PYTHON
# consumers.py
import json

from channels.generic.websocket import AsyncWebsocketConsumer
from openai import AsyncOpenAI
from django.conf import settings

class ChatConsumer(AsyncWebsocketConsumer):
    async def connect(self):
        await self.accept()
        self.messages = []
        self.client = AsyncOpenAI(
            base_url=settings.MODELRIVER_BASE_URL,
            api_key=settings.MODELRIVER_API_KEY,
        )

    async def receive(self, text_data=None):
        data = json.loads(text_data)
        self.messages.append({"role": "user", "content": data["message"]})

        stream = await self.client.chat.completions.create(
            model="my-chat-workflow",
            messages=self.messages,
            stream=True,
        )

        full_response = ""
        async for chunk in stream:
            content = chunk.choices[0].delta.content
            if content:
                full_response += content
                await self.send(json.dumps({"type": "chunk", "content": content}))

        self.messages.append({"role": "assistant", "content": full_response})
        await self.send(json.dumps({"type": "done"}))
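The consumer only receives traffic once Channels routes WebSockets to it. A minimal asgi.py sketch; the ws/chat/ path and module names are assumptions:
PYTHON
# asgi.py
import os

from django.core.asgi import get_asgi_application

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myproject.settings")
django_asgi_app = get_asgi_application()  # initialise Django before importing consumers

from channels.routing import ProtocolTypeRouter, URLRouter
from django.urls import path

from ai.consumers import ChatConsumer

application = ProtocolTypeRouter({
    "http": django_asgi_app,
    "websocket": URLRouter([
        path("ws/chat/", ChatConsumer.as_asgi()),
    ]),
})
This assumes channels is installed and added to INSTALLED_APPS, and that ASGI_APPLICATION points at this module's application.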
Best practices
- Use Celery for batch processing: Summarisation, embeddings, and analysis should run async
- Create a reusable AI service: Centralise ModelRiver configuration in one module
- Add error handling: Catch openai.APIError and return user-friendly error responses (see the sketch after this list)
- Use Django Channels for streaming: Standard views can't stream SSE efficiently
- Monitor in Request Logs: Track per-view costs in Observability
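A minimal sketch of that error-handling pattern around the chat helper from earlier; the view name and error payload are illustrative, not a ModelRiver convention:
PYTHON
# views.py
import json

import openai
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST

from .client import chat

@csrf_exempt
@require_POST
def safe_chat_view(request):
    body = json.loads(request.body)
    try:
        content = chat(
            workflow="my-chat-workflow",
            messages=[{"role": "user", "content": body.get("message", "")}],
        )
    except openai.APIError:
        # Log the underlying error server-side; return a generic message to the client
        return JsonResponse({"error": "AI service is temporarily unavailable"}, status=502)
    return JsonResponse({"content": content})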
Next steps
- FastAPI integration: Async-first alternative
- API reference: Endpoint documentation
- Webhooks: Async processing with webhooks