Building an AI SaaS

1/25/2024 • AI TOOLS

After building several AI-powered applications, I’ve developed a systematic approach to creating successful AI SaaS products. Here’s my complete guide to building one from scratch.

Project Overview

We’ll build an AI chat application with:

  • Free tier (5 messages)
  • Authentication
  • Credits system
  • Payment integration
  • Chat history
  • User profiles

Core Features Implementation

1. Authentication Flow

// src/contexts/AuthContext.tsx
'use client';

import { createContext, useContext, useEffect, useState } from 'react';
import { supabase } from '../lib/supabase';

export const AuthContext = createContext({});

export function AuthProvider({ children }) {
  const [user, setUser] = useState(null);
  const [session, setSession] = useState(null);

  // Keep user and session in sync with Supabase Auth
  useEffect(() => {
    const { data: { subscription } } = supabase.auth.onAuthStateChange((_event, newSession) => {
      setSession(newSession);
      setUser(newSession?.user ?? null);
    });
    return () => subscription.unsubscribe();
  }, []);

  // Check for free messages before requiring auth
  const checkAuthRequirement = async (messageCount) => {
    if (messageCount >= 5 && !session) {
      return true; // Require auth
    }
    return false;
  };

  return (
    <AuthContext.Provider value={{ user, session, checkAuthRequirement }}>
      {children}
    </AuthContext.Provider>
  );
}

export const useAuth = () => useContext(AuthContext);
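
To make useAuth available everywhere, the provider wraps the tree once at the root. A minimal sketch for the App Router, assuming a src/app/layout.tsx entry point (the path and markup are placeholders for your own layout):

// src/app/layout.tsx — hypothetical root layout wrapping the app in AuthProvider
import type { ReactNode } from 'react';
import { AuthProvider } from '../contexts/AuthContext';

export default function RootLayout({ children }: { children: ReactNode }) {
  return (
    <html lang="en">
      <body>
        <AuthProvider>{children}</AuthProvider>
      </body>
    </html>
  );
}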

2. Chat Implementation

// src/components/Chat.tsx
'use client';

import { useState, useEffect } from 'react';
import { supabase } from '../lib/supabase';
import { useAuth } from '../contexts/AuthContext';
// showAuthModal, processWithAI and saveToDatabase are app-specific helpers;
// the OpenAI call behind processWithAI is covered in the next section

export default function Chat() {
  const [messages, setMessages] = useState([]);
  const [messageCount, setMessageCount] = useState(0);
  const { session, checkAuthRequirement } = useAuth();

  const handleSendMessage = async (content) => {
    // Check free tier limit
    const requireAuth = await checkAuthRequirement(messageCount);
    if (requireAuth) {
      showAuthModal();
      return;
    }

    // Process message with OpenAI
    const response = await processWithAI(content);

    // Save to database if authenticated
    if (session) {
      await saveToDatabase(content, response);
    }

    setMessages(prev => [...prev, { content, response }]);
    setMessageCount(prev => prev + 1);
  };

  // ...render the message list and an input wired to handleSendMessage
}

3. OpenAI Integration

// src/lib/openai.ts
import { Configuration, OpenAIApi } from 'openai';

const configuration = new Configuration({
  apiKey: process.env.OPENAI_API_KEY,
});

const openai = new OpenAIApi(configuration);

export async function processWithAI(content: string) {
  const completion = await openai.createChatCompletion({
    model: "gpt-3.5-turbo",
    messages: [{ role: "user", content }],
  });

  return completion.data.choices[0].message.content;
}
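
OPENAI_API_KEY is a server-side secret, so processWithAI shouldn't be imported into browser code directly. One way to keep the key on the server is a small route handler the chat UI can POST to; this is a sketch, and the /api/chat path is my own choice rather than anything fixed by the stack:

// src/app/api/chat/route.ts — hypothetical route handler wrapping processWithAI
import { NextResponse } from 'next/server';
import { processWithAI } from '../../../lib/openai';

export async function POST(req: Request) {
  const { content } = await req.json();

  if (!content || typeof content !== 'string') {
    return NextResponse.json({ error: 'Missing message content' }, { status: 400 });
  }

  const response = await processWithAI(content);
  return NextResponse.json({ response });
}

With this in place, handleSendMessage can call fetch('/api/chat', { method: 'POST', body: JSON.stringify({ content }) }) instead of touching the OpenAI SDK on the client.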

4. Credits System

// src/lib/credits.ts
import { supabase } from './supabase';

export async function checkCredits(userId: string) {
  const { data: credits } = await supabase
    .from('credits')
    .select('amount')
    .eq('user_id', userId)
    .single();

  return credits?.amount || 0;
}

export async function deductCredit(userId: string) {
  // Read the current balance, then write it back decremented
  // (for production, prefer an atomic decrement, e.g. a Postgres function)
  const current = await checkCredits(userId);
  if (current <= 0) return false;

  const { error } = await supabase
    .from('credits')
    .update({ amount: current - 1 })
    .eq('user_id', userId);

  return !error;
}

5. Stripe Integration

// src/lib/stripe.ts
import Stripe from 'stripe';

const stripe = new Stripe(process.env.STRIPE_SECRET_KEY);

export async function createCheckoutSession(userId: string, priceId: string) {
  const session = await stripe.checkout.sessions.create({
    line_items: [{
      price: priceId,
      quantity: 1,
    }],
    mode: 'payment',
    success_url: `${process.env.DOMAIN}/success?session_id={CHECKOUT_SESSION_ID}`,
    cancel_url: `${process.env.DOMAIN}/cancel`,
    metadata: {
      userId,
    },
  });

  return session;
}
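
Checkout only redirects the user; the credits still have to be granted once Stripe confirms the payment. Here's a minimal sketch of that webhook, with a few assumptions on my part: the /api/stripe/webhook path, a STRIPE_WEBHOOK_SECRET env var, and a price-to-credits mapping that mirrors the pricing cards later in this post:

// src/app/api/stripe/webhook/route.ts — hypothetical webhook handler
import Stripe from 'stripe';
import { supabase } from '../../../../lib/supabase';

const stripe = new Stripe(process.env.STRIPE_SECRET_KEY!);

// Assumed mapping from Stripe price IDs to credit amounts
const CREDITS_BY_PRICE: Record<string, number> = {
  price_100: 100,
  price_500: 500,
  price_1000: 1000,
};

export async function POST(req: Request) {
  const signature = req.headers.get('stripe-signature')!;
  const body = await req.text();

  // Verify the event really came from Stripe
  const event = stripe.webhooks.constructEvent(
    body,
    signature,
    process.env.STRIPE_WEBHOOK_SECRET!
  );

  if (event.type === 'checkout.session.completed') {
    const session = event.data.object as Stripe.Checkout.Session;
    const userId = session.metadata?.userId;

    // Look up which price was bought to know how many credits to grant
    const lineItems = await stripe.checkout.sessions.listLineItems(session.id);
    const priceId = lineItems.data[0]?.price?.id;
    const purchased = (priceId && CREDITS_BY_PRICE[priceId]) || 0;

    if (userId && purchased > 0) {
      // Read-then-write for brevity; in production use a service-role client
      // and an atomic increment
      const { data: row } = await supabase
        .from('credits')
        .select('amount')
        .eq('user_id', userId)
        .single();

      await supabase
        .from('credits')
        .update({ amount: (row?.amount || 0) + purchased })
        .eq('user_id', userId);
    }
  }

  return new Response(JSON.stringify({ received: true }), { status: 200 });
}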

Database Schema

-- Users table
create table users (
  id uuid references auth.users primary key,
  email text,
  credits integer default 0
);

-- Chats table
create table chats (
  id uuid primary key default uuid_generate_v4(),
  user_id uuid references users(id),
  content text,
  response text,
  created_at timestamp with time zone default timezone('utc'::text, now())
);

-- Credits table
create table credits (
  id uuid primary key default uuid_generate_v4(),
  user_id uuid references users(id),
  amount integer default 0,
  updated_at timestamp with time zone default timezone('utc'::text, now())
);
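
To keep the frontend honest about what these tables return, it helps to mirror them as TypeScript types (a hand-written sketch; Supabase can also generate types for you):

// src/types/database.ts — row shapes matching the schema above (file path is my own choice)
export interface ChatRow {
  id: string;
  user_id: string;
  content: string;
  response: string;
  created_at: string;
}

export interface CreditRow {
  id: string;
  user_id: string;
  amount: number;
  updated_at: string;
}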

User Interface Components

Profile Section

// src/components/UserProfile.tsx
'use client';

import { useEffect, useState } from 'react';
import { supabase } from '../lib/supabase';
import { useAuth } from '../contexts/AuthContext';

export default function UserProfile() {
  const { user } = useAuth();
  const [credits, setCredits] = useState(0);
  const [chats, setChats] = useState([]);

  // Load the credit balance and chat history for the signed-in user
  const fetchUserData = async () => {
    const { data: creditRow } = await supabase
      .from('credits').select('amount').eq('user_id', user.id).single();
    setCredits(creditRow?.amount || 0);

    const { data: chatRows } = await supabase
      .from('chats').select('*').eq('user_id', user.id)
      .order('created_at', { ascending: false });
    setChats(chatRows || []);
  };

  useEffect(() => {
    if (user) {
      fetchUserData();
    }
  }, [user]);

  // ChatItem is a small presentational component (omitted here)
  return (
    <div className="p-6">
      <h2>Profile</h2>
      <div>Credits: {credits}</div>
      <div>Chat History</div>
      {chats.map(chat => (
        <ChatItem key={chat.id} chat={chat} />
      ))}
    </div>
  );
}

Credits Purchase UI

// src/components/PurchaseCredits.tsx
'use client';

import { useAuth } from '../contexts/AuthContext';
import { createCheckoutSession } from '../lib/stripe';

export default function PurchaseCredits() {
  const { user } = useAuth();

  const plans = [
    { credits: 100, price: 10, id: 'price_100' },
    { credits: 500, price: 40, id: 'price_500' },
    { credits: 1000, price: 70, id: 'price_1000' },
  ];

  const handlePurchase = async (priceId) => {
    // createCheckoutSession uses the Stripe secret key, so in practice this call
    // belongs behind a server action or API route rather than in the browser
    const session = await createCheckoutSession(user.id, priceId);
    window.location.href = session.url;
  };

  // PricingCard is a presentational card component (omitted here)
  return (
    <div className="grid grid-cols-3 gap-6">
      {plans.map(plan => (
        <PricingCard
          key={plan.id}
          plan={plan}
          onPurchase={() => handlePurchase(plan.id)}
        />
      ))}
    </div>
  );
}

Best Practices & Tips

  1. State Management

    • Use contexts for global state
    • Keep chat state persistent
    • Handle loading states properly
  2. Error Handling

    • Implement proper error boundaries
    • Show user-friendly error messages
    • Log errors for debugging
  3. Performance

    • Implement pagination for chat history (see the sketch after this list)
    • Cache API responses
    • Optimize database queries
  4. Security

    • Secure API endpoints
    • Validate user inputs
    • Protect sensitive data
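
For the pagination point above, Supabase's .range() keeps things simple; a sketch, assuming a page size of 20 and a helper name of my own:

// Fetch one page of chat history, newest first
import { supabase } from '../lib/supabase';

const PAGE_SIZE = 20;

export async function fetchChatPage(userId: string, page: number) {
  const from = page * PAGE_SIZE;

  const { data, error } = await supabase
    .from('chats')
    .select('*')
    .eq('user_id', userId)
    .order('created_at', { ascending: false })
    .range(from, from + PAGE_SIZE - 1);

  if (error) throw error;
  return data;
}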

Common Issues & Solutions

  1. Chat Persistence
// Restore saved chats from localStorage on mount...
useEffect(() => {
  const savedChats = localStorage.getItem('chats');
  if (savedChats) {
    setMessages(JSON.parse(savedChats));
  }
}, []);

// ...and save them whenever they change
useEffect(() => {
  localStorage.setItem('chats', JSON.stringify(messages));
}, [messages]);
  2. Authentication Modal
// Close modal after successful auth
useEffect(() => {
  if (session) {
    setShowAuthModal(false);
  }
}, [session]);
  3. Credit System
// Check credits before API call
const handleChat = async () => {
  const hasCredits = await checkCredits(user.id);
  if (!hasCredits) {
    showPurchaseModal();
    return;
  }
  // Process chat...
};

Technical Stack

Here’s our complete technical stack:

  • Frontend: Next.js 14 with App Router
  • Backend: Edge Functions (Vercel/Cloudflare)
  • Database: Supabase (PostgreSQL)
  • Authentication: Supabase Auth
  • Payments: Stripe
  • AI Provider: OpenAI
  • Analytics: PostHog
  • Monitoring: Sentry
  • Testing: Jest + Playwright

Development Environment Setup

# Initial setup
npm create next-app@latest ai-saas -- --typescript --tailwind
cd ai-saas

# Install dependencies
npm install @supabase/supabase-js stripe openai @vercel/analytics
npm install @tailwindcss/typography @tailwindcss/forms --save-dev

# Environment variables
cp .env.example .env.local
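
The .env.local file then needs the keys referenced in the snippets throughout this post (values are your own; the localhost DOMAIN is just a local default):

# .env.local
OPENAI_API_KEY=
STRIPE_SECRET_KEY=
DOMAIN=http://localhost:3000
SUPABASE_URL=
SUPABASE_ANON_KEY=
NEXT_PUBLIC_SUPABASE_URL=
NEXT_PUBLIC_SUPABASE_ANON_KEY=
UPSTASH_REDIS_URL=
UPSTASH_REDIS_TOKEN=
NEXT_PUBLIC_POSTHOG_KEY=
NEXT_PUBLIC_POSTHOG_HOST=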

Advanced OpenAI Integration

import { Configuration, OpenAIApi } from 'openai';
import { rateLimit } from '../utils/rate-limit';

export class AIService {
  private static instance: AIService;
  private openai: OpenAIApi;
  
  private constructor() {
    this.openai = new OpenAIApi(new Configuration({
      apiKey: process.env.OPENAI_API_KEY,
    }));
  }

  static getInstance(): AIService {
    if (!AIService.instance) {
      AIService.instance = new AIService();
    }
    return AIService.instance;
  }

  async processMessage(content: string, context: string[] = [], userId?: string) {
    // Rate limit per user; anonymous visitors share a single bucket
    const identifier = userId || 'anonymous';
    const rateLimitResult = await rateLimit(identifier);
    
    if (!rateLimitResult.success) {
      throw new Error('Rate limit exceeded');
    }

    try {
      const completion = await this.openai.createChatCompletion({
        model: "gpt-3.5-turbo",
        messages: [
          { role: "system", content: "You are a helpful AI assistant." },
          ...context.map(msg => ({ role: "user", content: msg })),
          { role: "user", content }
        ],
        temperature: 0.7,
        max_tokens: 500,
        presence_penalty: 0.6,
      });

      return completion.data.choices[0].message.content;
    } catch (error) {
      console.error('OpenAI API Error:', error);
      throw new Error('Failed to process message');
    }
  }
}

Rate Limiting Implementation

import { Redis } from '@upstash/redis';

const redis = new Redis({
  url: process.env.UPSTASH_REDIS_URL,
  token: process.env.UPSTASH_REDIS_TOKEN,
});

export async function rateLimit(identifier: string, limit = 10, window = 60) {
  const key = `rate_limit:${identifier}`;
  
  try {
    const requests = await redis.incr(key);
    
    if (requests === 1) {
      await redis.expire(key, window);
    }

    return {
      success: requests <= limit,
      remaining: Math.max(0, limit - requests),
    };
  } catch (error) {
    console.error('Rate limiting error:', error);
    return { success: true, remaining: limit }; // Fail open
  }
}

Analytics Integration

import posthog from 'posthog-js';

export class Analytics {
  static init() {
    if (typeof window !== 'undefined') {
      posthog.init(process.env.NEXT_PUBLIC_POSTHOG_KEY, {
        api_host: process.env.NEXT_PUBLIC_POSTHOG_HOST,
      });
    }
  }

  static trackEvent(event: string, properties?: Record<string, any>) {
    posthog.capture(event, properties);
  }

  static identifyUser(userId: string, traits?: Record<string, any>) {
    posthog.identify(userId, traits);
  }
}
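
Wiring this in is just a couple of calls: Analytics.init() once on the client, then capture events at the interesting moments. A small helper as an example (the file path and event name are mine, not anything fixed by PostHog):

// src/lib/track-chat.ts — hypothetical helper called after a message round-trips
import { Analytics } from './analytics';

export function trackChatMessage(userId: string | null, authenticated: boolean) {
  if (userId) {
    Analytics.identifyUser(userId);
  }
  Analytics.trackEvent('chat_message_sent', { authenticated });
}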

Testing Strategy

import { render, fireEvent, waitFor } from '@testing-library/react';
import Chat from '../components/Chat';

describe('Chat Component', () => {
  it('should handle message sending', async () => {
    const { getByRole, getByText } = render(<Chat />);
    
    const input = getByRole('textbox');
    const sendButton = getByRole('button', { name: /send/i });

    fireEvent.change(input, { target: { value: 'Hello AI' } });
    fireEvent.click(sendButton);

    await waitFor(() => {
      expect(getByText(/Hello AI/)).toBeInTheDocument();
    });
  });
});
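
The stack also lists Playwright, so the same flow can be covered end-to-end. A sketch, assuming the dev server runs on localhost:3000 and the input and button have the accessible names used below:

// e2e/chat.spec.ts — hypothetical Playwright test for the free-tier flow
import { test, expect } from '@playwright/test';

test('visitor can send a free message', async ({ page }) => {
  await page.goto('http://localhost:3000');

  await page.getByRole('textbox').fill('Hello AI');
  await page.getByRole('button', { name: /send/i }).click();

  // The sent message should appear in the conversation
  await expect(page.getByText('Hello AI')).toBeVisible();
});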

Deployment Guide
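
These map to the environment variables used above. On Vercel they can be declared in vercel.json, where the @ values reference secrets created with the Vercel CLI (or you can simply set them in the project dashboard):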

{
  "env": {
    "OPENAI_API_KEY": "@openai_api_key",
    "STRIPE_SECRET_KEY": "@stripe_secret_key",
    "SUPABASE_URL": "@supabase_url",
    "SUPABASE_ANON_KEY": "@supabase_anon_key"
  },
  "build": {
    "env": {
      "NEXT_PUBLIC_SUPABASE_URL": "@supabase_url",
      "NEXT_PUBLIC_SUPABASE_ANON_KEY": "@supabase_anon_key"
    }
  }
}

Want to learn more about building AI applications? Follow me for more tutorials and insights!