Google Gemini Integration

Documentation for integrating Google Gemini Pro AI capabilities into Remind Tools.

Setup

API Key Configuration

# .env file
GEMINI_API_KEY=your_api_key_here
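
The snippets that follow read this key through an Environment helper. A minimal sketch of that helper, assuming the flutter_dotenv package (the project's actual configuration loading may differ):

import 'package:flutter_dotenv/flutter_dotenv.dart';

// Hypothetical wrapper matching the Environment.geminiApiKey
// reference used in GeminiService below.
class Environment {
  static Future<void> init() => dotenv.load(fileName: '.env');

  static String get geminiApiKey =>
      dotenv.env['GEMINI_API_KEY'] ??
      (throw StateError('GEMINI_API_KEY is not set'));
}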

Client Initialization

import 'package:google_generative_ai/google_generative_ai.dart';

class GeminiService {
  late final GenerativeModel _model;

  GeminiService() {
    _model = GenerativeModel(
      model: 'gemini-pro',
      apiKey: Environment.geminiApiKey,
      generationConfig: GenerationConfig(
        temperature: 0.7,      // balance determinism and creativity
        topK: 40,              // sample from the 40 most likely tokens
        topP: 0.95,            // nucleus-sampling probability mass
        maxOutputTokens: 1024, // cap response length (and cost)
      ),
    );
  }
}
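
With the key available, the service can be constructed once at startup and shared. A minimal usage sketch (Environment.init is the hypothetical helper from the previous section):

Future<void> main() async {
  await Environment.init();
  final gemini = GeminiService();
  // Hand gemini to the rest of the app, e.g. via dependency injection.
}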

Use Cases

Trip Itinerary Generation

Future<String> generateItinerary({
  required String destination,
  required int days,
  required List<String> interests,
}) async {
  final prompt = '''
    Create a $days-day itinerary for $destination.
    Traveler interests: ${interests.join(', ')}
    Include:
    - Daily activities with times
    - Restaurant recommendations
    - Transportation tips
    - Budget estimates
  ''';
  
  final response = await _model.generateContent([
    Content.text(prompt)
  ]);
  
  return response.text ?? '';
}

Expense Categorization

Future<String> categorizeExpense({
  required String description,
  required double amount,
}) async {
  final prompt = '''
    Categorize this expense:
    Description: $description
    Amount: \$$amount
    
    Return one of: Food, Transport, Accommodation, 
    Entertainment, Shopping, Other
  ''';
  
  final response = await _model.generateContent([
    Content.text(prompt)
  ]);
  
  // Trim so the label matches the fixed category names exactly.
  return response.text?.trim() ?? 'Other';
}

Smart Suggestions

Future<List<String>> getSuggestions({
  required String context,
  required String userQuery,
}) async {
  final prompt = '''
    Context: $context
    User Query: $userQuery
    
    Provide 3 relevant suggestions.
  ''';
  
  final response = await _model.generateContent([
    Content.text(prompt)
  ]);
  
  return parseListResponse(response.text);
}
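
parseListResponse is not defined above; a minimal sketch, assuming the model returns one suggestion per line (numbered or bulleted):

List<String> parseListResponse(String? text) {
  if (text == null) return [];
  return text
      .split('\n')
      // Strip common list markers such as "1.", "-", or "*".
      .map((line) =>
          line.replaceFirst(RegExp(r'^\s*(\d+[.)]|[-*])\s*'), '').trim())
      .where((line) => line.isNotEmpty)
      .toList();
}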

Advanced Features

Multi-modal Input

// Analyze a receipt image. Image input requires a vision-capable
// model (e.g. 'gemini-pro-vision'), not the text-only 'gemini-pro'
// configured above.
Future<ExpenseData> analyzeReceipt(Uint8List imageBytes) async {
  final response = await _model.generateContent([
    Content.multi([
      TextPart('Extract expense details from this receipt:'),
      DataPart('image/jpeg', imageBytes),
    ])
  ]);
  
  return parseExpenseData(response.text);
}
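
ExpenseData and parseExpenseData are likewise project-specific. One hypothetical shape, assuming the prompt is extended to ask for a JSON reply with description, amount, and category fields:

import 'dart:convert';

class ExpenseData {
  final String description;
  final double amount;
  final String category;

  ExpenseData({
    required this.description,
    required this.amount,
    required this.category,
  });

  factory ExpenseData.fromJson(Map<String, dynamic> json) => ExpenseData(
        description: json['description'] as String? ?? '',
        amount: (json['amount'] as num?)?.toDouble() ?? 0.0,
        category: json['category'] as String? ?? 'Other',
      );
}

ExpenseData parseExpenseData(String? text) {
  if (text == null) throw const FormatException('Empty model response');
  // Models often wrap JSON in Markdown code fences; strip them first.
  final cleaned = text
      .replaceAll(RegExp(r'^```(json)?\s*|\s*```$', multiLine: true), '')
      .trim();
  return ExpenseData.fromJson(jsonDecode(cleaned) as Map<String, dynamic>);
}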

Streaming Responses

Stream<String> streamResponse(String prompt) async* {
  final response = _model.generateContentStream([
    Content.text(prompt)
  ]);
  
  await for (final chunk in response) {
    yield chunk.text ?? '';
  }
}
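
Consuming the stream as text arrives, for example to update a chat bubble incrementally (setState and the partialReply field assume a Flutter StatefulWidget):

final buffer = StringBuffer();
await for (final piece in streamResponse(prompt)) {
  buffer.write(piece);
  setState(() => partialReply = buffer.toString());
}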

Rate Limiting

Gemini Pro has a rate limit of 60 requests per minute. The helper below tracks recent request timestamps and waits when the window is full.

class RateLimiter {
  final _requestTimes = <DateTime>[];
  final _maxRequests = 60;
  final _timeWindow = Duration(minutes: 1);
  
  Future<void> checkLimit() async {
    var now = DateTime.now();
    // Drop timestamps that have aged out of the window.
    _requestTimes.removeWhere(
      (time) => now.difference(time) > _timeWindow,
    );

    if (_requestTimes.length >= _maxRequests) {
      // Wait until the oldest request leaves the window, then
      // refresh now so the recorded timestamp is accurate.
      final waitTime = _timeWindow - now.difference(_requestTimes.first);
      await Future.delayed(waitTime);
      now = DateTime.now();
      _requestTimes.removeWhere(
        (time) => now.difference(time) > _timeWindow,
      );
    }

    _requestTimes.add(now);
  }
}
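
A sketch of how GeminiService might route every request through the limiter (names here are illustrative):

final _rateLimiter = RateLimiter();

Future<GenerateContentResponse> _send(List<Content> content) async {
  await _rateLimiter.checkLimit(); // waits if the 60/min window is full
  return _model.generateContent(content);
}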

Error Handling

try {
  final response = await _model.generateContent([...]);
  return response.text;
} on GenerativeAIException catch (e) {
  // Handle API errors
  logger.error('Gemini API error: $e');
  return fallbackResponse();
} catch (e) {
  // Handle other errors
  logger.error('Unexpected error: $e');
  rethrow;
}

Best Practices

  1. Cache responses when appropriate
  2. Implement retry logic with exponential backoff (see the sketch after this list)
  3. Use streaming for long responses
  4. Validate and sanitize AI outputs
  5. Monitor token usage to control costs
  6. Provide fallbacks for API failures
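
A minimal sketch of practice 2, retry with exponential backoff, assuming transient failures surface as GenerativeAIException:

Future<T> withRetry<T>(
  Future<T> Function() action, {
  int maxAttempts = 3,
  Duration baseDelay = const Duration(milliseconds: 500),
}) async {
  for (var attempt = 0; ; attempt++) {
    try {
      return await action();
    } on GenerativeAIException {
      if (attempt + 1 >= maxAttempts) rethrow;
      // Double the wait on each failure: 500 ms, 1 s, 2 s, ...
      await Future.delayed(baseDelay * (1 << attempt));
    }
  }
}

Calls are then wrapped as, for example, withRetry(() => gemini.categorizeExpense(description: 'Taxi', amount: 24.50)).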