React Hooks

Client-side React hooks for text generation, vision, speech-to-text, and text-to-speech.

Installation

Terminal
npm install @tryhamster/gerbil

useGerbil

The main hook for text generation and streaming:

MyComponent.tsx
01"use client";
02
03import { useGerbil } from "@tryhamster/gerbil/react";
04
05function MyComponent() {
06 const {
07 generate, // Generate text (returns Promise)
08 stream, // Stream text (returns AsyncIterator)
09 isLoading, // Loading state
10 error, // Error object if any
11 abort, // Abort current request
12 } = useGerbil({
13 endpoint: "/api/ai", // Your API endpoint
14 });
15
16 const handleGenerate = async () => {
17 const result = await generate("Write a haiku");
18 console.log(result.text);
19 };
20
21 const handleStream = async () => {
22 for await (const chunk of stream("Tell me a story")) {
23 console.log(chunk);
24 }
25 };
26
27 return (
28 <div>
29 <button onClick={handleGenerate} disabled={isLoading}>
30 Generate
31 </button>
32 <button onClick={handleStream} disabled={isLoading}>
33 Stream
34 </button>
35 <button onClick={abort}>Cancel</button>
36 {error && <p>Error: {error.message}</p>}
37 </div>
38 );
39}

useGerbil Options

options.tsx
const gerbil = useGerbil({
  // Required
  endpoint: "/api/ai",

  // Optional defaults
  defaultOptions: {
    maxTokens: 500,
    temperature: 0.7,
    thinking: false,
  },

  // Callbacks
  onStart: () => console.log("Started"),
  onToken: (token) => console.log("Token:", token),
  onFinish: (result) => console.log("Done:", result),
  onError: (error) => console.error("Error:", error),

  // Request options
  headers: {
    Authorization: "Bearer token",
  },
});

generate()

Generate text and wait for the complete response:

generate.tsx
const { generate } = useGerbil({ endpoint: "/api/ai" });

// Basic usage
const result = await generate("Hello!");
console.log(result.text);

// With options
const detailed = await generate("Explain React", {
  maxTokens: 500,
  temperature: 0.8,
  system: "You are a helpful teacher.",
});

// Result shape
interface GenerateResult {
  text: string;
  thinking?: string; // If thinking mode enabled
  tokensGenerated: number;
  tokensPerSecond: number;
  totalTime: number;
}
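
The extra fields on GenerateResult are handy for showing generation stats alongside the answer. A minimal sketch (the component and state names here are illustrative, not part of the hook):

GenerateWithStats.tsx
import { useState } from "react";
import { useGerbil } from "@tryhamster/gerbil/react";

function GenerateWithStats() {
  const { generate, isLoading } = useGerbil({ endpoint: "/api/ai" });
  const [answer, setAnswer] = useState("");
  const [stats, setStats] = useState("");

  const run = async () => {
    const result = await generate("Explain React");
    setAnswer(result.text);
    // Show throughput from the result metadata
    setStats(`${result.tokensGenerated} tokens @ ${result.tokensPerSecond.toFixed(1)} tok/s`);
  };

  return (
    <div>
      <button onClick={run} disabled={isLoading}>Generate</button>
      <p>{answer}</p>
      <small>{stats}</small>
    </div>
  );
}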

stream()

Stream text token by token:

stream.tsx
const { stream } = useGerbil({ endpoint: "/api/ai" });

// Basic streaming
const [text, setText] = useState("");

const handleStream = async () => {
  setText("");
  for await (const chunk of stream("Tell me a story")) {
    setText((prev) => prev + chunk);
  }
};

// With options
for await (const chunk of stream("Explain hooks", {
  maxTokens: 500,
  onToken: (token) => console.log(token),
})) {
  // Process each chunk
}

useChat

Full-featured chat hook with message history:

ChatUI.tsx
01"use client";
02
03import { useChat } from "@tryhamster/gerbil/react";
04
05function ChatUI() {
06 const {
07 messages, // Array of messages
08 input, // Current input value
09 setInput, // Set input value
10 handleSubmit, // Submit handler for forms
11 handleInputChange, // Input change handler
12 isLoading, // Loading state
13 error, // Error object
14 reload, // Regenerate last response
15 stop, // Stop current generation
16 append, // Add a message manually
17 setMessages, // Replace all messages
18 } = useChat({
19 endpoint: "/api/chat",
20 system: "You are a helpful assistant.",
21 });
22
23 return (
24 <div>
25 {/* Message list */}
26 <div>
27 {messages.map((m, i) => (
28 <div key={i}>
29 <strong>{m.role}:</strong> {m.content}
30 </div>
31 ))}
32 </div>
33
34 {/* Input form */}
35 <form onSubmit={handleSubmit}>
36 <input
37 value={input}
38 onChange={handleInputChange}
39 placeholder="Type a message..."
40 />
41 <button type="submit" disabled={isLoading}>
42 Send
43 </button>
44 </form>
45 </div>
46 );
47}

useChat Options

chat-options.tsx
const chat = useChat({
  // Required
  endpoint: "/api/chat",

  // System prompt
  system: "You are a helpful assistant.",

  // Initial messages
  initialMessages: [
    { role: "assistant", content: "Hello! How can I help?" },
  ],

  // Enable thinking mode
  thinking: true,

  // Generation options
  maxTokens: 500,
  temperature: 0.7,

  // Callbacks
  onFinish: (message) => {
    console.log("Response:", message);
  },
  onError: (error) => {
    console.error("Error:", error);
  },

  // Unique ID for persistence
  id: "my-chat",
});

Message Type

types.ts
interface Message {
  role: "user" | "assistant" | "system";
  content: string;
  thinking?: string; // Reasoning (if thinking mode)
  id?: string;       // Unique ID
  createdAt?: Date;  // Timestamp
}
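
The optional id and createdAt fields are useful as React keys and timestamps when rendering a history. A minimal sketch (the component name is illustrative):

MessageList.tsx
function MessageList({ messages }: { messages: Message[] }) {
  return (
    <ul>
      {messages.map((m, i) => (
        <li key={m.id ?? i}>
          <strong>{m.role}:</strong> {m.content}
          {m.createdAt && <time> {m.createdAt.toLocaleTimeString()}</time>}
        </li>
      ))}
    </ul>
  );
}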

Thinking Mode

Display the model's reasoning process:

ChatWithThinking.tsx
function ChatWithThinking() {
  const { messages, input, setInput, handleSubmit } = useChat({
    endpoint: "/api/chat",
    thinking: true,
  });

  return (
    <div>
      {messages.map((m, i) => (
        <div key={i}>
          {/* Show thinking if available */}
          {m.thinking && (
            <div className="text-gray-500 italic text-sm mb-2">
              <strong>Thinking:</strong> {m.thinking}
            </div>
          )}
          <div>
            <strong>{m.role}:</strong> {m.content}
          </div>
        </div>
      ))}
      <form onSubmit={handleSubmit}>
        <input value={input} onChange={(e) => setInput(e.target.value)} />
        <button type="submit">Send</button>
      </form>
    </div>
  );
}

Streaming UI Pattern

StreamingChat.tsx
function StreamingChat() {
  const [output, setOutput] = useState("");
  const { stream, isLoading, abort } = useGerbil({
    endpoint: "/api/ai",
  });

  const handleSubmit = async (prompt: string) => {
    setOutput("");

    try {
      for await (const chunk of stream(prompt)) {
        setOutput((prev) => prev + chunk);
      }
    } catch (e) {
      if (e instanceof Error && e.name === "AbortError") {
        console.log("Aborted");
      } else {
        throw e;
      }
    }
  };

  return (
    <div>
      <div className="whitespace-pre-wrap">
        {output}
        {isLoading && <span className="animate-pulse">▋</span>}
      </div>
      {isLoading && (
        <button onClick={abort}>Stop</button>
      )}
    </div>
  );
}

JSON Generation

ExtractForm.tsx
import { useGerbil } from "@tryhamster/gerbil/react";
import { z } from "zod";

const PersonSchema = z.object({
  name: z.string(),
  age: z.number(),
  city: z.string(),
});

function ExtractForm() {
  const { json, isLoading } = useGerbil({
    endpoint: "/api/ai",
  });

  const handleExtract = async (text: string) => {
    const data = await json(text, {
      schema: PersonSchema,
    });

    console.log(data);
    // { name: "John", age: 32, city: "NYC" }
  };

  return (
    <button onClick={() => handleExtract("John is 32 from NYC")} disabled={isLoading}>
      Extract
    </button>
  );
}
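
Model output does not always satisfy the schema, so it is worth guarding the call. A minimal sketch, assuming json() rejects when the response cannot be parsed or validated:

const handleExtract = async (text: string) => {
  try {
    const data = await json(text, { schema: PersonSchema });
    console.log(data);
  } catch (err) {
    console.error("Extraction failed:", err);
    // Fall back to a retry or show an error state
  }
};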

Error Handling

ChatWithErrors.tsx
function ChatWithErrors() {
  const { messages, error, handleSubmit, reload } = useChat({
    endpoint: "/api/chat",
    onError: (error) => {
      console.error("Chat error:", error);
      // Show toast, log to analytics, etc.
    },
  });

  if (error) {
    return (
      <div className="p-4 bg-red-100 text-red-800 rounded">
        <p>Something went wrong: {error.message}</p>
        <button onClick={reload}>Try Again</button>
      </div>
    );
  }

  return (
    <div>{/* ... chat UI */}</div>
  );
}

Persistence

Save and restore chat history:

PersistentChat.tsx
function PersistentChat() {
  const { messages, setMessages, ...chat } = useChat({
    endpoint: "/api/chat",
    id: "my-chat", // Unique ID for this chat
  });

  // Restore on mount
  useEffect(() => {
    const saved = localStorage.getItem("chat-history");
    if (saved) {
      setMessages(JSON.parse(saved));
    }
  }, []);

  // Save to localStorage (skip the initial empty state so it
  // doesn't overwrite a previously saved history on mount)
  useEffect(() => {
    if (messages.length > 0) {
      localStorage.setItem("chat-history", JSON.stringify(messages));
    }
  }, [messages]);

  return (
    <div>{/* ... chat UI */}</div>
  );
}

With React Context

GerbilProvider.tsx
import { createContext, useContext, type ReactNode } from "react";
import { useGerbil } from "@tryhamster/gerbil/react";

const GerbilContext = createContext<ReturnType<typeof useGerbil> | null>(null);

export function GerbilProvider({ children }: { children: ReactNode }) {
  const gerbil = useGerbil({
    endpoint: "/api/ai",
  });

  return (
    <GerbilContext.Provider value={gerbil}>
      {children}
    </GerbilContext.Provider>
  );
}

export function useGerbilContext() {
  const context = useContext(GerbilContext);
  if (!context) {
    throw new Error("useGerbilContext must be used within GerbilProvider");
  }
  return context;
}

// Usage
function MyComponent() {
  const { generate, isLoading } = useGerbilContext();
  // ...
}

useSpeech (Text-to-Speech)

Generate natural speech with Kokoro-82M (28 voices):

TextToSpeech.tsx
import { useSpeech } from "@tryhamster/gerbil/browser";

function TextToSpeech() {
  const {
    speak,        // (text: string, opts?) => Promise<void>
    stop,         // () => void
    isSpeaking,   // boolean
    isLoading,    // boolean - model loading
    isReady,      // boolean - model ready
    error,        // string | null
    listVoices,   // () => VoiceInfo[]
    currentVoice, // string
    setVoice,     // (id: string) => void
    currentSpeed, // number
    setSpeed,     // (speed: number) => void
  } = useSpeech({
    voice: "af_heart", // Default voice
    speed: 1.0,        // Speed multiplier (0.5-2.0)
    autoLoad: false,   // Load on first speak()
  });

  if (isLoading) return <div>Loading TTS model...</div>;

  return (
    <div>
      <select onChange={(e) => setVoice(e.target.value)} value={currentVoice}>
        {listVoices().map((v) => (
          <option key={v.id} value={v.id}>
            {v.name} ({v.language})
          </option>
        ))}
      </select>

      <button onClick={() => speak("Hello! I'm Gerbil.")}>
        {isSpeaking ? "Speaking..." : "Speak"}
      </button>

      {isSpeaking && <button onClick={stop}>Stop</button>}
    </div>
  );
}

Available Voices

Kokoro provides 28 voices. Top picks:

voices.ts
// American Female (Recommended)
"af_heart" // Highest quality, warm tone
"af_bella" // Warm and friendly
"af_nicole" // Soft and gentle
// American Male
"am_fenrir" // Best male quality
"am_michael" // Warm and friendly
// British Female
"bf_emma" // Elegant and clear
// British Male
"bm_george" // Distinguished

useVoiceInput (Speech-to-Text)

Record and transcribe voice with Whisper:

VoiceInput.tsx
import { useVoiceInput } from "@tryhamster/gerbil/browser";

function VoiceInput() {
  const {
    startRecording,  // () => Promise<void>
    stopRecording,   // () => Promise<string>
    cancelRecording, // () => void
    transcribe,      // (audio: Float32Array) => Promise<string>
    isRecording,     // boolean
    isTranscribing,  // boolean
    isLoading,       // boolean - model loading
    isReady,         // boolean - model ready
    transcript,      // string - latest result
    error,           // string | null
  } = useVoiceInput({
    model: "whisper-tiny.en", // STT model
    autoLoad: false,          // Load on first record
    onTranscript: (text) => console.log("User said:", text),
  });

  return (
    <div>
      <button onClick={isRecording ? stopRecording : startRecording}>
        {isRecording ? "🔴 Stop" : "🎤 Record"}
      </button>

      {isTranscribing && <span>Transcribing...</span>}
      {transcript && <p>You said: {transcript}</p>}
      {error && <p className="text-red-500">{error}</p>}
    </div>
  );
}

STT Models

stt-models.ts
// Available models
"whisper-tiny.en" // 39MB - Fastest
"whisper-tiny" // 39MB - Multilingual
"whisper-base.en" // 74MB - Balanced
"whisper-base" // 74MB - Multilingual
"whisper-small.en" // 244MB - High quality
"whisper-large-v3-turbo" // 809MB - Best quality, 80+ langs

useVoiceChat (Full Voice Conversation)

Complete voice-to-voice loop (STT → LLM → TTS):

VoiceAssistant.tsx
import { useVoiceChat } from "@tryhamster/gerbil/browser";

function VoiceAssistant() {
  const {
    messages,       // Chat history
    startListening, // () => Promise<void>
    stopListening,  // () => Promise<void>
    isListening,    // boolean
    isSpeaking,     // boolean
    stage,          // "idle" | "listening" | "transcribing" | "thinking" | "speaking"
    isReady,        // boolean - all models ready
    error,          // string | null
  } = useVoiceChat({
    llmModel: "qwen3-0.6b",
    sttModel: "whisper-tiny.en",
    voice: "af_bella",
    system: "You are a helpful voice assistant. Keep responses brief.",
  });

  return (
    <div>
      {/* Conversation history */}
      {messages.map((m) => (
        <div key={m.id}>
          <strong>{m.role}:</strong> {m.content}
        </div>
      ))}

      {/* Push-to-talk button */}
      <button
        onMouseDown={startListening}
        onMouseUp={stopListening}
        onTouchStart={startListening}
        onTouchEnd={stopListening}
      >
        {stage === "idle" && "🎤 Hold to Speak"}
        {stage === "listening" && "🔴 Listening..."}
        {stage === "transcribing" && "📝 Transcribing..."}
        {stage === "thinking" && "🤔 Thinking..."}
        {stage === "speaking" && "🔊 Speaking..."}
      </button>
    </div>
  );
}

useCompletion (Browser-Only)

Run models directly in the browser with WebGPU:

BrowserAI.tsx
import { useCompletion } from "@tryhamster/gerbil/browser";

function BrowserAI() {
  const {
    complete,        // (prompt: string) => Promise<void>
    completion,      // string - generated text
    thinking,        // string - reasoning (if enabled)
    isLoading,       // boolean - model loading
    loadingProgress, // { status, progress, file }
    isGenerating,    // boolean - generating
    tps,             // number - tokens per second
    error,           // string | null
    stop,            // () => void
  } = useCompletion({
    model: "qwen3-0.6b",
    thinking: true,
    maxTokens: 256,
  });

  return (
    <div>
      {isLoading && (
        <div>Loading model... {loadingProgress?.progress}%</div>
      )}

      <button onClick={() => complete("Explain React in one sentence")}>
        Generate
      </button>

      {thinking && (
        <div className="text-gray-500">
          <strong>Thinking:</strong> {thinking}
        </div>
      )}

      <div>{completion}</div>

      {isGenerating && (
        <div>
          <span>{tps.toFixed(1)} tok/s</span>
          <button onClick={stop}>Stop</button>
        </div>
      )}
    </div>
  );
}

Vision with useCompletion

VisionDemo.tsx
import { useState } from "react";
import { useCompletion } from "@tryhamster/gerbil/browser";

function VisionDemo() {
  const [image, setImage] = useState<string | null>(null);
  const { complete, completion, isLoading } = useCompletion({
    model: "ministral-3b", // Vision-capable model
  });

  const handleAnalyze = async () => {
    if (!image) return;
    await complete("What's in this image?", {
      images: [{ source: image }],
    });
  };

  return (
    <div>
      <input
        type="file"
        accept="image/*"
        onChange={(e) => {
          const file = e.target.files?.[0];
          if (file) {
            const reader = new FileReader();
            reader.onload = () => setImage(reader.result as string);
            reader.readAsDataURL(file);
          }
        }}
      />

      {image && <img src={image} alt="Preview" className="max-w-xs" />}

      <button onClick={handleAnalyze} disabled={!image || isLoading}>
        Analyze Image
      </button>

      {completion && <p>{completion}</p>}
    </div>
  );
}