Melhora chat com áudio anexado e auto-scroll
All checks were successful
CI/CD Web + Desktop / Detect changes (push) Successful in 8s
CI/CD Web + Desktop / Deploy (VPS Linux) (push) Successful in 5m33s
Quality Checks / Lint, Test and Build (push) Successful in 6m14s
CI/CD Web + Desktop / Deploy Convex functions (push) Successful in 1m48s

This commit is contained in:
rever-tecnologia 2025-12-19 15:12:50 -03:00
parent 43017e6fef
commit 6efbbd49e7
12 changed files with 1954 additions and 228 deletions

View file

@ -394,6 +394,7 @@ pub struct UploadResult {
// Extensoes permitidas
const ALLOWED_EXTENSIONS: &[&str] = &[
".jpg", ".jpeg", ".png", ".gif", ".webp",
".mp3", ".wav", ".ogg", ".webm", ".m4a",
".pdf", ".txt", ".doc", ".docx", ".xls", ".xlsx",
];
@ -434,6 +435,11 @@ pub fn get_mime_type(file_name: &str) -> String {
"png" => "image/png",
"gif" => "image/gif",
"webp" => "image/webp",
"mp3" => "audio/mpeg",
"wav" => "audio/wav",
"ogg" => "audio/ogg",
"webm" => "audio/webm",
"m4a" => "audio/mp4",
"pdf" => "application/pdf",
"txt" => "text/plain",
"doc" => "application/msword",

View file

@ -12,10 +12,11 @@ import { useCallback, useEffect, useMemo, useRef, useState } from "react"
import { open as openDialog } from "@tauri-apps/plugin-dialog"
import { openUrl as openExternal } from "@tauri-apps/plugin-opener"
import { invoke } from "@tauri-apps/api/core"
import { Send, X, Loader2, MessageCircle, Paperclip, FileText, Image as ImageIcon, File, User, ChevronUp, Minimize2, Eye, Download, Check, MessagesSquare } from "lucide-react"
import { Send, X, Loader2, MessageCircle, Paperclip, FileText, Image as ImageIcon, File, User, ChevronUp, Minimize2, Eye, Download, Check, MessagesSquare, Mic, Square, Trash2, Play, Pause } from "lucide-react"
import type { Id } from "@convex/_generated/dataModel"
import { useMachineMessages, useMachineSessions, usePostMachineMessage, useMarkMachineMessagesRead, type MachineMessage } from "./useConvexMachineQueries"
import { useMachineMessages, useMachineSessions, usePostMachineMessage, useMarkMachineMessagesRead, useGenerateMachineUploadUrl, type MachineMessage } from "./useConvexMachineQueries"
import { useConvexMachine } from "./ConvexMachineProvider"
import { useAudioRecorder } from "./useAudioRecorder"
const MAX_MESSAGES_IN_MEMORY = 200
const MARK_READ_BATCH_SIZE = 50
@ -23,14 +24,19 @@ const SCROLL_BOTTOM_THRESHOLD_PX = 120
// File extensions accepted as chat attachments (images, audio, documents).
const ALLOWED_EXTENSIONS = [
  "jpg", "jpeg", "png", "gif", "webp",
  "mp3", "wav", "ogg", "webm", "m4a",
  "pdf", "txt", "doc", "docx", "xls", "xlsx",
]
// Upper bound for a recorded/attached audio clip (5 MB).
const MAX_AUDIO_BYTES = 5 * 1024 * 1024
// Hard cap on recording length (5 minutes); recording auto-stops at this mark.
const MAX_AUDIO_DURATION_SECONDS = 300
// An attachment already uploaded to Convex storage, held on the composer
// until the message is sent.
interface UploadedAttachment {
  storageId: string
  // Original file name, shown in the pending-attachment chip.
  name: string
  size?: number
  // MIME type; used to choose audio vs. generic rendering.
  type?: string
  // Local blob URL backing the pre-send audio preview; revoked when the
  // attachment is removed or the message is sent.
  previewUrl?: string
}
interface ChatAttachment {
@ -57,6 +63,11 @@ function isImageAttachment(attachment: ChatAttachment) {
return ["jpg", "jpeg", "png", "gif", "webp"].includes(ext)
}
// True when the attachment holds audio: the MIME type wins, the file
// extension is the fallback.
function isAudioAttachment(attachment: ChatAttachment) {
  const mime = attachment.type
  if (mime?.startsWith("audio/")) return true
  const audioExtension = /\.(?:mp3|wav|ogg|webm|m4a)$/i
  return audioExtension.test(attachment.name)
}
function formatAttachmentSize(size?: number) {
if (!size) return null
if (size < 1024) return `${size}B`
@ -65,6 +76,13 @@ function formatAttachmentSize(size?: number) {
return `${(kb / 1024).toFixed(1)}MB`
}
// Renders a second count as a zero-padded "MM:SS" label; negative inputs
// clamp to zero and fractions are truncated.
function formatDuration(seconds: number) {
  const total = Math.max(0, Math.floor(seconds))
  const pad = (value: number) => String(value).padStart(2, "0")
  return `${pad(Math.floor(total / 60))}:${pad(total % 60)}`
}
function getUnreadAgentMessageIds(messages: MachineMessage[], unreadCount: number): string[] {
if (unreadCount <= 0 || messages.length === 0) return []
const ids: string[] = []
@ -86,6 +104,217 @@ function chunkArray<T>(items: T[], size: number): T[][] {
return result
}
// Downsamples the first channel of an AudioBuffer into `bars` peak values
// normalized to [0, 1], for drawing a waveform.
function extractPeaks(buffer: AudioBuffer, bars = 48) {
  const samples = buffer.getChannelData(0)
  const blockLen = Math.max(1, Math.floor(samples.length / bars))
  const peaks: number[] = []
  for (let bar = 0; bar < bars; bar += 1) {
    const from = bar * blockLen
    const to = Math.min(from + blockLen, samples.length)
    let peak = 0
    for (let pos = from; pos < to; pos += 1) {
      const magnitude = Math.abs(samples[pos])
      if (magnitude > peak) peak = magnitude
    }
    peaks.push(peak)
  }
  // 0.001 floor avoids dividing by zero on a silent clip.
  const scale = Math.max(...peaks, 0.001)
  return peaks.map((value) => value / scale)
}
function AudioWaveform({
peaks,
progress,
isAgent,
}: {
peaks: number[]
progress: number
isAgent: boolean
}) {
const playedBars = Math.round(progress * peaks.length)
return (
<div className="flex h-8 items-center gap-[2px]">
{peaks.map((value, index) => {
const height = Math.max(4, Math.round(value * 24))
const played = index <= playedBars
const color = played
? isAgent
? "bg-emerald-300"
: "bg-emerald-500"
: isAgent
? "bg-white/30"
: "bg-slate-300"
return (
<span
key={index}
className={`w-[2px] rounded-full ${color}`}
style={{ height }}
/>
)
})}
</div>
)
}
// Inline audio player for a chat attachment: waveform preview, play/pause
// toggle and a blob-based download button. `isAgent` flips the palette for
// dark (agent) vs. light (user) message bubbles.
function AudioAttachmentPlayer({
  url,
  name,
  size,
  isAgent,
}: {
  url: string
  name: string
  size?: number
  isAgent: boolean
}) {
  const audioRef = useRef<HTMLAudioElement | null>(null)
  const [isPlaying, setIsPlaying] = useState(false)
  const [duration, setDuration] = useState(0)
  const [currentTime, setCurrentTime] = useState(0)
  const [peaks, setPeaks] = useState<number[]>([])
  const [isLoadingWaveform, setIsLoadingWaveform] = useState(true)
  const [downloading, setDownloading] = useState(false)
  const [downloaded, setDownloaded] = useState(false)
  // Fetch and decode the clip once per URL to build the waveform peaks.
  // `cancelled` guards against state updates after the URL changes or the
  // component unmounts mid-decode.
  useEffect(() => {
    let cancelled = false
    setIsLoadingWaveform(true)
    setPeaks([])
    const loadWaveform = async () => {
      try {
        // No AudioContext available: leave `peaks` empty so the plain
        // placeholder is rendered instead of a waveform.
        if (typeof AudioContext === "undefined") {
          return
        }
        const response = await fetch(url)
        const buffer = await response.arrayBuffer()
        const audioContext = new AudioContext()
        const decoded = await audioContext.decodeAudioData(buffer)
        // The context was only needed for decoding; release it right away.
        await audioContext.close()
        if (!cancelled) {
          setPeaks(extractPeaks(decoded))
        }
      } catch (error) {
        console.error("Falha ao gerar waveform:", error)
      } finally {
        if (!cancelled) {
          setIsLoadingWaveform(false)
        }
      }
    }
    loadWaveform()
    return () => {
      cancelled = true
    }
  }, [url])
  // Toggle playback; `isPlaying` is kept in sync through the <audio>
  // element's play/pause/ended events below, not set here on success.
  const handleToggle = async () => {
    if (!audioRef.current) return
    if (isPlaying) {
      audioRef.current.pause()
      return
    }
    try {
      await audioRef.current.play()
    } catch (error) {
      console.error("Falha ao tocar audio:", error)
    }
  }
  // Download through a temporary object URL so the browser saves the file
  // under the attachment's original name.
  const handleDownload = async () => {
    if (downloading) return
    setDownloading(true)
    try {
      const response = await fetch(url)
      const blob = await response.blob()
      const downloadUrl = URL.createObjectURL(blob)
      const anchor = document.createElement("a")
      anchor.href = downloadUrl
      anchor.download = name
      document.body.appendChild(anchor)
      anchor.click()
      document.body.removeChild(anchor)
      URL.revokeObjectURL(downloadUrl)
      // Show the confirmation check mark for two seconds.
      setDownloaded(true)
      setTimeout(() => setDownloaded(false), 2000)
    } catch (error) {
      console.error("Falha ao baixar audio:", error)
    } finally {
      setDownloading(false)
    }
  }
  const progress = duration > 0 ? currentTime / duration : 0
  const sizeLabel = formatAttachmentSize(size)
  return (
    <div
      className={`flex items-center gap-3 rounded-2xl border px-3 py-2 ${
        isAgent ? "border-white/10 bg-white/10 text-white" : "border-slate-200 bg-white text-slate-900"
      }`}
    >
      <button
        type="button"
        onClick={handleToggle}
        className={`flex size-9 items-center justify-center rounded-full ${
          isAgent ? "bg-white/10 text-white hover:bg-white/20" : "bg-slate-100 text-slate-700 hover:bg-slate-200"
        }`}
        aria-label={isPlaying ? "Pausar áudio" : "Reproduzir áudio"}
      >
        {isPlaying ? <Pause className="size-4" /> : <Play className="size-4" />}
      </button>
      <div className="flex-1">
        {isLoadingWaveform ? (
          <div className={`h-8 rounded-lg ${isAgent ? "bg-white/10" : "bg-slate-100"}`} />
        ) : peaks.length > 0 ? (
          <AudioWaveform peaks={peaks} progress={progress} isAgent={isAgent} />
        ) : (
          <div className={`h-8 rounded-lg ${isAgent ? "bg-white/10" : "bg-slate-100"}`} />
        )}
        <div className={`mt-1 flex items-center justify-between text-[10px] ${isAgent ? "text-white/60" : "text-slate-400"}`}>
          <span>{formatDuration(currentTime)}</span>
          {/* File size when known, otherwise the clip's total duration. */}
          <span className="truncate">{sizeLabel ?? formatDuration(duration)}</span>
        </div>
      </div>
      <button
        type="button"
        onClick={handleDownload}
        className={`flex size-8 items-center justify-center rounded-md ${
          isAgent ? "text-white/70 hover:text-white" : "text-slate-500 hover:text-slate-700"
        }`}
        aria-label="Baixar áudio"
      >
        {downloading ? (
          <Loader2 className="size-4 animate-spin" />
        ) : downloaded ? (
          <Check className="size-4 text-emerald-400" />
        ) : (
          <Download className="size-4" />
        )}
      </button>
      {/* Hidden element drives playback; UI state mirrors its events. */}
      <audio
        ref={audioRef}
        src={url}
        preload="metadata"
        onLoadedMetadata={(event) => {
          const target = event.currentTarget
          setDuration(Number.isFinite(target.duration) ? target.duration : 0)
        }}
        onTimeUpdate={(event) => {
          setCurrentTime(event.currentTarget.currentTime)
        }}
        onPlay={() => setIsPlaying(true)}
        onPause={() => setIsPlaying(false)}
        onEnded={() => setIsPlaying(false)}
        className="hidden"
      />
    </div>
  )
}
function MessageAttachment({
attachment,
isAgent,
@ -154,6 +383,7 @@ function MessageAttachment({
const sizeLabel = formatAttachmentSize(attachment.size)
const isImage = isImageAttachment(attachment)
const isAudio = isAudioAttachment(attachment)
if (loading) {
return (
@ -164,6 +394,17 @@ function MessageAttachment({
)
}
if (isAudio && url) {
return (
<AudioAttachmentPlayer
url={url}
name={attachment.name}
size={attachment.size}
isAgent={isAgent}
/>
)
}
if (isImage && url) {
return (
<div className={`group relative overflow-hidden rounded-lg border ${isAgent ? "border-white/10" : "border-slate-200"}`}>
@ -260,6 +501,7 @@ export function ChatWidget({ ticketId, ticketRef }: ChatWidgetProps) {
{ limit: MAX_MESSAGES_IN_MEMORY }
)
const postMessage = usePostMachineMessage()
const generateUploadUrl = useGenerateMachineUploadUrl()
const markMessagesRead = useMarkMachineMessagesRead()
// Limitar mensagens em memoria
@ -279,6 +521,7 @@ export function ChatWidget({ ticketId, ticketRef }: ChatWidgetProps) {
>(null)
const autoReadInFlightRef = useRef(false)
const lastAutoReadCountRef = useRef<number | null>(null)
const hasInitialScrollRef = useRef(false)
const unreadAgentMessageIds = useMemo(() => getUnreadAgentMessageIds(messages, unreadCount), [messages, unreadCount])
const firstUnreadAgentMessageId = unreadAgentMessageIds[0] ?? null
@ -290,14 +533,67 @@ export function ChatWidget({ ticketId, ticketRef }: ChatWidgetProps) {
}, 0)
}, [machineSessions, ticketId])
  // Uploads a recorded audio file to Convex storage and queues it as a
  // pending attachment (with a local blob URL for pre-send preview).
  const queueAudioAttachment = useCallback(async (file: File) => {
    const fileType = file.type || "audio/webm"
    if (file.size > MAX_AUDIO_BYTES) {
      alert("Áudio excede o limite de 5MB.")
      return
    }
    // Ask the backend for a short-lived upload URL, then POST the raw file.
    const { uploadUrl } = await generateUploadUrl({
      fileName: file.name,
      fileType,
      fileSize: file.size,
    })
    const uploadResponse = await fetch(uploadUrl, {
      method: "POST",
      headers: { "Content-Type": fileType },
      body: file,
    })
    if (!uploadResponse.ok) {
      // Thrown errors propagate to the recorder's onAudioReady caller.
      const text = await uploadResponse.text().catch(() => "")
      throw new Error(text || "Falha ao enviar áudio")
    }
    const { storageId } = (await uploadResponse.json()) as { storageId: string }
    // Object URL backs the inline <audio> preview; it is revoked when the
    // attachment is removed or the message is sent.
    const previewUrl = URL.createObjectURL(file)
    setPendingAttachments((prev) => [
      ...prev,
      {
        storageId: storageId as Id<"_storage">,
        name: file.name,
        size: file.size,
        type: fileType,
        previewUrl,
      },
    ])
  }, [generateUploadUrl])
  // Recorder hook: finished clips are uploaded and queued as attachments;
  // failures surface through a blocking alert.
  const audioRecorder = useAudioRecorder({
    maxDurationSeconds: MAX_AUDIO_DURATION_SECONDS,
    maxFileSizeBytes: MAX_AUDIO_BYTES,
    onAudioReady: async ({ file }) => {
      await queueAudioAttachment(file)
    },
    onError: (message) => {
      alert(message)
    },
  })
  // True while recording or while the finished clip is being uploaded;
  // used to disable the composer controls.
  const isAudioBusy = audioRecorder.isRecording || audioRecorder.isProcessing
const handleOpenHub = useCallback(async () => {
try {
await invoke("open_hub_window")
await invoke("set_hub_minimized", { minimized: false })
await invoke("close_chat_window", { ticketId })
} catch (err) {
console.error("Erro ao abrir hub:", err)
}
}, [])
}, [ticketId])
const updateIsAtBottom = useCallback(() => {
const el = messagesContainerRef.current
@ -416,6 +712,14 @@ export function ChatWidget({ ticketId, ticketRef }: ChatWidgetProps) {
prevMessagesLengthRef.current = messages.length
}, [messages.length])
  // Initial scroll once messages are first available: queue a jump to the
  // bottom (marking messages read when there are unread ones). Runs at most
  // once per mount, tracked via hasInitialScrollRef.
  useEffect(() => {
    if (hasInitialScrollRef.current) return
    // Wait until the widget is expanded and messages have loaded.
    if (isMinimized || messages.length === 0) return
    pendingScrollActionRef.current = { type: "bottom", behavior: "auto", markRead: unreadCount > 0 }
    hasInitialScrollRef.current = true
  }, [isMinimized, messages.length, unreadCount])
// Executar scroll pendente
useEffect(() => {
if (isMinimized) return
@ -514,7 +818,13 @@ export function ChatWidget({ ticketId, ticketRef }: ChatWidgetProps) {
// Remover anexo pendente
const handleRemoveAttachment = (storageId: string) => {
setPendingAttachments(prev => prev.filter(a => a.storageId !== storageId))
setPendingAttachments((prev) => {
const removed = prev.find((a) => a.storageId === storageId)
if (removed?.previewUrl) {
URL.revokeObjectURL(removed.previewUrl)
}
return prev.filter((a) => a.storageId !== storageId)
})
}
// Enviar mensagem
@ -538,6 +848,11 @@ export function ChatWidget({ ticketId, ticketRef }: ChatWidgetProps) {
type: a.type,
})) : undefined,
})
attachmentsToSend.forEach((attachment) => {
if (attachment.previewUrl) {
URL.revokeObjectURL(attachment.previewUrl)
}
})
pendingScrollActionRef.current = { type: "bottom", behavior: "smooth", markRead: false }
} catch (err) {
console.error("Erro ao enviar mensagem:", err)
@ -563,11 +878,7 @@ export function ChatWidget({ ticketId, ticketRef }: ChatWidgetProps) {
}
const handleExpand = async () => {
if (firstUnreadAgentMessageId) {
pendingScrollActionRef.current = { type: "message", messageId: firstUnreadAgentMessageId, behavior: "auto", markRead: unreadCount > 0 }
} else {
pendingScrollActionRef.current = { type: "bottom", behavior: "auto", markRead: false }
}
pendingScrollActionRef.current = { type: "bottom", behavior: "auto", markRead: unreadCount > 0 }
setIsMinimized(false)
try {
@ -830,22 +1141,85 @@ export function ChatWidget({ ticketId, ticketRef }: ChatWidgetProps) {
{/* Anexos pendentes */}
{pendingAttachments.length > 0 && (
<div className="mb-2 flex flex-wrap gap-2">
{pendingAttachments.map((att) => (
<div
key={att.storageId}
className="flex items-center gap-1 rounded-lg bg-slate-100 px-2 py-1 text-xs"
>
{getFileIcon(att.name)}
<span className="max-w-[100px] truncate">{att.name}</span>
<button
onClick={() => handleRemoveAttachment(att.storageId)}
className="ml-1 rounded p-0.5 text-slate-400 hover:bg-slate-200 hover:text-slate-600"
aria-label={`Remover anexo ${att.name}`}
{pendingAttachments.map((att) => {
const isAudioPending = isAudioAttachment(att) && att.previewUrl
if (isAudioPending) {
return (
<div
key={att.storageId}
className="flex items-center gap-2 rounded-lg bg-slate-100 px-2 py-1 text-xs"
>
<audio controls src={att.previewUrl} className="h-7 w-40" />
<button
onClick={() => handleRemoveAttachment(att.storageId)}
className="flex size-6 items-center justify-center rounded-full text-slate-500 hover:bg-slate-200"
aria-label="Remover áudio"
>
<X className="size-3" />
</button>
</div>
)
}
return (
<div
key={att.storageId}
className="flex items-center gap-1 rounded-lg bg-slate-100 px-2 py-1 text-xs"
>
<X className="size-3" />
{getFileIcon(att.name)}
<span className="max-w-[100px] truncate">{att.name}</span>
<button
onClick={() => handleRemoveAttachment(att.storageId)}
className="ml-1 rounded p-0.5 text-slate-400 hover:bg-slate-200 hover:text-slate-600"
aria-label={`Remover anexo ${att.name}`}
>
<X className="size-3" />
</button>
</div>
)
})}
</div>
)}
{(audioRecorder.isRecording || audioRecorder.isProcessing) && (
<div
className={`mb-2 flex items-center justify-between gap-3 rounded-lg border px-3 py-2 text-xs ${
audioRecorder.isRecording
? "border-rose-200 bg-rose-50 text-rose-700"
: "border-slate-200 bg-slate-50 text-slate-600"
}`}
>
{audioRecorder.isRecording ? (
<>
<div className="flex flex-1 items-center gap-3">
<span className="size-2 rounded-full bg-rose-500 animate-pulse" />
<span className="font-semibold tabular-nums">
{formatDuration(audioRecorder.durationSeconds)} / {formatDuration(MAX_AUDIO_DURATION_SECONDS)}
</span>
<div className="flex h-6 flex-1 items-center gap-[2px]">
{audioRecorder.levels.map((level, index) => (
<span
key={index}
className="w-[2px] rounded-full bg-rose-400"
style={{ height: `${Math.max(4, Math.round(level * 22))}px` }}
/>
))}
</div>
</div>
<button
type="button"
onClick={audioRecorder.cancelRecording}
className="flex size-8 items-center justify-center rounded-md text-rose-600 hover:bg-rose-100"
aria-label="Descartar áudio"
>
<Trash2 className="size-4" />
</button>
</>
) : (
<div className="flex items-center gap-2">
<Loader2 className="size-3.5 animate-spin" />
Anexando áudio...
</div>
))}
)}
</div>
)}
<div className="flex items-end gap-2">
@ -856,10 +1230,25 @@ export function ChatWidget({ ticketId, ticketRef }: ChatWidgetProps) {
placeholder="Digite sua mensagem..."
className="max-h-24 min-h-[36px] flex-1 resize-none rounded-lg border border-slate-200 px-3 py-2 text-sm focus:border-slate-400 focus:outline-none focus:ring-1 focus:ring-slate-400"
rows={1}
disabled={isSending || isAudioBusy}
/>
<button
onClick={audioRecorder.isRecording ? audioRecorder.stopRecording : audioRecorder.startRecording}
disabled={isUploading || isSending || audioRecorder.isProcessing}
className="flex size-9 items-center justify-center rounded-lg text-slate-500 transition hover:bg-slate-100 hover:text-slate-700 disabled:opacity-50"
aria-label={audioRecorder.isRecording ? "Parar gravação de áudio" : "Gravar áudio"}
>
{audioRecorder.isProcessing ? (
<Loader2 className="size-4 animate-spin" />
) : audioRecorder.isRecording ? (
<Square className="size-4" />
) : (
<Mic className="size-4" />
)}
</button>
<button
onClick={handleAttach}
disabled={isUploading || isSending}
disabled={isUploading || isSending || isAudioBusy}
className="flex size-9 items-center justify-center rounded-lg text-slate-500 transition hover:bg-slate-100 hover:text-slate-700 disabled:opacity-50"
aria-label="Anexar arquivo"
>
@ -871,7 +1260,7 @@ export function ChatWidget({ ticketId, ticketRef }: ChatWidgetProps) {
</button>
<button
onClick={handleSend}
disabled={(!inputValue.trim() && pendingAttachments.length === 0) || isSending}
disabled={(!inputValue.trim() && pendingAttachments.length === 0) || isSending || isAudioBusy}
className="flex size-9 items-center justify-center rounded-lg bg-black text-white transition hover:bg-black/90 disabled:opacity-50"
aria-label="Enviar mensagem"
>

View file

@ -0,0 +1,41 @@
// MediaRecorder MIME types we can accept, ordered from most to least preferred.
const AUDIO_MIME_CANDIDATES = [
  "audio/webm;codecs=opus",
  "audio/webm",
  "audio/ogg;codecs=opus",
  "audio/ogg",
  "audio/mp4",
  "audio/mpeg",
  "audio/wav",
]

// Maps a bare audio MIME type to the file extension used for uploads.
const AUDIO_MIME_EXTENSION_MAP: Record<string, string> = {
  "audio/webm": "webm",
  "audio/ogg": "ogg",
  "audio/mp4": "m4a",
  "audio/mpeg": "mp3",
  "audio/wav": "wav",
}

// Strips codec parameters ("audio/webm;codecs=opus" -> "audio/webm") and
// lower-cases the result.
export function normalizeMimeType(mimeType: string) {
  const [bareType] = mimeType.split(";")
  return bareType.trim().toLowerCase()
}

// Returns the first candidate accepted by `isTypeSupported` (defaulting to
// MediaRecorder.isTypeSupported when available), or "" when none matches.
export function pickSupportedMimeType(isTypeSupported?: (mimeType: string) => boolean) {
  const fallback =
    typeof MediaRecorder === "undefined"
      ? undefined
      : MediaRecorder.isTypeSupported.bind(MediaRecorder)
  const check = isTypeSupported ?? fallback
  if (!check) return ""
  return AUDIO_MIME_CANDIDATES.find((candidate) => check(candidate)) ?? ""
}

// Builds a timestamped name ("audio-<ISO>.<ext>") for a recorded clip;
// unknown MIME types fall back to the "webm" extension.
export function buildAudioFileName(mimeType: string, now: Date = new Date()) {
  const extension = AUDIO_MIME_EXTENSION_MAP[normalizeMimeType(mimeType)] ?? "webm"
  const stamp = now.toISOString().replace(/[:.]/g, "-")
  return `audio-${stamp}.${extension}`
}

View file

@ -0,0 +1,253 @@
"use client"
import { useCallback, useEffect, useRef, useState } from "react"
import { buildAudioFileName, pickSupportedMimeType } from "./audio-recorder-utils"
// Result delivered to `onAudioReady` once a recording is finalized.
type AudioRecorderPayload = {
  file: File
  durationSeconds: number
}

// Configuration for useAudioRecorder.
type AudioRecorderOptions = {
  // Receives the finished clip; errors it throws surface through onError.
  onAudioReady: (payload: AudioRecorderPayload) => Promise<void>
  onError?: (message: string) => void
  // Recording auto-stops after this many seconds (default 300).
  maxDurationSeconds?: number
  // Clips larger than this are rejected (default 5 MB).
  maxFileSizeBytes?: number
  audioBitsPerSecond?: number
  // Number of bars in the live level meter (default 32).
  levelBars?: number
}

// Public surface returned by the hook.
type AudioRecorderState = {
  isRecording: boolean
  isProcessing: boolean
  durationSeconds: number
  levels: number[]
  startRecording: () => Promise<void>
  stopRecording: () => void
  cancelRecording: () => void
}
// Records microphone audio via MediaRecorder. Exposes recording/upload
// status, an elapsed-seconds counter and a live input-level meter, plus
// start/stop/cancel controls. The finished clip is handed to `onAudioReady`
// as a File; oversize clips and failures are reported through `onError`.
export function useAudioRecorder(options: AudioRecorderOptions): AudioRecorderState {
  const {
    onAudioReady,
    onError,
    maxDurationSeconds = 300,
    maxFileSizeBytes = 5 * 1024 * 1024,
    audioBitsPerSecond = 64000,
    levelBars = 32,
  } = options

  const [isRecording, setIsRecording] = useState(false)
  const [isProcessing, setIsProcessing] = useState(false)
  const [durationSeconds, setDurationSeconds] = useState(0)
  const [levels, setLevels] = useState<number[]>(() => Array.from({ length: levelBars }, () => 0))

  // Mirror of `durationSeconds` readable from the recorder's onstop handler
  // without a stale closure.
  const durationRef = useRef(0)
  const recorderRef = useRef<MediaRecorder | null>(null)
  const streamRef = useRef<MediaStream | null>(null)
  const audioContextRef = useRef<AudioContext | null>(null)
  const analyserRef = useRef<AnalyserNode | null>(null)
  const chunksRef = useRef<BlobPart[]>([])
  const timerRef = useRef<number | null>(null)
  const stopTimeoutRef = useRef<number | null>(null)
  const rafRef = useRef<number | null>(null)
  // Set by cancelRecording so onstop discards the clip instead of delivering it.
  const cancelRef = useRef(false)
  const mountedRef = useRef(true)

  useEffect(() => {
    return () => {
      mountedRef.current = false
    }
  }, [])

  // Releases every recording resource: timers, the rAF meter loop, the
  // microphone tracks and the analyser's AudioContext.
  const cleanup = useCallback(() => {
    if (timerRef.current) {
      clearInterval(timerRef.current)
      timerRef.current = null
    }
    if (stopTimeoutRef.current) {
      clearTimeout(stopTimeoutRef.current)
      stopTimeoutRef.current = null
    }
    if (rafRef.current) {
      cancelAnimationFrame(rafRef.current)
      rafRef.current = null
    }
    if (streamRef.current) {
      streamRef.current.getTracks().forEach((track) => track.stop())
      streamRef.current = null
    }
    if (audioContextRef.current) {
      void audioContextRef.current.close()
      audioContextRef.current = null
    }
    analyserRef.current = null
    recorderRef.current = null
    chunksRef.current = []
  }, [])

  // rAF loop folding the analyser's time-domain samples into `levelBars`
  // normalized [0, 1] values for the live meter.
  const updateLevels = useCallback(() => {
    const analyser = analyserRef.current
    if (!analyser) return
    const bufferLength = analyser.fftSize
    const dataArray = new Uint8Array(bufferLength)
    analyser.getByteTimeDomainData(dataArray)
    const step = Math.floor(bufferLength / levelBars)
    const nextLevels = Array.from({ length: levelBars }, (_, index) => {
      let sum = 0
      const start = index * step
      const end = Math.min(start + step, bufferLength)
      for (let i = start; i < end; i += 1) {
        // Time-domain samples are unsigned bytes centred on 128 (silence).
        sum += Math.abs(dataArray[i] - 128)
      }
      const avg = sum / Math.max(1, end - start)
      return Math.min(1, avg / 128)
    })
    if (mountedRef.current) {
      setLevels(nextLevels)
      rafRef.current = requestAnimationFrame(updateLevels)
    }
  }, [levelBars])

  // Stops an active recording. BUGFIX: the decision is based on the
  // recorder ref and its runtime state instead of the `isRecording` React
  // state — this callback is captured by the max-duration interval/timeout
  // closures created before `isRecording` flipped to true, so reading the
  // state here was stale and the automatic stop at the duration cap
  // silently never fired.
  const stopRecording = useCallback(() => {
    const recorder = recorderRef.current
    if (!recorder || recorder.state === "inactive") return
    setIsRecording(false)
    try {
      recorder.stop()
    } catch (error) {
      console.error("Falha ao parar gravação:", error)
      cleanup()
    }
  }, [cleanup])

  // Discards the in-flight recording without delivering it.
  const cancelRecording = useCallback(() => {
    cancelRef.current = true
    stopRecording()
  }, [stopRecording])

  const startRecording = useCallback(async () => {
    if (isRecording || isProcessing) return
    if (typeof navigator === "undefined" || !navigator.mediaDevices?.getUserMedia) {
      onError?.("Gravação de áudio indisponível neste dispositivo.")
      return
    }
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
      streamRef.current = stream

      // Analyser chain feeding the live level meter.
      const audioContext = new AudioContext()
      const analyser = audioContext.createAnalyser()
      analyser.fftSize = 256
      const source = audioContext.createMediaStreamSource(stream)
      source.connect(analyser)
      audioContextRef.current = audioContext
      analyserRef.current = analyser

      const mimeType = pickSupportedMimeType()
      const recorderOptions: MediaRecorderOptions = mimeType
        ? { mimeType, audioBitsPerSecond }
        : { audioBitsPerSecond }
      const recorder = new MediaRecorder(stream, recorderOptions)
      recorderRef.current = recorder
      chunksRef.current = []
      cancelRef.current = false

      recorder.ondataavailable = (event) => {
        if (event.data.size > 0) {
          chunksRef.current.push(event.data)
        }
      }

      recorder.onstop = async () => {
        const blobType = recorder.mimeType || mimeType || "audio/webm"
        const blob = new Blob(chunksRef.current, { type: blobType })
        chunksRef.current = []
        cleanup()
        // Cancelled: just reset the meter, no delivery.
        if (cancelRef.current) {
          if (mountedRef.current) {
            setLevels(Array.from({ length: levelBars }, () => 0))
          }
          return
        }
        // Enforce the size limit before handing the clip over.
        if (blob.size > maxFileSizeBytes) {
          onError?.("Áudio excede o limite de 5MB. Tente gravar por menos tempo.")
          if (mountedRef.current) {
            setLevels(Array.from({ length: levelBars }, () => 0))
          }
          return
        }
        const fileName = buildAudioFileName(blobType)
        const file = new File([blob], fileName, { type: blobType })
        setIsProcessing(true)
        try {
          await onAudioReady({ file, durationSeconds: durationRef.current })
        } catch (error) {
          const message = error instanceof Error ? error.message : "Falha ao enviar áudio."
          onError?.(message)
        } finally {
          if (mountedRef.current) {
            setIsProcessing(false)
            setLevels(Array.from({ length: levelBars }, () => 0))
          }
        }
      }

      recorder.start()
      durationRef.current = 0
      setDurationSeconds(0)
      setIsRecording(true)
      updateLevels()

      // 1 Hz counter that also triggers the stop at the duration cap.
      timerRef.current = window.setInterval(() => {
        setDurationSeconds((prev) => {
          const next = prev + 1
          durationRef.current = next
          if (next >= maxDurationSeconds) {
            stopRecording()
          }
          return next
        })
      }, 1000)
      // Safety net in case the interval is throttled or missed.
      stopTimeoutRef.current = window.setTimeout(() => {
        stopRecording()
      }, maxDurationSeconds * 1000)
    } catch (error) {
      console.error("Falha ao iniciar gravação:", error)
      onError?.("Não foi possível iniciar a gravação de áudio.")
      cleanup()
    }
  }, [
    audioBitsPerSecond,
    cleanup,
    isProcessing,
    isRecording,
    levelBars,
    maxDurationSeconds,
    maxFileSizeBytes,
    onAudioReady,
    onError,
    stopRecording,
    updateLevels,
  ])

  return {
    isRecording,
    isProcessing,
    durationSeconds,
    levels,
    startRecording,
    stopRecording,
    cancelRecording,
  }
}

View file

@ -1037,6 +1037,13 @@ const ALLOWED_MIME_TYPES = [
"image/png",
"image/gif",
"image/webp",
// Audio
"audio/webm",
"audio/ogg",
"audio/wav",
"audio/mpeg",
"audio/mp4",
"audio/x-m4a",
// Documentos
"application/pdf",
"text/plain",
@ -1048,6 +1055,7 @@ const ALLOWED_MIME_TYPES = [
const ALLOWED_EXTENSIONS = [
".jpg", ".jpeg", ".png", ".gif", ".webp",
".webm", ".ogg", ".wav", ".mp3", ".m4a",
".pdf", ".txt", ".doc", ".docx", ".xls", ".xlsx",
]
@ -1101,7 +1109,8 @@ export const generateMachineUploadUrl = action({
throw new ConvexError(`Tipo de arquivo não permitido. Permitidos: ${ALLOWED_EXTENSIONS.join(", ")}`)
}
if (!ALLOWED_MIME_TYPES.includes(args.fileType)) {
const normalizedType = args.fileType.split(";")[0].trim().toLowerCase()
if (!ALLOWED_MIME_TYPES.includes(normalizedType)) {
throw new ConvexError("Tipo MIME não permitido")
}

View file

@ -0,0 +1,41 @@
// MediaRecorder MIME types we can accept, ordered from most to least preferred.
const AUDIO_MIME_CANDIDATES = [
  "audio/webm;codecs=opus",
  "audio/webm",
  "audio/ogg;codecs=opus",
  "audio/ogg",
  "audio/mp4",
  "audio/mpeg",
  "audio/wav",
]

// Maps a bare audio MIME type to the file extension used for uploads.
const AUDIO_MIME_EXTENSION_MAP: Record<string, string> = {
  "audio/webm": "webm",
  "audio/ogg": "ogg",
  "audio/mp4": "m4a",
  "audio/mpeg": "mp3",
  "audio/wav": "wav",
}

// Strips codec parameters ("audio/webm;codecs=opus" -> "audio/webm") and
// lower-cases the result.
export function normalizeMimeType(mimeType: string) {
  const [bareType] = mimeType.split(";")
  return bareType.trim().toLowerCase()
}

// Returns the first candidate accepted by `isTypeSupported` (defaulting to
// MediaRecorder.isTypeSupported when available), or "" when none matches.
export function pickSupportedMimeType(isTypeSupported?: (mimeType: string) => boolean) {
  const fallback =
    typeof MediaRecorder === "undefined"
      ? undefined
      : MediaRecorder.isTypeSupported.bind(MediaRecorder)
  const check = isTypeSupported ?? fallback
  if (!check) return ""
  return AUDIO_MIME_CANDIDATES.find((candidate) => check(candidate)) ?? ""
}

// Builds a timestamped name ("audio-<ISO>.<ext>") for a recorded clip;
// unknown MIME types fall back to the "webm" extension.
export function buildAudioFileName(mimeType: string, now: Date = new Date()) {
  const extension = AUDIO_MIME_EXTENSION_MAP[normalizeMimeType(mimeType)] ?? "webm"
  const stamp = now.toISOString().replace(/[:.]/g, "-")
  return `audio-${stamp}.${extension}`
}

View file

@ -0,0 +1,47 @@
// Shape required of an attachment by the type-sniffing helpers below.
type AttachmentLike = {
  name: string
  type?: string | null
}

// Human-readable size label ("512B", "34KB", "1.2MB"); null when the size
// is missing or zero.
export function formatAttachmentSize(size?: number | null) {
  if (!size) return null
  if (size < 1024) return `${size}B`
  const kilobytes = size / 1024
  return kilobytes < 1024 ? `${Math.round(kilobytes)}KB` : `${(kilobytes / 1024).toFixed(1)}MB`
}

// Zero-padded "MM:SS" label; clamps negatives and truncates fractions.
export function formatDuration(seconds: number) {
  const total = Math.max(0, Math.floor(seconds))
  const pad = (value: number) => String(value).padStart(2, "0")
  return `${pad(Math.floor(total / 60))}:${pad(total % 60)}`
}

// Audio detection: the MIME type wins, the file extension is the fallback.
export function isAudioAttachment(attachment: AttachmentLike) {
  if (attachment.type?.startsWith("audio/")) return true
  return /\.(?:mp3|wav|ogg|webm|m4a)$/i.test(attachment.name)
}

// Image detection: the MIME type wins, the file extension is the fallback.
export function isImageAttachment(attachment: AttachmentLike) {
  if (attachment.type?.startsWith("image/")) return true
  return /\.(?:png|jpe?g|gif|webp)$/i.test(attachment.name)
}

// Downsamples the first channel of `buffer` into `bars` peak values
// normalized to [0, 1] for waveform rendering.
export function extractPeaks(buffer: AudioBuffer, bars = 48) {
  const samples = buffer.getChannelData(0)
  const blockLen = Math.max(1, Math.floor(samples.length / bars))
  const peaks: number[] = []
  for (let bar = 0; bar < bars; bar += 1) {
    const from = bar * blockLen
    const to = Math.min(from + blockLen, samples.length)
    let peak = 0
    for (let pos = from; pos < to; pos += 1) {
      const magnitude = Math.abs(samples[pos])
      if (magnitude > peak) peak = magnitude
    }
    peaks.push(peak)
  }
  // The 0.001 floor avoids dividing by zero on silent audio.
  const scale = Math.max(...peaks, 0.001)
  return peaks.map((value) => value / scale)
}

View file

@ -0,0 +1,403 @@
"use client"
import { useAction } from "convex/react"
import { useEffect, useRef, useState } from "react"
import type { Id } from "@/convex/_generated/dataModel"
import { api } from "@/convex/_generated/api"
import { cn } from "@/lib/utils"
import { Button } from "@/components/ui/button"
import { Spinner } from "@/components/ui/spinner"
import { toast } from "sonner"
import { Check, Download, Eye, FileText, Image as ImageIcon, Pause, Play } from "lucide-react"
import {
extractPeaks,
formatAttachmentSize,
formatDuration,
isAudioAttachment,
isImageAttachment,
} from "./chat-attachment-utils"
// Attachment record as stored on a chat message.
type ChatAttachment = {
  storageId: Id<"_storage">
  name: string
  size: number | null
  type: string | null
}

// Visual tone of the bubble the attachment sits in: "dark" renders
// light-on-dark styles, "light" the inverse.
type AttachmentTone = "light" | "dark"

type ChatMessageAttachmentProps = {
  attachment: ChatAttachment
  // Defaults handled by the component — assumed "light"; confirm at usage.
  tone?: AttachmentTone
}
function AudioWaveform({
peaks,
progress,
tone,
}: {
peaks: number[]
progress: number
tone: AttachmentTone
}) {
const playedBars = Math.round(progress * peaks.length)
return (
<div className="flex h-9 items-center gap-[2px]">
{peaks.map((value, index) => {
const height = Math.max(4, Math.round(value * 28))
const played = index <= playedBars
const color = played
? tone === "dark"
? "bg-emerald-300"
: "bg-emerald-500"
: tone === "dark"
? "bg-white/30"
: "bg-slate-300"
return (
<span
key={index}
className={cn("w-[2px] rounded-full transition-colors", color)}
style={{ height }}
/>
)
})}
</div>
)
}
/**
 * Inline audio player for a chat attachment: play/pause toggle, a waveform
 * generated from the decoded audio, elapsed/total time, and a download button.
 *
 * `size` is optional; when absent, the total duration is shown in its place
 * under the waveform. `tone` selects the light palette (received bubbles) or
 * the dark palette (own-message bubbles).
 */
function AudioAttachmentPlayer({
  url,
  name,
  size,
  tone,
}: {
  url: string
  name: string
  size?: number | null
  tone: AttachmentTone
}) {
  const audioRef = useRef<HTMLAudioElement | null>(null)
  const [isPlaying, setIsPlaying] = useState(false)
  const [duration, setDuration] = useState(0)
  const [currentTime, setCurrentTime] = useState(0)
  const [peaks, setPeaks] = useState<number[]>([])
  const [isLoadingWaveform, setIsLoadingWaveform] = useState(true)
  const [downloading, setDownloading] = useState(false)
  const [downloaded, setDownloaded] = useState(false)

  // Fetch and decode the audio once per URL to compute the waveform peaks.
  // `cancelled` guards against state updates after unmount or a URL change.
  useEffect(() => {
    setIsLoadingWaveform(true)
    setPeaks([])
    let cancelled = false
    const loadWaveform = async () => {
      try {
        // Environments without Web Audio (e.g. SSR) skip the waveform; the
        // player still works, only the bars fall back to a placeholder.
        if (typeof AudioContext === "undefined") {
          return
        }
        const response = await fetch(url)
        const buffer = await response.arrayBuffer()
        const audioContext = new AudioContext()
        const decoded = await audioContext.decodeAudioData(buffer)
        // Close the context right away — it was only needed for decoding.
        await audioContext.close()
        if (!cancelled) {
          setPeaks(extractPeaks(decoded))
        }
      } catch (error) {
        console.error("Falha ao gerar waveform:", error)
      } finally {
        if (!cancelled) {
          setIsLoadingWaveform(false)
        }
      }
    }
    loadWaveform()
    return () => {
      cancelled = true
    }
  }, [url])

  // Toggles playback on the hidden <audio> element; play() can reject
  // (e.g. autoplay policy), so failures are logged rather than thrown.
  const handleToggle = async () => {
    if (!audioRef.current) return
    if (isPlaying) {
      audioRef.current.pause()
      return
    }
    try {
      await audioRef.current.play()
    } catch (error) {
      console.error("Falha ao tocar audio:", error)
    }
  }

  // Downloads through a temporary blob object-URL so the browser honors the
  // `download` attribute even for cross-origin storage URLs.
  const handleDownload = async () => {
    if (downloading) return
    setDownloading(true)
    try {
      const response = await fetch(url)
      const blob = await response.blob()
      const downloadUrl = URL.createObjectURL(blob)
      const anchor = document.createElement("a")
      anchor.href = downloadUrl
      anchor.download = name
      document.body.appendChild(anchor)
      anchor.click()
      document.body.removeChild(anchor)
      URL.revokeObjectURL(downloadUrl)
      // Brief "done" checkmark feedback, reset after 2s.
      setDownloaded(true)
      setTimeout(() => setDownloaded(false), 2000)
    } catch (error) {
      toast.error("Erro ao baixar áudio")
    } finally {
      setDownloading(false)
    }
  }

  // Fraction played (0..1); guards against division by zero before metadata loads.
  const progress = duration > 0 ? currentTime / duration : 0
  const sizeLabel = formatAttachmentSize(size)
  return (
    <div
      className={cn(
        "flex items-center gap-3 rounded-2xl border px-3 py-2",
        tone === "dark"
          ? "border-white/10 bg-white/10 text-white"
          : "border-slate-200 bg-white text-slate-900"
      )}
    >
      <Button
        type="button"
        variant={tone === "dark" ? "secondary" : "outline"}
        size="icon"
        className={cn(
          "size-9 rounded-full",
          tone === "dark" && "border-white/20 bg-white/10 text-white hover:bg-white/20"
        )}
        onClick={handleToggle}
        aria-label={isPlaying ? "Pausar áudio" : "Reproduzir áudio"}
      >
        {isPlaying ? <Pause className="size-4" /> : <Play className="size-4" />}
      </Button>
      <div className="flex-1">
        {/* Placeholder box while decoding, waveform when peaks are available,
            and the same placeholder if decoding produced no peaks. */}
        {isLoadingWaveform ? (
          <div className={cn("h-9 rounded-lg", tone === "dark" ? "bg-white/10" : "bg-slate-100")} />
        ) : peaks.length > 0 ? (
          <AudioWaveform peaks={peaks} progress={progress} tone={tone} />
        ) : (
          <div className={cn("h-9 rounded-lg", tone === "dark" ? "bg-white/10" : "bg-slate-100")} />
        )}
        <div
          className={cn(
            "mt-1 flex items-center justify-between text-[10px]",
            tone === "dark" ? "text-white/60" : "text-slate-400"
          )}
        >
          <span>{formatDuration(currentTime)}</span>
          <span className="truncate">{sizeLabel ?? formatDuration(duration)}</span>
        </div>
      </div>
      <Button
        type="button"
        variant="ghost"
        size="icon"
        className={cn(
          "size-8",
          tone === "dark" ? "text-white/70 hover:text-white" : "text-slate-500 hover:text-slate-700"
        )}
        onClick={handleDownload}
        aria-label="Baixar áudio"
      >
        {downloading ? (
          <Spinner className="size-4" />
        ) : downloaded ? (
          <Check className="size-4 text-emerald-400" />
        ) : (
          <Download className="size-4" />
        )}
      </Button>
      {/* Hidden audio element drives playback state; metadata/time events feed
          the duration and progress display. */}
      <audio
        ref={audioRef}
        src={url}
        preload="metadata"
        onLoadedMetadata={(event) => {
          const target = event.currentTarget
          // Some containers report Infinity/NaN durations; treat those as 0.
          setDuration(Number.isFinite(target.duration) ? target.duration : 0)
        }}
        onTimeUpdate={(event) => {
          setCurrentTime(event.currentTarget.currentTime)
        }}
        onPlay={() => setIsPlaying(true)}
        onPause={() => setIsPlaying(false)}
        onEnded={() => setIsPlaying(false)}
        className="hidden"
      />
    </div>
  )
}
export function ChatMessageAttachment({ attachment, tone = "light" }: ChatMessageAttachmentProps) {
const getFileUrl = useAction(api.files.getUrl)
const [url, setUrl] = useState<string | null>(null)
const [loading, setLoading] = useState(true)
const [downloading, setDownloading] = useState(false)
const [downloaded, setDownloaded] = useState(false)
const sizeLabel = formatAttachmentSize(attachment.size)
const isAudio = isAudioAttachment(attachment)
const isImage = isImageAttachment(attachment)
useEffect(() => {
let cancelled = false
async function loadUrl() {
try {
const fileUrl = await getFileUrl({ storageId: attachment.storageId })
if (!cancelled && fileUrl) {
setUrl(fileUrl)
}
} catch (error) {
console.error("Erro ao carregar anexo:", error)
} finally {
if (!cancelled) setLoading(false)
}
}
loadUrl()
return () => { cancelled = true }
}, [attachment.storageId, getFileUrl])
const handleView = () => {
if (!url) return
window.open(url, "_blank", "noopener,noreferrer")
}
const handleDownload = async () => {
if (!url || downloading) return
setDownloading(true)
try {
const response = await fetch(url)
const blob = await response.blob()
const downloadUrl = URL.createObjectURL(blob)
const anchor = document.createElement("a")
anchor.href = downloadUrl
anchor.download = attachment.name
document.body.appendChild(anchor)
anchor.click()
document.body.removeChild(anchor)
URL.revokeObjectURL(downloadUrl)
setDownloaded(true)
setTimeout(() => setDownloaded(false), 2000)
} catch (error) {
toast.error("Erro ao baixar arquivo")
} finally {
setDownloading(false)
}
}
if (loading) {
return (
<div className={cn(
"flex size-12 items-center justify-center rounded-lg",
tone === "dark" ? "bg-white/10" : "border border-slate-200 bg-slate-50"
)}>
<Spinner className={cn("size-4", tone === "dark" ? "text-white/70" : "text-slate-400")} />
</div>
)
}
if (isAudio && url) {
return (
<AudioAttachmentPlayer
url={url}
name={attachment.name}
size={attachment.size}
tone={tone}
/>
)
}
if (isImage && url) {
return (
<div className={cn(
"group relative overflow-hidden rounded-lg border",
tone === "dark" ? "border-white/10" : "border-slate-200"
)}>
{/* eslint-disable-next-line @next/next/no-img-element */}
<img
src={url}
alt={attachment.name}
className="size-16 cursor-pointer object-cover"
onClick={handleView}
/>
<div className="absolute inset-0 flex items-center justify-center gap-1 bg-black/50 opacity-0 transition-opacity group-hover:opacity-100">
<button
onClick={handleView}
className="flex size-6 items-center justify-center rounded-full bg-white/20 hover:bg-white/30"
aria-label="Visualizar anexo"
>
<Eye className="size-3.5 text-white" />
</button>
<button
onClick={handleDownload}
disabled={downloading}
className="flex size-6 items-center justify-center rounded-full bg-white/20 hover:bg-white/30"
aria-label="Baixar anexo"
>
{downloading ? (
<Spinner className="size-3 text-white" />
) : downloaded ? (
<Check className="size-3.5 text-emerald-400" />
) : (
<Download className="size-3.5 text-white" />
)}
</button>
</div>
</div>
)
}
return (
<div className={cn(
"flex items-center gap-1.5 rounded-lg border px-2 py-1.5 text-xs",
tone === "dark"
? "border-white/10 bg-white/10 text-white"
: "border-slate-200 bg-slate-50 text-slate-900"
)}>
{attachment.type?.startsWith("image/") ? (
<ImageIcon className="size-4 text-slate-500" />
) : (
<FileText className={cn("size-4", tone === "dark" ? "text-white/70" : "text-slate-500")} />
)}
<span className={cn("max-w-[120px] truncate", tone === "dark" ? "text-white/90" : "text-slate-700")}>
{attachment.name}
</span>
{sizeLabel && (
<span className={cn("text-[10px]", tone === "dark" ? "text-white/60" : "text-slate-400")}>
({sizeLabel})
</span>
)}
<button
onClick={handleView}
className={cn("rounded p-0.5", tone === "dark" ? "hover:bg-white/10" : "hover:bg-slate-200")}
aria-label="Visualizar anexo"
>
<Eye className={cn("size-3", tone === "dark" ? "text-white/70" : "text-slate-400")} />
</button>
<button
onClick={handleDownload}
disabled={downloading}
className={cn("rounded p-0.5", tone === "dark" ? "hover:bg-white/10" : "hover:bg-slate-200")}
aria-label="Baixar anexo"
>
{downloading ? (
<Spinner className={cn("size-3", tone === "dark" ? "text-white/70" : "text-slate-400")} />
) : downloaded ? (
<Check className="size-3 text-emerald-400" />
) : (
<Download className={cn("size-3", tone === "dark" ? "text-white/70" : "text-slate-400")} />
)}
</button>
</div>
)
}

View file

@ -1,6 +1,5 @@
"use client"
import Image from "next/image"
import { useCallback, useEffect, useRef, useState } from "react"
import { useAction, useMutation, useQuery } from "convex/react"
import type { Id } from "@/convex/_generated/dataModel"
@ -22,16 +21,19 @@ import {
XCircle,
Paperclip,
FileText,
Image as ImageIcon,
Download,
Mic,
Square,
Trash2,
ExternalLink,
Eye,
Check,
} from "lucide-react"
import { ChatSessionList } from "./chat-session-list"
import { ChatMessageAttachment } from "./chat-message-attachment"
import { useAudioRecorder } from "./use-audio-recorder"
const MAX_MESSAGE_LENGTH = 4000
const MAX_ATTACHMENT_SIZE = 5 * 1024 * 1024 // 5MB
const MAX_AUDIO_BYTES = 5 * 1024 * 1024 // 5MB
const MAX_AUDIO_DURATION_SECONDS = 300
const MAX_ATTACHMENTS = 5
const STORAGE_KEY = "chat-widget-state"
@ -51,6 +53,13 @@ function formatTime(timestamp: number) {
})
}
/** Formats a second count as a zero-padded "MM:SS" label; negatives clamp to 00:00. */
function formatDuration(seconds: number) {
  const total = Math.max(0, Math.floor(seconds))
  const pad = (value: number) => String(value).padStart(2, "0")
  return `${pad(Math.floor(total / 60))}:${pad(total % 60)}`
}
function formatDateSeparator(timestamp: number) {
const date = new Date(timestamp)
const today = new Date()
@ -141,131 +150,6 @@ type ChatData = {
}>
}
// Componente de preview de anexo na mensagem
/**
 * Attachment preview rendered inside a chat bubble: resolves the Convex
 * storage id to a URL, then shows either an image thumbnail with a hover
 * view/download overlay or a compact file chip with the same actions.
 */
function MessageAttachment({ attachment }: { attachment: ChatAttachment }) {
  const getFileUrl = useAction(api.files.getUrl)
  const [url, setUrl] = useState<string | null>(null)
  const [loading, setLoading] = useState(true)

  // Resolve the storage id once per attachment; `cancelled` avoids setting
  // state after unmount.
  useEffect(() => {
    let cancelled = false
    async function loadUrl() {
      try {
        const fileUrl = await getFileUrl({ storageId: attachment.storageId })
        if (!cancelled && fileUrl) {
          setUrl(fileUrl)
        }
      } catch (error) {
        console.error("Erro ao carregar anexo:", error)
      } finally {
        if (!cancelled) setLoading(false)
      }
    }
    loadUrl()
    return () => { cancelled = true }
  }, [attachment.storageId, getFileUrl])

  const isImage = attachment.type?.startsWith("image/")
  const [downloading, setDownloading] = useState(false)
  const [downloaded, setDownloaded] = useState(false)

  const handleView = () => {
    if (!url) return
    window.open(url, "_blank", "noopener,noreferrer")
  }

  // Download via a temporary blob object-URL so the `download` attribute is
  // honored even for cross-origin storage URLs.
  const handleDownload = async () => {
    if (!url || downloading) return
    setDownloading(true)
    try {
      const response = await fetch(url)
      const blob = await response.blob()
      const downloadUrl = URL.createObjectURL(blob)
      const a = document.createElement("a")
      a.href = downloadUrl
      a.download = attachment.name
      document.body.appendChild(a)
      a.click()
      document.body.removeChild(a)
      URL.revokeObjectURL(downloadUrl)
      // Brief "done" checkmark feedback, reset after 2s.
      setDownloaded(true)
      setTimeout(() => setDownloaded(false), 2000)
    } catch (error) {
      toast.error("Erro ao baixar arquivo")
    } finally {
      setDownloading(false)
    }
  }

  if (loading) {
    return (
      <div className="flex size-12 items-center justify-center rounded-lg border border-slate-200 bg-slate-50">
        <Spinner className="size-4 text-slate-400" />
      </div>
    )
  }

  if (isImage && url) {
    return (
      <div className="group relative overflow-hidden rounded-lg border border-slate-200">
        {/* eslint-disable-next-line @next/next/no-img-element */}
        <img
          src={url}
          alt={attachment.name}
          className="size-16 cursor-pointer object-cover"
          onClick={handleView}
        />
        {/* Hover overlay with view/download actions. */}
        <div className="absolute inset-0 flex items-center justify-center gap-1 bg-black/50 opacity-0 transition-opacity group-hover:opacity-100">
          <button
            onClick={handleView}
            className="flex size-6 items-center justify-center rounded-full bg-white/20 hover:bg-white/30"
          >
            <Eye className="size-3.5 text-white" />
          </button>
          <button
            onClick={handleDownload}
            disabled={downloading}
            className="flex size-6 items-center justify-center rounded-full bg-white/20 hover:bg-white/30"
          >
            {downloading ? (
              <Spinner className="size-3 text-white" />
            ) : downloaded ? (
              <Check className="size-3.5 text-emerald-400" />
            ) : (
              <Download className="size-3.5 text-white" />
            )}
          </button>
        </div>
      </div>
    )
  }

  // Non-image fallback: compact chip with truncated file name and actions.
  return (
    <div className="flex items-center gap-1.5 rounded-lg border border-slate-200 bg-slate-50 px-2 py-1.5 text-xs">
      <FileText className="size-4 text-slate-500" />
      <span className="max-w-[80px] truncate text-slate-700">{attachment.name}</span>
      <button
        onClick={handleView}
        className="rounded p-0.5 hover:bg-slate-200"
      >
        <Eye className="size-3 text-slate-400" />
      </button>
      <button
        onClick={handleDownload}
        disabled={downloading}
        className="rounded p-0.5 hover:bg-slate-200"
      >
        {downloading ? (
          <Spinner className="size-3 text-slate-400" />
        ) : downloaded ? (
          <Check className="size-3 text-emerald-500" />
        ) : (
          <Download className="size-3 text-slate-400" />
        )}
      </button>
    </div>
  )
}
export function ChatWidget() {
// Detectar se esta rodando no Tauri (desktop) - nesse caso, nao renderizar
@ -353,11 +237,60 @@ export function ChatWidget() {
const endLiveChat = useMutation(api.liveChat.endSession)
const generateUploadUrl = useAction(api.files.generateUploadUrl)
const queueAudioAttachment = useCallback(async (file: File) => {
if (!viewerId || !activeTicketId) return
if (attachments.length >= MAX_ATTACHMENTS) {
throw new Error("Limite de anexos atingido. Remova um anexo para gravar outro áudio.")
}
if (file.size > MAX_AUDIO_BYTES) {
throw new Error("Áudio excede o limite de 5MB.")
}
const uploadUrl = await generateUploadUrl()
const fileType = file.type || "audio/webm"
const uploadResponse = await fetch(uploadUrl, {
method: "POST",
headers: { "Content-Type": fileType },
body: file,
})
if (!uploadResponse.ok) {
const text = await uploadResponse.text().catch(() => "")
throw new Error(text || "Erro ao enviar áudio")
}
const { storageId } = (await uploadResponse.json()) as { storageId: string }
const previewUrl = URL.createObjectURL(file)
setAttachments((prev) => [
...prev,
{
storageId,
name: file.name,
size: file.size,
type: fileType,
previewUrl,
},
])
}, [activeTicketId, attachments.length, generateUploadUrl, viewerId])
const audioRecorder = useAudioRecorder({
maxDurationSeconds: MAX_AUDIO_DURATION_SECONDS,
maxFileSizeBytes: MAX_AUDIO_BYTES,
onAudioReady: async ({ file }) => {
await queueAudioAttachment(file)
},
onError: (message) => {
toast.error(message)
},
})
const messages = chat?.messages ?? []
const lastMessageId = messages.length > 0 ? messages[messages.length - 1]?.id : null
const totalUnread = activeSessions?.reduce((sum, s) => sum + s.unreadCount, 0) ?? 0
const liveChat = chat?.liveChat
const machineOnline = liveChat?.machineOnline ?? false
const machineHostname = liveChat?.machineHostname
const isAudioBusy = audioRecorder.isRecording || audioRecorder.isProcessing
// Sincronizar estado entre abas usando evento storage do localStorage
// O evento storage dispara automaticamente em TODAS as outras abas quando localStorage muda
@ -447,12 +380,14 @@ export function ChatWidget() {
setActiveTicketId(mine.ticketId)
}, [activeSessions, viewerId])
// Scroll para última mensagem
// Scroll para última mensagem ao abrir ou trocar de sessão
useEffect(() => {
if (messagesEndRef.current && isOpen && !isMinimized) {
messagesEndRef.current.scrollIntoView({ behavior: "smooth" })
}
}, [messages.length, isOpen, isMinimized])
if (!isOpen || isMinimized || viewMode !== "chat") return
if (!messagesEndRef.current) return
requestAnimationFrame(() => {
messagesEndRef.current?.scrollIntoView({ behavior: "smooth" })
})
}, [activeTicketId, isMinimized, isOpen, lastMessageId, viewMode])
// Ref para rastrear se ja marcamos como lidas nesta abertura do chat
const hasMarkedReadRef = useRef<boolean>(false)
@ -859,9 +794,13 @@ export function ChatWidget() {
{shouldShowBody && <p className="whitespace-pre-wrap text-sm">{msg.body}</p>}
{/* Anexos da mensagem */}
{msg.attachments && msg.attachments.length > 0 && (
<div className={cn("mt-2 flex flex-wrap gap-1.5", isOwn && "justify-end")}>
{msg.attachments.map((att, i) => (
<MessageAttachment key={i} attachment={att} />
<div className={cn("mt-2 flex flex-wrap gap-2", isOwn && "justify-end")}>
{msg.attachments.map((att) => (
<ChatMessageAttachment
key={att.storageId}
attachment={att}
tone={isOwn ? "dark" : "light"}
/>
))}
</div>
)}
@ -881,31 +820,53 @@ export function ChatWidget() {
{/* Preview de anexos pendentes */}
{attachments.length > 0 && (
<div className="flex flex-wrap gap-2 border-t border-slate-100 bg-slate-50 p-2">
{attachments.map((file, index) => (
<div key={index} className="group relative">
{file.type?.startsWith("image/") && file.previewUrl ? (
/* eslint-disable-next-line @next/next/no-img-element */
<img
src={file.previewUrl}
alt={file.name}
className="size-14 rounded-lg border border-slate-200 object-cover"
/>
) : (
<div className="flex size-14 items-center justify-center rounded-lg border border-slate-200 bg-white">
<FileText className="size-5 text-slate-400" />
</div>
)}
{attachments.map((file, index) => {
const isAudio = file.type?.startsWith("audio/") || /\.(mp3|wav|ogg|webm|m4a)$/i.test(file.name)
if (isAudio && file.previewUrl) {
return (
<div
key={index}
className="flex items-center gap-2 rounded-lg border border-slate-200 bg-white px-2 py-1"
>
<audio controls src={file.previewUrl} className="h-8 w-40" />
<button
onClick={() => removeAttachment(index)}
className="absolute -right-1.5 -top-1.5 flex size-5 items-center justify-center rounded-full bg-red-500 text-white opacity-0 transition-opacity group-hover:opacity-100"
className="flex size-6 items-center justify-center rounded-full text-slate-500 hover:bg-slate-100"
aria-label="Remover áudio"
>
<X className="size-3" />
</button>
<p className="mt-0.5 max-w-[56px] truncate text-center text-[10px] text-slate-500">
{file.name}
</p>
</div>
))}
)
}
return (
<div key={index} className="group relative">
{file.type?.startsWith("image/") && file.previewUrl ? (
/* eslint-disable-next-line @next/next/no-img-element */
<img
src={file.previewUrl}
alt={file.name}
className="size-14 rounded-lg border border-slate-200 object-cover"
/>
) : (
<div className="flex size-14 items-center justify-center rounded-lg border border-slate-200 bg-white">
<FileText className="size-5 text-slate-400" />
</div>
)}
<button
onClick={() => removeAttachment(index)}
className="absolute -right-1.5 -top-1.5 flex size-5 items-center justify-center rounded-full bg-red-500 text-white opacity-0 transition-opacity group-hover:opacity-100"
aria-label="Remover anexo"
>
<X className="size-3" />
</button>
<p className="mt-0.5 max-w-[56px] truncate text-center text-[10px] text-slate-500">
{file.name}
</p>
</div>
)
})}
{isUploading && (
<div className="flex size-14 items-center justify-center rounded-lg border border-dashed border-slate-300 bg-slate-50">
<Spinner className="size-5 text-slate-400" />
@ -929,6 +890,52 @@ export function ChatWidget() {
</div>
)}
{(audioRecorder.isRecording || audioRecorder.isProcessing) && (
<div
className={cn(
"mb-2 flex items-center justify-between gap-3 rounded-lg border px-3 py-2 text-xs",
audioRecorder.isRecording
? "border-rose-200 bg-rose-50 text-rose-700"
: "border-slate-200 bg-slate-50 text-slate-600"
)}
>
{audioRecorder.isRecording ? (
<>
<div className="flex flex-1 items-center gap-3">
<span className="size-2 rounded-full bg-rose-500 animate-pulse" />
<span className="font-semibold tabular-nums">
{formatDuration(audioRecorder.durationSeconds)} / {formatDuration(MAX_AUDIO_DURATION_SECONDS)}
</span>
<div className="flex h-6 flex-1 items-center gap-[2px]">
{audioRecorder.levels.map((level, index) => (
<span
key={index}
className="w-[2px] rounded-full bg-rose-400"
style={{ height: `${Math.max(4, Math.round(level * 22))}px` }}
/>
))}
</div>
</div>
<Button
type="button"
variant="ghost"
size="icon"
onClick={audioRecorder.cancelRecording}
className="size-8 text-rose-600 hover:bg-rose-100"
aria-label="Descartar áudio"
>
<Trash2 className="size-4" />
</Button>
</>
) : (
<div className="flex items-center gap-2">
<Spinner className="size-3.5" />
Anexando áudio...
</div>
)}
</div>
)}
<div className="flex items-end gap-2">
<textarea
ref={inputRef}
@ -938,8 +945,26 @@ export function ChatWidget() {
placeholder="Digite sua mensagem..."
className="max-h-20 min-h-[36px] flex-1 resize-none rounded-lg border border-slate-200 px-3 py-2 text-sm focus:border-slate-400 focus:outline-none focus:ring-1 focus:ring-slate-400"
rows={1}
disabled={isSending}
disabled={isSending || isAudioBusy}
/>
{/* Botão de áudio */}
<Button
type="button"
variant="ghost"
size="icon"
className="size-9 text-slate-500 hover:bg-slate-100 hover:text-slate-700"
onClick={audioRecorder.isRecording ? audioRecorder.stopRecording : audioRecorder.startRecording}
disabled={isSending || isUploading || audioRecorder.isProcessing}
aria-label={audioRecorder.isRecording ? "Parar gravação de áudio" : "Gravar áudio"}
>
{audioRecorder.isProcessing ? (
<Spinner className="size-4" />
) : audioRecorder.isRecording ? (
<Square className="size-4" />
) : (
<Mic className="size-4" />
)}
</Button>
{/* Botão de anexar */}
<Button
type="button"
@ -947,7 +972,7 @@ export function ChatWidget() {
size="icon"
className="size-9 text-slate-500 hover:bg-slate-100 hover:text-slate-700"
onClick={() => fileInputRef.current?.click()}
disabled={attachments.length >= MAX_ATTACHMENTS || isUploading}
disabled={attachments.length >= MAX_ATTACHMENTS || isUploading || isAudioBusy}
>
{isUploading ? (
<Spinner className="size-4" />
@ -959,7 +984,7 @@ export function ChatWidget() {
<Button
type="button"
onClick={handleSend}
disabled={(!draft.trim() && attachments.length === 0) || isSending}
disabled={(!draft.trim() && attachments.length === 0) || isSending || isAudioBusy}
className="size-9 bg-black text-white hover:bg-black/90"
size="icon"
>

View file

@ -0,0 +1,253 @@
"use client"
import { useCallback, useEffect, useRef, useState } from "react"
import { buildAudioFileName, pickSupportedMimeType } from "./audio-recorder-utils"
type AudioRecorderPayload = {
file: File
durationSeconds: number
}
type AudioRecorderOptions = {
onAudioReady: (payload: AudioRecorderPayload) => Promise<void>
onError?: (message: string) => void
maxDurationSeconds?: number
maxFileSizeBytes?: number
audioBitsPerSecond?: number
levelBars?: number
}
type AudioRecorderState = {
isRecording: boolean
isProcessing: boolean
durationSeconds: number
levels: number[]
startRecording: () => Promise<void>
stopRecording: () => void
cancelRecording: () => void
}
/**
 * React hook that records microphone audio via MediaRecorder.
 *
 * Exposes:
 *  - `isRecording` / `isProcessing` flags for the UI
 *  - `durationSeconds` — elapsed time, capped at `maxDurationSeconds`
 *  - `levels` — `levelBars` normalized (0..1) volume samples for a live waveform
 *  - `startRecording` / `stopRecording` / `cancelRecording` controls
 *
 * On stop, the recording is wrapped in a File and handed to `onAudioReady`
 * (unless cancelled or larger than `maxFileSizeBytes`); failures surface
 * through `onError`.
 */
export function useAudioRecorder(options: AudioRecorderOptions): AudioRecorderState {
  const {
    onAudioReady,
    onError,
    maxDurationSeconds = 300,
    maxFileSizeBytes = 5 * 1024 * 1024,
    audioBitsPerSecond = 64000,
    levelBars = 32,
  } = options

  const [isRecording, setIsRecording] = useState(false)
  const [isProcessing, setIsProcessing] = useState(false)
  const [durationSeconds, setDurationSeconds] = useState(0)
  const [levels, setLevels] = useState<number[]>(() => Array.from({ length: levelBars }, () => 0))

  const durationRef = useRef(0)
  const recorderRef = useRef<MediaRecorder | null>(null)
  const streamRef = useRef<MediaStream | null>(null)
  const audioContextRef = useRef<AudioContext | null>(null)
  const analyserRef = useRef<AnalyserNode | null>(null)
  const chunksRef = useRef<BlobPart[]>([])
  const timerRef = useRef<number | null>(null)
  const stopTimeoutRef = useRef<number | null>(null)
  const rafRef = useRef<number | null>(null)
  const cancelRef = useRef(false)
  const mountedRef = useRef(true)

  // Releases everything tied to an in-flight recording: timers, the
  // animation frame, the microphone tracks and the AudioContext.
  const cleanup = useCallback(() => {
    if (timerRef.current) {
      clearInterval(timerRef.current)
      timerRef.current = null
    }
    if (stopTimeoutRef.current) {
      clearTimeout(stopTimeoutRef.current)
      stopTimeoutRef.current = null
    }
    if (rafRef.current) {
      cancelAnimationFrame(rafRef.current)
      rafRef.current = null
    }
    if (streamRef.current) {
      streamRef.current.getTracks().forEach((track) => track.stop())
      streamRef.current = null
    }
    if (audioContextRef.current) {
      void audioContextRef.current.close()
      audioContextRef.current = null
    }
    analyserRef.current = null
    recorderRef.current = null
    chunksRef.current = []
  }, [])

  // FIX: stop any live recording and release the microphone on unmount.
  // Previously only `mountedRef` was flipped, leaking the mic stream (and
  // the red "recording" browser indicator) if the component unmounted
  // mid-recording.
  useEffect(() => {
    return () => {
      mountedRef.current = false
      cancelRef.current = true
      const recorder = recorderRef.current
      if (recorder && recorder.state !== "inactive") {
        try {
          recorder.stop()
        } catch {
          // ignore — cleanup() below releases the stream regardless
        }
      }
      cleanup()
    }
  }, [cleanup])

  // Samples the analyser's time-domain data and folds it into `levelBars`
  // normalized amplitudes; reschedules itself via rAF while mounted.
  const updateLevels = useCallback(() => {
    const analyser = analyserRef.current
    if (!analyser) return
    const bufferLength = analyser.fftSize
    const dataArray = new Uint8Array(bufferLength)
    analyser.getByteTimeDomainData(dataArray)
    const step = Math.floor(bufferLength / levelBars)
    const nextLevels = Array.from({ length: levelBars }, (_, index) => {
      let sum = 0
      const start = index * step
      const end = Math.min(start + step, bufferLength)
      for (let i = start; i < end; i += 1) {
        sum += Math.abs(dataArray[i] - 128) // 128 is the zero-signal midpoint
      }
      const avg = sum / Math.max(1, end - start)
      return Math.min(1, avg / 128)
    })
    if (mountedRef.current) {
      setLevels(nextLevels)
      rafRef.current = requestAnimationFrame(updateLevels)
    }
  }, [levelBars])

  // FIX: gate on the MediaRecorder's own state instead of the React
  // `isRecording` flag. The max-duration interval/timeout created in
  // `startRecording` capture this callback from the render where the flag is
  // still false, so checking the captured flag made the automatic stop at
  // `maxDurationSeconds` a silent no-op.
  const stopRecording = useCallback(() => {
    const recorder = recorderRef.current
    if (!recorder || recorder.state === "inactive") return
    setIsRecording(false)
    try {
      recorder.stop()
    } catch (error) {
      console.error("Falha ao parar gravação:", error)
      cleanup()
    }
  }, [cleanup])

  // Discards the in-progress recording: the onstop handler checks
  // `cancelRef` and skips `onAudioReady`.
  const cancelRecording = useCallback(() => {
    cancelRef.current = true
    stopRecording()
  }, [stopRecording])

  const startRecording = useCallback(async () => {
    if (isRecording || isProcessing) return
    if (typeof navigator === "undefined" || !navigator.mediaDevices?.getUserMedia) {
      onError?.("Gravação de áudio indisponível neste dispositivo.")
      return
    }
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
      streamRef.current = stream
      // The analyser only feeds the live waveform; audio is never routed to
      // the output, so there is no feedback.
      const audioContext = new AudioContext()
      const analyser = audioContext.createAnalyser()
      analyser.fftSize = 256
      const source = audioContext.createMediaStreamSource(stream)
      source.connect(analyser)
      audioContextRef.current = audioContext
      analyserRef.current = analyser
      const mimeType = pickSupportedMimeType()
      const recorderOptions: MediaRecorderOptions = mimeType
        ? { mimeType, audioBitsPerSecond }
        : { audioBitsPerSecond }
      const recorder = new MediaRecorder(stream, recorderOptions)
      recorderRef.current = recorder
      chunksRef.current = []
      cancelRef.current = false
      recorder.ondataavailable = (event) => {
        if (event.data.size > 0) {
          chunksRef.current.push(event.data)
        }
      }
      recorder.onstop = async () => {
        const blobType = recorder.mimeType || mimeType || "audio/webm"
        const blob = new Blob(chunksRef.current, { type: blobType })
        chunksRef.current = []
        cleanup()
        if (cancelRef.current) {
          if (mountedRef.current) {
            setLevels(Array.from({ length: levelBars }, () => 0))
          }
          return
        }
        if (blob.size > maxFileSizeBytes) {
          onError?.("Áudio excede o limite de 5MB. Tente gravar por menos tempo.")
          if (mountedRef.current) {
            setLevels(Array.from({ length: levelBars }, () => 0))
          }
          return
        }
        const fileName = buildAudioFileName(blobType)
        const file = new File([blob], fileName, { type: blobType })
        setIsProcessing(true)
        try {
          await onAudioReady({ file, durationSeconds: durationRef.current })
        } catch (error) {
          const message = error instanceof Error ? error.message : "Falha ao enviar áudio."
          onError?.(message)
        } finally {
          if (mountedRef.current) {
            setIsProcessing(false)
            setLevels(Array.from({ length: levelBars }, () => 0))
          }
        }
      }
      recorder.start()
      durationRef.current = 0
      setDurationSeconds(0)
      setIsRecording(true)
      updateLevels()
      // FIX: track elapsed time in the ref and call `stopRecording` from the
      // interval body. Previously the stop was triggered from inside the
      // `setDurationSeconds` updater — a side effect in a state updater,
      // which React may invoke twice (StrictMode) and which must stay pure.
      timerRef.current = window.setInterval(() => {
        durationRef.current += 1
        const next = durationRef.current
        setDurationSeconds(next)
        if (next >= maxDurationSeconds) {
          stopRecording()
        }
      }, 1000)
      // Hard backstop in case the interval is throttled by the browser.
      stopTimeoutRef.current = window.setTimeout(() => {
        stopRecording()
      }, maxDurationSeconds * 1000)
    } catch (error) {
      console.error("Falha ao iniciar gravação:", error)
      onError?.("Não foi possível iniciar a gravação de áudio.")
      cleanup()
    }
  }, [
    audioBitsPerSecond,
    cleanup,
    isProcessing,
    isRecording,
    levelBars,
    maxDurationSeconds,
    maxFileSizeBytes,
    onAudioReady,
    onError,
    stopRecording,
    updateLevels,
  ])

  return {
    isRecording,
    isProcessing,
    durationSeconds,
    levels,
    startRecording,
    stopRecording,
    cancelRecording,
  }
}

View file

@ -1,7 +1,7 @@
"use client"
import { useEffect, useMemo, useRef, useState } from "react"
import { useMutation, useQuery } from "convex/react"
import { useEffect, useRef, useState } from "react"
import { useAction, useMutation, useQuery } from "convex/react"
import type { Id } from "@/convex/_generated/dataModel"
import { api } from "@/convex/_generated/api"
import { useAuth } from "@/lib/auth-client"
@ -12,9 +12,13 @@ import { cn } from "@/lib/utils"
import { toast } from "sonner"
import { formatDistanceToNowStrict } from "date-fns"
import { ptBR } from "date-fns/locale"
import { MessageCircle, Send, WifiOff, X, User, Headphones } from "lucide-react"
import { MessageCircle, Send, WifiOff, X, User, Headphones, Mic, Square, Trash2 } from "lucide-react"
import { ChatMessageAttachment } from "@/components/chat/chat-message-attachment"
import { useAudioRecorder } from "@/components/chat/use-audio-recorder"
const MAX_MESSAGE_LENGTH = 4000
const MAX_AUDIO_BYTES = 5 * 1024 * 1024
const MAX_AUDIO_DURATION_SECONDS = 300
function formatRelative(timestamp: number) {
try {
@ -31,10 +35,25 @@ function formatTime(timestamp: number) {
})
}
/** Zero-padded "MM:SS" label for a duration in seconds; negatives clamp to 00:00. */
function formatDuration(seconds: number) {
  const total = Math.max(0, Math.floor(seconds))
  const two = (n: number) => n.toString().padStart(2, "0")
  return `${two(Math.floor(total / 60))}:${two(total % 60)}`
}
type TicketChatPanelProps = {
ticketId: string
}
type PendingAudio = {
storageId: Id<"_storage">
name: string
size: number
type: string
previewUrl: string
}
export function TicketChatPanel({ ticketId }: TicketChatPanelProps) {
const { convexUserId } = useAuth()
const viewerId = convexUserId ?? null
@ -79,6 +98,7 @@ export function TicketChatPanel({ ticketId }: TicketChatPanelProps) {
const postChatMessage = useMutation(api.tickets.postChatMessage)
const startLiveChat = useMutation(api.liveChat.startSession)
const endLiveChat = useMutation(api.liveChat.endSession)
const generateUploadUrl = useAction(api.files.generateUploadUrl)
const messagesEndRef = useRef<HTMLDivElement | null>(null)
const inputRef = useRef<HTMLTextAreaElement | null>(null)
@ -88,8 +108,10 @@ export function TicketChatPanel({ ticketId }: TicketChatPanelProps) {
const [isSending, setIsSending] = useState(false)
const [isStartingChat, setIsStartingChat] = useState(false)
const [isEndingChat, setIsEndingChat] = useState(false)
const [pendingAudio, setPendingAudio] = useState<PendingAudio | null>(null)
const messages = chat?.messages ?? []
const lastMessageId = messages.length > 0 ? messages[messages.length - 1]?.id : null
const canPost = Boolean(chat?.canPost && viewerId)
const chatEnabled = Boolean(chat?.chatEnabled)
const liveChat = chat?.liveChat
@ -144,16 +166,58 @@ export function TicketChatPanel({ ticketId }: TicketChatPanelProps) {
}, [markChatRead, chat, ticketId, viewerId, isVisible])
useEffect(() => {
if (messagesEndRef.current) {
messagesEndRef.current.scrollIntoView({ behavior: "smooth" })
}
}, [messages.length])
if (!messagesEndRef.current) return
requestAnimationFrame(() => {
messagesEndRef.current?.scrollIntoView({ behavior: "smooth" })
})
}, [lastMessageId, ticketId])
const disabledReason = useMemo(() => {
if (!chatEnabled) return "Chat desativado para este ticket"
if (!canPost) return "Voce nao tem permissao para enviar mensagens"
return null
}, [canPost, chatEnabled])
const queueAudioAttachment = async (file: File) => {
if (file.size > MAX_AUDIO_BYTES) {
throw new Error("Áudio excede o limite de 5MB.")
}
const uploadUrl = await generateUploadUrl()
const fileType = file.type || "audio/webm"
const uploadResponse = await fetch(uploadUrl, {
method: "POST",
headers: { "Content-Type": fileType },
body: file,
})
if (!uploadResponse.ok) {
const text = await uploadResponse.text().catch(() => "")
throw new Error(text || "Erro ao enviar áudio")
}
const { storageId } = (await uploadResponse.json()) as { storageId: string }
const previewUrl = URL.createObjectURL(file)
setPendingAudio((current) => {
if (current?.previewUrl) {
URL.revokeObjectURL(current.previewUrl)
}
return {
storageId: storageId as unknown as Id<"_storage">,
name: file.name,
size: file.size,
type: fileType,
previewUrl,
}
})
}
const audioRecorder = useAudioRecorder({
maxDurationSeconds: MAX_AUDIO_DURATION_SECONDS,
maxFileSizeBytes: MAX_AUDIO_BYTES,
onAudioReady: async ({ file }) => {
await queueAudioAttachment(file)
},
onError: (message) => {
toast.error(message)
},
})
const audioBusy = audioRecorder.isRecording || audioRecorder.isProcessing
const handleStartLiveChat = async () => {
if (!viewerId) return
@ -206,7 +270,8 @@ export function TicketChatPanel({ ticketId }: TicketChatPanelProps) {
}
const handleSend = async () => {
if (!viewerId || !canPost || draft.trim().length === 0) return
if (!viewerId || !canPost) return
if (draft.trim().length === 0 && !pendingAudio) return
if (draft.length > MAX_MESSAGE_LENGTH) {
toast.error(`Mensagem muito longa (max. ${MAX_MESSAGE_LENGTH} caracteres).`)
return
@ -235,12 +300,24 @@ export function TicketChatPanel({ ticketId }: TicketChatPanelProps) {
setIsSending(true)
try {
const attachments = pendingAudio ? [{
storageId: pendingAudio.storageId,
name: pendingAudio.name,
size: pendingAudio.size,
type: pendingAudio.type,
}] : undefined
await postChatMessage({
ticketId: ticketId as Id<"tickets">,
actorId: viewerId as Id<"users">,
body: draft.trim(),
attachments,
})
setDraft("")
if (pendingAudio?.previewUrl) {
URL.revokeObjectURL(pendingAudio.previewUrl)
}
setPendingAudio(null)
inputRef.current?.focus()
} catch (error) {
console.error(error)
@ -353,6 +430,9 @@ export function TicketChatPanel({ ticketId }: TicketChatPanelProps) {
<div className="space-y-4">
{messages.map((message) => {
const isOwn = String(message.authorId) === String(viewerId)
const bodyText = message.body?.trim() ?? ""
const shouldShowBody =
bodyText.length > 0 && !(bodyText === "[Anexo]" && (message.attachments?.length ?? 0) > 0)
return (
<div
key={message.id}
@ -376,13 +456,24 @@ export function TicketChatPanel({ ticketId }: TicketChatPanelProps) {
? "rounded-br-md bg-black text-white"
: "rounded-bl-md bg-white text-slate-900 shadow-sm border border-slate-100"
)}
>
{!isOwn && (
<p className={cn("mb-1 text-xs font-medium", isOwn ? "text-white/70" : "text-slate-500")}>
{message.authorName ?? "Usuario"}
</p>
>
{!isOwn && (
<p className={cn("mb-1 text-xs font-medium", isOwn ? "text-white/70" : "text-slate-500")}>
{message.authorName ?? "Usuario"}
</p>
)}
{shouldShowBody && <p className="whitespace-pre-wrap text-sm">{message.body}</p>}
{message.attachments && message.attachments.length > 0 && (
<div className={cn("mt-2 flex flex-wrap gap-2", isOwn && "justify-end")}>
{message.attachments.map((attachment) => (
<ChatMessageAttachment
key={attachment.storageId}
attachment={attachment}
tone={isOwn ? "dark" : "light"}
/>
))}
</div>
)}
<p className="whitespace-pre-wrap text-sm">{message.body}</p>
<p
className={cn(
"mt-1 text-right text-xs",
@ -408,30 +499,114 @@ export function TicketChatPanel({ ticketId }: TicketChatPanelProps) {
) : !canPost ? (
<p className="text-center text-sm text-slate-400">Voce nao pode enviar mensagens neste chat</p>
) : (
<div className="flex items-end gap-2">
<textarea
ref={inputRef}
value={draft}
onChange={(e) => setDraft(e.target.value)}
onKeyDown={handleKeyDown}
placeholder="Digite sua mensagem..."
className="max-h-24 min-h-[40px] flex-1 resize-none rounded-lg border border-slate-200 px-3 py-2 text-sm focus:border-black focus:outline-none focus:ring-1 focus:ring-black"
rows={1}
disabled={isSending}
/>
<Button
type="button"
onClick={handleSend}
disabled={!draft.trim() || isSending}
className="flex size-10 items-center justify-center rounded-lg bg-black text-white hover:bg-black/90"
>
{isSending ? (
<Spinner className="size-4" />
) : (
<Send className="size-4" />
)}
</Button>
</div>
<>
{(audioRecorder.isRecording || audioRecorder.isProcessing) && (
<div
className={cn(
"mb-2 flex items-center justify-between gap-3 rounded-lg border px-3 py-2 text-xs",
audioRecorder.isRecording
? "border-rose-200 bg-rose-50 text-rose-700"
: "border-slate-200 bg-slate-50 text-slate-600"
)}
>
{audioRecorder.isRecording ? (
<>
<div className="flex flex-1 items-center gap-3">
<span className="size-2 rounded-full bg-rose-500 animate-pulse" />
<span className="font-semibold tabular-nums">
{formatDuration(audioRecorder.durationSeconds)} / {formatDuration(MAX_AUDIO_DURATION_SECONDS)}
</span>
<div className="flex h-6 flex-1 items-center gap-[2px]">
{audioRecorder.levels.map((level, index) => (
<span
key={index}
className="w-[2px] rounded-full bg-rose-400"
style={{ height: `${Math.max(4, Math.round(level * 22))}px` }}
/>
))}
</div>
</div>
<Button
type="button"
variant="ghost"
size="icon"
onClick={audioRecorder.cancelRecording}
className="size-8 text-rose-600 hover:bg-rose-100"
aria-label="Descartar áudio"
>
<Trash2 className="size-4" />
</Button>
</>
) : (
<div className="flex items-center gap-2">
<Spinner className="size-3.5" />
Anexando áudio...
</div>
)}
</div>
)}
{pendingAudio && (
<div className="mb-2 flex items-center gap-2 rounded-lg border border-slate-200 bg-white px-2 py-1">
<audio controls src={pendingAudio.previewUrl} className="h-8 w-44" />
<button
type="button"
onClick={() => {
if (pendingAudio.previewUrl) {
URL.revokeObjectURL(pendingAudio.previewUrl)
}
setPendingAudio(null)
}}
className="flex size-7 items-center justify-center rounded-full text-slate-500 hover:bg-slate-100"
aria-label="Remover áudio"
>
<X className="size-3.5" />
</button>
</div>
)}
<div className="flex items-end gap-2">
<textarea
ref={inputRef}
value={draft}
onChange={(e) => setDraft(e.target.value)}
onKeyDown={handleKeyDown}
placeholder="Digite sua mensagem..."
className="max-h-24 min-h-[40px] flex-1 resize-none rounded-lg border border-slate-200 px-3 py-2 text-sm focus:border-black focus:outline-none focus:ring-1 focus:ring-black"
rows={1}
disabled={isSending || audioBusy}
/>
<Button
type="button"
variant="ghost"
size="icon"
className="size-10 text-slate-500 hover:bg-slate-100 hover:text-slate-700"
onClick={audioRecorder.isRecording ? audioRecorder.stopRecording : audioRecorder.startRecording}
disabled={isSending || audioRecorder.isProcessing}
aria-label={audioRecorder.isRecording ? "Parar gravação de áudio" : "Gravar áudio"}
>
{audioRecorder.isProcessing ? (
<Spinner className="size-4" />
) : audioRecorder.isRecording ? (
<Square className="size-4" />
) : (
<Mic className="size-4" />
)}
</Button>
<Button
type="button"
onClick={handleSend}
disabled={(!draft.trim() && !pendingAudio) || isSending || audioBusy}
className="flex size-10 items-center justify-center rounded-lg bg-black text-white hover:bg-black/90"
>
{isSending ? (
<Spinner className="size-4" />
) : (
<Send className="size-4" />
)}
</Button>
</div>
</>
)}
</div>
</Card>

84
tests/audio-utils.test.ts Normal file
View file

@ -0,0 +1,84 @@
import { describe, it, expect } from "bun:test"
import {
buildAudioFileName,
normalizeMimeType,
pickSupportedMimeType,
} from "@/components/chat/audio-recorder-utils"
import {
extractPeaks,
formatAttachmentSize,
formatDuration,
isAudioAttachment,
isImageAttachment,
} from "@/components/chat/chat-attachment-utils"
// Unit tests for the audio-recorder helpers: MIME-type normalization,
// codec negotiation, and generated recording file names.
describe("audio-recorder-utils", () => {
  it("normaliza o mimeType removendo parametros", () => {
    // Codec parameters after ";" are stripped, keeping only type/subtype.
    expect(normalizeMimeType("audio/ogg;codecs=opus")).toBe("audio/ogg")
  })
  it("seleciona o primeiro mime suportado", () => {
    // The candidate list is probed in order; the first supported wins.
    const supported = new Set(["audio/ogg", "audio/mp4"])
    const result = pickSupportedMimeType((mimeType) => supported.has(mimeType))
    expect(result).toBe("audio/ogg")
  })
  it("retorna vazio quando nenhum mime for suportado", () => {
    // Empty string signals "let the browser pick its default".
    const result = pickSupportedMimeType(() => false)
    expect(result).toBe("")
  })
  it("gera nome de arquivo com extensao correta", () => {
    // Fixed timestamp keeps the generated name deterministic; colons and
    // dots in the ISO string are replaced with dashes.
    const fixedDate = new Date("2025-01-01T12:34:56.789Z")
    const fileName = buildAudioFileName("audio/mpeg", fixedDate)
    expect(fileName).toBe("audio-2025-01-01T12-34-56-789Z.mp3")
  })
  it("usa webm como fallback para mimeType desconhecido", () => {
    const fixedDate = new Date("2025-01-01T12:34:56.789Z")
    const fileName = buildAudioFileName("audio/unknown", fixedDate)
    expect(fileName).toBe("audio-2025-01-01T12-34-56-789Z.webm")
  })
})
// Unit tests for chat-attachment helpers: size/duration formatting,
// attachment-kind detection, and waveform peak extraction.
describe("chat-attachment-utils", () => {
  it("formata tamanhos de arquivo em bytes, KB e MB", () => {
    // undefined size yields null so the UI can omit the label entirely.
    expect(formatAttachmentSize(undefined)).toBeNull()
    expect(formatAttachmentSize(512)).toBe("512B")
    expect(formatAttachmentSize(1024)).toBe("1KB")
    expect(formatAttachmentSize(1024 * 1024)).toBe("1.0MB")
  })
  it("formata duracao em minutos e segundos", () => {
    // Always zero-padded mm:ss.
    expect(formatDuration(0)).toBe("00:00")
    expect(formatDuration(5)).toBe("00:05")
    expect(formatDuration(65)).toBe("01:05")
  })
  it("detecta anexos de audio por type ou extensao", () => {
    // Either a known audio extension or an audio/* MIME type qualifies.
    expect(isAudioAttachment({ name: "voz.mp3", type: null })).toBe(true)
    expect(isAudioAttachment({ name: "arquivo.dat", type: "audio/ogg" })).toBe(true)
    expect(isAudioAttachment({ name: "foto.png", type: null })).toBe(false)
  })
  it("detecta anexos de imagem por type ou extensao", () => {
    // Extension matching is case-insensitive (".PNG" counts as an image).
    expect(isImageAttachment({ name: "foto.PNG", type: null })).toBe(true)
    expect(isImageAttachment({ name: "arquivo.txt", type: "image/jpeg" })).toBe(true)
    expect(isImageAttachment({ name: "audio.mp3", type: null })).toBe(false)
  })
  it("extrai picos normalizados com quantidade de barras fixa", () => {
    // Minimal stand-in for a Web Audio AudioBuffer: only getChannelData
    // is consumed by extractPeaks.
    const buffer = {
      getChannelData: () => new Float32Array([0, 0.5, 0.2, -0.8, 0.1, -0.1, 0.9, 0.3]),
    } as unknown as AudioBuffer
    const peaks = extractPeaks(buffer, 4)
    expect(peaks).toHaveLength(4)
    // Peaks are normalized so the loudest bucket maps to 1...
    expect(Math.max(...peaks)).toBeCloseTo(1, 5)
    // ...and every bucket stays within the [0, 1] range.
    for (const value of peaks) {
      expect(value).toBeGreaterThanOrEqual(0)
      expect(value).toBeLessThanOrEqual(1)
    }
  })
})