"use client"

import { useCallback, useEffect, useRef, useState } from "react"

import { buildAudioFileName, pickSupportedMimeType } from "./audio-recorder-utils"

type AudioRecorderPayload = {
  file: File
  durationSeconds: number
}

type AudioRecorderOptions = {
  onAudioReady: (payload: AudioRecorderPayload) => Promise<void>
  onError?: (message: string) => void
  maxDurationSeconds?: number
  maxFileSizeBytes?: number
  audioBitsPerSecond?: number
  levelBars?: number
}

type AudioRecorderState = {
  isRecording: boolean
  isProcessing: boolean
  durationSeconds: number
  levels: number[]
  startRecording: () => Promise<void>
  stopRecording: () => void
  cancelRecording: () => void
}
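
/**
 * React hook that records microphone audio with MediaRecorder, exposes a
 * simple level meter driven by the Web Audio API, and delivers the finished
 * recording to `onAudioReady` as a `File` together with its duration in
 * seconds. Recordings can be cancelled, are capped at `maxDurationSeconds`,
 * and are rejected via `onError` when they exceed `maxFileSizeBytes`.
 */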
export function useAudioRecorder(options: AudioRecorderOptions): AudioRecorderState {
  const {
    onAudioReady,
    onError,
    maxDurationSeconds = 300,
    maxFileSizeBytes = 5 * 1024 * 1024,
    audioBitsPerSecond = 64000,
    levelBars = 32,
  } = options

  const [isRecording, setIsRecording] = useState(false)
  const [isProcessing, setIsProcessing] = useState(false)
  const [durationSeconds, setDurationSeconds] = useState(0)
  const [levels, setLevels] = useState<number[]>(() => Array.from({ length: levelBars }, () => 0))
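
  // Recorder plumbing lives in refs (not React state) so event handlers, timers,
  // and cleanup always read the latest values without re-creating callbacks.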
  const durationRef = useRef(0)
  const recorderRef = useRef<MediaRecorder | null>(null)
  const streamRef = useRef<MediaStream | null>(null)
  const audioContextRef = useRef<AudioContext | null>(null)
  const analyserRef = useRef<AnalyserNode | null>(null)
  const chunksRef = useRef<BlobPart[]>([])
  const timerRef = useRef<number | null>(null)
  const stopTimeoutRef = useRef<number | null>(null)
  const rafRef = useRef<number | null>(null)
  const cancelRef = useRef(false)
  const mountedRef = useRef(true)

  // Release everything the recorder holds: timers, the level-meter animation
  // frame, the microphone stream, the AudioContext, and any buffered chunks.
  const cleanup = useCallback(() => {
    if (timerRef.current) {
      clearInterval(timerRef.current)
      timerRef.current = null
    }
    if (stopTimeoutRef.current) {
      clearTimeout(stopTimeoutRef.current)
      stopTimeoutRef.current = null
    }
    if (rafRef.current) {
      cancelAnimationFrame(rafRef.current)
      rafRef.current = null
    }
    if (streamRef.current) {
      streamRef.current.getTracks().forEach((track) => track.stop())
      streamRef.current = null
    }
    if (audioContextRef.current) {
      void audioContextRef.current.close()
      audioContextRef.current = null
    }
    analyserRef.current = null
    recorderRef.current = null
    chunksRef.current = []
  }, [])

  // Track mount status and, if the component unmounts mid-recording, discard the
  // take and release the microphone so no state updates land after unmount.
  // Declared after `cleanup` so the stable callback can be listed as a dependency.
  useEffect(() => {
    mountedRef.current = true
    return () => {
      mountedRef.current = false
      cancelRef.current = true
      cleanup()
    }
  }, [cleanup])
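
  // Downsample the analyser's time-domain data into `levelBars` buckets, using
  // the mean deviation from the 128 midpoint (normalized to 0..1) as a crude
  // volume level, then reschedule via requestAnimationFrame while mounted.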
  const updateLevels = useCallback(() => {
    const analyser = analyserRef.current
    if (!analyser) return
    const bufferLength = analyser.fftSize
    const dataArray = new Uint8Array(bufferLength)
    analyser.getByteTimeDomainData(dataArray)

    const step = Math.floor(bufferLength / levelBars)
    const nextLevels = Array.from({ length: levelBars }, (_, index) => {
      let sum = 0
      const start = index * step
      const end = Math.min(start + step, bufferLength)
      for (let i = start; i < end; i += 1) {
        sum += Math.abs(dataArray[i] - 128)
      }
      const avg = sum / Math.max(1, end - start)
      return Math.min(1, avg / 128)
    })

    if (mountedRef.current) {
      setLevels(nextLevels)
      rafRef.current = requestAnimationFrame(updateLevels)
    }
  }, [levelBars])
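
  // Stop the active MediaRecorder; normal teardown happens in its onstop handler,
  // with an immediate cleanup fallback if stopping throws.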
  const stopRecording = useCallback(() => {
    if (!recorderRef.current || !isRecording) return
    setIsRecording(false)
    try {
      recorderRef.current.stop()
    } catch (error) {
      console.error("Failed to stop recording:", error)
      cleanup()
    }
  }, [cleanup, isRecording])

  // Like stopRecording, but marks the take as cancelled so onstop discards it.
  const cancelRecording = useCallback(() => {
    cancelRef.current = true
    stopRecording()
  }, [stopRecording])
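
  // Ask for the microphone, wire the stream into an analyser for the level meter,
  // and start a MediaRecorder capped at maxDurationSeconds.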
  const startRecording = useCallback(async () => {
    if (isRecording || isProcessing) return
    if (typeof navigator === "undefined" || !navigator.mediaDevices?.getUserMedia) {
      onError?.("Audio recording is not available on this device.")
      return
    }

    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
      streamRef.current = stream

      const audioContext = new AudioContext()
      const analyser = audioContext.createAnalyser()
      analyser.fftSize = 256
      const source = audioContext.createMediaStreamSource(stream)
      source.connect(analyser)
      audioContextRef.current = audioContext
      analyserRef.current = analyser

      const mimeType = pickSupportedMimeType()
      const recorderOptions: MediaRecorderOptions = mimeType
        ? { mimeType, audioBitsPerSecond }
        : { audioBitsPerSecond }

      const recorder = new MediaRecorder(stream, recorderOptions)
      recorderRef.current = recorder
      chunksRef.current = []
      cancelRef.current = false

      recorder.ondataavailable = (event) => {
        if (event.data.size > 0) {
          chunksRef.current.push(event.data)
        }
      }
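
      // When recording stops: assemble the blob, release the mic and audio graph,
      // then either discard the take (cancelled), reject it (over the size limit),
      // or hand the file to onAudioReady.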
      recorder.onstop = async () => {
        const blobType = recorder.mimeType || mimeType || "audio/webm"
        const blob = new Blob(chunksRef.current, { type: blobType })
        chunksRef.current = []

        cleanup()

        if (cancelRef.current) {
          if (mountedRef.current) {
            setLevels(Array.from({ length: levelBars }, () => 0))
          }
          return
        }

        if (blob.size > maxFileSizeBytes) {
          const limitMb = Math.round(maxFileSizeBytes / (1024 * 1024))
          onError?.(`Audio exceeds the ${limitMb}MB limit. Try recording for a shorter time.`)
          if (mountedRef.current) {
            setLevels(Array.from({ length: levelBars }, () => 0))
          }
          return
        }

        const fileName = buildAudioFileName(blobType)
        const file = new File([blob], fileName, { type: blobType })

        setIsProcessing(true)
        try {
          await onAudioReady({ file, durationSeconds: durationRef.current })
        } catch (error) {
          const message = error instanceof Error ? error.message : "Failed to send audio."
          onError?.(message)
        } finally {
          if (mountedRef.current) {
            setIsProcessing(false)
            setLevels(Array.from({ length: levelBars }, () => 0))
          }
        }
      }
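
      // Begin capturing. A 1-second interval drives the visible duration counter,
      // and a hard timeout guarantees the recording stops at maxDurationSeconds.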
      recorder.start()
      durationRef.current = 0
      setDurationSeconds(0)
      setIsRecording(true)
      updateLevels()

      timerRef.current = window.setInterval(() => {
        setDurationSeconds((prev) => {
          const next = prev + 1
          durationRef.current = next
          if (next >= maxDurationSeconds) {
            stopRecording()
            return next
          }
          return next
        })
      }, 1000)

      stopTimeoutRef.current = window.setTimeout(() => {
        stopRecording()
      }, maxDurationSeconds * 1000)
    } catch (error) {
      console.error("Failed to start recording:", error)
      onError?.("Could not start audio recording.")
      cleanup()
    }
  }, [
    audioBitsPerSecond,
    cleanup,
    isProcessing,
    isRecording,
    levelBars,
    maxDurationSeconds,
    maxFileSizeBytes,
    onAudioReady,
    onError,
    stopRecording,
    updateLevels,
  ])

  return {
    isRecording,
    isProcessing,
    durationSeconds,
    levels,
    startRecording,
    stopRecording,
    cancelRecording,
  }
}
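
/*
 * Usage sketch (illustrative only; `uploadVoiceNote` and `showToast` are
 * hypothetical names, not part of this module):
 *
 *   const recorder = useAudioRecorder({
 *     onAudioReady: async ({ file, durationSeconds }) => {
 *       await uploadVoiceNote(file, durationSeconds)
 *     },
 *     onError: (message) => showToast(message),
 *     maxDurationSeconds: 120,
 *   })
 *
 *   // Start/stop from a mic button, render `recorder.levels` as a waveform,
 *   // and display `recorder.durationSeconds` while `recorder.isRecording`.
 */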