fix: audio set progress error
hibig committed Dec 3, 2024
1 parent 687dabc commit 83294ab
Showing 8 changed files with 135 additions and 32 deletions.
2 changes: 1 addition & 1 deletion config/config.ts
@@ -14,7 +14,7 @@ const isProduction = env === 'production';
 const t = Date.now();
 export default defineConfig({
   proxy: {
-    ...proxy('http://192.168.50.5')
+    ...proxy('http://192.168.50.4')
   },
   history: {
     type: 'hash'
73 changes: 70 additions & 3 deletions src/components/speech-content/audio-player.tsx
@@ -1,3 +1,4 @@
+import _ from 'lodash';
 import React, {
   forwardRef,
   useCallback,
@@ -19,6 +20,8 @@ interface AudioPlayerProps {
   onFinish?: () => void;
   onAnalyse?: (analyseData: any, frequencyBinCount: any) => void;
   onAudioprocess?: (current: number) => void;
+  onPlay?: () => void;
+  onPause?: () => void;
 }

 const AudioPlayer: React.FC<
@@ -49,6 +52,12 @@ const AudioPlayer: React.FC<
     analyser.current.connect(audioContext.current.destination);
   }, []);

+  const debouncePause = _.debounce(() => {
+    if (wavesurfer.current?.isPlaying()) {
+      wavesurfer.current?.pause();
+    }
+  }, 100);
+
   const listenEvents = () => {
     wavesurfer.current?.on('ready', (duration: number) => {
       props.onReady?.(duration);
@@ -61,16 +70,31 @@
       props.onFinish?.();
     });
     wavesurfer.current?.on('audioprocess', (current: number) => {
+      console.log('audioprocess==========:', current);
       props.onAudioprocess?.(current);
     });
+    wavesurfer.current?.on('seeking', (current: number) => {
+      console.log('seeking', current);
+    });
+
+    wavesurfer.current?.on('timeupdate', (current: number) => {
+      console.log('timeupdate', current);
+    });
+
     wavesurfer.current?.on('play', () => {
       analyser.current?.getByteFrequencyData(dataArray.current);
       props.onAnalyse?.(dataArray.current, analyser);
+      props.onPlay?.();
     });
+
+    wavesurfer.current?.on('pause', () => {
+      analyser.current?.getByteFrequencyData(dataArray.current);
+      props.onAnalyse?.(dataArray.current, analyser);
+      props.onPause?.();
+    });
   };

   const createWavesurfer = () => {
+    wavesurfer.current?.destroy();
     wavesurfer.current = WaveSurfer.create({
       container: container.current,
       url: audioUrl,
@@ -100,15 +124,54 @@
     }
   };

-  const play = () => {
+  const play = async () => {
     if (wavesurfer.current) {
       wavesurfer.current.play();
     }
   };
+  const isPlaying = () => {
+    if (wavesurfer.current) {
+      wavesurfer.current.isPlaying();
+    }
+  };
+
+  const playPause = () => {
+    if (wavesurfer.current) {
+      wavesurfer.current.playPause();
+    }
+  };
+
+  const debounceSeekTo = _.debounce((value: number) => {
+    if (wavesurfer.current) {
+      wavesurfer.current.seekTo(value);
+    }
+  }, 50);
+
+  const debounceSetTime = _.debounce((value: number) => {
+    if (wavesurfer.current) {
+      wavesurfer.current.setTime(value);
+    }
+  }, 50);
+
   const seekTo = (value: number) => {
     if (wavesurfer.current) {
-      wavesurfer.current.seekTo(value);
+      debounceSeekTo(value);
     }
   };
+  const setTime = (value: number) => {
+    if (wavesurfer.current) {
+      debounceSetTime(value);
+    }
+  };
+
+  const seekAndPlay = (value: number) => {
+    if (wavesurfer.current) {
+      wavesurfer.current.seekTo(value);
+      wavesurfer.current.once('seeking', () => {
+        wavesurfer.current
+          ?.play()
+          .catch((error) => console.error('Playback error:', error));
+      });
+    }
+  };
+
@@ -130,7 +193,11 @@ const AudioPlayer: React.FC<
     play,
     pause,
     duration,
-    seekTo
+    seekTo,
+    setTime,
+    playPause,
+    isPlaying,
+    wavesurfer
   };
 });
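Reviewer note, not part of the diff: the heart of this fix is that `seekTo` and `setTime` are now debounced, so a slider drag no longer fires one seek per input event, and `seekAndPlay` waits for the `seeking` event before calling `play()`. A minimal standalone sketch of the debounce pattern, with a hypothetical `SeekTarget` standing in for the wavesurfer instance:

```ts
import _ from 'lodash';

// Hypothetical stand-in for the wavesurfer instance used above.
interface SeekTarget {
  seekTo(fraction: number): void; // expects a fraction in [0, 1]
}

// Collapse a burst of slider events into one trailing-edge seek,
// mirroring the _.debounce(..., 50) pattern in this commit.
function makeDebouncedSeek(target: SeekTarget, waitMs = 50) {
  return _.debounce((fraction: number) => {
    target.seekTo(fraction);
  }, waitMs);
}

// Dragging emits many values, but only the last one inside the
// 50 ms window reaches the player.
const seek = makeDebouncedSeek({ seekTo: (f) => console.log('seek', f) });
[0.1, 0.2, 0.3].forEach(seek); // logs once: "seek 0.3"
```

Lodash's `debounce` is trailing-edge by default, so only the final position of a drag is applied, which keeps rapid progress updates from racing the player's internal seek state.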
57 changes: 36 additions & 21 deletions src/components/speech-content/speech-item.tsx
@@ -1,5 +1,4 @@
 import AudioAnimation from '@/components/audio-animation';
-import useResizeObserver from '@/components/logs-viewer/use-size';
 import {
   DownloadOutlined,
   PauseCircleOutlined,
@@ -9,7 +8,7 @@ import { useIntl } from '@umijs/max';
 import { Button, Slider, Tooltip } from 'antd';
 import dayjs from 'dayjs';
 import _, { throttle } from 'lodash';
-import React, { useCallback, useEffect, useRef, useState } from 'react';
+import React, { useCallback, useRef, useState } from 'react';
 import AudioPlayer from './audio-player';
 import './styles/index.less';
 import './styles/slider-progress.less';
@@ -39,7 +38,7 @@ interface SpeechContentProps {
 const SpeechItem: React.FC<SpeechContentProps> = (props) => {
   const intl = useIntl();
   const [isPlay, setIsPlay] = useState(props.autoplay);
-  const [duration, setDuration] = useState(0);
+  const [duration, setDuration] = useState<number>(0);
   const [animationSize, setAnimationSize] = useState({ width: 900, height: 0 });
   const [currentTime, setCurrentTime] = useState(0);
   const [audioChunks, setAudioChunks] = useState<any>({
@@ -49,17 +48,26 @@ const SpeechItem: React.FC<SpeechContentProps> = (props) => {
   const wrapper = useRef<any>(null);
   const ref = useRef<any>(null);

-  const size = useResizeObserver(wrapper);
-
-  const handlePlay = () => {
-    if (isPlay) {
-      ref.current?.pause();
-      setIsPlay(false);
-      return;
-    }
-    ref.current?.play();
-    setIsPlay(true);
-  };
+  const handlePlay = useCallback(async () => {
+    try {
+      console.log(
+        'isPlay:',
+        isPlay,
+        ref.current?.wavesurfer.current?.isPlaying()
+      );
+      if (ref.current?.wavesurfer.current?.isPlaying()) {
+        ref.current?.pause();
+        setIsPlay(false);
+        return;
+      } else {
+        await ref.current?.wavesurfer.current?.play();
+        setIsPlay(true);
+      }
+    } catch (error) {
+      console.log('error:', error);
+    }
+  }, [ref.current]);

   const handleOnAnalyse = useCallback((data: any, analyser: any) => {
     setAudioChunks((pre: any) => {
@@ -73,15 +81,19 @@ const SpeechItem: React.FC<SpeechContentProps> = (props) => {
   const handleOnFinish = useCallback(() => {
     setIsPlay(false);
   }, []);
+  const handleOnPlay = useCallback(() => {
+    setIsPlay(true);
+  }, []);
+  const handleOnPause = useCallback(() => {
+    setIsPlay(false);
+  }, []);

   const throttleUpdateCurrentTime = throttle((current: number) => {
     setCurrentTime(current);
   }, 100);

   const handleOnAudioprocess = useCallback(
     (current: number) => {
-      console.log('current:', current);
-      // setCurrentTime(() => current);
       throttleUpdateCurrentTime(current);
     },
     [throttleUpdateCurrentTime]
@@ -103,15 +115,16 @@ const SpeechItem: React.FC<SpeechContentProps> = (props) => {
     });
   }, []);

-  const handleSliderChange = (value: number) => {
-    ref.current?.seekTo(value);
+  const debounceSeek = _.debounce((value: number) => {
+    ref.current?.seekTo(value / duration);
     setCurrentTime(value);
+  }, 200);
+
+  const handleSliderChange = (value: number) => {
+    debounceSeek(value);
   };

-  useEffect(() => {
-    console.log('width:', size);
-  }, [size]);
-  const onDownload = () => {
+  const onDownload = useCallback(() => {
     const url = props.audioUrl || '';
     const filename = `audio-${dayjs().format('YYYYMMDDHHmmss')}.${props.format}`;

@@ -121,7 +134,7 @@ const SpeechItem: React.FC<SpeechContentProps> = (props) => {
     document.body.appendChild(link);
     link.click();
     link.remove();
-  };
+  }, [props.audioUrl, props.format]);

   return (
     <div>
@@ -137,6 +150,8 @@ const SpeechItem: React.FC<SpeechContentProps> = (props) => {
         onReady={handleReay}
         onClick={handleOnClick}
         onFinish={handleOnFinish}
+        onPlay={handleOnPlay}
+        onPause={handleOnPause}
         onAnalyse={handleOnAnalyse}
         onAudioprocess={handleOnAudioprocess}
         ref={ref}
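Reviewer note, not part of the diff: two details above are easy to miss. First, `isPlay` is now driven by the player's own `play`/`pause` events via `handleOnPlay`/`handleOnPause` instead of being toggled optimistically before the player reacts. Second, `debounceSeek` divides the slider value by `duration`, which implies the slider reports a position in seconds while wavesurfer's `seekTo` expects a fraction in [0, 1]. A hedged sketch of that conversion (helper name hypothetical):

```ts
// Map a slider position in seconds to the 0-1 fraction expected by
// wavesurfer's seekTo, guarding against a not-yet-loaded track.
function toSeekFraction(valueSeconds: number, durationSeconds: number): number {
  if (!Number.isFinite(durationSeconds) || durationSeconds <= 0) {
    return 0; // duration unknown: seek to the start rather than NaN/Infinity
  }
  return Math.min(Math.max(valueSeconds / durationSeconds, 0), 1);
}

console.log(toSeekFraction(30, 120)); // 0.25
console.log(toSeekFraction(30, 0));   // 0 (avoids the divide-by-zero case)
```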
4 changes: 4 additions & 0 deletions src/components/speech-content/styles/index.less
@@ -99,3 +99,7 @@
     }
   }
 }
+
+.audio-container {
+  pointer-events: none;
+}
6 changes: 4 additions & 2 deletions src/locales/en-US/models.ts
@@ -59,9 +59,11 @@ export default {
   'models.form.backend': 'Backend',
   'models.form.backend_parameters': 'Backend Parameters',
   'models.search.gguf.tips':
-    '1. GGUF models backend is llama-box(supports Linux, macOS and Windows).',
+    '1. GGUF models use llama-box(supports Linux, macOS and Windows).',
   'models.search.vllm.tips':
-    '2. Non-GGUF models use vox-box for audio and vLLM for others.',
+    '2. Non-GGUF models use vox-box for audio and vLLM(x86 Linux only) for others.',
+  'models.search.voxbox.tips':
+    '3. To deploy an audio model, uncheck the GGUF checkbox.',
   'models.form.ollamalink': 'Find More in Ollama Library',
   'models.form.backend_parameters.llamabox.placeholder':
     'e.g., --ctx-size=8192',
5 changes: 3 additions & 2 deletions src/locales/zh-CN/models.ts
@@ -58,9 +58,10 @@ export default {
   'models.form.backend': '后端',
   'models.form.backend_parameters': '后端参数',
   'models.search.gguf.tips':
-    '1. GGUF 模型后端为 llama-box(支持 Linux, macOS 和 Windows)。',
+    '1. GGUF 模型用 llama-box(支持 Linux, macOS 和 Windows)。',
   'models.search.vllm.tips':
-    '2. 非 GGUF 的音频模型用 vox-box,其它非 GGUF 的模型用 vLLM。',
+    '2. 非 GGUF 的音频模型用 vox-box,其它非 GGUF 的模型用 vLLM(仅支持 x86 Linux)。',
+  'models.search.voxbox.tips': '3. 若需部署音频模型取消勾选 GGUF 复选框。',
   'models.form.ollamalink': '在 Ollama Library 中查找',
   'models.form.backend_parameters.llamabox.placeholder':
     '例如,--ctx-size=8192',
3 changes: 3 additions & 0 deletions src/pages/llmodels/components/search-model.tsx
@@ -256,6 +256,9 @@ const SearchModel: React.FC<SearchInputProps> = (props) => {
               <div>
                 {intl.formatMessage({ id: 'models.search.vllm.tips' })}
               </div>
+              <div>
+                {intl.formatMessage({ id: 'models.search.voxbox.tips' })}
+              </div>
             </div>
           }
         >
17 changes: 14 additions & 3 deletions src/pages/playground/components/ground-tts.tsx
@@ -52,7 +52,17 @@ const GroundLeft: React.FC<MessageProps> = forwardRef((props, ref) => {
       autoplay: boolean;
       audioUrl: string;
     }[]
-  >([]);
+  >([
+    {
+      input: '',
+      voice: '',
+      format: '',
+      speed: 0,
+      uid: 0,
+      autoplay: false,
+      audioUrl: ''
+    }
+  ]);
   const locale = getLocale();
   const intl = useIntl();
   const [searchParams] = useSearchParams();
@@ -140,6 +150,7 @@ const GroundLeft: React.FC<MessageProps> = forwardRef((props, ref) => {
     setMessageId();
     setTokenResult(null);
     setCurrentPrompt(current?.content || '');
+    setMessageList([]);

     controllerRef.current?.abort?.();
     controllerRef.current = new AbortController();
@@ -157,7 +168,7 @@ const GroundLeft: React.FC<MessageProps> = forwardRef((props, ref) => {

     console.log('result:', res);

-    if (res?.status_code !== 200) {
+    if (res?.status_code && res?.status_code !== 200) {
       setTokenResult({
         error: true,
         errorMessage:
@@ -212,7 +223,7 @@ const GroundLeft: React.FC<MessageProps> = forwardRef((props, ref) => {
     const res = await queryModelVoices({
       model: value
     });
-    if (res?.status_code !== 200) {
+    if (res?.status_code && res?.status_code !== 200) {
       setVoiceError({
         error: true,
         errorMessage:
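Reviewer note, not part of the diff: the `status_code` guard is the subtle change here. The old `res?.status_code !== 200` marked any response lacking a `status_code` field as an error, because `undefined !== 200` is true, so a successful audio payload without that field fell into the error branch. Requiring the field to be present first confines the error branch to responses that actually carry a status. A small sketch of the two predicates (response shape hypothetical):

```ts
interface ApiResult {
  status_code?: number;
  data?: unknown;
}

// Old guard: misfires on any result that lacks status_code.
const isErrorOld = (res?: ApiResult) => res?.status_code !== 200;

// New guard: only a present, non-200 status counts as an error.
const isErrorNew = (res?: ApiResult) =>
  Boolean(res?.status_code && res.status_code !== 200);

console.log(isErrorOld({ data: 'audio-bytes' })); // true  (false positive)
console.log(isErrorNew({ data: 'audio-bytes' })); // false
console.log(isErrorNew({ status_code: 500 }));    // true
```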
