音频传输过程中,我们可以对采集到的音频数据进行前处理和后处理,获取想要的播放效果。对于有自行处理音频数据需求的场景,百家云提供原始数据功能。你可以在将数据发送给编码器前进行前处理,对捕捉到的音频信号进行修改。相关函数接口在 c++-sdk 2.5.5 及以上版本支持。
用户可以调用 setLocalVideoRenderCallback 或者 setRemoteVideoRenderCallback 开启视频自定义渲染,之后可以通过接口 onRenderVideoFrame 接收本地采集出的原始视频数据,或者远端解码后渲染前的视频数据,可以做额外处理,再进行渲染。
class BRTCVideoRenderViewMgr : public BRTCVideoRenderCallback { public: virtual void onRenderVideoFrame(const char* userId, BRTCVideoStreamType streamType, BRTCVideoFrame* frame) { BRTCVideoRenderView* viewPtr = nullptr; for (auto& itr : m_mapViews) { if (itr.first == std::make_pair(std::string(userId),streamType) && itr.second != nullptr) viewPtr = itr.second; break; } if (viewPtr != nullptr) { viewPtr->AppendVideoFrame((unsigned char*)frame->data, frame->length, frame->width, frame->height,frame->videoFormat, frame->rotation); } } private: std::multimap<std::pair<std::string, BRTCVideoStreamType>, BRTCVideoRenderView*> m_mapViews; // userId 和 VideoView* 的映射 map } BRTCVideoRenderViewMgr* getShareViewMgrInstance() { static BRTCVideoRenderViewMgr uniqueInstance; return &uniqueInstance; } void BRTCCore::onEnterRoom() { m_pBrtc->setLocalVideoRenderCallback(BRTCVideoPixelFormat_ARGB32, BRTCVideoBufferType_Buffer, (BRTCVideoRenderCallback*)getShareViewMgrInstance()); // 设置视频编码参数 m_pBrtc->setVideoEncoderParam(videoEncParams); // 本地图像渲染设置 m_pBrtc->setLocalRenderParams(renderParams); // 关闭远端画面镜像 m_pBrtc->setVideoEncoderMirror(false); // 设置网络 qos 参数,弱网情况下保清晰 m_pBrtc->setNetworkQosParam(qosParams); // 启用音量大小提示 m_pBrtc->enableAudioVolumeEvaluation(300); // 打开双路编码功能 m_pBrtc->enableEncSmallVideoStream(true, param); // 打开本地摄像头视频,预览使用自定义的渲染 m_pBrtc->startLocalPreview(nullptr); // 本地视频推送到服务端 m_pBrtc->muteLocalVideo(false); } void BRTCCore::onVideoAvailable(std::string userId, bool available) { RemoteUserInfo* remoteInfo = CDataCenter::GetInstance()->FindRemoteUser(userId); if (available && remoteInfo != nullptr && remoteInfo->user_id != "") { m_pBrtc->startRemoteView(userId.c_str(), BRTCVideoStreamTypeBig, NULL); m_pBrtc->setRemoteVideoRenderCallback( userId.c_str(), BRTCVideoPixelFormat_ARGB32, BRTCVideoBufferType_Buffer, (BRTCVideoRenderCallback*)getShareViewMgrInstance()); } m_pVideoViewLayout->muteVideo(UTF82Wide(userId), BRTCVideoStreamTypeBig, !available); }
用户可以调用 enableCustomVideoCapture 接口开启视频自定义采集,开启视频自定义采集后 SDK 内部会跳过摄像头设备采集流程,之后通过接口 sendCustomVideoData 向 SDK 投送自己采集或者处理后的视频帧,SDK 会将视频帧进行编码并通过自身的网络模块传输出去。
// Starts pushing a bundled I420 YUV test clip to the SDK as custom-captured
// video, for either the big stream or the substream.
// NOTE(review): the filePath/width/height parameters are ignored — the method
// hard-codes the 320x240 demo clip; confirm this is intentional sample behavior.
void BRTCCore::startCustomCaptureVideo(const std::wstring& filePath, int width, int height,BRTCVideoStreamType streamType) {
    if (streamType == BRTCVideoStreamTypeBig) {
        m_videoFilePath = BrtcUtil::getAppDirectoryW() + L"brtcres/320x240_video.yuv";
        _video_file_length = 0;
        _video_width = 320;
        _video_height = 240;
        // Open once only to measure the total file size; frames are re-read in
        // sendCustomVideoFrame, which wraps back to offset 0 at end of file.
        std::ifstream ifs(m_videoFilePath, std::ifstream::binary);
        if (!ifs) {
            return;
        }
        ifs.seekg(0, std::ios::end);
        _video_file_length = static_cast<uint32_t>(ifs.tellg());
        ifs.close();
        m_bStartCustomCaptureVideo = true;
        // Tell the SDK to skip its internal camera capture for the big stream.
        m_pBrtc->enableCustomVideoCapture(BRTCVideoStreamTypeBig, true);
        if (custom_video_thread_ == nullptr) {
            // Feeder thread: push one frame roughly every 66 ms (~15 fps) until
            // the flag is cleared.
            // NOTE(review): the flag is a plain bool read here and written from
            // another thread without synchronization — consider std::atomic<bool>;
            // the thread object is also never joined/deleted in this chunk.
            auto task = [=]() {
                while (m_bStartCustomCaptureVideo) {
                    sendCustomVideoFrame(BRTCVideoStreamTypeBig);
                    Sleep(66);
                }
            };
            custom_video_thread_ = new std::thread(task);
        }
    } else if (streamType == BRTCVideoStreamTypeSub) {
        // Identical flow for the substream, using the sub-stream state members.
        m_subVideoFilePath = BrtcUtil::getAppDirectoryW() + L"brtcres/320x240_video.yuv";
        _sub_video_file_length = 0;
        _sub_video_width = 320;
        _sub_video_height = 240;
        std::ifstream ifs(m_subVideoFilePath, std::ifstream::binary);
        if (!ifs) {
            return;
        }
        ifs.seekg(0, std::ios::end);
        _sub_video_file_length = static_cast<uint32_t>(ifs.tellg());
        ifs.close();
        m_bStartCustomCaptureSubVideo = true;
        m_pBrtc->enableCustomVideoCapture(BRTCVideoStreamTypeSub, true);
        if (custom_sub_video_thread_ == nullptr) {
            auto task = [=]() {
                while (m_bStartCustomCaptureSubVideo) {
                    sendCustomVideoFrame(BRTCVideoStreamTypeSub);
                    Sleep(66);
                }
            };
            custom_sub_video_thread_ = new std::thread(task);
        }
    }
}

// Reads the next I420 frame from the test file (looping at EOF) and hands it
// to the SDK via sendCustomVideoData. Called repeatedly from the feeder thread.
void BRTCCore::sendCustomVideoFrame(BRTCVideoStreamType streamType) {
    if (streamType == BRTCVideoStreamTypeBig) {
        if (!m_bStartCustomCaptureVideo) {
            return;
        }
        if (m_pBrtc) {
            // NOTE(review): the file is reopened on every frame; caching the
            // stream (as the audio path does) would avoid the per-frame open.
            std::ifstream ifs(m_videoFilePath, std::ifstream::binary);
            if (!ifs) {
                return;
            }
            // I420 frame size: width*height luma bytes plus half again for chroma.
            uint32_t bufferSize = _video_width * _video_height * 3 / 2;
            if (_video_buffer == nullptr) {
                // Lazily allocated; freed elsewhere (not visible in this chunk).
                _video_buffer = new char[bufferSize + 2];
            }
            // Wrap to the start when the next frame would run past end of file.
            if (_offset_videoread + bufferSize > _video_file_length) {
                _offset_videoread = 0;
            }
            ifs.seekg(_offset_videoread);
            ifs.read(_video_buffer, bufferSize);
            _offset_videoread += bufferSize;
            BRTCVideoFrame frame;
            frame.videoFormat = BRTCVideoPixelFormat_I420;
            frame.length = bufferSize;
            frame.data = _video_buffer;
            frame.width = _video_width;
            frame.height = _video_height;
            // Let the SDK generate the capture timestamp (PTS).
            frame.timestamp = m_pBrtc->generateCustomPTS();
            m_pBrtc->sendCustomVideoData(BRTCVideoStreamTypeBig, &frame);
        }
    } else if (streamType == BRTCVideoStreamTypeSub) {
        if (!m_bStartCustomCaptureSubVideo) {
            return;
        }
        if (m_pBrtc) {
            std::ifstream ifs(m_subVideoFilePath, std::ifstream::binary);
            if (!ifs) {
                return;
            }
            uint32_t bufferSize = _sub_video_width * _sub_video_height * 3 / 2;
            if (_sub_video_buffer == nullptr) {
                _sub_video_buffer = new char[bufferSize + 2];
            }
            if (_offset_subvideoread + bufferSize > _sub_video_file_length) {
                _offset_subvideoread = 0;
            }
            ifs.seekg(_offset_subvideoread);
            ifs.read(_sub_video_buffer, bufferSize);
            _offset_subvideoread += bufferSize;
            BRTCVideoFrame frame;
            frame.videoFormat = BRTCVideoPixelFormat_I420;
            frame.length = bufferSize;
            frame.data = _sub_video_buffer;
            frame.width = _sub_video_width;
            frame.height = _sub_video_height;
            frame.timestamp = m_pBrtc->generateCustomPTS();
            m_pBrtc->sendCustomVideoData(BRTCVideoStreamTypeSub, &frame);
        }
    }
}
class BRTCCore : public BRTCAudioFrameCallback { public: // interface BRTCAudioFrameCallback void onCapturedRawAudioFrame(BRTCAudioFrame* frame) override; void onLocalProcessedAudioFrame(BRTCAudioFrame* frame) override; void onCustomAudioRenderingFrame(BRTCAudioFrame* frame) override; private: BRTC* m_pBrtc = nullptr; } BRTCCore::BRTCCore() { m_pBrtc = getBRTCShareInstance(); m_pBrtc->setCallback(this); m_pBrtc->setAudioFrameCallback(this); BRTCAudioFrameCallbackFormat audio_frame_callback_format; audio_frame_callback_format.channel = 1; audio_frame_callback_format.sampleRate = 48000; audio_frame_callback_format.samplesPerCall = 480; m_pBrtc->setCustomAudioRenderingFrameCallbackFormat(&audio_frame_callback_format); m_pDeviceManager = m_pBrtc->getDeviceManager(); m_pAudioEffectMgr = m_pBrtc->getAudioEffectManager(); } void BRTCCore::onCapturedRawAudioFrame(BRTCAudioFrame* frame) { // 保存到本地文件缓存 if (capture_audio_file_ == nullptr) { std::string filePath = BrtcUtil::getAppDirectoryA() + "capture_frame_" + rtc::ToString(frame->sampleRate) + "_" + rtc::ToString(frame->channel) + ".pcm"; ::fopen_s(&capture_audio_file_, filePath.c_str(), "ab+"); } ::fwrite(frame->data, frame->length, 1, capture_audio_file_); ::fflush(capture_audio_file_); } void BRTCCore::onLocalProcessedAudioFrame(BRTCAudioFrame* frame) { // 存到本地文件缓存 if (local_processed_audio_file_ == nullptr) { std::string filePath = BrtcUtil::getAppDirectoryA() + "processed_frame_" + rtc::ToString(frame->sampleRate) + "_" + rtc::ToString(frame->channel) + ".pcm"; ::fopen_s(&local_processed_audio_file_, filePath.c_str(), "ab+"); } ::fwrite(frame->data, frame->length, 1, local_processed_audio_file_); ::fflush(local_processed_audio_file_); } void BRTCCore::onCustomAudioRenderingFrame(BRTCAudioFrame* frame) { // 保存到本地文件缓存 if (custom_audio_render_file_ == nullptr) { std::string filePath = BrtcUtil::getAppDirectoryA() + "custom_rendering_frame_" + rtc::ToString(frame->sampleRate) + "_" + rtc::ToString(frame->channel) + 
".pcm"; ::fopen_s(&custom_audio_render_file_, filePath.c_str(), "ab+"); } ::fwrite(frame->data, frame->length, 1, custom_audio_render_file_); ::fflush(custom_audio_render_file_); }
用户调用 enableCustomAudioCapture 接口开启音频自定义采集,之后通过接口 sendCustomAudioData 将用户自己采集的音频发送给 SDK,并推送给远端用户。SDK 会跳过内部的音频设备采集。注意:开启音频自定义采集会同时开启音频自定义渲染。
// Starts pushing a bundled 48 kHz mono PCM file to the SDK as custom-captured
// audio, feeding it from a dedicated high-priority thread.
// NOTE(review): the filePath/samplerate/channel parameters are ignored — the
// method hard-codes the demo clip; confirm this is intentional sample behavior.
void BRTCCore::startCustomCaptureAudio(const std::wstring& filePath, int samplerate, int channel) {
    // Event used purely as a waitable timer by sendCustomAudioFrame's pacing loop
    // (no SetEvent on it is visible in this chunk, so waits simply time out).
    if (audio_capture_event_ == NULL) {
        audio_capture_event_ = CreateEvent(NULL, TRUE, FALSE, NULL);
    }
    // Push the specified file's audio data to the remote side.
    m_audioFilePath = BrtcUtil::getAppDirectoryW() + L"brtcres/48_1_audio.pcm";
    _audio_samplerate = 48000;
    _audio_channel = 1;
    // Keep one stream open for the whole session (unlike the video path,
    // which reopens per frame).
    if (!audio_ifs_.is_open()) {
        audio_ifs_.open(m_audioFilePath, std::ifstream::binary);
    }
    if (!audio_ifs_.is_open()) {
        return;
    }
    // Measure the total file length, then rewind for sequential frame reads.
    audio_ifs_.seekg(0, std::ios::end);
    _audio_file_length = static_cast<uint32_t>(audio_ifs_.tellg());
    audio_ifs_.seekg(0, std::ios::beg);
    LOG(INFO) << "audio file length: " << _audio_file_length;
    m_bStartCustomCaptureAudio = true;
    // Tell the SDK to skip its internal microphone capture.
    m_pBrtc->enableCustomAudioCapture(true);
    if (custom_audio_thread_ == nullptr) {
        // On the new thread, call sendCustomAudioFrame in a loop until
        // m_bStartCustomCaptureAudio is set to false to stop pushing audio.
        // NOTE(review): the flag is a plain bool shared across threads —
        // consider std::atomic<bool>.
        auto task = [=]() {
            while (m_bStartCustomCaptureAudio) {
                sendCustomAudioFrame();
            }
        };
        custom_audio_thread_ = new std::thread(task);
        // Audio pacing is timing-sensitive; raise the feeder thread's priority.
        SetThreadPriority(custom_audio_thread_->native_handle(), THREAD_PRIORITY_TIME_CRITICAL);
    }
}

// Reads one PCM frame from the cached file stream, sends it to the SDK, then
// waits until the next capture tick (kAudioCaptureSpan ms after the previous one).
void BRTCCore::sendCustomAudioFrame() {
    if (!m_bStartCustomCaptureAudio) {
        return;
    }
    if (!audio_ifs_.is_open()) {
        return;
    }
    if (m_pBrtc) {
        // 480 samples per call scaled by sample rate, times bytes/sample
        // (16-bit = 2 bytes per channel). With the hard-coded 48 kHz mono
        // format this is 960 bytes, i.e. 10 ms of audio.
        uint32_t bufferSize = (480 * _audio_samplerate / 48000) * (_audio_channel * 16 / 8);
        if (_audio_buffer == nullptr) {
            // Lazily allocated; freed elsewhere (not visible in this chunk).
            _audio_buffer = new char[bufferSize + 2];
        }
        // Wrap to the start of the file when the next frame would pass EOF.
        if (_offset_audioread + bufferSize > _audio_file_length) {
            _offset_audioread = 0;
            audio_ifs_.seekg(_offset_audioread);
        }
        // Read the specified file's PCM audio data.
        audio_ifs_.read(_audio_buffer, bufferSize);
        _offset_audioread += bufferSize;
        // Anchor the pacing clock at the first frame.
        if (start_capture_ms == 0) {
            start_capture_ms = audio_clock_.GetCurrentTickMs();
        }
        BRTCAudioFrame frame;
        frame.audioFormat = BRTCAudioFrameFormatPCM;
        frame.length = bufferSize;
        frame.data = _audio_buffer;
        frame.sampleRate = _audio_samplerate;
        frame.channel = _audio_channel;
        frame.timestamp = m_pBrtc->generateCustomPTS();
        // Send the PCM audio data to the remote side.
        m_pBrtc->sendCustomAudioData(&frame);
        // Pacing: wait (in halves of the remaining interval) until the next
        // capture time, then restart the clock from the current tick.
        uint64_t nextCaptureTime = start_capture_ms + kAudioCaptureSpan;
        uint64_t currentTime = audio_clock_.GetCurrentTickMs();
        if (currentTime < nextCaptureTime) {
            while (true) {
                currentTime = audio_clock_.GetCurrentTickMs();
                int64_t diff = nextCaptureTime - currentTime;
                if (diff > 0) {
                    // Wait for the next capture tick (behaves as a sleep of
                    // diff/2 ms, since the event is not signaled here).
                    WaitForSingleObject(audio_capture_event_, static_cast<DWORD>(diff / 2));
                } else {
                    break;
                }
            }
            start_capture_ms = currentTime;
        } else {
            start_capture_ms = currentTime;
        }
    }
}
原始数据采集
音频传输过程中,我们可以对采集到的音频数据进行前处理和后处理,获取想要的播放效果。
对于有自行处理音频数据需求的场景,百家云提供原始数据功能。你可以在将数据发送给编码器前进行前处理,对捕捉到的音频信号进行修改。
相关函数接口在 c++-sdk 2.5.5 及以上版本支持。
视频原始数据读取和自定义渲染
用户可以调用 setLocalVideoRenderCallback 或者 setRemoteVideoRenderCallback 开启视频自定义渲染,之后可以通过接口 onRenderVideoFrame 接收本地采集出的原始视频数据,或者远端解码后渲染前的视频数据,可以做额外处理,再进行渲染。
用户自定义视频数据发送
用户可以调用 enableCustomVideoCapture 接口开启视频自定义采集,开启视频自定义采集后 SDK 内部会跳过摄像头设备采集流程,之后通过接口 sendCustomVideoData 向 SDK 投送自己采集或者处理后的视频帧,SDK 会将视频帧进行编码并通过自身的网络模块传输出去。
音频原始数据 hook 和自定义渲染
用户自定义音频数据发送
用户调用 enableCustomAudioCapture 接口开启音频自定义采集,之后通过接口 sendCustomAudioData 将用户自己采集的音频发送给 SDK,并推送给远端用户。SDK 会跳过内部的音频设备采集。注意:开启音频自定义采集会同时开启音频自定义渲染。