ESP-BOX-3 / LichuangDev enable AEC to support realtime chat (#429)
* read frame duration from server
* fit WeChat style emoji size
* make the WeChat UI look better
* add Realtime Chat to ESP-BOX-3 / LichuangDev
* disable debug log
* Fix SH1106 compile error: IDF 5.3.2 does not support sh1106-esp-idf (#424)
* Fix ESP32 board LED not lighting up (#427)
* add board esp32-s3-touch-lcd-3.5 (#415)
  * add board esp32-s3-touch-lcd-3.5
  * add axp2101
  ---------
  Co-authored-by: flyingtjy <flyingtjy@gmail.com>
---------
Co-authored-by: ooxxU <71391474@qq.com>
Co-authored-by: flying1425 <79792003+flying1425@users.noreply.github.com>
Co-authored-by: flyingtjy <flyingtjy@gmail.com>
parent e4c76eaa46 · commit fa899a310e
@@ -4,7 +4,7 @@
 # CMakeLists in this exact order for cmake to work correctly
 cmake_minimum_required(VERSION 3.16)

-set(PROJECT_VER "1.5.2")
+set(PROJECT_VER "1.5.4")

 # Add this line to disable the specific warning
 add_compile_options(-Wno-missing-field-initializers)

@@ -230,6 +230,13 @@ config USE_WECHAT_MESSAGE_STYLE
 help
 使用微信聊天界面风格

+config USE_WAKE_WORD_DETECT
+bool "启用唤醒词检测"
+default y
+depends on IDF_TARGET_ESP32S3 && SPIRAM
+help
+需要 ESP32 S3 与 AFE 支持
+
 config USE_AUDIO_PROCESSOR
 bool "启用音频降噪、增益处理"
 default y

@@ -237,10 +244,11 @@ config USE_AUDIO_PROCESSOR
 help
 需要 ESP32 S3 与 AFE 支持

-config USE_WAKE_WORD_DETECT
-bool "启用唤醒词检测"
-default y
-depends on IDF_TARGET_ESP32S3 && SPIRAM
+config USE_REALTIME_CHAT
+bool "启用可语音打断的实时对话模式(需要 AEC 支持)"
+default n
+depends on USE_AUDIO_PROCESSOR && (BOARD_TYPE_ESP_BOX_3 || BOARD_TYPE_ESP_BOX || BOARD_TYPE_LICHUANG_DEV)
 help
-需要 ESP32 S3 与 AFE 支持
+需要 ESP32 S3 与 AEC 开启,因为性能不够,不建议和微信聊天界面风格同时开启
+
 endmenu
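For orientation: the new Kconfig option surfaces in C++ as the generated CONFIG_USE_REALTIME_CHAT macro, which application.h (later in this diff) folds into a plain boolean. A minimal sketch of that pattern, assuming ESP-IDF's generated sdkconfig.h header; the constant name here is illustrative, not from the commit:

    // Sketch only: mirrors the realtime_chat_enabled_ flag added to application.h below.
    // CONFIG_USE_REALTIME_CHAT is defined by Kconfig only when the option is enabled
    // for a board that satisfies its "depends on" clause.
    #include "sdkconfig.h"

    #if CONFIG_USE_REALTIME_CHAT
    static constexpr bool kRealtimeChatEnabled = true;   // AEC-backed, interruptible chat
    #else
    static constexpr bool kRealtimeChatEnabled = false;  // classic auto-stop listening
    #endif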
@@ -203,6 +203,7 @@ void Application::Alert(const char* status, const char* message, const char* emo
 display->SetEmotion(emotion);
 display->SetChatMessage("system", message);
 if (!sound.empty()) {
+ResetDecoder();
 PlaySound(sound);
 }
 }

@@ -217,9 +218,8 @@ void Application::DismissAlert() {
 }

 void Application::PlaySound(const std::string_view& sound) {
-auto codec = Board::GetInstance().GetAudioCodec();
-codec->EnableOutput(true);
-SetDecodeSampleRate(16000);
+// The assets are encoded at 16000Hz, 60ms frame duration
+SetDecodeSampleRate(16000, 60);
 const char* data = sound.data();
 size_t size = sound.size();
 for (const char* p = data; p < data + size; ) {

@@ -255,9 +255,7 @@ void Application::ToggleChatState() {
 return;
 }

-keep_listening_ = true;
-protocol_->SendStartListening(kListeningModeAutoStop);
-SetDeviceState(kDeviceStateListening);
+SetListeningMode(realtime_chat_enabled_ ? kListeningModeRealtime : kListeningModeAutoStop);
 });
 } else if (device_state_ == kDeviceStateSpeaking) {
 Schedule([this]() {

@@ -281,7 +279,6 @@ void Application::StartListening() {
 return;
 }

-keep_listening_ = false;
 if (device_state_ == kDeviceStateIdle) {
 Schedule([this]() {
 if (!protocol_->IsAudioChannelOpened()) {

@@ -290,14 +287,13 @@ void Application::StartListening() {
 return;
 }
 }
-protocol_->SendStartListening(kListeningModeManualStop);
-SetDeviceState(kDeviceStateListening);
+SetListeningMode(kListeningModeManualStop);
 });
 } else if (device_state_ == kDeviceStateSpeaking) {
 Schedule([this]() {
 AbortSpeaking(kAbortReasonNone);
-protocol_->SendStartListening(kListeningModeManualStop);
-SetDeviceState(kDeviceStateListening);
+SetListeningMode(kListeningModeManualStop);
 });
 }
 }

@@ -320,12 +316,12 @@ void Application::Start() {

 /* Setup the audio codec */
 auto codec = board.GetAudioCodec();
-opus_decode_sample_rate_ = codec->output_sample_rate();
-opus_decoder_ = std::make_unique<OpusDecoderWrapper>(opus_decode_sample_rate_, 1);
+opus_decoder_ = std::make_unique<OpusDecoderWrapper>(codec->output_sample_rate(), 1, OPUS_FRAME_DURATION_MS);
 opus_encoder_ = std::make_unique<OpusEncoderWrapper>(16000, 1, OPUS_FRAME_DURATION_MS);
-// For ML307 boards, we use complexity 5 to save bandwidth
-// For other boards, we use complexity 3 to save CPU
-if (board.GetBoardType() == "ml307") {
+if (realtime_chat_enabled_) {
+ESP_LOGI(TAG, "Realtime chat enabled, setting opus encoder complexity to 0");
+opus_encoder_->SetComplexity(0);
+} else if (board.GetBoardType() == "ml307") {
 ESP_LOGI(TAG, "ML307 board detected, setting opus encoder complexity to 5");
 opus_encoder_->SetComplexity(5);
 } else {
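A quick sanity check on the frame sizes these wrappers work with (illustrative arithmetic only, assuming the 60 ms frame duration used elsewhere in this diff and the 16 kHz encoder rate above):

    // Illustrative: one 60 ms Opus frame at 16 kHz carries
    // 16000 * 60 / 1000 = 960 samples per channel.
    constexpr int kSampleRate      = 16000;
    constexpr int kFrameDurationMs = 60;
    constexpr int kSamplesPerFrame = kSampleRate * kFrameDurationMs / 1000;
    static_assert(kSamplesPerFrame == 960, "60 ms at 16 kHz is 960 samples");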
@@ -337,24 +333,20 @@ void Application::Start() {
 input_resampler_.Configure(codec->input_sample_rate(), 16000);
 reference_resampler_.Configure(codec->input_sample_rate(), 16000);
 }
-codec->OnInputReady([this, codec]() {
-BaseType_t higher_priority_task_woken = pdFALSE;
-xEventGroupSetBitsFromISR(event_group_, AUDIO_INPUT_READY_EVENT, &higher_priority_task_woken);
-return higher_priority_task_woken == pdTRUE;
-});
-codec->OnOutputReady([this]() {
-BaseType_t higher_priority_task_woken = pdFALSE;
-xEventGroupSetBitsFromISR(event_group_, AUDIO_OUTPUT_READY_EVENT, &higher_priority_task_woken);
-return higher_priority_task_woken == pdTRUE;
-});
 codec->Start();

+xTaskCreatePinnedToCore([](void* arg) {
+Application* app = (Application*)arg;
+app->AudioLoop();
+vTaskDelete(NULL);
+}, "audio_loop", 4096 * 2, this, 8, &audio_loop_task_handle_, realtime_chat_enabled_ ? 1 : 0);
+
 /* Start the main loop */
-xTaskCreate([](void* arg) {
+xTaskCreatePinnedToCore([](void* arg) {
 Application* app = (Application*)arg;
 app->MainLoop();
 vTaskDelete(NULL);
-}, "main_loop", 4096 * 2, this, 4, nullptr);
+}, "main_loop", 4096, this, 4, &main_loop_task_handle_, 0);

 /* Wait for the network to be ready */
 board.StartNetwork();

@@ -372,9 +364,7 @@ void Application::Start() {
 });
 protocol_->OnIncomingAudio([this](std::vector<uint8_t>&& data) {
 std::lock_guard<std::mutex> lock(mutex_);
-if (device_state_ == kDeviceStateSpeaking) {
-audio_decode_queue_.emplace_back(std::move(data));
-}
+audio_decode_queue_.emplace_back(std::move(data));
 });
 protocol_->OnAudioChannelOpened([this, codec, &board]() {
 board.SetPowerSaveMode(false);

@@ -382,7 +372,7 @@ void Application::Start() {
 ESP_LOGW(TAG, "Server sample rate %d does not match device output sample rate %d, resampling may cause distortion",
 protocol_->server_sample_rate(), codec->output_sample_rate());
 }
-SetDecodeSampleRate(protocol_->server_sample_rate());
+SetDecodeSampleRate(protocol_->server_sample_rate(), protocol_->server_frame_duration());
 auto& thing_manager = iot::ThingManager::GetInstance();
 protocol_->SendIotDescriptors(thing_manager.GetDescriptorsJson());
 std::string states;

@@ -412,13 +402,12 @@ void Application::Start() {
 });
 } else if (strcmp(state->valuestring, "stop") == 0) {
 Schedule([this]() {
+background_task_->WaitForCompletion();
 if (device_state_ == kDeviceStateSpeaking) {
-background_task_->WaitForCompletion();
-if (keep_listening_) {
-protocol_->SendStartListening(kListeningModeAutoStop);
-SetDeviceState(kDeviceStateListening);
-} else {
+if (listening_mode_ == kListeningModeManualStop) {
 SetDeviceState(kDeviceStateIdle);
+} else {
+SetDeviceState(kDeviceStateListening);
 }
 }
 });

@@ -474,7 +463,7 @@ void Application::Start() {
 }, "check_new_version", 4096 * 2, this, 2, nullptr);

 #if CONFIG_USE_AUDIO_PROCESSOR
-audio_processor_.Initialize(codec->input_channels(), codec->input_reference());
+audio_processor_.Initialize(codec, realtime_chat_enabled_);
 audio_processor_.OnOutput([this](std::vector<int16_t>&& data) {
 background_task_->Schedule([this, data = std::move(data)]() mutable {
 opus_encoder_->Encode(std::move(data), [this](std::vector<uint8_t>&& opus) {

@@ -500,7 +489,7 @@ void Application::Start() {
 #endif

 #if CONFIG_USE_WAKE_WORD_DETECT
-wake_word_detect_.Initialize(codec->input_channels(), codec->input_reference());
+wake_word_detect_.Initialize(codec);
 wake_word_detect_.OnWakeWordDetected([this](const std::string& wake_word) {
 Schedule([this, &wake_word]() {
 if (device_state_ == kDeviceStateIdle) {

@@ -520,8 +509,7 @@ void Application::Start() {
 // Set the chat state to wake word detected
 protocol_->SendWakeWordDetected(wake_word);
 ESP_LOGI(TAG, "Wake word detected: %s", wake_word.c_str());
-keep_listening_ = true;
-SetDeviceState(kDeviceStateIdle);
+SetListeningMode(realtime_chat_enabled_ ? kListeningModeRealtime : kListeningModeAutoStop);
 } else if (device_state_ == kDeviceStateSpeaking) {
 AbortSpeaking(kAbortReasonWakeWordDetected);
 } else if (device_state_ == kDeviceStateActivating) {

@@ -534,6 +522,13 @@ void Application::Start() {

 SetDeviceState(kDeviceStateIdle);
 esp_timer_start_periodic(clock_timer_handle_, 1000000);
+
+#if 0
+while (true) {
+SystemInfo::PrintRealTimeStats(pdMS_TO_TICKS(1000));
+vTaskDelay(pdMS_TO_TICKS(10000));
+}
+#endif
 }

 void Application::OnClockTimer() {

@@ -541,7 +536,6 @@ void Application::OnClockTimer() {

 // Print the debug info every 10 seconds
 if (clock_ticks_ % 10 == 0) {
-// SystemInfo::PrintRealTimeStats(pdMS_TO_TICKS(1000));
 int free_sram = heap_caps_get_free_size(MALLOC_CAP_INTERNAL);
 int min_free_sram = heap_caps_get_minimum_free_size(MALLOC_CAP_INTERNAL);
 ESP_LOGI(TAG, "Free internal: %u minimal internal: %u", free_sram, min_free_sram);

@@ -561,6 +555,7 @@ void Application::OnClockTimer() {
 }
 }

+// Add a async task to MainLoop
 void Application::Schedule(std::function<void()> callback) {
 {
 std::lock_guard<std::mutex> lock(mutex_);

@@ -574,16 +569,8 @@ void Application::Schedule(std::function<void()> callback) {
 // they should use Schedule to call this function
 void Application::MainLoop() {
 while (true) {
-auto bits = xEventGroupWaitBits(event_group_,
-SCHEDULE_EVENT | AUDIO_INPUT_READY_EVENT | AUDIO_OUTPUT_READY_EVENT,
-pdTRUE, pdFALSE, portMAX_DELAY);
+auto bits = xEventGroupWaitBits(event_group_, SCHEDULE_EVENT, pdTRUE, pdFALSE, portMAX_DELAY);

-if (bits & AUDIO_INPUT_READY_EVENT) {
-InputAudio();
-}
-if (bits & AUDIO_OUTPUT_READY_EVENT) {
-OutputAudio();
-}
 if (bits & SCHEDULE_EVENT) {
 std::unique_lock<std::mutex> lock(mutex_);
 std::list<std::function<void()>> tasks = std::move(main_tasks_);

@@ -595,14 +582,18 @@ void Application::MainLoop() {
 }
 }

-void Application::ResetDecoder() {
-std::lock_guard<std::mutex> lock(mutex_);
-opus_decoder_->ResetState();
-audio_decode_queue_.clear();
-last_output_time_ = std::chrono::steady_clock::now();
+// The Audio Loop is used to input and output audio data
+void Application::AudioLoop() {
+auto codec = Board::GetInstance().GetAudioCodec();
+while (true) {
+OnAudioInput();
+if (codec->output_enabled()) {
+OnAudioOutput();
+}
+}
 }

-void Application::OutputAudio() {
+void Application::OnAudioOutput() {
 auto now = std::chrono::steady_clock::now();
 auto codec = Board::GetInstance().GetAudioCodec();
 const int max_silence_seconds = 10;

@@ -624,7 +615,6 @@ void Application::OutputAudio() {
 return;
 }

-last_output_time_ = now;
 auto opus = std::move(audio_decode_queue_.front());
 audio_decode_queue_.pop_front();
 lock.unlock();
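For context on the task changes above, a hedged sketch of the new split (the stack size is fixed at 8 KB here for brevity; the commit uses 8 KB for audio_loop and 4 KB for main_loop, and the helper itself is illustrative, not part of the commit):

    // The ISR-driven AUDIO_INPUT_READY/AUDIO_OUTPUT_READY events are gone; a
    // dedicated audio task pumps input/output while MainLoop only services
    // Schedule() callbacks.
    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    static void StartPinnedTask(TaskFunction_t fn, const char* name, void* arg,
                                UBaseType_t priority, BaseType_t core) {
        xTaskCreatePinnedToCore(fn, name, 4096 * 2, arg, priority, nullptr, core);
    }
    // Usage mirroring Start() above: audio work pinned to core 1 when realtime
    // chat is enabled, scheduling work on core 0:
    //   StartPinnedTask(audio_entry, "audio_loop", app, 8, realtime_chat ? 1 : 0);
    //   StartPinnedTask(main_entry,  "main_loop",  app, 4, 0);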
@@ -638,27 +628,57 @@ void Application::OutputAudio() {
 if (!opus_decoder_->Decode(std::move(opus), pcm)) {
 return;
 }

 // Resample if the sample rate is different
-if (opus_decode_sample_rate_ != codec->output_sample_rate()) {
+if (opus_decoder_->sample_rate() != codec->output_sample_rate()) {
 int target_size = output_resampler_.GetOutputSamples(pcm.size());
 std::vector<int16_t> resampled(target_size);
 output_resampler_.Process(pcm.data(), pcm.size(), resampled.data());
 pcm = std::move(resampled);
 }

 codec->OutputData(pcm);
+last_output_time_ = std::chrono::steady_clock::now();
 });
 }

-void Application::InputAudio() {
-auto codec = Board::GetInstance().GetAudioCodec();
+void Application::OnAudioInput() {
 std::vector<int16_t> data;
-if (!codec->InputData(data)) {
+#if CONFIG_USE_WAKE_WORD_DETECT
+if (wake_word_detect_.IsDetectionRunning()) {
+ReadAudio(data, 16000, wake_word_detect_.GetFeedSize());
+wake_word_detect_.Feed(data);
 return;
 }
+#endif
+#if CONFIG_USE_AUDIO_PROCESSOR
+if (audio_processor_.IsRunning()) {
+ReadAudio(data, 16000, audio_processor_.GetFeedSize());
+audio_processor_.Feed(data);
+return;
+}
+#else
+if (device_state_ == kDeviceStateListening) {
+ReadAudio(data, 16000, 30 * 16000 / 1000);
+background_task_->Schedule([this, data = std::move(data)]() mutable {
+opus_encoder_->Encode(std::move(data), [this](std::vector<uint8_t>&& opus) {
+Schedule([this, opus = std::move(opus)]() {
+protocol_->SendAudio(opus);
+});
+});
+});
+return;
+}
+#endif
+vTaskDelay(pdMS_TO_TICKS(30));
+}

-if (codec->input_sample_rate() != 16000) {
+void Application::ReadAudio(std::vector<int16_t>& data, int sample_rate, int samples) {
+auto codec = Board::GetInstance().GetAudioCodec();
+if (codec->input_sample_rate() != sample_rate) {
+data.resize(samples * codec->input_sample_rate() / sample_rate);
+if (!codec->InputData(data)) {
+return;
+}
 if (codec->input_channels() == 2) {
 auto mic_channel = std::vector<int16_t>(data.size() / 2);
 auto reference_channel = std::vector<int16_t>(data.size() / 2);

@@ -680,28 +700,12 @@ void Application::InputAudio() {
 input_resampler_.Process(data.data(), data.size(), resampled.data());
 data = std::move(resampled);
 }
+} else {
+data.resize(samples);
+if (!codec->InputData(data)) {
+return;
+}
 }
-
-#if CONFIG_USE_WAKE_WORD_DETECT
-if (wake_word_detect_.IsDetectionRunning()) {
-wake_word_detect_.Feed(data);
-}
-#endif
-#if CONFIG_USE_AUDIO_PROCESSOR
-if (audio_processor_.IsRunning()) {
-audio_processor_.Input(data);
-}
-#else
-if (device_state_ == kDeviceStateListening) {
-background_task_->Schedule([this, data = std::move(data)]() mutable {
-opus_encoder_->Encode(std::move(data), [this](std::vector<uint8_t>&& opus) {
-Schedule([this, opus = std::move(opus)]() {
-protocol_->SendAudio(opus);
-});
-});
-});
-}
-#endif
 }

 void Application::AbortSpeaking(AbortReason reason) {

@@ -710,6 +714,11 @@ void Application::AbortSpeaking(AbortReason reason) {
 protocol_->SendAbortSpeaking(reason);
 }

+void Application::SetListeningMode(ListeningMode mode) {
+listening_mode_ = mode;
+SetDeviceState(kDeviceStateListening);
+}
+
 void Application::SetDeviceState(DeviceState state) {
 if (device_state_ == state) {
 return;

@@ -723,7 +732,6 @@ void Application::SetDeviceState(DeviceState state) {
 background_task_->WaitForCompletion();

 auto& board = Board::GetInstance();
-auto codec = board.GetAudioCodec();
 auto display = board.GetDisplay();
 auto led = board.GetLed();
 led->OnStateChanged();
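Worked example of the resize arithmetic in the new ReadAudio() above (the numbers are hypothetical and only illustrate the ratio between codec capture rate and AFE rate):

    // Hypothetical numbers: the AFE asks for 512 samples at 16 kHz, but the
    // codec captures at 48 kHz, so ReadAudio() reads 3x as many raw samples
    // and the input resampler reduces them back to 512 before feeding the AFE.
    constexpr int kSamplesWanted  = 512;     // e.g. a GetFeedSize() result
    constexpr int kTargetRate     = 16000;   // rate the AFE expects
    constexpr int kCodecInputRate = 48000;   // hypothetical codec capture rate
    constexpr int kRawSamples     = kSamplesWanted * kCodecInputRate / kTargetRate;
    static_assert(kRawSamples == 1536, "3x the requested samples before resampling");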
@@ -747,30 +755,43 @@ void Application::SetDeviceState(DeviceState state) {
 case kDeviceStateListening:
 display->SetStatus(Lang::Strings::LISTENING);
 display->SetEmotion("neutral");
-ResetDecoder();
-opus_encoder_->ResetState();
-#if CONFIG_USE_AUDIO_PROCESSOR
-audio_processor_.Start();
-#endif
-#if CONFIG_USE_WAKE_WORD_DETECT
-wake_word_detect_.StopDetection();
-#endif
+// Update the IoT states before sending the start listening command
 UpdateIotStates();
-if (previous_state == kDeviceStateSpeaking) {
-// FIXME: Wait for the speaker to empty the buffer
-vTaskDelay(pdMS_TO_TICKS(120));
+// Make sure the audio processor is running
+#if CONFIG_USE_AUDIO_PROCESSOR
+if (!audio_processor_.IsRunning()) {
+#else
+if (true) {
+#endif
+// Send the start listening command
+protocol_->SendStartListening(listening_mode_);
+if (listening_mode_ == kListeningModeAutoStop && previous_state == kDeviceStateSpeaking) {
+// FIXME: Wait for the speaker to empty the buffer
+vTaskDelay(pdMS_TO_TICKS(120));
+}
+opus_encoder_->ResetState();
+#if CONFIG_USE_WAKE_WORD_DETECT
+wake_word_detect_.StopDetection();
+#endif
+#if CONFIG_USE_AUDIO_PROCESSOR
+audio_processor_.Start();
+#endif
 }
 break;
 case kDeviceStateSpeaking:
 display->SetStatus(Lang::Strings::SPEAKING);
-ResetDecoder();
-codec->EnableOutput(true);
+if (listening_mode_ != kListeningModeRealtime) {
 #if CONFIG_USE_AUDIO_PROCESSOR
 audio_processor_.Stop();
 #endif
 #if CONFIG_USE_WAKE_WORD_DETECT
 wake_word_detect_.StartDetection();
 #endif
+}
+ResetDecoder();
 break;
 default:
 // Do nothing
@@ -778,19 +799,28 @@ void Application::SetDeviceState(DeviceState state) {
 }
 }

-void Application::SetDecodeSampleRate(int sample_rate) {
-if (opus_decode_sample_rate_ == sample_rate) {
+void Application::ResetDecoder() {
+std::lock_guard<std::mutex> lock(mutex_);
+opus_decoder_->ResetState();
+audio_decode_queue_.clear();
+last_output_time_ = std::chrono::steady_clock::now();
+
+auto codec = Board::GetInstance().GetAudioCodec();
+codec->EnableOutput(true);
+}
+
+void Application::SetDecodeSampleRate(int sample_rate, int frame_duration) {
+if (opus_decoder_->sample_rate() == sample_rate && opus_decoder_->duration_ms() == frame_duration) {
 return;
 }

-opus_decode_sample_rate_ = sample_rate;
 opus_decoder_.reset();
-opus_decoder_ = std::make_unique<OpusDecoderWrapper>(opus_decode_sample_rate_, 1);
+opus_decoder_ = std::make_unique<OpusDecoderWrapper>(sample_rate, 1, frame_duration);

 auto codec = Board::GetInstance().GetAudioCodec();
-if (opus_decode_sample_rate_ != codec->output_sample_rate()) {
-ESP_LOGI(TAG, "Resampling audio from %d to %d", opus_decode_sample_rate_, codec->output_sample_rate());
-output_resampler_.Configure(opus_decode_sample_rate_, codec->output_sample_rate());
+if (opus_decoder_->sample_rate() != codec->output_sample_rate()) {
+ESP_LOGI(TAG, "Resampling audio from %d to %d", opus_decoder_->sample_rate(), codec->output_sample_rate());
+output_resampler_.Configure(opus_decoder_->sample_rate(), codec->output_sample_rate());
 }
 }

@@ -88,12 +88,20 @@ private:
 EventGroupHandle_t event_group_ = nullptr;
 esp_timer_handle_t clock_timer_handle_ = nullptr;
 volatile DeviceState device_state_ = kDeviceStateUnknown;
-bool keep_listening_ = false;
+ListeningMode listening_mode_ = kListeningModeAutoStop;
+#if CONFIG_USE_REALTIME_CHAT
+bool realtime_chat_enabled_ = true;
+#else
+bool realtime_chat_enabled_ = false;
+#endif
 bool aborted_ = false;
 bool voice_detected_ = false;
 int clock_ticks_ = 0;
+TaskHandle_t main_loop_task_handle_ = nullptr;
+TaskHandle_t check_new_version_task_handle_ = nullptr;

 // Audio encode / decode
+TaskHandle_t audio_loop_task_handle_ = nullptr;
 BackgroundTask* background_task_ = nullptr;
 std::chrono::steady_clock::time_point last_output_time_;
 std::list<std::vector<uint8_t>> audio_decode_queue_;

@@ -101,19 +109,21 @@ private:
 std::unique_ptr<OpusEncoderWrapper> opus_encoder_;
 std::unique_ptr<OpusDecoderWrapper> opus_decoder_;

-int opus_decode_sample_rate_ = -1;
 OpusResampler input_resampler_;
 OpusResampler reference_resampler_;
 OpusResampler output_resampler_;

 void MainLoop();
-void InputAudio();
-void OutputAudio();
+void OnAudioInput();
+void OnAudioOutput();
+void ReadAudio(std::vector<int16_t>& data, int sample_rate, int samples);
 void ResetDecoder();
-void SetDecodeSampleRate(int sample_rate);
+void SetDecodeSampleRate(int sample_rate, int frame_duration);
 void CheckNewVersion();
 void ShowActivationCode();
 void OnClockTimer();
+void SetListeningMode(ListeningMode mode);
+void AudioLoop();
 };

 #endif // _APPLICATION_H_

@@ -14,23 +14,11 @@ AudioCodec::AudioCodec() {
 AudioCodec::~AudioCodec() {
 }

-void AudioCodec::OnInputReady(std::function<bool()> callback) {
-on_input_ready_ = callback;
-}
-
-void AudioCodec::OnOutputReady(std::function<bool()> callback) {
-on_output_ready_ = callback;
-}
-
 void AudioCodec::OutputData(std::vector<int16_t>& data) {
 Write(data.data(), data.size());
 }

 bool AudioCodec::InputData(std::vector<int16_t>& data) {
-int duration = 30;
-int input_frame_size = input_sample_rate_ / 1000 * duration * input_channels_;
-
-data.resize(input_frame_size);
 int samples = Read(data.data(), data.size());
 if (samples > 0) {
 return true;

@@ -38,22 +26,6 @@ bool AudioCodec::InputData(std::vector<int16_t>& data) {
 return false;
 }

-IRAM_ATTR bool AudioCodec::on_sent(i2s_chan_handle_t handle, i2s_event_data_t *event, void *user_ctx) {
-auto audio_codec = (AudioCodec*)user_ctx;
-if (audio_codec->output_enabled_ && audio_codec->on_output_ready_) {
-return audio_codec->on_output_ready_();
-}
-return false;
-}
-
-IRAM_ATTR bool AudioCodec::on_recv(i2s_chan_handle_t handle, i2s_event_data_t *event, void *user_ctx) {
-auto audio_codec = (AudioCodec*)user_ctx;
-if (audio_codec->input_enabled_ && audio_codec->on_input_ready_) {
-return audio_codec->on_input_ready_();
-}
-return false;
-}
-
 void AudioCodec::Start() {
 Settings settings("audio", false);
 output_volume_ = settings.GetInt("output_volume", output_volume_);

@@ -62,15 +34,6 @@ void AudioCodec::Start() {
 output_volume_ = 10;
 }

-// 注册音频数据回调
-i2s_event_callbacks_t rx_callbacks = {};
-rx_callbacks.on_recv = on_recv;
-i2s_channel_register_event_callback(rx_handle_, &rx_callbacks, this);
-
-i2s_event_callbacks_t tx_callbacks = {};
-tx_callbacks.on_sent = on_sent;
-i2s_channel_register_event_callback(tx_handle_, &tx_callbacks, this);
-
 ESP_ERROR_CHECK(i2s_channel_enable(tx_handle_));
 ESP_ERROR_CHECK(i2s_channel_enable(rx_handle_));

@@ -23,8 +23,6 @@ public:
 void Start();
 void OutputData(std::vector<int16_t>& data);
 bool InputData(std::vector<int16_t>& data);
-void OnOutputReady(std::function<bool()> callback);
-void OnInputReady(std::function<bool()> callback);

 inline bool duplex() const { return duplex_; }
 inline bool input_reference() const { return input_reference_; }

@@ -33,13 +31,8 @@ public:
 inline int input_channels() const { return input_channels_; }
 inline int output_channels() const { return output_channels_; }
 inline int output_volume() const { return output_volume_; }
-private:
-std::function<bool()> on_input_ready_;
-std::function<bool()> on_output_ready_;
-
-IRAM_ATTR static bool on_recv(i2s_chan_handle_t handle, i2s_event_data_t *event, void *user_ctx);
-IRAM_ATTR static bool on_sent(i2s_chan_handle_t handle, i2s_event_data_t *event, void *user_ctx);
+inline bool input_enabled() const { return input_enabled_; }
+inline bool output_enabled() const { return output_enabled_; }

 protected:
 i2s_chan_handle_t tx_handle_ = nullptr;

@@ -10,31 +10,41 @@ AudioProcessor::AudioProcessor()
 event_group_ = xEventGroupCreate();
 }

-void AudioProcessor::Initialize(int channels, bool reference) {
-channels_ = channels;
-reference_ = reference;
-int ref_num = reference_ ? 1 : 0;
+void AudioProcessor::Initialize(AudioCodec* codec, bool realtime_chat) {
+codec_ = codec;
+int ref_num = codec_->input_reference() ? 1 : 0;

 std::string input_format;
-for (int i = 0; i < channels_ - ref_num; i++) {
+for (int i = 0; i < codec_->input_channels() - ref_num; i++) {
 input_format.push_back('M');
 }
 for (int i = 0; i < ref_num; i++) {
 input_format.push_back('R');
 }

+srmodel_list_t *models = esp_srmodel_init("model");
+char* ns_model_name = esp_srmodel_filter(models, ESP_NSNET_PREFIX, NULL);
+
 afe_config_t* afe_config = afe_config_init(input_format.c_str(), NULL, AFE_TYPE_VC, AFE_MODE_HIGH_PERF);
-afe_config->aec_init = false;
-afe_config->aec_mode = AEC_MODE_VOIP_HIGH_PERF;
+if (realtime_chat) {
+afe_config->aec_init = true;
+afe_config->aec_mode = AEC_MODE_VOIP_LOW_COST;
+} else {
+afe_config->aec_init = false;
+}
 afe_config->ns_init = true;
-afe_config->vad_init = true;
-afe_config->vad_mode = VAD_MODE_0;
-afe_config->vad_min_noise_ms = 100;
+afe_config->ns_model_name = ns_model_name;
+afe_config->afe_ns_mode = AFE_NS_MODE_NET;
+if (realtime_chat) {
+afe_config->vad_init = false;
+} else {
+afe_config->vad_init = true;
+afe_config->vad_mode = VAD_MODE_0;
+afe_config->vad_min_noise_ms = 100;
+}
 afe_config->afe_perferred_core = 1;
 afe_config->afe_perferred_priority = 1;
-afe_config->agc_init = true;
-afe_config->agc_mode = AFE_AGC_MODE_WEBRTC;
-afe_config->agc_compression_gain_db = 10;
+afe_config->agc_init = false;
 afe_config->memory_alloc_mode = AFE_MEMORY_ALLOC_MORE_PSRAM;

 afe_iface_ = esp_afe_handle_from_config(afe_config);

@@ -54,15 +64,12 @@ AudioProcessor::~AudioProcessor() {
 vEventGroupDelete(event_group_);
 }

-void AudioProcessor::Input(const std::vector<int16_t>& data) {
-input_buffer_.insert(input_buffer_.end(), data.begin(), data.end());
+size_t AudioProcessor::GetFeedSize() {
+return afe_iface_->get_feed_chunksize(afe_data_) * codec_->input_channels();
+}

-auto feed_size = afe_iface_->get_feed_chunksize(afe_data_) * channels_;
-while (input_buffer_.size() >= feed_size) {
-auto chunk = input_buffer_.data();
-afe_iface_->feed(afe_data_, chunk);
-input_buffer_.erase(input_buffer_.begin(), input_buffer_.begin() + feed_size);
-}
+void AudioProcessor::Feed(const std::vector<int16_t>& data) {
+afe_iface_->feed(afe_data_, data.data());
 }

 void AudioProcessor::Start() {

@@ -10,28 +10,29 @@
 #include <vector>
 #include <functional>

+#include "audio_codec.h"
+
 class AudioProcessor {
 public:
 AudioProcessor();
 ~AudioProcessor();

-void Initialize(int channels, bool reference);
-void Input(const std::vector<int16_t>& data);
+void Initialize(AudioCodec* codec, bool realtime_chat);
+void Feed(const std::vector<int16_t>& data);
 void Start();
 void Stop();
 bool IsRunning();
 void OnOutput(std::function<void(std::vector<int16_t>&& data)> callback);
 void OnVadStateChange(std::function<void(bool speaking)> callback);
+size_t GetFeedSize();

 private:
 EventGroupHandle_t event_group_ = nullptr;
 esp_afe_sr_iface_t* afe_iface_ = nullptr;
 esp_afe_sr_data_t* afe_data_ = nullptr;
-std::vector<int16_t> input_buffer_;
 std::function<void(std::vector<int16_t>&& data)> output_callback_;
 std::function<void(bool speaking)> vad_state_change_callback_;
-int channels_;
-bool reference_;
+AudioCodec* codec_ = nullptr;
 bool is_speaking_ = false;

 void AudioProcessorTask();

@@ -30,10 +30,9 @@ WakeWordDetect::~WakeWordDetect() {
 vEventGroupDelete(event_group_);
 }

-void WakeWordDetect::Initialize(int channels, bool reference) {
-channels_ = channels;
-reference_ = reference;
-int ref_num = reference_ ? 1 : 0;
+void WakeWordDetect::Initialize(AudioCodec* codec) {
+codec_ = codec;
+int ref_num = codec_->input_reference() ? 1 : 0;

 srmodel_list_t *models = esp_srmodel_init("model");
 for (int i = 0; i < models->num; i++) {

@@ -51,14 +50,14 @@ void WakeWordDetect::Initialize(int channels, bool reference) {
 }

 std::string input_format;
-for (int i = 0; i < channels_ - ref_num; i++) {
+for (int i = 0; i < codec_->input_channels() - ref_num; i++) {
 input_format.push_back('M');
 }
 for (int i = 0; i < ref_num; i++) {
 input_format.push_back('R');
 }
 afe_config_t* afe_config = afe_config_init(input_format.c_str(), models, AFE_TYPE_SR, AFE_MODE_HIGH_PERF);
-afe_config->aec_init = reference_;
+afe_config->aec_init = codec_->input_reference();
 afe_config->aec_mode = AEC_MODE_SR_HIGH_PERF;
 afe_config->afe_perferred_core = 1;
 afe_config->afe_perferred_priority = 1;

@@ -92,13 +91,11 @@ bool WakeWordDetect::IsDetectionRunning() {
 }

 void WakeWordDetect::Feed(const std::vector<int16_t>& data) {
-input_buffer_.insert(input_buffer_.end(), data.begin(), data.end());
+afe_iface_->feed(afe_data_, data.data());
+}

-auto feed_size = afe_iface_->get_feed_chunksize(afe_data_) * channels_;
-while (input_buffer_.size() >= feed_size) {
-afe_iface_->feed(afe_data_, input_buffer_.data());
-input_buffer_.erase(input_buffer_.begin(), input_buffer_.begin() + feed_size);
-}
+size_t WakeWordDetect::GetFeedSize() {
+return afe_iface_->get_feed_chunksize(afe_data_) * codec_->input_channels();
 }

 void WakeWordDetect::AudioDetectionTask() {

@@ -15,18 +15,20 @@
 #include <mutex>
 #include <condition_variable>

+#include "audio_codec.h"
+
 class WakeWordDetect {
 public:
 WakeWordDetect();
 ~WakeWordDetect();

-void Initialize(int channels, bool reference);
+void Initialize(AudioCodec* codec);
 void Feed(const std::vector<int16_t>& data);
 void OnWakeWordDetected(std::function<void(const std::string& wake_word)> callback);
 void StartDetection();
 void StopDetection();
 bool IsDetectionRunning();
+size_t GetFeedSize();
 void EncodeWakeWordData();
 bool GetWakeWordOpus(std::vector<uint8_t>& opus);
 const std::string& GetLastDetectedWakeWord() const { return last_detected_wake_word_; }

@@ -36,11 +38,9 @@ private:
 esp_afe_sr_data_t* afe_data_ = nullptr;
 char* wakenet_model_ = NULL;
 std::vector<std::string> wake_words_;
-std::vector<int16_t> input_buffer_;
 EventGroupHandle_t event_group_;
 std::function<void(const std::string& wake_word)> wake_word_detected_callback_;
-int channels_;
-bool reference_;
+AudioCodec* codec_ = nullptr;
 std::string last_detected_wake_word_;

 TaskHandle_t wake_word_encode_task_ = nullptr;

@@ -126,7 +126,11 @@ private:
 {
 .text_font = &font_puhui_20_4,
 .icon_font = &font_awesome_20_4,
+#if CONFIG_USE_WECHAT_MESSAGE_STYLE
+.emoji_font = font_emoji_32_init(),
+#else
 .emoji_font = font_emoji_64_init(),
+#endif
 });
 }

@@ -3,7 +3,9 @@
 "builds": [
 {
 "name": "esp32-s3-touch-amoled-1.8",
-"sdkconfig_append": []
+"sdkconfig_append": [
+"CONFIG_USE_WECHAT_MESSAGE_STYLE=y"
+]
 }
 ]
 }

@@ -87,7 +87,11 @@ public:
 {
 .text_font = &font_puhui_30_4,
 .icon_font = &font_awesome_30_4,
+#if CONFIG_USE_WECHAT_MESSAGE_STYLE
+.emoji_font = font_emoji_32_init(),
+#else
 .emoji_font = font_emoji_64_init(),
+#endif
 }) {
 DisplayLockGuard lock(this);
 lv_obj_set_style_pad_left(status_bar_, LV_HOR_RES * 0.1, 0);

@@ -103,6 +103,7 @@ SpiLcdDisplay::SpiLcdDisplay(esp_lcd_panel_io_handle_t panel_io, esp_lcd_panel_h
 ESP_LOGI(TAG, "Initialize LVGL port");
 lvgl_port_cfg_t port_cfg = ESP_LVGL_PORT_INIT_CONFIG();
 port_cfg.task_priority = 1;
+port_cfg.timer_period_ms = 50;
 lvgl_port_init(&port_cfg);

 ESP_LOGI(TAG, "Adding LCD screen");

@@ -278,7 +279,7 @@ void LcdDisplay::SetupUI() {

 /* Status bar */
 status_bar_ = lv_obj_create(container_);
-lv_obj_set_size(status_bar_, LV_HOR_RES, fonts_.emoji_font->line_height);
+lv_obj_set_size(status_bar_, LV_HOR_RES, LV_SIZE_CONTENT);
 lv_obj_set_style_radius(status_bar_, 0, 0);
 lv_obj_set_style_bg_color(status_bar_, current_theme.background, 0);
 lv_obj_set_style_text_color(status_bar_, current_theme.text, 0);

@@ -288,7 +289,7 @@ void LcdDisplay::SetupUI() {
 lv_obj_set_style_radius(content_, 0, 0);
 lv_obj_set_width(content_, LV_HOR_RES);
 lv_obj_set_flex_grow(content_, 1);
-lv_obj_set_style_pad_all(content_, 5, 0);
+lv_obj_set_style_pad_all(content_, 10, 0);
 lv_obj_set_style_bg_color(content_, current_theme.chat_background, 0); // Background for chat area
 lv_obj_set_style_border_color(content_, current_theme.border, 0); // Border color for chat area

@@ -309,8 +310,10 @@ void LcdDisplay::SetupUI() {
 lv_obj_set_style_pad_all(status_bar_, 0, 0);
 lv_obj_set_style_border_width(status_bar_, 0, 0);
 lv_obj_set_style_pad_column(status_bar_, 0, 0);
-lv_obj_set_style_pad_left(status_bar_, 2, 0);
-lv_obj_set_style_pad_right(status_bar_, 2, 0);
+lv_obj_set_style_pad_left(status_bar_, 10, 0);
+lv_obj_set_style_pad_right(status_bar_, 10, 0);
+lv_obj_set_style_pad_top(status_bar_, 2, 0);
+lv_obj_set_style_pad_bottom(status_bar_, 2, 0);
 lv_obj_set_scrollbar_mode(status_bar_, LV_SCROLLBAR_MODE_OFF);
 // 设置状态栏的内容垂直居中
 lv_obj_set_flex_align(status_bar_, LV_FLEX_ALIGN_SPACE_BETWEEN, LV_FLEX_ALIGN_CENTER, LV_FLEX_ALIGN_CENTER);

@@ -366,7 +369,7 @@ void LcdDisplay::SetupUI() {
 lv_obj_add_flag(low_battery_popup_, LV_OBJ_FLAG_HIDDEN);
 }

-#define MAX_MESSAGES 50
+#define MAX_MESSAGES 20
 void LcdDisplay::SetChatMessage(const char* role, const char* content) {
 DisplayLockGuard lock(this);
 if (content_ == nullptr) {

@@ -376,6 +379,21 @@ void LcdDisplay::SetChatMessage(const char* role, const char* content) {
 //避免出现空的消息框
 if(strlen(content) == 0) return;

+// 检查消息数量是否超过限制
+uint32_t child_count = lv_obj_get_child_cnt(content_);
+if (child_count >= MAX_MESSAGES) {
+// 删除最早的消息(第一个子对象)
+lv_obj_t* first_child = lv_obj_get_child(content_, 0);
+lv_obj_t* last_child = lv_obj_get_child(content_, child_count - 1);
+if (first_child != nullptr) {
+lv_obj_del(first_child);
+}
+// Scroll to the last message immediately
+if (last_child != nullptr) {
+lv_obj_scroll_to_view_recursive(last_child, LV_ANIM_OFF);
+}
+}
+
 // Create a message bubble
 lv_obj_t* msg_bubble = lv_obj_create(content_);
 lv_obj_set_style_radius(msg_bubble, 8, 0);

@@ -431,9 +449,6 @@ void LcdDisplay::SetChatMessage(const char* role, const char* content) {
 lv_obj_set_width(msg_bubble, LV_SIZE_CONTENT);
 lv_obj_set_height(msg_bubble, LV_SIZE_CONTENT);

-// Add some margin
-lv_obj_set_style_margin_right(msg_bubble, 10, 0);
-
 // Don't grow
 lv_obj_set_style_flex_grow(msg_bubble, 0, 0);
 } else if (strcmp(role, "assistant") == 0) {

@@ -449,9 +464,6 @@ void LcdDisplay::SetChatMessage(const char* role, const char* content) {
 lv_obj_set_width(msg_bubble, LV_SIZE_CONTENT);
 lv_obj_set_height(msg_bubble, LV_SIZE_CONTENT);

-// Add some margin
-lv_obj_set_style_margin_left(msg_bubble, -4, 0);
-
 // Don't grow
 lv_obj_set_style_flex_grow(msg_bubble, 0, 0);
 } else if (strcmp(role, "system") == 0) {

@@ -487,7 +499,7 @@ void LcdDisplay::SetChatMessage(const char* role, const char* content) {
 lv_obj_set_parent(msg_bubble, container);

 // Right align the bubble in the container
-lv_obj_align(msg_bubble, LV_ALIGN_RIGHT_MID, -10, 0);
+lv_obj_align(msg_bubble, LV_ALIGN_RIGHT_MID, -25, 0);

 // Auto-scroll to this container
 lv_obj_scroll_to_view_recursive(container, LV_ANIM_ON);

@@ -521,22 +533,6 @@ void LcdDisplay::SetChatMessage(const char* role, const char* content) {

 // Store reference to the latest message label
 chat_message_label_ = msg_text;
-
-// 检查消息数量是否超过限制
-uint32_t msg_count = lv_obj_get_child_cnt(content_);
-while (msg_count >= MAX_MESSAGES) {
-// 删除最早的消息(第一个子节点)
-lv_obj_t* oldest_msg = lv_obj_get_child(content_, 0);
-if (oldest_msg != nullptr) {
-lv_obj_del(oldest_msg);
-msg_count--;
-// 删除最早的消息会导致所有气泡整体往上移
-// 所以需要重新滚动到当前消息气泡位置
-lv_obj_scroll_to_view_recursive(msg_bubble, LV_ANIM_ON);
-}else{
-break;
-}
-}
 }
 #else
 void LcdDisplay::SetupUI() {

@@ -22,6 +22,7 @@ OledDisplay::OledDisplay(esp_lcd_panel_io_handle_t panel_io, esp_lcd_panel_handl
 ESP_LOGI(TAG, "Initialize LVGL");
 lvgl_port_cfg_t port_cfg = ESP_LVGL_PORT_INIT_CONFIG();
 port_cfg.task_priority = 1;
+port_cfg.timer_period_ms = 50;
 lvgl_port_init(&port_cfg);

 ESP_LOGI(TAG, "Adding LCD screen");

@@ -9,8 +9,8 @@ dependencies:
 espressif/esp_lcd_panel_io_additions: "^1.0.1"
 78/esp_lcd_nv3023: "~1.0.0"
 78/esp-wifi-connect: "~2.3.1"
-78/esp-opus-encoder: "~2.1.0"
-78/esp-ml307: "~1.7.2"
+78/esp-opus-encoder: "~2.3.0"
+78/esp-ml307: "~1.7.3"
 78/xiaozhi-fonts: "~1.3.2"
 espressif/led_strip: "^2.4.1"
 espressif/esp_codec_dev: "~1.3.2"

@@ -254,6 +254,10 @@ void MqttProtocol::ParseServerHello(const cJSON* root) {
 if (sample_rate != NULL) {
 server_sample_rate_ = sample_rate->valueint;
 }
+auto frame_duration = cJSON_GetObjectItem(audio_params, "frame_duration");
+if (frame_duration != NULL) {
+server_frame_duration_ = frame_duration->valueint;
+}
 }

 auto udp = cJSON_GetObjectItem(root, "udp");

@@ -49,7 +49,7 @@ void Protocol::SendWakeWordDetected(const std::string& wake_word) {
 void Protocol::SendStartListening(ListeningMode mode) {
 std::string message = "{\"session_id\":\"" + session_id_ + "\"";
 message += ",\"type\":\"listen\",\"state\":\"start\"";
-if (mode == kListeningModeAlwaysOn) {
+if (mode == kListeningModeRealtime) {
 message += ",\"mode\":\"realtime\"";
 } else if (mode == kListeningModeAutoStop) {
 message += ",\"mode\":\"auto\"";

@@ -21,7 +21,7 @@ enum AbortReason {
 enum ListeningMode {
 kListeningModeAutoStop,
 kListeningModeManualStop,
-kListeningModeAlwaysOn // 需要 AEC 支持
+kListeningModeRealtime // 需要 AEC 支持
 };

 class Protocol {

@@ -31,6 +31,9 @@ public:
 inline int server_sample_rate() const {
 return server_sample_rate_;
 }
+inline int server_frame_duration() const {
+return server_frame_duration_;
+}
 inline const std::string& session_id() const {
 return session_id_;
 }

@@ -60,7 +63,8 @@ protected:
 std::function<void()> on_audio_channel_closed_;
 std::function<void(const std::string& message)> on_network_error_;

-int server_sample_rate_ = 16000;
+int server_sample_rate_ = 24000;
+int server_frame_duration_ = 60;
 bool error_occurred_ = false;
 std::string session_id_;
 std::chrono::time_point<std::chrono::steady_clock> last_incoming_time_;

@@ -146,6 +146,10 @@ void WebsocketProtocol::ParseServerHello(const cJSON* root) {
 if (sample_rate != NULL) {
 server_sample_rate_ = sample_rate->valueint;
 }
+auto frame_duration = cJSON_GetObjectItem(audio_params, "frame_duration");
+if (frame_duration != NULL) {
+server_frame_duration_ = frame_duration->valueint;
+}
 }

 xEventGroupSetBits(event_group_handle_, WEBSOCKET_PROTOCOL_SERVER_HELLO_EVENT);
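Taken together with the application.cc changes above, the two ParseServerHello() additions let the device adopt whatever frame duration the server advertises in its hello message; when the field is absent, the protocol.h defaults (24000 Hz, 60 ms) apply. A condensed fragment of the consuming callback, not standalone code and already present in Application::Start() above, shown here only to connect the new accessor with the decoder reconfiguration:

    protocol_->OnAudioChannelOpened([this]() {
        // Re-create the Opus decoder with the values the server announced in
        // its hello; both fall back to the protocol.h defaults (24000 Hz / 60 ms).
        SetDecodeSampleRate(protocol_->server_sample_rate(),
                            protocol_->server_frame_duration());
    });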