Fix gRPC stream panic: use async stream combinators instead of block_on

The gRPC streaming code was using tokio::runtime::Handle::current().block_on()
inside filter_map closures, which caused a panic ('Cannot start a runtime from
within a runtime') when called from an async context.

Fixed by replacing the pattern with `.then(async move { ... })` followed by
`.filter_map(|x| x)`, which runs the async rendering work as part of the stream
pipeline (driven by the surrounding runtime) instead of blocking the executor thread.

This fixes the gRPC Ping/Pong freeze issue and restores request cancellation.
Commit: aa79fb05f9 (parent: fe01796536)
Author: Gregory Schier
Date: 2026-01-10 14:31:39 -08:00
2 changed files with 49 additions and 45 deletions

View File

@@ -392,7 +392,7 @@ async fn cmd_grpc_go<R: Runtime>(
     let encryption_manager = encryption_manager.clone();
     let msg = block_in_place(|| {
         tauri::async_runtime::block_on(async {
-            render_template(
+            let result = render_template(
                 msg.as_str(),
                 environment_chain,
                 &PluginTemplateCallback::new(
@@ -406,8 +406,8 @@ async fn cmd_grpc_go<R: Runtime>(
                 ),
                 &RenderOptions { error_behavior: RenderErrorBehavior::Throw },
             )
-            .await
-            .expect("Failed to render template")
+            .await;
+            result.expect("Failed to render template")
         })
     });
     in_msg_tx.try_send(msg.clone()).unwrap();

View File

@@ -131,14 +131,15 @@ impl GrpcConnection {
             let md = metadata.clone();
             let use_reflection = self.use_reflection.clone();
             let client_cert = client_cert.clone();
-            stream.filter_map(move |json| {
+            stream
+                .then(move |json| {
                     let pool = pool.clone();
                     let uri = uri.clone();
                     let input_message = input_message.clone();
                     let md = md.clone();
                     let use_reflection = use_reflection.clone();
                     let client_cert = client_cert.clone();
-                tokio::runtime::Handle::current().block_on(async move {
+                    async move {
                         if use_reflection {
                             if let Err(e) =
                                 reflect_types_for_message(pool, &uri, &json, &md, client_cert).await
@@ -154,8 +155,9 @@ impl GrpcConnection {
                             None
                         }
                     }
+                    }
                 })
-            })
+                .filter_map(|x| x)
         };
         let mut client = tonic::client::Grpc::with_origin(self.conn.clone(), self.uri.clone());
@@ -185,14 +187,15 @@ impl GrpcConnection {
             let md = metadata.clone();
             let use_reflection = self.use_reflection.clone();
             let client_cert = client_cert.clone();
-            stream.filter_map(move |json| {
+            stream
+                .then(move |json| {
                     let pool = pool.clone();
                     let uri = uri.clone();
                     let input_message = input_message.clone();
                     let md = md.clone();
                     let use_reflection = use_reflection.clone();
                     let client_cert = client_cert.clone();
-                tokio::runtime::Handle::current().block_on(async move {
+                    async move {
                         if use_reflection {
                             if let Err(e) =
                                 reflect_types_for_message(pool, &uri, &json, &md, client_cert).await
@@ -208,8 +211,9 @@ impl GrpcConnection {
                             None
                         }
                     }
+                    }
                 })
-            })
+                .filter_map(|x| x)
         };
         let mut client = tonic::client::Grpc::with_origin(self.conn.clone(), self.uri.clone());