Subpart8 for async drop (major2) - dropline in scopes for potentially async drops
azhogin committed Aug 30, 2024
1 parent cc9bedd commit b755978
Showing 1 changed file with 124 additions and 20 deletions.
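Background for readers: when a suspended coroutine (for example, a future parked at an .await) is dropped, control enters the coroutine drop path, the "dropline", which runs the destructors of locals that were live across the suspension point. This commit threads that path through MIR scope building for scopes whose locals may need async drops, mirroring the existing unwind-path bookkeeping. The following stable-Rust program is an editor's illustration of the dropline, not part of the commit; it uses an ordinary synchronous Drop and needs Rust 1.85+ for Waker::noop:

use std::future::Future;
use std::pin::pin;
use std::task::{Context, Waker};

struct Guard(&'static str);
impl Drop for Guard {
    fn drop(&mut self) {
        println!("dropped {}", self.0);
    }
}

async fn work() {
    let g = Guard("g"); // live across the await below
    std::future::pending::<()>().await; // suspension point
    drop(g);
}

fn main() {
    let mut fut = pin!(work());
    let mut cx = Context::from_waker(Waker::noop());
    // Poll once so `g` is initialized and the future parks at the await.
    assert!(fut.as_mut().poll(&mut cx).is_pending());
    // `fut` is dropped here while suspended: the coroutine drop path (the
    // "dropline") runs and prints "dropped g".
}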
144 changes: 124 additions & 20 deletions compiler/rustc_mir_build/src/build/scope.rs
@@ -89,6 +89,7 @@ use rustc_index::{IndexSlice, IndexVec};
 use rustc_middle::middle::region;
 use rustc_middle::mir::*;
 use rustc_middle::thir::{ExprId, LintLevel};
+use rustc_middle::ty::{self, TyCtxt};
 use rustc_middle::{bug, span_bug};
 use rustc_session::lint::Level;
 use rustc_span::source_map::Spanned;
@@ -837,22 +838,45 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         block.unit()
     }
 
+    fn is_async_drop_impl(
+        tcx: TyCtxt<'tcx>,
+        local_decls: &IndexVec<Local, LocalDecl<'tcx>>,
+        param_env: ty::ParamEnv<'tcx>,
+        local: Local,
+    ) -> bool {
+        let ty = local_decls[local].ty;
+        if ty.is_async_drop(tcx, param_env) || ty.is_coroutine() {
+            return true;
+        }
+        ty.needs_async_drop(tcx, param_env)
+    }
+    fn is_async_drop(&self, local: Local) -> bool {
+        Self::is_async_drop_impl(self.tcx, &self.local_decls, self.param_env, local)
+    }
+
     fn leave_top_scope(&mut self, block: BasicBlock) -> BasicBlock {
         // If we are emitting a `drop` statement, we need to have the cached
         // diverge cleanup pads ready in case that drop panics.
         let needs_cleanup = self.scopes.scopes.last().is_some_and(|scope| scope.needs_cleanup());
         let is_coroutine = self.coroutine.is_some();
         let unwind_to = if needs_cleanup { self.diverge_cleanup() } else { DropIdx::MAX };
 
+        let scope = self.scopes.scopes.last().expect("leave_top_scope called with no scopes");
+        let has_async_drops = is_coroutine
+            && scope.drops.iter().any(|v| v.kind == DropKind::Value && self.is_async_drop(v.local));
+        let dropline_to = if has_async_drops { Some(self.diverge_dropline()) } else { None };
         let scope = self.scopes.scopes.last().expect("leave_top_scope called with no scopes");
         build_scope_drops(
             &mut self.cfg,
             &mut self.scopes.unwind_drops,
+            &mut self.scopes.coroutine_drops,
             scope,
             block,
             unwind_to,
+            dropline_to,
             is_coroutine && needs_cleanup,
             self.arg_count,
+            |v: Local| Self::is_async_drop_impl(self.tcx, &self.local_decls, self.param_env, v),
         )
         .into_block()
     }
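Editor's note on the shape of the data: build_scope_drops now advances a dropline_to cursor in lockstep with unwind_to. Both index into a DropTree, a reverse-linked forest where each drop node points at the next enclosing drop, so leaving a scope means walking from the innermost node outward. A minimal self-contained sketch of that representation, with simplified stand-in types rather than the compiler's:

// Simplified stand-ins for rustc's DropTree/DropIdx, not the real types.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum DropKind {
    Value,
    Storage,
}

#[derive(Clone, Copy)]
struct DropNode {
    kind: DropKind,
    local: u32,
    next: usize, // index of the enclosing (outer) drop; 0 is the root
}

struct DropTree {
    drops: Vec<DropNode>,
}

impl DropTree {
    fn new() -> Self {
        // Index 0 plays the role of ROOT_NODE: "no drops left".
        Self { drops: vec![DropNode { kind: DropKind::Storage, local: 0, next: 0 }] }
    }

    fn add_drop(&mut self, kind: DropKind, local: u32, next: usize) -> usize {
        self.drops.push(DropNode { kind, local, next });
        self.drops.len() - 1
    }
}

fn main() {
    let mut tree = DropTree::new();
    // A scope holding locals 1 and 2 builds the chain 2 -> 1 -> root.
    let d1 = tree.add_drop(DropKind::Value, 1, 0);
    let d2 = tree.add_drop(DropKind::Value, 2, d1);

    // Leaving the scope walks the chain outward, exactly how the
    // `unwind_to` and `dropline_to` cursors advance in build_scope_drops.
    let mut cursor = d2;
    while cursor != 0 {
        let node = tree.drops[cursor];
        println!("schedule {:?} drop of local {}", node.kind, node.local);
        cursor = node.next;
    }
}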
@@ -1234,22 +1258,22 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         self.scopes.unwind_drops.add_entry_point(start, next_drop);
     }
 
-    /// Sets up a path that performs all required cleanup for dropping a
-    /// coroutine, starting from the given block that ends in
-    /// [TerminatorKind::Yield].
-    ///
-    /// This path terminates in CoroutineDrop.
-    pub(crate) fn coroutine_drop_cleanup(&mut self, yield_block: BasicBlock) {
+    /// Returns the [DropIdx] for the innermost drop for dropline (coroutine drop path).
+    /// The `DropIdx` will be created if it doesn't already exist.
+    fn diverge_dropline(&mut self) -> DropIdx {
+        // It is okay to use a dummy span because getting the scope index
+        // for the topmost scope must always succeed.
+        self.diverge_dropline_target(self.scopes.topmost(), DUMMY_SP)
+    }
+
+    /// Similar to diverge_cleanup_target, but for dropline (coroutine drop path)
+    fn diverge_dropline_target(&mut self, target_scope: region::Scope, span: Span) -> DropIdx {
         debug_assert!(
-            matches!(
-                self.cfg.block_data(yield_block).terminator().kind,
-                TerminatorKind::Yield { .. }
-            ),
-            "coroutine_drop_cleanup called on block with non-yield terminator."
+            self.coroutine.is_some(),
+            "diverge_dropline_target is valid only for coroutine"
         );
-        let (uncached_scope, mut cached_drop) = self
-            .scopes
-            .scopes
+        let target = self.scopes.scope_index(target_scope, span);
+        let (uncached_scope, mut cached_drop) = self.scopes.scopes[..=target]
             .iter()
             .enumerate()
             .rev()
@@ -1258,13 +1282,34 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             })
             .unwrap_or((0, ROOT_NODE));
 
-        for scope in &mut self.scopes.scopes[uncached_scope..] {
+        if uncached_scope > target {
+            return cached_drop;
+        }
+
+        for scope in &mut self.scopes.scopes[uncached_scope..=target] {
             for drop in &scope.drops {
                 cached_drop = self.scopes.coroutine_drops.add_drop(*drop, cached_drop);
             }
             scope.cached_coroutine_drop_block = Some(cached_drop);
         }
 
+        cached_drop
+    }
+
+    /// Sets up a path that performs all required cleanup for dropping a
+    /// coroutine, starting from the given block that ends in
+    /// [TerminatorKind::Yield].
+    ///
+    /// This path terminates in CoroutineDrop.
+    pub(crate) fn coroutine_drop_cleanup(&mut self, yield_block: BasicBlock) {
+        debug_assert!(
+            matches!(
+                self.cfg.block_data(yield_block).terminator().kind,
+                TerminatorKind::Yield { .. }
+            ),
+            "coroutine_drop_cleanup called on block with non-yield terminator."
+        );
+        let cached_drop = self.diverge_dropline();
         self.scopes.coroutine_drops.add_entry_point(yield_block, cached_drop);
     }
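Editor's note: diverge_dropline_target builds the coroutine-drop chain lazily. It starts from the innermost scope that already cached its chain end (cached_coroutine_drop_block), returns early when everything up to the target scope is cached, and otherwise extends the chain and caches as it goes. A compact model of that walk, with hypothetical simplified types:

// Hypothetical simplified types; the real logic is diverge_dropline_target.
struct Scope {
    drops: Vec<u32>,            // locals this scope must drop
    cached_drop: Option<usize>, // memoized end of this scope's chain
}

// `tree` holds (local, next) pairs; index 0 is the root ("no drops left").
fn dropline_for(scopes: &mut [Scope], tree: &mut Vec<(u32, usize)>, target: usize) -> usize {
    // Find the innermost scope up to `target` that already cached its chain.
    let (start, mut cached) = scopes[..=target]
        .iter()
        .enumerate()
        .rev()
        .find_map(|(i, s)| s.cached_drop.map(|c| (i + 1, c)))
        .unwrap_or((0, 0));
    if start > target {
        return cached; // everything up to `target` is already built
    }
    // Extend the chain through the uncached scopes, caching as we go.
    for scope in &mut scopes[start..=target] {
        for &local in &scope.drops {
            tree.push((local, cached)); // add_drop(local, next = cached)
            cached = tree.len() - 1;
        }
        scope.cached_drop = Some(cached);
    }
    cached
}

fn main() {
    let mut scopes = vec![
        Scope { drops: vec![1], cached_drop: None },
        Scope { drops: vec![2, 3], cached_drop: None },
    ];
    let mut tree = vec![(0u32, 0usize)]; // root node
    let end = dropline_for(&mut scopes, &mut tree, 1);
    // The second call is a pure cache hit and builds nothing new.
    assert_eq!(dropline_for(&mut scopes, &mut tree, 1), end);
    assert_eq!(tree.len(), 4); // root + three drops, built exactly once
}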

@@ -1349,16 +1394,22 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
 }
 
 /// Builds drops for `pop_scope` and `leave_top_scope`.
-fn build_scope_drops<'tcx>(
+fn build_scope_drops<'tcx, F>(
     cfg: &mut CFG<'tcx>,
     unwind_drops: &mut DropTree,
+    coroutine_drops: &mut DropTree,
     scope: &Scope,
     mut block: BasicBlock,
     mut unwind_to: DropIdx,
+    mut dropline_to: Option<DropIdx>,
     storage_dead_on_unwind: bool,
     arg_count: usize,
-) -> BlockAnd<()> {
-    debug!("build_scope_drops({:?} -> {:?})", block, scope);
+    is_async_drop: F,
+) -> BlockAnd<()>
+where
+    F: Fn(Local) -> bool,
+{
+    debug!("build_scope_drops({:?} -> {:?}), dropline_to={:?}", block, scope, dropline_to);
 
     // Build up the drops in evaluation order. The end result will
     // look like:
@@ -1392,6 +1443,12 @@ fn build_scope_drops<'tcx>(
                 debug_assert_eq!(unwind_drops.drops[unwind_to].data.kind, drop_data.kind);
                 unwind_to = unwind_drops.drops[unwind_to].next;
 
+                if let Some(idx) = dropline_to {
+                    debug_assert_eq!(coroutine_drops.drops[idx].data.local, drop_data.local);
+                    debug_assert_eq!(coroutine_drops.drops[idx].data.kind, drop_data.kind);
+                    dropline_to = Some(coroutine_drops.drops[idx].next);
+                }
+
                 // If the operand has been moved, and we are not on an unwind
                 // path, then don't generate the drop. (We only take this into
                 // account for non-unwind paths so as not to disturb the
@@ -1401,6 +1458,11 @@
                 }
 
                 unwind_drops.add_entry_point(block, unwind_to);
+                if let Some(to) = dropline_to
+                    && is_async_drop(local)
+                {
+                    coroutine_drops.add_entry_point(block, to);
+                }
 
                 let next = cfg.start_new_block();
                 cfg.terminate(
@@ -1423,6 +1485,11 @@
                     debug_assert_eq!(unwind_drops.drops[unwind_to].data.kind, drop_data.kind);
                     unwind_to = unwind_drops.drops[unwind_to].next;
                 }
+                if let Some(idx) = dropline_to {
+                    debug_assert_eq!(coroutine_drops.drops[idx].data.local, drop_data.local);
+                    debug_assert_eq!(coroutine_drops.drops[idx].data.kind, drop_data.kind);
+                    dropline_to = Some(coroutine_drops.drops[idx].next);
+                }
                 // Only temps and vars need their storage dead.
                 assert!(local.index() > arg_count);
                 cfg.push(block, Statement { source_info, kind: StatementKind::StorageDead(local) });
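Editor's note: the next hunk links the exit drop tree into the coroutine-drop tree. It walks the exit tree in index order and records each node's counterpart in the coroutine tree (dropline_indices); since a node's next field always refers to an earlier, outer node, one forward pass is enough. A small model of that index mapping, again with simplified types rather than rustc's:

// Simplified types, not rustc's; `next` always points at an earlier node.
#[derive(Clone, Copy)]
struct Node {
    local: u32,
    next: usize,
}

fn mirror(exit: &[Node], coroutine: &mut Vec<Node>, target_root: usize) -> Vec<usize> {
    // indices[i] = coroutine-tree counterpart of exit-tree node i,
    // playing the role of `dropline_indices` in the hunk below.
    let mut indices = vec![target_root];
    for node in exit.iter().skip(1) {
        // `node.next` was mapped already because parents precede children.
        let mapped_next = indices[node.next];
        coroutine.push(Node { local: node.local, next: mapped_next });
        indices.push(coroutine.len() - 1);
    }
    indices
}

fn main() {
    // Exit tree: a root, then locals 1 and 2 chained onto it.
    let exit = [
        Node { local: 0, next: 0 },
        Node { local: 1, next: 0 },
        Node { local: 2, next: 1 },
    ];
    let mut coroutine = vec![Node { local: 0, next: 0 }];
    let map = mirror(&exit, &mut coroutine, 0);
    assert_eq!(map, vec![0, 1, 2]);
}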
@@ -1481,6 +1548,39 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
                 }
             }
         }
+        // Link the exit drop tree to dropline drop tree (coroutine drop path) for async drops
+        if is_coroutine
+            && drops.drops.iter().any(|DropNode { data, next: _ }| {
+                data.kind == DropKind::Value && self.is_async_drop(data.local)
+            })
+        {
+            let dropline_target = self.diverge_dropline_target(else_scope, span);
+            let mut dropline_indices = IndexVec::from_elem_n(dropline_target, 1);
+            for (drop_idx, drop_data) in drops.drops.iter_enumerated().skip(1) {
+                match drop_data.data.kind {
+                    DropKind::Storage => {
+                        let coroutine_drop = self
+                            .scopes
+                            .coroutine_drops
+                            .add_drop(drop_data.data, dropline_indices[drop_data.next]);
+                        dropline_indices.push(coroutine_drop);
+                    }
+                    DropKind::Value => {
+                        let coroutine_drop = self
+                            .scopes
+                            .coroutine_drops
+                            .add_drop(drop_data.data, dropline_indices[drop_data.next]);
+                        if self.is_async_drop(drop_data.data.local) {
+                            self.scopes.coroutine_drops.add_entry_point(
+                                blocks[drop_idx].unwrap(),
+                                dropline_indices[drop_data.next],
+                            );
+                        }
+                        dropline_indices.push(coroutine_drop);
+                    }
+                }
+            }
+        }
         blocks[ROOT_NODE].map(BasicBlock::unit)
     }

@@ -1526,9 +1626,11 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
         // to be captured by the coroutine. I'm not sure how important this
         // optimization is, but it is here.
         for (drop_idx, drop_node) in drops.drops.iter_enumerated() {
-            if let DropKind::Value = drop_node.data.kind {
+            if let DropKind::Value = drop_node.data.kind
+                && let Some(bb) = blocks[drop_idx]
+            {
                 debug_assert!(drop_node.next < drops.drops.next_index());
-                drops.entry_points.push((drop_node.next, blocks[drop_idx].unwrap()));
+                drops.entry_points.push((drop_node.next, bb));
             }
         }
         Self::build_unwind_tree(cfg, drops, fn_span, resume_block);
@@ -1582,6 +1684,8 @@ impl<'tcx> DropTreeBuilder<'tcx> for CoroutineDrop {
         let term = cfg.block_data_mut(from).terminator_mut();
         if let TerminatorKind::Yield { ref mut drop, .. } = term.kind {
             *drop = Some(to);
+        } else if let TerminatorKind::Drop { ref mut drop, .. } = term.kind {
+            *drop = Some(to);
         } else {
             span_bug!(
                 term.source_info.span,
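Editor's note: the hunk above lets the coroutine-drop tree builder attach entry points to Drop terminators as well as Yield ones, since with async drops the dropline can now be entered from a drop itself rather than only from a suspension point. A toy version of the patched logic; the commit uses if/else-if, and a match with an or-pattern is the equivalent shape:

// Toy stand-ins; the real types are TerminatorKind and BasicBlock.
enum Terminator {
    Yield { drop: Option<u32> },
    Drop { drop: Option<u32> },
    Return,
}

fn link_entry_point(term: &mut Terminator, to: u32) {
    match term {
        // Both suspension points and (async) drops can now continue
        // into the coroutine-drop tree.
        Terminator::Yield { drop } | Terminator::Drop { drop } => *drop = Some(to),
        _ => panic!("cannot enter the coroutine drop tree from this terminator"),
    }
}

fn main() {
    let mut term = Terminator::Drop { drop: None };
    link_entry_point(&mut term, 7);
    assert!(matches!(term, Terminator::Drop { drop: Some(7) }));
    let _unused = Terminator::Return; // constructed only to keep the sketch lint-free
}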
