Auto merge of #144062 - bjorn3:lto_refactors2, r=davidtwco
Various refactors to the LTO handling code (part 2)

Continuing from https://github.com/rust-lang/rust/pull/143388, this removes a bit of dead code and moves the LTO symbol export calculation from individual backends to cg_ssa.
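For readers who want the gist without digging through the diff: the calculation that moves into cg_ssa's `back::lto` module is, roughly, the sketch below. It is reconstructed from the `copy_symbols` closure that this commit removes from `start_executing_work`; only the call site `lto::exported_symbols_for_lto(tcx, &each_linked_rlib_for_lto)` and its `Vec<String>` result are visible in this diff, so the exact body (in particular any filtering on symbol export level) is an assumption.

```rust
// Hedged sketch of rustc_codegen_ssa::back::lto::exported_symbols_for_lto.
// The per-crate walk mirrors the `copy_symbols` closure removed below; the
// flattening into a plain Vec<String> matches the new call site.
use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
use rustc_middle::ty::TyCtxt;
use rustc_session::config::Lto;

use crate::back::symbol_export::symbol_name_for_instance_in_crate;

pub(crate) fn exported_symbols_for_lto(
    tcx: TyCtxt<'_>,
    each_linked_rlib_for_lto: &[CrateNum],
) -> Vec<String> {
    // Which crates contribute symbols depends on the LTO mode: none when LTO
    // is off, only the local crate for local ThinLTO, and the local crate
    // plus every linked rlib for fat/thin LTO.
    let crates: Vec<CrateNum> = match tcx.sess.lto() {
        Lto::No => return Vec::new(),
        Lto::ThinLocal => vec![LOCAL_CRATE],
        Lto::Fat | Lto::Thin => std::iter::once(LOCAL_CRATE)
            .chain(each_linked_rlib_for_lto.iter().copied())
            .collect(),
    };

    let mut symbols = Vec::new();
    for cnum in crates {
        // Same walk as the removed closure: non-generic and generic exported
        // symbols, mangled for the crate they come from.
        symbols.extend(
            tcx.exported_non_generic_symbols(cnum)
                .iter()
                .chain(tcx.exported_generic_symbols(cnum))
                .map(|&(symbol, _info)| symbol_name_for_instance_in_crate(tcx, symbol, cnum)),
        );
    }
    symbols
}
```

With the mangled names computed once in cg_ssa, the backends receive a plain `&[String]` (see the new `exported_symbols_for_lto` parameters threaded through `run_thin_lto`, `run_and_optimize_fat_lto`, and `WorkItem::FatLto` below) instead of each backend recomputing its own map from `CrateNum` to symbol list.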
@@ -9,7 +9,7 @@ use std::{fs, io, mem, str, thread};
use rustc_abi::Size;
use rustc_ast::attr;
use rustc_ast::expand::autodiff_attrs::AutoDiffItem;
use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::jobserver::{self, Acquired};
use rustc_data_structures::memmap::Mmap;
use rustc_data_structures::profiling::{SelfProfilerRef, VerboseTimingGuard};
@@ -20,14 +20,12 @@ use rustc_errors::{
Suggestions,
};
use rustc_fs_util::link_or_copy;
use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
use rustc_incremental::{
copy_cgu_workproduct_to_incr_comp_cache_dir, in_incr_comp_dir, in_incr_comp_dir_sess,
};
use rustc_metadata::fs::copy_to_stdout;
use rustc_middle::bug;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::middle::exported_symbols::SymbolExportInfo;
use rustc_middle::ty::TyCtxt;
use rustc_session::Session;
use rustc_session::config::{
@@ -40,7 +38,7 @@ use tracing::debug;

use super::link::{self, ensure_removed};
use super::lto::{self, SerializedModule};
use super::symbol_export::symbol_name_for_instance_in_crate;
use crate::back::lto::check_lto_allowed;
use crate::errors::{AutodiffWithoutLto, ErrorCreatingRemarkDir};
use crate::traits::*;
use crate::{
@@ -332,8 +330,6 @@ pub type TargetMachineFactoryFn<B> = Arc<
+ Sync,
>;

type ExportedSymbols = FxHashMap<CrateNum, Arc<Vec<(String, SymbolExportInfo)>>>;

/// Additional resources used by optimize_and_codegen (not module specific)
#[derive(Clone)]
pub struct CodegenContext<B: WriteBackendMethods> {
@@ -343,10 +339,8 @@ pub struct CodegenContext<B: WriteBackendMethods> {
pub save_temps: bool,
pub fewer_names: bool,
pub time_trace: bool,
pub exported_symbols: Option<Arc<ExportedSymbols>>,
pub opts: Arc<config::Options>,
pub crate_types: Vec<CrateType>,
pub each_linked_rlib_for_lto: Vec<(CrateNum, PathBuf)>,
pub output_filenames: Arc<OutputFilenames>,
pub invocation_temp: Option<String>,
pub regular_module_config: Arc<ModuleConfig>,
@@ -401,13 +395,21 @@ impl<B: WriteBackendMethods> CodegenContext<B> {

fn generate_thin_lto_work<B: ExtraBackendMethods>(
cgcx: &CodegenContext<B>,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
needs_thin_lto: Vec<(String, B::ThinBuffer)>,
import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
) -> Vec<(WorkItem<B>, u64)> {
let _prof_timer = cgcx.prof.generic_activity("codegen_thin_generate_lto_work");

let (lto_modules, copy_jobs) =
B::run_thin_lto(cgcx, needs_thin_lto, import_only_modules).unwrap_or_else(|e| e.raise());
let (lto_modules, copy_jobs) = B::run_thin_lto(
cgcx,
exported_symbols_for_lto,
each_linked_rlib_for_lto,
needs_thin_lto,
import_only_modules,
)
.unwrap_or_else(|e| e.raise());
lto_modules
.into_iter()
.map(|module| {
@@ -723,6 +725,8 @@ pub(crate) enum WorkItem<B: WriteBackendMethods> {
CopyPostLtoArtifacts(CachedModuleCodegen),
/// Performs fat LTO on the given module.
FatLto {
exported_symbols_for_lto: Arc<Vec<String>>,
each_linked_rlib_for_lto: Vec<PathBuf>,
needs_fat_lto: Vec<FatLtoInput<B>>,
import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
autodiff: Vec<AutoDiffItem>,
@@ -810,7 +814,7 @@ pub(crate) enum WorkItemResult<B: WriteBackendMethods> {
}

pub enum FatLtoInput<B: WriteBackendMethods> {
Serialized { name: String, buffer: B::ModuleBuffer },
Serialized { name: String, buffer: SerializedModule<B::ModuleBuffer> },
InMemory(ModuleCodegen<B::Module>),
}

@@ -899,7 +903,10 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>(
fs::write(&path, buffer.data()).unwrap_or_else(|e| {
panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
});
Ok(WorkItemResult::NeedsFatLto(FatLtoInput::Serialized { name, buffer }))
Ok(WorkItemResult::NeedsFatLto(FatLtoInput::Serialized {
name,
buffer: SerializedModule::Local(buffer),
}))
}
None => Ok(WorkItemResult::NeedsFatLto(FatLtoInput::InMemory(module))),
},
@@ -992,12 +999,24 @@ fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>(

fn execute_fat_lto_work_item<B: ExtraBackendMethods>(
cgcx: &CodegenContext<B>,
needs_fat_lto: Vec<FatLtoInput<B>>,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
mut needs_fat_lto: Vec<FatLtoInput<B>>,
import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
autodiff: Vec<AutoDiffItem>,
module_config: &ModuleConfig,
) -> Result<WorkItemResult<B>, FatalError> {
let module = B::run_and_optimize_fat_lto(cgcx, needs_fat_lto, import_only_modules, autodiff)?;
for (module, wp) in import_only_modules {
needs_fat_lto.push(FatLtoInput::Serialized { name: wp.cgu_name, buffer: module })
}

let module = B::run_and_optimize_fat_lto(
cgcx,
exported_symbols_for_lto,
each_linked_rlib_for_lto,
needs_fat_lto,
autodiff,
)?;
let module = B::codegen(cgcx, module, module_config)?;
Ok(WorkItemResult::Finished(module))
}
@@ -1032,7 +1051,7 @@ pub(crate) enum Message<B: WriteBackendMethods> {

/// The backend has finished processing a work item for a codegen unit.
/// Sent from a backend worker thread.
WorkItem { result: Result<WorkItemResult<B>, Option<WorkerFatalError>>, worker_id: usize },
WorkItem { result: Result<WorkItemResult<B>, Option<WorkerFatalError>> },

/// The frontend has finished generating something (backend IR or a
/// post-LTO artifact) for a codegen unit, and it should be passed to the
@@ -1113,42 +1132,18 @@ fn start_executing_work<B: ExtraBackendMethods>(
let autodiff_items = autodiff_items.to_vec();

let mut each_linked_rlib_for_lto = Vec::new();
let mut each_linked_rlib_file_for_lto = Vec::new();
drop(link::each_linked_rlib(crate_info, None, &mut |cnum, path| {
if link::ignored_for_lto(sess, crate_info, cnum) {
return;
}
each_linked_rlib_for_lto.push((cnum, path.to_path_buf()));
each_linked_rlib_for_lto.push(cnum);
each_linked_rlib_file_for_lto.push(path.to_path_buf());
}));

// Compute the set of symbols we need to retain when doing LTO (if we need to)
let exported_symbols = {
let mut exported_symbols = FxHashMap::default();

let copy_symbols = |cnum| {
let symbols = tcx
.exported_non_generic_symbols(cnum)
.iter()
.chain(tcx.exported_generic_symbols(cnum))
.map(|&(s, lvl)| (symbol_name_for_instance_in_crate(tcx, s, cnum), lvl))
.collect();
Arc::new(symbols)
};

match sess.lto() {
Lto::No => None,
Lto::ThinLocal => {
exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE));
Some(Arc::new(exported_symbols))
}
Lto::Fat | Lto::Thin => {
exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE));
for &(cnum, ref _path) in &each_linked_rlib_for_lto {
exported_symbols.insert(cnum, copy_symbols(cnum));
}
Some(Arc::new(exported_symbols))
}
}
};
let exported_symbols_for_lto =
Arc::new(lto::exported_symbols_for_lto(tcx, &each_linked_rlib_for_lto));

// First up, convert our jobserver into a helper thread so we can use normal
// mpsc channels to manage our messages and such.
@@ -1183,14 +1178,12 @@ fn start_executing_work<B: ExtraBackendMethods>(

let cgcx = CodegenContext::<B> {
crate_types: tcx.crate_types().to_vec(),
each_linked_rlib_for_lto,
lto: sess.lto(),
fewer_names: sess.fewer_names(),
save_temps: sess.opts.cg.save_temps,
time_trace: sess.opts.unstable_opts.llvm_time_trace,
opts: Arc::new(sess.opts.clone()),
prof: sess.prof.clone(),
exported_symbols,
remark: sess.opts.cg.remark.clone(),
remark_dir,
incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()),
@@ -1350,18 +1343,6 @@ fn start_executing_work<B: ExtraBackendMethods>(
// necessary. There's already optimizations in place to avoid sending work
// back to the coordinator if LTO isn't requested.
return B::spawn_named_thread(cgcx.time_trace, "coordinator".to_string(), move || {
let mut worker_id_counter = 0;
let mut free_worker_ids = Vec::new();
let mut get_worker_id = |free_worker_ids: &mut Vec<usize>| {
if let Some(id) = free_worker_ids.pop() {
id
} else {
let id = worker_id_counter;
worker_id_counter += 1;
id
}
};

// This is where we collect codegen units that have gone all the way
// through codegen and LLVM.
let mut compiled_modules = vec![];
@@ -1442,12 +1423,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
let (item, _) =
work_items.pop().expect("queue empty - queue_full_enough() broken?");
main_thread_state = MainThreadState::Lending;
spawn_work(
&cgcx,
&mut llvm_start_time,
get_worker_id(&mut free_worker_ids),
item,
);
spawn_work(&cgcx, &mut llvm_start_time, item);
}
}
} else if codegen_state == Completed {
@@ -1474,12 +1450,18 @@ fn start_executing_work<B: ExtraBackendMethods>(
let needs_fat_lto = mem::take(&mut needs_fat_lto);
let needs_thin_lto = mem::take(&mut needs_thin_lto);
let import_only_modules = mem::take(&mut lto_import_only_modules);
let each_linked_rlib_file_for_lto =
mem::take(&mut each_linked_rlib_file_for_lto);

check_lto_allowed(&cgcx);

if !needs_fat_lto.is_empty() {
assert!(needs_thin_lto.is_empty());

work_items.push((
WorkItem::FatLto {
exported_symbols_for_lto: Arc::clone(&exported_symbols_for_lto),
each_linked_rlib_for_lto: each_linked_rlib_file_for_lto,
needs_fat_lto,
import_only_modules,
autodiff: autodiff_items.clone(),
@@ -1495,9 +1477,13 @@ fn start_executing_work<B: ExtraBackendMethods>(
dcx.handle().emit_fatal(AutodiffWithoutLto {});
}

for (work, cost) in
generate_thin_lto_work(&cgcx, needs_thin_lto, import_only_modules)
{
for (work, cost) in generate_thin_lto_work(
&cgcx,
&exported_symbols_for_lto,
&each_linked_rlib_file_for_lto,
needs_thin_lto,
import_only_modules,
) {
let insertion_index = work_items
.binary_search_by_key(&cost, |&(_, cost)| cost)
.unwrap_or_else(|e| e);
@@ -1516,12 +1502,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
MainThreadState::Idle => {
if let Some((item, _)) = work_items.pop() {
main_thread_state = MainThreadState::Lending;
spawn_work(
&cgcx,
&mut llvm_start_time,
get_worker_id(&mut free_worker_ids),
item,
);
spawn_work(&cgcx, &mut llvm_start_time, item);
} else {
// There is no unstarted work, so let the main thread
// take over for a running worker. Otherwise the
@@ -1557,12 +1538,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
while running_with_own_token < tokens.len()
&& let Some((item, _)) = work_items.pop()
{
spawn_work(
&cgcx,
&mut llvm_start_time,
get_worker_id(&mut free_worker_ids),
item,
);
spawn_work(&cgcx, &mut llvm_start_time, item);
running_with_own_token += 1;
}
}
@@ -1570,21 +1546,6 @@ fn start_executing_work<B: ExtraBackendMethods>(
// Relinquish accidentally acquired extra tokens.
tokens.truncate(running_with_own_token);

// If a thread exits successfully then we drop a token associated
// with that worker and update our `running_with_own_token` count.
// We may later re-acquire a token to continue running more work.
// We may also not actually drop a token here if the worker was
// running with an "ephemeral token".
let mut free_worker = |worker_id| {
if main_thread_state == MainThreadState::Lending {
main_thread_state = MainThreadState::Idle;
} else {
running_with_own_token -= 1;
}

free_worker_ids.push(worker_id);
};

let msg = coordinator_receive.recv().unwrap();
match *msg.downcast::<Message<B>>().ok().unwrap() {
// Save the token locally and the next turn of the loop will use
@@ -1653,8 +1614,17 @@ fn start_executing_work<B: ExtraBackendMethods>(
codegen_state = Aborted;
}

Message::WorkItem { result, worker_id } => {
free_worker(worker_id);
Message::WorkItem { result } => {
// If a thread exits successfully then we drop a token associated
// with that worker and update our `running_with_own_token` count.
// We may later re-acquire a token to continue running more work.
// We may also not actually drop a token here if the worker was
// running with an "ephemeral token".
if main_thread_state == MainThreadState::Lending {
main_thread_state = MainThreadState::Idle;
} else {
running_with_own_token -= 1;
}

match result {
Ok(WorkItemResult::Finished(compiled_module)) => {
@@ -1800,7 +1770,6 @@ pub(crate) struct WorkerFatalError;
fn spawn_work<'a, B: ExtraBackendMethods>(
cgcx: &'a CodegenContext<B>,
llvm_start_time: &mut Option<VerboseTimingGuard<'a>>,
worker_id: usize,
work: WorkItem<B>,
) {
if cgcx.config(work.module_kind()).time_module && llvm_start_time.is_none() {
@@ -1815,24 +1784,21 @@ fn spawn_work<'a, B: ExtraBackendMethods>(
struct Bomb<B: ExtraBackendMethods> {
coordinator_send: Sender<Box<dyn Any + Send>>,
result: Option<Result<WorkItemResult<B>, FatalError>>,
worker_id: usize,
}
impl<B: ExtraBackendMethods> Drop for Bomb<B> {
fn drop(&mut self) {
let worker_id = self.worker_id;
let msg = match self.result.take() {
Some(Ok(result)) => Message::WorkItem::<B> { result: Ok(result), worker_id },
Some(Ok(result)) => Message::WorkItem::<B> { result: Ok(result) },
Some(Err(FatalError)) => {
Message::WorkItem::<B> { result: Err(Some(WorkerFatalError)), worker_id }
Message::WorkItem::<B> { result: Err(Some(WorkerFatalError)) }
}
None => Message::WorkItem::<B> { result: Err(None), worker_id },
None => Message::WorkItem::<B> { result: Err(None) },
};
drop(self.coordinator_send.send(Box::new(msg)));
}
}

let mut bomb =
Bomb::<B> { coordinator_send: cgcx.coordinator_send.clone(), result: None, worker_id };
let mut bomb = Bomb::<B> { coordinator_send: cgcx.coordinator_send.clone(), result: None };

// Execute the work itself, and if it finishes successfully then flag
// ourselves as a success as well.
@@ -1856,12 +1822,20 @@ fn spawn_work<'a, B: ExtraBackendMethods>(
);
Ok(execute_copy_from_cache_work_item(&cgcx, m, module_config))
}
WorkItem::FatLto { needs_fat_lto, import_only_modules, autodiff } => {
WorkItem::FatLto {
exported_symbols_for_lto,
each_linked_rlib_for_lto,
needs_fat_lto,
import_only_modules,
autodiff,
} => {
let _timer = cgcx
.prof
.generic_activity_with_arg("codegen_module_perform_lto", "everything");
execute_fat_lto_work_item(
&cgcx,
&exported_symbols_for_lto,
&each_linked_rlib_for_lto,
needs_fat_lto,
import_only_modules,
autodiff,