Auto merge of #38148 - frewsxcv:rollup, r=frewsxcv
Rollup of 15 pull requests

- Successful merges: #37859, #37919, #38020, #38028, #38029, #38065, #38073, #38077, #38089, #38090, #38096, #38112, #38113, #38130, #38141
- Failed merges:
bors committed Dec 3, 2016
2 parents 2cdbd5e + 2e038ed commit 28d6623
Showing 21 changed files with 738 additions and 93 deletions.
2 changes: 1 addition & 1 deletion src/bootstrap/README.md
@@ -32,7 +32,7 @@ The script accepts commands, flags, and filters to determine what to do:
# build the whole compiler
./x.py build
-# build the stage1 compier
+# build the stage1 compiler
./x.py build --stage 1
# build stage0 libstd
10 changes: 5 additions & 5 deletions src/doc/book/testing.md
@@ -589,11 +589,11 @@ please see the [Documentation chapter](documentation.html).

# Testing and concurrency

-One thing that is important to note when writing tests are run concurrently
-using threads. For this reason you should take care that your tests are written
-in such a way as to not depend on each-other, or on any shared state. "Shared
-state" can also include the environment, such as the current working directory,
-or environment variables.
+One thing that is important to note when writing tests is that they may be run
+concurrently using threads. For this reason you should take care that your tests
+are written in such a way as to not depend on each-other, or on any shared
+state. "Shared state" can also include the environment, such as the current
+working directory, or environment variables.

If this is an issue it is possible to control this concurrency, either by
setting the environment variable `RUST_TEST_THREADS`, or by passing the argument
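The paragraph above is worth internalizing before the next file: because the test runner uses threads by default, any two tests touching the same process-global resource can race. A minimal, self-contained sketch of the hazard (the `APP_MODE` key and test names are invented for illustration, not part of this commit):

```rust
use std::env;

// Both tests mutate and read the same process-wide environment variable.
// Run concurrently (the default), their set/get calls can interleave and
// fail spuriously; `RUST_TEST_THREADS=1 cargo test` serializes them.
#[test]
fn writes_debug_mode() {
    env::set_var("APP_MODE", "debug");
    assert_eq!(env::var("APP_MODE").unwrap(), "debug");
}

#[test]
fn writes_release_mode() {
    env::set_var("APP_MODE", "release");
    assert_eq!(env::var("APP_MODE").unwrap(), "release");
}
```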
9 changes: 5 additions & 4 deletions src/doc/reference.md
@@ -740,13 +740,14 @@ There are several kinds of item:
* [`extern crate` declarations](#extern-crate-declarations)
* [`use` declarations](#use-declarations)
* [modules](#modules)
-* [functions](#functions)
+* [function definitions](#functions)
* [`extern` blocks](#external-blocks)
* [type definitions](grammar.html#type-definitions)
-* [structs](#structs)
-* [enumerations](#enumerations)
+* [struct definitions](#structs)
+* [enumeration definitions](#enumerations)
* [constant items](#constant-items)
* [static items](#static-items)
-* [traits](#traits)
+* [trait definitions](#traits)
* [implementations](#implementations)

Some items form an implicit scope for the declaration of sub-items. In other
36 changes: 19 additions & 17 deletions src/grammar/verify.rs
@@ -23,23 +23,23 @@ use std::fs::File;
use std::io::{BufRead, Read};
use std::path::Path;

-use syntax::parse;
use syntax::parse::lexer;
use rustc::dep_graph::DepGraph;
use rustc::session::{self, config};
use rustc::middle::cstore::DummyCrateStore;

use std::rc::Rc;
use syntax::ast;
use syntax::ast::Name;
use syntax::codemap;
use syntax::parse::token::{self, BinOpToken, DelimToken, Lit, Token};
use syntax::parse::lexer::TokenAndSpan;
use syntax_pos::Pos;

+use syntax::symbol::{Symbol, keywords};

fn parse_token_list(file: &str) -> HashMap<String, token::Token> {
fn id() -> token::Token {
-Token::Ident(ast::Ident::with_empty_ctxt(Name(0)))
+Token::Ident(ast::Ident::with_empty_ctxt(keywords::Invalid.name()))
}

let mut res = HashMap::new();
@@ -65,7 +65,7 @@ fn parse_token_list(file: &str) -> HashMap<String, token::Token> {
"SHL" => Token::BinOp(BinOpToken::Shl),
"LBRACE" => Token::OpenDelim(DelimToken::Brace),
"RARROW" => Token::RArrow,
"LIT_STR" => Token::Literal(Lit::Str_(Name(0)), None),
"LIT_STR" => Token::Literal(Lit::Str_(keywords::Invalid.name()), None),
"DOTDOT" => Token::DotDot,
"MOD_SEP" => Token::ModSep,
"DOTDOTDOT" => Token::DotDotDot,
@@ -75,21 +75,22 @@ fn parse_token_list(file: &str) -> HashMap<String, token::Token> {
"ANDAND" => Token::AndAnd,
"AT" => Token::At,
"LBRACKET" => Token::OpenDelim(DelimToken::Bracket),
"LIT_STR_RAW" => Token::Literal(Lit::StrRaw(Name(0), 0), None),
"LIT_STR_RAW" => Token::Literal(Lit::StrRaw(keywords::Invalid.name(), 0), None),
"RPAREN" => Token::CloseDelim(DelimToken::Paren),
"SLASH" => Token::BinOp(BinOpToken::Slash),
"COMMA" => Token::Comma,
"LIFETIME" => Token::Lifetime(ast::Ident::with_empty_ctxt(Name(0))),
"LIFETIME" => Token::Lifetime(
ast::Ident::with_empty_ctxt(keywords::Invalid.name())),
"CARET" => Token::BinOp(BinOpToken::Caret),
"TILDE" => Token::Tilde,
"IDENT" => id(),
"PLUS" => Token::BinOp(BinOpToken::Plus),
"LIT_CHAR" => Token::Literal(Lit::Char(Name(0)), None),
"LIT_BYTE" => Token::Literal(Lit::Byte(Name(0)), None),
"LIT_CHAR" => Token::Literal(Lit::Char(keywords::Invalid.name()), None),
"LIT_BYTE" => Token::Literal(Lit::Byte(keywords::Invalid.name()), None),
"EQ" => Token::Eq,
"RBRACKET" => Token::CloseDelim(DelimToken::Bracket),
"COMMENT" => Token::Comment,
"DOC_COMMENT" => Token::DocComment(Name(0)),
"DOC_COMMENT" => Token::DocComment(keywords::Invalid.name()),
"DOT" => Token::Dot,
"EQEQ" => Token::EqEq,
"NE" => Token::Ne,
@@ -99,9 +100,9 @@ fn parse_token_list(file: &str) -> HashMap<String, token::Token> {
"BINOP" => Token::BinOp(BinOpToken::Plus),
"POUND" => Token::Pound,
"OROR" => Token::OrOr,
"LIT_INTEGER" => Token::Literal(Lit::Integer(Name(0)), None),
"LIT_INTEGER" => Token::Literal(Lit::Integer(keywords::Invalid.name()), None),
"BINOPEQ" => Token::BinOpEq(BinOpToken::Plus),
"LIT_FLOAT" => Token::Literal(Lit::Float(Name(0)), None),
"LIT_FLOAT" => Token::Literal(Lit::Float(keywords::Invalid.name()), None),
"WHITESPACE" => Token::Whitespace,
"UNDERSCORE" => Token::Underscore,
"MINUS" => Token::BinOp(BinOpToken::Minus),
@@ -111,10 +112,11 @@ fn parse_token_list(file: &str) -> HashMap<String, token::Token> {
"OR" => Token::BinOp(BinOpToken::Or),
"GT" => Token::Gt,
"LE" => Token::Le,
"LIT_BINARY" => Token::Literal(Lit::ByteStr(Name(0)), None),
"LIT_BINARY_RAW" => Token::Literal(Lit::ByteStrRaw(Name(0), 0), None),
"LIT_BINARY" => Token::Literal(Lit::ByteStr(keywords::Invalid.name()), None),
"LIT_BINARY_RAW" => Token::Literal(
Lit::ByteStrRaw(keywords::Invalid.name(), 0), None),
"QUESTION" => Token::Question,
"SHEBANG" => Token::Shebang(Name(0)),
"SHEBANG" => Token::Shebang(keywords::Invalid.name()),
_ => continue,
};

@@ -158,7 +160,7 @@ fn fix(mut lit: &str) -> ast::Name {
let leading_hashes = count(lit);

// +1/-1 to adjust for single quotes
-parse::token::intern(&lit[leading_hashes + 1..lit.len() - leading_hashes - 1])
+Symbol::intern(&lit[leading_hashes + 1..lit.len() - leading_hashes - 1])
}

/// Assuming a char/byte literal, strip the 'b' prefix and the single quotes.
@@ -168,7 +170,7 @@ fn fixchar(mut lit: &str) -> ast::Name {
lit = &lit[1..];
}

-parse::token::intern(&lit[1..lit.len() - 1])
+Symbol::intern(&lit[1..lit.len() - 1])
}

fn count(lit: &str) -> usize {
@@ -196,7 +198,7 @@ fn parse_antlr_token(s: &str, tokens: &HashMap<String, token::Token>, surrogate_
let not_found = format!("didn't find token {:?} in the map", toknum);
let proto_tok = tokens.get(toknum).expect(&not_found[..]);

-let nm = parse::token::intern(content);
+let nm = Symbol::intern(content);

debug!("What we got: content (`{}`), proto: {:?}", content, proto_tok);

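The verify.rs changes above replace the raw `Name(0)` placeholder with `keywords::Invalid.name()` and route string interning through `Symbol::intern`. For readers unfamiliar with interning, here is a self-contained sketch of the idea — an illustration only, not the compiler's `syntax::symbol` implementation:

```rust
use std::collections::HashMap;

// Toy interner: maps each distinct string to a small stable index, so
// tokens can carry a cheap Copy handle instead of an owned String.
#[derive(Default)]
struct Interner {
    indices: HashMap<String, u32>,
    strings: Vec<String>,
}

impl Interner {
    fn intern(&mut self, s: &str) -> u32 {
        if let Some(&idx) = self.indices.get(s) {
            return idx;
        }
        let idx = self.strings.len() as u32;
        self.strings.push(s.to_string());
        self.indices.insert(s.to_string(), idx);
        idx
    }

    fn get(&self, idx: u32) -> &str {
        &self.strings[idx as usize]
    }
}

fn main() {
    let mut interner = Interner::default();
    // Reserve index 0 as an "invalid" placeholder, mirroring how the
    // diff uses keywords::Invalid.name() rather than a bare Name(0).
    let invalid = interner.intern("");
    let foo = interner.intern("foo");
    assert_eq!(invalid, 0);
    assert_eq!(interner.get(foo), "foo");
    println!("`foo` interned as index {}", foo);
}
```

Using a named reserved keyword instead of the literal `0` keeps the "this token has no real name" convention in one place, so the placeholder survives any future renumbering of the interner's prefilled symbols.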
10 changes: 10 additions & 0 deletions src/libcore/option.rs
@@ -659,6 +659,16 @@ impl<T> Option<T> {
impl<'a, T: Clone> Option<&'a T> {
/// Maps an `Option<&T>` to an `Option<T>` by cloning the contents of the
/// option.
+///
+/// # Examples
+///
+/// ```
+/// let x = 12;
+/// let opt_x = Some(&x);
+/// assert_eq!(opt_x, Some(&12));
+/// let cloned = opt_x.cloned();
+/// assert_eq!(cloned, Some(12));
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn cloned(self) -> Option<T> {
self.map(|t| t.clone())
2 changes: 2 additions & 0 deletions src/librustc/session/config.rs
@@ -886,6 +886,8 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options,
"enable incremental compilation (experimental)"),
incremental_info: bool = (false, parse_bool, [UNTRACKED],
"print high-level information about incremental reuse (or the lack thereof)"),
+incremental_dump_hash: bool = (false, parse_bool, [UNTRACKED],
+    "dump hash information in textual format to stdout"),
dump_dep_graph: bool = (false, parse_bool, [UNTRACKED],
"dump the dependency graph to $RUST_DEP_GRAPH (default: /tmp/dep_graph.gv)"),
query_dep_graph: bool = (false, parse_bool, [UNTRACKED],
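For context, debugging options declared in this table are nightly-only `-Z` flags, and an entry named `incremental_dump_hash` should be reachable as `-Z incremental-dump-hash` on the command line. A hedged sketch of driving the new flag programmatically — the source file and incremental directory names here are invented for illustration:

```rust
use std::process::Command;

fn main() {
    // Compile with incremental compilation enabled and ask the compiler
    // to dump its hashes in textual form (nightly toolchain assumed).
    let status = Command::new("rustc")
        .args(&["-Z", "incremental=incr-cache",
                "-Z", "incremental-dump-hash",
                "main.rs"])
        .status()
        .expect("failed to spawn rustc");
    println!("rustc exited with: {}", status);
}
```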
16 changes: 15 additions & 1 deletion src/librustc_incremental/persist/file_format.rs
@@ -24,6 +24,7 @@ use std::path::Path;
use std::fs::File;
use std::env;

+use rustc::session::Session;
use rustc::session::config::nightly_options;

/// The first few bytes of files generated by incremental compilation
@@ -59,7 +60,7 @@ pub fn write_file_header<W: io::Write>(stream: &mut W) -> io::Result<()> {
/// incompatible version of the compiler.
/// - Returns `Err(..)` if some kind of IO error occurred while reading the
/// file.
-pub fn read_file(path: &Path) -> io::Result<Option<Vec<u8>>> {
+pub fn read_file(sess: &Session, path: &Path) -> io::Result<Option<Vec<u8>>> {
if !path.exists() {
return Ok(None);
}
@@ -72,6 +72,7 @@ pub fn read_file(path: &Path) -> io::Result<Option<Vec<u8>>> {
let mut file_magic = [0u8; 4];
file.read_exact(&mut file_magic)?;
if file_magic != FILE_MAGIC {
report_format_mismatch(sess, path, "Wrong FILE_MAGIC");
return Ok(None)
}
}
@@ -85,6 +87,7 @@ pub fn read_file(path: &Path) -> io::Result<Option<Vec<u8>>> {
((header_format_version[1] as u16) << 8);

if header_format_version != HEADER_FORMAT_VERSION {
report_format_mismatch(sess, path, "Wrong HEADER_FORMAT_VERSION");
return Ok(None)
}
}
@@ -99,6 +102,7 @@ pub fn read_file(path: &Path) -> io::Result<Option<Vec<u8>>> {
file.read_exact(&mut buffer[..])?;

if &buffer[..] != rustc_version().as_bytes() {
report_format_mismatch(sess, path, "Different compiler version");
return Ok(None);
}
}
@@ -109,6 +113,16 @@ pub fn read_file(path: &Path) -> io::Result<Option<Vec<u8>>> {
Ok(Some(data))
}

+fn report_format_mismatch(sess: &Session, file: &Path, message: &str) {
+    debug!("read_file: {}", message);
+
+    if sess.opts.debugging_opts.incremental_info {
+        println!("incremental: ignoring cache artifact `{}`: {}",
+                 file.file_name().unwrap().to_string_lossy(),
+                 message);
+    }
+}

fn rustc_version() -> String {
if nightly_options::is_nightly_build() {
if let Some(val) = env::var_os("RUSTC_FORCE_INCR_COMP_ARTIFACT_HEADER") {
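The `read_file` changes above follow a classic pattern: validate a magic number, a header format version, and a producer version before trusting a binary file, and treat any mismatch as "incompatible, regenerate" rather than a hard error. A self-contained sketch of that pattern (the constants and byte layout are invented for illustration, not the actual incremental-cache format):

```rust
use std::io::{self, Read};

const FILE_MAGIC: &'static [u8; 4] = b"RSIC";
const HEADER_FORMAT_VERSION: u16 = 1;

// Ok(Some(payload)) when every header check passes; Ok(None) when the
// file came from an incompatible producer; Err(..) on real I/O trouble.
fn read_checked<R: Read>(mut file: R) -> io::Result<Option<Vec<u8>>> {
    let mut magic = [0u8; 4];
    file.read_exact(&mut magic)?;
    if &magic != FILE_MAGIC {
        return Ok(None); // wrong FILE_MAGIC
    }

    let mut ver = [0u8; 2];
    file.read_exact(&mut ver)?;
    let version = (ver[0] as u16) | ((ver[1] as u16) << 8);
    if version != HEADER_FORMAT_VERSION {
        return Ok(None); // wrong HEADER_FORMAT_VERSION
    }

    let mut payload = Vec::new();
    file.read_to_end(&mut payload)?;
    Ok(Some(payload))
}

fn main() {
    let good = b"RSIC\x01\x00hello".to_vec();
    assert_eq!(read_checked(&good[..]).unwrap(), Some(b"hello".to_vec()));

    let bad_magic = b"XXXX\x01\x00hello".to_vec();
    assert_eq!(read_checked(&bad_magic[..]).unwrap(), None);
    println!("header validation sketch ok");
}
```

The commit's actual change is to thread a `&Session` into this path so each `Ok(None)` can explain which check failed when `-Z incremental-info` is set.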
4 changes: 2 additions & 2 deletions src/librustc_incremental/persist/fs.rs
@@ -435,8 +435,8 @@ fn copy_files(target_dir: &Path,
}

if print_stats_on_success {
println!("incr. comp. session directory: {} files hard-linked", files_linked);
println!("incr. comp. session directory: {} files copied", files_copied);
println!("incremental: session directory: {} files hard-linked", files_linked);
println!("incremental: session directory: {} files copied", files_copied);
}

Ok(files_linked > 0 || files_copied == 0)
2 changes: 1 addition & 1 deletion src/librustc_incremental/persist/hash.rs
@@ -156,7 +156,7 @@ impl<'a, 'tcx> HashContext<'a, 'tcx> {

let hashes_file_path = metadata_hash_import_path(&session_dir);

-match file_format::read_file(&hashes_file_path)
+match file_format::read_file(self.tcx.sess, &hashes_file_path)
{
Ok(Some(data)) => {
match self.load_from_data(cnum, &data, svh) {
45 changes: 34 additions & 11 deletions src/librustc_incremental/persist/load.rs
@@ -93,7 +93,7 @@ fn load_dep_graph_if_exists<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
}

fn load_data(sess: &Session, path: &Path) -> Option<Vec<u8>> {
-match file_format::read_file(path) {
+match file_format::read_file(sess, path) {
Ok(Some(data)) => return Some(data),
Ok(None) => {
// The file either didn't exist or was produced by an incompatible
@@ -132,6 +132,10 @@ pub fn decode_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
let prev_commandline_args_hash = u64::decode(&mut dep_graph_decoder)?;

if prev_commandline_args_hash != tcx.sess.opts.dep_tracking_hash() {
+if tcx.sess.opts.debugging_opts.incremental_info {
+    println!("incremental: completely ignoring cache because of \
+              differing commandline arguments");
+}
// We can't reuse the cache, purge it.
debug!("decode_dep_graph: differing commandline arg hashes");
for swp in work_products {
@@ -192,7 +196,8 @@ pub fn decode_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
if tcx.sess.opts.debugging_opts.incremental_info {
// It'd be nice to pretty-print these paths better than just
// using the `Debug` impls, but wev.
println!("module {:?} is dirty because {:?} changed or was removed",
println!("incremental: module {:?} is dirty because {:?} \
changed or was removed",
target_node,
raw_source_node.map_def(|&index| {
Some(directory.def_path_string(tcx, index))
@@ -250,11 +255,24 @@ fn dirty_nodes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
current_hash);
continue;
}

+if tcx.sess.opts.debugging_opts.incremental_dump_hash {
+    println!("node {:?} is dirty as hash is {:?} was {:?}",
+             dep_node.map_def(|&def_id| Some(tcx.def_path(def_id))).unwrap(),
+             current_hash,
+             hash.hash);
+}

debug!("initial_dirty_nodes: {:?} is dirty as hash is {:?}, was {:?}",
dep_node.map_def(|&def_id| Some(tcx.def_path(def_id))).unwrap(),
current_hash,
hash.hash);
} else {
+if tcx.sess.opts.debugging_opts.incremental_dump_hash {
+    println!("node {:?} is dirty as it was removed",
+             hash.dep_node);
+}

debug!("initial_dirty_nodes: {:?} is dirty as it was removed",
hash.dep_node);
}
@@ -277,14 +295,19 @@ fn reconcile_work_products<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
debug!("reconcile_work_products: dep-node for {:?} is dirty", swp);
delete_dirty_work_product(tcx, swp);
} else {
-let all_files_exist =
-    swp.work_product
-       .saved_files
-       .iter()
-       .all(|&(_, ref file_name)| {
-           let path = in_incr_comp_dir_sess(tcx.sess, &file_name);
-           path.exists()
-       });
+let mut all_files_exist = true;
+for &(_, ref file_name) in swp.work_product.saved_files.iter() {
+    let path = in_incr_comp_dir_sess(tcx.sess, file_name);
+    if !path.exists() {
+        all_files_exist = false;
+
+        if tcx.sess.opts.debugging_opts.incremental_info {
+            println!("incremental: could not find file for up-to-date work product: {}",
+                     path.display());
+        }
+    }
+}

if all_files_exist {
debug!("reconcile_work_products: all files for {:?} exist", swp);
tcx.dep_graph.insert_previous_work_product(&swp.id, swp.work_product);
@@ -331,7 +354,7 @@ fn load_prev_metadata_hashes(tcx: TyCtxt,

debug!("load_prev_metadata_hashes() - File: {}", file_path.display());

-let data = match file_format::read_file(&file_path) {
+let data = match file_format::read_file(tcx.sess, &file_path) {
Ok(Some(data)) => data,
Ok(None) => {
debug!("load_prev_metadata_hashes() - File produced by incompatible \
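One shape change worth noting in load.rs above: an `.all(..)` iterator chain becomes an explicit loop, trading short-circuiting for the ability to report every missing file. A self-contained sketch of that trade-off (file names invented for illustration):

```rust
use std::path::Path;

// Explicit loop: visits every path and reports each missing one, where
// `paths.iter().all(|p| Path::new(p).exists())` would stop at the first.
fn all_files_exist(paths: &[&str], verbose: bool) -> bool {
    let mut all_exist = true;
    for p in paths {
        if !Path::new(p).exists() {
            all_exist = false;
            if verbose {
                println!("could not find file: {}", p);
            }
        }
    }
    all_exist
}

fn main() {
    let ok = all_files_exist(&["Cargo.toml", "does-not-exist.o"], true);
    println!("all files exist: {}", ok);
}
```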
15 changes: 15 additions & 0 deletions src/librustc_incremental/persist/save.rs
@@ -159,6 +159,12 @@ pub fn encode_dep_graph(preds: &Predecessors,
}
}

+if tcx.sess.opts.debugging_opts.incremental_dump_hash {
+    for (dep_node, hash) in &preds.hashes {
+        println!("HIR hash for {:?} is {}", dep_node, hash);
+    }
+}

// Create the serialized dep-graph.
let graph = SerializedDepGraph {
edges: edges,
@@ -248,6 +254,15 @@ pub fn encode_metadata_hashes(tcx: TyCtxt,
let hash = state.finish();

debug!("save: metadata hash for {:?} is {}", def_id, hash);

+if tcx.sess.opts.debugging_opts.incremental_dump_hash {
+    println!("metadata hash for {:?} is {}", def_id, hash);
+    for dep_node in sources {
+        println!("metadata hash for {:?} depends on {:?} with hash {}",
+                 def_id, dep_node, preds.hashes[dep_node]);
+    }
+}

serialized_hashes.hashes.push(SerializedMetadataHash {
def_index: def_id.index,
hash: hash,
5 changes: 5 additions & 0 deletions src/librustc_trans/base.rs
@@ -1981,6 +1981,11 @@ fn trans_reuse_previous_work_products(tcx: TyCtxt,
debug!("trans_reuse_previous_work_products: reusing {:?}", work_product);
return Some(work_product);
} else {
+if tcx.sess.opts.debugging_opts.incremental_info {
+    println!("incremental: CGU `{}` invalidated because of \
+              changed partitioning hash.",
+             cgu.name());
+}
debug!("trans_reuse_previous_work_products: \
not reusing {:?} because hash changed to {:?}",
work_product, hash);
Expand Down