diff --git a/CHANGES b/CHANGES index a681c98433..6e7cb60fd9 100644 --- a/CHANGES +++ b/CHANGES @@ -2,6 +2,20 @@ The list of most significant changes made over time in Intel(R) Threading Building Blocks (Intel(R) TBB). +Intel TBB 2019 Update 8 +TBB_INTERFACE_VERSION == 11008 + +Changes (w.r.t. Intel TBB 2019 Update 7): + +Bugs fixed: + +- Fixed a bug in TBB 2019 Update 7 that could lead to incorrect memory + reallocation on Linux (https://github.com/intel/tbb/issues/148). +- Fixed enqueuing tbb::task into tbb::task_arena not to fail on threads + with no task scheduler initialized + (https://github.com/intel/tbb/issues/116). + +------------------------------------------------------------------------ Intel TBB 2019 Update 7 TBB_INTERFACE_VERSION == 11007 @@ -20,7 +34,7 @@ Changes (w.r.t. Intel TBB 2019 Update 6): - Added packaging of CMake configuration files to TBB packages built using build/build.py script (https://github.com/intel/tbb/issues/141). - + Changes affecting backward compatibility: - Removed the number_of_decrement_predecessors parameter from the @@ -34,7 +48,7 @@ Preview Features: Open-source contributions integrated: -- Fixed makefiles to properly obtain the GCC version for GCC 7 +- Fixed makefiles to properly obtain the GCC version for GCC 7 and later (https://github.com/intel/tbb/pull/147) by Timmmm. ------------------------------------------------------------------------ @@ -93,7 +107,7 @@ Bugs fixed: it follows a thread-bound filter. - Fixed a performance regression observed when multiple parallel algorithms start simultaneously. - + ------------------------------------------------------------------------ Intel TBB 2019 Update 4 TBB_INTERFACE_VERSION == 11004 @@ -102,7 +116,7 @@ Changes (w.r.t. Intel TBB 2019 Update 3): - global_control class is now a fully supported feature. - Added deduction guides for tbb containers: concurrent_hash_map, - concurrent_unordered_map, concurrent_unordered_set. + concurrent_unordered_map, concurrent_unordered_set. - Added tbb::scalable_memory_resource function returning std::pmr::memory_resource interface to the TBB memory allocator. - Added tbb::cache_aligned_resource class that implements @@ -119,7 +133,7 @@ Changes (w.r.t. Intel TBB 2019 Update 3): Bugs fixed: -- Fixed compilation for tbb::concurrent_vector when used with +- Fixed compilation for tbb::concurrent_vector when used with std::pmr::polymorphic_allocator. Open-source contributions integrated: @@ -582,7 +596,7 @@ Changes (w.r.t. Intel TBB 4.4 Update 5): - Added TBB_USE_GLIBCXX_VERSION macro to specify the version of GNU libstdc++ when it cannot be properly recognized, e.g. when used with Clang on Linux* OS. Inspired by a contribution from David A. -- Added graph/stereo example to demostrate tbb::flow::async_msg. +- Added graph/stereo example to demonstrate tbb::flow::async_msg. - Removed a few cases of excessive user data copying in the flow graph. - Reworked split_node to eliminate unnecessary overheads. - Added support for C++11 move semantics to the argument of @@ -1293,7 +1307,7 @@ Changes (w.r.t. Intel TBB 4.1 Update 4): were added on OS X*. - For OS X* exact exception propagation is supported with Clang; it requires use of libc++ and corresponding Intel TBB binaries. -- Support for C++11 initializer lists in constructor and assigment +- Support for C++11 initializer lists in constructor and assignment has been added to concurrent_hash_map, concurrent_unordered_set, concurrent_unordered_multiset, concurrent_unordered_map, concurrent_unordered_multimap. 
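The first Update 8 bullet above (intel/tbb#148) concerns the tbbmalloc reallocation path; the fix itself appears later in this patch, in src/tbbmalloc/backend.cpp (Backend::remap). A minimal repro-shaped sketch of the scenario, assuming the tbbmalloc proxy intercepts malloc/realloc; the sizes and the 0xAB fill pattern are illustrative, not taken from the report:

    #include <cstdlib>
    #include <cstring>

    int main() {
        const size_t n = 16u * 1024u * 1024u;   // large object, served by the backend
        char* p = (char*)std::malloc(n);
        if (!p) return 1;
        std::memset(p, 0xAB, n);
        // Growing reallocation: with the Update 7 bug the new region could be
        // sized from the new size alone, ignoring the object's offset within
        // the region (see the Backend::remap hunk later in this patch).
        char* q = (char*)std::realloc(p, n + n / 2);
        if (!q) { std::free(p); return 1; }
        const bool intact = (unsigned char)q[n - 1] == 0xAB;  // old tail preserved?
        std::free(q);
        return intact ? 0 : 2;
    }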
@@ -1889,7 +1903,7 @@ Changes (w.r.t. 20100310 open-source release): - Reworked enumerable_thread_specific to use a custom implementation of hash map that is more efficient for ETS usage models. - Added example for class task_group; see examples/task_group/sudoku. -- Removed two examples, as they were long outdated and superceded: +- Removed two examples, as they were long outdated and superseded: pipeline/text_filter (use pipeline/square); parallel_while/parallel_preorder (use parallel_do/parallel_preorder). - PDF documentation updated. @@ -2107,7 +2121,7 @@ Changes (w.r.t. Intel TBB 2.1 U3 commercial-aligned release): - Added tests for aligned memory allocations and malloc replacement. - Several improvements for better bundling with Intel(R) C++ Compiler. -- A few other small changes in code and documentaion. +- A few other small changes in code and documentation. Bugs fixed: diff --git a/README.md b/README.md index 41150d23f0..4efa64a664 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ -# Threading Building Blocks 2019 Update 7 -[![Stable release](https://img.shields.io/badge/version-2019_U7-green.svg)](https://github.com/01org/tbb/releases/tag/2019_U7) +# Threading Building Blocks 2019 Update 8 +[![Stable release](https://img.shields.io/badge/version-2019_U8-green.svg)](https://github.com/01org/tbb/releases/tag/2019_U8) [![Apache License Version 2.0](https://img.shields.io/badge/license-Apache_2.0-green.svg)](LICENSE) Threading Building Blocks (TBB) lets you easily write parallel C++ programs that take diff --git a/build/Makefile.test b/build/Makefile.test index 53adb4cabd..a014e13783 100644 --- a/build/Makefile.test +++ b/build/Makefile.test @@ -111,7 +111,7 @@ TEST_BIGOBJ = test_opencl_node.$(TEST_EXT) \ $(TEST_BIGOBJ): override CXXFLAGS += $(BIGOBJ_KEY) endif -# TODO: remove repetition of .$(TEST_EXT) in the list bellow +# TODO: remove repetition of .$(TEST_EXT) in the list below # The main list of TBB tests TEST_TBB_PLAIN.EXE = test_assembly.$(TEST_EXT) \ test_global_control.$(TEST_EXT) \ diff --git a/build/build.py b/build/build.py index aecc9885a4..4c3c1fb430 100644 --- a/build/build.py +++ b/build/build.py @@ -49,7 +49,7 @@ parser.add_argument('--copy-tool', default=None, help='Use this command for copying ($ tool file dest-dir)') parser.add_argument('--build-args', default="", help='specify extra build args') parser.add_argument('--build-prefix', default='local', help='build dir prefix') -parser.add_argument('--cmake-dir', help='directory to install CMake configuraion files. Default: /lib/cmake/tbb') +parser.add_argument('--cmake-dir', help='directory to install CMake configuration files. Default: /lib/cmake/tbb') if is_win: parser.add_argument('--msbuild', default=False, action='store_true', help='Use msbuild') parser.add_argument('--vs', default="2012", help='select VS version for build') @@ -79,7 +79,7 @@ def custom_cp(src, dst): cmake_dir = jp(args.prefix, "lib", "cmake", "tbb") if args.cmake_dir is None else args.cmake_dir if is_win: - os.environ["OS"] = "Windows_NT" # make sure TBB will interpret it corretly + os.environ["OS"] = "Windows_NT" # make sure TBB will interpret it correctly libext = '.dll' libpref = '' dll_dir = bin_dir diff --git a/build/common.inc b/build/common.inc index 9ae279c013..b95c78d6b4 100644 --- a/build/common.inc +++ b/build/common.inc @@ -156,8 +156,8 @@ ifndef BUILDING_PHASE .DELETE_ON_ERROR: # Make will delete target if error occurred when building it. -# MAKEOVERRIDES contains the command line variable definitions. 
Reseting it to -# empty allows propogating all exported overridden variables to nested makes. +# MAKEOVERRIDES contains the command line variable definitions. Resetting it to +# empty allows propagating all exported overridden variables to nested makes. # NOTEs: # 1. All variable set in command line are propagated to nested makes. # 2. All variables declared with the "export" keyword are propagated to diff --git a/cmake/README.rst b/cmake/README.rst index 40e2830958..3b16c37c14 100644 --- a/cmake/README.rst +++ b/cmake/README.rst @@ -212,7 +212,7 @@ Variables set during TBB configuration: TBBInstallConfig ^^^^^^^^^^^^^^^^ -Module for generation and installation of TBB CMake configuration files (TBBConfig.cmake and TBBConfigVersion.cmake files) on Linux and macOS. +Module for generation and installation of TBB CMake configuration files (TBBConfig.cmake and TBBConfigVersion.cmake files) on Linux, macOS and Windows. Provides the following functions: @@ -238,7 +238,7 @@ The use case is applicable for package maintainers who create own TBB packages a Parameter Description =========================================== =========================================================== ``INSTALL_DIR `` Directory to install CMake configuration files -``SYSTEM_NAME Linux|Darwin`` OS name to generate config files for +``SYSTEM_NAME Linux|Darwin|Windows`` OS name to generate config files for ``TBB_VERSION_FILE `` Path to ``tbb_stddef.h`` to parse version from and write it to TBBConfigVersion.cmake ``TBB_VERSION ..`` Directly specified TBB version; @@ -264,15 +264,15 @@ The use case is applicable for package maintainers who create own TBB packages a The use case is applicable for users who have installed TBB, but do not have (or have incorrect) CMake configuration files for this TBB. -============================ ============================================== +==================================== ============================================== Parameter Description -============================ ============================================== -``INSTALL_DIR `` Directory to install CMake configuration files -``SYSTEM_NAME Linux|Darwin`` OS name to generate config files for -``LIB_PATH `` Path to installed TBB binaries (.lib files on Windows) -``BIN_PATH `` Path to installed TBB DLLs (applicable for Windows only) -``INC_PATH `` Path to installed TBB headers -============================ ============================================== +==================================== ============================================== +``INSTALL_DIR `` Directory to install CMake configuration files +``SYSTEM_NAME Linux|Darwin|Windows`` OS name to generate config files for +``LIB_PATH `` Path to installed TBB binaries (.lib files on Windows) +``BIN_PATH `` Path to installed TBB DLLs (applicable for Windows only) +``INC_PATH `` Path to installed TBB headers +==================================== ============================================== ``LIB_PATH`` and ``INC_PATH`` will be converted to relative paths based on ``INSTALL_DIR``. By default TBB version will be parsed from ``/tbb/tbb_stddef.h``, diff --git a/examples/common/gui/winvideo.h b/examples/common/gui/winvideo.h index 19b940fc75..372d3d5ad4 100644 --- a/examples/common/gui/winvideo.h +++ b/examples/common/gui/winvideo.h @@ -23,7 +23,7 @@ #ifndef _CRT_SECURE_NO_DEPRECATE #define _CRT_SECURE_NO_DEPRECATE #endif -// Check that the target Windows version has all API calls requried. +// Check that the target Windows version has all API calls required. 
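// Editorial note, not part of the patch: the guard below only supplies a
// floor. An explicit -D_WIN32_WINNT on the compiler command line wins;
// otherwise 0x0400 (Windows NT 4.0) is assumed so that the console and GDI
// calls used by this header are declared.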
#ifndef _WIN32_WINNT # define _WIN32_WINNT 0x0400 #endif @@ -41,7 +41,7 @@ #pragma comment(lib, "gdi32.lib") #pragma comment(lib, "user32.lib") -// maximum mumber of lines the output console should have +// maximum number of lines the output console should have static const WORD MAX_CONSOLE_LINES = 500; const COLORREF RGBKEY = RGB(8, 8, 16); // at least 8 for 16-bit palette HWND g_hAppWnd; // The program's window handle diff --git a/examples/common/gui/xcode/tbbExample/OpenGLView.m b/examples/common/gui/xcode/tbbExample/OpenGLView.m index 07cbed1bb1..c7735f95d8 100644 --- a/examples/common/gui/xcode/tbbExample/OpenGLView.m +++ b/examples/common/gui/xcode/tbbExample/OpenGLView.m @@ -25,7 +25,7 @@ void on_mouse_func(int x, int y, int k); void on_key_func(int x); -bool initilized = false; +bool initialized = false; #if TARGET_OS_IPHONE @@ -38,7 +38,7 @@ @implementation OpenGLView - (void)drawRect:(CGRect)start { - if (initilized == false) { + if (initialized == false) { NSLog(@"INITIALIZE"); timer = [NSTimer scheduledTimerWithTimeInterval:0.03 target:self selector:@selector(update_window) userInfo:nil repeats:YES]; imageRect = [[UIScreen mainScreen] bounds]; @@ -46,7 +46,7 @@ - (void)drawRect:(CGRect)start const float ratio=(float)g_sizex/g_sizey; imageRect.size.height=imageRect.size.width/ratio; imageRect.origin.y=(full_height-imageRect.size.height)/2; - initilized = true; + initialized = true; } CGColorSpaceRef colourSpace = CGColorSpaceCreateDeviceRGB(); @@ -89,10 +89,10 @@ @implementation OpenGLView - (void) drawRect:(NSRect)start { - if (initilized == false) { + if (initialized == false) { NSLog(@"INITIALIZE"); timer = [NSTimer scheduledTimerWithTimeInterval:0.03 target:self selector:@selector(update_window) userInfo:nil repeats:YES]; - initilized = true; + initialized = true; } glWindowPos2i(0, (int)self.visibleRect.size.height); glPixelZoom( (float)self.visibleRect.size.width /(float)g_sizex, diff --git a/examples/graph/fgbzip2/fgbzip2.cpp b/examples/graph/fgbzip2/fgbzip2.cpp index 1e8b4cf954..c9cd43f875 100644 --- a/examples/graph/fgbzip2/fgbzip2.cpp +++ b/examples/graph/fgbzip2/fgbzip2.cpp @@ -275,7 +275,7 @@ class AsyncMsgActivity { m_readQueue.pop(readWork); // Reading thread waits for buffers to be received - // (the graph reuses limitted number of buffers) + // (the graph reuses limited number of buffers) // and reads the file while there is something to read while (m_io.hasDataToRead()) { readWork.bufferMsg.seqId = m_io.chunksRead(); @@ -471,10 +471,10 @@ int main(int argc, char* argv[]) { if (verbose) std::cout << "Running flow graph based compression algorithm." << std::endl; fgCompression(io, blockSizeIn100KB); } else if (asyncType == "async_node") { - if (verbose) std::cout << "Running flow graph based compression algorithm with async_node based asynchronious IO operations." << std::endl; + if (verbose) std::cout << "Running flow graph based compression algorithm with async_node based asynchronous IO operations." << std::endl; fgCompressionAsyncNode(io, blockSizeIn100KB); } else if (asyncType == "async_msg") { - if (verbose) std::cout << "Running flow graph based compression algorithm with async_msg based asynchronious IO operations. Using limited memory: " << memoryLimitIn1MB << "MB." << std::endl; + if (verbose) std::cout << "Running flow graph based compression algorithm with async_msg based asynchronous IO operations. Using limited memory: " << memoryLimitIn1MB << "MB." 
<< std::endl; fgCompressionAsyncMsg(io, blockSizeIn100KB, memoryLimitIn1MB); } diff --git a/examples/graph/stereo/lodepng.cpp b/examples/graph/stereo/lodepng.cpp index a0125c23e5..bb3fe16a53 100644 --- a/examples/graph/stereo/lodepng.cpp +++ b/examples/graph/stereo/lodepng.cpp @@ -1734,7 +1734,7 @@ static unsigned deflateDynamic(ucvector* out, size_t* bp, Hash* hash, another huffman tree is used for the dist values ("d"). These two trees are stored using their code lengths, and to compress even more these code lengths are also run-length encoded and huffman compressed. This gives a huffman tree - of code lengths "cl". The code lenghts used to describe this third tree are + of code lengths "cl". The code lengths used to describe this third tree are the code length code lengths ("clcl"). */ @@ -1746,7 +1746,7 @@ static unsigned deflateDynamic(ucvector* out, size_t* bp, Hash* hash, uivector frequencies_ll; /*frequency of lit,len codes*/ uivector frequencies_d; /*frequency of dist codes*/ uivector frequencies_cl; /*frequency of code length codes*/ - uivector bitlen_lld; /*lit,len,dist code lenghts (int bits), literally (without repeat codes).*/ + uivector bitlen_lld; /*lit,len,dist code lengths (int bits), literally (without repeat codes).*/ uivector bitlen_lld_e; /*bitlen_lld encoded with repeat codes (this is a rudemtary run length compression)*/ /*bitlen_cl is the code length code lengths ("clcl"). The bit lengths of codes to represent tree_cl (these are written as is in the file, it would be crazy to compress these using yet another huffman @@ -1887,7 +1887,7 @@ static unsigned deflateDynamic(ucvector* out, size_t* bp, Hash* hash, if(!uivector_resize(&bitlen_cl, tree_cl.numcodes)) ERROR_BREAK(83 /*alloc fail*/); for(i = 0; i != tree_cl.numcodes; ++i) { - /*lenghts of code length tree is in the order as specified by deflate*/ + /*lengths of code length tree is in the order as specified by deflate*/ bitlen_cl.data[i] = HuffmanTree_getLength(&tree_cl, CLCL_ORDER[i]); } while(bitlen_cl.data[bitlen_cl.size - 1] == 0 && bitlen_cl.size > 4) @@ -1903,7 +1903,7 @@ static unsigned deflateDynamic(ucvector* out, size_t* bp, Hash* hash, After the BFINAL and BTYPE, the dynamic block consists out of the following: - 5 bits HLIT, 5 bits HDIST, 4 bits HCLEN - (HCLEN+4)*3 bits code lengths of code length alphabet - - HLIT + 257 code lenghts of lit/length alphabet (encoded using the code length + - HLIT + 257 code lengths of lit/length alphabet (encoded using the code length alphabet, + possible repetition codes 16, 17, 18) - HDIST + 1 code lengths of distance alphabet (encoded using the code length alphabet, + possible repetition codes 16, 17, 18) @@ -1926,10 +1926,10 @@ static unsigned deflateDynamic(ucvector* out, size_t* bp, Hash* hash, addBitsToStream(bp, out, HDIST, 5); addBitsToStream(bp, out, HCLEN, 4); - /*write the code lenghts of the code length alphabet*/ + /*write the code lengths of the code length alphabet*/ for(i = 0; i != HCLEN + 4; ++i) addBitsToStream(bp, out, bitlen_cl.data[i], 3); - /*write the lenghts of the lit/len AND the dist alphabet*/ + /*write the lengths of the lit/len AND the dist alphabet*/ for(i = 0; i != bitlen_lld_e.size; ++i) { addHuffmanSymbol(bp, out, HuffmanTree_getCode(&tree_cl, bitlen_lld_e.data[i]), @@ -3821,7 +3821,7 @@ unsigned lodepng_auto_choose_color(LodePNGColorMode* mode_out, #endif /* #ifdef LODEPNG_COMPILE_ENCODER */ /* -Paeth predicter, used by PNG filter type 4 +Paeth predictor, used by PNG filter type 4 The parameters are of type short, but should come 
from unsigned chars, the shorts are only needed to make the paeth calculation correct. */ @@ -4161,7 +4161,7 @@ static unsigned postProcessScanlines(unsigned char* out, unsigned char* in, /* This function converts the filtered-padded-interlaced data into pure 2D image buffer with the PNG's colortype. Steps: - *) if no Adam7: 1) unfilter 2) remove padding bits (= posible extra bits per scanline if bpp < 8) + *) if no Adam7: 1) unfilter 2) remove padding bits (= possible extra bits per scanline if bpp < 8) *) if adam7: 1) 7x unfilter 2) 7x remove padding bits 3) Adam7_deinterlace NOTE: the in buffer will be overwritten with intermediate data! */ @@ -5509,7 +5509,7 @@ static unsigned preProcessScanlines(unsigned char** out, size_t* outsize, const { /* This function converts the pure 2D image with the PNG's colortype, into filtered-padded-interlaced data. Steps: - *) if no Adam7: 1) add padding bits (= posible extra bits per scanline if bpp < 8) 2) filter + *) if no Adam7: 1) add padding bits (= possible extra bits per scanline if bpp < 8) 2) filter *) if adam7: 1) Adam7_interlace 2) 7x add padding bits 3) 7x filter */ unsigned bpp = lodepng_get_bpp(&info_png->color); @@ -5949,7 +5949,7 @@ const char* lodepng_error_text(unsigned code) case 54: return "repeat symbol in tree while there was no value symbol yet"; /*jumped past tree while generating huffman tree, this could be when the tree will have more leaves than symbols after generating it out of the - given lenghts. They call this an oversubscribed dynamic bit lengths tree in zlib.*/ + given lengths. They call this an oversubscribed dynamic bit lengths tree in zlib.*/ case 55: return "jumped past tree while generating huffman tree"; case 56: return "given output image colortype or bitdepth not supported for color conversion"; case 57: return "invalid CRC encountered (checking CRC can be disabled)"; diff --git a/examples/graph/stereo/lodepng.h b/examples/graph/stereo/lodepng.h index 532aa25195..5b3f9c5aa1 100644 --- a/examples/graph/stereo/lodepng.h +++ b/examples/graph/stereo/lodepng.h @@ -303,7 +303,7 @@ struct LodePNGCompressSettings /*deflate = compress*/ unsigned btype; /*the block type for LZ (0, 1, 2 or 3, see zlib standard). Should be 2 for proper compression.*/ unsigned use_lz77; /*whether or not to use LZ77. Should be 1 for proper compression.*/ unsigned windowsize; /*must be a power of two <= 32768. higher compresses more but is slower. Default value: 2048.*/ - unsigned minmatch; /*mininum lz77 length. 3 is normally best, 6 can be better for some PNGs. Default: 0*/ + unsigned minmatch; /*minimum lz77 length. 3 is normally best, 6 can be better for some PNGs. Default: 0*/ unsigned nicematch; /*stop searching if >= this length found. Set to 258 for best compression. Default: 128*/ unsigned lazymatching; /*use lazy matching: better compression but a bit slower. Default: true*/ @@ -1214,7 +1214,7 @@ It can convert from almost any color type to any other color type, except the following conversions: RGB to greyscale is not supported, and converting to a palette when the palette doesn't have a required color is not supported. This is not supported on purpose: this is information loss which requires a color -reduction algorithm that is beyong the scope of a PNG encoder (yes, RGB to grey +reduction algorithm that is beyond the scope of a PNG encoder (yes, RGB to grey is easy, but there are multiple ways if you want to give some channels more weight). @@ -1520,7 +1520,7 @@ C and C++. 
*) Other Compilers If you encounter problems on any compilers, feel free to let me know and I may -try to fix it if the compiler is modern and standards complient. +try to fix it if the compiler is modern and standards compliant. 10. examples @@ -1588,7 +1588,7 @@ state.decoder.remember_unknown_chunks: whether to read in unknown chunks state.info_raw.colortype: desired color type for decoded image state.info_raw.bitdepth: desired bit depth for decoded image state.info_raw....: more color settings, see struct LodePNGColorMode -state.info_png....: no settings for decoder but ouput, see struct LodePNGInfo +state.info_png....: no settings for decoder but output, see struct LodePNGInfo For encoding: diff --git a/examples/graph/stereo/stereo.cpp b/examples/graph/stereo/stereo.cpp index aabf678b45..2abc1a80b2 100644 --- a/examples/graph/stereo/stereo.cpp +++ b/examples/graph/stereo/stereo.cpp @@ -145,7 +145,7 @@ void hostFunction(const std::string& firstFile, const std::string& secondFile, c join_node< tuple< utils::image_buffer, utils::image_buffer > > joinNode(g); function_node< MergeImagesTuple, utils::image_buffer > mergeImages(g, unlimited, [](const MergeImagesTuple& bufferTuple) -> utils::image_buffer { - // Two input images from tupple are merged into the first image, + // Two input images from tuple are merged into the first image, utils::image_buffer leftImageBuffer = std::get<0>(bufferTuple); utils::image_buffer rightImageBuffer = std::get<1>(bufferTuple); diff --git a/examples/parallel_for/tachyon/msvs/win8ui/DirectXPage.xaml.cpp b/examples/parallel_for/tachyon/msvs/win8ui/DirectXPage.xaml.cpp index f9a6fa3abd..e56f6c9bf1 100644 --- a/examples/parallel_for/tachyon/msvs/win8ui/DirectXPage.xaml.cpp +++ b/examples/parallel_for/tachyon/msvs/win8ui/DirectXPage.xaml.cpp @@ -65,7 +65,7 @@ DirectXPage::DirectXPage() : m_eventToken = CompositionTarget::Rendering::add(ref new EventHandler(this, &DirectXPage::OnRendering)); int num_threads = 2*tbb::task_scheduler_init::default_num_threads(); - // The thread slider has geometric sequence with several intermidiate steps for each interval between 2^N and 2^(N+1). + // The thread slider has geometric sequence with several intermediate steps for each interval between 2^N and 2^(N+1). // The nearest (from below) the power of 2. int i_base = log2(num_threads); int base = 1 << i_base; diff --git a/examples/parallel_for/tachyon/msvs/win8ui/Package.appxmanifest b/examples/parallel_for/tachyon/msvs/win8ui/Package.appxmanifest index ed4a813fac..df3c07adc3 100644 --- a/examples/parallel_for/tachyon/msvs/win8ui/Package.appxmanifest +++ b/examples/parallel_for/tachyon/msvs/win8ui/Package.appxmanifest @@ -15,7 +15,7 @@ - + diff --git a/examples/parallel_for/tachyon/src/apitrigeom.cpp b/examples/parallel_for/tachyon/src/apitrigeom.cpp index 8b5be05596..fbf06ea966 100644 --- a/examples/parallel_for/tachyon/src/apitrigeom.cpp +++ b/examples/parallel_for/tachyon/src/apitrigeom.cpp @@ -44,7 +44,7 @@ */ /* - * apitrigeom.cpp - This file contains code for generating triangle tesselated + * apitrigeom.cpp - This file contains code for generating triangle tessellated * geometry, for use with OpenGL, XGL, etc. 
*/ diff --git a/examples/parallel_for/tachyon/src/apitrigeom.h b/examples/parallel_for/tachyon/src/apitrigeom.h index 395a462ea1..1c444f557b 100644 --- a/examples/parallel_for/tachyon/src/apitrigeom.h +++ b/examples/parallel_for/tachyon/src/apitrigeom.h @@ -44,7 +44,7 @@ */ /* - * apitrigeom.h - header for functions to generate triangle tesselated + * apitrigeom.h - header for functions to generate triangle tessellated * geometry for use with OpenGL, XGL, etc. * */ diff --git a/examples/parallel_for/tachyon/src/grid.cpp b/examples/parallel_for/tachyon/src/grid.cpp index 87baa0d731..869fe69e44 100644 --- a/examples/parallel_for/tachyon/src/grid.cpp +++ b/examples/parallel_for/tachyon/src/grid.cpp @@ -142,7 +142,7 @@ static void globalbound(object ** rootlist, vector * gmin, vector * gmax) { vector min, max; object * cur; - if (*rootlist == NULL) /* don't bound non-existant objects */ + if (*rootlist == NULL) /* don't bound non-existent objects */ return; gmin->x = FHUGE; gmin->y = FHUGE; gmin->z = FHUGE; @@ -175,7 +175,7 @@ static int cellbound(grid *g, gridindex *index, vector * cmin, vector * cmax) { cur = g->cells[index->z*g->xsize*g->ysize + index->y*g->xsize + index->x]; - if (cur == NULL) /* don't bound non-existant objects */ + if (cur == NULL) /* don't bound non-existent objects */ return 0; cellmin.x = voxel2x(g, index->x); diff --git a/examples/parallel_for/tachyon/src/objbound.cpp b/examples/parallel_for/tachyon/src/objbound.cpp index 2a7637b399..2c96673e1a 100644 --- a/examples/parallel_for/tachyon/src/objbound.cpp +++ b/examples/parallel_for/tachyon/src/objbound.cpp @@ -45,7 +45,7 @@ /* * objbound.cpp - This file contains the functions to find bounding boxes - * for the various primitives + * for the various primitives */ #include "machine.h" @@ -57,29 +57,29 @@ #include "objbound.h" static void globalbound(object ** rootlist, vector * gmin, vector * gmax) { - vector min, max; + vector min, max; object * cur; - if (*rootlist == NULL) /* don't bound non-existant objects */ + if (*rootlist == NULL) /* don't bound non-existent objects */ return; gmin->x = FHUGE; gmin->y = FHUGE; gmin->z = FHUGE; gmax->x = -FHUGE; gmax->y = -FHUGE; gmax->z = -FHUGE; cur=*rootlist; - while (cur != NULL) { /* Go! */ + while (cur != NULL) { /* Go! 
*/ min.x = -FHUGE; min.y = -FHUGE; min.z = -FHUGE; max.x = FHUGE; max.y = FHUGE; max.z = FHUGE; cur->methods->bbox((void *) cur, &min, &max); - gmin->x = MYMIN( gmin->x , min.x); - gmin->y = MYMIN( gmin->y , min.y); - gmin->z = MYMIN( gmin->z , min.z); - - gmax->x = MYMAX( gmax->x , max.x); - gmax->y = MYMAX( gmax->y , max.y); - gmax->z = MYMAX( gmax->z , max.z); + gmin->x = MYMIN( gmin->x , min.x); + gmin->y = MYMIN( gmin->y , min.y); + gmin->z = MYMIN( gmin->z , min.z); + + gmax->x = MYMAX( gmax->x , max.x); + gmax->y = MYMAX( gmax->y , max.y); + gmax->z = MYMAX( gmax->z , max.z); cur=(object *)cur->nextobj; } @@ -88,12 +88,12 @@ static void globalbound(object ** rootlist, vector * gmin, vector * gmax) { static int objinside(object * obj, vector * min, vector * max) { vector omin, omax; - if (obj == NULL) /* non-existant object, shouldn't get here */ + if (obj == NULL) /* non-existent object, shouldn't get here */ return 0; if (obj->methods->bbox((void *) obj, &omin, &omax)) { if ((min->x <= omin.x) && (min->y <= omin.y) && (min->z <= omin.z) && - (max->x >= omax.x) && (max->y >= omax.y) && (max->z >= omax.z)) { + (max->x >= omax.x) && (max->y >= omax.y) && (max->z >= omax.z)) { return 1; } } @@ -110,7 +110,7 @@ static int countobj(object * root) { while (cur != NULL) { cur=(object *)cur->nextobj; numobj++; - } + } return numobj; } @@ -124,7 +124,7 @@ static void movenextobj(object * thisobj, object ** root) { cur=(object *)thisobj->nextobj; /* the object to be moved */ thisobj->nextobj = cur->nextobj; /* link around the moved obj */ tmp=*root; /* store the root node */ - cur->nextobj=tmp; /* attach root to cur */ + cur->nextobj=tmp; /* attach root to cur */ *root=cur; /* make cur, the new root */ } } @@ -139,7 +139,7 @@ static void octreespace(object ** rootlist, int maxoctnodes) { bndbox * box5, * box6, * box7, * box8; int skipobj; - if (*rootlist == NULL) /* don't subdivide non-existant data */ + if (*rootlist == NULL) /* don't subdivide non-existent data */ return; skipobj=0; @@ -151,134 +151,134 @@ static void octreespace(object ** rootlist, int maxoctnodes) { cmin1=gmin; cmax1=gctr; - box1 = newbndbox(cmin1, cmax1); + box1 = newbndbox(cmin1, cmax1); cmin2=gmin; cmin2.x=gctr.x; cmax2=gmax; cmax2.y=gctr.y; cmax2.z=gctr.z; - box2 = newbndbox(cmin2, cmax2); + box2 = newbndbox(cmin2, cmax2); cmin3=gmin; cmin3.y=gctr.y; cmax3=gmax; cmax3.x=gctr.x; cmax3.z=gctr.z; - box3 = newbndbox(cmin3, cmax3); + box3 = newbndbox(cmin3, cmax3); cmin4=gmin; cmin4.x=gctr.x; cmin4.y=gctr.y; cmax4=gmax; cmax4.z=gctr.z; - box4 = newbndbox(cmin4, cmax4); + box4 = newbndbox(cmin4, cmax4); cmin5=gmin; cmin5.z=gctr.z; cmax5=gctr; cmax5.z=gmax.z; - box5 = newbndbox(cmin5, cmax5); + box5 = newbndbox(cmin5, cmax5); cmin6=gctr; cmin6.y=gmin.y; cmax6=gmax; cmax6.y=gctr.y; - box6 = newbndbox(cmin6, cmax6); + box6 = newbndbox(cmin6, cmax6); cmin7=gctr; cmin7.x=gmin.x; cmax7=gctr; cmax7.y=gmax.y; cmax7.z=gmax.z; - box7 = newbndbox(cmin7, cmax7); + box7 = newbndbox(cmin7, cmax7); cmin8=gctr; cmax8=gmax; - box8 = newbndbox(cmin8, cmax8); + box8 = newbndbox(cmin8, cmax8); cur = *rootlist; - while (cur != NULL) { + while (cur != NULL) { if (objinside((object *)cur->nextobj, &cmin1, &cmax1)) { - movenextobj(cur, &box1->objlist); - } + movenextobj(cur, &box1->objlist); + } else if (objinside((object *)cur->nextobj, &cmin2, &cmax2)) { - movenextobj(cur, &box2->objlist); - } + movenextobj(cur, &box2->objlist); + } else if (objinside((object *)cur->nextobj, &cmin3, &cmax3)) { - movenextobj(cur, &box3->objlist); - } + 
movenextobj(cur, &box3->objlist); + } else if (objinside((object *)cur->nextobj, &cmin4, &cmax4)) { - movenextobj(cur, &box4->objlist); - } + movenextobj(cur, &box4->objlist); + } else if (objinside((object *)cur->nextobj, &cmin5, &cmax5)) { - movenextobj(cur, &box5->objlist); - } + movenextobj(cur, &box5->objlist); + } else if (objinside((object *)cur->nextobj, &cmin6, &cmax6)) { - movenextobj(cur, &box6->objlist); - } + movenextobj(cur, &box6->objlist); + } else if (objinside((object *)cur->nextobj, &cmin7, &cmax7)) { - movenextobj(cur, &box7->objlist); - } + movenextobj(cur, &box7->objlist); + } else if (objinside((object *)cur->nextobj, &cmin8, &cmax8)) { - movenextobj(cur, &box8->objlist); - } + movenextobj(cur, &box8->objlist); + } else { - skipobj++; + skipobj++; cur=(object *)cur->nextobj; } - } + } /* new scope, for redefinition of cur, and old */ { bndbox * cur, * old; old=box1; - cur=box2; + cur=box2; if (countobj(cur->objlist) > 0) { old->nextobj=cur; - globalbound(&cur->objlist, &cur->min, &cur->max); - old=cur; - } + globalbound(&cur->objlist, &cur->min, &cur->max); + old=cur; + } cur=box3; if (countobj(cur->objlist) > 0) { old->nextobj=cur; - globalbound(&cur->objlist, &cur->min, &cur->max); - old=cur; - } + globalbound(&cur->objlist, &cur->min, &cur->max); + old=cur; + } cur=box4; if (countobj(cur->objlist) > 0) { old->nextobj=cur; - globalbound(&cur->objlist, &cur->min, &cur->max); - old=cur; - } + globalbound(&cur->objlist, &cur->min, &cur->max); + old=cur; + } cur=box5; if (countobj(cur->objlist) > 0) { old->nextobj=cur; - globalbound(&cur->objlist, &cur->min, &cur->max); - old=cur; - } + globalbound(&cur->objlist, &cur->min, &cur->max); + old=cur; + } cur=box6; if (countobj(cur->objlist) > 0) { old->nextobj=cur; - globalbound(&cur->objlist, &cur->min, &cur->max); - old=cur; - } + globalbound(&cur->objlist, &cur->min, &cur->max); + old=cur; + } cur=box7; if (countobj(cur->objlist) > 0) { old->nextobj=cur; - globalbound(&cur->objlist, &cur->min, &cur->max); - old=cur; - } + globalbound(&cur->objlist, &cur->min, &cur->max); + old=cur; + } cur=box8; if (countobj(cur->objlist) > 0) { old->nextobj=cur; - globalbound(&cur->objlist, &cur->min, &cur->max); - old=cur; - } + globalbound(&cur->objlist, &cur->min, &cur->max); + old=cur; + } old->nextobj=*rootlist; if (countobj(box1->objlist) > 0) { - globalbound(&box1->objlist, &box1->min, &box1->max); + globalbound(&box1->objlist, &box1->min, &box1->max); *rootlist=(object *) box1; } else { @@ -318,15 +318,15 @@ void dividespace(int maxoctnodes, object **toplist) { vector gmin, gmax; if (countobj(*toplist) > maxoctnodes) { - globalbound(toplist, &gmin, &gmax); + globalbound(toplist, &gmin, &gmax); - octreespace(toplist, maxoctnodes); + octreespace(toplist, maxoctnodes); gbox = newbndbox(gmin, gmax); gbox->objlist = NULL; - gbox->tex = NULL; + gbox->tex = NULL; gbox->nextobj=NULL; gbox->objlist=*toplist; - *toplist=(object *) gbox; + *toplist=(object *) gbox; } } diff --git a/examples/parallel_for/tachyon/src/types.h b/examples/parallel_for/tachyon/src/types.h index 1ecc72335e..ef6b45e4aa 100644 --- a/examples/parallel_for/tachyon/src/types.h +++ b/examples/parallel_for/tachyon/src/types.h @@ -70,7 +70,7 @@ /* Use prime numbers for best memory system performance */ #define INTTBSIZE 1024 /* maximum intersections we can hold */ #define MAXLIGHTS 39 /* maximum number of lights in a scene */ -#define MAXIMGS 39 /* maxiumum number of distinct images */ +#define MAXIMGS 39 /* maximum number of distinct images */ #define RPCQSIZE 113 /* 
number of RPC messages to queue */ /* Parameter values for rt_boundmode() */ diff --git a/examples/parallel_reduce/primes/primes.cpp b/examples/parallel_reduce/primes/primes.cpp index dcd9d1c784..c45bd041d2 100644 --- a/examples/parallel_reduce/primes/primes.cpp +++ b/examples/parallel_reduce/primes/primes.cpp @@ -296,7 +296,7 @@ NumberType ParallelCountPrimes( NumberType n , int number_of_threads, NumberType printf("---\n"); using namespace tbb; // Explicit grain size and simple_partitioner() used here instead of automatic grainsize - // determination becase we want SieveRange to be decomposed down to grainSize or smaller. + // determination because we want SieveRange to be decomposed down to grainSize or smaller. // Doing so improves odds that the working set fits in cache when evaluating Sieve::operator(). parallel_reduce( SieveRange( s.multiples.m, n, s.multiples.m, grain_size ), s, simple_partitioner() ); count += s.count; diff --git a/examples/test_all/fibonacci/Fibonacci.cpp b/examples/test_all/fibonacci/Fibonacci.cpp index fbd23b5576..937c7f4143 100644 --- a/examples/test_all/fibonacci/Fibonacci.cpp +++ b/examples/test_all/fibonacci/Fibonacci.cpp @@ -177,7 +177,7 @@ value SharedSerialFib(int n) //! Hash comparer struct IntHashCompare { bool equal( const int j, const int k ) const { return j == k; } - unsigned long hash( const int k ) const { return (unsigned long)k; } + unsigned long hash( const int k ) const { return (unsigned long)k; } }; //! NumbersTable type based on concurrent_hash_map typedef concurrent_hash_map NumbersTable; @@ -193,7 +193,7 @@ class ConcurrentHashSerialFibTask: public task { for( int i = 2; i <= my_n; ++i ) { // there is no difference in to recycle or to make loop NumbersTable::const_accessor f1, f2; // same as iterators if( !Fib.find(f1, i-1) || !Fib.find(f2, i-2) ) { - // Something is seriously wrong, because i-1 and i-2 must have been inserted + // Something is seriously wrong, because i-1 and i-2 must have been inserted // earlier by this thread or another thread. assert(0); } @@ -209,7 +209,7 @@ class ConcurrentHashSerialFibTask: public task { //! Root function value ConcurrentHashSerialFib(int n) { - NumbersTable Fib; + NumbersTable Fib; bool okay; okay = Fib.insert( make_pair(0, 0) ); assert(okay); // assign initial values okay = Fib.insert( make_pair(1, 1) ); assert(okay); @@ -245,7 +245,7 @@ struct QueueStream { }; //! Functor for parallel_for which fills the queue -struct parallel_forFibBody { +struct parallel_forFibBody { QueueStream &my_stream; //! fill functor arguments parallel_forFibBody(QueueStream &s) : my_stream(s) { } @@ -286,8 +286,8 @@ struct QueueInsertTask: public task { //! executing task task* execute() /*override*/ { // Execute of parallel pushing of n-1 initial matrices - parallel_for( blocked_range( 1, my_n, 10 ), parallel_forFibBody(my_stream) ); - my_stream.producer_is_done = true; + parallel_for( blocked_range( 1, my_n, 10 ), parallel_forFibBody(my_stream) ); + my_stream.producer_is_done = true; return 0; } }; @@ -317,7 +317,7 @@ value ParallelQueueFib(int n) // before the second task in the list starts. 
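    // Editorial note, not part of the patch: spawn_root_and_wait() spawns
    // every task in 'list' and blocks until all of them complete; the tasks
    // are not ordered against each other, which is why the comment above
    // warns about the second task in the list starting early.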
task::spawn_root_and_wait(list); assert(stream.Queue.unsafe_size() == 1); // it is easy to lose some work - Matrix2x2 M; + Matrix2x2 M; bool result = stream.Queue.try_pop( M ); // get last matrix assert( result ); return M.v[0][0]; // and result number @@ -349,7 +349,7 @@ class MultiplyFilter: public filter { concurrent_queue &Queue = *static_cast *>(p); Matrix2x2 m1, m2; // get two elements - while( !Queue.try_pop( m1 ) ) this_tbb_thread::yield(); + while( !Queue.try_pop( m1 ) ) this_tbb_thread::yield(); while( !Queue.try_pop( m2 ) ) this_tbb_thread::yield(); m1 = m1 * m2; // process them Queue.push( m1 ); // and push back @@ -373,7 +373,7 @@ value ParallelPipeFib(int n) pipeline.clear(); // do not forget clear the pipeline assert( input.Queue.unsafe_size()==1 ); - Matrix2x2 M; + Matrix2x2 M; bool result = input.Queue.try_pop( M ); // get last element assert( result ); return M.v[0][0]; // get value @@ -384,20 +384,20 @@ value ParallelPipeFib(int n) //! Functor for parallel_reduce struct parallel_reduceFibBody { Matrix2x2 sum; - int splitted; //< flag to make one less operation for splitted bodies + int split_flag; //< flag to make one less operation for split bodies //! Constructor fills sum with initial matrix - parallel_reduceFibBody() : sum( Matrix1110 ), splitted(0) { } + parallel_reduceFibBody() : sum( Matrix1110 ), split_flag(0) { } //! Splitting constructor - parallel_reduceFibBody( parallel_reduceFibBody& other, split ) : sum( Matrix1110 ), splitted(1/*note that it is splitted*/) {} + parallel_reduceFibBody( parallel_reduceFibBody& other, split ) : sum( Matrix1110 ), split_flag(1/*note that it is split*/) {} //! Join point void join( parallel_reduceFibBody &s ) { sum = sum * s.sum; } //! Process multiplications void operator()( const blocked_range &r ) { - for( int k = r.begin() + splitted; k < r.end(); ++k ) + for( int k = r.begin() + split_flag; k < r.end(); ++k ) sum = sum * Matrix1110; - splitted = 0; // reset flag, because this method can be reused for next range + split_flag = 0; // reset flag, because this method can be reused for next range } }; //! Root function @@ -469,7 +469,7 @@ struct FibTask: public task { value x, y; bool second_phase; //< flag of continuation // task arguments - FibTask( int n_, value& sum_ ) : + FibTask( int n_, value& sum_ ) : n(n_), sum(sum_), second_phase(false) {} //! Execute task @@ -494,7 +494,7 @@ struct FibTask: public task { } }; //! Root function -value ParallelTaskFib(int n) { +value ParallelTaskFib(int n) { value sum; FibTask& a = *new(task::allocate_root()) FibTask(n, sum); task::spawn_root_and_wait(a); @@ -515,8 +515,8 @@ void IntRange::set_from_string( const char* s ) { char* end; high = low = strtol(s,&end,0); switch( *end ) { - case ':': - high = strtol(end+1,0,0); + case ':': + high = strtol(end+1,0,0); break; case '\0': break; diff --git a/include/tbb/cache_aligned_allocator.h b/include/tbb/cache_aligned_allocator.h index 3a00e03588..a9983298b0 100644 --- a/include/tbb/cache_aligned_allocator.h +++ b/include/tbb/cache_aligned_allocator.h @@ -144,7 +144,7 @@ class cache_aligned_resource : public std::pmr::memory_resource { } private: - //! We don't know what memory resource set. Use padding to gurantee alignment + //! We don't know what memory resource set. 
Use padding to guarantee alignment void* do_allocate(size_t bytes, size_t alignment) override { size_t cache_line_alignment = correct_alignment(alignment); uintptr_t base = (uintptr_t)m_upstream->allocate(correct_size(bytes) + cache_line_alignment); @@ -183,7 +183,7 @@ class cache_aligned_resource : public std::pmr::memory_resource { } size_t correct_alignment(size_t alignment) { - __TBB_ASSERT(tbb::internal::is_power_of_two(alignment), "Alignemnt is not a power of 2"); + __TBB_ASSERT(tbb::internal::is_power_of_two(alignment), "Alignment is not a power of 2"); #if __TBB_CPP17_HW_INTERFERENCE_SIZE_PRESENT size_t cache_line_size = std::hardware_destructive_interference_size; #else diff --git a/include/tbb/internal/_concurrent_queue_impl.h b/include/tbb/internal/_concurrent_queue_impl.h index 7955a40ac8..c1aafc9e20 100644 --- a/include/tbb/internal/_concurrent_queue_impl.h +++ b/include/tbb/internal/_concurrent_queue_impl.h @@ -885,7 +885,7 @@ class concurrent_queue_base_v3: no_copy { //! Get size of queue ptrdiff_t __TBB_EXPORTED_METHOD internal_size() const; - //! Check if the queue is emtpy + //! Check if the queue is empty bool __TBB_EXPORTED_METHOD internal_empty() const; //! Set the queue capacity diff --git a/include/tbb/internal/_flow_graph_item_buffer_impl.h b/include/tbb/internal/_flow_graph_item_buffer_impl.h index 569545fc9e..9ac4dbbbf4 100644 --- a/include/tbb/internal/_flow_graph_item_buffer_impl.h +++ b/include/tbb/internal/_flow_graph_item_buffer_impl.h @@ -140,7 +140,7 @@ namespace internal { return get_my_item(my_tail - 1); } - // following methods are for reservation of the front of a bufffer. + // following methods are for reservation of the front of a buffer. void reserve_item(size_type i) { __TBB_ASSERT(my_item_valid(i) && !my_item_reserved(i), "item cannot be reserved"); item(i).second = reserved_item; } void release_item(size_type i) { __TBB_ASSERT(my_item_reserved(i), "item is not reserved"); item(i).second = has_item; } diff --git a/include/tbb/internal/_tbb_windef.h b/include/tbb/internal/_tbb_windef.h index 7fe8df428c..1268ba271c 100644 --- a/include/tbb/internal/_tbb_windef.h +++ b/include/tbb/internal/_tbb_windef.h @@ -18,7 +18,7 @@ #error Do not #include this internal file directly; use public TBB headers instead. #endif /* __TBB_tbb_windef_H */ -// Check that the target Windows version has all API calls requried for TBB. +// Check that the target Windows version has all API calls required for TBB. // Do not increase the version in condition beyond 0x0500 without prior discussion! #if defined(_WIN32_WINNT) && _WIN32_WINNT<0x0501 #error TBB is unable to run on old Windows versions; _WIN32_WINNT must be 0x0501 or greater. diff --git a/include/tbb/task.h b/include/tbb/task.h index b062717f8c..6eff290fcd 100644 --- a/include/tbb/task.h +++ b/include/tbb/task.h @@ -433,7 +433,7 @@ class task_group_context : internal::no_copy { intptr_t my_priority; #endif /* __TBB_TASK_PRIORITY */ - //! Decription of algorithm for scheduler based instrumentation. + //! Description of algorithm for scheduler based instrumentation. internal::string_index my_name; //! 
Trailing padding protecting accesses to frequently used members from false sharing diff --git a/include/tbb/task_arena.h b/include/tbb/task_arena.h index 855634c611..f1fef56e40 100644 --- a/include/tbb/task_arena.h +++ b/include/tbb/task_arena.h @@ -53,7 +53,7 @@ class delegate_base : no_assign { virtual ~delegate_base() {} }; -// If decltype is availabe, the helper detects the return type of functor of specified type, +// If decltype is available, the helper detects the return type of functor of specified type, // otherwise it defines the void type. template struct return_type_or_void { diff --git a/include/tbb/tbb_stddef.h b/include/tbb/tbb_stddef.h index 3112f73385..f9aa90b3a3 100644 --- a/include/tbb/tbb_stddef.h +++ b/include/tbb/tbb_stddef.h @@ -22,7 +22,7 @@ #define TBB_VERSION_MINOR 0 // Engineering-focused interface version -#define TBB_INTERFACE_VERSION 11007 +#define TBB_INTERFACE_VERSION 11008 #define TBB_INTERFACE_VERSION_MAJOR TBB_INTERFACE_VERSION/1000 // The oldest major interface version still supported diff --git a/include/tbb/tbb_thread.h b/include/tbb/tbb_thread.h index 5a3b5cc5c3..52b96e4d54 100644 --- a/include/tbb/tbb_thread.h +++ b/include/tbb/tbb_thread.h @@ -242,7 +242,7 @@ namespace internal { friend tbb_thread_v3::id __TBB_EXPORTED_FUNC thread_get_id_v3(); friend inline size_t tbb_hasher( const tbb_thread_v3::id& id ) { - __TBB_STATIC_ASSERT(sizeof(id.my_id) <= sizeof(size_t), "Implementaion assumes that thread_id_type fits into machine word"); + __TBB_STATIC_ASSERT(sizeof(id.my_id) <= sizeof(size_t), "Implementation assumes that thread_id_type fits into machine word"); return tbb::tbb_hasher(id.my_id); } diff --git a/python/tbb/pool.py b/python/tbb/pool.py index c521f5f2b4..3a5da11c64 100644 --- a/python/tbb/pool.py +++ b/python/tbb/pool.py @@ -323,7 +323,7 @@ def __init__(self, collector=None, callback=None): the collector will be called when the result from the Job is ready \param callback when not None, function to call when the - result becomes available (this is the paramater passed to the + result becomes available (this is the parameter passed to the Pool::*_async() methods. """ self._success = False diff --git a/src/perf/run_statistics.sh b/src/perf/run_statistics.sh index c3048f4c4d..a4152fc4e4 100644 --- a/src/perf/run_statistics.sh +++ b/src/perf/run_statistics.sh @@ -17,7 +17,7 @@ export LD_LIBRARY_PATH=.:$LD_LIBRARY_PATH #setting output format .csv, 'pivot' - is pivot table mode, ++ means append export STAT_FORMAT=pivot-csv++ -#check existing files because of apend mode +#check existing files because of append mode ls *.csv rm -i *.csv #setting a delimiter in txt or csv file diff --git a/src/perf/time_lru_cache_throughput.cpp b/src/perf/time_lru_cache_throughput.cpp index 4bcbf749b9..c202a11d88 100644 --- a/src/perf/time_lru_cache_throughput.cpp +++ b/src/perf/time_lru_cache_throughput.cpp @@ -34,7 +34,7 @@ #include #include "tbb/mutex.h" -//TODO: probably move this to separate header utlity file +//TODO: probably move this to separate header utility file namespace micro_benchmarking{ namespace utils{ template diff --git a/src/rml/server/rml_server.cpp b/src/rml/server/rml_server.cpp index 7cc9f1f27b..5db496fe5b 100644 --- a/src/rml/server/rml_server.cpp +++ b/src/rml/server/rml_server.cpp @@ -1677,7 +1677,7 @@ void wakeup_some_tbb_threads() { /* First, atomically grab the connection, then increase the server ref count to keep it from being released prematurely. 
Second, check if the balance is available for TBB - and the tbb conneciton has slack to exploit. If the answer is true, go ahead and + and the tbb connection has slack to exploit. If the answer is true, go ahead and try to wake some up. */ if( generic_connection::get_addr(active_tbb_connections)==0 ) // the next connection will see the change; return. diff --git a/src/rml/test/test_server.h b/src/rml/test/test_server.h index b1e9cc129d..7b01e6cc31 100644 --- a/src/rml/test/test_server.h +++ b/src/rml/test/test_server.h @@ -16,7 +16,7 @@ /* This header contains code shared by test_omp_server.cpp and test_tbb_server.cpp There is no ifndef guard - test is supposed to include this file exactly once. - The test is also exected to have #include of rml_omp.h or rml_tbb.h before + The test is also executed to have #include of rml_omp.h or rml_tbb.h before including this header. This header should not use any parts of TBB that require linking in the TBB run-time. diff --git a/src/tbb/arena.cpp b/src/tbb/arena.cpp index 9b4d627e4d..28717e752f 100644 --- a/src/tbb/arena.cpp +++ b/src/tbb/arena.cpp @@ -574,20 +574,20 @@ void arena::enqueue_task( task& t, intptr_t prio, FastRandom &random ) if( prio == internal::priority_critical || internal::is_critical( t ) ) { // TODO: consider using of 'scheduler::handled_as_critical' internal::make_critical( t ); -#if __TBB_TASK_ISOLATION generic_scheduler* s = governor::local_scheduler_if_initialized(); - __TBB_ASSERT( s, "Scheduler must be initialized at this moment" ); - // propagate isolation level to critical task - t.prefix().isolation = s->my_innermost_running_task->prefix().isolation; -#endif ITT_NOTIFY(sync_releasing, &my_critical_task_stream); - if( !s || !s->my_arena_slot ) { - // Either scheduler is not initialized or it is not attached to the arena, use random - // lane for the task. - my_critical_task_stream.push( &t, 0, internal::random_lane_selector(random) ); - } else { + if( s && s->my_arena_slot ) { + // Scheduler is initialized and it is attached to the arena, + // propagate isolation level to critical task +#if __TBB_TASK_ISOLATION + t.prefix().isolation = s->my_innermost_running_task->prefix().isolation; +#endif unsigned& lane = s->my_arena_slot->hint_for_critical; my_critical_task_stream.push( &t, 0, tbb::internal::subsequent_lane_selector(lane) ); + } else { + // Either scheduler is not initialized or it is not attached to the arena + // use random lane for the task + my_critical_task_stream.push( &t, 0, internal::random_lane_selector(random) ); } advertise_new_work(); return; @@ -796,7 +796,7 @@ void task_arena_base::internal_attach( ) { void task_arena_base::internal_enqueue( task& t, intptr_t prio ) const { __TBB_ASSERT(my_arena, NULL); - generic_scheduler* s = governor::local_scheduler_if_initialized(); + generic_scheduler* s = governor::local_scheduler_weak(); // scheduler is only needed for FastRandom instance __TBB_ASSERT(s, "Scheduler is not initialized"); // we allocated a task so can expect the scheduler #if __TBB_TASK_GROUP_CONTEXT // Is there a better place for checking the state of my_default_ctx? 
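The two arena.cpp hunks above implement the second Update 8 bullet (intel/tbb#116): internal_enqueue now takes a weak scheduler reference, and the critical-task path dereferences the scheduler (for isolation propagation and the lane hint) only when it is actually attached to the arena. A minimal sketch of the pattern that used to fail, assuming TBB 2019 headers; the lambda bodies are illustrative:

    #include <thread>
    #include "tbb/task_arena.h"

    int main() {
        tbb::task_arena arena;
        arena.initialize();                 // arena set up by the main thread
        // A plain std::thread never creates a TBB task scheduler of its own;
        // before this fix, enqueuing from such a thread could fail.
        std::thread t([&arena] {
            arena.enqueue([] { /* runs on the arena's worker threads */ });
        });
        t.join();
        return 0;
    }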
diff --git a/src/tbb/condition_variable.cpp b/src/tbb/condition_variable.cpp index 58b3745f92..36ea3efbb2 100644 --- a/src/tbb/condition_variable.cpp +++ b/src/tbb/condition_variable.cpp @@ -139,7 +139,7 @@ void init_condvar_module() __TBB_ASSERT( (uintptr_t)__TBB_init_condvar==(uintptr_t)&init_condvar_using_event, NULL ); #if __TBB_WIN8UI_SUPPORT // We expect condition variables to be always available for Windows* store applications, - // so there is no need to check presense and use alternative implementation. + // so there is no need to check presence and use alternative implementation. __TBB_init_condvar = (void (WINAPI *)(PCONDITION_VARIABLE))&InitializeConditionVariable; __TBB_condvar_wait = (BOOL(WINAPI *)(PCONDITION_VARIABLE, LPCRITICAL_SECTION, DWORD))&SleepConditionVariableCS; __TBB_condvar_notify_one = (void (WINAPI *)(PCONDITION_VARIABLE))&WakeConditionVariable; diff --git a/src/tbb/mac32-tbb-export.lst b/src/tbb/mac32-tbb-export.lst index ccc7ac95b8..1ef5869bde 100644 --- a/src/tbb/mac32-tbb-export.lst +++ b/src/tbb/mac32-tbb-export.lst @@ -20,7 +20,7 @@ Sometimes macOS* requires leading underscore (e. g. in export list file), but sometimes not (e. g. when searching symbol in a dynamic library via dlsym()). Symbols in this file SHOULD be listed WITHOUT one leading underscore. __TBB_SYMBOL macro should add underscore when - necessary, depending on the indended usage. + necessary, depending on the intended usage. */ // cache_aligned_allocator.cpp diff --git a/src/tbb/mac64-tbb-export.lst b/src/tbb/mac64-tbb-export.lst index 22c5f3bff4..c2d82d9759 100644 --- a/src/tbb/mac64-tbb-export.lst +++ b/src/tbb/mac64-tbb-export.lst @@ -20,7 +20,7 @@ Sometimes macOS* requires leading underscore (e. g. in export list file), but sometimes not (e. g. when searching symbol in a dynamic library via dlsym()). Symbols in this file SHOULD be listed WITHOUT one leading underscore. __TBB_SYMBOL macro should add underscore when - necessary, depending on the indended usage. + necessary, depending on the intended usage. */ // cache_aligned_allocator.cpp diff --git a/src/tbb/market.cpp b/src/tbb/market.cpp index 8ff5c549bd..98f3a022e1 100644 --- a/src/tbb/market.cpp +++ b/src/tbb/market.cpp @@ -186,7 +186,7 @@ bool market::release ( bool is_public, bool blocking_terminate ) { // Theoretically, new private references to the market can be added during waiting making it potentially // endless. // TODO: revise why the weak scheduler needs market's pointer and try to remove this wait. - // Note that the market should know about its schedulers for cancelation/exception/priority propagation, + // Note that the market should know about its schedulers for cancellation/exception/priority propagation, // see e.g. task_group_context::cancel_group_execution() while ( __TBB_load_with_acquire( my_public_ref_count ) == 1 && __TBB_load_with_acquire( my_ref_count ) > 1 ) __TBB_Yield(); diff --git a/src/tbb/scheduler.cpp b/src/tbb/scheduler.cpp index 2aa402d50d..9b08d40369 100644 --- a/src/tbb/scheduler.cpp +++ b/src/tbb/scheduler.cpp @@ -1136,7 +1136,7 @@ task* generic_scheduler::steal_task_from( __TBB_ISOLATION_ARG( arena_slot& victi break; GATHER_STATISTIC( ++my_counters.proxies_bypassed ); } - // The task cannot be executed either due to isolation or proxy contraints. + // The task cannot be executed either due to isolation or proxy constraints. 
result = NULL; tasks_omitted = true; } else if ( !tasks_omitted ) { diff --git a/src/tbb/task_stream_extended.h b/src/tbb/task_stream_extended.h index 891379d3aa..8103d1ef64 100644 --- a/src/tbb/task_stream_extended.h +++ b/src/tbb/task_stream_extended.h @@ -30,7 +30,7 @@ #endif #if !__TBB_CPF_BUILD -#error This code bears a preview status until it proves its usefulness/peformance suitability. +#error This code bears a preview status until it proves its usefulness/performance suitability. #endif #include "tbb/tbb_stddef.h" diff --git a/src/tbb/tools_api/ittnotify.h b/src/tbb/tools_api/ittnotify.h index d4810860e3..b6fbef5fb3 100644 --- a/src/tbb/tools_api/ittnotify.h +++ b/src/tbb/tools_api/ittnotify.h @@ -2705,7 +2705,7 @@ ITT_STUB(ITTAPI, __itt_clock_domain*, clock_domain_create, (__itt_get_clock_info /** * @ingroup clockdomains - * @brief Recalculate clock domains frequences and clock base timestamps. + * @brief Recalculate clock domains frequencies and clock base timestamps. */ void ITTAPI __itt_clock_domain_reset(void); @@ -3715,7 +3715,7 @@ ITT_STUB(ITTAPI, __itt_caller, stack_caller_create, (void)) /** @endcond */ /** - * @brief Destroy the inforamtion about stitch point identified by the pointer previously returned by __itt_stack_caller_create() + * @brief Destroy the information about stitch point identified by the pointer previously returned by __itt_stack_caller_create() */ void ITTAPI __itt_stack_caller_destroy(__itt_caller id); diff --git a/src/tbb/tools_api/ittnotify_static.c b/src/tbb/tools_api/ittnotify_static.c index 6e082cba71..08c69b36f6 100644 --- a/src/tbb/tools_api/ittnotify_static.c +++ b/src/tbb/tools_api/ittnotify_static.c @@ -603,7 +603,7 @@ static const char* __itt_fsplit(const char* s, const char* sep, const char** out /* This function return value of env variable that placed into static buffer. * !!! The same static buffer is used for subsequent calls. !!! - * This was done to aviod dynamic allocation for few calls. + * This was done to avoid dynamic allocation for few calls. * Actually we need this function only four times. */ static const char* __itt_get_env_var(const char* name) diff --git a/src/tbbmalloc/MapMemory.h b/src/tbbmalloc/MapMemory.h index bfe5bdc504..1b63ee60c9 100644 --- a/src/tbbmalloc/MapMemory.h +++ b/src/tbbmalloc/MapMemory.h @@ -99,7 +99,7 @@ inline void* mmapTHP(size_t bytes) { offset = HUGE_PAGE_SIZE - ((uintptr_t)result & (HUGE_PAGE_SIZE - 1)); munmap(result, offset); - // New region begining + // New region beginning result = (void*)((uintptr_t)result + offset); } @@ -108,7 +108,7 @@ inline void* mmapTHP(size_t bytes) { } // Assume, that mmap virtual addresses grow down by default - // So, set a hint as a result of a last successfull allocation + // So, set a hint as a result of a last successful allocation // and then use it minus requested size as a new mapping point. // TODO: Atomic store is meant here, fence not needed, but // currently we don't have such function. diff --git a/src/tbbmalloc/backend.cpp b/src/tbbmalloc/backend.cpp index b5d2e391b0..a93ac74b33 100644 --- a/src/tbbmalloc/backend.cpp +++ b/src/tbbmalloc/backend.cpp @@ -709,7 +709,7 @@ FreeBlock *Backend::askMemFromOS(size_t blockSize, intptr_t startModifiedCnt, // no regions found, try to clean cache if (!block || block == (FreeBlock*)VALID_BLOCK_IN_BIN) return releaseMemInCaches(startModifiedCnt, lockedBinsThreshold, numOfLockedBins); - // Since a region can hold more than one block it can be splitted. + // Since a region can hold more than one block it can be split. 
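        // Editorial note, not part of the patch: the flag set below tells the
        // caller that the block returned from the fresh region may be carved
        // into smaller FreeBlocks instead of being handed out whole.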
*splittableRet = true; } // after asking memory from OS, release caches if we above the memory limits @@ -964,9 +964,9 @@ void *Backend::remap(void *ptr, size_t oldSize, size_t newSize, size_t alignment if (oldRegion->type != MEMREG_ONE_BLOCK) return NULL; // we are not single in the region const size_t userOffset = (uintptr_t)ptr - (uintptr_t)oldRegion; - const size_t alignedSize = LargeObjectCache::alignToBin(newSize); + const size_t alignedSize = LargeObjectCache::alignToBin(newSize + userOffset); const size_t requestSize = - alignUp(userOffset + alignedSize + sizeof(LastFreeBlock), extMemPool->granularity); + alignUp(sizeof(MemRegion) + alignedSize + sizeof(LastFreeBlock), extMemPool->granularity); if (requestSize < alignedSize) // is wrapped around? return NULL; regionList.remove(oldRegion); @@ -991,7 +991,7 @@ void *Backend::remap(void *ptr, size_t oldSize, size_t newSize, size_t alignment // TODO: get rid of useless pair blockConsumed()/blockReleased() bkndSync.blockReleased(); - // object must start at same offest from region's start + // object must start at same offset from region's start void *object = (void*)((uintptr_t)region + userOffset); MALLOC_ASSERT(isAligned(object, alignment), ASSERT_TEXT); LargeObjectHdr *header = (LargeObjectHdr*)object - 1; @@ -1200,7 +1200,7 @@ bool Backend::scanCoalescQ(bool forceCoalescQDrop) // matches blockConsumed() from CoalRequestQ::putBlock() coalescAndPutList(currCoalescList, forceCoalescQDrop, /*reportBlocksProcessed=*/true); - // returns status of coalescQ.getAll(), as an indication of possibe changes in backend + // returns status of coalescQ.getAll(), as an indication of possible changes in backend // TODO: coalescAndPutList() may report is some new free blocks became available or not return currCoalescList; } diff --git a/src/tbbmalloc/backend.h b/src/tbbmalloc/backend.h index 2acf9a0982..981086d249 100644 --- a/src/tbbmalloc/backend.h +++ b/src/tbbmalloc/backend.h @@ -92,7 +92,7 @@ enum MemRegionType { MEMREG_SLAB_BLOCKS = 0, // The region can hold several large object blocks MEMREG_LARGE_BLOCKS, - // The region holds only one block with a reqested size + // The region holds only one block with a requested size MEMREG_ONE_BLOCK }; @@ -255,7 +255,7 @@ class Backend { // register bins related to advance regions AdvRegionsBins advRegBins; - // Storage for splitted FreeBlocks + // Storage for split FreeBlocks IndexedBins freeLargeBlockBins, freeSlabAlignedBins; diff --git a/src/tbbmalloc/frontend.cpp b/src/tbbmalloc/frontend.cpp index 7d18ed73e4..d780437fd5 100644 --- a/src/tbbmalloc/frontend.cpp +++ b/src/tbbmalloc/frontend.cpp @@ -639,7 +639,7 @@ bool TLSData::cleanupBlockBins() bool ExtMemoryPool::releaseAllLocalCaches() { - // Iterate all registred TLS data and clean LLOC and Slab pools + // Iterate all registered TLS data and clean LLOC and Slab pools bool released = allLocalCaches.cleanup(/*cleanOnlyUnused=*/false); // Bins privatization is done only for the current thread @@ -1235,7 +1235,7 @@ void Bin::outofTLSBin(Block* block) if (block == activeBlk) { activeBlk = block->previous? 
block->previous : block->next; } - /* Delink the block */ + /* Unlink the block */ if (block->previous) { MALLOC_ASSERT( block->previous->next == block, ASSERT_TEXT ); block->previous->next = block->next; diff --git a/src/tbbmalloc/large_objects.h b/src/tbbmalloc/large_objects.h index 1e93fb7544..520c9c97ca 100644 --- a/src/tbbmalloc/large_objects.h +++ b/src/tbbmalloc/large_objects.h @@ -147,7 +147,7 @@ class LargeObjectCacheImpl { LargeMemoryBlock *first, *last; /* age of an oldest block in the list; equal to last->age, if last defined, - used for quick cheching it without acquiring the lock. */ + used for quick checking it without acquiring the lock. */ uintptr_t oldest; /* currAge when something was excluded out of list because of the age, not because of cache hit */ diff --git a/src/tbbmalloc/proxy.cpp b/src/tbbmalloc/proxy.cpp index 2e1708b08c..d96ae7a016 100644 --- a/src/tbbmalloc/proxy.cpp +++ b/src/tbbmalloc/proxy.cpp @@ -406,10 +406,10 @@ void* __TBB_malloc_safer_realloc_##CRTLIB( void *ptr, size_t size ) return __TBB_malloc_safer_realloc( ptr, size, &func_ptrs ); \ } \ \ -void* __TBB_malloc_safer__aligned_realloc_##CRTLIB( void *ptr, size_t size, size_t aligment ) \ +void* __TBB_malloc_safer__aligned_realloc_##CRTLIB( void *ptr, size_t size, size_t alignment ) \ { \ orig_aligned_ptrs func_ptrs = {orig__aligned_free_##CRTLIB, orig__aligned_msize_##CRTLIB}; \ - return __TBB_malloc_safer_aligned_realloc( ptr, size, aligment, &func_ptrs ); \ + return __TBB_malloc_safer_aligned_realloc( ptr, size, alignment, &func_ptrs ); \ } // Only for ucrtbase: substitution for _o_free diff --git a/src/tbbmalloc/tbb_function_replacement.cpp b/src/tbbmalloc/tbb_function_replacement.cpp index 90c4fcfac0..7410fce18c 100644 --- a/src/tbbmalloc/tbb_function_replacement.cpp +++ b/src/tbbmalloc/tbb_function_replacement.cpp @@ -57,7 +57,7 @@ namespace Log { static char *records[RECORDS_COUNT + 1]; static bool replacement_status = true; - // Internal counter, thats contain number of next string for record + // Internal counter that contains number of next string for record static unsigned record_number = 0; // Function that writes info about (not)found opcodes to the Log journal diff --git a/src/tbbmalloc/tbbmalloc_internal.h b/src/tbbmalloc/tbbmalloc_internal.h index 12d5c6cb0d..cec0b0bd76 100644 --- a/src/tbbmalloc/tbbmalloc_internal.h +++ b/src/tbbmalloc/tbbmalloc_internal.h @@ -419,7 +419,7 @@ class HugePagesStatus { unsigned long long hugePageSize = 0; #if __linux__ - // Check huge pages existense + // Check huge pages existence unsigned long long meminfoHugePagesTotal = 0; parseFileItem meminfoItems[] = { diff --git a/src/test/harness_allocator.h b/src/test/harness_allocator.h index ca9c5e88da..93e388cb87 100644 --- a/src/test/harness_allocator.h +++ b/src/test/harness_allocator.h @@ -807,10 +807,10 @@ class allocator_aware_data { allocator_aware_data(int v, const allocator_type& allocator = allocator_type()) : my_allocator(allocator), my_value(v) {} allocator_aware_data(const allocator_aware_data&) { - ASSERT(!assert_on_constructions, "Allocator should propogate to the data during copy construction"); + ASSERT(!assert_on_constructions, "Allocator should propagate to the data during copy construction"); } allocator_aware_data(allocator_aware_data&&) { - ASSERT(!assert_on_constructions, "Allocator should propogate to the data during move construction"); + ASSERT(!assert_on_constructions, "Allocator should propagate to the data during move construction"); } allocator_aware_data(const 
allocator_aware_data& rhs, const allocator_type& allocator) : my_allocator(allocator), my_value(rhs.my_value) {} diff --git a/src/test/harness_graph.h b/src/test/harness_graph.h index d09754d84a..1ba4e694e0 100644 --- a/src/test/harness_graph.h +++ b/src/test/harness_graph.h @@ -63,21 +63,21 @@ using tbb::flow::internal::SUCCESSFULLY_ENQUEUED; // conversion operators to the class, since we don't want it in general, // only in these tests. template -struct convertor { +struct converter { static OutputType convert_value(const InputType &i) { return OutputType(i); } }; template -struct convertor { +struct converter { static tbb::flow::continue_msg convert_value(const InputType &/*i*/) { return tbb::flow::continue_msg(); } }; template -struct convertor { +struct converter { static OutputType convert_value(const tbb::flow::continue_msg &/*i*/) { return OutputType(); } @@ -88,7 +88,7 @@ template struct mof_helper { template static inline void output_converted_value(const InputType &i, ports_type &p) { - (void)tbb::flow::get(p).try_put(convertor::type::output_type>::convert_value(i)); + (void)tbb::flow::get(p).try_put(converter::type::output_type>::convert_value(i)); output_converted_value(i, p); } }; @@ -98,7 +98,7 @@ struct mof_helper<1> { template static inline void output_converted_value(const InputType &i, ports_type &p) { // just emit a default-constructed object - (void)tbb::flow::get<0>(p).try_put(convertor::type::output_type>::convert_value(i)); + (void)tbb::flow::get<0>(p).try_put(converter::type::output_type>::convert_value(i)); } }; diff --git a/src/test/harness_test_cases_framework.h b/src/test/harness_test_cases_framework.h index c1eef673f7..694ed26eec 100644 --- a/src/test/harness_test_cases_framework.h +++ b/src/test/harness_test_cases_framework.h @@ -156,7 +156,7 @@ namespace test_framework_unit_tests{ test_suite ts; ts.register_test_case("tc_name",&do_nothing_tc); bool silent =true; - ASSERT(!ts(!silent).empty(),"in verbose mode all messages should be outputed"); + ASSERT(!ts(!silent).empty(),"in verbose mode all messages should be outputted"); } }; } diff --git a/src/test/test_allocator_STL.h b/src/test/test_allocator_STL.h index 4ca99504e0..ac01eefcb0 100644 --- a/src/test/test_allocator_STL.h +++ b/src/test/test_allocator_STL.h @@ -70,7 +70,7 @@ struct MoveOperationTracker { MoveOperationTracker(MoveOperationTracker&& m) __TBB_NOEXCEPT( true ) : my_value( m.my_value ) { } MoveOperationTracker& operator=(MoveOperationTracker const&) { - ASSERT( false, "Copy assigment operator is called" ); + ASSERT( false, "Copy assignment operator is called" ); return *this; } MoveOperationTracker& operator=(MoveOperationTracker&& m) __TBB_NOEXCEPT( true ) { @@ -91,7 +91,7 @@ struct MoveOperationTracker { template void TestAllocatorWithSTL(const Allocator &a = Allocator() ) { -// Allocator type convertion section +// Allocator type conversion section #if __TBB_ALLOCATOR_TRAITS_PRESENT typedef typename std::allocator_traits::template rebind_alloc Ai; typedef typename std::allocator_traits::template rebind_alloc > Acii; diff --git a/src/test/test_async_msg.cpp b/src/test/test_async_msg.cpp index ae4d049640..76d9d3be77 100644 --- a/src/test/test_async_msg.cpp +++ b/src/test/test_async_msg.cpp @@ -459,7 +459,7 @@ struct CheckerMakeEdge { static const bool valueRemove = !is_same_type(0), static_cast(0))), ImpossibleType>::value; __TBB_STATIC_ASSERT( valueMake == valueRemove, "make_edge() availability is NOT equal to remove_edge() availability" ); - + static const bool value = valueMake; }; @@ 
-469,10 +469,10 @@ struct TypeChecker { ++g_CheckerCounter; REMARK("%d: %s -> %s: %s %s \n", g_CheckerCounter, typeid(T1).name(), typeid(T2).name(), - (bAllowed ? "YES" : "no"), (bConvertable ? " (Convertable)" : "")); + (bAllowed ? "YES" : "no"), (bConvertible ? " (Convertible)" : "")); } -// +// // Check connection: function_node <-> function_node // R E C E I V I N G T Y P E // S 'bAllowed' | int | float | A | async_msg | async_msg | async_msg | UserAsync | UserAsync | UserAsync | @@ -487,13 +487,13 @@ struct TypeChecker { // Y UserAsync_int | Y | | | | | | Y | | | // P UserAsync_float | | Y | | | | | | Y | | // E UserAsync_A | | | Y | | | | | | Y | -// +// // Test make_edge() & remove_edge() availability static const bool bAllowed = is_same_type::value || is_same_type::filtered_type, T2>::value || is_same_type::filtered_type>::value; - static const bool bConvertable = bAllowed + static const bool bConvertible = bAllowed || std::is_base_of::value || (is_same_type::filtered_type, int>::value && is_same_type::value) || (is_same_type::filtered_type, float>::value && is_same_type::value); @@ -518,7 +518,7 @@ struct TypeChecker { // Test untyped_receiver->try_put(T2) availability __TBB_STATIC_ASSERT( (true == CheckerTryPut::value), "untyped_receiver cannot try_put(T2)" ); // Test receiver->try_put(T2) availability - __TBB_STATIC_ASSERT( (bConvertable == CheckerTryPut, T2>::value), "invalid availability of receiver->try_put(T2)" ); + __TBB_STATIC_ASSERT( (bConvertible == CheckerTryPut, T2>::value), "invalid availability of receiver->try_put(T2)" ); }; template diff --git a/src/test/test_async_node.cpp b/src/test/test_async_node.cpp index 602f4f9a21..e6d33a9592 100644 --- a/src/test/test_async_node.cpp +++ b/src/test/test_async_node.cpp @@ -88,7 +88,7 @@ tbb::atomic async_body_exec_count; tbb::atomic async_activity_processed_msg_count; tbb::atomic end_body_exec_count; -// queueing required in test_reset for testing of cancelation +// queueing required in test_reset for testing of cancellation typedef tbb::flow::async_node< int, int, tbb::flow::queueing > counting_async_node_type; typedef counting_async_node_type::gateway_type counting_gateway_type; @@ -435,7 +435,7 @@ struct basic_test { start_node.try_put(i); } g.wait_for_all(); - ASSERT( async_body_exec_count == NUMBER_OF_MSGS, "AsyncBody procesed wrong number of signals" ); + ASSERT( async_body_exec_count == NUMBER_OF_MSGS, "AsyncBody processed wrong number of signals" ); ASSERT( async_activity_processed_msg_count == NUMBER_OF_MSGS, "AsyncActivity processed wrong number of signals" ); ASSERT( end_body_exec_count == NUMBER_OF_MSGS, "EndBody processed wrong number of signals"); REMARK("async_body_exec_count == %d == async_activity_processed_msg_count == %d == end_body_exec_count == %d\n", @@ -471,7 +471,7 @@ int test_copy_ctor() { REMARK("async_body_exec_count = %d\n", int(async_body_exec_count)); REMARK("r1.my_count == %d and r2.my_count = %d\n", int(r1.my_count), int(r2.my_count)); - ASSERT( int(async_body_exec_count) == NUMBER_OF_MSGS, "AsyncBody procesed wrong number of signals" ); + ASSERT( int(async_body_exec_count) == NUMBER_OF_MSGS, "AsyncBody processed wrong number of signals" ); ASSERT( int(r1.my_count) == N, "counting receiver r1 has not received N items" ); ASSERT( int(r2.my_count) == 0, "counting receiver r2 has not received 0 items" ); @@ -482,7 +482,7 @@ int test_copy_ctor() { REMARK("async_body_exec_count = %d\n", int(async_body_exec_count)); REMARK("r1.my_count == %d and r2.my_count = %d\n", int(r1.my_count), 
int(r2.my_count)); - ASSERT( int(async_body_exec_count) == 2*NUMBER_OF_MSGS, "AsyncBody procesed wrong number of signals" ); + ASSERT( int(async_body_exec_count) == 2*NUMBER_OF_MSGS, "AsyncBody processed wrong number of signals" ); ASSERT( int(r1.my_count) == N, "counting receiver r1 has not received N items" ); ASSERT( int(r2.my_count) == N, "counting receiver r2 has not received N items" ); return Harness::Done; @@ -582,7 +582,7 @@ struct spin_test { start_node.try_put(i); } g.wait_for_all(); - ASSERT( async_body_exec_count == nthreads*NUMBER_OF_MSGS, "AsyncBody procesed wrong number of signals" ); + ASSERT( async_body_exec_count == nthreads*NUMBER_OF_MSGS, "AsyncBody processed wrong number of signals" ); ASSERT( async_activity_processed_msg_count == nthreads*NUMBER_OF_MSGS, "AsyncActivity processed wrong number of signals" ); ASSERT( end_body_exec_count == nthreads*NUMBER_OF_MSGS, "EndBody processed wrong number of signals"); ASSERT_WARNING( main_tid_count != 0, "Main thread did not participate in end_body tasks"); @@ -677,7 +677,7 @@ class equeueing_on_inner_level { Harness::SpinBarrier spin_barrier( nthreads ); async_activity_type my_async_activity( UNKNOWN_NUMBER_OF_ITEMS, true ); - + tbb::parallel_for( 0, nthreads, body_graph_with_async( spin_barrier, my_async_activity ) ); return Harness::Done; } diff --git a/src/test/test_buffer_node.cpp b/src/test/test_buffer_node.cpp index 3a1825ce75..c2a8f09abc 100644 --- a/src/test/test_buffer_node.cpp +++ b/src/test/test_buffer_node.cpp @@ -186,8 +186,8 @@ int test_reservation() { // // Tests // -// multilpe parallel senders, items in arbitrary order -// multilpe parallel senders, multiple parallel receivers, items in arbitrary order and all items received +// multiple parallel senders, items in arbitrary order +// multiple parallel senders, multiple parallel receivers, items in arbitrary order and all items received // * overlapped puts / gets // * all puts finished before any getS // diff --git a/src/test/test_concurrent_associative_common.h b/src/test/test_concurrent_associative_common.h index 382827e324..614a0df9a0 100644 --- a/src/test/test_concurrent_associative_common.h +++ b/src/test/test_concurrent_associative_common.h @@ -1111,7 +1111,7 @@ namespace node_handling{ "node handle does not contains expected value" ); // Bool conversion - ASSERT( nh2, "Node hanlde: Wrong not handle bool conversion" ); + ASSERT( nh2, "Node handle: Wrong node handle bool conversion" ); // Change key/mapped/value of node handle auto expected_value2 = *test_table.begin(); @@ -1227,7 +1227,7 @@ namespace node_handling{ ASSERT( table_to_insert.size() == table_size, "Insert: After empty node insertion table size changed" ); - // Standart insertion + // Standard insertion nh = GenerateNodeHandle(value); result = table_to_insert.insert(hint..., std::move(nh)); diff --git a/src/test/test_concurrent_hash_map.cpp b/src/test/test_concurrent_hash_map.cpp index d4d4a7a78e..1bc4de278d 100644 --- a/src/test/test_concurrent_hash_map.cpp +++ b/src/test/test_concurrent_hash_map.cpp @@ -498,7 +498,7 @@ void Check( AtomicByte array[], size_t n, size_t expected_size ) { } } -//! Test travering the tabel with a parallel range +//!
Test traversing the table with a parallel range void ParallelTraverseTable( MyTable& table, size_t n, size_t expected_size ) { REMARK("testing parallel traversal\n"); ASSERT( table.size()==expected_size, NULL ); diff --git a/src/test/test_concurrent_priority_queue.cpp b/src/test/test_concurrent_priority_queue.cpp index 4529b120a9..736aa20921 100644 --- a/src/test/test_concurrent_priority_queue.cpp +++ b/src/test/test_concurrent_priority_queue.cpp @@ -208,13 +208,13 @@ void TestHelpers(){ TestToVector(); } -//Comparator with assert in default consructor +//Comparator with assert in default constructor template class less_a : public std::less { public: explicit less_a(bool no_assert = false) { - ASSERT(no_assert,"empty consructor should not be called"); + ASSERT(no_assert,"empty constructor should not be called"); }; }; diff --git a/src/test/test_flow_graph_priorities.cpp b/src/test/test_flow_graph_priorities.cpp index 058c13520e..18caa095a8 100644 --- a/src/test/test_flow_graph_priorities.cpp +++ b/src/test/test_flow_graph_priorities.cpp @@ -106,7 +106,7 @@ void test_node( NodeTypeCreator node_creator_func, NodePortRetriever get_sender graph g; broadcast_node bn(g); function_node tn(g, unlimited, passthru_body()); - // Using pointers to nodes to avoid errors on compilers, which try to generate assigment + // Using pointers to nodes to avoid errors on compilers that try to generate assignment // operator for the nodes std::vector nodes; for( unsigned i = 0; i < node_num; ++i ) { @@ -427,7 +427,7 @@ void do_nested_work( const tbb::tbb_thread::id& tid, } } -// Using pointers to nodes to avoid errors on compilers, which try to generate assigment operator +// Using pointers to nodes to avoid errors on compilers that try to generate assignment operator // for the nodes typedef std::vector< continue_node* > nodes_container_t; @@ -485,7 +485,7 @@ using tbb::task_arena; struct ResetGraphFunctor { graph& my_graph; ResetGraphFunctor(graph& g) : my_graph(g) {} - // copy construtor to please some old compilers + // copy constructor to please some old compilers ResetGraphFunctor(const ResetGraphFunctor& rgf) : my_graph(rgf.my_graph) {} void operator()() const { my_graph.reset(); } }; @@ -501,7 +501,7 @@ struct OuterBody { task_arena& my_inner_arena; OuterBody( int max_threads, task_arena& inner_arena ) : my_max_threads(max_threads), my_inner_arena(inner_arena) {} - // copy construtor to please some old compilers + // copy constructor to please some old compilers OuterBody( const OuterBody& rhs ) : my_max_threads(rhs.my_max_threads), my_inner_arena(rhs.my_inner_arena) {} int operator()( const int& ) { diff --git a/src/test/test_function_node.cpp b/src/test/test_function_node.cpp index 5aad9b2c6e..68256af2b1 100644 --- a/src/test/test_function_node.cpp +++ b/src/test/test_function_node.cpp @@ -28,7 +28,7 @@ #define MAX_NODES 4 //! Performs test on function nodes with limited concurrency and buffering -/** Theses tests check: +/** These tests check: 1) that the number of executing copies never exceed the concurrency limit 2) that the node never rejects 3) that no items are lost @@ -251,7 +251,7 @@ void run_buffered_levels( int c ) { //!
Performs test on executable nodes with limited concurrency -/** Theses tests check: +/** These tests check: 1) that the nodes will accepts puts up to the concurrency limit, 2) the nodes do not exceed the concurrency limit even when run with more threads (this is checked in the harness_graph_executor), 3) the nodes will receive puts from multiple successors simultaneously, @@ -382,7 +382,7 @@ struct parallel_puts : private NoAssign { }; //! Performs test on executable nodes with unlimited concurrency -/** Theses tests check: +/** These tests check: 1) that the nodes will accept all puts 2) the nodes will receive puts from multiple predecessors simultaneously, and 3) the nodes will send to multiple successors. diff --git a/src/test/test_initializer_list.h b/src/test/test_initializer_list.h index ef81dbdca1..caa925bd96 100644 --- a/src/test/test_initializer_list.h +++ b/src/test/test_initializer_list.h @@ -92,10 +92,10 @@ namespace initializer_list_support_tests{ element_type test_seq[] = INIT_SEQ; \ container_type expected(test_seq,test_seq + Harness::array_length(test_seq)); \ \ - /*test for explicit contructor call*/ \ + /*test for explicit constructor call*/ \ container_type vd INIT_SEQ; \ ASSERT(vd == expected,"initialization via explicit constructor call with init list failed"); \ - /*test for explicit contructor call with std::initializer_list*/ \ + /*test for explicit constructor call with std::initializer_list*/ \ \ std::initializer_list init_list = INIT_SEQ; \ container_type v1 (init_list); \ diff --git a/src/test/test_join_node_msg_key_matching.cpp b/src/test/test_join_node_msg_key_matching.cpp index 25ed3597d0..54aa90121b 100644 --- a/src/test/test_join_node_msg_key_matching.cpp +++ b/src/test/test_join_node_msg_key_matching.cpp @@ -18,7 +18,7 @@ #define TBB_PREVIEW_FLOW_GRAPH_FEATURES 1 // This preview feature depends on -// TBB_PREVIEW_FLOW_GRAPH_FEATURES macro, and should not accidentaly be dependent on +// TBB_PREVIEW_FLOW_GRAPH_FEATURES macro, and should not accidentally be dependent on // this deprecated feature #define TBB_DEPRECATED_FLOW_NODE_EXTRACTION 0 diff --git a/src/test/test_limiter_node.cpp b/src/test/test_limiter_node.cpp index 647e972133..322fc70e82 100644 --- a/src/test/test_limiter_node.cpp +++ b/src/test/test_limiter_node.cpp @@ -178,7 +178,7 @@ void test_puts_with_decrements( int num_threads, tbb::flow::limiter_node< T >& l // Tests // // limiter only forwards below the limit, multiple parallel senders / single receiver -// mutiple parallel senders that put to decrement at each accept, limiter accepts new messages +// multiple parallel senders that put to decrement at each accept, limiter accepts new messages // // template< typename T > diff --git a/src/test/test_malloc_compliance.cpp b/src/test/test_malloc_compliance.cpp index fc1918b1f8..c9c2344aa5 100644 --- a/src/test/test_malloc_compliance.cpp +++ b/src/test/test_malloc_compliance.cpp @@ -21,7 +21,7 @@ bool __tbb_test_errno = false; #include "tbb/tbb_config.h" #if __TBB_WIN8UI_SUPPORT -// testing allocator itself not iterfaces +// testing allocator itself, not interfaces // so we can use desktop functions #define _CRT_USE_WINAPI_FAMILY_DESKTOP_APP !_M_ARM #define HARNESS_NO_PARSE_COMMAND_LINE 1 @@ -615,7 +615,7 @@ void CMemTest::AddrArifm() { PtrSize *arr = (PtrSize*)Tmalloc(COUNT_ELEM*sizeof(PtrSize)); - if (FullLog) REPORT("\nUnique pointer using Address arithmetics\n"); + if (FullLog) REPORT("\nUnique pointer using Address arithmetic\n"); if (FullLog) REPORT("malloc...."); ASSERT(arr, NULL); for (int
i=0; i uintptr_t CacheBinModel::cacheCurrTime; template intptr_t CacheBinModel::tooLargeLOC; -template +template void LOCModelTester() { defaultMemPool->extMemPool.loc.cleanAll(); defaultMemPool->extMemPool.loc.reset(); @@ -1005,7 +1005,7 @@ void LOCModelTester() { CacheBinModel::tooLargeLOC = defaultMemPool->extMemPool.loc.largeCache.tooLargeLOC; CacheBinModel cacheBinModel(defaultMemPool->extMemPool.loc.largeCache.bin[binIdx], allocationSize); - Scenarion scen; + Scenario scen; for (rml::internal::LargeMemoryBlock *lmb = scen.next(); (intptr_t)lmb != (intptr_t)-1; lmb = scen.next()) { if ( lmb ) { int num=1; @@ -1227,21 +1227,21 @@ void TestTHP() { // Map memory allocPtrs[i] = backend->allocRawMem(allocSize); - MALLOC_ASSERT(allocPtrs[i], "Allocation not succeded."); + MALLOC_ASSERT(allocPtrs[i], "Allocation did not succeed."); MALLOC_ASSERT(allocSize == HUGE_PAGE_SIZE, - "Allocation size have to be aligned on Huge Page size internaly."); + "Allocation size has to be aligned on Huge Page size internally."); // First touch policy - no real pages allocated by OS without accessing the region memset(allocPtrs[i], 1, allocSize); MALLOC_ASSERT(isAligned(allocPtrs[i], HUGE_PAGE_SIZE), - "The pointer returned by scalable_malloc is not alligned on huge page size."); + "The pointer returned by scalable_malloc is not aligned on huge page size."); } // Wait for the system to update process memory info files after allocations Harness::Sleep(4000); - // Generaly, kernel tries to allocate transparent huge pages, but sometimes it cannot do this + // Generally, the kernel tries to allocate transparent huge pages, but sometimes it cannot do this // (tested on SLES 11/12), so consider this system info checks as a remark. // Also, some systems can allocate more memory then needed in background (tested on Ubuntu 14.04) size_t newSystemTHPCount = getSystemTHPCount(); @@ -1418,8 +1418,8 @@ void TesFunctionReplacementLog() { #include // pow function // Huge objects cache: Size = MinSize * (2 ^ (Index / StepFactor) formula gives value for the bin size, -// but it is not matched with our sizeToIdx aproximation algorithm, where step sizes between major -// (power of 2) sizes are equal. Used internaly for the test. Static cast to avoid warnings. +// but it is not matched with our sizeToIdx approximation algorithm, where step sizes between major +// (power of 2) sizes are equal. Used internally for the test. Static cast to avoid warnings. inline size_t hocIdxToSizeFormula(int idx) { return static_cast(float(rml::internal::LargeObjectCache::maxLargeSize) * pow(2, float(idx) / float(rml::internal::LargeObjectCache::HugeBSProps::StepFactor))); diff --git a/src/test/test_multifunction_node.cpp b/src/test/test_multifunction_node.cpp index a49f0b4b84..8591ab9ec5 100644 --- a/src/test/test_multifunction_node.cpp +++ b/src/test/test_multifunction_node.cpp @@ -32,7 +32,7 @@ #define MAX_NODES 4 //! Performs test on function nodes with limited concurrency and buffering -/** Theses tests check: +/** These tests check: 1) that the number of executing copies never exceed the concurrency limit 2) that the node never rejects 3) that no items are lost @@ -240,7 +240,7 @@ void run_buffered_levels( int c ) { //!
Performs test on executable nodes with limited concurrency -/** Theses tests check: +/** These tests check: 1) that the nodes will accepts puts up to the concurrency limit, 2) the nodes do not exceed the concurrency limit even when run with more threads (this is checked in the harness_graph_executor), 3) the nodes will receive puts from multiple successors simultaneously, diff --git a/src/test/test_opencl_node.cpp b/src/test/test_opencl_node.cpp index bd7bc110cf..4b06dc8f22 100644 --- a/src/test/test_opencl_node.cpp +++ b/src/test/test_opencl_node.cpp @@ -95,7 +95,7 @@ struct test_default_device_selector { cl_uint selected_platform_index = 0; cl_platform_id platform = platforms[selected_platform_index]; - // Count the number of plaform devices and compare with selector list + // Count the number of platform devices and compare with selector list cl_uint device_count; clGetDeviceIDs(platform, CL_DEVICE_TYPE_ALL, 0, NULL, &device_count); // It should be the same @@ -795,7 +795,7 @@ struct BufferWithKey : public opencl_buffer { KeyType my_key; int my_idx; - // TODO: investigate why defaul ctor is required + // TODO: investigate why default ctor is required BufferWithKey() {} BufferWithKey( size_t N, int idx ) : opencl_buffer( N ), my_idx( idx ) {} const KeyType& key() const { return my_key; } diff --git a/src/test/test_parallel_do.cpp b/src/test/test_parallel_do.cpp index d741e72a1e..8879c11b53 100644 --- a/src/test/test_parallel_do.cpp +++ b/src/test/test_parallel_do.cpp @@ -332,8 +332,8 @@ namespace TestMoveSem { MovePreferable(bool addtofeed_) : Movable(), addtofeed(addtofeed_) {} MovePreferable(MovePreferable&& other) : Movable(std::move(other)), addtofeed(other.addtofeed) {}; // base class is explicitly initialized in the copy ctor to avoid -Wextra warnings - MovePreferable(const MovePreferable& other) : Movable(other) { REPORT("Error: copy ctor prefered.\n"); }; - MovePreferable& operator=(const MovePreferable&) { REPORT("Error: copy assing operator prefered.\n"); return *this; } + MovePreferable(const MovePreferable& other) : Movable(other) { REPORT("Error: copy ctor preferred.\n"); }; + MovePreferable& operator=(const MovePreferable&) { REPORT("Error: copy assignment operator preferred.\n"); return *this; } bool addtofeed; }; struct MoveOnly : MovePreferable, NoCopy { diff --git a/src/test/test_parallel_for.cpp b/src/test/test_parallel_for.cpp index a274ca3b15..fb17c6e8ec 100644 --- a/src/test/test_parallel_for.cpp +++ b/src/test/test_parallel_for.cpp @@ -634,13 +634,17 @@ struct ArenaBody { struct CombineBody { MapType operator()(MapType x, const MapType& y) const { x.insert(y.begin(), y.end()); - for (MapType::iterator it = x.begin(); it != x.end();++it) - for (MapType::iterator internal_it = x.begin(); internal_it != x.end(); ++internal_it) { - if (it != internal_it && internal_it->second.first <= it->second.first && it->second.second <= internal_it->second.second) { - x.erase(internal_it); + for (MapType::iterator it1 = x.begin(); it1 != x.end(); ++it1) { + for (MapType::iterator it2 = x.begin(); it2 != x.end(); ++it2) { + if (it1 == it2) continue; + bool is_1_subrange_of_2 = + it2->second.first <= it1->second.first && it1->second.second <= it2->second.second; + if (is_1_subrange_of_2) { + x.erase(it2); break; } } + } return x; } }; @@ -648,33 +652,45 @@ struct CombineBody { range_borders combine_range(const MapType& map) { range_borders result_range = map.begin()->second; for (MapType::const_iterator it = map.begin(); it != map.end(); it++) - result_range =
range_borders((std::min)(result_range.first, it->second.first), (std::max)(result_range.second, it->second.second)); + result_range = range_borders( + (std::min)(result_range.first, it->second.first), + (std::max)(result_range.second, it->second.second) + ); return result_range; } template void test_body() { - for (unsigned int num_threads = tbb::tbb_thread::hardware_concurrency() / 4 + 1; num_threads < tbb::tbb_thread::hardware_concurrency(); num_threads *= 2) - for (size_t range_begin = 0, range_end = num_threads * 10 - 1, i = 0; i < 3; range_begin += num_threads, range_end += num_threads + 1, ++i) { + unsigned hw_concurrency = tbb::tbb_thread::hardware_concurrency(); + for (unsigned int num_threads = hw_concurrency / 4 + 1; num_threads < hw_concurrency; num_threads *= 2) { + REMARK(" num_threads=%lu\n", num_threads); + for (size_t range_begin = 0, range_end = num_threads * 10 - 1, i = 0; i < 3; + range_begin += num_threads, range_end += num_threads + 1, ++i) { + REMARK(" processing range [%lu, %lu)\n", range_begin, range_end); ets = ETSType(MapType()); - tbb::task_arena limited(num_threads); + tbb::task_arena limited(num_threads); // at least two slots in arena. limited.execute(ArenaBody(range_begin, range_end)); MapType combined_map = ets.combine(CombineBody()); range_borders result_borders = combine_range(combined_map); ASSERT(result_borders.first == range_begin, "Restored range begin does not match initial one"); ASSERT(result_borders.second == range_end, "Restored range end does not match initial one"); - ASSERT((combined_map.size() == num_threads), "Incorrect number or post-proportional split ranges"); - size_t expected_size = (range_end - range_begin) / num_threads; + size_t map_size = combined_map.size(); + // In a single-thread arena, partitioners still do one split of a range. + size_t range_partitions = num_threads > 1 ? num_threads : 2; + ASSERT((map_size == range_partitions), "Incorrect number of post-proportional split ranges"); + size_t expected_size = (range_end - range_begin) / range_partitions; for (MapType::iterator it = combined_map.begin(); it != combined_map.end(); ++it) { size_t size = it->second.second - it->second.first; ASSERT((size == expected_size || size == expected_size + 1), "Incorrect post-proportional range size"); } } - + } } void test() { + REMARK("parallel_for with affinity partitioner within task_arena\n"); test_body(); + REMARK("parallel_for with static partitioner within task_arena\n"); test_body(); } diff --git a/src/test/test_parallel_for_each.cpp b/src/test/test_parallel_for_each.cpp index 577941a5f1..90b2396049 100644 --- a/src/test/test_parallel_for_each.cpp +++ b/src/test/test_parallel_for_each.cpp @@ -25,7 +25,7 @@ #include "harness_iterator.h" #include -// Some old compilers can't deduce template paremeter type for parallel_for_each +// Some old compilers can't deduce template parameter type for parallel_for_each // if the function name is passed without explicit cast to function pointer.
typedef void (*TestFunctionType)(size_t); diff --git a/src/test/test_priority_queue_node.cpp b/src/test/test_priority_queue_node.cpp index 4d8c70e3f8..d3b3f54553 100644 --- a/src/test/test_priority_queue_node.cpp +++ b/src/test/test_priority_queue_node.cpp @@ -153,8 +153,8 @@ int test_reservation(int) { // // Tests // -// multilpe parallel senders, items in FIFO (relatively to sender) order -// multilpe parallel senders, multiple parallel receivers, items in FIFO order (relative to sender/receiver) and all items received +// multiple parallel senders, items in FIFO (relative to sender) order +// multiple parallel senders, multiple parallel receivers, items in FIFO order (relative to sender/receiver) and all items received // * overlapped puts / gets // * all puts finished before any getS // diff --git a/src/test/test_queue_node.cpp b/src/test/test_queue_node.cpp index 5619c68e61..de688dcc52 100644 --- a/src/test/test_queue_node.cpp +++ b/src/test/test_queue_node.cpp @@ -229,8 +229,8 @@ int test_reservation() { // // Tests // -// multilpe parallel senders, items in FIFO (relatively to sender) order -// multilpe parallel senders, multiple parallel receivers, items in FIFO order (relative to sender/receiver) and all items received +// multiple parallel senders, items in FIFO (relative to sender) order +// multiple parallel senders, multiple parallel receivers, items in FIFO order (relative to sender/receiver) and all items received // * overlapped puts / gets // * all puts finished before any getS // diff --git a/src/test/test_semaphore.cpp b/src/test/test_semaphore.cpp index c81c0ab455..7f8244d74f 100644 --- a/src/test/test_semaphore.cpp +++ b/src/test/test_semaphore.cpp @@ -50,7 +50,6 @@ Harness::SpinBarrier sBarrier; // set semaphore to initial value // see that semaphore only allows that number of threads to be active class Body: NoAssign { - const int nThreads; const int nIters; tbb::internal::semaphore &mySem; vector &ourCounts; @@ -61,7 +60,7 @@ class Body: NoAssign { Body(int nThread_, int nIter_, semaphore &mySem_, vector& ourCounts_, vector& tottime_ - ) : nThreads(nThread_), nIters(nIter_), mySem(mySem_), ourCounts(ourCounts_), tottime(tottime_) { sBarrier.initialize(nThread_); pCount = 0; } + ) : nIters(nIter_), mySem(mySem_), ourCounts(ourCounts_), tottime(tottime_) { sBarrier.initialize(nThread_); pCount = 0; } void operator()(const int tid) const { sBarrier.wait(); for(int i=0; i < nIters; ++i) { diff --git a/src/test/test_task_arena.cpp b/src/test/test_task_arena.cpp index c68f23f5d4..16eb683fa1 100644 --- a/src/test/test_task_arena.cpp +++ b/src/test/test_task_arena.cpp @@ -44,6 +44,8 @@ #include "harness.h" #include "harness_barrier.h" +#include "tbb/tbb_thread.h" + #if _MSC_VER // plays around __TBB_NO_IMPLICIT_LINKAGE.
__TBB_LIB_NAME should be defined (in makefiles) #pragma comment(lib, __TBB_STRING(__TBB_LIB_NAME)) @@ -1608,6 +1610,19 @@ class CheckArenaNumThreads : public tbb::task { Harness::SpinBarrier CheckArenaNumThreads::m_barrier; +class EnqueueTaskIntoTaskArena +{ +public: + EnqueueTaskIntoTaskArena(tbb::task& t, tbb::task_arena& a) : my_task(t), my_arena(a) {} + void operator() () + { + tbb::task::enqueue(my_task, my_arena); + } +private: + tbb::task& my_task; + tbb::task_arena& my_arena; +}; + void TestTaskEnqueueInArena() { int pp[8]={3, 4, 5, 7, 8, 11, 13, 17}; @@ -1617,10 +1632,21 @@ void TestTaskEnqueueInArena() int reserved_for_masters = p - 1; tbb::task_arena a(p, reserved_for_masters); a.initialize(); - CheckArenaNumThreads& t = *new( tbb::task::allocate_root() ) CheckArenaNumThreads(p, reserved_for_masters); - tbb::task::enqueue(t, a); - CheckArenaNumThreads::m_barrier.wait(); - a.debug_wait_until_empty(); + //Enqueue on master thread + { + CheckArenaNumThreads& t = *new( tbb::task::allocate_root() ) CheckArenaNumThreads(p, reserved_for_masters); + tbb::task::enqueue(t, a); + CheckArenaNumThreads::m_barrier.wait(); + a.debug_wait_until_empty(); + } + //Enqueue on thread without scheduler + { + CheckArenaNumThreads& t = *new( tbb::task::allocate_root() ) CheckArenaNumThreads(p, reserved_for_masters); + tbb::tbb_thread thr(EnqueueTaskIntoTaskArena(t, a)); + CheckArenaNumThreads::m_barrier.wait(); + a.debug_wait_until_empty(); + thr.join(); + } } } diff --git a/src/test/test_task_scheduler_init.cpp b/src/test/test_task_scheduler_init.cpp index a3658f0dd6..4b596dd57c 100644 --- a/src/test/test_task_scheduler_init.cpp +++ b/src/test/test_task_scheduler_init.cpp @@ -31,7 +31,7 @@ #if _MSC_VER #pragma warning (push) // MSVC discovers that ASSERT(false) inside TestBlockingTerminateNS::ExceptionTest2::Body makes the code - // in parallel_for after the body call unreachable. So supress the warning. + // in parallel_for after the body call unreachable. So suppress the warning. #pragma warning (disable: 4702) #endif #include "tbb/parallel_for.h" diff --git a/src/test/test_tbb_version.cpp b/src/test/test_tbb_version.cpp index 22db4d4770..3793349533 100644 --- a/src/test/test_tbb_version.cpp +++ b/src/test/test_tbb_version.cpp @@ -225,7 +225,7 @@ int main(int argc, char *argv[] ) { void initialize_strings_vector(std::vector * vector) { vector->push_back(string_pair("TBB: VERSION\t\t2019.0", required)); // check TBB_VERSION - vector->push_back(string_pair("TBB: INTERFACE VERSION\t11007", required)); // check TBB_INTERFACE_VERSION + vector->push_back(string_pair("TBB: INTERFACE VERSION\t11008", required)); // check TBB_INTERFACE_VERSION vector->push_back(string_pair("TBB: BUILD_DATE", required)); vector->push_back(string_pair("TBB: BUILD_HOST", required)); vector->push_back(string_pair("TBB: BUILD_OS", required)); diff --git a/src/test/test_tick_count.cpp b/src/test/test_tick_count.cpp index ea645c6e4a..2780e85c93 100644 --- a/src/test/test_tick_count.cpp +++ b/src/test/test_tick_count.cpp @@ -131,7 +131,7 @@ void TestTickCountDifference( int n ) { do { NativeParallelFor( n, TickCountDifferenceBody( n ) ); if ( barrier_time > tolerance ) - // The machine seems to be oversubscibed so skip the test. + // The machine seems to be oversubscribed so skip the test. 
continue; for ( int i = 0; i < n; ++i ) { for ( int j = 0; j < i; ++j ) { @@ -146,7 +146,7 @@ void TestTickCountDifference( int n ) { } while ( ++num_trials < 10 && (tbb::tick_count::now() - start_time).seconds() < 5 ); REMARK( "Difference test time: %g sec\n", (tbb::tick_count::now() - start_time).seconds() ); // TODO: Find the cause of the machine high load, fix it and upgrade ASSERT_WARNING to ASSERT - ASSERT_WARNING( num_trials == 10, "The machine seems to be heavily oversubscibed, difference test was skipped." ); + ASSERT_WARNING( num_trials == 10, "The machine seems to be heavily oversubscribed, difference test was skipped." ); delete[] tick_count_array; }
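The "Enqueue on thread without scheduler" block added to TestTaskEnqueueInArena above exercises the second bug fix in this release: enqueuing a tbb::task into a tbb::task_arena from a thread that never initialized a task scheduler (https://github.com/intel/tbb/issues/116). For reference, that usage pattern reduces to roughly the following standalone sketch, assuming a TBB 2019 Update 8 build; FlagTask, EnqueueFromRawThread, and g_done are illustrative names introduced here, not identifiers from the patch:

    #include "tbb/task.h"
    #include "tbb/task_arena.h"
    #include "tbb/task_scheduler_init.h"
    #include "tbb/tbb_thread.h"
    #include "tbb/atomic.h"

    static tbb::atomic<int> g_done;

    // Illustrative no-op task body: it only flags that it ran.
    class FlagTask : public tbb::task {
    public:
        tbb::task* execute() { g_done = 1; return NULL; }
    };

    // Mirrors the EnqueueTaskIntoTaskArena functor from the test: its
    // operator() runs on a raw thread that owns no task scheduler.
    struct EnqueueFromRawThread {
        tbb::task& my_task;
        tbb::task_arena& my_arena;
        EnqueueFromRawThread(tbb::task& t, tbb::task_arena& a) : my_task(t), my_arena(a) {}
        void operator()() const { tbb::task::enqueue(my_task, my_arena); }
    };

    int main() {
        tbb::task_scheduler_init init; // scheduler for the main thread only
        g_done = 0;
        tbb::task_arena arena(2);
        arena.initialize();
        FlagTask& t = *new( tbb::task::allocate_root() ) FlagTask;
        // The raw thread's first and only TBB call is the enqueue itself;
        // before Update 8 this could fail on a scheduler-less thread.
        tbb::tbb_thread thr(EnqueueFromRawThread(t, arena));
        thr.join();
        while (!g_done) tbb::this_tbb_thread::yield(); // let an arena worker run the task
        return 0;
    }

The sketch uses a hand-written functor rather than a lambda for the same apparent reason the test does: tbb_thread supports compilers that may predate C++11.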