diff --git a/src/core/BoxGeometry.hpp b/src/core/BoxGeometry.hpp
index b30998a41e1..d6a3a74f2d2 100644
--- a/src/core/BoxGeometry.hpp
+++ b/src/core/BoxGeometry.hpp
@@ -61,6 +61,12 @@ class BoxGeometry {
    * @param box_l Length that should be set.
    */
   void set_length(Utils::Vector3d const &box_l) { m_length = box_l; }
+
+  /**
+   * @brief Box volume
+   * @return Return the volume of the box.
+   */
+  double volume() const { return m_length[0] * m_length[1] * m_length[2]; }
 };
 
 #endif // CORE_BOX_GEOMETRY_HPP
diff --git a/src/core/accumulators/Correlator.cpp b/src/core/accumulators/Correlator.cpp
index 009d3c9f4fd..c2051701e7c 100644
--- a/src/core/accumulators/Correlator.cpp
+++ b/src/core/accumulators/Correlator.cpp
@@ -190,18 +190,15 @@ constexpr const char init_errors[][64] = {
 
 int Correlator::get_correlation_time(double *correlation_time) {
   // We calculate the correlation time for each m_dim_corr by normalizing the
-  // correlation,
-  // integrating it and finding out where C(tau)=tau;
-  double C_tau;
-  int ok_flag;
+  // correlation, integrating it and finding out where C(tau)=tau
   for (unsigned j = 0; j < m_dim_corr; j++) {
     correlation_time[j] = 0.;
   }
 
   // here we still have to fix the stuff a bit!
   for (unsigned j = 0; j < m_dim_corr; j++) {
-    C_tau = 1 * m_dt;
-    ok_flag = 0;
+    double C_tau = 1 * m_dt;
+    bool ok_flag = false;
     for (unsigned k = 1; k < m_n_result - 1; k++) {
       if (n_sweeps[k] == 0)
         break;
@@ -215,7 +212,7 @@ int Correlator::get_correlation_time(double *correlation_time) {
               2 * sqrt(tau[k - 1] * m_dt / n_data)) {
         correlation_time[j] =
             C_tau * (1 + (2 * (double)tau[k] + 1) / (double)n_data);
-        ok_flag = 1;
+        ok_flag = true;
         break;
       }
     }
@@ -228,7 +225,6 @@ int Correlator::get_correlation_time(double *correlation_time) {
 }
 
 void Correlator::initialize() {
-  unsigned int i, j, k;
   hierarchy_depth = 0;
 
   // Class members are assigned via the initializer list
@@ -260,8 +256,9 @@ void Correlator::initialize() {
   dim_A = 0;
   dim_B = 0;
 
-  if (A_obs)
+  if (A_obs) {
     dim_A = A_obs->n_values();
+  }
   if (!B_obs) {
     B_obs = A_obs;
   }
@@ -352,24 +349,26 @@ void Correlator::initialize() {
 
   result.resize(std::array{{m_n_result, m_dim_corr}});
 
-  for (i = 0; i < m_n_result; i++) {
-    for (j = 0; j < m_dim_corr; j++)
+  for (int i = 0; i < m_n_result; i++) {
+    for (int j = 0; j < m_dim_corr; j++) {
       // and initialize the values
       result[i][j] = 0;
+    }
   }
 
   newest = std::vector(hierarchy_depth, m_tau_lin);
 
   tau.resize(m_n_result);
-  for (i = 0; i < m_tau_lin + 1; i++) {
+  for (int i = 0; i < m_tau_lin + 1; i++) {
     tau[i] = i;
   }
 
-  for (j = 1; j < hierarchy_depth; j++)
-    for (k = 0; k < m_tau_lin / 2; k++) {
+  for (int j = 1; j < hierarchy_depth; j++) {
+    for (int k = 0; k < m_tau_lin / 2; k++) {
       tau[m_tau_lin + 1 + (j - 1) * m_tau_lin / 2 + k] =
           (k + (m_tau_lin / 2) + 1) * (1 << j);
     }
+  }
 }
 
 void Correlator::update() {
@@ -378,20 +377,15 @@ void Correlator::update() {
         "No data can be added after finalize() was called.");
   }
   // We must now go through the hierarchy and make sure there is space for the
-  // new
-  // datapoint. For every hierarchy level we have to decide if it necessary to
-  // move
-  // something
-  int i, j;
-  int highest_level_to_compress;
-  unsigned int index_new, index_old, index_res;
+  // new datapoint. For every hierarchy level we have to decide if it is
+  // necessary to move something
+  int highest_level_to_compress = -1;
 
   t++;
 
-  highest_level_to_compress = -1;
-  i = 0;
-
-  // Lets find out how far we have to go back in the hierarchy to make space for
-  // the new value
+  // Let's find out how far we have to go back in the hierarchy to make space
+  // for the new value
+  int i = 0;
   while (true) {
     if (((t - ((m_tau_lin + 1) * ((1 << (i + 1)) - 1) + 1)) % (1 << (i + 1)) ==
          0)) {
@@ -405,9 +399,9 @@ void Correlator::update() {
   }
 
   // Now we know we must make space on the levels 0..highest_level_to_compress
-  // Now lets compress the data level by level.
+  // Now let's compress the data level by level.
 
-  for (i = highest_level_to_compress; i >= 0; i--) {
+  for (int i = highest_level_to_compress; i >= 0; i--) {
     // We increase the index indicating the newest on level i+1 by one (plus
     // folding)
     newest[i + 1] = (newest[i + 1] + 1) % (m_tau_lin + 1);
@@ -441,9 +435,9 @@ void Correlator::update() {
   }
 
   // Now update the lowest level correlation estimates
-  for (j = 0; j < min(m_tau_lin + 1, n_vals[0]); j++) {
-    index_new = newest[0];
-    index_old = (newest[0] - j + m_tau_lin + 1) % (m_tau_lin + 1);
+  for (unsigned j = 0; j < min(m_tau_lin + 1, n_vals[0]); j++) {
+    auto const index_new = newest[0];
+    auto const index_old = (newest[0] - j + m_tau_lin + 1) % (m_tau_lin + 1);
     auto const temp =
         (corr_operation)(A[0][index_old], B[0][index_new], m_correlation_args);
     assert(temp.size() == m_dim_corr);
@@ -457,9 +451,9 @@ void Correlator::update() {
   for (int i = 1; i < highest_level_to_compress + 2; i++) {
     for (unsigned j = (m_tau_lin + 1) / 2 + 1; j < min(m_tau_lin + 1, n_vals[i]);
          j++) {
-      index_new = newest[i];
-      index_old = (newest[i] - j + m_tau_lin + 1) % (m_tau_lin + 1);
-      index_res =
+      auto const index_new = newest[i];
+      auto const index_old = (newest[i] - j + m_tau_lin + 1) % (m_tau_lin + 1);
+      auto const index_res =
           m_tau_lin + (i - 1) * m_tau_lin / 2 + (j - m_tau_lin / 2 + 1) - 1;
       auto const temp = (corr_operation)(A[i][index_old], B[i][index_new],
                                          m_correlation_args);
@@ -480,20 +474,15 @@ int Correlator::finalize() {
     throw std::runtime_error("Correlator::finalize() can only be called once.");
   }
   // We must now go through the hierarchy and make sure there is space for the
-  // new
-  // datapoint. For every hierarchy level we have to decide if it necessary to
-  // move
-  // something
-  int i, j;
-  int ll;      // current lowest level
-  int vals_ll; // number of values remaining in the lowest level
+  // new datapoint. For every hierarchy level we have to decide if it is
+  // necessary to move something
   int highest_level_to_compress;
-  unsigned int index_new, index_old, index_res;
 
-  // make a flag that the correlation is finalized
-  finalized = 1;
+  // mark the correlation as finalized
+  finalized = true;
 
-  for (ll = 0; ll < hierarchy_depth - 1; ll++) {
+  for (int ll = 0; ll < hierarchy_depth - 1; ll++) {
+    int vals_ll; // number of values remaining in the lowest level
     if (n_vals[ll] > m_tau_lin + 1)
       vals_ll = m_tau_lin + n_vals[ll] % 2;
     else
@@ -507,9 +496,9 @@ int Correlator::finalize() {
       highest_level_to_compress = -1;
     }
 
-    i = ll + 1; // lowest level, for which we have to check for compression
-    // Lets find out how far we have to go back in the hierarchy to make space
-    // for the new value
+    int i = ll + 1; // lowest level for which we have to check for compression
+    // Let's find out how far we have to go back in the hierarchy to make
+    // space for the new value
     while (highest_level_to_compress > -1) {
       if (n_vals[i] % 2) {
         if (i < (hierarchy_depth - 1) && n_vals[i] > m_tau_lin) {
@@ -526,9 +515,9 @@ int Correlator::finalize() {
 
     // Now we know we must make space on the levels
    // 0..highest_level_to_compress
-    // Now lets compress the data level by level.
+    // Now let's compress the data level by level.
 
-    for (i = highest_level_to_compress; i >= ll; i--) {
+    for (int i = highest_level_to_compress; i >= ll; i--) {
       // We increase the index indicating the newest on level i+1 by one (plus
       // folding)
       newest[i + 1] = (newest[i + 1] + 1) % (m_tau_lin + 1);
@@ -542,12 +531,13 @@ int Correlator::finalize() {
     newest[ll] = (newest[ll] + 1) % (m_tau_lin + 1);
 
     // We only need to update correlation estimates for the higher levels
-    for (i = ll + 1; i < highest_level_to_compress + 2; i++) {
-      for (j = (m_tau_lin + 1) / 2 + 1; j < min(m_tau_lin + 1, n_vals[i]);
+    for (int i = ll + 1; i < highest_level_to_compress + 2; i++) {
+      for (int j = (m_tau_lin + 1) / 2 + 1; j < min(m_tau_lin + 1, n_vals[i]);
            j++) {
-        index_new = newest[i];
-        index_old = (newest[i] - j + m_tau_lin + 1) % (m_tau_lin + 1);
-        index_res =
+        auto const index_new = newest[i];
+        auto const index_old =
+            (newest[i] - j + m_tau_lin + 1) % (m_tau_lin + 1);
+        auto const index_res =
             m_tau_lin + (i - 1) * m_tau_lin / 2 + (j - m_tau_lin / 2 + 1) - 1;
         auto const temp = (corr_operation)(A[i][index_old], B[i][index_new],
                                            m_correlation_args);
@@ -569,7 +559,7 @@ std::vector<double> Correlator::get_correlation() {
   std::vector<double> res;
 
   // time + n_sweeps + corr_1...corr_n
-  int cols = 2 + m_dim_corr;
+  int const cols = 2 + m_dim_corr;
   res.resize(m_n_result * cols);
 
   for (int i = 0; i < m_n_result; i++) {
diff --git a/src/core/accumulators/Correlator.hpp b/src/core/accumulators/Correlator.hpp
index 7b18362ba51..e120b9debd5 100644
--- a/src/core/accumulators/Correlator.hpp
+++ b/src/core/accumulators/Correlator.hpp
@@ -165,7 +165,7 @@ class Correlator : public AccumulatorBase {
   Correlator(int tau_lin, double tau_max, int delta_N, std::string compress1_,
              std::string compress2_, std::string corr_operation, obs_ptr obs1,
              obs_ptr obs2, Utils::Vector3d correlation_args_ = {})
-      : AccumulatorBase(delta_N), finalized(0), t(0),
+      : AccumulatorBase(delta_N), finalized(false), t(0),
         m_correlation_args(correlation_args_), m_tau_lin(tau_lin),
         m_dt(delta_N * time_step), m_tau_max(tau_max),
         compressA_name(std::move(compress1_)),
@@ -233,8 +233,8 @@ class Correlator : public AccumulatorBase {
   void set_internal_state(std::string const &);
 
 private:
-  unsigned int finalized; ///< flag whether non-zero of correlation is finalized
-  unsigned int t;         ///< global time in number of frames
+  bool finalized; ///< whether the correlation is finalized
+  unsigned int t; ///< global time in number of frames
 
   Utils::Vector3d m_correlation_args; ///< additional arguments, which the
                                       ///< correlation may need (currently
diff --git a/src/core/cells.cpp b/src/core/cells.cpp
index bc6c7293b3b..cb4fdc5d327 100644
--- a/src/core/cells.cpp
+++ b/src/core/cells.cpp
@@ -62,7 +62,7 @@ CellPList ghost_cells = {nullptr, 0, 0};
 /** Type of cell structure in use */
 CellStructure cell_structure;
 
-/** On of Cells::Resort, announces the level of resort needed.
+/** One of @ref Cells::Resort, announces the level of resort needed.
  */
 unsigned resort_particles = Cells::RESORT_NONE;
 bool rebuild_verletlist = true;
diff --git a/src/core/cells.hpp b/src/core/cells.hpp
index af8bfdba5f6..668520d5396 100644
--- a/src/core/cells.hpp
+++ b/src/core/cells.hpp
@@ -145,19 +145,15 @@ struct CellStructure {
 
   bool use_verlet_list = true;
 
-  /** Maximal pair range supported by current
-   *  cell system.
-   */
+  /** Maximal pair range supported by current cell system. */
   Utils::Vector3d max_range = {};
 
-  /**
-   * Minimum range that has to be supported.
-   */
+  /** Minimum range that has to be supported. */
   double min_range;
 
-  /** returns the global local_cells */
+  /** Return the global local_cells */
   CellPList local_cells() const;
-  /** returns the global ghost_cells */
+  /** Return the global ghost_cells */
   CellPList ghost_cells() const;
 
   /** Communicator to exchange ghost particles. */
@@ -168,7 +164,7 @@ struct CellStructure {
   /** Cell system dependent function to find the right cell for a
    *  particle.
    *  \param  p Particle.
-   *  \return pointer to cell where to put the particle, nullptr
+   *  \return pointer to cell where to put the particle, nullptr
    *          if the particle does not belong on this node.
    */
   Cell *(*particle_to_cell)(const Particle &p) = nullptr;
@@ -184,18 +180,16 @@ struct CellStructure {
 /** list of all cells. */
 extern std::vector<Cell> cells;
 
-/** list of all cells containing particles physically on the local
-    node */
+/** list of all cells containing particles physically on the local node */
 extern CellPList local_cells;
 /** list of all cells containing ghosts */
 extern CellPList ghost_cells;
 
-/** Type of cell structure in use ( \ref Cell Structure ). */
+/** Type of cell structure in use. */
 extern CellStructure cell_structure;
 
 /** If non-zero, cell systems should reset the position for checking
- *  the Verlet criterion. Moreover, the Verlet list has to be
- *  rebuilt.
+ *  the Verlet criterion. Moreover, the Verlet list has to be rebuilt.
  */
 extern bool rebuild_verletlist;
@@ -207,9 +201,9 @@
 /*@{*/
 
 /** Reinitialize the cell structures.
- *  @param new_cs gives the new topology to use afterwards. May be set to
- *                \ref CELL_STRUCTURE_CURRENT for not changing it.
- *  @param range Desired interaction range
+ *  @param new_cs The new topology to use afterwards. May be set to
+ *                @ref CELL_STRUCTURE_CURRENT for not changing it.
+ *  @param range  Desired interaction range
  */
 void cells_re_init(int new_cs, double range);
@@ -233,9 +227,6 @@ inline void realloc_cellplist(CellPList *cpl, int size) {
 
 /** Sort the particles into the cells and initialize the ghost particle
  *  structures.
- *  @param global_flag if this is CELLS_GLOBAL_EXCHANGE, particle positions can
- *                     have changed arbitrarily, otherwise the change should
- *                     have been smaller then skin.
  */
 void cells_resort_particles(int global_flag);
 
@@ -244,22 +235,22 @@ void cells_resort_particles(int global_flag);
  *  It calculates the maximal interaction range, and as said reinitializes
  *  the cells structure if something significant has changed.
  *
- *  If bit CELL_FLAG_FAST is set, the routine should try to save time.
+ *  If bit @ref CELL_FLAG_FAST is set, the routine should try to save time.
  *  Currently this means that if the maximal range decreased, it does
 *  not reorganize the particles. This is used in the NpT algorithm to
 *  avoid frequent reorganization of particles.
 *
- *  If bit CELL_FLAG_GRIDCHANGED is set, it means the nodes' topology
+ *  If bit @ref CELL_FLAG_GRIDCHANGED is set, it means the nodes' topology
 *  has changed, i. e. the grid or periodicity. In this case a full
 *  reorganization is due.
 *
- *  @param flags a bitmask of CELL_FLAG_GRIDCHANGED,
- *               and/or CELL_FLAG_FAST, see above.
+ *  @param flags a bitmask of @ref CELL_FLAG_GRIDCHANGED,
+ *               and/or @ref CELL_FLAG_FAST, see above.
  */
 void cells_on_geometry_change(int flags);
 
-/** Update ghost information. If \ref resort_particles == 1,
- *  also a resorting of the particles takes place.
+/** Update ghost information. If @ref resort_particles is not
+ *  @ref Cells::RESORT_NONE, the particles are also resorted.
  */
 void cells_update_ghosts();
@@ -267,20 +258,14 @@ void cells_update_ghosts();
 int cells_get_n_particles();
 
 /**
- * @brief Get pairs closer than distance from the cells.
- *
- * This is mostly for testing purposes and uses link_cell
- * to get pairs out of the cellsystem by a simple distance
- * criterion.
+ * @brief Get pairs closer than @p distance from the cells.
  *
  * Pairs are sorted so that first.id < second.id
  */
 std::vector<std::pair<int, int>> mpi_get_pairs(double distance);
 
 /**
- * @brief Increase the local resort level at least to level.
- *
- * The changed level has to be communicated via annouce_resort_particles.
+ * @brief Increase the local resort level at least to @p level.
  */
 void set_resort_particles(Cells::Resort level);
diff --git a/src/core/electrostatics_magnetostatics/fft.cpp b/src/core/electrostatics_magnetostatics/fft.cpp
index 064f7ebd0be..ddad2e3e729 100644
--- a/src/core/electrostatics_magnetostatics/fft.cpp
+++ b/src/core/electrostatics_magnetostatics/fft.cpp
@@ -184,9 +184,9 @@ find_comm_groups(Utils::Vector3i const &grid1, Utils::Vector3i const &grid2,
  */
 int calc_local_mesh(const int *n_pos, const int *n_grid, const int *mesh,
                     const double *mesh_off, int *loc_mesh, int *start) {
-  int i, last[3], size = 1;
+  int last[3], size = 1;
 
-  for (i = 0; i < 3; i++) {
+  for (int i = 0; i < 3; i++) {
     start[i] = (int)ceil((mesh[i] / (double)n_grid[i]) * n_pos[i] - mesh_off[i]);
     last[i] = (int)floor((mesh[i] / (double)n_grid[i]) * (n_pos[i] + 1) -
@@ -233,14 +233,14 @@ int calc_local_mesh(const int *n_pos, const int *n_grid, const int *mesh,
 int calc_send_block(const int *pos1, const int *grid1, const int *pos2,
                     const int *grid2, const int *mesh, const double *mesh_off,
                     int *block) {
-  int i, size = 1;
+  int size = 1;
   int mesh1[3], first1[3], last1[3];
   int mesh2[3], first2[3], last2[3];
 
   calc_local_mesh(pos1, grid1, mesh, mesh_off, mesh1, first1);
   calc_local_mesh(pos2, grid2, mesh, mesh_off, mesh2, first2);
 
-  for (i = 0; i < 3; i++) {
+  for (int i = 0; i < 3; i++) {
     last1[i] = first1[i] + mesh1[i] - 1;
     last2[i] = first2[i] + mesh2[i] - 1;
     block[i] = std::max(first1[i], first2[i]) - first1[i];
@@ -431,10 +431,10 @@ void back_grid_comm(fft_forw_plan plan_f, fft_back_plan plan_b,
  *  \return index of the row direction [0,1,2].
  */
 int map_3don2d_grid(int const g3d[3], int g2d[3], int mult[3]) {
-  int i, row_dir = -1;
+  int row_dir = -1;
   /* trivial case */
   if (g3d[2] == 1) {
-    for (i = 0; i < 3; i++)
+    for (int i = 0; i < 3; i++)
       mult[i] = 1;
     return 2;
   }
@@ -449,9 +449,9 @@ int map_3don2d_grid(int const g3d[3], int g2d[3], int mult[3]) {
     } else if (g2d[0] % g3d[1] == 0) {
       if (g2d[1] % g3d[0] == 0) {
         row_dir = 2;
-        i = g2d[0];
+        int const tmp = g2d[0];
         g2d[0] = g2d[1];
-        g2d[1] = i;
+        g2d[1] = tmp;
       } else if (g2d[1] % g3d[2] == 0) {
         row_dir = 0;
         g2d[2] = g2d[1];
@@ -470,7 +470,7 @@ int map_3don2d_grid(int const g3d[3], int g2d[3], int mult[3]) {
       g2d[0] = 1;
     }
   }
-  for (i = 0; i < 3; i++)
+  for (int i = 0; i < 3; i++)
     mult[i] = g2d[i] / g3d[i];
   return row_dir;
 }
diff --git a/src/core/electrostatics_magnetostatics/mdlc_correction.cpp b/src/core/electrostatics_magnetostatics/mdlc_correction.cpp
index 4977284c31c..0e9adf3607c 100644
--- a/src/core/electrostatics_magnetostatics/mdlc_correction.cpp
+++ b/src/core/electrostatics_magnetostatics/mdlc_correction.cpp
@@ -49,18 +49,16 @@ void calc_mu_max() {
 }
 
 inline double g1_DLC_dip(double g, double x) {
-  double a, c, cc2, x3;
-  c = g / x;
-  cc2 = c * c;
-  x3 = x * x * x;
-  a = g * g * g / x + 1.5 * cc2 + 1.5 * g / x3 + 0.75 / (x3 * x);
+  auto const c = g / x;
+  auto const cc2 = c * c;
+  auto const x3 = x * x * x;
+  auto const a = g * g * g / x + 1.5 * cc2 + 1.5 * g / x3 + 0.75 / (x3 * x);
   return a;
 }
 
 inline double g2_DLC_dip(double g, double x) {
-  double a, x2;
-  x2 = x * x;
-  a = g * g / x + 2.0 * g / x2 + 2.0 / (x2 * x);
+  auto const x2 = x * x;
+  auto const a = g * g / x + 2.0 * g / x2 + 2.0 / (x2 * x);
   return a;
 }
@@ -98,8 +96,6 @@ double get_DLC_dipolar(int kcut, std::vector<Utils::Vector3d> &fs,
                        std::vector<Utils::Vector3d> &ts,
                        const ParticleRange &particles) {
 
-  int ip;
-
   std::vector<double> ReSjp(n_local_particles), ReSjm(n_local_particles);
   std::vector<double> ImSjp(n_local_particles), ImSjm(n_local_particles);
   std::vector<double> ReGrad_Mup(n_local_particles),
@@ -117,8 +113,8 @@ double get_DLC_dipolar(int kcut, std::vector<Utils::Vector3d> &fs,
   for (int ix = -kcut; ix <= +kcut; ix++) {
     for (int iy = -kcut; iy <= +kcut; iy++) {
       if (!(ix == 0 && iy == 0)) {
-        auto const gx = (double)ix * facux;
-        auto const gy = (double)iy * facuy;
+        auto const gx = static_cast<double>(ix) * facux;
+        auto const gy = static_cast<double>(iy) * facuy;
 
         auto const gr = sqrt(gx * gx + gy * gy);
@@ -129,7 +125,7 @@ double get_DLC_dipolar(int kcut, std::vector<Utils::Vector3d> &fs,
         double S[4] = {0.0, 0.0, 0.0, 0.0}; // S of Brodka method, or is S[4] =
                                             // {Re(S+), Im(S+), Re(S-), Im(S-)}
 
-        ip = 0;
+        int ip = 0;
 
         for (auto const &p : particles) {
           if (p.p.dipm > 0) {
@@ -215,14 +211,9 @@ double get_DLC_dipolar(int kcut, std::vector<Utils::Vector3d> &fs,
   }   // end of loops for gx,gy
 
   // Convert from the corrections to the Electrical field to the corrections
-  // for
-  // the torques ....
+  // for the torques ....
 
-  // printf("Electrical field: Ex %le, Ey %le, Ez
-  // %le",-tx[0]*M_PI/(box_l[0]*box_l[1]),-ty[0]*M_PI/(box_l[0]*box_l[1]),
-  //-tz[0]*M_PI/(box_l[0]*box_l[1]) );
-
-  ip = 0;
+  int ip = 0;
   for (auto const &p : particles) {
     if (p.p.dipm > 0) {
       ts[ip] = vector_product(p.calc_dip(), ts[ip]);
@@ -232,8 +223,6 @@ double get_DLC_dipolar(int kcut, std::vector<Utils::Vector3d> &fs,
 
   // Multiply by the factors we have left during the loops
 
-  // printf("box_l: %le %le %le \n",box_l[0],box_l[1],box_l[2]);
-
   auto const piarea = M_PI / (box_geo.length()[0] * box_geo.length()[1]);
 
   for (int j = 0; j < n_local_particles; j++) {
@@ -243,10 +232,6 @@ double get_DLC_dipolar(int kcut, std::vector<Utils::Vector3d> &fs,
 
   energy *= (-piarea);
 
-  // fclose(FilePtr);
-
-  // printf("Energy0= %20.15le \n",energy);
-
   return energy;
 }
@@ -255,52 +240,38 @@ double get_DLC_dipolar(int kcut, std::vector<Utils::Vector3d> &fs,
  */
 double get_DLC_energy_dipolar(int kcut, const ParticleRange &particles) {
 
-  int ix, iy, ip;
-  double gx, gy, gr;
-
-  double S[4], global_S[4];
-  double a, b, c, d, er, ez, f, fa1;
-  double s1;
-  double energy, piarea, facux, facuy;
-
   n_local_particles = particles.size();
 
-  facux = 2.0 * M_PI / box_geo.length()[0];
-  facuy = 2.0 * M_PI / box_geo.length()[1];
-
-  energy = 0.0;
+  auto const facux = 2.0 * M_PI / box_geo.length()[0];
+  auto const facuy = 2.0 * M_PI / box_geo.length()[1];
 
-  for (ix = -kcut; ix <= +kcut; ix++) {
-    for (iy = -kcut; iy <= +kcut; iy++) {
+  double energy = 0.0;
+  for (int ix = -kcut; ix <= +kcut; ix++) {
+    for (int iy = -kcut; iy <= +kcut; iy++) {
       if (!(ix == 0 && iy == 0)) {
-        gx = (double)ix * facux;
-        gy = (double)iy * facuy;
-
-        gr = sqrt(gx * gx + gy * gy);
-
-        fa1 =
-            1. / (gr * (exp(gr * box_geo.length()[2]) -
-                        1.0)); // We assume short slab direction is z direction
+        auto const gx = static_cast<double>(ix) * facux;
+        auto const gy = static_cast<double>(iy) * facuy;
+        auto const gr = sqrt(gx * gx + gy * gy);
+        // We assume short slab direction is z direction
+        auto const fa1 = 1. / (gr * (exp(gr * box_geo.length()[2]) - 1.0));
 
         // ... Compute S+,(S+)*,S-,(S-)*, and Spj,Smj for the current g
 
-        S[0] = S[1] = S[2] = S[3] = 0.0;
-
-        ip = 0;
-
+        double S[4] = {0.0, 0.0, 0.0, 0.0};
+        int ip = 0;
         for (auto const &p : particles) {
           if (p.p.dipm > 0) {
             const Utils::Vector3d dip = p.calc_dip();
 
-            a = gx * dip[0] + gy * dip[1];
+            auto const a = gx * dip[0] + gy * dip[1];
             {
-              b = gr * dip[2];
-              er = gx * p.r.p[0] + gy * p.r.p[1];
-              ez = gr * p.r.p[2];
-              c = cos(er);
-              d = sin(er);
-              f = exp(ez);
+              auto const b = gr * dip[2];
+              auto const er = gx * p.r.p[0] + gy * p.r.p[1];
+              auto const ez = gr * p.r.p[2];
+              auto const c = cos(er);
+              auto const d = sin(er);
+              auto const f = exp(ez);
 
               S[0] += (b * c - a * d) * f;
               S[1] += (c * a + b * d) * f;
@@ -311,10 +282,11 @@ double get_DLC_energy_dipolar(int kcut, const ParticleRange &particles) {
           ip++;
         }
 
+        double global_S[4];
         MPI_Reduce(S, global_S, 4, MPI_DOUBLE, MPI_SUM, 0, comm_cart);
 
         // We compute the contribution to the energy ............
-        s1 = (global_S[0] * global_S[2] + global_S[1] * global_S[3]);
+        auto const s1 = global_S[0] * global_S[2] + global_S[1] * global_S[3];
 
         // s2=(ReSm*ReSp+ImSm*ImSp); s2=s1!!!
         energy += fa1 * (s1 * 2.0);
@@ -325,7 +297,7 @@ double get_DLC_energy_dipolar(int kcut, const ParticleRange &particles) {
 
   // Multiply by the factors we have left during the loops
 
-  piarea = M_PI / (box_geo.length()[0] * box_geo.length()[1]);
+  auto const piarea = M_PI / (box_geo.length()[0] * box_geo.length()[1]);
   energy *= (-piarea);
   return (this_node == 0) ? energy : 0.0;
 }
@@ -334,12 +306,10 @@ double get_DLC_energy_dipolar(int kcut, const ParticleRange &particles) {
 *  methods when we have a slab geometry
 */
 void add_mdlc_force_corrections(const ParticleRange &particles) {
-  int dip_DLC_kcut = dlc_params.far_cut;
-  double mz = 0.0, mx = 0.0, my = 0.0, volume, mtot = 0.0;
 
   n_local_particles = particles.size();
 
-  volume = box_geo.length()[0] * box_geo.length()[1] * box_geo.length()[2];
+  auto const volume = box_geo.volume();
 
   // --- Create arrays that should contain the corrections to
   //     the forces and torques, and set them to zero.
@@ -350,7 +320,7 @@ void add_mdlc_force_corrections(const ParticleRange &particles) {
   //---- Compute the corrections ----------------------------------
 
   // First the DLC correction
-  get_DLC_dipolar(dip_DLC_kcut, dip_DLC_f, dip_DLC_t, particles);
+  get_DLC_dipolar(dlc_params.far_cut, dip_DLC_f, dip_DLC_t, particles);
 
   // Now we compute the correction like Yeh and Klapp to take into account
   // the fact that you are using a 3D PBC method which uses spherical
@@ -359,7 +329,8 @@ void add_mdlc_force_corrections(const ParticleRange &particles) {
   // This correction is often called SDC = Shape Dependent Correction.
   // See @cite brodka04a.
 
-  mz = slab_dip_count_mu(&mtot, &mx, &my, particles);
+  double mx = 0.0, my = 0.0, mtot = 0.0;
+  auto const mz = slab_dip_count_mu(&mtot, &mx, &my, particles);
 
   // --- Transfer the computed corrections to the Forces, Energy and torques
   //     of the particles
@@ -374,9 +345,8 @@ void add_mdlc_force_corrections(const ParticleRange &particles) {
       auto const dip = p.calc_dip();
       auto const correc = 4. * M_PI / volume;
       Utils::Vector3d d;
-      // in the Next lines: the second term (correc*...)is the SDC
-      // correction
-      // for the torques
+      // in the next lines: the second term (correc*...) is the SDC
+      // correction for the torques
       if (dp3m.params.epsilon == P3M_EPSILON_METALLIC) {
         d = {0.0, 0.0, -correc * mz};
       } else {
@@ -395,22 +365,14 @@ void add_mdlc_force_corrections(const ParticleRange &particles) {
 *  3D dipolar methods when we have a slab geometry
 */
 double add_mdlc_energy_corrections(const ParticleRange &particles) {
-  double dip_DLC_energy = 0.0;
-  double mz = 0.0, mx = 0.0, my = 0.0, volume, mtot = 0.0;
-  int dip_DLC_kcut;
-
-  volume = box_geo.length()[0] * box_geo.length()[1] * box_geo.length()[2];
-  dip_DLC_kcut = dlc_params.far_cut;
+  auto const volume = box_geo.volume();
 
   //---- Compute the corrections ----------------------------------
 
   // First the DLC correction
-  dip_DLC_energy +=
-      dipole.prefactor * get_DLC_energy_dipolar(dip_DLC_kcut, particles);
-
-  // printf("Energy DLC = %20.15le
-  // \n",dip_DLC_energy);
+  double dip_DLC_energy =
+      dipole.prefactor * get_DLC_energy_dipolar(dlc_params.far_cut, particles);
 
   // Now we compute the correction like Yeh and Klapp to take into account
   // the fact that you are using a 3D PBC method which uses spherical
@@ -419,22 +381,24 @@ double add_mdlc_energy_corrections(const ParticleRange &particles) {
   // This correction is often called SDC = Shape Dependent Correction.
   // See @cite brodka04a.
 
-  mz = slab_dip_count_mu(&mtot, &mx, &my, particles);
+  double mx = 0.0, my = 0.0, mtot = 0.0;
+  auto const mz = slab_dip_count_mu(&mtot, &mx, &my, particles);
+  auto const mz2 = mz * mz;
 
   if (this_node == 0) {
 #ifdef DP3M
     if (dipole.method == DIPOLAR_MDLC_P3M) {
       if (dp3m.params.epsilon == P3M_EPSILON_METALLIC) {
-        dip_DLC_energy += dipole.prefactor * 2. * M_PI / volume * (mz * mz);
+        dip_DLC_energy += dipole.prefactor * 2. * M_PI / volume * mz2;
       } else {
         dip_DLC_energy +=
             dipole.prefactor * 2. * M_PI / volume *
-            (mz * mz - mtot * mtot / (2.0 * dp3m.params.epsilon + 1.0));
+            (mz2 - mtot * mtot / (2.0 * dp3m.params.epsilon + 1.0));
       }
     } else
 #endif
     {
-      dip_DLC_energy += dipole.prefactor * 2. * M_PI / volume * (mz * mz);
+      dip_DLC_energy += dipole.prefactor * 2. * M_PI / volume * mz2;
       fprintf(stderr, "You are not using the P3M method, therefore "
                       "dp3m.params.epsilon unknown, I assume metallic borders "
                       "\n");
@@ -457,18 +421,17 @@ double add_mdlc_energy_corrections(const ParticleRange &particles) {
 *  it makes no sense to have an accurate result for DLC-dipolar.
 */
 int mdlc_tune(double error) {
-  double de, n, gc, lz, lx, a, fa1, fa2, fa0, h;
-  int kc, limitkc = 200, flag;
-
-  n = (double)n_part;
-  lz = box_geo.length()[2];
-
-  a = box_geo.length()[0] * box_geo.length()[1];
+  auto const n = (double)n_part;
+  auto const lx = box_geo.length()[0];
+  auto const ly = box_geo.length()[1];
+  auto const lz = box_geo.length()[2];
+  auto const a = lx * ly;
 
   mpi_bcast_max_mu(); /* we take the maximum dipole in the system, to be sure
                          that the errors in the other case
                          will be equal or less than for this one */
 
-  h = dlc_params.h;
+  auto const h = dlc_params.h;
   if (h < 0)
     return ES_ERROR;
@@ -485,28 +448,29 @@ int mdlc_tune(double error) {
     errexit();
   }
 
-  lx = box_geo.length()[0];
-
-  flag = 0;
+  bool flag = false;
+  int kc;
+  int const limitkc = 200;
   for (kc = 1; kc < limitkc; kc++) {
-    gc = kc * 2.0 * Utils::pi() / lx;
-    fa0 = sqrt(9.0 * exp(+2. * gc * h) * g1_DLC_dip(gc, lz - h) +
-               22.0 * g1_DLC_dip(gc, lz) +
-               9.0 * exp(-2.0 * gc * h) * g1_DLC_dip(gc, lz + h));
-    fa1 = 0.5 * sqrt(Utils::pi() / (2.0 * a)) * fa0;
-    fa2 = g2_DLC_dip(gc, lz);
-    de = n * (mu_max * mu_max) / (4.0 * (exp(gc * lz) - 1.0)) * (fa1 + fa2);
+    auto const gc = kc * 2.0 * Utils::pi() / lx;
+    auto const fa0 = sqrt(9.0 * exp(+2. * gc * h) * g1_DLC_dip(gc, lz - h) +
+                          22.0 * g1_DLC_dip(gc, lz) +
+                          9.0 * exp(-2.0 * gc * h) * g1_DLC_dip(gc, lz + h));
+    auto const fa1 = 0.5 * sqrt(Utils::pi() / (2.0 * a)) * fa0;
+    auto const fa2 = g2_DLC_dip(gc, lz);
+    auto const de =
+        n * (mu_max * mu_max) / (4.0 * (exp(gc * lz) - 1.0)) * (fa1 + fa2);
     if (de < error) {
-      flag = 1;
+      flag = true;
       break;
     }
  }
 
-  if (flag == 0) {
+  if (!flag) {
    fprintf(stderr, "tune DLC dipolar: Sorry, unable to find a proper cut-off "
                    "for such system and accuracy.\n");
    fprintf(stderr, "Try modifying the variable limitkc in the c-code: "
-                    "dlc_correction.cpp ... \n");
+                    "mdlc_correction.cpp ... \n");
    return ES_ERROR;
  }
diff --git a/src/core/electrostatics_magnetostatics/p3m-dipolar.cpp b/src/core/electrostatics_magnetostatics/p3m-dipolar.cpp
index 7c53d1c6a4f..506af4f4faf 100644
--- a/src/core/electrostatics_magnetostatics/p3m-dipolar.cpp
+++ b/src/core/electrostatics_magnetostatics/p3m-dipolar.cpp
@@ -709,13 +709,10 @@ static void P3M_assign_torques(double prefac, int d_rs,
   int cp_cnt = 0, cf_cnt = 0;
   /* index, index jumps for dp3m.rs_mesh array */
   int q_ind;
-  int q_m_off = (dp3m.local_mesh.dim[2] - dp3m.params.cao);
-  int q_s_off =
+  auto const q_m_off = (dp3m.local_mesh.dim[2] - dp3m.params.cao);
+  auto const q_s_off =
       dp3m.local_mesh.dim[2] * (dp3m.local_mesh.dim[1] - dp3m.params.cao);
 
-  cp_cnt = 0;
-  cf_cnt = 0;
-
   for (auto &p : particles) {
     if ((p.p.dipm) != 0.0) {
       const Utils::Vector3d dip = p.calc_dip();
@@ -806,18 +803,17 @@ double dp3m_calc_kspace_forces(bool force_flag, bool energy_flag,
                                const ParticleRange &particles) {
   int i, d, d_rs, ind, j[3];
   /* k-space energy */
-  double dipole_prefac;
   double surface_term = 0.0;
   double k_space_energy_dip = 0.0, node_k_space_energy_dip = 0.0;
   double tmp0, tmp1;
 
-  dipole_prefac =
+  auto const dipole_prefac =
       dipole.prefactor /
       (double)(dp3m.params.mesh[0] * dp3m.params.mesh[1] * dp3m.params.mesh[2]);
 
   if (dp3m.sum_mu2 > 0) {
     /* Gather information for FFT grid inside the nodes domain (inner local
-     * mesh) and Perform forward 3D FFT (Charge Assignment Mesh). */
+     * mesh) and perform forward 3D FFT (Charge Assignment Mesh). */
     dp3m_gather_fft_grid(dp3m.rs_mesh_dip[0].data());
     dp3m_gather_fft_grid(dp3m.rs_mesh_dip[1].data());
     dp3m_gather_fft_grid(dp3m.rs_mesh_dip[2].data());
@@ -877,8 +873,7 @@ double dp3m_calc_kspace_forces(bool force_flag, bool energy_flag,
               pow(dp3m.params.alpha_L * (1. / box_geo.length()[0]), 3) *
               Utils::sqrt_pi_i() / 3.0);
 
-      double volume =
-          box_geo.length()[0] * box_geo.length()[1] * box_geo.length()[2];
+      auto const volume = box_geo.volume();
       k_space_energy_dip += dipole.prefactor * dp3m.energy_correction / volume;
       /* add the dipolar energy correction due to systematic Madelung-Self
          effects */
@@ -1057,9 +1052,8 @@ double dp3m_calc_kspace_forces(bool force_flag, bool energy_flag,
 
 double calc_surface_term(bool force_flag, bool energy_flag,
                          const ParticleRange &particles) {
-  const double pref = dipole.prefactor * 4 * M_PI * (1. / box_geo.length()[0]) *
-                      (1. / box_geo.length()[1]) * (1. / box_geo.length()[2]) /
-                      (2 * dp3m.params.epsilon + 1);
+  auto const pref = dipole.prefactor * 4 * M_PI / box_geo.volume() /
+                    (2 * dp3m.params.epsilon + 1);
   double suma, a[3];
   double en;
@@ -2389,8 +2383,7 @@ void dp3m_compute_constants_energy_dipolar() {
   if (dp3m.energy_correction != 0.0)
     return;
 
-  double volume =
-      box_geo.length()[0] * box_geo.length()[1] * box_geo.length()[2];
+  auto const volume = box_geo.volume();
 
   Ukp3m = dp3m_average_dipolar_self_energy(box_geo.length()[0],
                                            dp3m.params.mesh[0]) *
           volume;
diff --git a/src/core/electrostatics_magnetostatics/p3m.cpp b/src/core/electrostatics_magnetostatics/p3m.cpp
index 2d7211fc976..869d920cd2b 100644
--- a/src/core/electrostatics_magnetostatics/p3m.cpp
+++ b/src/core/electrostatics_magnetostatics/p3m.cpp
@@ -733,8 +733,7 @@ double p3m_calc_kspace_forces(bool force_flag, bool energy_flag,
       (2 * box_geo.length()[0] * box_geo.length()[1] * box_geo.length()[2]);
 
   /* Gather information for FFT grid inside the nodes domain (inner local mesh)
-   */
-  /* and Perform forward 3D FFT (Charge Assignment Mesh). */
+   * and perform forward 3D FFT (Charge Assignment Mesh). */
   if (p3m.sum_q2 > 0) {
     p3m_gather_fft_grid(p3m.rs_mesh);
     fft_perform_forw(p3m.rs_mesh, p3m.fft, comm_cart);
@@ -862,9 +861,8 @@ double p3m_calc_kspace_forces(bool force_flag, bool energy_flag,
 
 double p3m_calc_dipole_term(bool force_flag, bool energy_flag,
                             const ParticleRange &particles) {
-  double pref = coulomb.prefactor * 4 * M_PI * (1. / box_geo.length()[0]) *
-                (1. / box_geo.length()[1]) * (1. / box_geo.length()[2]) /
-                (2 * p3m.params.epsilon + 1);
+  auto const pref = coulomb.prefactor * 4 * M_PI / box_geo.volume() /
+                    (2 * p3m.params.epsilon + 1);
   double lcl_dm[3], gbl_dm[3];
   double en;
diff --git a/src/core/integrators/velocity_verlet_npt.cpp b/src/core/integrators/velocity_verlet_npt.cpp
index 4906ca38693..90b499a780b 100644
--- a/src/core/integrators/velocity_verlet_npt.cpp
+++ b/src/core/integrators/velocity_verlet_npt.cpp
@@ -101,8 +101,7 @@ void velocity_verlet_npt_propagate_pos(const ParticleRange &particles) {
           << "your choice of piston= " << nptiso.piston
           << ", dt= " << time_step << ", p_diff= " << nptiso.p_diff
           << " just caused the volume to become negative, decrease dt";
-      nptiso.volume =
-          box_geo.length()[0] * box_geo.length()[1] * box_geo.length()[2];
+      nptiso.volume = box_geo.volume();
       scal[2] = 1;
     }
diff --git a/src/core/polynom.hpp b/src/core/polynom.hpp
index a77c73cd7c7..f00954a517d 100644
--- a/src/core/polynom.hpp
+++ b/src/core/polynom.hpp
@@ -19,11 +19,11 @@
  *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 /** \file
-    Datatypes and functions for polynomials.
-    Evaluation possible both as Taylor and Chebychev series. Note that the
-    length of the double list is equal to the order of the polynomial plus 1, so
-    that Polynom->n does not give the order of the polynomial, but one more.
-*/
+ *  Datatypes and functions for polynomials.
+ *  Evaluation possible both as Taylor and Chebychev series. Note that the
+ *  length of the double list is equal to the order of the polynomial plus 1,
+ *  so that Polynom::n does not give the order of the polynomial, but one more.
+ */
 #ifndef POLYNOM_H
 #define POLYNOM_H
@@ -44,15 +44,15 @@ inline double evaluateAsTaylorSeriesAt(Polynom *series, double x) {
 }
 
 /** evaluate the polynomial interpreted as a Chebychev series. Requires a series
-    with at least three coefficients, i.e. no linear approximations! */
+ *  with at least three coefficients, i.e. no linear approximations!
+ */
 inline double evaluateAsChebychevSeriesAt(Polynom *series, double x) {
-  int j;
   double *c = series->e;
   double x2 = 2.0 * x;
   double dd = c[series->n - 1];
   double d = x2 * dd + c[series->n - 2];
-  for (j = series->n - 3; j >= 1; j--) {
-    double tmp = d;
+  for (int j = series->n - 3; j >= 1; j--) {
+    auto const tmp = d;
     d = x2 * d - dd + c[j];
     dd = tmp;
   }
diff --git a/src/core/pressure.cpp b/src/core/pressure.cpp
index b41d03cc567..d875646b67b 100644
--- a/src/core/pressure.cpp
+++ b/src/core/pressure.cpp
@@ -104,9 +104,7 @@ inline void add_single_particle_virials(int v_comp, Particle &p) {
 
 void pressure_calc(double *result, double *result_t, double *result_nb,
                    double *result_t_nb, int v_comp) {
-  int n, i;
-  double volume =
-      box_geo.length()[0] * box_geo.length()[1] * box_geo.length()[2];
+  auto const volume = box_geo.volume();
 
   if (!interactions_sanity_checks())
     return;
@@ -137,20 +135,20 @@ void pressure_calc(double *result, double *result_t, double *result_nb,
                        virials.virtual_sites, p_tensor.virtual_sites);
 #endif
 
-  for (n = 1; n < virials.data.n; n++)
+  for (int n = 1; n < virials.data.n; n++)
     virials.data.e[n] /= 3.0 * volume;
 
-  for (i = 0; i < 9; i++)
+  for (int i = 0; i < 9; i++)
     p_tensor.data.e[i] /= (volume * time_step * time_step);
 
-  for (i = 9; i < p_tensor.data.n; i++)
+  for (int i = 9; i < p_tensor.data.n; i++)
     p_tensor.data.e[i] /= volume;
 
   /* Intra- and Inter- part of nonbonded interaction */
-  for (n = 0; n < virials_non_bonded.data_nb.n; n++)
+  for (int n = 0; n < virials_non_bonded.data_nb.n; n++)
     virials_non_bonded.data_nb.e[n] /= 3.0 * volume;
 
-  for (i = 0; i < p_tensor_non_bonded.data_nb.n; i++)
+  for (int i = 0; i < p_tensor_non_bonded.data_nb.n; i++)
     p_tensor_non_bonded.data_nb.e[i] /= volume;
 
   /* gather data */
diff --git a/src/core/reaction_ensemble.cpp b/src/core/reaction_ensemble.cpp
index ae98f5e5f59..d07dd148fa9 100644
--- a/src/core/reaction_ensemble.cpp
+++ b/src/core/reaction_ensemble.cpp
@@ -202,7 +202,7 @@ void ReactionAlgorithm::check_reaction_ensemble() {
  */
 void ReactionAlgorithm::set_cuboid_reaction_ensemble_volume() {
   if (volume < 0)
-    volume = box_geo.length()[0] * box_geo.length()[1] * box_geo.length()[2];
+    volume = box_geo.volume();
 }
 
 /**
diff --git a/src/core/specfunc.cpp b/src/core/specfunc.cpp
index 691e0d14771..21a6f9b87fb 100644
--- a/src/core/specfunc.cpp
+++ b/src/core/specfunc.cpp
@@ -283,7 +283,7 @@ static Polynom ai12_cs{ai12_data};
 /** Coefficients for Maclaurin summation in hzeta().
 *  B_{2j}/(2j)!
 */
-static double hzeta_c[15] = {
+static double const hzeta_c[15] = {
     1.00000000000000000000000000000,    0.083333333333333333333333333333,
     -0.00138888888888888888888888888889, 0.000033068783068783068783068783069,
     -8.2671957671957671957671957672e-07, 2.0876756987868098979210090321e-08,
@@ -295,9 +295,7 @@ static double const hzeta_c[15] = {
 
 double hzeta(double s, double q) {
   double max_bits = 54.0;
-  int jmax = 12, kmax = 10;
-  int j, k;
-  double pmax, scp, pcp, ans;
+  int const jmax = 12, kmax = 10;
 
   if ((s > max_bits && q < 1.0) || (s > 0.5 * max_bits && q < 0.25))
     return pow(q, -s);
@@ -310,16 +308,16 @@ double hzeta(double s, double q) {
 
   /* Euler-Maclaurin summation formula
    * [Moshier, p. 400, with several typo corrections]
    */
-  pmax = pow(kmax + q, -s);
-  scp = s;
-  pcp = pmax / (kmax + q);
-  ans = pmax * ((kmax + q) / (s - 1.0) + 0.5);
+  auto const pmax = pow(kmax + q, -s);
+  auto scp = s;
+  auto pcp = pmax / (kmax + q);
+  auto ans = pmax * ((kmax + q) / (s - 1.0) + 0.5);
 
-  for (k = 0; k < kmax; k++)
+  for (int k = 0; k < kmax; k++)
     ans += pow(k + q, -s);
 
-  for (j = 0; j <= jmax; j++) {
-    double delta = hzeta_c[j + 1] * scp * pcp;
+  for (int j = 0; j <= jmax; j++) {
+    auto const delta = hzeta_c[j + 1] * scp * pcp;
     ans += delta;
     scp *= (s + 2 * j + 1) * (s + 2 * j + 2);
     pcp /= (kmax + q) * (kmax + q);
@@ -392,16 +390,16 @@ static int ak01_orders[] = {
 
 double LPK0(double x) {
   if (x >= 27.) {
-    double tmp = .5 * exp(-x) / sqrt(x);
+    auto const tmp = .5 * exp(-x) / sqrt(x);
     return tmp * ak0_data[0];
   }
   if (x >= 23.) {
-    double tmp = exp(-x) / sqrt(x), xx = (16. / 3.) / x - 5. / 3.;
+    auto const tmp = exp(-x) / sqrt(x), xx = (16. / 3.) / x - 5. / 3.;
     return tmp * (xx * ak0_data[1] + 0.5 * ak0_data[0]);
   }
   if (x > 2) {
     int j = ak01_orders[((int)x) - 2];
-    double tmp, x2;
+    double x2;
     double dd0, d0;
     double *s0;
     if (x <= 8) {
@@ -414,18 +412,18 @@ double LPK0(double x) {
     dd0 = s0[j];
     d0 = x2 * dd0 + s0[j - 1];
     for (j -= 2; j >= 1; j--) {
-      double tmp0 = d0;
+      auto const tmp0 = d0;
       d0 = x2 * d0 - dd0 + s0[j];
       dd0 = tmp0;
     }
-    tmp = exp(-x) / sqrt(x);
+    auto const tmp = exp(-x) / sqrt(x);
     return tmp * (0.5 * (s0[0] + x2 * d0) - dd0);
   }
 
   /* x <= 2 */
   {
     /* I0/1 series */
     int j = 10;
-    double ret, tmp, x2 = (2. / 4.5) * x * x - 2.;
+    double ret, x2 = (2. / 4.5) * x * x - 2.;
     double dd0, d0;
     dd0 = bi0_data[j];
     d0 = x2 * dd0 + bi0_data[j - 1];
@@ -434,7 +432,7 @@ double LPK0(double x) {
       d0 = x2 * d0 - dd0 + bi0_data[j];
       dd0 = tmp0;
     }
-    tmp = log(x) - M_LN2;
+    auto const tmp = log(x) - M_LN2;
     ret = -tmp * (0.5 * (bi0_data[0] + x2 * d0) - dd0);
 
     /* K0/K1 correction */
@@ -443,7 +441,7 @@ double LPK0(double x) {
     dd0 = bk0_data[j];
     d0 = x2 * dd0 + bk0_data[j - 1];
     for (j -= 2; j >= 1; j--) {
-      double tmp0 = d0;
+      auto const tmp0 = d0;
       d0 = x2 * d0 - dd0 + bk0_data[j];
       dd0 = tmp0;
     }
@@ -453,16 +451,16 @@ double LPK1(double x) {
   if (x >= 27.) {
-    double tmp = .5 * exp(-x) / sqrt(x);
+    auto const tmp = .5 * exp(-x) / sqrt(x);
     return tmp * ak1_data[0];
   }
   if (x >= 23.) {
-    double tmp = exp(-x) / sqrt(x), xx = (16. / 3.) / x - 5. / 3.;
+    auto const tmp = exp(-x) / sqrt(x), xx = (16. / 3.) / x - 5. / 3.;
     return tmp * (xx * ak1_data[1] + 0.5 * ak1_data[0]);
   }
   if (x > 2) {
     int j = ak01_orders[((int)x) - 2];
-    double tmp, x2;
+    double x2;
     double dd1, d1;
     double *s1;
     if (x <= 8) {
@@ -475,27 +473,27 @@ double LPK1(double x) {
     dd1 = s1[j];
     d1 = x2 * dd1 + s1[j - 1];
     for (j -= 2; j >= 1; j--) {
-      double tmp1 = d1;
+      auto const tmp1 = d1;
       d1 = x2 * d1 - dd1 + s1[j];
       dd1 = tmp1;
     }
-    tmp = exp(-x) / sqrt(x);
+    auto const tmp = exp(-x) / sqrt(x);
     return tmp * (0.5 * (s1[0] + x2 * d1) - dd1);
   }
 
   /* x <= 2 */
   {
     /* I0/1 series */
     int j = 10;
-    double ret, tmp, x2 = (2. / 4.5) * x * x - 2.;
+    double ret, x2 = (2. / 4.5) * x * x - 2.;
     double dd1, d1;
     dd1 = bi1_data[j];
     d1 = x2 * dd1 + bi1_data[j - 1];
     for (j -= 2; j >= 1; j--) {
-      double tmp1 = d1;
+      auto const tmp1 = d1;
       d1 = x2 * d1 - dd1 + bi1_data[j];
       dd1 = tmp1;
     }
-    tmp = log(x) - M_LN2;
+    auto const tmp = log(x) - M_LN2;
     ret = x * tmp * (0.5 * (bi1_data[0] + x2 * d1) - dd1);
 
     /* K0/K1 correction */
@@ -504,7 +502,7 @@ double LPK1(double x) {
     dd1 = bk1_data[j];
     d1 = x2 * dd1 + bk1_data[j - 1];
     for (j -= 2; j >= 1; j--) {
-      double tmp1 = d1;
+      auto const tmp1 = d1;
       d1 = x2 * d1 - dd1 + bk1_data[j];
       dd1 = tmp1;
     }
@@ -514,20 +512,20 @@ void LPK01(double x, double *K0, double *K1) {
   if (x >= 27.) {
-    double tmp = .5 * exp(-x) / sqrt(x);
+    auto const tmp = .5 * exp(-x) / sqrt(x);
     *K0 = tmp * ak0_data[0];
     *K1 = tmp * ak1_data[0];
     return;
   }
   if (x >= 23.) {
-    double tmp = exp(-x) / sqrt(x), xx = (16. / 3.) / x - 5. / 3.;
+    auto const tmp = exp(-x) / sqrt(x), xx = (16. / 3.) / x - 5. / 3.;
     *K0 = tmp * (xx * ak0_data[1] + 0.5 * ak0_data[0]);
     *K1 = tmp * (xx * ak1_data[1] + 0.5 * ak1_data[0]);
     return;
   }
   if (x > 2) {
     int j = ak01_orders[((int)x) - 2];
-    double tmp, x2;
+    double x2;
     double dd0, dd1, d0, d1;
     double *s0, *s1;
     if (x <= 8) {
@@ -544,13 +542,13 @@ void LPK01(double x, double *K0, double *K1) {
     d0 = x2 * dd0 + s0[j - 1];
     d1 = x2 * dd1 + s1[j - 1];
     for (j -= 2; j >= 1; j--) {
-      double tmp0 = d0, tmp1 = d1;
+      auto const tmp0 = d0, tmp1 = d1;
       d0 = x2 * d0 - dd0 + s0[j];
       d1 = x2 * d1 - dd1 + s1[j];
       dd0 = tmp0;
       dd1 = tmp1;
     }
-    tmp = exp(-x) / sqrt(x);
+    auto const tmp = exp(-x) / sqrt(x);
     *K0 = tmp * (0.5 * (s0[0] + x2 * d0) - dd0);
     *K1 = tmp * (0.5 * (s1[0] + x2 * d1) - dd1);
     return;
@@ -559,20 +557,20 @@ void LPK01(double x, double *K0, double *K1) {
   {
     /* I0/1 series */
     int j = 10;
-    double tmp, x2 = (2. / 4.5) * x * x - 2.;
+    double x2 = (2. / 4.5) * x * x - 2.;
     double dd0, dd1, d0, d1;
     dd0 = bi0_data[j];
     dd1 = bi1_data[j];
     d0 = x2 * dd0 + bi0_data[j - 1];
     d1 = x2 * dd1 + bi1_data[j - 1];
     for (j -= 2; j >= 1; j--) {
-      double tmp0 = d0, tmp1 = d1;
+      auto const tmp0 = d0, tmp1 = d1;
       d0 = x2 * d0 - dd0 + bi0_data[j];
       d1 = x2 * d1 - dd1 + bi1_data[j];
       dd0 = tmp0;
       dd1 = tmp1;
     }
-    tmp = log(x) - M_LN2;
+    auto const tmp = log(x) - M_LN2;
     *K0 = -tmp * (0.5 * (bi0_data[0] + x2 * d0) - dd0);
     *K1 = x * tmp * (0.5 * (bi1_data[0] + x2 * d1) - dd1);
 
@@ -584,7 +582,7 @@ void LPK01(double x, double *K0, double *K1) {
     d0 = x2 * dd0 + bk0_data[j - 1];
     d1 = x2 * dd1 + bk1_data[j - 1];
     for (j -= 2; j >= 1; j--) {
-      double tmp0 = d0, tmp1 = d1;
+      auto const tmp0 = d0, tmp1 = d1;
       d0 = x2 * d0 - dd0 + bk0_data[j];
       d1 = x2 * d1 - dd1 + bk1_data[j];
       dd0 = tmp0;
diff --git a/src/core/statistics.cpp b/src/core/statistics.cpp
index 95fa55a0ad0..bdddbbef40f 100644
--- a/src/core/statistics.cpp
+++ b/src/core/statistics.cpp
@@ -210,9 +210,9 @@ double distto(PartCfg &partCfg, const Utils::Vector3d &pos, int pid) {
 
 void calc_part_distribution(PartCfg &partCfg, int const *p1_types, int n_p1,
                             int const *p2_types, int n_p2, double r_min,
-                            double r_max, int r_bins, int log_flag, double *low,
-                            double *dist) {
-  int t1, t2, ind, cnt = 0;
+                            double r_max, int r_bins, bool log_flag,
+                            double *low, double *dist) {
+  int ind, cnt = 0;
   double inv_bin_width = 0.0;
   double min_dist, min_dist2 = 0.0, start_dist2;
@@ -222,20 +222,20 @@ void calc_part_distribution(PartCfg &partCfg, int const *p1_types, int n_p1,
   *low = 0.0;
   for (int i = 0; i < r_bins; i++)
     dist[i] = 0.0;
 
-  if (log_flag == 1)
+  if (log_flag)
     inv_bin_width = (double)r_bins / (log(r_max) - log(r_min));
   else
     inv_bin_width = (double)r_bins / (r_max - r_min);
 
   /* particle loop: p1_types */
   for (auto const &p1 : partCfg) {
-    for (t1 = 0; t1 < n_p1; t1++) {
+    for (int t1 = 0; t1 < n_p1; t1++) {
       if (p1.p.type == p1_types[t1]) {
         min_dist2 = start_dist2;
         /* particle loop: p2_types */
         for (auto const &p2 : partCfg) {
           if (p1 != p2) {
-            for (t2 = 0; t2 < n_p2; t2++) {
+            for (int t2 = 0; t2 < n_p2; t2++) {
               if (p2.p.type == p2_types[t2]) {
                 auto const act_dist2 =
                     get_mi_vector(p1.r.p, p2.r.p, box_geo).norm2();
@@ -250,7 +250,7 @@ void calc_part_distribution(PartCfg &partCfg, int const *p1_types, int n_p1,
         if (min_dist <= r_max) {
           if (min_dist >= r_min) {
             /* calculate bin index */
-            if (log_flag == 1)
+            if (log_flag)
               ind = (int)((log(min_dist) - log(r_min)) * inv_bin_width);
             else
               ind = (int)((min_dist - r_min) * inv_bin_width);
@@ -285,32 +285,30 @@ void calc_rdf(PartCfg &partCfg, int const *p1_types, int n_p1,
               int const *p2_types, int n_p2, double r_min, double r_max,
               int r_bins, double *rdf) {
   long int cnt = 0;
-  int i, t1, t2, ind;
+  int ind;
   bool mixed_flag = false;
-  double inv_bin_width = 0.0, bin_width = 0.0;
-  double volume, bin_volume, r_in, r_out;
 
   if (n_p1 == n_p2) {
-    for (i = 0; i < n_p1; i++)
+    for (int i = 0; i < n_p1; i++)
       if (p1_types[i] != p2_types[i])
         mixed_flag = true;
-  } else
+  } else {
     mixed_flag = true;
+  }
 
-  bin_width = (r_max - r_min) / (double)r_bins;
-  inv_bin_width = 1.0 / bin_width;
-  for (i = 0; i < r_bins; i++)
+  auto const bin_width = (r_max - r_min) / (double)r_bins;
+  auto const inv_bin_width = 1.0 / bin_width;
+  for (int i = 0; i < r_bins; i++)
     rdf[i] = 0.0;
 
   /* particle loop: p1_types */
   for (auto it = partCfg.begin(); it != partCfg.end(); ++it) {
-    for (t1 = 0; t1 < n_p1; t1++) {
+    for (int t1 = 0; t1 < n_p1; t1++) {
       if (it->p.type == p1_types[t1]) {
         /* distinguish mixed and identical rdf's */
         auto jt = mixed_flag ? partCfg.begin() : std::next(it);
         /* particle loop: p2_types */
         for (; jt != partCfg.end(); ++jt) {
-          for (t2 = 0; t2 < n_p2; t2++) {
+          for (int t2 = 0; t2 < n_p2; t2++) {
             if (jt->p.type == p2_types[t2]) {
               auto const dist = get_mi_vector(it->r.p, jt->r.p, box_geo).norm();
               if (dist > r_min && dist < r_max) {
@@ -328,12 +326,12 @@ void calc_rdf(PartCfg &partCfg, int const *p1_types, int n_p1,
     return;
 
   /* normalization */
-  volume = box_geo.length()[0] * box_geo.length()[1] * box_geo.length()[2];
-  for (i = 0; i < r_bins; i++) {
-    r_in = i * bin_width + r_min;
-    r_out = r_in + bin_width;
-    bin_volume = (4.0 / 3.0) * Utils::pi() *
-                 ((r_out * r_out * r_out) - (r_in * r_in * r_in));
+  auto const volume = box_geo.volume();
+  for (int i = 0; i < r_bins; i++) {
+    auto const r_in = i * bin_width + r_min;
+    auto const r_out = r_in + bin_width;
+    auto const bin_volume = (4.0 / 3.0) * Utils::pi() *
+                            ((r_out * r_out * r_out) - (r_in * r_in * r_in));
     rdf[i] *= volume / (bin_volume * cnt);
   }
 }
@@ -351,8 +349,6 @@ void calc_rdf_av(PartCfg &partCfg, int const *p1_types, int n_p1,
   long int cnt = 0;
   int cnt_conf = 1;
   bool mixed_flag = false;
-  double inv_bin_width = 0.0, bin_width = 0.0;
-  double volume, bin_volume, r_in, r_out;
   std::vector<double> rdf_tmp(r_bins);
 
   if (n_p1 == n_p2) {
@@ -362,9 +358,9 @@ void calc_rdf_av(PartCfg &partCfg, int const *p1_types, int n_p1,
   } else
     mixed_flag = true;
 
-  bin_width = (r_max - r_min) / (double)r_bins;
-  inv_bin_width = 1.0 / bin_width;
-  volume = box_geo.length()[0] * box_geo.length()[1] * box_geo.length()[2];
+  auto const bin_width = (r_max - r_min) / (double)r_bins;
+  auto const inv_bin_width = 1.0 / bin_width;
+  auto const volume = box_geo.volume();
   for (int l = 0; l < r_bins; l++)
     rdf_tmp[l] = rdf[l] = 0.0;
 
@@ -411,10 +407,10 @@ void calc_rdf_av(PartCfg &partCfg, int const *p1_types, int n_p1,
 
   // normalization
   for (int i = 0; i < r_bins; i++) {
-    r_in = i * bin_width + r_min;
-    r_out = r_in + bin_width;
-    bin_volume = (4.0 / 3.0) * Utils::pi() *
-                 ((r_out * r_out * r_out) - (r_in * r_in * r_in));
+    auto const r_in = i * bin_width + r_min;
+    auto const r_out = r_in + bin_width;
+    auto const bin_volume = (4.0 / 3.0) * Utils::pi() *
+                            ((r_out * r_out * r_out) - (r_in * r_in * r_in));
     rdf[i] += rdf_tmp[i] * volume / (bin_volume * cnt);
   }
 
@@ -427,14 +423,11 @@ void calc_rdf_av(PartCfg &partCfg, int const *p1_types, int n_p1,
 
 std::vector<double> calc_structurefactor(PartCfg &partCfg, int const *p_types,
                                          int n_types, int order) {
-  int i, j, k, n, qi, t, order2;
-  double qr, twoPI_L, C_sum, S_sum;
-
-  order2 = order * order;
+  auto const order2 = order * order;
   std::vector<double> ff;
   ff.resize(2 * order2);
   ff[2 * order2] = 0;
-  twoPI_L = 2 * Utils::pi() / box_geo.length()[0];
+  auto const twoPI_L = 2 * Utils::pi() / box_geo.length()[0];
 
   if ((n_types < 0) || (n_types > max_seen_particle_type)) {
     fprintf(stderr, "WARNING: Wrong number of particle types!");
@@ -446,19 +439,20 @@ std::vector<double> calc_structurefactor(PartCfg &partCfg, int const *p_types,
     fflush(nullptr);
     errexit();
   } else {
-    for (qi = 0; qi < 2 * order2; qi++) {
+    for (int qi = 0; qi < 2 * order2; qi++) {
       ff[qi] = 0.0;
     }
-    for (i = 0; i <= order; i++) {
-      for (j = -order; j <= order; j++) {
-        for (k = -order; k <= order; k++) {
-          n = i * i + j * j + k * k;
+    for (int i = 0; i <= order; i++) {
+      for (int j = -order; j <= order; j++) {
+        for (int k = -order; k <= order; k++) {
+          auto const n = i * i + j * j + k * k;
           if ((n <= order2) && (n >= 1)) {
-            C_sum = S_sum = 0.0;
+            double C_sum = 0.0, S_sum = 0.0;
             for (auto const &p : partCfg) {
-              for (t = 0; t < n_types; t++) {
+              for (int t = 0; t < n_types; t++) {
                 if (p.p.type == p_types[t]) {
-                  qr = twoPI_L * (i * p.r.p[0] + j * p.r.p[1] + k * p.r.p[2]);
+                  auto const qr =
+                      twoPI_L * (Utils::Vector3i{{i, j, k}} * p.r.p);
                   C_sum += cos(qr);
                   S_sum += sin(qr);
                 }
@@ -470,14 +464,14 @@ std::vector<double> calc_structurefactor(PartCfg &partCfg, int const *p_types,
         }
       }
     }
-    n = 0;
+    int n = 0;
     for (auto const &p : partCfg) {
-      for (t = 0; t < n_types; t++) {
+      for (int t = 0; t < n_types; t++) {
         if (p.p.type == p_types[t])
           n++;
       }
     }
-    for (qi = 0; qi < order2; qi++)
+    for (int qi = 0; qi < order2; qi++)
       if (ff[2 * qi + 1] != 0)
         ff[2 * qi] /= n * ff[2 * qi + 1];
   }
@@ -494,7 +488,7 @@ std::vector<std::vector<double>> modify_stucturefactor(int order,
     }
   }
 
-  double qfak = 2.0 * Utils::pi() / box_geo.length()[0];
+  auto const qfak = 2.0 * Utils::pi() / box_geo.length()[0];
   std::vector<double> intern;
   intern.assign(2, 0.0);
   std::vector<std::vector<double>> structure_factor;
@@ -642,11 +636,10 @@ void analyze_append(PartCfg &partCfg) {
 }
 
 void analyze_configs(double const *tmp_config, int count) {
-  int i;
   n_part_conf = count;
   configs.resize(n_configs + 1);
   configs[n_configs].resize(3 * n_part_conf);
-  for (i = 0; i < n_part_conf; i++) {
+  for (int i = 0; i < n_part_conf; i++) {
     configs[n_configs][3 * i] = tmp_config[3 * i];
     configs[n_configs][3 * i + 1] = tmp_config[3 * i + 1];
     configs[n_configs][3 * i + 2] = tmp_config[3 * i + 2];
@@ -693,7 +686,7 @@ void obsstat_realloc_and_clear(Observable_stat *stat, int n_pre, int n_bonded,
 
 void obsstat_realloc_and_clear_non_bonded(Observable_stat_non_bonded *stat_nb,
                                           int n_nonbonded, int c_size) {
-  int i, total = c_size * (n_nonbonded + n_nonbonded);
+  auto const total = c_size * (n_nonbonded + n_nonbonded);
 
   stat_nb->data_nb.resize(total);
   stat_nb->chunk_size_nb = c_size;
@@ -701,7 +694,7 @@ void obsstat_realloc_and_clear_non_bonded(Observable_stat_non_bonded *stat_nb,
   stat_nb->non_bonded_intra = stat_nb->data_nb.e;
   stat_nb->non_bonded_inter = stat_nb->non_bonded_intra + c_size * n_nonbonded;
 
-  for (i = 0; i < total; i++)
+  for (int i = 0; i < total; i++)
     stat_nb->data_nb[i] = 0.0;
 }
@@ -712,7 +705,6 @@ void invalidate_obs() {
 }
 
 void update_pressure(int v_comp) {
-  int i;
   double p_vel[3];
   /* if desired (v_comp==1) replace ideal component with instantaneous one */
   if (total_pressure.init_status != 1 + v_comp) {
@@ -729,7 +721,7 @@ void update_pressure(int v_comp) {
       total_pressure.data.e[0] = 0.0;
       MPI_Reduce(nptiso.p_vel, p_vel, 3, MPI_DOUBLE, MPI_SUM, 0,
                  MPI_COMM_WORLD);
-      for (i = 0; i < 3; i++)
+      for (int i = 0; i < 3; i++)
         if (nptiso.geometry & nptiso.nptgeom_dir[i])
           total_pressure.data.e[0] += p_vel[i];
       total_pressure.data.e[0] /= (nptiso.dimension * nptiso.volume);
diff --git a/src/core/statistics.hpp b/src/core/statistics.hpp
index 3fdd49219e0..781758c8437 100644
--- a/src/core/statistics.hpp
+++ b/src/core/statistics.hpp
@@ -88,8 +88,8 @@ void analyze_append(PartCfg &partCfg);
 *  Calculates the distance distribution of particles with types given
 *  in the @p p1_types list around particles with types given in the
 *  @p p2_types list. The distances range from @p r_min to @p r_max, binned
- *  into @p r_bins bins which are either equidistant (@p log_flag==0) or
- *  logarithmically equidistant (@p log_flag==1). The result is stored
+ *  into @p r_bins bins which are either equidistant (@p log_flag==false) or
+ *  logarithmically equidistant (@p log_flag==true). The result is stored
 *  in the @p array dist.
 *  @param p1_types list with types of particles to find the distribution for.
 *  @param n_p1     length of @p p1_types.
@@ -105,8 +105,8 @@ void analyze_append(PartCfg &partCfg);
 */
 void calc_part_distribution(PartCfg &, int const *p1_types, int n_p1,
                             int const *p2_types, int n_p2, double r_min,
-                            double r_max, int r_bins, int log_flag, double *low,
-                            double *dist);
+                            double r_max, int r_bins, bool log_flag,
+                            double *low, double *dist);
 
 /** Calculate the radial distribution function.
 *
diff --git a/src/core/virtual_sites/lb_inertialess_tracers.cpp b/src/core/virtual_sites/lb_inertialess_tracers.cpp
index ca95eb24aee..85187c3ba00 100644
--- a/src/core/virtual_sites/lb_inertialess_tracers.cpp
+++ b/src/core/virtual_sites/lb_inertialess_tracers.cpp
@@ -133,7 +133,7 @@ void IBM_UpdateParticlePositions(ParticleRange particles) {
 
     // Check if the particle might have crossed a box border (criterion see
     // e-mail Axel 28.8.2014)
-    // if possible resort_particles = 1
+    // if possible, use resort_particles = Cells::RESORT_LOCAL
     const double dist2 = (p[j].r.p - p[j].l.p_old).norm2();
     if (dist2 > skin2) {
       set_resort_particles(Cells::RESORT_LOCAL);
diff --git a/src/python/espressomd/analyze.pxd b/src/python/espressomd/analyze.pxd
index 3f49d90be9f..77bd1222245 100644
--- a/src/python/espressomd/analyze.pxd
+++ b/src/python/espressomd/analyze.pxd
@@ -99,7 +99,7 @@ cdef extern from "statistics.hpp":
     void calc_part_distribution(
         PartCfg & , int * p1_types, int n_p1, int * p2_types, int n_p2,
-        double r_min, double r_max, int r_bins, int log_flag, double * low,
+        double r_min, double r_max, int r_bins, bint log_flag, double * low,
         double * dist)
 
 cdef extern from "statistics_chain.hpp":
diff --git a/src/python/espressomd/analyze.pyx b/src/python/espressomd/analyze.pyx
index 3a6e30b2208..56a827afbfe 100644
--- a/src/python/espressomd/analyze.pyx
+++ b/src/python/espressomd/analyze.pyx
@@ -995,11 +995,11 @@ class Analysis:
             Maximum distance.
         r_bins : :obj:`int`
             Number of bins.
-        log_flag : :obj:`int`
-            When set to 0, the bins are linearly equidistant; when set to 1,
-            the bins are logarithmically equidistant.
-        int_flag : :obj:`int`
-            When set to 1, the result is an integrated distribution.
+        log_flag : :obj:`bool`
+            When set to ``False``, the bins are linearly equidistant; when set
+            to ``True``, the bins are logarithmically equidistant.
+        int_flag : :obj:`bool`
+            When set to ``True``, the result is an integrated distribution.
 
         Returns
         -------
@@ -1017,7 +1017,7 @@ class Analysis:
         if r_max is None:
             r_max = min_box_l / 2.0
 
-        if r_min < 0.0 or (log_flag == 1 and r_min == 0.0):
+        if r_min < 0.0 or (log_flag and r_min == 0.0):
             raise ValueError("r_min was chosen too small!")
         if r_max <= r_min:
             raise ValueError("r_max has to be greater than r_min!")
@@ -1033,7 +1033,7 @@ class Analysis:
 
         analyze.calc_part_distribution(
             analyze.partCfg(), p1_types.e, p1_types.n, p2_types.e, p2_types.n,
-            r_min, r_max, r_bins, log_flag, & low, distribution.data())
+            r_min, r_max, r_bins, < bint > log_flag, & low, distribution.data())
 
         np_distribution = create_nparray_from_double_array(
             distribution.data(), r_bins)
diff --git a/src/utils/include/utils/List.hpp b/src/utils/include/utils/List.hpp
index d97513ff826..e1738a3b8f9 100644
--- a/src/utils/include/utils/List.hpp
+++ b/src/utils/include/utils/List.hpp
@@ -210,10 +210,11 @@ template <typename T> class List {
   /** Dynamically allocated field. */
   T *e;
 
-  /** number of used elements in the field. */
+  /** Number of used elements in the field. */
   size_type n;
 
-  /** allocated size of the field. This value is ONLY changed
-      in the routines specified in list operations ! */
+  /** Allocated size of the field. This value is ONLY changed
+   *  in the routines specified in list operations!
+   */
   size_type max;
 };
diff --git a/testsuite/python/wang_landau_reaction_ensemble.py b/testsuite/python/wang_landau_reaction_ensemble.py
index 4b2f5ebe4ab..66ab5452343 100644
--- a/testsuite/python/wang_landau_reaction_ensemble.py
+++ b/testsuite/python/wang_landau_reaction_ensemble.py
@@ -77,7 +77,7 @@ class ReactionEnsembleTest(ut.TestCase):
     # generate preliminary_energy_run_results here, this should be done in a
     # separate simulation without energy reweighting using the update energy
     # functions
-    np.savetxt("energy_boundaries.dat", np.c_[[0, 1], [0, 0], [9, 9]],
+    np.savetxt("energy_boundaries.dat", np.transpose([[0, 1], [0, 0], [9, 9]]),
               delimiter='\t', header="nbar E_potmin E_potmax")
 
     WLRE.add_collective_variable_degree_of_association(