diff --git a/avaframe/ana1Tests/damBreak.py b/avaframe/ana1Tests/damBreakTest.py similarity index 90% rename from avaframe/ana1Tests/damBreak.py rename to avaframe/ana1Tests/damBreakTest.py index 15e0cab82..b5ff5f721 100644 --- a/avaframe/ana1Tests/damBreak.py +++ b/avaframe/ana1Tests/damBreakTest.py @@ -21,7 +21,7 @@ log = logging.getLogger(__name__) -def damBreakSol(avaDir, cfg, cfgC, outDirTest): +def damBreakSol(avaDir, cfg, damBreakCfg, com1DFACfg, outDirTest): """ Compute analytical Flow thickness and velocity for dam break test case for a granular flow over a dry rough sloping bed with the Savage Hutter model @@ -32,8 +32,10 @@ def damBreakSol(avaDir, cfg, cfgC, outDirTest): path to avalanche directory cfg: configParser object main avaframe configuration - here showPlot flag used - cfgC: configParser object - configuration setting for avalanche simulation including DAMBREAK section + damBreakCfg: configParser object + dam break configuration + com1DFACfg: configParser object + com1DFA configuration outDirTest: pathlib path path to output directory @@ -58,21 +60,22 @@ def damBreakSol(avaDir, cfg, cfgC, outDirTest): # Set Parameters # Coordinate system chosen in the direction of the inclined plane - g = cfgC['GENERAL'].getfloat('gravAcc') # acceleration due to gravity [ms-2] - phiDeg = cfgC['DAMBREAK'].getfloat('phi') # slope angle [°] - deltaDeg = cfgC['DAMBREAK'].getfloat('delta') # friction angle [°] + cfgGen = com1DFACfg['GENERAL'] + g = cfgGen.getfloat('gravAcc') # acceleration due to gravity [ms-2] + phiDeg = damBreakCfg.getfloat('phi') # slope angle [°] + deltaDeg = damBreakCfg.getfloat('delta') # friction angle [°] phi = np.radians(phiDeg) # slope angle [rad] delta = np.radians(deltaDeg) # bed friction angle [rad] gz = g * np.cos(phi) # projection of g perpendicular to the inclined plane m0 = gz * (np.tan(phi) - np.tan(delta)) # constant x-acceleration resulting from gravity and friction force - hL = cfgC['GENERAL'].getfloat('relTh') # initial height [m] in Riemann problem in state 1 (x<0), hR (x>0)=0 + hL = cfgGen.getfloat('relTh') # initial height [m] in Riemann problem in state 1 (x<0), hR (x>0)=0 cL = np.sqrt(gz * hL) - x0R = cfgC['DAMBREAK'].getfloat('xBack') / np.cos(phi) - tSave = cfgC['DAMBREAK'].getfloat('tSave') + x0R = damBreakCfg.getfloat('xBack') / np.cos(phi) + tSave = damBreakCfg.getfloat('tSave') # Define time [0-1] seconds and space [-2,2] meters domains multiplied times 100 - tAna = np.arange(0, cfgC['DAMBREAK'].getfloat('tEnd'), cfgC['DAMBREAK'].getfloat('dt')) - x = np.arange(cfgC['DAMBREAK'].getfloat('xStart') / np.cos(phi) , cfgC['DAMBREAK'].getfloat('xEnd') / np.cos(phi), - cfgC['DAMBREAK'].getfloat('dx')) + tAna = np.arange(0, damBreakCfg.getfloat('tEnd'), damBreakCfg.getfloat('dt')) + x = np.arange(damBreakCfg.getfloat('xStart') / np.cos(phi), damBreakCfg.getfloat('xEnd') / np.cos(phi), + damBreakCfg.getfloat('dx')) nt = len(tAna) nx = len(x) # Initialise Flow thickness solution and velocity @@ -227,13 +230,19 @@ def analyzeResults(avalancheDir, fieldsList, timeList, solDam, fieldHeader, cfg, xAna = solDam['xAna'] hAna = solDam['hAna'] uAna = solDam['uAna'] - # get plots limits - limits = outAna1Plots.getPlotLimits(cfgDam, fieldsList, fieldHeader) hErrorL2Array = np.zeros((len(fieldsList))) vhErrorL2Array = np.zeros((len(fieldsList))) hErrorLMaxArray = np.zeros((len(fieldsList))) vhErrorLMaxArray = np.zeros((len(fieldsList))) count = 0 + tSave = cfgDam.getfloat('tSave') + indT = min(np.searchsorted(timeList, tSave), min(len(timeList)-1, 
len(fieldsList)-1)) + tSave = timeList[indT] + # get plots limits + if cfgDam.getboolean('plotSequence'): + limits = outAna1Plots.getPlotLimits(cfgDam, fieldsList, fieldHeader) + else: + limits = outAna1Plots.getPlotLimits(cfgDam, [fieldsList[indT]], fieldHeader) # run the comparison routine for each saved time step for t, field in zip(timeList, fieldsList): # get similartiy solution h, u at required time step @@ -251,9 +260,9 @@ def analyzeResults(avalancheDir, fieldsList, timeList, solDam, fieldHeader, cfg, # make analytical results 2D nrows, ncols = np.shape(hNumPlus) - O = np.ones((nrows, ncols)) - hDamPlus = O*hDamPlus - uDamPlus = O*uDamPlus + OnesRaster = np.ones((nrows, ncols)) + hDamPlus = OnesRaster*hDamPlus + uDamPlus = OnesRaster*uDamPlus hEL2Plus, hL2RelPlus, hLmaxPlus, hLmaxRelPlus = anaTools.normL2Scal(hDamPlus, hNumPlus, cellSize, np.cos(phiRad)) @@ -273,15 +282,12 @@ def analyzeResults(avalancheDir, fieldsList, timeList, solDam, fieldHeader, cfg, log.debug("L2 %s error on the Flow thickness at t=%.2f s is : %.4f" % (title, t, hEL2Plus)) log.debug("L2 %s error on the momentum at t=%.2f s is : %.4f" % (title, t, vhL2Plus)) # Make all individual time step comparison plot - if cfgDam.getboolean('plotSequence'): + if cfgDam.getboolean('plotSequence') or t == tSave: outAna1Plots.plotComparisonDam(cfgDam, simHash, fieldsList[0], field, fieldHeader, solDam, t, limits, outDirTest) count = count + 1 - tSave = cfgDam.getfloat('tSave') - indT = min(np.searchsorted(timeList, tSave), min(len(timeList)-1, len(fieldsList)-1)) - tSave = timeList[indT] - if cfgDam.getboolean('plotErrorTime') and len(timeList)>1: + if cfgDam.getboolean('plotErrorTime') and len(timeList) > 1: # Create result plots outAna1Plots.plotErrorTime(timeList, hErrorL2Array, hErrorLMaxArray, vhErrorL2Array, vhErrorLMaxArray, diff --git a/avaframe/data/avaDamBreak/Inputs/damBreak_com1DFACfg.ini b/avaframe/ana1Tests/damBreakTestCfg.ini similarity index 77% rename from avaframe/data/avaDamBreak/Inputs/damBreak_com1DFACfg.ini rename to avaframe/ana1Tests/damBreakTestCfg.ini index 6251f6072..973a48495 100644 --- a/avaframe/data/avaDamBreak/Inputs/damBreak_com1DFACfg.ini +++ b/avaframe/ana1Tests/damBreakTestCfg.ini @@ -1,15 +1,67 @@ -### Config File - This file contains the main settings for the simulation run for the dam break problem -## This file is part of Avaframe. +### Config File - This file contains the main settings for the dam break test module +## Set your parameters +# This file will be overridden by local_damBreakTestCfg.ini if it exists +# So copy this file to local_damBreakTestCfg.ini, adjust your variables there +[DAMBREAK] +#sphKernel radius for the com1DFA simulations +sphKernelRadius = 6|5|4|3 + +# slope angle [°] +phi = 22 +# bed friction angle [°] +delta = 21 +u0 = 0. 
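For reference, a minimal sketch of how damBreakSol (above) turns these angles into the constants of the analytical solution; gravAcc and relTh come from the com1DFA_override section further down, and the values used here are the defaults listed in this file (treated as assumptions):

```python
import numpy as np

g = 9.81                  # gravAcc [m s-2], assumed default
hL = 1.0                  # relTh [m], value set in the com1DFA_override section
phi = np.radians(22.0)    # slope angle [rad]
delta = np.radians(21.0)  # bed friction angle [rad]

gz = g * np.cos(phi)                      # gravity normal to the inclined plane
m0 = gz * (np.tan(phi) - np.tan(delta))   # constant x-acceleration from gravity and friction
cL = np.sqrt(gz * hL)                     # wave celerity of the undisturbed state
print(m0, cL)
```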
+# time end and resolution for the analytic solution +tEnd = 30 +dt = 0.1 +# space resolution +dx = 0.5 +# domain extend for error computation +# in x direction +xStart = -200 +xEnd = 220 +# in y direction +yStart = -50 +yEnd = 50 +# start x position of the dam +xBack = -120 +# xFront = 0 -[GENERAL] +# set time step of analysis +tSave = 15 +# use only the component of the velocity/momentum in the flow direction (vector going down the inclined plane in x dir) +projectVelocity = False + +#++++Plotting parameters++++++ +# list of parameters to display in the summary plot (list of parameters separated by |) +paramInfo = sphKernelRadius|aPPK|nPPK0|cMax|nPart +# list of parameters to display in the convergence plot (list of parameters separated by |) +paramConvInfo = sphKR0|nPPK0|cMax +# plotting flags +# only analyze and plot the tSave time step +onlyLast = False +# plot error function of time for each simulation +plotErrorTime = False +# plot individual figure for the h, hu and u error for each saved time step +plotSequence = False +# use relative difference +relativError = True +# when plotting, the domain extent is scaleCoef times bigger than the data extent +scaleCoef = 1.05 + + + +[com1DFA_override] +# use default com1DFA config as base configuration (True) and override following parameters +# if False and local is available use local +defaultConfig = True #++++++++++++++++ Simulation type # list of simulations that shall be performed (null, ent, res, entres, available (use all available input data)) simTypeList = null - #+++++++++++++ Output++++++++++++ # desired result Parameters (ppr, pfd, pfv, FD, FV, P, particles) - separated by | -resType = FD|FV|Vx|Vy|Vz +resType = FT|FV|Vx|Vy|Vz # saving time step, i.e. time in seconds (first and last time step are always saved) # option 1: give an interval with start:interval in seconds (tStep = 0:5 - this will save desired results every 5 seconds for the full simulation) # option 2: explicitly list all desired time steps (closest to actual computational time step) separated by | (example tSteps = 1|50.2|100) @@ -24,7 +76,6 @@ iniStep = True maxIterations = 30 # buffer zone factor multiplied with sphKernelRadius bufferZoneFactor = 4 -fdOptionIni = False #+++++++++SNOW properties # True if release thickness should be read from shapefile file; if False - relTh read from ini file @@ -32,21 +83,23 @@ relThFromShp = False relTh = 1 #++++++++++++Time stepping parameters -tEnd = 5 +# End time [s] +tEnd = 20 # to use a variable time step (time step depends on kernel radius) sphKernelRadiusTimeStepping = True -# courant number if option cflTimeStepping is chosen. -# Upper time step limit coefficient if option sphKernelRadiusTimeStepping is chosen. 
+# courant number cMax = 0.01 #+++++++++++++SPH parameters # SPH gradient option # 0) No pressure gradients computed # 1) SamosAT style (no reprojecion on the surface, dz = 0 and gz is used) -# 2) SamosAT done in the cartesian coord system (reprojecion on the surface, dz != 0 and g3 is used) -# 3) SamosAT but done in the local coord system (will hopefully allow us to add the earth pressure coef) +# 2) SamostAt but done in the cartesian coord system (will hopefully allow us to add the earth pressure coef) +# 3) SamostAt but done in the local coord system (will hopefully allow us to add the earth pressure coef) # and this time reprojecion on the surface, dz not 0 and g3 is used sphOption = 2 +# sph kernel smoothing length [m] +sphKernelRadius = 3 # Choice of artificial viscosity # 0) No artificial viscosity # 1) SAMOS artificial viscosity @@ -57,25 +110,25 @@ viscOption = 0 # number of particles defined by: MPPDIR= mass per particle direct, MPPDH= mass per particles through release thickness, # MPPKR= mass per particles through number of particles per kernel radius massPerParticleDeterminationMethod = MPPKR +# number of particles per kernel radius (if MPPKR is used) # is computed with: nPPK = nPPK0 * (sphKR/sphKR0)^aPPK # where sphKR is the sphKernelRadius specified further up # reference kernel radius sphKR0 = 5 # reference number of particles per kernel radius -nPPK0 = 10|20|30 +nPPK0 = 15 # variation of nppK exponent -aPPK = -0.5|-1|-1.5|-2 +aPPK = 0|-0.5|-1|-2|-3 # if splitOption=0 # threshold for splitting particles, split if: mPart > massPerPart x thresholdMassSplit -thresholdMassSplit = 10 +thresholdMassSplit = 5 +#+++++++++++++Mesh and interpolation # remesh the input DEM # expected mesh size [m] meshCellSize = 3 -# sph kernel smoothing length [m] -sphKernelRadius = 3 #++++++++++++Friction model # add the friction using an explicit formulation (1) @@ -85,46 +138,3 @@ explicitFriction = 1 frictModel = Coulomb #+++++++++++++SamosAt friction model mu = 0.3838640350354158 - - -[DAMBREAK] -# slope angle [°] -phi = 22 -# bed friction angle [°] -delta = 21 -u0 = 0. 
-# time end and resolution for the analytic solution -tEnd = 30 -dt = 0.1 -# space resolution -dx = 0.5 -# domain extend for error computation -# in x direction -xStart = -200 -xEnd = 220 -# in y direction -yStart = -50 -yEnd = 50 -# start x position of the dam -xBack = -120 -# xFront = 0 - -# set time step of analysis -tSave = 5 -# use only the component of the velocity/momentum in the flow direction (vector going down the inclined plane in x dir) -projectVelocity = False - -#++++Plotting parameters++++++ -# list of parameters to display in the summary plot (list of parameters separated by |) -paramInfo = sphKernelRadius|aPPK|nPPK0|nPart -# plotting flags -# only analyze and plot the tSave time step -onlyLast = False -# plot error function of time for each simulation -plotErrorTime = False -# plot individual figure for the h, hu and u error for each saved time step -plotSequence = False -# use relative difference -relativError = True -# when plotting, the domain extent is scaleCoef times bigger than the data extent -scaleCoef = 1.05 diff --git a/avaframe/ana1Tests/energyLineTest.py b/avaframe/ana1Tests/energyLineTest.py index 72e103ee4..17d3e0d35 100644 --- a/avaframe/ana1Tests/energyLineTest.py +++ b/avaframe/ana1Tests/energyLineTest.py @@ -135,11 +135,12 @@ def generateCom1DFAEnergyPlot(avalancheDir, energyLineTestCfg, com1DFACfg, avaPr zMin = zLim[0] ax2.vlines(x=avaProfileMass['s'][-1], ymin=zMin, ymax=avaProfileMass['z'][-1], color='r', linestyle='--') - ax2.vlines(x=sIntersection, color='b', ymin=zMin, ymax=zIntersection, linestyle='--') + if ~np.isnan(sIntersection): + ax2.vlines(x=sIntersection, color='b', ymin=zMin, ymax=zIntersection, linestyle='--') + ax2.hlines(y=zIntersection, color='b', xmin=0, xmax=sIntersection, linestyle='--') ax2.hlines(y=avaProfileMass['z'][-1], xmin=0, xmax=avaProfileMass['s'][-1], color='r', linestyle='--') - ax2.hlines(y=zIntersection, color='b', xmin=0, xmax=sIntersection, linestyle='--') ax2.set_xlabel('s [m]') ax2.set_ylabel('z [m]') @@ -183,8 +184,8 @@ def generateCom1DFAEnergyPlot(avalancheDir, energyLineTestCfg, com1DFACfg, avaPr rmseVelocityElevation = resultEnergyTest['rmseVelocityElevation'] errorS = abs(runOutSError) errorZ = abs(runOutZError) - sMin = min(avaProfileMass['s'][-1], sIntersection) - max(errorS, 0) - sMax = max(avaProfileMass['s'][-1], sIntersection) + max(errorS, 0) + sMin = np.nanmin(np.array([avaProfileMass['s'][-1], sIntersection])) - np.nanmax(np.array([errorS, 0])) + sMax = np.nanmax(np.array([avaProfileMass['s'][-1], sIntersection])) + np.nanmax(np.array([errorS, 0])) zMin = avaProfileMass['z'][-1] + min(slopeExt*(sMax - avaProfileMass['s'][-1]), 0 - 2*errorZ)-z0 zMax = avaProfileMass['z'][0] - sMin*np.tan(min(runOutAngleRad, alphaRad))-z0 if avaProfileMass['z'][-1] == zIntersection: @@ -193,11 +194,12 @@ def generateCom1DFAEnergyPlot(avalancheDir, energyLineTestCfg, com1DFACfg, avaPr zMin = zMin - (zMax-zMin)*0.1 ax3.vlines(x=avaProfileMass['s'][-1], ymin=zMin, ymax=avaProfileMass['z'][-1]-z0, color='r', linestyle='--') - ax3.vlines(x=sIntersection, color='b', ymin=zMin, ymax=zIntersection-z0, linestyle='--') + if ~np.isnan(sIntersection): + ax3.vlines(x=sIntersection, color='b', ymin=zMin, ymax=zIntersection-z0, linestyle='--') + ax3.hlines(y=zIntersection-z0, color='b', xmin=sMin, xmax=sIntersection, linestyle='--') ax3.hlines(y=avaProfileMass['z'][-1]-z0, xmin=sMin, xmax=avaProfileMass['s'][-1], color='r', linestyle='--') - ax3.hlines(y=zIntersection-z0, color='b', xmin=sMin, xmax=sIntersection, linestyle='--') 
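The switch to np.nanmin/np.nanmax above is what keeps the plot limits finite when sIntersection is NaN (no intersection found); the builtin min/max behave inconsistently with NaN depending on argument order. A minimal illustration with assumed values:

```python
import numpy as np

sEnd = 812.0            # avaProfileMass['s'][-1], illustrative value
sIntersection = np.nan  # alpha line never crossed the profile
errorS = 25.0

sMin = np.nanmin(np.array([sEnd, sIntersection])) - np.nanmax(np.array([errorS, 0]))
sMax = np.nanmax(np.array([sEnd, sIntersection])) + np.nanmax(np.array([errorS, 0]))
print(sMin, sMax)  # 787.0 837.0, finite limits despite the NaN
```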
ax3.set_xlabel('s [m]') ax3.set_ylabel('$\Delta z$ [m]') @@ -292,7 +294,7 @@ def getAlphaProfileIntersection(energyLineTestCfg, avaProfileMass, mu, csz): # find the intersection segment idx = np.argwhere(np.diff(np.sign(z - alphaLine))).flatten() idx = idx[-1] - coefExt = 0 + coefExt = 1 # did not find the intersection, look further while s[idx] == 0 and coefExt < 4: s = np.append(avaProfileMass['s'], (1+coefExt)*avaProfileMass['s'][-1]) @@ -310,8 +312,14 @@ def getAlphaProfileIntersection(energyLineTestCfg, avaProfileMass, mu, csz): zP1 = z[idx+1] zA0 = alphaLine[idx] zA1 = alphaLine[idx+1] - sIntersection = s0 + (s1-s0)*(zA0-zP0)/((zP1-zP0)-(zA1-zA0)) - zIntersection = zP0 + (sIntersection-s0) * (zP1-zP0) / (s1-s0) + if s0 == 0: + sIntersection = np.nan + zIntersection = np.nan + s[-1] = avaProfileMass['s'][-1] + z[-1] = avaProfileMass['z'][-1] + else: + sIntersection = s0 + (s1-s0)*(zA0-zP0)/((zP1-zP0)-(zA1-zA0)) + zIntersection = zP0 + (sIntersection-s0) * (zP1-zP0) / (s1-s0) if coefExt > 0: s[-1] = sIntersection z[-1] = zIntersection @@ -363,7 +371,10 @@ def getEnergyInfo(avaProfileMass, g, mu, sIntersection, zIntersection, runOutAng GK = sIntersection * mu z_end = avaProfileMass['z'][0] - GK zEneTarget = avaProfileMass['z'][0] - avaProfileMass['s'] * mu - sGeomL = [avaProfileMass['s'][0], sIntersection] + if ~np.isnan(sIntersection): + sGeomL = [avaProfileMass['s'][0], sIntersection] + else: + sGeomL = [avaProfileMass['s'][0], avaProfileMass['s'][-1]] zGeomL = [avaProfileMass['z'][0], z_end] # compute errors # rmse of the energy height diff --git a/avaframe/ana1Tests/energyLineTestCfg.ini b/avaframe/ana1Tests/energyLineTestCfg.ini index a4de8fef2..c056d91a5 100644 --- a/avaframe/ana1Tests/energyLineTestCfg.ini +++ b/avaframe/ana1Tests/energyLineTestCfg.ini @@ -93,5 +93,20 @@ frictModel = Coulomb #+++++++++++++General Friction parameters # tan of bed friction angle used for: samosAT, Coulomb, Voellmy mu = 0.4 +#+++++++++++++Voellmy friction model +xsi = 10000. -dam = True +#+++++++++++++++++ Dam Parameters +# the dam foot print is given in Intputs/DAM as a shape file line +# the dam is located on the left side of the dam line +# (when one travels from the first point to the last point of the shapefile dam line) +# use dam? 
+dam = False +# behavior of the particles when they hit the dam (are they fully reflected: restitutionCoefficient = 1 or only reflected +# along the tangent component of the dam: restitutionCoefficient = 0 or something in between: restitutionCoefficient = 0-1) +restitutionCoefficient = 1 +# number of iterations allowed for the interactions between a particle and the dam during one time step: +# 1: particle can only bounce once (may cross the dam at another location then) +nIterDam = 1 +# 1 to activate energy loss due to snow flowing over dam +dissDam = 1 diff --git a/avaframe/ana1Tests/figAnalytic_energyLineTestCfg.ini b/avaframe/ana1Tests/figAnalytic_energyLineTestCfg.ini new file mode 100644 index 000000000..66c68f646 --- /dev/null +++ b/avaframe/ana1Tests/figAnalytic_energyLineTestCfg.ini @@ -0,0 +1,82 @@ +### Config File - This file contains the main settings for the energy line test module +## Set your parameters +# This file will be overridden by local_energyLineTestCfg.ini if it exists +# So copy this file to local_energyLineTestCfg.ini, adjust your variables there + +[energyLineTest] +# do you want to run the DFA module (all results in the Outputs/com1DFA forlder will be deleted) +runDFAModule = True +# for extending the path at the bottom, use the last points of the mass average path to get the slope +# (all points at a distance nCellsMinExtend x cellSize < distance from the end) +nCellsExtrapolation = 4 +# plot the mass averaged points function of the corrected s too (only available if pathFromPart = True) +plotScor = False + +# True to get the path from particles, from fields otherwise +# if True, particles need to be saved in resType +# if False, FT|FV|FM need to be saved in resType +pathFromPart = True + +# for plotting the results, add the shifted alpha line +shiftedAlphaLine = False + +[com1DFA_override] +# use default com1DFA config as base configuration (True) and override following parameters +# if False and local is available use local +defaultConfig = True +#++++++++++++++++ Simulation type +# list of simulations that shall be performed (null, ent, res, entres, available (use all available input data)) +simTypeList = null + +#+++++++++++++ Output++++++++++++ +# desired result Parameters (ppr, pft, pfv, pta, FT, FV, P, Vx, Vy, Vz, TA, particles) - separated by | +resType = FT|FV|FM|particles +# saving time step, i.e. time in seconds (first and last time step are always saved) +# option 1: give an interval with start:interval in seconds (tStep = 0:5 - this will save desired results every 5 seconds for the full simulation) +# option 2: explicitly list all desired time steps (closest to actual computational time step) separated by | (example tSteps = 1|50.2|100) +# NOTE: initial and last time step are always saved! +tSteps = 0:1 + +#+++++++++SNOW properties +#+++++Release thickness++++ +# True if release thickness should be read from shapefile file; if False - relTh read from ini file +relThFromShp = False +# release thickness (only considered if relThFromShp=False) +relTh = 1 + +#++++++++++++Time stepping parameters +# to use a variable time step (time step depends on kernel radius) +sphKernelRadiusTimeStepping = True +# Upper time step limit coefficient if option sphKernelRadiusTimeStepping is chosen. 
+cMax = 0.02 +# stopCriterion (stops when massFlowing<0.01*peakMassFlowing or ke<0.01*pke) +stopCrit = 0.0 + +#+++++++++++++SPH parameters +# SPH gradient option +# it is possible to set other values here (2: SamosAT done in the cartesian coord system (reprojecion on the surface, dz != 0 and g3 is used)) +sphOption = 0 +# Choice of artificial viscosity +# it is possible to set other values here (1: add some artificial viscosity) +viscOption = 0 + +#++++++++++++++++ Particles +# number of particles defined by: MPPDIR= mass per particle direct, MPPDH= mass per particles through release thickness, +# MPPKR= mass per particles through number of particles per kernel radius +massPerParticleDeterminationMethod = MPPKR + +#+++++++++++++Flow model parameters+++++++++ +# take curvature term into account in the gravity acceleration term for computing the friction force +# (and gradient if curvAccInGradient=1) +# 0 if deactivated, 1 if activated +curvAccInFriction = 0 + +#++++++++++++Friction model +# add the friction using an explicit formulation (1) +# 0 use an implicit method +explicitFriction = 1 +# friction type (samosAT, Coulomb, Voellmy) +frictModel = Coulomb +#+++++++++++++General Friction parameters +# tan of bed friction angle used for: samosAT, Coulomb, Voellmy +mu = 0.4 diff --git a/avaframe/ana1Tests/figAnalytic_rotationTestCfg.ini b/avaframe/ana1Tests/figAnalytic_rotationTestCfg.ini new file mode 100644 index 000000000..08703aed2 --- /dev/null +++ b/avaframe/ana1Tests/figAnalytic_rotationTestCfg.ini @@ -0,0 +1,138 @@ +### Config File - This file contains the main settings for the energy line test module +## Set your parameters +# This file will be overridden by local_energyLineTestCfg.ini if it exists +# So copy this file to local_energyLineTestCfg.ini, adjust your variables there + +[rotationTest] +# for the report (which computational module do you use) +comModule = com1DFA +# do you want to run the DFA module (all results in the Outputs/com1DFA folder will be deleted) +runDFAModule = True + +[energyLineTest_override] +# use default energyLineTest config as base configuration (True) and override following parameters +# if False and local is available use local +defaultConfig = True +# for extending the path at the bottom, use the last points of the mass average path to get the slope +# (all points at a distance nCellsMinExtend x cellSize < distance from the end) +nCellsExtrapolation = 4 +# plot the mass averaged points function of the corrected s too (only available if pathFromPart = True) +plotScor = False + +# True to get the path from particles, from fields otherwise +# if True, particles need to be saved in resType +# if False, FT|FV|FM need to be saved in resType +pathFromPart = True + +[com1DFA_override] +# use default com1DFA config as base configuration (True) and override following parameters +# if False and local is available use local +defaultConfig = True +#++++++++++++++++ Simulation type +# list of simulations that shall be performed (null, ent, res, entres, available (use all available input data)) +simTypeList = null + +#+++++++++++++ Output++++++++++++ +# desired result Parameters (ppr, pft, pfv, pta, FT, FV, P, Vx, Vy, Vz, TA, particles) - separated by | +resType = FT|FV|FM|particles +# saving time step, i.e. 
time in seconds (first and last time step are always saved) +# option 1: give an interval with start:interval in seconds (tStep = 0:5 - this will save desired results every 5 seconds for the full simulation) +# option 2: explicitly list all desired time steps (closest to actual computational time step) separated by | (example tSteps = 1|50.2|100) +# NOTE: initial and last time step are always saved! +tSteps = 0:2 + +#+++++++++SNOW properties +#+++++Release thickness++++ +# True if release thickness should be read from shapefile file; if False - relTh read from ini file +relThFromShp = False +# release thickness (only considered if relThFromShp=False) +relTh = 1 + +#++++++++++++Time stepping parameters +# to use a variable time step (time step depends on kernel radius) +sphKernelRadiusTimeStepping = True +# Upper time step limit coefficient if option sphKernelRadiusTimeStepping is chosen. +cMax = 0.02 +# stopCriterion (stops when massFlowing<0.01*peakMassFlowing or ke<0.01*pke) +stopCrit = 0.0 + +#+++++++++++++SPH parameters +# SPH gradient option +# 0) No pressure gradients computed +# 1) SamosAT style (no reprojecion on the surface, dz = 0 and gz is used) +# 2) SamosAT done in the cartesian coord system (reprojecion on the surface, dz != 0 and g3 is used) +# 3) SamosAT but done in the local coord system (will hopefully allow us to add the earth pressure coef) +# and this time reprojecion on the surface, dz not 0 and g3 is used +sphOption = 0 +# Choice of artificial viscosity +# 0) No artificial viscosity +# 1) SAMOS artificial viscosity +# 2) Ata artificial viscosity +viscOption = 0 + +#++++++++++++++++ Particles +# number of particles defined by: MPPDIR= mass per particle direct, MPPDH= mass per particles through release thickness, +# MPPKR= mass per particles through number of particles per kernel radius +massPerParticleDeterminationMethod = MPPKR + +#+++++++++++++Flow model parameters+++++++++ +# subgridMixingFactor +subgridMixingFactor = 100. +# take curvature term into account in the gravity acceleration term for computing the friction force +# (and gradient if curvAccInGradient=1) +# 0 if deactivated, 1 if activated +curvAccInFriction = 0 +# take curvature term into account in the tangential momentum equation +# 0 if deactivated, 1 if activated +curvAccInTangent = 0 +# if curvAccInFriction=1, take curvature term into account in the pressure gradient (if curvAccInGradient=1) +curvAccInGradient = 0 + +#++++++++++++Friction model +# add the friction using an explicit formulation (1) +# 0 use an implicit method +explicitFriction = 1 +# friction type (samosAT, Coulomb, Voellmy) +frictModel = Coulomb +#+++++++++++++General Friction parameters +# tan of bed friction angle used for: samosAT, Coulomb, Voellmy +mu = 0.45 +#+++++++++++++Voellmy friction model +xsi = 10000. 
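The [com1DFA_override] sections introduced in these test configs start from the default com1DFA configuration and replace only the listed keys. The sketch below illustrates the idea with plain configparser; it is not the actual avaframe helper and it assumes, for simplicity, that the overridden keys live in com1DFA's GENERAL section:

```python
import configparser

def applyOverride(defaultCfgDict, testCfg, section='com1DFA_override'):
    """Merge a *_override section onto a default configuration (illustrative only)."""
    merged = configparser.ConfigParser()
    merged.read_dict(defaultCfgDict)
    for key, value in testCfg[section].items():
        if key == 'defaultconfig':   # flag controlling whether defaults are used at all
            continue
        merged['GENERAL'][key] = value
    return merged

# illustrative default values, not the real com1DFA defaults
default = {'GENERAL': {'tEnd': '400', 'cMax': '0.02', 'mu': '0.3'}}
testCfg = configparser.ConfigParser()
testCfg.read_dict({'com1DFA_override': {'defaultConfig': 'True', 'tEnd': '20'}})
print(dict(applyOverride(default, testCfg)['GENERAL']))  # tEnd overridden to 20
```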
+ + +[DFAPathGeneration_override] +# use default DFAPathGeneration config as base configuration (True) and override following parameters +# if False and local is available use local +defaultConfig = True +# the path extracted from the com1DFA simulation is re-sampled +# re-sampling step size is defined resampleDistance = nCellsResample x cellSize) +# this results in a path with a horizontal distance between points <= resampleDistance +nCellsResample = 5 + +# for extending the path at the bottom, extend path towards the bottom of the runout in the +# direction extracted form the first/last points of the path (all points at a distance +# nCellsMinExtend x cellSize < distance < nCellsMaxExtend x cellSize from the start/end) +nCellsMinExtend = 2 + +# for the extrapolation at the bottom, add factBottomExt * sMax to the path +factBottomExt = 0.2 +maxIterationExtBot = 10 +nBottomExtPrecision = 10 + +[ana3AIMEC_override] +# use default ana3AIMEC config as base configuration (True) and override following parameters +# if False and local is available use local +defaultConfig = True +# data result type for general analysis (ppr|pft|pfd). If left empty takes the result types available for all simulations +resTypes = ppr|pft|pfv +# parameter used for ordering the simulations - multiple possible; (e.g.relTh|deltaTh) +varParList = +# computational module that was used to produce avalanche simulations (to locate peakFiles) +anaMod = com1DFARotated +# directly set reference simulation by its name (name of simulation result file or parts of it that definitively +# identify one particular simulation) +referenceSimName = rel0 +# start with this, will be changed to True if the simulations have entrainment +# Mass analysis +flagMass = False diff --git a/avaframe/ana1Tests/figConvergence_damBreakTestCfg.ini b/avaframe/ana1Tests/figConvergence_damBreakTestCfg.ini new file mode 100644 index 000000000..6ae21103b --- /dev/null +++ b/avaframe/ana1Tests/figConvergence_damBreakTestCfg.ini @@ -0,0 +1,50 @@ +### Config File - This file contains the main settings for the dam break test module +## Set your parameters +# This file will be overridden by local_damBreakTestCfg.ini if it exists +# So copy this file to local_damBreakTestCfg.ini, adjust your variables there + +[DAMBREAK] +#sphKernel radius for the com1DFA simulations +sphKernelRadius = 6|5|4|3 + +# set time step of analysis +tSave = 15 + +#++++Plotting parameters++++++ +# list of parameters to display in the summary plot (list of parameters separated by |) +paramInfo = sphKernelRadius|aPPK|nPPK0|cMax|nPart +# list of parameters to display in the convergence plot (list of parameters separated by |) +paramConvInfo = sphKR0|nPPK0|cMax +# plotting flags +# only analyze and plot the tSave time step +onlyLast = False +# plot error function of time for each simulation +plotErrorTime = False +# plot individual figure for the h, hu and u error for each saved time step +plotSequence = False +# use relative difference +relativError = True +# when plotting, the domain extent is scaleCoef times bigger than the data extent +scaleCoef = 1.05 + + + +[com1DFA_override] +# use default com1DFA config as base configuration (True) and override following parameters +# if False and local is available use local +defaultConfig = True + +#++++++++++++Time stepping parameters +# End time [s] +tEnd = 20 +# courant number +# it is possible to set other values here (0.04|0.02|0.01|0.005) +cMax = 0.02 + +#++++++++++++++++ Particles +# reference number of particles per kernel radius +# it is 
possible to set other values here (20|30|40) +nPPK0 = 15 +# variation of nppK exponent +# it is possible to set other values here (0|-0.5|-1|-1.5|-2|-2.5|-3) +aPPK = 0|-0.5|-1|-2|-3 diff --git a/avaframe/ana1Tests/figConvergence_simiSolTestCfg.ini b/avaframe/ana1Tests/figConvergence_simiSolTestCfg.ini new file mode 100644 index 000000000..e28ad0601 --- /dev/null +++ b/avaframe/ana1Tests/figConvergence_simiSolTestCfg.ini @@ -0,0 +1,50 @@ +### Config File - This file contains the main settings for the similarity solution test module +## Set your parameters +# This file will be overridden by local_simiSolTestCfg.ini if it exists +# So copy this file to local_simiSolTestCfg.ini, adjust your variables there + +[SIMISOL] +#sphKernel radius for the com1DFA simulations +sphKernelRadius = 10|8|6|5|4|3 + +# save analysis plots at time step dtSol +dtSol = 50. + +# which time step should be saved +tSave = 20 + +#++++Plotting parameters++++++ +# list of parameters to display in the summary plot (list of parameters separated by |) +paramInfo = sphKernelRadius|aPPK|nPPK0|cMax|nPart +# list of parameters to display in the convergence plot (list of parameters separated by |) +paramConvInfo = sphKR0|nPPK0|cMax +# plotting flags +# only analyze and plot the tSave time step +onlyLast = False +# plot error function of time for each simulation +plotErrorTime = False +# plot individual figure for the h, hu and u error for each saved time step +plotSequence = False +# use relative difference +relativError = True +# when plotting, the domain extent is scaleCoef times bigger than the data extent +scaleCoef = 1.02 + + +[com1DFA_override] +# use default com1DFA config as base configuration (True) and override following parameters +# if False and local is available use local +defaultConfig = True + +#++++++++++++Time stepping parameters +# End time [s] +tEnd = 30 +# it is possible to set other values here (0.04|0.02|0.005) +cMax = 0.02 + +# reference number of particles per kernel radius +# it is possible to set other values here (20|30|40) +nPPK0 = 15 +# variation of nppK exponent +# it is possible to set other values here (0|-0.5|-1|-1.5|-2|-2.5|-3) +aPPK = 0|-0.5|-1|-2|-3 diff --git a/avaframe/ana1Tests/figCurvature_energyLineTestCfg.ini b/avaframe/ana1Tests/figCurvature_energyLineTestCfg.ini new file mode 100644 index 000000000..a726d5b5d --- /dev/null +++ b/avaframe/ana1Tests/figCurvature_energyLineTestCfg.ini @@ -0,0 +1,82 @@ +### Config File - This file contains the main settings for the energy line test module +## Set your parameters +# This file will be overridden by local_energyLineTestCfg.ini if it exists +# So copy this file to local_energyLineTestCfg.ini, adjust your variables there + +[energyLineTest] +# do you want to run the DFA module (all results in the Outputs/com1DFA forlder will be deleted) +runDFAModule = True +# for extending the path at the bottom, use the last points of the mass average path to get the slope +# (all points at a distance nCellsMinExtend x cellSize < distance from the end) +nCellsExtrapolation = 4 +# plot the mass averaged points function of the corrected s too (only available if pathFromPart = True) +plotScor = False + +# True to get the path from particles, from fields otherwise +# if True, particles need to be saved in resType +# if False, FT|FV|FM need to be saved in resType +pathFromPart = True + +# for plotting the results, add the shifted alpha line +shiftedAlphaLine = True + +[com1DFA_override] +# use default com1DFA config as base configuration (True) and override 
following parameters +# if False and local is available use local +defaultConfig = True +#++++++++++++++++ Simulation type +# list of simulations that shall be performed (null, ent, res, entres, available (use all available input data)) +simTypeList = null + +#+++++++++++++ Output++++++++++++ +# desired result Parameters (ppr, pft, pfv, pta, FT, FV, P, Vx, Vy, Vz, TA, particles) - separated by | +resType = FT|FV|FM|particles +# saving time step, i.e. time in seconds (first and last time step are always saved) +# option 1: give an interval with start:interval in seconds (tStep = 0:5 - this will save desired results every 5 seconds for the full simulation) +# option 2: explicitly list all desired time steps (closest to actual computational time step) separated by | (example tSteps = 1|50.2|100) +# NOTE: initial and last time step are always saved! +tSteps = 0:1 + +#+++++++++SNOW properties +#+++++Release thickness++++ +# True if release thickness should be read from shapefile file; if False - relTh read from ini file +relThFromShp = False +# release thickness (only considered if relThFromShp=False) +relTh = 1 + +#++++++++++++Time stepping parameters +# to use a variable time step (time step depends on kernel radius) +sphKernelRadiusTimeStepping = True +# Upper time step limit coefficient if option sphKernelRadiusTimeStepping is chosen. +cMax = 0.02 +# stopCriterion (stops when massFlowing<0.01*peakMassFlowing or ke<0.01*pke) +stopCrit = 0.0 + +#+++++++++++++SPH parameters +# SPH gradient option +# it is possible to set other values here (2: SamosAT done in the cartesian coord system (reprojecion on the surface, dz != 0 and g3 is used)) +sphOption = 0 +# Choice of artificial viscosity +# it is possible to set other values here (1: add some artificial viscosity) +viscOption = 0 + +#++++++++++++++++ Particles +# number of particles defined by: MPPDIR= mass per particle direct, MPPDH= mass per particles through release thickness, +# MPPKR= mass per particles through number of particles per kernel radius +massPerParticleDeterminationMethod = MPPKR + +#+++++++++++++Flow model parameters+++++++++ +# take curvature term into account in the gravity acceleration term for computing the friction force +# (and gradient if curvAccInGradient=1) +# 0 if deactivated, 1 if activated +curvAccInFriction = 1 + +#++++++++++++Friction model +# add the friction using an explicit formulation (1) +# 0 use an implicit method +explicitFriction = 1 +# friction type (samosAT, Coulomb, Voellmy) +frictModel = Coulomb +#+++++++++++++General Friction parameters +# tan of bed friction angle used for: samosAT, Coulomb, Voellmy +mu = 0.4 diff --git a/avaframe/ana1Tests/figPressure_energyLineTestCfg.ini b/avaframe/ana1Tests/figPressure_energyLineTestCfg.ini new file mode 100644 index 000000000..f7f5d27e6 --- /dev/null +++ b/avaframe/ana1Tests/figPressure_energyLineTestCfg.ini @@ -0,0 +1,82 @@ +### Config File - This file contains the main settings for the energy line test module +## Set your parameters +# This file will be overridden by local_energyLineTestCfg.ini if it exists +# So copy this file to local_energyLineTestCfg.ini, adjust your variables there + +[energyLineTest] +# do you want to run the DFA module (all results in the Outputs/com1DFA forlder will be deleted) +runDFAModule = True +# for extending the path at the bottom, use the last points of the mass average path to get the slope +# (all points at a distance nCellsMinExtend x cellSize < distance from the end) +nCellsExtrapolation = 4 +# plot the mass averaged 
points function of the corrected s too (only available if pathFromPart = True) +plotScor = False + +# True to get the path from particles, from fields otherwise +# if True, particles need to be saved in resType +# if False, FT|FV|FM need to be saved in resType +pathFromPart = True + +# for plotting the results, add the shifted alpha line +shiftedAlphaLine = False + +[com1DFA_override] +# use default com1DFA config as base configuration (True) and override following parameters +# if False and local is available use local +defaultConfig = True +#++++++++++++++++ Simulation type +# list of simulations that shall be performed (null, ent, res, entres, available (use all available input data)) +simTypeList = null + +#+++++++++++++ Output++++++++++++ +# desired result Parameters (ppr, pft, pfv, pta, FT, FV, P, Vx, Vy, Vz, TA, particles) - separated by | +resType = FT|FV|FM|particles +# saving time step, i.e. time in seconds (first and last time step are always saved) +# option 1: give an interval with start:interval in seconds (tStep = 0:5 - this will save desired results every 5 seconds for the full simulation) +# option 2: explicitly list all desired time steps (closest to actual computational time step) separated by | (example tSteps = 1|50.2|100) +# NOTE: initial and last time step are always saved! +tSteps = 0:1 + +#+++++++++SNOW properties +#+++++Release thickness++++ +# True if release thickness should be read from shapefile file; if False - relTh read from ini file +relThFromShp = False +# release thickness (only considered if relThFromShp=False) +relTh = 1 + +#++++++++++++Time stepping parameters +# to use a variable time step (time step depends on kernel radius) +sphKernelRadiusTimeStepping = True +# Upper time step limit coefficient if option sphKernelRadiusTimeStepping is chosen. 
+cMax = 0.02 +# stopCriterion (stops when massFlowing<0.01*peakMassFlowing or ke<0.01*pke) +stopCrit = 0.0 + +#+++++++++++++SPH parameters +# SPH gradient option +# it is possible to set other values here (2: SamosAT done in the cartesian coord system (reprojecion on the surface, dz != 0 and g3 is used)) +sphOption = 2 +# Choice of artificial viscosity +# it is possible to set other values here (1: add some artificial viscosity) +viscOption = 1 + +#++++++++++++++++ Particles +# number of particles defined by: MPPDIR= mass per particle direct, MPPDH= mass per particles through release thickness, +# MPPKR= mass per particles through number of particles per kernel radius +massPerParticleDeterminationMethod = MPPKR + +#+++++++++++++Flow model parameters+++++++++ +# take curvature term into account in the gravity acceleration term for computing the friction force +# (and gradient if curvAccInGradient=1) +# 0 if deactivated, 1 if activated +curvAccInFriction = 0 + +#++++++++++++Friction model +# add the friction using an explicit formulation (1) +# 0 use an implicit method +explicitFriction = 1 +# friction type (samosAT, Coulomb, Voellmy) +frictModel = Coulomb +#+++++++++++++General Friction parameters +# tan of bed friction angle used for: samosAT, Coulomb, Voellmy +mu = 0.4 diff --git a/avaframe/ana1Tests/figPressure_rotationTestCfg.ini b/avaframe/ana1Tests/figPressure_rotationTestCfg.ini new file mode 100644 index 000000000..cdb132e08 --- /dev/null +++ b/avaframe/ana1Tests/figPressure_rotationTestCfg.ini @@ -0,0 +1,138 @@ +### Config File - This file contains the main settings for the energy line test module +## Set your parameters +# This file will be overridden by local_energyLineTestCfg.ini if it exists +# So copy this file to local_energyLineTestCfg.ini, adjust your variables there + +[rotationTest] +# for the report (which computational module do you use) +comModule = com1DFA +# do you want to run the DFA module (all results in the Outputs/com1DFA folder will be deleted) +runDFAModule = True + +[energyLineTest_override] +# use default energyLineTest config as base configuration (True) and override following parameters +# if False and local is available use local +defaultConfig = True +# for extending the path at the bottom, use the last points of the mass average path to get the slope +# (all points at a distance nCellsMinExtend x cellSize < distance from the end) +nCellsExtrapolation = 4 +# plot the mass averaged points function of the corrected s too (only available if pathFromPart = True) +plotScor = False + +# True to get the path from particles, from fields otherwise +# if True, particles need to be saved in resType +# if False, FT|FV|FM need to be saved in resType +pathFromPart = True + +[com1DFA_override] +# use default com1DFA config as base configuration (True) and override following parameters +# if False and local is available use local +defaultConfig = True +#++++++++++++++++ Simulation type +# list of simulations that shall be performed (null, ent, res, entres, available (use all available input data)) +simTypeList = null + +#+++++++++++++ Output++++++++++++ +# desired result Parameters (ppr, pft, pfv, pta, FT, FV, P, Vx, Vy, Vz, TA, particles) - separated by | +resType = FT|FV|FM|particles +# saving time step, i.e. 
time in seconds (first and last time step are always saved) +# option 1: give an interval with start:interval in seconds (tStep = 0:5 - this will save desired results every 5 seconds for the full simulation) +# option 2: explicitly list all desired time steps (closest to actual computational time step) separated by | (example tSteps = 1|50.2|100) +# NOTE: initial and last time step are always saved! +tSteps = 0:2 + +#+++++++++SNOW properties +#+++++Release thickness++++ +# True if release thickness should be read from shapefile file; if False - relTh read from ini file +relThFromShp = False +# release thickness (only considered if relThFromShp=False) +relTh = 1 + +#++++++++++++Time stepping parameters +# to use a variable time step (time step depends on kernel radius) +sphKernelRadiusTimeStepping = True +# Upper time step limit coefficient if option sphKernelRadiusTimeStepping is chosen. +cMax = 0.02 +# stopCriterion (stops when massFlowing<0.01*peakMassFlowing or ke<0.01*pke) +stopCrit = 0.0 + +#+++++++++++++SPH parameters +# SPH gradient option +# 0) No pressure gradients computed +# 1) SamosAT style (no reprojecion on the surface, dz = 0 and gz is used) +# 2) SamosAT done in the cartesian coord system (reprojecion on the surface, dz != 0 and g3 is used) +# 3) SamosAT but done in the local coord system (will hopefully allow us to add the earth pressure coef) +# and this time reprojecion on the surface, dz not 0 and g3 is used +sphOption = 2 +# Choice of artificial viscosity +# 0) No artificial viscosity +# 1) SAMOS artificial viscosity +# 2) Ata artificial viscosity +viscOption = 1 + +#++++++++++++++++ Particles +# number of particles defined by: MPPDIR= mass per particle direct, MPPDH= mass per particles through release thickness, +# MPPKR= mass per particles through number of particles per kernel radius +massPerParticleDeterminationMethod = MPPKR + +#+++++++++++++Flow model parameters+++++++++ +# subgridMixingFactor +subgridMixingFactor = 100. +# take curvature term into account in the gravity acceleration term for computing the friction force +# (and gradient if curvAccInGradient=1) +# 0 if deactivated, 1 if activated +curvAccInFriction = 0 +# take curvature term into account in the tangential momentum equation +# 0 if deactivated, 1 if activated +curvAccInTangent = 0 +# if curvAccInFriction=1, take curvature term into account in the pressure gradient (if curvAccInGradient=1) +curvAccInGradient = 0 + +#++++++++++++Friction model +# add the friction using an explicit formulation (1) +# 0 use an implicit method +explicitFriction = 1 +# friction type (samosAT, Coulomb, Voellmy) +frictModel = Coulomb +#+++++++++++++General Friction parameters +# tan of bed friction angle used for: samosAT, Coulomb, Voellmy +mu = 0.45 +#+++++++++++++Voellmy friction model +xsi = 10000. 
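Note that xsi only enters the friction force if frictModel is switched to Voellmy; the file above keeps Coulomb but still lists the parameter. As a reminder, the Voellmy bottom shear stress is commonly written as a Coulomb term plus a turbulent, velocity-squared term scaled by xsi. The sketch below uses an assumed density and flow depth and is not necessarily the exact com1DFA formulation:

```python
def voellmyShearStress(sigma, u, mu=0.45, xsi=10000.0, rho=200.0, g=9.81):
    """Bottom shear stress: Coulomb part plus turbulent (u squared) part."""
    return mu * sigma + rho * g * u**2 / xsi

sigma = 200.0 * 9.81 * 1.0   # normal stress of a 1 m deep flow [Pa], rho assumed 200 kg m-3
for u in (0.0, 10.0, 30.0):
    print(u, voellmyShearStress(sigma, u))
```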
+ + +[DFAPathGeneration_override] +# use default DFAPathGeneration config as base configuration (True) and override following parameters +# if False and local is available use local +defaultConfig = True +# the path extracted from the com1DFA simulation is re-sampled +# re-sampling step size is defined resampleDistance = nCellsResample x cellSize) +# this results in a path with a horizontal distance between points <= resampleDistance +nCellsResample = 5 + +# for extending the path at the bottom, extend path towards the bottom of the runout in the +# direction extracted form the first/last points of the path (all points at a distance +# nCellsMinExtend x cellSize < distance < nCellsMaxExtend x cellSize from the start/end) +nCellsMinExtend = 2 + +# for the extrapolation at the bottom, add factBottomExt * sMax to the path +factBottomExt = 0.2 +maxIterationExtBot = 10 +nBottomExtPrecision = 10 + +[ana3AIMEC_override] +# use default ana3AIMEC config as base configuration (True) and override following parameters +# if False and local is available use local +defaultConfig = True +# data result type for general analysis (ppr|pft|pfd). If left empty takes the result types available for all simulations +resTypes = ppr|pft|pfv +# parameter used for ordering the simulations - multiple possible; (e.g.relTh|deltaTh) +varParList = +# computational module that was used to produce avalanche simulations (to locate peakFiles) +anaMod = com1DFARotated +# directly set reference simulation by its name (name of simulation result file or parts of it that definitively +# identify one particular simulation) +referenceSimName = rel0 +# start with this, will be changed to True if the simulations have entrainment +# Mass analysis +flagMass = False diff --git a/avaframe/ana1Tests/rotationTestCfg.ini b/avaframe/ana1Tests/rotationTestCfg.ini index b022c96d2..5563ef915 100644 --- a/avaframe/ana1Tests/rotationTestCfg.ini +++ b/avaframe/ana1Tests/rotationTestCfg.ini @@ -7,7 +7,7 @@ # for the report (which computational module do you use) comModule = com1DFA # do you want to run the DFA module (all results in the Outputs/com1DFA folder will be deleted) -runDFAModule = False +runDFAModule = True [energyLineTest_override] # use default energyLineTest config as base configuration (True) and override following parameters @@ -51,6 +51,7 @@ relTh = 1 #++++++++++++Time stepping parameters # to use a variable time step (time step depends on kernel radius) sphKernelRadiusTimeStepping = True +# Upper time step limit coefficient if option sphKernelRadiusTimeStepping is chosen. cMax = 0.02 # stopCriterion (stops when massFlowing<0.01*peakMassFlowing or ke<0.01*pke) stopCrit = 0.0 @@ -77,10 +78,15 @@ massPerParticleDeterminationMethod = MPPKR #+++++++++++++Flow model parameters+++++++++ # subgridMixingFactor subgridMixingFactor = 100. 
-# curvature acceleration coefficient -# take curvature term into account in the gravity acceleration term +# take curvature term into account in the gravity acceleration term for computing the friction force +# (and gradient if curvAccInGradient=1) # 0 if deactivated, 1 if activated -curvAcceleration = 0 +curvAccInFriction = 0 +# take curvature term into account in the tangential momentum equation +# 0 if deactivated, 1 if activated +curvAccInTangent = 0 +# if curvAccInFriction=1, take curvature term into account in the pressure gradient (if curvAccInGradient=1) +curvAccInGradient = 0 #++++++++++++Friction model # add the friction using an explicit formulation (1) @@ -90,7 +96,10 @@ explicitFriction = 1 frictModel = Coulomb #+++++++++++++General Friction parameters # tan of bed friction angle used for: samosAT, Coulomb, Voellmy -mu = 0.45 +mu = 0.4 +#+++++++++++++Voellmy friction model +xsi = 10000. + [DFAPathGeneration_override] # use default DFAPathGeneration config as base configuration (True) and override following parameters @@ -117,6 +126,13 @@ nBottomExtPrecision = 10 defaultConfig = True # data result type for general analysis (ppr|pft|pfd). If left empty takes the result types available for all simulations resTypes = ppr|pft|pfv +# data result type for runout analysis (ppr, pft, pfv) +runoutResType = pft +# limit value for evaluation of runout (depends on the runoutResType chosen) +thresholdValue = 0.5 +# contour levels value for the difference plot (depends on the runoutResType chosen) +# use | delimiter (for ppr 1|3|5|10, for pft 0.1|0.25|0.5|0.75|1) +contourLevels = 0.25|0.5|1|2|3 # parameter used for ordering the simulations - multiple possible; (e.g.relTh|deltaTh) varParList = # computational module that was used to produce avalanche simulations (to locate peakFiles) diff --git a/avaframe/ana1Tests/simiSolTest.py b/avaframe/ana1Tests/simiSolTest.py index 7c890583c..e2a4f7edf 100644 --- a/avaframe/ana1Tests/simiSolTest.py +++ b/avaframe/ana1Tests/simiSolTest.py @@ -16,7 +16,6 @@ import logging # local imports -from avaframe.in3Utils import cfgUtils import avaframe.in3Utils.geoTrans as geoTrans import avaframe.com1DFA.com1DFA as com1DFA import avaframe.com1DFA.DFAtools as DFAtls @@ -32,7 +31,7 @@ log = logging.getLogger(__name__) -def mainSimilaritySol(simiSolCfg): +def mainSimilaritySol(simiSolCfg, com1DFACfg): """ Compute similarity solution Parameters ----------- @@ -50,18 +49,14 @@ def mainSimilaritySol(simiSolCfg): f_p_sol: first derivativ of f array """ - - # Load configuration - cfg = cfgUtils.getModuleConfig(com1DFA, simiSolCfg) - cfgGen = cfg['GENERAL'] - cfgSimi = cfg['SIMISOL'] - bedFrictionAngleDeg = cfgSimi.getfloat('bedFrictionAngle') - planeinclinationAngleDeg = cfgSimi.getfloat('planeinclinationAngle') - internalFrictionAngleDeg = cfgSimi.getfloat('internalFrictionAngle') + cfgGen = com1DFACfg['GENERAL'] + bedFrictionAngleDeg = simiSolCfg.getfloat('bedFrictionAngle') + planeinclinationAngleDeg = simiSolCfg.getfloat('planeinclinationAngle') + internalFrictionAngleDeg = simiSolCfg.getfloat('internalFrictionAngle') # Dimensioning parameters L - L_x = cfgSimi.getfloat('L_x') - L_y = cfgSimi.getfloat('L_y') - H = cfgSimi.getfloat('relTh') + L_x = simiSolCfg.getfloat('L_x') + L_y = simiSolCfg.getfloat('L_y') + H = simiSolCfg.getfloat('relTh') # Set parameters Pi = math.pi @@ -92,7 +87,7 @@ def mainSimilaritySol(simiSolCfg): x_0 = [1.0, 0.0, 1.0, 0.0] # here a circle as start point # compute earth pressure coefficients - flagEarth = cfgSimi.getboolean('flagEarth') + 
flagEarth = simiSolCfg.getboolean('flagEarth') if flagEarth: earthPressureCoefficients = defineEarthPressCoeff(phi, delta) else: @@ -712,13 +707,20 @@ def analyzeResults(avalancheDir, fieldsList, timeList, solSimi, fieldHeader, cfg vhErrorL2Array = np.zeros((len(fieldsList))) hErrorLMaxArray = np.zeros((len(fieldsList))) vhErrorLMaxArray = np.zeros((len(fieldsList))) - # get plots limits - limits = outAna1Plots.getPlotLimits(cfgSimi['SIMISOL'], fieldsList, fieldHeader) count = 0 + tSave = cfgSimi['SIMISOL'].getfloat('tSave') + indT = min(np.searchsorted(timeList, tSave), min(len(timeList)-1, len(fieldsList)-1)) + tSave = timeList[indT] + # get plots limits + if cfgSimi['SIMISOL'].getboolean('plotSequence'): + limits = outAna1Plots.getPlotLimits(cfgSimi['SIMISOL'], fieldsList, fieldHeader) + else: + limits = outAna1Plots.getPlotLimits(cfgSimi['SIMISOL'], [fieldsList[indT]], fieldHeader) # run the comparison routine for each saved time step for t, field in zip(timeList, fieldsList): # get similartiy solution h, u at required time step indTime = np.searchsorted(solSimi['time'], t) + log.debug('time simulation = %.2f, time simiSol = %.2f' % (t, solSimi['time'][indTime])) simiDict = getSimiSolParameters(solSimi, fieldHeader, indTime, cfgSimi['SIMISOL'], relTh, gravAcc) cellSize = fieldHeader['cellsize'] cosAngle = simiDict['cos'] @@ -743,7 +745,7 @@ def analyzeResults(avalancheDir, fieldsList, timeList, solSimi, fieldHeader, cfg log.debug("L2 %s error on the Flow Thickness at t=%.2f s is : %.4f" % (title, t, hErrorL2)) log.debug("L2 %s error on the momentum at t=%.2f s is : %.4f" % (title, t, vhErrorL2)) # Make all individual time step comparison plot - if cfgSimi['SIMISOL'].getboolean('plotSequence'): + if cfgSimi['SIMISOL'].getboolean('plotSequence') or t == tSave: outAna1Plots.saveSimiSolProfile(cfgMain, cfgSimi['SIMISOL'], field, limits, simiDict, t, fieldHeader, outDirTest, simHash) outAna1Plots.makeContourSimiPlot(avalancheDir, simHash, hNumerical, limits, simiDict, fieldHeader, t, @@ -751,22 +753,20 @@ def analyzeResults(avalancheDir, fieldsList, timeList, solSimi, fieldHeader, cfg count = count + 1 # Create result plots - tSave = cfgSimi['SIMISOL'].getfloat('tSave') - indT = min(np.searchsorted(timeList, tSave), min(len(timeList)-1, len(fieldsList)-1)) - tSave = timeList[indT] indTime = np.searchsorted(solSimi['time'], tSave) + log.debug('time simulation = %.2f, time simiSol = %.2f' % (tSave, solSimi['time'][indTime])) simiDict = getSimiSolParameters(solSimi, fieldHeader, indTime, cfgSimi['SIMISOL'], relTh, gravAcc) outAna1Plots.plotSimiSolSummary(avalancheDir, timeList, fieldsList, fieldHeader, simiDict, hErrorL2Array, hErrorLMaxArray, vhErrorL2Array, vhErrorLMaxArray, outDirTest, simDFrow, simHash, cfgSimi['SIMISOL']) - if cfgSimi['SIMISOL'].getboolean('plotErrorTime') and len(timeList)>1: + if cfgSimi['SIMISOL'].getboolean('plotErrorTime') and len(timeList) > 1: outAna1Plots.plotErrorTime(timeList, hErrorL2Array, hErrorLMaxArray, vhErrorL2Array, vhErrorLMaxArray, cfgSimi['SIMISOL'].getboolean('relativError'), tSave, simHash, outDirTest) return hErrorL2Array, hErrorLMaxArray, vhErrorL2Array, vhErrorLMaxArray, tSave -def getReleaseThickness(avaDir, cfg, demFile): +def getReleaseThickness(avaDir, cfgSimi, demFile, csz): """ Define release thickness for the similarity solution test Release area is defined as an elipse or main radius Lx and Ly. 
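An illustrative release-thickness raster for such an elliptical release area, built from the L_x, L_y and relTh values of the [SIMISOL] section below; the constant thickness inside the ellipse is a simplification and the actual profile built by getReleaseThickness may differ:

```python
import numpy as np

def ellipticReleaseRaster(ncols, nrows, csz, xllc, yllc, L_x=80.0, L_y=80.0, Hini=4.0):
    """Constant release thickness Hini inside an ellipse of main radii L_x, L_y."""
    x = np.linspace(0, ncols - 1, ncols) * csz + xllc   # cell-centre coordinates
    y = np.linspace(0, nrows - 1, nrows) * csz + yllc
    X, Y = np.meshgrid(x, y)
    return np.where((X / L_x)**2 + (Y / L_y)**2 <= 1.0, Hini, 0.0)

raster = ellipticReleaseRaster(ncols=101, nrows=101, csz=5.0, xllc=-250.0, yllc=-250.0)
print(raster.shape, raster.max())
```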
@@ -777,10 +777,12 @@ def getReleaseThickness(avaDir, cfg, demFile): ----------- avaDir: str path to avalanche directory - cfg: dict - confguration settings + cfgSimi: dict + similarity solution confguration settings demFile: str path to DEM file + csz: float + expected cell size Returns -------- @@ -791,17 +793,15 @@ def getReleaseThickness(avaDir, cfg, demFile): # Read dem demOri = IOf.readRaster(demFile) - csz = cfg.getfloat('GENERAL', 'meshCellSize') - _, _, ncols, nrows = geoTrans.makeCoordGridFromHeader(demOri['header'], cellSizeNew=csz, larger=True) + _, _, ncols, nrows = geoTrans.makeCoordGridFromHeader(demOri['header'], cellSizeNew=csz, larger=False) xllc = demOri['header']['xllcenter'] yllc = demOri['header']['yllcenter'] # define release thickness distribution - cfgSimi = cfg['SIMISOL'] L_x = cfgSimi.getfloat('L_x') L_y = cfgSimi.getfloat('L_y') - Hini = cfg['SIMISOL'].getfloat('relTh') + Hini = cfgSimi.getfloat('relTh') planeinclinationAngleDeg = cfgSimi.getfloat('planeinclinationAngle') x = np.linspace(0, ncols-1, ncols)*csz+xllc y = np.linspace(0, nrows-1, nrows)*csz+yllc diff --git a/avaframe/data/avaSimilaritySol/Inputs/simiSol_com1DFACfg.ini b/avaframe/ana1Tests/simiSolTestCfg.ini similarity index 79% rename from avaframe/data/avaSimilaritySol/Inputs/simiSol_com1DFACfg.ini rename to avaframe/ana1Tests/simiSolTestCfg.ini index 655ecf7a1..52a7a1fdc 100644 --- a/avaframe/data/avaSimilaritySol/Inputs/simiSol_com1DFACfg.ini +++ b/avaframe/ana1Tests/simiSolTestCfg.ini @@ -1,22 +1,70 @@ -### Config File - This file contains the main settings for the similarity -### solution run with com1DFAPy +### Config File - This file contains the main settings for the similarity solution test module +## Set your parameters +# This file will be overridden by local_simiSolTestCfg.ini if it exists +# So copy this file to local_simiSolTestCfg.ini, adjust your variables there + +[SIMISOL] +#sphKernel radius for the com1DFA simulations +sphKernelRadius = 10|8|6|5|4|3 + +# dimensioning parameters +L_x = 80. +L_y = 80. + +# release thickness +relTh = 4. + +# bed friction angle +bedFrictionAngle = 25. +# internal friction angle +internalFrictionAngle = 25. +# plane inclination angle +planeinclinationAngle = 35. + +# Flag earthpressure coefficients +# if false takes 1 +flagEarth = False + +# save analysis plots at time step dtSol +dtSol = 50. 
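The |-separated values above (e.g. sphKernelRadius = 10|8|6|5|4|3) are expanded by avaframe into one simulation per value; the sketch below illustrates the expansion as a plain cartesian product, which is only an approximation of the real variation machinery in the config utilities:

```python
import itertools

def expandVariations(cfgSection, keys):
    """Expand |-separated values into one parameter set per combination (illustrative)."""
    valueLists = [str(cfgSection[k]).split('|') for k in keys]
    return [dict(zip(keys, combo)) for combo in itertools.product(*valueLists)]

section = {'sphKernelRadius': '10|8|6|5|4|3', 'aPPK': '0|-0.5|-1|-2|-3', 'nPPK0': '15'}
variations = expandVariations(section, ['sphKernelRadius', 'aPPK', 'nPPK0'])
print(len(variations))   # 6 x 5 x 1 = 30 parameter sets
```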
+ +# which time step should be saved +tSave = 20 + +#++++Plotting parameters++++++ +# list of parameters to display in the summary plot (list of parameters separated by |) +paramInfo = sphKernelRadius|aPPK|nPPK0|cMax|nPart +# list of parameters to display in the convergence plot (list of parameters separated by |) +paramConvInfo = sphKR0|nPPK0|cMax +# plotting flags +# only analyze and plot the tSave time step +onlyLast = False +# plot error function of time for each simulation +plotErrorTime = False +# plot individual figure for the h, hu and u error for each saved time step +plotSequence = False +# use relative difference +relativError = True +# when plotting, the domain extent is scaleCoef times bigger than the data extent +scaleCoef = 1.02 -[GENERAL] +[com1DFA_override] +# use default com1DFA config as base configuration (True) and override following parameters +# if False and local is available use local +defaultConfig = True +#++++++++++++++++ Simulation type +# list of simulations that shall be performed (null, ent, res, entres, available (use all available input data)) +simTypeList = null #+++++++++++++ Output++++++++++++ # desired result Parameters (ppr, pfd, pfv, FD, FV, P, particles) - separated by | -resType = FD|FV|Vx|Vy|Vz +resType = FT|FV|Vx|Vy|Vz # saving time step, i.e. time in seconds (first and last time step are always saved) # option 1: give an interval with start:interval in seconds (tStep = 0:5 - this will save desired results every 5 seconds for the full simulation) # option 2: explicitly list all desired time steps (closest to actual computational time step) separated by | (example tSteps = 1|50.2|100) # NOTE: initial and last time step are always saved! tSteps = 0:1 -#++++++++++++++++ particle Initialisation +++++++++ -# initial particle distribution, options: random, semirandom, uniform -# semirandom: particles are uniformly distributed with a little random variation -initPartDistType = random - #+++++++++SNOW properties # True if release thickness should be read from shapefile file; if False - relTh read from ini file relThFromShp = False @@ -24,11 +72,11 @@ relThFromFile = True #++++++++++++Time stepping parameters # End time [s] -tEnd = 5 +tEnd = 30 # to use a variable time step (time step depends on kernel radius) sphKernelRadiusTimeStepping = True # courant number -cMax = 0.02 +cMax = 0.01 #+++++++++++++SPH parameters # SPH gradient option @@ -56,9 +104,9 @@ massPerParticleDeterminationMethod = MPPKR # reference kernel radius sphKR0 = 5 # reference number of particles per kernel radius -nPPK0 = 10|20|30 +nPPK0 = 15 # variation of nppK exponent -aPPK = -0.5|-1|-1.5|-2 +aPPK = 0|-0.5|-1|-2|-3 # if splitOption=0 # threshold for splitting particles, split if: mPart > massPerPart x thresholdMassSplit @@ -78,47 +126,7 @@ subgridMixingFactor = 10 # friction type (samosAT, Coulomb) frictModel = Coulomb # add the friction using an explicit formulation (1) -# 0 use an implicit method +# 0 use an implicit explicitFriction = 1 #+++++++++++++SamosAt friction model -mu = 0.466307658 - -[SIMISOL] -# dimensioning parameters -L_x = 80. -L_y = 80. - -# release thickness -relTh = 4. - -# bed friction angle -bedFrictionAngle = 25. -# internal friction angle -internalFrictionAngle = 25. -# plane inclination angle -planeinclinationAngle = 35. - -# Flag earthpressure coefficients -# if false takes 1 -flagEarth = False - -# save analysis plots at time step dtSol -dtSol = 50. 
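The nPPK0, sphKR0 and aPPK values changed above control how the number of particles per kernel radius varies with the kernel radius. As a purely hypothetical illustration of how these three parameters could interact (a power law relative to the reference radius sphKR0; the actual formula lives in com1DFA and may differ from this sketch):

# hypothetical scaling, for illustration only - not taken from com1DFA
def nPPK(sphKernelRadius, nPPK0=15.0, sphKR0=5.0, aPPK=-1.0):
    return nPPK0 * (sphKernelRadius / sphKR0) ** aPPK

for radius in [6.0, 5.0, 4.0, 3.0]:
    print(radius, round(nPPK(radius), 1))
# with aPPK < 0, smaller kernel radii get more particles per kernel radius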
- -# which time step should be saved -tSave = 5 - -#++++Plotting parameters++++++ -# list of parameters to display in the summary plot (list of parameters separated by |) -paramInfo = sphKernelRadius|aPPK|nPPK0|nPart -# plotting flags -# only analyze and plot the tSave time step -onlyLast = False -# plot error function of time for each simulation -plotErrorTime = False -# plot individual figure for the h, hu and u error for each saved time step -plotSequence = False -# use relative difference -relativError = True -# when plotting, the domain extent is scaleCoef times bigger than the data extent -scaleCoef = 1.02 +mu = 0.466307658154998593 diff --git a/avaframe/out3Plot/outAIMEC.py b/avaframe/out3Plot/outAIMEC.py index f1e183314..71bb751cc 100644 --- a/avaframe/out3Plot/outAIMEC.py +++ b/avaframe/out3Plot/outAIMEC.py @@ -230,12 +230,12 @@ def visuRunoutStat(rasterTransfo, inputsDF, resAnalysisDF, newRasters, cfgSetup, indStartOfRunout = rasterTransfo['indStartOfRunout'] rasterdataPres = newRasters['newRefRaster' + runoutResType.upper()] runout = resAnalysisDF['sRunout'].to_numpy() - pprCrossMax = np.stack(resAnalysisDF[runoutResType.lower() + 'CrossMax'].to_numpy()) + aCrossMax = np.stack(resAnalysisDF[runoutResType.lower() + 'CrossMax'].to_numpy()) ############################################ # prepare for plot - pMean = np.mean(pprCrossMax, axis=0) - pMedian = np.median(pprCrossMax, axis=0) - pPercentile = np.percentile(pprCrossMax, [percentile/2, 50, 100-percentile/2], axis=0) + pMean = np.mean(aCrossMax, axis=0) + pMedian = np.median(aCrossMax, axis=0) + pPercentile = np.percentile(aCrossMax, [percentile/2, 50, 100-percentile/2], axis=0) maskedArray = np.ma.masked_where(rasterdataPres <= float(thresholdValue), rasterdataPres) # get plots limits @@ -306,7 +306,7 @@ def visuRunoutStat(rasterTransfo, inputsDF, resAnalysisDF, newRasters, cfgSetup, ax2.set_ylabel('s [m]') ax2.set_ylim([s.min(), s.max()]) ax2.set_xlim(auto=True) - ax2.set_xlabel('$P_{max}(s)$ [%s]' % unit) + ax2.set_xlabel('$%s_{max}(s)$ [%s]' % (name, unit)) outFileName = '_'.join([projectName, runoutResType, str(thresholdValue).replace('.', 'p'), 'slComparisonStat']) diff --git a/avaframe/out3Plot/outAna1Plots.py b/avaframe/out3Plot/outAna1Plots.py index 4a5976d23..523026c62 100644 --- a/avaframe/out3Plot/outAna1Plots.py +++ b/avaframe/out3Plot/outAna1Plots.py @@ -57,7 +57,7 @@ def saveSimiSolProfile(cfgMain, cfgSimi, fields, limits, simiDict, tSave, header comSol['tSave'] = tSave # profile in flow direction - fig1 = plt.figure(figsize=(4*pU.figW, 2*pU.figH)) + fig1 = plt.figure(figsize=(2.5*pU.figW, 1*pU.figH)) fig1.suptitle('Similarty solution test, t = %.2f s (simulation %s)' % (tSave, simHash), fontsize=20) ax1 = plt.subplot2grid((1, 2), (0, 0)) comSol['indFinal'] = int(nrows * 0.5) -1 @@ -70,20 +70,20 @@ def saveSimiSolProfile(cfgMain, cfgSimi, fields, limits, simiDict, tSave, header ax2, ax22 = _plotVariable(ax2, cfgSimi, simiDict, comSol, limits, 'yaxis') ax2.set_title('Profile across flow direction (x = %.2f m)' % xCenter) - pU.saveAndOrPlot({'pathResult': outDirTest / 'pics'}, 'compareProfileSimiSol%s_%.2f.' % (simHash, tSave), fig1) + pU.saveAndOrPlot({'pathResult': outDirTest }, 'compareProfileSimiSol%s_%.2f.' 
% (simHash, tSave), fig1) def makeContourSimiPlot(avalancheDir, simHash, fieldFT, limits, simiDict, fieldHeader, tSave, outDirTest): """ """ - fig, ax1 = plt.subplots(nrows=1, ncols=3, sharex=True, figsize=(pU.figW*4, pU.figH*2)) + fig, ax1 = plt.subplots(nrows=1, ncols=3, sharex=True, figsize=(pU.figW*2, pU.figH)) # make flow momentum comparison plot ax1 = plt.subplot2grid((1, 1), (0, 0)) ax1 = addContour2Plot(ax1, fieldFT, simiDict, fieldHeader, limits, nLevels=16) ax1.set_title(pU.cfgPlotUtils['nameFT'] + ' contours at t = %.2f s' % tSave) pU.putAvaNameOnPlot(ax1, avalancheDir) - pU.saveAndOrPlot({'pathResult': outDirTest / 'pics'}, 'compareContourSimiSol%s_%.2f.' % (simHash, tSave), fig) + pU.saveAndOrPlot({'pathResult': outDirTest }, 'compareContourSimiSol%s_%.2f.' % (simHash, tSave), fig) return fig @@ -198,7 +198,7 @@ def _plotVariable(ax1, cfg, simiDict, comSol, limits, axis, particles=False): def plotSimiSolSummary(avalancheDir, timeList, fieldsList, fieldHeader, simiDict, hErrorL2Array, hErrorLMaxArray, - vhErrorL2Array, vhErrorLMaxArray, outDirTest, simDFrow, simHash, cfgSimi): + vhErrorL2Array, vhErrorLMaxArray, outDirTest, simDFrow, simHash, cfgSimi): """ Plot sumary figure of the similarity solution test Parameters @@ -260,46 +260,45 @@ def plotSimiSolSummary(avalancheDir, timeList, fieldsList, fieldHeader, simiDict comSol['outDirTest'] = outDirTest comSol['tSave'] = tSave - # create figures and plots - fig = plt.figure(figsize=(pU.figW*4, pU.figH*2)) + fig = plt.figure(figsize=(pU.figW*3.5, pU.figH*2)) fig.suptitle('Similarty solution test, t = %.2f s (simulation %s)' % (tSave, simHash), fontsize=30) # make comparison profile plot in flow direction - ax1 = plt.subplot2grid((2, 6), (0, 0), colspan=3) - comSol['indFinal'] = int(nrows * 0.5) -1 + ax1 = plt.subplot2grid((2, 8), (0, 0), colspan=4) + comSol['indFinal'] = int(nrows * 0.5) - 1 ax1, ax11 = _plotVariable(ax1, cfgSimi, simiDict, comSol, limits, 'xaxis') ax1.set_title('Profile in flow direction (y = 0 m)') # make flow momentum comparison plot - ax2 = plt.subplot2grid((2, 6), (0, 3), colspan=3) + ax2 = plt.subplot2grid((2, 8), (0, 4), colspan=4) comSol['indFinal'] = int(np.round((xCenter - xllc)/csz) + 1) ax2, ax22 = _plotVariable(ax2, cfgSimi, simiDict, comSol, limits, 'yaxis') ax2.set_title('Profile across flow direction (x = %.2f m)' % xCenter) # make bird view plot - ax3 = plt.subplot2grid((2, 6), (1, 0), colspan=2) + ax3 = plt.subplot2grid((2, 8), (1, 0), colspan=3) ax3 = addContour2Plot(ax3, fieldsList[indT]['FT'], simiDict, fieldHeader, limits) ax3.set_title(pU.cfgPlotUtils['nameFT'] + ' contours') pU.putAvaNameOnPlot(ax3, avalancheDir) # make error plot - ax4 = plt.subplot2grid((2, 6), (1, 2), colspan=2) + ax4 = plt.subplot2grid((2, 8), (1, 3), colspan=3) title = '\n between analytical solution and com1DFA' title = getTitleError(relativ, title) ax4.set_title(title) ax4, ax5 = addErrorTime(ax4, timeList, hErrorL2Array, hErrorLMaxArray, vhErrorL2Array, vhErrorLMaxArray, relativ, tSave) - ax7 = plt.subplot2grid((2, 6), (1, 4), colspan=2) + ax7 = plt.subplot2grid((2, 8), (1, 6), colspan=2) ax7.axis("off") ax7.invert_yaxis() text = '' for param in paramInfo: text = text + (param + ' = %.2f' % simDFrow[param]) + '\n' - ax7.text(0.5, 0.5, text, transform=ax7.transAxes, ha='center', va='center', fontsize=pU.fs) + ax7.text(0.5, 0.5, text, transform=ax7.transAxes, ha='center', va='center', fontsize=pU.cfg['titleSize']) outFileName = '_'.join([simHash, 'SimiSolTest']) - pU.saveAndOrPlot({'pathResult': outDirTest / 'pics'}, 
outFileName, fig) + pU.saveAndOrPlot({'pathResult': outDirTest}, outFileName, fig) # Dam Break plots @@ -307,8 +306,8 @@ def _plotVariableDamBreak(var, x, xMiddle, dtInd, dtStep, label): fig = plt.figure(figsize=(pU.figW, pU.figH)) plt.title('Dry-Bed') - plt.plot(x, var[:,0], 'k--', label='t = 0s') - plt.plot(x, var[:,dtInd], label='t = %.1fs' % dtStep) + plt.plot(x, var[:, 0], 'k--', label='t = 0s') + plt.plot(x, var[:, dtInd], label='t = %.1fs' % dtStep) plt.xlabel('x-coordinate [m]') plt.ylabel(label) plt.legend() @@ -327,12 +326,12 @@ def plotDamAnaResults(t, x, xMiddle, h, u, tSave, cfg, outDirTest): name = pU.cfgPlotUtils['nameFT'] + '[' + pU.cfgPlotUtils['unitFT'] + ']' fig = _plotVariableDamBreak(h, x, xMiddle, dtInd, tSave, name) outputName = 'damBreakFlowThickness' - pU.saveAndOrPlot({'pathResult': outDirTest / 'pics'}, outputName, fig) + pU.saveAndOrPlot({'pathResult': outDirTest}, outputName, fig) name = pU.cfgPlotUtils['nameFV'] + '[' + pU.cfgPlotUtils['unitFV'] + ']' fig = _plotVariableDamBreak(u, x, xMiddle, dtInd, tSave, name) outputName = 'damBreakFlowVelocity' - pU.saveAndOrPlot({'pathResult': outDirTest / 'pics'}, outputName, fig) + pU.saveAndOrPlot({'pathResult': outDirTest}, outputName, fig) def plotComparisonDam(cfg, simHash, fields0, fieldsT, header, solDam, tSave, limits, outDirTest): @@ -408,7 +407,7 @@ def plotComparisonDam(cfg, simHash, fields0, fieldsT, header, solDam, tSave, lim # setup index for time of analyitcal solution indTime = np.searchsorted(solDam['tAna'], tSave) - fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, sharex=True, figsize=(pU.figW*4, pU.figH*2)) + fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, sharex=True, figsize=(pU.figW*2, pU.figH*1)) ax1 = _plotDamProfile(ax1, x, y, nx_loc, cfg, dataIniFT, dataAnaFT, solDam['xAna'], solDam['xMidAna'], solDam['hAna'], indTime, limits['maxFT'], pU.cfgPlotUtils['nameFT'], pU.cfgPlotUtils['unitFT']) @@ -425,11 +424,11 @@ def plotComparisonDam(cfg, simHash, fields0, fieldsT, header, solDam, tSave, lim ax3 = _plotDamProfile(ax3, x, y, nx_loc, cfg, dataIniFT*dataIniV, dataAnaFT*dataAnaV, solDam['xAna'], solDam['xMidAna'], solDam['hAna']*solDam['uAna'], indTime, limits['maxFM'], pU.cfgPlotUtils['nameFTV'], pU.cfgPlotUtils['unitFTV']) - ax3.set_title(getLabel(pU.cfgPlotUtils['nameFTV'] + ' profile', '', dir='', vert=True)) + ax3.set_title(pU.cfgPlotUtils['nameFTV'] + ' profile') fig.suptitle('Simulation %s, t = %.2f s' % (simHash, tSave), fontsize=30) outputName = 'compareDamBreak%s_%.02f' % (simHash, tSave) - pU.saveAndOrPlot({'pathResult': outDirTest / 'pics'}, outputName, fig) + pU.saveAndOrPlot({'pathResult': outDirTest}, outputName, fig) return ax1, ax2, ax3 @@ -440,11 +439,11 @@ def _plotDamProfile(ax, x, y, nx_loc, cfg, data1, data2, xAna, xMidAna, yAna, in ax.plot(x, y, 'grey', linestyle='--') ax.plot(x, data1[nx_loc, :], 'k--', label='initial') ax.plot(x, data2[nx_loc, :], 'b', label='numerical') - ax.plot(xAna, yAna[:,indT], 'r-', label='analytical') + ax.plot(xAna, yAna[:, indT], 'r-', label='analytical') ax.set_xlabel('x [m]') ax.set_ylabel('%s [%s]' % (label, unit)) ax.set_xlim([cfg.getfloat('xStart'), cfg.getfloat('xEnd')]) - ax.set_ylim([-0.05, max(yMax, scaleCoef*max(yAna[:,indT]))]) + ax.set_ylim([-0.05, max(yMax, scaleCoef*max(yAna[:, indT]))]) # ax.axvline(xMidAna[indT], color='grey', linestyle='--') ax.axvspan(xMidAna[indT], cfg.getfloat('xEnd'), color='grey', alpha=0.3, lw=0, label='error computation \n domain') @@ -489,13 +488,6 @@ def plotDamBreakSummary(avalancheDir, timeList, 
fieldsList, fieldHeader, solDam, configuration setting for avalanche simulation including DAMBREAK section """ - # Initialise DEM - demFile = gI.getDEMPath(avalancheDir) - demOri = IOf.readRaster(demFile, noDataToNan=True) - dem = com1DFA.initializeMesh(cfg['GENERAL'], demOri, cfg['GENERAL'].getint('methodMeshNormal')) - dem['header']['xllcenter'] = dem['originalHeader']['xllcenter'] - dem['header']['yllcenter'] = dem['originalHeader']['yllcenter'] - cfgDam = cfg['DAMBREAK'] phi = cfgDam.getfloat('phi') phiRad = np.radians(phi) @@ -548,7 +540,7 @@ def plotDamBreakSummary(avalancheDir, timeList, fieldsList, fieldHeader, solDam, limits = getPlotLimits(cfgDam, fieldsList[:indT], fieldHeader) # create figures and plots - fig = plt.figure(figsize=(pU.figW*4, pU.figH*2)) + fig = plt.figure(figsize=(pU.figW*2.5, pU.figH*1.5)) fig.suptitle('DamBreak test, t = %.2f s (simulation %s)' % (tSave, simHash), fontsize=30) # make flow thickness comparison plot ax1 = plt.subplot2grid((2, 6), (0, 0), colspan=2) @@ -563,7 +555,7 @@ def plotDamBreakSummary(avalancheDir, timeList, fieldsList, fieldHeader, solDam, ax2 = _plotDamProfile(ax2, x, y, nx_loc, cfgDam, dataIniFT*dataIniV, dataFT*dataV, solDam['xAna'], solDam['xMidAna'], solDam['hAna']*solDam['uAna'], indTime, limits['maxFM'], pU.cfgPlotUtils['nameFTV'], pU.cfgPlotUtils['unitFTV']) - ax2.set_title(getLabel(pU.cfgPlotUtils['nameFTV'] + ' profile', '', dir='', vert=True)) + ax2.set_title(pU.cfgPlotUtils['nameFTV'] + ' profile') # make flow velocity comparison plot ax3 = plt.subplot2grid((2, 6), (0, 2), colspan=2) @@ -577,7 +569,6 @@ def plotDamBreakSummary(avalancheDir, timeList, fieldsList, fieldHeader, solDam, ax6 = plt.subplot2grid((2, 6), (1, 0), colspan=2) ax6, extent, cbar0, cs1 = outCom1DFA.addResult2Plot(ax6, fieldHeader, dataFT, 'FT') cbar0.ax.set_ylabel(pU.cfgPlotUtils['nameFT']) - ax6 = outCom1DFA.addDem2Plot(ax6, dem, what='slope', extent=extent) rowsMin, rowsMax, colsMin, colsMax = pU.constrainPlotsToData(dataFT, fieldHeader['cellsize'], extentOption=True, constrainedData=False, buffer='') # draw rectangle corresponding to the error measurement domain @@ -610,10 +601,11 @@ def plotDamBreakSummary(avalancheDir, timeList, fieldsList, fieldHeader, solDam, text = '' for param in paramInfo: text = text + (param + ' = %.2f' % simDFrow[param]) + '\n' - ax7.text(0.5, 0.5, text, transform=ax7.transAxes, ha='center', va='center', fontsize=pU.fs) + ax7.text(0.5, 0.5, text, transform=ax7.transAxes, ha='center', va='center', fontsize=pU.cfg['titleSize']) outFileName = '_'.join([simHash, 'DamBreakTest']) - pU.saveAndOrPlot({'pathResult': outDirTest / 'pics'}, outFileName, fig) + pU.saveAndOrPlot({'pathResult': outDirTest}, outFileName, fig) + # Genaral plots def addErrorTime(ax1, time, hErrorL2Array, hErrorLMaxArray, vhErrorL2Array, vhErrorLMaxArray, relativ, t): @@ -687,13 +679,13 @@ def plotErrorTime(time, hErrorL2Array, hErrorLMaxArray, vhErrorL2Array, vhErrorL output directory """ title = (' between similarity solution and com1DFA \n(simulation %s)' - % (outputName)) + % (outputName)) title = getTitleError(relativ, title) fig1, ax1 = plt.subplots(figsize=(2*pU.figW, 2*pU.figH)) ax1.set_title(title) ax1, ax2 = addErrorTime(ax1, time, hErrorL2Array, hErrorLMaxArray, vhErrorL2Array, vhErrorLMaxArray, relativ, t) - pU.saveAndOrPlot({'pathResult': outDirTest / 'pics'}, 'Error_time_' + str(outputName), fig1) + pU.saveAndOrPlot({'pathResult': outDirTest}, 'Error_time_' + str(outputName), fig1) return fig1 @@ -732,7 +724,7 @@ def addContour2Plot(ax1, fieldFT, 
simiDict, fieldHeader, limits, nLevels=9): # ax3.clabel(CS, inline=1, fontsize=8) h1,_ = cs1.legend_elements() h2,_ = cs2.legend_elements() - ax1.legend([h1[-1], h2[-1]], ['simulation', 'analytical']) + ax1.legend([h1[-1], h2[-1]], ['simulation', 'analytical'], loc='upper left') ax1.set_ylim([-widthY, widthY]) ax1.set_xlim([-widthX+xCenter, widthX+xCenter]) ax1.set_xlabel('x [m]') @@ -740,7 +732,7 @@ def addContour2Plot(ax1, fieldFT, simiDict, fieldHeader, limits, nLevels=9): # add vertical and horizontal line showing the location of the profile plots cuts ax1.axvline(xCenter, color='b', linestyle=':') ax1.axhline(0, color='r', linestyle=':') - ax1.text(xCenter+5, -widthY, "x = %.2f m" % (xCenter), color='b') + ax1.text(xCenter+5, widthY, "x = %.2f m" % (xCenter), color='b', verticalalignment='top') ax1.text(-widthX+xCenter, 5, "y = 0 m", color='r') return ax1 @@ -773,11 +765,12 @@ def plotErrorConvergence(simDF, outDirTest, cfgSimi, xField, yField, coloredBy, fit: boolean if True add power law regression """ + paramConvInfo = cfgSimi['paramConvInfo'].split('|') tSave = simDF['timeError'][0] relativ = cfgSimi.getboolean('relativError') cmap, _, ticks, norm = pU.makeColorMap(pU.cmapAvaframeCont, min(simDF[coloredBy]), max(simDF[coloredBy]), continuous=pU.contCmap) - fig1, ax1 = plt.subplots(nrows=1, ncols=1, figsize=(3*pU.figW, 2*pU.figH)) + fig1, ax1 = plt.subplots(nrows=1, ncols=1, figsize=(2*pU.figW, pU.figH)) # get the sizing function sizeList = simDF[sizedBy].unique() lenSize = len(sizeList) @@ -807,20 +800,25 @@ def plotErrorConvergence(simDF, outDirTest, cfgSimi, xField, yField, coloredBy, ax1.plot(xArray, p0H*xArray**p1H, color=color) if np.size(rSquaredH) == 0: rSquaredH = np.nan - log.debug('power law fit sphKernelRadius = %.2f m: hErrorL2 = %.1f * Npart^{%.2f}, r=%.2f' % - (colorValue, p0H, p1H, rSquaredH)) + log.debug('power law fit sphKernelRadius = %.2f m: %s = %.1f * Npart^{%.2f}, r=%.2f' % + (colorValue, yField, p0H, p1H, rSquaredH)) if logScale: ax1.set_yscale('log') ax1.set_xscale('log') - fig1.suptitle('Convergence of DFA simulation for the similarity solution test at t = %.2fs' % tSave) - ax1.set_title(getTitleError(relativ, r' L2 on flow thickness')) + fig1.suptitle('Convergence of DFA simulation at t = %.2fs' % tSave) + if yField == 'vhErrorL2': + what = getLabel(' on ', '', dir='', vert=True) + else: + what = r' on flow thickness $h$' + # ax1.set_title(getTitleError(relativ, what)) ax1.set_xlabel(xField) - ax1.set_ylabel(getTitleError(relativ, r' L2 on flow thickness')) + ax1.set_ylabel(getTitleError(relativ, what)) if lenColor<=10: lenColor = None - legend1 = ax1.legend(*scatter1.legend_elements(num=lenColor), loc="upper center", title=coloredBy) + legend1 = ax1.legend(*scatter1.legend_elements(num=lenColor), bbox_to_anchor=(1, 0.95), loc="upper right", + title=coloredBy) ax1.add_artist(legend1) # produce a legend with a cross section of sizes from the scatter @@ -828,10 +826,17 @@ def plotErrorConvergence(simDF, outDirTest, cfgSimi, xField, yField, coloredBy, lenSize = None kw = dict(prop="sizes", color=scatter1.cmap(0.7), func=lambda s: (s-10)*(maxSize - minSize)/70 + minSize) - legend3 = ax1.legend(*scatter1.legend_elements(num=lenSize, **kw), loc="upper right", title=sizedBy) - ax1.grid(color='grey', linestyle='-', linewidth=0.25, alpha=0.5) - ax1.grid(color='grey', which='minor', linestyle='--', linewidth=0.25, alpha=0.5) - pU.saveAndOrPlot({'pathResult': outDirTest / 'pics'}, 'ErrorLog%ds' % int(tSave), fig1) + legend3 = 
ax1.legend(*scatter1.legend_elements(num=lenSize, **kw), bbox_to_anchor=(0.8, 0.95), loc="upper right", + title=sizedBy) + ax1.grid(color='grey', linestyle='-', linewidth=0.5, alpha=0.75) + ax1.grid(color='grey', which='minor', linestyle='--', linewidth=0.5, alpha=0.5) + + text = '' + for param in paramConvInfo: + text = text + (param + ' = %.2f' % simDF[param][0]) + '\n' + ax1.text(0.05, 0.05, text, transform=ax1.transAxes, ha='left', va='bottom', fontsize=pU.fs) + + pU.saveAndOrPlot({'pathResult': outDirTest}, 'ErrorLog%ds' % int(tSave), fig1) return fig1, ax1 @@ -908,9 +913,9 @@ def plotPresentation(simDF, outDirTest, cfgSimi, xField, yField, coloredBy, size scatter1 = ax1.scatter(simDFNew[xField], simDFNew[yField], c=simDFNew[coloredBy], s=sizeList, cmap=cmap, norm=norm, marker=pU.markers[0], alpha=1) fig1.suptitle('Convergence of DFA simulation for the similarity solution test at t = %.2fs' % tSave) - ax1.set_title(getTitleError(relativ, r' L2 on flow thickness')) + ax1.set_title(getTitleError(relativ, r' on flow thickness')) ax1.set_xlabel(xField) - ax1.set_ylabel(getTitleError(relativ, r' L2 on flow thickness')) + ax1.set_ylabel(getTitleError(relativ, r' on flow thickness')) legend1 = ax1.legend(*scatter1.legend_elements(num=lenColor), loc="upper center", title=coloredBy) ax1.add_artist(legend1) @@ -920,7 +925,7 @@ def plotPresentation(simDF, outDirTest, cfgSimi, xField, yField, coloredBy, size legend3 = ax1.legend(*scatter1.legend_elements(num=lenSize, **kw), loc="upper right", title=sizedBy) ax1.grid(color='grey', linestyle='-', linewidth=0.25, alpha=0.5) ax1.grid(color='grey', which='minor', linestyle='--', linewidth=0.25, alpha=0.5) - pU.saveAndOrPlot({'pathResult': outDirTest / 'pics'}, 'ErrorPresentation%d' % count, fig1) + pU.saveAndOrPlot({'pathResult': outDirTest}, 'ErrorPresentation%d' % count, fig1) if fit: p, rSquaredH, _, _, _ = np.polyfit(np.log(xArray), np.log(hErrorL2), deg=1, full=True) p1H = p[0] @@ -936,7 +941,7 @@ def plotPresentation(simDF, outDirTest, cfgSimi, xField, yField, coloredBy, size log.debug('power law fit sphKernelRadius = %.2f m: hErrorL2 = %.1f * Npart^{%.2f}, r=%.2f' % (colorValue, p0H, p1H, rSquaredH)) - pU.saveAndOrPlot({'pathResult': outDirTest / 'pics'}, 'ErrorPresentation%dFit' % count, fig1) + pU.saveAndOrPlot({'pathResult': outDirTest}, 'ErrorPresentation%dFit' % count, fig1) count = count + 1 simDFOld = copy.deepcopy(simDFNew) @@ -1046,7 +1051,7 @@ def plotTimeCPULog(simDF, outDirTest, cfgSimi, xField, coloredBy, sizedBy, logSc legend2 = ax1.legend(*scatter.legend_elements(**kw), loc="upper right", title=sizedBy) ax1.grid(color='grey', linestyle='-', linewidth=0.25, alpha=0.5) ax1.grid(color='grey', which='minor', linestyle='--', linewidth=0.25, alpha=0.5) - pU.saveAndOrPlot({'pathResult': outDirTest / 'pics'}, 'timeCPU%ds' % int(tSave), fig1) + pU.saveAndOrPlot({'pathResult': outDirTest}, 'timeCPU%ds' % int(tSave), fig1) def getPlotLimits(cfgSimi, fieldsList, fieldHeader): @@ -1087,9 +1092,9 @@ def getPlotLimits(cfgSimi, fieldsList, fieldHeader): def getTitleError(relativ, ending=''): """Get error plot title (relativ error or not?)""" if relativ: - return 'Relative error difference' + ending + return 'Relative L2 deviation' + ending else: - return 'Error difference' + ending + return 'L2 deviation' + ending def getLabel(start, end, dir='', vert=True): diff --git a/avaframe/out3Plot/plotUtils.py b/avaframe/out3Plot/plotUtils.py index 874827328..45ea857ef 100644 --- a/avaframe/out3Plot/plotUtils.py +++ b/avaframe/out3Plot/plotUtils.py @@ 
-63,11 +63,17 @@ matplotlib.rcParams['axes.labelsize'] = cfg['labelSize'] matplotlib.rcParams['axes.linewidth'] = 1.0 matplotlib.rcParams['axes.edgecolor'] = 'lightgrey' -matplotlib.rcParams['axes.labelcolor'] = 'grey' +matplotlib.rcParams['axes.labelcolor'] = 'black' matplotlib.rcParams['xtick.color'] = 'grey' matplotlib.rcParams['xtick.major.width'] = 1.0 matplotlib.rcParams['xtick.major.size'] = 3 matplotlib.rcParams['xtick.labelsize'] = cfg['tickLabelSize'] +matplotlib.rcParams['xtick.direction'] = 'in' +matplotlib.rcParams['xtick.top'] = True +matplotlib.rcParams['xtick.bottom'] = True +matplotlib.rcParams['ytick.direction'] = 'in' +matplotlib.rcParams['ytick.left'] = True +matplotlib.rcParams['ytick.right'] = True matplotlib.rcParams['ytick.color'] = 'grey' matplotlib.rcParams['ytick.major.width'] = 1.0 matplotlib.rcParams['ytick.major.size'] = 3 diff --git a/avaframe/runScripts/runAnalyzeDamBreak.py b/avaframe/runScripts/runAnalyzeDamBreak.py deleted file mode 100644 index 0d630a550..000000000 --- a/avaframe/runScripts/runAnalyzeDamBreak.py +++ /dev/null @@ -1,69 +0,0 @@ -""" - Run script for running the dam break problem on a sloping bed and compare to simulation results -""" - -# Load modules -import numpy as np -import pathlib -import pandas as pd - -# Local imports -from avaframe.com1DFA import com1DFA -from avaframe.in3Utils import fileHandlerUtils as fU -from avaframe.ana1Tests import damBreak -from avaframe.in3Utils import cfgUtils -from avaframe.in3Utils import logUtils -import avaframe.out3Plot.outAna1Plots as outAna1Plots - - - -# log file name; leave empty to use default runLog.log -logName = 'runAnalyzeDamBreakProblem' - -# Load settings from general configuration file -cfgMain = cfgUtils.getGeneralConfig() -avalancheDir = 'data/avaDamBreak' -cfgMain['MAIN']['avalancheDir'] = avalancheDir - -# Start logging -log = logUtils.initiateLogger(avalancheDir, logName) -log.info('MAIN SCRIPT') -log.info('Current avalanche: %s', avalancheDir) - -# create output directory for test result plots -outDirTest = pathlib.Path(avalancheDir, 'Outputs', 'ana1Tests') -fU.makeADir(outDirTest) - -# Load configuration -damBreakCfg = pathlib.Path(avalancheDir, 'Inputs', 'damBreak_com1DFACfg.ini') -cfg = cfgUtils.getModuleConfig(com1DFA, damBreakCfg) -cfgGen = cfg['GENERAL'] - -# Load flow thickness from analytical solution -solDam = damBreak.damBreakSol(avalancheDir, cfgMain, cfg, outDirTest) - -# create dataFrame of results -simDF, _ = cfgUtils.readAllConfigurationInfo(avalancheDir) - - -# if the analysis already exists and you only want to replot uncomment this (and put the good result name) -# pathToResults = pathlib.Path(avalancheDir, 'Outputs', 'ana1Tests', 'results5.p') -# if pathToResults.is_file(): -# simDF = pd.read_pickle(pathToResults) -simDF = damBreak.postProcessDamBreak(avalancheDir, cfgMain, cfg, simDF, solDam, outDirTest) - -# make convergence plot (if you add the fiting lines, make sure only the coloredBy and sizedBy parameters are varied) -fig1, ax1 = outAna1Plots.plotErrorConvergence(simDF, outDirTest, cfg['DAMBREAK'], 'nPart', 'vhErrorL2', - 'aPPK', 'nPPK0', logScale=True, fit=False) - -# make convergence plot -outAna1Plots.plotTimeCPULog(simDF, outDirTest, cfg['DAMBREAK'], 'nPart', 'aPPK', 'nPPK0') - -# do some filtering for the presentation plot -simDF = simDF[simDF['sphKernelRadius']==3] - -# make convergence plot (if you add the fiting lines, make sure only the coloredBy and sizedBy parameters are varied) -# same as plotErrorConvergence but adds the points corresponding 
to different coloredBy values one after the others -# and saves itermediate plots -fig1, ax1 = outAna1Plots.plotPresentation(simDF, outDirTest, cfg['DAMBREAK'], 'nPart', 'hErrorL2', - 'aPPK', 'nPPK0', logScale=True, fit=True) diff --git a/avaframe/runScripts/runAnalyzeSimilaritySol.py b/avaframe/runScripts/runAnalyzeSimilaritySol.py deleted file mode 100644 index 0a7af9ab6..000000000 --- a/avaframe/runScripts/runAnalyzeSimilaritySol.py +++ /dev/null @@ -1,81 +0,0 @@ -""" - Compare the com1DFA kernel results to the similarity solution - (to get com1DFA simulations first run runScripts/runSimilaritySol.py) - This script computes the similarity solution for a gliding avalanche on - a inclined plane according to similarity solution from : - Hutter, K., Siegel, M., Savage, S.B. et al. - Two-dimensional spreading of a granular avalanche down an inclined plane - Part I. theory. Acta Mechanica 100, 37–68 (1993). - https://doi.org/10.1007/BF01176861 - and compares it to the DFA kernel com1DFA -""" - -import pathlib -import pandas as pd - -# Local imports -import avaframe.in3Utils.initializeProject as initProj -import avaframe.in3Utils.fileHandlerUtils as fU -from avaframe.in3Utils import cfgUtils -from avaframe.in3Utils import logUtils -import avaframe.com1DFA.com1DFA as com1DFA -from avaframe.in1Data import getInput as gI -import avaframe.ana1Tests.simiSolTest as simiSolTest -import avaframe.out3Plot.outAna1Plots as outAna1Plots - - -# +++++++++SETUP CONFIGURATION++++++++++++++++++++++++ -# log file name; leave empty to use default runLog.log -logName = 'runSimilarityTest' - -# Load general configuration -cfgMain = cfgUtils.getGeneralConfig() -avalancheDir = 'data/avaSimilaritySol' - -# Clean avalancheDir directory of old work and output files -initProj.cleanSingleAvaDir(avalancheDir, keep=logName, deleteOutput=False) - -# Start logging -log = logUtils.initiateLogger(avalancheDir, logName) -log.info('MAIN SCRIPT') -log.info('Current avalanche: %s', avalancheDir) - -# Load configuration for similarity solution test -simiSolCfg = pathlib.Path(avalancheDir, 'Inputs', 'simiSol_com1DFACfg.ini') -cfg = cfgUtils.getModuleConfig(com1DFA, simiSolCfg) - -# create output directory for test result plots -outDirTest = pathlib.Path(avalancheDir, 'Outputs', 'ana1Tests') -fU.makeADir(outDirTest) - -# Load configuration info of all com1DFA simulations -simDF, _ = cfgUtils.readAllConfigurationInfo(avalancheDir) - -# compute the similartiy solution (this corresponds to our reference) -log.info('Computing similarity solution') -solSimi = simiSolTest.mainSimilaritySol(simiSolCfg) - -# if the analysis already exists and you only want to replot uncomment this (and put the good result name) -# pathToResults = pathlib.Path(avalancheDir, 'Outputs', 'ana1Tests', 'results5.p') -# if pathToResults.is_file(): -# simDF = pd.read_pickle(pathToResults) -simDF = simiSolTest.postProcessSimiSol(avalancheDir, cfgMain, cfg, simDF, solSimi, outDirTest) - - -# make convergence plot (if you add the fiting lines, make sure only the coloredBy and sizedBy parameters are varied) -fig1, ax1 = outAna1Plots.plotErrorConvergence(simDF, outDirTest, cfg['SIMISOL'], 'nPart', 'vhErrorL2', - 'aPPK', 'nPPK0', logScale=True, fit=False) - -# make convergence plot -outAna1Plots.plotTimeCPULog(simDF, outDirTest, cfg['SIMISOL'], 'nPart', 'sphKernelRadius', 'nPPK0') - -# do some filtering for the presentation plot -simDF = simDF[simDF['sphKernelRadius']==3] - -# make convergence plot (if you add the fiting lines, make sure only the coloredBy and sizedBy 
parameters are varied) -# same as plotErrorConvergence but adds the points corresponding to different coloredBy values one after the others -# and saves itermediate plots -fig1, ax1 = outAna1Plots.plotPresentation(simDF, outDirTest, cfg['SIMISOL'], 'nPart', 'hErrorL2', - 'aPPK', 'nPPK0', logScale=True, fit=True) - -outAna1Plots.plotTimeCPULog(simDF, outDirTest, cfg['SIMISOL'], 'nPart', 'aPPK', 'nPPK0') diff --git a/avaframe/runScripts/runDamBreak.py b/avaframe/runScripts/runDamBreak.py index a6672208a..4acb43d84 100644 --- a/avaframe/runScripts/runDamBreak.py +++ b/avaframe/runScripts/runDamBreak.py @@ -1,5 +1,5 @@ """ - Run script for running the dam break problem on a sloping bed and compare to simulation results + Run script for running the dam break test DFA simulations on an inclined plane and compare them to the analytical solution """ # Load modules @@ -7,38 +7,93 @@ from configupdater import ConfigUpdater # Local imports +import avaframe.in3Utils.fileHandlerUtils as fU from avaframe.com1DFA import com1DFA import avaframe.in3Utils.initializeProject as initProj from avaframe.in3Utils import cfgUtils +from avaframe.in3Utils import cfgHandling from avaframe.in3Utils import logUtils +import avaframe.ana1Tests.damBreakTest as damBreakTest +import avaframe.out3Plot.outAna1Plots as outAna1Plots +# +++++++++ REQUIRED ++++++++++++++++++++++++ # log file name; leave empty to use default runLog.log -logName = 'runDamBreakProblem' +logName = 'runDamBreakTest' +# if left empty, use the damBreakTestCfg.ini and local_damBreakTestCfg.ini configuration files +# use 'ana1Tests/figConvergence_damBreakTestCfg.ini' to produce the dam break test plots from the Theory Paper +fileOverride = '' +# ++++++++++++++++++++++++++++++ # Load settings from general configuration file cfgMain = cfgUtils.getGeneralConfig() avalancheDir = 'data/avaDamBreak' cfgMain['MAIN']['avalancheDir'] = avalancheDir -# Clean input directory(ies) of old work and output files -# initProj.cleanModuleFiles(avalancheDir, com1DFA) +# Clean input directory(ies) of old work files +initProj.cleanSingleAvaDir(avalancheDir, keep=logName, deleteOutput=False) +# setup work folder +workPath = pathlib.Path(avalancheDir, 'Work', 'ana1Tests', 'damBreakTest') +fU.makeADir(workPath) +# create output directory for test result plots +outDirTest = pathlib.Path(avalancheDir, 'Outputs', 'ana1Tests', 'damBreakTest') +fU.makeADir(outDirTest) # Start logging log = logUtils.initiateLogger(avalancheDir, logName) log.info('MAIN SCRIPT') log.info('Current avalanche: %s', avalancheDir) -# Load configuration -damBreakCfg = pathlib.Path(avalancheDir, 'Inputs', 'damBreak_com1DFACfg.ini') +# Load configuration for dam break test +damBreakCfg = cfgUtils.getModuleConfig(damBreakTest, fileOverride=fileOverride) +# ++++++++++ set configurations for all the used modules and override ++++++++++++ +# get comDFA configuration and save to file +com1DFACfg = cfgUtils.getModuleConfig(com1DFA, fileOverride='', modInfo=False, toPrint=False, + onlyDefault=damBreakCfg['com1DFA_override']['defaultConfig']) +com1DFACfg, damBreakCfg = cfgHandling.applyCfgOverride(com1DFACfg, damBreakCfg, com1DFA, addModValues=False) +com1DFACfgFile = cfgUtils.writeCfgFile(avalancheDir, com1DFA, com1DFACfg, fileName='com1DFA_settings', + filePath=workPath) -# Overwriting the sphKernelRadius and meshCellSize from the ini file with the values specified in [] -for sphKernelRadius in [5, 3]: +# run DFA simulations +sphKernelRadiusList = damBreakCfg['DAMBREAK']['sphKernelRadius'].split('|') +sphKernelRadiusList
= [float(i) for i in sphKernelRadiusList] +for sphKernelRadius in sphKernelRadiusList: updater = ConfigUpdater() - updater.read(damBreakCfg) + updater.read(com1DFACfgFile) updater['GENERAL']['sphKernelRadius'].value = sphKernelRadius updater['GENERAL']['meshCellSize'].value = sphKernelRadius updater.update_file() # call com1DFA to perform simulation - provide configuration file and release thickness function - dem, plotDict, reportDictList, simDF = com1DFA.com1DFAMain(avalancheDir, cfgMain, cfgFile=damBreakCfg) + initProj.cleanModuleFiles(avalancheDir, com1DFA, deleteOutput=False) + dem, plotDict, reportDictList, simDF = com1DFA.com1DFAMain(avalancheDir, cfgMain, cfgFile=com1DFACfgFile) + +# analyze simulations +# Load configuration info of all com1DFA simulations +simDF, _ = cfgUtils.readAllConfigurationInfo(avalancheDir) + +# Load flow thickness from analytical solution +solDam = damBreakTest.damBreakSol(avalancheDir, cfgMain, damBreakCfg['DAMBREAK'], com1DFACfg, outDirTest) + + +# if the analysis already exists and you only want to replot uncomment this (and put the good result name) +# pathToResults = pathlib.Path(avalancheDir, 'Outputs', 'ana1Tests', 'damBreakTest', 'results5.p') +# if pathToResults.is_file(): +# simDF = pd.read_pickle(pathToResults) +simDF = damBreakTest.postProcessDamBreak(avalancheDir, cfgMain, damBreakCfg, simDF, solDam, outDirTest) + +# do some filtering for the presentation plot +# simDF = simDF[simDF['sphKernelRadius']==3] + +# make convergence plot (if you add the fiting lines, make sure only the coloredBy and sizedBy parameters are varied) +fig1, ax1 = outAna1Plots.plotErrorConvergence(simDF, outDirTest, damBreakCfg['DAMBREAK'], 'nPart', 'vhErrorL2', + 'aPPK', 'sphKernelRadius', logScale=True, fit=True) + +# # make convergence plot +# outAna1Plots.plotTimeCPULog(simDF, outDirTest, damBreakCfg['DAMBREAK'], 'nPart', 'aPPK', 'sphKernelRadius') +# +# # make convergence plot (if you add the fiting lines, make sure only the coloredBy and sizedBy parameters are varied) +# # same as plotErrorConvergence but adds the points corresponding to different coloredBy values one after the others +# # and saves itermediate plots +# fig1, ax1 = outAna1Plots.plotPresentation(simDF, outDirTest, damBreakCfg['DAMBREAK'], 'nPart', 'vhErrorL2', +# 'aPPK', 'sphKernelRadius', logScale=True, fit=True) diff --git a/avaframe/runScripts/runEnergyLineTest.py b/avaframe/runScripts/runEnergyLineTest.py index 83d7ada33..dad0f2d4c 100644 --- a/avaframe/runScripts/runEnergyLineTest.py +++ b/avaframe/runScripts/runEnergyLineTest.py @@ -22,6 +22,11 @@ # +++++++++REQUIRED+++++++++++++ # log file name; leave empty to use default runLog.log logName = 'runEnergyLineTest' +# if left empty, use the energyLineTestCfg.ini and local_energyLineTestCfg.ini configuration files +# use 'ana1Tests/figAnalytic_energyLineTestCfg.ini' or 'ana1Tests/figCurvature_energyLineTestCfg.ini' +# or 'ana1Tests/figPressure_energyLineTestCfg.ini' to produce the energy line plots from the Theory Paper +# (use 'avaSlide' in the avaFrameCfg.ini) +fileOverride = '' # ++++++++++++++++++++++++++++++ # Load avalanche directory from general configuration file @@ -38,7 +43,7 @@ iP.cleanSingleAvaDir(avalancheDir, keep=logName, deleteOutput=False) workPath = pathlib.Path(avalancheDir, 'Work', 'energyLineTest') fU.makeADir(workPath) -energyLineTestCfg = cfgUtils.getModuleConfig(energyLineTest) +energyLineTestCfg = cfgUtils.getModuleConfig(energyLineTest, fileOverride=fileOverride) # ++++++++++ set configurations for all the used modules 
and override ++++++++++++ # get comDFA configuration and save to file diff --git a/avaframe/runScripts/runPlotSimiSol.py b/avaframe/runScripts/runPlotSimiSol.py deleted file mode 100644 index 1569f44ba..000000000 --- a/avaframe/runScripts/runPlotSimiSol.py +++ /dev/null @@ -1,41 +0,0 @@ -""" - Run com1DFA kernel and compare it tothe similarity solution - This script computes the similarity solution for a gliding avalanche on - a inclined plane according to similarity solution from : - Hutter, K., Siegel, M., Savage, S.B. et al. - Two-dimensional spreading of a granular avalanche down an inclined plane - Part I. theory. Acta Mechanica 100, 37–68 (1993). - https://doi.org/10.1007/BF01176861 - and compares it to the DFA kernel com1DFA -""" - -import pathlib -import pickle - -# Local imports -from avaframe.in3Utils import cfgUtils -from avaframe.in3Utils import logUtils -import avaframe.out3Plot.outAna1Plots as outAna1Plots -import avaframe.com1DFA.com1DFA as com1DFA - - -# +++++++++SETUP CONFIGURATION++++++++++++++++++++++++ -# log file name; leave empty to use default runLog.log -logName = 'runSimilarityTest' - -# Load general configuration -cfgMain = cfgUtils.getGeneralConfig() -avalancheDir = 'data/avaSimilaritySol' - -# Start logging -log = logUtils.initiateLogger(avalancheDir, logName) - -simiSolCfg = pathlib.Path(avalancheDir, 'Inputs', 'simiSol_com1DFACfg.ini') -cfg = cfgUtils.getModuleConfig(com1DFA, simiSolCfg) - -# create output directory for test result plots -outDirTest = pathlib.Path(avalancheDir, 'Outputs', 'ana1Tests') -with open(outDirTest / 'results.p', 'rb') as file: - simDF10 = pickle.load(file) - -outAna1Plots.plotErrorLog(simDF10, outDirTest, cfg['SIMISOL']) diff --git a/avaframe/runScripts/runRotationTest.py b/avaframe/runScripts/runRotationTest.py index 0e2695439..c7eb2549a 100644 --- a/avaframe/runScripts/runRotationTest.py +++ b/avaframe/runScripts/runRotationTest.py @@ -23,6 +23,10 @@ # +++++++++REQUIRED+++++++++++++ # log file name; leave empty to use default runLog.log logName = 'runRotationTest' +# if left empty, use the rotationTestCfg.ini and local_rotationTestCfg.ini configuration files +# use 'ana1Tests/figPressure_rotationTestCfg.ini' or 'ana1Tests/figAnalytic_rotationTestCfg.ini' to produce +# the rotation test plots from the Theory Paper (use 'avaTripleParabola' or 'avaTripleBowl' in the avaFrameCfg.ini) +fileOverride = '' # ++++++++++++++++++++++++++++++ # Load avalanche directory from general configuration file @@ -37,10 +41,10 @@ # ---------------- # Clean input directory(ies) of old work and output files -iP.cleanSingleAvaDir(avalancheDir, keep=logName, deleteOutput=False) +iP.cleanSingleAvaDir(avalancheDir, keep=logName, deleteOutput=True) workPath = pathlib.Path(avalancheDir, 'Work', 'energyLineTest') fU.makeADir(workPath) -rotationTestCfg = cfgUtils.getModuleConfig(rotationTest) +rotationTestCfg = cfgUtils.getModuleConfig(rotationTest, fileOverride=fileOverride) # ++++++++++ set configurations for all the used modules and override ++++++++++++ # get comDFA configuration and save to file diff --git a/avaframe/runScripts/runSimilaritySol.py b/avaframe/runScripts/runSimilaritySol.py index 9e02e42fb..c9bcfa53a 100644 --- a/avaframe/runScripts/runSimilaritySol.py +++ b/avaframe/runScripts/runSimilaritySol.py @@ -1,5 +1,5 @@ """ - Run com1DFA kernel and compare it tothe similarity solution + Run com1DFA kernel and compare it to the similarity solution This script computes the similarity solution for a gliding avalanche on a inclined plane according to 
similarity solution from : Hutter, K., Siegel, M., Savage, S.B. et al. @@ -10,28 +10,42 @@ """ import pathlib +import pandas as pd from configupdater import ConfigUpdater # Local imports import avaframe.in3Utils.initializeProject as initProj import avaframe.in3Utils.fileHandlerUtils as fU from avaframe.in3Utils import cfgUtils +from avaframe.in3Utils import cfgHandling from avaframe.in3Utils import logUtils import avaframe.com1DFA.com1DFA as com1DFA from avaframe.in1Data import getInput as gI import avaframe.ana1Tests.simiSolTest as simiSolTest +import avaframe.out3Plot.outAna1Plots as outAna1Plots -# +++++++++SETUP CONFIGURATION++++++++++++++++++++++++ +# +++++++++ REQUIRED ++++++++++++++++++++++++ # log file name; leave empty to use default runLog.log logName = 'runSimilarityTest' +# if left empty, use the simiSolTestCfg.ini and local_simiSolTestCfg.ini configuration files +# use 'ana1Tests/figConvergence_simiSolTestCfg.ini' to produce the similarity solution plots from the Theory Paper +fileOverride = '' +# ++++++++++++++++++++++++++++++ + # Load general configuration cfgMain = cfgUtils.getGeneralConfig() avalancheDir = 'data/avaSimilaritySol' -# Clean input directory(ies) of old work and output files +# Clean input directory(ies) of old work files initProj.cleanSingleAvaDir(avalancheDir, keep=logName, deleteOutput=False) +# setup work folder +workPath = pathlib.Path(avalancheDir, 'Work', 'ana1Tests', 'simiSolTest') +fU.makeADir(workPath) +# create output directory for test result plots +outDirTest = pathlib.Path(avalancheDir, 'Outputs', 'ana1Tests') +fU.makeADir(outDirTest) # Start logging log = logUtils.initiateLogger(avalancheDir, logName) @@ -39,23 +53,60 @@ log.info('Current avalanche: %s', avalancheDir) # Load configuration for similarity solution test -simiSolCfg = pathlib.Path(avalancheDir, 'Inputs', 'simiSol_com1DFACfg.ini') - -# create output directory for test result plots -outDirTest = pathlib.Path(avalancheDir, 'Outputs', 'ana1Tests') -fU.makeADir(outDirTest) +simiSolCfg = cfgUtils.getModuleConfig(simiSolTest, fileOverride=fileOverride) +# ++++++++++ set configurations for all the used modules and override ++++++++++++ +# get comDFA configuration and save to file +com1DFACfg = cfgUtils.getModuleConfig(com1DFA, fileOverride='', modInfo=False, toPrint=False, + onlyDefault=simiSolCfg['com1DFA_override']['defaultConfig']) +com1DFACfg, simiSolCfg = cfgHandling.applyCfgOverride(com1DFACfg, simiSolCfg, com1DFA, addModValues=False) +com1DFACfgFile = cfgUtils.writeCfgFile(avalancheDir, com1DFA, com1DFACfg, fileName='com1DFA_settings', + filePath=workPath) -for sphKernelRadius in [5, 3]: +# run DFA simulations +sphKernelRadiusList = simiSolCfg['SIMISOL']['sphKernelRadius'].split('|') +sphKernelRadiusList = [float(i) for i in sphKernelRadiusList] +for sphKernelRadius in sphKernelRadiusList: updater = ConfigUpdater() - updater.read(simiSolCfg) + updater.read(com1DFACfgFile) updater['GENERAL']['sphKernelRadius'].value = sphKernelRadius updater['GENERAL']['meshCellSize'].value = sphKernelRadius updater.update_file() - cfg = cfgUtils.getModuleConfig(com1DFA, simiSolCfg) # Define release thickness distribution demFile = gI.getDEMPath(avalancheDir) - relDict = simiSolTest.getReleaseThickness(avalancheDir, cfg, demFile) + relDict = simiSolTest.getReleaseThickness(avalancheDir, simiSolCfg['SIMISOL'], demFile, sphKernelRadius) # call com1DFA to perform simulations - provide configuration file and release thickness function # (may be multiple sims) - _, _, _, simDF = 
com1DFA.com1DFAMain(avalancheDir, cfgMain, cfgFile=simiSolCfg) + initProj.cleanModuleFiles(avalancheDir, com1DFA, deleteOutput=False) + _, _, _, simDF = com1DFA.com1DFAMain(avalancheDir, cfgMain, cfgFile=com1DFACfgFile) + + +# analyze simulations +# Load configuration info of all com1DFA simulations +simDF, _ = cfgUtils.readAllConfigurationInfo(avalancheDir) + +# compute the similartiy solution (this corresponds to our reference) +log.info('Computing similarity solution') +solSimi = simiSolTest.mainSimilaritySol(simiSolCfg['SIMISOL'], com1DFACfg) + +# if the analysis already exists and you only want to replot uncomment this (and put the good result name) +# pathToResults = pathlib.Path(avalancheDir, 'Outputs', 'ana1Tests', 'simiSolTest', 'results20.p') +# if pathToResults.is_file(): +# simDF = pd.read_pickle(pathToResults) +simDF = simiSolTest.postProcessSimiSol(avalancheDir, cfgMain, simiSolCfg, simDF, solSimi, outDirTest) + +# do some filtering before plotting +# simDF = simDF[simDF['nPPK0'] == 30] + +# make convergence plot (if you add the fitting lines, make sure only the coloredBy and sizedBy parameters are varied) +fig1, ax1 = outAna1Plots.plotErrorConvergence(simDF, outDirTest, simiSolCfg['SIMISOL'], 'nPart', 'vhErrorL2', + 'aPPK', 'sphKernelRadius', logScale=True, fit=True) + +# # make cpu time plot +# outAna1Plots.plotTimeCPULog(simDF, outDirTest, simiSolCfg['SIMISOL'], 'nPart', 'sphKernelRadius', 'sphKernelRadius') +# +# # make convergence plot (if you add the fiting lines, make sure only the coloredBy and sizedBy parameters are varied) +# # same as plotErrorConvergence but adds the points corresponding to different coloredBy values one after the others +# # and saves itermediate plots +# fig1, ax1 = outAna1Plots.plotPresentation(simDF, outDirTest, simiSolCfg['SIMISOL'], 'nPart', 'hErrorL2', +# 'aPPK', 'sphKernelRadius', logScale=True, fit=True) diff --git a/avaframe/tests/data/testDamBreak/damBreak_com1DFACfg.ini b/avaframe/tests/data/testDamBreak/damBreak_com1DFACfg.ini index ee457f65d..a8fe490ee 100644 --- a/avaframe/tests/data/testDamBreak/damBreak_com1DFACfg.ini +++ b/avaframe/tests/data/testDamBreak/damBreak_com1DFACfg.ini @@ -1,15 +1,74 @@ ### Config File - This file contains the main settings for the simulation run for the dam break problem ## This file is part of Avaframe. +### Config File - This file contains the main settings for the dam break test module +## Set your parameters +# This file will be overridden by local_damBreakTestCfg.ini if it exists +# So copy this file to local_damBreakTestCfg.ini, adjust your variables there -[GENERAL] +[DAMBREAK] +#sphKernel radius for the com1DFA simulations +sphKernelRadius = 10 + +# slope angle [°] +phi = 22 +# bed friction angle [°] +delta = 21 +u0 = 0. 
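runSimilaritySol.py (like runDamBreak.py above) calls plotErrorConvergence with fit=True; as the outAna1Plots.py hunks earlier in this diff show, the fitted line is a power law in nPart obtained from np.polyfit on the logarithms. A tiny self-contained sketch of that fit with fabricated data (not AvaFrame results):

import numpy as np

# fabricated convergence data following a known power law, only to show the fit
nPart = np.array([1.0e3, 4.0e3, 1.6e4, 6.4e4])
hErrorL2 = 2.0 * nPart ** -0.5

# straight-line fit in log-log space, same call pattern as in outAna1Plots
p, residuals, _, _, _ = np.polyfit(np.log(nPart), np.log(hErrorL2), deg=1, full=True)
exponent = p[0]           # recovered power-law exponent (about -0.5)
prefactor = np.exp(p[1])  # recovered prefactor (about 2.0)
print(exponent, prefactor)

This is also why the run scripts warn that, when the fit lines are added, only the coloredBy and sizedBy parameters should be varied: otherwise several parameters change along one fitted curve and the exponent loses its meaning.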
+# time end and resolution for the analytic solution +tEnd = 30 +dt = 0.1 +# space resolution +dx = 0.5 +# domain extend for error computation +# in x direction +xStart = -200 +xEnd = 220 +# in y direction +yStart = -50 +yEnd = 50 +# start x position of the dam +xBack = -120 +# xFront = 0 + +# set time step of analysis +tSave = 1 +# use only the component of the velocity/momentum in the flow direction (vector going down the inclined plane in x dir) +projectVelocity = False + +#++++Plotting parameters++++++ +# list of parameters to display in the summary plot (list of parameters separated by |) +paramInfo = sphKernelRadius|aPPK|nPPK0|nPart +# list of parameters to display in the convergence plot (list of parameters separated by |) +paramConvInfo = sphKR0|nPPK0|cMax +# plotting flags +# only analyze and plot the tSave time step +onlyLast = False +# plot error function of time for each simulation +plotErrorTime = False +# plot individual figure for the h, hu and u error for each saved time step +plotSequence = False +# use relative difference +relativError = True +# when plotting, the domain extent is scaleCoef times bigger than the data extent +scaleCoef = 1.05 + + + +[com1DFA_override] +# use default com1DFA config as base configuration (True) and override following parameters +# if False and local is available use local +defaultConfig = True #++++++++++++++++ Simulation type # list of simulations that shall be performed (null, ent, res, entres, available (use all available input data)) simTypeList = null - #+++++++++++++ Output++++++++++++ -# desired result Parameters (ppr, pft, pfv, FT, FV, P, particles) - separated by | +# desired result Parameters (ppr, pfd, pfv, FD, FV, P, particles) - separated by | resType = FT|FV|Vx|Vy|Vz +# saving time step, i.e. time in seconds (first and last time step are always saved) +# option 1: give an interval with start:interval in seconds (tStep = 0:5 - this will save desired results every 5 seconds for the full simulation) +# option 2: explicitly list all desired time steps (closest to actual computational time step) separated by | (example tSteps = 1|50.2|100) +# NOTE: initial and last time step are always saved! tSteps = 0:1 #++++++++++++++++ particle Initialisation +++++++++ @@ -18,7 +77,8 @@ tSteps = 0:1 iniStep = True # max number of iterations - high number might cause significant increase in computational time maxIterations = 30 -ftOptionIni = False +# buffer zone factor multiplied with sphKernelRadius +bufferZoneFactor = 4 #+++++++++SNOW properties # True if release thickness should be read from shapefile file; if False - relTh read from ini file @@ -91,46 +151,3 @@ explicitFriction = 1 frictModel = Coulomb #+++++++++++++SamosAt friction model mu = 0.3838640350354158 - - -[DAMBREAK] -# slope angle [°] -phi = 22 -# bed friction angle [°] -delta = 21 -u0 = 0. 
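The new [com1DFA_override] section introduced above (and in the other test configs in this diff) lets a test carry its own com1DFA settings: the test config is loaded first, then its override values are applied on top of the com1DFA configuration via cfgHandling.applyCfgOverride. The snippet below is only a schematic of that idea, not the actual applyCfgOverride implementation, and is restricted to [GENERAL] keys for brevity; the tEnd and cMax values are taken from the simiSolTestCfg.ini override earlier in this diff.

from configparser import ConfigParser

# base com1DFA configuration (defaults)
com1DFACfg = ConfigParser()
com1DFACfg.read_string('[GENERAL]\ntEnd = 5\ncMax = 0.02\n')

# test configuration with an override section
testCfg = ConfigParser()
testCfg.read_string('[com1DFA_override]\ndefaultConfig = True\ntEnd = 30\ncMax = 0.01\n')

# copy override values onto the matching base options (configparser lower-cases keys)
for key, value in testCfg['com1DFA_override'].items():
    if key != 'defaultconfig' and com1DFACfg.has_option('GENERAL', key):
        com1DFACfg['GENERAL'][key] = value

print(dict(com1DFACfg['GENERAL']))  # {'tend': '30', 'cmax': '0.01'}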
-# time end and resolution for the analytic solution -tEnd = 30 -dt = 0.1 -# space resolution -dx = 0.5 -# domain extend for error computation -# in x direction -xStart = -200 -xEnd = 220 -# in y direction -yStart = -50 -yEnd = 50 -# start x position of the dam -xBack = -120 -# xFront = 0 - -# set time step of analysis -tSave = 1 -# use only the component of the velocity/momentum in the flow direction (vector going down the inclined plane in x dir) -projectVelocity = False - -#++++Plotting parameters++++++ -# list of parameters to display in the summary plot (list of parameters separated by |) -paramInfo = sphKernelRadius|aPPK|nPPK0|nPart -# plotting flags -# only analyze and plot the tSave time step -onlyLast = True -# plot error function of time for each simulation -plotErrorTime = False -# plot individual figure for the h, hu and u error for each saved time step -plotSequence = False -# use relative difference -relativError = True -# when plotting, the domain extent is scaleCoef times bigger than the data extent -scaleCoef = 1.02 diff --git a/avaframe/tests/data/testEnergyLine/energyLineTestCfg.ini b/avaframe/tests/data/testEnergyLine/energyLineTestCfg.ini index 0f008797a..23cab6ad1 100644 --- a/avaframe/tests/data/testEnergyLine/energyLineTestCfg.ini +++ b/avaframe/tests/data/testEnergyLine/energyLineTestCfg.ini @@ -119,4 +119,17 @@ frictModel = Coulomb # tan of bed friction angle used for: samosAT, Coulomb, Voellmy mu = 0.4 +#+++++++++++++++++ Dam Parameters +# the dam foot print is given in Intputs/DAM as a shape file line +# the dam is located on the left side of the dam line +# (when one travels from the first point to the last point of the shapefile dam line) +# use dam? dam = False +# behavior of the particles when they hit the dam (are they fully reflected: restitutionCoefficient = 1 or only reflected +# along the tangent component of the dam: restitutionCoefficient = 0 or something in between: restitutionCoefficient = 0-1) +restitutionCoefficient = 1 +# number of iterations allowed for the interactions between a particle and the dam during one time step: +# 1: particle can only bounce once (may cross the dam at another location then) +nIterDam = 1 +# 1 to activate energy loss due to snow flowing over dam +dissDam = 1 diff --git a/avaframe/tests/data/testSimiSol/simiSol_com1DFACfg.ini b/avaframe/tests/data/testSimiSol/simiSol_com1DFACfg.ini index fd1e4986e..9af35bb3a 100644 --- a/avaframe/tests/data/testSimiSol/simiSol_com1DFACfg.ini +++ b/avaframe/tests/data/testSimiSol/simiSol_com1DFACfg.ini @@ -1,11 +1,68 @@ -### Config File - This file contains the main settings for the similarity -### solution run with com1DFAPy +### Config File - This file contains the main settings for the similarity solution test module +## Set your parameters +# This file will be overridden by local_simiSolTestCfg.ini if it exists +# So copy this file to local_simiSolTestCfg.ini, adjust your variables there +[SIMISOL] +#sphKernel radius for the com1DFA simulations +sphKernelRadius = 8 + +# dimensioning parameters +L_x = 80. +L_y = 80. + +# release thickness +relTh = 4. + +# bed friction angle +bedFrictionAngle = 25. +# internal friction angle +internalFrictionAngle = 25. +# plane inclination angle +planeinclinationAngle = 35. + +# Flag earthpressure coefficients +# if false takes 1 +flagEarth = False + +# save analysis plots at time step dtSol +dtSol = 50. 
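The dam parameters added to energyLineTestCfg.ini above describe the particle-dam interaction: restitutionCoefficient = 1 means the particle is fully reflected, 0 means only the tangential component of its velocity is kept. One schematic reading of that comment, purely illustrative and not the com1DFA dam implementation:

import numpy as np

# split the velocity into components normal and tangential to the dam and
# reflect the normal part scaled by the restitution coefficient k
def reflectAtDam(vel, damNormal, k):
    damNormal = damNormal / np.linalg.norm(damNormal)
    vNormal = np.dot(vel, damNormal) * damNormal
    vTangent = vel - vNormal
    return vTangent - k * vNormal

vel = np.array([3.0, -2.0])
damNormal = np.array([0.0, 1.0])
print(reflectAtDam(vel, damNormal, 1.0))  # [3. 2.]  fully reflected
print(reflectAtDam(vel, damNormal, 0.0))  # [3. 0.]  only the tangential component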
+ +# which time step should be saved +tSave = 5 + +#++++Plotting parameters++++++ +# list of parameters to display in the summary plot (list of parameters separated by |) +paramInfo = sphKernelRadius|aPPK|nPPK0|nPart +# list of parameters to display in the convergence plot (list of parameters separated by |) +paramConvInfo = sphKR0|nPPK0|cMax +# plotting flags +# only analyze and plot the tSave time step +onlyLast = False +# plot error function of time for each simulation +plotErrorTime = False +# plot individual figure for the h, hu and u error for each saved time step +plotSequence = False +# use relative difference +relativError = True +# when plotting, the domain extent is scaleCoef times bigger than the data extent +scaleCoef = 1.02 -[GENERAL] + +[com1DFA_override] +# use default com1DFA config as base configuration (True) and override following parameters +# if False and local is available use local +defaultConfig = True +#++++++++++++++++ Simulation type +# list of simulations that shall be performed (null, ent, res, entres, available (use all available input data)) +simTypeList = null #+++++++++++++ Output++++++++++++ -# desired result Parameters (ppr, pft, pfv, FT, FV, P, particles) - separated by | +# desired result Parameters (ppr, pfd, pfv, FD, FV, P, particles) - separated by | resType = FT|FV|Vx|Vy|Vz +# saving time step, i.e. time in seconds (first and last time step are always saved) +# option 1: give an interval with start:interval in seconds (tStep = 0:5 - this will save desired results every 5 seconds for the full simulation) +# option 2: explicitly list all desired time steps (closest to actual computational time step) separated by | (example tSteps = 1|50.2|100) +# NOTE: initial and last time step are always saved! tSteps = 0:1 #+++++++++SNOW properties @@ -68,42 +125,3 @@ frictModel = Coulomb explicitFriction = 1 #+++++++++++++SamosAt friction model mu = 0.466307658 - -[SIMISOL] -# which time step should be saved -tSave = 5 -# dimensioning parameters -L_x = 80. -L_y = 80. - -# release thickness -relTh = 4. - -# bed friction angle -bedFrictionAngle = 25. -# internal friction angle -internalFrictionAngle = 25. -# plane inclination angle -planeinclinationAngle = 35. - -# Flag earthpressure coefficients -# if false takes 1 -flagEarth = False - -# save analysis plots at time step dtSol -dtSol = 50. - -#++++Plotting parameters++++++ -# list of parameters to display in the summary plot (list of parameters separated by |) -paramInfo = sphKernelRadius|aPPK|nPPK0|nPart -# plotting flags -# only analyze and plot the tSave time step -onlyLast = True -# plot error function of time for each simulation -plotErrorTime = False -# plot individual figure for the h, hu and u error for each saved time step -plotSequence = False -# use relative difference -relativError = True -# when plotting, the domain extent is scaleCoef times bigger than the data extent -scaleCoef = 1.02 diff --git a/avaframe/tests/test_ana1Tests.py b/avaframe/tests/test_ana1Tests.py index 9462dcc8d..76c0bce6c 100644 --- a/avaframe/tests/test_ana1Tests.py +++ b/avaframe/tests/test_ana1Tests.py @@ -71,8 +71,14 @@ def test_getIntersection(capfd): dirname = pathlib.Path(__file__).parents[0] energyLineTestCfgFile = dirname / '..' 
/ 'tests' / 'data' / 'testEnergyLine' / 'energyLineTestCfg.ini' - energyLineTestCfg, modInfo = cfgUtils.getModuleConfig(com1DFA, fileOverride=str(energyLineTestCfgFile), - modInfo=True) + energyLineTestCfg = cfgUtils.getModuleConfig(energyLineTest, fileOverride=energyLineTestCfgFile) + + # ++++++++++ set configurations for all the used modules and override ++++++++++++ + # get comDFA configuration and save to file + com1DFACfg = cfgUtils.getModuleConfig(com1DFA, fileOverride='', modInfo=False, toPrint=False, + onlyDefault=energyLineTestCfg['com1DFA_override']['defaultConfig']) + com1DFACfg, energyLineTestCfg = cfgHandling.applyCfgOverride(com1DFACfg, energyLineTestCfg, com1DFA, addModValues=False) + # com1DFACfgFile = cfgUtils.writeCfgFile(avalancheDir, com1DFA, com1DFACfg, fileName='com1DFA_settings', filePath=workPath) mu = 1 csz = 1 @@ -144,11 +150,17 @@ def test_getEnergyInfo(capfd): dirname = pathlib.Path(__file__).parents[0] energyLineTestCfgFile = dirname / '..' / 'tests' / 'data' / 'testEnergyLine' / 'energyLineTestCfg.ini' - energyLineTestCfg, modInfo = cfgUtils.getModuleConfig(com1DFA, fileOverride=str(energyLineTestCfgFile), - modInfo=True) + energyLineTestCfg = cfgUtils.getModuleConfig(energyLineTest, fileOverride=energyLineTestCfgFile) + + # ++++++++++ set configurations for all the used modules and override ++++++++++++ + # get comDFA configuration and save to file + com1DFACfg = cfgUtils.getModuleConfig(com1DFA, fileOverride='', modInfo=False, toPrint=False, + onlyDefault=energyLineTestCfg['com1DFA_override']['defaultConfig']) + com1DFACfg, energyLineTestCfg = cfgHandling.applyCfgOverride(com1DFACfg, energyLineTestCfg, com1DFA, addModValues=False) + # com1DFACfgFile = cfgUtils.writeCfgFile(avalancheDir, com1DFA, com1DFACfg, fileName='com1DFA_settings', filePath=workPath) g = 9.81 - alphaDeg = 30 + alphaDeg = 40 csz = 5 mu = np.tan(np.radians(alphaDeg)) @@ -169,10 +181,10 @@ def test_getEnergyInfo(capfd): print(zGeomL) print(errorEnergyTest) atol = 1e-10 - assert errorEnergyTest['runOutZError'] == pytest.approx(-10, abs=atol) - assert errorEnergyTest['runOutSError'] == pytest.approx(10, abs=atol) - assert errorEnergyTest['runOutAngleError'] == pytest.approx(15, abs=atol) - assert errorEnergyTest['rmseVelocityElevation'] == pytest.approx(2.559271214294478, abs=atol) + assert errorEnergyTest['runOutZError'] == pytest.approx(2.372464521180824, abs=atol) + assert errorEnergyTest['runOutSError'] == pytest.approx(-4.744929042361651, abs=atol) + assert errorEnergyTest['runOutAngleError'] == pytest.approx(5, abs=atol) + assert errorEnergyTest['rmseVelocityElevation'] == pytest.approx(0.9743001172810509, abs=atol) def test_mainEnegyLineTest(tmp_path): diff --git a/avaframe/tests/test_damBreak.py b/avaframe/tests/test_damBreak.py index f0e5729a4..a7caccd03 100644 --- a/avaframe/tests/test_damBreak.py +++ b/avaframe/tests/test_damBreak.py @@ -12,18 +12,22 @@ # Local imports from avaframe.in3Utils import cfgUtils +from avaframe.in3Utils import cfgHandling from avaframe.in3Utils import fileHandlerUtils as fU import avaframe.com1DFA.com1DFA as com1DFA -import avaframe.ana1Tests.damBreak as damBreak +import avaframe.ana1Tests.damBreakTest as damBreak import avaframe.out3Plot.outAna1Plots as outAna1Plots def test_mainCompareSimSolCom1DFA(tmp_path): dirname = pathlib.Path(__file__).parents[0] - damBreakCfg = dirname / '..' / 'tests' / 'data' / 'testDamBreak' / 'damBreak_com1DFACfg.ini' + damBreakCfgFile = dirname / '..' 
/ 'tests' / 'data' / 'testDamBreak' / 'damBreak_com1DFACfg.ini' sourceDir = dirname / '..' / 'data' / 'avaDamBreak' / 'Inputs' destDir = tmp_path / 'avaDamBreak' / 'Inputs' avalancheDir = tmp_path / 'avaDamBreak' + # setup work folder + workPath = pathlib.Path(avalancheDir, 'Work', 'ana1Tests', 'damBreakTest') + fU.makeADir(workPath) shutil.copytree(sourceDir, destDir) @@ -31,28 +35,35 @@ def test_mainCompareSimSolCom1DFA(tmp_path): fU.makeADir(outDirTest) cfgMain = cfgUtils.getGeneralConfig() - cfg = cfgUtils.getModuleConfig(com1DFA, damBreakCfg) + damBreakCfg = cfgUtils.getModuleConfig(damBreak, fileOverride=damBreakCfgFile) + # ++++++++++ set configurations for all the used modules and override ++++++++++++ + # get comDFA configuration and save to file + com1DFACfg = cfgUtils.getModuleConfig(com1DFA, fileOverride='', modInfo=False, toPrint=False, + onlyDefault=damBreakCfg['com1DFA_override']['defaultConfig']) + com1DFACfg, damBreakCfg = cfgHandling.applyCfgOverride(com1DFACfg, damBreakCfg, com1DFA, addModValues=False) + com1DFACfgFile = cfgUtils.writeCfgFile(avalancheDir, com1DFA, com1DFACfg, fileName='com1DFA_settings', + filePath=workPath) # call com1DFA to perform simulations - provide configuration file and release thickness function # (may be multiple sims) - _, _, _, simDF = com1DFA.com1DFAMain(avalancheDir, cfgMain, cfgFile=damBreakCfg) + _, _, _, simDF = com1DFA.com1DFAMain(avalancheDir, cfgMain, cfgFile=com1DFACfgFile) simDF, _ = cfgUtils.readAllConfigurationInfo(avalancheDir) - solDam = damBreak.damBreakSol(avalancheDir, cfgMain, cfg, outDirTest) + solDam = damBreak.damBreakSol(avalancheDir, cfgMain, damBreakCfg['DAMBREAK'], com1DFACfg, outDirTest) - simDF = damBreak.postProcessDamBreak(avalancheDir, cfgMain, cfg, simDF, solDam, outDirTest) + simDF = damBreak.postProcessDamBreak(avalancheDir, cfgMain, damBreakCfg, simDF, solDam, outDirTest) # make convergence plot - fig1, ax1 = outAna1Plots.plotErrorConvergence(simDF, outDirTest, cfg['DAMBREAK'], 'nPart', 'hErrorL2', + fig1, ax1 = outAna1Plots.plotErrorConvergence(simDF, outDirTest, damBreakCfg['DAMBREAK'], 'nPart', 'hErrorL2', 'aPPK', 'nPPK0', logScale=True) - outAna1Plots.plotTimeCPULog(simDF, outDirTest, cfg['DAMBREAK'], 'nPart', 'aPPK', 'nPPK0') + outAna1Plots.plotTimeCPULog(simDF, outDirTest, damBreakCfg['DAMBREAK'], 'nPart', 'aPPK', 'nPPK0') simDF = simDF[simDF['nPPK0']==15] - fig1, ax1 = outAna1Plots.plotPresentation(simDF, outDirTest, cfg['DAMBREAK'], 'nPart', 'hErrorL2', + fig1, ax1 = outAna1Plots.plotPresentation(simDF, outDirTest, damBreakCfg['DAMBREAK'], 'nPart', 'hErrorL2', 'aPPK', 'nPPK0', logScale=True, fit=True) simDF = simDF[simDF['aPPK']==-0.5] - cfg['DAMBREAK']['plotErrorTime'] = 'True' - cfg['DAMBREAK']['plotSequence'] = 'True' - cfg['DAMBREAK']['onlyLast'] = 'False' - simDF = damBreak.postProcessDamBreak(avalancheDir, cfgMain, cfg, simDF, solDam, outDirTest) + damBreakCfg['DAMBREAK']['plotErrorTime'] = 'True' + damBreakCfg['DAMBREAK']['plotSequence'] = 'True' + damBreakCfg['DAMBREAK']['onlyLast'] = 'False' + simDF = damBreak.postProcessDamBreak(avalancheDir, cfgMain, damBreakCfg, simDF, solDam, outDirTest) diff --git a/avaframe/tests/test_simiSol.py b/avaframe/tests/test_simiSol.py index 49756437f..ac69c856f 100644 --- a/avaframe/tests/test_simiSol.py +++ b/avaframe/tests/test_simiSol.py @@ -12,6 +12,7 @@ # Local imports from avaframe.in3Utils import cfgUtils +from avaframe.in3Utils import cfgHandling from avaframe.in3Utils import fileHandlerUtils as fU import avaframe.com1DFA.com1DFA as com1DFA from 
avaframe.in1Data import getInput as gI @@ -21,10 +22,13 @@ def test_mainCompareSimSolCom1DFA(tmp_path): dirname = pathlib.Path(__file__).parents[0] - simiSolCfg = dirname / '..' / 'tests' / 'data' / 'testSimiSol' / 'simiSol_com1DFACfg.ini' + simiSolCfgFile = dirname / '..' / 'tests' / 'data' / 'testSimiSol' / 'simiSol_com1DFACfg.ini' sourceDir = dirname / '..' / 'data' / 'avaSimilaritySol' / 'Inputs' destDir = tmp_path / 'avaSimilaritySol' / 'Inputs' avalancheDir = tmp_path / 'avaSimilaritySol' + # setup work folder + workPath = pathlib.Path(avalancheDir, 'Work', 'ana1Tests', 'simiSolTest') + fU.makeADir(workPath) shutil.copytree(sourceDir, destDir) @@ -32,30 +36,40 @@ def test_mainCompareSimSolCom1DFA(tmp_path): fU.makeADir(outDirTest) cfgMain = cfgUtils.getGeneralConfig() - cfg = cfgUtils.getModuleConfig(com1DFA, simiSolCfg) + + # Load configuration for similarity solution test + simiSolCfg = cfgUtils.getModuleConfig(simiSolTest, fileOverride=simiSolCfgFile) + # ++++++++++ set configurations for all the used modules and override ++++++++++++ + # get comDFA configuration and save to file + com1DFACfg = cfgUtils.getModuleConfig(com1DFA, fileOverride='', modInfo=False, toPrint=False, + onlyDefault=simiSolCfg['com1DFA_override']['defaultConfig']) + com1DFACfg, simiSolCfg = cfgHandling.applyCfgOverride(com1DFACfg, simiSolCfg, com1DFA, addModValues=False) + com1DFACfgFile = cfgUtils.writeCfgFile(avalancheDir, com1DFA, com1DFACfg, fileName='com1DFA_settings', + filePath=workPath) # Define release thickness distribution demFile = gI.getDEMPath(avalancheDir) - relDict = simiSolTest.getReleaseThickness(avalancheDir, cfg, demFile) + relDict = simiSolTest.getReleaseThickness(avalancheDir, simiSolCfg['SIMISOL'], demFile, + simiSolCfg['SIMISOL'].getfloat('sphKernelRadius')) # call com1DFA to perform simulations - provide configuration file and release thickness function # (may be multiple sims) - _, _, _, simDF = com1DFA.com1DFAMain(avalancheDir, cfgMain, cfgFile=simiSolCfg) + _, _, _, simDF = com1DFA.com1DFAMain(avalancheDir, cfgMain, cfgFile=com1DFACfgFile) simDF, _ = cfgUtils.readAllConfigurationInfo(avalancheDir) - solSimi = simiSolTest.mainSimilaritySol(simiSolCfg) + solSimi = simiSolTest.mainSimilaritySol(simiSolCfg['SIMISOL'], com1DFACfg) - simDF = simiSolTest.postProcessSimiSol(avalancheDir, cfgMain, cfg, simDF, solSimi, outDirTest) + simDF = simiSolTest.postProcessSimiSol(avalancheDir, cfgMain, simiSolCfg, simDF, solSimi, outDirTest) # make convergence plot - fig1, ax1 = outAna1Plots.plotErrorConvergence(simDF, outDirTest, cfg['SIMISOL'], 'nPart', 'hErrorL2', - 'aPPK', 'cMax', logScale=True) + fig1, ax1 = outAna1Plots.plotErrorConvergence(simDF, outDirTest, simiSolCfg['SIMISOL'], 'nPart', 'vhErrorL2', + 'aPPK', 'cMax', logScale=True) - outAna1Plots.plotTimeCPULog(simDF, outDirTest, cfg['SIMISOL'], 'nPart', 'aPPK', 'nPPK0') + outAna1Plots.plotTimeCPULog(simDF, outDirTest, simiSolCfg['SIMISOL'], 'nPart', 'aPPK', 'nPPK0') simDF = simDF[simDF['nPPK0']==15] - fig1, ax1 = outAna1Plots.plotPresentation(simDF, outDirTest, cfg['SIMISOL'], 'nPart', 'hErrorL2', + fig1, ax1 = outAna1Plots.plotPresentation(simDF, outDirTest, simiSolCfg['SIMISOL'], 'nPart', 'hErrorL2', 'aPPK', 'nPPK0', logScale=True, fit=True) simDF = simDF[simDF['aPPK']==-0.5] - cfg['SIMISOL']['plotErrorTime'] = 'True' - cfg['SIMISOL']['plotSequence'] = 'True' - cfg['SIMISOL']['onlyLast'] = 'False' - simDF = simiSolTest.postProcessSimiSol(avalancheDir, cfgMain, cfg, simDF, solSimi, outDirTest) + simiSolCfg['SIMISOL']['plotErrorTime'] = 
'True' + simiSolCfg['SIMISOL']['plotSequence'] = 'True' + simiSolCfg['SIMISOL']['onlyLast'] = 'False' + simDF = simiSolTest.postProcessSimiSol(avalancheDir, cfgMain, simiSolCfg, simDF, solSimi, outDirTest) diff --git a/docs/DFAnumerics.rst b/docs/DFAnumerics.rst index c41d5a221..a930dd9d0 100644 --- a/docs/DFAnumerics.rst +++ b/docs/DFAnumerics.rst @@ -1,64 +1,103 @@ com1DFA DFA-Kernel numerics ============================ +In :ref:`theoryCom1DFA`, the mass and momentum equations were derived using a Lagrangian approach. +In order to solve this set of equations numerically, we employ a mixed particle and grid approach. +We discretize the material into particles and solve the momentum equation for these particles +(:cite:`Sa2007,SaGr2009`). +The pressure gradients are computed using a SPH method +(**S**\ moothed **P**\ article **H**\ ydrodynamics :cite:`Mo1992`). Some of the +advantages of the SPH method are that free surface flows, material boundaries and +moving boundary conditions are considered implicitly. In addition, large +deformations can be modeled due to the fact that the method is not based +on a mesh. +However, we use a grid to compute several parameters that are required for the computations, +such as surface normal vectors and flow thickness. -.. warning:: - This theory has not been fully reviewed yet. +Numerical discretization: a particle grid approach +----------------------------------------------------- +Space discretization: The particular momentum equation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The numerical method used in com1DFA mixes particle methods and -mesh methods. Mass and momentum are tracked using particles but flow -thickness is tracked using the mesh. The mesh is also used to access topographic information -(surface elevation, normal vector) as well as for displaying results. +Discretizing the material into particles (particle quantities are denoted by the subscript :math:`k`, e.g. +:math:`m_k = \rho_0 V_k` is the mass of particle :math:`k`) leads to the following continuity equation: -Mass :eq:`mass-balance3` and momentum :eq:`momentum-balance6` balance -equations as well as basal normal stress :eq:`sigmab` are computed numerically using a SPH method -(**S**\ moothed **P**\ article **H**\ ydrodynamics) (:cite:`Mo1992`) for the variables -:math:`\overline{\mathbf{u}}=(\overline{u}_1, \overline{u}_2)` and -:math:`\overline{h}` by discretization of the released avalanche volume -in a large number of mass elements. SPH in general, is a mesh-less -numerical method for solving partial differential equations. The SPH -algorithm discretizes the numerical problem within a domain using -particles (:cite:`Sa2007,SaGr2009`), which interact -with each-other in a defined zone of influence. Some of the advantages -of the SPH method are that free surface flows, material boundaries and -moving boundary conditions are considered implicitly. In addition, large -deformations can be modeled due to the fact that the method is not based -on a mesh. From a numerical point of view, the SPH method itself is -relatively robust. +.. math:: + \frac{\mathrm{d}}{\mathrm{d}t} m_k = A_k^\text{ent} q^{\text{ent}} + :label: eq-particle-continuity-balance +By assuming that the Lagrangian control volume :math:`V` can be represented by a particle, +we can derive from :eq:`momentum-balance6` the particular momentum equation in the normal direction and in the tangent plane: -Discretization ----------------- +..
math:: + \left\{ + \begin{aligned} + & p_k^b = \rho_0 \, h_k \, g_k^\text{eff}\\ + &m_k \frac{\mathrm{d}\bar{\mathbf{u}}_k}{\mathrm{d}t} = A_k^b\boldsymbol{\tau^b} + - m_k \, g^\text{eff}_k \, \boldsymbol{\nabla}_{s} h + m_k \mathbf{g}_s + \mathbf{F}_k^{\text{ext}} + - \bar{\mathbf{u}}_k A_k^\text{ent} q_k^{\text{ent}} + - m_k \left( \bar{\mathbf{u}}_k \cdot \frac{\mathrm{d}\mathbf{v}_{3,k}}{\mathrm{d}t} \right)\mathbf{v}_{3,k} + \end{aligned} + \right. + :label: eq-momentum-particle -In this equation (:eq:`eq-momentum-particle`), flow thickness gradient, basal friction and +curvature acceleration terms need to be further developed and discretized. -Space discretization -~~~~~~~~~~~~~~~~~~~~~~ +Time discretization +~~~~~~~~~~~~~~~~~~~~~ -The domain is discretized in particles. The following properties are assigned to each particle :math:`p_k`: -a mass :math:`m_{p_k}`, a thickness :math:`{h}_{p_k}`, a density :math:`\rho_{p_k}=\rho_0` and -a velocity :math:`\mathbf{{u}}_{p_k}=({u}_{p_k,1}, {u}_{p_k,2})` (**those -quantities are thickness averaged, note that we dropped the overline from** :eq:`hmean-umean` **for simplicity reasons**). -In the following paragraphs, :math:`i` and :math:`j` indexes refer to the different directions in the NCS, -whereas :math:`k` and :math:`l` indexes refer to particles. +The momentum equation is solved numerically in time using an Euler time scheme. +The time derivative of any quantity :math:`f` is approximated by: -The quantities velocity, mass and flow thickness are also defined on the fixed mesh. It is possible to navigate -from particle property to mesh property using the interpolation methods described in :ref:`DFAnumerics:Mesh and interpolation`. +.. math:: + \frac{\mathrm{d}f_k}{\mathrm{d}t} \approx + \frac{f_k^{n+1} - f_k^n}{\Delta t} -Time step -~~~~~~~~~~~~~~~~~~~~~~ +where :math:`\Delta t` represents the time step and :math:`f^n = f(t^n)`, :math:`t^n = n \Delta t`. +For the velocity this reads: + +.. math:: + \frac{\mathrm{d}\bar{\mathbf{u}}_k}{\mathrm{d}t} \approx + \frac{\bar{\mathbf{u}}_k^{n+1} - \bar{\mathbf{u}}_k^n}{\Delta t} + +The position of the particles is then updated using a centered Euler scheme: + +.. math:: + \mathbf{x}_{k}^{n+1} = \mathbf{x}_{k}^{n} + \frac{\Delta t}{2}\left(\bar{\mathbf{u}}^{n+1}_{k} + \bar{\mathbf{u}}^{n}_{k}\right) -A fixed time step can be used or an adaptive time step can be computed using a -Courant-Friedrich-Lewy condition. +Taking the forces into account is done in two subsequent steps as forces acting on the particles can be +sorted into driving forces and friction forces. +Friction forces act against the particle motion, only affecting the magnitude of the velocity. +They can in no case become driving forces. +This is why the velocity is first updated with the driving forces, and the velocity magnitude is then +updated in a second step by applying the friction forces. -Mesh and interpolation ----------------------- -Here is a description of the mesh and the interpolation method that is used to -switch from particle to mesh values and the other way around. -Mesh +Topography information is usually provided in a raster format which corresponds to a regular rectilinear +mesh, from here on referred to as grid. +In order to get information on the surface elevation and normal vectors, the topography information +needs to be interpolated at the particle locations, and this needs to be repeated at each time step +since the particles are moving.
+Similarly, the particle properties such as mass or momentum, which translate into flow thickness and +velocity, also need to be interpolated onto the grid. +Grid velocity is needed to compute the artificial viscosity term, ensuring numerical stability, +see :ref:`SAMOS Artificial viscosity`. +Grid flow thickness is used to compute the particle flow thickness which is required for computing +the friction force. +Due to the utilized regular rectilinear mesh a bilinear interpolation method is applied for its +simplicity and suitability. +It also ensures the conservation of mass or momentum when interpolating from particles to grid and back. + +Here is a description of the grid and the interpolation method that is used to +switch from particle to grid values and the other way around. + +Grid ~~~~~~ For practical reasons, a 2D rectilinear mesh (grid) is used. Indeed the topographic @@ -66,13 +105,13 @@ input information is read from 2D raster files (with :math:`N_{y}` and :math:`N_ rows and columns) which correspond exactly to a 2D rectilinear mesh. Moreover, as we will see in the following sections, 2D rectilinear meshes are very convenient for interpolations as well as for -particle tracking. The 2D rectilinear mesh is composed of :math:`N_{y}` and +particle tracking. The grid is composed of :math:`N_{y}` and :math:`N_{x}` rows and columns of square cells (of side length :math:`csz`) and :math:`N_{y}+1` and :math:`N_{x}+1` rows and columns of vertices as described in :numref:`rasterGrid`. Each cell has a center and four vertices. The data read from the raster file is assigned to the cell centers. Note that -although this is a 2D mesh, as we use a terrain-following coordinate system to perform -our computations, this 2D mesh is oriented in 3D space and hence the projected side length +although this is a 2D grid, as we use a terrain-following coordinate system to perform +our computations, this 2D grid is oriented in 3D space and hence the projected side length corresponds to :math:`csz`, whereas the actual side length and hence also the :ref:`DFAnumerics:cell area`, depend on the local slope, expressed by the :ref:`DFAnumerics:Cell normals`. @@ -87,7 +126,7 @@ expressed by the :ref:`DFAnumerics:Cell normals`. Cell normals """""""""""""" There are many different methods available for computing normal vectors -on a 2D rectilinear mesh. Several options are available in com1DFA. +on a grid. Several options are available in com1DFA. The first one consists in computing the cross product of the diagonal vectors between four cell centers. This defines the normal vector at the vertices. It is @@ -131,15 +170,15 @@ Surface integration over the cell extent leads to the area of the cell: Interpolation ~~~~~~~~~~~~~~ In the DFA kernel, mass, flow thickness and flow velocity can be defined at particle -location or on the mesh. We need a method to be able to go from particle properties -to mesh (field) values and from mesh values to particle properties. +location or on the grid. We need a method to be able to go from particle properties +to grid (field) values and from grid values to particle properties. -Mesh to particle +Grid to particle """""""""""""""""" -On a 2D rectilinear mesh, scalar and vector fields defined at cell centers -can be evaluated anywhere within the mesh using a bilinear interpolation -between mesh cell centers. 
Evaluating a vector field simply consists in evaluating +On a grid, scalar and vector fields defined at cell centers +can be evaluated anywhere within the grid using a bilinear interpolation +between grid cell centers. Evaluating a vector field simply consists in evaluating the three components as scalar fields. The bilinear interpolation consists in successive linear interpolations @@ -174,15 +213,15 @@ cell size. .. figure:: _static/BilinearInterp.png :width: 90% - Bilinear interpolation in a unit mesh (cell size is 1). + Bilinear interpolation in a unit grid (cell size is 1). -Particles to mesh +Particles to grid """"""""""""""""""" -Going from particle property to mesh value is also based on bilinear interpolation and +Going from particle property to grid value is also based on bilinear interpolation and weights but requires a bit more care in order to conserve mass and momentum balance. -Flow thickness and velocity fields are determined on the mesh using, as intermediate step, -mass and momentum fields. First, mass and momentum mesh fields can be evaluated by +Flow thickness and velocity fields are determined on the grid using, as intermediate step, +mass and momentum fields. First, mass and momentum grid fields can be evaluated by summing particles mass and momentum. This can be donne using the bilinear weights :math:`w` defined in the previous paragraph (here :math:`f` represents the mass or momentum and :math:`f_{uv}` is the particle value. :math:`f_{nm}` @@ -196,58 +235,17 @@ the mass or momentum and :math:`f_{uv}` is the particle value. :math:`f_{nm}` f_{11} = & w_{11}f_{uv} \end{aligned} -The contribution of each particle to the different mesh points is summed up to -finally give the mesh value. This method ensures that the total mass and -momentum of the particles is preserved (the mass and momentum on the mesh will -sum up to the same total). Flow thickness and velocity mesh fields can then be deduced +The contribution of each particle to the different grid points is summed up to +finally give the grid value. This method ensures that the total mass and +momentum of the particles is preserved (the mass and momentum on the grid will +sum up to the same total). Flow thickness and velocity grid fields can then be deduced from the mass and momentum fields and the cell area (actual area of each grid cell, not the projected area). -Neighbor search ------------------- - -The lateral pressure forces are computed via the SPH flow thickness gradient. -This method is based on particle interactions within a certain neighborhood, meaning that it -is necessary to keep track of all the particles within the neighborhood of each particle. -Computing the gradient of the flow thickness at a particle location, requires to -find all the particles in its surrounding. Considering the number of particles and -their density, computing the gradient ends up in computing a lot of -interactions and represents the most computationally expensive part of the dense -flow avalanche simulation. It is therefore important that the neighbor search is fast and efficient. -:cite:`IhOrSoKoTe2014` describe different rectilinear mesh neighbor search -methods. In com1DFA, the simplest method is used. The idea is to locate each -particle in a cell, this way, it is possible to keep track of the particles -in each cell. 
To find the neighbors of a particle, one only needs to read the -cell in which the particle is located (dark blue cell in :numref:`neighborSearch`) -, find the direct adjacent cells in all directions (light blue cells) and -simply read all particles within those cells. This is very easily achieved -on rectilinear meshes because locating a particle in a cell is straightforward and -finding the adjacent cells is also easily done. - -.. _neighborSearch: - -.. figure:: _static/neighborSearch.png - :width: 90% - - Support mesh for neighbor search: - if the cell side is bigger than the kernel length :math:`r_{kernel}` (red circle in the picture), - the neighbors for any particle in any given cell (dark blue square) - can be found in the direct neighborhood of the cell itself (light blue squares) - -.. _partInCell: - -.. figure:: _static/partInCell.png - :width: 90% +Flow thickness and its gradient +---------------------------------- - The particles are located in the cells using - two arrays. indPartInCell of size number of cells + 1 - which keeps track of the number of particles in each cell - and partInCell of size number of particles + 1 which lists - the particles contained in the cells. - -SPH gradient --------------- SPH method can be used to solve thickness integrated equations where a 2D (respectively 3D) equation is reduced to a 1D (respectively 2D) one. This is used in ocean engineering to solve shallow water equations (SWE) @@ -258,29 +256,57 @@ In the case of avalanche flow, the "bed" is sloped and irregular. The aim is to adapt the SPH method to apply it to thickness integrated equations on a 2D surface living in a 3D world. -Method -~~~~~~~ -The SPH method is used to express a quantity (the flow thickness in our case) and -its gradient at a certain particle location as a weighted sum of its neighbors -properties. The principle of the method is well described in :cite:`LiLi2010`. -In the case of thickness integrated equations (for example SWE), a scalar function -:math:`f` and its gradient can be expressed as following: +Flow thickness gradient computation using SPH +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. math:: - f_{k} &= \sum\limits_{l}f_{l}A_{l}\,W_{kl}\\ - \mathbf{\nabla}f_{k} &= -\sum\limits_{l}f_{l}A_{l}\,\mathbf{\nabla}W_{kl} - :label: sph formulation +In order to assess the flow thickness gradient, we employ a SPH method (Smoothed Particles Hydrodynamics Method +:cite:`LiLi2010`), where the gradient is directly derived from the particles and does not require any mesh. +In contrast, a mesh method or a MPM (Material Point Method) would directly use a mesh formulation to +approximate the gradient or interpolate the particles properties on an underlying mesh and +then approximate the gradient of the flow thickness using a mesh formulation. + +In theory, a SPH method does not require any mesh to compute the gradient. +However, applying this method requires finding neighbor particles. +This process can be sped up with the help of an underlying grid, different neighbor search methods +are presented in :cite:`IhOrSoKoTe2014`, a "uniform grid method" is used in this paper. -Which gives for the flow thickness: +The SPH method is used to express a quantity (the flow thickness in our case) and its gradient at +a certain particle location as a weighted sum of its neighbors properties. +The principle of the method is well described in :cite:`LiLi2010` and the basic formula reads: .. 
math:: - \overline{h}_{k} &= \frac{1}{\rho_0}\,\sum\limits_{l}{m_{l}}\,W_{kl}\\ - \mathbf{\nabla}\overline{h}_{k} &= -\frac{1}{\rho_0}\,\sum\limits_{l}{m_{l}}\,\mathbf{\nabla}W_{kl} - :label: sph formulation for fd + \begin{aligned} + f_{k} \simeq \langle f_{k}\rangle &= \sum\limits_{l}f_{l}A_{l}\,W_{kl}\\ + \boldsymbol{\nabla} f_{k} \simeq \langle \boldsymbol{\nabla} f_{k}\rangle &= -\sum\limits_{l}f_{l}A_{l}\,\boldsymbol{\nabla} W_{kl} + \end{aligned} + :label: eq-sph-formulation -Where :math:`W` represents the SPH-Kernel function. +Where :math:`W` represents the SPH-Kernel function (we employ the spiky kernel, see +:eq:`eq-kernel-function`) and the subscript :math:`l` denotes the neighbor particles of +particle :math:`k`. +This kernel function is designed to satisfy the unity condition, be an +approximation of the Dirac function and have a compact support domain +(:cite:`LiLi2010`). -The computation of its gradient depends on the coordinate system used. +:eq:`eq-sph-formulation` gives for the flow thickness: + +.. math:: + h_{k} \simeq \langle h_{k}\rangle &= \frac{1}{\rho_0}\,\sum\limits_{l}{m_{l}}\,W_{kl}\\ + \boldsymbol{\nabla}h_{k} \simeq \langle \boldsymbol{\nabla} h_{k}\rangle &= -\frac{1}{\rho_0}\,\sum\limits_{l}{m_{l}}\,\boldsymbol{\nabla}W_{kl} + :label: sph formulation for fd + +This method is usually either used in a 3D space where particles move +freely in this space and where the weighting factor for the summation is +the volume of the particle, or on a 2D horizontal plane where the weighting +factor for the summation is the area of the particle and the gradient is +2D. +Here we want to compute the gradient of the flow thickness on a 2D surface +(the topography) that lives in 3D. The method used is analogous to the SPH +gradient computation on the 2D horizontal plane but the gradient is 3D +and tangent to the surface (collinear to the local tangent plane). +The theoretical derivation in the following section shows that the SPH +computation is equivalent to applying the 2D SPH method in the local +tangent plane instead of in the horizontal plane. .. _standard-method: @@ -289,11 +315,11 @@ Standard method Let us start with the computation of the gradient of a scalar function :math:`f \colon \mathbb{R}^2 \to \mathbb{R}` on a horizontal plane. -Let :math:`P_k=\mathbf{x}_k=(x_{k,1},x_{k,2})` and :math:`Q_l=\mathbf{x}_l=(x_{l,1},x_{l,2})` be two points in :math:`\mathbb{R}^2` defined by -their coordinates in the Cartesian coordinate system :math:`(P_k,\mathbf{e_1},\mathbf{e_2})`. :math:`\mathbf{r}_{kl}=\mathbf{x}_k-\mathbf{x}_l` is the vector going from -:math:`Q_l` to :math:`P_k` and :math:`r_{kl} = \left\Vert \mathbf{r}_{kl}\right\Vert` the length of this vector. -Now consider the kernel function :math:`W`: - +Let :math:`P_k=\mathbf{x}_k=(x_{k,1},x_{k,2})` and :math:`Q_l=\mathbf{x}_l=(x_{l,1},x_{l,2})` be +two points in :math:`\mathbb{R}^2` defined by their coordinates in the Cartesian coordinate system +:math:`(P_k,\mathbf{e_1},\mathbf{e_2})`. :math:`\mathbf{r}_{kl}=\mathbf{x}_k-\mathbf{x}_l` is the +vector going from :math:`Q_l` to :math:`P_k` and :math:`r_{kl} = \left\Vert \mathbf{r}_{kl}\right\Vert` +the length of this vector. Now consider the kernel function :math:`W`: .. math:: \left.
\end{aligned} - :label: kernel function + :label: eq-kernel-function :math:`\left\Vert \mathbf{r_{kl}}\right\Vert= \left\Vert \mathbf{x_{k}}-\mathbf{x_{l}}\right\Vert` @@ -327,7 +353,7 @@ coordinate system :math:`(x_1,x_2)` leads to: .. math:: - \mathbf{\nabla}W_{kl} = \frac{\partial W}{\partial r}.\mathbf{\nabla}r, + \boldsymbol{\nabla}W_{kl} = \frac{\partial W}{\partial r}.\boldsymbol{\nabla}r, \quad r = \left\Vert \mathbf{r} \right\Vert = \sqrt{(x_{k,1}-x_{l,1})^2 + (x_{k,2}-x_{l,2})^2} :label: kernel function gradient 1 @@ -349,7 +375,7 @@ and which leads to the following expression for the gradient: .. math:: - \mathbf{\nabla}W_{kl} = -3\frac{10}{\pi r_0^5}\left\{ + \boldsymbol{\nabla}W_{kl} = -3\frac{10}{\pi r_0^5}\left\{ \begin{aligned} & (r_0 - \left\Vert \mathbf{r_{kl}}\right\Vert)^2\frac{\mathbf{r_{kl}}}{r_{kl}}, \quad &0\leq \left\Vert \mathbf{r_{kl}}\right\Vert \leq r_0\\ & 0 , & r_0 <\left\Vert \mathbf{r_{kl}}\right\Vert @@ -360,7 +386,7 @@ which leads to the following expression for the gradient: The gradient of :math:`f` is then simply: .. math:: - \mathbf{\nabla}f_{k} = -\sum\limits_{l}f_{l}A_{l}\,\mathbf{\nabla}W_{kl} + \boldsymbol{\nabla}f_{k} = -\sum\limits_{l}f_{l}A_{l}\,\boldsymbol{\nabla}W_{kl} :label: sph gradient 2.5D SPH method @@ -410,19 +436,19 @@ is not independent of :math:`(x_1,x_2)`: \right. The target is the gradient of :math:`\tilde{f}` in terms of the :math:`\mathcal{TP}` variables -:math:`(v_1,v_2)`. Let us call this gradient :math:`\mathbf{\nabla}_\mathcal{TP}`. +:math:`(v_1,v_2)`. Let us call this gradient :math:`\boldsymbol{\nabla}_\mathcal{TP}`. It is then possible to apply the :ref:`standard-method` to compute this gradient: .. math:: - \mathbf{\nabla}_\mathcal{TP}W_{kl} = \frac{\partial W}{\partial r}.\mathbf{\nabla}_\mathcal{TP}r, + \boldsymbol{\nabla}_\mathcal{TP}W_{kl} = \frac{\partial W}{\partial r}.\boldsymbol{\nabla}_\mathcal{TP}r, \quad r = \left\Vert \mathbf{r} \right\Vert = \sqrt{v_{kl,1}^2 + v_{kl,2}^2} :label: kernel function gradient TP 1 Which leads to: .. math:: - \mathbf{\nabla}_\mathcal{TP}W_{kl} = -3\frac{10}{\pi r_0^5}\frac{(r_0 - \left\Vert \mathbf{r_{kl}'}\right\Vert)^2}{r_{kl}'}\left\{ + \boldsymbol{\nabla}_\mathcal{TP}W_{kl} = -3\frac{10}{\pi r_0^5}\frac{(r_0 - \left\Vert \mathbf{r_{kl}'}\right\Vert)^2}{r_{kl}'}\left\{ \begin{aligned} & v_{kl,1}\mathbf{V_1} + v_{kl,2}\mathbf{V_2}, \quad &0\leq \left\Vert \mathbf{r_{kl}'}\right\Vert \leq r_0\\ & 0 , & r_0 <\left\Vert \mathbf{r_{kl}'}\right\Vert @@ -430,17 +456,15 @@ Which leads to: \right. :label: kernel function gradient TP 2 - .. math:: - \mathbf{\nabla}_\mathcal{TP}\tilde{f_{k}} = -\sum\limits_{l}\tilde{f_{l}}A_{l}\,\mathbf{\nabla}W_{kl} + \boldsymbol{\nabla}_\mathcal{TP}\tilde{f_{k}} = -\sum\limits_{l}\tilde{f_{l}}A_{l}\,\boldsymbol{\nabla}W_{kl} :label: sph gradient This gradient can now be expressed in the Cartesian coordinate system. It is clear that the change of coordinate system was not needed: - .. math:: - \mathbf{\nabla}_\mathcal{TP}W_{kl} = -3\frac{10}{\pi r_0^5}\frac{(r_0 - \left\Vert \mathbf{r_{kl}'}\right\Vert)^2}{r_{kl}'}\left\{ + \boldsymbol{\nabla}_\mathcal{TP}W_{kl} = -3\frac{10}{\pi r_0^5}\frac{(r_0 - \left\Vert \mathbf{r_{kl}'}\right\Vert)^2}{r_{kl}'}\left\{ \begin{aligned} & r_{kl,1}\mathbf{e_1} + r_{kl,2}\mathbf{e_2} + r_{kl,3}\mathbf{e_3}, \quad &0\leq \left\Vert \mathbf{r_{kl}'}\right\Vert \leq r_0\\ & 0 , & r_0 <\left\Vert \mathbf{r_{kl}'}\right\Vert @@ -460,198 +484,170 @@ differently. 
Tangent plane and local coordinate system used to apply the SPH method -Particle splitting and merging -------------------------------- -There are two different approaches treating splitting of particles in com1DFA. -The first one only deals with splitting of particles with too much mass('split only'). The second approach, -"split/merge" approach aims at keeping a stable amount of particles within a given range. This is done in order to -guaranty a sufficient accuracy of the sph flow thickness gradient computation. - -Split (**default**) -~~~~~~~~~~~~~~~~~~~~ -If the ``splitOption`` is set to 0, particles are split because of snow entrainment. In this case, -particles that entrain snow grow, i.e. their mass increases. At one point the mass of the particles is considered to be -too big and this particle is split in two. The splitting operation happens if the mass of the -particle exceeds a threshold value (:math:`mPart > massPerPart \times thresholdMassSplit`), where ``thresholdMassSplit`` -is specified in the configuration file and ``massPerPart`` depends on the chosen ``massPerParticleDeterminationMethod`` -as defined here: :ref:`com1DFAAlgorithm:Initialize particles`. -When a particle is split a new child particle is created with the same properties as the parent apart from -mass and position. Both parent and child get half of the parent mass. The parent and child's position are -adjusted: the first / second is placed forward / backward in the direction of the velocity -vector at a distance :math:`distSplitPart \times rPart` of the initial parent position. Particles are considered to -have a circular basal surface :math:`A = \frac{m}{\rho} = \pi r^2`. - -Split and merge -~~~~~~~~~~~~~~~ -If the ``splitOption`` is set to 1 particles are split or merged in order to keep the particle count -as constant as possible within the kernel radius. -Assessing the number of particles within one kernel radius is done based on the particle area. Particles -are assumed to be cylindrical, i.e the base is a circle. For particle ``k`` we have :math:`A_k = \frac{m_k}{\rho}`. The area -of the support domain of the sph kernel function is :math:`\pi r_0^2`. The aim is to keep ``nPPK`` particles within -the kernel radius. The particles are split if the estimated number of particles per kernel radius :math:`\frac{\pi r_0^2}{A_k}` -falls below a given value of :math:`n_{PPK}^{min} = C_{n_{PPK}}^{min}n_{PPK}`. Particles are split using the same -method as in :ref:`DFAnumerics:Only split approach`. Similarly, particles are merged if the estimated -number of particles per kernel radius exceeds a given value :math:`n_{PPK}^{max} = C_{n_{PPK}}^{max}n_{PPK}`. -In this case particles are merged with their closest neighbor. The new position and velocity is the mass -averaged one. The new mass is the sum. Here, two coefficients ``C_{n_{PPK}}^{min}`` and ``C_{n_{PPK}}^{max}`` were -introduced. A good balance needs to be found for the coefficients so that the particles are not constantly split or -merged but also not too seldom. The split and merge steps happen only once per time step and per particle. - -Artificial viscosity ---------------------- - -Two options are available to add viscosity to stabilize the numerics. The first option -consists in adding artificial viscosity (``viscOption`` = 1). The second option attempts -to adapt the Lax-Friedrich scheme (usually applied to meshes) to the particle method -(``viscOption`` = 2). Finally, ``viscOption`` = 0 deactivates any viscosity force. 
- -SAMOS Artificial viscosity ~~~~~~~~~~~~~~~~~~~~~~~~~~ +Flow thickness computation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -In :ref:`theoryCom1DFA:Governing Equations for the Dense Flow Avalanche`, the governing -equations for the DFA were derived and all first order or smaller terms where neglected. -Among those terms is the lateral shear stress. This term leads toward -the homogenization of the velocity field. It means that two neighbor elements -of fluid should have similar velocities. The aim behind adding artificial viscosity is to -take this phenomena into account. The following viscosity force is added: +The particle flow thickness is computed with the help of the grid. +The mass of the particles is interpolated onto the grid using a bilinear interpolation method +(described in :ref:`Grid to particle`). +Then, dividing the mass at the grid cells by the area of the grid cells, while taking the slope of the +cell into account, returns the flow thickness field on the grid. +This property is interpolated back to the particles, which leads to the particle flow thickness property. + +We do not compute the flow thickness directly from the particle properties (mass and position) using a +SPH method because it induces instabilities. +Indeed, cases where too few neighbors are found lead to a very small flow thickness, which becomes an +issue for flow thickness dependent +friction laws. Note that using such a SPH method would lead to a fully particle-based method. +But since the flow thickness is only used in some cases for the friction force computation, using the +previously described grid method should not significantly affect the computation. + +Convergence +------------ +.. \label{sec-convergence-criterion} + +We are looking for a criterion that relates the properties of the spatial and +temporal discretization to ensure convergence of the numerical solution. +Simply decreasing the time step and increasing the spatial resolution, +by decreasing the grid cell size and kernel radius and increasing the number of +particles, does not ensure convergence. +The analysis from :cite:`MoVi2000` carried out on a very similar problem +(hyperbolic nonlinear transport equation with a particle and SPH method) +shows that the kernel radius size cannot be varied +independently of the time step and number of particles. +Indeed, they show that the numerical solution converges towards the solution of +the equation under the following condition: .. math:: - \begin{aligned} - \mathbf{F_{viscosity}} = &- \frac{1}{2}\rho C_{Lat}\|\mathbf{du}\|^2 A_{Lat} - \frac{\mathbf{du}}{\|\mathbf{du}\|}\\ - = & - \frac{1}{2}\rho C_{Lat}\|\mathbf{du}\| A_{Lat} \mathbf{du} - \end{aligned} + \left\{ + \begin{aligned} + r_{\text{part}} &\to 0\\ + r_{\text{kernel}} &\to 0\\ + \frac{r_{\text{part}}^m}{r_{\text{kernel}}^{m+1}} &\to 0\quad m=2 + \end{aligned} + \right. + \quad\mbox{and} \quad dt \leq C r_{\text{kernel}} + :label: eq-ben-moussa + +Where :math:`r_{\text{part}}` represents the "size" of a particle, :math:`r_{\text{kernel}}` represents the SPH kernel radius, :math:`dt` is the time +step and :math:`C` a constant. +The conditions in :eq:`eq-ben-moussa` mean that both :math:`r_{\text{part}}` +(particle size) and :math:`r_{\text{kernel}}` (SPH kernel radius) need to go to zero +but also that the particle size needs to go to zero faster than the SPH kernel radius. +Finally, the time step needs to go to zero as well, and this at the same rate as +:math:`r_{\text{kernel}}`.
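+As a purely illustrative sketch (not part of com1DFA; the refinement series, the exponent
+and the constant :math:`C` are assumptions made only for this example), the conditions of
+:eq:`eq-ben-moussa` can be checked numerically for a sequence of kernel radii:
+
+.. code-block:: python
+
+    import numpy as np
+
+    C = 0.1                                    # assumed proportionality constant for dt
+    rKernel = 5.0 / 2.0 ** np.arange(6)        # kernel radii 5, 2.5, 1.25, ... [m]
+    # particle size shrinking faster than the kernel radius
+    # (this choice corresponds to alpha = -1.5 in the relation derived below)
+    rPart = rKernel * (rKernel / rKernel[0]) ** 0.75
+    dt = C * rKernel                           # time step tied to the kernel radius
+    ratio = rPart ** 2 / rKernel ** 3          # must tend to zero (m = 2)
+    for rk, rp, rat, t in zip(rKernel, rPart, ratio, dt):
+        print('rKernel=%.4f rPart=%.4f rPart^2/rKernel^3=%.4f dt=%.4f' % (rk, rp, rat, t))
+
+The printed ratio decreases monotonically, i.e. this refinement series satisfies the
+conditions above, whereas refining the kernel radius at a fixed number of particles per
+kernel radius would not.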
+The particle size can be expressed as a function of the SPH kernel radius: -Where the velocity difference reads :math:`\mathbf{du} = \mathbf{u} - \mathbf{\bar{u}}` -(:math:`\mathbf{\bar{u}}` is the mesh velocity interpolated at the particle position). -:math:`C_{Lat}` is a coefficient that rules the viscous force. It would be the -equivalent of :math:`C_{Drag}` in the case of the drag force. The :math:`C_{Lat}` -is a numerical parameter that depends on the mesh size. Its value is set to 100 -and should be discussed and further tested. +.. math:: + r_{\text{part}} = \left(\frac{A^b}{\pi}\right)^{1/2} = + \left(\frac{A_{\text{kernel}}}{\pi n_{\text{ppk}}}\right)^{1/2} + = \frac{r_{\text{kernel}}}{n_{\text{ppk}}^{1/2}}, -Adding the viscous force -"""""""""""""""""""""""" +where the particles basal area was assumed to be a circle. -The viscous force acting on particle :math:`k` reads: +Note that this does not affect the results except adding a different shape factor in +front of this expression. +:math:`n_{\text{ppk}}` is the number of particles per kernel radius and defines the +density of the particles when initializing a simulation. +Let :math:`n_{\text{ppk}}` be defined by a reference number of particles per kernel +radius :math:`n_{\text{ppk}}^0>0`, a reference kernel radius +:math:`r_{\text{kernel}}^0>0` and real exponent :math:`\alpha`: .. math:: - \begin{aligned} - \mathbf{F_k^{viscosity}} = &-\frac{1}{2}\rho C_{Lat}\|\mathbf{du}_k^{old}\| A_{Lat} - \mathbf{du}_k^{new}\\ - = & -\frac{1}{2}\rho C_{Lat}\|\mathbf{u}_k^{old} - \mathbf{\bar{u}}_k^{old}\| A_{Lat} - (\mathbf{u}_k^{new} - \mathbf{\bar{u}}_k^{old}) - \end{aligned} + n_{\text{ppk}} = n_{\text{ppk}}^0\left(\frac{r_{\text{kernel}}}{r_{\text{kernel}}^0}\right)^{\alpha} -Updating the velocity is done in two steps. First adding the explicit term related to the -mean mesh velocity and then the implicit term which leads to: +This leads to a :math:`r_{\text{part}}`: .. math:: - \mathbf{u}_k^{new} = \frac{\mathbf{u}_k^{old} - C_{vis}\mathbf{\bar{u}}_k^{old}}{1 + C_{vis}} - -With :math:`C_{vis} = \frac{1}{2}\rho C_{Lat}\|\mathbf{du}_k^{old}\| A_{Lat}\frac{dt}{m}` - - -Ata Artificial viscosity -~~~~~~~~~~~~~~~~~~~~~~~~ + r_{\text{part}} = \left(\frac{{r_{\text{kernel}}^0}^\alpha}{n_{\text{ppk}}^0}\right)^{1/2} r_{\text{kernel}}^{1-\alpha/2} -An upwind method based on Lax-Friedrichs scheme -""""""""""""""""""""""""""""""""""""""""""""""""""""" +Replacing :math:`r_{\text{part}}` by the previous equation in +:eq:`eq-ben-moussa` leads to the following condition: -Shallow Water Equations are well known for being hyperbolic transport equations. -They have the particularity of carrying discontinuities or shocks which will cause -numerical instabilities. +.. math:: + \frac{{r_{\text{kernel}}^0}^\alpha}{n_{\text{ppk}}^0} r_{\text{kernel}}^{-1-\alpha} \to 0 + :label: eq-ben-moussa-new -A decentering in time allows to better capture the discontinuities. -This can be done in the manner of the Lax-Friedrich scheme as described in :cite:`AtSo2005`, -which is formally the same as adding a viscous force. Implementing it for the SPH method, -this viscous force applied on a given particle :math:`k` can be expressed as follows: +This brings us to the following choice: .. 
math:: + \left\{ + \begin{aligned} + dt &= C_{\text{time}} r_{\text{kernel}}\\ + n_{\text{ppk}} &= n_{\text{ppk}}^{0} \left(\frac{r_{\text{kernel}}}{r_{\text{kernel}}^0}\right)^{\alpha} + \end{aligned} + \right. + :label: eq-convergence-relation -with :math:`\Pi_{kl} = \lambda_{kl}(\mathbf{u}_l - \mathbf{u}_k) \cdot -\frac{\mathbf{r}_{kl}}{\vert\vert \mathbf{r}_{kl} \vert\vert}`, and -:math:`\mathbf{\nabla}W_{kl}` is the gradient of the kernel function and -is described in :ref:`DFAnumerics:SPH gradient`. +This satisfies the convergence criterion if: -:math:`\mathbf{u}_{kl} = \mathbf{u}_k - \mathbf{u}_l` is the relative velocity -between particle k and l, :math:`\mathbf{r}_{kl} = \mathbf{x}_k - \mathbf{x}_l` is -the vector going from particles :math:`l` to particle :math:`k` and -:math:`\lambda_{kl} = \frac{c_k+c_l}{2}` with :math:`c_k = \sqrt{gh_l}` -the wave speed. The :math:`\lambda_{kl}` is obtained by turning expressions -related to time and spatial discretization parameters into an expression -on maximal speed between both particles in the Lax Friedrich scheme. +.. math:: + \alpha < -1 + :label: eq-convergence-criterion -Due to the expression of the viscosity force, it makes sense to -compute it at the same place where the SPH pressure force are computed (for this reason, the -``viscOption`` = 2 corresponding to the "Ata" viscosity option is only available -in combination with the ``sphOption`` = 2). +Note that this criterion leaves some freedom on the choice of exponent :math:`\alpha` and that there are no constraints on the reference kernel radius :math:`r_{\text{kernel}}^0` and reference number of particles per kernel radius :math:`n_{\text{ppk}}^0`, +even though it seems logical to require a minimum number of particles per kernel radius so that enough neighbors are available +to get a reasonable estimate of the gradient. +These parameters should be adjusted according to the expected accuracy of the results and/or the computer power available. +Determining the optimal parameter values for :math:`\alpha`, :math:`r_{\text{kernel}}^0` and :math:`n_{\text{ppk}}^0`, +for example according to a user's needs in terms of accuracy and computational efficiency, +requires a specific and detailed investigation of the considered case. Forces discretization ---------------------- -Lateral force -~~~~~~~~~~~~~~ - -The SPH method is introduced when expressing the flow thickness gradient for each -particle as a weighted sum of its neighbors -(:cite:`LiLi2010,Sa2007`). From now on the :math:`p` for particles in :math:`p_k` is dropped -(same applies for :math:`p_l`). +Friction force discretization +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. \label{sec-discretizing-friction} -The lateral pressure forces on each particle are calculated from the compression -forces on the boundary of the particle. -The boundary is approximated as a square with the base side length -:math:`\Delta s = \sqrt{A_p}` and the respective flow height. This leads to -(subscript :math:`|_{.,i}` stands for the component in the :math:`i^{th}` -direction, :math:`i = {1,2}`): +Expressed for a particle, the friction force term in :eq:`eq-momentum-particle` reads: ..
math:: - F_{k,i}^{\text{lat}} = K_{(i)}\oint\limits_{\partial{A_{k}}}\left(\int\limits_{b}^{s}\sigma_{33}\,n_i\,\mathrm{d}x_3\right)\mathrm{d}l - -From equation :eq:`momentum-balance6` + \mathbf{F}_k^\text{fric} = A_k^b \, \boldsymbol{\tau^b} = + - {\left\Vert\mathbf{F}_k^\text{fric}\right\Vert}_\text{max} \mathbf{v}_1 -.. math:: - F_{k,i}^{\text{lat}} = K_{(i)}\,\frac{\Delta{s}}{2}\left((\overline{h}\,\overline{\sigma}^{(b)}_{33})_{x_{i}- - \frac{\Delta{s}}{2}}-(\overline{h}\,\overline{\sigma}^{(b)}_{33})_{x_{i}+\frac{\Delta{s}}{2}}\right) - = K_{(i)}\frac{\Delta{s}^2}{2}\,\left.\frac{d\,\overline{h}\,\overline{\sigma}^{(b)}}{d\,x_i}\right\rvert_{k} +This relation stands if the particle is moving. The starting and stopping processes +satisfy a different equation and are handled differently in the numerical +implementation (using the same equation would lead to a non-physical behavior). +This is described in more details in :ref:`Account for friction forces`}. -The product of the average flow thickness :math:`\overline{h}` and the basal normal pressure :math:`\overline{\sigma}^{(b)}_{33}` -reads (using equation :eq:`sigmab` and dropping the curvature acceleration term): -.. math:: - \overline{h}\,\overline{\sigma}^{(b)} = \overline{h}^2\,\rho_0\,\left(g_3-\overline{u_1}^2\,\frac{\partial^2{b}}{\partial{x_1^2}}\right) - \approx \overline{h}^2\,\rho_0\,g_3 +Lateral force +~~~~~~~~~~~~~~ -Which leads to, using the relation :eq:`sph formulation`: +The SPH method is introduced when expressing the flow thickness gradient for each +particle as a weighted sum of its neighbors (:cite:`LiLi2010,Sa2007`). +Which leads to, using the relation :eq:`eq-sph-formulation`: .. math:: - F_{k,i}^{\text{lat}} = K_{(i)}\,\rho_0\,g_3\,A_{k}\,\overline{h}_{k}\,.\,\left.\frac{d\,\overline{h}}{d\,x_i}\right\rvert_{k} - = -K_{(i)}\,m_{i}\,g_3\,.\,\frac{1}{\rho_0}\,\sum\limits_{l}{m_{l}}\,\left.\frac{d\,W_{kl}}{d\,x_i}\right\rvert_{l} + \mathbf{F}_{k}^{\text{lat}} = + - \rho_0 \, g^\text{eff} \, h A^b \boldsymbol{\nabla}_{\!s} h + = -m_{k}\,g^\text{eff}\,.\,\frac{1}{\rho_0}\,\sum\limits_{l}{m_{l}}\,\left.\boldsymbol{\nabla}_{\!s}W_{kl}\right\rvert_{l} :label: lateral force Bottom friction force ~~~~~~~~~~~~~~~~~~~~~~~ The bottom friction forces on each particle depend on the chose friction model. Using the SamosAT friction model -(using equation :eq:`sigmab` for the expression of :math:`\sigma^{(b)}_{k}`) the formulation of the bottom friction forec -reads: +the formulation of the bottom friction force reads: .. math:: - F_{k,i}^{\text{bot}} = -\frac{\overline{u}_{k,i}}{\|\mathbf{u}_k\|}\,A_{k}\,\tau^{(b)}_{k} - = -\delta_{k1}\,A_{k}\,\left(\tau_0 + \tan{\delta}\,\left(1+\frac{R_s^0}{R_s^0+R_s}\right)\,\sigma^{(b)}_{k} - + \frac{\rho_0\,\mathbf{\overline{u}}_{k}^2}{\left(\frac{1}{\kappa}\,\ln\frac{\overline{h}}{R} + B\right)^2}\right) + \mathbf{F}_{k}^{\text{bot}} = A_{k}\,\boldsymbol{\tau}_{k}^b + = -A_{k}\,\left(\tau_0 + \tan{\delta}\,\left(1+\frac{R_s^0}{R_s^0+R_s}\right)\,\boldsymbol{\sigma}_{k}^b\cdot\mathbf{n^b} + + \frac{\rho_0\,\mathbf{\bar{u}}_{k}^2}{\left(\frac{1}{\kappa}\,\ln\frac{h}{R} + B\right)^2}\right)\mathbf{v}_1 :label: bottom force Added resistance force ~~~~~~~~~~~~~~~~~~~~~~~ The resistance force on each particle reads (where :math:`h^{\text{eff}}_{k}` -is a function of the average flow thickness :math:`\overline{h}_{k}`): +is a function of the average flow thickness :math:`h_{k}`): .. 
math:: - F_{k,i}^{\text{res}} - = - \rho_0\,A_{k}\,h^{\text{eff}}_{k}\,C_{\text{res}}\,\|\overline{\mathbf{u}}_{k}\|^2\,\frac{\overline{u}_{k,i}}{|\overline{\mathbf{u}}_{k}\|} + \mathbf{F}_{k}^{\text{res}} + = - \rho_0\,A_{k}\,h^{\text{eff}}_{k}\,C_{\text{res}}\,\bar{\mathbf{u}}_{k}^2\,\mathbf{v}_1 :label: resistance force Both the bottom friction and resistance force are friction forces. The expression above represent the maximal @@ -660,19 +656,19 @@ equals the driving forces. See :cite:`MaVi2003` for more information. Entrainment force ~~~~~~~~~~~~~~~~~~~~~~~ -The term :math:`- \overline{u_i}\,\rho_0\,\frac{\mathrm{d}(A\,\overline{h})}{\mathrm{d}t}` +The term :math:`- \bar{\mathbf{u}}\,\rho_0\,\frac{\mathrm{d}(A\,h)}{\mathrm{d}t}` related to the entrained mass in :eq:`momentum-balance3` now reads: .. math:: - - \overline{u}_{k,i}\,\rho_0\,\frac{\mathrm{d}}{\mathrm{d}t}\,\left(A_{k}\,\overline{h}_{k}\right) - = - \overline{u}_{k,i}\,A^{\text{ent}}_{k}\,q^{\text{ent}}_{k} + - \bar{\mathbf{u}}_k\,\rho_0\,\frac{\mathrm{d}}{\mathrm{d}t}\,\left(A_{k}\,h_{k}\right) + = - \bar{\mathbf{u}}_k\,A^{\text{ent}}_{k}\,q^{\text{ent}}_{k} The mass of entrained snow for each particle depends on the type of entrainment involved (plowing or erosion) and reads: .. math:: - \rho_0\,\frac{\mathrm{d}}{\mathrm{d}t}\,\left(A_{k}\,\overline{h}_{k}\right) + \rho_0\,\frac{\mathrm{d}}{\mathrm{d}t}\,\left(A_{k}\,h_{k}\right) = \frac{\mathrm{d}\,m_{k}}{\mathrm{d}t} = A_{k}^\text{ent}\,q_{k}^{\text{ent}} @@ -680,55 +676,252 @@ with .. math:: \begin{aligned} - A_{k}^{\text{plo}} &= w_f\,h_{k}^{\text{ent}}= \sqrt{\frac{m_{k}}{\rho_0\,\overline{h}_{k}}}\,h_{k}^{\text{ent}} - \quad &\mbox{and} \quad &q_{k}^{\text{plo}} = \rho_{\text{ent}}\,\left\Vert \overline{\mathbf{u}}_{k}\right\Vert + A_{k}^{\text{plo}} &= w_f\,h_{k}^{\text{ent}}= \sqrt{\frac{m_{k}}{\rho_0\,h_{k}}}\,h_{k}^{\text{ent}} + \quad &\mbox{and} \quad &q_{k}^{\text{plo}} = \rho_{\text{ent}}\,\left\Vert \bar{\mathbf{u}}_{k}\right\Vert \quad &\mbox{for plowing}\\ - A_{k}^{\text{ero}} &= A_{k} = \frac{m_{k}}{\rho_0\,\overline{h}_{k}} - \quad &\mbox{and} \quad &q_{k}^{\text{ero}} = \frac{\tau_{k}^{(b)}}{e_b}\,\left\Vert \overline{\mathbf{u}}_{k}\right\Vert + A_{k}^{\text{ero}} &= A_{k} = \frac{m_{k}}{\rho_0\,h_{k}} + \quad &\mbox{and} \quad &q_{k}^{\text{ero}} = \frac{\tau_{k}^{(b)}}{e_b}\,\left\Vert \bar{\mathbf{u}}_{k}\right\Vert \quad &\mbox{for erosion}\end{aligned} Finaly, the entrainment force reads: .. math:: - F_{k,i}^{\text{ent}} = -w_f\,(e_s+\,q_{k}^{\text{ent}}\,e_d) + \mathbf{F}_k^{\text{ent}} = -w_f\,(e_s+\,q_{k}^{\text{ent}}\,e_d)\mathbf{v}_1 Adding forces -------------- The different components are added following an operator splitting method. This means particle velocities are updated successively with the different forces. +Numerical stability +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Because the lateral shear force term was removed when deriving the model equations +(because of its relative smallness, :cite:`GrEd2014`), :eq:`eq-momentum-balance-approx` +is hyperbolic. +Hyperbolic systems have the characteristic of carrying discontinuities or shocks which +will cause numerical instabilities. +They would fail to converge if for example an Euler forward in time scheme is used +(:cite:`Le1990`). +Several methods exist to stabilize the numerical integration of an hyperbolic system +of differential equations. +All aim at adding some upwinding in the discretization scheme. 
+Some methods tackle this problem by introducing some upwinding in the discretization +of the derivatives (:cite:`HaLaLe1983, HaHy1983`). +Others introduce some artificial viscosity (as in :cite:`Mo1992`). + +Two options are available to add viscosity to stabilize the numerics. The first option +consists in adding artificial viscosity (``viscOption`` = 1). This is the default +method and is used for operational applications. The second option attempts +to adapt the Lax-Friedrich scheme (usually applied to grids) to the particle method +(``viscOption`` = 2). Finally, ``viscOption`` = 0 deactivates any viscosity force. + +SAMOS Artificial viscosity +"""""""""""""""""""""""""""""" + +The artificial viscosity force acting on particle :math:`k` then reads: + +.. math:: + \begin{aligned} + \mathbf{F_k^{visc}} = &- \frac{1}{2}\rho_0 C_{Lat} A_k^{\text{Lat}}\Vert\mathbf{d\bar{u}}_k\Vert^2 + \frac{\mathbf{d\bar{u}}_k}{\Vert\mathbf{d\bar{u}}_k\Vert}\\ + = & - \frac{1}{2}\rho_0 C_{Lat} A_k^{\text{Lat}}\Vert\mathbf{d\bar{u}}_k\Vert \mathbf{d\bar{u}}_k, + \end{aligned} + +where the velocity difference reads :math:`\mathbf{d\bar{u}}_k = \bar{\mathbf{u}}_k - \widehat{\bar{\mathbf{u}}}_k` (:math:`\widehat{\bar{\mathbf{u}}}_k` represents the averaged velocity of the neighbor particles and is practically the grid velocity interpolated at the particle position). +:math:`C_{Lat}` is a coefficient that controls the viscous force. +It would be the equivalent of :math:`C_{Drag}` in the case of the drag force. +:math:`C_{Lat}` is a numerical parameter that depends on the grid size. + +In this expression, let :math:`\mathbf{\bar{u}}_k^{n}` be the velocity at the beginning of the time step and +:math:`{\bar{\mathbf{u}}_k^{n+1}}^\blacktriangle` be the velocity +after adding the numerical viscosity. +In the norm term :math:`\Vert\mathbf{d\bar{u}}_k\Vert` the particle and grid velocity at the beginning of the time step are used. +This ensures no implicit relation on the norm term or on the average velocity :math:`\widehat{\bar{\mathbf{u}}}_k`. +In contrast, an implicit formulation is used in :math:`\mathbf{d\bar{u}}_k` because the new value of the velocity is used there. +The artificial viscosity force now reads: + +.. math:: + \mathbf{F_k^{visc}} = -\frac{1}{2}\rho_0 C_{Lat} A_k^{\text{Lat}}\Vert\bar{\mathbf{u}}_k^{n} - \widehat{\bar{\mathbf{u}}}_k^{n}\Vert + \left(\left.\bar{\mathbf{u}}_k^{n+1}\right.^\blacktriangle - \widehat{\bar{\mathbf{u}}}_k^{n}\right) + +Updating the velocity then gives: + +.. math:: + \left.\bar{\mathbf{u}}_k^{n+1}\right.^\blacktriangle = \frac{\bar{\mathbf{u}}_k^{n} - C_{vis}\widehat{\bar{\mathbf{u}}}_k^{n}}{1 + C_{vis}} + +with + +.. math:: + C_{vis} = \frac{1}{2}\rho_0 C_{Lat}A_k^{\text{Lat}} \Vert\bar{\mathbf{u}}_k^{n} - \widehat{\bar{\mathbf{u}}}_k^{n}\Vert\frac{\Delta t}{m}. + +This approach to stabilize the momentum equation (:eq:`eq-momentum-particle`) is not +optimal for several reasons. +Firstly, it introduces a new coefficient :math:`C_{vis}` which is not a physical +quantity and needs to be calibrated. + +Secondly, it artificially adds a force that should be described physically. +So it would be more interesting to take the physical force into account in the first +place. + +Potential solutions could be taking the physical shear force into account, +using for example the :math:`\mu`-I rheology (:cite:`GrEd2014, BaBaGr2016`).
+Another option would be to replace the artificial viscosity with a purely +numerical artifact aiming to stabilize the equations such as a SPH version of +the Lax-Friedrich scheme as presented in :cite:`AtSo2005`. + +Ata Artificial viscosity: an upwind method based on Lax-Friedrichs scheme +"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" + +Shallow Water Equations are well known for being hyperbolic transport equations. +They have the particularity of carrying discontinuities or shocks which will cause +numerical instabilities. + +A decentering in time allows to better capture the discontinuities. +This can be done in the manner of the Lax-Friedrich scheme as described in :cite:`AtSo2005`, +which is formally the same as adding a viscous force. Implementing it for the SPH method, +this viscous force applied on a given particle :math:`k` can be expressed as follows: + +.. math:: + \mathbf{F}_k^\text{viscosity} = \sum_{l} \frac{m_l}{\rho_l} \Pi_{kl} \boldsymbol{\nabla}W_{kl} + +with :math:`\Pi_{kl} = \lambda_{kl}(\mathbf{u}_l - \mathbf{u}_k) \cdot +\frac{\mathbf{r}_{kl}}{\vert\vert \mathbf{r}_{kl} \vert\vert}`, and +:math:`\boldsymbol{\nabla}W_{kl}` is the gradient of the kernel function and +is described in :ref:`DFAnumerics:SPH gradient`. + +:math:`\mathbf{u}_{kl} = \mathbf{u}_k - \mathbf{u}_l` is the relative velocity +between particle k and l, :math:`\mathbf{r}_{kl} = \mathbf{x}_k - \mathbf{x}_l` is +the vector going from particles :math:`l` to particle :math:`k` and +:math:`\lambda_{kl} = \frac{c_k+c_l}{2}` with :math:`c_k = \sqrt{gh_l}` +the wave speed. The :math:`\lambda_{kl}` is obtained by turning expressions +related to time and spatial discretization parameters into an expression +on maximal speed between both particles in the Lax Friedrich scheme. + +Due to the expression of the viscosity force, it makes sense to +compute it at the same place where the SPH pressure force are computed (for this reason, the +``viscOption`` = 2 corresponding to the "Ata" viscosity option is only available +in combination with the ``sphOption`` = 2). + Adding artificial viscosity -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +"""""""""""""""""""""""""""""" If the viscosity option (``viscOption``) is set to 1, artificial viscosity is added first, as described -in :ref:`DFAnumerics:Artificial viscosity` (this is the default option). With ``viscOption`` set to 0, no viscosity is added. Finally, if -``viscOption`` is set to 2, artificial viscosity is added during SPH force computation. (TODO add link to description) +in :ref:`DFAnumerics:SAMOS Artificial viscosity` (this is the default option). With ``viscOption`` set to 0, no viscosity is added. Finally, if +``viscOption`` is set to 2, artificial viscosity is added during SPH force computation +(so in :ref:`DFAnumerics:Account for driving forces` according to the :math:`\mathbf{F}_k^\text{viscosity}` +computed in :ref:`DFAnumerics:Ata Artificial viscosity: an upwind method based on Lax-Friedrichs scheme`) -Adding entrainment -~~~~~~~~~~~~~~~~~~~ -Entrainment is taken into account by first adding the component representing the loss of momentum due to -acceleration of the entrained mass :math:`- \overline{u}_{k,i}\,A^{\text{ent}}_{k}\,q^{\text{ent}}_{k}`. -Second by adding the force due to the need to break and compact the -entrained mass (:math:`F_{k,i}^{\text{ent}}`) as described in :ref:`DFAnumerics:Entrainment force`. +Curvature acceleration term +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. 
\label{sec-curvature-acc-term-estimation}
 
-Adding driving forces
-~~~~~~~~~~~~~~~~~~~~~~~~
-The driving forces -gravity force and lateral forces- are taken into account next. The velocity is updated explicitly.
+The last term of the particle momentum equation (:eq:`eq-momentum-particle`)
+as well as the effective gravity :math:`g^{\text{eff}}` are the final terms to be
+discretized before the numerical integration.
+In both of these terms, the remaining unknown is the curvature acceleration term
+:math:`\bar{\mathbf{u}}_k \cdot \frac{\mathrm{d}\mathbf{v}_{3,k}}{\mathrm{d}t}`.
+Using the forward Euler time discretization for the temporal derivative of the
+normal vector :math:`\mathbf{v}_{3,k}` gives:
+
+.. math::
+    \left.\frac{\mathrm{d}\mathbf{v}_{3,k}}{\mathrm{d}t}\right|^n \approx
+    \frac{\mathbf{v}_{3,k}^{n+1} - \mathbf{v}_{3,k}^n}{\Delta t}
+
+:math:`\mathbf{v}_{3,k}^n` is a known quantity, the normal vector of the bottom surface at
+:math:`\mathbf{x}_k^n`, which is interpolated from the grid normal vector values at the
+position of the particle :math:`k` at time :math:`t^n`.
+:math:`\mathbf{v}_{3,k}^{n+1}` is unknown since :math:`\mathbf{x}_k^{n+1}` is not known yet,
+hence we estimate :math:`\mathbf{x}_k^{n+1}` based on the position :math:`\mathbf{x}_k^n` and
+the velocity at :math:`t^n`:
+
+.. math::
+    \mathbf{x}_k^{n+1} = \mathbf{x}_k^n + \Delta t \left.\bar{\mathbf{u}}_k^{n+1}\right.^\blacktriangle
 
-Adding friction forces
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
+This position at :math:`t^{n+1}` is projected onto the topography and
+:math:`\mathbf{v}_{3,k}^{n+1}` can be interpolated from the grid normal vector values.
+
+Note that the curvature acceleration term is needed to compute the bottom pressure
+(:eq:`eq-pressure-distribution`), which is used for the bottom friction
+computation and for the pressure gradient computation.
+The curvature acceleration term can render the bottom pressure negative, which
+corresponds to a detachment of the particles from the bottom surface.
+In **com1DFA**, surface detachment is not allowed: if the pressure becomes
+negative, it is set back to zero, forcing the material to remain in contact with the
+topography.
+
+
+
+Account for entrainment
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Entrainment is taken into account by:
+
+* First adding the component representing the loss of momentum due to
+  acceleration of the entrained mass :math:`- \bar{\mathbf{u}}_{k}\,A^{\text{ent}}_{k}\,q^{\text{ent}}_{k}`.
+  The mass entrained by a particle :math:`k` during a time step :math:`\Delta t` reads:
+
+  .. math::
+    \mathrm{d}m_k^{n} = m_k^{n+1} - m_k^{n} = \Delta t \,A^{\text{ent}}_{k}\,q^{\text{ent}}_{k}
+
+  This directly gives the new mass of particle :math:`k`, :math:`m_k^{n+1}`:
+
+  .. math::
+    m_k^{n+1} = m_k^{n} + \mathrm{d}m_k^{n} = m_k^{n} + \Delta t \,A^{\text{ent}}_{k}\,q^{\text{ent}}_{k}
+
+  Implicitly updating the velocity leads to (if we call :math:`\bar{\mathbf{u}}_k^{n+1}`
+  the velocity before adding the momentum loss and
+  :math:`\left.\bar{\mathbf{u}}_k^{n+1}\right.^\blacktriangle` the velocity after):
+
+  .. math::
+    \left.\bar{\mathbf{u}}_k^{n+1}\right.^\blacktriangle = \bar{\mathbf{u}}_k^{n+1} \,\frac{m_k^{n}}{m_k^{n} + \mathrm{d}m_k^n} = \bar{\mathbf{u}}_k^{n+1} \,\frac{m_k^{n}}{m_k^{n+1}}
+
+* Second by adding the force due to the need to break and compact the
+  entrained mass (:math:`\mathbf{F}_k^{\text{ent}}`) as described in :ref:`DFAnumerics:Entrainment force`.
+
+  .. warning::
+    ToDO
+
+  .. 
math:: + \mathbf{F}_k^{\text{ent}} = -w_f\,(e_s+\,q_{k}^{\text{ent}}\,e_d)\mathbf{v}_1 + +Account for driving forces +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Adding the driving forces -gravity force and lateral forces- is done after adding +the artificial viscosity. +The velocity is updated as follows +(:math:`{\bar{\mathbf{u}}_k^{n+1}}^\bigstar` is the velocity after taking the +driving force into account): + +.. math:: + \begin{aligned} + {\bar{\mathbf{u}}_k^{n+1}}^\bigstar &= \left.\bar{\mathbf{u}}_k^{n+1}\right.^\blacktriangle + + \frac{\Delta t}{m_k}\mathbf{F}_{k}^{\text{drive}}\\ + &= \left.\bar{\mathbf{u}}_k^{n+1}\right.^\blacktriangle + + \frac{\Delta t}{m_k} \left(- m_k \, g^\text{eff}_k \, \boldsymbol{\nabla}_{s} h + + m_k \mathbf{g}_s - m_k \left( \left.\bar{\mathbf{u}}_k^{n+1}\right.^\blacktriangle \cdot \left . \frac{\mathrm{d}\mathbf{v}_{3,k}}{\mathrm{d}t}\right|^n \right)\mathbf{v}_{3,k}^n\right) + \end{aligned} + :label: eq-adding-driving-force + +Account for friction forces +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Both the bottom friction and resistance forces act against the flow. Two methods are available to add these forces in com1DFA. An implicit method: """""""""""""""""""" +If the velocity of the particle :math:`k` reads :math:`{\bar{\mathbf{u}}_k^{n+1}}^\bigstar` +after adding the driving forces, adding the friction force leads to: + .. math:: - \mathbf{u}_k^{new} = \frac{\mathbf{u}_k^{old}}{1 + \frac{C_{k}^{\text{fric}}\Delta t}{m_k}} + \bar{\mathbf{u}}_k^{n+1} = \frac{{\bar{\mathbf{u}}_k^{n+1}}^\bigstar}{1 + \frac{C_{k}^{\text{fric}}\Delta t}{m_k}} -where :math:`F_{k,i}^{\text{fric}} = C_{k}^{\text{fric}} u_{k,i}^{new} = F_{k,i}^{\text{res}} + F_{k,i}^{\text{bot}}` +where :math:`\mathbf{F}_k^{\text{fric}} = -C_{k}^{\text{fric}}{\bar{\mathbf{u}}_k^{n+1}}^\bigstar = \mathbf{F}_k^{\text{res}} + \mathbf{F}_k^{\text{bot}}` (the two forces are described in :ref:`DFAnumerics:Bottom friction force` and :ref:`DFAnumerics:Added resistance force`). This implicit method has a few draw-backs. First the flow does not start properly if the @@ -740,27 +933,134 @@ An explicit method: """""""""""""""""""" The method based on :cite:`MaVi2003` addresses these two issues. -The idea is that the friction forces only modify the magnitude of velocity and not the direction. This means dissipation, -so the friction force can not become a driving force. Moreover, the friction force magnitude depends on the particle state, -i.e. if it is flowing or at rest. -The friction force is expressed: +The idea is that the friction force acts against motion, hence it only affects the magnitude of the velocity +and can not be a driving force (:cite:`MaVi2003`). +Moreover, the friction force magnitude depends on the particle state, i.e. if it is +flowing or at rest. 
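+Before detailing this explicit treatment, the two updates introduced above (driving
+forces followed by the implicit friction step) can be summarised in a short sketch
+(hypothetical NumPy arrays and names, not the com1DFA implementation); the final
+comment recalls the stopping issue that motivates the explicit method:
+
+.. code-block:: python
+
+    import numpy as np
+
+    def add_driving_and_implicit_friction(u, fDrive, cFric, mPart, dt):
+        """Explicit driving-force update followed by the implicit friction step.
+
+        u: (nPart, 3) velocities after the viscosity step
+        fDrive: (nPart, 3) driving force as in the update above
+        cFric: (nPart,) friction coefficient C_k^fric, mPart: (nPart,) particle mass
+        """
+        # explicit update with the driving forces
+        uStar = u + dt * fDrive / mPart[:, None]
+        # implicit friction: divide by (1 + C_fric * dt / m); the speed only decays
+        # asymptotically, so a particle treated this way never stops completely
+        return uStar / (1.0 + cFric * dt / mPart)[:, None]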
+If the velocity of the particle :math:`k` reads :math:`{\bar{\mathbf{u}}_k^{n+1}}^\bigstar` +after adding the driving forces, adding the friction force leads, depending on the +sign of :math:`\frac{m_k \left\Vert{\bar{\mathbf{u}}_k^{n+1}}^\bigstar\right\Vert}{\Delta t} - \left\Vert\mathbf{F}_{k}^{\text{fric}}\right\Vert_{max}` +(where :math:`\left\Vert\mathbf{F}_{k}^{\text{fric}}\right\Vert_{max}` +depends on the chosen friction law introduced in :ref:`theorycom1DFA:Friction Model`), to: + +* :math:`\left\Vert\mathbf{F}^{\text{fric}}\right\Vert = \left\Vert\mathbf{F}_{k}^{\text{fric}}\right\Vert_\text{max}` and + :math:`\bar{\mathbf{u}}_k^{n+1} = {\bar{\mathbf{u}}_k^{n+1}}^\bigstar \left(1 - \frac{\Delta t}{m_k} \frac{\left\Vert\mathbf{F}_{k}^{\text{fric}}\right\Vert_\text{max}}{\left\Vert{\bar{\mathbf{u}}_k^{n+1}}^\bigstar\right\Vert}\right)`, + if :math:`\frac{m_k \left\Vert{\bar{\mathbf{u}}_k^{n+1}}^\bigstar\right\Vert}{\Delta t} > + \left\Vert\mathbf{F}_{k}^{\text{fric}}\right\Vert_\text{max}` + +* :math:`\left\Vert\mathbf{F}_{k}^{\text{fric}}\right\Vert \leq \left\Vert\mathbf{F}_{k}^{\text{fric}}\right\Vert_\text{max}` and the particle stops moving + :math:`\bar{\mathbf{u}}_k^{n+1} = 0` before the end of the time step, if + :math:`\frac{m_k \left\Vert{\bar{\mathbf{u}}_k^{n+1}}^\bigstar\right\Vert}{\Delta t} \leq \left\Vert\mathbf{F}_{k}^{\text{fric}}\right\Vert_\text{max}` + +This method prevents the friction force to become a driving force and nonphysically +change the direction of the velocity. +This would lead to oscillations of the particles instead of stopping. +Adding the friction force following this approach (:cite:`MaVi2003`) allows the +particles to start and stop flowing properly. + + +Reprojection +-------------- -.. math:: - \mathbf{F}_k^{\text{fric}} = -\|\mathbf{F}_{k}^{\text{fric}}\| \frac{\mathbf{u}_k}{\|\mathbf{u}_k\|} -with: +The last term in :eq:`eq-momentum-particle` (accounting for the curvature effects) +adds a non tangential component allowing +the new velocity to lie in a different plane than the one from the previous time step. +This enables the particles to follow the topography. +But because the curvature term was only based on an estimation +(see :ref:`DFANumerics:Curvature acceleration term`), +it can happen that the new particle position is not necessarily on the topography +and the new velocity does not necessarily lie in the tangent plane at this new +position. +Furthermore, in case of a strong convex curvature and high velocities, the particles +can theoretically be in a free fall state (detachment) as mentioned in +:ref:`Pressure distribution, thickness integrated pressure and pressure gradient`. +**com1DFA** does not allow detachment of the particles and the particles are +forced to stay on the topography. This consists in a limitation +of the model/method which will lead to nonphysical behaviors in special cases +(material flowing over a cliff). +In both of the previously mentioned cases, the particles positions are projected back +onto the topography and the velocity direction is corrected to be tangential to the +topography. +The position reprojection is done using an iterative method that attempts to conserve +the distance traveled by each particle between :math:`t^n` and :math:`t^{n+1}``. +The velocity reprojection changes the direction of the velocity but its magnitude is +conserved. -.. 
math:: - \|\mathbf{F}_{k}^{\text{fric}}\| \leq \|\mathbf{F}_{k}^{\text{fric}}\|_{max} +Neighbor search +------------------ -If the velocity of the particle ``k`` reads :math:`\mathbf{u}_k^{old}` after adding the driving forces, adding the -fiction force leads to : +The lateral pressure forces are computed via the SPH flow thickness gradient. +This method is based on particle interactions within a certain neighborhood, meaning that it +is necessary to keep track of all the particles within the neighborhood of each particle. +Computing the gradient of the flow thickness at a particle location, requires to +find all the particles in its surrounding. Considering the number of particles and +their density, computing the gradient ends up in computing a lot of +interactions and represents the most computationally expensive part of the dense +flow avalanche simulation. It is therefore important that the neighbor search is fast and efficient. +:cite:`IhOrSoKoTe2014` describe different grid neighbor search +methods. In com1DFA, the simplest method is used. The idea is to locate each +particle in a cell, this way, it is possible to keep track of the particles +in each cell. To find the neighbors of a particle, one only needs to read the +cell in which the particle is located (dark blue cell in :numref:`neighborSearch`), +find the direct adjacent cells in all directions (light blue cells) and +simply read all particles within those cells. This is very easily achieved +on grids because locating a particle in a cell is straightforward and +finding the adjacent cells is also easily done. -.. math:: - \mathbf{u}_{k} = \mathbf{u}_k^{old} (1 - \frac{\Delta t}{m} \frac{\|\mathbf{F}_{k}^{\text{fric}}\|}{\|\mathbf{u}^{old}_k\|}), - \quad \|\mathbf{F}_{k}^{\text{fric}}\| = \|\mathbf{F}_{k}^{\text{fric}}\|_{max} +.. _neighborSearch: + +.. figure:: _static/neighborSearch.png + :width: 90% + + Support grid for neighbor search: + if the cell side is bigger than the kernel length :math:`r_{kernel}` (red circle in the picture), + the neighbors for any particle in any given cell (dark blue square) + can be found in the direct neighborhood of the cell itself (light blue squares) + +.. _partInCell: + +.. figure:: _static/partInCell.png + :width: 90% + + The particles are located in the cells using + two arrays. indPartInCell of size number of cells + 1 + which keeps track of the number of particles in each cell + and partInCell of size number of particles + 1 which lists + the particles contained in the cells. +Particle splitting and merging +------------------------------- +There are two different approaches treating splitting of particles in com1DFA. +The first one only deals with splitting of particles with too much mass('split only'). The second approach, +"split/merge" approach aims at keeping a stable amount of particles within a given range. This is done in order to +guaranty a sufficient accuracy of the sph flow thickness gradient computation. -at the condition that :math:`1 \geq \frac{\Delta t}{m} \frac{\|\mathbf{F}_{k}^{\text{fric}}\|_{max}}{\|\mathbf{u}^{old}_k\|}`. -If on the contrary :math:`1 \leq \frac{\Delta t}{m} \frac{\|\mathbf{F}_{k}^{\text{fric}}\|_{max}}{\|\mathbf{u}^{old}_k\|}`, -the friction would change the velocity direction which is nonphysical. In this case, the particle will stop -before the end of the time step. This allows the particles to start and stop flowing properly. 
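+For illustration, a minimal sketch of the cell-based lookup described in the neighbor
+search section above, using the two indexing arrays of :numref:`partInCell`
+(hypothetical names and grid layout, not the com1DFA implementation); finding the
+closest neighbor when particles are merged (see below) relies on the same lookup:
+
+.. code-block:: python
+
+    import numpy as np
+
+    def get_neighbors(k, x, y, indPartInCell, partInCell, ncols, nrows, csz):
+        """Collect the particles located in the 3x3 cells around particle k (sketch only)."""
+        # cell indices of particle k on the support grid of cell size csz
+        col, row = int(x[k] / csz), int(y[k] / csz)
+        neighbors = []
+        for r in range(max(row - 1, 0), min(row + 2, nrows)):
+            for c in range(max(col - 1, 0), min(col + 2, ncols)):
+                ic = c + ncols * r
+                # particles stored for cell ic (CSR-style offsets in indPartInCell)
+                neighbors.extend(partInCell[indPartInCell[ic]:indPartInCell[ic + 1]])
+        return neighbors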
+Split (**default**) +~~~~~~~~~~~~~~~~~~~~ +If the ``splitOption`` is set to 0, particles are split because of snow entrainment. In this case, +particles that entrain snow grow, i.e. their mass increases. At one point the mass of the particles is considered to be +too big and this particle is split in two. The splitting operation happens if the mass of the +particle exceeds a threshold value (:math:`mPart > massPerPart \times thresholdMassSplit`), where ``thresholdMassSplit`` +is specified in the configuration file and ``massPerPart`` depends on the chosen ``massPerParticleDeterminationMethod`` +as defined here: :ref:`com1DFAAlgorithm:Initialize particles`. +When a particle is split a new child particle is created with the same properties as the parent apart from +mass and position. Both parent and child get half of the parent mass. The parent and child's position are +adjusted: the first / second is placed forward / backward in the direction of the velocity +vector at a distance :math:`distSplitPart \times rPart` of the initial parent position. Particles are considered to +have a circular basal surface :math:`A = \frac{m}{\rho} = \pi r^2`. + +Split and merge +~~~~~~~~~~~~~~~ +If the ``splitOption`` is set to 1 particles are split or merged in order to keep the particle count +as constant as possible within the kernel radius. +Assessing the number of particles within one kernel radius is done based on the particle area. Particles +are assumed to be cylindrical, i.e the base is a circle. For particle ``k`` we have :math:`A_k = \frac{m_k}{\rho}`. The area +of the support domain of the sph kernel function is :math:`\pi r_0^2`. The aim is to keep ``nPPK`` particles within +the kernel radius. The particles are split if the estimated number of particles per kernel radius :math:`\frac{\pi r_0^2}{A_k}` +falls below a given value of :math:`n_{PPK}^{min} = C_{n_{PPK}}^{min}n_{PPK}`. Particles are split using the same +method as in :ref:`DFAnumerics:Only split approach`. Similarly, particles are merged if the estimated +number of particles per kernel radius exceeds a given value :math:`n_{PPK}^{max} = C_{n_{PPK}}^{max}n_{PPK}`. +In this case particles are merged with their closest neighbor. The new position and velocity is the mass +averaged one. The new mass is the sum. Here, two coefficients ``C_{n_{PPK}}^{min}`` and ``C_{n_{PPK}}^{max}`` were +introduced. A good balance needs to be found for the coefficients so that the particles are not constantly split or +merged but also not too seldom. The split and merge steps happen only once per time step and per particle. diff --git a/docs/references_all.bib b/docs/references_all.bib index 28f6772c6..1e1cc12f7 100644 --- a/docs/references_all.bib +++ b/docs/references_all.bib @@ -1098,6 +1098,16 @@ @ARTICLE{BaTaWu2004 publisher = {Elsevier} } +@ARTICLE{BaBaGr2016, + title={A two-dimensional depth-averaged ${\it\mu}(I)$-rheology for dense granular avalanches}, + volume={787}, + DOI={10.1017/jfm.2015.684}, + journal={Journal of Fluid Mechanics}, + publisher={Cambridge University Press}, + author={Baker, J. L. and Barker, T. and Gray, J. M. N. T.}, + year={2016}, + pages={367–395}} + @ARTICLE{BaBiCaNaPa2005, author = {M. Barbolini and A. Biancardi and F. Cappabianca and L. Natale and M. Pagliardi}, @@ -6979,6 +6989,17 @@ @ARTICLE{GrCh2006 publisher = {Cambridge Univ Press} } +@article{GrEd2014, +author = {Gray, J. 
and Edwards, Andrew}, +year = {2014}, +month = {09}, +pages = {297-329}, +title = {A depth-averaged μ(I)-rheology for shallow granular free-surface flows}, +volume = {755}, +journal = {Journal of Fluid Mechanics}, +doi = {10.1017/jfm.2014.450} +} + @ARTICLE{GrTh2005, author = {Gray, J. and Thornton, AR and Gray, J. and Thornton, AR}, title = {A theory for particle size segregation in shallow granular free-surface @@ -7546,6 +7567,45 @@ @ARTICLE{HaAlNySeLa2007 publisher = {Elsevier} } +@article{HaHy1983, +title = {Self adjusting grid methods for one-dimensional hyperbolic conservation laws}, +journal = {Journal of Computational Physics}, +volume = {50}, +number = {2}, +pages = {235-269}, +year = {1983}, +issn = {0021-9991}, +doi = {https://doi.org/10.1016/0021-9991(83)90066-9}, +url = {https://www.sciencedirect.com/science/article/pii/0021999183900669}, +author = {Ami Harten and James M Hyman}, +abstract = {It is shown how to automatically adjust the grid to follow the dynamics of the +numerical solution of hyperbolic conservation laws. The grid motion is determined by averaging +the local characteristic velocities of the equations with respect to the amplitudes of the +signals. The resulting algorithm is a simple extension of many currently popular Godunov-type +methods. Computer codes using one of these methods can be easily modified to add the moving +mesh as an option. Numerical examples are given that illustrate the improved accuracy of +Godunov's and Roe's methods on a self-adjusting mesh.} +} + +@article{HaLaLe1983, +author = {Harten, Amiram and Lax, Peter D. and Leer, Bram van}, +title = {On Upstream Differencing and Godunov-Type Schemes for Hyperbolic Conservation Laws}, +journal = {SIAM Review}, +volume = {25}, +number = {1}, +pages = {35-61}, +year = {1983}, +doi = {10.1137/1025002}, +URL = {https://doi.org/10.1137/1025002}, +eprint = {https://doi.org/10.1137/1025002}, +abstract = {This paper reviews some of the recent developments in upstream difference +schemes through a unified representation, in order to enable comparison between the +various schemes. Special attention is given to the Godunov-type schemes that result +from using an approximate solution of the Riemann problem. For schemes based on flux +splitting, the approximate Riemann solution can be interpreted as a solution of the +collisionless Boltzmann equation. } +} + @ARTICLE{HaSeNyMiBrLiFoBeMa2004, author = {Haflidason, H. and Sejrup, H.P. and Nyg{\aa}rd, A. and Mienert, J. and Bryn, P. and Lien, R. and Forsberg, C.F. and Berg, K. and Masson, @@ -11530,6 +11590,13 @@ @ARTICLE{LCBiFoEtFa2002 publisher = {IFP} } +@inproceedings{Le1990, + title={Numerical methods for conservation laws}, + author={Randall J. LeVeque}, + year={1990}, + doi = {http://dx.doi.org/10.1007/978-3-0348-8629-1}, +} + @ARTICLE{LFBoArRo2009, author = {Le Friant, A. and Boudon, G. and Arnulf, A. and Robertson, R.E.A.}, title = {Debris avalanche deposits offshore St. Vincent (West Indies): Impact @@ -13731,6 +13798,17 @@ @ARTICLE{MiNo2006 publisher = {Taylor \& Francis} } +@article{MoVi2000, +author = {Moussa, B. and Vila, Jean Paul}, +year = {2000}, +month = {03}, +pages = {}, +title = {Convergence of SPH Method for Scalar Nonlinear Conservation Laws}, +volume = {37}, +journal = {Siam Journal on Numerical Analysis - SIAM J NUMER ANAL}, +doi = {10.1137/S0036142996307119} +} + @ARTICLE{MoWiBaDo2003, author = {Moe, A. and Wieshofer, S. and Bakkeh{\o}i, S. 
and Domaas, U.}, title = {Avalanche run-out on counter-slopes}, diff --git a/docs/theoryCom1DFA.rst b/docs/theoryCom1DFA.rst index f73a0a4f7..de8331445 100644 --- a/docs/theoryCom1DFA.rst +++ b/docs/theoryCom1DFA.rst @@ -1,201 +1,262 @@ com1DFA DFA-Kernel theory ============================ -.. warning:: - - This theory has not been fully reviewed yet. Read its content with a critical mind. - Governing Equations for the Dense Flow Avalanche ------------------------------------------------------ -The governing equations of the dense flow avalanche are derived from the -incompressible mass and momentum balance on a Lagrange control volume (:cite:`Zw2000,ZwKlSa2003`). +The derivation is based on the thickness integration of the three-dimensional Navier-Stokes equations, +using a Lagrangian approach with a terrain following coordinate system (:cite:`Zw2000,ZwKlSa2003`). +The equations are simplified using the assumption of shallow flow on a mildly curved topography, +meaning flow thickness is considerably smaller than the length and width of the avalanche and +it is considerably smaller than the topography curvature radius. + +We consider snow as the material, however the choice of material does not influence the derivation in the first place. +We assume constant density :math:`\rho_0` and a flow on a surface :math:`\mathcal{S}`, subjected to the gravity force +and friction on the surface :math:`\mathcal{S}`. +If needed, additional processes such as entrainment or other external effects can be taken into account. +The mass conservation equation applied to a Lagrangian volume of material :math:`V(t)` reads: -Mass balance: +Mass balance ~~~~~~~~~~~~~~~ .. math:: - \frac{d}{dt} \int\limits_{V(t)} \rho_0 \,\mathrm{d}V = \rho_0 \frac{dV(t)}{dt} = - \oint\limits_{\partial V(t)} q^{\text{ent}} \,\mathrm{d}A - :label: mass-balance1 + \frac{\mathrm{d}}{\mathrm{d}t}\underbrace{\int\limits_{V(t)} \rho_0 \,\mathrm{d}V}_{m(t)} = \rho_0 \frac{\mathrm{d}V(t)}{\mathrm{d}t} = + \oint\limits_{\partial V(t)} q^{\text{ent}} \,\mathrm{d}A, + :label: eq-mass-balance1 -Where :math:`q^{\text{ent}}` represents the snow entrainment rate. +where the source term :math:`q^{\text{ent}}` represents the snow entrainment rate (incoming mass flux). -Momentum balance: +Momentum balance ~~~~~~~~~~~~~~~~~~~ .. math:: - \rho_0 \frac{d}{dt} \int\limits_{V(t)} u_i \,\mathrm{d}V = \oint\limits_{\partial V(t)} - \sigma^{\text{tot}}_{ij}n_j \,\mathrm{d}A + \rho_0 \int\limits_{V(t)} g_i \,\mathrm{d}V, \quad i=(1,2,3) - :label: momentum-balance1 + \frac{\mathrm{d}}{\mathrm{d}t}\int\limits_{V(t)} \rho_0 \, \mathbf{u}\,\mathrm{d}V = \underbrace{\oint\limits_{\partial V(t)} + \boldsymbol{\sigma}\cdot\mathbf{n}\,\mathrm{d}A}_{\text{surface forces}} + + \underbrace{\int\limits_{V(t)} \rho_0 \, \mathbf{g}\,\mathrm{d}V}_{\text{body force}} + + \, \mathbf{F}^\text{ent} + \mathbf{F}^\text{res}, + :label: eq-momentum-balance1 -We introduce the volume average of a quantity :math:`P(\mathbf{x},t)`: +where :math:`\mathbf{u}` is the fluid velocity and :math:`\mathbf{g}` the gravity acceleration. +:math:`\boldsymbol{\sigma} = -pI+\boldsymbol{\mathrm{T}}` represents the +stress tensor, where :math:`I` is the identity tensor, :math:`p` the pressure +and :math:`\boldsymbol{\mathrm{T}}` the deviatoric part of the stress tensor. +:math:`\mathbf{n}` is the normal vector to math:`\partial V(t)`. 
:math:`\mathbf{F}^{\text{ent}}` represents the force required to break the +entrained snow from the ground and to compress it (since the dense-flow +bulk density is usually larger than the density of the entrained snow, +i.e. :math:`\rho_{\text{ent}}<\rho`) and :math:`\mathbf{F}^{\text{res}}` +represents the resistance force due to obstacles (for example trees). -.. math:: - \overline{P}(\mathbf{x},t) = \frac{1}{V(t)} \int\limits_{V(t)} P(\mathbf{x},t) \,\mathrm{d}V -.. :label: volume-average +Hypothesis +~~~~~~~~~~~ -and split the area integral into : +* We consider in the following a shallow flow on moderately curved surfaces. This means + that the aspect ratio, :math:`\varepsilon = H / L`, of the characteristic length L + of the avalanche over its characteristic thickness H stays small. -.. math:: - \oint\limits_{\partial V(t)} \sigma^{\text{tot}}_{ij}n_j \,\mathrm{d}A = - \oint\limits_{\partial V(t)} \sigma_{ij}n_j \,\mathrm{d}A + F_i^{\text{ent}} + F_i^{\text{res}}, \quad i=(1,2,3) -.. :label: area-integral +.. _fig-characteristic_size: -:math:`F_i^{\text{ent}}` represents the force required to break the -entrained snow from the ground and to compress it (since the dense-flow -bulk density is usually larger than the density of the entrained snow, -i.e. :math:`\rho_{\text{ent}}<\rho`) and :math:`F_i^{\text{res}}` -represents the resistance force due to obstacles (for example trees). -This leads to in :eq:`momentum-balance1`: +.. figure:: _static/characteristic_size.png + :width: 90% -.. math:: - \rho_0 \frac{dV(t) \overline{u}_i}{dt} = \rho_0 V \frac{d\overline{u}_i}{dt} + - \rho_0 \overline{u}_i \frac{dV}{dt} = \oint\limits_{\partial V(t)} \sigma_{ij}n_j - \,\mathrm{d}A + \rho_0 V g_i + F_i^{\text{ent}} + F_i^{\text{res}}, \quad i=(1,2,3) -.. :label: momentum-balance2 + Characteristic size of the avalanche along its path (from :cite:`Zw2000`, modified) -Using the mass balance equation :eq:`mass-balance1`, we get: +* A control volume :math:`V(t)` is assumed to be a small prism shape extending from the bottom surface :math:`\mathcal{S}_b` (lying on the topography + :math:`\mathcal{S}`) up to the free surface in the surface normal direction :math:`\mathbf{N^b}` as + illustrated in :numref:`small-lagrange`. + Note that the bottom surface :math:`\mathcal{S}_b` of area :math:`A^b` has no predefined shape. + The octagonal shape used in :numref:`small-lagrange` is just one possible option. -.. math:: - \rho_0 V \frac{d\overline{u}_i}{dt} = \oint\limits_{\partial V(t)} \sigma_{ij}n_j \,\mathrm{d}A - + \rho_0 V g_i + F_i^{\text{ent}} + F_i^{\text{res}} - \overline{u}_i \oint\limits_{\partial V(t)} q^{\text{ent}} \,\mathrm{d}A, \quad i=(1,2,3) - :label: momentum-balance3 -Boundary conditions: -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. _small-lagrange: -The free surface is defined by : +.. figure:: _static/smallLagrange.png + :width: 90% - .. math:: F_s(\mathbf{x},t) = z-s(x,y,t)=0 + Small Lagrangian prism-like Control volume -The bottom surface is defined by : +Choice of the coordinate system and thickness averaged quantities +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - .. math:: F_b(\mathbf{x}) = z-b(x,y)=0 +The normal vector :math:`\mathbf{N^b}` to the bottom surface is pointing upwards +whereas :math:`\mathbf{n^b}=-\mathbf{N^b}` is the bottom normal vector to the Lagrangian +control volume (pointing out of the volume). -The boundary conditions at the free surface and bottom of the flow read: +.. 
math:: + V(t) = \int\limits_{V(t)}\,\mathrm{d}V = \int\limits_{\mathcal{S}_b} + \left(\int\limits_b^s\det(\mathbf{J})\,\mathrm{d}x_3\right)\,\mathrm{d}A + :label: eq-lagrange-volume + +where :math:`\mathbf{J}` is the transformation matrix from the Cartesian +coordinate system to the Natural coordinate system (NCS). +The NCS is an orthonormal +coordinate system :math:`(\mathbf{v_1},\mathbf{v_2},\mathbf{v_3})` aligned +with the bottom surface. :math:`\mathbf{v_3}=\mathbf{N^b}=-\mathbf{n^b}` is the normal +vector to the bottom surface pointing upwards. +:math:`\mathbf{v_1}` is pointing in the direction of the thickness integrated fluid velocity +:math:`\overline {\mathbf{u}}` (introduced below). .. math:: - \left\{\begin{aligned} - &\frac{dF_s}{dt} = \frac{\partial F_s}{\partial t} + u_i\frac{\partial F_s}{\partial x_i} =0 \quad & \mbox{at }F_s(\mathbf{x},t) =0 \quad & \mbox{Kinematic BC (Material boundary)}\\ - &\sigma_{ij}n_j = 0 \quad & \mbox{at }F_s(\mathbf{x},t) =0 \quad & \mbox{Dynamic BC (Traction free surface)}\\ - &u_in_i = 0 \quad & \mbox{at }F_b(\mathbf{x},t) =0 \quad & \mbox{Kinematic BC (No detachment)}\\ - &\tau^{(b)}_i = f(\sigma^{(b)},\overline{u},\overline{h},\rho_0,t,\mathbf{x})\quad & \mbox{at }F_b(\mathbf{x},t) =0\quad & \mbox{Dynamic BC (Chosen friction law)} - \end{aligned} - \right. - :label: boundary-conditions + \mathbf{v_1} = \frac{\bar{\mathbf{u}}} + {\left\Vert\bar{\mathbf{u}}\right\Vert},\quad \mathbf{v_2} = + \frac{\mathbf{v_3}\wedge\mathbf{v_1}}{\left\Vert + \mathbf{v_3}\wedge\mathbf{v_1}\right\Vert}, + \quad \mathbf{v_3} = \mathbf{N^b} + :label: eq-natural-coordinate-system -:math:`\sigma^{(b)}_i = (\sigma_{kl}n_ln_k)n_i` represents the normal stress at the bottom and -:math:`\tau^{(b)}_i = \sigma_{ij}n_j - \sigma^{(b)}_i` represents the shear stress at the bottom surface. -:math:`f` describes the chosen friction model and are described in :ref:`theoryCom1DFA:Friction Model`. -The normals at the free surface (:math:`n_i^{(s)}`) and bottom surface (:math:`n_i^{(b)}`) are: +In the case of shallow flow on moderately curved surfaces, :math:`\det(\mathbf{J}) = (1 - +\kappa_1 x_3)(1 - \kappa_2 x_3) \approx1`. :math:`\kappa_{\{1,2\}}` represent +the surface curvature in :math:`\mathbf{v}_{\{1,2\}}` directions and :math:`x_3` +is the elevation from the bottom surface in the direction :math:`\mathbf{N^b}`. +This approximation is valid if the curvature radius is much larger then the +flow thickness :math:`h`. In this case, the control volume reads: .. math:: - n_i^{(s,b)} = \frac{\partial F_{s,b}}{\partial x_i}\left(\frac{\partial F_{s,b}}{\partial x_j} - \frac{\partial F_{s,b}}{\partial x_j}\right)^{-1/2} -.. :label: surface-normals + V(t) \approx \int\limits_{\mathcal{S}_b}\!\! + \underbrace{\int\limits_b^s\,\mathrm{d}x_3}_{h(t)}\,\mathrm{d}A + :label: eq-lagrange-volume2 -Choice of the coordinate system: -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The previous equations will be developed in the orthonormal coordinate -system :math:`(B,\mathbf{v_1},\mathbf{v_2},\mathbf{v_3})`, further -referenced as Natural Coordinate System (NCS). In this NCS, -:math:`\mathbf{v_1}` is aligned with the velocity vector at the bottom -and :math:`\mathbf{v_3}` with the normal to the slope, i.e.: +We introduce the following average of a quantity (where :math:`f` is a scalar or vector function): .. math:: - \mathbf{v_1} = \frac{\mathbf{u}}{\left\Vert \mathbf{u}\right\Vert},\quad \mathbf{v_2} = \mathbf{v_3}\wedge\mathbf{v_1}, - \quad \mathbf{v_3} = \mathbf{n^{(b)}} -.. 
:label: natural-coordinate-system + \begin{aligned} + \widetilde{f} &= \frac{1}{V(t)}\int\limits_{V(t)} f\,\mathrm{d}V\\ + \widehat{f} &= \frac{1}{A^b(t)}\int\limits_{\mathcal{S}_b} f\,\mathrm{d}A\\ + \bar{f} &= \frac{1}{h(t)}\int\limits_{0}^{h(t)} f\,\mathrm{d}x_3 + \end{aligned} + \quad\quad \text{and} \quad \quad + \begin{aligned} + \widetilde {f}(x_3) & + \approx \frac{1}{A^b(\widehat{h}-x_3)}\int\limits_{\mathcal{S}_b} + \left(\int\limits_{x_3}^{h(t)} f\,\mathrm{d}x_3\right)\,\mathrm{d}A \\ + \bar{f}(x_3) &= \frac{1}{(h-x_3)}\int\limits_{x_3}^{h(t)} f\,\mathrm{d}x_3. + \end{aligned} + +Note that :math:`\widetilde {f}(0)=\widetilde {f}` and :math:`\bar{f}(0)=\bar{f}`. +When the control volume goes to 0, i.e. basal area goes to a point, +:math:`\widetilde {f}\xrightarrow{A^b\xrightarrow{}0}\bar{f}` +and :math:`{\widehat{f}\xrightarrow{A^b\xrightarrow{}0}f}`. -The origin :math:`B` of the NCS is attached to the slope. This choice -leads to: +The NCS has some interesting properties that will be useful for projecting and solving the equations. +Because of the orthogonality of this NCS, we have +:math:`\mathbf{v}_i\cdot\mathbf{v}_j = \delta_{ij},\, \{i,j\}\in \{1,2,3\}^2` +which gives after time derivation: .. math:: - n^{(b)}_i = \delta_{i3}, \quad \left.\frac{\partial b}{\partial x_i}\right\rvert_{\mathbf{0}} = 0\quad - \mbox{for} \quad i=(1,2),\quad \mbox{and} \quad u^{(b)}_2 = u^{(b)}_3 = 0 -.. :label: NCS-consequence + \frac{\mathrm{d}\mathbf{v}_i\cdot\mathbf{v}_j}{\mathrm{d}t} = + \mathbf{v}_i\cdot\frac{\mathrm{d}\mathbf{v}_j}{\mathrm{d}t} + + \frac{\mathrm{d}\mathbf{v}_i}{\mathrm{d}t}\cdot\mathbf{v}_j = 0, + :label: eq-natural-coordinate-system-identity-base -Thickness averaged equations: -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -In this NCS and considering a prism-like Control volume, the volume -content :math:`V(t) = A_b(t)\overline{h}` is obtained by multiplication -of the basal area of the prism, :math:`A_b`, with the averaged value of -the flow thickness, +meaning that: .. math:: - \overline{h} = \frac{1}{A_b(t)}\int\limits_{A_b(t)} [s(\mathbf{x})-b(\mathbf{x})]\,\mathrm{d}A = \frac{1}{A_b(t)}\int\limits_{A_b(t)} h(\mathbf{x})\,\mathrm{d}A,\qquad - \overline{u}_i = \frac{1}{V(t)}\int\limits_{V(t)} u_i(\mathbf{x})\,\mathrm{d}V - :label: hmean-umean + \left\{ + \begin{aligned} + \frac{\mathrm{d}\mathbf{v}_i}{\mathrm{d}t}\cdot\mathbf{v}_i &= 0 + \implies \frac{\mathrm{d}\mathbf{v}_i}{\mathrm{d}t} \perp \mathbf{v}_i\\ + \frac{\mathrm{d}\mathbf{v}_i}{\mathrm{d}t}\cdot\mathbf{v}_j &= + -\mathbf{v}_i\cdot\frac{\mathrm{d}\mathbf{v}_j}{\mathrm{d}t},\quad i \neq j. + \end{aligned} + \right. + :label: eq-natural-coordinate-system-identity-1 +It is possible to express :math:`\frac{\mathrm{d}\mathbf{v}_1}{\mathrm{d}t}` in terms +of :math:`(\mathbf{v}_1, \mathbf{v}_2, \mathbf{v}_3)` and using orthogonality +of :math:`\frac{\mathrm{d}\mathbf{v}_i}{\mathrm{d}t}` and :math:`\mathbf{v}_i`: -.. _small-lagrange: +.. math:: + \frac{\mathrm{d}\mathbf{v}_1}{\mathrm{d}t} = \alpha_i\mathbf{v}_i + = \cancelto{\mathbf{0}}{\alpha_1\mathbf{v}_1} + \alpha_2\mathbf{v}_2 + \alpha_3\mathbf{v}_3, \quad \alpha_i = + \frac{\mathrm{d}\mathbf{v}_1}{\mathrm{d}t}\cdot\mathbf{v}_i + :label: eq-v1-in-ncs -.. figure:: _static/smallLagrange.png - :width: 90% +The derivative of the thickness integrated velocity decomposes to: + +.. 
math:: + \frac{\mathrm{d}\bar{\mathbf{u}}}{\mathrm{d}t} = + \frac{\mathrm{d}\bar{u}_1\mathbf{v}_1}{\mathrm{d}t} = + \bar{u}_1\frac{\mathrm{d}\mathbf{v}_1}{\mathrm{d}t} + + \frac{\mathrm{d}\bar{u}_1}{\mathrm{d}t}\mathbf{v}_1 = + \bar{u}_1(\alpha_2\mathbf{v}_2 + \alpha_3\mathbf{v}_3) + + \frac{\mathrm{d}\bar{u}_1}{\mathrm{d}t}\mathbf{v}_1 + :label: eq-du-in-ncs + + +Boundary conditions +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To complete the conservation :eq:`eq-mass-balance1` and :eq:`eq-momentum-balance1` the following boundary conditions at the bottom (:math:`\mathcal{S}_b`) and free (:math:`\mathcal{S}_{fs}`) surfaces are introduced. +:math:`\boldsymbol{\sigma^s} = \boldsymbol{\sigma}\cdot\mathbf{n^s}`, respectively :math:`\boldsymbol{\sigma^b} = \boldsymbol{\sigma}\cdot\mathbf{n^b}`, represent the restriction of :math:`\boldsymbol{\sigma}` +to the free surface :math:`\mathcal{S}_{fs}`, respectively the bottom surface :math:`\mathcal{S}_b`: + +- traction free free-surface: :math:`\boldsymbol{\sigma_s}\cdot\mathbf{n_s} = \mathbf{0}` on :math:`\mathcal{S}_{fs}` + +- impenetrable bottom surface without detachment, :math:`\mathbf{u^b}\cdot\mathbf{n^b} = \mathbf{0}` on :math:`\mathcal{S}_{b}` + +- bottom friction law: :math:`\boldsymbol{\tau^b} = \boldsymbol{\sigma^b}\cdot\mathbf{n^b}-((\boldsymbol{\sigma^b}\cdot\mathbf{n^b})\cdot\mathbf{n^b})\mathbf{n^b}=\mathbf{f}(\boldsymbol{\sigma^b},\,\bar{\mathbf{u}},\,h,\,\rho_0,\,t,\,\mathbf{x}) = -f(\boldsymbol{\sigma^b},\,\bar{\mathbf{u}},\,h,\,\rho_0,t,\,\mathbf{x})\mathbf{v}_1` on :math:`\mathcal{S}_{b}` - Small Lagrangian prism-like Control volume -Entrainment: -""""""""""""" +Entrainment +~~~~~~~~~~~~ The snow entrainment is either due to plowing at the front of the avalanche or to erosion at the bottom. The entrainment rate at the front :math:`q^{\text{plo}}` can be expressed as a function of the properties of the entrained snow (density :math:`\rho_{\text{ent}}` and snow thickness :math:`h_{\text{ent}}`), the velocity of the avalanche at the -front :math:`\overline{\mathbf{u}}` and length :math:`w_f` of the front (measured perpendicularly -to the flow velocity :math:`\overline{\mathbf{u}}`). It obviously only happens on the front of +front :math:`\bar{\mathbf{u}}` and length :math:`w_f` of the front (measured perpendicularly +to the flow velocity :math:`\bar{\mathbf{u}}`). It obviously only happens on the front of the avalanche: .. math:: \oint\limits_{\partial V(t)} q^{\text{plo}}\,\mathrm{d}A = \int\limits_{l_{\text{front}}}\int_b^s q^{\text{plo}}\, - \mathrm{d}{l}\,\mathrm{d}{z} = \rho_{\text{ent}}\,w_f\,h_{\text{ent}}\,\left\Vert \overline{\mathbf{u}}\right\Vert + \mathrm{d}{l}\,\mathrm{d}{z} = \rho_{\text{ent}}\,w_f\,h_{\text{ent}}\,\left\Vert \bar{\mathbf{u}}\right\Vert :label: ploughing The entrainment rate at the bottom :math:`q^{\text{ero}}` can be expressed as a function of the -bottom area :math:`A_b` of the control volume, the velocity of the avalanche :math:`\overline{\mathbf{u}}`, -the bottom shear stress :math:`\tau^{(b)}` and the specific erosion energy :math:`e_b`: +bottom area :math:`A_b` of the control volume, the velocity of the avalanche :math:`\bar{\mathbf{u}}`, +the bottom shear stress :math:`\boldsymbol{\tau^b}` and the specific erosion energy :math:`e_b`: .. 
math:: \oint\limits_{\partial V(t)} q^{\text{ero}}\,\mathrm{d}A = \int\limits_{A_b} q^{\text{ero}}\, - \mathrm{d}A = A_b\,\frac{\tau^{(b)}}{e_b}\,\left\Vert \overline{\mathbf{u}}\right\Vert + \mathrm{d}A = A_b\,\frac{\boldsymbol{\tau^b}\cdot\mathbf{v}_1}{e_b}\,\left\Vert \bar{\mathbf{u}}\right\Vert :label: erosion This leads in the mass balance :eq:`mass-balance1` to : .. math:: - \frac{\mathrm{d}V(t)}{\mathrm{d}t} = \frac{\mathrm{d}(A_b\overline{h})}{\mathrm{d}t} - = \frac{\rho_{\text{ent}}}{\rho_0}\,w_f\,h_{\text{ent}}\,\left\Vert \overline{\mathbf{u}}\right\Vert + - \frac{A_b}{\rho_0}\,\frac{\tau^{(b)}}{e_b}\,\left\Vert \overline{\mathbf{u}}\right\Vert + \frac{\mathrm{d}V(t)}{\mathrm{d}t} = \frac{\mathrm{d}(A_bh)}{\mathrm{d}t} + = \frac{\rho_{\text{ent}}}{\rho_0}\,w_f\,h_{\text{ent}}\,\left\Vert \bar{\mathbf{u}}\right\Vert + + \frac{A_b}{\rho_0}\,\frac{\boldsymbol{\tau^b}\cdot\mathbf{v}_1}{e_b}\,\left\Vert \bar{\mathbf{u}}\right\Vert :label: mass-balance2 -The force :math:`F_i^{\text{ent}}` required to break the entrained snow +The force :math:`\mathbf{F}^{\text{ent}}` required to break the entrained snow from the ground and to compress it is expressed as a function of the required breaking energy per fracture surface unit :math:`e_s` (:math:`J.m^{-2}`), the deformation energy per entrained mass element :math:`e_d` (:math:`J.kg^{-1}`) and the entrained snow thickness (:cite:`Sa2007,SaFeFr2008,FiFrGaSo2013`): -.. math:: F_i^{\text{ent}} = -w_f\,(e_s+\,q^{\text{ent}}\,e_d) +.. math:: + \mathbf{F}^{\text{ent}} = -w_f\,(e_s+\,q^{\text{ent}}\,e_d)\mathbf{v}_1 + :label: force-entrainment -Resistance: -""""""""""""" +Resistance +~~~~~~~~~~~ -The force :math:`F_i^{\text{res}}` due to obstacles is expressed -as a function of the characteristic diameter :math:`\overline{d}` and height +The force :math:`\mathbf{F}^{\text{res}}` due to obstacles is expressed +as a function of the characteristic diameter :math:`\bar{d}` and height :math:`h_{\text{res}}` of the obstacles, the spacing :math:`s_{\text{res}}` between the obstacles and an empirical coefficient :math:`c_w` (see :numref:`f-res`). The effective height :math:`h^{\text{eff}}` -is defined as :math:`\min(\overline{h}, h_{res} )`: +is defined as :math:`\min(h, h_{res} )`: .. math:: - F_i^{\text{res}} = -(\frac{1}{2}\,\overline{d}\,c_w/s^2_{\text{res}})\,\rho_0\,A\, - h^{\text{eff}}\,\overline{u}^2\, - \frac{\overline{u}_i}{\|\overline{u}\|} + \mathbf{F}^{\text{res}} = -(\frac{1}{2}\,\bar{d}\,c_w/s^2_{\text{res}})\,\rho_0\,A\, + h^{\text{eff}}\,\bar{u}^2\,\mathbf{v}_1 .. _f-res: @@ -206,217 +267,313 @@ is defined as :math:`\min(\overline{h}, h_{res} )`: Resistance force due to obstacles (from :cite:`FiKo2013`) +Constitutive relation: friction force +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Surface integral forces: -""""""""""""""""""""""""" - -The surface integral is split in three terms, an integral over -:math:`A_b` the bottom :math:`x_3 = b(x_1,x_2)`, :math:`A_s` the top -:math:`x_3 = s(x_1,x_2,t)` and :math:`A_h` the lateral surface. -Introducing the boundary conditions :eq:`boundary-conditions` leads to: +Up to now in the derivations, the bottom shear stress :math:`\boldsymbol{\tau^b}` is unknown. +To close the momentum equation, a constitutive equation describing the basal shear stress tensor +:math:`\boldsymbol{\tau^b}` as a function of the avalanche flow state is required: .. 
math:: - \begin{aligned} - \oint\limits_{\partial{V(t)}}\sigma_{ij}n_j\,\mathrm{d}A & = - \int\limits_{A_b}\underbrace{\sigma_{ij}\,n_j^{(b)}}_{-\sigma_{i3}}\,\mathrm{d}A + \int\limits_{A_s}\underbrace{\sigma_{ij}\,n_j^{(s)}}_{0}\,\mathrm{d}A + \int\limits_{A_h}\sigma_{ij}\,n_j\,\mathrm{d}A\\ - &= -A_b\overline{\sigma}_{i3}^{(b)} + \oint\limits_{\partial A_b}\left(\int_b^s\sigma_{ij}\,n_j\,\mathrm{d}x_3\right)\,\mathrm{d}l - \end{aligned} -.. :label: surface forces + \boldsymbol{\tau^b} = + \mathbf{f}(\boldsymbol{\sigma^b},\bar{\mathbf{u}},h,\rho_0,t,\mathbf{x}) + :label: eq-bottom-frict-law -Which simplifies the momentum balance :eq:`momentum-balance3` to: +.. % where $\boldsymbol{\sigma^b}$ represents the normal component of the stress tensor at the bottom, +.. % $\bar{\mathbf{u}}$ the thickness average velocity, $h$ the flow thickness $\rho_0$ the density of the material, +.. % $t$ and $\mathbf{x}$ the time and position vector. -.. math:: - \begin{aligned} - \rho_0 V \frac{d\overline{u}_i}{dt} = & \oint\limits_{\partial A_b}\left(\int_b^s\sigma_{ij}\,n_j\, - \mathrm{d}x_3\right)\,\mathrm{d}l -A_b\overline{\sigma}_{i3}^{(b)} + \rho_0 V g_i + F_i^{\text{ent}} + - F_i^{\text{res}} - \overline{u}_i \oint\limits_{\partial V(t)} q^{\text{ent}} \,\mathrm{d}A,\\ - &\quad i=(1,2,3) - \end{aligned} - :label: momentum-balance5 - -The momentum balance in direction :math:`x_3` (normal to the slope) is -used to obtain a relation for the vertical distribution of the stress -tensor (:cite:`Sa2007`). Due to the choice of -coordinate system and because of the kinematic boundary condition at the -bottom, the left side of :eq:`momentum-balance5` can be -expressed as a function of the velocity :math:`\overline{u}_1` in direction -:math:`x_1` and the curvature of the terrain in this same direction -:math:`\frac{\partial^2{b}}{\partial{x_1^2}}` (:cite:`Zw2000`): - -.. math:: - \rho\,A_b\,\overline{h}\,\frac{\,\mathrm{d}\overline{u}_3}{\,\mathrm{d}t} = - \rho\,A_b\,\overline{h}\,\frac{\partial^2{b}}{\partial{x_1^2}}\,\overline{u}_1^2, +.. In the following, we employ a Mohr-Coulomb friction model that describes the friction interaction between two solids. +.. +.. The bottom shear stress reads: +.. +.. .. math:: +.. \boldsymbol{\tau^b} = +.. -\tan{\delta}\,\boldsymbol{\sigma^b}\cdot\mathbf{n^b}\, \frac{\mathbf{\bar{u}}}{\Vert\mathbf{\bar{u}}\Vert}, +.. +.. +.. where :math:`\delta` is the friction angle and :math:`\mu=\tan{\delta}` is the friction coefficient. +.. The bottom shear stress linearly increases with the normal stress component :math:`p^b` \citep{BaSaGr1999}. + +Different friction models accounting for the influence of flow velocity, flow thickness, etc. have been proposed +(e.g. the Voellmy model :cite:`Vo1955`). +Changing the friction model means changing the :math:`\mathbf{f}` function (:eq:`eq-bottom-frict-law`). +In the **com1DFA** module, three friction models are available. +First a Coulomb one which is used in this paper. +Second a Voellmy friction model (:cite:`Vo1955`) and third the samosAT friction model which is the one used for +hazard mapping by Austrian federal agencies (:cite:`Sa2007)`. + +With Mohr-Coulomb friction an avalanche starts to flow if the slope inclination exceeds the friction angle +:math:`\delta`. +In the case of an infinite slope of constant inclination, the avalanche velocity would increase indefinitely. +Using this friction law, flow velocity is overestimated and hence is not suited to model the flow of snow avalanches. 
+However, because of its relative simplicity, the Mohr-Coulomb friction model is convenient for deriving +analytical solutions and testing numerical implementations. + + +Expression of surface forces in the NCS +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Taking advantage of the NCS and using the boundary conditions, it is possible to split the surface forces into bottom +(on :math:`\mathcal{S}_b`), lateral (on :math:`\mathcal{S}_\text{lat}`) and free surface forces (on :math:`\mathcal{S}_{fs}`) +and perform further simplifications: + +.. math:: + \begin{aligned} + \oint\limits_{\partial{V(t)}} \boldsymbol{\sigma}\cdot\mathbf{n}\,\mathrm{d}A + &= \int\limits_{\mathcal{S}_b} \boldsymbol{\sigma^b}\cdot\mathbf{n^b}\,\mathrm{d}A + + \cancelto{\mathbf{0}}{\int\limits_{\mathcal{S}_{fs}} + \boldsymbol{\sigma_s}\cdot\mathbf{n_s}\,\mathrm{d}A} + + \int\limits_{\mathcal{S}_{lat}} \boldsymbol{\sigma}\cdot\mathbf{n}\,\mathrm{d}A\\ + &= \underbrace{\int\limits_{\mathcal{S}_b} + \boldsymbol{\sigma^b}\cdot\mathbf{n^b}\,\mathrm{d}A}_{\text{bottom force}} + + \underbrace{\oint\limits_{\partial\mathcal{S}_b} + \left(\int\limits_0^h\boldsymbol{\sigma}\cdot\mathbf{n}\, + \mathrm{d}x_3\right)\,\mathrm{d}l}_{\text{lateral force}} + \end{aligned}. + :label: surface forces + +Using the notations introduced in :ref:`theorycom1DFA:Choice of the coordinate system and thickness averaged quantities` +and the decomposition of the stress tensor, +the bottom force can be expressed as a surface normal component and a surface tangential one: + +.. math:: + \begin{aligned} + \int\limits_{\mathcal{S}_b} \boldsymbol{\sigma^b}\cdot\mathbf{n^b}\,\mathrm{d}A + &= \int\limits_{\mathcal{S}_b} (-p^bI + + \boldsymbol{\mathrm{T}})\cdot\mathbf{n^b}\,\mathrm{d}A + = -\int\limits_{\mathcal{S}_b} p^b\mathbf{n^b}\,\mathrm{d}A + + \int\limits_{\mathcal{S}_b} \boldsymbol{\mathrm{T}} \cdot \mathbf{n^b}\,\mathrm{d}A\\ + &= -\int\limits_{\mathcal{S}_b} p^b\mathbf{n^b}\,\mathrm{d}A + + \int\limits_{\mathcal{S}_b} \boldsymbol{\tau^b}\,\mathrm{d}A + = -A^b\widehat{p^b\mathbf{n^b}}+A^b\widehat{\boldsymbol{\tau^b}} + \end{aligned}, + :label: eq-basal-surface-forces + +where :math:`\boldsymbol{\tau^b}` is the basal friction term (introduced in :ref:`theorycom1DFA:Boundary conditions`). +Applying Green's theorem, the lateral force reads: + +.. 
math:: + \begin{aligned} + \oint\limits_{\partial\mathcal{S}_b} \left(\int\limits_0^h + \boldsymbol{\sigma}\cdot\mathbf{n}\,\mathrm{d}x_3\right)\,\mathrm{d}l + &= \oint\limits_{\partial\mathcal{S}_b} \left(\int\limits_0^h + (-pI + \boldsymbol{\mathrm{T}})\,\mathrm{d}x_3\right)\cdot\mathbf{n} + \,\mathrm{d}l\\ + &= -\oint\limits_{\partial\mathcal{S}_b} + \left(\int\limits_0^hp\,\mathrm{d}x_3\right)\cdot\mathbf{n}\,\mathrm{d}l + + \oint\limits_{\partial\mathcal{S}_b} \left(\int\limits_0^h\boldsymbol{\mathrm{T}} + \,\mathrm{d}x_3\right)\cdot\mathbf{n}\,\mathrm{d}l\\ + & = -\oint\limits_{\partial\mathcal{S}_b} h\bar{p}\mathbf{n}\,\mathrm{d}l + + \oint\limits_{\partial\mathcal{S}_b} h\bar{\boldsymbol{\mathrm{T}}}\cdot\mathbf{n}\,\mathrm{d}l + = -\int\limits_{\mathcal{S}_b} \boldsymbol{\nabla} h\bar{p}\,\mathrm{d}A + + \int\limits_{\mathcal{S}_b} \boldsymbol{\nabla} h\bar{\boldsymbol{\mathrm{T}}}\,\mathrm{d}A\\ + & = -A^b\widehat{\boldsymbol{\nabla} h\bar{p}} + A^b \widehat{\boldsymbol{\nabla} h\bar{\boldsymbol{\mathrm{T}}}} + \end{aligned} + :label: eq-lateral-surface-forces + +Equations :eq:`eq-basal-surface-forces` and :eq:`eq-lateral-surface-forces` represent the +thickness integrated form of the surface forces and can now be used to write the thickness +integrated momentum equation. + +Thickness integrated mass conservation equation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -rearranging the terms in the momentum equation leads to: +The mass balance :eq:`mass-balance2` +remains unchanged: .. math:: - \overline{\sigma}_{33}(x_3) = \rho_0\,(s-x_3)\left(g_3-\frac{\partial^2{b}}{\partial{x_1^2}}\,\overline{u}_1^2\right)+ \frac{1}{A_b} - \oint\limits_{\partial A_b}\left(\int_{x_3}^s\sigma_{3j}\,n_j\,\mathrm{d}x_3\right)\,\mathrm{d}l - :label: sigma33 + \frac{\mathrm{d}V(t)}{\mathrm{d}t} = \frac{\mathrm{d}\left(A_bh\right)}{\mathrm{d}t} + = \frac{\rho_{\text{ent}}}{\rho_0}\,w_f\,h_{\text{ent}}\,\left\Vert \bar{\mathbf{u}}\right\Vert + + \frac{A_b}{\rho_0}\,\frac{\boldsymbol{\tau^b}}{e_b}\,\left\Vert \bar{\mathbf{u}}\right\Vert + :label: mass-balance3 -Non-dimensional Equations -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. _fig-characteristic_size: -.. figure:: _static/characteristic_size.png - :width: 90% +Thickness integrated momentum equation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Characteristic size of the avalanche along its path (from :cite:`Zw2000`, modified) - -The previous equations :eq:`momentum-balance5` and :eq:`sigma33` can be further simplified by -introducing a scaling based on the characteristic values of the physical -quantities describing the avalanche. The characteristic length L, the -thickness H, the acceleration due to gravity g and the characteristic -radius of curvature of the terrain R are the chosen quantities. 
From -those values, it is possible to form two non dimensional parameters that -describe the flow: - -- Aspect ratio: :math:`\qquad\qquad\varepsilon = H / L\qquad` - -- Curvature: :math:`\qquad\lambda = L / R\qquad` - -The different properties involved are then expressed in terms of -characteristic quantities :math:`L`, :math:`H`, :math:`g`, :math:`\rho_0` and :math:`R` -(see :numref:`fig-characteristic_size`): +Using the definitions of average values given in :ref:`theorycom1DFA:Choice of the coordinate system and thickness averaged quantities` +and the decomposition of the surface forces given by :eq:`eq-basal-surface-forces` +and :eq:`eq-lateral-surface-forces` combined with the expression of the +entrainment force detailed in :eq:`force-entrainment`, the momentum equation reads: .. math:: - \begin{aligned} - x_i &= L\, x_i^*\\ - (dx_3,h,\overline{h}) &= H\,(dx_3^*,h^*,\overline{h}^*)\\ - A_b &= L^2\, A_b^*\\ - t &= \sqrt{L/\text{g}}\, t^*\\ - \overline{u_i} &= \sqrt{\text{g}L}\,\overline{u_i}^*\\ - \text{g}_i &= \text{g} \, \text{g}_i^*\\ - \frac{\partial^2{b}}{\partial{x_1}^2} &= \frac{1}{R}\,\frac{\partial^2{b^*}}{\partial{x_1}^{*2}}\end{aligned} + \rho_0 \frac{\mathrm{d}V(t) \widetilde {\mathbf{u}}}{\mathrm{d}t} = \rho_0 V + \frac{\mathrm{d}\widetilde {\mathbf{u}}}{\mathrm{d}t} + + \rho_0 \widetilde {\mathbf{u}} \frac{\mathrm{d}V}{\mathrm{d}t} = \oint\limits_{\partial V(t)} + \boldsymbol{\sigma}\cdot\mathbf{n}\,\mathrm{d}A + \rho_0 V \mathbf{g} + + \mathbf{F}^{\text{ext}} + :label: eq-momentum-balance2 -The normal part of the stress tensor is directly related to the -hydro-static pressure: -.. math:: \sigma_{ii} = \rho_0\,\text{g}\,H\,\sigma_{ii}^* - -The dimensionless properties are indicated by a superscripted asterisk. -Introducing those properties in :eq:`sigma33`, leads to -: +which leads to: .. math:: - \overline{\sigma^*}_{33} = \left(g^*_3-\lambda\frac{\partial^2{b^*}}{\partial{x_1^{*2}}}\,\overline{u}_1^{*2}\right) - (s^*-x^*_3) + \underbrace{\varepsilon\oint\limits_{\partial A_b^*}\left(\int\limits_{x^*_3}^{s^*}\sigma^*_{31}\,\mathrm{d}x^*_3\right)\,\mathrm{d}l^*}_{O(\varepsilon)}. - :label: sigma33star + \rho_0 V \frac{\mathrm{d}\widetilde {\mathbf{u}}}{\mathrm{d}t} = + \underbrace{-A^b\widehat{p\mathbf{n^b}}} + _{\substack{\text{bottom} \\ \text{ normal force }}} + \underbrace{+A^b\widehat{\boldsymbol{\tau^b}}} + _{\substack{\text{bottom} \\ \text{ shear force }}} + \underbrace{-A^b\widehat{\boldsymbol{\nabla} h\bar{p}}} + _{\substack{\text{lateral} \\ \text{ pressure force }}} + \underbrace{+A^b\cancelto{O(\boldsymbol{\epsilon}^2)} + {\widehat{\boldsymbol{\nabla} h\bar{\boldsymbol{T}}}}} + _{\substack{\text{lateral} \\ \text{ shear force }}} + + \rho_0 V \mathbf{g} + \mathbf{F}^{\text{ext}} \underbrace{-\widetilde {\mathbf{u}}\,\oint\limits_{\partial V(t)} + q^{\text{ent}}\,\mathrm{d}A} + _{\substack{\text{ momentum loss } \\ \text{ entrainment }}} + :label: eq-momentum-balance3 -The height, H of dense flow avalanches is assumed to be small compared -to its length, L. Meaning that the equations are examined in the limit -:math:`\varepsilon \ll 1`. It is then possible to neglect the last term -in :eq:`sigma33star` which leads to (after reinserting -the dimensions): +The lateral shear stress term is neglected because of its relative smallness +in comparison to the other terms as shown by the dimensional analysis carried out in +:cite:`GrEd2014`. The mass conservation reads: .. 
math:: - \overline{\sigma}_{33}(x_3) = \rho_0\,\underbrace{\left(g_3-\overline{u_1}^2\,\frac{\partial^2{b}}{\partial{x_1^2}}\right)}_{g_\text{eff}} - \left[\overline{h}-x_3\right] - :label: sigma33dim + \rho_0 \frac{\mathrm{d}V}{\mathrm{d}t} = + \oint\limits_{\partial V(t)} q^{\text{ent}} \,\mathrm{d}A, + :label: eq-mass-balance2 -And at the bottom of the avalanche, with :math:`x_3 = 0`, the normal -stress can be expressed as: +Using the approximations from :ref:`theorycom1DFA:Choice of the coordinate system and thickness averaged quantities`, the momentum +equation becomes: .. math:: - \overline{\sigma}^{(b)}_{33} = \rho_0\,\left(g_3-\overline{u_1}^2\,\frac{\partial^2{b}}{\partial{x_1^2}}\right)\,\overline{h} - :label: sigmab - -Calculating the surface integral in equation :eq:`momentum-balance5` requires to -express the other components of the stress tensor. Here again a -magnitude consideration between the shear stresses :math:`\sigma_{12} = \sigma_{21}` and :math:`\sigma_{13}`. -The shear stresses are based on a generalized Newtonian law of materials, -which controls the influence of normal stress and the rate of deformation through the viscosity. - -.. math:: - \tau_{ij} = \eta\left(\frac{\partial{u_i}}{\partial{x_j}}+\frac{\partial{u_j}}{\partial{x_i}}\right), ~ i\neq j - -Because :math:`\partial x_1` and :math:`\partial x_2` are of the order of :math:`L`, whereas :math:`\partial x_3` -is of the order of :math:`H`, it follows that: + \rho_0 V \frac{\mathrm{d}\bar{\mathbf{u}}}{\mathrm{d}t} = - A^bp\mathbf{n^b} + + A^b\boldsymbol{\tau^b} - A^b\boldsymbol{\nabla} h\bar{p} + \rho_0 V \mathbf{g} + + \mathbf{F}^{\text{ext}} + - \bar{\mathbf{u}}\oint\limits_{\partial V(t)} q^{\text{ent}} \,\mathrm{d}A, + :label: eq-momentum-balance-approx -.. math:: - O\left(\frac{\sigma_{12}}{\sigma_{13}}\right) = \frac{H}{L} = \varepsilon \ll 1 -and thus :math:`\sigma_{12} = \sigma_{21}` is negligible compared to :math:`\sigma_{13}`. -:math:`\sigma_{13}` is expressed using the bottom friction law -:math:`\tau^{(b)}_i = f(\sigma^{(b)},\overline{u},\overline{h},\rho_0,t,\mathbf{x})` -introduced in :eq:`boundary-conditions`. +where all quantities are evaluated at the center of the basal area (point O in :numref:`small-lagrange`). +This equation is projected in the normal direction :math:`\mathbf{v}_3 = \mathbf{N^b}` +to get the expression of the basal pressure :math:`p^b`. The projection of this same +equation on the tangential plane leads to the differential equations satisfied by +:math:`\bar{\mathbf{u}}`. +Pressure distribution, thickness integrated pressure and pressure gradient +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -In addition, a relation linking the horizontal normal stresses, -:math:`\sigma_{ii}`, :math:`i = (1,2)`, to the vertical pressure distribution given -by :eq:`sigmab` is introduced. In complete analogy to the arguments used by -Savage and Hutter (:cite:`SaHu1989`) the horizontal normal stresses are given as: +We can project the momentum equation (:eq:`eq-momentum-balance-approx`), using the volume between :math:`x_3` and the surface :math:`h`, in the normal +direction (:math:`\mathbf{v_3} = \mathbf{N^b} = -\mathbf{n^b}`). Applying the +properties of the NCS (:eq:`eq-du-in-ncs`) the surface normal +component of :eq:`eq-momentum-balance-approx` reads: .. 
+Pressure distribution, thickness integrated pressure and pressure gradient
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+We can project the momentum equation (:eq:`eq-momentum-balance-approx`), using the volume between :math:`x_3` and the surface :math:`h`, in the normal
+direction (:math:`\mathbf{v_3} = \mathbf{N^b} = -\mathbf{n^b}`). Applying the
+properties of the NCS (:eq:`eq-du-in-ncs`), the surface normal
+component of :eq:`eq-momentum-balance-approx` reads:

 .. math::
-    \sigma_{ii} = K_{(i)}\,\sigma_{33}
+    \begin{aligned}
+    \rho_0 V(x_3, t) \frac{\mathrm{d}\bar{\mathbf{u}}(x_3)}{\mathrm{d}t} \cdot \mathbf{v_3} =&
+    \rho_0 A^b (h- x_3)\left(\bar{u}_1(x_3)\frac{\mathrm{d}\mathbf{v_1}}{\mathrm{d}t} \cdot\mathbf{v_3}
+    + \cancelto{0}{\frac{\mathrm{d}\bar{u}_1(x_3)}{\mathrm{d}t}\mathbf{v_1}\cdot\mathbf{v_3}}\right)\\
+    =& -\rho_0 A^b (h - x_3) \bar{u}_1(x_3) \mathbf{v_1} \cdot \frac{\mathrm{d}\mathbf{v_3}}{\mathrm{d}t}
+    = -\rho_0 A^b (h - x_3) \bar{\mathbf{u}}(x_3) \cdot \frac{\mathrm{d}\mathbf{N^b}}{\mathrm{d}t} \\
+    =& - A^bp\,\cancelto{-1}{\mathbf{n^b}\cdot\mathbf{N^b}}
+    + A^b\cancelto{0}{\boldsymbol{\tau^b}\cdot\mathbf{N^b}}
+    - A^b\boldsymbol{\nabla} \{(h-x_3)\bar{p}\}\cdot\mathbf{N^b}\\
+    & + \rho_0 V \cancelto{g_{N^b}}{\mathbf{g}\cdot\mathbf{N^b}}
+    + \cancelto{0}{\mathbf{F}^{\text{ext}}\cdot\mathbf{N^b}}
+    - \cancelto{0}{\bar{\mathbf{u}}\oint\limits_{\partial V(t)} q^{\text{ent}} \,\mathrm{d}A\cdot\mathbf{N^b}}
+    \end{aligned}
+    :label: eq-momentum-balance-x3-projected

-Where :math:`K_{(i)}` are the earth pressure coefficients (cf. :cite:`ZwKlSa2003,Sa2004`):
+Neglecting the normal component of the pressure gradient
+gives the expression for pressure. Under the condition that :math:`\bar{\mathbf{u}}(x_3)` is
+independent of :math:`x_3`, pressure follows a linear profile from the bottom
+surface to the free surface. Exploiting the normal component of the momentum
+equation makes it possible to express the pressure and its gradient:

 .. math::
-    \sigma_{11} &= K_{x~akt/pass}\,\sigma_{33}\\
-    \sigma_{22} &= K_{y~akt/pass}^{(x~akt/pass)}\,\sigma_{33}
+    p(x_3) = \rho_0 (h - x_3) \left\{-g_{N^b}
+    - \bar{\mathbf{u}}\cdot\frac{\mathrm{d}\mathbf{N^b}}{\mathrm{d}t}\right\}
+    \quad \mbox{and} \quad
+    p(x_3=0) = p^b
+    = \rho_0 h \left\{-g_{N^b}
+    - \bar{\mathbf{u}}\cdot\frac{\mathrm{d}\mathbf{N^b}}{\mathrm{d}t}\right\}
+    :label: eq-pressure-distribution

-With the above specifications, the integral of the stresses over the
-flow height is simplified in equation :eq:`momentum-balance5` to:
+Note that the bottom pressure should always be positive.
+A negative pressure is nonphysical and means that the material is not in contact with the bottom surface anymore.
+This can happen in the case of large velocities on convex topography.
+If so, the material should be in a free fall state until it gets back in contact with the topography.
+A description of how this is handled within the numerical implementation can be found in
+:ref:`DFANumerics:Curvature acceleration term`.
+
+Using the previous result of :eq:`eq-pressure-distribution`, it is possible to express the thickness integrated
+pressure :math:`\bar{p}`:
+
+.. math::
+    h\bar{p} = \int\limits_0^h p(x_3)\,\mathrm{d}x_3
+    = -\rho_0 \frac{h^2}{2}\left(g_{\mathbf{N^b}}
+    + \bar{\mathbf{u}} \cdot \frac{\mathrm{d}\mathbf{N^b}}{\mathrm{d}t}\right) = -\rho_0 \frac{h^2}{2} \, g^\text{eff}
+    :label: eq-thickness-integrated-pressure
+
+where :math:`g^\text{eff}` is the effective normal acceleration acting on the volume, including the normal component of
+gravity and a curvature component.
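Read literally, :eq:`eq-pressure-distribution` makes the basal pressure and the thickness integrated pressure straightforward to evaluate once :math:`\mathrm{d}\mathbf{N^b}/\mathrm{d}t` is known. A small numerical sketch (the variable names and the finite-difference estimate of the normal's rate of change are assumptions, not the com1DFA code):

.. code-block:: python

    import numpy as np

    def basal_pressure(rho0, h, u_bar, g_vec, Nb, Nb_old, dt):
        """p^b = rho0 * h * (-g.N^b - u.dN^b/dt), cf. eq-pressure-distribution."""
        dNb_dt = (Nb - Nb_old) / dt                    # crude estimate of the curvature term
        pb = rho0 * h * (-np.dot(g_vec, Nb) - np.dot(u_bar, dNb_dt))
        return max(pb, 0.0)                            # negative values are nonphysical (free fall)

    def thickness_integrated_pressure(pb, h):
        """h * pbar = pb * h / 2 for the linear pressure profile."""
        return 0.5 * pb * h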
+The expression of the thickness integrated pressure is used to derive the pressure gradient :math:`\boldsymbol{\nabla} h\bar{p}`.
+Assuming :math:`g^\text{eff}` to be locally constant (otherwise :math:`g^\text{eff}` would remain inside the gradient
+operator) leads to:
+
+.. math::
+    \boldsymbol{\nabla} h\bar{p} = -\rho_0 \, g^\text{eff} \, h \boldsymbol{\nabla} h
+    :label: eq-pressure-gradient
+
+Tangential momentum conservation equation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Using the derived expression of the thickness integrated pressure
+(:eq:`eq-pressure-gradient`), we project the momentum
+balance (:eq:`eq-momentum-balance-approx`) in the tangent plane, leading to the following equation:

 .. math::
-    \int\limits_b^s\sigma_{ij}\,\mathrm{d}x_3 = \int\limits_b^s K_{(i)}\,\sigma_{33}\,\mathrm{d}x_3 =
-    K_{(i)}\,\frac{\overline{h}\,\sigma^{(b)}}{2}
+    \rho_0 V \left(\frac{\mathrm{d}\bar{\mathbf{u}}}{\mathrm{d}t}
+    - \left(\frac{\mathrm{d}\bar{\mathbf{u}}}{\mathrm{d}t} \cdot
+    \mathbf{v}_3\right)\mathbf{v}_3\right) =
+    A^b\boldsymbol{\tau^b} - \rho_0 \, g^\text{eff} \, h A^b \boldsymbol{\nabla}_{s} h
+    + \rho_0 V \mathbf{g}_s + \mathbf{F}^{\text{ext}}
+    - \bar{\mathbf{u}}\oint\limits_{\partial V(t)} q^{\text{ent}} \,\mathrm{d}A
+    :label: eq-momentum-balance-tangent

-and the momentum balance can be written:
+where :math:`\boldsymbol{\nabla}_{s} = \boldsymbol{\nabla} - (\boldsymbol{\nabla}\cdot\mathbf{N^b})\mathbf{N^b}`
+and :math:`\mathbf{g_s} = \mathbf{g} - (\mathbf{g} \cdot \mathbf{N^b})\mathbf{N^b}` are the
+tangential components of the gradient operator and of the gravity acceleration, respectively.
+
+After replacing the velocity derivative component in the normal direction
+by the expression developed in :eq:`eq-momentum-balance-x3-projected`,
+:eq:`eq-momentum-balance-tangent` reads:

 .. math::
     \begin{aligned}
-    \rho_0\,A\,\overline{h}\,\frac{\,\mathrm{d}\overline{u}_i}{\,\mathrm{d}t} =
-    &\rho_0\,A\,\overline{h}\,g_i + \underbrace{K_{(i)}\,\oint\limits_{\partial{A}}\left(\frac{\overline{h}\,\sigma^{(b)}}{2}\right)n_i\,\mathrm{d}l}_{F_i^{\text{lat}}}
-    \underbrace{-\delta_{i1}\,A\,\tau^{(b)}}_{F_i^{\text{bot}}}
-    \underbrace{- \rho_0\,A\,h_{\text{eff}}\,C_{\text{res}}\,\overline{\mathbf{u}}^2\,\frac{\overline{u_i}}{\|\overline{\mathbf{u}}\|}}_{F_i^{\text{res}}}\\
-    &- \overline{u_i}\,\rho_0\,\frac{\mathrm{d}\left(A\,\overline{h}\right)}{\mathrm{d}t}
-    + F_i^{\text{ent}}
+    \rho_0 V \frac{\mathrm{d}\bar{\mathbf{u}}}{\mathrm{d}t} =&
+    A^b\boldsymbol{\tau^b}
+    - \rho_0 \, g^\text{eff} \, h A^b \boldsymbol{\nabla}_{\!s} h
+    + \rho_0 V \mathbf{g}_s
+    + \mathbf{F}^{\text{ent}}
+    - \mathbf{F}^{\text{res}}\\
+    &- \underbrace{\bar{\mathbf{u}}\,\rho_0\,\frac{\mathrm{d}\left(A^b\,h\right)}{\mathrm{d}t}}_{\text{from the mass balance}}
+    - \underbrace{\rho_0 V \left( \bar{\mathbf{u}} \cdot \frac{\mathrm{d}\mathbf{v}_3}{\mathrm{d}t} \right)\mathbf{v}_3}_{\text{curvature acceleration}}
     \end{aligned}
     :label: momentum-balance6
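For orientation only, the right hand side of :eq:`momentum-balance6` can be assembled term by term for one particle; in the sketch below the argument names and the sign convention for :math:`g^\text{eff}` (taken here as the positive effective normal acceleration, so the pressure force pushes from thick to thin regions) are assumptions and not the com1DFA API:

.. code-block:: python

    import numpy as np

    def momentum_rhs(rho0, V, Ab, h, u_bar, tau_b, g_s, g_eff, grad_s_h,
                     F_ent, F_res, dAbh_dt, v3, dv3_dt):
        """Sum of the force terms on the right hand side of momentum-balance6."""
        bottom_shear = Ab * tau_b                              # A^b tau^b (vector, opposes the flow)
        lateral_pressure = -rho0 * g_eff * h * Ab * grad_s_h   # thickness-gradient pressure force
        gravity = rho0 * V * g_s                               # tangential gravity
        mass_balance = -u_bar * rho0 * dAbh_dt                 # momentum correction from dV/dt
        curvature = -rho0 * V * np.dot(u_bar, dv3_dt) * v3     # curvature acceleration term
        return (bottom_shear + lateral_pressure + gravity
                + F_ent - F_res + mass_balance + curvature)

    # the particle acceleration then follows as momentum_rhs(...) / (rho0 * V)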
-with
-
-.. math:: C_{\text{res}} = \frac{1}{2}\,\overline{d}\,\frac{c_w}{s_{\text{res}}^2}.
-
-The mass balance :eq:`mass-balance2`
-remains unchanged:
-
-.. math::
-    \frac{\mathrm{d}V(t)}{\mathrm{d}t} = \frac{\mathrm{d}\left(A_b\overline{h}\right)}{\mathrm{d}t}
-    = \frac{\rho_{\text{ent}}}{\rho_0}\,w_f\,h_{\text{ent}}\,\left\Vert \overline{\mathbf{u}}\right\Vert
-    + \frac{A_b}{\rho_0}\,\frac{\tau^{(b)}}{e_b}\,\left\Vert \overline{\mathbf{u}}\right\Vert
-    :label: mass-balance3
-
-The unknown :math:`\overline{u}_1`, :math:`\overline{u}_2` and
-:math:`\overline{h}` satisfy :eq:`sigmab`,
-:eq:`momentum-balance6` and
-:eq:`mass-balance3`. In equation
-:eq:`momentum-balance6` the bottom shear
-stress :math:`\tau^{(b)}` remains unknown, and and a constitutive equation
-has to be introduced in order to completely solve the equations.
-

 Friction Model
 ~~~~~~~~~~~~~~~~~

-The problem can be solved by introducing a constitutive equation which
-describes the basal shear stress tensor :math:`\tau^{(b)}` as a function
-of the flow state of the avalanche.
+The constitutive equation which
+describes the basal shear stress tensor :math:`\boldsymbol{\tau^b}` as a function
+of the flow state of the avalanche can be expressed as:

 .. math::
-    \tau^{(b)}_i = f(\sigma^{(b)},\overline{u},\overline{h},\rho_0,t,\mathbf{x})
+    \boldsymbol{\tau^b} = f(\boldsymbol{\sigma^b},\bar{u},h,\rho_0,t,\mathbf{x})
     :label: samosAT friction model

 With

 .. math::
     \begin{aligned}
-    &\sigma^{(b)} \qquad &\text{normal component of the stress tensor}\\
-    &\overline{u} \qquad &\text{average velocity}\\
-    &\overline{h} \qquad &\text{average flow thickness}\\
+    &\boldsymbol{\sigma^b}\cdot\mathbf{n^b} \qquad &\text{normal component of the stress tensor}\\
+    &\bar{u} \qquad &\text{average velocity}\\
+    &h \qquad &\text{average flow thickness}\\
     &\rho_0 \qquad &\text{density}\\
     &t \qquad &\text{time}\\
     &\mathbf{x} \qquad &\text{position vector}\end{aligned}

@@ -432,10 +589,10 @@ The Mohr-Coulomb friction model describes the friction interaction between twos
 The bottom shear stress simply reads:

 .. math::
-    \tau^{(b)} = \tan{\delta}\,\sigma^{(b)}
+    \boldsymbol{\tau^b} = -\left(\tan{\delta}\,\boldsymbol{\sigma^b}\cdot\mathbf{n^b}\right) \mathbf{v}_1

 :math:`\tan{\delta}=\mu` is the friction coefficient (and :math:`\delta` the
 friction angle). The bottom shear stress linearly
-increases with the normal stress component :math:`\sigma^{(b)}` (:cite:`Zw2000,BaSaGr1999,WaHuPu2004,Sa2007`).
+increases with the normal stress component :math:`\boldsymbol{\sigma^b}` (:cite:`Zw2000,BaSaGr1999,WaHuPu2004,Sa2007`).
 With this friction model, an avalanche starts to flow if the slope inclination is steeper than the
 friction angle :math:`\delta`. In the case of an infinite slope of constant inclination,

@@ -451,7 +608,7 @@ The Chezy friction model describes viscous friction interaction.
 The bottom shear stress then reads:

 .. math::
-    \tau^{(b)} = c_{\text{dyn}}\,\rho_0\,\bar{u}^2
+    \boldsymbol{\tau^b} = -\left(c_{\text{dyn}}\,\rho_0\,\bar{u}^2\right) \mathbf{v}_1

 :math:`c_{\text{dyn}}` is the viscous friction coefficient. The bottom shear stress
 is a quadratic function of the velocity. (:cite:`Zw2000,BaSaGr1999,WaHuPu2004,Sa2007`).

@@ -467,7 +624,8 @@ He first had the idea to combine both the Mohr-Coulomb and the Chezy model by su
 in order to take advantage of both. This leads to the following friction law:

 .. math::
-    \tau^{(b)} = \tan{\delta}\,\sigma^{(b)} + c_\text{dyn}\,\rho_0\,\bar{u}^2
+    \boldsymbol{\tau^b} = -\left(\tan{\delta}\,\boldsymbol{\sigma^b}\cdot\mathbf{n^b}
+    + c_\text{dyn}\,\rho_0\,\bar{u}^2\right) \mathbf{v}_1
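The Mohr-Coulomb, Chezy and Voellmy laws differ only in how the magnitude of the bottom shear stress depends on the normal stress and on the velocity; in all three cases it acts along :math:`-\mathbf{v}_1`, i.e. against the flow direction. A compact sketch of the three magnitudes (the function and parameter names are placeholders, not com1DFA configuration keys):

.. code-block:: python

    def bottom_shear_magnitude(model, sigma_b, u_bar, rho0, mu, c_dyn):
        """Magnitude of tau^b for the Mohr-Coulomb, Chezy and Voellmy friction laws."""
        if model == "coulomb":                       # tau = tan(delta) * sigma_b, with mu = tan(delta)
            return mu * sigma_b
        if model == "chezy":                         # tau = c_dyn * rho0 * u^2
            return c_dyn * rho0 * u_bar**2
        if model == "voellmy":                       # sum of both contributions
            return mu * sigma_b + c_dyn * rho0 * u_bar**2
        raise ValueError(f"unknown friction model: {model}")

    # the shear stress vector then follows as -bottom_shear_magnitude(...) * v1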
 This model is described as Voellmy-Fluid :cite:`Sa2004,Sa2007`, and the turbulent

@@ -478,12 +636,12 @@ SamosAT friction model
 """"""""""""""""""""""""
 SamosAT friction model is a modification of some more classical models
-such as Voellmy model :ref:`Voellmy friction model`. The basal shear stress tensor :math:`\tau^{(b)}`
+such as the Voellmy model (:ref:`Voellmy friction model`). The basal shear stress tensor :math:`\boldsymbol{\tau^b}`
 is expressed as (:cite:`Sa2007`):

 .. math::
-    \tau^{(b)} = \tau_0 + \tan{\delta}\,\left(1+\frac{R_s^0}{R_s^0+R_s}\right)\,\sigma^{(b)}
-    + \frac{\rho_0\,\overline{u}^2}{\left(\frac{1}{\kappa}\,\ln\frac{\overline{h}}{R} + B\right)^2}
+    \boldsymbol{\tau^b} = -\left(\tau_0 + \tan{\delta}\,\left(1+\frac{R_s^0}{R_s^0+R_s}\right)\,\boldsymbol{\sigma^b}\cdot\mathbf{n^b}
+    + \frac{\rho_0\,\bar{u}^2}{\left(\frac{1}{\kappa}\,\ln\frac{h}{R} + B\right)^2}\right) \mathbf{v}_1

 With

@@ -498,11 +656,11 @@ With
 The minimum shear stress :math:`\tau_0` defines a lower
 limit below which no flow takes place with the condition
-:math:`\rho_0\,\overline{h}\,g\,\sin{\alpha} > \tau_0`. :math:`\alpha`
+:math:`\rho_0\,h\,g\,\sin{\alpha} > \tau_0`. :math:`\alpha`
 being the slope. :math:`\tau_0` is independent of the flow
 thickness, which leads to a strong avalanche deceleration,
 especially for avalanches with low flow heights. :math:`R_s` is expressed as
-:math:`R_s = \frac{\rho_0\,\overline{u}^2}{\sigma^{(b)}}`. Together
+:math:`R_s = \frac{\rho_0\,\bar{u}^2}{\boldsymbol{\sigma^b}\cdot\mathbf{n^b}}`. Together
 with the empirical parameter :math:`R_s^0` the term
 :math:`\frac{R_s^0}{R_s^0+R_s}` defines the Coulomb basal friction. Therefore
 lower avalanche speeds lead to a higher bed friction, making

@@ -511,7 +669,7 @@ without this effect.
 This effect is intended to avoid lateral creep of the avalanche mass
 (:cite:`SaGr2009`).

-Dam 
+Dam
 ~~~
 The dam is described by a crown line, that is to say a series of x, y, z points describing the crown of

@@ -567,7 +725,7 @@ The grid cells crossed by the dam as well as their neighbor cells are memorized
 .. \begin{aligned}
 .. \frac{\tilde{u}}{u_{\tau}} &= \frac{1}{\kappa}\,\ln{\frac{x_3}{R}} + B\\
 .. &\text{mit}\\
-.. u_{\tau} &= \sqrt{\frac{\tau^{(b)}}{\bar{\rho}}},
+.. u_{\tau} &= \sqrt{\frac{\boldsymbol{\tau^b}}{\bar{\rho}}},
 .. \end{aligned}
 ..
 ..

@@ -599,16 +757,16 @@ The grid cells crossed by the dam as well as their neighbor cells are memorized
 ..
 .. Durch Einsetzen für $u_{\tau}$ und Ersetzen von $\tilde{u}_\text{max}$ durch die in Kapitel \ref{sec:vereinfachtegleichungen}
 .. tiefengemittelte Geschwindigkeit $\bar{u}$
-.. erhält man nach Umformen schließlich eine Beziehung für die gesuchte Bodenschubspannung $\tau^{(b)}$.
+.. erhält man nach Umformen schließlich eine Beziehung für die gesuchte Bodenschubspannung $\boldsymbol{\tau^b}$.
 ..
 ..
 .. .. math::
-.. \tau^{(b)} = \frac{\bar{\rho}\,\bar{u}^2}{\left(\frac{1}{\kappa}\,\ln{\frac{\bar{h}}{R}}+B\right)^2}
+.. \boldsymbol{\tau^b} = \frac{\bar{\rho}\,\bar{u}^2}{\left(\frac{1}{\kappa}\,\ln{\frac{\bar{h}}{R}}+B\right)^2}
 ..
 ..
 .. .. Dieses Modell lässt sich wie beim Voellmy-Modell mit der Coulomb'schen Reibung kombinieren.
 ..
 ..
 .. .. math::
-.. \tau^{(b)} = \tan{\delta}\,\sigma^{(b)} +
+.. \boldsymbol{\tau^b} = \tan{\delta}\,\boldsymbol{\sigma^b} +
 .. \frac{\bar{\rho}\,\bar{u}^2}{\left(\frac{1}{\kappa}\,\ln{\frac{\bar{h}}{R}}+B\right)^2}
 ..
 ..
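Returning to the SamosAT law quoted above, its magnitude can be transcribed in the same way as the simpler friction models. The sketch below is a schematic reading of the printed formula only; the function and parameter names are placeholders and the guard against division by zero is an assumption, not the com1DFA implementation:

.. code-block:: python

    import numpy as np

    def samosat_shear_magnitude(sigma_b, u_bar, h, rho0, tau0, delta, Rs0, kappa, R, B):
        """tau0 + tan(delta) * (1 + Rs0 / (Rs0 + Rs)) * sigma_b + turbulent term."""
        Rs = rho0 * u_bar**2 / max(sigma_b, 1.0e-10)            # R_s = rho0 u^2 / sigma_b
        coulomb = np.tan(delta) * (1.0 + Rs0 / (Rs0 + Rs)) * sigma_b   # delta in radians
        turbulent = rho0 * u_bar**2 / (np.log(h / R) / kappa + B)**2   # h and R are positive lengths
        return tau0 + coulomb + turbulent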