diff --git a/data/config/Ascii2NcConfig_default b/data/config/Ascii2NcConfig_default index ed3a28a819..bfddd0edd6 100644 --- a/data/config/Ascii2NcConfig_default +++ b/data/config/Ascii2NcConfig_default @@ -25,7 +25,7 @@ time_summary = { width = 600; grib_code = [ 11, 204, 211 ]; obs_var = []; - type = [ "min", "max", "range", "mean", "stdev", "median", "p80" ]; + type = [ "min", "max", "range", "mean", "stdev", "median", "p80", "sum" ]; vld_freq = 0; vld_thresh = 0.0; } diff --git a/data/config/IODA2NCConfig_default b/data/config/IODA2NCConfig_default index b1ac078d9c..4f6e573e7a 100644 --- a/data/config/IODA2NCConfig_default +++ b/data/config/IODA2NCConfig_default @@ -117,7 +117,7 @@ time_summary = { width = 600; grib_code = []; obs_var = [ "TMP", "WDIR", "RH" ]; - type = [ "min", "max", "range", "mean", "stdev", "median", "p80" ]; + type = [ "min", "max", "range", "mean", "stdev", "median", "p80", "sum" ]; vld_freq = 0; vld_thresh = 0.0; } diff --git a/data/config/Madis2NcConfig_default b/data/config/Madis2NcConfig_default index 36a44cb8bc..406120ab27 100644 --- a/data/config/Madis2NcConfig_default +++ b/data/config/Madis2NcConfig_default @@ -25,7 +25,7 @@ time_summary = { width = 600; grib_code = [ 11, 204, 211 ]; obs_var = []; - type = [ "min", "max", "range", "mean", "stdev", "median", "p80" ]; + type = [ "min", "max", "range", "mean", "stdev", "median", "p80", "sum" ]; vld_freq = 0; vld_thresh = 0.0; } diff --git a/data/config/PB2NCConfig_default b/data/config/PB2NCConfig_default index f765d98ec3..dd2351eabe 100644 --- a/data/config/PB2NCConfig_default +++ b/data/config/PB2NCConfig_default @@ -148,7 +148,7 @@ time_summary = { width = 600; grib_code = []; obs_var = [ "TMP", "WDIR", "RH" ]; - type = [ "min", "max", "range", "mean", "stdev", "median", "p80" ]; + type = [ "min", "max", "range", "mean", "stdev", "median", "p80", "sum" ]; vld_freq = 0; vld_thresh = 0.0; } diff --git a/data/table_files/grib2_mrms.txt b/data/table_files/grib2_mrms.txt index 
4e1a9b6e5e..0b00086744 100644 --- a/data/table_files/grib2_mrms.txt +++ b/data/table_files/grib2_mrms.txt @@ -1,9 +1,12 @@ GRIB2 -209 10 0 255 161 1 2 0 "LightningDensityNLDN1min" "CG Lightning Density 1-min - NLDN" "flashes/km^2/min" -209 10 0 255 161 1 2 1 "LightningDensityNLDN5min" "CG Lightning Density 5-min - NLDN" "flashes/km^2/min" -209 10 0 255 161 1 2 2 "LightningDensityNLDN15min" "CG Lightning Density 15-min - NLDN" "flashes/km^2/min" -209 10 0 255 161 1 2 3 "LightningDensityNLDN30min" "CG Lightning Density 30-min - NLDN" "flashes/km^2/min" -209 10 0 255 161 1 2 4 "LightningProbabilityNext30min" "Lightning Probability 0-30 minutes - NLDN" "%" +209 10 0 255 161 1 2 0 "NLDN_CG_001min_AvgDensity" "CG Lightning Density 1-min - NLDN" "flashes/km^2/min" +209 10 0 255 161 1 2 1 "NLDN_CG_005min_AvgDensity" "CG Lightning Density 5-min - NLDN" "flashes/km^2/min" +209 10 0 255 161 1 2 2 "NLDN_CG_015min_AvgDensity" "CG Lightning Density 15-min - NLDN" "flashes/km^2/min" +209 10 0 255 161 1 2 3 "NLDN_CG_030min_AvgDensity" "CG Lightning Density 30-min - NLDN" "flashes/km^2/min" +209 10 0 255 161 1 2 5 "LightningProbabilityNext30minGrid" "Lightning Probability 0-30 minutes - NLDN" "%" +209 10 0 255 161 1 2 6 "LightningProbabilityNext60minGrid" "Lightning Probability 0-60 minutes - NLDN" "%" +209 10 0 255 161 1 2 7 "LightningJumpGrid" "Rapid lightning increases and decreases" "non-dim" +209 10 0 255 161 1 2 8 "LightningJumpGrid_Max_005min" "Rapid lightning increases and decreases over 5-minutes" "non-dim" 209 10 0 255 161 1 3 0 "MergedAzShear0to2kmAGL" "Azimuth Shear 0-2km AGL" "0.001/s" 209 10 0 255 161 1 3 1 "MergedAzShear3to6kmAGL" "Azimuth Shear 3-6km AGL" "0.001/s" 209 10 0 255 161 1 3 2 "RotationTrack30min" "Rotation Track 0-2km AGL 30-min" "0.001/s" @@ -27,8 +30,10 @@ GRIB2 209 10 0 255 161 1 3 32 "MESHMax240min" "MESH Hail Swath 240-min" "mm" 209 10 0 255 161 1 3 33 "MESHMax360min" "MESH Hail Swath 360-min" "mm" 209 10 0 255 161 1 3 34 "MESHMax1440min" "MESH Hail 
Swath 1440-min" "mm" +209 10 0 255 161 1 3 37 "VIL_Max_120min" "VIL Swath 120-min" "kg/m^2" +209 10 0 255 161 1 3 40 "VIL_Max_1440min" "VIL Swath 1440-min" "kg/m^2" 209 10 0 255 161 1 3 41 "VIL" "Vertically Integrated Liquid" "kg/m^2" -209 10 0 255 161 1 3 42 "VILDensity" "Vertically Integrated Liquid Density" "g/m^3" +209 10 0 255 161 1 3 42 "VIL_Density" "Vertically Integrated Liquid Density" "g/m^3" 209 10 0 255 161 1 3 43 "VII" "Vertically Integrated Ice" "kg/m^2" 209 10 0 255 161 1 3 44 "EchoTop18" "Echo Top - 18 dBZ" "km MSL" 209 10 0 255 161 1 3 45 "EchoTop30" "Echo Top - 30 dBZ" "km MSL" @@ -38,65 +43,72 @@ GRIB2 209 10 0 255 161 1 3 49 "H50Above0C" "Thickness [50 dBZ top - 0C]" "km" 209 10 0 255 161 1 3 50 "H60AboveM20C" "Thickness [60 dBZ top - (-20C)]" "km" 209 10 0 255 161 1 3 51 "H60Above0C" "Thickness [60 dBZ top - 0C]" "km" -209 10 0 255 161 1 3 52 "Reflectivity0C" "Isothermal Reflectivity at 0C" "dBZ" -209 10 0 255 161 1 3 53 "ReflectivityM5C" "Isothermal Reflectivity at -5C" "dBZ" -209 10 0 255 161 1 3 54 "ReflectivityM10C" "Isothermal Reflectivity at -10C" "dBZ" -209 10 0 255 161 1 3 55 "ReflectivityM15C" "Isothermal Reflectivity at -15C" "dBZ" -209 10 0 255 161 1 3 56 "ReflectivityM20C" "Isothermal Reflectivity at -20C" "dBZ" +209 10 0 255 161 1 3 52 "Reflectivity_0C" "Isothermal Reflectivity at 0C" "dBZ" +209 10 0 255 161 1 3 53 "Reflectivity_-5C" "Isothermal Reflectivity at -5C" "dBZ" +209 10 0 255 161 1 3 54 "Reflectivity_-10C" "Isothermal Reflectivity at -10C" "dBZ" +209 10 0 255 161 1 3 55 "Reflectivity_-15C" "Isothermal Reflectivity at -15C" "dBZ" +209 10 0 255 161 1 3 56 "Reflectivity_-20C" "Isothermal Reflectivity at -20C" "dBZ" 209 10 0 255 161 1 3 57 "ReflectivityAtLowestAltitude" "ReflectivityAtLowestAltitude" "dBZ" 209 10 0 255 161 1 3 58 "MergedReflectivityAtLowestAltitude" "Non Quality Controlled Reflectivity At Lowest Altitude" "dBZ" -209 10 0 255 161 1 4 0 "IRband4" "Infrared (E/W blend)" "K" -209 10 0 255 161 1 4 1 "Visible"
"Visible (E/W blend)" "non-dim" -209 10 0 255 161 1 4 2 "WaterVapor" "Water Vapor (E/W blend)" "K" -209 10 0 255 161 1 4 3 "CloudCover" "Cloud Cover" "K" 209 10 0 255 161 1 6 0 "PrecipFlag" "Surface Precipitation Type" "type" 209 10 0 255 161 1 6 1 "PrecipRate" "Radar Precipitation Rate" "mm/hr" -209 10 0 255 161 1 6 2 "RadarOnlyQPE01H" "Radar precipitation accumulation 1-hour" "mm" -209 10 0 255 161 1 6 3 "RadarOnlyQPE03H" "Radar precipitation accumulation 3-hour" "mm" -209 10 0 255 161 1 6 4 "RadarOnlyQPE06H" "Radar precipitation accumulation 6-hour" "mm" -209 10 0 255 161 1 6 5 "RadarOnlyQPE12H" "Radar precipitation accumulation 12-hour" "mm" -209 10 0 255 161 1 6 6 "RadarOnlyQPE24H" "Radar precipitation accumulation 24-hour" "mm" -209 10 0 255 161 1 6 7 "RadarOnlyQPE48H" "Radar precipitation accumulation 48-hour" "mm" -209 10 0 255 161 1 6 8 "RadarOnlyQPE72H" "Radar precipitation accumulation 72-hour" "mm" -209 10 0 255 161 1 6 9 "GaugeCorrQPE01H" "Local gauge bias corrected radar precipitation accumulation 1-hour" "mm" -209 10 0 255 161 1 6 10 "GaugeCorrQPE03H" "Local gauge bias corrected radar precipitation accumulation 3-hour" "mm" -209 10 0 255 161 1 6 11 "GaugeCorrQPE06H" "Local gauge bias corrected radar precipitation accumulation 6-hour" "mm" -209 10 0 255 161 1 6 12 "GaugeCorrQPE12H" "Local gauge bias corrected radar precipitation accumulation 12-hour" "mm" -209 10 0 255 161 1 6 13 "GaugeCorrQPE24H" "Local gauge bias corrected radar precipitation accumulation 24-hour" "mm" -209 10 0 255 161 1 6 14 "GaugeCorrQPE48H" "Local gauge bias corrected radar precipitation accumulation 48-hour" "mm" -209 10 0 255 161 1 6 15 "GaugeCorrQPE72H" "Local gauge bias corrected radar precipitation accumulation 72-hour" "mm" -209 10 0 255 161 1 6 16 "GaugeOnlyQPE01H" "Gauge only precipitation accumulation 1-hour" "mm" -209 10 0 255 161 1 6 17 "GaugeOnlyQPE03H" "Gauge only precipitation accumulation 3-hour" "mm" -209 10 0 255 161 1 6 18 "GaugeOnlyQPE06H" "Gauge only 
precipitation accumulation 6-hour" "mm" -209 10 0 255 161 1 6 19 "GaugeOnlyQPE12H" "Gauge only precipitation accumulation 12-hour" "mm" -209 10 0 255 161 1 6 20 "GaugeOnlyQPE24H" "Gauge only precipitation accumulation 24-hour" "mm" -209 10 0 255 161 1 6 21 "GaugeOnlyQPE48H" "Gauge only precipitation accumulation 48-hour" "mm" -209 10 0 255 161 1 6 22 "GaugeOnlyQPE72H" "Gauge only precipitation accumulation 72-hour" "mm" -209 10 0 255 161 1 6 23 "MountainMapperQPE01H" "Mountain Mapper precipitation accumulation 1-hour" "mm" -209 10 0 255 161 1 6 24 "MountainMapperQPE03H" "Mountain Mapper precipitation accumulation 3-hour" "mm" -209 10 0 255 161 1 6 25 "MountainMapperQPE06H" "Mountain Mapper precipitation accumulation 6-hour" "mm" -209 10 0 255 161 1 6 26 "MountainMapperQPE12H" "Mountain Mapper precipitation accumulation 12-hour" "mm" -209 10 0 255 161 1 6 27 "MountainMapperQPE24H" "Mountain Mapper precipitation accumulation 24-hour" "mm" -209 10 0 255 161 1 6 28 "MountainMapperQPE48H" "Mountain Mapper precipitation accumulation 48-hour" "mm" -209 10 0 255 161 1 6 29 "MountainMapperQPE72H" "Mountain Mapper precipitation accumulation 72-hour" "mm" -209 10 0 255 161 1 7 0 "ModelSurfaceTemp" "Model Surface temperature [RAP 13km]" "C" -209 10 0 255 161 1 7 1 "ModelWetBulbTemp" "Model Surface wet bulb temperature [RAP 13km]" "C" -209 10 0 255 161 1 7 2 "WarmRainProbability" "Probability of warm rain [RAP 13km derived]" "%" -209 10 0 255 161 1 7 3 "ModelHeight0C" "Model Freezing Level Height [RAP 13km]" "m MSL" -209 10 0 255 161 1 7 4 "BrightBandTopHeight" "Brightband Top Radar [RAP 13km derived]" "m AGL" -209 10 0 255 161 1 7 5 "BrightBandBottomHeight" "Brightband Bottom Radar [RAP 13km derived]" "m AGL" +209 10 0 255 161 1 6 2 "RadarOnly_QPE_01H" "Radar precipitation accumulation 1-hour" "mm" +209 10 0 255 161 1 6 3 "RadarOnly_QPE_03H" "Radar precipitation accumulation 3-hour" "mm" +209 10 0 255 161 1 6 4 "RadarOnly_QPE_06H" "Radar precipitation accumulation 6-hour" "mm" 
+209 10 0 255 161 1 6 5 "RadarOnly_QPE_12H" "Radar precipitation accumulation 12-hour" "mm" +209 10 0 255 161 1 6 6 "RadarOnly_QPE_24H" "Radar precipitation accumulation 24-hour" "mm" +209 10 0 255 161 1 6 7 "RadarOnly_QPE_48H" "Radar precipitation accumulation 48-hour" "mm" +209 10 0 255 161 1 6 8 "RadarOnly_QPE_72H" "Radar precipitation accumulation 72-hour" "mm" +209 10 0 255 161 1 6 30 "MultiSensor_QPE_01H_Pass1" "Multi-sensor accumulation 1-hour Pass1" "mm" +209 10 0 255 161 1 6 31 "MultiSensor_QPE_03H_Pass1" "Multi-sensor accumulation 3-hour Pass1" "mm" +209 10 0 255 161 1 6 32 "MultiSensor_QPE_06H_Pass1" "Multi-sensor accumulation 6-hour Pass1" "mm" +209 10 0 255 161 1 6 33 "MultiSensor_QPE_12H_Pass1" "Multi-sensor accumulation 12-hour Pass1" "mm" +209 10 0 255 161 1 6 34 "MultiSensor_QPE_24H_Pass1" "Multi-sensor accumulation 24-hour Pass1" "mm" +209 10 0 255 161 1 6 35 "MultiSensor_QPE_48H_Pass1" "Multi-sensor accumulation 48-hour Pass1" "mm" +209 10 0 255 161 1 6 36 "MultiSensor_QPE_72H_Pass1" "Multi-sensor accumulation 72-hour Pass1" "mm" +209 10 0 255 161 1 6 37 "MultiSensor_QPE_01H_Pass2" "Multi-sensor accumulation 1-hour Pass2" "mm" +209 10 0 255 161 1 6 38 "MultiSensor_QPE_03H_Pass2" "Multi-sensor accumulation 3-hour Pass2" "mm" +209 10 0 255 161 1 6 39 "MultiSensor_QPE_06H_Pass2" "Multi-sensor accumulation 6-hour Pass2" "mm" +209 10 0 255 161 1 6 40 "MultiSensor_QPE_12H_Pass2" "Multi-sensor accumulation 12-hour Pass2" "mm" +209 10 0 255 161 1 6 41 "MultiSensor_QPE_24H_Pass2" "Multi-sensor accumulation 24-hour Pass2" "mm" +209 10 0 255 161 1 6 42 "MultiSensor_QPE_48H_Pass2" "Multi-sensor accumulation 48-hour Pass2" "mm" +209 10 0 255 161 1 6 43 "MultiSensor_QPE_72H_Pass2" "Multi-sensor accumulation 72-hour Pass2" "mm" +209 10 0 255 161 1 6 44 "SyntheticPrecipRateID" "Method IDs for blended single and dual-pol derived precip rates" "flag" +209 10 0 255 161 1 6 45 "RadarOnly_QPE_15M" "Radar precipitation accumulation 15-minute" "mm" +209 10 0 255 161 1 
6 46 "RadarOnly_QPE_Since12Z" "Radar precipitation accumulation since 12Z" "mm" +209 10 0 255 161 1 7 0 "Model_SurfaceTemp" "Model Surface temperature" "C" +209 10 0 255 161 1 7 1 "Model_WetBulbTemp" "Model Surface wet bulb temperature" "C" +209 10 0 255 161 1 7 2 "WarmRainProbability" "Probability of warm rain" "%" +209 10 0 255 161 1 7 3 "Model_0degC_Height" "Model Freezing Level Height" "m MSL" +209 10 0 255 161 1 7 4 "BrightBandTopHeight" "Brightband Top Height" "m AGL" +209 10 0 255 161 1 7 5 "BrightBandBottomHeight" "Brightband Bottom Height" "m AGL" 209 10 0 255 161 1 8 0 "RadarQualityIndex" "Radar Quality Index" "non-dim" -209 10 0 255 161 1 8 1 "GaugeInflIndex01H" "Gauge Influence Index for 1-hour QPE" "non-dim" -209 10 0 255 161 1 8 2 "GaugeInflIndex03H" "Gauge Influence Index for 3-hour QPE" "non-dim" -209 10 0 255 161 1 8 3 "GaugeInflIndex06H" "Gauge Influence Index for 6-hour QPE" "non-dim" -209 10 0 255 161 1 8 4 "GaugeInflIndex12H" "Gauge Influence Index for 12-hour QPE" "non-dim" -209 10 0 255 161 1 8 5 "GaugeInflIndex24H" "Gauge Influence Index for 24-hour QPE" "non-dim" -209 10 0 255 161 1 8 6 "GaugeInflIndex48H" "Gauge Influence Index for 48-hour QPE" "non-dim" -209 10 0 255 161 1 8 7 "GaugeInflIndex72H" "Gauge Influence Index for 72-hour QPE" "non-dim" +209 10 0 255 161 1 8 1 "GaugeInflIndex_01H_Pass1" "Gauge Influence Index for 1-hour QPE Pass1" "non-dim" +209 10 0 255 161 1 8 2 "GaugeInflIndex_03H_Pass1" "Gauge Influence Index for 3-hour QPE Pass1" "non-dim" +209 10 0 255 161 1 8 3 "GaugeInflIndex_06H_Pass1" "Gauge Influence Index for 6-hour QPE Pass1" "non-dim" +209 10 0 255 161 1 8 4 "GaugeInflIndex_12H_Pass1" "Gauge Influence Index for 12-hour QPE Pass1" "non-dim" +209 10 0 255 161 1 8 5 "GaugeInflIndex_24H_Pass1" "Gauge Influence Index for 24-hour QPE Pass1" "non-dim" +209 10 0 255 161 1 8 6 "GaugeInflIndex_48H_Pass1" "Gauge Influence Index for 48-hour QPE Pass1" "non-dim" +209 10 0 255 161 1 8 7 "GaugeInflIndex_72H_Pass1" "Gauge Influence 
Index for 72-hour QPE Pass1" "non-dim" 209 10 0 255 161 1 8 8 "SeamlessHSR" "Seamless Hybrid Scan Reflectivity with VPR correction" "dBZ" 209 10 0 255 161 1 8 9 "SeamlessHSRHeight" "Height of Seamless Hybrid Scan Reflectivity" "km AGL" -209 10 0 255 161 1 9 0 "ConusMergedReflectivityQC" "WSR-88D 3D Reflectivty Mosaic - 33 CAPPIS (500-19000m)" "dBZ" -209 10 0 255 161 1 9 1 "ConusPlusMergedReflectivityQC" "All Radar 3D Reflectivty Mosaic - 33 CAPPIS (500-19000m)" "dBZ" +209 10 0 255 161 1 8 10 "RadarAccumulationQualityIndex_01H" "Radar 1-hour QPE Accumulation Quality" "non-dim" +209 10 0 255 161 1 8 11 "RadarAccumulationQualityIndex_03H" "Radar 3-hour QPE Accumulation Quality" "non-dim" +209 10 0 255 161 1 8 12 "RadarAccumulationQualityIndex_06H" "Radar 6-hour QPE Accumulation Quality" "non-dim" +209 10 0 255 161 1 8 13 "RadarAccumulationQualityIndex_12H" "Radar 12-hour QPE Accumulation Quality" "non-dim" +209 10 0 255 161 1 8 14 "RadarAccumulationQualityIndex_24H" "Radar 24-hour QPE Accumulation Quality" "non-dim" +209 10 0 255 161 1 8 15 "RadarAccumulationQualityIndex_48H" "Radar 48-hour QPE Accumulation Quality" "non-dim" +209 10 0 255 161 1 8 16 "RadarAccumulationQualityIndex_72H" "Radar 72-hour QPE Accumulation Quality" "non-dim" +209 10 0 255 161 1 8 17 "GaugeInflIndex_01H_Pass2" "Gauge Influence Index for 1-hour QPE Pass2" "non-dim" +209 10 0 255 161 1 8 18 "GaugeInflIndex_03H_Pass2" "Gauge Influence Index for 3-hour QPE Pass2" "non-dim" +209 10 0 255 161 1 8 19 "GaugeInflIndex_06H_Pass2" "Gauge Influence Index for 6-hour QPE Pass2" "non-dim" +209 10 0 255 161 1 8 20 "GaugeInflIndex_12H_Pass2" "Gauge Influence Index for 12-hour QPE Pass2" "non-dim" +209 10 0 255 161 1 8 21 "GaugeInflIndex_24H_Pass2" "Gauge Influence Index for 24-hour QPE Pass2" "non-dim" +209 10 0 255 161 1 8 22 "GaugeInflIndex_48H_Pass2" "Gauge Influence Index for 48-hour QPE Pass2" "non-dim" +209 10 0 255 161 1 8 23 "GaugeInflIndex_72H_Pass2" "Gauge Influence Index for 72-hour QPE Pass2" 
"non-dim" +209 10 0 255 161 1 9 0 "MergedReflectivityQC" "3D Reflectivity Mosaic - 33 CAPPIS (500-19000m)" "dBZ" +209 10 0 255 161 1 9 3 "MergedRhoHV" "3D RhoHV Mosaic - 33 CAPPIS (500-19000m)" "non-dim" +209 10 0 255 161 1 9 4 "MergedZdr" "3D Zdr Mosaic - 33 CAPPIS (500-19000m)" "dB" 209 10 0 255 161 1 10 0 "MergedReflectivityQCComposite" "Composite Reflectivity Mosaic (optimal method)" "dBZ" 209 10 0 255 161 1 10 1 "HeightCompositeReflectivity" "Height of Composite Reflectivity Mosaic (optimal method)" "m MSL" 209 10 0 255 161 1 10 2 "LowLevelCompositeReflectivity" "Low-Level Composite Reflectivity Mosaic (0-4km)" "dBZ" @@ -104,33 +116,33 @@ GRIB2 209 10 0 255 161 1 10 4 "LayerCompositeReflectivity_Low" "Layer Composite Reflectivity Mosaic 0-24kft (low altitude)" "dBZ" 209 10 0 255 161 1 10 5 "LayerCompositeReflectivity_High" "Layer Composite Reflectivity Mosaic 24-60 kft (highest altitude)" "dBZ" 209 10 0 255 161 1 10 6 "LayerCompositeReflectivity_Super" "Layer Composite Reflectivity Mosaic 33-60 kft (super high altitude)" "dBZ" -209 10 0 255 161 1 10 7 "ReflectivityCompositeHourlyMax" "Composite Reflectivity Hourly Maximum" "dBZ" -209 10 0 255 161 1 10 9 "LayerCompositeReflectivity_ANC" "Layer Composite Reflectivity Mosaic (2-4.5km) (forANC)" "dBZ" +209 10 0 255 161 1 10 7 "CREF_1HR_MAX" "Composite Reflectivity Hourly Maximum" "dBZ" +209 10 0 255 161 1 10 9 "LayerCompositeReflectivity_ANC" "Layer Composite Reflectivity Mosaic (2-4.5km) (for ANC)" "dBZ" +209 10 0 255 161 1 10 10 "BREF_1HR_MAX" "Base Reflectivity Hourly Maximum" "dBZ" 209 10 0 255 161 1 11 0 "MergedBaseReflectivityQC" "Mosaic Base Reflectivity (optimal method)" "dBZ" -209 10 0 255 161 1 11 1 "MergedReflectivityComposite" "dBZ" "Raw Composite Reflectivity Mosaic (max ref)" +209 10 0 255 161 1 11 1 "MergedReflectivityComposite" "Raw Composite Reflectivity Mosaic (max ref)" "dBZ" 209 10 0 255 161 1 11 2 "MergedReflectivityQComposite" "Composite Reflectivity Mosaic (max ref)" "dBZ" -209 10 0 255 161 1 
11 3 "MergedBaseReflectivity" "dBZ" "Raw Base Reflectivity Mosaic (optimal method)" -209 10 0 255 161 1 11 4 "Merged_LVL3_BaseHCA" "flag" "Level III Base HCA Mosaic (nearest neighbor)" -209 10 0 255 161 1 12 0 "FLASH_CREST_MAXUNITSTREAMFLOW" "m^3/s/km^2" "FLASH QPE-CREST Unit Streamflow" -209 10 0 255 161 1 12 1 "FLASH_CREST_MAXSTREAMFLOW" "m^3/s" "FLASH QPE-CREST Streamflow" -209 10 0 255 161 1 12 2 "FLASH_CREST_MAXSOILSAT" "%" "FLASH QPE-CREST Soil Saturation" -209 10 0 255 161 1 12 4 "FLASH_SAC_MAXUNITSTREAMFLOW" "m^3/s/km^2" "FLASH QPE-SAC Unit Streamflow" -209 10 0 255 161 1 12 5 "FLASH_SAC_MAXSTREAMFLOW" "m^3/s" "FLASH QPE-SAC Streamflow" -209 10 0 255 161 1 12 6 "FLASH_SAC_MAXSOILSAT" "%" "FLASH QPE-SAC Soil Saturation" -209 10 0 255 161 1 12 14 "FLASH_QPE_ARI30M" "years" "FLASH QPE Average Recurrence Interval 30-min" -209 10 0 255 161 1 12 15 "FLASH_QPE_ARI01H" "years" "FLASH QPE Average Recurrence Interval 01H" -209 10 0 255 161 1 12 16 "FLASH_QPE_ARI03H" "years" "FLASH QPE Average Recurrence Interval 03H" -209 10 0 255 161 1 12 17 "FLASH_QPE_ARI06H" "years" "FLASH QPE Average Recurrence Interval 06H" -209 10 0 255 161 1 12 18 "FLASH_QPE_ARI12H" "years" "FLASH QPE Average Recurrence Interval 12H" -209 10 0 255 161 1 12 19 "FLASH_QPE_ARI24H" "years" "FLASH QPE Average Recurrence Interval 24H" -209 10 0 255 161 1 12 20 "FLASH_QPE_MAX" "years" "FLASH QPE Average Recurrence Interval Maximum" -209 10 0 255 161 1 12 26 "FLASH_QPE_FFG01H" "non-dim" "FLASH QPE-to-FFG Ratio 01H" -209 10 0 255 161 1 12 27 "FLASH_QPE_FFG03H" "non-dim" "FLASH QPE-to-FFG Ratio 03H" -209 10 0 255 161 1 12 28 "FLASH_QPE_FFG06H" "non-dim" "FLASH QPE-to-FFG Ratio 06H" -209 10 0 255 161 1 12 29 "FLASH_QPE_FFGMAX" "non-dim" "FLASH QPE-to-FFG Ratio Maximum" -209 10 0 255 161 1 12 39 "FLASH_HP_MAXUNITSTREAMFLOW" "m^3/s/km^2" "FLASH QPE-Hydrophobic Unit Streamflow" -209 10 0 255 161 1 12 40 "FLASH_HP_MAXSTREAMFLOW" "m^3/s" "FLASH QPE-Hydrophobic Streamflow" -209 10 0 255 161 1 13 0 
"ANC_ConvectiveLikelihood" "non-dim" "Likelihood of convection over the next 01H" -209 10 0 255 161 1 13 1 "ANC_FinalForecast" "dBZ" "01H reflectivity forecast" -209 10 0 255 161 1 14 0 "LVL3_HREET" "kft" "Level III High Resolution Enhanced Echo Top mosaic" -209 10 0 255 161 1 14 1 "LVL3_HighResVIL" "kg/m^2" "Level III High Resouion VIL mosaic" \ No newline at end of file +209 10 0 255 161 1 11 3 "MergedBaseReflectivity" "Raw Base Reflectivity Mosaic (optimal method)" "dBZ" +209 10 0 255 161 1 12 0 "FLASH_CREST_MAXUNITSTREAMFLOW" "FLASH QPE-CREST Unit Streamflow" "m^3/s/km^2" +209 10 0 255 161 1 12 1 "FLASH_CREST_MAXSTREAMFLOW" "FLASH QPE-CREST Streamflow" "m^3/s" +209 10 0 255 161 1 12 2 "FLASH_CREST_MAXSOILSAT" "FLASH QPE-CREST Soil Saturation" "%" +209 10 0 255 161 1 12 4 "FLASH_SAC_MAXUNITSTREAMFLOW" "FLASH QPE-SAC Unit Streamflow" "m^3/s/km^2" +209 10 0 255 161 1 12 5 "FLASH_SAC_MAXSTREAMFLOW" "FLASH QPE-SAC Streamflow" "m^3/s" +209 10 0 255 161 1 12 6 "FLASH_SAC_MAXSOILSAT" "FLASH QPE-SAC Soil Saturation" "%" +209 10 0 255 161 1 12 14 "FLASH_QPE_ARI30M" "FLASH QPE Average Recurrence Interval 30-min" "years" +209 10 0 255 161 1 12 15 "FLASH_QPE_ARI01H" "FLASH QPE Average Recurrence Interval 01H" "years" +209 10 0 255 161 1 12 16 "FLASH_QPE_ARI03H" "FLASH QPE Average Recurrence Interval 03H" "years" +209 10 0 255 161 1 12 17 "FLASH_QPE_ARI06H" "FLASH QPE Average Recurrence Interval 06H" "years" +209 10 0 255 161 1 12 18 "FLASH_QPE_ARI12H" "FLASH QPE Average Recurrence Interval 12H" "years" +209 10 0 255 161 1 12 19 "FLASH_QPE_ARI24H" "FLASH QPE Average Recurrence Interval 24H" "years" +209 10 0 255 161 1 12 20 "FLASH_QPE_ARIMAX" "FLASH QPE Average Recurrence Interval Maximum" "years" +209 10 0 255 161 1 12 26 "FLASH_QPE_FFG01H" "FLASH QPE-to-FFG Ratio 01H" "non-dim" +209 10 0 255 161 1 12 27 "FLASH_QPE_FFG03H" "FLASH QPE-to-FFG Ratio 03H" "non-dim" +209 10 0 255 161 1 12 28 "FLASH_QPE_FFG06H" "FLASH QPE-to-FFG Ratio 06H" "non-dim" +209 10 0 255 161 1 12 29 
"FLASH_QPE_FFGMAX" "FLASH QPE-to-FFG Ratio Maximum" "non-dim" +209 10 0 255 161 1 12 39 "FLASH_HP_MAXUNITSTREAMFLOW" "FLASH QPE-Hydrophobic Unit Streamflow" "m^3/s/km^2" +209 10 0 255 161 1 12 40 "FLASH_HP_MAXSTREAMFLOW" "FLASH QPE-Hydrophobic Streamflow" "m^3/s" +209 10 0 255 161 1 13 0 "ANC_ConvectiveLikelihood" "Likelihood of convection over the next 01H" "non-dim" +209 10 0 255 161 1 13 1 "ANC_FinalForecast" "01H reflectivity forecast" "dBZ" +209 10 0 255 161 1 14 0 "LVL3_HREET" "Level III High Resolution Enhanced Echo Top mosaic" "kft" +209 10 0 255 161 1 14 1 "LVL3_HighResVIL" "Level III High Resolution VIL mosaic" "kg/m^2" diff --git a/docs/Flowchart/MET_flowchart_v2.0.png b/docs/Flowchart/MET_flowchart_v2.0.png index b038071276..9d458afeed 100644 Binary files a/docs/Flowchart/MET_flowchart_v2.0.png and b/docs/Flowchart/MET_flowchart_v2.0.png differ diff --git a/docs/Flowchart/MET_flowchart_v3.0.png b/docs/Flowchart/MET_flowchart_v3.0.png index b03670791a..445f7451ba 100644 Binary files a/docs/Flowchart/MET_flowchart_v3.0.png and b/docs/Flowchart/MET_flowchart_v3.0.png differ diff --git a/docs/Flowchart/MET_flowchart_v3.1.png b/docs/Flowchart/MET_flowchart_v3.1.png index 26a4a5ccb2..82c110964f 100644 Binary files a/docs/Flowchart/MET_flowchart_v3.1.png and b/docs/Flowchart/MET_flowchart_v3.1.png differ diff --git a/docs/Users_Guide/appendixA.rst b/docs/Users_Guide/appendixA.rst index 9b47a535bd..6fe28f380f 100644 --- a/docs/Users_Guide/appendixA.rst +++ b/docs/Users_Guide/appendixA.rst @@ -147,9 +147,10 @@ Q. How do I choose a time slice in a NetCDF file? A. When processing NetCDF files, the level information needs to be -specified to tell MET which 2D slice of data to use. There is -currently no way to explicitly define which time slice to use -other than selecting the time index. +specified to tell MET which 2D slice of data to use. The index is selected from +a value when it starts with "@" for vertical level (pressure or height)
The actual time, @YYYYMMDD_HHMM, is allowed instead of selecting +the time index. Let's use plot_data_plane as an example: @@ -160,6 +161,11 @@ Let's use plot_data_plane as an example: obs.ps \ 'name="APCP"; level="(5,*,*)";' + plot_data_plane \ + gtg_obs_forecast.20130730.i00.f00.nc \ + altitude_20000.ps \ + 'name = "edr"; level = "(@20130730_0000,@20000,*,*)";' + Assuming that the first array is the time, this will select the 6-th time slice of the APCP data and plot it since these indices are 0-based. diff --git a/docs/Users_Guide/config_options.rst b/docs/Users_Guide/config_options.rst index 9a2aab720d..5e4ae8fdbe 100644 --- a/docs/Users_Guide/config_options.rst +++ b/docs/Users_Guide/config_options.rst @@ -204,7 +204,7 @@ convenient to use them. For example, when applying the same configuration to the output from multiple models, consider defining the model name as an environment variable which the controlling script sets prior to verifying the output of each model. Setting MODEL to that environment variable enables you -to use one configuration file rather than maintianing many very similar ones. +to use one configuration file rather than maintaining many very similar ones. An error in the syntax of a configuration file will result in an error from the MET tool stating the location of the parsing error. @@ -991,7 +991,8 @@ File-format specific settings for the "field" entry: * (i,...,j,*,*) for a single field, where i,...,j specifies fixed dimension values and *,* specifies the two dimensions for the - gridded field. For example: + gridded field. @ specifies the vertical level value or time value + instead of offset, (i,...,@NNN,*,*). For example: .. 
code-block:: none @@ -1006,6 +1007,17 @@ File-format specific settings for the "field" entry: } ]; + field = [ + { + name = "QVAPOR"; + level = "(@20220601_1200,@850,*,*)"; + }, + { + name = "TMP_P850_ENS_MEAN"; + level = [ "(*,*)" ]; + } + ]; + * Python (using PYTHON_NUMPY or PYTHON_XARRAY): * The Python interface for MET is described in Appendix F of the MET @@ -2236,10 +2248,17 @@ one hour prior: width = { beg = -3600; end = 0; } -The summaries will only be calculated for the specified GRIB codes. -The supported summaries are "min" (minimum), "max" (maximum), "range", -"mean", "stdev" (standard deviation), "median" and "p##" (percentile, with -the desired percentile value specified in place of ##). +The summaries will only be calculated for the specified GRIB codes +or observation variable ("obs_var") names. + +When determining which observations fall within a time interval, data for the +beginning timestamp is included while data for the ending timestamp is excluded. +Users may need to adjust the "beg" and "end" settings in the "width" dictionary +to include the desired observations in each time interval. + +The supported time summaries are "min" (minimum), "max" (maximum), "range", +"mean", "stdev" (standard deviation), "median", "sum", and "p##" (percentile, +with the desired percentile value specified in place of ##). The "vld_freq" and "vld_thresh" options may be used to require that a certain ratio of observations must be present and contain valid data within the time @@ -2250,6 +2269,9 @@ setting "vld_thresh = 0.5" requires that at least 15 of the 30 expected observations be present and valid for a summary value to be written. The default "vld_thresh = 0.0" setting will skip over this logic. +When using the "sum" option, users should specify "vld_thresh = 1.0" to avoid +missing data values from affecting the resulting sum value. 
+ The variable names are saved to NetCDF file if they are given instead of grib_codes which are not available for non GRIB input. The "obs_var" option was added and works like "grib_code" option (string value VS. int value). diff --git a/docs/Users_Guide/release-notes.rst b/docs/Users_Guide/release-notes.rst index d5fa2e5ec7..94ad929d3a 100644 --- a/docs/Users_Guide/release-notes.rst +++ b/docs/Users_Guide/release-notes.rst @@ -5,356 +5,29 @@ When applicable, release notes are followed by the GitHub issue number which des enhancement, or new feature (`MET GitHub issues `_). Important issues are listed **in bold** for emphasis. -MET Version 10.1.0 release notes (20220314) -------------------------------------------- +MET Version 11.0.0-beta1 release notes (20220622) +------------------------------------------------- * Repository and build: - * Installation: + * **Restructure the contents of the MET repository so that it matches the existing release tarfiles** (`#1920 `_). + * Fix the OpenMP compilation error for GCC 9.3.0/9.4.0 (`#2106 `_). + * Update the MET version number to 11.0.0 (`#2132 `_). - * **Enhance the MET compilation script and its documentation** (`#1395 `_). +* Bugfixes: - * Static Code Analysis: + * Fix regression test differences in pb2nc and ioda2nc output (`#2102 `_). + * Fix support for reading rotated lat/lon grids from CF-compliant NetCDF files (`#2115 `_). + * Fix support for reading rotated lat/lon grids from GRIB1 files (grid type 10) (`#2118 `_). + * Fix support for int64 NetCDF variable types (`#2123 `_). + * Fix Stat-Analysis to aggregate the ECNT ME and RMSE values correctly (`#2170 `_). + * Fix NetCDF library code to process scale_factor and add_offset attributes independently (`#2187 `_). - * **Automate calls to the SonarQube static code analysis tool in the nightly build** (`#2020 `_). - * Fix Fortify High finding for src/libcode/vx_data2d_nccf/nccf_file.cc (`#1795 `_). - * Fix the findings from SonarQube (`#1855 `_). 
- * Reduce the Security hotspots from SonarQube (`#1903 `_). - * Address findings from the Cppcheck code analysis tool (`#1996 `_). +* Enhancements: - * Testing: - - * Review and revise the warning messages when running the MET unit tests (`#1921 `_). - * Investigate nightly build output wind direction differences caused by machine precision (`#2027 `_). - * Modify plot_tcmpr.R script to support plotting of extra-tropical cyclone tracks not verified against BEST tracks (`#1801 `_). - * Fix failure in plot_tcmpr.R script when a directory is passed in with -lookin (`#1872 `_). - - * Continuous Integration: - - * **Implement Continuous Integration with GitHub Actions in MET** (`#1546 `_). - * Treat warnings from the documentation as errors to facilitate continuous integration with GHA (`#1819 `_). - -* Documentation: - - * **Create and publish a PDF of the MET User's Guide via Read-The-Docs** (`#1453 `_). - * **Enhance the MET documentation to follow the standard for sections** (`#1998 `_). - * Add anchors to link directly to configuration items in the MET User's Guide (`#1811 `_). - * Update FAQ in User's Guide with info from webpage FAQ (`#1834 `_). - * Document the statistics from the RPS line type in Appendix C (`#1853 `_). - * Enhance the documentation with meta-data that is expected by MET for netCDF (`#1949 `_). - * Update documentation to reference GitHub Discussions instead of MET Help (`#1833 `_). - * Fix broken URLs in default MET config files (`#1864 `_). - - -* Library code: - - * Bugfixes: - - * Add check for the start offset and data count are valid before calling NetCDF API (`#1852 `_). - * Fix the MET library code to correclty parse timing information from Grid-Stat NetCDF matched pairs output files (`#2040 `_). - * Fix bug with the incrementing of numbers in temporary file names (`#1906 `_). 
- - * Python embedding enhancements: - - * **Enhance Ensemble-Stat, Point-Stat, Plot-Point-Obs, and Point2Grid to support python embedding of point observations** (`#1844 `_). - * Fix python embedding when using a named grid with MET_PYTHON_EXE set (`#1798 `_). - - * Miscellaneous: - - * **Enhance MET to use point observations falling between the first and last columns of a global grid** (`#1823 `_). - * Support percentile thresholds for frequency bias not equal to 1 (e.g. ==FBIAS0.9) (`#1761 `_). - * Reimplement the NumArray class based on an STL template (`#1899 `_). - * Modify the interpretation of the message_type_group_map values to support the use of regular expressions (`#1974 `_). - * Sort files read from directories to provide consistent behavior across platforms (`#1989 `_). - * Print warning message for fields that contain no valid data (`#1912 `_). - * Update error messages to redirect users from the MET-Help desk to METplus Discussions (`#2054 `_). - * Update the copyright year of the source code to 2022 (`#2013 `_). - - * NetCDF library: - - * **Implement a common API for reading and writing the common NetCDF point observation file format** (`#1402 `_ and `#1581 `_). - * **Enhance the MET library code to read Rotated Lat/Lon data from CF-compliant NetCDF files** (`#1055 `_). - - * Statistics computations: - - * Add Scatter Index to the CNT line type (`#1843 `_). - * Add the HSS_EC statistic to the MCTS line type and a configurable option for its computation (`#1749 `_). - -* Application code: - - * ASCII2NC Tool: - - * Fix ASCII2NC to check the return status when reading ASCII input files (`#1957 `_). - - * Ensemble-Stat Tool: - - * **Enhance Ensemble-Stat to compute probabilistic statistics for user-defined or climatology-based thresholds** (`#1259 `_). - * **Enhance Ensemble-Stat to apply the HiRA method to ensembles** (`#1583 `_ and `#2045 `_). 
- * **Enhance Ensemble-Stat and Gen-Ens-Prod to read all ensemble members from a single input file** (`#1695 `_). - * **Add logic to Ensemble-Stat to handle an ensemble control member** (`#1905 `_). - * Enhance Ensemble-Stat and Gen-Ens-Prod to error out if the control member also appears in the list of ensemble members (`#1968 `_). - * Add Point-Stat and Ensemble-Stat obs_quality_exc configuration option to specify which quality flags should be excluded (`#1858 `_). - * Print a warning message about switching from Ensemble-Stat to Gen-Ens-Prod (`#1907 `_). - * Fix failure of Ensemble-Stat when verifying against gridded ECMWF GRIB1 files (`#1879 `_). - - * Gen-Ens-Prod Tool (NEW): - - * **Create the new Gen-Ens-Prod tool for ensemble product generation** (`#1904 `_). - * **Enhance Ensemble-Stat and Gen-Ens-Prod to read all ensemble members from a single input file** (`#1695 `_). - * Enhance Gen-Ens-Prod to standardize ensemble members relative to climatology (`#1918 `_). - - * Gen-Vx-Mask Tool: - - * **Refine logic to prevent rounding shapefile points to the nearest grid point** (affects GenVxMask -type shape masks) (`#1810 `_). - * Change -type for Gen-Vx-Mask from an optional argument to a required one (`#1792 `_). - * Fix Gen-Vx-Mask to handle named grids and grid specification strings for -type grid (`#1993 `_). - * Fix Gen-Vx-Mask so that the -input_field and -mask_field options are processed independently (`#1891 `_). - - * Grid-Diag Tool: - - * Fix integer overflow in Grid-Diag (`#1886 `_). - - * Grid-Stat Tool: - - * **Enhance Grid-Stat to use OpenMP for efficient computation of neighborhood statistics by setting $OMP_NUM_THREADS** (`#1926 `_). - * **Add G and G-Beta to the DMAP line type from Grid-Stat** (`#1673 `_). - * Fix Point-Stat and Grid-Stat to write VCNT output even if no VL1L2 or VAL1L2 output is requested (`#1991 `_). - - * IODA2NC Tool: - - * Fix IODA2NC to handle the same input file being provided multiple times (`#1965 `_). 
- * Fix IODA2NC bug rejecting all input observations in unit tests (`#1922 `_). - - * MADIS2NC Tool: - - * Enhance MADIS2NC to handle the 2016 updates to its format (`#1936 `_). - * Fix MADIS2NC to correctly parse MADIS profiler quality flag values (`#2028 `_). - - * MODE Tool: - - * **Add support for Multi-Variate MODE** (`#1184 `_). - - * MTD Tool: - - * Fix MTD to compute the CDIST_TRAVELLED value correctly (`#1976 `_). - - * PB2NC Tool: - - * **Enhance PB2NC to derive Mixed-Layer CAPE (MLCAPE)** (`#1824 `_). - * Enhance the PBL derivation logic in PB2NC (`#1913 `_). - * Update the PB2NC configuration to correct the obs_prefbufr_map name as obs_prepbufr_map (`#2044 `_). - * Add entries to the default obs_prepbufr_map setting (`#2070 `_). - * Fix PB2NC to better inventory BUFR input data when processing all variables (`#1894 `_). - * Fix PB2NC to reduce redundant verbosity level 3 log messages (`#2015 `_). - * Resolve PB2NC string truncation warning messages (`#1909 `_). - - * Point2Grid Tool: - - * Enhance Point2Grid to support double type latitude/longitude variables (`#1838 `_). - * Fix the output of Point2Grid which is flipped and rotated with lat/lon to lat/lon conversion (`#1817 `_). - - * Point-Stat Tool: - - * Add ORANK line type to the HiRA output from Point-Stat (`#1764 `_). - * Add Point-Stat and Ensemble-Stat obs_quality_exc configuration option to specify which quality flags should be excluded (`#1858 `_). - * Fix Point-Stat and Grid-Stat to write VCNT output even if no VL1L2 or VAL1L2 output is requested (`#1991 `_). - - * Series-Analysis Tool: - - * Enhance Series-Analysis to compute the BRIERCL statistic from the PSTD line type (`#2003 `_). - - * Stat-Analysis Tool: - - * **Enhance Stat-Analysis to compute the CBS Index** (`#1031 `_). - * **Enhance Stat-Analysis to write the GO Index and CBS Index into a new SSIDX STAT line type** (`#1788 `_). - * Modify the STAT-Analysis GO Index configuration file (`#1945 `_). 
- * Fix Stat-Analysis skill score index job which always writes a dump row output file (`#1914 `_). - * Fix consumption of too much memory by Stat-Analysis (`#1875 `_). - - * TC-Gen Tool: - - * **Enhance TC-Gen to verify genesis probabilities from ATCF e-deck files** (`#1809 `_). - * **Enhance TC-Gen to verify NHC tropical weather outlook shapefiles** (`#1810 `_). - - * TC-Pairs Tool: - - * Enhance TC-Pairs to only write output for a configurable list of valid times (`#1870 `_). - - * TC-Stat Tool: - - * Fix TC-Stat event equalization logic to include any model name requested using -amodel (`#1932 `_). - - * Wavelet-Stat Tool: - - * Make the specification of a binary threshold in Wavelet-Stat optional (`#1746 `_). - -MET Version 10.0.0 release notes (20210510) -------------------------------------------- - -* Repository and build: - - * **Migrate GitHub respository from the NCAR to DTCenter organization** (`#1462 `_). - * **Switch to consistent vX.Y.Z version numbering, from v10.0 to v10.0.0** (`#1590 `_). - * Switch from tagging releases as met-X.Y.Z to vX.Y.Z instead (`#1541 `_). - * Add a GitHub pull request template (`#1516 `_). - * Resolve warnings from autoconf (`#1498 `_). - * Restructure nightly builds (`#1510 `_). - * Update the MET unit test logic by unsetting environment variables after each test to provide a clean environment for the next (`#1624 `_). - * Run the nightly build as the shared met_test user (`#1116 `_). - * Correct the time offset for tests in unit_plot_data_plane.xml (`#1677 `_). - * Enhance the sample plotting R-script to read output from different versions of MET (`#1653 `_). - * Update the default configuration options to compile the development code with the debug (-g) option and the production code without it (`#1778 `_). - * Update MET to compile using GCC version 10 (`#1552 `_). - * Update MET to compile using PGI version 20 (`#1317 `_). - -* Documentation: - - * **Migrate the MET documentation to Read the Docs** (`#1649 `_). 
- * Enhance and update documentation (`#1459 `_ and `#1460 `_, and `#1731 `_). - * Enhance the python embedding documentation (`#1468 `_). - * Document the supported grid definition templates (`#1469 `_). - * Update comments at the top of each MET config file directing users to the MET User's Guide (`#1598 `_). - * Migrate content from README and README_TC in data/config to the MET User's Guide (`#1474 `_). - * Add version selector to the Sphinx documentation page (`#1461 `_). - * Make bolding consistent across the documentation (`#1458 `_). - * Implement hanging indents for references (`#1457 `_). - * Correct typos and spelling errors (`#1456 `_). - * Update the Grid-Diag documentation to clarify the -data command line option (`#1611 `_). - * Documentation updates to correct typos and apply consistent formatting (`#1455 `_). - * Correct the definition of H_RATE and PODY in MET User's Guide Appendix C (`#1631 `_). - -* Library code: - - * Bugfixes: - - * Apply the GRIB ensemble filtering option (GRIB_ens) whenever specified by the user (`#1604 `_). - * Fix the set_attr_accum option to set the accumulation time instead of the lead time (`#1646 `_). - * Fix ASCII file list parsing logic (`#1484 `_ and `#1508 `_). - * Fix parsing error for floating point percentile thresholds, like ">SFP33.3" (`#1716 `_). - - * Python embedding enhancements: - - * Note that the netCDF4 Python package is now required in place of the pickle package! - * **Replace the pickle format for temporary python files with NetCDF for gridded data** (`#1319 `_, `#1697 `_). - * **Replace the pickle format for temporary python files with ASCII for point observations in ascii2nc and matched pair data in Stat-Analysis** (`#1319 `_, `#1700 `_). - * **Complete support for Python XArray embedding** (`#1534 `_). - * Treat gridded fields of entirely missing data as missing files and fix python embedding to call common data processing code (`#1494 `_). 
- * Clarify error messages for Xarray python embedding (`#1472 `_). - * Add support for Gaussian grids with python embedding (`#1477 `_). - * Correct error messages from python embedding (`#1473 `_). - * Enhance to support the "grid" being defined as a named grid or specification string (`#1471 `_). - * Enhance to parse python longlong variables as integers to make the python embedding scripts less particular (`#1747 `_). - * Fix the read_ascii_mpr.py python embedding script to pass all 37 columns of MPR data to Stat-Analysis (`#1620 `_). - * Fix the read_tmp_dataplane.py python embedding script to handle the fill value correctly (`#1753 `_). - - * Miscellaneous: - - * **Enhance support for rotated latlon grids and update related documentation** (`#1574 `_). - * Parse the -v and -log options prior to application-specific command line options (`#1527 `_). - * Update GRIB1/2 table entries for the MXUPHL, MAXREF, MAXUVV, and MAXDVV variables (`#1658 `_). - * Update the Air Force GRIB tables to reflect current AF usage (`#1519 `_). - * Enhance the DataLine::get_item() error message to include the file name, line number, and column (`#1429 `_). - * Add support for climatological probabilities for complex CDP thresholds, like >=CDP33&&<=CDP67 (`#1705 `_). - * Update the NCL-derived color tables (`#1568 `_). - - * NetCDF library: - - * Enhance to support additional NetCDF data types (`#1492 `_ and `#1493 `_). - * Add support for the NetCDF-CF conventions time bounds option (`#1657 `_). - * Extend CF-compliant NetCDF file support when defining the time dimension as a time string (`#1755 `_). - * Error out when reading CF-compliant NetCDF data with incomplete grid definition (`#1454 `_). - * Reformat and simplify the magic_str() printed for NetCDF data files (`#1655 `_). - * Parse the "init_time" and "valid_time" attributes from MET NetCDF input files (`#1346 `_). 
- - * Statistics computations: - - * **Modify the climatological Brier Score computation to match the NOAA/EMC VSDB method** (`#1684 `_). - * **Add support for the Hersbach CRPS algorithm by add new columns to the ECNT line type** (`#1450 `_). - * Enhance MET to derive the Hersbach CRPSCL_EMP and CRPSS_EMP statistics from a single deterministic reference model (`#1685 `_). - * Correct the climatological CRPS computation to match the NOAA/EMC VSDB method (`#1451 `_). - * Refine log messages when verifying probabilities (`#1502 `_). - -* Application code: - - * ASCII2NC Tool: - - * Fix to handle bad records in little_r format (`#1737 `_). - * Create empty output files for zero input observations instead of erroring out (`#1630 `_). - - * GIS Tools: - - * Fix memory corruption bug in the gis_dump_dbf utility which causes it to abort at runtime (`#1777 `_). - - * Grid-Diag Tool: - - * Fix bug when reading the same variable name from multiple data sources (`#1694 `_). - - * Grid-Stat Tool: - - * **Add mpr_column and mpr_thresh configuration options to filter out matched pairs based on large fcst, obs, and climo differences** (`#1575 `_). - * Correct the climatological CDF values in the NetCDF matched pairs output files and correct the climatological probability values for climatgological distribution percentile (CDP) threshold types (`#1638 `_). - - * IODA2NC Tool (NEW): - - * **Add the new ioda2nc tool** (`#1355 `_). - - * MADIS2NC Tool: - - * Clarify various error messages (`#1409 `_). - - * MODE Tool: - - * **Update the MODE AREA_RATIO output column to list the forecast area divided by the observation area** (`#1643 `_). - * **Incremental development toward the Multivariate MODE tool** (`#1282 `_, `#1284 `_, and `#1290 `_). - - * PB2NC Tool: - - * Fix intermittent segfault when deriving PBL (`#1715 `_). - - * Plot-Point-Obs Tool: - - * **Overhaul Plot-Point-Obs to make it highly configurable** (`#213 `_, `#1528 `_, and `#1052 `_). 
- * Support regridding option in the config file (`#1627 `_). - - * Point2Grid Tool: - - * **Support additional NetCDF point observation data sources** (`#1345 `_, `#1509 `_, and `#1511 `_). - * Support the 2-dimensional time variable in Himawari data files (`#1580 `_). - * Create empty output files for zero input observations instead of erroring out (`#1630 `_). - * Improve the Point2Grid runtime performance (`#1421 `_). - * Process point observations by variable name instead of GRIB code (`#1408 `_). - - * Point-Stat Tool: - - * **Add mpr_column and mpr_thresh configuration options to filter out matched pairs based on large fcst, obs, and climo differences** (`#1575 `_). - * **Print the rejection code reason count log messages at verbosity level 2 for zero matched pairs** (`#1644 `_). - * **Add detailed log messages when discarding observations** (`#1588 `_). - * Update log messages (`#1514 `_). - * Enhance the validation of masking regions to check for non-unique masking region names (`#1439 `_). - * Fix Point-Stat runtime error for some CF-complaint NetCDF files (`#1782 `_). - - * Stat-Analysis Tool: - - * **Process multiple output thresholds and write multiple output line types in a single aggregate_stat job** (`#1735 `_). - * Skip writing job output to the logfile when the -out_stat option is provided (`#1736 `_). - * Add -fcst_init_inc/_exc and -fcst_valid_inc/_exc job command filtering options to Stat-Analysis (`#1135 `_). - * Add -column_exc job command option to exclude lines based on string values (`#1733 `_). - * Fix Stat-Analysis failure when aggregating ECNT lines (`#1706 `_). - - * TC-Gen Tool: - - * **Overhaul the genesis matching logic, add the development and operational scoring algorithms, and add many config file options** (`#1448 `_). - * Add config file options to filter data by initialization time (init_inc and init_exc) and hurricane basin (basin_mask) (`#1626 `_). - * Add the genesis matched pair (GENMPR) output line type (`#1597 `_). 
- * Add a gridded NetCDF output file with counts for genesis events and track points (`#1430 `_). - * Enhance the matching logic and update several config options to support its S2S application (`#1714 `_). - * Fix lead window filtering option (`#1465 `_). - - * TC-Pairs Tool: - - * Fix to report the correct number of lines read from input track data files (`#1725 `_). - * Fix to read supported RI edeck input lines and ignore unsupported edeck probability line types (`#1768 `_). - - * TC-Stat Tool: - - * Add -column_exc job command option to exclude lines based on string values (`#1733 `_). + * Sort mask.sid station lists to check their contents more efficiently (`#1950 `_). + * Add Anomaly Correlation Coefficient to VCNT Line Type (`#2022 `_). + * Enhance TC-RMW to compute tangential and radial winds (`#2072 `_). + * Allow 2x2 HSS calculations to include user-defined EC values (`#2147 `_). + * Enhance Gen-Vx-Mask by adding a new poly_xy masking type option (`#2152 `_). + * Add M_to_KFT and KM_to_KFT functions to ConfigConstants (`#2180 `_). diff --git a/docs/conf.py b/docs/conf.py index 44bbb2f91f..9ba9c47ec5 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -19,12 +19,12 @@ project = 'MET' author = 'UCAR/NCAR, NOAA, CSU/CIRA, and CU/CIRES' -author_list = 'Newman, K., J. Opatz, T. Jensen, J. Prestopnik, H. Soh, L. Goodrich, B. Brown, R. Bullock, J. Halley Gotway' -version = '10.1.0' +author_list = 'Opatz, J., T. Jensen, J. Prestopnik, H. Soh, L. Goodrich, B. Brown, R. Bullock, J. Halley Gotway, K. 
Newman' +version = '11.0.0-beta1' verinfo = version release = f'{version}' release_year = '2022' -release_date = f'{release_year}-03-14' +release_date = f'{release_year}-06-22' copyright = f'{release_year}, {author}' # -- General configuration --------------------------------------------------- diff --git a/internal/scripts/docker/Dockerfile b/internal/scripts/docker/Dockerfile index f6fdc194c3..d324cbb6d4 100644 --- a/internal/scripts/docker/Dockerfile +++ b/internal/scripts/docker/Dockerfile @@ -36,4 +36,4 @@ RUN echo "Checking out MET ${MET_GIT_NAME} from ${MET_GIT_URL}" \ && git clone ${MET_GIT_URL} ${MET_REPO_DIR} \ && cd ${MET_REPO_DIR} \ && git checkout ${MET_GIT_NAME} \ - && ../scripts/docker/build_met_docker.sh + && internal/scripts/docker/build_met_docker.sh diff --git a/internal/scripts/docker/README.md b/internal/scripts/docker/README.md index 80c6acaa08..87635ceb84 100644 --- a/internal/scripts/docker/README.md +++ b/internal/scripts/docker/README.md @@ -4,17 +4,17 @@ Run all of the Docker commands from the top-level directory of the MET repositor ## Build image with minimum requirements needed to build MET -```docker build -t dtcenter/met-base:minimum -f scripts/docker/Dockerfile.minimum . +```docker build -t dtcenter/met-base:minimum -f internal/scripts/docker/Dockerfile.minimum . docker push dtcenter/met-base:minimum``` ## Build image with requirements to build MET and run MET unit tests -```docker build -t dtcenter/met-base:unit_test -f scripts/docker/Dockerfile.test . +```docker build -t dtcenter/met-base:unit_test -f internal/scripts/docker/Dockerfile.test . 
docker push dtcenter/met-base:unit_test``` ## Build MET from clone -```docker build -t dtcenter/met:${TAG_NAME} --build-arg SOURCE_BRANCH=${BRANCH_NAME} scripts/docker +```docker build -t dtcenter/met:${TAG_NAME} --build-arg SOURCE_BRANCH=${BRANCH_NAME} internal/scripts/docker docker push dtcenter/met:${TAG_NAME}``` where: @@ -23,7 +23,7 @@ where: ## Build MET from local source code with minimum requirements -```docker build -t dtcenter/met:${TAG_NAME} --build-arg SOURCE_BRANCH=${BRANCH_NAME} -f scripts/docker/Dockerfile.copy . +```docker build -t dtcenter/met:${TAG_NAME} --build-arg SOURCE_BRANCH=${BRANCH_NAME} -f internal/scripts/docker/Dockerfile.copy . docker push dtcenter/met:${TAG_NAME}``` where: @@ -32,7 +32,7 @@ where: ## Build MET from local source code with unit test requirements -```docker build -t dtcenter/met:${TAG_NAME} --build-arg SOURCE_BRANCH=${BRANCH_NAME} --build-arg MET_BASE_IMAGE=unit_test -f scripts/docker/Dockerfile.copy . +```docker build -t dtcenter/met:${TAG_NAME} --build-arg SOURCE_BRANCH=${BRANCH_NAME} --build-arg MET_BASE_IMAGE=unit_test -f internal/scripts/docker/Dockerfile.copy . 
docker push dtcenter/met:${TAG_NAME}``` where: diff --git a/internal/scripts/docker/build_met_docker.sh b/internal/scripts/docker/build_met_docker.sh index 80ce57a3e7..7aa7ba15a8 100755 --- a/internal/scripts/docker/build_met_docker.sh +++ b/internal/scripts/docker/build_met_docker.sh @@ -5,6 +5,7 @@ echo "Running script to build MET in Docker" LOG_FILE=/met/logs/MET-${MET_GIT_NAME}_configure.log echo "Configuring MET ${MET_GIT_NAME} and writing log file ${LOG_FILE}" +./bootstrap ./configure --enable-grib2 --enable-mode_graphics --enable-modis --enable-lidar2nc --enable-python \ MET_HDF=/usr/local/hdf MET_HDFEOS=/usr/local/hdfeos \ MET_FREETYPEINC=/usr/include/freetype2 MET_FREETYPELIB=/usr/lib \ diff --git a/scripts/sonarqube/python.sonar-project.properties b/internal/scripts/sonarqube/python.sonar-project.properties similarity index 100% rename from scripts/sonarqube/python.sonar-project.properties rename to internal/scripts/sonarqube/python.sonar-project.properties diff --git a/internal/test_unit/config/Ascii2NcConfig_rain_01H_sum b/internal/test_unit/config/Ascii2NcConfig_rain_01H_sum new file mode 100644 index 0000000000..ca7aafdf4d --- /dev/null +++ b/internal/test_unit/config/Ascii2NcConfig_rain_01H_sum @@ -0,0 +1,52 @@ +//////////////////////////////////////////////////////////////////////////////// +// +// ASCII2NC configuration file. +// +// For additional information, please see the MET User's Guide. 
+// +//////////////////////////////////////////////////////////////////////////////// + +// +// The parameters listed below are used to summarize the ASCII data read in +// + +// +// Time periods for the summarization +// obs_var (string array) is added and works like grib_code (int array) +// when the obs name is given instead of grib_code +// + +time_summary = { + flag = TRUE; + raw_data = FALSE; + beg = "00"; + end = "23"; + step = 3600; + width = { beg = -3599; end = 1; } + grib_code = [ ]; + obs_var = [ "rain" ]; + type = [ "sum" ]; + vld_freq = 15*60; + vld_thresh = 1.0; +} + +// +// Mapping of input little_r report types to output message types +// +message_type_map = [ + { key = "FM-12 SYNOP"; val = "ADPSFC"; }, + { key = "FM-13 SHIP"; val = "SFCSHP"; }, + { key = "FM-15 METAR"; val = "ADPSFC"; }, + { key = "FM-18 BUOY"; val = "SFCSHP"; }, + { key = "FM-281 QSCAT"; val = "ASCATW"; }, + { key = "FM-32 PILOT"; val = "ADPUPA"; }, + { key = "FM-35 TEMP"; val = "ADPUPA"; }, + { key = "FM-88 SATOB"; val = "SATWND"; }, + { key = "FM-97 ACARS"; val = "AIRCFT"; } +]; + +// +// Indicate a version number for the contents of this configuration file. +// The value should generally not be modified. 
+// +version = "V11.0.0"; diff --git a/internal/test_unit/xml/unit_ascii2nc.xml b/internal/test_unit/xml/unit_ascii2nc.xml index feef57c65d..01322961a8 100644 --- a/internal/test_unit/xml/unit_ascii2nc.xml +++ b/internal/test_unit/xml/unit_ascii2nc.xml @@ -101,4 +101,20 @@ + + &MET_BIN;/ascii2nc + \ + &DATA_DIR_OBS;/ascii/aws/aws_20220609_020000.txt \ + &DATA_DIR_OBS;/ascii/aws/aws_20220609_021500.txt \ + &DATA_DIR_OBS;/ascii/aws/aws_20220609_023000.txt \ + &DATA_DIR_OBS;/ascii/aws/aws_20220609_024500.txt \ + &DATA_DIR_OBS;/ascii/aws/aws_20220609_030000.txt \ + &OUTPUT_DIR;/ascii2nc/aws_2022060903_rain_01H_sum.nc \ + -config &CONFIG_DIR;/Ascii2NcConfig_rain_01H_sum + + + &OUTPUT_DIR;/ascii2nc/aws_2022060903_rain_01H_sum.nc + + + diff --git a/internal/test_unit/xml/unit_plot_data_plane.xml b/internal/test_unit/xml/unit_plot_data_plane.xml index 02f85765bc..f581160b33 100644 --- a/internal/test_unit/xml/unit_plot_data_plane.xml +++ b/internal/test_unit/xml/unit_plot_data_plane.xml @@ -273,6 +273,20 @@ + + &MET_BIN;/plot_data_plane + \ + &DATA_DIR_MODEL;/nccf/gtg/latlon/gtg_obs_forecast.20130730.i00.f00.nc \ + &OUTPUT_DIR;/plot_data_plane/gtg_obs_forecast.20130730.i00.f00.NCCF_latlon_20000.ps \ + 'name = "edr"; level = "(0,@20000,*,*)";' \ + -title "NCCF Latitude/Longitude Level 0" \ + -v 1 + + + &OUTPUT_DIR;/plot_data_plane/gtg_obs_forecast.20130730.i00.f00.NCCF_latlon_20000.ps + + + &MET_BIN;/plot_data_plane \ @@ -403,7 +417,7 @@ \ &DATA_DIR_MODEL;/easm/pr_day_MPI-ESM-MR_rcp85_r1i1p1_20060101-20091231.nc \ &OUTPUT_DIR;/plot_data_plane/EaSM_CMIP5_pr_day_MPI-ESM-MR_rcp85_r1i1p1_20060101_12_time.ps \ - 'name="pr"; level="(20060102_000000,*,*)";' \ + 'name="pr"; level="(@20060102_000000,*,*)";' \ -v 4 diff --git a/src/basic/vx_util/util_constants.h b/src/basic/vx_util/util_constants.h index d8920d0f9b..2200bded36 100644 --- a/src/basic/vx_util/util_constants.h +++ b/src/basic/vx_util/util_constants.h @@ -119,6 +119,7 @@ static const double const_gop = 9.80616; // from 
The Ceaseless Wind static const double const_rd = 287.0; // kg/k dry gas constant static const int vx_data2d_star = -12345; +static const int vx_data2d_dim_by_value = -123456; // apply the value instead of offset for slicing //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_data2d/level_info.cc b/src/libcode/vx_data2d/level_info.cc index 2323475e93..92df64e9db 100644 --- a/src/libcode/vx_data2d/level_info.cc +++ b/src/libcode/vx_data2d/level_info.cc @@ -70,7 +70,6 @@ void LevelInfo::init_from_scratch() { clear(); - return; } /////////////////////////////////////////////////////////////////////////////// @@ -88,9 +87,8 @@ void LevelInfo::assign(const LevelInfo &l) { Upper = l.upper(); Lower = l.lower(); Increment = l.increment(); - time_as_offset = l.is_time_as_offset(); + Is_offset = l.is_offset(); - return; } /////////////////////////////////////////////////////////////////////////////// @@ -106,9 +104,8 @@ void LevelInfo::clear() { Upper = 0.0; Lower = 0.0; Increment = 0.0; - time_as_offset = true; + Is_offset = true; - return; } /////////////////////////////////////////////////////////////////////////////// @@ -125,56 +122,48 @@ void LevelInfo::dump(ostream &out) const { << " Upper = " << Upper << "\n" << " Increment = " << Increment << "\n"; - return; } /////////////////////////////////////////////////////////////////////////////// void LevelInfo::set_type(LevelType lt) { Type = lt; - return; } /////////////////////////////////////////////////////////////////////////////// void LevelInfo::set_type_num(int i) { TypeNum = i; - return; } /////////////////////////////////////////////////////////////////////////////// void LevelInfo::set_req_name(const char *str) { ReqName = str; - return; } /////////////////////////////////////////////////////////////////////////////// void LevelInfo::set_name(const char *str) { Name = str; - return; } /////////////////////////////////////////////////////////////////////////////// 
void LevelInfo::set_units(const char *str) { Units = str; - return; } /////////////////////////////////////////////////////////////////////////////// void LevelInfo::set_upper(double u) { Upper = u; - return; } /////////////////////////////////////////////////////////////////////////////// void LevelInfo::set_lower(double l) { Lower = l; - return; } /////////////////////////////////////////////////////////////////////////////// @@ -182,21 +171,18 @@ void LevelInfo::set_lower(double l) { void LevelInfo::set_range(double l, double u) { Lower = l; Upper = u; - return; } /////////////////////////////////////////////////////////////////////////////// void LevelInfo::set_increment(double i) { Increment = i; - return; } /////////////////////////////////////////////////////////////////////////////// -void LevelInfo::set_time_as_offset(bool b) { - time_as_offset = b; - return; +void LevelInfo::set_is_offset(bool b) { + Is_offset = b; } /////////////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_data2d/level_info.h b/src/libcode/vx_data2d/level_info.h index a9becc61e6..445528017a 100644 --- a/src/libcode/vx_data2d/level_info.h +++ b/src/libcode/vx_data2d/level_info.h @@ -48,7 +48,7 @@ class LevelInfo double Upper; // Upper level limit double Lower; // Lower level limit double Increment; // Increment (time: seconds, 0 for no increment) - bool time_as_offset;// default: true, false: the (time) value instead + bool Is_offset; // default: true, false: the value instead // of the offset at Lower and Upper void init_from_scratch(); @@ -77,7 +77,7 @@ class LevelInfo double upper() const; double lower() const; double increment() const; - bool is_time_as_offset()const; + bool is_offset() const; // // set stuff @@ -92,7 +92,7 @@ class LevelInfo void set_lower(double l); void set_range(double l, double u); void set_increment(double i); - void set_time_as_offset(bool b); + void set_is_offset(bool b); }; @@ -106,7 +106,7 @@ inline 
ConcatString LevelInfo::units() const { return(Units); } inline double LevelInfo::upper() const { return(Upper); } inline double LevelInfo::lower() const { return(Lower); } inline double LevelInfo::increment()const { return(Increment);} -inline bool LevelInfo::is_time_as_offset()const { return(time_as_offset);} +inline bool LevelInfo::is_offset()const { return(Is_offset);} /////////////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_data2d_nc_met/met_file.cc b/src/libcode/vx_data2d_nc_met/met_file.cc index 14a41b64d1..4aab77db9d 100644 --- a/src/libcode/vx_data2d_nc_met/met_file.cc +++ b/src/libcode/vx_data2d_nc_met/met_file.cc @@ -32,16 +32,10 @@ using namespace std; //////////////////////////////////////////////////////////////////////// -static const char x_dim_name [] = "lon"; -static const char y_dim_name [] = "lat"; +static const char x_dim_name [] = "lon"; +static const char y_dim_name [] = "lat"; -static const string valid_time_att_name = "valid_time"; -static const string init_time_att_name = "init_time"; -static const string valid_time_ut_att_name = "valid_time_ut"; -static const string init_time_ut_att_name = "init_time_ut"; -static const string accum_time_att_name = "accum_time_sec"; - -static const int max_met_args = 30; +static const int max_met_args = 30; //////////////////////////////////////////////////////////////////////// @@ -50,59 +44,30 @@ template void copy_nc_data_as_double(double *to_array, const T *from_array, const int x_slot, const int y_slot, const int nx, const int ny, - double missing_value, double fill_value, - float add_offset, float scale_factor) { + double missing_value, double fill_value) { double value; int x, y, offset, start_offset; offset = 0; - if (add_offset != 0.0 || scale_factor != 1.0) { - if (x_slot > y_slot) { - for (y=0; y y_slot) { + for (y=0; y y_slot) { + for (x=0; x= 7) { @@ -735,17 +603,18 @@ plane.set_size(Nx, Ny); mlog << Debug(7) << method_name_short << "took " << 
duration_sec << " seconds to read NetCDF data\n"; } - + plane.set_block(data_array, Nx, Ny); - + if (mlog.verbosity_level() >= 7) { double duration_sec = (double)(clock() - nc_time)/CLOCKS_PER_SEC; mlog << Debug(7) << method_name_short << "took " << duration_sec << " seconds to fill data plane\n"; } - + if (data_array) delete[] data_array; - + if (double_array) delete[] double_array; + // // done // diff --git a/src/libcode/vx_data2d_nc_pinterp/data2d_nc_pinterp.cc b/src/libcode/vx_data2d_nc_pinterp/data2d_nc_pinterp.cc index 327cab74a3..cf1a97d45e 100644 --- a/src/libcode/vx_data2d_nc_pinterp/data2d_nc_pinterp.cc +++ b/src/libcode/vx_data2d_nc_pinterp/data2d_nc_pinterp.cc @@ -133,9 +133,25 @@ bool MetNcPinterpDataFile::data_plane(VarInfo &vinfo, DataPlane &plane) { plane.clear(); // Read the data + PinterpNc->get_nc_var_info(vinfo_nc->req_name().c_str(), info); + LongArray dimension = vinfo_nc->dimension(); + int dim_count = dimension.n_elements(); + for (int k=0; kvar, k)); + NcVarInfo *var_info = find_var_info_by_dim_name(PinterpNc->Var, dim_name, + PinterpNc->Nvars); + if (var_info) { + long new_offset = get_index_at_nc_data(var_info->var, + vinfo_nc->dim_value(k), + dim_name, (k == info->t_slot)); + if (new_offset != bad_data_int) dimension[k] = new_offset; + } + } + } + status = PinterpNc->data(vinfo_nc->req_name().c_str(), - vinfo_nc->dimension(), - plane, pressure, info); + dimension, plane, pressure, info); // Check that the times match those requested if(status) { diff --git a/src/libcode/vx_data2d_nc_pinterp/pinterp_file.cc b/src/libcode/vx_data2d_nc_pinterp/pinterp_file.cc index d4f8451006..c5dbe62a7b 100644 --- a/src/libcode/vx_data2d_nc_pinterp/pinterp_file.cc +++ b/src/libcode/vx_data2d_nc_pinterp/pinterp_file.cc @@ -7,8 +7,6 @@ // *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* - - //////////////////////////////////////////////////////////////////////// @@ -54,7 +52,7 @@ static const char pressure_var_wrf_interp_name [] = "LEV"; static 
const char pa_units_str [] = "Pa"; static const char hpa_units_str [] = "hPa"; -static const string init_time_att_name = "START_DATE"; +static const string start_time_att_name = "START_DATE"; static const int max_pinterp_args = 30; @@ -75,7 +73,6 @@ static bool is_bad_data_pinterp(double); static bool is_accumulation(const char *); - //////////////////////////////////////////////////////////////////////// @@ -283,7 +280,7 @@ else { } ConcatString att_value; -get_global_att(Nc, init_time_att_name, att_value); +get_global_att(Nc, start_time_att_name, att_value); InitTime = parse_init_time(att_value.c_str()); @@ -567,53 +564,10 @@ int i; short s; float f; double d = bad_data_double; -float add_offset = 0.f; -float scale_factor = 1.f; double missing_value = get_var_missing_value(var); double fill_value = get_var_fill_value(var); -NcVarAtt *att_add_offset = get_nc_att(var, (string)"add_offset"); -NcVarAtt *att_scale_factor = get_nc_att(var, (string)"scale_factor"); -if (!IS_INVALID_NC_P(att_add_offset) && !IS_INVALID_NC_P(att_scale_factor)) { - add_offset = get_att_value_float(att_add_offset); - scale_factor = get_att_value_float(att_scale_factor); -} -if (att_add_offset) delete att_add_offset; -if (att_scale_factor) delete att_scale_factor; - -switch ( GET_NC_TYPE_ID_P(var) ) { - - case NcType::nc_INT: - status = get_nc_data(var, &i, (long *)a); - d = (double) (i); - break; - - case NcType::nc_SHORT: - status = get_nc_data(var, &s, (long *)a); - d = (double) (s); - break; - - case NcType::nc_FLOAT: - status = get_nc_data(var, &f, (long *)a); - d = (double) (f); - break; - - case NcType::nc_DOUBLE: - status = get_nc_data(var, &d, (long *)a); - break; - - default: - mlog << Error << "\nPinterpFile::data(NcVar *, const LongArray &) const -> " - << " bad type for variable \"" << (GET_NC_NAME_P(var)) << "\"\n\n"; - exit ( 1 ); - break; - -} // switch - -if ((add_offset != 0.0 || scale_factor != 1.0) && - !is_eq(d, missing_value) && - !is_eq(d, fill_value)) { - d = d * 
scale_factor + add_offset; -} + +status = get_nc_data(var, &d, (long *)a); if ( !status ) { @@ -784,22 +738,9 @@ plane.set_size(Nx, Ny); // get the data // double d[Ny]; -int i[Ny]; -short s[Ny]; -float f[Ny]; long offsets[dim_count]; long lengths[dim_count]; -float add_offset = 0.f; -float scale_factor = 1.f; -NcVarAtt *att_add_offset = get_nc_att(v, (string)"add_offset"); -NcVarAtt *att_scale_factor = get_nc_att(v, (string)"scale_factor"); -if (!IS_INVALID_NC_P(att_add_offset) && !IS_INVALID_NC_P(att_scale_factor)) { - add_offset = get_att_value_float(att_add_offset); - scale_factor = get_att_value_float(att_scale_factor); -} -if (att_add_offset) delete att_add_offset; -if (att_scale_factor) delete att_scale_factor; for (int k=0; k " - << " bad type for variable \"" << (GET_NC_NAME_P(v)) << "\"\n\n"; - exit ( 1 ); - break; - - } // switch - + get_nc_data(v, (double *)&d, lengths, offsets); b[x_slot] = x; @@ -854,9 +761,6 @@ for (x=0; xvar, a, plane, pressure); // // store the times // -time_index = a[Var[j].t_slot]; + time_index = a[info->t_slot]; -plane.set_init ( InitTime ); -plane.set_valid ( valid_time(time_index) ); -plane.set_lead ( lead_time(time_index) ); + plane.set_init ( InitTime ); + plane.set_valid ( valid_time(time_index) ); + plane.set_lead ( lead_time(time_index) ); // // since Pinterp files only contain WRF-ARW output, it is always a // a runtime accumulation // -if ( is_accumulation(var_name) ) { + if ( is_accumulation(var_name) ) { - plane.set_accum ( lead_time(time_index) ); + plane.set_accum ( lead_time(time_index) ); -} else { + } else { - plane.set_accum ( 0 ); + plane.set_accum ( 0 ); -} + } // // done // -return ( found ); + return ( found ); } +//////////////////////////////////////////////////////////////////////// + +bool PinterpFile::get_nc_var_info(const char *var_name, NcVarInfo *&info) const { + bool found = false; + + if (NULL == info) { + for (int j=0; j " + mlog << Error << "\n" << method_name << "only one dimension can have a 
range for NetCDF variable \"" << MagicStr << "\".\n\n"; exit(1); } // Store the dimension of the range and limits else { - Dimension.add(range_flag); + add_dimension(range_flag); Level.set_lower(atoi(ptr2)); Level.set_upper(atoi(++ptr3)); @@ -202,7 +218,29 @@ void VarInfoNcPinterp::set_magic(const ConcatString &nstr, const ConcatString &l } // Single level else { - Dimension.add(atoi(ptr2)); + int level = 0; + double level_value = bad_data_double; + if (is_number(ptr2)) { + if (as_offset) level = atoi(ptr2); + else { + level = vx_data2d_dim_by_value; + level_value = atof(ptr2); + } + } + else if (is_datestring(ptr2)) { + unixtime unix_time = timestring_to_unix(ptr2); + level = vx_data2d_dim_by_value; + level_value = unix_time; + as_offset = false; + } + else { + mlog << Error << "\n" << method_name + << "trouble parsing NetCDF dimension value \"" + << ptr2 << "\"!\n\n"; + exit(1); + } + if (as_offset) add_dimension(level, as_offset); + else add_dimension(level, as_offset, level_value); } } diff --git a/src/libcode/vx_data2d_nc_pinterp/var_info_nc_pinterp.h b/src/libcode/vx_data2d_nc_pinterp/var_info_nc_pinterp.h index e169437291..bff8f90389 100644 --- a/src/libcode/vx_data2d_nc_pinterp/var_info_nc_pinterp.h +++ b/src/libcode/vx_data2d_nc_pinterp/var_info_nc_pinterp.h @@ -23,6 +23,10 @@ /////////////////////////////////////////////////////////////////////////////// +typedef CRC_Array BoolArray; + +/////////////////////////////////////////////////////////////////////////////// + // // List of Pinterp precipitation variable names // Taken from the WRF version 3.2 Registry.EM file @@ -184,9 +188,12 @@ class VarInfoNcPinterp : public VarInfo // LongArray Dimension; // Dimension values for extracting 2D field + BoolArray Is_offset; // boolean for Dimension value (true: offset, false: value to be an offset (false for value) + NumArray Dim_value; // Dimension values as float for extracting 2D field void init_from_scratch(); void assign(const VarInfoNcPinterp &); + void 
clear_dimension(); public: VarInfoNcPinterp(); @@ -202,8 +209,12 @@ class VarInfoNcPinterp : public VarInfo // GrdFileType file_type() const; - const LongArray & dimension() const; - int dimension(int i) const; + const LongArray & dimension() const; + int dimension(int i) const; + const NumArray & dim_value() const; + double dim_value(int i) const; + const BoolArray & is_offset() const; + bool is_offset(int i) const; int n_dimension() const; // @@ -213,7 +224,7 @@ class VarInfoNcPinterp : public VarInfo void set_magic(const ConcatString &, const ConcatString &); void set_dict(Dictionary &); - void add_dimension(int dim); + void add_dimension(int dim, bool as_index=true, double dim_value=bad_data_double); void set_dimension(int i_dim, int dim); // @@ -235,6 +246,10 @@ inline GrdFileType VarInfoNcPinterp::file_type() const { return(FileT inline const LongArray & VarInfoNcPinterp::dimension() const { return(Dimension); } inline int VarInfoNcPinterp::dimension(int i) const { return(Dimension[i]); } inline int VarInfoNcPinterp::n_dimension() const { return(Dimension.n_elements()); } +inline const NumArray & VarInfoNcPinterp::dim_value() const { return(Dim_value); } +inline double VarInfoNcPinterp::dim_value(int i) const { return(Dim_value[i]); } +inline const BoolArray & VarInfoNcPinterp::is_offset() const { return(Is_offset); } +inline bool VarInfoNcPinterp::is_offset(int i) const { return(Is_offset[i]); } /////////////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_data2d_nccf/data2d_nccf.cc b/src/libcode/vx_data2d_nccf/data2d_nccf.cc index 8a8b2008c3..2a078bf4ed 100644 --- a/src/libcode/vx_data2d_nccf/data2d_nccf.cc +++ b/src/libcode/vx_data2d_nccf/data2d_nccf.cc @@ -76,8 +76,8 @@ MetNcCFDataFile & MetNcCFDataFile::operator=(const MetNcCFDataFile &) { void MetNcCFDataFile::nccf_init_from_scratch() { - _file = (NcCfFile *) 0; - _time_dim_offset = -1; + _file = (NcCfFile *) 0; + _cur_time_index = -1; close(); @@ -175,51 
+175,89 @@ bool MetNcCFDataFile::data_plane(VarInfo &vinfo, DataPlane &plane) if (NULL != data_var) vinfo_nc->set_req_name(data_var->name.c_str()); } + int zdim_slot = bad_data_int; int time_dim_slot = bad_data_int; long org_time_offset = bad_data_int; + long org_z_offset = bad_data_int; + NumArray dim_value = vinfo_nc->dim_value(); LongArray dimension = vinfo_nc->dimension(); - long time_cnt = (long)_file->ValidTime.n_elements(); - long time_threshold_cnt = (time_cnt + 1000) * 1000; - + BoolArray is_offset = vinfo_nc->is_offset(); + data_var = _file->find_var_name(vinfo_nc->req_name().c_str()); if (NULL != data_var) { time_dim_slot = data_var->t_slot; - if (0 <= time_dim_slot) { - org_time_offset = dimension[time_dim_slot]; - long time_offset = org_time_offset; - bool time_as_value = !vinfo_nc->level().is_time_as_offset(); - if (time_as_value || (time_offset == range_flag) || (time_offset == vx_data2d_star)) { - if (0 <= _time_dim_offset) time_offset = _time_dim_offset; - if (time_as_value && time_offset > time_threshold_cnt) // convert the unixtime to offset + for (int idx=0; idxValidTime.n(); + long time_threshold_cnt = 10000000; + org_time_offset = dim_offset; + long time_offset = org_time_offset; + if (time_offset == range_flag) time_offset = _cur_time_index; // from data_plane_array() + else if (!is_offset[idx]) { + long time_value = dim_value[idx]; + time_offset = convert_time_to_offset(time_value); + if ((0 > time_offset) || (time_offset >= time_cnt)) { + if (time_value > time_threshold_cnt) // from time string (yyyymmdd_hh) + mlog << Warning << "\n" << method_name << "the requested time " + << unix_to_yyyymmdd_hhmmss(time_value) << " for \"" + << vinfo.req_name() << "\" variable does not exist (" + << unix_to_yyyymmdd_hhmmss(_file->ValidTime[0]) << " and " + << unix_to_yyyymmdd_hhmmss(_file->ValidTime[time_cnt-1]) << ").\n\n"; + else + mlog << Warning << "\n" << method_name << "the requested time value " + << time_value << " for \"" << vinfo.req_name() << 
"\" variable " + << "is out of range (between 0 and " << (time_cnt-1) << ").\n\n"; + + return false; + } + } + else if ((0 > time_offset) || (time_offset >= time_cnt)) { time_offset = convert_time_to_offset(time_offset); - } - - if ((0 <= time_offset) && (time_offset < time_cnt)) + } + if ((0 > time_offset) || (time_offset >= time_cnt)) { + mlog << Error << "\n" << method_name << "the requested time offset " + << time_offset << " for \"" << vinfo.req_name() << "\" variable " + << "is out of range (between 0 and " << (time_cnt-1) << ").\n\n"; + return false; + } dimension[time_dim_slot] = time_offset; + } else { - bool do_stop = true; - if (time_offset > time_threshold_cnt) // is from time string (yyyymmdd_hh) - mlog << Warning << "\n" << method_name << "the requested time " - << unix_to_yyyymmdd_hhmmss(time_offset) << " for \"" - << vinfo.req_name() << "\" variable does not exist (" - << unix_to_yyyymmdd_hhmmss(_file->ValidTime[0]) << " and " - << unix_to_yyyymmdd_hhmmss(_file->ValidTime[time_cnt-1]) << ").\n\n"; - else if (org_time_offset == bad_data_int) - mlog << Warning << "\n" << method_name << "the requested offset for \"" - << vinfo.req_name() << "\" variable " - << "is out of range (between 0 and " << (time_cnt-1) << ").\n\n"; - else if (org_time_offset == vx_data2d_star) { - do_stop = false; - dimension[time_dim_slot] = 0; - mlog << Warning << "\n" << method_name << "returns the first available time for \"" - << vinfo.req_name() << "\" variable.\n\n"; + long z_cnt = (long)_file->vlevels.n(); + if (z_cnt > 0) { + + zdim_slot = idx; + org_z_offset = dim_offset; + long z_offset = dim_offset; + string z_dim_name; + if (0 <= data_var->z_slot) { + NcDim z_dim = get_nc_dim(data_var->var, data_var->z_slot); + if (IS_VALID_NC(z_dim)) z_dim_name = GET_NC_NAME(z_dim); + } + if (!is_offset[idx]) { + // convert the value to index for slicing + z_offset = convert_value_to_offset(dim_value[idx], z_dim_name); + } + else if ((dim_offset < 0 || dim_offset >= z_cnt)) { + // 
convert the value to index for slicing + z_offset = convert_value_to_offset(dim_offset, z_dim_name); + } + if ((z_offset >= 0) && (z_offset < z_cnt)) + dimension[idx] = long(z_offset); + else { + if (is_offset[idx]) + mlog << Error << "\n" << method_name << "the requested vertical offset " + << dim_offset << " for \"" << vinfo.req_name() << "\" variable " + << "is out of range (between 0 and " << (z_cnt-1) << ").\n\n"; + else + mlog << Error << "\n" << method_name << "the requested vertical value " + << dim_value[idx] << " for \"" << vinfo.req_name() << "\" variable " + << "does not exist (data size = " << z_cnt << ").\n\n"; + return false; + } } - else - mlog << Warning << "\n" << method_name << "the requested offset " - << org_time_offset << " for \"" << vinfo.req_name() << "\" variable " - << "is out of range (between 0 and " << (time_cnt-1) << ").\n\n"; - - if (do_stop) return false; } } } @@ -233,6 +271,8 @@ bool MetNcCFDataFile::data_plane(VarInfo &vinfo, DataPlane &plane) if (org_time_offset != bad_data_int && 0 <= time_dim_slot) dimension[time_dim_slot] = org_time_offset; + if (org_z_offset != bad_data_int && 0 <= zdim_slot) + dimension[zdim_slot] = org_z_offset; // Check that the times match those requested @@ -311,28 +351,27 @@ int MetNcCFDataFile::data_plane_array(VarInfo &vinfo, NcVarInfo *data_var = find_first_data_var(); if (NULL != data_var) vinfo_nc->set_req_name(data_var->name.c_str()); } - - LongArray time_offsets = collect_time_offsets(vinfo); + + LongArray time_offsets = collect_time_offsets(vinfo); if (0 < time_offsets.n_elements()) { LevelInfo level = vinfo.level(); VarInfoNcCF *vinfo_nc = (VarInfoNcCF *)&vinfo; - LongArray dimension = vinfo_nc->dimension(); long time_lower = bad_data_int; long time_upper = bad_data_int; if (level.type() == LevelType_Time) { time_lower = level.lower(); time_upper = level.upper(); } - - int debug_level = 7; + for (int idx=0; idx= debug_level) { for (int idx=0; idx< time_offsets.n_elements(); idx++ ) { mlog << 
Debug(debug_level) << method_name << "time: " @@ -368,10 +407,10 @@ LongArray MetNcCFDataFile::collect_time_offsets(VarInfo &vinfo) { int time_dim_slot = info->t_slot; int time_dim_size = _file->ValidTime.n_elements(); if (0 < time_dim_size && time_dim_slot < 0) { - // The time dimension does not exist at the variable and the time - // variable exists. Stop time slicing and set the time offset to 0. - time_offsets.add(0); - return(time_offsets); + // The time dimension does not exist at the variable and the time + // variable exists. Stop time slicing and set the time offset to 0. + time_offsets.add(0); + return(time_offsets); } double time_lower = bad_data_double; @@ -380,7 +419,7 @@ LongArray MetNcCFDataFile::collect_time_offsets(VarInfo &vinfo) { LevelInfo level = vinfo.level(); LongArray dimension = vinfo_nc->dimension(); bool is_time_range = (level.type() == LevelType_Time); - bool time_as_value = !level.is_time_as_offset(); + bool time_as_value = !level.is_offset(); long dim_offset = (time_dim_slot >= 0) ? dimension[time_dim_slot] : -1; bool include_all_times = (dim_offset == vx_data2d_star); @@ -444,8 +483,7 @@ LongArray MetNcCFDataFile::collect_time_offsets(VarInfo &vinfo) { if (_file->ValidTime[idx] == next_time) { time_offsets.add(idx); mlog << Debug(9) << method_name << " found the time " - << (is_time_range ? 
- unix_to_yyyymmdd_hhmmss(_file->ValidTime[idx]) : idx) << "\n"; + << unix_to_yyyymmdd_hhmmss(_file->ValidTime[idx]) << "\n"; next_time += time_inc; } } @@ -571,18 +609,72 @@ int MetNcCFDataFile::index(VarInfo &vinfo){ //////////////////////////////////////////////////////////////////////// long MetNcCFDataFile::convert_time_to_offset(long time_value) { + bool found = false; + bool found_value = false; long time_offset = time_value; - int dim_size = _file->ValidTime.n_elements(); - long time_threshold_cnt = (dim_size + 1000) * 1000; - if (time_value >= time_threshold_cnt) { + int dim_size = _file->ValidTime.n(); + static const string method_name + = "MetNcCFDataFile::convert_time_to_offset() -> "; + + for (int idx=0; idxValidTime[idx] == time_value) { + time_offset = idx; + found = true; + break; + } + } + + if (!found) { + dim_size = _file->raw_times.n(); for (int idx=0; idxValidTime[idx] == time_value) { + if (_file->raw_times[idx] == time_value) { time_offset = idx; + found_value = true; break; } } } + + if (found) + mlog << Debug(7) << method_name << " Found " + << unix_to_yyyymmdd_hhmmss(time_value) + << " at index " << time_offset << " from time value\n"; + else if (found_value) + mlog << Debug(7) << method_name << " Found " << time_value + << " at index " << time_offset << " from time value\n"; + else + mlog << Warning << "\n" << method_name << time_value + << " does not exist at time variable\n\n"; + return time_offset; } //////////////////////////////////////////////////////////////////////// + +long MetNcCFDataFile::convert_value_to_offset(double z_value, string z_dim_name) { + bool found = false; + long z_offset = (long)z_value; + int dim_size = _file->vlevels.n(); + static const string method_name + = "MetNcCFDataFile::convert_value_to_offset() -> "; + + for (int idx=0; idxvlevels[idx], z_value)) { + found = true; + z_offset = idx; + break; + } + } + + if (!found && 0 < z_dim_name.length()) { + NcVarInfo *var_info = find_var_info_by_dim_name(_file->Var, 
z_dim_name, _file->Nvars); + if (var_info) { + long new_offset = get_index_at_nc_data(var_info->var, z_value, z_dim_name); + if (new_offset != bad_data_int) z_offset = new_offset; + } + } + + return z_offset; +} + +//////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_data2d_nccf/data2d_nccf.h b/src/libcode/vx_data2d_nccf/data2d_nccf.h index dbe5ed8073..4abc64d934 100644 --- a/src/libcode/vx_data2d_nccf/data2d_nccf.h +++ b/src/libcode/vx_data2d_nccf/data2d_nccf.h @@ -36,7 +36,8 @@ class MetNcCFDataFile : public Met2dDataFile { void nccf_init_from_scratch(); NcVarInfo *find_first_data_var(); - long convert_time_to_offset(long time_dim_value); + long convert_time_to_offset(long time_value); + long convert_value_to_offset(double z_value, string z_dim_name); LongArray collect_time_offsets(VarInfo &vinfo); MetNcCFDataFile(const MetNcCFDataFile &); @@ -47,7 +48,7 @@ class MetNcCFDataFile : public Met2dDataFile { // NcCfFile * _file; // allocated - long _time_dim_offset; // current time offset to get the data plane + long _cur_time_index; // current time index to get the data plane (for array of data_plane) public: diff --git a/src/libcode/vx_data2d_nccf/nccf_file.cc b/src/libcode/vx_data2d_nccf/nccf_file.cc index 7dcb7dc99a..5235b1639d 100644 --- a/src/libcode/vx_data2d_nccf/nccf_file.cc +++ b/src/libcode/vx_data2d_nccf/nccf_file.cc @@ -150,6 +150,8 @@ void NcCfFile::close() // Reset the time values ValidTime.clear(); + raw_times.clear(); + vlevels.clear(); InitTime = (unixtime)0; AccumTime = (unixtime)0; @@ -183,7 +185,6 @@ bool NcCfFile::open(const char * filepath) // NcError err(NcError::silent_nonfatal); // Open the file - _ncFile = open_ncfile(filepath); if (IS_INVALID_NC_P(_ncFile)) @@ -206,15 +207,16 @@ bool NcCfFile::open(const char * filepath) // Pull out the variables + int max_dim_count = 0; + NcVar *z_var = (NcVar *)0; NcVar *valid_time_var = (NcVar *)0; ConcatString att_value; - StringArray varNames; Nvars = 
get_var_names(_ncFile, &varNames); Var = new NcVarInfo [Nvars]; - //get_vars_info(Nc, &Var); + NcDim dim; for (int j=0; j max_dim_count) max_dim_count = dim_count; Var[j].Dims = new NcDim * [dim_count]; // parse the variable attributes - get_att_str( Var[j], (string)"long_name", Var[j].long_name_att ); - get_att_str( Var[j], (string)"units", Var[j].units_att ); + get_att_str( Var[j], long_name_att_name, Var[j].long_name_att ); + get_att_str( Var[j], units_att_name, Var[j].units_att ); - if (get_nc_att_value(Var[j].var, (string)"axis", att_value)) { + if (get_var_axis(Var[j].var, att_value)) { if ( "T" == att_value || "time" == att_value ) { valid_time_var = Var[j].var; _time_var_info = &Var[j]; } + else if ( "Z" == att_value || "z" == att_value ) { + z_var = Var[j].var; + } } - if (get_nc_att_value(Var[j].var, (string)"standard_name", att_value)) { + + if (get_var_standard_name(Var[j].var, att_value)) { if ( "time" == att_value ) { valid_time_var = Var[j].var; _time_var_info = &Var[j]; } else if( "latitude" == att_value ) _latVar = Var[j].var; else if( "longitude" == att_value ) _lonVar = Var[j].var; + else if( ("air_pressure" == att_value || "height" == att_value) + && (0 == z_var) ) z_var = Var[j].var; } if ( Var[j].name == "time" && (valid_time_var == 0)) { valid_time_var = Var[j].var; @@ -297,30 +306,25 @@ bool NcCfFile::open(const char * filepath) // Parse the units for the time variable. ut = sec_per_unit = 0; - NcVarAtt *units_att = get_nc_att(valid_time_var, (string)"units", false); - if (IS_VALID_NC_P(units_att)) - { - if (!get_att_value_chars(units_att, units) || units.length() == 0) - { + if (get_var_units(valid_time_var, units)) { + if (units.length() == 0) { mlog << Warning << "\n" << method_name << "the \"time\" variable must contain a \"units\" attribute. 
" << "Using valid time of 0\n\n"; } - else - { + else { mlog << Debug(4) << method_name << "parsing units for the time variable \"" << units << "\"\n"; parse_cf_time_string(units.c_str(), ut, sec_per_unit); } } - if (units_att) delete units_att; NcVar bounds_time_var; NcVar *nc_time_var = (NcVar *)0; bool use_bounds_var = false; ConcatString bounds_var_name; nc_time_var = valid_time_var; - NcVarAtt *bounds_att = get_nc_att(valid_time_var, (string)"bounds", false); + NcVarAtt *bounds_att = get_nc_att(valid_time_var, bounds_att_name, false); if (get_att_value_chars(bounds_att, bounds_var_name)) { bounds_time_var = get_nc_var(_ncFile, bounds_var_name.c_str()); use_bounds_var = IS_VALID_NC(bounds_time_var); @@ -346,6 +350,7 @@ bool NcCfFile::open(const char * filepath) if( latest_time < time_values[i] ) latest_time = time_values[i]; } ValidTime.add(add_to_unixtime(ut, sec_per_unit, latest_time, no_leap_year)); + raw_times.add(latest_time); } else { if (use_bounds_var) { @@ -353,6 +358,7 @@ bool NcCfFile::open(const char * filepath) double time_fraction; for(int i=0; iunits_att.c_str())) { + Var[j].t_slot = k; + t_dims.add(dim_name); + } + else if (is_nc_unit_latitude(info->units_att.c_str())) { + Var[j].y_slot = k; + } + else if (is_nc_unit_longitude(info->units_att.c_str())) { + Var[j].x_slot = k; + } + else { + Var[j].z_slot = k; + z_dims.add(dim_name); + if (0 == z_dim_name.length()) z_dim_name = dim_name; + } + } + } } } // for j + // Find the vertical level variable from dimension name if not found + if (IS_INVALID_NC_P(z_var) && (0 < z_dim_name.length())) { + NcVarInfo *info = find_var_by_dim_name(z_dim_name.c_str()); + if (info) z_var = info->var; + } + + // Pull out the vertical levels + if (IS_VALID_NC_P(z_var)) { + + int z_count = (int) get_data_size(z_var); + double *z_values = new double[z_count]; + + if( get_nc_data(z_var, z_values) ) { + for(int i=0; i y_slot ) { @@ -1134,7 +1076,6 @@ bool NcCfFile::getData(NcVar * v, const LongArray & a, DataPlane & 
plane) const if( is_eq(value, missing_value) || is_eq(value, fill_value) ) { value = bad_data_double; } - else if( do_scale_factor ) value = value * scale_factor + add_offset; plane.set(value, x, y_offset); @@ -1152,7 +1093,6 @@ bool NcCfFile::getData(NcVar * v, const LongArray & a, DataPlane & plane) const if( is_eq(value, missing_value) || is_eq(value, fill_value) ) { value = bad_data_double; } - else if( do_scale_factor ) value = value * scale_factor + add_offset; plane.set(value, x, y_offset); @@ -1227,6 +1167,32 @@ NcVarInfo* NcCfFile::find_var_name(const char * var_name) const //////////////////////////////////////////////////////////////////////// +NcVarInfo* NcCfFile::find_var_by_dim_name(const char *dim_name) const +{ + NcVarInfo *var = find_var_name(dim_name); + if (!var) { + //StringArray dimNames; + for (int i=0; igetSize(), _xDim->getSize()); status = true; } @@ -1393,7 +1351,7 @@ void NcCfFile::get_grid_from_grid_mapping(const NcVarAtt *grid_mapping_att) } } /* endfor - i */ - if (grid_mapping_var == 0 || IS_INVALID_NC_P(grid_mapping_var)) + if ((grid_mapping_var == 0) || (IS_INVALID_NC_P(grid_mapping_var))) { mlog << Error << "\n" << method_name << " -> " << "Cannot extract grid mapping variable (" << mapping_name @@ -1403,7 +1361,7 @@ void NcCfFile::get_grid_from_grid_mapping(const NcVarAtt *grid_mapping_att) // Get the name of the grid mapping - NcVarAtt *grid_mapping_name_att = get_nc_att(grid_mapping_var, (string)"grid_mapping_name"); + NcVarAtt *grid_mapping_name_att = get_nc_att(grid_mapping_var, grid_mapping_name_att_name); if (IS_INVALID_NC_P(grid_mapping_name_att)) { @@ -1672,18 +1630,14 @@ void NcCfFile::get_grid_mapping_lambert_conformal_conic(const NcVar *grid_mappin // files that are in other units, we'll have to update the code to do the // units conversions. 
- const NcVarAtt *x_coord_units_att = get_nc_att(_xCoordVar, (string)"units"); - if (IS_INVALID_NC_P(x_coord_units_att)) - { + ConcatString x_coord_units_name; + if (!get_var_units(_xCoordVar, x_coord_units_name)) { mlog << Warning << "\n" << method_name << " -> " << "Units not given for X coordinate variable -- assuming meters.\n\n"; } - else - { + else { //const char *x_coord_units_name = x_coord_units_att->getValues(att->as_string(0); - ConcatString x_coord_units_name; - if (!get_att_value_chars(x_coord_units_att, x_coord_units_name)) - { + if (0 == x_coord_units_name.length()) { mlog << Warning << "\n" << method_name << " -> " << "Cannot extract X coordinate units from netCDF file -- " << "assuming meters.\n\n"; @@ -1699,20 +1653,14 @@ void NcCfFile::get_grid_mapping_lambert_conformal_conic(const NcVar *grid_mappin } } - if (x_coord_units_att) delete x_coord_units_att; - - const NcVarAtt *y_coord_units_att = get_nc_att(_yCoordVar, (string)"units"); - if (IS_INVALID_NC_P(y_coord_units_att)) - { + ConcatString y_coord_units_name; + if (!get_var_units(_yCoordVar, y_coord_units_name)) { mlog << Warning << "\n" << method_name << " -> " << "Units not given for Y coordinate variable -- assuming meters.\n\n"; } - else - { + else { //const char *y_coord_units_name = y_coord_units_att->getValues(att->as_string(0); - ConcatString y_coord_units_name; - if (!get_att_value_chars(y_coord_units_att, y_coord_units_name)) - { + if (0 == y_coord_units_name.length()) { mlog << Warning << "\n" << method_name << " -> " << "Cannot extract Y coordinate units from netCDF file -- " << "assuming meters.\n\n"; @@ -1728,8 +1676,6 @@ void NcCfFile::get_grid_mapping_lambert_conformal_conic(const NcVar *grid_mappin } } - if (y_coord_units_att) delete y_coord_units_att; - // Figure out the dx/dy and x/y pin values from the dimension variables long x_counts = GET_NC_SIZE_P(_xDim); @@ -1738,7 +1684,6 @@ void NcCfFile::get_grid_mapping_lambert_conformal_conic(const NcVar *grid_mappin 
//_xCoordVar->get(x_values, &x_counts); get_nc_data(_xCoordVar, x_values); - long y_counts = GET_NC_SIZE_P(_yDim); double y_values[y_counts]; @@ -1856,22 +1801,10 @@ void NcCfFile::get_grid_mapping_latitude_longitude(const NcVar *grid_mapping_var // The lat/lon dimensions are identified by their units const NcVar coord_var = get_var(_ncFile, _dims[dim_num]->getName().c_str()); - if (IS_INVALID_NC(coord_var)) - continue; - - const NcVarAtt *units_att = get_nc_att(&coord_var, (string)"units"); - if (IS_INVALID_NC_P(units_att)) { - if (units_att) delete units_att; - continue; - } + if (IS_INVALID_NC(coord_var)) continue; ConcatString dim_units; - if (!get_att_value_chars(units_att, dim_units)) { - if (units_att) delete units_att; - continue; - } - - if (units_att) delete units_att; + if (!get_var_units(&coord_var, dim_units)) continue; // See if this is a lat or lon dimension @@ -2062,17 +1995,11 @@ void NcCfFile::get_grid_mapping_polar_stereographic(const NcVar *grid_mapping_va if (IS_INVALID_NC(coord_var)) continue; - const NcVarAtt *std_name_att = get_nc_att(&coord_var, (string)"standard_name"); - if (IS_INVALID_NC_P(std_name_att)) { - if (std_name_att) delete std_name_att; - continue; - } ConcatString dim_std_name; - if (!get_att_value_chars(std_name_att, dim_std_name)) { - if (std_name_att) delete std_name_att; + const NcVarAtt *std_name_att = get_nc_att(&coord_var, standard_name_att_name); + if (!get_var_standard_name(&coord_var, dim_std_name)) { continue; } - if (std_name_att) delete std_name_att; // See if this is an X or Y dimension @@ -2150,17 +2077,14 @@ void NcCfFile::get_grid_mapping_polar_stereographic(const NcVar *grid_mapping_va // files that are in other units, we'll have to update the code to do the // units conversions. 
- const NcVarAtt *x_coord_units_att = get_nc_att(_xCoordVar, (string)"units"); - if (IS_INVALID_NC_P(x_coord_units_att)) - { + ConcatString x_coord_units_name; + const NcVarAtt *x_coord_units_att = get_nc_att(_xCoordVar, units_att_name); + if (!get_var_units(_xCoordVar, x_coord_units_name)) { mlog << Warning << "\n" << method_name << " -> " << "Units not given for X coordinate variable -- assuming meters.\n\n"; } - else - { - ConcatString x_coord_units_name; - if (!get_att_value_chars(x_coord_units_att, x_coord_units_name)) - { + else { + if (0 == x_coord_units_name.length()) { mlog << Warning << "\n" << method_name << " -> " << "Cannot extract X coordinate units from netCDF file -- " << "assuming meters.\n\n"; @@ -2176,19 +2100,14 @@ void NcCfFile::get_grid_mapping_polar_stereographic(const NcVar *grid_mapping_va } } - if(x_coord_units_att) delete x_coord_units_att; - - const NcVarAtt *y_coord_units_att = get_nc_att(_yCoordVar, (string)"units"); - if (IS_INVALID_NC_P(y_coord_units_att)) - { + ConcatString y_coord_units_name; + const NcVarAtt *y_coord_units_att = get_nc_att(_yCoordVar, units_att_name); + if (!get_var_units(_yCoordVar, y_coord_units_name)) { mlog << Warning << "\n" << method_name << " -> " << "Units not given for Y coordinate variable -- assuming meters.\n\n"; } - else - { - ConcatString y_coord_units_name; - if (!get_att_value_chars(y_coord_units_att, y_coord_units_name)) - { + else { + if (0 == y_coord_units_name.length()) { mlog << Warning << "\n" << method_name << " -> " << "Cannot extract Y coordinate units from netCDF file -- " << "assuming meters.\n\n"; @@ -2204,8 +2123,6 @@ void NcCfFile::get_grid_mapping_polar_stereographic(const NcVar *grid_mapping_va } } - if(y_coord_units_att) delete y_coord_units_att; - // Figure out the dx/dy and x/y pin values from the dimension variables long x_counts = GET_NC_SIZE_P(_xDim); @@ -2307,20 +2224,15 @@ void NcCfFile::get_grid_mapping_rotated_latitude_longitude(const NcVar *grid_map if 
(IS_INVALID_NC(coord_var)) continue; - const NcVarAtt *std_name_att = get_nc_att(&coord_var, (string)"standard_name"); - if (IS_INVALID_NC_P(std_name_att)) { - if (std_name_att) delete std_name_att; + ConcatString dim_standard_name; + if (!get_var_standard_name(&coord_var, dim_standard_name)) { continue; } - ConcatString dim_standard_name; - if (!get_att_value_chars(std_name_att, dim_standard_name)) { - if (std_name_att) delete std_name_att; + if (0 == dim_standard_name.length()) { continue; } - if (std_name_att) delete std_name_att; - // See if this is a grid_latitude or grid_longitude dimension if (dim_standard_name == "grid_latitude") @@ -2584,7 +2496,7 @@ void NcCfFile::get_grid_mapping_geostationary( if (IS_INVALID_NC(coord_var)) continue; - const NcVarAtt *std_name_att = get_nc_att(&coord_var, (string)"standard_name"); + const NcVarAtt *std_name_att = get_nc_att(&coord_var, standard_name_att_name); if (IS_INVALID_NC_P(std_name_att)) { if (std_name_att) delete std_name_att; continue; @@ -2779,11 +2691,10 @@ bool NcCfFile::get_grid_from_coordinates(const NcVar *data_var) { mlog << Debug(6) << "\n" << method_name << " -> " << "collect GRID info from \"" << GET_NC_NAME_P(data_var) << "\".\n\n"; - NcVarAtt *coordinates_att = get_nc_att(data_var, (string)"coordinates"); + NcVarAtt *coordinates_att = get_nc_att(data_var, coordinates_att_name); if (IS_VALID_NC_P(coordinates_att)) { ConcatString coordinates_value, units_value, axis_value; - NcVarAtt *missing_value_att = (NcVarAtt*) 0; get_att_value_chars(coordinates_att, coordinates_value); StringArray sa = coordinates_value.split(" "); int count = sa.n_elements(); @@ -2799,7 +2710,7 @@ bool NcCfFile::get_grid_from_coordinates(const NcVar *data_var) { is_x_dim_var = is_y_dim_var = false; for (int cIdx = 0; cIdx " << "unknown units [" << units_value << "] for the coordinate variable [" @@ -2826,21 +2737,11 @@ bool NcCfFile::get_grid_from_coordinates(const NcVar *data_var) { } if (is_y_dim_var || Var[var_num].name == 
y_dim_var_name) { _yCoordVar = Var[var_num].var; - missing_value_att = get_nc_att(_yCoordVar, (string)"_FillValue"); - if (IS_VALID_NC_P(missing_value_att)) { - lat_missing_value = get_att_value_double(missing_value_att); - } + get_var_fill_value(_yCoordVar, lat_missing_value); } else if (is_x_dim_var || Var[var_num].name == x_dim_var_name) { _xCoordVar = Var[var_num].var; - missing_value_att = get_nc_att(_xCoordVar, (string)"_FillValue"); - if (IS_VALID_NC_P(missing_value_att)) { - lon_missing_value = get_att_value_double(missing_value_att); - } - } - if(missing_value_att) { - delete missing_value_att; - missing_value_att = (NcVarAtt *)0; + get_var_fill_value(_xCoordVar, lon_missing_value); } } diff --git a/src/libcode/vx_data2d_nccf/nccf_file.h b/src/libcode/vx_data2d_nccf/nccf_file.h index e2acf3ee64..9aa9308f16 100644 --- a/src/libcode/vx_data2d_nccf/nccf_file.h +++ b/src/libcode/vx_data2d_nccf/nccf_file.h @@ -80,6 +80,8 @@ class NcCfFile { // TimeArray ValidTime; + NumArray raw_times; + NumArray vlevels; unixtime InitTime; unixtime AccumTime; @@ -112,6 +114,7 @@ class NcCfFile { bool getData(const char *, const LongArray &, DataPlane &, NcVarInfo *&) const; NcVarInfo* find_var_name(const char * var_name) const; + NcVarInfo* find_var_by_dim_name(const char *dim_name) const; private: diff --git a/src/libcode/vx_data2d_nccf/var_info_nccf.cc b/src/libcode/vx_data2d_nccf/var_info_nccf.cc index 25495392f0..ee3880971c 100644 --- a/src/libcode/vx_data2d_nccf/var_info_nccf.cc +++ b/src/libcode/vx_data2d_nccf/var_info_nccf.cc @@ -95,8 +95,10 @@ void VarInfoNcCF::assign(const VarInfoNcCF &v) { VarInfo::assign(v); // Copy - Dimension.clear(); - for(i=0; i " + if (Dimension.has(range_flag)) { + mlog << Error << "\n" << method_name << "only one dimension can have a range for NetCDF variable \"" << MagicStr << "\".\n\n"; exit(1); } - else - { - int increment = 0; + else { + int increment = 1; // Store the dimension of the range and limits *ptr3++ = 0; char *ptr_inc = 
strchr(ptr3, ':'); @@ -228,50 +242,66 @@ void VarInfoNcCF::set_magic(const ConcatString &nstr, const ConcatString &lstr) bool datestring_start = is_datestring(ptr2); bool datestring_end = is_datestring(ptr3); + if (datestring_start != datestring_end) { + mlog << Error << "\n" << method_name + << "the time value and an index/offset can not be mixed for NetCDF variable \"" + << MagicStr << "\".\n\n"; + exit(1); + } + if (datestring_start && datestring_end) as_offset = false; + unixtime time_lower = datestring_start - ? timestring_to_unix(ptr2) : atoi(ptr2); + ? timestring_to_unix(ptr2) + : (as_offset ? atoi(ptr2) : atof(ptr2)); unixtime time_upper = datestring_end - ? timestring_to_unix(ptr3) : atoi(ptr3); + ? timestring_to_unix(ptr3) + : (as_offset ? atoi(ptr3) : atof(ptr3)); if (ptr_inc != NULL) { - if (datestring_end && datestring_start) { - increment = timestring_to_sec(ptr_inc); - mlog << Debug(7) << method_name - << " increment: \"" << ptr_inc << "\" to " - << increment << " seconds.\n"; - } - else increment = atoi(ptr_inc); + if (as_offset) increment = atoi(ptr_inc); + else { + increment = is_float(ptr_inc) + ? 
atof(ptr_inc) : timestring_to_sec(ptr_inc); + mlog << Debug(7) << method_name + << " increment: \"" << ptr_inc << "\" to " + << increment << " seconds.\n"; + } } - Dimension.add(range_flag); + add_dimension(range_flag, as_offset); Level.set_lower(time_lower); Level.set_upper(time_upper); Level.set_increment(increment); // Assume time level type for a range of levels Level.set_type(LevelType_Time); - if (datestring_end && datestring_start) - as_offset = false; + Level.set_is_offset(as_offset); } } - else - { + else { // Single level int level = 0; - if (is_datestring(ptr2)) { + double level_value = bad_data_double; + if (is_number(ptr2)) { + if (as_offset) level = atoi(ptr2); + else { + level = vx_data2d_dim_by_value; + level_value = atof(ptr2); + } + } + else if (is_datestring(ptr2)) { unixtime unix_time = timestring_to_unix(ptr2); - level = unix_time; + level = vx_data2d_dim_by_value; + level_value = unix_time; as_offset = false; } - else if (is_number(ptr2)) { - level = atoi(ptr2); - } else { mlog << Error << "\n" << method_name << "trouble parsing NetCDF dimension value \"" << ptr2 << "\"!\n\n"; exit(1); } - Dimension.add(level); + if (as_offset) add_dimension(level, as_offset); + else add_dimension(level, as_offset, level_value); } } @@ -279,7 +309,6 @@ void VarInfoNcCF::set_magic(const ConcatString &nstr, const ConcatString &lstr) ptr = NULL; } // end while - Level.set_time_as_offset(as_offset); } // end else diff --git a/src/libcode/vx_data2d_nccf/var_info_nccf.h b/src/libcode/vx_data2d_nccf/var_info_nccf.h index f49326d80a..f740539e0d 100644 --- a/src/libcode/vx_data2d_nccf/var_info_nccf.h +++ b/src/libcode/vx_data2d_nccf/var_info_nccf.h @@ -23,6 +23,10 @@ /////////////////////////////////////////////////////////////////////////////// +typedef CRC_Array BoolArray; + +/////////////////////////////////////////////////////////////////////////////// + class VarInfoNcCF : public VarInfo { private: @@ -32,9 +36,12 @@ class VarInfoNcCF : public VarInfo // LongArray 
Dimension; // Dimension values for extracting 2D field + BoolArray Is_offset; // boolean for Dimension value (true: offset, false: value to be an offset (false for value) + NumArray Dim_value; // Dimension values as float for extracting 2D field void init_from_scratch(); void assign(const VarInfoNcCF &); + void clear_dimension(); public: VarInfoNcCF(); @@ -49,10 +56,14 @@ class VarInfoNcCF : public VarInfo // get stuff // - GrdFileType file_type() const; - const LongArray & dimension() const; - int dimension(int i) const; - int n_dimension() const; + GrdFileType file_type() const; + const LongArray & dimension() const; + int dimension(int i) const; + const NumArray & dim_value() const; + double dim_value(int i) const; + const BoolArray & is_offset() const; + bool is_offset(int i) const; + int n_dimension() const; // // set stuff @@ -61,7 +72,7 @@ class VarInfoNcCF : public VarInfo void set_magic(const ConcatString &, const ConcatString &); void set_dict(Dictionary &s); - void add_dimension(int dim); + void add_dimension(int dim, bool as_offset=true, double dim_value=bad_data_double); // // do stuff @@ -78,9 +89,13 @@ class VarInfoNcCF : public VarInfo /////////////////////////////////////////////////////////////////////////////// inline GrdFileType VarInfoNcCF::file_type() const { return(FileType_NcCF); } -inline const LongArray & VarInfoNcCF::dimension() const { return(Dimension); } -inline int VarInfoNcCF::dimension(int i) const { return(Dimension[i]); } -inline int VarInfoNcCF::n_dimension() const { return(Dimension.n_elements()); } +inline const LongArray & VarInfoNcCF::dimension() const { return(Dimension); } +inline int VarInfoNcCF::dimension(int i) const { return(Dimension[i]); } +inline int VarInfoNcCF::n_dimension() const { return(Dimension.n_elements());} +inline const NumArray & VarInfoNcCF::dim_value() const { return(Dim_value); } +inline double VarInfoNcCF::dim_value(int i) const { return(Dim_value[i]); } +inline const BoolArray & 
VarInfoNcCF::is_offset() const { return(Is_offset); } +inline bool VarInfoNcCF::is_offset(int i) const { return(Is_offset[i]); } /////////////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_nc_util/nc_utils.cc b/src/libcode/vx_nc_util/nc_utils.cc index 6820afc6bd..f439407c1f 100644 --- a/src/libcode/vx_nc_util/nc_utils.cc +++ b/src/libcode/vx_nc_util/nc_utils.cc @@ -57,7 +57,7 @@ bool get_att_value(const NcAtt *att, ConcatString &value) { //////////////////////////////////////////////////////////////////////// template -bool _get_att_num_value(const NcAtt *att, T &att_val, int matching_type) { +bool get_att_num_value_(const NcAtt *att, T &att_val, int matching_type) { bool status = false; if (IS_VALID_NC_P(att)) { int nc_type_id = GET_NC_TYPE_ID_P(att); @@ -83,42 +83,42 @@ bool _get_att_num_value(const NcAtt *att, T &att_val, int matching_type) { //////////////////////////////////////////////////////////////////////// bool get_att_value(const NcAtt *att, ncbyte &att_val) { - bool status = _get_att_num_value(att, att_val, NC_BYTE); + bool status = get_att_num_value_(att, att_val, NC_BYTE); return(status); } //////////////////////////////////////////////////////////////////////// bool get_att_value(const NcAtt *att, short &att_val) { - bool status = _get_att_num_value(att, att_val, NC_SHORT); + bool status = get_att_num_value_(att, att_val, NC_SHORT); return(status); } //////////////////////////////////////////////////////////////////////// bool get_att_value(const NcAtt *att, int &att_val) { - bool status = _get_att_num_value(att, att_val, NC_INT); + bool status = get_att_num_value_(att, att_val, NC_INT); return(status); } //////////////////////////////////////////////////////////////////////// bool get_att_value(const NcAtt *att, unsigned int &att_val) { - bool status = _get_att_num_value(att, att_val, NC_UINT); + bool status = get_att_num_value_(att, att_val, NC_UINT); return(status); } 
//////////////////////////////////////////////////////////////////////// bool get_att_value(const NcAtt *att, float &att_val) { - bool status = _get_att_num_value(att, att_val, NC_FLOAT); + bool status = get_att_num_value_(att, att_val, NC_FLOAT); return(status); } //////////////////////////////////////////////////////////////////////// bool get_att_value(const NcAtt *att, double &att_val) { - bool status = _get_att_num_value(att, att_val, NC_DOUBLE); + bool status = get_att_num_value_(att, att_val, NC_DOUBLE); return(status); } @@ -490,7 +490,7 @@ bool get_nc_att_value(const NcVar *var, const ConcatString &att_name, //////////////////////////////////////////////////////////////////////// template -bool _get_nc_att_value(const NcVar *var, const ConcatString &att_name, +bool get_nc_att_value_(const NcVar *var, const ConcatString &att_name, T &att_val, bool exit_on_error, T bad_data, const char *caller_name) { bool status = false; @@ -521,17 +521,27 @@ bool _get_nc_att_value(const NcVar *var, const ConcatString &att_name, bool get_nc_att_value(const NcVar *var, const ConcatString &att_name, int &att_val, bool exit_on_error) { static const char *method_name = "get_nc_att_value(NcVar,int) -> "; - bool status = _get_nc_att_value(var, att_name, att_val, exit_on_error, + bool status = get_nc_att_value_(var, att_name, att_val, exit_on_error, bad_data_int, method_name); return(status); } //////////////////////////////////////////////////////////////////////// +bool get_nc_att_value(const NcVar *var, const ConcatString &att_name, + double &att_val, bool exit_on_error) { + static const char *method_name = "get_nc_att_value(NcVar,double) -> "; + bool status = get_nc_att_value_(var, att_name, att_val, exit_on_error, + bad_data_double, method_name); + return(status); +} + +//////////////////////////////////////////////////////////////////////// + bool get_nc_att_value(const NcVar *var, const ConcatString &att_name, float &att_val, bool exit_on_error) { static const char 
*method_name = "get_nc_att_value(NcVar,float) -> "; - bool status = _get_nc_att_value(var, att_name, att_val, exit_on_error, + bool status = get_nc_att_value_(var, att_name, att_val, exit_on_error, bad_data_float, method_name); return(status); } @@ -558,7 +568,7 @@ bool get_nc_att_value(const NcVarAtt *att, ConcatString &att_val) { //////////////////////////////////////////////////////////////////////// template -bool _get_nc_att_value(const NcVarAtt *att, T &att_val, bool exit_on_error, +bool get_nc_att_value_(const NcVarAtt *att, T &att_val, bool exit_on_error, T bad_data, const char *caller_name) { bool status = true; @@ -582,7 +592,7 @@ bool _get_nc_att_value(const NcVarAtt *att, T &att_val, bool exit_on_error, bool get_nc_att_value(const NcVarAtt *att, int &att_val, bool exit_on_error) { static const char *method_name = "get_nc_att_value(NcVarAtt,int) -> "; - bool status = _get_nc_att_value(att, att_val, exit_on_error, bad_data_int, method_name); + bool status = get_nc_att_value_(att, att_val, exit_on_error, bad_data_int, method_name); return(status); } @@ -590,7 +600,7 @@ bool get_nc_att_value(const NcVarAtt *att, int &att_val, bool exit_on_error) { bool get_nc_att_value(const NcVarAtt *att, float &att_val, bool exit_on_error) { static const char *method_name = "get_nc_att_value(NcVarAtt,float) -> "; - bool status = _get_nc_att_value(att, att_val, exit_on_error, bad_data_float, method_name); + bool status = get_nc_att_value_(att, att_val, exit_on_error, bad_data_float, method_name); return(status); } @@ -598,25 +608,42 @@ bool get_nc_att_value(const NcVarAtt *att, float &att_val, bool exit_on_error) { bool get_nc_att_value(const NcVarAtt *att, double &att_val, bool exit_on_error) { static const char *method_name = "get_nc_att_value(NcVarAtt,double) -> "; - bool status = _get_nc_att_value(att, att_val, exit_on_error, bad_data_double, method_name); + bool status = get_nc_att_value_(att, att_val, exit_on_error, bad_data_double, method_name); return(status); } 
/////////////////////////////////////////////////////////////////////////////// -bool has_att(NcFile * ncfile, const ConcatString att_name, bool exit_on_error) -{ +bool has_att(NcFile *ncfile, const ConcatString att_name, bool do_log) { bool status = false; NcGroupAtt *att; att = get_nc_att(ncfile, att_name); - if ( IS_VALID_NC_P(att)) { + if (IS_VALID_NC_P(att)) { status = true; - } else if(exit_on_error) { - mlog << Error << "\nhas_att() -> " + } + else if (do_log) { + mlog << Warning << "\nhas_att() -> " << "can't find global NetCDF attribute " << att_name << ".\n\n"; - exit ( 1 ); + } + if (att) delete att; + return status; +} + +/////////////////////////////////////////////////////////////////////////////// + +bool has_att(NcVar *var, const ConcatString att_name, bool do_log) { + bool status = false; + + NcVarAtt *att = get_nc_att(var, att_name); + if (IS_VALID_NC_P(att)) { + status = true; + } + else if (do_log) { + mlog << Warning << "\nhas_att() -> " + << "can't find NetCDF variable attribute " << att_name + << ".\n\n"; } if (att) delete att; return status; @@ -624,10 +651,22 @@ bool has_att(NcFile * ncfile, const ConcatString att_name, bool exit_on_error) //////////////////////////////////////////////////////////////////////// +bool has_add_offset_attr(NcVar *var) { + return has_att(var, add_offset_att_name); +} + +//////////////////////////////////////////////////////////////////////// + +bool has_scale_factor_attr(NcVar *var) { + return has_att(var, scale_factor_att_name); +} + +//////////////////////////////////////////////////////////////////////// + bool has_unsigned_attribute(NcVar *var) { bool is_unsigned = false; static const char *method_name = "has_unsigned_attribute() -> "; - NcVarAtt *att_unsigned = get_nc_att(var, string("_Unsigned")); + NcVarAtt *att_unsigned = get_nc_att(var, string("_Unsigned")); if (IS_VALID_NC_P(att_unsigned)) { ConcatString att_value; get_att_value_chars(att_unsigned, att_value); @@ -729,7 +768,7 @@ bool 
get_global_att(const NcFile *nc, const ConcatString &att_name, //////////////////////////////////////////////////////////////////////// template -bool _get_global_att_value(const NcFile *nc, const ConcatString& att_name, +bool get_global_att_value_(const NcFile *nc, const ConcatString& att_name, T &att_val, T bad_data, bool error_out, const char *caller_name) { bool status = false; // Initialize @@ -763,16 +802,16 @@ bool _get_global_att_value(const NcFile *nc, const ConcatString& att_name, bool get_global_att(const NcFile *nc, const ConcatString& att_name, int &att_val, bool error_out) { static const char *method_name = "\nget_global_att(int) -> "; - bool status = _get_global_att_value(nc, att_name, att_val, bad_data_int, + bool status = get_global_att_value_(nc, att_name, att_val, bad_data_int, false, method_name); if (!status) { short tmp_att_val; - status = _get_global_att_value(nc, att_name, tmp_att_val, (short)bad_data_int, + status = get_global_att_value_(nc, att_name, tmp_att_val, (short)bad_data_int, false, method_name); if (status) att_val = tmp_att_val; else { ncbyte tmp_val2; - status = _get_global_att_value(nc, att_name, tmp_val2, (ncbyte)bad_data_int, + status = get_global_att_value_(nc, att_name, tmp_val2, (ncbyte)bad_data_int, error_out, method_name); if (status) att_val = tmp_val2; } @@ -811,8 +850,8 @@ bool get_global_att(const NcFile *nc, const ConcatString& att_name, bool get_global_att(const NcFile *nc, const ConcatString& att_name, float &att_val, bool error_out) { static const char *method_name = "\nget_global_att(float) -> "; - bool status = _get_global_att_value(nc, att_name, att_val, bad_data_float, - error_out, method_name); + bool status = get_global_att_value_(nc, att_name, att_val, bad_data_float, + error_out, method_name); return(status); } @@ -823,11 +862,11 @@ bool get_global_att(const NcFile *nc, const ConcatString& att_name, double &att_val, bool error_out) { static const char *method_name = "\nget_global_att(double) -> "; bool 
status; - status = _get_global_att_value(nc, att_name, att_val, bad_data_double, - false, method_name); + status = get_global_att_value_(nc, att_name, att_val, bad_data_double, + false, method_name); if (!status) { float tmp_att_val; - status = _get_global_att_value(nc, att_name, tmp_att_val, bad_data_float, + status = get_global_att_value_(nc, att_name, tmp_att_val, bad_data_float, error_out, method_name); if (status) att_val = tmp_att_val; } @@ -930,7 +969,7 @@ int get_var_names(NcFile *nc, StringArray *varNames) { //////////////////////////////////////////////////////////////////////// template -bool _get_var_att_num(const NcVar *var, const ConcatString &att_name, +bool get_var_att_num_(const NcVar *var, const ConcatString &att_name, T &att_val, T bad_data) { bool status = false; @@ -952,7 +991,7 @@ bool _get_var_att_num(const NcVar *var, const ConcatString &att_name, bool get_var_att_double(const NcVar *var, const ConcatString &att_name, double &att_val) { - bool status = _get_var_att_num(var, att_name, att_val, bad_data_double); + bool status = get_var_att_num_(var, att_name, att_val, bad_data_double); return(status); } @@ -961,23 +1000,78 @@ bool get_var_att_double(const NcVar *var, const ConcatString &att_name, bool get_var_att_float(const NcVar *var, const ConcatString &att_name, float &att_val) { - bool status = _get_var_att_num(var, att_name, att_val, bad_data_float); + bool status = get_var_att_num_(var, att_name, att_val, bad_data_float); return(status); } //////////////////////////////////////////////////////////////////////// -bool get_var_units(const NcVar *var, ConcatString &att_val) { +double get_var_add_offset(const NcVar *var) { + double v; - return(get_nc_att_value(var, units_att_name, att_val)); + if(!get_var_att_double(var, add_offset_att_name, v)) { + v = 0.f; + } + + return(v); +} + +//////////////////////////////////////////////////////////////////////// + +bool get_var_axis(const NcVar *var, ConcatString &att_val) { + 
return(get_nc_att_value(var, axis_att_name, att_val)); +} + +//////////////////////////////////////////////////////////////////////// + +template +bool get_var_fill_value(const NcVar *var, T &att_val) { + bool found = false; + + NcVarAtt *att = get_nc_att(var, fill_value_att_name); + if (IS_INVALID_NC_P(att)) { + if (att) delete att; + att = get_nc_att(var, missing_value_att_name); + } + if (IS_VALID_NC_P(att)) { + att->getValues(&att_val); + found = true; + } + + if (att) delete att; + + return(found); } //////////////////////////////////////////////////////////////////////// -bool get_var_level(const NcVar *var, ConcatString &att_val) { +double get_var_fill_value(const NcVar *var) { + double v; + + if(!get_var_att_double(var, fill_value_att_name, v)) { + v = bad_data_double; + } + + return(v); +} + +//////////////////////////////////////////////////////////////////////// - return(get_nc_att_value(var, level_att_name, att_val)); +bool get_var_grid_mapping(const NcVar *var, ConcatString &att_val) { + return(get_nc_att_value(var, grid_mapping_att_name, att_val)); +} + +//////////////////////////////////////////////////////////////////////// + +bool get_var_grid_mapping_name(const NcVar *var, ConcatString &att_val) { + return(get_nc_att_value(var, grid_mapping_name_att_name, att_val)); +} + +//////////////////////////////////////////////////////////////////////// + +bool get_var_long_name(const NcVar *var, ConcatString &att_val) { + return(get_nc_att_value(var, long_name_att_name, att_val)); } //////////////////////////////////////////////////////////////////////// @@ -994,11 +1088,11 @@ double get_var_missing_value(const NcVar *var) { //////////////////////////////////////////////////////////////////////// -double get_var_fill_value(const NcVar *var) { +double get_var_scale_factor(const NcVar *var) { double v; - if(!get_var_att_double(var, fill_value_att_name, v)) { - v = bad_data_double; + if(!get_var_att_double(var, scale_factor_att_name, v)) { + v = 1.f; } 
return(v); @@ -1006,6 +1100,19 @@ double get_var_fill_value(const NcVar *var) { //////////////////////////////////////////////////////////////////////// +bool get_var_standard_name(const NcVar *var, ConcatString &att_val) { + return(get_nc_att_value(var, standard_name_att_name, att_val)); +} + +//////////////////////////////////////////////////////////////////////// + +bool get_var_units(const NcVar *var, ConcatString &att_val) { + + return(get_nc_att_value(var, units_att_name, att_val)); +} + +//////////////////////////////////////////////////////////////////////// + char get_char_val(NcFile * nc, const char * var_name, const int index) { NcVar var = get_var(nc, var_name); return (get_char_val(&var, index)); @@ -1253,36 +1360,73 @@ float get_float_var(NcVar * var, const int index) { //////////////////////////////////////////////////////////////////////// template -bool _get_nc_data(NcFile *nc, const char *var_name, T *data, - const long *dim, const long *cur) { - - // - // Retrieve the input variables - // - NcVar var = get_var(nc, var_name); - return get_nc_data(&var, data, dim, cur); -} - -//////////////////////////////////////////////////////////////////////// +void apply_scale_factor_(T *data, const int cell_count, + double add_offset, double scale_factor, + const T nc_fill_value, const T met_fill_value, + bool has_fill_value, + const char *data_type, const char *var_name) { + const int debug_level = 7; + clock_t start_clock = clock(); + const char *method_name = "apply_scale_factor(T) "; -bool get_nc_data(NcFile *nc, const char *var_name, int *data, - const long *dim, const long *cur) { + if (cell_count > 0) { + int idx; + int positive_cnt = 0; + int unpacked_count = 0; + T min_value, max_value; + T raw_min_val, raw_max_val; + + idx = 0; + if (has_fill_value) { + for (; idx data[idx]) raw_min_val = data[idx]; + if (raw_max_val < data[idx]) raw_max_val = data[idx]; + data[idx] = (data[idx] * scale_factor) + add_offset; + if (data[idx] > 0) positive_cnt++; + if 
(min_value > data[idx]) min_value = data[idx]; + if (max_value < data[idx]) max_value = data[idx]; + unpacked_count++; + } + } + //cout << typeid(nc_fill_value).name(); + mlog << Debug(debug_level) << method_name << var_name + << "(" << typeid(data[0]).name() << "): unpacked data: count=" + << unpacked_count << " out of " << cell_count + << ", scale_factor=" << scale_factor<< " add_offset=" << add_offset + << ". FillValue(" << data_type << ")=" << nc_fill_value << "\n"; + mlog << Debug(debug_level) << method_name + << " data range [" << min_value << " - " << max_value + << "] raw data: [" << raw_min_val << " - " << raw_max_val + << "] Positive count: " << positive_cnt << "\n"; + } + mlog << Debug(debug_level) << method_name << " took " + << (clock()-start_clock)/double(CLOCKS_PER_SEC) << " seconds\n"; + return; } //////////////////////////////////////////////////////////////////////// +// Note: +// - template _t reads data as is (do not apply no scale_factor and add_offset) +// - template _ reads data and applies scale_factor and add_offset. template -bool _get_nc_data(NcVar *var, T *data) { +bool get_nc_data_t(NcVar *var, T *data) { bool return_status = false; if (IS_VALID_NC_P(var)) { - // - // Retrieve the float value from the NetCDF variable. 
- // Note: missing data was checked here - // var->getVar(data); + return_status = true; } return(return_status); @@ -1290,45 +1434,67 @@ bool _get_nc_data(NcVar *var, T *data) { //////////////////////////////////////////////////////////////////////// -bool get_nc_data(NcVar *var, time_t *data) { - bool return_status = _get_nc_data(var, data); - return(return_status); -} - -//////////////////////////////////////////////////////////////////////// - -bool get_nc_data(NcVar *var, int *data) { - bool return_status = _get_nc_data(var, data); +template +bool get_nc_data_(NcVar *var, T *data, const T met_missing) { + //const char *method_name = "get_nc_data_() "; + + int data_size = get_data_size(var); + for (int idx1=0; idx1", GET_NC_NAME_P(var).c_str()); + } + } return(return_status); } //////////////////////////////////////////////////////////////////////// template -bool _get_nc_data(NcVar *var, T *data, T bad_data, const long *curs) { +bool get_nc_data_(NcVar *var, T *data, T bad_data, const long *dims, const long *curs) { bool return_status = false; - const char *method_name = "_get_nc_data(const long *curs) "; + const char *method_name = "get_nc_data_(T, *dims, *curs) "; if (IS_VALID_NC_P(var)) { std::vector start; std::vector count; - const int dimC = get_dim_count(var); + int data_size = 1; + int dimC = get_dim_count(var); for (int idx = 0 ; idx < dimC; idx++) { int dim_size = get_dim_size(var, idx); - if ((curs[idx] > dim_size) && (0 < dim_size)) { + if ((curs[idx]+dims[idx]) > dim_size) { NcDim nc_dim = get_nc_dim(var, idx); - mlog << Error << "\n" << method_name << "The start offset (" - << curs[idx] << ") exceeds the dimension[" << idx << "] " << dim_size << " " + mlog << Error << "\n" << method_name << "The start offset and count (" + << curs[idx] << ", " << dims[idx] << ") exceeds the dimension[" + << idx << "] " << dim_size << " " << (IS_VALID_NC(nc_dim) ? 
GET_NC_NAME(nc_dim) : " ") << " for the variable " << GET_NC_NAME_P(var) << ".\n\n"; exit(1); } + start.push_back((size_t)curs[idx]); - count.push_back((size_t)1); + count.push_back((size_t)dims[idx]); + data_size *= dims[idx]; } - *data = bad_data; + for (int idx1=0; idx1getVar(start, count, data); return_status = true; - } - return(return_status); -} - -//////////////////////////////////////////////////////////////////////// - - -bool get_nc_data(NcVar *var, int *data, const long *curs) { - bool return_status = _get_nc_data(var, data, bad_data_int, curs); + //scale_factor and add_offset + if (has_add_offset_attr(var) || has_scale_factor_attr(var)) { + T nc_missing; + double add_offset = get_var_add_offset(var); + double scale_factor = get_var_scale_factor(var); + bool has_missing_attr = get_var_fill_value(var, nc_missing); + if (!has_missing_attr) nc_missing = bad_data; + apply_scale_factor_(data, data_size, add_offset, scale_factor, + nc_missing, bad_data, has_missing_attr, + "", GET_NC_NAME_P(var).c_str()); + } + } return(return_status); } //////////////////////////////////////////////////////////////////////// template -bool _get_nc_data(NcVar *var, T *data, T bad_data, const long dim, const long cur) { +bool get_nc_data_(NcVar *var, T *data, T met_missing, const long dim, const long cur) { bool return_status = false; - const char *method_name = "_get_nc_data(const long dim, const long cur) "; + const char *method_name = "get_nc_data_(T, dim, cur) "; + for (int idx=0; idxgetVar(start, count, data); return_status = true; + + //scale_factor and add_offset + if (has_add_offset_attr(var) || has_scale_factor_attr(var)) { + T nc_missing; + double add_offset = get_var_add_offset(var); + double scale_factor = get_var_scale_factor(var); + bool has_missing_attr = get_var_fill_value(var, nc_missing); + if (!has_missing_attr) nc_missing = met_missing; + apply_scale_factor_(data, dim, add_offset, scale_factor, + nc_missing, met_missing, has_missing_attr, + "", 
GET_NC_NAME_P(var).c_str()); + } } return(return_status); } //////////////////////////////////////////////////////////////////////// - -bool get_nc_data(NcVar *var, int *data, const long dim, const long cur) { - return(_get_nc_data(var, data, bad_data_int, dim, cur)); -} - -//////////////////////////////////////////////////////////////////////// +// read a single data template -bool _get_nc_data(NcVar *var, T *data, T bad_data, const long *dims, const long *curs) { +bool get_nc_data_(NcVar *var, T *data, T bad_data, const long *curs) { bool return_status = false; - const char *method_name = "_get_nc_data(const long *dims, const long *curs) "; + const char *method_name = "get_nc_data_(*curs) "; if (IS_VALID_NC_P(var)) { - std::vector start; - std::vector count; - int data_size = 1; int dimC = get_dim_count(var); + long dims[dimC]; for (int idx = 0 ; idx < dimC; idx++) { - int dim_size = get_dim_size(var, idx); - if ((curs[idx]+dims[idx]) > dim_size) { - NcDim nc_dim = get_nc_dim(var, idx); - mlog << Error << "\n" << method_name << "The start offset and count (" - << curs[idx] << ", " << dims[idx] << ") exceeds the dimension[" - << idx << "] " << dim_size << " " - << (IS_VALID_NC(nc_dim) ? GET_NC_NAME(nc_dim) : " ") - << " for the variable " << GET_NC_NAME_P(var) << ".\n\n"; - exit(1); - } - - start.push_back((size_t)curs[idx]); - count.push_back((size_t)dims[idx]); - data_size *= dims[idx]; + dims[idx] = 1; } - for (int idx1=0; idx1getVar(start, count, data); - return_status = true; + // Retrieve the NetCDF value from the NetCDF variable. 
+ return_status = get_nc_data_(var, data, bad_data, dims, curs); } return(return_status); } //////////////////////////////////////////////////////////////////////// -bool get_nc_data(NcVar *var, int *data, const long *dims, const long *curs) { - bool return_status = _get_nc_data(var, data, bad_data_int, dims, curs); +bool get_nc_data(NcVar *var, int *data, const long *curs) { + bool return_status = get_nc_data_(var, data, bad_data_int, curs); return(return_status); } //////////////////////////////////////////////////////////////////////// -bool get_nc_data(NcVar *var, short *data, const long *curs) { - bool return_status = _get_nc_data(var, data, (short)bad_data_int, curs); +bool get_nc_data(NcVar *var, time_t *data) { + bool return_status = get_nc_data_(var, data, (time_t)bad_data_int); return(return_status); } //////////////////////////////////////////////////////////////////////// -bool get_nc_data(NcVar *var, short *data, const long *dims, const long *curs) { - bool return_status = _get_nc_data(var, data, (short)bad_data_int, dims, curs); +bool get_nc_data(NcVar *var, int *data) { + bool return_status = get_nc_data_(var, data, bad_data_int); + return(return_status); +} + +//////////////////////////////////////////////////////////////////////// + +bool get_nc_data(NcVar *var, int *data, const long dim, const long cur) { + return(get_nc_data_(var, data, bad_data_int, dim, cur)); +} + +//////////////////////////////////////////////////////////////////////// + +bool get_nc_data(NcVar *var, int *data, const long *dims, const long *curs) { + bool return_status = get_nc_data_(var, data, bad_data_int, dims, curs); return(return_status); } //////////////////////////////////////////////////////////////////////// -bool get_nc_data(NcFile *nc, const char *var_name, float *data, - const long *dims, const long *curs) { +bool get_nc_data(NcVar *var, short *data, const long *curs) { + bool return_status = get_nc_data_(var, data, (short)bad_data_int, curs); - // - // Retrieve 
the input variables - // - NcVar var = get_var(nc, var_name); - return _get_nc_data(&var, data, bad_data_float, dims, curs); + return(return_status); +} + +//////////////////////////////////////////////////////////////////////// + +bool get_nc_data(NcVar *var, short *data, const long *dims, const long *curs) { + bool return_status = get_nc_data_(var, data, (short)bad_data_int, dims, curs); + + return(return_status); } //////////////////////////////////////////////////////////////////////// template -void _apply_scale_factor(float *data, const T *packed_data, - const int cell_count, const T fill_value, - T &raw_min_val, T &raw_max_val, const char *data_type, - float add_offset, float scale_factor) { - int positive_cnt = 0; - int unpacked_count = 0; - float min_value = 10e10; - float max_value = -10e10; +void copy_nc_data_t(NcVar *var, float *data, const T *packed_data, + const int cell_count, const char *data_type, + double add_offset, double scale_factor, bool has_missing, T missing_value) { clock_t start_clock = clock(); - const char *method_name = "apply_scale_factor(float)"; + const char *method_name = "copy_nc_data_t(float) "; + + if (cell_count > 0) { + int idx; + float min_value, max_value; + bool do_scale_factor = has_scale_factor_attr(var) || has_add_offset_attr(var); + + if (do_scale_factor) { + int positive_cnt = 0; + int unpacked_count = 0; + T raw_min_val, raw_max_val; + + for (idx=0; idx packed_data[idx]) raw_min_val = packed_data[idx]; + if (raw_max_val < packed_data[idx]) raw_max_val = packed_data[idx]; + data[idx] = ((float)packed_data[idx] * scale_factor) + add_offset; + if (data[idx] > 0) positive_cnt++; + if (min_value > data[idx]) min_value = data[idx]; + if (max_value < data[idx]) max_value = data[idx]; + unpacked_count++; + } + } + mlog << Debug(7) << method_name << GET_NC_NAME_P(var) + << " apply_scale_factor unpacked data: count=" + << unpacked_count << " out of " << cell_count + << ". 
FillValue(" << data_type << ")=" << missing_value << "\n"; + mlog << Debug(7) << method_name + << "data range [" << min_value << " - " << max_value + << "] raw data: [" << raw_min_val << " - " << raw_max_val + << "] Positive count: " << positive_cnt << "\n"; + } else { - if (raw_min_val > packed_data[idx]) raw_min_val = packed_data[idx]; - if (raw_max_val < packed_data[idx]) raw_max_val = packed_data[idx]; - data[idx] = ((float)packed_data[idx] * scale_factor) + add_offset; - if (data[idx] > 0) positive_cnt++; - if (min_value > data[idx]) min_value = data[idx]; - if (max_value < data[idx]) max_value = data[idx]; - if (!is_eq(0., add_offset) && !is_eq(1., scale_factor)) unpacked_count++; - } - } - mlog << Debug(4) << method_name << " unpacked data: count=" - << unpacked_count << " out of " << cell_count - << ". FillValue(" << data_type << ")=" << fill_value << "\n"; - mlog << Debug(4) << method_name << "data range [" << min_value << " - " << max_value - << "] raw data: [" << raw_min_val << " - " << raw_max_val << "] Positive count: " - << positive_cnt << "\n"; - mlog << Debug(7) << method_name << " took " + idx = 0; + + if (has_missing) { + for (idx=0; idx data[idx]) min_value = data[idx]; + if (max_value < data[idx]) max_value = data[idx]; + } + } + mlog << Debug(7) << method_name << "data range [" << min_value + << " - " << max_value << "]\n"; + } + } + mlog << Debug(7) << method_name << "took " << (clock()-start_clock)/double(CLOCKS_PER_SEC) << " seconds\n"; return; } //////////////////////////////////////////////////////////////////////// +template +void copy_nc_data_(NcVar *var, float *data, const T *packed_data, + const int cell_count, const char *data_type, + double add_offset, double scale_factor) { + T missing_value; + bool has_missing = get_var_fill_value(var, missing_value); + copy_nc_data_t(var, data, packed_data, cell_count, data_type, + add_offset, scale_factor, has_missing, missing_value); + return; +} + 
+//////////////////////////////////////////////////////////////////////// + bool get_nc_data(NcVar *var, float *data) { - clock_t start_clock = clock(); bool return_status = false; + clock_t start_clock = clock(); static const char *method_name = "get_nc_data(NcVar *, float *) "; if (IS_VALID_NC_P(var)) { @@ -1531,207 +1751,140 @@ bool get_nc_data(NcVar *var, float *data) { // Note: missing data was checked here // int type_id = GET_NC_TYPE_ID_P(var); + int cell_count = get_data_size(var); + return_status = true; if (NcType::nc_FLOAT == type_id) { - var->getVar(data); - } - else if (NcType::nc_DOUBLE == type_id) { - int cell_count = 1; - for (int idx=0; idxgetDimCount();idx++) { - cell_count *= get_dim_size(var, idx); - } - double *double_data = new double[cell_count]; - var->getVar(double_data); - for (int idx=0; idxgetDimCount();idx++) { - cell_count *= get_dim_size(var, idx); - } - - float add_offset = 0.; - float scale_factor = 1.; int unpacked_count = 0; + float add_offset = get_var_add_offset(var); + float scale_factor = get_var_scale_factor(var); + bool do_scale_factor = has_scale_factor_attr(var) || has_add_offset_attr(var); bool unsigned_value = has_unsigned_attribute(var); - NcVarAtt *att_add_offset = get_nc_att(var, string("add_offset")); - NcVarAtt *att_scale_factor = get_nc_att(var, string("scale_factor")); - NcVarAtt *att_fill_value = get_nc_att(var, string("_FillValue")); - if (IS_VALID_NC_P(att_add_offset)) add_offset = get_att_value_float(att_add_offset); - if (IS_VALID_NC_P(att_scale_factor)) scale_factor = get_att_value_float(att_scale_factor); - mlog << Debug(4) << method_name << "add_offset = " << add_offset - << ", scale_factor=" << scale_factor << ", cell_count=" << cell_count - << ", is_unsigned_value: " << unsigned_value << " for " << GET_NC_NAME_P(var) << "\n"; + mlog << Debug(6) << method_name << GET_NC_NAME_P(var) + << " data_size=" << cell_count << ", is_unsigned_value: " + << unsigned_value << "\n"; + if (do_scale_factor) { + mlog << 
Debug(6) << method_name << GET_NC_NAME_P(var) + << " add_offset = " << add_offset + << ", scale_factor=" << scale_factor << "\n"; + } switch ( type_id ) { + case NcType::nc_DOUBLE: + { + double *packed_data = new double[cell_count]; + + get_nc_data_t(var, packed_data); + + double fill_value; + bool has_fill_value = get_var_fill_value(var, fill_value); + for (int idx=0; idxgetVar(packed_data); - _apply_scale_factor(data, packed_data, cell_count, - fill_value, min_value, max_value, "int64", - add_offset, scale_factor); + copy_nc_data_(var, data, packed_data, cell_count, + "int64", add_offset, scale_factor); delete [] packed_data; } break; case NcType::nc_INT: { - int fill_value = bad_data_int; - int min_value = 2147483647; - int max_value = -2147483648; int *packed_data = new int[cell_count]; - if (IS_VALID_NC_P(att_fill_value)) - fill_value = get_att_value_int(att_fill_value); - var->getVar(packed_data); - _apply_scale_factor(data, packed_data, cell_count, - fill_value, min_value, max_value, "int", - add_offset, scale_factor); + copy_nc_data_(var, data, packed_data, cell_count, + "int", add_offset, scale_factor); delete [] packed_data; } break; case NcType::nc_SHORT: { - short fill_value = (short)bad_data_int; + short missing_value; + bool has_missing = get_var_fill_value(var, missing_value); short *packed_data = new short[cell_count]; - if (IS_VALID_NC_P(att_fill_value)) - fill_value = get_att_value_short(att_fill_value); - var->getVar(packed_data); - if (unsigned_value) { - unsigned short value; - int positive_cnt = 0; - int raw_min_value = 70000; - int raw_max_value = -70000; - float min_value = 10e10; - float max_value = -10e10; - unsigned short unsigned_fill_value = (unsigned short)fill_value; + unsigned short *ushort_data = new unsigned short[cell_count]; for (int idx=0; idx value) raw_min_value = value; - if (raw_max_value < value) raw_max_value = value; - if (data[idx] > 0) positive_cnt++; - if (min_value > data[idx]) min_value = data[idx]; - if (max_value < 
data[idx]) max_value = data[idx]; - } + ushort_data[idx] =(unsigned short)packed_data[idx]; } - mlog << Debug(4) << method_name << " unpacked data: count=" - << unpacked_count << " out of " << cell_count - << ". FillValue(short with unsigned) " << fill_value - << " data range [" << min_value << " - " << max_value - << "] raw data: [" << raw_min_value << " - " << raw_max_value << "] Positive count: " - << positive_cnt << "\n"; + copy_nc_data_t(var, data, ushort_data, cell_count, + "ushort", add_offset, scale_factor, + has_missing, (unsigned short)missing_value); + delete [] ushort_data; } else { - short min_value = 32766; - short max_value = -32767; - _apply_scale_factor(data, packed_data, - cell_count, fill_value, min_value, max_value, "short", - add_offset, scale_factor); + copy_nc_data_t(var, data, packed_data, cell_count, + "short", add_offset, scale_factor, + has_missing, missing_value); } delete [] packed_data; } break; case NcType::nc_USHORT: { - unsigned short min_value = 65535; - unsigned short max_value = 0; - unsigned short fill_value = (unsigned short)bad_data_int; unsigned short *packed_data = new unsigned short[cell_count]; - if (IS_VALID_NC_P(att_fill_value)) - fill_value = get_att_value_ushort(att_fill_value); - var->getVar(packed_data); - - _apply_scale_factor(data, packed_data, cell_count, - fill_value, min_value, max_value, "unsigned short", - add_offset, scale_factor); + copy_nc_data_(var, data, packed_data, cell_count, + "unsigned short", add_offset, scale_factor); delete [] packed_data; } break; case NcType::nc_BYTE: { - ncbyte fill_value = (ncbyte)bad_data_int; + ncbyte missing_value; + bool has_missing = get_var_fill_value(var, missing_value); ncbyte *packed_data = new ncbyte[cell_count]; - if (IS_VALID_NC_P(att_fill_value)) { - fill_value = get_att_value_char(att_fill_value); - } - var->getVar(packed_data); - if (unsigned_value) { - int value; - int positive_cnt = 0; - int raw_min_value = 70000; - int raw_max_value = -70000; - float 
min_value = 10e10; - float max_value = -10e10; - int unsigned_fill_value = (ncbyte)fill_value; + unsigned char *ubyte_data = new unsigned char[cell_count]; for (int idx=0; idx value) raw_min_value = value; - if (raw_max_value < value) raw_max_value = value; - if (data[idx] > 0) positive_cnt++; - if (min_value > data[idx]) min_value = data[idx]; - if (max_value < data[idx]) max_value = data[idx]; - } + ubyte_data[idx] =(unsigned char)packed_data[idx]; } - mlog << Debug(4) << method_name << " unpacked data: count=" - << unpacked_count << " out of " << cell_count - << ". FillValue(byte with unsigned) " << fill_value - << " data range [" << min_value << " - " << max_value - << "] raw data: [" << raw_min_value << " - " << raw_max_value << "] Positive count: " - << positive_cnt << "\n"; + copy_nc_data_t(var, data, ubyte_data, cell_count, + "ncubyte", add_offset, scale_factor, + has_missing, (unsigned char)missing_value); + delete [] ubyte_data; } else { - ncbyte min_value = 127; - ncbyte max_value = -127; - _apply_scale_factor(data, packed_data, cell_count, - fill_value, min_value, max_value, "ncbyte", - add_offset, scale_factor); + copy_nc_data_t(var, data, packed_data, cell_count, + "ncbyte", add_offset, scale_factor, + has_missing, missing_value); } delete [] packed_data; } break; case NcType::nc_UBYTE: { - unsigned char min_value = 255; - unsigned char max_value = 0; - unsigned char fill_value = (unsigned char)-99; unsigned char *packed_data = new unsigned char[cell_count]; - if (IS_VALID_NC_P(att_fill_value)) { - fill_value = get_att_value_char(att_fill_value); - } - - _apply_scale_factor(data, packed_data, cell_count, - fill_value, min_value, max_value, "unsigned char", - add_offset, scale_factor); + var->getVar(packed_data); + copy_nc_data_(var, data, packed_data, cell_count, + "unsigned char", add_offset, scale_factor); delete [] packed_data; } break; @@ -1741,9 +1894,6 @@ bool get_nc_data(NcVar *var, float *data) { << type_id << ", type name: " << 
GET_NC_TYPE_NAME_P(var) << ") for " << GET_NC_NAME_P(var) << "\n"; } - if(att_add_offset) delete att_add_offset; - if(att_scale_factor) delete att_scale_factor; - if(att_fill_value) delete att_fill_value; } } @@ -1756,7 +1906,7 @@ bool get_nc_data(NcVar *var, float *data) { //////////////////////////////////////////////////////////////////////// bool get_nc_data(NcVar *var, float *data, const long *curs) { - bool return_status = _get_nc_data(var, data, bad_data_float, curs); + bool return_status = get_nc_data_(var, data, bad_data_float, curs); return(return_status); } @@ -1764,7 +1914,7 @@ bool get_nc_data(NcVar *var, float *data, const long *curs) { //////////////////////////////////////////////////////////////////////// bool get_nc_data(NcVar *var, float *data, const long *dims, const long *curs) { - bool return_status = _get_nc_data(var, data, bad_data_float, dims, curs); + bool return_status = get_nc_data_(var, data, bad_data_float, dims, curs); return(return_status); } @@ -1772,7 +1922,7 @@ bool get_nc_data(NcVar *var, float *data, const long *dims, const long *curs) { //////////////////////////////////////////////////////////////////////// bool get_nc_data(NcVar *var, float *data, const long dim, const long cur) { - bool return_status = _get_nc_data(var, data, bad_data_float, dim, cur); + bool return_status = get_nc_data_(var, data, bad_data_float, dim, cur); return(return_status); } @@ -1785,43 +1935,94 @@ bool get_nc_data(NcFile *nc, const char *var_name, double *data, // // Retrieve the input variables // - NcVar var = get_var(nc, var_name); + NcVar var = get_var(nc, var_name); return get_nc_data(&var, data, dims, curs); } //////////////////////////////////////////////////////////////////////// template -int _apply_scale_factor(double *data, const T *packed_data, - const int cell_count, const T fill_value, - T &raw_min_val, T &raw_max_val, const char *data_type, - double add_offset, double scale_factor) { - int positive_cnt = 0; +void copy_nc_data_t(NcVar 
*var, double *data, const T *packed_data, + const int cell_count, const char *data_type, + double add_offset, double scale_factor, + bool has_missing, T missing_value) { int unpacked_count = 0; - double min_value = 10e10; - double max_value = -10e10; - const char *method_name = "apply_scale_factor(double)"; + const char *method_name = "copy_nc_data_t(double) "; + + if (cell_count > 0) { + int idx; + T missing_value; + double min_value, max_value; + bool do_scale_factor = has_scale_factor_attr(var) || has_add_offset_attr(var); + + if (do_scale_factor) { + int positive_cnt = 0; + T raw_min_val, raw_max_val; - for (int idx=0; idx packed_data[idx]) raw_min_val = packed_data[idx]; + if (raw_max_val < packed_data[idx]) raw_max_val = packed_data[idx]; + data[idx] = ((double)packed_data[idx] * scale_factor) + add_offset; + if (data[idx] > 0) positive_cnt++; + if (min_value > data[idx]) min_value = data[idx]; + if (max_value < data[idx]) max_value = data[idx]; + unpacked_count++; + } + } + mlog << Debug(7) << method_name << GET_NC_NAME_P(var) + << " apply_scale_factor unpacked data: count=" + << unpacked_count << " out of " << cell_count + << ". 
FillValue(" << data_type << ")=" << missing_value + << " data range [" << min_value << " - " << max_value + << "] raw data: [" << raw_min_val << " - " << raw_max_val + << "] Positive count: " << positive_cnt << "\n"; + } else { - if (raw_min_val > packed_data[idx]) raw_min_val = packed_data[idx]; - if (raw_max_val < packed_data[idx]) raw_max_val = packed_data[idx]; - data[idx] = ((double)packed_data[idx] * scale_factor) + add_offset; - if (data[idx] > 0) positive_cnt++; - if (min_value > data[idx]) min_value = data[idx]; - if (max_value < data[idx]) max_value = data[idx]; - if (!is_eq(0., add_offset) && !is_eq(1., scale_factor)) unpacked_count++; + idx = 0; + + if (has_missing) { + for (idx=0; idx data[idx]) min_value = data[idx]; + if (max_value < data[idx]) max_value = data[idx]; + } + } + mlog << Debug(7) << method_name << "data range [" << min_value + << " - " << max_value << "]\n"; } } - mlog << Debug(4) << method_name << " unpacked data: count=" - << unpacked_count << " out of " << cell_count - << ". 
FillValue(" << data_type << ")=" << fill_value - << " data range [" << min_value << " - " << max_value - << "] raw data: [" << raw_min_val << " - " << raw_max_val << "] Positive count: " - << positive_cnt << "\n"; - return unpacked_count; +} + +//////////////////////////////////////////////////////////////////////// + +template +void copy_nc_data_(NcVar *var, double *data, const T *packed_data, + const int cell_count, const char *data_type, + double add_offset, double scale_factor) { + T missing_value; + bool has_missing = get_var_fill_value(var, missing_value); + copy_nc_data_t(var, data, packed_data, cell_count, data_type, + add_offset, scale_factor, has_missing, missing_value); + return; } //////////////////////////////////////////////////////////////////////// @@ -1837,200 +2038,138 @@ bool get_nc_data(NcVar *var, double *data) { // int unpacked_count = 0; int type_id = GET_NC_TYPE_ID_P(var); + const int cell_count = get_data_size(var); + return_status = true; - if ((NcType::nc_DOUBLE == type_id) || (NcType::nc_FLOAT == type_id)){ + if (NcType::nc_DOUBLE == type_id) { + var->getVar(data); + + double fill_value; + bool has_fill_value = get_var_fill_value(var, fill_value); + if (has_fill_value) { + for (int idx=0; idxgetDimCount();idx++) { - cell_count *= get_dim_size(var, idx); - } - - double add_offset = 0.; - double scale_factor = 1.; bool unsigned_value = has_unsigned_attribute(var); - NcVarAtt *att_add_offset = get_nc_att(var, (string)"add_offset"); - NcVarAtt *att_scale_factor = get_nc_att(var, (string)"scale_factor"); - NcVarAtt *att_fill_value = get_nc_att(var, (string)"_FillValue"); - if (IS_VALID_NC_P(att_add_offset)) { - add_offset = get_att_value_double(att_add_offset); - } - if (IS_VALID_NC_P(att_scale_factor)) { - scale_factor = get_att_value_double(att_scale_factor); + const double add_offset = get_var_add_offset(var); + const double scale_factor = get_var_scale_factor(var); + bool do_scale_factor = has_scale_factor_attr(var) || 
has_add_offset_attr(var); + mlog << Debug(6) << method_name << GET_NC_NAME_P(var) + << " data_size=" << cell_count << ", is_unsigned_value: " + << unsigned_value << "\n"; + if (do_scale_factor) { + mlog << Debug(6) << method_name << GET_NC_NAME_P(var) + << " add_offset = " << add_offset + << ", scale_factor=" << scale_factor << "\n"; } - mlog << Debug(4) << method_name << "add_offset = " << add_offset - << ", scale_factor=" << scale_factor << ", cell_count=" << cell_count - << ", is_unsigned_value: " << unsigned_value << " for " << GET_NC_NAME_P(var) << "\n"; switch ( type_id ) { + case NcType::nc_FLOAT: + { + float *packed_data = new float[cell_count]; + + var->getVar(packed_data); + + float fill_value; + bool has_fill_value = get_var_fill_value(var, fill_value); + for (int idx=0; idxgetVar(packed_data); - _apply_scale_factor(data, packed_data, cell_count, - fill_value, min_value, max_value, "int64", - add_offset, scale_factor); + copy_nc_data_(var, data, packed_data, cell_count, + "int64", add_offset, scale_factor); delete [] packed_data; } break; case NcType::nc_INT: { - int fill_value = bad_data_int; - int min_value = 2147483647; - int max_value = -2147483648; int *packed_data = new int[cell_count]; - if (IS_VALID_NC_P(att_fill_value)) - fill_value = get_att_value_int(att_fill_value); - var->getVar(packed_data); - _apply_scale_factor(data, packed_data, cell_count, - fill_value, min_value, max_value, "int", - add_offset, scale_factor); + copy_nc_data_(var, data, packed_data, cell_count, + "int", add_offset, scale_factor); delete [] packed_data; } break; case NcType::nc_SHORT: { - short fill_value = (short)bad_data_int; + short missing_value; + bool has_missing = get_var_fill_value(var, missing_value); short *packed_data = new short[cell_count]; - - if (IS_VALID_NC_P(att_fill_value)) - fill_value = get_att_value_short(att_fill_value); - var->getVar(packed_data); - if (unsigned_value) { - int value; - int positive_cnt = 0; - int raw_min_value = 70000; - int 
raw_max_value = -70000; - float min_value = 10e10; - float max_value = -10e10; - int unsigned_fill_value = (unsigned short)fill_value; + unsigned short *ushort_data = new unsigned short[cell_count]; for (int idx=0; idx value) raw_min_value = value; - if (raw_max_value < value) raw_max_value = value; - if (data[idx] > 0) positive_cnt++; - if (min_value > data[idx]) min_value = data[idx]; - if (max_value < data[idx]) max_value = data[idx]; - } + ushort_data[idx] =(unsigned short)packed_data[idx]; } - mlog << Debug(4) << method_name << " unpacked data: count=" - << unpacked_count << " out of " << cell_count - << ". FillValue(short with unsigned) " << fill_value - << " data range [" << min_value << " - " << max_value - << "] raw data: [" << raw_min_value << " - " << raw_max_value << "] Positive count: " - << positive_cnt << "\n"; + copy_nc_data_t(var, data, ushort_data, cell_count, + "ushort", add_offset, scale_factor, + has_missing, (unsigned short)missing_value); + delete [] ushort_data; } else { - short min_value = 32766; - short max_value = -32767; - _apply_scale_factor(data, packed_data, - cell_count, fill_value, min_value, max_value, "int", - add_offset, scale_factor); + copy_nc_data_t(var, data, packed_data, cell_count, + "short", add_offset, scale_factor, + has_missing, missing_value); } delete [] packed_data; } break; case NcType::nc_USHORT: { - unsigned short fill_value = (unsigned short)bad_data_int; unsigned short *packed_data = new unsigned short[cell_count]; - if (IS_VALID_NC_P(att_fill_value)) - fill_value = get_att_value_short(att_fill_value); - var->getVar(packed_data); - - unsigned short min_value = 65535; - unsigned short max_value = 0; - _apply_scale_factor(data, packed_data, - cell_count, fill_value, min_value, max_value, "int", - add_offset, scale_factor); + copy_nc_data_(var, data, packed_data, cell_count, + "ushort", add_offset, scale_factor); delete [] packed_data; } break; case NcType::nc_BYTE: { - ncbyte fill_value = (ncbyte)bad_data_int; + 
ncbyte missing_value; + bool has_missing = get_var_fill_value(var, missing_value); ncbyte *packed_data = new ncbyte[cell_count]; - if (IS_VALID_NC_P(att_fill_value)) { - fill_value = get_att_value_char(att_fill_value); - } - var->getVar(packed_data); - if (unsigned_value) { - int value; - int positive_cnt = 0; - int raw_min_value = 70000; - int raw_max_value = -70000; - float min_value = 10e10; - float max_value = -10e10; - int unsigned_fill_value = (ncbyte)fill_value; + unsigned char *ubyte_data = new unsigned char[cell_count]; for (int idx=0; idx value) raw_min_value = value; - if (raw_max_value < value) raw_max_value = value; - if (data[idx] > 0) positive_cnt++; - if (min_value > data[idx]) min_value = data[idx]; - if (max_value < data[idx]) max_value = data[idx]; - } + ubyte_data[idx] =(unsigned char)packed_data[idx]; } - mlog << Debug(4) << method_name << " unpacked data: count=" - << unpacked_count << " out of " << cell_count - << ". FillValue(short with unsigned) " << fill_value - << " data range [" << min_value << " - " << max_value - << "] raw data: [" << raw_min_value << " - " << raw_max_value << "] Positive count: " - << positive_cnt << "\n"; + copy_nc_data_t(var, data, ubyte_data, cell_count, + "ncubyte", add_offset, scale_factor, + has_missing, (unsigned char)missing_value); + delete [] ubyte_data; } else { - ncbyte min_value = 127; - ncbyte max_value = -127; - _apply_scale_factor(data, packed_data, cell_count, - fill_value, min_value, max_value, "ncbyte", - add_offset, scale_factor); + copy_nc_data_t(var, data, packed_data, cell_count, + "ncbyte", add_offset, scale_factor, + has_missing, missing_value); } delete [] packed_data; } break; case NcType::nc_UBYTE: { - signed char min_value = 255; - signed char max_value = 0; - signed char fill_value = (signed char)bad_data_int; - signed char *packed_data = new signed char[cell_count]; - - if (IS_VALID_NC_P(att_fill_value)) { - fill_value = get_att_value_char(att_fill_value); - } + unsigned char 
*packed_data = new unsigned char[cell_count]; var->getVar(packed_data); - - _apply_scale_factor(data, packed_data, cell_count, - fill_value, min_value, max_value, "ncbyte", - add_offset, scale_factor); + copy_nc_data_(var, data, packed_data, cell_count, + "ncubyte", add_offset, scale_factor); delete [] packed_data; } break; @@ -2041,9 +2180,6 @@ bool get_nc_data(NcVar *var, double *data) { << ") for " << GET_NC_NAME_P(var) << "\n"; } - if(att_add_offset) delete att_add_offset; - if(att_scale_factor) delete att_scale_factor; - if(att_fill_value) delete att_fill_value; } } return(return_status); @@ -2052,14 +2188,14 @@ bool get_nc_data(NcVar *var, double *data) { //////////////////////////////////////////////////////////////////////// bool get_nc_data(NcVar *var, double *data, const long *curs) { - bool return_status = _get_nc_data(var, data, bad_data_double, curs); + bool return_status = get_nc_data_(var, data, bad_data_double, curs); return(return_status); } //////////////////////////////////////////////////////////////////////// bool get_nc_data(NcVar *var, double *data, const long dim, const long cur) { - bool return_status = _get_nc_data(var, data, bad_data_double, dim, cur);; + bool return_status = get_nc_data_(var, data, bad_data_double, dim, cur);; return(return_status); } @@ -2067,7 +2203,7 @@ bool get_nc_data(NcVar *var, double *data, const long dim, const long cur) { //////////////////////////////////////////////////////////////////////// bool get_nc_data(NcVar *var, double *data, const long *dims, const long *curs) { - bool return_status = _get_nc_data(var, data, bad_data_double, dims, curs); + bool return_status = get_nc_data_(var, data, bad_data_double, dims, curs); return(return_status); } @@ -2075,7 +2211,7 @@ bool get_nc_data(NcVar *var, double *data, const long *dims, const long *curs) { //////////////////////////////////////////////////////////////////////// bool get_nc_data(NcVar *var, char *data) { - bool return_status = _get_nc_data(var, data); 
+ bool return_status = get_nc_data_t(var, data); return(return_status); } @@ -2086,14 +2222,11 @@ bool get_nc_data(NcVar *var, uchar *data) { bool return_status = false; int data_type = GET_NC_TYPE_ID_P(var); static const char *method_name = "get_nc_data(NcVar *, uchar *) -> "; - if (NC_UBYTE == data_type) return_status = _get_nc_data(var, data); + if (NC_UBYTE == data_type) return_status = get_nc_data_t(var, data); else if (NC_BYTE == data_type && has_unsigned_attribute(var)) { - int cell_count = 1; - for (int idx=0; idxgetDimCount(); idx++) { - cell_count *= get_dim_size(var, idx); - } + int cell_count = get_data_size(var); ncbyte *signed_data = new ncbyte[cell_count]; - return_status = _get_nc_data(var, signed_data); + return_status = get_nc_data_t(var, signed_data); for (int idx=0; idxgetDimCount(); idx++) { - NcDim dim = var->getDim(idx); - cell_count *= get_dim_size(&dim); - } + short *short_data = new short[cell_count]; - return_status = _get_nc_data(var, short_data); + return_status = get_nc_data_t(var, short_data); for (int idx=0; idx 10000000.)) value_str << unix_to_yyyymmdd_hhmmss(value); + else value_str << value; + if (offset == bad_data_int) + mlog << Debug(7) << method_name << "Not found value " << value_str + << " at " << GET_NC_NAME_P(var) + << " by dimension name \"" << dim_name << "\"\n"; + else + mlog << Debug(7) << method_name << "Found value " << value_str + << " (index=" << offset << ") at " << GET_NC_NAME_P(var) + << " by dimension name \"" << dim_name << "\"\n"; + } + else { + mlog << Debug(7) << method_name << "Not found a dimension variable for \"" + << dim_name << "\"\n"; + } + return(offset); +} + //////////////////////////////////////////////////////////////////////// bool get_nc_data_to_array(NcVar *var, StringArray *array_buf) { @@ -2301,7 +2496,7 @@ int get_nc_string_length(NcFile *nc_file, NcVar var, const char *var_name) { //////////////////////////////////////////////////////////////////////// template -bool _put_nc_data(NcVar 
*var, const T data, long offset0, long offset1, long offset2) { +bool put_nc_data_T(NcVar *var, const T data, long offset0, long offset1, long offset2) { vector offsets; offsets.push_back((size_t)offset0); if (0 <= offset1) { @@ -2317,31 +2512,31 @@ bool _put_nc_data(NcVar *var, const T data, long offset0, long offset1, long off //////////////////////////////////////////////////////////////////////// bool put_nc_data(NcVar *var, const int data, long offset0, long offset1, long offset2) { - return _put_nc_data(var, data, offset0, offset1, offset2); + return put_nc_data_T(var, data, offset0, offset1, offset2); } //////////////////////////////////////////////////////////////////////// bool put_nc_data(NcVar *var, const char data, long offset0, long offset1, long offset2) { - return _put_nc_data(var, data, offset0, offset1, offset2); + return put_nc_data_T(var, data, offset0, offset1, offset2); } //////////////////////////////////////////////////////////////////////// bool put_nc_data(NcVar *var, const float data , long offset0, long offset1, long offset2) { - return _put_nc_data(var, data, offset0, offset1, offset2); + return put_nc_data_T(var, data, offset0, offset1, offset2); } //////////////////////////////////////////////////////////////////////// bool put_nc_data(NcVar *var, const double data, long offset0, long offset1, long offset2) { - return _put_nc_data(var, data, offset0, offset1, offset2); + return put_nc_data_T(var, data, offset0, offset1, offset2); } //////////////////////////////////////////////////////////////////////// bool put_nc_data(NcVar *var, const ncbyte data, long offset0, long offset1, long offset2) { - return _put_nc_data(var, data, offset0, offset1, offset2); + return put_nc_data_T(var, data, offset0, offset1, offset2); } //////////////////////////////////////////////////////////////////////// @@ -2382,7 +2577,7 @@ bool put_nc_data(NcVar *var, const ncbyte *data ) { //////////////////////////////////////////////////////////////////////// 
template -bool _put_nc_data(NcVar *var, const T *data, const long length, const long offset) { +bool put_nc_data_T(NcVar *var, const T *data, const long length, const long offset) { vector offsets, counts; int dim_count = get_dim_count(var); offsets.push_back(offset); @@ -2398,42 +2593,42 @@ bool _put_nc_data(NcVar *var, const T *data, const long length, const long of //////////////////////////////////////////////////////////////////////// bool put_nc_data(NcVar *var, const int *data, const long length, const long offset) { - _put_nc_data(var, data, length, offset); + put_nc_data_T(var, data, length, offset); return true; } //////////////////////////////////////////////////////////////////////// bool put_nc_data(NcVar *var, const char *data, const long length, const long offset) { - _put_nc_data(var, data, length, offset); + put_nc_data_T(var, data, length, offset); return true; } //////////////////////////////////////////////////////////////////////// bool put_nc_data(NcVar *var, const float *data , const long length, const long offset) { - _put_nc_data(var, data, length, offset); + put_nc_data_T(var, data, length, offset); return true; } //////////////////////////////////////////////////////////////////////// bool put_nc_data(NcVar *var, const double *data, const long length, const long offset) { - _put_nc_data(var, data, length, offset); + put_nc_data_T(var, data, length, offset); return true; } //////////////////////////////////////////////////////////////////////// bool put_nc_data(NcVar *var, const ncbyte *data, const long length, const long offset) { - _put_nc_data(var, data, length, offset); + put_nc_data_T(var, data, length, offset); return true; } //////////////////////////////////////////////////////////////////////// template -bool _put_nc_data(NcVar *var, const T *data , const long *lengths, const long *offsets) { +bool put_nc_data_T(NcVar *var, const T *data , const long *lengths, const long *offsets) { int dim = get_dim_count(var); vector nc_offsets, 
counts; for (int idx = 0 ; idx < dim; idx++) { @@ -2449,29 +2644,29 @@ bool _put_nc_data(NcVar *var, const T *data , const long *lengths, const long *o //////////////////////////////////////////////////////////////////////// bool put_nc_data(NcVar *var, const float *data , const long *lengths, const long *offsets) { - _put_nc_data(var, data , lengths, offsets); + put_nc_data_T(var, data , lengths, offsets); return true; } //////////////////////////////////////////////////////////////////////// bool put_nc_data(NcVar *var, const char *data , const long *lengths, const long *offsets) { - _put_nc_data(var, data , lengths, offsets); + put_nc_data_T(var, data , lengths, offsets); return true; } //////////////////////////////////////////////////////////////////////// bool put_nc_data(NcVar *var, const int *data , const long *lengths, const long *offsets) { - _put_nc_data(var, data , lengths, offsets); + put_nc_data_T(var, data , lengths, offsets); return true; } //////////////////////////////////////////////////////////////////////// template -bool _put_nc_data_with_dims(NcVar *var, const T *data, - const long len0, const long len1, const long len2) { +bool put_nc_data_T_with_dims(NcVar *var, const T *data, + const long len0, const long len1, const long len2) { vector offsets, counts; if (0 < len0) { offsets.push_back(0); @@ -2500,7 +2695,7 @@ bool put_nc_data_with_dims(NcVar *var, const int *data, bool put_nc_data_with_dims(NcVar *var, const int *data, const long len0, const long len1, const long len2) { - _put_nc_data_with_dims(var, data, len0, len1, len2); + put_nc_data_T_with_dims(var, data, len0, len1, len2); return true; } @@ -2515,7 +2710,7 @@ bool put_nc_data_with_dims(NcVar *var, const float *data, bool put_nc_data_with_dims(NcVar *var, const float *data, const long len0, const long len1, const long len2) { - _put_nc_data_with_dims(var, data, len0, len1, len2); + put_nc_data_T_with_dims(var, data, len0, len1, len2); return true; } @@ -2530,7 +2725,7 @@ bool 
put_nc_data_with_dims(NcVar *var, const double *data, bool put_nc_data_with_dims(NcVar *var, const double *data, const long len0, const long len1, const long len2) { - _put_nc_data_with_dims(var, data, len0, len1, len2); + put_nc_data_T_with_dims(var, data, len0, len1, len2); return true; } @@ -3501,15 +3696,16 @@ NcVar get_nc_var_lat(const NcFile *nc) { itVar != mapVar.end(); ++itVar) { ConcatString name = (*itVar).first; //if (is_nc_name_lat(name)) found = true; - if (get_nc_att_value(&(*itVar).second, "standard_name", name)) { + if (get_var_standard_name(&(*itVar).second, name)) { if (is_nc_name_lat(name)) found = true; } - if (!found && get_nc_att_value(&(*itVar).second, "units", name)) { + if (!found && get_var_units(&(*itVar).second, name)) { if (is_nc_unit_latitude(name.c_str())) { - if (get_nc_att_value(&(*itVar).second, "axis", name)) { + if (get_nc_att_value(&(*itVar).second, axis_att_name, name)) { if (is_nc_attr_lat(name)) found = true; } - else if (get_nc_att_value(&(*itVar).second, "_CoordinateAxisType", name)) { + else if (get_nc_att_value(&(*itVar).second, + coordinate_axis_type_att_name, name)) { if (is_nc_attr_lat(name)) found = true; } } @@ -3542,15 +3738,16 @@ NcVar get_nc_var_lon(const NcFile *nc) { itVar != mapVar.end(); ++itVar) { ConcatString name = (*itVar).first; //if (is_nc_name_lon(name)) found = true; - if (get_nc_att_value(&(*itVar).second, "standard_name", name)) { + if (get_var_standard_name(&(*itVar).second, name)) { if (is_nc_name_lon(name)) found = true; } - if (!found && get_nc_att_value(&(*itVar).second, "units", name)) { + if (!found && get_var_units(&(*itVar).second, name)) { if (is_nc_unit_longitude(name.c_str())) { - if (get_nc_att_value(&(*itVar).second, "axis", name)) { + if (get_nc_att_value(&(*itVar).second, axis_att_name, name)) { if (is_nc_attr_lon(name)) found = true; } - else if (get_nc_att_value(&(*itVar).second, "_CoordinateAxisType", name)) { + else if (get_nc_att_value(&(*itVar).second, + 
coordinate_axis_type_att_name, name)) { if (is_nc_attr_lon(name)) found = true; } } @@ -3583,17 +3780,18 @@ NcVar get_nc_var_time(const NcFile *nc) { itVar != mapVar.end(); ++itVar) { ConcatString name = (*itVar).first; //if (is_nc_name_time(name)) found = true; - if (get_nc_att_value(&(*itVar).second, "standard_name", name)) { + if (get_var_standard_name(&(*itVar).second, name)) { if (is_nc_name_time(name)) found = true; mlog << Debug(7) << method_name << "checked variable \"" << name << "\" is_time: " << found << "\n"; } - if (!found && get_nc_att_value(&(*itVar).second, "units", name)) { + if (!found && get_var_units(&(*itVar).second, name)) { if (is_nc_unit_time(name.c_str())) { - if (get_nc_att_value(&(*itVar).second, "axis", name)) { + if (get_nc_att_value(&(*itVar).second, axis_att_name, name)) { if (is_nc_attr_time(name)) found = true; } - else if (get_nc_att_value(&(*itVar).second, "_CoordinateAxisType", name)) { + else if (get_nc_att_value(&(*itVar).second, + coordinate_axis_type_att_name, name)) { if (is_nc_attr_time(name)) found = true; } } @@ -3614,7 +3812,6 @@ NcVar get_nc_var_time(const NcFile *nc) { return var; } - //////////////////////////////////////////////////////////////////////// NcFile *open_ncfile(const char * nc_name, bool write) { @@ -3658,7 +3855,7 @@ unixtime get_reference_unixtime(NcVar *time_var, int &sec_per_unit, ConcatString time_unit_str; static const char *method_name = "get_reference_unixtime() -> "; - if (get_nc_att_value(time_var, (string)"units", time_unit_str)) { + if (get_var_units(time_var, time_unit_str)) { parse_cf_time_string(time_unit_str.c_str(), ref_ut, sec_per_unit); no_leap_year = (86400 == sec_per_unit) ? 
get_att_no_leap_year(time_var) : false; } diff --git a/src/libcode/vx_nc_util/nc_utils.h b/src/libcode/vx_nc_util/nc_utils.h index 560ae06309..9db75b08ea 100644 --- a/src/libcode/vx_nc_util/nc_utils.h +++ b/src/libcode/vx_nc_util/nc_utils.h @@ -31,6 +31,7 @@ typedef unsigned char uchar; #include "int_array.h" #include "long_array.h" #include "num_array.h" +#include "nc_var_info.h" //////////////////////////////////////////////////////////////////////// @@ -129,15 +130,22 @@ static const string nc_att_use_var_id = "use_var_id"; static const char nc_att_obs_version[] = "MET_Obs_version"; static const char nc_att_met_point_nccf[] = "MET_point_NCCF"; +static const string add_offset_att_name = "add_offset"; +static const string axis_att_name = "axis"; +static const string bounds_att_name = "bounds"; +static const string coordinates_att_name = "coordinates"; +static const string coordinate_axis_type_att_name = "_CoordinateAxisType"; static const string description_att_name = "description"; static const string fill_value_att_name = "_FillValue"; -static const string level_att_name = "level"; +static const string grid_mapping_att_name = "grid_mapping"; +static const string grid_mapping_name_att_name = "grid_mapping_name"; static const string long_name_att_name = "long_name"; static const string missing_value_att_name = "missing_value"; -static const string name_att_name = "name"; +static const string projection_att_name = "Projection"; +static const string scale_factor_att_name = "scale_factor"; +static const string standard_name_att_name = "standard_name"; static const string units_att_name = "units"; - static const char nc_time_unit_exp[] = "^[a-z|A-Z]* since [0-9]\\{1,4\\}-[0-9]\\{1,2\\}-[0-9]\\{1,2\\}"; static const char MET_NC_Obs_ver_1_2[] = "1.02"; @@ -183,8 +191,10 @@ extern bool get_nc_att_value(const NcVarAtt *, double &, bool exit_on_erro extern bool get_nc_att_value(const NcVar *, const ConcatString &, ConcatString &, bool exit_on_error = false); extern bool 
get_nc_att_value(const NcVar *, const ConcatString &, int &, bool exit_on_error = false); extern bool get_nc_att_value(const NcVar *, const ConcatString &, float &, bool exit_on_error = false); +extern bool get_nc_att_value(const NcVar *, const ConcatString &, double &, bool exit_on_error = false); -extern bool has_att(NcFile *, const ConcatString name, bool exit_on_error = false); +extern bool has_att(NcFile *, const ConcatString name, bool exit_on_error=false); +extern bool has_att(NcVar *, const ConcatString name, bool do_log=false); extern bool has_unsigned_attribute(NcVar *); extern bool get_global_att(const NcGroupAtt *, ConcatString &); @@ -213,10 +223,16 @@ extern int get_var_names(NcFile *, StringArray *varNames); extern bool get_var_att_float (const NcVar *, const ConcatString &, float &); extern bool get_var_att_double(const NcVar *, const ConcatString &, double &); -extern bool get_var_units(const NcVar *, ConcatString &); -extern bool get_var_level(const NcVar *, ConcatString &); -extern double get_var_missing_value(const NcVar *); +template +extern bool get_var_fill_value(const NcVar *var, T &att_val); +extern bool get_var_axis(const NcVar *var, ConcatString &att_val); extern double get_var_fill_value(const NcVar *); +extern bool get_var_grid_mapping(const NcVar *var, ConcatString &att_val); +extern bool get_var_grid_mapping_name(const NcVar *var, ConcatString &att_val); +extern bool get_var_long_name(const NcVar *, ConcatString &); +extern double get_var_missing_value(const NcVar *); +extern bool get_var_standard_name(const NcVar *, ConcatString &); +extern bool get_var_units(const NcVar *, ConcatString &); extern bool args_ok(const LongArray &); @@ -263,12 +279,6 @@ extern bool get_nc_data(NcVar *, float *data, const long *dims, const long *cur extern bool get_nc_data(NcVar *, double *data, const long *dims, const long *curs); extern bool get_nc_data(NcVar *, ncbyte *data, const long *dims, const long *curs); -extern bool get_nc_data(NcFile *, const 
char *var_name, int *data, const long *dims, const long *curs); -extern bool get_nc_data(NcFile *, const char *var_name, char *data, const long *dims, const long *curs); -extern bool get_nc_data(NcFile *, const char *var_name, float *data, const long *dims, const long *curs); -extern bool get_nc_data(NcFile *, const char *var_name, double *data, const long *dims, const long *curs); -extern bool get_nc_data(NcFile *, const char *var_name, ncbyte *data, const long *dims, const long *curs); - extern bool get_nc_data_to_array(NcVar *, StringArray *); extern bool get_nc_data_to_array(NcFile *, const char *, StringArray *); extern int get_nc_string_length(NcVar *); @@ -310,6 +320,7 @@ extern bool put_nc_data_with_dims(NcVar *, const double *data, const long len0, extern NcVar get_var(NcFile *, const char * var_name); // exit if not exists extern NcVar get_nc_var(NcFile *, const char * var_name, bool log_as_error=false); // continue even though not exists + extern NcVar *copy_nc_var(NcFile *, NcVar *, const int deflate_level=DEF_DEFLATE_LEVEL, const bool all_attrs=true); extern void copy_nc_att(NcFile *, NcVar *, const ConcatString attr_name); extern void copy_nc_att( NcVar *, NcVar *, const ConcatString attr_name); @@ -344,7 +355,7 @@ extern bool get_dim_names(const NcFile *nc, StringArray *dimNames); extern NcVar get_nc_var_lat(const NcFile *nc); extern NcVar get_nc_var_lon(const NcFile *nc); extern NcVar get_nc_var_time(const NcFile *nc); - +extern int get_index_at_nc_data(NcVar *var, double value, const string dim_name, bool is_time=false); extern NcFile* open_ncfile(const char * nc_name, bool write = false); extern int get_data_size(NcVar *); diff --git a/src/libcode/vx_nc_util/nc_var_info.cc b/src/libcode/vx_nc_util/nc_var_info.cc index d41d7c8762..1d3b278103 100644 --- a/src/libcode/vx_nc_util/nc_var_info.cc +++ b/src/libcode/vx_nc_util/nc_var_info.cc @@ -1,5 +1,3 @@ - - // *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* // ** Copyright UCAR (c) 1992 - 
2022 // ** University Corporation for Atmospheric Research (UCAR) @@ -9,8 +7,6 @@ // *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* - - //////////////////////////////////////////////////////////////////////// @@ -27,6 +23,8 @@ using namespace std; #include "vx_log.h" #include "vx_cal.h" +//////////////////////////////////////////////////////////////////////// + unixtime get_att_value_unixtime(const NcAtt *att) { ConcatString s; unixtime time_value = -1; @@ -53,7 +51,6 @@ unixtime get_att_value_unixtime(const NcAtt *att) { return time_value; } - //////////////////////////////////////////////////////////////////////// @@ -65,9 +62,7 @@ unixtime get_att_value_unixtime(const NcAtt *att) { //////////////////////////////////////////////////////////////////////// -NcVarInfo::NcVarInfo() - -{ +NcVarInfo::NcVarInfo() { init_from_scratch(); @@ -77,9 +72,7 @@ init_from_scratch(); //////////////////////////////////////////////////////////////////////// -NcVarInfo::~NcVarInfo() - -{ +NcVarInfo::~NcVarInfo() { clear(); @@ -89,9 +82,7 @@ clear(); //////////////////////////////////////////////////////////////////////// -NcVarInfo::NcVarInfo(const NcVarInfo & i) - -{ +NcVarInfo::NcVarInfo(const NcVarInfo & i) { init_from_scratch(); @@ -103,9 +94,7 @@ assign(i); //////////////////////////////////////////////////////////////////////// -NcVarInfo & NcVarInfo::operator=(const NcVarInfo & i) - -{ +NcVarInfo & NcVarInfo::operator=(const NcVarInfo & i) { if ( this == &i ) return ( * this ); @@ -119,9 +108,7 @@ return ( * this ); //////////////////////////////////////////////////////////////////////// -void NcVarInfo::init_from_scratch() - -{ +void NcVarInfo::init_from_scratch() { Dims = (NcDim **) 0; @@ -135,9 +122,7 @@ return; //////////////////////////////////////////////////////////////////////// -void NcVarInfo::clear() - -{ +void NcVarInfo::clear() { var = (NcVar *) 0; // don't delete @@ -175,9 +160,7 @@ return; 
//////////////////////////////////////////////////////////////////////// -void NcVarInfo::dump(ostream & out, int depth) const - -{ +void NcVarInfo::dump(ostream & out, int depth) const { Indent prefix(depth); @@ -245,9 +228,7 @@ return; //////////////////////////////////////////////////////////////////////// -int NcVarInfo::lead_time() const - -{ +int NcVarInfo::lead_time() const { return ( (int) (ValidTime - InitTime) ); @@ -257,9 +238,7 @@ return ( (int) (ValidTime - InitTime) ); //////////////////////////////////////////////////////////////////////// -void NcVarInfo::assign(const NcVarInfo & i) - -{ +void NcVarInfo::assign(const NcVarInfo & i) { clear(); @@ -316,9 +295,39 @@ return; //////////////////////////////////////////////////////////////////////// -bool get_att_str(const NcVarInfo &info, const ConcatString att_name, ConcatString &att_value) +NcVarInfo *find_var_info_by_dim_name(NcVarInfo *vars, const string dim_name, + const int nvars) { + // Find the variable with the same dimension name + NcVarInfo *var = (NcVarInfo *)NULL; + for (int i = 0; i < nvars; i++) { + if (vars[i].name == dim_name) { + var = &vars[i]; + break; + } + } + + if (!var) { + //StringArray dim_names; + for (int i=0; iputAtt("accum_time_sec", ncInt, accum_sec); + add_att(var, accum_time_att_name, time_str.text()); + var->putAtt(accum_time_sec_att_name, ncInt, accum_sec); } return; diff --git a/src/libcode/vx_summary/Makefile.am b/src/libcode/vx_summary/Makefile.am index 7e56dcc05f..0630de9d59 100644 --- a/src/libcode/vx_summary/Makefile.am +++ b/src/libcode/vx_summary/Makefile.am @@ -17,6 +17,7 @@ libvx_summary_a_SOURCES = \ summary_calc_mean.cc summary_calc_mean.h \ summary_calc_median.cc summary_calc_median.h \ summary_calc_min.cc summary_calc_min.h \ + summary_calc_sum.cc summary_calc_sum.h \ summary_calc_percentile.cc summary_calc_percentile.h \ summary_calc_range.cc summary_calc_range.h \ summary_calc_stdev.cc summary_calc_stdev.h \ diff --git a/src/libcode/vx_summary/Makefile.in 
b/src/libcode/vx_summary/Makefile.in index ddf415b649..1f3c6e2e39 100644 --- a/src/libcode/vx_summary/Makefile.in +++ b/src/libcode/vx_summary/Makefile.in @@ -112,6 +112,7 @@ am_libvx_summary_a_OBJECTS = libvx_summary_a-summary_calc.$(OBJEXT) \ libvx_summary_a-summary_calc_mean.$(OBJEXT) \ libvx_summary_a-summary_calc_median.$(OBJEXT) \ libvx_summary_a-summary_calc_min.$(OBJEXT) \ + libvx_summary_a-summary_calc_sum.$(OBJEXT) \ libvx_summary_a-summary_calc_percentile.$(OBJEXT) \ libvx_summary_a-summary_calc_range.$(OBJEXT) \ libvx_summary_a-summary_calc_stdev.$(OBJEXT) \ @@ -142,6 +143,7 @@ am__depfiles_remade = ./$(DEPDIR)/libvx_summary_a-summary_calc.Po \ ./$(DEPDIR)/libvx_summary_a-summary_calc_percentile.Po \ ./$(DEPDIR)/libvx_summary_a-summary_calc_range.Po \ ./$(DEPDIR)/libvx_summary_a-summary_calc_stdev.Po \ + ./$(DEPDIR)/libvx_summary_a-summary_calc_sum.Po \ ./$(DEPDIR)/libvx_summary_a-summary_key.Po \ ./$(DEPDIR)/libvx_summary_a-summary_obs.Po \ ./$(DEPDIR)/libvx_summary_a-time_summary_interval.Po @@ -357,6 +359,7 @@ libvx_summary_a_SOURCES = \ summary_calc_mean.cc summary_calc_mean.h \ summary_calc_median.cc summary_calc_median.h \ summary_calc_min.cc summary_calc_min.h \ + summary_calc_sum.cc summary_calc_sum.h \ summary_calc_percentile.cc summary_calc_percentile.h \ summary_calc_range.cc summary_calc_range.h \ summary_calc_stdev.cc summary_calc_stdev.h \ @@ -422,6 +425,7 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libvx_summary_a-summary_calc_percentile.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libvx_summary_a-summary_calc_range.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libvx_summary_a-summary_calc_stdev.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libvx_summary_a-summary_calc_sum.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libvx_summary_a-summary_key.Po@am__quote@ # 
am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libvx_summary_a-summary_obs.Po@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libvx_summary_a-time_summary_interval.Po@am__quote@ # am--include-marker @@ -516,6 +520,20 @@ libvx_summary_a-summary_calc_min.obj: summary_calc_min.cc @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libvx_summary_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libvx_summary_a-summary_calc_min.obj `if test -f 'summary_calc_min.cc'; then $(CYGPATH_W) 'summary_calc_min.cc'; else $(CYGPATH_W) '$(srcdir)/summary_calc_min.cc'; fi` +libvx_summary_a-summary_calc_sum.o: summary_calc_sum.cc +@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libvx_summary_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libvx_summary_a-summary_calc_sum.o -MD -MP -MF $(DEPDIR)/libvx_summary_a-summary_calc_sum.Tpo -c -o libvx_summary_a-summary_calc_sum.o `test -f 'summary_calc_sum.cc' || echo '$(srcdir)/'`summary_calc_sum.cc +@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libvx_summary_a-summary_calc_sum.Tpo $(DEPDIR)/libvx_summary_a-summary_calc_sum.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='summary_calc_sum.cc' object='libvx_summary_a-summary_calc_sum.o' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libvx_summary_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libvx_summary_a-summary_calc_sum.o `test -f 'summary_calc_sum.cc' || echo '$(srcdir)/'`summary_calc_sum.cc + +libvx_summary_a-summary_calc_sum.obj: summary_calc_sum.cc +@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libvx_summary_a_CPPFLAGS) 
$(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libvx_summary_a-summary_calc_sum.obj -MD -MP -MF $(DEPDIR)/libvx_summary_a-summary_calc_sum.Tpo -c -o libvx_summary_a-summary_calc_sum.obj `if test -f 'summary_calc_sum.cc'; then $(CYGPATH_W) 'summary_calc_sum.cc'; else $(CYGPATH_W) '$(srcdir)/summary_calc_sum.cc'; fi` +@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libvx_summary_a-summary_calc_sum.Tpo $(DEPDIR)/libvx_summary_a-summary_calc_sum.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='summary_calc_sum.cc' object='libvx_summary_a-summary_calc_sum.obj' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libvx_summary_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libvx_summary_a-summary_calc_sum.obj `if test -f 'summary_calc_sum.cc'; then $(CYGPATH_W) 'summary_calc_sum.cc'; else $(CYGPATH_W) '$(srcdir)/summary_calc_sum.cc'; fi` + libvx_summary_a-summary_calc_percentile.o: summary_calc_percentile.cc @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libvx_summary_a_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libvx_summary_a-summary_calc_percentile.o -MD -MP -MF $(DEPDIR)/libvx_summary_a-summary_calc_percentile.Tpo -c -o libvx_summary_a-summary_calc_percentile.o `test -f 'summary_calc_percentile.cc' || echo '$(srcdir)/'`summary_calc_percentile.cc @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libvx_summary_a-summary_calc_percentile.Tpo $(DEPDIR)/libvx_summary_a-summary_calc_percentile.Po @@ -733,6 +751,7 @@ distclean: distclean-am -rm -f ./$(DEPDIR)/libvx_summary_a-summary_calc_percentile.Po -rm -f ./$(DEPDIR)/libvx_summary_a-summary_calc_range.Po -rm -f ./$(DEPDIR)/libvx_summary_a-summary_calc_stdev.Po + -rm -f ./$(DEPDIR)/libvx_summary_a-summary_calc_sum.Po -rm -f ./$(DEPDIR)/libvx_summary_a-summary_key.Po -rm -f 
./$(DEPDIR)/libvx_summary_a-summary_obs.Po -rm -f ./$(DEPDIR)/libvx_summary_a-time_summary_interval.Po @@ -789,6 +808,7 @@ maintainer-clean: maintainer-clean-am -rm -f ./$(DEPDIR)/libvx_summary_a-summary_calc_percentile.Po -rm -f ./$(DEPDIR)/libvx_summary_a-summary_calc_range.Po -rm -f ./$(DEPDIR)/libvx_summary_a-summary_calc_stdev.Po + -rm -f ./$(DEPDIR)/libvx_summary_a-summary_calc_sum.Po -rm -f ./$(DEPDIR)/libvx_summary_a-summary_key.Po -rm -f ./$(DEPDIR)/libvx_summary_a-summary_obs.Po -rm -f ./$(DEPDIR)/libvx_summary_a-time_summary_interval.Po diff --git a/src/libcode/vx_summary/summary_calc_sum.cc b/src/libcode/vx_summary/summary_calc_sum.cc new file mode 100644 index 0000000000..bac30c0873 --- /dev/null +++ b/src/libcode/vx_summary/summary_calc_sum.cc @@ -0,0 +1,43 @@ +// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* +// ** Copyright UCAR (c) 1992 - 2022 +// ** University Corporation for Atmospheric Research (UCAR) +// ** National Center for Atmospheric Research (NCAR) +// ** Research Applications Lab (RAL) +// ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA +// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* + + +//////////////////////////////////////////////////////////////////////// + + +using namespace std; + +#include + +#include "summary_calc_sum.h" + +//////////////////////////////////////////////////////////////////////// + + + // + // Code for class SummaryCalcSum + // + + +//////////////////////////////////////////////////////////////////////// + + +SummaryCalcSum::SummaryCalcSum() : + SummaryCalc() +{ +} + +//////////////////////////////////////////////////////////////////////// + +SummaryCalcSum::~SummaryCalcSum() +{ +} + +//////////////////////////////////////////////////////////////////////// +// Protected/Private Methods +//////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_summary/summary_calc_sum.h b/src/libcode/vx_summary/summary_calc_sum.h new file mode 100644 index 
0000000000..7cd69afc4c --- /dev/null +++ b/src/libcode/vx_summary/summary_calc_sum.h @@ -0,0 +1,56 @@ +// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* +// ** Copyright UCAR (c) 1992 - 2022 +// ** University Corporation for Atmospheric Research (UCAR) +// ** National Center for Atmospheric Research (NCAR) +// ** Research Applications Lab (RAL) +// ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA +// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* + + +//////////////////////////////////////////////////////////////////////// + + +#ifndef __SUMMARYCALCSUM_H__ +#define __SUMMARYCALCSUM_H__ + + +//////////////////////////////////////////////////////////////////////// + + +#include + +#include "summary_calc.h" + +//////////////////////////////////////////////////////////////////////// + + +class SummaryCalcSum : public SummaryCalc +{ + +public: + + SummaryCalcSum(); + virtual ~SummaryCalcSum(); + + virtual string getType() const + { + return "SUM"; + } + + virtual double calcSummary(const NumArray &num_array) const + { + return num_array.sum(); + } + +}; + + +//////////////////////////////////////////////////////////////////////// + + +#endif /* __SUMMARYCALCSUM_H__ */ + + +//////////////////////////////////////////////////////////////////////// + + diff --git a/src/libcode/vx_summary/summary_obs.cc b/src/libcode/vx_summary/summary_obs.cc index 799c72f6b7..f0d752a5c5 100644 --- a/src/libcode/vx_summary/summary_obs.cc +++ b/src/libcode/vx_summary/summary_obs.cc @@ -22,6 +22,7 @@ using namespace std; #include "summary_calc_mean.h" #include "summary_calc_median.h" #include "summary_calc_min.h" +#include "summary_calc_sum.h" #include "summary_calc_percentile.h" #include "summary_calc_range.h" #include "summary_calc_stdev.h" @@ -176,11 +177,11 @@ bool SummaryObs::summarizeObs(const TimeSummaryInfo &summary_info) { mlog << Debug(3) << "Computing " << unix_to_yyyymmdd_hhmmss(time_interval->getBaseTime()) - << " time summary from " + << " time summary (" 
<< unix_to_yyyymmdd_hhmmss(time_interval->getStartTime()) - << " to " + << " <= time < " << unix_to_yyyymmdd_hhmmss(time_interval->getEndTime()) - << ".\n"; + << ").\n"; // Initialize the map used to sort observations in this time period // into their correct summary groups @@ -365,6 +366,17 @@ vector< SummaryCalc* > SummaryObs::getSummaryCalculators(const TimeSummaryInfo & else if (type == "median") { calculators.push_back(new SummaryCalcMedian); } + else if (type == "sum") { + calculators.push_back(new SummaryCalcSum); + + // Check for vld_thresh = 1.0 + if (!is_eq(info.vld_thresh, 1.0)) { + mlog << Warning << "\nIn the \"time_summary\" dictionary, " + << "consider setting \"vld_thresh\" (" << info.vld_thresh + << ") to 1.0 for the \"sum\" type to better handle " + << "missing data.\n\n"; + } + } else if (type[0] == 'p') { calculators.push_back(new SummaryCalcPercentile(type)); } @@ -403,12 +415,12 @@ vector< TimeSummaryInterval > SummaryObs::getTimeIntervals( vector< TimeSummaryInterval > time_intervals; time_t interval_time = getIntervalTime(first_data_time, info.beg, info.end, info.step, info.width_beg, info.width_end); - while (interval_time < last_data_time) { + while (interval_time <= last_data_time) { // We need to process each day separately so that we can always start // at the indicated start time on each day. 
time_t day_end_time = getEndOfDay(interval_time); while (interval_time < day_end_time && - interval_time < last_data_time) + interval_time <= last_data_time) { // See if the current time is within the defined time intervals if (isInTimeInterval(interval_time, info.beg, info.end)) { diff --git a/src/tools/other/madis2nc/madis2nc.cc b/src/tools/other/madis2nc/madis2nc.cc index f6ba8a6696..ecb537b8a4 100644 --- a/src/tools/other/madis2nc/madis2nc.cc +++ b/src/tools/other/madis2nc/madis2nc.cc @@ -418,19 +418,20 @@ static bool get_filtered_nc_data(NcVar var, float *data, const char *var_name, bool required) { bool status = false; - float in_fill_value; const char *method_name = "get_filtered_nc_data(float) "; if (IS_VALID_NC(var)) { if(status = get_nc_data(&var, data, dim, cur)) { - get_nc_att_value(&var, (string)in_fillValue_str, in_fill_value); - mlog << Debug(5) << " " << method_name << GET_NC_NAME(var) << " " - << in_fillValue_str << "=" << in_fill_value << "\n"; - for (int idx=0; idx init time should be an integer or a string!\n\n"; exit ( 1 ); } - if (att) delete att; - att = get_nc_att(FcstRaw, (string)"valid_time_ut"); - ValidTime = get_att_value_unixtime(att); - if (ValidTime < 0) { + if (!get_att_unixtime(FcstRaw, valid_time_ut_att_name, ValidTime) || ValidTime < 0) { mlog << Error << "ModeNcOutputFile::open(const char *) -> valid time should be an integer or a string!\n\n"; exit ( 1 ); } - if (att) delete att; // att = FcstRaw->get_att("accum_time_sec"); // @@ -1006,6 +1000,3 @@ return ( s ); //////////////////////////////////////////////////////////////////////// - - - diff --git a/src/tools/other/plot_data_plane/plot_data_plane.cc b/src/tools/other/plot_data_plane/plot_data_plane.cc index 1d266bfc40..d5eb23e571 100644 --- a/src/tools/other/plot_data_plane/plot_data_plane.cc +++ b/src/tools/other/plot_data_plane/plot_data_plane.cc @@ -94,7 +94,6 @@ int main(int argc, char * argv[]) { VarInfo * var_ptr = (VarInfo * ) 0; VarInfoFactory v_factory; DataPlane 
data_plane; - DataPlaneArray data_plane_array; Grid grid; GrdFileType ftype; ColorTable color_table; diff --git a/src/tools/other/plot_point_obs/plot_point_obs.cc b/src/tools/other/plot_point_obs/plot_point_obs.cc index 888b521758..5990a33769 100644 --- a/src/tools/other/plot_point_obs/plot_point_obs.cc +++ b/src/tools/other/plot_point_obs/plot_point_obs.cc @@ -250,7 +250,6 @@ void process_point_obs(const char *point_obs_filename) { obs_qty_block, (char *)0); if (!status) exit(1); - int typ_idx, sid_idx, vld_idx; for(int i_offset=0; i_offset