Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Replenish effective mode in iotdb-system.properties #12706

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ public class IoTDBConfig {

/* Names of Watermark methods */
public static final String WATERMARK_GROUPED_LSB = "GroupBasedLSBMethod";
public static final String CONFIG_NAME = "iotdb-datanode.properties";
public static final String CONFIG_NAME = "iotdb-system.properties";
private static final Logger logger = LoggerFactory.getLogger(IoTDBConfig.class);
private static final String MULTI_DIR_STRATEGY_PREFIX =
"org.apache.iotdb.db.storageengine.rescon.disk.strategy.";
Expand Down Expand Up @@ -2175,14 +2175,6 @@ public void setAvgSeriesPointNumberThreshold(int avgSeriesPointNumberThreshold)
this.avgSeriesPointNumberThreshold = avgSeriesPointNumberThreshold;
}

/**
 * Returns the time budget for one cross-space compaction file-selection run, in
 * milliseconds; a negative value means the selection time is unbounded.
 */
public long getCrossCompactionFileSelectionTimeBudget() {
  return this.crossCompactionFileSelectionTimeBudget;
}

/**
 * Sets the time budget for one cross-space compaction file-selection run, in
 * milliseconds. Package-private: only the config loader is expected to call it.
 *
 * @param crossCompactionFileSelectionTimeBudget budget in ms; negative means unbounded
 */
void setCrossCompactionFileSelectionTimeBudget(long crossCompactionFileSelectionTimeBudget) {
  this.crossCompactionFileSelectionTimeBudget = crossCompactionFileSelectionTimeBudget;
}

/** Returns whether Thrift compression is enabled for the RPC layer. */
public boolean isRpcThriftCompressionEnable() {
  return this.rpcThriftCompressionEnable;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -591,11 +591,6 @@ public void loadProperties(Properties properties) throws BadNodeUrlException, IO
conf.setChunkBufferPoolEnable(
Boolean.parseBoolean(properties.getProperty("chunk_buffer_pool_enable")));
}
conf.setCrossCompactionFileSelectionTimeBudget(
Long.parseLong(
properties.getProperty(
"cross_compaction_file_selection_time_budget",
Long.toString(conf.getCrossCompactionFileSelectionTimeBudget()))));
conf.setMergeIntervalSec(
Long.parseLong(
properties.getProperty(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3257,8 +3257,8 @@ public Statement visitSetConfiguration(IoTDBSqlParser.SetConfigurationContext ct
Integer.parseInt(ctx.INTEGER_LITERAL() == null ? "-1" : ctx.INTEGER_LITERAL().getText());
Map<String, String> configItems = new HashMap<>();
for (IoTDBSqlParser.SetConfigurationEntryContext entry : ctx.setConfigurationEntry()) {
String key = entry.STRING_LITERAL(0).getText().replace("\"", "");
String value = entry.STRING_LITERAL(1).getText().replace("\"", "");
String key = parseStringLiteral(entry.STRING_LITERAL(0).getText()).trim();
String value = parseStringLiteral(entry.STRING_LITERAL(1).getText()).trim();
configItems.put(key, value);
}
setConfigurationStatement.setNodeId(nodeId);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
package org.apache.iotdb.db.utils;

import org.apache.iotdb.commons.path.MeasurementPath;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.exception.sql.SemanticException;
import org.apache.iotdb.db.queryengine.plan.analyze.ExpressionUtils;
Expand All @@ -40,17 +41,7 @@

public class TypeInferenceUtils {

private static final TSDataType booleanStringInferType =
IoTDBDescriptor.getInstance().getConfig().getBooleanStringInferType();

private static final TSDataType integerStringInferType =
IoTDBDescriptor.getInstance().getConfig().getIntegerStringInferType();

private static final TSDataType floatingStringInferType =
IoTDBDescriptor.getInstance().getConfig().getFloatingStringInferType();

private static final TSDataType nanStringInferType =
IoTDBDescriptor.getInstance().getConfig().getNanStringInferType();
private static final IoTDBConfig CONF = IoTDBDescriptor.getInstance().getConfig();

private TypeInferenceUtils() {}

Expand Down Expand Up @@ -108,18 +99,18 @@ public static TSDataType getPredictedDataType(Object value, boolean inferType) {
} else if (inferType) {
String strValue = value.toString();
if (isBoolean(strValue)) {
return booleanStringInferType;
return CONF.getBooleanStringInferType();
} else if (isNumber(strValue)) {
if (isLong(StringUtils.trim(strValue))) {
return integerStringInferType;
return CONF.getIntegerStringInferType();
} else {
return floatingStringInferType;
return CONF.getFloatingStringInferType();
}
} else if ("null".equals(strValue) || "NULL".equals(strValue)) {
return null;
// "NaN" is returned if the NaN Literal is given in Parser
} else if ("NaN".equals(strValue)) {
return nanStringInferType;
return CONF.getNanStringInferType();
} else if (isBlob(strValue)) {
return TSDataType.BLOB;
} else {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -61,4 +61,5 @@ for /f "usebackq tokens=*" %%i in ("%target_template_file%") do (
echo !line!>>"%target_properties_file%"
)
endlocal
)
)
powershell -Command "(Get-Content '%target_properties_file%') -join \"`n\" | Set-Content -NoNewline '%target_properties_file%'"
Original file line number Diff line number Diff line change
Expand Up @@ -257,6 +257,7 @@ dn_seed_config_node=127.0.0.1:10710
# dn_rpc_thrift_compression_enable=false

# if true, a snappy-based compression method will be called before sending data over the network
# effectiveMode: restart
# Datatype: boolean
# this feature is under development, set this as false before it is done.
# dn_rpc_advanced_compression_enable=false
Expand Down Expand Up @@ -431,6 +432,7 @@ dn_seed_config_node=127.0.0.1:10710
# If it is relative, system will save the data in the relative path directory it indicates under the IoTDB folder.
# If there are more than one directory, please separate them by commas ",".
# Note: If pipe_consensus_receiver_file_dirs is assigned an empty string (i.e., zero-size), it will be handled as a relative path.
# effectiveMode: restart
# For windows platform
# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. Otherwise, it is relative.
# pipe_consensus_receiver_file_dirs=data\\datanode\\system\\pipe\\consensus\\receiver
Expand Down Expand Up @@ -537,6 +539,7 @@ dn_metric_prometheus_reporter_port=9092
####################

# Used to indicate the cluster name and to distinguish different clusters.
# effectiveMode: first_start
# Datatype: string
cluster_name=defaultCluster

Expand Down Expand Up @@ -719,6 +722,7 @@ data_replication_factor=1
# primitive_array_size=64

# Ratio of compaction memory for chunk metadata maintains in memory when doing compaction
# effectiveMode: restart
# Datatype: double
# chunk_metadata_size_proportion=0.1

Expand All @@ -741,6 +745,7 @@ data_replication_factor=1
# reject_proportion=0.8

# Ratio of memory for the DevicePathCache. DevicePathCache is the deviceId cache, keep only one copy of the same deviceId in memory
# effectiveMode: restart
# Datatype: double
# device_path_cache_proportion=0.05

Expand Down Expand Up @@ -812,26 +817,31 @@ data_replication_factor=1

# Policy of DataNodeSchemaCache eviction.
# Support FIFO and LRU policy. FIFO takes low cache update overhead. LRU takes high cache hit rate.
# effectiveMode: restart
# Datatype: String
# datanode_schema_cache_eviction_policy=FIFO

# This configuration parameter sets the maximum number of time series allowed in the cluster.
# The value should be a positive integer representing the desired threshold.
# When the threshold is reached, users will be prohibited from creating new time series.
# -1 means unlimited
# effectiveMode: restart
# Datatype: int
# cluster_timeseries_limit_threshold=-1

# This configuration parameter sets the maximum number of device allowed in the cluster.
# The value should be a positive integer representing the desired threshold.
# When the threshold is reached, users will be prohibited from creating new devices.
# -1 means unlimited
# effectiveMode: restart
# Datatype: int
# cluster_device_limit_threshold=-1

# This configuration parameter sets the maximum number of Cluster Databases allowed.
# The value should be a positive integer representing the desired threshold.
# When the threshold is reached, users will be prohibited from creating new databases.
# -1 means unlimited.
# effectiveMode: restart
# Datatype: int
# database_limit_threshold = -1

Expand Down Expand Up @@ -989,10 +999,12 @@ data_replication_factor=1
# query_thread_count=0

# How many pipeline drivers will be created for one fragment instance. When <= 0, use CPU core number / 2.
# effectiveMode: restart
# Datatype: int
# degree_of_query_parallelism=0

# The threshold of count map size when calculating the MODE aggregation function
# effectiveMode: restart
# Datatype: int
# mode_map_size_threshold=10000

Expand All @@ -1002,6 +1014,7 @@ data_replication_factor=1
# batch_size=100000

# The memory for external sort in sort operator, when the data size is smaller than sort_buffer_size_in_bytes, the sort operator will use in-memory sort.
# effectiveMode: restart
# Datatype: long
# sort_buffer_size_in_bytes=1048576

Expand All @@ -1021,6 +1034,7 @@ data_replication_factor=1
# timestamp_precision=ms

# When the timestamp precision check is enabled, the timestamps those are over 13 digits for ms precision, or over 16 digits for us precision are not allowed to be inserted.
# effectiveMode: first_start
# Datatype: Boolean
# timestamp_precision_check_enabled=true

Expand All @@ -1033,24 +1047,28 @@ data_replication_factor=1
# default_ttl_in_ms=-1

# The maximum number of TTL rules stored in the system, the default is 1000.
# effectiveMode: restart
# Negative value means the threshold is unlimited.
# Datatype: int
# ttl_rule_capacity=1000

# The interval of TTL check task in each database. The TTL check task will inspect and select files with a higher volume of expired data for compaction. Default is 2 hours.
# Notice: It is not recommended to change it too small, as it will affect the read and write performance of the system.
# effectiveMode: restart
# Unit: ms
# Datatype: int
# ttl_check_interval=7200000

# The maximum expiring time of devices that have a ttl. Default is 1 month.
# If the data elapsed time (current timestamp minus the maximum data timestamp of the device in the file) of such devices exceeds this value, then the file will be cleaned by compaction.
# Notice: It is not recommended to change it too small, as it will affect the read and write performance of the system.
# effectiveMode: restart
# Unit: ms
# Datatype: int
# max_expired_time=2592000000

# The expired device ratio. If the ratio of expired devices in one file exceeds this value, then expired data of this file will be cleaned by compaction.
# effectiveMode: restart
# Datatype: float
# expired_data_ratio=0.3

Expand All @@ -1062,6 +1080,7 @@ data_replication_factor=1

# Add a switch to enable separate sequence and unsequence data.
# If it is true, then data will be separated into seq and unseq data dir. If it is false, then all data will be written into unseq data dir.
# effectiveMode: restart
# Datatype: boolean
# enable_separate_data=true

Expand Down Expand Up @@ -1208,6 +1227,7 @@ data_replication_factor=1
# compaction_priority=BALANCE

# The size of candidate compaction task queue.
# effectiveMode: restart
# Datatype: int
# candidate_compaction_task_queue_size=50

Expand Down Expand Up @@ -1263,13 +1283,6 @@ data_replication_factor=1
# Datatype: int
# min_cross_compaction_unseq_file_level=1

# If one merge file selection runs for more than this time, it will be ended and its current
# selection will be used as final selection.
# When < 0, it means time is unbounded.
# effectiveMode: restart
# Datatype: long, Unit: ms
# cross_compaction_file_selection_time_budget=30000

# How many threads will be set up to perform compaction, 10 by default.
# Set to 1 when less than or equal to 0.
# effectiveMode: hot_reload
Expand Down Expand Up @@ -1623,41 +1636,49 @@ data_replication_factor=1
# For Windows platform
# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is
# absolute. Otherwise, it is relative.
# effectiveMode: first_start
# pipe_lib_dir=ext\\pipe
# For Linux platform
# If its prefix is "/", then the path is absolute. Otherwise, it is relative.
# pipe_lib_dir=ext/pipe

# The maximum number of threads that can be used to execute the pipe subtasks in PipeSubtaskExecutor.
# The actual value will be min(pipe_subtask_executor_max_thread_num, max(1, CPU core number / 2)).
# effectiveMode: restart
# Datatype: int
# pipe_subtask_executor_max_thread_num=5

# The connection timeout (in milliseconds) for the thrift client.
# effectiveMode: restart
# Datatype: int
# pipe_sink_timeout_ms=900000

# The maximum number of selectors that can be used in the sink.
# Recommend to set this value to less than or equal to pipe_sink_max_client_number.
# effectiveMode: restart
# Datatype: int
# pipe_sink_selector_number=4

# The maximum number of clients that can be used in the sink.
# effectiveMode: restart
# Datatype: int
# pipe_sink_max_client_number=16

# Whether to enable receiving pipe data through air gap.
# The receiver can only return 0 or 1 in tcp mode to indicate whether the data is received successfully.
# effectiveMode: restart
# Datatype: Boolean
# pipe_air_gap_receiver_enabled=false

# The port for the server to receive pipe data through air gap.
# Datatype: int
# effectiveMode: restart
# pipe_air_gap_receiver_port=9780

# The total bytes that all pipe sinks can transfer per second.
# When given a value less than or equal to 0, it means no limit.
# default value is -1, which means no limit.
# effectiveMode: hot_reload
# Datatype: double
# pipe_all_sinks_rate_limit_bytes_per_second=-1

Expand Down Expand Up @@ -1755,12 +1776,14 @@ data_replication_factor=1
# data_region_ratis_preserve_logs_num_when_purge=1000

# Raft Log disk size control
# effectiveMode: restart
# Datatype: int
# config_node_ratis_log_max_size = 2147483648
# schema_region_ratis_log_max_size = 2147483648
# data_region_ratis_log_max_size = 21474836480

# Raft periodic snapshot interval, time unit is second
# effectiveMode: restart
# Datatype: int
# config_node_ratis_periodic_snapshot_interval=86400
# schema_region_ratis_periodic_snapshot_interval=86400
Expand All @@ -1770,6 +1793,7 @@ data_replication_factor=1
### Fast IoTConsensus Configuration
####################
# Default event buffer size for connector and receiver in pipe consensus
# effectiveMode: restart
# DataType: int
# fast_iot_consensus_pipeline_size=5

Expand Down Expand Up @@ -1831,6 +1855,7 @@ data_replication_factor=1
####################

# The thread count which can be used for model inference operation.
# effectiveMode: restart
# Datatype: int
# model_inference_execution_thread_count=5

Expand All @@ -1847,6 +1872,7 @@ data_replication_factor=1
# The maximum bytes per second of disk write throughput when loading tsfile.
# When given a value less than or equal to 0, it means no limit.
# Default value is -1, which means no limit.
# effectiveMode: hot_reload
# Datatype: int
# load_write_throughput_bytes_per_second=-1

Expand Down
Loading
Loading