
chore: more rename to horaedb #1340

Merged (4 commits) on Dec 4, 2023
Changes from 1 commit
6 changes: 3 additions & 3 deletions benchmarks/bench.toml
@@ -13,7 +13,7 @@
# limitations under the License.

[sst_bench]
-store_path = "/tmp/ceresdb/1/1"
+store_path = "/tmp/horaedb/1/1"
sst_file_name = "37.sst"
runtime_thread_num = 1
bench_measurement_time = "30s"
@@ -29,7 +29,7 @@ start_time_ms = 1632985200000
end_time_ms = 1632985800000

[merge_sst_bench]
-store_path = "/tmp/ceresdb"
+store_path = "/tmp/horaedb"
space_id = 1
table_id = 1
sst_file_ids = [ 34, 37 ]
@@ -45,7 +45,7 @@ end_time_ms = 0
# end_time_ms = 1632985800000

[scan_memtable_bench]
-store_path = "/tmp/ceresdb/1/1"
+store_path = "/tmp/horaedb/1/1"
sst_file_name = "37.sst"
runtime_thread_num = 1
max_projections = 5
6 changes: 3 additions & 3 deletions benchmarks/config/sst.toml
@@ -15,7 +15,7 @@
runtime_thread_num = 4

[rebuild_sst]
-store_path = "/tmp/ceresdb/benchmarks"
+store_path = "/tmp/horaedb/benchmarks"
input_file_name = "898.sst"
read_batch_row_num = 8192
output_file_name = "tt_t.sst"
@@ -27,13 +27,13 @@ start_time_ms = 0
end_time_ms = 0

[merge_sst]
-store_path = "/tmp/ceresdb/benchmarks/2199023255564"
+store_path = "/tmp/horaedb/benchmarks/2199023255564"
space_id = 1
table_id = 1
sst_file_ids = [1, 17, 19, 24, 31, 37, 43, 45, 9, 14, 18, 21, 27, 34, 40, 44, 5]
dedup = true
read_batch_row_num = 16384
-output_store_path = "/tmp/ceresdb/data/1/1"
+output_store_path = "/tmp/horaedb/data/1/1"
output_file_name = "16384-all.sst"
num_rows_per_row_group = 16384
12 changes: 6 additions & 6 deletions docker/basic.sh
@@ -17,7 +17,7 @@ PORT=${HORAEDB_PORT:-"5440"}

URL="http://${ADDR}:${PORT}/sql"

-function ceresdb_query {
+function horaedb_query {
    sql=${1}

    curl --location --fail \
@@ -28,12 +28,12 @@ function ceresdb_query {
    }'
}

-ceresdb_query 'CREATE TABLE `demo` (`name` string TAG, `value` double NOT NULL, `t` timestamp NOT NULL, TIMESTAMP KEY(t)) ENGINE=Analytic with (enable_ttl='\''false'\'')'
+horaedb_query 'CREATE TABLE `demo` (`name` string TAG, `value` double NOT NULL, `t` timestamp NOT NULL, TIMESTAMP KEY(t)) ENGINE=Analytic with (enable_ttl='\''false'\'')'

-ceresdb_query 'INSERT INTO demo(t, name, value) VALUES(1651737067000, '\''horaedb'\'', 100)'
+horaedb_query 'INSERT INTO demo(t, name, value) VALUES(1651737067000, '\''horaedb'\'', 100)'

-ceresdb_query 'select * from demo'
+horaedb_query 'select * from demo'

-ceresdb_query 'show create table demo'
+horaedb_query 'show create table demo'

-ceresdb_query 'DROP TABLE demo'
+horaedb_query 'DROP TABLE demo'
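For context, the script above is the HTTP smoke test run against a live server. A minimal sketch of invoking it locally follows; only `HORAEDB_PORT` is visible in this hunk, so the `HORAEDB_ADDR` variable name is an assumption.

```bash
# Sketch: run the renamed smoke test against a local server.
# HORAEDB_ADDR is assumed; HORAEDB_PORT defaults to 5440 per the hunk above.
HORAEDB_ADDR=127.0.0.1 HORAEDB_PORT=5440 bash docker/basic.sh
```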
3 changes: 2 additions & 1 deletion docs/dockerhub-description.md
@@ -14,7 +14,8 @@ docker run -d --name horaedb-server \
horaedb/horaedb-server:$(version)
```

-CeresDB will listen three ports when start:
+HoraeDB will listen three ports when started:
+
- 8831, gRPC port
- 3307, MySQL port
- 5440, HTTP port
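A quick sanity check for the HTTP port above is to POST a SQL statement to the `/sql` endpoint, the same endpoint docker/basic.sh in this PR uses. This is only a sketch: the JSON body shape and header are assumptions, not confirmed by the diff.

```bash
# Illustrative sketch: assumes the HTTP API on port 5440 accepts {"query": "..."}.
curl --location --fail \
  'http://127.0.0.1:5440/sql' \
  --header 'Content-Type: application/json' \
  --data '{"query": "SHOW TABLES"}'
```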
4 changes: 2 additions & 2 deletions docs/example-cluster-0.toml
@@ -31,11 +31,11 @@ mem_cache_partition_bits = 0

[analytic.storage.object_store]
type = "Local"
-data_dir = "/tmp/ceresdb0"
+data_dir = "/tmp/horaedb0"

[analytic.wal]
type = "RocksDB"
-data_dir = "/tmp/ceresdb0"
+data_dir = "/tmp/horaedb0"

[cluster_deployment]
mode = "WithMeta"
4 changes: 2 additions & 2 deletions docs/example-cluster-1.toml
@@ -32,11 +32,11 @@ mem_cache_partition_bits = 0

[analytic.storage.object_store]
type = "Local"
-data_dir = "/tmp/ceresdb1"
+data_dir = "/tmp/horaedb1"

[analytic.wal]
type = "RocksDB"
-data_dir = "/tmp/ceresdb1"
+data_dir = "/tmp/horaedb1"

[cluster_deployment]
mode = "WithMeta"
4 changes: 2 additions & 2 deletions docs/example-standalone-static-routing.toml
@@ -34,15 +34,15 @@ write_group_command_channel_cap = 1024

[analytic.wal]
type = "RocksDB"
-data_dir = "/tmp/ceresdb1"
+data_dir = "/tmp/horaedb1"

[analytic.storage]
mem_cache_capacity = '1G'
mem_cache_partition_bits = 0

[analytic.storage.object_store]
type = "Local"
-data_dir = "/tmp/ceresdb"
+data_dir = "/tmp/horaedb"

[analytic.compaction_config]
schedule_channel_len = 4
6 changes: 3 additions & 3 deletions docs/minimal.toml
@@ -22,15 +22,15 @@ postgresql_port = 5433
level = "info"

[tracing]
-dir = "/tmp/ceresdb"
+dir = "/tmp/horaedb"

[analytic.storage.object_store]
type = "Local"
-data_dir = "/tmp/ceresdb"
+data_dir = "/tmp/horaedb"

[analytic.wal]
type = "RocksDB"
-data_dir = "/tmp/ceresdb"
+data_dir = "/tmp/horaedb"

[analytic]
enable_primary_key_sampling = true
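As a usage note for this config, a minimal sketch of starting a standalone server with it is shown below; the binary path and the `--config` flag are assumptions, since neither appears in this diff.

```bash
# Sketch: launch a standalone server with the minimal config.
# Binary name/path and the --config flag are assumed, not shown in this PR.
./target/release/horaedb-server --config docs/minimal.toml
```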
2 changes: 1 addition & 1 deletion integration_tests/cases/common/dml/issue-1087.result
@@ -12,7 +12,7 @@ explain verbose select * from issue_1087;

plan_type,plan,
String("initial_logical_plan"),String("Projection: issue_1087.tsid, issue_1087.t, issue_1087.name, issue_1087.value\n TableScan: issue_1087"),
-String("logical_plan after ceresdb_type_conversion"),String("SAME TEXT AS ABOVE"),
+String("logical_plan after horaedb_type_conversion"),String("SAME TEXT AS ABOVE"),
String("logical_plan after inline_table_scan"),String("SAME TEXT AS ABOVE"),
String("logical_plan after type_coercion"),String("SAME TEXT AS ABOVE"),
String("logical_plan after count_wildcard_rule"),String("SAME TEXT AS ABOVE"),
6 changes: 3 additions & 3 deletions integration_tests/cases/env/cluster/config.toml
@@ -22,19 +22,19 @@ log_level = "debug"
read_parallelism = 8

[analytic]
-wal_path = "/tmp/ceresdb"
+wal_path = "/tmp/horaedb"
sst_meta_cache_cap = 10000

[analytic.storage]
mem_cache_capacity = '1G'
mem_cache_partition_bits = 0
-disk_cache_path = "/tmp/ceresdb"
+disk_cache_path = "/tmp/horaedb"
disk_cache_capacity = '2G'
disk_cache_page_size = '1M'

[analytic.storage.object_store]
type = "Local"
-data_path = "/tmp/ceresdb"
+data_path = "/tmp/horaedb"

[[meta_client.cluster_view.schema_shards]]
schema = 'public'
98 changes: 49 additions & 49 deletions integration_tests/cases/env/cluster/ddl/partition_table.result
@@ -19,72 +19,72 @@ String("partition_table_t"),String("CREATE TABLE `partition_table_t` (`tsid` uin


INSERT INTO partition_table_t (t, name, value)
-VALUES (1651737067000, "ceresdb0", 100),
-(1651737067000, "ceresdb1", 101),
-(1651737067000, "ceresdb2", 102),
-(1651737067000, "ceresdb3", 103),
-(1651737067000, "ceresdb4", 104),
-(1651737067000, "ceresdb5", 105),
-(1651737067000, "ceresdb6", 106),
-(1651737067000, "ceresdb7", 107),
-(1651737067000, "ceresdb8", 108),
-(1651737067000, "ceresdb9", 109),
-(1651737067000, "ceresdb10", 110);
+VALUES (1651737067000, "horaedb0", 100),
+(1651737067000, "horaedb1", 101),
+(1651737067000, "horaedb2", 102),
+(1651737067000, "horaedb3", 103),
+(1651737067000, "horaedb4", 104),
+(1651737067000, "horaedb5", 105),
+(1651737067000, "horaedb6", 106),
+(1651737067000, "horaedb7", 107),
+(1651737067000, "horaedb8", 108),
+(1651737067000, "horaedb9", 109),
+(1651737067000, "horaedb10", 110);

affected_rows: 11

SELECT * from partition_table_t where name = "ceresdb0";
SELECT * from partition_table_t where name = "horaedb0";

tsid,t,name,id,value,
-UInt64(7867167026748566913),Timestamp(1651737067000),String("ceresdb0"),Int32(0),Double(100.0),
+UInt64(7867167026748566913),Timestamp(1651737067000),String("horaedb0"),Int32(0),Double(100.0),


SELECT * from partition_table_t where name = "ceresdb1";
SELECT * from partition_table_t where name = "horaedb1";

tsid,t,name,id,value,
-UInt64(17529796748086676681),Timestamp(1651737067000),String("ceresdb1"),Int32(0),Double(101.0),
+UInt64(17529796748086676681),Timestamp(1651737067000),String("horaedb1"),Int32(0),Double(101.0),


SELECT * from partition_table_t where name = "ceresdb2";
SELECT * from partition_table_t where name = "horaedb2";

tsid,t,name,id,value,
-UInt64(9909398277373623550),Timestamp(1651737067000),String("ceresdb2"),Int32(0),Double(102.0),
+UInt64(9909398277373623550),Timestamp(1651737067000),String("horaedb2"),Int32(0),Double(102.0),


SELECT * from partition_table_t where name = "ceresdb3";
SELECT * from partition_table_t where name = "horaedb3";

tsid,t,name,id,value,
-UInt64(5639753573168158607),Timestamp(1651737067000),String("ceresdb3"),Int32(0),Double(103.0),
+UInt64(5639753573168158607),Timestamp(1651737067000),String("horaedb3"),Int32(0),Double(103.0),


-SELECT * from partition_table_t where name in ("ceresdb0", "ceresdb1", "ceresdb2", "ceresdb3", "ceresdb4") order by name;
+SELECT * from partition_table_t where name in ("horaedb0", "horaedb1", "horaedb2", "horaedb3", "horaedb4") order by name;

tsid,t,name,id,value,
-UInt64(7867167026748566913),Timestamp(1651737067000),String("ceresdb0"),Int32(0),Double(100.0),
-UInt64(17529796748086676681),Timestamp(1651737067000),String("ceresdb1"),Int32(0),Double(101.0),
-UInt64(9909398277373623550),Timestamp(1651737067000),String("ceresdb2"),Int32(0),Double(102.0),
-UInt64(5639753573168158607),Timestamp(1651737067000),String("ceresdb3"),Int32(0),Double(103.0),
-UInt64(1510325626916728375),Timestamp(1651737067000),String("ceresdb4"),Int32(0),Double(104.0),
+UInt64(7867167026748566913),Timestamp(1651737067000),String("horaedb0"),Int32(0),Double(100.0),
+UInt64(17529796748086676681),Timestamp(1651737067000),String("horaedb1"),Int32(0),Double(101.0),
+UInt64(9909398277373623550),Timestamp(1651737067000),String("horaedb2"),Int32(0),Double(102.0),
+UInt64(5639753573168158607),Timestamp(1651737067000),String("horaedb3"),Int32(0),Double(103.0),
+UInt64(1510325626916728375),Timestamp(1651737067000),String("horaedb4"),Int32(0),Double(104.0),


-SELECT * from partition_table_t where name in ("ceresdb5", "ceresdb6", "ceresdb7","ceresdb8", "ceresdb9", "ceresdb10") order by name;
+SELECT * from partition_table_t where name in ("horaedb5", "horaedb6", "horaedb7","horaedb8", "horaedb9", "horaedb10") order by name;

tsid,t,name,id,value,
-UInt64(7158007527379307345),Timestamp(1651737067000),String("ceresdb10"),Int32(0),Double(110.0),
-UInt64(12677620772014847982),Timestamp(1651737067000),String("ceresdb5"),Int32(0),Double(105.0),
-UInt64(15597124953767983737),Timestamp(1651737067000),String("ceresdb6"),Int32(0),Double(106.0),
-UInt64(4263010807109012073),Timestamp(1651737067000),String("ceresdb7"),Int32(0),Double(107.0),
-UInt64(9923681778193615344),Timestamp(1651737067000),String("ceresdb8"),Int32(0),Double(108.0),
-UInt64(4860320137932382618),Timestamp(1651737067000),String("ceresdb9"),Int32(0),Double(109.0),
+UInt64(7158007527379307345),Timestamp(1651737067000),String("horaedb10"),Int32(0),Double(110.0),
+UInt64(12677620772014847982),Timestamp(1651737067000),String("horaedb5"),Int32(0),Double(105.0),
+UInt64(15597124953767983737),Timestamp(1651737067000),String("horaedb6"),Int32(0),Double(106.0),
+UInt64(4263010807109012073),Timestamp(1651737067000),String("horaedb7"),Int32(0),Double(107.0),
+UInt64(9923681778193615344),Timestamp(1651737067000),String("horaedb8"),Int32(0),Double(108.0),
+UInt64(4860320137932382618),Timestamp(1651737067000),String("horaedb9"),Int32(0),Double(109.0),


ALTER TABLE partition_table_t ADD COLUMN (b string);

affected_rows: 0

-INSERT INTO partition_table_t (t, id, name, value) VALUES (1651737067000, 10, "ceresdb0", 100);
+INSERT INTO partition_table_t (t, id, name, value) VALUES (1651737067000, 10, "horaedb0", 100);

-Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to execute plan. Caused by: Internal error, msg:Failed to execute interpreter, err:Failed to execute insert, err:Failed to write table, err:Failed to write tables in batch, tables:[\"__partition_table_t_1\"], err:Failed to query from table in server, table_idents:[TableIdentifier { catalog: \"horaedb\", schema: \"public\", table: \"__partition_table_t_1\" }], code:401, msg:failed to decode row group payload. Caused by: Schema mismatch with the write request, msg:expect 6 columns, but got 5. sql:INSERT INTO partition_table_t (t, id, name, value) VALUES (1651737067000, 10, \"ceresdb0\", 100);" })
+Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to execute plan. Caused by: Internal error, msg:Failed to execute interpreter, err:Failed to execute insert, err:Failed to write table, err:Failed to write tables in batch, tables:[\"__partition_table_t_1\"], err:Failed to query from table in server, table_idents:[TableIdentifier { catalog: \"horaedb\", schema: \"public\", table: \"__partition_table_t_1\" }], code:401, msg:failed to decode row group payload. Caused by: Schema mismatch with the write request, msg:expect 6 columns, but got 5. sql:INSERT INTO partition_table_t (t, id, name, value) VALUES (1651737067000, 10, \"horaedb0\", 100);" })

ALTER TABLE partition_table_t MODIFY SETTING enable_ttl='true';

@@ -143,30 +143,30 @@ String("random_partition_table_t"),String("CREATE TABLE `random_partition_table_


INSERT INTO random_partition_table_t (t, name, value)
-VALUES (1651737067000, "ceresdb0", 100),
-(1651737067000, "ceresdb1", 101),
-(1651737067000, "ceresdb2", 102),
-(1651737067000, "ceresdb3", 103),
-(1651737067000, "ceresdb4", 104),
-(1651737067000, "ceresdb5", 105),
-(1651737067000, "ceresdb6", 106),
-(1651737067000, "ceresdb7", 107),
-(1651737067000, "ceresdb8", 108),
-(1651737067000, "ceresdb9", 109),
-(1651737067000, "ceresdb10", 110);
+VALUES (1651737067000, "horaedb0", 100),
+(1651737067000, "horaedb1", 101),
+(1651737067000, "horaedb2", 102),
+(1651737067000, "horaedb3", 103),
+(1651737067000, "horaedb4", 104),
+(1651737067000, "horaedb5", 105),
+(1651737067000, "horaedb6", 106),
+(1651737067000, "horaedb7", 107),
+(1651737067000, "horaedb8", 108),
+(1651737067000, "horaedb9", 109),
+(1651737067000, "horaedb10", 110);

affected_rows: 11

SELECT * from random_partition_table_t where name = "ceresdb0";
SELECT * from random_partition_table_t where name = "horaedb0";

tsid,t,name,id,value,
-UInt64(7867167026748566913),Timestamp(1651737067000),String("ceresdb0"),Int32(0),Double(100.0),
+UInt64(7867167026748566913),Timestamp(1651737067000),String("horaedb0"),Int32(0),Double(100.0),


SELECT * from random_partition_table_t where name = "ceresdb5";
SELECT * from random_partition_table_t where name = "horaedb5";

tsid,t,name,id,value,
-UInt64(12677620772014847982),Timestamp(1651737067000),String("ceresdb5"),Int32(0),Double(105.0),
+UInt64(12677620772014847982),Timestamp(1651737067000),String("horaedb5"),Int32(0),Double(105.0),


DROP TABLE IF EXISTS `random_partition_table_t`;