diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index 954f814455a..9472d98c4c4 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -6,7 +6,7 @@ on: jobs: deploy: - runs-on: ubuntu-18.04 + runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v2 with: @@ -29,21 +29,16 @@ jobs: pip3 install -r ./requirements.txt - name: Git Config - run: git config user.name jerry.liang && git config user.email ${{secrets.GIT_EMAIL}} + run: git config user.name whitewum && git config user.email min.wu@vesoft.com - name: mike list delete run: | - #mike delete 2.0.0 -p mike list - - - name: Mike Deploy - run: mike deploy master -p --rebase - -# - name: set-default -# run: mike set-default 2.0.1 -p - - - name: mike list - run: mike list + + - name: Mike Deploy master + run: | + mike deploy master -p --rebase + mike list # - name: Deploy # uses: peaceiris/actions-gh-pages@v3 diff --git a/docs-2.0/1.introduction/2.1.path.md b/docs-2.0/1.introduction/2.1.path.md index 80e413af15c..7a4a44e8cb4 100644 --- a/docs-2.0/1.introduction/2.1.path.md +++ b/docs-2.0/1.introduction/2.1.path.md @@ -8,7 +8,7 @@ The following picture is an example for a brief introduction. ![path](../images/path1.png) -## walk +## Walk A `walk` is a finite or infinite sequence of edges. Both vertices and edges can be repeatedly visited in graph traversal. @@ -18,7 +18,7 @@ In the above picture C, D, and E form a cycle. So, this picture contains infinit `GO` statements use `walk`. -## trail +## Trail A `trail` is a finite sequence of edges. Only vertices can be repeatedly visited in graph traversal. The Seven Bridges of Königsberg is a typical `trail`. @@ -40,7 +40,7 @@ There are two special cases of trail, `cycle`, and `circuit`. The following pict A `circuit` refers to a closed `trail`. Edges cannot be repeatedly visited in graph traversal. Apart from the terminal vertices, other vertices can also be repeatedly visited. The longest path in this picture: `A->B->C->D->E->C->A`. -## path +## Path A `path` is a finite sequence of edges. Neither vertices nor edges can be repeatedly visited in graph traversal. diff --git a/docs-2.0/1.introduction/3.nebula-graph-architecture/4.storage-service.md b/docs-2.0/1.introduction/3.nebula-graph-architecture/4.storage-service.md index d2b164a62a2..627d5319715 100644 --- a/docs-2.0/1.introduction/3.nebula-graph-architecture/4.storage-service.md +++ b/docs-2.0/1.introduction/3.nebula-graph-architecture/4.storage-service.md @@ -14,7 +14,7 @@ The other is the Storage Service that stores the data, which is run by the nebul - High availability (Raft) -- Supports synchronizing with the third party systems, such as [Elasticsearch](../4.deployment-and-installation/6.deploy-text-based-index/2.deploy-es.md). +- Supports synchronizing with the third party systems, such as [Elasticsearch](../../4.deployment-and-installation/6.deploy-text-based-index/2.deploy-es.md). ## The architecture of Storage Service diff --git a/docs-2.0/1.introduction/3.vid.md b/docs-2.0/1.introduction/3.vid.md index fd4d83d44c7..edaee4d7ae2 100644 --- a/docs-2.0/1.introduction/3.vid.md +++ b/docs-2.0/1.introduction/3.vid.md @@ -18,7 +18,7 @@ In Nebula Graph, a vertex is uniquely identified by its ID, which is called a VI + When two `INSERT` statements with the same VID but different tags, like `TAG A` and `TAG B`, are operated at the same time, the operation of `Tag A` will not affect `Tag B`. -- VIDs will usually be indexed and stored into memory(in the way of LSM-tree). 
Thus, direct access to VIDs enjoys peak performance. +- VIDs will usually be indexed and stored into memory (in the way of LSM-tree). Thus, direct access to VIDs enjoys peak performance. ## VID Operation @@ -47,3 +47,20 @@ VIDs can be generated via applications. Here are some tips: ## Define and modify the data type of VIDs The data type of VIDs must be defined when you [create the graph space](../3.ngql-guide/9.space-statements/1.create-space.md). Once defined, it cannot be modified. + + \ No newline at end of file diff --git a/docs-2.0/14.client/4.nebula-java-client.md b/docs-2.0/14.client/4.nebula-java-client.md index acc2e51afff..ea08a69dab6 100644 --- a/docs-2.0/14.client/4.nebula-java-client.md +++ b/docs-2.0/14.client/4.nebula-java-client.md @@ -33,13 +33,13 @@ You have installed Java 8.0 or later versions. !!! note - We recommended that each thread use one session. If multiple threads use the same session, the performance will be reduced. + We recommend that each thread use one session. If multiple threads use the same session, the performance will be reduced. When importing a Maven project with tools such as IDEA, set the following dependency in `pom.xml`. !!! note - `2.0.0-SNAPSHOT` indicates the daily development version that may have unknow issues. We recommend that you replace `2.0.0-SNAPSHOT` with a released version number to use a table version. + `2.0.0-SNAPSHOT` indicates the daily development version that may have unknown issues. We recommend that you replace `2.0.0-SNAPSHOT` with a released version number to use a stable version. ```bash diff --git a/docs-2.0/14.client/5.nebula-python-client.md b/docs-2.0/14.client/5.nebula-python-client.md index 94b7acc4607..d81495dacfd 100644 --- a/docs-2.0/14.client/5.nebula-python-client.md +++ b/docs-2.0/14.client/5.nebula-python-client.md @@ -53,7 +53,7 @@ $ pip install nebula2-python== !!! note - To run unit tests in the development mode, install `requirements-dev.txt` instead. + To run unit tests in the development mode, install the dependencies in `requirements-dev.txt`. 4. Run the following command to install Nebula Python. @@ -80,7 +80,7 @@ ok = connection_pool.init([('192.168.xx.1', 9669)], config) # Get the session from the connection pool. session = connection_pool.get_session('root', 'nebula') -# Selection a graph space. +# Select a graph space. session.execute('USE basketballplayer') # Run the SHOW TAGS statement. diff --git a/docs-2.0/15.contribution/how-to-contribute.md b/docs-2.0/15.contribution/how-to-contribute.md index 2868b330a03..97a04971d33 100644 --- a/docs-2.0/15.contribution/how-to-contribute.md +++ b/docs-2.0/15.contribution/how-to-contribute.md @@ -108,9 +108,9 @@ The Nebula Graph project has many [repositories](https://github.com/vesoft-inc). git checkout -b myfeature ``` -!!! note + !!! note - Because the PR often consists of several commits, which might be squashed while being merged into upstream. We strongly suggest you to open a separate topic branch to make your changes on. After merged, this topic branch can be just abandoned, thus you could synchronize your master branch with upstream easily with a rebase like above. Otherwise, if you commit your changes directly into master, you need to use a hard reset on the master branch. For example: + Because a PR often consists of several commits that might be squashed when merged into upstream, we strongly suggest that you open a separate topic branch to make your changes on.
After merged, this topic branch can be just abandoned, thus you could synchronize your master branch with upstream easily with a rebase like above. Otherwise, if you commit your changes directly into master, you need to use a hard reset on the master branch. For example: ```bash git fetch upstream @@ -198,7 +198,7 @@ Email address: info@vesoft.com WeChat: NebulaGraphbot -Slack:[Join Slack](https://join.slack.com/t/nebulagraph/shared_invite/zt-7ybejuqa-NCZBroh~PCh66d9kOQj45g) +Slack: [Join Slack](https://join.slack.com/t/nebulagraph/shared_invite/zt-7ybejuqa-NCZBroh~PCh66d9kOQj45g) ### Step 2: Get the information of the project recipient diff --git a/docs-2.0/2.quick-start/1.quick-start-workflow.md b/docs-2.0/2.quick-start/1.quick-start-workflow.md index 4dfebf98fb5..2068f105868 100644 --- a/docs-2.0/2.quick-start/1.quick-start-workflow.md +++ b/docs-2.0/2.quick-start/1.quick-start-workflow.md @@ -21,73 +21,3 @@ Users can quickly deploy and use Nebula Graph in the following steps. 4. [CRUD in Nebula Graph](4.nebula-graph-crud.md) Users can use nGQL (Nebula Graph Query Language) to run CRUD after connecting to Nebula Graph. - - diff --git a/docs-2.0/2.quick-start/4.nebula-graph-crud.md b/docs-2.0/2.quick-start/4.nebula-graph-crud.md index 1708ce3b930..8f6a7d30227 100644 --- a/docs-2.0/2.quick-start/4.nebula-graph-crud.md +++ b/docs-2.0/2.quick-start/4.nebula-graph-crud.md @@ -1,4 +1,4 @@ -# Step 4: Use nGQL(CRUD) +# Step 4: Use nGQL (CRUD) This topic will describe the basic CRUD operations in Nebula Graph. diff --git a/docs-2.0/20.appendix/0.FAQ.md b/docs-2.0/20.appendix/0.FAQ.md index b2dbce6d964..892f2029a8e 100644 --- a/docs-2.0/20.appendix/0.FAQ.md +++ b/docs-2.0/20.appendix/0.FAQ.md @@ -8,7 +8,7 @@ If the solutions described in this topic cannot solve your problems, ask for hel ### "Why is the behavior in the manual not consistent with the system?" - Nebula Graph is still under development. Its behavior changes from time to time. Users can submit an [issue](https://github.com/vesoft-inc/nebula/issues/new) to inform the team if the manual and the system are not consistent. +Nebula Graph is still under development. Its behavior changes from time to time. Users can submit an [issue](https://github.com/vesoft-inc/nebula/issues/new) to inform the team if the manual and the system are not consistent. !!! note @@ -32,23 +32,39 @@ If the solutions described in this topic cannot solve your problems, ask for hel ## About executions -### "Why are the query results different when using `GO` and `MATCH` to execute the same semantic query?" +### About dangling edges -Using different types of paths may cause different query results. +A dangling edge is an edge that only connects to a single vertex and only one part of the edge connects to the vertex. -- `GO` statements use `walk`. Both vertices and edges can be repeatedly visited in graph traversal. +Nebula Graph {{ nebula.release }} allows dangling edges. And there is no `MERGE` statements of openCypher. The guarantee for dangling edges depends entirely on the application level. For more information, see [INSERT VERTEX](../3.ngql-guide/12.vertex-statements/1.insert-vertex.md), [DELETE VERTEX](../3.ngql-guide/12.vertex-statements/4.delete-vertex.md), [INSERT EDGE](../3.ngql-guide/13.edge-statements/1.insert-edge.md), [DELETE EDGE](../3.ngql-guide/13.edge-statements/4.delete-edge.md). -- `MATCH` statements are compatible with openCypher and use `trail`. Only vertices can be repeatedly visited in graph traversal. - -The example is as follows. 
-![Path](../images/path1.png) +### "How to resolve `[ERROR (-1005)]: Used memory hits the high watermark(0.800000) of total system memory.`?" -All queries that start from `A` with 5 hops will end at `C` (`A->B->C->D->E->C`). If it is 6 hops, the `GO` statement will end at `D` (`A->B->C->D->E->C->D`), because the edge `C->D` can be visited repeatedly. However, the `MATCH` statement returns empty, because edges cannot be visited repeatedly. +The reason for this error may be that `system_memory_high_watermark_ratio` specifies the trigger threshold of the memory high watermark alarm mechanism. The default value is `0.8`. If the system memory usage is higher than this value, an alarm mechanism will be triggered, and Nebula Graph will stop querying. -Therefore, using `GO` and `MATCH` to execute the same semantic query may cause different query results. +Possible solutions are as follows: -For more information, see [Wikipedia](https://en.wikipedia.org/wiki/Path_(graph_theory)#Walk,_trail,_path). +* Clean the system memory to make it below the threshold. +* [Modify the Graph configuration](../5.configurations-and-logs/1.configurations/1.configurations.md). Add the `system_memory_high_watermark_ratio` parameter to the configuration files of all Graph servers, and set it greater than `0.8`, such as `0.9`. + + !!! note + + Only the Graph service supports `system_memory_high_watermark_ratio`, while the Storage and Meta services do not. + +### "How to resolve the error `Storage Error E_RPC_FAILURE`?" + +The reason for this error is usually that the storaged process returns too many data back to the graphd process. Possible solutions are as follows: + +* [Modify configuration files](../5.configurations-and-logs/1.configurations/3.graph-config.md): Modify the value of `--storage_client_timeout_ms` in the `nebula-graphd.conf` file to extend the connection timeout of the Storage client. This configuration is measured in milliseconds (ms). For example, set `--storage_client_timeout_ms=60000`. If this parameter is not specified in the `nebula-graphd.conf` file, specify it manually. Tip: Add `--local_config=true` at the beginning of the configuration file and restart the service. +* Optimize the query statement: Reduce queries that scan the entire database. No matter whether `LIMIT` is used to limit the number of returned results, use the `GO` statement to rewrite the `MATCH` statement (the former is optimized, while the latter is not). +* Check whether the Storaged process has OOM. (`dmesg |grep nebula`). +* Use better SSD or memory for the Storage Server. +* Retry. + +### "How to resolve the error `The leader has changed. Try again later`?" + +It is a known issue. Just retry 1 to N times, where N is the partition number. The reason is that the meta client needs some heartbeats to update or errors to trigger the new leader information. ### "How is the `time spent` value at the end of each return message calculated?" @@ -80,6 +96,28 @@ When there are enough machines and `replica_factor=2`, if one replica fails, the We suggest that you set `replica_factor=3` for a production environment and `replica_factor=1` for a test environment. Do not use an even number. +### "Is stopping or killing slow queries supported?" + +Yes. For more information, see [Kill query](../3.ngql-guide/18.operation-and-maintenance-statements/6.kill-query.md). + +### "Why are the query results different when using `GO` and `MATCH` to execute the same semantic query?" + +Using different types of paths may cause different query results. 
+ +- `GO` statements use `walk`. Both vertices and edges can be repeatedly visited in graph traversal. + +- `MATCH` statements are compatible with openCypher and use `trail`. Only vertices can be repeatedly visited in graph traversal. + +The example is as follows. + +![Path](../images/path1.png) + +All queries that start from `A` with 5 hops will end at `C` (`A->B->C->D->E->C`). If it is 6 hops, the `GO` statement will end at `D` (`A->B->C->D->E->C->D`), because the edge `C->D` can be visited repeatedly. However, the `MATCH` statement returns empty, because edges cannot be visited repeatedly. + +Therefore, using `GO` and `MATCH` to execute the same semantic query may cause different query results. + +For more information, see [Wikipedia](https://en.wikipedia.org/wiki/Path_(graph_theory)#Walk,_trail,_path). + ### "How to resolve `[ERROR (-7)]: SyntaxError: syntax error near`?" In most cases, a query statement requires a `YIELD` or a `RETURN`. Check your query statement to see if `YIELD` or `RETURN` is provided. @@ -141,21 +179,6 @@ Check whether the length of the VID exceeds the limitation. For more information Nebula Graph may return such errors when the Storage service receives multiple requests to insert or update the same vertex or edge within milliseconds. Try the failed requests again later. -### "How to resolve the error `Storage Error E_RPC_FAILURE`?" - -The reason for this error is usually that the storaged process returns too many data back to the graphd process. Possible solutions are as follows: - -* [Modify configuration files](../5.configurations-and-logs/1.configurations/3.graph-config.md): Modify the value of `--storage_client_timeout_ms` in the `nebula-graphd.conf` file to extend the connection timeout of the Storage client. This configuration is measured in milliseconds (ms). For example, set `--storage_client_timeout_ms=60000`. If this parameter is not specified in the `nebula-graphd.conf` file, specify it manually. - -* Optimize the query statement: Reduce queries that scan the entire database. No matter whether `LIMIT` is used to limit the number of returned results, use the `GO` statement to rewrite the `MATCH` statement (the former is optimized, while the latter is not). -* Check whether the Storaged process has OOM. (`dmesg |grep nebula`). -* Use better SSD or memory for the Storage Server. -* Retry. - -### "How to resolve the error `The leader has changed. Try again later`?" - -It is a known issue. Just retry 1 to N times, where N is the partition number. The reason is that the meta client needs some heartbeats to update or errors to trigger the new leader information. - ### "How to resolve the error `RPC failure in MetaClient: Connection refused`?" The reason for this error is usually that the metad service status is unusual, or the network of the machine where the metad and graphd services are located is disconnected. Possible solutions are as follows: @@ -181,13 +204,11 @@ The reason for this error may be that the user has modified the IP or the port i Delete the `cluster.id` file in the installation directory where the storage machine is deployed (the default installation directory is `/usr/local/nebula`), and restart the storaged service. -### "Is stopping or killing slow queries supported?" +### Can non-English characters be used as identifiers, such as the names of graph spaces, tags, edge types, properties, and indexes? -Yes. For more information, see [Kill query](../3.ngql-guide/18.operation-and-maintenance-statements/6.kill-query.md). 
- -### Can Chinese characters be used as identifiers, such as the names of graph spaces, tags, edge types, properties, and indexes? +No. -No. The names of graph spaces, tags, edge types, properties, and indexes must use English letters, numbers, or underlines. Chinese characters are not currently supported. +The names of graph spaces, tags, edge types, properties, and indexes must use English letters, numbers, or underlines. Non-English characters are not currently supported. Meanwhile, the above identifiers are case-sensitive and cannot use [Keywords and reserved words](../3.ngql-guide/1.nGQL-overview/keywords-and-reserved-words.md). @@ -202,20 +223,9 @@ nebula > MATCH (s)<-[e]-() WHERE id(s) == "given" RETURN count(e); #In-degree ### "How to quickly get the out-degree and in-degree of all vertices?" -There is no such command. You can use [Nebula Algorithm](../nebula-algorithm.md). - -### "How to resolve `[ERROR (-1005)]: Used memory hits the high watermark(0.800000) of total system memory.`?" - -The reason for this error may be that `system_memory_high_watermark_ratio` specifies the trigger threshold of the memory high watermark alarm mechanism. The default value is `0.8`. If the system memory usage is higher than this value, an alarm mechanism will be triggered, and Nebula Graph will stop querying. - -Possible solutions are as follows: - -* Clean the system memory to make it below the threshold. -* [Modify the Graph configuration](../5.configurations-and-logs/1.configurations/1.configurations.md). Add the `system_memory_high_watermark_ratio` parameter to the configuration files of all Graph servers, and set it greater than `0.8`, such as `0.9`. +There is no such command. - !!! note - - Only the Graph service supports `system_memory_high_watermark_ratio`, while the Storage and Meta services do not. +You can use [Nebula Algorithm](../nebula-algorithm.md). ### "How to resolve `[ERROR (-1005)]: Schema not exist: xxx`?" @@ -223,13 +233,7 @@ If the system returns `Schema not exist` when querying, make sure that: - Whether there is a tag or an edge type in the Schema. -- -Whether the name of the tag or the edge type is a keyword. If it is a keyword, enclose them with backquotes (\`). For more information, see[Keywords](../3.ngql-guide/1.nGQL-overview/keywords-and-reserved-words.md). - -### About dangling edges - -A dangling edge is an edge that only connects to a single vertex and only one part of the edge connects to the vertex. - -Nebula Graph {{ nebula.release }} allows dangling edges. And there is no `MERGE` statements of openCypher. The guarantee for dangling edges depends entirely on the application level. For more information, see [INSERT VERTEX](../3.ngql-guide/12.vertex-statements/1.insert-vertex.md), [DELETE VERTEX](../3.ngql-guide/12.vertex-statements/4.delete-vertex.md), [INSERT EDGE](../3.ngql-guide/13.edge-statements/1.insert-edge.md), [DELETE EDGE](../3.ngql-guide/13.edge-statements/4.delete-edge.md). +- -Whether the name of the tag or the edge type is a keyword. If it is a keyword, enclose them with backquotes (\`). For more information, see [Keywords](../3.ngql-guide/1.nGQL-overview/keywords-and-reserved-words.md). ## About operation and maintenance @@ -241,6 +245,10 @@ Nebula Graph uses [glog](https://github.com/google/glog) to print logs. `glog` c ### "How to check the Nebula Graph version?" +If the service is running: run command `SHOW HOSTS META` in `nebula-console`. See [SHOW HOSTS](../3.ngql-guide/7.general-query-statements/6.show/6.show-hosts.md). 
+ +If the service is not running: + Different installation methods make the method of checking the version different. The instructions are as follows: - If you install Nebula Graph by compiling the source code @@ -305,6 +313,8 @@ If you have not modified the predefined ports in the [Configurations](../5.confi If you have customized the configuration files and changed the predefined ports, find the port numbers in your configuration files and open them on the firewalls. +For those eco-tools, see the corresponding document. + ### "How to test whether a port is open or closed?" You can use telnet as follows to check for port status. diff --git a/docs-2.0/20.appendix/6.eco-tool-version.md b/docs-2.0/20.appendix/6.eco-tool-version.md index 8cf8bee24b5..247c4357b4b 100644 --- a/docs-2.0/20.appendix/6.eco-tool-version.md +++ b/docs-2.0/20.appendix/6.eco-tool-version.md @@ -1,42 +1,166 @@ -# Version Description +# Ecosystem tools overview -Different versions of the ecosystem tools support different Nebula Graph kernel versions. This topic introduces the correspondence between the versions of ecosystem tools and the Nebula Graph kernel. +![Nebula Graph birdview](../1.introduction/nebula-birdview.png) -!!! note +!!! compatibility - All ecosystem tools of 1.x did not support Nebula Graph 2.x kernel. + The core release number naming rule is `X.Y.Z`, which means `Major version X`, `Medium version Y`, and `Minor version Z`. The upgrade requirements for the client are: -## Nebula Studio + - Upgrade the core from `X.Y.Z1` to `X.Y.Z2`: It means that the core is fully forward compatible and is usually used for bugfixes. It is recommended to upgrade the minor version of the core as soon as possible. At this time, the client can stay **not upgraded**. + + - Upgrade the core from `X.Y1.*` to `X.Y2.*`: It means that there is some incompatibility of API, syntax, and return value. It is usually used to add functions, improve performance, and optimize code. The client needs to be upgraded to `X.Y2.*`. + + - Upgrade the core from `X1.*.*` to `X2.*.*`: It means that there is a major incompatibility in storage formats, API, syntax, etc. You need to use tools to upgrade the core data. The client must be upgraded. -|Studio version|Nebula Graph version| + - The default core and client do not support downgrade: You cannot downgrade from `X.Y.Z2` to `X.Y.Z1`. + + - The release cycle of a `Y` version is about 6 months, and its maintenance and support cycle is 6 months. + + - The version released at the beginning of the year is usually named `X.0.0`, and in the middle of the year, it is named `X.5.0`. + + - The file name contains `RC` to indicate an unofficial version (`Release Candidate`) that is only used for preview. Its maintenance period is only until the next RC or official version is released. Its client, data compatibility, etc. are not guaranteed. + + - The files with `nightly`, `SNAPSHOT`, or date are the nightly versions. There is no quality assurance and maintenance period. + +!!! compatibility + + All ecosystem tools of 1.x did not support Nebula Graph 2.x core. + +## Nebula Graph Studio + +Nebula Graph Studio (Studio for short) is a graph database visualization tool that can be accessed through the Web. It can be used with Nebula Graph DBMS to provide one-stop services such as composition, data import, writing nGQL queries, and graph exploration. For details, see [What is Nebula Graph Studio](../nebula-studio/about-studio/st-ug-what-is-graph-studio.md). + +!!! 
Note + + The release of the Studio is independent of Nebula Graph core, and its naming method also differs from the core naming rules. The compatible relationship between them is as follows. + +|Nebula Graph|Studio (commit id)| +|:---|:---| +| {{ nebula.release }} | {{studio.base300}}(9e2a120)| + +## Nebula Graph Dashboard + +Nebula Graph Dashboard (Dashboard for short) is a visualization tool for monitoring the status of machines and services in the Nebula Graph cluster. For details, see [What is Nebula Graph Dashboard](../nebula-dashboard/1.what-is-dashboard.md). + +|Nebula Graph version|Dashboard version (commit id)| +|:---|:---| +| {{ nebula.release }} | {{dashboard.release}}(49ab1bc) | + +## Nebula Graph Explorer + +Nebula Graph Explorer (Explorer for short) is a graph exploration visualization tool that can be accessed through the Web. It is used with the Nebula Graph core to visualize interaction with graph data. Users can quickly become graph experts, even without experience in graph data manipulation. For details, see [What is Nebula Graph Explorer](../nebula-explorer/about-explorer/ex-ug-what-is-explorer.md). + +|Nebula Graph version|Explorer version (commit id)| |:---|:---| -| 2.2.0 | 2.0.0 | -| 2.2.0 | 2.0.1 | +| {{ nebula.release }} | {{explorer.base100}}(3b82142) | ## Nebula Exchange -|Exchange version|Nebula Graph version| +Nebula Exchange (Exchange for short) is an Apache Spark&trade; application for batch migration of data in a cluster to Nebula Graph in a distributed environment. It supports the migration of batch data and streaming data in a variety of formats. For details, see [What is Nebula Exchange](../nebula-exchange/about-exchange/ex-ug-what-is-exchange.md). + +|Nebula Graph version|[Exchange](https://github.com/vesoft-inc/nebula-spark-utils/tree/{{exchange.branch}}/nebula-exchange) version (commit id)| |:---|:---| -| 2.0.0 | 2.0.0 | -| 2.0.0 | 2.0.1 | +| {{ nebula.release }} | {{exchange.release}}(3c0f4c6) | ## Nebula Importer -|Importer version|Nebula Graph version| +Nebula Importer (Importer for short) is a CSV file import tool for Nebula Graph. The Importer reads local CSV files and imports the data into the Nebula Graph database. For details, see [What is Nebula Importer](../nebula-importer/use-importer.md). + +|Nebula Graph version|[Importer](https://github.com/vesoft-inc/nebula-importer/tree/{{importer.branch}}) version (commit id)| |:---|:---| -| 2.0.0 | 2.0.0 | -| 2.0.0 | 2.0.1 | +| {{ nebula.release }} | {{importer.release}}(5c7417d) | ## Nebula Spark Connector -|Spark Connector version|Nebula Graph version| +Nebula Spark Connector is a Spark connector that provides the ability to read and write Nebula Graph data in the Spark standard format. Nebula Spark Connector consists of two parts, Reader and Writer. For details, see [What is Nebula Spark Connector](../nebula-spark-connector.md). + +|Nebula Graph version|[Spark Connector](https://github.com/vesoft-inc/nebula-spark-utils/tree/{{sparkconnector.branch}}/nebula-spark-connector) version (commit id)| |:---|:---| -| 2.0.0 | 2.0.0 | -| 2.0.0 | 2.0.1 | +| {{ nebula.release }} | {{sparkconnector.release}}(3c0f4c6) | ## Nebula Flink Connector -|Flink Connector version|Nebula Graph version| +Nebula Flink Connector is a connector that helps Flink users quickly access Nebula Graph. It supports reading data from the Nebula Graph database or writing data read from other external data sources to the Nebula Graph database.
For details, see [What is Nebula Flink Connector](../nebula-flink-connector.md). + +|Nebula Graph version|[Flink Connector](https://github.com/vesoft-inc/nebula-flink-connector/tree/{{flinkconnector.branch}}) version (commit id)| +|:---|:---| +| {{ nebula.release }} | {{flinkconnector.release}}(49b8f3d) | + +## Nebula Algorithm + +Nebula Algorithm (Algorithm for short) is a Spark application based on [GraphX](https://spark.apache.org/graphx/). It provides a complete set of graph computing algorithms: you can analyze the data in the Nebula Graph database by submitting Spark tasks, or call the algorithms under the lib repository programmatically to perform graph computing on DataFrames. For details, see [What is Nebula Algorithm](../nebula-algorithm.md). + +|Nebula Graph version|[Algorithm](https://github.com/vesoft-inc/nebula-spark-utils/tree/{{algorithm.branch}}/nebula-algorithm) version (commit id)| |:---|:---| -| 2.0.0 | 2.0.0 | -| 2.0.0 | 2.0.1 | \ No newline at end of file +| {{ nebula.release }} | {{algorithm.release}}(3c0f4c6) | + +## Nebula Console + +Nebula Console is the native CLI client of Nebula Graph. For how to use it, see [Connect to Nebula Graph](../2.quick-start/3.connect-to-nebula-graph.md). + +|Nebula Graph version|[Console](https://github.com/vesoft-inc/nebula-console) version (commit id)| +|:---|:---| +| {{ nebula.release }} | {{console.release}}(3ce5151) | + +## Nebula Docker Compose + +Docker Compose can quickly deploy Nebula Graph clusters. For how to use it, see [Deploy Nebula Graph with Docker Compose](../4.deployment-and-installation/2.compile-and-install-nebula-graph/3.deploy-nebula-graph-with-docker-compose.md). + +|Nebula Graph version|[Docker Compose](https://github.com/vesoft-inc/nebula-docker-compose/tree/master) version (commit id)| +|:---|:---| +| {{ nebula.release }} | {{dockercompose.release}}(d42231f) | + + + +## Nebula Bench + +[Nebula Bench](https://github.com/vesoft-inc/nebula-bench) is used to test the baseline performance data of Nebula Graph. It uses the standard data set of LDBC v0.3.3. + +|Nebula Graph version|[Nebula Bench](https://github.com/vesoft-inc/nebula-bench) version (commit id)| +|:---|:---| +| {{ nebula.release }} | {{bench.release}}(661f871) | + +## API, SDK + +!!! compatibility + + Select the latest version of `X.Y.*`, which is the same as the core version.
+|Nebula Graph version| Language (commit id) | +|:---| :--- | +| {{ nebula.release }}| [C++](https://github.com/vesoft-inc/nebula-cpp)(00e2625) | +| {{ nebula.release }}| [Go](https://github.com/vesoft-inc/nebula-go/tree/{{go.branch}})(8a1495a) | +| {{ nebula.release }}| [Python](https://github.com/vesoft-inc/nebula-python)(98e08e4) | +| {{ nebula.release }}| [Java Client](https://github.com/vesoft-inc/nebula-java/tree/{{java.branch}})(0fbc3c6) | + +## Not Released + +- API + + - [Rust Client](https://github.com/vesoft-inc/nebula-rust) + + - [Node.js Client](https://github.com/vesoft-inc/nebula-node) + + - [HTTP Client](https://github.com/vesoft-inc/nebula-http-gateway) + + - [Object Graph Mapping Library (OGM, or ORM)] Java, Python (TODO: in design) + +- Monitoring + + - [Prometheus connector](https://github.com/vesoft-inc/nebula-stats-exporter) + + - [Graph Computing] (TODO: in coding) + +- Test + + - [Chaos Test](https://github.com/vesoft-inc/nebula-chaos) + +- [Backup&Restore](https://github.com/vesoft-inc/nebula-br) diff --git a/docs-2.0/20.appendix/write-tools.md b/docs-2.0/20.appendix/write-tools.md index 54f837b2df0..29d34ce1873 100644 --- a/docs-2.0/20.appendix/write-tools.md +++ b/docs-2.0/20.appendix/write-tools.md @@ -6,7 +6,7 @@ There are many ways to write Nebula Graph {{ nebula.release }}: - Import with [Studio](../nebula-studio/quick-start/st-ug-import-data.md): This method uses a browser to import multiple csv files of this machine. A single file cannot exceed 100 MB, and its format is limited. - Import with [Importer](../nebula-importer/use-importer.md): This method imports multiple csv files on a single machine with unlimited size and flexible format. - Import with [Exchange](../nebula-exchange/about-exchange/ex-ug-what-is-exchange.md): This method imports from various distribution sources, such as Neo4j, Hive, MySQL, etc., which requires a Spark cluster. -- Import with [Spark-connector](../spark-connector/sc-ug-what-is-spark-connector.md)/[Flink-connector](../nebula-flink/nf-ug-what-is-flink-connector.md): This method has corresponding components (Spark/Flink) and writes a small amount of code. +- Import with [Spark-connector](../nebula-spark-connector.md)/[Flink-connector](../nebula-flink-connector.md): This method requires the corresponding components (Spark/Flink) and writing a small amount of code. - Import with [C++/GO/Java/Python SDK](../20.appendix/6.eco-tool-version.md): This method imports in the way of writing programs, which requires certain programming and tuning skills. The following figure shows the positions of these ways: diff --git a/docs-2.0/3.ngql-guide/11.edge-type-statements/3.alter-edge.md b/docs-2.0/3.ngql-guide/11.edge-type-statements/3.alter-edge.md index 7b2862e90aa..1f54954c5c3 100644 --- a/docs-2.0/3.ngql-guide/11.edge-type-statements/3.alter-edge.md +++ b/docs-2.0/3.ngql-guide/11.edge-type-statements/3.alter-edge.md @@ -44,7 +44,7 @@ Trying to use a newly altered edge type may fail because the alteration of the e Nebula Graph implements the alteration of the edge type in the next heartbeat cycle. To make sure the alteration is successful, take one of the following approaches: -- Use [`DESCRIBE EDGE`](5.describe-tag.md) to confirm that the edge type information is updated. If it is not, wait a few seconds and try again. +- Use [`DESCRIBE EDGE`](5.describe-edge.md) to confirm that the edge type information is updated. If it is not, wait a few seconds and try again. - Wait for two heartbeat cycles, i.e., 20 seconds.
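The alter-edge change above tells readers to verify a schema change with `DESCRIBE EDGE` before using it. A minimal nGQL sketch of that check follows; the edge type `serve` and the property `note` are hypothetical examples, not names taken from the changed page.

```ngql
# Hypothetical example: add a property to an existing edge type.
nebula> ALTER EDGE serve ADD (note string);

# The alteration is applied asynchronously in the next heartbeat cycle, so the new
# property may not be visible immediately. If it does not appear in the output,
# wait a few seconds and run DESCRIBE EDGE again before writing data that uses it.
nebula> DESCRIBE EDGE serve;
```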
diff --git a/docs-2.0/3.ngql-guide/12.vertex-statements/1.insert-vertex.md b/docs-2.0/3.ngql-guide/12.vertex-statements/1.insert-vertex.md index 93548795503..014733db4c8 100644 --- a/docs-2.0/3.ngql-guide/12.vertex-statements/1.insert-vertex.md +++ b/docs-2.0/3.ngql-guide/12.vertex-statements/1.insert-vertex.md @@ -23,8 +23,8 @@ prop_value_list: !!! Note - - `IF NOT EXISTS` only compares the names of the VID and the tag (excluding properties). - - `IF NOT EXISTS` will read to check whether the data exists, which will have a significant impact on performance. + - `IF NOT EXISTS` only compares the names of the VID and the tag (excluding properties). + - `IF NOT EXISTS` will read to check whether the data exists, which will have a significant impact on performance. * `tag_name` denotes the tag (vertex type), which must be created before `INSERT VERTEX`. For more information, see [CREATE TAG](../10.tag-statements/1.create-tag.md). diff --git a/docs-2.0/3.ngql-guide/13.edge-statements/1.insert-edge.md b/docs-2.0/3.ngql-guide/13.edge-statements/1.insert-edge.md index 02051e1d53c..b7bf209a9d3 100644 --- a/docs-2.0/3.ngql-guide/13.edge-statements/1.insert-edge.md +++ b/docs-2.0/3.ngql-guide/13.edge-statements/1.insert-edge.md @@ -22,8 +22,8 @@ INSERT EDGE [IF NOT EXISTS] ( ) {VALUES | VALUE} !!! Note - - `IF NOT EXISTS` only detects whether exist and does not detect whether the property values overlap. - - `IF NOT EXISTS` will read to check whether the data exists, which will have a significant impact on performance. + - `IF NOT EXISTS` only detects whether exist and does not detect whether the property values overlap. + - `IF NOT EXISTS` will read to check whether the data exists, which will have a significant impact on performance. * `` denotes the edge type, which must be created before `INSERT EDGE`. Only one edge type can be specified in this statement. diff --git a/docs-2.0/3.ngql-guide/15.full-text-index-statements/1.search-with-text-based-index.md b/docs-2.0/3.ngql-guide/15.full-text-index-statements/1.search-with-text-based-index.md index 5f0746bce71..4c6c31c52c1 100644 --- a/docs-2.0/3.ngql-guide/15.full-text-index-statements/1.search-with-text-based-index.md +++ b/docs-2.0/3.ngql-guide/15.full-text-index-statements/1.search-with-text-based-index.md @@ -18,13 +18,13 @@ A natural language search interprets the search string as a phrase in natural hu ## Syntax -## Create full-text indexes +### Create full-text indexes ```ngql CREATE {TAG | EDGE} INDEX ON { | } ([]); ``` -## Show full-text indexes +### Show full-text indexes ```ngql SHOW FULLTEXT INDEXES; diff --git a/docs-2.0/3.ngql-guide/16.subgraph-and-path/1.get-subgraph.md b/docs-2.0/3.ngql-guide/16.subgraph-and-path/1.get-subgraph.md index e28db913a07..999aee5a1d8 100644 --- a/docs-2.0/3.ngql-guide/16.subgraph-and-path/1.get-subgraph.md +++ b/docs-2.0/3.ngql-guide/16.subgraph-and-path/1.get-subgraph.md @@ -90,7 +90,7 @@ To show the completeness of the subgraph, an additional hop is made on all verti - The returned path of `GET SUBGRAPH 1 STEPS FROM "A" IN follow;` is `B->A`. To show the completeness of the subgraph, an additional hop is made on all vertices that meet the conditions, namely `A->B`. -If you only query paths or vertices that meet the conditions, we suggest you use [MATCH](../7.general-query-statements/2.match.md) or [GO](../7.general-query-statements/3 .go.md). The example is as follows. 
+If you only query paths or vertices that meet the conditions, we suggest you use [MATCH](../7.general-query-statements/2.match.md) or [GO](../7.general-query-statements/3.go.md). The example is as follows. ```ngql nebula> match p= (v:player) -- (v2) where id(v)=="A" return p; diff --git a/docs-2.0/3.ngql-guide/18.operation-and-maintenance-statements/4.job-statements.md b/docs-2.0/3.ngql-guide/18.operation-and-maintenance-statements/4.job-statements.md index 060b5b79dd6..136b1367d0b 100644 --- a/docs-2.0/3.ngql-guide/18.operation-and-maintenance-statements/4.job-statements.md +++ b/docs-2.0/3.ngql-guide/18.operation-and-maintenance-statements/4.job-statements.md @@ -132,7 +132,7 @@ nebula> SHOW JOBS; +--------+----------------------+------------+-------------------------+-------------------------+ ``` -### STOP JOB +## STOP JOB The `STOP JOB` statement stops jobs that are not finished. @@ -147,7 +147,7 @@ nebula> STOP JOB 22; +---------------+ ``` -### RECOVER JOB +## RECOVER JOB The `RECOVER JOB` statement re-executes the failed jobs and returns the number of recovered jobs. diff --git a/docs-2.0/3.ngql-guide/3.data-types/1.numeric.md b/docs-2.0/3.ngql-guide/3.data-types/1.numeric.md index d9c76019c66..9c00669b4a6 100644 --- a/docs-2.0/3.ngql-guide/3.data-types/1.numeric.md +++ b/docs-2.0/3.ngql-guide/3.data-types/1.numeric.md @@ -41,7 +41,7 @@ When writing and reading different types of data, nGQL complies with the followi | FLOAT | Not supported | Supported | DOUBLE | | DOUBLE | Not supported | Supported | DOUBLE | -For example, nGQL does not support setting [VID](../../1.introduction/3.vid.md) as INT8, but supports setting a certain property type of [TAG](../10.tag-statements/1.create- tag.md) or [Edge type](../11.edge-type-statements/1.create-edge.md) as INT8. When using the nGQL statement to read the property of INT8, the resulted type is INT64. +For example, nGQL does not support setting [VID](../../1.introduction/3.vid.md) as INT8, but supports setting a certain property type of [TAG](../10.tag-statements/1.create-tag.md) or [Edge type](../11.edge-type-statements/1.create-edge.md) as INT8. When using the nGQL statement to read the property of INT8, the resulted type is INT64. Multiple formats are supported: diff --git a/docs-2.0/3.ngql-guide/3.data-types/3.string.md b/docs-2.0/3.ngql-guide/3.data-types/3.string.md index 43a8652e316..68e57c7672c 100644 --- a/docs-2.0/3.ngql-guide/3.data-types/3.string.md +++ b/docs-2.0/3.ngql-guide/3.data-types/3.string.md @@ -2,7 +2,7 @@ Fixed-length strings and variable-length strings are supported. -## String types +## Declaration and literal representation The string type is declared with the keywords of: @@ -11,7 +11,7 @@ The string type is declared with the keywords of: A string type is used to store a sequence of characters (text). The literal constant is a sequence of characters of any length surrounded by double or single quotes. For example, `"Hello, Cooper"` or `'Hello, Cooper'`. -## String types +## String reading and writing Nebula Graph supports using string types in the following ways: @@ -38,7 +38,7 @@ When the fixed-length string you try to write exceeds the length limit: - If the fixed-length string is a property, the writing will succeed, and Nebula Graph will truncate the string and only store the part that meets the length limit. - If the fixed-length string is a VID, the writing will fail and Nebula Graph will return an error. -## Escape Characters +## Escape characters Line breaks are not allowed in a string. 
Escape characters are supported within strings, for example: @@ -46,7 +46,7 @@ Line breaks are not allowed in a string. Escape characters are supported within - `"\110ello world"` -## OpenCypher Compatibility +## OpenCypher compatibility There are some tiny differences between openCypher and Cypher, as well as nGQL. The following is what openCypher requires. Single quotes cannot be converted to double quotes. diff --git a/docs-2.0/3.ngql-guide/3.data-types/6.list.md b/docs-2.0/3.ngql-guide/3.data-types/6.list.md index dc5a72899b5..750c826c95e 100644 --- a/docs-2.0/3.ngql-guide/3.data-types/6.list.md +++ b/docs-2.0/3.ngql-guide/3.data-types/6.list.md @@ -216,7 +216,7 @@ nebula> MATCH p = (n:player{name:"Tim Duncan"})-[:follow]->(m) \ ``` - A composite data type (i.e., set, map, and list) **CAN NOT** be stored as properties for vertices or edges. - - + It is recommended to modify the graph modeling method. The composite data type should be modeled as an adjacent edge of a vertex, rather than its property. Each adjacent edge can be dynamically added or deleted. The rank values of the adjacent edges can be used for sequencing. + + It is recommended to modify the graph modeling method. The composite data type should be modeled as an adjacent edge of a vertex, rather than its property. Each adjacent edge can be dynamically added or deleted. The rank values of the adjacent edges can be used for sequencing. - Patterns are not supported in the list. For example, `[(src)-[]->(m) | m.name]`. diff --git a/docs-2.0/3.ngql-guide/5.operators/9.precedence.md b/docs-2.0/3.ngql-guide/5.operators/9.precedence.md index 7b73ec9fef4..fc65f6838be 100644 --- a/docs-2.0/3.ngql-guide/5.operators/9.precedence.md +++ b/docs-2.0/3.ngql-guide/5.operators/9.precedence.md @@ -2,8 +2,7 @@ The following list shows the precedence of nGQL operators in descending order. Operators that are shown together on a line have the same precedence. - -- `-`(negative number) +- `-` (negative number) - `!`, `NOT` - `*`, `/`, `%` - `-`, `+` diff --git a/docs-2.0/3.ngql-guide/7.general-query-statements/2.match.md b/docs-2.0/3.ngql-guide/7.general-query-statements/2.match.md index 339cdef15d2..4e19064d9b9 100644 --- a/docs-2.0/3.ngql-guide/7.general-query-statements/2.match.md +++ b/docs-2.0/3.ngql-guide/7.general-query-statements/2.match.md @@ -183,7 +183,7 @@ nebula> MATCH (v:player { name: 'Tim Duncan' })--(v2) \ You can use the `--` symbol to represent edges of both directions and match vertices connected by these edges. -!!! Legacy version compatibility +!!! compatibility "Legacy version compatibility" In nGQL 1.x, the `--` symbol is used for inline comments. Starting from nGQL 2.x, the `--` symbol represents an incoming or outgoing edge. @@ -284,7 +284,7 @@ nebula> MATCH p=(v:player{name:"Tim Duncan"})-->(v2) \ +-------------------------------------------+ ``` -!!! note "OpenCypher compatibility" +!!! compatibility "OpenCypher compatibility" In nGQL, the `@` symbol represents the rank of an edge, but openCypher has no such concept. @@ -426,7 +426,7 @@ You can use the `:*[minHop]..` pattern to match variable-leng |`minHop`|Optional. It represents the minimum length of the path. `minHop` must be a non-negative integer. The default value is 1.| |`maxHop`|Required. It represents the maximum length of the path. `maxHop` must be a non-negative integer. It has no default value.| -!!! note "OpenCypher compatibility" +!!! compatibility "OpenCypher compatibility" In openCypher, `maxHop` is optional and defaults to infinity. 
When no bounds are given, `..` can be omitted. However, in nGQL, `maxHop` is required. And `..` cannot be omitted. diff --git a/docs-2.0/4.deployment-and-installation/1.resource-preparations.md b/docs-2.0/4.deployment-and-installation/1.resource-preparations.md index b7b06923256..42e2e5a74b6 100644 --- a/docs-2.0/4.deployment-and-installation/1.resource-preparations.md +++ b/docs-2.0/4.deployment-and-installation/1.resource-preparations.md @@ -116,39 +116,39 @@ This section guides you through the downloading and installation of software req 1. Clone the `nebula-common` repository to your host. - ```bash - $ git clone -b {{common.release}} https://github.com/vesoft-inc/nebula-common.git - ``` - - Users can use the `--branch` or `-b` option to specify the branch to be cloned. For example, for {{ nebula.release }}, run the following command. - - ```bash - $ git clone --branch v{{ nebula.release }} https://github.com/vesoft-inc/nebula-common.git - ``` + ```bash + $ git clone -b {{common.release}} https://github.com/vesoft-inc/nebula-common.git + ``` + + Users can use the `--branch` or `-b` option to specify the branch to be cloned. For example, for {{ nebula.release }}, run the following command. + + ```bash + $ git clone --branch v{{ nebula.release }} https://github.com/vesoft-inc/nebula-common.git + ``` 2. Make `nebula-common` the current working directory. - ```bash - $ cd nebula-common - ``` + ```bash + $ cd nebula-common + ``` 3. Run the following commands to install and enable CMake and GCC. - ```bash - // Install CMake. - $ ./third-party/install-cmake.sh cmake-install - - // Enable CMake. - $ source cmake-install/bin/enable-cmake.sh - - // Authorize the write privilege to the opt directory. - $ sudo mkdir /opt/vesoft && sudo chmod -R a+w /opt/vesoft - - // Install GCC. Installing GCC to the opt directory requires the write privilege. And users can change it to other locations. - $ ./third-party/install-gcc.sh --prefix=/opt - - // Enable GCC. - $ source /opt/vesoft/toolset/gcc/7.5.0/enable + ```bash + // Install CMake. + $ ./third-party/install-cmake.sh cmake-install + + // Enable CMake. + $ source cmake-install/bin/enable-cmake.sh + + // Authorize the write privilege to the opt directory. + $ sudo mkdir /opt/vesoft && sudo chmod -R a+w /opt/vesoft + + // Install GCC. Installing GCC to the opt directory requires the write privilege. And users can change it to other locations. + $ ./third-party/install-gcc.sh --prefix=/opt + + // Enable GCC. + $ source /opt/vesoft/toolset/gcc/7.5.0/enable ``` 3. Execute the script `install-third-party.sh`. diff --git a/docs-2.0/4.deployment-and-installation/2.compile-and-install-nebula-graph/3.deploy-nebula-graph-with-docker-compose.md b/docs-2.0/4.deployment-and-installation/2.compile-and-install-nebula-graph/3.deploy-nebula-graph-with-docker-compose.md index 38e8b2cbd70..f449baafd0d 100644 --- a/docs-2.0/4.deployment-and-installation/2.compile-and-install-nebula-graph/3.deploy-nebula-graph-with-docker-compose.md +++ b/docs-2.0/4.deployment-and-installation/2.compile-and-install-nebula-graph/3.deploy-nebula-graph-with-docker-compose.md @@ -57,7 +57,7 @@ Using Docker Compose can quickly deploy Nebula Graph services based on the prepa !!! Note - For more information of the preceding services, see [Nebula Graph architecture](../1.introduction/3.nebula-graph-architecture/1.architecture-overview.md). + For more information of the preceding services, see [Nebula Graph architecture](../../1.introduction/3.nebula-graph-architecture/1.architecture-overview.md). 
4. Connect to Nebula Graph. @@ -88,7 +88,7 @@ Using Docker Compose can quickly deploy Nebula Graph services based on the prepa !!! Note - By default, the authentication is off, you can only log in with an existing username (the default is `root`) and any password. To turn it on, see [Enable authentication](../7.data-security/1.authentication/1.authentication.md). + By default, the authentication is off, you can only log in with an existing username (the default is `root`) and any password. To turn it on, see [Enable authentication](../../7.data-security/1.authentication/1.authentication.md). 3. Run the `SHOW HOSTS` statement to check the status of the `nebula-storaged` processes. @@ -271,6 +271,6 @@ The data format has been modified on Jan 27, 2021, and is incompatible with the ## Related documents -- [Install and deploy Nebula Graph with the source code](../4.deployment-and-installation/2.compile-and-install-nebula-graph/1.install-nebula-graph-by-compiling-the-source-code.md) -- [Install Nebula Graph by RPM or DEB](../4.deployment-and-installation/2.compile-and-install-nebula-graph/2.install-nebula-graph-by-rpm-or-deb.md) -- [Connect to Nebula Graph](3.connect-to-nebula-graph.md) +- [Install and deploy Nebula Graph with the source code](1.install-nebula-graph-by-compiling-the-source-code.md) +- [Install Nebula Graph by RPM or DEB](2.install-nebula-graph-by-rpm-or-deb.md) +- [Connect to Nebula Graph](../connect-to-nebula-graph.md) diff --git a/docs-2.0/4.deployment-and-installation/2.compile-and-install-nebula-graph/deploy-nebula-graph-cluster.md b/docs-2.0/4.deployment-and-installation/2.compile-and-install-nebula-graph/deploy-nebula-graph-cluster.md index 02fb5f4ddc9..b9ee3120c33 100644 --- a/docs-2.0/4.deployment-and-installation/2.compile-and-install-nebula-graph/deploy-nebula-graph-cluster.md +++ b/docs-2.0/4.deployment-and-installation/2.compile-and-install-nebula-graph/deploy-nebula-graph-cluster.md @@ -276,7 +276,7 @@ sudo /usr/local/nebula/scripts/nebula.service start - When the graphd process, the storaged process, and the metad process are all started, you can use `all` instead. - - `/usr/local/nebula` is the default installation path for Nebula Graph. Use the actual path if you have customized the path. For more information about how to start and stop the services, see [Manage Nebula Graph services](../2.quick-start/5.start-stop-service.md). + - `/usr/local/nebula` is the default installation path for Nebula Graph. Use the actual path if you have customized the path. For more information about how to start and stop the services, see [Manage Nebula Graph services](../manage-service.md). ### Step 4: Check the cluster status diff --git a/docs-2.0/4.deployment-and-installation/3.upgrade-nebula-graph/upgrade-nebula-from-200-to-250.md b/docs-2.0/4.deployment-and-installation/3.upgrade-nebula-graph/upgrade-nebula-from-200-to-250.md index 0d67fc9d4aa..0bb51272f22 100644 --- a/docs-2.0/4.deployment-and-installation/3.upgrade-nebula-graph/upgrade-nebula-from-200-to-250.md +++ b/docs-2.0/4.deployment-and-installation/3.upgrade-nebula-graph/upgrade-nebula-from-200-to-250.md @@ -1,48 +1,49 @@ # Upgrade Nebula Graph v2.0.x to v{{nebula.release}} -To upgrade Nebula Graph v2.0.x to v{{nebula.release}}, you only need to use the RPM/DEB package of v{{nebula.release}} for the upgrade operation, or [compile v{{nebula.release} }](../2.compile-and-install-nebula-graph/1.install-nebula-graph-by-compiling-the-source-code.md) and then reinstall. 
+To upgrade Nebula Graph v2.0.x to v{{nebula.release}}, you only need to use the RPM/DEB package of v{{nebula.release}} for the upgrade, or [compile it](../2.compile-and-install-nebula-graph/1.install-nebula-graph-by-compiling-the-source-code.md) and then reinstall. !!! note - Nebula Graph v2.0.x refers to v2.0.0-GA and v2.0.1 versions. If the Nebula Graph version is too low (v2.0.0-RC, v2.0.0-beta, v1.x), please refer to [Upgrade Nebula Graph Historical Version to v{{nebula.release}}](upgrade-nebula-graph-to -250.md). + Nebula Graph v2.0.x refers to v2.0.0-GA and v2.0.1 releases. If your Nebula Graph version is too low (v2.0.0-RC, v2.0.0-beta, v1.x), see [Upgrade Nebula Graph to v{{nebula.release}}](upgrade-nebula-graph-to-250.md). -## RPM/DEB package upgrade steps +## Upgrade steps with RPM/DEB packages -1. Download [RPM/DEB package](https://github.com/vesoft-inc/nebula-graph/releases/tag/v{{nebula.release}}). +1. Download the [RPM/DEB package](https://github.com/vesoft-inc/nebula-graph/releases/tag/v{{nebula.release}}). -2. Stop all Nebula Graph services. For details, please refer to [Manage Nebula Graph Service](../../2.quick-start/5.start-stop-service.md). It is recommended to back up the configuration file before updating. +2. Stop all Nebula Graph services. For details, see [Manage Nebula Graph Service](../../2.quick-start/5.start-stop-service.md). It is recommended to back up the configuration file before updating. 3. Execute the following command to upgrade: - -RPM package + - RPM package ```bash $ sudo rpm -Uvh ``` - - If you specify the path during installation, you also need to specify the path during upgrade - + + If you specify the path during installation, you also need to specify the path during upgrade. + ```bash $ sudo rpm -Uvh --prefix= ``` - -DEB package + + - DEB package ```bash $ sudo dpkg -i ``` -4. Start the required services on each server. For details, please refer to [Manage Nebula Graph Service](../../2.quick-start/5.start-stop-service.md#_1). +4. Start the required services on each server. For details, see [Manage Nebula Graph Service](../../2.quick-start/5.start-stop-service.md#_1). -## Compile the new version source code upgrade steps +## Upgrade steps by compiling the new source code 1. Back up the old version of the configuration file. The configuration file is saved in the `etc` directory of the Nebula Graph installation path. -2. Update the warehouse and compile the source code. For details, please refer to [Install Nebula Graph Using Source Code](../2.compile-and-install-nebula-graph/1.install-nebula-graph-by-compiling-the-source-code.md). +2. Update the repository and compile the source code. For details, see [Install Nebula Graph by compiling the source code](../2.compile-and-install-nebula-graph/1.install-nebula-graph-by-compiling-the-source-code.md). !!! note - When compiling, pay attention to setting the installation path, which is consistent with the installation path of the old version. + When compiling, set the installation path, which is the same as the installation path of the old version. -## Docker Compose deployment upgrade steps +## Upgrade steps by deploying Docker Compose -Please refer to [How to update the Docker image of Nebula Graph service](../2.compile-and-install-nebula-graph/3.deploy-nebula-graph-with-docker-compose.md#nebula_graphdocker). \ No newline at end of file +See [How to update the Docker image of Nebula Graph services](../2.compile-and-install-nebula-graph/3.deploy-nebula-graph-with-docker-compose.md#nebula_graphdocker). 
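As a compact illustration of the RPM upgrade path documented above, the sketch below strings the documented steps together. It assumes the default installation path `/usr/local/nebula` and uses a placeholder package file name; adjust both to the actual deployment.

```bash
# Back up the old configuration files before upgrading (recommended above).
cp -r /usr/local/nebula/etc /tmp/nebula-etc-backup

# Stop all Nebula Graph services and wait for the processes to exit.
sudo /usr/local/nebula/scripts/nebula.service stop all

# Upgrade in place. Keep --prefix only if a custom path was specified at install time.
sudo rpm -Uvh --prefix=/usr/local/nebula <nebula-graph-package>.rpm

# Start the services again and check their status.
sudo /usr/local/nebula/scripts/nebula.service start all
sudo /usr/local/nebula/scripts/nebula.service status all
```

For a DEB-based installation, `sudo dpkg -i <package name>` replaces the `rpm -Uvh` step, as described above.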
diff --git a/docs-2.0/4.deployment-and-installation/3.upgrade-nebula-graph/upgrade-nebula-graph-to-250.md b/docs-2.0/4.deployment-and-installation/3.upgrade-nebula-graph/upgrade-nebula-graph-to-250.md index fda3d8ff8a1..91b30602678 100644 --- a/docs-2.0/4.deployment-and-installation/3.upgrade-nebula-graph/upgrade-nebula-graph-to-250.md +++ b/docs-2.0/4.deployment-and-installation/3.upgrade-nebula-graph/upgrade-nebula-graph-to-250.md @@ -1,6 +1,10 @@ -# Upgrade Nebula Graph to v2.0.0 +# Upgrade Nebula Graph to v{{nebula.release}} -This topic describes how to upgrade Nebula Graph to v2.0.0. +The legacy versions of Nebula Graph refer to the versions lower than Nebula Graph v2.0.0-GA. This topic describes how to upgrade Nebula Graph to v{{nebula.release}}. + +!!! note + + To upgrade Nebula Graph v2.0.0-GA or later versions to v{{nebula.release}}, see [Nebula Graph v2.0.x to v{{nebula.release}}](upgrade-nebula-from-200-to-250.md). ## Limitations @@ -8,15 +12,11 @@ This topic describes how to upgrade Nebula Graph to v2.0.0. * There is no upgrade script. You have to manually upgrade each server in the cluster. -* Supported versions: - * From Nebula Graph [v1.2.0](https://github.com/vesoft-inc/nebula/releases/tag/v1.2.0) to [Nebula Graph v2.0.0](https://github.com/vesoft-inc/nebula-graph/releases/tag/v2.0.0). - * From Nebula Graph [v2.0.0-RC1](https://github.com/vesoft-inc/nebula-graph/releases/tag/v2.0.0-rc1) to Nebula Graph 2.0.0. - -* This topic does not apply to scenarios where Nebula Graph is deployed with Docker, including Docker Swarm, Docker Compose, and Kubernetes. +* This topic does not apply to scenarios where Nebula Graph is deployed with Docker, including Docker Swarm, Docker Compose, and K8s. * You must upgrade the old Nebula Graph services on the same machines they are deployed. **DO NOT** change the IP addresses, configuration files of the machines, and **DO NOT** change the cluster topology. -* The hard disk space of each machine should be three times as much as the space taken by the original data directories. +* The hard disk space of each machine should be **three times** as much as the space taken by the original data directories. * Known issues that could cause data loss are listed on [GitHub known issues](https://github.com/vesoft-inc/nebula-graph/issues/857). The issues are all related to altering schema or default values. @@ -34,11 +34,9 @@ This topic describes how to upgrade Nebula Graph to v2.0.0. By default, old versions of Nebula Graph are installed in `/usr/local/nebula/`, hereinafter referred to as `${nebula-old}`. The default configuration file path is `${nebula-old}/etc/`. -The data of the old Nebula Graph are stored by the Storage Service and the Meta Service. You can find the data paths as follows. - -* Storage data path is defined by the `--data_path` option in the `${nebula-old}/etc/nebula-storaged.conf` file. The default path is `data/storage`. +* Storaged data path is defined by the `--data_path` option in the `${nebula-old}/etc/nebula-storaged.conf` file. The default path is `data/storage`. -* Meta data path is defined by the `--data_path` option in the `${nebula-old}/etc/nebula-metad.conf` file. The default path is `data/meta`. +* Metad data path is defined by the `--data_path` option in the `${nebula-old}/etc/nebula-metad.conf` file. The default path is `data/meta`. !!! 
note @@ -46,280 +44,271 @@ The data of the old Nebula Graph are stored by the Storage Service and the Meta ### New installation path -`${nebula-new}` represents the installation path of the new Nebula Graph version. An example for `${nebula-new}` is `/usr/local/nebula-new/`. +`${nebula-new}` represents the installation path of the new Nebula Graph version, such as `/usr/local/nebula-new/`. + +``` +# mkdir -p ${nebula-new} +``` -## Steps +## Upgrade steps -1. Stop all client connections. You can run the following commands on each Graph server to turn off the Graph Service and avoid dirty write. +1. **Stop all client connections**. You can run the following commands on each Graph server to turn off the Graph Service and avoid dirty write. - ```bash - > ${nebula-old}/scripts/nebula.service stop graphd - [INFO] Stopping nebula-graphd... - [INFO] Done - ``` + ``` + # ${nebula-old}/scripts/nebula.service stop graphd + [INFO] Stopping nebula-graphd... + [INFO] Done + ``` 2. Run the following commands to stop all services of the old version Nebula Graph. - ```bash - > ${nebula-old}/scripts/nebula.service stop all - [INFO] Stopping nebula-metad... - [INFO] Done - [INFO] Stopping nebula-graphd... - [INFO] Done - [INFO] Stopping nebula-storaged... - [INFO] Done - ``` + ``` + # ${nebula-old}/scripts/nebula.service stop all + [INFO] Stopping nebula-metad... + [INFO] Done + [INFO] Stopping nebula-graphd... + [INFO] Done + [INFO] Stopping nebula-storaged... + [INFO] Done + ``` - The Storage Service needs about 1 minute to flush data. Wait 1 minute and then run `ps -ef | grep nebula` to check and make sure that all the Nebula Graph services are stopped. + The `storaged` process needs about 1 minute to flush data. Wait 1 minute and then run `ps -ef | grep nebula` to check and make sure that all the Nebula Graph services are stopped. - !!! note + !!! Note - If the services are not fully stopped in 20 minutes, stop upgrading and go to the [Nebula Graph community](https://discuss.nebula-graph.io/) for help. + If the services are not fully stopped in 20 minutes, **stop upgrading** and go to the [Nebula Graph community](https://discuss.nebula-graph.io/) for help. 3. Install the new version of Nebula Graph on each machine. - * To install with RPM/DEB packages, run the following command. For detailed steps, see [Install Nebula Graph with RPM or DEB package](2.compile-and-install-nebula-graph/2.install-nebula-graph-by-rpm-or-deb.md). - - ```bash - > sudo rpm --force -i --prefix=${nebula-new} ${nebula-package-name.rpm} # for CentOS/RedHat - > sudo dpkg -i --instdir==${nebula-new} ${nebula-package-name.deb} # for Ubuntu - ``` - - * To install with the source code, follow the substeps. For detailed steps, see [Install Nebula Graph by compiling the source code](2.compile-and-install-nebula-graph/1.install-nebula-graph-by-compiling-the-source-code.md) + 1. Install the new binary file. - 1. Clone the source code. + - To install with RPM/DEB packages, download the installation package of the corresponding operating system from [release page](https://github.com/vesoft-inc/nebula-graph/releases). - ```bash - > git clone --branch v2.0.0 https://github.com/vesoft-inc/nebula-graph.git - ``` + ``` + # sudo rpm --force -i --prefix=${nebula-new} ${nebula-package-name.rpm} # for centos/redhat + # sudo dpkg -i --instdir==${nebula-new} ${nebula-package-name.deb} # for ubuntu + ``` - 2. Configure CMake. 
+ For detailed steps, see [Install Nebula Graph with RPM or DEB package](../2.compile-and-install-nebula-graph/2.install-nebula-graph-by-rpm-or-deb.md). - ```bash - > cmake -DCMAKE_INSTALL_PREFIX=${nebula-new} -DENABLE_BUILD_STORAGE=on -DENABLE_TESTING=OFF -DCMAKE_BUILD_TYPE=Release -DNEBULA_COMMON_REPO_TAG=v2.0.0 -DNEBULA_STORAGE_REPO_TAG=v2.0.0 .. - ``` + - To install with the source code, follow the substeps. For detailed steps, see [Install Nebula Graph by compiling the source code](../2.compile-and-install-nebula-graph/1.install-nebula-graph-by-compiling-the-source-code.md). Some key commands are as follows. -4. Copy the configuration files from the old path to the new path. + - Clone the source code. + + ``` + # git clone --branch v{{nebula.release}} https://github.com/vesoft-inc/nebula-graph.git + ``` - ```bash - > cp -rf ${nebula-old}/etc ${nebula-new}/ - ``` + - Configure CMake. -5. Follow the substeps to prepare the Meta servers (usually 3 of them in a cluster). + ``` + # cmake -DCMAKE_INSTALL_PREFIX=${nebula-new} -DENABLE_BUILD_STORAGE=on -DENABLE_TESTING=OFF -DCMAKE_BUILD_TYPE=Release -DNEBULA_COMMON_REPO_TAG=v{{nebula.release}} -DNEBULA_STORAGE_REPO_TAG=v{{nebula.release}} .. + ``` - !!! note + 2. Copy the configuration files from the old path to the new path. - You must make sure that this step is applied on every Meta server. + ``` + # cp -rf ${nebula-old}/etc ${nebula-new}/ + ``` - 1. Locate the old Meta [data path](#old-installation-path) and copy the data files to the new path. +4. Follow the substeps to prepare the Meta servers (usually 3 of them in a cluster). - ```bash - > mkdir -p ${nebula-new}/data/meta/ - > cp -r ${nebula-old}/data/meta/* ${nebula-new}/data/meta/ - ``` + - Locate the old Meta [data path](#old-installation-path) and copy the data files to the new path. - 2. Modify the new Meta configuration files: + Find the `--data_path` option in `${nebula-old}/etc/nebula-metad.conf`. The default value is `data/meta`. - ```bash - > vim ${nebula-new}/nebula-metad.conf - ``` + - If the legacy versions **has not changed** the `--data_path` item, run the following command to copy the meta data to the new directory. - [Optional] Add the following parameters in the Meta configuration files if you need them. + ``` + # mkdir -p ${nebula-new}/data/meta/ + # cp -r ${nebula-old}/data/meta/* ${nebula-new}/data/meta/ + ``` - * `--null_type=false`: Disables the support for using [`NULL`](../3.ngql-guide/3.data-types/5.null.md) as schema properties after the upgrade. The default value is `true`. When set to `false`, you must specify a [default value](../3.ngql-guide/10.tag-statements/1.create-tag.md) when altering tags or edge types, otherwise, data reading fails. - * `--string_index_limit=32`: Specifies the index length for string values as 32. The default length is 64. + - If the legacy versions change the default metad directory, copy it according to the actual directory. -6. Prepare the Storage configuration files on each Storage server. + - Modify the new Meta configuration files. - * If the old Storage data path is not the default setting `--data_path=data/storage`, Modify the Storage configuration file and change the value of `--data_path` as the new data path. + - Edit the new metad configuration file. - ```bash - > vim ${nebula-new}/nebula-storaged.conf - ``` + ``` + # vim ${nebula-new}/nebula-metad.conf + ``` - * Create the new Storage data directories. + - [Optional]Add the following parameters in the Meta configuration files if you need them. 
- ```bash - > mkdir -p ${nebula-new}/data/storage/ - ``` + `--null_type=false`: Disables the support for using [`NULL`](../../3.ngql-guide/3.data-types/5.null.md).**The default value is `true`**. When set to `false`, you must specify a [default value](../../3.ngql-guide/10.tag-statements/1.create-tag.md) when altering tags or edge types, otherwise, data reading fails. - !!! note + `--string_index_limit=32`: Specifies the [index length](../../3.ngql-guide/14.native-index-statements/1.create-native-index.md) for string values as 32. The default length is 64. - If the `--data_path` default value has been modified, create the Storage data directories according to the modification. + !!! Note -7. Start the new Meta Service. + You must make sure that this step is applied on every Meta server. - 1. Run the following command on each Meta server. +5. Prepare the Storage configuration files on each Storage server. - ```bash - $ sudo ${nebula-new}/scripts/nebula.service start metad - [INFO] Starting nebula-metad... - [INFO] Done - ``` + + [Optional]If the old Storage data path is not the default setting `--data_path=data/storage`, modify it. - 2. Check if every nebula-metad process is started normally. + ``` + # vim ${nebula-new}/nebula-storaged.conf + ``` + Change the value of `--data_path` as the new data path. - ```bash - $ ps -ef |grep nebula-metad - ``` + + Create the new Storage data directories. - 3. Check if there is any error information in the Meta logs in `${nebula-new}/logs`. If any nebula-metad process cannot start normally, stop upgrading, start the Nebula Graph services from the old directories, and take the error logs to the [Nebula Graph community](https://discuss.nebula-graph.io/) for help. + ``` + # mkdir -p ${nebula-new}/data/storage/ + ``` -8. Run the following commands to upgrade the Storage data format. + If the `--data_path` default value has been modified, create the Storage data directories according to the modification. - ```bash - $ sudo ${nebula-new}/bin/db_upgrader \ - --src_db_path= \ - --dst_db_path= \ - --upgrade_meta_server=:[,:,...] \ - --upgrade_version= \ - ``` +6. Start the new Meta Service. - The parameters are described as follows. + - Run the following command on each Meta server. - * `--src_db_path`: Specifies the absolute path of the **OLD** Storage data directories. Separate multiple paths with commas, without spaces. + ``` + # ${nebula-new}/scripts/nebula.service start metad + [INFO] Starting nebula-metad... + [INFO] Done + ``` - * `--dst_db_path`: Specifies the absolute path of the **NEW** Storage data directories. Separate multiple paths with commas, without spaces. The paths must correspond to the paths set in `--src_db_path` one by one. + - Check if every nebula-metad process is started normally. - !!! danger + ``` + # ps -ef |grep nebula-metad + ``` - Don't mix up the preceding two parameters, otherwise, the old data will be damaged during the upgrade. + - Check if there is any error information in the Meta logs in `${nebula-new}/logs`. + + !!! Note - * `--upgrade_meta_server`: Specifies the addresses of the new Meta servers that you started in step 7. + If any nebula-metad process cannot start normally, **stop upgrading, start the Nebula Graph services from the old directories**, and take the error logs to the [Nebula Graph community](https://discuss.nebula-graph.io/) for help. - * `--upgrade_version`: If the old Nebula Graph version is v1.2.0, set the parameter value to `1`. If the old version is v2.0.0-RC1, set the value to 2. +7. 
Run the following commands to upgrade the Storage data format. - !!! danger + ``` + # ${nebula-new}/bin/db_upgrader \ + --src_db_path= \ + --dst_db_path= \ + --upgrade_meta_server=:[,:,...] \ + --upgrade_version= \ + ``` - Don't set the value to other numbers. + The parameters are described as follows. - Example of upgrading from v1.2.0: + - `--src_db_path`: Specifies the absolute path of the **OLD** Storage data directories. Separate multiple paths with commas, without spaces. - ```bash - $ sudo /usr/local/nebula_new/bin/db_upgrader \ - --src_db_path=/usr/local/nebula/data/storage/data1/,/usr/local/nebula/data/storage/data2/ \ - --dst_db_path=/usr/local/nebula_new/data/storage/data1/,/usr/local/nebula_new/data/storage/data2/\ - --upgrade_meta_server=192.168.8.14:45500,192.168.8.15:45500,192.168.8.16:45500 \ - --upgrade_version=1 - ``` + - `--dst_db_path`: Specifies the absolute path of the **NEW** Storage data directories. Separate multiple paths with commas, without spaces. The paths must correspond to the paths set in `--src_db_path` one by one. - Example of upgrading from v2.0.0-RC1: + - `--upgrade_meta_server`: Specifies the addresses of the new Meta servers that you started in step 6. - ```bash - $ sudo /usr/local/nebula_new/bin/db_upgrader \ - --src_db_path=/usr/local/nebula/data/storage/ \ - --dst_db_path=/usr/local/nebula_new/data/storage/ \ - --upgrade_meta_server=192.168.8.14:9559,192.168.8.15:9559,192.168.8.16:9559 \ - --upgrade_version=2 - ``` + - `--upgrade_version`: If the old Nebula Graph version is v1.2.0, set the parameter value to `1`. If the old version is v2.0.0-RC1, set the value to `2`. Do not set the value to other numbers. - !!! note + !!! danger - Make sure that all the Storage servers have finished the upgrade. If anything goes wrong: + Do not mix up the order of `--src_db_path` and `--dst_db_path`. Otherwise, the old data will be damaged during the upgrade. - 1. Stop upgrading. - 2. Stop all the Meta servers. - 3. Start the Nebula Graph services from the old directories. - 4. Go to the [Nebula Graph community](https://discuss.nebula-graph.io/) for help. + For example, upgrade from v1.2.x: -9. Start the new Storage Service on each Storage server. + ``` + # /usr/local/nebula_new/bin/db_upgrader \ + --src_db_path=/usr/local/nebula/data/storage/data1/,/usr/local/nebula/data/storage/data2/ \ + --dst_db_path=/usr/local/nebula_new/data/storage/data1/,/usr/local/nebula_new/data/storage/data2/\ + --upgrade_meta_server=192.168.*.14:45500,192.168.*.15:45500,192.168.*.16:45500 \ + --upgrade_version=1 + ``` - ```bash - $ sudo ${nebula-new}/scripts/nebula.service start storaged - $ sudo ${nebula-new}/scripts/nebula.service status storaged - ``` + For example, upgrade from v2.0.0-RC1: - !!! note - If this step goes wrong on any server: + ``` + # /usr/local/nebula_new/bin/db_upgrader \ + --src_db_path=/usr/local/nebula/data/storage/ \ + --dst_db_path=/usr/local/nebula_new/data/storage/ \ + --upgrade_meta_server=192.168.*.14:9559,192.168.*.15:9559,192.168.*.16:9559 \ + --upgrade_version=2 + ``` - 1. Stop upgrading. - 2. Stop all the Meta servers and Storage servers. - 3. Start the Nebula Graph services from the old directories. - 4. Take the logs in `${nebula-new}/logs/` to the [Nebula Graph community](https://discuss.nebula-graph.io/) for help. + !!! Note + + - If anything goes wrong, **Stop upgrading, stop all the Meta servers, and start the Nebula Graph services from the old directories.** + - Make sure that all the Storage servers have finished the upgrade. -10. 
Start the new Graph Service on each Graph server. +8. Start the new Storage Service on each Storage server. - ```bash - $ sudo ${nebula-new}/scripts/nebula.service start graphd - $ sudo ${nebula-new}/scripts/nebula.service status graphd - ``` + ``` + # ${nebula-new}/scripts/nebula.service start storaged + # ${nebula-new}/scripts/nebula.service status storaged + ``` !!! note + If this step goes wrong on any server, Take the logs in `${nebula-new}/logs/` to the [Nebula Graph community](https://discuss.nebula-graph.io/) for help. **Stop upgrading. Stop all the Meta servers and Storage servers. Start the Nebula Graph services from the old directories.** - If this step goes wrong on any server: +9. Start the new Graph Service on each Graph server. - 1. Stop upgrading. - 2. Stop all the Meta servers, Storage servers, and Graph servers. - 3. Start the Nebula Graph services from the old directories. - 4. Take the logs in `${nebula-new}/logs/` to the [Nebula Graph community](https://discuss.nebula-graph.io/) for help. + ``` + # ${nebula-new}/scripts/nebula.service start graphd + # ${nebula-new}/scripts/nebula.service status graphd + ``` -11. Connect to Nebula Graph with the new version (v2.0.0 or later) of [Nebula Console](https://github.com/vesoft-inc/nebula-console). Verify if the Nebula Graph services are available and if the data can be accessed normally. - - The command for connection, including the IP address and port of the Graph Service, is the same as the old one. - - The following statements may help in this step. + !!! note - ```ngql - nebula> SHOW HOSTS; - nebula> SHOW SPACES; - nebula> USE - nebula> SHOW PARTS; - nebula> SUBMIT JOB STATS; - nebula> SHOW STATS; - ``` + If this step goes wrong on any server, take the logs in `${nebula-new}/logs/` to the [Nebula Graph community](https://discuss.nebula-graph.io/) for help. **Stop upgrading. Stop all the Meta servers, Storage servers, and Graph servers. Start the Nebula Graph services from the old directories.** - !!! danger +10. Connect to Nebula Graph with the new versions of [Nebula Console](https://github.com/vesoft-inc/nebula-console). Verify if the Nebula Graph services are available and if the data can be accessed normally. Make sure that the command parameters, including the IP address and port of the Graph Service, are the same as the old one. - Don't use Nebula Console versions prior to v2.0.0. + ```ngql + nebula> SHOW HOSTS; + nebula> SHOW SPACES; + nebula> USE + nebula> SHOW PARTS; + nebula> SUBMIT JOB STATS; + nebula> SHOW STATS; + ``` -12. Upgrade other Nebula Graph clients. + !!! Note - You must upgrade all other clients to corresponding v2.0.0 versions. The clients include but are not limited to the following ones. Find the v2.0.0 branch for each client. + The old releases of Nebula Console may have compatibility issues. - * [studio](https://github.com/vesoft-inc/nebula-docker-compose) - * [python](https://github.com/vesoft-inc/nebula-python) - * [java](https://github.com/vesoft-inc/nebula-java) - * [go](https://github.com/vesoft-inc/nebula-go) - * [c++](https://github.com/vesoft-inc/nebula-cpp) - * [flink-connector](https://github.com/vesoft-inc/nebula-flink-connector) - * [spark-util](https://github.com/vesoft-inc/nebula-spark-utils) - * [benchmark](https://github.com/vesoft-inc/nebula-bench) +11. Upgrade other Nebula Graph clients. - !!! note + You must upgrade all other clients to corresponding Nebula Graph v{{nebula.release}}. 
The clients include but are not limited to [Python](https://github.com/vesoft-inc/nebula-python), [Java](https://github.com/vesoft-inc/nebula-java), [go](https://github.com/vesoft-inc/nebula-go), [C++](https://github.com/vesoft-inc/nebula-cpp), [Flink-connector](https://github.com/vesoft-inc/nebula-flink-connector), [Spark-util](https://github.com/vesoft-inc/nebula-spark-utils), and [Nebula Bench](https://github.com/vesoft-inc/nebula-bench). Find the v{{nebula.release}} branch for each client. - + Communication protocols of the v2.0.0 versions are not compatible with that of the historical versions. To upgrade the clients, you must compile the v2.0.0 source code of the clients or download corresponding binaries. + !!! Note - + Tip for maintenance: The data path after the upgrade is `${nebula-new}/`. Modify relative paths for hard disk monitor systems or log ELK. + Communication protocols of v{{nebula.release}} are not compatible with that of the old releases. To upgrade the clients, compile the v{{nebula.release}} source code of the clients or download corresponding binaries. + + Tip for maintenance: The data path after the upgrade is `${nebula-new}/`. Modify relative paths for hard disk monitor systems, log, or ELK, etc. ## Upgrade failure and rollback If the upgrade fails, stop all Nebula Graph services of the new version, and start the services of the old version. -All Nebula Graph clients in use must be switched to the old version. +All Nebula Graph clients in use must be switched to the **old version**. ## Appendix 1: Test Environment The test environment for this topic is as follows: * Machine specifications: 32 CPU cores, 62 GB memory, and SSD. + * Data size: 100 GB of Nebula Graph 1.2.0 LDBC test data, with 1 graph space, 24 partitions, and 92 GB of data directory size. -* Concurrent configuration: - |Parameter|Default value|Applied value in the Tests| - |-|-|-| - |--max_concurrent|5|5| - |--max_concurrent_parts|10|24| - |--write_batch_num|100|100| +* Concurrent configuration: `--max_concurrent=5`, `--max_concurrent_parts=24`, and `--write_batch_num=100`. + +The upgrade cost **21 minutes** in all, including 13 minutes of compaction. The descriptions are as follows. -The upgrade cost 21 minutes in all, including 21 minutes of compaction. +|Parameter|Default value| +|:---|:---| +|`--max_concurrent`|5| +|`--max_concurrent_parts`|10| +|`--write_batch_num`|100| ## Appendix 2: Nebula Graph V2.0.0 code address and commit ID | Code address | Commit ID | |:---|:---| -| [Graph Service](https://github.com/vesoft-inc/nebula-graph/releases/tag/v2.0.0) | 7923a45 | -| [Storage and Meta Services](https://github.com/vesoft-inc/nebula-storage/tree/v2.0.0) | 761f22b | -| [Common](https://github.com/vesoft-inc/nebula-common/tree/v2.0.0) | b2512aa | +| [graphd](https://github.com/vesoft-inc/nebula-graph/releases/tag/v2.0.0) | 91639db | +| [storaged and metad](https://github.com/vesoft-inc/nebula-storage/tree/v2.0.0) | 761f22b | +| [common](https://github.com/vesoft-inc/nebula-common/tree/v2.0.0) | b2512aa | ## FAQ @@ -327,13 +316,9 @@ The upgrade cost 21 minutes in all, including 21 minutes of compaction. A: No. The state of the data written during this process is undefined. -### Can I upgrade other old versions except for v1.2.0 or v2.0.0-RC1 to v2.0.0? - -A: Upgrading from other old versions is not tested. Theoretically, versions between v1.0.0 and v1.2.0 could adopt the upgrade approach for v1.2.0. V2.x nightly versions cannot apply the solutions in this topic. 
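To make the client-upgrade step above more concrete, here is a sketch of what upgrading two of the listed clients might look like. The version number and the Maven coordinates are assumptions for illustration; check each client repository for the release that matches the upgraded server.

```bash
# Python client: install the release matching the server (the version number is a placeholder).
pip3 install --upgrade nebula2-python==2.5.0

# Java client: bump the dependency version in pom.xml, for example (coordinates assumed):
#   <dependency>
#       <groupId>com.vesoft</groupId>
#       <artifactId>client</artifactId>
#       <version>2.5.0</version>
#   </dependency>
```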
- -### How to upgrade clients after the server upgrade? +### Can I upgrade other old versions except for v1.2.x and v2.0.0-RC to v{{nebula.release}}? -A: See step 12 in this topic. +A: Upgrading from other old versions is not tested. Theoretically, versions between v1.0.0 and v1.2.0 could adopt the upgrade approach for v1.2.x. v2.0.0-RC nightly versions cannot apply the solutions in this topic. ### How to upgrade if a machine has only the Graph Service, but not the Storage Service? @@ -345,7 +330,7 @@ A: Try again with the sudo privileges. ### Is there any change in gflags? -A: Yes. For more information, see [known gflags changes](https://github.com/vesoft-inc/nebula-graph/issues/858). +A: Yes. For more information, see [github issues](https://github.com/vesoft-inc/nebula/issues/2501). ### What are the differences between deleting data then installing the new version and upgrading according to this topic? diff --git a/docs-2.0/4.deployment-and-installation/6.deploy-text-based-index/3.deploy-listener.md b/docs-2.0/4.deployment-and-installation/6.deploy-text-based-index/3.deploy-listener.md index c0c596657e9..8ae2d6b8c3c 100644 --- a/docs-2.0/4.deployment-and-installation/6.deploy-text-based-index/3.deploy-listener.md +++ b/docs-2.0/4.deployment-and-installation/6.deploy-text-based-index/3.deploy-listener.md @@ -74,9 +74,9 @@ ADD LISTENER ELASTICSEARCH [,, ...] Add all Listeners in one statement completely. -```ngql -nebula> ADD LISTENER ELASTICSEARCH 192.168.8.5:9789,192.168.8.6:9789; -``` + ```ngql + nebula> ADD LISTENER ELASTICSEARCH 192.168.8.5:9789,192.168.8.6:9789; + ``` ## Show Listeners diff --git a/docs-2.0/5.configurations-and-logs/1.configurations/2.meta-config.md b/docs-2.0/5.configurations-and-logs/1.configurations/2.meta-config.md index 1b969740942..06736d562b4 100644 --- a/docs-2.0/5.configurations-and-logs/1.configurations/2.meta-config.md +++ b/docs-2.0/5.configurations-and-logs/1.configurations/2.meta-config.md @@ -23,7 +23,7 @@ For all parameters and their current values, see [Configurations](1.configuratio | ----------- | ----------------------- | ---------------------------------------------------- | | `daemonize` | `true` | When set to `true`, the process is a daemon process. | | `pid_file` | `pids/nebula-metad.pid` | The file that records the process ID. | -| `timezone_name` | - | Specifies the Nebula Graph time zone. This parameter is not predefined in the initial configuration files. You can manually set it if you need it. The system default value is `UTC+00:00:00`. For the format of the parameter value, see [Specifying the Time Zone with TZ](https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html "Click to view the timezone-related content in the GNU C Library manual"). For example, `--timezone_name=CST-8` represents the GMT+8 time zone.| +| `timezone_name` | - | Specifies the Nebula Graph time zone. This parameter is not predefined in the initial configuration files. You can manually set it if you need it. The system default value is `UTC+00:00:00`. For the format of the parameter value, see [Specifying the Time Zone with TZ](https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html "Click to view the timezone-related content in the GNU C Library manual"). For example, `--timezone_name=CST-8` represents the GMT+8 time zone.| | `local_config` | `true` | When set to `true`, the process gets configurations from the configuration files. | |`minimum_reserved_bytes`|-|Specifies the minimum remaining space of each data storage path. 
When the value is lower than this standard, the cluster metadata operation may fail. This configuration is measured in bytes. The default value is `1073741824`, namely, 1GB.| diff --git a/docs-2.0/5.configurations-and-logs/1.configurations/3.graph-config.md b/docs-2.0/5.configurations-and-logs/1.configurations/3.graph-config.md index c7814ffca0d..79d33fa6473 100644 --- a/docs-2.0/5.configurations-and-logs/1.configurations/3.graph-config.md +++ b/docs-2.0/5.configurations-and-logs/1.configurations/3.graph-config.md @@ -25,7 +25,7 @@ For all parameters and their current values, see [Configurations](1.configuratio | `pid_file` | `pids/nebula-graphd.pid`| The file that records the process ID. | |`enable_optimizer` |`true` | When set to `true`, the optimizer is enabled. | | `system_memory_high_watermark_ratio` | - | Specifies the trigger threshold of the high-level memory alarm mechanism. The default value is `0.8`. If the system memory usage is higher than this value, an alarm mechanism will be triggered, and Nebula Graph will stop querying. This parameter is not predefined in the initial configuration files. | -| `timezone_name` | - | Specifies the Nebula Graph time zone. This parameter is not predefined in the initial configuration files. The system default value is `UTC+00:00:00`. For the format of the parameter value, see [Specifying the Time Zone with TZ](https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html "Click to view the timezone-related content in the GNU C Library manual"). For example, `--timezone_name=CST-8` represents the GMT+8 time zone. | +| `timezone_name` | - | Specifies the Nebula Graph time zone. This parameter is not predefined in the initial configuration files. The system default value is `UTC+00:00:00`. For the format of the parameter value, see [Specifying the Time Zone with TZ](https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html "Click to view the timezone-related content in the GNU C Library manual"). For example, `--timezone_name=UTC+08:00` represents the GMT+8 time zone. | | `local_config` | `true` | When set to `true`, the process gets configurations from the configuration files. | !!! note diff --git a/docs-2.0/5.configurations-and-logs/1.configurations/4.storage-config.md b/docs-2.0/5.configurations-and-logs/1.configurations/4.storage-config.md index 2ae39c33a69..747d2b3f108 100644 --- a/docs-2.0/5.configurations-and-logs/1.configurations/4.storage-config.md +++ b/docs-2.0/5.configurations-and-logs/1.configurations/4.storage-config.md @@ -27,7 +27,7 @@ For all parameters and their current values, see [Configurations](1.configuratio | :----------- | :----------------------- | :------------------| | `daemonize` | `true` | When set to `true`, the process is a daemon process. | | `pid_file` | `pids/nebula-storaged.pid` | The file that records the process ID. | -| `timezone_name` | - | Specifies the Nebula Graph time zone. This parameter is not predefined in the initial configuration files. The system default value is `UTC+00:00:00`. For the format of the parameter value, see [Specifying the Time Zone with TZ](https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html "Click to view the timezone-related content in the GNU C Library manual"). For example, `--timezone_name=CST-8` represents the GMT+8 time zone. | +| `timezone_name` | - | Specifies the Nebula Graph time zone. This parameter is not predefined in the initial configuration files. The system default value is `UTC+00:00:00`. 
For the format of the parameter value, see [Specifying the Time Zone with TZ](https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html "Click to view the timezone-related content in the GNU C Library manual"). For example, `--timezone_name=CST-8` represents the GMT+8 time zone. | | `local_config` | `true` | When set to `true`, the process gets configurations from the configuration files. | !!! note diff --git a/docs-2.0/6.monitor-and-metrics/1.query-performance-metrics.md b/docs-2.0/6.monitor-and-metrics/1.query-performance-metrics.md index 88cd04136ab..f8591e796b0 100644 --- a/docs-2.0/6.monitor-and-metrics/1.query-performance-metrics.md +++ b/docs-2.0/6.monitor-and-metrics/1.query-performance-metrics.md @@ -4,7 +4,7 @@ Nebula Graph supports querying the monitoring metrics through HTTP ports. ## Metrics -Each metric of Nebula Graph consists of three fields: name, type, and time range. The fields are separated by periods, for example, `num_queries.sum.600`. The detailed description is as follows. +Each metric of Nebula Graph consists of three fields: name, type, and time range. The fields are separated by periods, for example, `num_queries.sum.600`. Different Nebula Graph services (Graph, Storage, or Meta) support different metrics. The detailed description is as follows. |Field|Example|Description| |-|-|-| @@ -12,14 +12,12 @@ Each metric of Nebula Graph consists of three fields: name, type, and time range |Metric type|`sum`|Indicates how the metrics are collected. Supported types are SUM, COUNT, AVG, RATE, and the P-th sample quantiles such as P75, P95, P99, and P99.9.| |Time range|`600`|The time range in seconds for the metric collection. Supported values are 5, 60, 600, and 3600, representing the last 5 seconds, 1 minute, 10 minutes, and 1 hour.| -Different Nebula Graph services (Graph, Storage, or Meta) support different metrics, for more information, see Metric list (TODO: doc). - ## Query metrics over HTTP ### Syntax ```bash -$ curl -G "http://:/stats?stats=[&format=json]" +curl -G "http://:/stats?stats= [&format=json]" ``` |Parameter|Description| @@ -31,13 +29,13 @@ $ curl -G "http://:/stats?stats=[&format=json]" !!! note - If Nebula Graph is deployed with [Docker Compose](../2.quick-start/2.deploy-nebula-graph-with-docker-compose.md), run `docker-compose ps` to check the ports that are mapped from the service ports inside of the container and then query through them. + If Nebula Graph is deployed with [Docker Compose](..//4.deployment-and-installation/2.compile-and-install-nebula-graph/3.deploy-nebula-graph-with-docker-compose.md), run `docker-compose ps` to check the ports that are mapped from the service ports inside of the container and then query through them. -### Example +### Examples * Query a single metric - Query the query number in the last 10 minutes in the Graph Service. + Query the query number in the last 10 minutes in the Graph Service. ```bash $ curl -G "http://192.168.8.40:19669/stats?stats=num_queries.sum.600" @@ -46,9 +44,11 @@ $ curl -G "http://:/stats?stats=[&format=json]" * Query multiple metrics - Query the following metrics together: - * The average heartbeat latency in the last 1 minute. - * The average latency of the slowest 1% heartbeats, i.e., the P99 heartbeats, in the last 10 minutes. + Query the following metrics together: + + * The average heartbeat latency in the last 1 minute. + + * The average latency of the slowest 1% heartbeats, i.e., the P99 heartbeats, in the last 10 minutes. 
```bash $ curl -G "http://192.168.8.40:19559/stats?stats=heartbeat_latency_us.avg.60,heartbeat_latency_us.p99.600" @@ -58,7 +58,7 @@ $ curl -G "http://:/stats?stats=[&format=json]" * Return a JSON result. - Query the number of new vertices in the Storage Service in the last 10 minutes and return the result in the JSON format. + Query the number of new vertices in the Storage Service in the last 10 minutes and return the result in the JSON format. ```bash $ curl -G "http://192.168.8.40:19779/stats?stats=num_add_vertices.sum.600&format=json" @@ -67,7 +67,7 @@ $ curl -G "http://:/stats?stats=[&format=json]" * Query all metrics in a service. - If no metric is specified in the query, Nebula Graph returns all metrics in the service. + If no metric is specified in the query, Nebula Graph returns all metrics in the service. ```bash $ curl -G "http://192.168.8.40:19559/stats" diff --git a/docs-2.0/8.service-tuning/2.graph-modeling.md b/docs-2.0/8.service-tuning/2.graph-modeling.md index 2ba351eafd6..1ec0867b902 100644 --- a/docs-2.0/8.service-tuning/2.graph-modeling.md +++ b/docs-2.0/8.service-tuning/2.graph-modeling.md @@ -1,4 +1,4 @@ -## Graph data modeling suggestions +# Graph data modeling suggestions This section provides general suggestions for modeling data in Nebula Graph. @@ -6,7 +6,7 @@ This section provides general suggestions for modeling data in Nebula Graph. The following suggestions may not apply to some special scenarios. In these cases, find help in the [Nebula Graph community](https://discuss.nebula-graph.io/). -### Model for performance +## Model for performance There is no perfect method to model in Nebula Graph. Graph modeling depends on the questions that you want to know from the data. Your data drives your graph model. Graph data modeling is intuitive and convenient. Create your data model based on your business model. Test your model and gradually optimize it to fit your business. To get better performance, you can change or re-design your model multiple times. diff --git a/docs-2.0/8.service-tuning/3.system-design.md b/docs-2.0/8.service-tuning/3.system-design.md index 1d3e720b435..3235e973b03 100644 --- a/docs-2.0/8.service-tuning/3.system-design.md +++ b/docs-2.0/8.service-tuning/3.system-design.md @@ -22,7 +22,7 @@ Nebula Graph {{ nebula.release }} supports horizontal scaling. + Metad does not support horizontal scaling. -Vertical scaling usually has higher hardware costs, but relatively simple operations. Nebula Graph {{Nebula. Release}} can also be scaled vertically. +Vertical scaling usually has higher hardware costs, but relatively simple operations. Nebula Graph {{nebula.release}} can also be scaled vertically. ## Data transmission and optimization diff --git a/docs-2.0/README.md b/docs-2.0/README.md index 7bc84ad8168..cf2a52a3a28 100644 --- a/docs-2.0/README.md +++ b/docs-2.0/README.md @@ -1,6 +1,12 @@ # Welcome to Nebula Graph {{ nebula.release }} Documentation -!!! note "This manual is revised on {{ now().year }}-{{ now().month }}-{{ now().day }}, with [GitHub commit](https://github.com/vesoft-inc/nebula-docs) {{ git.short_commit }}." +!!! note "Check the manual version" + + This manual is revised on {{ now().year }}-{{ now().month }}-{{ now().day }}, with [GitHub commit](https://github.com/vesoft-inc/nebula-docs/commits/v{{nebula.release}}) {{ git.short_commit }}. + + Nebula Graph is a distributed, scalable, and lightning-fast graph database. 
It is the optimal solution in the world capable of hosting graphs with dozens of billions of vertices (nodes) and trillions of edges (relationships) with millisecond latency. @@ -9,7 +15,7 @@ Nebula Graph is a distributed, scalable, and lightning-fast graph database. It i * [What is Nebula Graph](1.introduction/1.what-is-nebula-graph.md) * [Quick start workflow](2.quick-start/1.quick-start-workflow.md) * [Configuration](4.deployment-and-installation/1.resource-preparations.md) -* [FAQ](19.FAQ/0.FAQ.md) +* [FAQ](20.appendix/0.FAQ.md) * [Ecosystem Tools](20.appendix/6.eco-tool-version.md) ## Other Sources @@ -19,9 +25,9 @@ Nebula Graph is a distributed, scalable, and lightning-fast graph database. It i - [Forum](https://discuss.nebula-graph.io/) - [Blog](https://nebula-graph.io/posts/) - [Video](https://www.youtube.com/channel/UC73V8q795eSEMxDX4Pvdwmw) -- [Chinese Docs](https://docs.nebula-graph.com.cn/master/) +- [Chinese Docs](https://docs.nebula-graph.com.cn/) -## Graphic Illustrations +## Symbols used in this manual diff --git a/docs-2.0/nebula-exchange/about-exchange/ex-ug-what-is-exchange.md b/docs-2.0/nebula-exchange/about-exchange/ex-ug-what-is-exchange.md index ac538432320..d572c29e180 100644 --- a/docs-2.0/nebula-exchange/about-exchange/ex-ug-what-is-exchange.md +++ b/docs-2.0/nebula-exchange/about-exchange/ex-ug-what-is-exchange.md @@ -55,7 +55,7 @@ Exchange {{exchange.release}} supports converting data from the following format - [Hive](../use-exchange/ex-ug-import-from-hive.md) - [MaxCompute](../use-exchange/ex-ug-import-from-maxcompute.md) -- Graph database: [Neo4j](../use-exchange/ex-ug-import-from-neo4j.md)(Client version 2.4.5-M1) +- Graph database: [Neo4j](../use-exchange/ex-ug-import-from-neo4j.md) (Client version 2.4.5-M1) - Relational database: [MySQL](../use-exchange/ex-ug-import-from-mysql.md) diff --git a/docs-2.0/nebula-exchange/ex-ug-FAQ.md b/docs-2.0/nebula-exchange/ex-ug-FAQ.md index d3463e312bc..08ea9ca11b4 100644 --- a/docs-2.0/nebula-exchange/ex-ug-FAQ.md +++ b/docs-2.0/nebula-exchange/ex-ug-FAQ.md @@ -39,7 +39,7 @@ nebula-exchange-2.0.0.jar \ Generally, the port configuration is incorrect. Check the port configuration of the Meta service, Graph service, and Storage service. -### Error: NoSuchMethod、MethodNotFound(`Exception in thread "main" java.lang.NoSuchMethodError`, etc) +### Error: NoSuchMethod, MethodNotFound (`Exception in thread "main" java.lang.NoSuchMethodError`, etc) Most errors are caused by JAR package conflicts or version conflicts. Check whether the version of the error reporting service is the same as that used in Exchange, especially Spark, Scala, and Hive. diff --git a/docs-2.0/nebula-exchange/nebula-exchange.md b/docs-2.0/nebula-exchange/nebula-exchange.md deleted file mode 100644 index fb3497487ad..00000000000 --- a/docs-2.0/nebula-exchange/nebula-exchange.md +++ /dev/null @@ -1,61 +0,0 @@ -# Nebula Exchange - -[Nebula Exchange](https://github.com/vesoft-inc/nebula-spark-utils/tree/v2.0.0/nebula-exchange) (hereinafter referred to as Exchange) is an Apache Spark™ application for migrating data into Nebula Graph from distributed systems. Exchange supports the migration of migrating batch data and stream data of different formats. 
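Since Exchange is described as an Apache Spark™ application and the FAQ above refers to `nebula-exchange-2.0.0.jar`, a typical job submission might look like the sketch below. The master URL, configuration file path, and main class are assumptions for illustration; verify them against the Exchange release actually in use.

```bash
# Submit an Exchange import job to Spark; paths and the master URL are placeholders.
spark-submit \
    --master "local" \
    --class com.vesoft.nebula.exchange.Exchange \
    nebula-exchange-2.0.0.jar \
    -c /path/to/csv_application.conf
```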
- - - -## Use cases - -Exchange applies to transforming the following data into vertices and edges in [Nebula Graph](../1.introduction/1.what-is-nebula-graph.md): - -* Stream data stored in Kafka or Pulsar, including Logs, online shopping records, online game player activities, social network information, financial trading data, and geospatial service data. -* Telemeasuring data recorded by equipment connected to IDCs. -* Batch data stored in relational databases such as MySQL or distributed file systems such as HDFS. - -## Benefits - -* Adaptable. Exchange supports importing data with many different formats and sources into the Nebula Graph for easy data migration. - -* Supports SST import. Exchange can transform data from different sources into SST files for importing. - - !!! note - - SST import is only supported on Linux. - -* Supports breakpoint continuous transmission. To save time and improve efficiency, Exchange can continue the data transmission after the transmission is stopped. - - !!! note - - For now, breakpoint continuous transmission is only supported when importing Neo4j data. - -* Asynchronous operations. Exchange generates a writing statement and then sends it to the Graph Service for data insertion. - -* Flexible. Exchange supports importing data with multiple tags and edge types that originated from different data formats or sources. - -* Supports statistics. Exchange uses Apache Spark™ Accumulators to make statistics for successful and failed insertion operations. - -* Easy to use. Exchange applies the Human-Optimized Config Object Notation (HOCON) format for configuration files. HOCON is object-oriented and easy to understand and use. - -## Data formats and origins - -Exchange 2.0 can migrate data with the following formats or origins. - -* Data stored in HDFS, including: - - Apache Parquet - - Apache ORC - - JSON - - CSV - -* Apache HBase™ - -* Data warehouse: Hive - -* Graph database: Neo4j - -* Relational database: MySQL - -* Event streaming platform: Apache Kafka® - -* Message publishing/subscribing platform: Apache Pulsar 2.4.5 diff --git a/docs-2.0/nebula-exchange/parameter-reference/ex-ug-parameter.md b/docs-2.0/nebula-exchange/parameter-reference/ex-ug-parameter.md index ee8e3c1caf8..f84b2ad6877 100644 --- a/docs-2.0/nebula-exchange/parameter-reference/ex-ug-parameter.md +++ b/docs-2.0/nebula-exchange/parameter-reference/ex-ug-parameter.md @@ -2,13 +2,13 @@ This document describes how to configure the file [`application.conf`](https://github.com/vesoft-inc/nebula-spark-utils/blob/master/nebula-exchange/src/main/resources/application.conf) when users use Nebula Exchange. -Before configuring the `application.conf` file, it is recommended to copy the file name `application.conf` and then edit the file name according to the file type of a data source. For example,change the file name to `csv_application.conf` if the file type of the data source is CSV. +Before configuring the `application.conf` file, it is recommended to copy the file name `application.conf` and then edit the file name according to the file type of a data source. For example, change the file name to `csv_application.conf` if the file type of the data source is CSV. The `application.conf` file contains the following content types: - Spark Configurations -- Hive Configurations(optional) +- Hive Configurations (optional) - Nebula Graph Configurations @@ -24,15 +24,15 @@ This document lists only some Spark parameters. 
For more information, see [Spark |:---|:---|:---|:---|:---| |`spark.app.name`|string|-|No|The drive name in Spark.| |`spark.driver.cores`|int|`1`|No|The number of CPU cores used by a driver, only applicable to a cluster mode.| -|`spark.driver.maxResultSize`|string|`1G`|No|The total size limit (in bytes) of the serialized results of all partitions in a single Spark operation(such as collect). The minimum value is 1M, and 0 means unlimited| +|`spark.driver.maxResultSize`|string|`1G`|No|The total size limit (in bytes) of the serialized results of all partitions in a single Spark operation (such as collect). The minimum value is 1M, and 0 means unlimited| |`spark.executor.memory`|string|`1G`|No|The amount of memory used by a Spark driver which can be specified in units, such as 512M or 1G.| |`spark.cores.max`|int|`16`|No|The maximum number of CPU cores of applications requested across clusters (rather than from each node) when a driver runs in a coarse-grained sharing mode on a standalone cluster or a Mesos cluster. The default value is `spark.deploy.defaultCores` on a Spark standalone cluster manager or the value of the `infinite` parameter (all available cores) on Mesos.| -## Hive Configurations(optional) +## Hive Configurations (optional) Users only need to configure parameters for connecting to Hive if Spark and Hive are deployed in different clusters. Otherwise, please ignore the following configurations. -|Parameter|Type|Default Value|Required|Description| +|Parameter|Type|Default Value|Required|Description| |:---|:---|:---|:---|:---| |`hive.warehouse`|string|-|Yes|The warehouse path in HDFS. Enclose the path in double quotes and start with `hdfs://`.| |`hive.connectionURL`|string|-|Yes|The URL of a JDBC connection. For example, `"jdbc:mysql://127.0.0.1:3306/hive_spark?characterEncoding=UTF-8"`.| @@ -42,7 +42,7 @@ Users only need to configure parameters for connecting to Hive if Spark and Hive ## Nebula Graph Configurations -|Parameter|Type|Default Value|Required|Description| +|Parameter|Type|Default Value|Required|Description| |:---|:---|:---|:---|:---| |`nebula.address.graph`|list\[string\]|`["127.0.0.1:9669"]`|Yes|The addresses of all Graph services, including IPs and ports, separated by commas (,). Example: `["ip1:port1","ip2:port2","ip3:port3"]`.| |`nebula.address.meta`|list\[string\]|`["127.0.0.1:9559"]`|Yes|The addresses of all Meta services, including IPs and ports, separated by commas (,). Example: `["ip1:port1","ip2:port2","ip3:port3"]`.| @@ -66,7 +66,7 @@ For different data sources, the vertex configurations are different. There are m ### General Parameters -|Parameter|Type|Default Value|Required|Description| +|Parameter|Type|Default Value|Required|Description| |:---|:---|:---|:---|:---| |`tags.name`|string|-|Yes|The tag name defined in Nebula Graph.| |`tags.type.source`|string|-|Yes|Specify a data source. For example, `csv`.| @@ -79,27 +79,27 @@ For different data sources, the vertex configurations are different. There are m ### Specific Parameters of Parquet/JSON/ORC Data Sources -|Parameter|Type|Default Value|Required|Description| +|Parameter|Type|Default Value|Required|Description| |:---|:---|:---|:---|:---| |`tags.path`|string|-|Yes|The path of vertex data files in HDFS. Enclose the path in double quotes and start with `hdfs://`.| ### Specific Parameters of CSV Data Sources -|Parameter|Type|Default Value|Required|Description| +|Parameter|Type|Default Value|Required|Description| |:---|:---|:---|:---|:---| |`tags.path`|string|-|Yes|The path of vertex data files in HDFS. 
Enclose the path in double quotes and start with `hdfs://`.| -|`tags.separator`|string|`,`|Yes|The separator. The default value is a comma (,).| +|`tags.separator`|string|`,`|Yes|The separator. The default value is a comma (,).| |`tags.header`|bool|`true`|Yes|Whether the file has a header.| ### Specific Parameters of Hive Data Sources -|Parameter|Type|Default Value|Required|Description| +|Parameter|Type|Default Value|Required|Description| |:---|:---|:---|:---|:---| |`tags.exec`|string|-|Yes|The statement to query data sources. For example, `select name,age from mooc.users`.| ### Specific Parameters of MaxCompute Data Sources -|Parameter|Type|Default Value|Required|Description| +|Parameter|Type|Default Value|Required|Description| |:---|:---|:---|:---|:---| |`tags.table`|string|-|Yes|The Maxcompute table name.| |`tags.project`|string|-|Yes|The MaxCompute project name.| @@ -112,7 +112,7 @@ For different data sources, the vertex configurations are different. There are m ### Specific Parameters of Neo4j Data Sources -|Parameter|Type|Default Value|Required|Description| +|Parameter|Type|Default Value|Required|Description| |:---|:---|:---|:---|:---| |`tags.exec`|string|-|Yes|Statements to query data sources. For example: `match (n:label) return n.neo4j-field-0`.| |`tags.server`|string|`"bolt://127.0.0.1:7687"`|Yes|The Neo4j server address. @@ -123,7 +123,7 @@ For different data sources, the vertex configurations are different. There are m ### Specific Parameters of MySQL Data Sources -|Parameter|Type|Default Value|Required|Description| +|Parameter|Type|Default Value|Required|Description| |:---|:---|:---|:---|:---| |`tags.host`|string|-|Yes|The MySQL server address.| |`tags.port`|string|-|Yes|The MySQL server port.| @@ -135,7 +135,7 @@ For different data sources, the vertex configurations are different. There are m ### Specific Parameters of ClickHouse Data Sources -|Parameter|Type|Default Value|Required|Description| +|Parameter|Type|Default Value|Required|Description| |:---|:---|:---|:---|:---| |`tags.url`|string|-|Yes|The JDBC URL of ClickHouse.| |`tags.user`|string|-|Yes|The ClickHouse username with read permissions.| @@ -145,7 +145,7 @@ For different data sources, the vertex configurations are different. There are m ### Specific Parameters of Hbase Data Sources -|Parameter|Type|Default Value|Required|Description| +|Parameter|Type|Default Value|Required|Description| |:---|:---|:---|:---|:---| |`tags.host`|string|`127.0.0.1`|Yes|The Hbase server address.| |`tags.port`|string|`2181`|Yes|The Hbase server port. @@ -154,16 +154,16 @@ For different data sources, the vertex configurations are different. There are m ### Specific Parameters of Pulsar Data Sources -|Parameter|Type|Default Value|Required|Description| +|Parameter|Type|Default Value|Required|Description| |:---|:---|:---|:---|:---| |`tags.service`|string|`"pulsar://localhost:6650"`|Yes|The Pulsar server address. |`tags.admin`|string|`"http://localhost:8081"`|Yes|The admin URL used to connect pulsar.| -|`tags.options.`|string|-|Yes|Options offered by Pulsar,which can be configured by choosing one from `topic`, `topics`, and `topicsPattern`.| +|`tags.options.`|string|-|Yes|Options offered by Pulsar, which can be configured by choosing one from `topic`, `topics`, and `topicsPattern`.| |`tags.interval.seconds`|int|`10`|Yes|The interval for reading messages. 
Unit: Seconds.| ### Specific Parameters of Kafka Data Sources -|Parameter|Type|Default Value|Required|Description| +|Parameter|Type|Default Value|Required|Description| |:---|:---|:---|:---|:---| |`tags.service`|string|-|Yes|The Kafka server address. |`tags.topic`|string|-|Yes|The message type.| @@ -171,7 +171,7 @@ For different data sources, the vertex configurations are different. There are m ### Specific Parameters of SST Data Sources -|Parameter|Type|Default Value|Required|Description| +|Parameter|Type|Default Value|Required|Description| |:---|:---|:---|:---|:---| |`tags.path`|string|-|Yes|The path of the source file specified to generate SST files.| @@ -183,7 +183,7 @@ For the specific parameters of different data sources for edge configurations, p ### General Parameters -|Parameter|Type|Default Value|Required|Description| +|Parameter|Type|Default Value|Required|Description| |:---|:---|:---|:---|:---| |`edges.name`| string|-|Yes|The edge type name defined in Nebula Graph.| |`edges.type.source`|string|-|Yes|The data source of edges. For example, `csv`.| diff --git a/docs-2.0/nebula-exchange/use-exchange/ex-ug-import-from-kafka.md b/docs-2.0/nebula-exchange/use-exchange/ex-ug-import-from-kafka.md index 144b2415313..33fb35a2951 100644 --- a/docs-2.0/nebula-exchange/use-exchange/ex-ug-import-from-kafka.md +++ b/docs-2.0/nebula-exchange/use-exchange/ex-ug-import-from-kafka.md @@ -145,7 +145,7 @@ After Exchange is compiled, copy the conf file `target/classes/application.conf` # Message category. topic: "topic_name1" - # Kafka data has a fixed domain name: key、value、topic、partition、offset、timestamp、timestampType. + # Kafka data has a fixed domain name: key, value, topic, partition, offset, timestamp, timestampType. # If multiple fields need to be specified after Spark reads as DataFrame, separate them with commas. # Specify the field name in fields, for example key for name in Nebula and value for age in Nebula, as shown in the following. fields: [key,value] @@ -207,7 +207,7 @@ After Exchange is compiled, copy the conf file `target/classes/application.conf` # Message category. topic: "topic_name3" - # Kafka data has a fixed domain name: key、value、topic、partition、offset、timestamp、timestampType. + # Kafka data has a fixed domain name: key, value, topic, partition, offset, timestamp, timestampType. # If multiple fields need to be specified after Spark reads as DataFrame, separate them with commas. # Specify the field name in fields, for example key for degree in Nebula, as shown in the following. fields: [key] diff --git a/docs-2.0/nebula-explorer/about-explorer/ex-ug-what-is-explorer.md b/docs-2.0/nebula-explorer/about-explorer/ex-ug-what-is-explorer.md index bd6080ec8d7..ca407753429 100644 --- a/docs-2.0/nebula-explorer/about-explorer/ex-ug-what-is-explorer.md +++ b/docs-2.0/nebula-explorer/about-explorer/ex-ug-what-is-explorer.md @@ -1,6 +1,6 @@ # What is Nebula Graph Explorer -Nebula Graph Explorer (Explorer in short) is a browser-based visualization tool. It is used with the Nebula Graph kernel to visualize interaction with graph data. Even if there is no experience in graph database, you can quickly become a graph exploration expert. +Nebula Graph Explorer (Explorer in short) is a browser-based visualization tool. It is used with the Nebula Graph core to visualize interaction with graph data. Even if there is no experience in graph database, you can quickly become a graph exploration expert. !!! 
enterpriseonly diff --git a/docs-2.0/nebula-explorer/deploy-connect/ex-ug-connect.md b/docs-2.0/nebula-explorer/deploy-connect/ex-ug-connect.md index d4581d57e1d..764c1a85926 100644 --- a/docs-2.0/nebula-explorer/deploy-connect/ex-ug-connect.md +++ b/docs-2.0/nebula-explorer/deploy-connect/ex-ug-connect.md @@ -28,13 +28,13 @@ To connect Explorer to Nebula Graph, follow these steps: When Nebula Graph and Explorer are deployed on the same machine, you must enter the IP address of the machine, but not `127.0.0.1` or `localhost`, in the **Host** field. - - **Username** and **Password**: Fill in the log in account according to the authentication settings of Nebula Graph. + - **Username** and **Password**: Fill in the log in account according to the authentication settings of Nebula Graph. - - If authentication is not enabled, you can use `root` and any password as the username and its password. + - If authentication is not enabled, you can use `root` and any password as the username and its password. - - If authentication is enabled and no account information has been created, you can only log in as GOD role and use `root` and `nebula` as the username and its password. + - If authentication is enabled and no account information has been created, you can only log in as GOD role and use `root` and `nebula` as the username and its password. - - If authentication is enabled and different users are created and assigned roles, users in different roles log in with their accounts and passwords. + - If authentication is enabled and different users are created and assigned roles, users in different roles log in with their accounts and passwords. ![The Config Server page shows the fields to be configured for connection](../figs/ex-ug-050.png "Config Server") @@ -42,6 +42,6 @@ To connect Explorer to Nebula Graph, follow these steps: If you can see the interface as shown in the below, it means you have successfully connected to the Nebula Graph database. - ![The Console page shows that the connection is successful](../figs/st-ug-051.png "Nebula Graph is connected") + ![The Console page shows that the connection is successful](../figs/ex-ug-051.png "Nebula Graph is connected") -One session continues for up to 30 minutes. If you do not operate Explorer within 30 minutes, the active session will time out and you must connect to Nebula Graph again. \ No newline at end of file +One session continues for up to 30 minutes. If you do not operate Explorer within 30 minutes, the active session will time out and you must connect to Nebula Graph again. diff --git a/docs-2.0/nebula-explorer/deploy-connect/ex-ug-deploy.md b/docs-2.0/nebula-explorer/deploy-connect/ex-ug-deploy.md index 8843b5e9043..7a2a89a649b 100644 --- a/docs-2.0/nebula-explorer/deploy-connect/ex-ug-deploy.md +++ b/docs-2.0/nebula-explorer/deploy-connect/ex-ug-deploy.md @@ -15,7 +15,7 @@ Before you deploy Explorer, you must do a check of these: | Port | Description | | ---- | ---- | | 7002 | Web service provided by Explorer | - | 8080 | Nebula-http-gateway service | + | 8070 | Nebula-http-gateway service | - The Linux distribution is CentOS, installed `lsof` and [Node.js](https://nodejs.org/en/) of version above v10.16.0+. 
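Because the deployment check above requires `lsof` and the two listed ports (7002 for the Explorer web service and 8070 for nebula-http-gateway), a small sketch like the following can confirm that the ports are free before starting Explorer; the loop and messages are illustrative only.

```bash
# Check that the ports Explorer needs (7002: web service, 8070: nebula-http-gateway) are free.
for port in 7002 8070; do
    if lsof -i :"${port}" > /dev/null 2>&1; then
        echo "Port ${port} is already in use:"
        lsof -i :"${port}"
    else
        echo "Port ${port} is available."
    fi
done
```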
@@ -70,7 +70,7 @@ Before you deploy Explorer, you must do a check of these: | Port | Description | | ---- | ---- | | 7002 | Web service provided by Explorer | - | 8080 | Nebula-http-gateway service | + | 8070 | Nebula-http-gateway service | - The Linux distribution is CentOS, installed `lsof` and [Node.js](https://nodejs.org/en/) of version above v10.16.0+. @@ -125,4 +125,4 @@ Seeing the following login interface, Explorer is successfully connected to Nebu ![Nebula Graph Explorer](../figs/ex-ug-001.png) -After entering the Explorer login interface, you need to connect to Nebula Graph. For more information, refer to [Connecting to the Nebula Graph](../deploy-connect/ex-ug-connect.md). \ No newline at end of file +After entering the Explorer login interface, you need to connect to Nebula Graph. For more information, refer to [Connecting to the Nebula Graph](../deploy-connect/ex-ug-connect.md). diff --git a/docs-2.0/nebula-explorer/operation-guide/ex-ug-page-overview.md b/docs-2.0/nebula-explorer/operation-guide/ex-ug-page-overview.md index 9c71ab8c5f2..745269e3d10 100644 --- a/docs-2.0/nebula-explorer/operation-guide/ex-ug-page-overview.md +++ b/docs-2.0/nebula-explorer/operation-guide/ex-ug-page-overview.md @@ -15,7 +15,7 @@ The main page of Explorer is divided into five parts: ## Tab bar -- Export: Export a CSV or SVG file of the current view. +- Export: Export a CSV or PNG file of the current view. ## Sidebar @@ -40,6 +40,7 @@ The sidebar consists of five parts. You can click the buttons to explore the gra - Frame selection mode: Click the ![frameSelect](../figs/nav-frameSelect.png) icon to support frame selection of vertexes and edges in the canvas. - Click to select multiple vertexes and edges: Click the ![singleSelect](../figs/nav-singleSelect.png) icon, you can easily click the vertexes and edges in the canvas, and click the blank space to cancel the selection. - Move the canvas: Click the ![moveCanvas](../figs/nav-moveCanvas.png) icon to drag the position of the canvas. + For more information, see [Canvas Operation](../operation-guide/ex-ug-canvas.md). ### Graph exploration and expansion @@ -48,6 +49,7 @@ For more information, see [Canvas Operation](../operation-guide/ex-ug-canvas.md) - Common neighbor: Click the ![commonNeighbor](../figs/rightclickmenu-commonNeighbor.png) icon, select at least two vertexes on the page and view their common neighbors. - Search path: Click the ![findPath](../figs/rightclickmenu-findPath.png) icon to query the path of `all paths`, `Shortest path` or `Noloop path` between the start vertex and the end vertex. - Inspect property: Click the ![propertyView](../figs/nav-propertyView.png) icon to choose whether to display the property values of vertexes or edges in the canvas. + For more information, see [Graph exploration and expansion](../operation-guide/ex-ug-graph-exploration.md). ### Hide and undo @@ -66,7 +68,7 @@ For more information, see [Graph exploration and expansion](../operation-guide/e The canvas is mainly divided into: -- canvas: Display the data queried by VID, Tag or subgraph. +- Canvas: Display the data queried by VID, Tag or subgraph. - Vertexes and Edges overview: It is hidden by default and only displayed when the vertex and edge are selected on the current canvas. Click on the icon in the following, and the user can open the menu to view the detailed data of the selected vertexes and edges in the current canvas. 
@@ -84,6 +86,6 @@ You can use the button on the minimap to switch the graph mode, display the vert | ---- | ---- |----| ----| | mode | force | dagre | circular | - ## Relationship list +## Relationship list - Click the ![unfold](../figs/sidebar-unfold.png) icon on the right, you can open the menu, view the number of tags and edges in the canvas, search for tags and edges, and also support modifying the color and icon of the vertex. \ No newline at end of file +Click the ![unfold](../figs/sidebar-unfold.png) icon on the right, you can open the menu, view the number of tags and edges in the canvas, search for tags and edges, and also support modifying the color and icon of the vertex. diff --git a/docs-2.0/nebula-importer/config-with-header.md b/docs-2.0/nebula-importer/config-with-header.md index 9c4f86723b7..e476c7427e0 100644 --- a/docs-2.0/nebula-importer/config-with-header.md +++ b/docs-2.0/nebula-importer/config-with-header.md @@ -70,7 +70,7 @@ For Tag or Edge type properties, the format is `. - ``: property type. Support `bool`, `int`, `float`, `double`, `timestamp` and `string`, default `string`. -Such as `student.name:string`、`follow.degree:double`. +Such as `student.name:string`, `follow.degree:double`. ## Sample configuration diff --git a/docs-2.0/nebula-spark-connector.md b/docs-2.0/nebula-spark-connector.md index 20f34f281c2..7eb4f161acd 100644 --- a/docs-2.0/nebula-spark-connector.md +++ b/docs-2.0/nebula-spark-connector.md @@ -217,7 +217,7 @@ df.write.nebula(config, nebulaWriteVertexConfig).writeVertices() |`withVidField` |Yes| The column in the DataFrame as the vertex ID. | |`withVidPolicy` |No| When writing the vertex ID, Nebula Graph 2.x use mapping function, supports HASH only. No mapping is performed by default. | |`withVidAsProp` |No| Whether the column in the DataFrame that is the vertex ID is also written as an property. The default value is `false`. If set to `true`, make sure the Tag has the same property name as `VidField`. | - |`withUser` |No| Nebula Graph user name. If [authentication]((7.data-security/1.authentication/1.authentication.md)) is disabled, you do not need to configure the user name and password. | + |`withUser` |No| Nebula Graph user name. If [authentication](7.data-security/1.authentication/1.authentication.md) is disabled, you do not need to configure the user name and password. | |`withPasswd` |No| The password for the Nebula Graph user name. | |`withBatch` |Yes| The number of rows of data written at a time. The default value is `1000`. | |`withWriteMode`|No|Write mode. The optional values are `insert` and `update`. The default value is `insert`.| @@ -236,7 +236,7 @@ df.write.nebula(config, nebulaWriteVertexConfig).writeVertices() |`withSrcAsProperty` |No| Whether the column in the DataFrame that is the starting vertex is also written as an property. The default value is `false`. If set to `true`, make sure Edge type has the same property name as `SrcIdField`. | |`withDstAsProperty` |No| Whether column that are destination vertex in the DataFrame are also written as property. The default value is `false`. If set to `true`, make sure Edge type has the same property name as `DstIdField`. | |`withRankAsProperty` |No| Whether column in the DataFrame that is the rank is also written as property.The default value is `false`. If set to `true`, make sure Edge type has the same property name as `RankField`. | - |`withUser` |No| Nebula Graph user name. 
If [authentication]((7.data-security/1.authentication/1.authentication.md)) is disabled, you do not need to configure the user name and password. | + |`withUser` |No| Nebula Graph user name. If [authentication](7.data-security/1.authentication/1.authentication.md) is disabled, you do not need to configure the user name and password. | |`withPasswd` |No| The password for the Nebula Graph user name. | |`withBatch` |Yes| The number of rows of data written at a time. The default value is `1000`. | |`withWriteMode`|No|Write mode. The optional values are `insert` and `update`. The default value is `insert`.| diff --git a/docs-2.0/nebula-studio/about-studio/st-ug-limitations.md b/docs-2.0/nebula-studio/about-studio/st-ug-limitations.md index 2640b1e3918..e5593be5e34 100644 --- a/docs-2.0/nebula-studio/about-studio/st-ug-limitations.md +++ b/docs-2.0/nebula-studio/about-studio/st-ug-limitations.md @@ -1,12 +1,12 @@ # Limitations -This article introduces the limitations of Studio. +This topic introduces the limitations of Studio. ## Nebula Graph versions !!! Note - The Studio version is released independently of the Nebula Graph kernel. The correspondence between the versions of Studio and the Nebula Graph kernel, as shown in the table below. + The Studio version is released independently of the Nebula Graph core. The correspondence between the versions of Studio and the Nebula Graph core, as shown in the table below. | Nebula Graph version | Studio version | | --- | --- | @@ -17,7 +17,7 @@ This article introduces the limitations of Studio. ## Architecture -For now, Docker-based and RPM-based Studio v2.x supports x86_64 architecture only. +For now, Docker-based and RPM-based Studio v3.x supports x86_64 architecture only. ## Upload data -If you want to reset Nebula Graph, you can clear the connection and reconfigure the database +If you want to reset Nebula Graph, you can clear the connection and reconfigure the database. When the Studio is still connected to a Nebula Graph database, you can choose **setting > clear connect** at the toolbar. If the **Config Server** page is displayed on the browser, it means that Studio has successfully disconnected from the Nebula Graph database. \ No newline at end of file diff --git a/docs-2.0/nebula-studio/figs/st-ug-045.png b/docs-2.0/nebula-studio/figs/st-ug-045.png index 1d74884cbd0..58197a74d87 100644 Binary files a/docs-2.0/nebula-studio/figs/st-ug-045.png and b/docs-2.0/nebula-studio/figs/st-ug-045.png differ diff --git a/docs-2.0/nebula-studio/manage-schema/st-ug-crud-edge-type.md b/docs-2.0/nebula-studio/manage-schema/st-ug-crud-edge-type.md index a71b937c38d..08d33b330dd 100644 --- a/docs-2.0/nebula-studio/manage-schema/st-ug-crud-edge-type.md +++ b/docs-2.0/nebula-studio/manage-schema/st-ug-crud-edge-type.md @@ -28,17 +28,17 @@ To create an edge type on the **Schema** page, follow these steps: 5. On the **Create** page, do these settings: - a. **Name**: Specify an appropriate name for the edge type. In this example, `serve` is used. + a. **Name**: Specify an appropriate name for the edge type. In this example, `serve` is used. - b. (Optional) If necessary, under the name, click the **Comment** to input content. + b. (Optional) If necessary, under the name, click the **Comment** to input content. - c. (Optional) If necessary, in the upper left corner of the **Define Properties** panel, click the check box to expand the panel and do these settings: + c. 
(Optional) If necessary, in the upper left corner of the **Define Properties** panel, click the check box to expand the panel and do these settings: - - To define a property: Enter a property name, a data type, and a default value. + - To define a property: Enter a property name, a data type, and a default value. - - To add multiple properties: Click the **Add Property** button and define more properties. + - To add multiple properties: Click the **Add Property** button and define more properties. - - To cancel a defined property: Besides the **Defaults** column, click the button ![Icon of deletion](https://docs-cdn.nebula-graph.com.cn/nebula-studio-docs/st-ug-020.png "Cancel"). + - To cancel a defined property: Besides the **Defaults** column, click the button ![Icon of deletion](https://docs-cdn.nebula-graph.com.cn/nebula-studio-docs/st-ug-020.png "Cancel"). d. (Optional) If no index is set for the edge type, you can set the TTL configuration: In the upper left corner of the **Set TTL** panel, click the check box to expand the panel, and configure `TTL_COL` and `TTL_ DURATION`. For more information about both parameters, see [TTL configuration](../../3.ngql-guide/8.clauses-and-options/ttl-options.md "Click to go to Nebula Graph website"). diff --git a/docs-2.0/nebula-studio/manage-schema/st-ug-crud-index.md b/docs-2.0/nebula-studio/manage-schema/st-ug-crud-index.md index 8b23fd48784..38030aea19f 100644 --- a/docs-2.0/nebula-studio/manage-schema/st-ug-crud-index.md +++ b/docs-2.0/nebula-studio/manage-schema/st-ug-crud-index.md @@ -28,25 +28,25 @@ To create an index on the **Schema** page, follow these steps: 4. Click the **Index** tab and then click the **+ Create** button. 5. On the **Create** page, do these settings: - a. **Index Type**: Choose to create an index for a tag or for an edge type. In this example, **Edge Type** is chosen. + - **Index Type**: Choose to create an index for a tag or for an edge type. In this example, **Edge Type** is chosen. - b. **Name**: Choose a tag name or an edge type name. In this example, **follow** is chosen. + - **Name**: Choose a tag name or an edge type name. In this example, **follow** is chosen. - c. **Index Name**: Specify a name for the new index. In this example, **follow_index** is used. + - **Index Name**: Specify a name for the new index. In this example, **follow_index** is used. - d. **Indexed Properties**: Click **Add**, and then, in the dialog box, choose a property. If necessary, repeat this step to choose more properties. You can drag the properties to sort them. In this example, `degree` is chosen. + - **Indexed Properties**: Click **Add**, and then, in the dialog box, choose a property. If necessary, repeat this step to choose more properties. You can drag the properties to sort them. In this example, `degree` is chosen. !!! note The order of the indexed properties has an effect on the result of the `LOOKUP` statement. For more information, see [nGQL Manual](../../3.ngql-guide/7.general-query-statements/5.lookup.md "Click to go to the Nebula Graph website"). - e. **Comment**: The remarks of a certain property or the index itself. The maximum length is 256 bytes. By default, there will be no comments on an index. But in this example, `follow_index` is used. + - **Comment**: The remarks of a certain property or the index itself. The maximum length is 256 bytes. By default, there will be no comments on an index. But in this example, `follow_index` is used. 
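As a rough illustration, the statement generated for this example might resemble the following sketch (it assumes the `follow` edge type already defines the `degree` property; the comment clause is left out here):

```ngql
CREATE EDGE INDEX follow_index ON follow(degree);
```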
- When the settings are done, the **Equivalent to the following nGQL statement** panel shows the statement equivalent to the settings. +6. When the settings are done, the **Equivalent to the following nGQL statement** panel shows the statement equivalent to the settings. ![A page for index creation](https://docs-cdn.nebula-graph.com.cn/nebula-studio-docs/st-ug-030.png "Create an index") -6. Confirm the settings and then click the **+ Create** button. +7. Confirm the settings and then click the **+ Create** button. When an index is created, the index list shows the new index. ## View indexes diff --git a/docs-2.0/nebula-studio/manage-schema/st-ug-crud-tag.md b/docs-2.0/nebula-studio/manage-schema/st-ug-crud-tag.md index 5c836280ec1..1c983f8aa48 100644 --- a/docs-2.0/nebula-studio/manage-schema/st-ug-crud-tag.md +++ b/docs-2.0/nebula-studio/manage-schema/st-ug-crud-tag.md @@ -28,15 +28,15 @@ To create a tag on the **Schema** page, follow these steps: 5. On the **Create** page, do these settings: - a. **Name**: Specify an appropriate name for the tag. In this example, `course` is specified. + a. **Name**: Specify an appropriate name for the tag. In this example, `course` is specified. - b. (Optional) If necessary, in the upper left corner of the **Define Properties** panel, click the check box to expand the panel and do these settings: + b. (Optional) If necessary, in the upper left corner of the **Define Properties** panel, click the check box to expand the panel and do these settings: - - To define a property: Enter a property name, a data type, and a default value. + - To define a property: Enter a property name, a data type, and a default value. - - To add multiple properties: Click the **Add Property** button and define more properties. + - To add multiple properties: Click the **Add Property** button and define more properties. - - To cancel a defined property: Besides the **Defaults** column, click the button ![Icon of deletion](https://docs-cdn.nebula-graph.com.cn/nebula-studio-docs/st-ug-020.png "Cancel"). + - To cancel a defined property: Besides the **Defaults** column, click the button ![Icon of deletion](https://docs-cdn.nebula-graph.com.cn/nebula-studio-docs/st-ug-020.png "Cancel"). c. (Optional) If no index is set for the tag, you can set the TTL configuration: In the upper left corner of the **Set TTL** panel, click the check box to expand the panel and configure `TTL_COL` and `TTL_ DURATION`. For more information about both parameters, see [TTL configuration](../../3.ngql-guide/8.clauses-and-options/ttl-options.md "Click to go to Nebula Graph website"). diff --git a/docs-2.0/nebula-studio/quick-start/st-ug-create-schema.md b/docs-2.0/nebula-studio/quick-start/st-ug-create-schema.md index 05eef9ffff1..1b2778b17c6 100644 --- a/docs-2.0/nebula-studio/quick-start/st-ug-create-schema.md +++ b/docs-2.0/nebula-studio/quick-start/st-ug-create-schema.md @@ -18,9 +18,9 @@ To create a graph schema on Studio, you must do a check of these: - A graph space is created. - !!! note +!!! note - If no graph space exists and your account has the GOD privilege, you can create a graph space on the **Console** page. For more information, see [CREATE SPACE](../../3.ngql-guide/9.space-statements/1.create-space.md). + If no graph space exists and your account has the GOD privilege, you can create a graph space on the **Console** page. For more information, see [CREATE SPACE](../../3.ngql-guide/9.space-statements/1.create-space.md). 
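For reference, a graph space for the examples in this guide could be created with a statement along the following lines (a sketch only; the space name and settings are assumptions taken from the sample dataset, not requirements):

```ngql
CREATE SPACE basketballplayer (partition_num = 10, replica_factor = 1, vid_type = FIXED_STRING(30));
```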
## Create a schema with Schema diff --git a/docs-2.0/nebula-studio/quick-start/st-ug-explore.md b/docs-2.0/nebula-studio/quick-start/st-ug-explore.md index 0d9cde92905..ac712c10387 100644 --- a/docs-2.0/nebula-studio/quick-start/st-ug-explore.md +++ b/docs-2.0/nebula-studio/quick-start/st-ug-explore.md @@ -2,7 +2,11 @@ When data is imported, you can use the **Console** page or the **Explore** page to query graph data. -For example, if you want to query the edge properties of the player named _player100_ to the team named _team204_, you can perform these optional operations: +!!! Note + + Users can also perform the following query operations online through [Studio](https://playground.nebula-graph.io/explore). + +For example, if you want to query the edge properties of the player named `player100` to the team named `team204`, you can perform these optional operations: * On the **Console** tab: Run `FETCH PROP ON serve "player100" -> "team204";`. The result window shows all the property information of this vertex. When the result returns, click the **View Subgraph** button and then you can view the vertex information in a visualized way. ![The information retrieved with Console](../figs/st-ug-036-1.png) diff --git a/docs-2.0/nebula-studio/quick-start/st-ug-import-data.md b/docs-2.0/nebula-studio/quick-start/st-ug-import-data.md index a233ef34e68..7bf3414425c 100644 --- a/docs-2.0/nebula-studio/quick-start/st-ug-import-data.md +++ b/docs-2.0/nebula-studio/quick-start/st-ug-import-data.md @@ -10,7 +10,7 @@ To batch import data, do a check of these: - A schema is created. -- CSV files for vertex and edge data separately are created. +- CSV files meet the demands of the Schema. - Your account has privilege of GOD, ADMIN, DBA, or USER. @@ -22,7 +22,7 @@ To batch import data, follow these steps: 2. On the **Select Space** page, choose a graph space name. In this example, **basketballplayer** is used. And then click the **Next** button. -3. On the **Upload Files** page, click the **Upload Files** button and then choose CSV files. In this example, `edge_serve.csv`, `edge_follow.csv`, `vertex_player.csv` and `vertex_team.csv` are chosen. +3. On the **Upload Files** page, click the **Upload Files** button and then choose CSV files. In this example, `edge_serve.csv`, `edge_follow.csv`, `vertex_player.csv`, and `vertex_team.csv` are chosen. !!! note @@ -39,12 +39,16 @@ To batch import data, follow these steps: a. In the **CSV Index** column, click **Mapping**. ![Click "Mapping** in the CSV Index column](../figs/st-ug-032-1.png "Choose the source for vertexId") - b. In the dialog box, choose a column from the CSV file. In this example, the only one cloumn of `vertex_player.csv` is chosen to generate VIDs representing users and the `playerID` column of `vertex_player.csv` is chosen to generate VIDs representing courses. + b. In the dialog box, choose a column from the CSV file. In this example, the only one cloumn of `vertex_player.csv` is chosen to generate VIDs representing players and the `playerID` column of `vertex_player.csv` is chosen to generate VIDs representing players. + + !!! Note + + In the same graph space, the VID is always unique and cannot be repeated. For VID information, see [VID](../../1.introduction/3.vid.md) "Click to enter the Nebula Graph Manual". 8. In the **TAG 1** section, do these operations: a. In the **TAG** drop-down list, choose a tag name. In this example, **player** is used for the `vertex_player.csv` file, and **team** is used for the `vertex_team.csv` file. - b. 
In the property list, click **Mapping** to choose a data column from the CSV file as the value of a property. In this example,for the **player** tag, choose **Column 1** for the `age` property and set its type to **int**. And choose **Column 2** for the `name` property and set its type to **string**. + b. In the property list, click **Mapping** to choose a data column from the CSV file as the value of a property. In this example, for the **player** tag, choose **Column 1** for the `age` property and set its type to **int**. And choose **Column 2** for the `name` property and set its type to **string**. ![Data source for the course vertices](../figs/st-ug-033-1.png "Choose data source for tag properties") @@ -57,7 +61,7 @@ To batch import data, follow these steps: 12. In the **Type** drop-down list, choose an edge type name. In this example, **follow** is chosen. -13. In the property list, click **Mapping** to choose a column from the `edge_follow.csv` file as values of a property for the edges. **srcId** and **dstId** are the VIDs of the source vertex and destination vertex of an edge. In this example, **srcId** must be set to the VIDs of the player and **dstId** must be set to the VIDs of another player. **rank** is optional. +13. In the property list, click **Mapping** to choose a column from the `edge_follow.csv` file as values of a property for the edges. **srcId** and **dstId** are the VIDs of the source vertex and destination vertex of an edge. In this example, **srcId** must be set to the VIDs of the player and **dstId** must be set to the VIDs of another player. **Rank** is optional. ![Data source for the action edges](../figs/st-ug-034-1.png "Choose data source for the edge type properties") @@ -68,4 +72,4 @@ To batch import data, follow these steps: ## Next to do -When the data are imported to Nebula Graph v2.x, you can [query graph data](st-ug-explore.md). +When the data are imported to v{{nebula.release}}, you can [query graph data](st-ug-explore.md). diff --git a/docs-2.0/nebula-studio/quick-start/st-ug-plan-schema.md b/docs-2.0/nebula-studio/quick-start/st-ug-plan-schema.md index f75922f3609..a790071cbe6 100644 --- a/docs-2.0/nebula-studio/quick-start/st-ug-plan-schema.md +++ b/docs-2.0/nebula-studio/quick-start/st-ug-plan-schema.md @@ -14,11 +14,11 @@ This table gives all the essential elements of the schema. | Element | Name | Property name (Data type) | Description | | :--- | :--- | :--- | :--- | -| Tag | **player** | - `name` (`string`)
- `age` (`int`) | Represents the player. | -| Tag | **team** | - `name` (`string`) | Represents the team. | -| Edge type | **serve** | - `start_year` (`int`)
- `end_year` (`int`) | Represent the players behavior.
This behavior connects the player to the team, and the direction is from player to team. | -| Edge type | **follow** | - `degree`(`int`) | Represent the players behavior.
This behavior connects the player to the player, and the direction is from a player to a player. | +| Tag | **player** | - `name` (`string`)
- `age` (`int`) | Represents the player. | +| Tag | **team** | - `name` (`string`) | Represents the team. | +| Edge type | **serve** | - `start_year` (`int`)
- `end_year` (`int`) | Represents the player's behavior.
This behavior connects the player to the team, and the direction is from player to team. |
+| Edge type | **follow** | - `degree` (`int`) | Represents the player's behavior.
This behavior connects the player to the player, and the direction is from a player to a player. | -This figure shows the relationship (**action**) between a **user** and a **course** on the MOOC platform. +This figure shows the relationship (**serve**/**follow**) between a **player** and a **team**. -![Users take actions on a MOOC platform](../figs/st-ug-006-1.png "Relationship between users and courses in the example dataset") +![The relationship between players and between players and teams](../figs/st-ug-006-1.png "Relationship between players and teams in the example dataset") diff --git a/docs-2.0/nebula-studio/st-ug-toc.md b/docs-2.0/nebula-studio/st-ug-toc.md index 874beffa37a..ee43ef8b184 100644 --- a/docs-2.0/nebula-studio/st-ug-toc.md +++ b/docs-2.0/nebula-studio/st-ug-toc.md @@ -1,28 +1,29 @@ + + [Glossary] [DOC_TO_DO] - [Limitations](about-studio/st-ug-limitations.md) - [Check updates](about-studio/st-ug-check-updates.md) - + [FAQ] [DOC_TO_DO] - Deploy and connect - [Deploy Studio](install-configure/st-ug-deploy.md) - [Connect to Nebula Graph](install-configure/st-ug-connect.md) - + [Clear connection] [DOC_TO_DO] - [Design a schema](quick-start/st-ug-plan-schema.md) - [Create a schema](quick-start/st-ug-create-schema.md) - [Import data](quick-start/st-ug-import-data.md) - - [Query graph data](quick-start/st-ug-explore.md)--> + - [Query graph data](quick-start/st-ug-explore.md) - Operation guide - + - [Operate indexes](manage-schema/st-ug-crud-index.md) - [Use Explore][DOC_TO_DO] - Use Console - [Open in Explore](use-console/st-ug-open-in-explore.md) @@ -32,3 +33,4 @@ - Troubleshooting [DOC_TO_DO] - Connection - Error messages +--> \ No newline at end of file diff --git a/docs-2.0/nebula-studio/troubleshooting/st-ug-faq.md b/docs-2.0/nebula-studio/troubleshooting/st-ug-faq.md index 8c0a5e70ae2..1f785088211 100644 --- a/docs-2.0/nebula-studio/troubleshooting/st-ug-faq.md +++ b/docs-2.0/nebula-studio/troubleshooting/st-ug-faq.md @@ -3,7 +3,11 @@ !!! faq "Why can't I use a function?" If you find that a function cannot be used, it is recommended to troubleshoot the problem according to the following steps: + 1. Confirm that Nebula Graph is the latest version. If you use Docker Compose to deploy the Nebula Graph database, it is recommended to run `docker-compose pull && docker-compose up -d` to pull the latest Docker image and start the container. + 2. Confirm that Studio is the latest version. For more information, refer to [check updates](../about-studio/st-ug-check-updates.md). + 3. Search the [nebula forum](https://discuss.nebula-graph.io/), [nebula](https://github.com/vesoft-inc/nebula) and [nebula-studio](https://github.com/vesoft-inc/nebula-studio) projects on the GitHub to confirm if there are already similar problems. - 4. If none of the above steps solve the problem, you can submit a problem on the forum. \ No newline at end of file + + 4. If none of the above steps solve the problem, you can submit a problem on the forum. diff --git a/docs-2.0/nebula-studio/use-console/st-ug-console.md b/docs-2.0/nebula-studio/use-console/st-ug-console.md index f1655ecda47..94e7a2689c2 100644 --- a/docs-2.0/nebula-studio/use-console/st-ug-console.md +++ b/docs-2.0/nebula-studio/use-console/st-ug-console.md @@ -17,4 +17,4 @@ The following table lists various functions on the console interface. | 7 | statement running status | After running the nGQL statement, the statement running status is displayed. If the statement runs successfully, the statement is displayed in green. 
If the statement fails, the statement is displayed in red. | | 8 | result window | Display the results of the statement execution. If the statement returns results, the results window will display the returned results in tabular form. | | 9 | export CSV file | After running the nGQL statement and return the result, click the **Export CSV File** button to export the result as a CSV file. | -| 10 | open in explore | According to the running nGQL statement, the user can click the graph exploration function key to import the returned results into graph exploration for visual display, such as [open in explore](../use-console/st-ug-open-in-explore.md) and [view subgraphs](../use-console/st-ug-visualize-subgraph.md). | +| 10 | open in explore | According to the running nGQL statement, the user can click the graph exploration function key to import the returned results into graph exploration for visual display, such as [open in explore](st-ug-open-in-explore.md) and [view subgraphs](st-ug-visualize-subgraph.md). | diff --git a/docs-2.0/nebula-studio/use-console/st-ug-open-in-explore.md b/docs-2.0/nebula-studio/use-console/st-ug-open-in-explore.md index 15ff64229e7..30668a50d6e 100644 --- a/docs-2.0/nebula-studio/use-console/st-ug-open-in-explore.md +++ b/docs-2.0/nebula-studio/use-console/st-ug-open-in-explore.md @@ -30,10 +30,6 @@ To query edge data on the **Console** page and then view the result on the **Exp nebula> GO FROM "player102" OVER serve YIELD serve._src,serve._dst; ``` - !!! note - - For more information about the `MATCH` syntax, see [MATCH in nGQL User Guide](../../3.ngql-guide/7.general-query-statements/2.match.md). - In the query result, you can see the start year and end year of the service team for the player whose playerId is `palyer102`. As shown below. ![The Result window shows the queried edge data, including the VIDs of the source vertex and the destination vertex](../figs/st-ug-037.png "Edge data") diff --git a/docs-2.0/nebula-studio/use-console/st-ug-visualize-subgraph.md b/docs-2.0/nebula-studio/use-console/st-ug-visualize-subgraph.md index fe5e421c62f..538971d09da 100644 --- a/docs-2.0/nebula-studio/use-console/st-ug-visualize-subgraph.md +++ b/docs-2.0/nebula-studio/use-console/st-ug-visualize-subgraph.md @@ -35,15 +35,15 @@ To query the paths or subgraph on the **Console** page and then view them on the Take the `FIND ALL PATH` for example, query the path information as shown in this figure. - ![The result window shows the queried paths](../figs/st-ug-045-1.png "The queried PATHs") + ![The result window shows the queried paths](../figs/st-ug-045.png "The queried PATHs") 4. Click the **View Subgraphs** button. 5. (Optional) If some data exists on the board of **Explore**, choose a method to insert data: - - **Incremental Insertion**: Click this button to add the result to the existing data on the board. + - **Incremental Insertion**: Click this button to add the result to the existing data on the board. - - **Insert After Clear**: Click this button to clear the existing data from the board and then add the data to the board. + - **Insert After Clear**: Click this button to clear the existing data from the board and then add the data to the board. When the data is inserted, you can view the visualized representation of the paths. 
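To give a concrete flavor of such a query, a path lookup of this kind might look like the sketch below (the VIDs are borrowed from the basketballplayer examples used elsewhere in this guide and are assumptions here; adjust them to your own data):

```ngql
FIND ALL PATH FROM "player100" TO "team204" OVER *;
```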
diff --git a/docs-2.0/reuse/console-1.png b/docs-2.0/reuse/console-1.png new file mode 100644 index 00000000000..5f21b0c2e1d Binary files /dev/null and b/docs-2.0/reuse/console-1.png differ diff --git a/docs-2.0/reuse/source_connect-to-nebula-graph.md b/docs-2.0/reuse/source_connect-to-nebula-graph.md index b358615d7a6..64810bfc57d 100644 --- a/docs-2.0/reuse/source_connect-to-nebula-graph.md +++ b/docs-2.0/reuse/source_connect-to-nebula-graph.md @@ -2,7 +2,7 @@ Nebula Graph supports multiple types of clients, including a CLI client, a GUI c ## Nebula Graph clients -You can use supported [clients or console](../20.appendix/6.eco-tool-version.md) to connect to Nebula Graph. +You can use supported [clients or console](https://docs.nebula-graph.io/{{nebula.release}}/20.appendix/6.eco-tool-version/) to connect to Nebula Graph. * The machine you plan to run Nebula Console on has network access to the Nebula Graph services. ### Steps @@ -78,25 +78,118 @@ If you don't have a Nebula Graph database yet, we recommend that you try the clo You can find more details in the [Nebula Console Repository](https://github.com/vesoft-inc/nebula-console/tree/v2.0.0-ga). -## Nebula Console export mode +## Nebula Console commands -When the export mode is enabled, Nebula Console exports all the query results into a CSV file. When the export mode is disabled, the export stops. The syntax is as follows. +Nebula Console can export CSV file, DOT file, and import too. !!! note - * The following commands are case insensitive. - * The CSV file is stored in the working directory. Run the Linux command `pwd` to show the working directory. + The commands are case insensitive. -* Enable Nebula Console export mode: +### Export a CSV file + +!!! note + + - A CSV file will be saved in the working directory, i.e., what linux command `pwd` show; + + - This command only works for the next query statement. + +The command to export a csv file. + +```ngql +nebula> :CSV +``` + +### Export a DOT file + +!!! Note + + - A DOT file will be saved in the working directory, i.e., what linux command `pwd` show; + + - You can copy the contents of DOT file, and paste in [GraphvizOnline](https://dreampuf.github.io/GraphvizOnline/), to visualize the excution plan; + + - This command only works for the next query statement. + +The command to export a DOT file. + +```ngql +nebula> :dot +``` + +For example, ```ngql -nebula> :SET CSV +nebula> :dot a.dot +nebula> PROFILE FORMAT="dot" GO FROM "player100" OVER follow; ``` -* Disable Nebula Console export mode: +### Importing a testing dataset + +The testing dataset is named `nba`. Details about schema and data can be seen by commands `SHOW`. + +Using the following command to import the testing dataset, + +```ngql +nebula> :play nba +``` + +### Run a command multiple times + +Sometimes, you want to run a command multiple times. Run the following command. 
+ +```ngql +nebula> :repeat N +``` + +For example, + +```ngql +nebula> :repeat 3 +nebula> GO FROM "player100" OVER follow; ++-------------+ +| follow._dst | ++-------------+ +| "player101" | ++-------------+ +| "player125" | ++-------------+ +Got 2 rows (time spent 2602/3214 us) + +Fri, 20 Aug 2021 06:36:05 UTC + ++-------------+ +| follow._dst | ++-------------+ +| "player101" | ++-------------+ +| "player125" | ++-------------+ +Got 2 rows (time spent 583/849 us) + +Fri, 20 Aug 2021 06:36:05 UTC + ++-------------+ +| follow._dst | ++-------------+ +| "player101" | ++-------------+ +| "player125" | ++-------------+ +Got 2 rows (time spent 496/671 us) + +Fri, 20 Aug 2021 06:36:05 UTC + +Executed 3 times, (total time spent 3681/4734 us), (average time spent 1227/1578 us) +``` + +### Sleep to wait + +Sleep N seconds. + +It is usually used when altering schema. Since schema is altered in async way, and take effects in the next heartbeat cycle. ```ngql -nebula> :UNSET CSV +nebula> :sleep N ``` ## Disconnect Nebula Console from Nebula Graph diff --git a/docs-2.0/reuse/source_install-nebula-graph-by-rpm-or-deb.md b/docs-2.0/reuse/source_install-nebula-graph-by-rpm-or-deb.md index 9d5808a62cd..783a1a8fcfa 100644 --- a/docs-2.0/reuse/source_install-nebula-graph-by-rpm-or-deb.md +++ b/docs-2.0/reuse/source_install-nebula-graph-by-rpm-or-deb.md @@ -2,7 +2,7 @@ RPM and DEB are common package formats on Linux systems. This topic shows how to ## Prerequisites -Prepare the right [resources](../1.resource-preparations.md). +Prepare the right [resources](https://docs.nebula-graph.io/{{nebula.release}}/4.deployment-and-installation/1.resource-preparations/). !!! note @@ -34,25 +34,26 @@ Prepare the right [resources](../1.resource-preparations.md). https://oss-cdn.nebula-graph.io/package//nebula-graph-.ubuntu2004.amd64.deb ``` - For example, download release package `2.0.0` for `Centos 7.5`: + For example, download release package {{ nebula.release }} for `Centos 7.5`: ```bash - wget https://oss-cdn.nebula-graph.io/package/2.0.0/nebula-graph-2.0.0.el7.x86_64.rpm - wget https://oss-cdn.nebula-graph.io/package/2.0.0/nebula-graph-2.0.0.el7.x86_64.rpm.sha256sum.txt + wget https://oss-cdn.nebula-graph.io/package/{{ nebula.release }}/nebula-graph-{{ nebula.release }}.el7.x86_64.rpm + wget https://oss-cdn.nebula-graph.io/package/{{ nebula.release }}/nebula-graph-{{ nebula.release }}.el7.x86_64.rpm.sha256sum.txt ``` - download release package `2.0.0` for `Ubuntu 1804`: + download release package `{{ nebula.release }}` for `Ubuntu 1804`: ```bash - wget https://oss-cdn.nebula-graph.io/package/2.0.0/nebula-graph-2.0.0.ubuntu1804.amd64.deb - wget https://oss-cdn.nebula-graph.io/package/2.0.0/nebula-graph-2.0.0.ubuntu1804.amd64.deb.sha256sum.txt + wget https://oss-cdn.nebula-graph.io/package/{{ nebula.release }}/nebula-graph-{{ nebula.release }}.ubuntu1804.amd64.deb + wget https://oss-cdn.nebula-graph.io/package/{{ nebula.release }}/nebula-graph-{{ nebula.release }}.ubuntu1804.amd64.deb.sha256sum.txt ``` * Download the nightly version. !!! danger - Nightly versions are usually used to test new features. Don't use it for production. + - Nightly versions are usually used to test new features. Don't use it for production. + - Nightly versions may not be build successfully every night. And the names may change from day to day. URL: @@ -89,15 +90,16 @@ Prepare the right [resources](../1.resource-preparations.md). 
wget https://oss-cdn.nebula-graph.io/package/v2-nightly/2021.03.28/nebula-graph-2021.03.28-nightly.ubuntu1804.amd64.deb wget https://oss-cdn.nebula-graph.io/package/v2-nightly/2021.03.28/nebula-graph-2021.03.28-nightly.ubuntu1804.amd64.deb.sha256sum.txt ``` - + ## Install Nebula Graph @@ -127,3 +130,8 @@ Prepare the right [resources](../1.resource-preparations.md). !!! note The default installation path is `/usr/local/nebula/`. + +## What's next + +- [start Nebula Graph](https://docs.nebula-graph.io/{{nebula.release}}/2.quick-start/5.start-stop-service/) +- [connect to Nebula Graph](https://docs.nebula-graph.io/{{nebula.release}}/2.quick-start/3.connect-to-nebula-graph/) diff --git a/docs-2.0/reuse/source_manage-service.md b/docs-2.0/reuse/source_manage-service.md index 15cc587f390..b49f8874f8f 100644 --- a/docs-2.0/reuse/source_manage-service.md +++ b/docs-2.0/reuse/source_manage-service.md @@ -107,7 +107,7 @@ Removing nebula-docker-compose_metad0_1 ... done Removing network nebula-docker-compose_nebula-net ``` -If you are using a development or nightly version for testing and have compatibility issues, try to run 'docker-compose down-v' to **DELETE** all data stored in Nebula Graph and import data again. +If you are using a development or nightly version for testing and have compatibility issues, try to run `docker-compose down -v` to **DELETE** all data stored in Nebula Graph and import data again. ## Check the service status @@ -164,12 +164,13 @@ nebula-docker-compose_storaged2_1 ./bin/nebula-storaged --fl ... Up (healthy 0.0.0.0:49227->9779/tcp, 9780/tcp ``` -To troubleshoot for a specific service: +Use the `CONTAINER ID` to log in the container and troubleshoot. -1. Confirm the container name in the preceding return information. -2. Run `docker ps` to find the `CONTAINER ID`. -3. Use the `CONTAINER ID` to log in the container and troubleshoot. 
- ```ngql - nebula-docker-compose]$ docker exec -it 2a6c56c405f5 bash - [root@2a6c56c405f5 nebula]# - ``` +```ngql +nebula-docker-compose]$ docker exec -it 2a6c56c405f5 bash +[root@2a6c56c405f5 nebula]# +``` + +## What's next + +[Connect to Nebula Graph](https://docs.nebula-graph.io/{{nebula.release}}/2.quick-start/3.connect-to-nebula-graph/) diff --git a/mkdocs.yml b/mkdocs.yml index 43e14da499c..4de4872aea1 100755 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -2,7 +2,7 @@ site_name: Nebula Graph Database Manual site_description: Documentation for Nebula Graph Database site_author: Nebula Graph -site_url: https://docs.nebula-graph.io/master/ +site_url: https://docs.nebula-graph.io/ edit_uri: 'edit/master/docs-2.0/' docs_dir: docs-2.0 @@ -111,10 +111,10 @@ nav: - Quick start: - Quick start workflow: 2.quick-start/1.quick-start-workflow.md - - Step 1: Install Nebula Graph: 2.quick-start/2.install-nebula-graph.md - - Step 2: Manage Nebula Graph Service: 2.quick-start/5.start-stop-service.md - - Step 3: Connect to Nebula Graph: 2.quick-start/3.connect-to-nebula-graph.md - - Step 4: Use nGQL(CRUD): 2.quick-start/4.nebula-graph-crud.md + - Step 1 Install Nebula Graph: 2.quick-start/2.install-nebula-graph.md + - Step 2 Manage Nebula Graph Service: 2.quick-start/5.start-stop-service.md + - Step 3 Connect to Nebula Graph: 2.quick-start/3.connect-to-nebula-graph.md + - Step 4 Use nGQL (CRUD): 2.quick-start/4.nebula-graph-crud.md - nGQL guide: - nGQL overview: @@ -243,7 +243,7 @@ nav: - DROP INDEX: 3.ngql-guide/14.native-index-statements/6.drop-native-index.md - Full-text index statements: - - Index overview: 3.ngql-guide/14.native-index-statements/README.md +# - Index overview: 3.ngql-guide/14.native-index-statements/README.md - Full-text restrictions: 4.deployment-and-installation/6.deploy-text-based-index/1.text-based-index-restrictions.md - Deploy Elasticsearch cluster: 4.deployment-and-installation/6.deploy-text-based-index/2.deploy-es.md - Deploy Raft Listener cluster: 4.deployment-and-installation/6.deploy-text-based-index/3.deploy-listener.md @@ -260,6 +260,7 @@ nav: # - CONFIG syntax: 3.ngql-guide/18.operation-and-maintenance-statements/1.configs-syntax.md - BALANCE syntax: 3.ngql-guide/18.operation-and-maintenance-statements/2.balance-syntax.md - Job statements: 3.ngql-guide/18.operation-and-maintenance-statements/4.job-statements.md + - Kill queries: 3.ngql-guide/18.operation-and-maintenance-statements/6.kill-query.md - Deployment and installation: - Resource preparations: 4.deployment-and-installation/1.resource-preparations.md @@ -303,6 +304,17 @@ nav: - Service Tuning: - Compaction: 8.service-tuning/compaction.md - Storage load balance: 8.service-tuning/load-balance.md + - Modeling suggestions: 8.service-tuning/2.graph-modeling.md + - System design suggestions: 8.service-tuning/3.system-design.md + - Execution plan: 8.service-tuning/4.plan.md + - Processing super vertices: 8.service-tuning/super-node.md + + - Client: + - Clients overview: 14.client/1.nebula-client.md + - Nebula CPP: 14.client/3.nebula-cpp-client.md + - Nebula Java: 14.client/4.nebula-java-client.md + - Nebula Python: 14.client/5.nebula-python-client.md + - Nebula Go: 14.client/6.nebula-go-client.md - Nebula Graph Studio: - Change Log: nebula-studio/about-studio/st-ug-release-note.md @@ -329,8 +341,8 @@ nav: - Operate Indexes: nebula-studio/manage-schema/st-ug-crud-index.md - Use Console: - Console: nebula-studio/use-console/st-ug-console.md - - Open in Explore: nebula-studio/use-console/st-ug-open-in-explorer.md - - View 
subgraphs: nebula-studio/use-console/st-ug-visualize-findpath.md + - Open in Explore: nebula-studio/use-console/st-ug-open-in-explore.md + - View subgraphs: nebula-studio/use-console/st-ug-visualize-subgraph.md - Troubleshooting: - Connecting to the database error: nebula-studio/troubleshooting/st-ug-config-server-errors.md - Cannot access to Studio: nebula-studio/troubleshooting/st-ug-connection-errors.md @@ -394,18 +406,12 @@ nav: - Nebula Bench: nebula-bench.md - - Contribution: - - How to Contribute: 15.contribution/how-to-contribute.md - - - FAQ: - - FAQ: 19.FAQ/0.FAQ.md - - Appendix: - Release Note: 20.appendix/releasenote.md - FAQ: 20.appendix/0.FAQ.md - Ecosystem tools: 20.appendix/6.eco-tool-version.md - Write tools: 20.appendix/write-tools.md - - Contribution: 15.contribution/how-to-contribute.md + - How to contribute: 15.contribution/how-to-contribute.md - 中文手册: https://docs.nebula-graph.com.cn/ @@ -435,12 +441,13 @@ plugins: - with-pdf: copyright: 2021 Vesoft Inc. cover_subtitle: master - author: Min Wu, Yao Zhou, Cooper Liang, foesa Yang, Max Zhu + author: Min Wu, Yao Zhou, Cooper Liang, Foesa Yang, Max Zhu cover: true back_cover: true cover_logo: 'https://cloud-cdn.nebula-graph.com.cn/nebula-for-pdf.png' output_path: pdf/NebulaGraph-EN.pdf show_anchors: true + render_js: true google_analytics: - UA-60523578-16