From f9b8a14dc4fa1ace12f8627ef558a868a44ddff9 Mon Sep 17 00:00:00 2001 From: leemos <502101107@qq.com> Date: Wed, 22 Jun 2022 17:15:02 +0800 Subject: [PATCH 1/8] =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E5=AF=B9markdown?= =?UTF-8?q?=E8=AF=AD=E6=B3=95=E7=9A=84=E6=A0=A1=E9=AA=8C=E9=85=8D=E7=BD=AE?= =?UTF-8?q?=EF=BC=8C=E9=80=9A=E8=BF=87markdownlint=E5=AE=9E=E7=8E=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .github/markdown_lint_config.json | 52 +++++++++++++++++++++++++++ .github/workflows/markdown-linter.yml | 23 ++++++++++++ 2 files changed, 75 insertions(+) create mode 100644 .github/markdown_lint_config.json create mode 100644 .github/workflows/markdown-linter.yml diff --git a/.github/markdown_lint_config.json b/.github/markdown_lint_config.json new file mode 100644 index 0000000000..164fcfc1c7 --- /dev/null +++ b/.github/markdown_lint_config.json @@ -0,0 +1,52 @@ +{ + "MD001": false, + "MD002": false, + "MD003": false, + "MD004": false, + "MD005": false, + "MD006": false, + "MD007": false, + "MD008": false, + "MD009": false, + "MD010": false, + "MD011": false, + "MD012": false, + "MD013": false, + "MD014": false, + "MD015": false, + "MD016": false, + "MD017": false, + "MD018": false, + "MD019": false, + "MD020": false, + "MD021": false, + "MD022": false, + "MD023": false, + "MD024": false, + "MD025": false, + "MD026": false, + "MD027": false, + "MD028": false, + "MD029": false, + "MD030": false, + "MD031": true, + "MD032": false, + "MD033": false, + "MD034": false, + "MD035": false, + "MD036": false, + "MD037": false, + "MD038": false, + "MD039": false, + "MD040": false, + "MD041": false, + "MD042": false, + "MD043": false, + "MD044": false, + "MD045": false, + "MD046": false, + "MD047": false, + "MD048": false, + "MD049": false, + "MD050": false +} \ No newline at end of file diff --git a/.github/workflows/markdown-linter.yml b/.github/workflows/markdown-linter.yml new file mode 100644 index 0000000000..8db0b4d4a8 --- /dev/null +++ b/.github/workflows/markdown-linter.yml @@ -0,0 +1,23 @@ +name: Layotto Env Pipeline 🌊 + +on: + push: + branches: + - main + pull_request: + branches: + - main + +jobs: + check: + name: "🍀 Markdown Lint" + runs-on: ubuntu-latest + steps: + - name: Check out code + uses: actions/checkout@v2 + + - name: Check markdown grammar in the docs directory + uses: nosborn/github-action-markdown-cli@v3.1.0 + with: + files: docs/* + config_file: ".github/markdown_lint_config.json" From cd63f3297603e9123d8f16799c76088f1fd5030b Mon Sep 17 00:00:00 2001 From: leemos <502101107@qq.com> Date: Wed, 22 Jun 2022 17:17:11 +0800 Subject: [PATCH 2/8] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E4=B8=8D=E5=8C=B9?= =?UTF-8?q?=E9=85=8DMD031=E8=A7=84=E5=88=99=E7=9A=84md?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../en/api_reference/comment_spec_of_proto.md | 5 ++++ .../api_reference/how_to_generate_api_doc.md | 5 ++++ docs/en/building_blocks/actuator/actuator.md | 6 +++++ docs/en/building_blocks/file/file.md | 8 ++++++ docs/en/building_blocks/lock/reference.md | 2 ++ docs/en/building_blocks/pubsub/reference.md | 1 + docs/en/building_blocks/rpc/reference.md | 1 + .../en/building_blocks/sequencer/reference.md | 4 +++ docs/en/building_blocks/state/reference.md | 12 +++++++++ docs/en/component_specs/lock/common.md | 2 ++ docs/en/component_specs/lock/consul.md | 1 + docs/en/component_specs/lock/etcd.md | 2 ++ docs/en/component_specs/lock/redis.md | 1 + docs/en/component_specs/lock/zookeeper.md | 3 +++ 
docs/en/component_specs/pubsub/common.md | 2 ++ docs/en/component_specs/pubsub/redis.md | 1 + docs/en/component_specs/secret/common.md | 3 +++ docs/en/component_specs/sequencer/common.md | 2 ++ docs/en/component_specs/sequencer/etcd.md | 3 +++ docs/en/component_specs/sequencer/redis.md | 3 +++ .../en/component_specs/sequencer/zookeeper.md | 3 +++ docs/en/component_specs/state/common.md | 2 ++ docs/en/component_specs/state/redis.md | 1 + .../en/design/actuator/actuator-design-doc.md | 14 ++++++++++ .../configuration-api-with-apollo.md | 3 +++ docs/en/design/lock/lock-api-design.md | 6 +++++ ...api-and-compability-with-dapr-component.md | 3 +++ docs/en/design/rpc/rpc-design-doc.md | 1 + docs/en/development/developing-api.md | 1 + docs/en/development/developing-component.md | 1 + docs/en/development/test-quickstart.md | 13 +++++++++ docs/en/start/api_plugin/helloworld.md | 2 ++ docs/en/start/configuration/start-apollo.md | 1 + docs/en/start/configuration/start.md | 7 +++++ docs/en/start/faas/start.md | 13 +++++++++ docs/en/start/istio/start.md | 27 +++++++++++++++++++ docs/en/start/network_filter/tcpcopy.md | 2 ++ docs/en/start/pubsub/start.md | 5 ++++ docs/en/start/rpc/dubbo_json_rpc.md | 3 +++ docs/en/start/rpc/helloworld.md | 2 ++ docs/en/start/secret/start.md | 1 + docs/en/start/sequencer/start.md | 4 +++ docs/en/start/state/start.md | 2 ++ docs/en/start/stream_filter/flow_control.md | 1 + docs/en/start/trace/prometheus.md | 2 ++ docs/en/start/trace/skywalking.md | 4 +++ docs/en/start/trace/trace.md | 1 + docs/en/start/wasm/start.md | 3 +++ .../zh/api_reference/comment_spec_of_proto.md | 5 ++++ .../api_reference/how_to_generate_api_doc.md | 2 ++ docs/zh/blog/code/layotto-rpc/index.md | 21 +++++++++++++++ .../blog/code/start_process/start_process.md | 8 ++++++ docs/zh/blog/code/webassembly/index.md | 17 ++++++++++++ docs/zh/building_blocks/actuator/actuator.md | 5 ++++ docs/zh/building_blocks/file/file.md | 8 ++++++ docs/zh/building_blocks/lock/reference.md | 3 +++ docs/zh/building_blocks/pubsub/reference.md | 1 + docs/zh/building_blocks/rpc/reference.md | 1 + .../zh/building_blocks/sequencer/reference.md | 4 +++ docs/zh/building_blocks/state/reference.md | 12 +++++++++ docs/zh/component_specs/custom/common.md | 1 + docs/zh/component_specs/lock/common.md | 1 + docs/zh/component_specs/lock/etcd.md | 3 +++ docs/zh/component_specs/lock/in-memory.md | 2 ++ docs/zh/component_specs/lock/redis.md | 2 ++ docs/zh/component_specs/pubsub/common.md | 1 + docs/zh/component_specs/pubsub/redis.md | 1 + docs/zh/component_specs/secret/common.md | 3 +++ docs/zh/component_specs/sequencer/common.md | 1 + docs/zh/component_specs/sequencer/etcd.md | 3 +++ .../zh/component_specs/sequencer/in-memory.md | 2 ++ docs/zh/component_specs/sequencer/redis.md | 3 +++ docs/zh/component_specs/state/common.md | 1 + docs/zh/component_specs/state/redis.md | 1 + .../zh/design/actuator/actuator-design-doc.md | 14 ++++++++++ .../configuration-api-with-apollo.md | 3 +++ docs/zh/design/lock/lock-api-design.md | 4 +++ ...api-and-compability-with-dapr-component.md | 3 +++ ...76\350\256\241\346\226\207\346\241\243.md" | 1 + docs/zh/design/sequencer/design.md | 5 ++++ docs/zh/development/contributing-doc.md | 2 ++ docs/zh/development/developing-api.md | 1 + docs/zh/development/developing-component.md | 1 + docs/zh/development/github-workflows.md | 11 ++++++++ docs/zh/development/test-quickstart.md | 15 +++++++++++ docs/zh/operation/README.md | 1 + docs/zh/start/api_plugin/helloworld.md | 3 +++ 
docs/zh/start/configuration/start-apollo.md | 1 + docs/zh/start/configuration/start.md | 3 +++ docs/zh/start/faas/start.md | 12 +++++++++ docs/zh/start/file/minio.md | 2 ++ docs/zh/start/istio/start.md | 26 ++++++++++++++++++ docs/zh/start/network_filter/tcpcopy.md | 1 + docs/zh/start/pubsub/start.md | 6 +++++ docs/zh/start/rpc/dubbo_json_rpc.md | 5 ++++ docs/zh/start/rpc/helloworld.md | 2 ++ docs/zh/start/secret/start.md | 4 +++ docs/zh/start/sequencer/start.md | 3 +++ docs/zh/start/state/start.md | 2 ++ docs/zh/start/stream_filter/flow_control.md | 1 + docs/zh/start/trace/jaeger.md | 1 + docs/zh/start/trace/prometheus.md | 1 + docs/zh/start/trace/trace.md | 5 ++++ docs/zh/start/trace/zipkin.md | 1 + docs/zh/start/wasm/start.md | 2 ++ 105 files changed, 449 insertions(+) diff --git a/docs/en/api_reference/comment_spec_of_proto.md b/docs/en/api_reference/comment_spec_of_proto.md index 61d7dcccb9..42e87c1a95 100644 --- a/docs/en/api_reference/comment_spec_of_proto.md +++ b/docs/en/api_reference/comment_spec_of_proto.md @@ -3,6 +3,7 @@ Avoid adding empty lines between comments symbols `//`.If there is a blank line in the comments, the tool(protoc-gen-doc) will generate malformed documents. bad case: + ``` message BadCase{ // XXXXXXXX @@ -13,7 +14,9 @@ message BadCase{ field A } ``` + good case: + ``` message GoodCase{ // XXXXXXXX @@ -22,11 +25,13 @@ message GoodCase{ field A } ``` + Or you can use another annotation symbol directly `/* */` If you want to have some comment in your proto files, but don't want them to be part of the docs, you can simply prefix the comment with `@exclude`. Example: include only the comment for the id field + ``` /** * @exclude diff --git a/docs/en/api_reference/how_to_generate_api_doc.md b/docs/en/api_reference/how_to_generate_api_doc.md index 50856ecf1e..c5c201d8f6 100644 --- a/docs/en/api_reference/how_to_generate_api_doc.md +++ b/docs/en/api_reference/how_to_generate_api_doc.md @@ -4,9 +4,11 @@ Note: the commands below should be executed under layotto directory ## How to compile the proto files into `.pb.go` code ### **Make cmmand(recommended)** + ```bash make proto.code ``` + This command uses docker to run protoc and generate `.pb.go` code files. ### **Install protoc** @@ -20,15 +22,18 @@ This command uses docker to run protoc and generate `.pb.go` code files. cd spec/proto/runtime/v1 protoc -I. --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=require_unimplemented_servers=false,paths=source_relative *.proto ``` + ## How to generate API reference doc according to the proto files We can use [protoc-gen-doc](https://github.com/pseudomuto/protoc-gen-doc) and docker to generate api documents,the command is as follows: ### **Make command(recommended)** + ```bash make proto.doc ``` + This command uses docker to run protoc-gen-doc and generate docs. ### **Use docker to run protoc-gen-doc** diff --git a/docs/en/building_blocks/actuator/actuator.md b/docs/en/building_blocks/actuator/actuator.md index 87e406ab09..3ebb8f2918 100644 --- a/docs/en/building_blocks/actuator/actuator.md +++ b/docs/en/building_blocks/actuator/actuator.md @@ -14,6 +14,7 @@ For another example, on the Dashboard for SRE, by calling Actuator API, you can Used to check the health status of Layotto and app. The health status can be used to determine "whether restarting is needed". GET,no parameters. + ```json // http://localhost:8080/actuator/health/liveness // HTTP/1.1 200 OK @@ -30,11 +31,13 @@ GET,no parameters. 
} } ``` + Return field description: HTTP status code 200 means success, other (status code above 400) means failure. There are three types of status fields: + ```go var ( // INIT means it is starting @@ -57,6 +60,7 @@ A: The liveness check is used to check some unrecoverable faults, "Do we need to Readiness is used to check some temporary and recoverable states. For example, the application is warming up the cache. It needs to tell the infrastructure "Don't lead traffic to me now". After it finishes warming up, the infrastructure will reinvoke the API and get the result "I am ready to serve customers" GET,no parameters. + ```json // http://localhost:8080/actuator/health/readiness // HTTP/1.1 503 SERVICE UNAVAILABLE @@ -79,6 +83,7 @@ Note: By default, the API will only return the health status of Layotto. If you Used to view the runtime metadata of Layotto and app. GET,no parameters. + ```json // http://localhost:8080/actuator/health/liveness // HTTP/1.1 200 OK @@ -113,6 +118,7 @@ Actuator API path adopts restful style. After different Endpoints are registered ``` For example: + ``` /actuator/health/liveness ``` diff --git a/docs/en/building_blocks/file/file.md b/docs/en/building_blocks/file/file.md index 0667055f2e..f79ad46f18 100644 --- a/docs/en/building_blocks/file/file.md +++ b/docs/en/building_blocks/file/file.md @@ -86,29 +86,37 @@ message DelFileRequest { ``` ### Get File + ```protobuf // Get file with stream rpc GetFile(GetFileRequest) returns (stream GetFileResponse) {} ``` + To avoid inconsistencies between this document and the code, please refer to [the newest proto file](https://github.com/mosn/layotto/blob/main/spec/proto/runtime/v1/runtime.proto) for detailed input parameters and return values. ### Put File + ```protobuf // Put file with stream rpc PutFile(stream PutFileRequest) returns (google.protobuf.Empty) {} ``` + To avoid inconsistencies between this document and the code, please refer to [the newest proto file](https://github.com/mosn/layotto/blob/main/spec/proto/runtime/v1/runtime.proto) for detailed input parameters and return values. ### Delete File + ```protobuf // Delete specific file rpc DelFile(DelFileRequest) returns (google.protobuf.Empty){} ``` + To avoid inconsistencies between this document and the code, please refer to [the newest proto file](https://github.com/mosn/layotto/blob/main/spec/proto/runtime/v1/runtime.proto) for detailed input parameters and return values. ### List File + ```protobuf // List all files rpc ListFile(ListFileRequest) returns (ListFileResp){} ``` + To avoid inconsistencies between this document and the code, please refer to [the newest proto file](https://github.com/mosn/layotto/blob/main/spec/proto/runtime/v1/runtime.proto) for detailed input parameters and return values. \ No newline at end of file diff --git a/docs/en/building_blocks/lock/reference.md b/docs/en/building_blocks/lock/reference.md index 9c2d0e6f56..39b14a6301 100644 --- a/docs/en/building_blocks/lock/reference.md +++ b/docs/en/building_blocks/lock/reference.md @@ -12,6 +12,7 @@ Layotto client sdk encapsulates the logic of grpc calling. For an example of usi ### TryLock + ```protobuf // A non-blocking method trying to get a lock with ttl. 
rpc TryLock(TryLockRequest)returns (TryLockResponse) {} @@ -67,6 +68,7 @@ req.LockOwner = uuid.New().String() ``` ### Unlock + ```protobuf rpc Unlock(UnlockRequest)returns (UnlockResponse) {} ``` diff --git a/docs/en/building_blocks/pubsub/reference.md b/docs/en/building_blocks/pubsub/reference.md index 449b9b602b..f37b18ccd5 100644 --- a/docs/en/building_blocks/pubsub/reference.md +++ b/docs/en/building_blocks/pubsub/reference.md @@ -31,6 +31,7 @@ Used to publish events to the specified topic // Publishes events to the specific topic. rpc PublishEvent(PublishEventRequest) returns (google.protobuf.Empty) {} ``` + To avoid inconsistencies between the documentation and the code, please refer to [runtime.proto](https://github.com/mosn/layotto/blob/main/spec/proto/runtime/v1/runtime.proto) for detailed input parameters and return values ### Subscribe to events diff --git a/docs/en/building_blocks/rpc/reference.md b/docs/en/building_blocks/rpc/reference.md index 5b5a73b562..c91a9ce3ff 100644 --- a/docs/en/building_blocks/rpc/reference.md +++ b/docs/en/building_blocks/rpc/reference.md @@ -48,6 +48,7 @@ Quick start Document for this demo: [Dubbo JSON RPC Example](https://mosn.io/lay The server is [dubbo-go-samples](https://github.com/apache/dubbo-go-samples), the config file [example.json](https://github.com/mosn/layotto/blob/77e0a4b2af063ff9e365a933c4735655898de369/demo/rpc/dubbo_json_rpc/example.json) use the callback function [dubbo_json_rpc](https://github.com/mosn/layotto/blob/8db7a2297bd05d1b0c4452cc980d8f6412a82f3a/components/rpc/callback/dubbo_json_rpc.go) to generate a request header. And then,the [client](https://github.com/mosn/layotto/blob/b66b998f50901f8bd1cce035478579c1b47f986d/demo/rpc/dubbo_json_rpc/dubbo_json_client/client.go) do a RPC calls though grpc interface **InvokeService**. + ```golang resp, err := cli.InvokeService( ctx, diff --git a/docs/en/building_blocks/sequencer/reference.md b/docs/en/building_blocks/sequencer/reference.md index fd9bee54bc..1a1d342022 100644 --- a/docs/en/building_blocks/sequencer/reference.md +++ b/docs/en/building_blocks/sequencer/reference.md @@ -24,9 +24,11 @@ Q: What scenarios will I need an increasing trend? 1. For b+ tree type db (such as MYSQL), the primary key with increasing trend can make better use of cache (cache friendly). 2. When you want to use the id to sort and query the latest data. For example, the requirement is to check the latest 100 messages, and the developer does not want to add a timestamp field and build an index on it. If the id itself is incremented, then the latest 100 messages can be sorted by id directly: + ``` select * from message order by message-id limit 100 ``` + This is very common when using nosql, because it is difficult for nosql to index on another timestamp field - Global monotonically increasing @@ -40,6 +42,7 @@ Layotto client sdk encapsulates the logic of grpc calling. For an example of usi The components need to be configured before use. 
For detailed configuration options, see [Sequencer component document](en/component_specs/sequencer/common.md) ### Get next unique id + ```protobuf // Sequencer API // Get next unique id with some auto-increment guarantee @@ -76,4 +79,5 @@ message GetNextIdResponse{ int64 next_id = 1; } ``` + To avoid inconsistencies between the documentation and the code, please refer to [proto file](https://github.com/mosn/layotto/blob/main/spec/proto/runtime/v1/runtime.proto) for detailed input parameters and return values diff --git a/docs/en/building_blocks/state/reference.md b/docs/en/building_blocks/state/reference.md index 1dff89c0ac..e92c9ac80c 100644 --- a/docs/en/building_blocks/state/reference.md +++ b/docs/en/building_blocks/state/reference.md @@ -35,6 +35,7 @@ Used to save a batch of status data ``` #### parameters + ```protobuf // SaveStateRequest is the message to save multiple states into state store. @@ -106,41 +107,52 @@ message StateOptions { StateConsistency consistency = 2; } ``` + #### return `google.protobuf.Empty` ### Get State + ```protobuf // Gets the state for a specific key. rpc GetState(GetStateRequest) returns (GetStateResponse) {} ``` + To avoid inconsistencies between this document and the code, please refer to [the newest proto file](https://github.com/mosn/layotto/blob/main/spec/proto/runtime/v1/runtime.proto) for detailed input parameters and return values. ### Get bulk state + ```protobuf // Gets a bulk of state items for a list of keys rpc GetBulkState(GetBulkStateRequest) returns (GetBulkStateResponse) {} ``` + To avoid inconsistencies between this document and the code, please refer to [the newest proto file](https://github.com/mosn/layotto/blob/main/spec/proto/runtime/v1/runtime.proto) for detailed input parameters and return values. ### Delete state + ```protobuf // Deletes the state for a specific key. rpc DeleteState(DeleteStateRequest) returns (google.protobuf.Empty) {} ``` + To avoid inconsistencies between this document and the code, please refer to [the newest proto file](https://github.com/mosn/layotto/blob/main/spec/proto/runtime/v1/runtime.proto) for detailed input parameters and return values. ### Delete bulk state + ```protobuf // Deletes a bulk of state items for a list of keys rpc DeleteBulkState(DeleteBulkStateRequest) returns (google.protobuf.Empty) {} ``` + To avoid inconsistencies between this document and the code, please refer to [the newest proto file](https://github.com/mosn/layotto/blob/main/spec/proto/runtime/v1/runtime.proto) for detailed input parameters and return values. ### State transactions + ```protobuf // Executes transactions for a specified store rpc ExecuteStateTransaction(ExecuteStateTransactionRequest) returns (google.protobuf.Empty) {} ``` + To avoid inconsistencies between this document and the code, please refer to [the newest proto file](https://github.com/mosn/layotto/blob/main/spec/proto/runtime/v1/runtime.proto) for detailed input parameters and return values. diff --git a/docs/en/component_specs/lock/common.md b/docs/en/component_specs/lock/common.md index 8b83e9e6af..5445699253 100644 --- a/docs/en/component_specs/lock/common.md +++ b/docs/en/component_specs/lock/common.md @@ -2,6 +2,7 @@ **Configuration file structure** The json configuration file has the following structure: + ```json "lock": { "": { @@ -20,6 +21,7 @@ The json configuration file has the following structure: } } ``` + You can configure the key/value configuration items that the component cares about in the metadata. 
For example, [redis component configuration](https://github.com/mosn/layotto/blob/main/configs/config_redis.json) is as follows: ```json diff --git a/docs/en/component_specs/lock/consul.md b/docs/en/component_specs/lock/consul.md index 4a1954c4f5..9953c8bdef 100644 --- a/docs/en/component_specs/lock/consul.md +++ b/docs/en/component_specs/lock/consul.md @@ -16,6 +16,7 @@ Example:configs/config_consul.json If you want to run the Consul demo, you need to start a Consul server with Docker first. command: + ```shell docker run --name consul -d -p 8500:8500 consul ``` \ No newline at end of file diff --git a/docs/en/component_specs/lock/etcd.md b/docs/en/component_specs/lock/etcd.md index 2b847f9879..e7cba08176 100644 --- a/docs/en/component_specs/lock/etcd.md +++ b/docs/en/component_specs/lock/etcd.md @@ -22,6 +22,7 @@ Steps: download etcd from `https://github.com/etcd-io/etcd/releases` (You can also use docker.) start: + ```shell ./etcd ``` @@ -38,6 +39,7 @@ go build >If build reports an error, it can be executed in the root directory of the project `go mod vendor` Execute after the compilation is successful: + ```shell @background ./layotto start -c ../../configs/runtime_config.json ``` diff --git a/docs/en/component_specs/lock/redis.md b/docs/en/component_specs/lock/redis.md index 89dd5f88fb..efac815ebd 100644 --- a/docs/en/component_specs/lock/redis.md +++ b/docs/en/component_specs/lock/redis.md @@ -12,6 +12,7 @@ Example: configs/config_redis.json If you want to run the redis demo, you need to start a Redis server with Docker first. command: + ```shell docker pull redis:latest docker run -itd --name redis-test -p 6380:6379 redis diff --git a/docs/en/component_specs/lock/zookeeper.md b/docs/en/component_specs/lock/zookeeper.md index 24f546d9e8..1034a04fc2 100644 --- a/docs/en/component_specs/lock/zookeeper.md +++ b/docs/en/component_specs/lock/zookeeper.md @@ -14,6 +14,7 @@ Example: configs/config_zookeeper.json If you want to run the zookeeper demo, you need to start a Zookeeper server with Docker first. command: + ```shell docker pull zookeeper docker run --privileged=true -d --name zookeeper --publish 2181:2181 -d zookeeper:latest @@ -25,9 +26,11 @@ docker run --privileged=true -d --name zookeeper --publish 2181:2181 -d zookeep cd ${project_path}/cmd/layotto go build ```` + >If build reports an error, it can be executed in the root directory of the project `go mod vendor` Execute after the compilation is successful: + ````shell ./layotto start -c ../../configs/config_zookeeper.json ```` diff --git a/docs/en/component_specs/pubsub/common.md b/docs/en/component_specs/pubsub/common.md index 058e265f61..dfc3be9cc2 100644 --- a/docs/en/component_specs/pubsub/common.md +++ b/docs/en/component_specs/pubsub/common.md @@ -2,6 +2,7 @@ **Configuration file structure** The json configuration file has the following structure: + ```json "pub_subs": { "": { @@ -20,6 +21,7 @@ The json configuration file has the following structure: } } ``` + You can configure the key/value configuration items that the component cares about in the metadata. 
For example, [redis component configuration](https://github.com/mosn/layotto/blob/main/configs/config_redis.json) is as follows: ```json diff --git a/docs/en/component_specs/pubsub/redis.md b/docs/en/component_specs/pubsub/redis.md index 89dd5f88fb..efac815ebd 100644 --- a/docs/en/component_specs/pubsub/redis.md +++ b/docs/en/component_specs/pubsub/redis.md @@ -12,6 +12,7 @@ Example: configs/config_redis.json If you want to run the redis demo, you need to start a Redis server with Docker first. command: + ```shell docker pull redis:latest docker run -itd --name redis-test -p 6380:6379 redis diff --git a/docs/en/component_specs/secret/common.md b/docs/en/component_specs/secret/common.md index e6c5600232..9c76e64c64 100644 --- a/docs/en/component_specs/secret/common.md +++ b/docs/en/component_specs/secret/common.md @@ -3,6 +3,7 @@ This component can access secrets from local files, environment variables, k8s, **Configuration file structure** The json configuration file has the following structure: + ```json "secret_store": { "": { @@ -21,7 +22,9 @@ The json configuration file has the following structure: } } ``` + Configuration examples of local file keys, local environment variables, and k8s keys: + ```json "secret_store": { "secret_demo": { diff --git a/docs/en/component_specs/sequencer/common.md b/docs/en/component_specs/sequencer/common.md index 35b4426558..a7880ace8f 100644 --- a/docs/en/component_specs/sequencer/common.md +++ b/docs/en/component_specs/sequencer/common.md @@ -2,6 +2,7 @@ **Configuration file structure** The json configuration file has the following structure: + ```json "sequencer": { "": { @@ -28,6 +29,7 @@ The json configuration file has the following structure: } } ``` + You can configure the key/value configuration items that the component cares about in the metadata. For example, [Etcd component configuration](https://github.com/mosn/layotto/blob/main/configs/runtime_config.json) is as follows: diff --git a/docs/en/component_specs/sequencer/etcd.md b/docs/en/component_specs/sequencer/etcd.md index 5d230a4f12..bec82c81a2 100644 --- a/docs/en/component_specs/sequencer/etcd.md +++ b/docs/en/component_specs/sequencer/etcd.md @@ -22,6 +22,7 @@ Steps: download etcd from `https://github.com/etcd-io/etcd/releases` (You can also use docker.) start: + ````shell ./etcd ```` @@ -34,9 +35,11 @@ default listen address `localhost:2379` cd ${project_path}/cmd/layotto go build ```` + >If build reports an error, it can be executed in the root directory of the project `go mod vendor` Execute after the compilation is successful: + ````shell ./layotto start -c ../../configs/runtime_config.json ```` diff --git a/docs/en/component_specs/sequencer/redis.md b/docs/en/component_specs/sequencer/redis.md index 3c1facd86c..ff7668ea0e 100644 --- a/docs/en/component_specs/sequencer/redis.md +++ b/docs/en/component_specs/sequencer/redis.md @@ -20,6 +20,7 @@ In order to avoid data loss and duplicate IDs, you need to use stand-alone redis If you want to run the redis demo, you need to start a Redis server with Docker first. 
command: + ```shell docker pull redis:latest docker run -itd --name redis-test -p 6379:6379 redis @@ -31,9 +32,11 @@ docker run -itd --name redis-test -p 6379:6379 redis cd ${project_path}/cmd/layotto go build ```` + >If build reports an error, it can be executed in the root directory of the project `go mod vendor` Execute after the compilation is successful: + ````shell ./layotto start -c ../../configs/config_redis.json ```` diff --git a/docs/en/component_specs/sequencer/zookeeper.md b/docs/en/component_specs/sequencer/zookeeper.md index e99b5fd73d..e74f5722ca 100644 --- a/docs/en/component_specs/sequencer/zookeeper.md +++ b/docs/en/component_specs/sequencer/zookeeper.md @@ -19,6 +19,7 @@ It is recommended that you monitor zookeeper carefully and prevent the overflow. If you want to run the zookeeper demo, you need to start a Zookeeper server with Docker first. command: + ```shell docker pull zookeeper docker run --privileged=true -d --name zookeeper --publish 2181:2181 -d zookeeper:latest @@ -30,9 +31,11 @@ docker run --privileged=true -d --name zookeeper --publish 2181:2181 -d zookeep cd ${project_path}/cmd/layotto go build ```` + >If build reports an error, it can be executed in the root directory of the project `go mod vendor` Execute after the compilation is successful: + ````shell ./layotto start -c ../../configs/config_zookeeper.json ```` diff --git a/docs/en/component_specs/state/common.md b/docs/en/component_specs/state/common.md index de5fb40c59..6ea7e7efda 100644 --- a/docs/en/component_specs/state/common.md +++ b/docs/en/component_specs/state/common.md @@ -2,6 +2,7 @@ **Configuration file structure** The json configuration file has the following structure: + ```json "state": { "": { @@ -20,6 +21,7 @@ The json configuration file has the following structure: } } ``` + You can configure the key/value configuration items that the component cares about in the metadata. For example, [redis component configuration](https://github.com/mosn/layotto/blob/main/configs/config_redis.json) is as follows: ```json diff --git a/docs/en/component_specs/state/redis.md b/docs/en/component_specs/state/redis.md index 89dd5f88fb..efac815ebd 100644 --- a/docs/en/component_specs/state/redis.md +++ b/docs/en/component_specs/state/redis.md @@ -12,6 +12,7 @@ Example: configs/config_redis.json If you want to run the redis demo, you need to start a Redis server with Docker first. command: + ```shell docker pull redis:latest docker run -itd --name redis-test -p 6380:6379 redis diff --git a/docs/en/design/actuator/actuator-design-doc.md b/docs/en/design/actuator/actuator-design-doc.md index d76f4fa89c..819a2e3f80 100644 --- a/docs/en/design/actuator/actuator-design-doc.md +++ b/docs/en/design/actuator/actuator-design-doc.md @@ -75,6 +75,7 @@ The path adopts restful style. After different Endpoints are registered in Actua ``` For example: + ``` /actuator/health/liveness ``` @@ -97,6 +98,7 @@ The paths registered by default are: ### 2.2.2. Health Endpoint #### /actuator/health/liveness GET + ```json // http://localhost:8080/actuator/health/liveness // HTTP/1.1 200 OK @@ -113,11 +115,13 @@ GET } } ``` + Return field description: HTTP status code 200 means success, other (status code above 400) means failure. There are three types of status fields: + ```go var ( // INIT means it is starting @@ -131,6 +135,7 @@ var ( #### /actuator/health/readiness GET + ```json // http://localhost:8080/actuator/health/readiness // HTTP/1.1 503 SERVICE UNAVAILABLE @@ -144,11 +149,13 @@ GET } } ``` + ### 2.2.3. 
Info Endpoint #### /actuator/info GET + ```json // http://localhost:8080/actuator/health/liveness // HTTP/1.1 200 OK @@ -194,6 +201,7 @@ explanation: The request arrives at the mosn, enters Layotto through the stream filter, and calls the Actuator. The http protocol implementation class (struct) of the stream filter layer is DispatchFilter, which is responsible for dispatching requests and calling Actuator according to the http path: + ```go type DispatchFilter struct { @@ -209,6 +217,7 @@ func (dis *DispatchFilter) OnDestroy() {} func (dis *DispatchFilter) OnReceive(ctx context.Context, headers api.HeaderMap, buf buffer.IoBuffer, trailers api.HeaderMap) api.StreamFilterStatus { } ``` + The protocol layer is decoupled from Actuator. If the API of other protocols is needed in the future, the stream filter of this protocol can be implemented. ### 2.4.2. Requests will be assigned to Endpoints inside Actuator @@ -216,6 +225,7 @@ The protocol layer is decoupled from Actuator. If the API of other protocols is Drawing lessons from the design of spring boot: Actuator abstracts the concept of Endpoint, which supports on-demand expansion and injection of Endpoint. Built-in Endpoints are health Endpoint and info Endpoint. + ```go type Actuator struct { endpointRegistry map[string]Endpoint @@ -231,6 +241,7 @@ func (act *Actuator) AddEndpoint(name string, ep Endpoint) { } ``` + When a new request arrives at Actuator,it will be assigned to the corresponding Endpoint according to the path. For example, /actuator/health/readiness will be assigned to health.Endpoint @@ -238,16 +249,19 @@ For example, /actuator/health/readiness will be assigned to health.Endpoint ### 2.4.3. health.Endpoint collect information from all the implementation of health.Indicator The components that need to report health check information should implement the Indicator interface and inject it into health.Endpoint: + ```go type Indicator interface { Report() Health } ``` + When a new request arrives,health.Endpoint will collect information from all the implementation of health.Indicator ### 2.4.4. info.Endpoint collect information from all the implementation of info.Contributor Components that need to report runtime information should implement the Contributor interface and inject it into info.Endpoint: + ```go type Contributor interface { GetInfo() (info interface{}, err error) diff --git a/docs/en/design/configuration/configuration-api-with-apollo.md b/docs/en/design/configuration/configuration-api-with-apollo.md index 9300014a5f..b9d8301083 100644 --- a/docs/en/design/configuration/configuration-api-with-apollo.md +++ b/docs/en/design/configuration/configuration-api-with-apollo.md @@ -20,6 +20,7 @@ The actual key stored in apollo will be 'key@$label' and the value will be raw v Tags will be stored in a special namespace 'sidecar_config_tags', with key='group@$key@$label' and value= + ```json { "tag1": "tag1value", @@ -49,6 +50,7 @@ A: Legacy systems using apollo can't migrate to our sidecar if we design like th 2. Save/delete APIs might be incompatible.The sidecar use fixed 'cluster' field configurated in config.json and fixed 'env' field in code,which means users can't pass 'cluster' and 'env' field as a parameter for save/delete API when they want to change some configuration items with other appid. ### config.json for sidecar + ```json { "config_store": { @@ -77,6 +79,7 @@ There isn't any official apollo sdk for Go,so I choose the repo with most stars Some problems with the sdk: 1. 
Users must declare all namespaces in AppConfig before connecting to the server and constructing a client,like: + ```go c := &config.AppConfig{ AppID: "testApplication_yang", diff --git a/docs/en/design/lock/lock-api-design.md b/docs/en/design/lock/lock-api-design.md index e0fd56876b..4b08c2d0fa 100644 --- a/docs/en/design/lock/lock-api-design.md +++ b/docs/en/design/lock/lock-api-design.md @@ -34,6 +34,7 @@ The most basic locking and unlocking API. TryLock is non-blocking, it return directly if the lock is not obtained. proto: + ```protobuf // Distributed Lock API // A non-blocking method trying to get a lock with ttl. @@ -98,6 +99,7 @@ message UnlockResponse { } ``` + **Q: What is the time unit of the expire field?** A: Seconds. @@ -184,6 +186,7 @@ An option is to ensure that the heartbeat interval low enough, such as 1 time pe 2. How to ensure reliable failure detection? For example, the following java code `unlock()` method may fail: + ```java try{ @@ -191,6 +194,7 @@ try{ lock.unlock() } ``` + If it is a lock in JVM, unlock can guarantee success (unless the entire JVM fails), but unlock may fail if it is called via the network. How to ensure that the heartbeat is interrupted after the call fails? Here shows the corner case: @@ -203,6 +207,7 @@ Here shows the corner case: Solving this case requires the app to report some fine-grained status with the heartbeat. We can define a http callback SPI, which is polled and detected by the sidecar, and the data structure returned by the callback is as follows: + ```json { "status": "UP", @@ -218,6 +223,7 @@ We can define a http callback SPI, which is polled and detected by the sidecar, } } ``` + The application has to handle status collection, reporting, cleaning up after the report is successful, and limiting the map capacity (for example, what if the map is too large when report fails too much times?), which requires the app to implement some complex logic, and it must be put in the SDK. 3. This implementation is actually the same as Solution A. It opens a separate connection for status management and failure detection, and user reports the status through this public connection when necessary. diff --git a/docs/en/design/pubsub/pubsub-api-and-compability-with-dapr-component.md b/docs/en/design/pubsub/pubsub-api-and-compability-with-dapr-component.md index 9d06f552d7..67e71b5d4a 100644 --- a/docs/en/design/pubsub/pubsub-api-and-compability-with-dapr-component.md +++ b/docs/en/design/pubsub/pubsub-api-and-compability-with-dapr-component.md @@ -50,6 +50,7 @@ In the future, when everyone really sits together to reach a consensus and build ### 2.2.2. Between APP and Layotto Use the same grpc API as Dapr + ```protobuf service AppCallback { // Lists all topics subscribed by this app. @@ -60,6 +61,7 @@ service AppCallback { } ``` + ```protobuf service Dapr { // Publishes events to the specific topic. @@ -67,6 +69,7 @@ service Dapr { } ``` + ### 2.2.3. Between Layotto and Component Use the same as Dapr; PublishRequest.Data and NewMessage.Data put json data conforming to CloudEvent 1.0 specification (can be deserialized and put into map[string]interface{}) diff --git a/docs/en/design/rpc/rpc-design-doc.md b/docs/en/design/rpc/rpc-design-doc.md index c8ebf3a016..4ca4ea4608 100644 --- a/docs/en/design/rpc/rpc-design-doc.md +++ b/docs/en/design/rpc/rpc-design-doc.md @@ -34,6 +34,7 @@ Mosn's xprotocol support popular protocols such as dubbo、thrift... In layotto, we design a convenient way to support xprotocols. 
The only task need to be finished is convert RPC request and response to xprotocol frames. #### config params + ```bigquery { "mosn": { diff --git a/docs/en/development/developing-api.md b/docs/en/development/developing-api.md index 5bf0241f59..b965e9e7b4 100644 --- a/docs/en/development/developing-api.md +++ b/docs/en/development/developing-api.md @@ -107,6 +107,7 @@ Need to have : List out which interfaces are there. On the one hand, the users of the province go to the proto and don’t know which APIs are related. On the other hand, it can avoid the disgust of users due to the lack of interface documentation - About the interface`s input and output parameters: use proto comments as interface documentation Considering that the interface document needs to be written in Both Chinese and English and may be inconsistent with the code after a long time, it is recommended not to write the interface document but to write the proTO comment in sufficient detail as the interface document. Such as: + ```protobuf // GetStateRequest is the message to get key-value states from specific state store. message GetStateRequest { diff --git a/docs/en/development/developing-component.md b/docs/en/development/developing-component.md index dba84e7e35..e484bfe9cc 100644 --- a/docs/en/development/developing-component.md +++ b/docs/en/development/developing-component.md @@ -111,6 +111,7 @@ For example, when implementing distributed locks using ZooKeeper, you need some ![img_7.png](../../img/development/component/img_7.png) Note: If there are errors in the demo code that shouldn't be there , you can panic directly. Later, we will directly use demo to run the integration test. If panic occurs, it means that the integration test fails. For example the demo/lock/redis/client.go: + ```go //.... cli, err := client.NewClient() diff --git a/docs/en/development/test-quickstart.md b/docs/en/development/test-quickstart.md index 6a7b7d7abe..00b9c0f0d4 100644 --- a/docs/en/development/test-quickstart.md +++ b/docs/en/development/test-quickstart.md @@ -15,12 +15,14 @@ Let's use the tool to test the documentation automatically! ## Principle Use the tool to execute all shell scripts in a markdown file sequentially, i.e. all scripts wrapped in: + ~~~markdown ```shell ``` ~~~ Note: The script wrapped in `bash` blocks will NOT be run. + ~~~markdown ```bash ``` @@ -37,6 +39,7 @@ Similarly, if the documentation will start containers like redis with Docker, yo ## step 3. Running documentation As an example, run the Quickstart documentation for the state API: + ```shell mdx docs/en/start/state/start.md ``` @@ -49,6 +52,7 @@ If the document runs with an error, it means that the case needs to be optimized This is also the idea of "test-driven development", optimize the documentation to make it "testable", right? For example, I ran the Quickstart documentation for the state API and found an error: + ```bash SaveState succeeded.key:key1 , value: hello world GetState succeeded.[key:key1 etag:1]: hello world @@ -68,6 +72,7 @@ main.main() /Users/qunli/projects/layotto/demo/state/redis/client.go:57 +0x2f4 exit status 2 ``` + After some troubleshooting, we found that the demo client did not pass the `etag` field when deleting the specified key, which caused the demo to run abnormally. 
See, through the automated testing documentation, we found a Quickstart bug :) @@ -85,6 +90,7 @@ if err := cli.SaveBulkState(ctx, store, item, &item2); err != nil { panic(err) } ``` + In addition to judging errors, the demo should also verify the test results, and panic directly if it does not meet expectations. This is equivalent to UT, after calling a method, the result of the call needs to be verified. The advantage of this is that once the Quickstart does not meet expectations, the demo will exit abnormally, allowing automated tools to find "the test failed! Find someone to fix it!" @@ -104,6 +110,7 @@ So even if the container is not deleted in the document, it will not affect the #### What should I do if I don't want a certain command to be executed? `mdx` by default will only execute shell code blocks, i.e. code blocks written like this: + ```shell ```shell ``` @@ -118,6 +125,7 @@ If you don't want a block of code to be executed, you can change the shell to so Again, take docs/en/start/state/start.md as an example. One of the scripts will run Layotto, but if you run it it will hang, preventing the test tool from continuing to run the next command: + ```bash ./layotto start -c ../../configs/config_redis.json ``` @@ -192,6 +200,7 @@ Add a hidden script to switch directories. For example write: cd demo/state/redis/ go run . ``` + ### Other markdown annotations The mdx tool provides many "markdown annotations" to help you write "runnable markdown files". If you are interested, you can check the [mdx documentation](https://github.com/seeflood/mdx#usage) @@ -199,12 +208,14 @@ The mdx tool provides many "markdown annotations" to help you write "runnable ma ### Fix the error and see the effect! After a fix, I ran the document again: + ```shell mdx docs/en/start/state/start.md ``` The document does not report an error, it can run normally and exit: + ```bash admindeMacBook-Pro-2:layotto qunli$ mdx docs/en/start/state/start.md latest: Pulling from library/redis @@ -251,6 +262,7 @@ The modification method is: 3. After making the above changes, it is time to test the new CI. Run in the project root directory + ```shell make style.quickstart ``` @@ -266,6 +278,7 @@ These documents will be tested: whereas if you run: + ```shell make style.quickstart QUICKSTART_VERSION=1.17 ``` diff --git a/docs/en/start/api_plugin/helloworld.md b/docs/en/start/api_plugin/helloworld.md index 84284a5da8..bf4c1c5e22 100644 --- a/docs/en/start/api_plugin/helloworld.md +++ b/docs/en/start/api_plugin/helloworld.md @@ -39,12 +39,14 @@ Check the code in [`main.go`](https://github.com/mosn/layotto/blob/d74ff0e8940e0 ``` ## step 2. 
invoke the helloworld API + ```shell # change directory cd ${project_path}/cmd/layotto_multiple_api # run demo client go run client/main.go ``` + The result will be: ```bash diff --git a/docs/en/start/configuration/start-apollo.md b/docs/en/start/configuration/start-apollo.md index 2c99a4cac2..0f1dc07fd6 100644 --- a/docs/en/start/configuration/start-apollo.md +++ b/docs/en/start/configuration/start-apollo.md @@ -48,6 +48,7 @@ The client demo calls Layotto to add, delete, modify, and query configuration ```shell cd ${project_path}/demo/configuration/common ``` + ```shell @if.not.exist client go build -o client ``` diff --git a/docs/en/start/configuration/start.md b/docs/en/start/configuration/start.md index 8c34ffe3f3..4845d9364b 100644 --- a/docs/en/start/configuration/start.md +++ b/docs/en/start/configuration/start.md @@ -17,6 +17,7 @@ cd docker/layotto-etcd # Start etcd and layotto with docker-compose docker-compose up -d ``` + #### **Compile locally (not for Windows)** You can run etcd with Docker, then compile and run Layotto locally. @@ -28,6 +29,7 @@ If you want to run this demo, you need to start a etcd server first. You can download etcd from `https://github.com/etcd-io/etcd/releases` (You can also use docker.) start it: + ```shell @background ./etcd ``` @@ -46,9 +48,11 @@ go build ``` Run it: + ```shell @background ./layotto start -c ../../configs/runtime_config.json ``` + ### step 2. Start client APP @@ -75,6 +79,7 @@ delete keys success write start receive subscribe resp store_name:"config_demo" app_id:"apollo" items: tags: > ``` + ### step 3. Stop containers and release resources #### **Docker Compose** @@ -87,9 +92,11 @@ docker-compose stop #### **Destroy the etcd container** If you started etcd with Docker, you can destroy the etcd container as follows: + ```shell docker rm -f etcd ``` + ## Next step diff --git a/docs/en/start/faas/start.md b/docs/en/start/faas/start.md index 4e4a22e325..b9fb09ab70 100644 --- a/docs/en/start/faas/start.md +++ b/docs/en/start/faas/start.md @@ -33,14 +33,17 @@ The example only needs a Redis server that can be used normally. As for where it > brew install redis > redis-server /usr/local/etc/redis.conf ``` + **Note: If you want external services to connect to redis, you need to modify the protected-mode in redis.conf to no,At the same time, add bind * -::* to let it monitor all interfaces.** #### B、Start minikube in virtualbox + containerd mode + ``` > minikube start --driver=virtualbox --container-runtime=containerd ``` #### C、Compile & install Layotto + ``` > git clone https://github.com/mosn/layotto.git > cd layotto @@ -51,6 +54,7 @@ The example only needs a Redis server that can be used normally. As for where it > sudo chmod +x layotto > sudo mv layotto /usr/bin/ ``` + **Note1: You need to modify the redis address as needed, the default address is: localhost:6379** **Note2: Need to modify the path of the wasm file in `./demo/faas/config.json` to `/home/docker/function_1.wasm` and `/home/docker/function_2.wasm`** @@ -70,18 +74,22 @@ The example only needs a Redis server that can be used normally. As for where it #### E、Modify & restart containerd Add laytto runtime configuration. 
+ ``` > minikube ssh > sudo vi /etc/containerd/config.toml [plugins.cri.containerd.runtimes.layotto] runtime_type = "io.containerd.layotto.v2" ``` + Restart containerd for the latest configuration to take effect + ``` sudo systemctl restart containerd ``` #### F、Install wasmer + ``` > curl -L -O https://github.com/wasmerio/wasmer/releases/download/2.0.0/wasmer-linux-amd64.tar.gz > tar zxvf wasmer-linux-amd64.tar.gz @@ -91,12 +99,14 @@ sudo systemctl restart containerd ### 4. Quickstart #### A、Start Layotto + ``` > minikube ssh > layotto start -c /home/docker/config.json ``` #### B、Create Layotto runtime + ``` > kubectl apply -f ./demo/faas/layotto-runtimeclass.yaml runtimeclass.node.k8s.io/layotto created @@ -104,6 +114,7 @@ runtimeclass.node.k8s.io/layotto created #### C、Create Function This operation will automatically inject function_1.wasm and function_2.wasm into the Virtualbox virtual machine. + ``` > kubectl apply -f ./demo/faas/function-1.yaml pod/function-1 created @@ -113,6 +124,7 @@ pod/function-2 created ``` #### D、Write inventory to Redis + ``` > redis-cli 127.0.0.1:6379> set book1 100 @@ -120,6 +132,7 @@ OK ``` #### E、Send request + ``` > minikube ip 192.168.99.117 diff --git a/docs/en/start/istio/start.md b/docs/en/start/istio/start.md index a8e83cc360..fc8486e152 100644 --- a/docs/en/start/istio/start.md +++ b/docs/en/start/istio/start.md @@ -20,6 +20,7 @@ before starting the demo,you must install some components as follows: 3. [Istio-1.5.x](https://github.com/istio/istio/releases/tag/1.5.2) Currently, mosn only supports `istio 1.5.X` (the support for `istio 1.10.X` is already in CR), so you need to download the corresponding version of `istio`. After decompressing, configure it as follows to facilitate subsequent operations. + ``` export PATH=$PATH:${your istio directory}/bin ``` @@ -28,63 +29,84 @@ before starting the demo,you must install some components as follows: 1. Run Docker Desktop 2. Run the following command to start `minikube` + ``` minikube start ``` + 3. Run the following command to start the services in the demo (all the dependent images have been uploaded to the docker hub) + ``` kubectl apply -f layotto-injected.yaml ``` + The contents of `layotto-injected.yaml` is [here](https://github.com/mosn/layotto/blob/istio-1.5.x/demo/istio/layotto-injected.yaml) ,just copy it。 4. Run the command `kubectl get pod` to check the status (it needs to download the dependent images during the first startup,so please wait patiently) + ``` NAME READY STATUS RESTARTS AGE client-665c5cc4f-tfxrk 2/2 Running 0 49m server-v1-685966b499-8hnqp 2/2 Running 0 49m server-v2-6cfff5dbb5-4hlgb 2/2 Running 0 49m ``` + When you see something similar to the above, it indicates that the startup is successful. We have deployed a client and a server. The server side is divided into V1 and V2 versions. 5. If you want to access the services in the `istio` cluster from the outside, you must configure the `istio ingress gateway` service, which will increase the cost of getting started. Therefore, the proxy method is used here to simplify this demo. Run the following command + ``` kubectl port-forward svc/client 9080:9080 ``` + Then you can directly access the following links, or you can directly access them in the browser. + ``` curl localhost:9080/grpc ``` + When you see the following response, the example starts successfully. + ``` GET /hello hello, i am layotto v1 ``` + ## 4. Using istio to dynamically change routing policy #### A. route according to version 1. 
Run the following command to create destination rules + ``` kubectl apply -f destination-rule-all.yaml ``` + The contents of `destination-rule-all.yaml` is [here](https://github.com/mosn/layotto/blob/istio-1.5.x/demo/istio/layotto-destination-rule-all.yaml) 2. Run the following command to specify that only the V1 service is accessed + ``` kubectl apply -f layotto-virtual-service-all-v1.yaml ``` + The contents of `layotto-virtual-service-all-v1.yaml` is [here](https://github.com/mosn/layotto/blob/istio-1.5.x/demo/istio/layotto-virtual-service-all-v1.yaml) 3. After the above command is executed, subsequent requests will only get the return result of v1, as follows: + ``` GET /hello hello, i am layotto v1 ``` + #### B. route according to a specific header 1. Run the following command to modify the routing rules to access the v1 service when the request header contains `name:layotto`, and other access to the v2 service + ``` kubectl apply -f layotto-header-route.yaml ``` + 2. Send the request to see the result + ``` curl -H 'name: layotto' localhost:9080/grpc ``` @@ -95,16 +117,21 @@ before starting the demo,you must install some components as follows: 2. For the source code of client and server used in the example, please refer to [here](https://github.com/mosn/layotto/tree/istio-1.5.x/demo/istio). 3. In order to get started simple, the `layotto-injected.yaml` file used above has been injected through istio already. This injection process is as follows: 1. Run the following command to specify `istio` to use `Layotto` as the data plane + ``` istioctl manifest apply --set .values.global.proxy.image="mosnio/proxyv2:layotto" --set meshConfig.defaultConfig.binaryPath="/usr/local/bin/mosn" ``` + 2. Sidecar injection is achieved through `kube-inject` + ``` istioctl kube-inject -f layotto.yaml > layotto-injected.yaml ``` + The contents of `layotto.yaml` is [here](https://github.com/mosn/layotto/blob/istio-1.5.x/demo/istio/layotto.yaml) 3. Run the following command to replace all `/usr/local/bin/envoy` in `layotto-injected.yaml` with `/usr/local/bin/mosn` + ``` sed -i "s/\/usr\/local\/bin\/envoy/\/usr\/local\/bin\/mosn/g" ./layotto-injected.yaml ``` diff --git a/docs/en/start/network_filter/tcpcopy.md b/docs/en/start/network_filter/tcpcopy.md index 9d86d931dd..4e51266c08 100644 --- a/docs/en/start/network_filter/tcpcopy.md +++ b/docs/en/start/network_filter/tcpcopy.md @@ -3,6 +3,7 @@ ## Introduction When you run the demo according to the quick-start document [Configuration demo with apollo](en/start/configuration/start-apollo.md), you may notice that there is such a configuration in the configuration file config_apollo.json: + ```json { "type": "tcpcopy", @@ -16,6 +17,7 @@ When you run the demo according to the quick-start document [Configuration demo } } ``` + The meaning of this configuration is to load the tcpcopy plug-in at startup to dump the tcp traffic. After enabling this configuration, when Layotto receives a request and the conditions for traffic dump are met, it will write the binary request data to the local file system. 
diff --git a/docs/en/start/pubsub/start.md b/docs/en/start/pubsub/start.md index a520c21d56..c802a88344 100644 --- a/docs/en/start/pubsub/start.md +++ b/docs/en/start/pubsub/start.md @@ -26,6 +26,7 @@ Start subscriber: ```shell @background ./subscriber -s pub_subs_demo ``` + If the following information is printed out, it means the startup is successful: ```bash @@ -79,6 +80,7 @@ Use the following command to check whether Redis is installed: ```shell docker images ``` + ![img.png](../../../img/mq/start/img.png) 3. Run the container @@ -110,6 +112,7 @@ After completion, the layotto file will be generated in the directory, run it: ```shell @background ./layotto start -c ../../configs/config_redis.json ``` + ### Step 3. Run the Publisher program and call Layotto to publish events @@ -147,9 +150,11 @@ docker-compose stop #### **Destroy the Redis container** If you started Redis with Docker, you can destroy the Redis container as follows: + ```shell docker rm -f redis-test ``` + ### Next Step diff --git a/docs/en/start/rpc/dubbo_json_rpc.md b/docs/en/start/rpc/dubbo_json_rpc.md index b0a7550248..35a7f07851 100644 --- a/docs/en/start/rpc/dubbo_json_rpc.md +++ b/docs/en/start/rpc/dubbo_json_rpc.md @@ -6,6 +6,7 @@ ![jsonrpc.jpg](../../../img/rpc/jsonrpc.jpg) ### step 2. Compile and start layotto + ```shell @if.not.exist layotto go build -o layotto cmd/layotto/main.go ``` @@ -36,6 +37,7 @@ export DUBBO_GO_CONFIG_PATH="../conf/dubbogo.yml" ``` Build dubbo server: + ```shell @if.not.exist server go build -o server . ``` @@ -47,6 +49,7 @@ Start dubbo server: ``` ### step 4. call runtime InvokerService api. + ```shell @cd ${project_path} go run demo/rpc/dubbo_json_rpc/dubbo_json_client/client.go -d '{"jsonrpc":"2.0","method":"GetUser","params":["A003"],"id":9527}' ``` diff --git a/docs/en/start/rpc/helloworld.md b/docs/en/start/rpc/helloworld.md index 845b0a8527..5a5497d2e6 100644 --- a/docs/en/start/rpc/helloworld.md +++ b/docs/en/start/rpc/helloworld.md @@ -26,11 +26,13 @@ Let's run it: ``` ### step 2. start echoserver + ```shell @background go run ${project_path}/demo/rpc/http/echoserver/echoserver.go ``` ### step 3. call runtime InvokerService api. + ```shell go run ${project_path}/demo/rpc/http/echoclient/echoclient.go -d 'hello layotto' ``` diff --git a/docs/en/start/secret/start.md b/docs/en/start/secret/start.md index 77c9637a53..cc25d21770 100644 --- a/docs/en/start/secret/start.md +++ b/docs/en/start/secret/start.md @@ -18,6 +18,7 @@ cd ${project_path}/cmd/layotto ``` build: + ```shell @if.not.exist layotto go build -o layotto ``` diff --git a/docs/en/start/sequencer/start.md b/docs/en/start/sequencer/start.md index b5286b01b0..ecc36f3c1c 100644 --- a/docs/en/start/sequencer/start.md +++ b/docs/en/start/sequencer/start.md @@ -21,6 +21,7 @@ cd docker/layotto-etcd # Start etcd and layotto with docker-compose docker-compose up -d ``` + #### **Compile locally (not for Windows)** You can run etcd with Docker, then compile and run Layotto locally. @@ -84,6 +85,7 @@ Next id:next_id:9 Next id:next_id:10 Demo success! ``` + ### step 3. 
Stop containers and release resources #### **Docker Compose** @@ -96,9 +98,11 @@ docker-compose stop #### **Destroy the etcd container** If you started etcd with Docker, you can destroy the etcd container as follows: + ```shell docker rm -f etcd ``` + ### Next step diff --git a/docs/en/start/state/start.md b/docs/en/start/state/start.md index 1d307c91f2..36c849016f 100644 --- a/docs/en/start/state/start.md +++ b/docs/en/start/state/start.md @@ -121,9 +121,11 @@ docker-compose stop #### **Destroy the Redis container** If you started Redis with Docker, you can destroy the Redis container as follows: + ```shell docker rm -f redis-test ``` + ### Next step diff --git a/docs/en/start/stream_filter/flow_control.md b/docs/en/start/stream_filter/flow_control.md index 017309a23e..0e088f095a 100644 --- a/docs/en/start/stream_filter/flow_control.md +++ b/docs/en/start/stream_filter/flow_control.md @@ -24,6 +24,7 @@ There is a config of flow control in [runtime_config.json](https://github.com/mo } ] ``` + this can help `/spec.proto.runtime.v1.Runtime/SayHello` method has a flow control feature, which means we can only access this method below 5 times in 1 second. this code of the client is here [client.go](https://github.com/mosn/layotto/blob/main/demo/flowcontrol/client.go),the logic is very simple, send 10 times request to the server,and the result is below: diff --git a/docs/en/start/trace/prometheus.md b/docs/en/start/trace/prometheus.md index a91094c4aa..e50a275072 100644 --- a/docs/en/start/trace/prometheus.md +++ b/docs/en/start/trace/prometheus.md @@ -41,11 +41,13 @@ The corresponding call-side code is in [client.go](https://github.com/mosn/layot ``` Build the demo client: + ```shell @if.not.exist client go build -o client ``` Run the demo client: + ```shell ./client ``` diff --git a/docs/en/start/trace/skywalking.md b/docs/en/start/trace/skywalking.md index 52fec07d7a..b4a1484721 100644 --- a/docs/en/start/trace/skywalking.md +++ b/docs/en/start/trace/skywalking.md @@ -53,6 +53,7 @@ cd ${project_path}/cmd/layotto_multiple_api/ ``` Build it: + ```shell @if.not.exist layotto go build -o layotto ``` @@ -62,6 +63,7 @@ Run it: ```shell @background ./layotto start -c ../../configs/config_trace_skywalking.json ``` + ## Run Demo @@ -71,11 +73,13 @@ Run it: ``` Build the demo client: + ```shell @if.not.exist client go build -o client ``` Run the demo client: + ```shell ./client ``` diff --git a/docs/en/start/trace/trace.md b/docs/en/start/trace/trace.md index 89eb152c31..22bc032b6f 100644 --- a/docs/en/start/trace/trace.md +++ b/docs/en/start/trace/trace.md @@ -24,6 +24,7 @@ This configuration can turn on the trace capability of layotto, allowing layotto You can start a layotto server as follows: - Build + ```shell cd cmd/layotto_multiple_api/ ``` diff --git a/docs/en/start/wasm/start.md b/docs/en/start/wasm/start.md index 380df2eb98..56ce674da1 100644 --- a/docs/en/start/wasm/start.md +++ b/docs/en/start/wasm/start.md @@ -26,6 +26,7 @@ The example only needs a Redis server that can be used normally. As for where it Here, we run redis with docker: Run redis container: + ```shell docker run -d --name redis-test -p 6379:6379 redis ``` @@ -64,6 +65,7 @@ go build -tags wasmer -o ./layotto_wasmer ./cmd/layotto/main.go ``` Run it: + ```shell @background ./layotto_wasmer start -c ./demo/faas/config.json ``` @@ -71,6 +73,7 @@ Run it: **Note: You need to modify the redis address as needed, the default address is: localhost:6379** #### step 3. 
send request + ```shell curl -H 'id:id_1' 'localhost:2045?name=book1' ``` diff --git a/docs/zh/api_reference/comment_spec_of_proto.md b/docs/zh/api_reference/comment_spec_of_proto.md index 1188e9840c..6a388b030a 100644 --- a/docs/zh/api_reference/comment_spec_of_proto.md +++ b/docs/zh/api_reference/comment_spec_of_proto.md @@ -3,6 +3,7 @@ 避免在注释符号`//`之间添加空行,否则生成工具protoc-gen-doc会生成格式错乱的文档。 一个坏示例: + ``` message BadCase{ // XXXXXXXX @@ -13,7 +14,9 @@ message BadCase{ field A } ``` + 一个好示例: + ``` message GoodCase{ // XXXXXXXX @@ -22,10 +25,12 @@ message GoodCase{ field A } ``` + 或者你可以直接使用另一种注释符号:`/* */` 假如你想添加一些注释在proto文件里,但不想让它们出现在生成的文档里,你可以在注释里使用`@exclude`前缀。 示例:只包括id字段的注释 + ``` /** * @exclude diff --git a/docs/zh/api_reference/how_to_generate_api_doc.md b/docs/zh/api_reference/how_to_generate_api_doc.md index a2349eaed3..7a095b5955 100644 --- a/docs/zh/api_reference/how_to_generate_api_doc.md +++ b/docs/zh/api_reference/how_to_generate_api_doc.md @@ -8,6 +8,7 @@ ```bash make proto.code ``` + 该命令会用 docker 启动 protoc,生成`.pb.go`代码。 这种方式更方便,开发者不需要修改本地 protoc 版本,省去了很多烦恼。 @@ -35,6 +36,7 @@ protoc -I. --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_o ```bash make proto.doc ``` + 该命令会用 docker 启动 protoc-gen-doc,生成文档 ### **用 docker 启动 protoc-gen-doc** diff --git a/docs/zh/blog/code/layotto-rpc/index.md b/docs/zh/blog/code/layotto-rpc/index.md index 0b9d4d8297..8607c10acf 100644 --- a/docs/zh/blog/code/layotto-rpc/index.md +++ b/docs/zh/blog/code/layotto-rpc/index.md @@ -21,6 +21,7 @@ ## 概述 Layotto 作为区别于网络代理 Service Mesh 的分布式原语集合且使用标准协议的 Runtime,具有明确和丰富的语义 API,而 RPC API 就是众多 API 中的一种。通过 RPC API 应用程序开发者可以通过与同样使用 Sidecar 架构的应用本地 Layotto 实例进行交互,从而间接的调用不同服务的方法,并可以利用内置能力完成分布式追踪和诊断,流量调控,错误处理,安全链路等操作。并且 Layotto 的 RPC API 基于 Mosn 的 Grpc handler 设计,除了 Http/Grpc,与其它服务通信时还可以利用Mosn的多协议机制,使用 X-Protocol 协议进行安全可靠通信。如下代码所示,RPC API 的接口与 Dapr 一致,通过 Grpc 接口 InvokeService 即可进行 RPC 调用。 + ```go type DaprClient interface { // Invokes a method on a remote Dapr app. 
@@ -38,6 +39,7 @@ type DaprClient interface { ### 0x00 Layotto 初始化 RPC Layotto 启动流程涉及众多本流程,在此只分析下跟 RPC 相关的及下述流程用的初始化,因为 Layotto 是建立在 Mosn 之上,所以从 Main 函数出发,urfave/cli 库会调用 Mosn 的 StageManager 初始化 Mosn, 进而在 Mosn NetworkFilter 中初始化 GrpcServer,具体流程如下。 + ```go mosn.io/mosn/pkg/stagemanager.(*StageManager).runInitStage at stage_manager.go => @@ -149,6 +151,7 @@ func (stm *StageManager) runStartStage() { 根据 [Dubbo Json Rpc Example](https://mosn.io/layotto/#/zh/start/rpc/dubbo_json_rpc)例子运行如下命令 go run demo/rpc/dubbo_json_rpc/dubbo_json_client/client.go -d '{"jsonrpc":"2.0","method":"GetUser","params":["A003"],"id":9527}' 使用 Layotto 对 App 提供的 Grpc API InvokeService 发起 RPC 调用,经过数据填充和连接建立等流程,最终通过 Grpc clientStream 中调用 SendMsg 向 Layotto 发送数据,具体流程如下。 + ```go func main() { @@ -200,6 +203,7 @@ google.golang.org/grpc/internal/transport.(*http2Client).Write at http2_client.g ### 0x02 Mosn EventLoop 读协程处理请求数据 上文说过 Layotto 的内核相当于是 Mosn,所以当网络连接数据到达时,会先到 Mosn 的 L4 网络层进行读写,具体流程如下。 + ```go mosn.io/mosn/pkg/network.(*listener).accept at listener.go => @@ -233,7 +237,9 @@ func (c *connection) startRWLoop(lctx context.Context) { } } ``` + 在 startRWLoop 方法中我们可以看到会分别开启两个协程来分别处理该连接上的读写操作,即 startReadLoop 和 startWriteLoop,在 startReadLoop 中经过如下流转,把网络层读到的数据,由 filterManager 过滤器管理器把数据交由过滤器链进行处理,具体流程如下。 + ```go mosn.io/mosn/pkg/network.(*connection).doRead at connection.go => @@ -296,6 +302,7 @@ func (f *grpcFilter) dispatch(buf buffer.IoBuffer) { ### 0x03 Grpc Sever 作为 NetworkFilter 处理请求 第一阶段中从原始连接读取数据,会进入 Grpc Serve 处理,Serve 方法通过 net.Listener 监听连接,每次启动一个新的协程来处理新的连接(handleRawConn),建立一个基于Http2 的 Transport 进行传输层的 RPC 调用,具体流程如下。 + ```go google.golang.org/grpc.(*Server).handleRawConn at server.go func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { @@ -382,6 +389,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info ### 0x04 Layotto 发送 RPC 请求并写入 Local 虚拟连接 接上述 0x03 流程,从 Runtime_InvokeService_Handler 起,由 GRPC 默认 API 转换为 Dapr API,进入 Layotto 提供的对接 Mosn 的轻量 RPC 框架,具体流程如下。 + ```go mosn.io/layotto/spec/proto/runtime/v1._Runtime_InvokeService_Handler at runtime.pb.go => @@ -512,7 +520,9 @@ func (p *connPool) Get(ctx context.Context) (*wrapConn, error) { } => ``` + 上面第二步创建新的连接需要注意下,是调用了 init 阶段的 RegistChannel 初始化的协议中的 dialFunc func() (net.Conn, error),因为配置里与 Mosn 交互用的是 Http 协议,所以这里是 newHttpChanel,目前还支持 Bolt,Dubbo 等,详见如下代码。 + ```go mosn.io/layotto/components/rpc/invoker/mosn/channel.newHttpChannel at httpchannel.go // newHttpChannel is used to create rpc.Channel according to ChannelConfig @@ -560,6 +570,7 @@ func newHttpChannel(config ChannelConfig) (rpc.Channel, error) { ### 0x05 Mosn 读取 Remote 并执行 Filter 和代理转发 (1) 与 0x02 类似,filtermanager 执行过滤器处理阶段,这里会到 proxy 中进行代理转发,详见如下代码。 + ```go ... 
mosn.io/mosn/pkg/network.(*filterManager).onContinueReading at filtermanager.go @@ -581,7 +592,9 @@ func (p *proxy) OnData(buf buffer.IoBuffer) api.FilterStatus { } => ``` + (2) serverStreamConnection.serve 监听并处理请求到 downstream OnReceive,详见如下代码。 + ```go mosn.io/mosn/pkg/stream/http.(*serverStream).handleRequest at stream.go func (s *serverStream) handleRequest(ctx context.Context) { @@ -634,7 +647,9 @@ func (s *downStream) OnReceive(ctx context.Context, headers types.HeaderMap, dat } ``` + (3) 上述 ScheduleAuto 调度后,经过 downStream 的 reveive 的各个阶段处理,经过 upstreamRequest、http clientStream 等处理,最终从网络层的 connection.Write 发送数据并进入 WaitNotify 阶段阻塞,详见如下代码。 + ```go mosn.io/mosn/pkg/sync.(*workerPool).ScheduleAuto at workerpool.go => @@ -694,6 +709,7 @@ func (s *downStream) waitNotify(id uint32) (phase types.Phase, err error) { ### 0x06 Dubbo-go-sample server 收到请求返回响应 这里就是 dubbo-go-sample server的处理,暂不展开,贴下日志信息,感兴趣的同学可以回去翻看源码。 + ``` [2022-04-18/21:03:18 github.com/apache/dubbo-go-samples/rpc/jsonrpc/go-server/pkg.(*UserProvider2).GetUser: user_provider2.go: 53] userID:"A003" [2022-04-18/21:03:18 github.com/apache/dubbo-go-samples/rpc/jsonrpc/go-server/pkg.(*UserProvider2).GetUser: user_provider2.go: 56] rsp:&pkg.User{ID:"113", Name:"Moorse", Age:30, sex:0, Birth:703394193, Sex:"MAN"} @@ -702,6 +718,7 @@ func (s *downStream) waitNotify(id uint32) (phase types.Phase, err error) { ### 0x07 Mosn 框架处理响应并写回 Remote 虚拟连接 接上述 0x05 第三阶段,在 reveive 的循环阶段的 UpRecvData 阶段进入处理响应逻辑,经过一系列处理最终 Response 写回 0x04 中的 remote 虚拟连接,具体流程如下。 + ```go mosn.io/mosn/pkg/proxy.(*downStream).receive at downstream.go func (s *downStream) waitNotify(id uint32) (phase types.Phase, err error) { @@ -764,6 +781,7 @@ mosn.io/mosn/pkg/stream/http.(*streamConnection).Write at stream.go ### 0x08 Layotto 接收 RPC 响应并读取 Local 虚拟连接 上述 0x04 启动的 readloop 协程读IO被激活,从连接读取数Mosn 传回的数据,然后交给 hstate 管道中转处理再返回给请求协程,具体流程如下。 + ```go mosn.io/layotto/components/rpc/invoker/mosn/channel.(*connPool).readloop at connpool.go // readloop is loop to read connected then exec onDataFunc @@ -847,6 +865,7 @@ func (h *httpChannel) Do(req *rpc.RPCRequest) (*rpc.RPCResponse, error) { ### 0x09 Grpc Sever 处理数据帧返回给客户端 Grpc 并没有直接写入数据到连接,而是用协程异步 loop 循环从一个缓存结构里面获取帧然后写回到客户端,具体流程如下。 + ```go google.golang.org/grpc/internal/transport.NewServerTransport at http2_server.go func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { @@ -884,6 +903,7 @@ mosn.io/mosn/pkg/network.(*connection).doWrite at connection.go ### 0x10 dubbo-go-sample client 接收响应 接上述 0x01 发送数据之后会阻塞在 Client grpc 底层读IO中, Layotto经过上述一些列处理层层返回数据激活Client底层Read IO,具体流程如下。 + ```go google.golang.org/grpc.(*ClientConn).Invoke at call.go => @@ -909,6 +929,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt ... 
} ``` + 最终收到返回数据: {"jsonrpc":"2.0","id":9527,"result":{"id":"113","name":"Moorse","age":30,"time":703394193,"sex":"MAN"}} diff --git a/docs/zh/blog/code/start_process/start_process.md b/docs/zh/blog/code/start_process/start_process.md index ae3170801d..b9e41b6e3a 100644 --- a/docs/zh/blog/code/start_process/start_process.md +++ b/docs/zh/blog/code/start_process/start_process.md @@ -32,6 +32,7 @@ func init() { ``` cmd 的 action 开始执行: + ``` Action: func(c *cli.Context) error { app := mosn.NewMosn() @@ -60,6 +61,7 @@ cmd 的 action 开始执行: ### 2.回调函数NewRuntimeGrpcServer分析 MOSN 启动的时候回调 NewRuntimeGrpcServer ,data 是未解析的配置文件,opts 是 grpc 的配置项,返回 Grpc server + ``` func NewRuntimeGrpcServer(data json.RawMessage, opts ...grpc.ServerOption) (mgrpc.RegisteredServer, error) { // 将原始的配置文件解析成结构体形式。 @@ -132,6 +134,7 @@ type MosnRuntime struct { ``` runtime 的 run 函数逻辑如下: + ``` func (m *MosnRuntime) Run(opts ...Option) (mgrpc.RegisteredServer, error) { // 启动标志 @@ -188,6 +191,7 @@ func (m *MosnRuntime) Run(opts ...Option) (mgrpc.RegisteredServer, error) { } ``` + 组件的初始化函数 initRuntime : ``` @@ -207,7 +211,9 @@ func (m *MosnRuntime) initRuntime(r *runtimeOptions) error { return nil } ``` + DefaultInitRuntimeStage 组件初始化逻辑,调用每个组件的 init 方法: + ``` func DefaultInitRuntimeStage(o *runtimeOptions, m *MosnRuntime) error { ... @@ -251,6 +257,7 @@ func DefaultInitRuntimeStage(o *runtimeOptions, m *MosnRuntime) error { return nil } ``` + 以 file 组件为例,看下初始化函数: ``` @@ -275,6 +282,7 @@ func (m *MosnRuntime) initFiles(files ...*file.FileFactory) error { return nil } ``` + 至此 MOSN、Grpc、Layotto 都已经启动完成,通过 Grpc 的接口就可以调用到组件的代码逻辑。 ## 总结 diff --git a/docs/zh/blog/code/webassembly/index.md b/docs/zh/blog/code/webassembly/index.md index c613dc1db2..b3f19c4dd8 100644 --- a/docs/zh/blog/code/webassembly/index.md +++ b/docs/zh/blog/code/webassembly/index.md @@ -33,6 +33,7 @@ WebAssemly 简称 WASM,是一种运行在沙箱化的执行环境中的可移 VM:Virtual Machine 虚拟机,Runtime类型有:wasmtime、Wasmer、V8、 Lucet、WAMR、wasm3,本文例子中使用 wasmer 1、首先看 [quickstart例子](https://mosn.io/layotto/#/zh/start/wasm/start) 中 stream filter 的配置,如下可以看到配置中有两个 WASM 插件,使用 wasmer VM 分别启动一个实例,详见如下配置: + ```json "stream_filters": [ { @@ -58,7 +59,9 @@ VM:Virtual Machine 虚拟机,Runtime类型有:wasmtime、Wasmer、V8、 Lu } ] ``` + 上述配置中 function1 主要逻辑就是接收 HTTP 请求,然后通过 ABI 调用 function2,并返回 function2 结果,详见如下代码: + ```go func (ctx *httpHeaders) OnHttpRequestBody(bodySize int, endOfStream bool) types.Action { //1. get request body @@ -87,7 +90,9 @@ func (ctx *httpHeaders) OnHttpRequestBody(bodySize int, endOfStream bool) types. return types.ActionContinue } ``` + function2 主要逻辑就是接收 HTTP 请求,然后通过 ABI 调用 redis,并返回 redis 结果,详见如下代码: + ```go func (ctx *httpHeaders) OnHttpRequestBody(bodySize int, endOfStream bool) types.Action { //1. get request body @@ -112,6 +117,7 @@ func (ctx *httpHeaders) OnHttpRequestBody(bodySize int, endOfStream bool) types. 
``` 2、对应图1 WASM 框架 中的 Manager 部分,在 Mosn filter Init 阶段进行初始化,详见如下代码: + ```go // Create a proxy factory for WasmFilter func createProxyWasmFilterFactory(confs map[string]interface{}) (api.StreamFilterChainFactory, error) { @@ -185,6 +191,7 @@ func createProxyWasmFilterFactory(confs map[string]interface{}) (api.StreamFilte ``` 3、对应图1 WASM 框架中 VM 部分,NewWasmPlugin 用来创建初始化 WASM 插件,其中 VM、Module 和 Instance 分别对应 WASM 中的虚拟机、模块和实例,详见如下代码: + ```go func NewWasmPlugin(wasmConfig v2.WasmPluginConfig) (types.WasmPlugin, error) { // check instance num @@ -254,6 +261,7 @@ actual := plugin.EnsureInstanceNum(wasmConfig.InstanceNum) ``` 4、 对应图1 WASM 框架 中的 ABI 部分,OnPluginStart 方法中会调用 proxy-wasm-go-host 的对应方法对 ABI 的 Exports 和 Imports 等进行相关设置。 + ```go // Execute the plugin of FilterConfigFactory func (f *FilterConfigFactory) OnPluginStart(plugin types.WasmPlugin) { @@ -327,6 +335,7 @@ Layotto 中 WASM 的工作流程大致如下图2 Layotto & Mosn WASM 工作流
图2 Layotto & Mosn WASM 工作流程
1、由 Layotto 底层 Mosn 收到请求,经过 workpool 调度,在 proxy downstream 中按照配置依次执行 StreamFilterChain 到 Wasm StreamFilter 的 OnReceive 方法,具体逻辑详见如下代码: + ```go func (f *Filter) OnReceive(ctx context.Context, headers api.HeaderMap, buf buffer.IoBuffer, trailers api.HeaderMap) api.StreamFilterStatus { // 获取 WASM 插件的 id @@ -422,6 +431,7 @@ func (f *Filter) OnReceive(ctx context.Context, headers api.HeaderMap, buf buffe ``` 2、proxy-wasm-go-host 将 Mosn 请求三元组编码成规范指定的格式,并调用Proxy-Wasm ABI 规范中的 proxy_on_request_headers 等对应接口,调用 WASMER 虚拟机将请求信息传至 WASM 插件。 + ```go func (a *ABIContext) CallWasmFunction(funcName string, args ...interface{}) (interface{}, Action, error) { ff, err := a.Instance.GetExportsFunc(funcName) @@ -448,6 +458,7 @@ func (a *ABIContext) CallWasmFunction(funcName string, args ...interface{}) (int // nativeFunction = function.Native() // _ = nativeFunction(1, 2, 3) // Native 会将 Function 转换为可以调用的原生 Go 函数 + ```go func (self *Function) Native() NativeFunction { ... @@ -479,6 +490,7 @@ func (self *Function) Native() NativeFunction { ``` 4、proxy-wasm-go-sdk 将请求数据从规范格式转换为便于用户使用的格式,然后调用用户扩展代码。proxy-wasm-go-sdk 基于 proxy-wasm/spec 实现,定义了函数访问系统资源及基础设施服务的接口,并在此基础上结合 Runtime API 的思路,增加了对基础设施访问的ABI。 + ```go // function1主要逻辑就是接收 HTTP 请求,然后通过 ABI 调用 function2,并返回 function2 结果,具体代码如下所示 func (ctx *httpHeaders) OnHttpRequestBody(bodySize int, endOfStream bool) types.Action { @@ -512,6 +524,7 @@ func (ctx *httpHeaders) OnHttpRequestBody(bodySize int, endOfStream bool) types. 5、WASM 插件通过初始化时 RegisterFunc 注册的 ABI Imports 函数,比如例子中 Function1 RPC 调用 Function2 的 ProxyInvokeService,Function2 用以获取 Redis 中指定 Key 的 Valye 的 ProxyGetState,具体代码如下所示: Function1 通过 ProxyInvokeService 调用 Function2,ProxyInvokeService 对应 Imports 函数 proxy_invoke_service + ```go func ProxyInvokeService(instance common.WasmInstance, idPtr int32, idSize int32, methodPtr int32, methodSize int32, paramPtr int32, paramSize int32, resultPtr int32, resultSize int32) int32 { id, err := instance.GetMemory(uint64(idPtr), uint64(idSize)) @@ -540,7 +553,9 @@ func ProxyInvokeService(instance common.WasmInstance, idPtr int32, idSize int32, return copyIntoInstance(instance, ret, resultPtr, resultSize).Int32() } ``` + Function2 通过 ProxyGetState 获取 Redis 中指定 Key 的 Valye, ProxyGetState 对应 Imports 函数 proxy_get_state + ```go func ProxyGetState(instance common.WasmInstance, storeNamePtr int32, storeNameSize int32, keyPtr int32, keySize int32, valuePtr int32, valueSize int32) int32 { storeName, err := instance.GetMemory(uint64(storeNamePtr), uint64(storeNameSize)) @@ -563,6 +578,7 @@ func ProxyGetState(instance common.WasmInstance, storeNamePtr int32, storeNameSi return copyIntoInstance(instance, ret, valuePtr, valueSize).Int32() } ``` + 以上 Layotto rpc 流程简要说是通过两个虚拟连接借助 Dapr API 和 底层 Mosn 实现 [5],具体可参见前序文章[Layotto源码解析——处理RPC请求](https://mosn.io/layotto/#/zh/blog/code/layotto-rpc/index),从 Redis 中获取数据可直接阅读 Dapr State 相关代码,在此不一一展开了。 ### FaaS模式 @@ -577,6 +593,7 @@ func ProxyGetState(instance common.WasmInstance, storeNamePtr int32, storeNameSi
图3 Layotto FaaS Workflow
这里简单看一下 containerd-shim-layotto-v2 的主函数,可以看到 shim.Run 设置的 WASM 的运行时为 io.containerd.layotto.v2,也就是 containerd 中 plugins.cri.containerd.runtimes 对应插件的 runtime_type。当创建 Pod 时,在 yaml 的 spec 中指定 runtimeClassName: layotto,经过调度,最终 kubelet 就会通过 cri-plugin 调用 containerd 中的 containerd-shim-layotto-v2 运行时来进行加载和运行等相关处理。 + ```go func main() { startLayotto() diff --git a/docs/zh/building_blocks/actuator/actuator.md b/docs/zh/building_blocks/actuator/actuator.md index 43f660cb8d..7efcba4451 100644 --- a/docs/zh/building_blocks/actuator/actuator.md +++ b/docs/zh/building_blocks/actuator/actuator.md @@ -16,6 +16,7 @@ Actuator API一般是给运维系统用的,比如k8s调用Actuator API监控La GET 不需要传参 + ```json // http://localhost:8080/actuator/health/liveness // HTTP/1.1 200 OK @@ -32,10 +33,12 @@ GET } } ``` + 返回字段说明: HTTP状态码200代表成功,其他(400以上的状态码)代表失败 status字段有三种: + ```go var ( // INIT means it is starting @@ -59,6 +62,7 @@ A: liveness检查用于检查一些不可恢复的故障,"是否需要重启" 而readiness用于检查一些临时性、可恢复的状态,比如应用正在预热缓存,需要告诉基础设施"先别把流量引到我这里来",等过会预热好了,基础设施再调readiness检查的接口,会得到结果"我准备好了,可以接客了" GET,不需要传参 + ```json // http://localhost:8080/actuator/health/readiness // HTTP/1.1 503 SERVICE UNAVAILABLE @@ -81,6 +85,7 @@ GET,不需要传参 用于查询Layotto和App的运行时元数据 GET + ```json // http://localhost:8080/actuator/health/liveness // HTTP/1.1 200 OK diff --git a/docs/zh/building_blocks/file/file.md b/docs/zh/building_blocks/file/file.md index b2dfff519e..2b1428ac05 100644 --- a/docs/zh/building_blocks/file/file.md +++ b/docs/zh/building_blocks/file/file.md @@ -89,29 +89,37 @@ message DelFileRequest { ``` ### 读文件 + ```protobuf // Get file with stream rpc GetFile(GetFileRequest) returns (stream GetFileResponse) {} ``` + 为避免文档和代码不一致,详细入参和返回值请参考 [the newest proto file](https://github.com/mosn/layotto/blob/main/spec/proto/runtime/v1/runtime.proto). ### 写文件 + ```protobuf // Put file with stream rpc PutFile(stream PutFileRequest) returns (google.protobuf.Empty) {} ``` + 为避免文档和代码不一致,详细入参和返回值请参考 [the newest proto file](https://github.com/mosn/layotto/blob/main/spec/proto/runtime/v1/runtime.proto). ### 删文件 + ```protobuf // Delete specific file rpc DelFile(DelFileRequest) returns (google.protobuf.Empty){} ``` + 为避免文档和代码不一致,详细入参和返回值请参考 [the newest proto file](https://github.com/mosn/layotto/blob/main/spec/proto/runtime/v1/runtime.proto). ### 查文件 + ```protobuf // List all files rpc ListFile(ListFileRequest) returns (ListFileResp){} ``` + 为避免文档和代码不一致,详细入参和返回值请参考 [the newest proto file](https://github.com/mosn/layotto/blob/main/spec/proto/runtime/v1/runtime.proto). diff --git a/docs/zh/building_blocks/lock/reference.md b/docs/zh/building_blocks/lock/reference.md index f1a1f38423..ee392146d4 100644 --- a/docs/zh/building_blocks/lock/reference.md +++ b/docs/zh/building_blocks/lock/reference.md @@ -11,6 +11,7 @@ Layotto client sdk封装了grpc调用的逻辑,使用sdk调用分布式锁 API的示例可以参考[快速开始:使用分布式锁API](zh/start/lock/start.md) ### TryLock + ```protobuf // A non-blocking method trying to get a lock with ttl. rpc TryLock(TryLockRequest)returns (TryLockResponse) {} @@ -66,7 +67,9 @@ import "github.com/google/uuid" //... req.LockOwner = uuid.New().String() ``` + ### Unlock + ```protobuf rpc Unlock(UnlockRequest)returns (UnlockResponse) {} ``` diff --git a/docs/zh/building_blocks/pubsub/reference.md b/docs/zh/building_blocks/pubsub/reference.md index 846b1ab1bc..56d092b4bb 100644 --- a/docs/zh/building_blocks/pubsub/reference.md +++ b/docs/zh/building_blocks/pubsub/reference.md @@ -32,6 +32,7 @@ Layotto client sdk封装了grpc调用的逻辑,使用sdk调用Pub/Sub API的 // Publishes events to the specific topic. 
rpc PublishEvent(PublishEventRequest) returns (google.protobuf.Empty) {} ``` + 为避免文档和代码不一致,详细入参和返回值请参考[runtime.proto](https://github.com/mosn/layotto/blob/main/spec/proto/runtime/v1/runtime.proto) ### 订阅事件 diff --git a/docs/zh/building_blocks/rpc/reference.md b/docs/zh/building_blocks/rpc/reference.md index cefd93a525..5828b4f64d 100644 --- a/docs/zh/building_blocks/rpc/reference.md +++ b/docs/zh/building_blocks/rpc/reference.md @@ -47,6 +47,7 @@ resp, err := cli.InvokeService( 本demo快速入门文档:[Dubbo JSON RPC Example](https://mosn.io/layotto/#/zh/start/rpc/dubbo_json_rpc) 服务端由dubbo示例程序[dubbo-go-samples](https://github.com/apache/dubbo-go-samples)充当,配置文件 [example.json](https://github.com/mosn/layotto/blob/77e0a4b2af063ff9e365a933c4735655898de369/demo/rpc/dubbo_json_rpc/example.json) 使用插件[dubbo_json_rpc](https://github.com/mosn/layotto/blob/8db7a2297bd05d1b0c4452cc980d8f6412a82f3a/components/rpc/callback/dubbo_json_rpc.go),以产生请求头。随后请求端[client](https://github.com/mosn/layotto/blob/b66b998f50901f8bd1cce035478579c1b47f986d/demo/rpc/dubbo_json_rpc/dubbo_json_client/client.go) 使用接口 **InvokeService** 进行 RPC 调用。 + ```golang resp, err := cli.InvokeService( ctx, diff --git a/docs/zh/building_blocks/sequencer/reference.md b/docs/zh/building_blocks/sequencer/reference.md index d2edcdc134..767eff90cc 100644 --- a/docs/zh/building_blocks/sequencer/reference.md +++ b/docs/zh/building_blocks/sequencer/reference.md @@ -22,9 +22,11 @@ Q: 什么场景需要趋势递增? 1. 对b+树类的db(例如MYSQL)来说,趋势递增的主键能更好的利用缓存(cache friendly)。 2. 拿来排序查最新数据。比如需求是查最新的100条消息,开发者不想新增个时间戳字段、建索引,如果id本身是递增的,那么查最新的100条消息时直接按id排序即可: + ``` select * from message order by message-id limit 100 ``` + 这在使用nosql的时候很常见,因为nosql在时间戳字段上加索引很难 - 全局单调递增 @@ -39,6 +41,7 @@ Layotto client sdk封装了grpc调用的逻辑,使用sdk调用Sequencer API的 使用前需要先对组件进行配置,详细的配置说明见[Sequencer组件文档](zh/component_specs/sequencer/common.md) ### Get next unique id + ```protobuf // Sequencer API // Get next unique id with some auto-increment guarantee @@ -75,4 +78,5 @@ message GetNextIdResponse{ int64 next_id = 1; } ``` + 为避免文档和代码不一致,详细入参和返回值请参考[proto文件](https://github.com/mosn/layotto/blob/main/spec/proto/runtime/v1/runtime.proto) diff --git a/docs/zh/building_blocks/state/reference.md b/docs/zh/building_blocks/state/reference.md index 36247eaaa5..558ad18cbf 100644 --- a/docs/zh/building_blocks/state/reference.md +++ b/docs/zh/building_blocks/state/reference.md @@ -35,6 +35,7 @@ Layotto client sdk封装了grpc调用的逻辑,使用sdk调用State API的示 ``` #### 入参 + ```protobuf // SaveStateRequest is the message to save multiple states into state store. @@ -106,41 +107,52 @@ message StateOptions { StateConsistency consistency = 2; } ``` + #### 返回 `google.protobuf.Empty` ### Get State + ```protobuf // Gets the state for a specific key. rpc GetState(GetStateRequest) returns (GetStateResponse) {} ``` + 为避免文档和代码不一致,详细入参和返回值请参考[proto文件](https://github.com/mosn/layotto/blob/main/spec/proto/runtime/v1/runtime.proto) ### Get bulk state + ```protobuf // Gets a bulk of state items for a list of keys rpc GetBulkState(GetBulkStateRequest) returns (GetBulkStateResponse) {} ``` + 为避免文档和代码不一致,详细入参和返回值请参考[proto文件](https://github.com/mosn/layotto/blob/main/spec/proto/runtime/v1/runtime.proto) ### Delete state + ```protobuf // Deletes the state for a specific key. 
rpc DeleteState(DeleteStateRequest) returns (google.protobuf.Empty) {} ``` + 为避免文档和代码不一致,详细入参和返回值请参考[proto文件](https://github.com/mosn/layotto/blob/main/spec/proto/runtime/v1/runtime.proto) ### Delete bulk state + ```protobuf // Deletes a bulk of state items for a list of keys rpc DeleteBulkState(DeleteBulkStateRequest) returns (google.protobuf.Empty) {} ``` + 为避免文档和代码不一致,详细入参和返回值请参考[proto文件](https://github.com/mosn/layotto/blob/main/spec/proto/runtime/v1/runtime.proto) ### State transactions + ```protobuf // Executes transactions for a specified store rpc ExecuteStateTransaction(ExecuteStateTransactionRequest) returns (google.protobuf.Empty) {} ``` + 为避免文档和代码不一致,详细入参和返回值请参考[proto文件](https://github.com/mosn/layotto/blob/main/spec/proto/runtime/v1/runtime.proto) diff --git a/docs/zh/component_specs/custom/common.md b/docs/zh/component_specs/custom/common.md index f7558eb58b..df970fc5f8 100644 --- a/docs/zh/component_specs/custom/common.md +++ b/docs/zh/component_specs/custom/common.md @@ -11,6 +11,7 @@ Layotto 中的组件分为两种: 允许您自己扩展自己的组件,比如[使用指南](zh/design/api_plugin/design?id=_24-使用指南) 中的 `HelloWorld` 组件。 ## 配置文件结构 + ```json "custom_component": { "": { diff --git a/docs/zh/component_specs/lock/common.md b/docs/zh/component_specs/lock/common.md index 3b8cb65b04..6faf2afe34 100644 --- a/docs/zh/component_specs/lock/common.md +++ b/docs/zh/component_specs/lock/common.md @@ -2,6 +2,7 @@ **配置文件结构** json配置文件有如下结构: + ```json "lock": { "": { diff --git a/docs/zh/component_specs/lock/etcd.md b/docs/zh/component_specs/lock/etcd.md index 944b5453f9..a97f0b81f5 100644 --- a/docs/zh/component_specs/lock/etcd.md +++ b/docs/zh/component_specs/lock/etcd.md @@ -23,6 +23,7 @@ etcd的启动方式可以参考etcd的[官方文档](https://etcd.io/docs/v3.5/q 访问 https://github.com/etcd-io/etcd/releases 下载对应操作系统的 etcd(也可用 docker) 下载完成执行命令启动: + ````shell ./etcd ```` @@ -35,9 +36,11 @@ etcd的启动方式可以参考etcd的[官方文档](https://etcd.io/docs/v3.5/q cd ${project_path}/cmd/layotto go build ```` + >如果 build 报错,可以在项目根目录执行 `go mod vendor` 编译成功后执行: + ````shell ./layotto start -c ../../configs/runtime_config.json ```` diff --git a/docs/zh/component_specs/lock/in-memory.md b/docs/zh/component_specs/lock/in-memory.md index 802d6b9047..aacfd9c556 100644 --- a/docs/zh/component_specs/lock/in-memory.md +++ b/docs/zh/component_specs/lock/in-memory.md @@ -11,7 +11,9 @@ cd ${project_path}/cmd/layotto go build ```` + 编译成功后执行: + ````shell ./layotto start -c ../../configs/config_standalone.json ```` diff --git a/docs/zh/component_specs/lock/redis.md b/docs/zh/component_specs/lock/redis.md index 67cedf3e1a..4e67e4c46b 100644 --- a/docs/zh/component_specs/lock/redis.md +++ b/docs/zh/component_specs/lock/redis.md @@ -11,6 +11,7 @@ ## 怎么启动Redis 如果想启动redis的demo,需要先用Docker启动一个Redis 命令: + ```shell docker pull redis:latest docker run -itd --name redis-test -p 6380:6379 redis @@ -34,6 +35,7 @@ docker run -itd --name redis-test -p 6380:6379 redis ## 如何启动多个redis节点 如果想启动redis集群锁的demo,需要先用Docker启动5个Redis 命令: + ```shell docker pull redis:latest docker run -itd --name redis1 -p 6381:6379 redis diff --git a/docs/zh/component_specs/pubsub/common.md b/docs/zh/component_specs/pubsub/common.md index 7e8f65ebc5..dbf434b53e 100644 --- a/docs/zh/component_specs/pubsub/common.md +++ b/docs/zh/component_specs/pubsub/common.md @@ -2,6 +2,7 @@ **配置文件结构** json配置文件有如下结构: + ```json "pub_subs": { "": { diff --git a/docs/zh/component_specs/pubsub/redis.md b/docs/zh/component_specs/pubsub/redis.md index 5d33ce0d5d..4b4adac41a 100644 --- a/docs/zh/component_specs/pubsub/redis.md +++ 
b/docs/zh/component_specs/pubsub/redis.md @@ -11,6 +11,7 @@ ## 怎么启动Redis 如果想启动redis的demo,需要先用Docker启动一个Redis 命令: + ```shell docker pull redis:latest docker run -itd --name redis-test -p 6380:6379 redis diff --git a/docs/zh/component_specs/secret/common.md b/docs/zh/component_specs/secret/common.md index 4740f19c8a..908dfe4b4e 100644 --- a/docs/zh/component_specs/secret/common.md +++ b/docs/zh/component_specs/secret/common.md @@ -4,6 +4,7 @@ **配置文件结构** json配置文件有如下结构: + ```json "secret_store": { "": { @@ -22,7 +23,9 @@ json配置文件有如下结构: } } ``` + 本地文件秘钥、本地环境变量、k8s秘钥的配置例子: + ```json "secret_store": { "secret_demo": { diff --git a/docs/zh/component_specs/sequencer/common.md b/docs/zh/component_specs/sequencer/common.md index 8ce82b73bb..7766e9f385 100644 --- a/docs/zh/component_specs/sequencer/common.md +++ b/docs/zh/component_specs/sequencer/common.md @@ -2,6 +2,7 @@ **配置文件结构** json配置文件有如下结构: + ```json "sequencer": { "": { diff --git a/docs/zh/component_specs/sequencer/etcd.md b/docs/zh/component_specs/sequencer/etcd.md index d7123d291f..7691a6e2ee 100644 --- a/docs/zh/component_specs/sequencer/etcd.md +++ b/docs/zh/component_specs/sequencer/etcd.md @@ -23,6 +23,7 @@ etcd的启动方式可以参考etcd的[官方文档](https://etcd.io/docs/v3.5/q 访问 https://github.com/etcd-io/etcd/releases 下载对应操作系统的 etcd(也可用 docker) 下载完成执行命令启动: + ````shell ./etcd ```` @@ -35,9 +36,11 @@ etcd的启动方式可以参考etcd的[官方文档](https://etcd.io/docs/v3.5/q cd ${project_path}/cmd/layotto go build ```` + >如果 build 报错,可以在项目根目录执行 `go mod vendor` 编译成功后执行: + ````shell ./layotto start -c ../../configs/runtime_config.json ```` diff --git a/docs/zh/component_specs/sequencer/in-memory.md b/docs/zh/component_specs/sequencer/in-memory.md index ff1217d287..56719e73cd 100644 --- a/docs/zh/component_specs/sequencer/in-memory.md +++ b/docs/zh/component_specs/sequencer/in-memory.md @@ -6,12 +6,14 @@ ## 启动 layotto + ```shell cd ${project_path}/cmd/layotto go build ``` 编译成功后执行: + ```shell @background ./layotto start -c ../../configs/config_standalone.json ``` diff --git a/docs/zh/component_specs/sequencer/redis.md b/docs/zh/component_specs/sequencer/redis.md index 4ec751e731..63c5a80e88 100644 --- a/docs/zh/component_specs/sequencer/redis.md +++ b/docs/zh/component_specs/sequencer/redis.md @@ -17,6 +17,7 @@ redis组件在丢数据的情况下可能生成重复id,为了避免重复id ## 怎么启动Redis 如果想启动redis的demo,需要先用Docker启动一个Redis 命令: + ```shell docker pull redis:latest docker run -itd --name redis-test -p 6379:6379 redis @@ -28,9 +29,11 @@ docker run -itd --name redis-test -p 6379:6379 redis cd ${project_path}/cmd/layotto go build ```` + >如果 build 报错,可以在项目根目录执行 `go mod vendor` 编译成功后执行: + ````shell ./layotto start -c ../../configs/config_redis.json ```` diff --git a/docs/zh/component_specs/state/common.md b/docs/zh/component_specs/state/common.md index 80b013288b..f511999c9a 100644 --- a/docs/zh/component_specs/state/common.md +++ b/docs/zh/component_specs/state/common.md @@ -2,6 +2,7 @@ **配置文件结构** json配置文件有如下结构: + ```json "state": { "": { diff --git a/docs/zh/component_specs/state/redis.md b/docs/zh/component_specs/state/redis.md index 5d33ce0d5d..4b4adac41a 100644 --- a/docs/zh/component_specs/state/redis.md +++ b/docs/zh/component_specs/state/redis.md @@ -11,6 +11,7 @@ ## 怎么启动Redis 如果想启动redis的demo,需要先用Docker启动一个Redis 命令: + ```shell docker pull redis:latest docker run -itd --name redis-test -p 6380:6379 redis diff --git a/docs/zh/design/actuator/actuator-design-doc.md b/docs/zh/design/actuator/actuator-design-doc.md index 7ef7a7139f..92c180efaa 100644 --- a/docs/zh/design/actuator/actuator-design-doc.md +++ 
b/docs/zh/design/actuator/actuator-design-doc.md @@ -100,6 +100,7 @@ Actuator内部抽象出Endpoint概念,新请求到达服务器后,Actuator #### /actuator/health/liveness GET + ```json // http://localhost:8080/actuator/health/liveness // HTTP/1.1 200 OK @@ -116,9 +117,11 @@ GET } } ``` + 返回字段说明: HTTP状态码200代表成功,其他(400以上的状态码)代表失败 status字段有三种: + ```go var ( // INIT means it is starting @@ -133,6 +136,7 @@ var ( #### /actuator/health/readiness GET + ```json // http://localhost:8080/actuator/health/readiness // HTTP/1.1 503 SERVICE UNAVAILABLE @@ -146,11 +150,13 @@ GET } } ``` + ### 2.2.3. Info Endpoint #### /actuator/info GET + ```json // http://localhost:8080/actuator/health/liveness // HTTP/1.1 200 OK @@ -197,6 +203,7 @@ GET ### 2.4.1. 请求到达mosn,通过stream filter进入Layotto、调用Actuator stream filter层的http协议实现类(struct)为DispatchFilter,负责按http路径分发请求、调用Actuator: + ```go type DispatchFilter struct { @@ -212,12 +219,14 @@ func (dis *DispatchFilter) OnDestroy() {} func (dis *DispatchFilter) OnReceive(ctx context.Context, headers api.HeaderMap, buf buffer.IoBuffer, trailers api.HeaderMap) api.StreamFilterStatus { } ``` + 协议层和Actuator解耦,如果未来需要其他协议的接口,可以实现该协议的stream filter ### 2.4.2. 请求分发给Actuator内部的Endpoint 参考spring boot actuator的设计: Actuator抽象出Endpoint概念,支持按需扩展、注入Endpoint。先内置实现health和info Endpoint。 + ```go type Actuator struct { endpointRegistry map[string]Endpoint @@ -233,26 +242,31 @@ func (act *Actuator) AddEndpoint(name string, ep Endpoint) { } ``` + 来请求后,根据路径将请求分发给对应的Endpoint。比如/actuator/health/readiness会分发给health.Endpoint ### 2.4.3. health.Endpoint将请求分发给health.Indicator的实现 需要上报健康检查信息的组件实现Indicator接口、注入进health.Endpoint: + ```go type Indicator interface { Report() Health } ``` + health.Endpoint将请求分发给health.Indicator的实现 ### 2.4.4. info.Endpoint将请求分发给info.Contributor的实现 需要上报运行时信息的组件实现Contributor接口、注入进info.Endpoint: + ```go type Contributor interface { GetInfo() (info interface{}, err error) } ``` + info.Endpoint将请求分发给info.Contributor的实现 # 三、详细设计 diff --git a/docs/zh/design/configuration/configuration-api-with-apollo.md b/docs/zh/design/configuration/configuration-api-with-apollo.md index 9300014a5f..b9d8301083 100644 --- a/docs/zh/design/configuration/configuration-api-with-apollo.md +++ b/docs/zh/design/configuration/configuration-api-with-apollo.md @@ -20,6 +20,7 @@ The actual key stored in apollo will be 'key@$label' and the value will be raw v Tags will be stored in a special namespace 'sidecar_config_tags', with key='group@$key@$label' and value= + ```json { "tag1": "tag1value", @@ -49,6 +50,7 @@ A: Legacy systems using apollo can't migrate to our sidecar if we design like th 2. Save/delete APIs might be incompatible.The sidecar use fixed 'cluster' field configurated in config.json and fixed 'env' field in code,which means users can't pass 'cluster' and 'env' field as a parameter for save/delete API when they want to change some configuration items with other appid. ### config.json for sidecar + ```json { "config_store": { @@ -77,6 +79,7 @@ There isn't any official apollo sdk for Go,so I choose the repo with most stars Some problems with the sdk: 1. 
Users must declare all namespaces in AppConfig before connecting to the server and constructing a client,like: + ```go c := &config.AppConfig{ AppID: "testApplication_yang", diff --git a/docs/zh/design/lock/lock-api-design.md b/docs/zh/design/lock/lock-api-design.md index 86203b85cf..4f01229dd0 100644 --- a/docs/zh/design/lock/lock-api-design.md +++ b/docs/zh/design/lock/lock-api-design.md @@ -26,6 +26,7 @@ 最基础的加锁、解锁功能。TryLock非阻塞,如果没有抢到锁直接返回 proto: + ```protobuf // Distributed Lock API rpc TryLock(TryLockRequest)returns (TryLockResponse) {} @@ -115,6 +116,7 @@ A: 入参增加feature option,组件也要实现Support()接口 ### 2.1.2. 续租 #### Solution A: add an API "LockKeepAlive" + ```protobuf rpc LockKeepAlive(stream LockKeepAliveRequest) returns (stream LockKeepAliveResponse){} @@ -139,6 +141,7 @@ message LockKeepAliveResponse { Status status = 2; } ``` + 续租的入参、返回结果都是stream,这里参考etcd的实现,app和sidecar只需要维护一个连接,每次用锁需要续租的时候都复用该连接传递续租请求。 **Q: 为啥不把续租作为一个stream参数塞到tryLock里?** @@ -178,6 +181,7 @@ try{ 这就要求业务在开发时要往心跳检测里上报一些细粒度的状态。 我们可以定义http callback接口,由actuator轮询检测,约定callback返回的数据结构为: + ```json { "status": "UP", diff --git a/docs/zh/design/pubsub/pubsub-api-and-compability-with-dapr-component.md b/docs/zh/design/pubsub/pubsub-api-and-compability-with-dapr-component.md index 33403d140c..f0de81993c 100644 --- a/docs/zh/design/pubsub/pubsub-api-and-compability-with-dapr-component.md +++ b/docs/zh/design/pubsub/pubsub-api-and-compability-with-dapr-component.md @@ -74,6 +74,7 @@ Dapr的组件库可以直接复用;下文讨论sdk和proto是否复用、怎 ### 2.2.2. Between APP and Layotto 用和Dapr一样的grpc API + ```protobuf service AppCallback { // Lists all topics subscribed by this app. @@ -84,6 +85,7 @@ service AppCallback { } ``` + ```protobuf service Dapr { // Publishes events to the specific topic. @@ -91,6 +93,7 @@ service Dapr { } ``` + ### 2.2.3. Between Layotto and Component 用和Dapr一样的; PublishRequest.Data和NewMessage.Data里面放符合CloudEvent 1.0规范的json数据(能反序列化放进map[string]interface{} ) diff --git "a/docs/zh/design/rpc/rpc\350\256\276\350\256\241\346\226\207\346\241\243.md" "b/docs/zh/design/rpc/rpc\350\256\276\350\256\241\346\226\207\346\241\243.md" index 02ea97b84f..5431e8abc4 100644 --- "a/docs/zh/design/rpc/rpc\350\256\276\350\256\241\346\226\207\346\241\243.md" +++ "b/docs/zh/design/rpc/rpc\350\256\276\350\256\241\346\226\207\346\241\243.md" @@ -32,6 +32,7 @@ Mosn通过xprotocol支持了流行的RPC协议. 在Layotto里设计了对应的扩展机制,只需要完成RPC请求响应与xprotocol frame的互相转换,就可以方便的支持xprotocl协议. #### 配置参数 + ```bigquery { "mosn": { diff --git a/docs/zh/design/sequencer/design.md b/docs/zh/design/sequencer/design.md index 7121b66a11..c2550a014f 100644 --- a/docs/zh/design/sequencer/design.md +++ b/docs/zh/design/sequencer/design.md @@ -20,9 +20,11 @@ Q: 什么场景需要趋势递增? 1. 对b+树类的db(例如MYSQL)来说,趋势递增的主键能更好的利用缓存(cache friendly)。 2. 拿来排序查最新数据。比如需求是查最新的100条消息,开发者不想新增个时间戳字段、建索引,如果id本身是递增的,那么查最新的100条消息时直接按id排序即可: + ``` select * from message order by message-id limit 100 ``` + 这在使用nosql的时候很常见,因为nosql在时间戳字段上加索引很难 - sharding内单调递增。比如[Tidb的自增id](https://docs.pingcap.com/zh/tidb/stable/auto-increment) 能保证单台服务器上生成的id递增,没法保证全局(在多台服务器上)单调递增 @@ -57,6 +59,7 @@ select * from message order by message-id limit 100 ## 3. grpc API设计 ### 3.1. proto定义 + ```protobuf // Sequencer API rpc GetNextId(GetNextIdRequest )returns (GetNextIdResponse) {} @@ -136,6 +139,7 @@ API中原先定义了用户传参SequencerOptions.Uniqueness枚举值 ,其中WEA 存在争议,本期先不添加该枚举值。默认返回的结果一定能保证全局唯一(STRONG)。 ## 4. 
组件API + ```go package sequencer @@ -216,6 +220,7 @@ type Configuration struct { **Q: 要不要在runtime层实现缓存?** 如果runtime做缓存,需要组件实现方法: + ```go GetSegment(*GetSegmentRequest) (support bool, result *GetSegmentResponse, err error) ``` diff --git a/docs/zh/development/contributing-doc.md b/docs/zh/development/contributing-doc.md index 09be72b2de..ebc631f296 100644 --- a/docs/zh/development/contributing-doc.md +++ b/docs/zh/development/contributing-doc.md @@ -21,11 +21,13 @@ docs/目录下的文件,会被自动部署到github pages,通过[docsify](ht 这里概括一下步骤: step 1. 安装 docsify + ```shell npm i docsify-cli -g ``` step 2. 启动文档站点 + ```shell # 在 layotto 项目根目录下执行 docsify serve docs diff --git a/docs/zh/development/developing-api.md b/docs/zh/development/developing-api.md index a3b6cbfa8a..6930bddb58 100644 --- a/docs/zh/development/developing-api.md +++ b/docs/zh/development/developing-api.md @@ -108,6 +108,7 @@ A: **本规范只限制“新增Layotto API的pr需要有哪些东西”(比 列出来有哪些接口,一方面省的用户自己去翻proto、不知道哪些是相关API,一方面避免用户产生"这项目连接口文档都没有?!"的反感 - 关于接口的出入参:拿proto注释当接口文档 考虑到接口文档用中英文写要写两份、时间长了还有可能和代码不一致,因此建议不写接口文档,直接把proto注释写的足够详细、当接口文档。例如: + ```protobuf // GetStateRequest is the message to get key-value states from specific state store. message GetStateRequest { diff --git a/docs/zh/development/developing-component.md b/docs/zh/development/developing-component.md index 657f57984e..88390a5033 100644 --- a/docs/zh/development/developing-component.md +++ b/docs/zh/development/developing-component.md @@ -112,6 +112,7 @@ 注:demo的代码里如果出现不该有的错误,可以直接panic。后续我们会直接用demo跑集成测试,如果panic了代表集成测试没有通过。 例如demo/lock/redis/client.go 里: + ```go //.... cli, err := client.NewClient() diff --git a/docs/zh/development/github-workflows.md b/docs/zh/development/github-workflows.md index 7ef1185b5b..4fdda1da56 100644 --- a/docs/zh/development/github-workflows.md +++ b/docs/zh/development/github-workflows.md @@ -24,6 +24,7 @@ Layotto Env Pipeline 流水线主要负责 Layotto 的项目以及相关环境 Layotto Env Pipeline 流水线任务触发方式: + Title Validation: + ``` pull_request: types: @@ -33,7 +34,9 @@ Layotto Env Pipeline 流水线任务触发方式: - labeled PR 添加 Label - unlabeled PR 取消 Label ``` + + Quickstart Validation: + ``` push: branches: @@ -42,13 +45,17 @@ Layotto Env Pipeline 流水线任务触发方式: branches: - main 提交 PR ``` + + Update Stale Status: + ``` on: schedule: - cron: '30 1 * * *' 定时任务 ``` + + License Validation: + ``` push: branches: @@ -57,13 +64,17 @@ Layotto Env Pipeline 流水线任务触发方式: branches: - main 提交 PR ``` + + DeadLink Validation: + ``` pull_request: branches: - main 提交 PR ``` + + CodeQL: + ``` schedule: - cron: '0 4 * * 5' 定时任务 diff --git a/docs/zh/development/test-quickstart.md b/docs/zh/development/test-quickstart.md index b8bd4d3d93..b23d8343e7 100644 --- a/docs/zh/development/test-quickstart.md +++ b/docs/zh/development/test-quickstart.md @@ -13,17 +13,21 @@ Quickstart 是项目的门面, 如果新用户进入仓库后,发现 Quickstar ## 原理 用工具按顺序执行 markdown 文档里的所有 shell 脚本, 即, 所有用 + ~~~markdown ```shell ``` ~~~ + 包裹起来的脚本。 注意,不会执行用 + ~~~markdown ```bash ``` ~~~ + 包裹起来的脚本哦。 ## step 1. 安装 `mdx` @@ -50,6 +54,7 @@ mdx docs/en/start/state/start.md 这也是"测试驱动开发"的思想,优化文档,让文档具有"可测试性"吧。 比如,我运行 state API 的 Quickstart 文档,发现报错: + ```bash SaveState succeeded.key:key1 , value: hello world GetState succeeded.[key:key1 etag:1]: hello world @@ -104,6 +109,7 @@ docker rm -f redis-test 所以即使文档里不删除容器,也不影响 github workflow 跑测试。 #### 不想让某条命令被执行,怎么办? 
`mdx` 默认情况下只会执行 shell 代码块,即这么写的代码块: + ```shell ```shell ``` @@ -118,6 +124,7 @@ docker rm -f redis-test 还是以 docs/en/start/state/start.md 为例。 其中有一段脚本会运行 Layotto, 但是如果运行它就会 hang 住,导致测试工具没法继续运行下一条命令: + ```bash ./layotto start -c ../../configs/config_redis.json ``` @@ -165,6 +172,7 @@ docker rm -f redis-test ##### 解决方案1. 用 `${project_path}` 变量,代表项目根路径,见 https://github.com/seeflood/mdx#cd-project_path + ```shell cd ${project_path}/demo/state/redis/ ``` @@ -186,16 +194,19 @@ cd ${project_path}/demo/state/redis/ cd demo/state/redis/ go run . ``` + ### 其他 markdown 注解 mdx 工具提供了很多"markdown 注解",帮助您编写"可以运行的 markdown 文件"。感兴趣可以查看[mdx文档](https://github.com/seeflood/mdx#usage) ### 修复报错,看看效果吧! 经过一顿修复,我再次运行文档: + ```shell mdx docs/en/start/state/start.md ``` 文档不报错了,能正常运行并退出: + ```bash admindeMacBook-Pro-2:layotto qunli$ mdx docs/en/start/state/start.md latest: Pulling from library/redis @@ -223,6 +234,7 @@ DeleteState succeeded.key:key1 DeleteState succeeded.key:key2 redis-test ``` + ## step 5. 修改 CI,自动测试新写的 quickstart 文档 如果您新写了一篇 quickstart 文档, 并且自测能正常运行,下一步可以修改 CI,实现"每次有人提 Pull request 时,工具自动测试这篇 quickstart 文档能跑通"。 @@ -239,9 +251,11 @@ redis-test 3. 完成上述改动后,就可以测试新的 CI 了。 在项目根目录下运行 + ```shell make style.quickstart ``` + 会测试这些文档: ![](https://gw.alipayobjects.com/mdn/rms_5891a1/afts/img/A*I7LRSryXwWYAAAAAAAAAAAAAARQnAQ) @@ -256,6 +270,7 @@ make style.quickstart ```shell make style.quickstart QUICKSTART_VERSION=1.17 ``` + 会测试以下文档(这些文档在 golang 1.17 及以上的版本才能运行成功): ![](https://gw.alipayobjects.com/mdn/rms_5891a1/afts/img/A*X3F9QJSKq3QAAAAAAAAAAAAAARQnAQ) diff --git a/docs/zh/operation/README.md b/docs/zh/operation/README.md index 942dfe787a..d674baac3e 100644 --- a/docs/zh/operation/README.md +++ b/docs/zh/operation/README.md @@ -15,6 +15,7 @@ Layotto 提供了官方 Docker 镜像,包括: - [layotto/layotto.arm64](https://hub.docker.com/repository/docker/layotto/layotto.arm64) 镜像内不包含 `config.json` 配置文件,您可以将自己的配置文件挂载进镜像的`/runtime/configs/config.json`目录, 然后启动镜像。例如: + ```shell docker run -v "$(pwd)/configs/config.json:/runtime/configs/config.json" -d -p 34904:34904 --name layotto layotto/layotto start ``` diff --git a/docs/zh/start/api_plugin/helloworld.md b/docs/zh/start/api_plugin/helloworld.md index 285bc57836..bf4c1c5e22 100644 --- a/docs/zh/start/api_plugin/helloworld.md +++ b/docs/zh/start/api_plugin/helloworld.md @@ -19,6 +19,7 @@ go build -o layotto ``` Run Layotto: + ```shell @background ./layotto start -c ../../configs/config_standalone.json ``` @@ -38,12 +39,14 @@ Check the code in [`main.go`](https://github.com/mosn/layotto/blob/d74ff0e8940e0 ``` ## step 2. 
invoke the helloworld API + ```shell # change directory cd ${project_path}/cmd/layotto_multiple_api # run demo client go run client/main.go ``` + The result will be: ```bash diff --git a/docs/zh/start/configuration/start-apollo.md b/docs/zh/start/configuration/start-apollo.md index ff8ea6cb2c..62b05474d5 100644 --- a/docs/zh/start/configuration/start-apollo.md +++ b/docs/zh/start/configuration/start-apollo.md @@ -35,6 +35,7 @@ go build -o layotto > [!TIP|label: 如果发现构建失败、无法下载] > 请进行如下设置 +> > ```bash > go env -w GOPROXY="https://goproxy.cn,direct" > ``` diff --git a/docs/zh/start/configuration/start.md b/docs/zh/start/configuration/start.md index 569a075df8..3e487497b2 100644 --- a/docs/zh/start/configuration/start.md +++ b/docs/zh/start/configuration/start.md @@ -53,6 +53,7 @@ go build -o layotto ``` 编译成功后执行: + ```shell @background ./layotto start -c ../../configs/runtime_config.json ``` @@ -84,6 +85,7 @@ delete keys success write start receive subscribe resp store_name:"config_demo" app_id:"apollo" items: tags: > ``` + ### step 3.销毁容器,释放资源 #### **关闭 Docker Compose** @@ -93,6 +95,7 @@ receive subscribe resp store_name:"config_demo" app_id:"apollo" items: brew install redis > redis-server /usr/local/etc/redis.conf ``` + 注:如果redis安装在本机器,Virtualbox内的虚拟机是无法访问到redis的, 需要把 redis.conf 中的 protected-mode 修改为 no.同时增加 bind * -::*, 让其监听所有接口。 #### B、以 virtualbox + containerd 模式启动 minikube + ``` > minikube start --driver=virtualbox --container-runtime=containerd ``` #### C、安装 Layotto + ``` > git clone https://github.com/mosn/layotto.git > cd layotto @@ -51,6 +54,7 @@ Layotto支持加载并运行以 wasm 为载体的 Function,并支持Function > sudo chmod +x layotto > sudo mv layotto /usr/bin/ ``` + **注1:需要把`./demo/faas/config.json`中的 redis 地址修改为实际地址(安装redis的宿主机ip),默认地址为:localhost:6379。** **注2:需要把`./demo/faas/config.json`中的 wasm 文件的路径修改为`/home/docker/function_1.wasm`跟`/home/docker/function_2.wasm`, 两个wasm文件在后面会被自动注入。** @@ -70,13 +74,16 @@ Layotto支持加载并运行以 wasm 为载体的 Function,并支持Function #### E、修改&重启 containerd 增加 laytto 运行时的配置。 + ``` > minikube ssh > sudo vi /etc/containerd/config.toml [plugins.cri.containerd.runtimes.layotto] runtime_type = "io.containerd.layotto.v2" ``` + 重启 containerd 让最新配置生效 + ``` sudo systemctl restart containerd ``` @@ -92,12 +99,14 @@ sudo systemctl restart containerd ### 四、快速开始 #### A、启动 Layotto + ``` > minikube ssh > layotto start -c /home/docker/config.json ``` #### B、创建 Layotto 运行时 + ``` > kubectl apply -f ./demo/faas/layotto-runtimeclass.yaml runtimeclass.node.k8s.io/layotto created @@ -105,6 +114,7 @@ runtimeclass.node.k8s.io/layotto created #### C、创建 Function 该操作会将function_1.wasm和function_2.wasm自动注入到Virtualbox虚拟机中。 + ``` > kubectl apply -f ./demo/faas/function-1.yaml pod/function-1 created @@ -114,6 +124,7 @@ pod/function-2 created ``` #### D、写入库存数据到 Redis + ``` > redis-cli 127.0.0.1:6379> set book1 100 @@ -121,6 +132,7 @@ OK ``` #### E、发送请求 + ``` > minikube ip 192.168.99.117 diff --git a/docs/zh/start/file/minio.md b/docs/zh/start/file/minio.md index f62c93fa20..c7b290177e 100644 --- a/docs/zh/start/file/minio.md +++ b/docs/zh/start/file/minio.md @@ -23,6 +23,7 @@ docker-compose up -d #### step 1.1. 
启动 MinIO 服务 您可以使用 Docker 启动本地MinIO服务, 参考[官方文档](http://docs.minio.org.cn/docs/master/minio-docker-quickstart-guide) + ```shell docker run -d -p 9000:9000 -p 9090:9090 --name minio \ -e "MINIO_ROOT_USER=layotto" \ @@ -104,6 +105,7 @@ go build client.go cd ${project_path}/docker/layotto-minio docker-compose stop ``` + #### **销毁 MinIO Docker 容器** 如果您是用 Docker 启动的 MinIO,可以按以下方式销毁 MinIO 容器: diff --git a/docs/zh/start/istio/start.md b/docs/zh/start/istio/start.md index d08b7810ae..6d63c9f5ff 100644 --- a/docs/zh/start/istio/start.md +++ b/docs/zh/start/istio/start.md @@ -20,6 +20,7 @@ MOSN作为Istio官方认可的数据面实现,这里就对Layotto如何跟Isti 3. [Istio-1.5.x](https://github.com/istio/istio/releases/tag/1.5.2) 当前mosn只支持`istio 1.5.x`(对`istio 1.10.x`的支持已经在CR了),因此需要下载对应版本的`istio`,解压后进行如下配置方便后续操作。 + ``` export PATH=$PATH:${你的istio目录}/bin ``` @@ -28,51 +29,68 @@ MOSN作为Istio官方认可的数据面实现,这里就对Layotto如何跟Isti 1. 启动Docker Desktop 2. 执行如下命令启动`minikube` + ``` minikube start ``` + 3. 执行如下命令启动demo中的client、server(所依赖的镜像已经全部上传docker hub) + ``` kubectl apply -f layotto-injected.yaml ``` + 其中`layotto-injected.yaml`文件中的内容在[这里](https://github.com/mosn/layotto/blob/istio-1.5.x/demo/istio/layotto-injected.yaml) ,复制即可。 4. 执行命令`kubectl get pod`查看启动状态(首次启动需要下载依赖镜像,请耐心等待) + ``` NAME READY STATUS RESTARTS AGE client-665c5cc4f-tfxrk 2/2 Running 0 49m server-v1-685966b499-8hnqp 2/2 Running 0 49m server-v2-6cfff5dbb5-4hlgb 2/2 Running 0 49m ``` + 命令执行完后看到类似上述内容则表示启动成功,我们部署了一个client端以及一个server端,其中server端分为了v1,v2两个版本。 5. 由于原生的`Istio`如果想要从外部访问集群里面的服务需要配置`istio-ingressgateway`服务,这会增加大家使用演示的成本,因此这里我们使用代理命名进行访问, 执行如下命令: + ``` kubectl port-forward svc/client 9080:9080 ``` + 然后直接访问如下链接即可,也可以直接在浏览器中访问。 + ``` curl localhost:9080/grpc ``` + 当看到如下响应时就表示示例启动成功。 + ``` GET /hello hello, i am layotto v1 ``` + ## 四、使用Istio动态改变路由策略 #### A、按version路由能力 1. 执行如下命令创建destination rules + ``` kubectl apply -f destination-rule-all.yaml ``` + 其中`destination-rule-all.yaml`文件内容在[这里](https://github.com/mosn/layotto/blob/istio-1.5.x/demo/istio/layotto-destination-rule-all.yaml) 2. 执行如下命令指定只访问V1服务 + ``` kubectl apply -f layotto-virtual-service-all-v1.yaml ``` + 其中`layotto-virtual-service-all-v1.yaml`文件内容在[这里](https://github.com/mosn/layotto/blob/istio-1.5.x/demo/istio/layotto-virtual-service-all-v1.yaml) 3. 上述命令执行完以后,后续请求只会拿到v1的返回结果,如下: + ``` GET /hello hello, i am layotto v1 @@ -80,10 +98,13 @@ MOSN作为Istio官方认可的数据面实现,这里就对Layotto如何跟Isti #### B、按header信息进行路由 1. 执行如下命令把路由规则修改为请求header中包含`name:layotto`时会访问v1服务,其他则访问v2服务 + ``` kubectl apply -f layotto-header-route.yaml ``` + 2. 发送请求即可看到效果 + ``` curl -H 'name: layotto' localhost:9080/grpc ``` @@ -96,16 +117,21 @@ MOSN作为Istio官方认可的数据面实现,这里就对Layotto如何跟Isti 2. 示例中使用的client、server源码可以参考[这里](https://github.com/mosn/layotto/tree/istio-1.5.x/demo/istio) 。 3. 为了上手简单,上述使用到的`layotto-injected.yaml`文件是已经通过istio完成注入的,整个注入过程如下: 1. 执行如下命令指定`istio`使用`Layotto`作为数据面 + ``` istioctl manifest apply --set .values.global.proxy.image="mosnio/proxyv2:layotto" --set meshConfig.defaultConfig.binaryPath="/usr/local/bin/mosn" ``` + 2. 通过`kube-inject`的方式实现Sidecar注入 + ``` istioctl kube-inject -f layotto.yaml > layotto-injected.yaml ``` + 其中`layotto.yaml`文件内容在[这里](https://github.com/mosn/layotto/blob/istio-1.5.x/demo/istio/layotto.yaml) 3. 
把`layotto-injected.yaml`中所有的`/usr/local/bin/envoy`替换为`/usr/local/bin/mosn` + ``` sed -i "s/\/usr\/local\/bin\/envoy/\/usr\/local\/bin\/mosn/g" ./layotto-injected.yaml ``` diff --git a/docs/zh/start/network_filter/tcpcopy.md b/docs/zh/start/network_filter/tcpcopy.md index fede63a6ed..c4fdf2eb09 100644 --- a/docs/zh/start/network_filter/tcpcopy.md +++ b/docs/zh/start/network_filter/tcpcopy.md @@ -17,6 +17,7 @@ } } ``` + 这段配置的含义是启动时加载tcpcopy插件,进行tcp流量dump。 开启该配置后,当Layotto接到请求,如果判断满足流量dump的条件,就会把请求的二进制数据写到本地文件系统。 diff --git a/docs/zh/start/pubsub/start.md b/docs/zh/start/pubsub/start.md index 24115b3389..b558936211 100644 --- a/docs/zh/start/pubsub/start.md +++ b/docs/zh/start/pubsub/start.md @@ -15,6 +15,7 @@ Layotto Pub/Sub API的设计目标是定义一套统一的消息发布/订阅API ![img_1.png](../../../img/mq/start/img_1.png) ### step 1. 启动 Subscriber 程序,订阅事件 + ```shell cd ${project_path}/demo/pubsub/server/ go build -o subscriber @@ -23,6 +24,7 @@ Layotto Pub/Sub API的设计目标是定义一套统一的消息发布/订阅API ```shell @background ./subscriber -s pub_subs_demo ``` + 打印出如下信息则代表启动成功: ```bash @@ -72,6 +74,7 @@ docker pull redis:latest ```shell docker images ``` + ![img.png](../../../img/mq/start/img.png) 3. 运行容器 @@ -105,6 +108,7 @@ go build -o layotto ```shell @background ./layotto start -c ../../configs/config_redis.json ``` + ### step 3. 运行Publisher程序,调用Layotto发布事件 @@ -139,12 +143,14 @@ Received a new event.Topic: topic1 , Data:value1 cd ${project_path}/docker/layotto-redis docker-compose stop ``` + #### **销毁 Redis Docker 容器** 如果您是用 Docker 启动的 Redis,可以按以下方式销毁 Redis 容器: ```shell docker rm -f redis-test ``` + diff --git a/docs/zh/start/rpc/dubbo_json_rpc.md b/docs/zh/start/rpc/dubbo_json_rpc.md index 73e39e9278..cc645ceaab 100644 --- a/docs/zh/start/rpc/dubbo_json_rpc.md +++ b/docs/zh/start/rpc/dubbo_json_rpc.md @@ -6,6 +6,7 @@ ![jsonrpc.jpg](../../../img/rpc/jsonrpc.jpg) ### step 2. 编译运行layotto + ```shell @if.not.exist layotto go build -o layotto cmd/layotto/main.go ``` @@ -18,11 +19,13 @@ go build -o layotto cmd/layotto/main.go 这里使用了`dubbo-go-samples`提供的示例服务。 下载示例: + ```shell @catch git clone https://github.com/apache/dubbo-go-samples.git ``` 启动 zookeeper: + ```shell cd dubbo-go-samples git reset --hard f0d1e1076397a4736de080ffb16cd0963c8c2f9d @@ -37,6 +40,7 @@ export DUBBO_GO_CONFIG_PATH="../conf/dubbogo.yml" ``` 构建 dubbo server: + ```shell @if.not.exist server go build -o server . ``` @@ -48,6 +52,7 @@ go build -o server . ``` ### step 4. 通过GPRC接口发起调用 + ```shell @cd ${project_path} go run demo/rpc/dubbo_json_rpc/dubbo_json_client/client.go -d '{"jsonrpc":"2.0","method":"GetUser","params":["A003"],"id":9527}' ``` diff --git a/docs/zh/start/rpc/helloworld.md b/docs/zh/start/rpc/helloworld.md index fd1ddb60b4..3df61fc838 100644 --- a/docs/zh/start/rpc/helloworld.md +++ b/docs/zh/start/rpc/helloworld.md @@ -17,11 +17,13 @@ go build -o layotto ``` 运行: + ```shell @background ./layotto -c ../../demo/rpc/http/example.json ``` ### step 2. 
启动echoserver服务端 + ```shell @background go run ${project_path}/demo/rpc/http/echoserver/echoserver.go ``` diff --git a/docs/zh/start/secret/start.md b/docs/zh/start/secret/start.md index ff023237de..1009ac5705 100644 --- a/docs/zh/start/secret/start.md +++ b/docs/zh/start/secret/start.md @@ -15,10 +15,13 @@ Secret API支持获取单个和所有secret ```shell cd ${project_path}/cmd/layotto ``` + 构建: + ```shell @if.not.exist layotto go build -o layotto ``` + 完成后目录下会生成layotto文件,运行它: ```shell @background @@ -34,6 +37,7 @@ go build -o layotto ```shell @if.not.exist client go build -o client ``` + ```shell ./client -s "secret_demo" ``` diff --git a/docs/zh/start/sequencer/start.md b/docs/zh/start/sequencer/start.md index bffaf4ccbc..787f71cc9e 100644 --- a/docs/zh/start/sequencer/start.md +++ b/docs/zh/start/sequencer/start.md @@ -88,6 +88,7 @@ Next id:next_id:9 Next id:next_id:10 Demo success! ``` + ### step 3.销毁容器,释放资源 #### **关闭 Docker Compose** @@ -97,12 +98,14 @@ Demo success! cd ${project_path}/docker/layotto-etcd docker-compose stop ``` + #### **销毁 etcd Docker 容器** 如果您是用 Docker 启动的 etcd,可以按以下方式销毁 etcd 容器: ```shell docker rm -f etcd ``` + ### 下一步 diff --git a/docs/zh/start/state/start.md b/docs/zh/start/state/start.md index 35fb9b54cd..a8a401b647 100644 --- a/docs/zh/start/state/start.md +++ b/docs/zh/start/state/start.md @@ -117,12 +117,14 @@ DeleteState succeeded.key:key2 cd ${project_path}/docker/layotto-redis docker-compose stop ``` + #### **销毁 Redis Docker 容器** 如果您是用 Docker 启动的 Redis,可以按以下方式销毁 Redis 容器: ```shell docker rm -f redis-test ``` + ### 下一步 diff --git a/docs/zh/start/stream_filter/flow_control.md b/docs/zh/start/stream_filter/flow_control.md index ffd57c4288..183d44298e 100644 --- a/docs/zh/start/stream_filter/flow_control.md +++ b/docs/zh/start/stream_filter/flow_control.md @@ -22,6 +22,7 @@ } ] ``` + 这段配置就是让`/spec.proto.runtime.v1.Runtime/SayHello`接口具备限流能力,1s只能调用5次。 对应的调用端代码在[client.go](https://github.com/mosn/layotto/blob/main/demo/flowcontrol/client.go) 中,代码逻辑很简单,就是想服务端发起10次调用,调用结果如下: diff --git a/docs/zh/start/trace/jaeger.md b/docs/zh/start/trace/jaeger.md index bb280bfc9a..8419e21770 100644 --- a/docs/zh/start/trace/jaeger.md +++ b/docs/zh/start/trace/jaeger.md @@ -68,6 +68,7 @@ go build -o layotto ```shell @if.not.exist client go build -o client ``` + 运行: ```shell diff --git a/docs/zh/start/trace/prometheus.md b/docs/zh/start/trace/prometheus.md index 6a8648ab9e..8f7da866dd 100644 --- a/docs/zh/start/trace/prometheus.md +++ b/docs/zh/start/trace/prometheus.md @@ -49,6 +49,7 @@ go build -o layotto ```shell @if.not.exist client go build -o client ``` + 运行: ```shell diff --git a/docs/zh/start/trace/trace.md b/docs/zh/start/trace/trace.md index 4157bdac1d..75a02ad8f0 100644 --- a/docs/zh/start/trace/trace.md +++ b/docs/zh/start/trace/trace.md @@ -18,6 +18,7 @@ } ] ``` + 这段配置可以开启layotto的trace能力,让layotto在接到请求后打印链路追踪日志。用户可以通过配置来指定trace日志上报的方式,以及spanId,traceId等字段的生成方式。 可以按照如下方式启动一个layotto的server: @@ -94,6 +95,7 @@ type Span struct { operationName string } ``` + Span结构定义了layotto和其component之间传递的数据结构,如下图所示,component可以通过tags将自己的信息传递到layotto,layotto做 统一的trace上报: @@ -126,6 +128,7 @@ exporter接口定了如何将Span的信息上报给远端,对应配置中的ex #### Span的上下文传递: ##### Layotto侧 + ```go GenerateNewContext(ctx context.Context, span api.Span) context.Context ``` @@ -135,6 +138,7 @@ GenerateNewContext用于生成新的context,我们通过mosnctx可以将该con ```go ctx = mosnctx.WithValue(ctx, types.ContextKeyActiveSpan, span) ``` + 可以参考代码中的[OpenGenerator](https://github.com/mosn/layotto/blob/main/diagnostics/genetator.go) 的实现 ##### Component侧 @@ -177,6 
+181,7 @@ curl --location --request GET 'http://127.0.0.1:34903/metrics' 详见 [mosn代码](https://github.com/mosn/mosn/blob/70751eae7a13dd1b3ac84c31b1ba85c45945ef69/pkg/filter/stream/grpcmetric/metric.go#L54) #### 展示metrics数据 + ```json "metrics": { "sinks": [ diff --git a/docs/zh/start/trace/zipkin.md b/docs/zh/start/trace/zipkin.md index d071d3eefc..256e515f81 100644 --- a/docs/zh/start/trace/zipkin.md +++ b/docs/zh/start/trace/zipkin.md @@ -20,6 +20,7 @@ } ``` + | 字段 | 必填 | 说明 | |------|-----|--------------------------| | service_name | Y | 当前服务名称,例如layotto | diff --git a/docs/zh/start/wasm/start.md b/docs/zh/start/wasm/start.md index 61e7c2377e..1a20f1ccfe 100644 --- a/docs/zh/start/wasm/start.md +++ b/docs/zh/start/wasm/start.md @@ -23,6 +23,7 @@ Layotto支持加载编译好的WASM文件,并通过`proxy_abi_version_0_2_0` 这里以用 Docker 安装 Redis 为例,进行介绍。 启动 Redis 容器: + ```shell docker run -d --name redis-test -p 6379:6379 redis ``` @@ -62,6 +63,7 @@ go build -tags wasmer -o ./layotto_wasmer ./cmd/layotto/main.go ``` 运行: + ```shell @background ./layotto_wasmer start -c ./demo/faas/config.json ``` From 07ae585a5a006a3ca45943e459530befe975d708 Mon Sep 17 00:00:00 2001 From: leemos <502101107@qq.com> Date: Wed, 22 Jun 2022 17:29:37 +0800 Subject: [PATCH 3/8] =?UTF-8?q?=E6=B5=8B=E8=AF=95=E6=98=AF=E5=90=A6?= =?UTF-8?q?=E5=8F=AF=E4=BB=A5=E6=A3=80=E6=9F=A5=E5=87=BAMD031?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/zh/api_reference/comment_spec_of_proto.md | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/zh/api_reference/comment_spec_of_proto.md b/docs/zh/api_reference/comment_spec_of_proto.md index 6a388b030a..8962d5f9e4 100644 --- a/docs/zh/api_reference/comment_spec_of_proto.md +++ b/docs/zh/api_reference/comment_spec_of_proto.md @@ -30,7 +30,6 @@ message GoodCase{ 假如你想添加一些注释在proto文件里,但不想让它们出现在生成的文档里,你可以在注释里使用`@exclude`前缀。 示例:只包括id字段的注释 - ``` /** * @exclude From 6c87f32c7de8ccc22284fe74e886c8f25d62ac4d Mon Sep 17 00:00:00 2001 From: leemos <502101107@qq.com> Date: Wed, 22 Jun 2022 17:32:11 +0800 Subject: [PATCH 4/8] =?UTF-8?q?=E4=BF=AE=E5=A4=8DMD031=E8=A7=84=E5=88=99?= =?UTF-8?q?=E6=A0=A1=E9=AA=8C=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/zh/api_reference/comment_spec_of_proto.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/zh/api_reference/comment_spec_of_proto.md b/docs/zh/api_reference/comment_spec_of_proto.md index 8962d5f9e4..6a388b030a 100644 --- a/docs/zh/api_reference/comment_spec_of_proto.md +++ b/docs/zh/api_reference/comment_spec_of_proto.md @@ -30,6 +30,7 @@ message GoodCase{ 假如你想添加一些注释在proto文件里,但不想让它们出现在生成的文档里,你可以在注释里使用`@exclude`前缀。 示例:只包括id字段的注释 + ``` /** * @exclude From 1097a75d7c168e58171cea6f6ad70f83193169fa Mon Sep 17 00:00:00 2001 From: leemos <502101107@qq.com> Date: Wed, 22 Jun 2022 17:35:41 +0800 Subject: [PATCH 5/8] =?UTF-8?q?push=E7=9A=84=E6=97=B6=E5=80=99=E4=B8=8D?= =?UTF-8?q?=E6=A0=A1=E9=AA=8Cmarkdown?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .github/workflows/markdown-linter.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/markdown-linter.yml b/.github/workflows/markdown-linter.yml index 8db0b4d4a8..fb78521208 100644 --- a/.github/workflows/markdown-linter.yml +++ b/.github/workflows/markdown-linter.yml @@ -1,9 +1,6 @@ name: Layotto Env Pipeline 🌊 on: - push: - branches: - - main pull_request: branches: - main From 3770113bdfc8c2459c2ee34ff48a8dfb66ff4b78 Mon Sep 17 00:00:00 
2001 From: leemos <502101107@qq.com> Date: Thu, 23 Jun 2022 08:30:14 +0800 Subject: [PATCH 6/8] =?UTF-8?q?markdownlint=E5=A2=9E=E5=8A=A0=E5=AF=B9MD00?= =?UTF-8?q?1/MD037/MD038=E8=A7=84=E5=88=99=E7=9A=84=E6=A0=A1=E9=AA=8C?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .github/markdown_lint_config.json | 6 +- docs/en/api_reference/appcallback_v1.md | 16 +- docs/en/api_reference/runtime_v1.md | 172 +++++++++--------- docs/en/building_blocks/file/file.md | 2 +- docs/en/component_specs/file/qiniu_oss.md | 2 +- .../component_specs/file/tencentcloud_oss.md | 2 +- docs/en/development/github-workflows.md | 24 +-- docs/en/start/configuration/start.md | 18 +- docs/en/start/istio/start.md | 4 +- docs/en/start/lock/start.md | 20 +- docs/en/start/trace/skywalking.md | 4 +- docs/zh/blog/code/webassembly/index.md | 4 +- ...application-runtime-archsummit-shanghai.md | 17 +- docs/zh/building_blocks/file/file.md | 2 +- docs/zh/design/file/file-design.md | 12 +- docs/zh/development/release-guide.md | 10 +- docs/zh/start/configuration/start-apollo.md | 12 +- docs/zh/start/configuration/start.md | 18 +- docs/zh/start/faas/start.md | 2 +- docs/zh/start/istio/start.md | 4 +- docs/zh/start/trace/skywalking.md | 4 +- docs/zh/start/trace/zipkin.md | 4 +- 22 files changed, 179 insertions(+), 180 deletions(-) diff --git a/.github/markdown_lint_config.json b/.github/markdown_lint_config.json index 164fcfc1c7..254f23cb66 100644 --- a/.github/markdown_lint_config.json +++ b/.github/markdown_lint_config.json @@ -1,5 +1,5 @@ { - "MD001": false, + "MD001": true, "MD002": false, "MD003": false, "MD004": false, @@ -35,8 +35,8 @@ "MD034": false, "MD035": false, "MD036": false, - "MD037": false, - "MD038": false, + "MD037": true, + "MD038": true, "MD039": false, "MD040": false, "MD041": false, diff --git a/docs/en/api_reference/appcallback_v1.md b/docs/en/api_reference/appcallback_v1.md index 2603cf265d..54894d7c90 100644 --- a/docs/en/api_reference/appcallback_v1.md +++ b/docs/en/api_reference/appcallback_v1.md @@ -12,7 +12,7 @@ This document is automaticallly generated from the [`.proto`](https://github.com -### [gRPC Service] AppCallback +## [gRPC Service] AppCallback AppCallback V1 allows user application to interact with runtime. User application needs to implement AppCallback service if it needs to receive message from runtime. @@ -28,7 +28,7 @@ receive message from runtime.


-### ListTopicSubscriptionsResponse +## ListTopicSubscriptionsResponse ListTopicSubscriptionsResponse is the message including the list of the subscribing topics. @@ -44,7 +44,7 @@ ListTopicSubscriptionsResponse is the message including the list of the subscrib


-### TopicEventRequest +## TopicEventRequest TopicEventRequest message is compatible with CloudEvent spec v1.0 https://github.com/cloudevents/spec/blob/v1.0/spec.md @@ -69,7 +69,7 @@ https://github.com/cloudevents/spec/blob/v1.0/spec.md


-### TopicEventRequest.MetadataEntry +## TopicEventRequest.MetadataEntry @@ -86,7 +86,7 @@ https://github.com/cloudevents/spec/blob/v1.0/spec.md


-### TopicEventResponse +## TopicEventResponse TopicEventResponse is response from app on published message @@ -102,7 +102,7 @@ TopicEventResponse is response from app on published message


-### TopicSubscription +## TopicSubscription TopicSubscription represents topic and metadata. @@ -120,7 +120,7 @@ TopicSubscription represents topic and metadata.


-### TopicSubscription.MetadataEntry +## TopicSubscription.MetadataEntry @@ -138,7 +138,7 @@ TopicSubscription represents topic and metadata. -### TopicEventResponse.TopicEventResponseStatus +## TopicEventResponse.TopicEventResponseStatus TopicEventResponseStatus allows apps to have finer control over handling of the message. | Name | Number | Description | diff --git a/docs/en/api_reference/runtime_v1.md b/docs/en/api_reference/runtime_v1.md index 2df0a88006..b39b3e3068 100644 --- a/docs/en/api_reference/runtime_v1.md +++ b/docs/en/api_reference/runtime_v1.md @@ -12,7 +12,7 @@ This document is automaticallly generated from the [`.proto`](https://github.com -### [gRPC Service] Runtime +## [gRPC Service] Runtime | Method Name | Request Type | Response Type | Description | @@ -48,7 +48,7 @@ This document is automaticallly generated from the [`.proto`](https://github.com


-### BulkStateItem +## BulkStateItem BulkStateItem is the response item for a bulk get operation. Return values include the item key, data and etag. @@ -69,7 +69,7 @@ Return values include the item key, data and etag.


-### BulkStateItem.MetadataEntry +## BulkStateItem.MetadataEntry @@ -86,7 +86,7 @@ Return values include the item key, data and etag.


-### CommonInvokeRequest +## CommonInvokeRequest @@ -105,7 +105,7 @@ Return values include the item key, data and etag.


-### ConfigurationItem +## ConfigurationItem ConfigurationItem represents a configuration item with key, content and other information. @@ -126,7 +126,7 @@ ConfigurationItem represents a configuration item with key, content and other in


-### ConfigurationItem.MetadataEntry +## ConfigurationItem.MetadataEntry @@ -143,7 +143,7 @@ ConfigurationItem represents a configuration item with key, content and other in


-### ConfigurationItem.TagsEntry +## ConfigurationItem.TagsEntry @@ -160,7 +160,7 @@ ConfigurationItem represents a configuration item with key, content and other in


-### DelFileRequest +## DelFileRequest @@ -176,7 +176,7 @@ ConfigurationItem represents a configuration item with key, content and other in


-### DeleteBulkStateRequest +## DeleteBulkStateRequest DeleteBulkStateRequest is the message to delete a list of key-value states from specific state store. @@ -193,7 +193,7 @@ DeleteBulkStateRequest is the message to delete a list of key-value states from


-### DeleteConfigurationRequest +## DeleteConfigurationRequest DeleteConfigurationRequest is the message to delete a list of key-value configuration from specified configuration store. @@ -214,7 +214,7 @@ DeleteConfigurationRequest is the message to delete a list of key-value configur


-### DeleteConfigurationRequest.MetadataEntry +## DeleteConfigurationRequest.MetadataEntry @@ -231,7 +231,7 @@ DeleteConfigurationRequest is the message to delete a list of key-value configur


-### DeleteStateRequest +## DeleteStateRequest DeleteStateRequest is the message to delete key-value states in the specific state store. @@ -251,7 +251,7 @@ DeleteStateRequest is the message to delete key-value states in the specific sta


-### DeleteStateRequest.MetadataEntry +## DeleteStateRequest.MetadataEntry @@ -268,7 +268,7 @@ DeleteStateRequest is the message to delete key-value states in the specific sta


-### Etag +## Etag Etag represents a state item version @@ -284,7 +284,7 @@ Etag represents a state item version


-### ExecuteStateTransactionRequest +## ExecuteStateTransactionRequest ExecuteStateTransactionRequest is the message to execute multiple operations on a specified store. @@ -302,7 +302,7 @@ ExecuteStateTransactionRequest is the message to execute multiple operations on


-### ExecuteStateTransactionRequest.MetadataEntry +## ExecuteStateTransactionRequest.MetadataEntry @@ -319,7 +319,7 @@ ExecuteStateTransactionRequest is the message to execute multiple operations on


-### FileInfo +## FileInfo @@ -338,7 +338,7 @@ ExecuteStateTransactionRequest is the message to execute multiple operations on


-### FileInfo.MetadataEntry +## FileInfo.MetadataEntry @@ -355,7 +355,7 @@ ExecuteStateTransactionRequest is the message to execute multiple operations on


-### FileMeta +## FileMeta @@ -371,7 +371,7 @@ ExecuteStateTransactionRequest is the message to execute multiple operations on


-### FileMeta.MetadataEntry +## FileMeta.MetadataEntry @@ -388,7 +388,7 @@ ExecuteStateTransactionRequest is the message to execute multiple operations on


-### FileMetaValue +## FileMetaValue @@ -404,7 +404,7 @@ ExecuteStateTransactionRequest is the message to execute multiple operations on


-### FileRequest +## FileRequest @@ -422,7 +422,7 @@ ExecuteStateTransactionRequest is the message to execute multiple operations on


-### FileRequest.MetadataEntry +## FileRequest.MetadataEntry @@ -439,7 +439,7 @@ ExecuteStateTransactionRequest is the message to execute multiple operations on


-### GetBulkSecretRequest +## GetBulkSecretRequest GetBulkSecretRequest is the message to get the secrets from secret store. @@ -456,7 +456,7 @@ GetBulkSecretRequest is the message to get the secrets from secret store.


-### GetBulkSecretRequest.MetadataEntry +## GetBulkSecretRequest.MetadataEntry @@ -473,7 +473,7 @@ GetBulkSecretRequest is the message to get the secrets from secret store.


-### GetBulkSecretResponse +## GetBulkSecretResponse GetBulkSecretResponse is the response message to convey the requested secrets. @@ -489,7 +489,7 @@ GetBulkSecretResponse is the response message to convey the requested secrets.


-### GetBulkSecretResponse.DataEntry +## GetBulkSecretResponse.DataEntry @@ -506,7 +506,7 @@ GetBulkSecretResponse is the response message to convey the requested secrets.


-### GetBulkStateRequest +## GetBulkStateRequest GetBulkStateRequest is the message to get a list of key-value states from specific state store. @@ -525,7 +525,7 @@ GetBulkStateRequest is the message to get a list of key-value states from specif


-### GetBulkStateRequest.MetadataEntry +## GetBulkStateRequest.MetadataEntry @@ -542,7 +542,7 @@ GetBulkStateRequest is the message to get a list of key-value states from specif


-### GetBulkStateResponse +## GetBulkStateResponse GetBulkStateResponse is the response conveying the list of state values. @@ -558,7 +558,7 @@ GetBulkStateResponse is the response conveying the list of state values.


-### GetConfigurationRequest +## GetConfigurationRequest GetConfigurationRequest is the message to get a list of key-value configuration from specified configuration store. @@ -580,7 +580,7 @@ GetConfigurationRequest is the message to get a list of key-value configuration


-### GetConfigurationRequest.MetadataEntry +## GetConfigurationRequest.MetadataEntry @@ -597,7 +597,7 @@ GetConfigurationRequest is the message to get a list of key-value configuration


-### GetConfigurationResponse +## GetConfigurationResponse GetConfigurationResponse is the response conveying the list of configuration values. @@ -613,7 +613,7 @@ GetConfigurationResponse is the response conveying the list of configuration val


-### GetFileMetaRequest +## GetFileMetaRequest @@ -629,7 +629,7 @@ GetConfigurationResponse is the response conveying the list of configuration val


-### GetFileMetaResponse +## GetFileMetaResponse @@ -647,7 +647,7 @@ GetConfigurationResponse is the response conveying the list of configuration val


-### GetFileRequest +## GetFileRequest @@ -665,7 +665,7 @@ GetConfigurationResponse is the response conveying the list of configuration val


-### GetFileRequest.MetadataEntry +## GetFileRequest.MetadataEntry @@ -682,7 +682,7 @@ GetConfigurationResponse is the response conveying the list of configuration val


-### GetFileResponse +## GetFileResponse @@ -698,7 +698,7 @@ GetConfigurationResponse is the response conveying the list of configuration val


-### GetNextIdRequest +## GetNextIdRequest @@ -717,7 +717,7 @@ GetConfigurationResponse is the response conveying the list of configuration val


-### GetNextIdRequest.MetadataEntry +## GetNextIdRequest.MetadataEntry @@ -734,7 +734,7 @@ GetConfigurationResponse is the response conveying the list of configuration val


-### GetNextIdResponse +## GetNextIdResponse @@ -750,7 +750,7 @@ GetConfigurationResponse is the response conveying the list of configuration val


-### GetSecretRequest +## GetSecretRequest GetSecretRequest is the message to get secret from secret store. @@ -768,7 +768,7 @@ GetSecretRequest is the message to get secret from secret store.


-### GetSecretRequest.MetadataEntry +## GetSecretRequest.MetadataEntry @@ -785,7 +785,7 @@ GetSecretRequest is the message to get secret from secret store.


-### GetSecretResponse +## GetSecretResponse GetSecretResponse is the response message to convey the requested secret. @@ -801,7 +801,7 @@ GetSecretResponse is the response message to convey the requested secret.


-### GetSecretResponse.DataEntry +## GetSecretResponse.DataEntry @@ -818,7 +818,7 @@ GetSecretResponse is the response message to convey the requested secret.


-### GetStateRequest +## GetStateRequest GetStateRequest is the message to get key-value states from specific state store. @@ -837,7 +837,7 @@ GetStateRequest is the message to get key-value states from specific state store


-### GetStateRequest.MetadataEntry +## GetStateRequest.MetadataEntry @@ -854,7 +854,7 @@ GetStateRequest is the message to get key-value states from specific state store


-### GetStateResponse +## GetStateResponse GetStateResponse is the response conveying the state value and etag. @@ -872,7 +872,7 @@ GetStateResponse is the response conveying the state value and etag.


-### GetStateResponse.MetadataEntry +## GetStateResponse.MetadataEntry @@ -889,7 +889,7 @@ GetStateResponse is the response conveying the state value and etag.


-### HTTPExtension +## HTTPExtension @@ -906,7 +906,7 @@ GetStateResponse is the response conveying the state value and etag.


-### InvokeBindingRequest +## InvokeBindingRequest InvokeBindingRequest is the message to send data to output bindings @@ -925,7 +925,7 @@ InvokeBindingRequest is the message to send data to output bindings


-### InvokeBindingRequest.MetadataEntry +## InvokeBindingRequest.MetadataEntry @@ -942,7 +942,7 @@ InvokeBindingRequest is the message to send data to output bindings


-### InvokeBindingResponse +## InvokeBindingResponse InvokeBindingResponse is the message returned from an output binding invocation @@ -959,7 +959,7 @@ InvokeBindingResponse is the message returned from an output binding invocation


-### InvokeBindingResponse.MetadataEntry +## InvokeBindingResponse.MetadataEntry @@ -976,7 +976,7 @@ InvokeBindingResponse is the message returned from an output binding invocation


-### InvokeResponse +## InvokeResponse @@ -993,7 +993,7 @@ InvokeBindingResponse is the message returned from an output binding invocation


-### InvokeServiceRequest +## InvokeServiceRequest @@ -1010,7 +1010,7 @@ InvokeBindingResponse is the message returned from an output binding invocation


-### ListFileRequest +## ListFileRequest @@ -1028,7 +1028,7 @@ InvokeBindingResponse is the message returned from an output binding invocation


-### ListFileResp +## ListFileResp @@ -1046,7 +1046,7 @@ InvokeBindingResponse is the message returned from an output binding invocation


-### PublishEventRequest +## PublishEventRequest PublishEventRequest is the message to publish event data to pubsub topic @@ -1068,7 +1068,7 @@ metadata property: - key : the key of the message. |


-### PublishEventRequest.MetadataEntry +## PublishEventRequest.MetadataEntry @@ -1085,7 +1085,7 @@ metadata property: - key : the key of the message. |


-### PutFileRequest +## PutFileRequest @@ -1104,7 +1104,7 @@ metadata property: - key : the key of the message. |


-### PutFileRequest.MetadataEntry +## PutFileRequest.MetadataEntry @@ -1121,7 +1121,7 @@ metadata property: - key : the key of the message. |


-### SaveConfigurationRequest +## SaveConfigurationRequest SaveConfigurationRequest is the message to save a list of key-value configuration into specified configuration store. @@ -1140,7 +1140,7 @@ SaveConfigurationRequest is the message to save a list of key-value configuratio


-### SaveConfigurationRequest.MetadataEntry +## SaveConfigurationRequest.MetadataEntry @@ -1157,7 +1157,7 @@ SaveConfigurationRequest is the message to save a list of key-value configuratio


-### SaveStateRequest +## SaveStateRequest SaveStateRequest is the message to save multiple states into state store. @@ -1174,7 +1174,7 @@ SaveStateRequest is the message to save multiple states into state store.


-### SayHelloRequest +## SayHelloRequest @@ -1192,7 +1192,7 @@ SaveStateRequest is the message to save multiple states into state store.


-### SayHelloResponse +## SayHelloResponse @@ -1209,7 +1209,7 @@ SaveStateRequest is the message to save multiple states into state store.


-### SecretResponse +## SecretResponse SecretResponse is a map of decrypted string/string values @@ -1225,7 +1225,7 @@ SecretResponse is a map of decrypted string/string values


-### SecretResponse.SecretsEntry +## SecretResponse.SecretsEntry @@ -1242,7 +1242,7 @@ SecretResponse is a map of decrypted string/string values


-### SequencerOptions +## SequencerOptions SequencerOptions configures requirements for auto-increment guarantee @@ -1258,7 +1258,7 @@ SequencerOptions configures requirements for auto-increment guarantee


-### StateItem +## StateItem StateItem represents state key, value, and additional options to save state. @@ -1278,7 +1278,7 @@ StateItem represents state key, value, and additional options to save state.


-### StateItem.MetadataEntry +## StateItem.MetadataEntry @@ -1295,7 +1295,7 @@ StateItem represents state key, value, and additional options to save state.


-### StateOptions +## StateOptions StateOptions configures concurrency and consistency for state operations @@ -1312,7 +1312,7 @@ StateOptions configures concurrency and consistency for state operations


-### SubscribeConfigurationRequest +## SubscribeConfigurationRequest SubscribeConfigurationRequest is the message to get a list of key-value configuration from specified configuration store. @@ -1333,7 +1333,7 @@ SubscribeConfigurationRequest is the message to get a list of key-value configur


-### SubscribeConfigurationRequest.MetadataEntry +## SubscribeConfigurationRequest.MetadataEntry @@ -1350,7 +1350,7 @@ SubscribeConfigurationRequest is the message to get a list of key-value configur


-### SubscribeConfigurationResponse +## SubscribeConfigurationResponse SubscribeConfigurationResponse is the response conveying the list of configuration values. @@ -1368,7 +1368,7 @@ SubscribeConfigurationResponse is the response conveying the list of configurati


-### TransactionalStateOperation +## TransactionalStateOperation TransactionalStateOperation is the message to execute a specified operation with a key-value pair. @@ -1385,7 +1385,7 @@ TransactionalStateOperation is the message to execute a specified operation with


-### TryLockRequest +## TryLockRequest @@ -1404,7 +1404,7 @@ TransactionalStateOperation is the message to execute a specified operation with


-### TryLockResponse +## TryLockResponse @@ -1420,7 +1420,7 @@ TransactionalStateOperation is the message to execute a specified operation with


-### UnlockRequest +## UnlockRequest @@ -1438,7 +1438,7 @@ TransactionalStateOperation is the message to execute a specified operation with


-### UnlockResponse +## UnlockResponse @@ -1455,7 +1455,7 @@ TransactionalStateOperation is the message to execute a specified operation with -### HTTPExtension.Verb +## HTTPExtension.Verb | Name | Number | Description | @@ -1475,7 +1475,7 @@ TransactionalStateOperation is the message to execute a specified operation with -### SequencerOptions.AutoIncrement +## SequencerOptions.AutoIncrement requirements for auto-increment guarantee | Name | Number | Description | @@ -1487,7 +1487,7 @@ requirements for auto-increment guarantee -### StateOptions.StateConcurrency +## StateOptions.StateConcurrency Enum describing the supported concurrency for state. The API server uses Optimized Concurrency Control (OCC) with ETags. When an ETag is associated with an save or delete request, the store shall allow the update only if the attached ETag matches with the latest ETag in the database. @@ -1503,7 +1503,7 @@ But when ETag is missing in the write requests, the state store shall handle the -### StateOptions.StateConsistency +## StateOptions.StateConsistency Enum describing the supported consistency for state. | Name | Number | Description | @@ -1516,7 +1516,7 @@ Enum describing the supported consistency for state. -### UnlockResponse.Status +## UnlockResponse.Status | Name | Number | Description | diff --git a/docs/en/building_blocks/file/file.md b/docs/en/building_blocks/file/file.md index f79ad46f18..6bc4f676dd 100644 --- a/docs/en/building_blocks/file/file.md +++ b/docs/en/building_blocks/file/file.md @@ -38,7 +38,7 @@ Refer: https://github.com/mosn/layotto/issues/98 ``` -#### parameters +### parameters ```protobuf message GetFileRequest { diff --git a/docs/en/component_specs/file/qiniu_oss.md b/docs/en/component_specs/file/qiniu_oss.md index 8a52bbd742..478b57db19 100644 --- a/docs/en/component_specs/file/qiniu_oss.md +++ b/docs/en/component_specs/file/qiniu_oss.md @@ -20,7 +20,7 @@ Example:configs/config_file_qiniu_oss.json 2.get keys https://portal.qiniu.com/user/key -After the above operation steps are completed, configure endpoint, AK and SK to ` configs/config_file_qiniu_oss.json` +After the above operation steps are completed, configure endpoint, AK and SK to `configs/config_file_qiniu_oss.json` file ## Run layotto diff --git a/docs/en/component_specs/file/tencentcloud_oss.md b/docs/en/component_specs/file/tencentcloud_oss.md index 014ef03cd1..ed1988b9bc 100644 --- a/docs/en/component_specs/file/tencentcloud_oss.md +++ b/docs/en/component_specs/file/tencentcloud_oss.md @@ -25,7 +25,7 @@ visit https://console.cloud.tencent.com/cos/bucket to create bucket visit https://console.cloud.tencent.com/cam/capi to create AK and SK -After the above operation steps are completed, configure endpoint, AK and SK to ` configs/config_file_tencentcloud_oss.JSON` file +After the above operation steps are completed, configure endpoint, AK and SK to `configs/config_file_tencentcloud_oss.JSON` file ## Run layotto diff --git a/docs/en/development/github-workflows.md b/docs/en/development/github-workflows.md index e4b3d78b6b..1da577af77 100644 --- a/docs/en/development/github-workflows.md +++ b/docs/en/development/github-workflows.md @@ -9,9 +9,9 @@ This document explains Layotto's four workflows in Github: The workflow contains one or more tasks, It improves the standardization and security of the code in layotto, simplifies repetitive steps of development / build / release. The following is a detailed explanation of the above four workflows. 
-### Layotto Env Pipeline 🌊 +## Layotto Env Pipeline 🌊 -#### Job Task Content +### Job Task Content Layotto Env Pipeline is mainly responsible for the project of layotto and the specification of relevant environment,it current contains the following tasks: @@ -22,7 +22,7 @@ Layotto Env Pipeline is mainly responsible for the project of layotto and the sp + DeadLink Validation (Check the deadLink in document) + CodeQL (Analysis of CodeQL) -#### Job Trigger Event +### Job Trigger Event Layotto Env Pipeline Task Trigger Events: @@ -83,11 +83,11 @@ Layotto Env Pipeline Task Trigger Events: - cron: '0 4 * * 5' timed tasks ``` -### Layotto Dev Pipeline 🌊 (Before Merged) +## Layotto Dev Pipeline 🌊 (Before Merged) ![release.png](../../img/development/workflow/workflow-dev.png) -#### Job Task Content +### Job Task Content The layotto dev pipeline (before merged) is mainly responsible for verifying the code after submitting the PR, which currently includes the following tasks: @@ -103,7 +103,7 @@ The layotto dev pipeline (before merged) is mainly responsible for verifying th + Linux ARM64 Artifact : Build linux arm64 binary verification for code + Linux AMD64 WASM Artifact : Build linux AMD64 binary verification for layotto wasm -#### Job Trigger Event +### Job Trigger Event ``` on: @@ -119,11 +119,11 @@ The layotto dev pipeline (before merged) is mainly responsible for verifying th - '**/*.md' ``` -### Layotto Dev Pipeline 🌊 (After Merged) +## Layotto Dev Pipeline 🌊 (After Merged) ![release.png](../../img/development/workflow/workflow-merge.png) -#### Job Task Content +### Job Task Content The layotto dev pipeline (after merged) is mainly responsible for the verification and release of the combined layotto code, which currently includes the following tasks: @@ -142,7 +142,7 @@ The layotto dev pipeline (after merged) is mainly responsible for the verificat + Linux AMD64 Image : Release the latest version of layotto wasm image. The image specification is layotto/layotto:latest + Linux ARMD64 Image : Release the latest version of layotto wasm image. The image specification is layotto/layotto.arm64:latest -#### Job Trigger Event +### Job Trigger Event ``` on: @@ -158,11 +158,11 @@ The layotto dev pipeline (after merged) is mainly responsible for the verificat - '**/*.md' ``` -### Layotto Release Pipeline 🌊 +## Layotto Release Pipeline 🌊 ![release.png](../../img/development/workflow/workflow-release.png) -#### Job Task Content +### Job Task Content The layotto release pipeline is mainly responsible for the release and verification of the new version of layotto, which currently includes the following tasks : @@ -181,7 +181,7 @@ The layotto release pipeline is mainly responsible for the release and verifica + Linux AMD64 Image : Release the latest version of layotto wasm image. The image specification is layotto/layotto:{latest_tagname} + Linux ARMD64 Image : Release the latest version of layotto wasm image. The image specification is layotto/layotto.arm64:{latest_tagname} -#### Job Trigger Event +### Job Trigger Event ``` on: diff --git a/docs/en/start/configuration/start.md b/docs/en/start/configuration/start.md index 4845d9364b..6443d53af6 100644 --- a/docs/en/start/configuration/start.md +++ b/docs/en/start/configuration/start.md @@ -7,9 +7,9 @@ The architecture of this demo is shown in the figure below. 
The processes starte ![](https://gw.alipayobjects.com/mdn/rms_5891a1/afts/img/A*dzGaSb78UCoAAAAAAAAAAAAAARQnAQ) [Then config file](https://github.com/mosn/layotto/blob/main/configs/runtime_config.json) claims `etcd` in the `config_store` section, but users can change it to other configuration center they want (currently only support etcd and apollo). -### step 1. Deploy etcd and Layotto +## step 1. Deploy etcd and Layotto -#### **With Docker Compose** +### **With Docker Compose** You can start etcd and Layotto with docker-compose ```bash @@ -18,12 +18,12 @@ cd docker/layotto-etcd docker-compose up -d ``` -#### **Compile locally (not for Windows)** +### **Compile locally (not for Windows)** You can run etcd with Docker, then compile and run Layotto locally. > [!TIP|label: Not for Windows users] > Layotto fails to compile under Windows. Windows users are recommended to deploy using docker-compose -#### step 1.1 Start etcd +### step 1.1 Start etcd If you want to run this demo, you need to start a etcd server first. You can download etcd from `https://github.com/etcd-io/etcd/releases` (You can also use docker.) @@ -36,7 +36,7 @@ start it: Then you can access etcd with the address `localhost:2379`. -#### step 1.2 Start Layotto +### step 1.2 Start Layotto Build Layotto: ```shell @@ -55,7 +55,7 @@ Run it: -### step 2. Start client APP +## step 2. Start client APP ```shell cd ${project_path}/demo/configuration/common @@ -80,9 +80,9 @@ write start receive subscribe resp store_name:"config_demo" app_id:"apollo" items: tags: > ``` -### step 3. Stop containers and release resources +## step 3. Stop containers and release resources -#### **Docker Compose** +### **Docker Compose** If you started etcd and Layotto with docker-compose, you can shut them down as follows: ```bash @@ -90,7 +90,7 @@ cd ${project_path}/docker/layotto-etcd docker-compose stop ``` -#### **Destroy the etcd container** +### **Destroy the etcd container** If you started etcd with Docker, you can destroy the etcd container as follows: ```shell diff --git a/docs/en/start/istio/start.md b/docs/en/start/istio/start.md index fc8486e152..2d95acb1ef 100644 --- a/docs/en/start/istio/start.md +++ b/docs/en/start/istio/start.md @@ -75,7 +75,7 @@ before starting the demo,you must install some components as follows: ## 4. Using istio to dynamically change routing policy -#### A. route according to version +### A. route according to version 1. Run the following command to create destination rules ``` @@ -98,7 +98,7 @@ before starting the demo,you must install some components as follows: hello, i am layotto v1 ``` -#### B. route according to a specific header +### B. route according to a specific header 1. Run the following command to modify the routing rules to access the v1 service when the request header contains `name:layotto`, and other access to the v2 service ``` diff --git a/docs/en/start/lock/start.md b/docs/en/start/lock/start.md index aff0d4bcf2..4865764349 100644 --- a/docs/en/start/lock/start.md +++ b/docs/en/start/lock/start.md @@ -6,9 +6,9 @@ The architecture of this example is shown in the figure below, and the started p ![img.png](../../../img/lock/img.png) -### step 1. Deploy Redis and Layotto +## step 1. 
Deploy Redis and Layotto -#### **with Docker Compose** +### **with Docker Compose** You can start Redis and Layotto with docker-compose ```bash @@ -17,13 +17,13 @@ cd docker/layotto-redis docker-compose up -d ``` -#### **Compile locally (not for Windows)** +### **Compile locally (not for Windows)** You can run Redis with Docker, then compile and run Layotto locally. > [!TIP|label: Not for Windows users] > Layotto fails to compile under Windows. Windows users are recommended to deploy using docker-compose -#### step 1.1. Run Redis with Docker +### step 1.1. Run Redis with Docker 1. Get the latest version of Redis docker image @@ -55,7 +55,7 @@ Parameter Description: `-p 6380:6379`: Map port 6379 of the container to port 6380 of the host. The outside can directly access the Redis service through the host ip:6380. -#### step 1.2. Compile and run Layotto +### step 1.2. Compile and run Layotto After downloading the project code to the local, enter the code directory and compile: @@ -75,7 +75,7 @@ The layotto file will be generated in the directory, run it: -### step 2. Run the client program, call Layotto to add, delete, modify and query +## step 2. Run the client program, call Layotto to add, delete, modify and query ```shell cd ${project_path}/demo/lock/common/ @@ -98,8 +98,8 @@ client2 succeeded in unlocking Demo success! ``` -### Next Step -#### What did this client Demo do? +## Next Step +### What did this client Demo do? The demo client program uses the golang version SDK provided by Layotto, calls the Layotto distributed lock API, and starts multiple goroutines to do locking and unlocking operations. The sdk is located in the `sdk` directory, and users can call the API provided by Layotto through the sdk. @@ -108,10 +108,10 @@ In addition to using sdk, you can also interact with Layotto directly through gr In fact, sdk is only a very thin package for grpc, using sdk is about equal to directly using grpc. -#### Details later, let's continue to experience other APIs +### Details later, let's continue to experience other APIs Explore other Quickstarts through the navigation bar on the left. -#### Understand the design principle of Distributed Lock API +### Understand the design principle of Distributed Lock API If you are interested in the design principle, or want to extend some functions, you can read [Distributed Lock API design document](en/design/lock/lock-api-design.md) \ No newline at end of file diff --git a/docs/en/start/trace/skywalking.md b/docs/en/start/trace/skywalking.md index b4a1484721..9d4beb8368 100644 --- a/docs/en/start/trace/skywalking.md +++ b/docs/en/start/trace/skywalking.md @@ -32,7 +32,7 @@ docker-compose -f diagnostics/skywalking/skywalking-docker-compose.yaml up -d ## Run layotto -#### **with Docker** +### **with Docker** You can run Layotto with docker ```bash @@ -42,7 +42,7 @@ docker run -d \ layotto/layotto start ``` -#### **Compile locally (not for Windows)** +### **Compile locally (not for Windows)** You can compile and run Layotto locally. 
> [!TIP|label: Not for Windows users] diff --git a/docs/zh/blog/code/webassembly/index.md b/docs/zh/blog/code/webassembly/index.md index b3f19c4dd8..6e6e2244a0 100644 --- a/docs/zh/blog/code/webassembly/index.md +++ b/docs/zh/blog/code/webassembly/index.md @@ -454,9 +454,9 @@ func (a *ABIContext) CallWasmFunction(funcName string, args ...interface{}) (int ``` 3、WASMER 虚拟机经过处理调用 WASM 插件的具体函数,比如例子中的 OnHttpRequestBody 函数 - // function, _ := instance.Exports.GetFunction("exported_function") + // function, _:= instance.Exports.GetFunction("exported_function") // nativeFunction = function.Native() - // _ = nativeFunction(1, 2, 3) + //_ = nativeFunction(1, 2, 3) // Native 会将 Function 转换为可以调用的原生 Go 函数 ```go diff --git a/docs/zh/blog/exploration-and-practice-of-antcloud-native-application-runtime-archsummit-shanghai.md b/docs/zh/blog/exploration-and-practice-of-antcloud-native-application-runtime-archsummit-shanghai.md index 340be87ee7..391b6126ca 100644 --- a/docs/zh/blog/exploration-and-practice-of-antcloud-native-application-runtime-archsummit-shanghai.md +++ b/docs/zh/blog/exploration-and-practice-of-antcloud-native-application-runtime-archsummit-shanghai.md @@ -3,8 +3,7 @@ >Mesh 模式的引入是实现应用云原生的关键路径,蚂蚁集团已在内部实现大规模落地。随着 Message、DB、Cache Mesh 等更多的中间件能力的下沉,从 Mesh 演进而来的应用运行时将是中间件技术的未来形态。应用运行时旨在帮助开发人员快速的构建云原生应用,帮助应用和基础设施进一步解耦,而应用运行时最核心是 API 标准,期望社区一起共建。 >![](https://gw.alipayobjects.com/mdn/rms_1c90e8/afts/img/A*nergRo8-RI0AAAAAAAAAAAAAARQnAQ) - -### 蚂蚁集团 Mesh 化介绍 +## 蚂蚁集团 Mesh 化介绍 蚂蚁是一家技术和创新驱动的公司,从最早淘宝里的一个支付应用,到现在服务 全球十二亿用户的大型公司,蚂蚁的技术架构演进大概会分为如下几个阶段: @@ -23,7 +22,7 @@ 可以看到蚂蚁的技术架构也是跟随公司的业务创新不断演进的,前面的从集中式到 SOA 再到微服务的过程,相信搞过微服务的同学都深有体会,而从微服务到云原生的实践是蚂蚁近几年自己探索出来的。 -### 为什么要引入 Service Mesh +## 为什么要引入 Service Mesh 蚂蚁既然有一套完整的微服务治理中间件,那为什么还需要引入 Service Mesh 呢? @@ -37,7 +36,7 @@ 我们注意到云原生里有 Service Mesh 一些理念开始出现,所以开始往这个方向探索。在 Service Mesh 的理念里,有两个概念,一个是 Control Plane 控制平面,一个是 Data Plane 数据平面。控制面这里暂时不展开,其中数据平面的核心思想就是解耦,将一些业务无需关系的复杂逻辑(如 RPC 调用里的服务发现、服务路由、熔断限流、安全)抽象到一个独立进程里去。只要保持业务和独立进程的通信协议不变,这些能力的演进可以跟随这个独立的进程自主升级,整个 Mesh 就可以做到统一演进。而我们的跨语言应用,只要流量是经过我们的 Data Plane 的,都可以享受到刚才提到的各种服务治理相关的能力,应用对底层的基础设施能力是透明的,真正的云原生的。 -### 蚂蚁 Mesh 落地过程 +## 蚂蚁 Mesh 落地过程 所以从 2017 年底开始,蚂蚁就开始探索 Service Mesh 的技术方向,并提出了 基础设施统一,业务无感升级 的愿景。主要的里程碑就是: @@ -51,7 +50,7 @@ 2020 年双十一,全站超过 80% 的在线应用接入了 Mesh 化,整套 Mesh 体系也具备了 2 个月从能力开发到全站升级完成的能力。 -### 蚂蚁 Mesh 落地架构 +## 蚂蚁 Mesh 落地架构 目前 Mesh 化在蚂蚁落地规模是应用约数千个,容器数十万的级别,这个规模的落地,在业界是数一数二的,根本就没有前人的路可以学习,所以蚂蚁在落地过程中,也建设一套完整的研发运维体系去支撑蚂蚁的 Mesh 化。 @@ -62,7 +61,7 @@ 同时随着下沉能力的越来越多,各个能力之前也面临了研发协作的一些问题,甚至互相影响性能和稳定性的问题,所以对于 Mesh 自身的研发效能,我们也做了一下模块化隔离、新能力动态插拔、自动回归等改进,目前一个下沉能力从开发到全站推广完成可以在 2 个月内完成。 -### 云原生应用运行时上的探索 +## 云原生应用运行时上的探索 **大规模落地后的新问题与思考** @@ -87,7 +86,7 @@ >![](https://gw.alipayobjects.com/mdn/rms_1c90e8/afts/img/A*hsZBQJg0VnoAAAAAAAAAAAAAARQnAQ) -### 蚂蚁云原生应用运行时架构 +## 蚂蚁云原生应用运行时架构 从去年的 3 月份开始,经过内部的多轮讨论,以及对业界一些新理念的调研,我们提出了一个“云原生应用运行时”(下称运行时)的概念。顾名思义,我们希望这个运行时能够包含应用所关心的所有分布式能力,帮助开发人员快速的构建云原生应用,帮助应用和基础设施进一步解耦! 
@@ -152,7 +151,7 @@ FaaS 冷启预热池也是我们近期在探索的一个场景,大家知道 FaaS 里的 Function 在冷启的时候,是需要从创建 Pod 到下载 Function 再到启动的,这个过程会比较长。有了运行时之后,我们可以提前把 Pod 创建出来并启动好运行时,等到应用启动的时候其实已经非常简单的应用逻辑了,经过测试发现可以将从 5s 缩短 80% 到 1s。这个方向我们还会持续探索当中。 -### 规划和展望 +## 规划和展望 **API 共建** @@ -166,7 +165,7 @@ FaaS 冷启预热池也是我们近期在探索的一个场景,大家知道 Fa >![](https://gw.alipayobjects.com/mdn/rms_1c90e8/afts/img/A*Kgr9QLc5TH4AAAAAAAAAAAAAARQnAQ) -### 总结 +## 总结 **最后做一下小结:** diff --git a/docs/zh/building_blocks/file/file.md b/docs/zh/building_blocks/file/file.md index 2b1428ac05..4933dbd902 100644 --- a/docs/zh/building_blocks/file/file.md +++ b/docs/zh/building_blocks/file/file.md @@ -41,7 +41,7 @@ api的设计请参考下面的issue: https://github.com/mosn/layotto/issues/98 ``` -#### 接口参数 +### 接口参数 ```protobuf message GetFileRequest { diff --git a/docs/zh/design/file/file-design.md b/docs/zh/design/file/file-design.md index 5b3b9c2cc5..e907cb826c 100644 --- a/docs/zh/design/file/file-design.md +++ b/docs/zh/design/file/file-design.md @@ -1,6 +1,6 @@ # File API 设计文档 -### API +## API API定义主要依据常用的文件操作来定义的,分为增删改查四个接口,对于Get/Put接口来说,文件的上传和下载需要支持流传输。因此接口定义如下: @@ -21,7 +21,7 @@ API定义主要依据常用的文件操作来定义的,分为增删改查四 关于接口的定义的讨论可以参照[issue98](https://github.com/mosn/layotto/issues/98) -### 参数定义 +## 参数定义 ```protobuf @@ -69,7 +69,7 @@ message DelFileRequest { } ``` -#### Get接口 +### Get接口 Get的入参主要有三个: @@ -79,7 +79,7 @@ Get的入参主要有三个: | name | 文件名字 | yes| | metadata | 元数据,该字段用户可以用来指定component需要的一些字段,(eg:权限,用户名等) | yes| -#### Put接口 +### Put接口 Put接口入参主要有三个,多了一个data字段用来传输文件内容: @@ -91,7 +91,7 @@ Put接口入参主要有三个,多了一个data字段用来传输文件内容 | metadata | 元数据,该字段用户可以用来指定component需要的一些字段,(eg:权限,用户名等) | yes| -#### List和Del接口 +### List和Del接口 两个接口的参数是一样的: @@ -101,7 +101,7 @@ Put接口入参主要有三个,多了一个data字段用来传输文件内容 | name | 文件名字 | yes| | metadata | 元数据,该字段用户可以用来指定component需要的一些字段,(eg:权限,用户名等) | yes| -#### 配置参数 +### 配置参数 配置参数,不同的component可以配置不同格式,比如aliOSS的配置如下: diff --git a/docs/zh/development/release-guide.md b/docs/zh/development/release-guide.md index 2665b9e604..6a46d250a2 100644 --- a/docs/zh/development/release-guide.md +++ b/docs/zh/development/release-guide.md @@ -6,13 +6,13 @@ Layotto 发布周期暂定为每季度发布一次。 ## 发布 checklist -#### Step1: 检查当前迭代的 [Roadmap](https://github.com/mosn/layotto/projects) +### Step1: 检查当前迭代的 [Roadmap](https://github.com/mosn/layotto/projects) 1. 检查进行中的任务 2. 检查未完成的任务 3. 与负责人确认任务状态和发布内容 -#### Step2: 创建发布 tag, push 至 github 并检查工作流 +### Step2: 创建发布 tag, push 至 github 并检查工作流 1. 规范:请按照 `v{majorVersion}.{subVersion}.{latestVersion}` 格式创建 tag。 @@ -36,7 +36,7 @@ Layotto 发布周期暂定为每季度发布一次。 ![release.png](../../img/development/workflow/release.png) -#### Step3: Draft a new release 并编写发布报告 +### Step3: Draft a new release 并编写发布报告 > 发布报告可以先用 github 的功能自动生成,再基于生成的内容做修改。 @@ -44,14 +44,14 @@ Layotto 发布周期暂定为每季度发布一次。 ![img_1.png](../../img/development/release/img_1.png) -#### Step4: 上传多平台架构的 Binaries +### Step4: 上传多平台架构的 Binaries > 2022/05/04更新:这一步可以忽略。打 tag 发布后,Layotto 的 Release Pipeline 会自动上传二进制文件,无需手动上传。PR 见 https://github.com/mosn/layotto/pull/566 > 如果没有自动上传,可以手动将 `步骤 2` 中构建的多平台 Artifacts 下载、上传 ![img.png](../../img/development/release/img.png) -#### Step5: 确认发布 +### Step5: 确认发布 1. 点击发布 2. 
社区周知 diff --git a/docs/zh/start/configuration/start-apollo.md b/docs/zh/start/configuration/start-apollo.md index 62b05474d5..cae85a6758 100644 --- a/docs/zh/start/configuration/start-apollo.md +++ b/docs/zh/start/configuration/start-apollo.md @@ -6,13 +6,13 @@ ![img.png](../../../img/configuration/apollo/arch.png) -### 第一步:部署apollo配置中心(可选) +## 第一步:部署apollo配置中心(可选) 您可以跳过这一步,使用本demo无需自己部署apollo服务器。本demo会使用[apollo官方](https://github.com/apolloconfig/apollo) 提供的演示环境http://106.54.227.205/ 如果您自己部署了apollo,可以修改Layotto的[config文件](https://github.com/mosn/layotto/blob/main/configs/config_apollo.json) ,将apollo服务器地址改成您自己的。 -### 第二步:运行Layotto server 端 +## 第二步:运行Layotto server 端 将Layotto代码下载到本地 @@ -51,7 +51,7 @@ go build -o layotto > > 遇到这种情况,您可以先尝试其他 demo,例如 [etcd demo](zh/start/configuration/start) -### 第三步:启动客户端Demo,调用Layotto增删改查 +## 第三步:启动客户端Demo,调用Layotto增删改查 ```shell cd ${project_path}/demo/configuration/common @@ -76,8 +76,8 @@ write start receive subscribe resp store_name:"config_demo" app_id:"apollo" items: tags: > ``` -### 下一步 -#### 这个客户端Demo做了什么? +## 下一步 +### 这个客户端Demo做了什么? 示例客户端程序中使用了Layotto提供的golang版本sdk,调用Layotto 的Configuration API对配置数据进行增删改查、订阅变更。 sdk位于`sdk`目录下,用户可以通过sdk调用Layotto提供的API。 @@ -87,5 +87,5 @@ sdk位于`sdk`目录下,用户可以通过sdk调用Layotto提供的API。 其实sdk只是对grpc很薄的封装,用sdk约等于直接用grpc调。 -#### 细节以后再说,继续体验其他API +### 细节以后再说,继续体验其他API 通过左侧的导航栏,继续体验别的API吧! diff --git a/docs/zh/start/configuration/start.md b/docs/zh/start/configuration/start.md index 3e487497b2..5f75667e8f 100644 --- a/docs/zh/start/configuration/start.md +++ b/docs/zh/start/configuration/start.md @@ -4,9 +4,9 @@ 本示例架构如下图,启动的进程有:客户端程程序、Layotto、etcd 。 ![](https://gw.alipayobjects.com/mdn/rms_5891a1/afts/img/A*dzGaSb78UCoAAAAAAAAAAAAAARQnAQ) -### step 1. 启动 etcd 和 Layotto +## step 1. 启动 etcd 和 Layotto -#### **使用 Docker Compose** +### **使用 Docker Compose** 您可以使用 docker-compose 启动 etcd 和 Layotto ```bash @@ -15,12 +15,12 @@ cd docker/layotto-etcd docker-compose up -d ``` -#### **本地编译(不适合 Windows)** +### **本地编译(不适合 Windows)** 您可以使用 Docker 运行 etcd,然后本地编译、运行 Layotto。 > [!TIP|label: 不适合 Windows 用户] > Layotto 在 Windows 下会编译失败。建议 Windows 用户使用 docker-compose 部署 -#### step 1.1 启动 etcd +### step 1.1 启动 etcd etcd的启动方式可以参考etcd的[官方文档](https://etcd.io/docs/v3.5/quickstart/) @@ -40,7 +40,7 @@ etcd的启动方式可以参考etcd的[官方文档](https://etcd.io/docs/v3.5/q 默认监听地址为 `localhost:2379` -#### step 1.2 启动 layotto +### step 1.2 启动 layotto ```shell cd ${project_path}/cmd/layotto @@ -61,7 +61,7 @@ go build -o layotto > 解释:[runtime_config.json](https://github.com/mosn/layotto/blob/main/configs/runtime_config.json) 是 Layotto 的配置文件,它在 `config_store` 中声明了使用 etcd 作为配置中心。用户可以更改配置文件,改成使用自己想要用的其他配置中心(目前支持 etcd 和 apollo)。 -### step 2. 启动测试demo +## step 2. 
启动测试demo ```shell cd ${project_path}/demo/configuration/common @@ -86,9 +86,9 @@ write start receive subscribe resp store_name:"config_demo" app_id:"apollo" items: tags: > ``` -### step 3.销毁容器,释放资源 +## step 3.销毁容器,释放资源 -#### **关闭 Docker Compose** +### **关闭 Docker Compose** 如果您是用 docker-compose 启动的 etcd 和 Layotto,可以按以下方式关闭: ```bash @@ -96,7 +96,7 @@ cd ${project_path}/docker/layotto-etcd docker-compose stop ``` -#### **销毁 etcd Docker 容器** +### **销毁 etcd Docker 容器** 如果您是用 Docker 启动的 etcd,可以按以下方式销毁 etcd 容器: ```shell diff --git a/docs/zh/start/faas/start.md b/docs/zh/start/faas/start.md index bf10a6cadb..8f53e06b3b 100644 --- a/docs/zh/start/faas/start.md +++ b/docs/zh/start/faas/start.md @@ -34,7 +34,7 @@ Layotto支持加载并运行以 wasm 为载体的 Function,并支持Function > redis-server /usr/local/etc/redis.conf ``` -注:如果redis安装在本机器,Virtualbox内的虚拟机是无法访问到redis的, 需要把 redis.conf 中的 protected-mode 修改为 no.同时增加 bind * -::*, 让其监听所有接口。 +注:如果redis安装在本机器,Virtualbox内的虚拟机是无法访问到redis的, 需要把 redis.conf 中的 protected-mode 修改为 no.同时增加 bind *-::*, 让其监听所有接口。 #### B、以 virtualbox + containerd 模式启动 minikube diff --git a/docs/zh/start/istio/start.md b/docs/zh/start/istio/start.md index 6d63c9f5ff..722792bc6b 100644 --- a/docs/zh/start/istio/start.md +++ b/docs/zh/start/istio/start.md @@ -73,7 +73,7 @@ MOSN作为Istio官方认可的数据面实现,这里就对Layotto如何跟Isti ## 四、使用Istio动态改变路由策略 -#### A、按version路由能力 +### A、按version路由能力 1. 执行如下命令创建destination rules ``` @@ -96,7 +96,7 @@ MOSN作为Istio官方认可的数据面实现,这里就对Layotto如何跟Isti hello, i am layotto v1 ``` -#### B、按header信息进行路由 +### B、按header信息进行路由 1. 执行如下命令把路由规则修改为请求header中包含`name:layotto`时会访问v1服务,其他则访问v2服务 ``` diff --git a/docs/zh/start/trace/skywalking.md b/docs/zh/start/trace/skywalking.md index 4f8c75022a..55aea56526 100644 --- a/docs/zh/start/trace/skywalking.md +++ b/docs/zh/start/trace/skywalking.md @@ -32,7 +32,7 @@ docker-compose -f diagnostics/skywalking/skywalking-docker-compose.yaml up -d ## 运行 layotto -#### **使用 Docker** +### **使用 Docker** 您可以用 docker 启动 Layotto ```bash @@ -42,7 +42,7 @@ docker run -d \ layotto/layotto start ``` -#### **本地编译(不适合 Windows)** +### **本地编译(不适合 Windows)** 您可以本地编译、运行 Layotto。 > [!TIP|label: 不适合 Windows 用户] diff --git a/docs/zh/start/trace/zipkin.md b/docs/zh/start/trace/zipkin.md index 256e515f81..02267ca6a3 100644 --- a/docs/zh/start/trace/zipkin.md +++ b/docs/zh/start/trace/zipkin.md @@ -39,7 +39,7 @@ docker-compose -f diagnostics/zipkin/zipkin-docker-compose.yaml up -d -#### **使用 Docker** +### **使用 Docker** 您可以用 docker 启动 Layotto @@ -50,7 +50,7 @@ docker run -d \ layotto/layotto start ``` -#### **本地编译(不适合 Windows)** +### **本地编译(不适合 Windows)** 您可以本地编译、运行 Layotto。 > [!TIP|label: 不适合 Windows 用户] From 1f2d8fa405e6262babc44e70e701699a94ceac79 Mon Sep 17 00:00:00 2001 From: leemos Date: Thu, 23 Jun 2022 09:03:12 +0800 Subject: [PATCH 7/8] Update docs/zh/start/faas/start.md Co-authored-by: seeflood --- docs/zh/start/faas/start.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/start/faas/start.md b/docs/zh/start/faas/start.md index 8f53e06b3b..c816c13398 100644 --- a/docs/zh/start/faas/start.md +++ b/docs/zh/start/faas/start.md @@ -34,7 +34,7 @@ Layotto支持加载并运行以 wasm 为载体的 Function,并支持Function > redis-server /usr/local/etc/redis.conf ``` -注:如果redis安装在本机器,Virtualbox内的虚拟机是无法访问到redis的, 需要把 redis.conf 中的 protected-mode 修改为 no.同时增加 bind *-::*, 让其监听所有接口。 +注:如果redis安装在本机器,Virtualbox内的虚拟机是无法访问到redis的, 需要把 redis.conf 中的 protected-mode 修改为 no.同时增加 `bind * -::*`, 让其监听所有接口。 #### B、以 virtualbox + containerd 模式启动 minikube From 7f263da83c21e99662273d22af2124136a2d76fc Mon Sep 17 
00:00:00 2001 From: seeflood Date: Thu, 23 Jun 2022 09:07:18 +0800 Subject: [PATCH 8/8] modify template.tmpl Signed-off-by: seeflood --- spec/proto/runtime/v1/template.tmpl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/spec/proto/runtime/v1/template.tmpl b/spec/proto/runtime/v1/template.tmpl index 5668b5b05d..64fff10254 100644 --- a/spec/proto/runtime/v1/template.tmpl +++ b/spec/proto/runtime/v1/template.tmpl @@ -12,7 +12,7 @@ This document is automaticallly generated from the [`.proto`](https://github.com {{range .Services}} -### [gRPC Service] {{.Name}} +## [gRPC Service] {{.Name}} {{.Description}} | Method Name | Request Type | Response Type | Description | @@ -26,7 +26,7 @@ This document is automaticallly generated from the [`.proto`](https://github.com


-### {{.LongName}} +## {{.LongName}} {{.Description}} {{if .HasFields}} @@ -50,7 +50,7 @@ This document is automaticallly generated from the [`.proto`](https://github.com {{range .Enums}} -### {{.LongName}} +## {{.LongName}} {{.Description}} | Name | Number | Description | @@ -64,7 +64,7 @@ This document is automaticallly generated from the [`.proto`](https://github.com {{if .HasExtensions}} -### File-level Extensions +## File-level Extensions | Extension | Type | Base | Number | Description | | --------- | ---- | ---- | ------ | ----------- | {{range .Extensions -}}