change(request-id): remove snowflake algorithm #9715

Merged (1 commit) on Jun 27, 2023
apisix/plugins/request-id.lua (185 changes: 2 additions & 183 deletions)
@@ -16,36 +16,23 @@
--

local ngx = ngx
local bit = require("bit")
local core = require("apisix.core")
local snowflake = require("snowflake")
local uuid = require("resty.jit-uuid")
local nanoid = require("nanoid")
local process = require("ngx.process")
local timers = require("apisix.timers")
local tostring = tostring
local math_pow = math.pow
local math_ceil = math.ceil
local math_floor = math.floor
local math_random = math.random
local str_byte = string.byte
local ffi = require "ffi"

local plugin_name = "request-id"

local data_machine = nil
local snowflake_inited = nil

local attr = nil

local schema = {
type = "object",
properties = {
header_name = {type = "string", default = "X-Request-Id"},
include_in_response = {type = "boolean", default = true},
algorithm = {
type = "string",
enum = {"uuid", "snowflake", "nanoid", "range_id"},
enum = {"uuid", "nanoid", "range_id"},
default = "uuid"
},
range_id = {
@@ -67,164 +54,17 @@ local schema = {
}
}

local attr_schema = {
type = "object",
properties = {
snowflake = {
type = "object",
properties = {
enable = {type = "boolean", default = false},
snowflake_epoc = {type = "integer", minimum = 1, default = 1609459200000},
data_machine_bits = {type = "integer", minimum = 1, maximum = 31, default = 12},
sequence_bits = {type = "integer", minimum = 1, default = 10},
delta_offset = {type = "integer", default = 1, enum = {1, 10, 100, 1000}},
data_machine_ttl = {type = "integer", minimum = 1, default = 30},
data_machine_interval = {type = "integer", minimum = 1, default = 10}
}
}
}
}

local _M = {
version = 0.1,
priority = 12015,
name = plugin_name,
schema = schema
}


function _M.check_schema(conf)
return core.schema.check(schema, conf)
end


-- Generates the current process data machine
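-- Allocation protocol, as implemented below: each worker scans candidate IDs
-- 1..max_number, grants an etcd lease with the configured data_machine_ttl,
-- then tries setnx(prefix .. id, uuid). If the value read back matches this
-- process's uuid, the ID is claimed: the key is re-written with the lease
-- attached and a timer renews the lease every data_machine_interval seconds.
-- On any failure, the next candidate ID is tried.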
local function gen_data_machine(max_number)
if data_machine == nil then
local etcd_cli, prefix = core.etcd.new()
local prefix = prefix .. "/plugins/request-id/snowflake/"
local uuid = uuid.generate_v4()
local id = 1
::continue::
while (id <= max_number) do
local res, err = etcd_cli:grant(attr.snowflake.data_machine_ttl)
if err then
id = id + 1
core.log.error("Etcd grant failure, err: ".. err)
goto continue
end

local _, err1 = etcd_cli:setnx(prefix .. tostring(id), uuid)
local res2, err2 = etcd_cli:get(prefix .. tostring(id))

if err1 or err2 or res2.body.kvs[1].value ~= uuid then
core.log.notice("data_machine " .. id .. " is not available")
id = id + 1
else
data_machine = id

local _, err3 =
etcd_cli:set(
prefix .. tostring(id),
uuid,
{
prev_kv = true,
lease = res.body.ID
}
)

if err3 then
id = id + 1
etcd_cli:delete(prefix .. tostring(id))
core.log.error("set data_machine " .. id .. " lease error: " .. err3)
goto continue
end

local lease_id = res.body.ID
local start_at = ngx.time()
local handler = function()
local now = ngx.time()
if now - start_at < attr.snowflake.data_machine_interval then
return
end

local _, err4 = etcd_cli:keepalive(lease_id)
if err4 then
snowflake_inited = nil
data_machine = nil
timers.unregister_timer("plugin#request-id")
core.log.error("snowflake data_machine: " .. id .." lease failed.")
end
start_at = now
core.log.info("snowflake data_machine: " .. id .." lease success.")
end

timers.register_timer("plugin#request-id", handler)
core.log.info(
"timer created to lease snowflake algorithm data_machine, interval: ",
attr.snowflake.data_machine_interval)
core.log.notice("lease snowflake data_machine: " .. id)
break
end
end

if data_machine == nil then
core.log.error("No data_machine is not available")
return nil
end
end
return data_machine
end


-- Split 'Data Machine' into 'Worker ID' and 'datacenter ID'
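-- Illustrative example with the default data_machine_bits = 12 (6 node bits,
-- 6 datacenter bits): split_data_machine(300, 6, 6) returns
-- worker_id = 300 % 2^6 = 44 and datacenter_id = (300 >> 6) % 2^6 = 4.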
local function split_data_machine(data_machine, node_id_bits, datacenter_id_bits)
local num = bit.tobit(data_machine)
local worker_id = bit.band(num, math_pow(2, node_id_bits) - 1)
num = bit.rshift(num, node_id_bits)
local datacenter_id = bit.band(num, math_pow(2, datacenter_id_bits) - 1)
return worker_id, datacenter_id
end


-- Initialize the snowflake algorithm
local function snowflake_init()
if snowflake_inited == nil then
local max_number = math_pow(2, (attr.snowflake.data_machine_bits))
local datacenter_id_bits = math_floor(attr.snowflake.data_machine_bits / 2)
local node_id_bits = math_ceil(attr.snowflake.data_machine_bits / 2)
data_machine = gen_data_machine(max_number)
if data_machine == nil then
return ""
end

local worker_id, datacenter_id = split_data_machine(data_machine,
node_id_bits, datacenter_id_bits)

core.log.info("snowflake init datacenter_id: " ..
datacenter_id .. " worker_id: " .. worker_id)
snowflake.init(
datacenter_id,
worker_id,
attr.snowflake.snowflake_epoc,
node_id_bits,
datacenter_id_bits,
attr.snowflake.sequence_bits,
attr.delta_offset
)
snowflake_inited = true
end
end


-- generate snowflake id
local function next_id()
if snowflake_inited == nil then
snowflake_init()
end
return snowflake:next_id()
end

-- generate range_id
local function get_range_id(range_id)
local res = ffi.new("unsigned char[?]", range_id.length)
@@ -246,7 +86,7 @@ local function get_request_id(conf)
return get_range_id(conf.range_id)
end

return next_id()
return uuid()
end


@@ -276,25 +116,4 @@ function _M.header_filter(conf, ctx)
end
end

function _M.init()
local local_conf = core.config.local_conf()
attr = core.table.try_read_attr(local_conf, "plugin_attr", plugin_name)
local ok, err = core.schema.check(attr_schema, attr)
if not ok then
core.log.error("failed to check the plugin_attr[", plugin_name, "]", ": ", err)
return
end
if attr.snowflake.enable then
if process.type() == "worker" then
ngx.timer.at(0, snowflake_init)
end
end
end

function _M.destroy()
if snowflake_inited then
timers.unregister_timer("plugin#request-id")
end
end

return _M
conf/config-default.yaml (8 changes: 0 additions & 8 deletions)
@@ -565,14 +565,6 @@ plugin_attr:
report_ttl: 60 # live time for server info in etcd (unit: second)
dubbo-proxy:
upstream_multiplex_count: 32
request-id:
snowflake:
enable: false
snowflake_epoc: 1609459200000 # the starting timestamp is expressed in milliseconds
data_machine_bits: 12 # data machine bit, maximum 31, because Lua cannot do bit operations greater than 31
sequence_bits: 10 # each machine generates a maximum of (1 << sequence_bits) serial numbers per millisecond
data_machine_ttl: 30 # live time for data_machine in etcd (unit: second)
data_machine_interval: 10 # lease renewal interval in etcd (unit: second)
proxy-mirror:
timeout: # proxy timeout in mirrored sub-request
connect: 60s
docs/en/latest/plugins/request-id.md (36 changes: 1 addition & 35 deletions)
@@ -44,44 +44,10 @@ The Plugin will not add a unique ID if the request already has a header with the same name
| ------------------- | ------- | -------- | -------------- | ------------------------------- | ---------------------------------------------------------------------- |
| header_name | string | False | "X-Request-Id" | | Header name for the unique request ID. |
| include_in_response | boolean | False | true | | When set to `true`, adds the unique request ID in the response header. |
| algorithm | string | False | "uuid" | ["uuid", "snowflake", "nanoid", "range_id"] | Algorithm to use for generating the unique request ID. |
| algorithm | string | False | "uuid" | ["uuid", "nanoid", "range_id"] | Algorithm to use for generating the unique request ID. |
| range_id.char_set | string | False | "abcdefghijklmnopqrstuvwxyzABCDEFGHIGKLMNOPQRSTUVWXYZ0123456789" | The minimum string length is 6 | Character set for range_id |
| range_id.length | integer | False | 16 | Minimum 6 | Id length for range_id algorithm |
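
For reference, here is a minimal Lua sketch of how a `range_id`-style value could be built from `char_set` and `length`. The helper name `gen_range_id` is illustrative only; the plugin's own implementation fills an FFI byte buffer, as seen in the diff above.

```lua
-- Simplified illustration of range_id generation (not the plugin's exact code).
local function gen_range_id(char_set, length)
    local n = #char_set
    local buf = {}
    for i = 1, length do
        local pos = math.random(n)               -- random index into char_set
        buf[i] = string.sub(char_set, pos, pos)  -- append the chosen character
    end
    return table.concat(buf)
end

-- e.g. gen_range_id("abcdefghijklmnopqrstuvwxyz0123456789", 16)
```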

### Using snowflake algorithm to generate unique ID

:::caution

- When you need to use `snowflake` algorithm, make sure APISIX has the permission to write to the etcd.
- Please read this documentation before deciding to use the snowflake algorithm. Once it is configured, you cannot change the configuration arbitrarily; doing so may result in duplicate IDs.

:::

The `snowflake` algorithm supports flexible configurations to cover a variety of needs. Attributes are as follows:

| Name | Type | Required | Default | Description |
| --------------------- | ------- | -------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| enable | boolean | False | false | When set to `true`, enables the snowflake algorithm. |
| snowflake_epoc | integer | False | 1609459200000 | Starting timestamp in milliseconds. The default is `2021-01-01T00:00:00Z`, which supports a span of about 69 years, until `2090-09-07T15:47:35Z`. |
| data_machine_bits | integer | False | 12 | Maximum number of supported machines (processes), `1 << data_machine_bits`. Corresponds to the set of `workerIDs` and `datacenterIDs` in the snowflake definition. Each process is associated with a unique ID. The maximum number of supported processes is `pow(2, data_machine_bits)`, so for the default value of 12 bits it is 4096. |
| sequence_bits | integer | False | 10 | Maximum number of generated ID per millisecond per node `1 << sequence_bits`. Each process generates up to 1024 IDs per millisecond. |
| data_machine_ttl | integer | False | 30 | Valid time in seconds of registration of `data_machine` in etcd. |
| data_machine_interval | integer | False | 10 | Time in seconds between `data_machine` renewals in etcd. |
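
As a rough, illustrative sanity check of these defaults (assuming the bits left over after `data_machine_bits` and `sequence_bits` hold the millisecond timestamp, which is consistent with the 69-year figure above):

```lua
-- Illustrative arithmetic for the default snowflake attributes (assumed bit layout).
local data_machine_bits = 12   -- up to 2^12 = 4096 processes
local sequence_bits     = 10   -- up to 2^10 = 1024 IDs per process per millisecond
local timestamp_bits    = 63 - data_machine_bits - sequence_bits  -- 41 bits assumed for the timestamp
print(2 ^ data_machine_bits)                                      -- 4096
print((2 ^ timestamp_bits) / (1000 * 60 * 60 * 24 * 365))         -- roughly 69.7 years
```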

To use the snowflake algorithm, you have to enable it first on your configuration file `conf/config.yaml`:

```yaml title="conf/config.yaml"
plugin_attr:
request-id:
snowflake:
enable: true
snowflake_epoc: 1609459200000
data_machine_bits: 12
sequence_bits: 10
data_machine_ttl: 30
data_machine_interval: 10
```

## Enabling the Plugin

The example below enables the Plugin on a specific Route:
docs/zh/latest/plugins/request-id.md (36 changes: 1 addition & 35 deletions)
@@ -42,44 +42,10 @@ description: 本文介绍了 Apache APISIX request-id 插件的相关操作,
| ------------------- | ------- | -------- | -------------- | ------ | ------------------------------ |
| header_name | string | 否 | "X-Request-Id" | | unique ID 的请求头的名称。 |
| include_in_response | boolean | 否 | true | | 当设置为 `true` 时,将 unique ID 加入返回头。 |
| algorithm | string | 否 | "uuid" | ["uuid", "snowflake", "nanoid", "range_id"] | 指定的 unique ID 生成算法。 |
| algorithm | string | 否 | "uuid" | ["uuid", "nanoid", "range_id"] | 指定的 unique ID 生成算法。 |
| range_id.char_set | string | 否 | "abcdefghijklmnopqrstuvwxyzABCDEFGHIGKLMNOPQRSTUVWXYZ0123456789" | 字符串长度最小为 6 | range_id 算法的字符集 |
| range_id.length | integer | 否 | 16 | 最小值为 6 | range_id 算法的 id 长度 |

### 使用 snowflake 算法生成 unique ID

:::caution 警告

- 当使用 `snowflake` 算法时,请确保 APISIX 有权限写入 etcd。
- 在决定使用 `snowflake` 算法时,请仔细阅读本文档了解配置细节。因为一旦启用相关配置信息后,就不能随意调整,否则可能会导致生成重复的 ID。

:::

`snowflake` 算法支持灵活配置来满足各种需求,可配置的参数如下:

| 名称 | 类型 | 必选项 | 默认值 | 描述 |
| ------------------- | ------- | -------- | -------------- | ------------------------------ |
| enable | boolean | 否 | false | 当设置为 `true` 时,启用 `snowflake` 算法。 |
| snowflake_epoc | integer | 否 | 1609459200000 | 起始时间戳,以毫秒为单位。默认为 `2021-01-01T00:00:00Z`, 可以支持 `69 年`到 `2090-09-07 15:47:35Z`。 |
| data_machine_bits | integer | 否 | 12 | 最多支持的机器(进程)数量。与 `snowflake` 定义中 `workerIDs` 和 `datacenterIDs` 的集合对应,插件会为每一个进程分配一个 unique ID。最大支持进程数为 `pow(2, data_machine_bits)`。即对于默认值 `12 bits`,最多支持的进程数为 `4096`。|
| sequence_bits | integer | 否 | 10 | 每个节点每毫秒内最多产生的 ID 数量。每个进程每毫秒最多产生 `1024` 个 ID。 |
| data_machine_ttl | integer | 否 | 30 | etcd 中 `data_machine` 注册有效时间,以秒为单位。 |
| data_machine_interval | integer | 否 | 10 | etcd 中 `data_machine` 续约间隔时间,以秒为单位。 |

如果你需要使用 `snowflake` 算法,请务必在配置文件 `./conf/config.yaml` 中添加以下参数:

```yaml title="conf/config.yaml"
plugin_attr:
request-id:
snowflake:
enable: true
snowflake_epoc: 1609459200000
data_machine_bits: 12
sequence_bits: 10
data_machine_ttl: 30
data_machine_interval: 10
```

## 启用插件

以下示例展示了如何在指定路由上启用 `request-id` 插件:
rockspec/apisix-master-0.rockspec (1 change: 0 additions & 1 deletion)
@@ -69,7 +69,6 @@ dependencies = {
"penlight = 1.9.2-1",
"ext-plugin-proto = 0.6.0",
"casbin = 1.41.5",
"api7-snowflake = 2.0-1",
"inspect == 3.1.1",
"lualdap = 1.2.6-1",
"lua-resty-rocketmq = 0.3.0-0",