Skip to content

Commit

Permalink
feat: release bklog 4.6.5
Browse files Browse the repository at this point in the history
  • Loading branch information
jayjiahua committed Oct 26, 2023
1 parent 99075a3 commit 350ef73
Show file tree
Hide file tree
Showing 143 changed files with 4,729 additions and 3,139 deletions.
2 changes: 1 addition & 1 deletion bklog/app.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ is_use_celery: True
author: 蓝鲸智云
introduction: 蓝鲸智云日志平台是为了解决运维场景中查询日志难的问题而推出的一款Saas,基于业界主流的全文检索引擎,通过蓝鲸智云的专属agent进行日志采集,无需登录各台机器,集中管理所有日志。
introduction_en: BlueKing Log System is a SaaS to solve the problem that it is difficult to query logs in operation and maintenance scenarios, With full-text search engine based on the mainstream in the industry. It collects logs through BlueKing's exclusive agent without logging in to each machine and manages all logs centrally.
version: 4.7.0
version: 4.6.6
category: 运维工具
desktop:
width: 1300
Expand Down
8 changes: 0 additions & 8 deletions bklog/apps/api/modules/monitor.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,14 +81,6 @@ def __init__(self):
default_return_value=None,
before_request=add_esb_info_before_request,
)
self.save_alarm_strategy_v3 = DataAPI(
method="POST",
url=MONITOR_APIGATEWAY_ROOT + "save_alarm_strategy_v3/",
module=self.MODULE,
description="保存告警策略V3",
default_return_value=None,
before_request=add_esb_info_before_request,
)
self.query_log_relation = DataAPI(
method="POST",
url=MONITOR_APIGATEWAY_ROOT + "query_log_relation",
Expand Down
3 changes: 1 addition & 2 deletions bklog/apps/api/modules/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -175,8 +175,7 @@ def add_esb_info_before_request(params):

def add_esb_info_before_request_for_bkdata_token(params): # pylint: disable=function-name-too-long
req = get_request()
skip_check = getattr(req, "skip_check", False)
if settings.BKAPP_IS_BKLOG_API and not skip_check:
if settings.BKAPP_IS_BKLOG_API:
auth_info = EsquerySearchPermissions.get_auth_info(req)
if auth_info["bk_app_code"] in settings.ESQUERY_WHITE_LIST:
# 在白名单内的 app 使用超级权限
Expand Down
10 changes: 0 additions & 10 deletions bklog/apps/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -177,13 +177,3 @@ class SpacePropertyEnum(ChoicesEnum):
SPACE_TYPE = "space_type"

_choices_labels = (SPACE_TYPE, _("空间类型"))


class ApiTokenAuthType(ChoicesEnum):
    """
    API token authentication type.

    Used as the ``type`` discriminator when creating/looking up ApiAuthToken
    records (see CustomIndexSetESDataSource.get_token, which requests a token
    of type GRAFANA per space).
    """

    # Token type for Grafana data-source access
    GRAFANA = "Grafana"

    _choices_labels = ((GRAFANA, _("Grafana")),)
113 changes: 5 additions & 108 deletions bklog/apps/grafana/data_source.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,16 +20,13 @@
the project delivered to anyone in the future.
"""
import json
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, Dict, List, Union

from apps.api import BkDataQueryApi
from apps.constants import ApiTokenAuthType
from apps.feature_toggle.handlers.toggle import FeatureToggleObject
from apps.feature_toggle.models import FeatureToggle
from apps.feature_toggle.plugins.constants import GRAFANA_CUSTOM_ES_DATASOURCE
from apps.log_commons.models import ApiAuthToken
from apps.log_esquery.esquery.client.QueryClient import QueryClient
from apps.log_esquery.esquery.client.QueryClientBkData import QueryClientBkData
from apps.log_esquery.esquery.client.QueryClientEs import QueryClientEs
Expand All @@ -46,22 +43,13 @@
class CustomIndexSetESDataSource:
"""可以转换成Grafana DataSource的索引集"""

space_uid: str = ""
index_set_id: int = 0
index_set_name: str = ""
time_field: str = DEFAULT_TIME_FIELD
token: str = ""

@classmethod
def get_token(cls, space_uid: str):
"""获取token"""
token_obj, __ = ApiAuthToken.objects.get_or_create(space_uid=space_uid, type=ApiTokenAuthType.GRAFANA.value)
return token_obj.token

@classmethod
def list(cls, space_uid: str) -> List["CustomIndexSetESDataSource"]:
"""获取列表"""
token = cls.get_token(space_uid=space_uid)
index_sets: List["CustomIndexSetESDataSource"] = []
index_set_objs = LogIndexSet.objects.filter(space_uid=space_uid).iterator()
for index_set_obj in index_set_objs:
Expand All @@ -73,13 +61,11 @@ def list(cls, space_uid: str) -> List["CustomIndexSetESDataSource"]:
continue
index_sets.append(
cls(
space_uid=space_uid,
index_set_id=index_set_obj.index_set_id,
index_set_name=cls.generate_datasource_name(
scenario_id=index_set_obj.scenario_id, index_set_name=index_set_obj.index_set_name
),
time_field=index_set_obj.time_field,
token=token,
)
)
return index_sets
Expand All @@ -96,25 +82,14 @@ def generate_datasource_name(scenario_id: str, index_set_name: str) -> str:

def to_datasource(self) -> Datasource:
"""索引 -> Grafana ES数据源"""
json_data = {
"timeField": self.time_field,
# 因为监控的Grafana版本已经到10, 默认支持的ES版本是7.10+, 但是日志的Grafana是8, 兼容两边将自定义ES数据源的版本固定住7.10
"esVersion": "7.10.0",
"tlsSkipVerify": True,
"httpHeaderName1": "X-BKLOG-SPACE-UID",
"httpHeaderName2": "X-BKLOG-TOKEN",
}
json_data = {"timeField": self.time_field}
return Datasource(
name=self.index_set_name,
database=str(self.index_set_id),
access="proxy",
access="direct",
type="elasticsearch",
url=f"{settings.BK_IAM_RESOURCE_API_HOST}/grafana/custom_es_datasource",
url="custom_es_datasource",
jsonData=json_data,
secureJsonData={
"httpHeaderValue1": self.space_uid,
"httpHeaderValue2": self.token,
},
)

@classmethod
Expand Down Expand Up @@ -149,52 +124,6 @@ def disable_space(cls, bk_biz_id: int):
feature_toggle_obj.save()


class ESBodyAdapter:
    """Adapt a Grafana ES7 query body to the log-search query-body syntax.

    Grafana's ES7 dialect and the log-search esquery API disagree on two
    field names:

    * ``date_histogram`` uses ``fixed_interval`` in ES7 but ``interval`` here;
    * terms-aggregation ``order`` uses ``_key`` in ES7 but ``_term`` here.
    """

    def __init__(self, body: Dict[str, Any]):
        # Raw Grafana request body; never mutated (adapt() works on a copy).
        self.body = body

    @staticmethod
    def adapt_interval(body: Dict[str, Any] = None) -> Dict[str, Any]:
        """Recursively rename ``date_histogram.fixed_interval`` to ``interval``.

        :param body: query body (or nested sub-tree). ``None``/empty input
            returns ``{}`` instead of raising AttributeError (bug fix: the
            previous implementation crashed on its own ``None`` default).
        :return: a new dict tree with the field renamed.
        """
        if not body:
            return {}
        new_dict = {}
        for k, v in body.items():
            if k == "date_histogram" and "fixed_interval" in v:
                v["interval"] = v.pop("fixed_interval")
            if isinstance(v, dict):
                new_dict[k] = ESBodyAdapter.adapt_interval(v)
            else:
                new_dict[k] = v
        return new_dict

    @staticmethod
    def adapt_aggs(body: Dict[str, Any] = None):
        """Rewrite terms-aggregation ``order`` key ``_key`` to ``_term`` in place.

        Walks the whole tree so nested sub-aggregations are handled too.
        Non-dict input (including the ``None`` default) is a no-op.
        """
        if isinstance(body, dict):
            for k, v in body.items():
                if k == "aggs":
                    for agg_key in v:
                        if (
                            "terms" in v[agg_key]
                            and "order" in v[agg_key]["terms"]
                            and "_key" in v[agg_key]["terms"]["order"]
                        ):
                            v[agg_key]["terms"]["order"] = {"_term": v[agg_key]["terms"]["order"]["_key"]}
                # Recurse into every value; the isinstance guard above makes
                # this safe for non-dict leaves.
                ESBodyAdapter.adapt_aggs(v)

    def adapt(self):
        """Return an adapted deep copy of the original body.

        The instance's ``body`` is left untouched so the adapter can be
        reused and callers keep their original request intact.
        """
        body = deepcopy(self.body)
        body = self.adapt_interval(body=body)
        self.adapt_aggs(body=body)
        return body


class CustomESDataSourceTemplate:
"""
自定义ES数据源模板, 各个Scenario的数据源都继承这个模板
Expand Down Expand Up @@ -223,42 +152,10 @@ def get_index(self):
]
)

@staticmethod
def compatible_mapping(mapping: Dict[str, Any]) -> Dict[str, Any]:
"""
兼容Grafana高版本只支持7.10+以上的情况, mapping结构需要调整
"""
result = dict()
for index, value in mapping.items():
mapping_dict: dict = value.get("mappings", {})
# 日志接口返回的mapping结构中有一层是索引名,
if len(mapping_dict) == 1 and list(mapping_dict.keys())[0] in index:
result[index] = {"mappings": list(mapping_dict.values())[0]}
continue
result[index] = value
return result

def mapping(self):
    """
    Fetch the index mapping and normalise it for Grafana compatibility.
    """
    return self.compatible_mapping(mapping=self._mapping())

def _mapping(self):
"""
各个继承类如果需要自定义mapping, 重写这个方法
"""
return self.get_client().mapping(index=self.index)

def query(self, body: Dict[str, Any]):
    """
    Run a query after adapting the Grafana ES7 body to the search syntax.
    """
    adapted_body = ESBodyAdapter(body=body).adapt()
    return self._query(body=adapted_body)

def _query(self, body: Dict[str, Any]):
"""
各个继承类如果需要自定义查询逻辑, 重写这个方法
"""
return self.get_client().query(index=self.index, body=body)

def msearch(self, sql_list: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
Expand Down Expand Up @@ -309,10 +206,10 @@ def query_bkdata(self, body: Dict[str, Any] = None) -> List[Dict[str, Any]]:
params.update({"bkdata_authentication_method": "user", "bk_username": "admin", "operator": "admin"})
return BkDataQueryApi.query(params, request_cookies=False)["list"]

def _query(self, body: Dict[str, Any]):
def query(self, body: Dict[str, Any]):
return self.query_bkdata(body=body)

def _mapping(self):
def mapping(self):
return self.query_bkdata()


Expand Down
44 changes: 12 additions & 32 deletions bklog/apps/grafana/handlers/query.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,10 +34,10 @@
)
from apps.iam import ActionEnum, Permission, ResourceEnum
from apps.log_desensitize.handlers.desensitize import DesensitizeHandler
from apps.log_desensitize.models import DesensitizeFieldConfig
from apps.log_desensitize.handlers.utils import desensitize_params_init
from apps.log_desensitize.models import DesensitizeConfig, DesensitizeFieldConfig
from apps.log_search.constants import GlobalCategoriesEnum
from apps.log_search.exceptions import BaseSearchIndexSetDataDoseNotExists
from apps.log_search.handlers.index_set import IndexSetHandler
from apps.log_search.handlers.search.aggs_handlers import AggsViewAdapter
from apps.log_search.handlers.search.search_handlers_esquery import SearchHandler
from apps.log_search.models import LogIndexSet, Scenario
Expand All @@ -59,7 +59,7 @@ class GrafanaQueryHandler:
{"id": "min", "name": "MIN"},
{"id": "max", "name": "MAX"},
{"id": "avg", "name": "AVG"},
{"id": "cardinality", "name": "UNIQUE_COUNT"},
{"id": "cardinality", "name": "UNIQUE_COUNT"}
]

CONDITION_CHOICES = [
Expand Down Expand Up @@ -124,7 +124,7 @@ def _get_buckets(self, records, record, dimensions, aggregations, metric_field,
record[metric_field] = aggregations.get(metric_field).get("value")
records.append(copy.deepcopy(record))

def _format_time_series(self, params, data, time_field, desensitize_configs=None):
def _format_time_series(self, params, data, time_field, desensitize_entities=None):
"""
转换为Grafana TimeSeries的格式
:param params: 请求参数
Expand All @@ -139,12 +139,9 @@ def _format_time_series(self, params, data, time_field, desensitize_configs=None
:rtype: list
"""
formatted_data = defaultdict(list)
desensitize_configs = desensitize_configs or []
desensitize_handler = DesensitizeHandler(desensitize_configs)
for record in data:
# 字段脱敏处理
if desensitize_configs:
record = desensitize_handler.transform_dict(record)
record = DesensitizeHandler(desensitize_entities).transform_dict(record)
dimensions = tuple(
sorted(
(key, value)
Expand Down Expand Up @@ -276,21 +273,20 @@ def query(self, query_dict: dict):
self.check_panel_permission(query_dict["dashboard_id"], query_dict["panel_id"], query_dict["result_table_id"])

# 初始化DB脱敏配置
desensitize_field_config_objs = DesensitizeFieldConfig.objects.filter(
index_set_id=query_dict["result_table_id"]
)
desensitize_field_config_objs = DesensitizeFieldConfig.objects.filter(index_set_id=query_dict["result_table_id"])

desensitize_configs = [
{
"field_name": field_config_obj.field_name or "",
"rule_id": field_config_obj.rule_id or 0,
"operator": field_config_obj.operator,
"params": field_config_obj.params,
"match_pattern": field_config_obj.match_pattern,
}
for field_config_obj in desensitize_field_config_objs
} for field_config_obj in desensitize_field_config_objs
]

# 初始化脱敏工厂参数
desensitize_entities = desensitize_params_init(desensitize_configs=desensitize_configs)

time_field = SearchHandler(query_dict["result_table_id"], {}).time_field

# 如果是统计数量,则无需提供指标字段,用 _id 字段统计即可
Expand Down Expand Up @@ -336,7 +332,7 @@ def query(self, query_dict: dict):
records = []
self._get_buckets(records, {}, all_dimensions, result["aggregations"], query_dict["metric_field"])

records = self._format_time_series(query_dict, records, search_handler.time_field, desensitize_configs)
records = self._format_time_series(query_dict, records, search_handler.time_field, desensitize_entities)

return records

Expand Down Expand Up @@ -406,9 +402,7 @@ def get_metric_list(self, category_id=None):
space_uid = self.space_uid
if not space_uid:
return []

space_uids = IndexSetHandler.get_all_related_space_uids(space_uid)
index_set_list = LogIndexSet.objects.filter(space_uid__in=space_uids)
index_set_list = LogIndexSet.objects.filter(space_uid=space_uid)

if category_id:
index_set_list = index_set_list.filter(category_id=category_id)
Expand Down Expand Up @@ -689,27 +683,13 @@ def _query_dimension(self, params):

return [{"label": v, "value": v} for v in dimension_values]

def _query_index_set(self, params):
"""
查询维度
"""
metrics = self.get_metric_list()

results = []

for group in metrics:
for metric in group["children"]:
results.append({"label": metric["name"], "value": metric["id"]})
return results

def get_variable_value(self, variable_type, params):
query_cmdb = partial(self._query_cmdb, variable_type=variable_type)
query_processor = {
"host": query_cmdb,
"module": query_cmdb,
"set": query_cmdb,
"dimension": self._query_dimension,
"index_set": self._query_index_set,
}

if variable_type not in query_processor:
Expand Down
2 changes: 1 addition & 1 deletion bklog/apps/grafana/serializers.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ class GetVariableFieldSerializer(serializers.Serializer):

class GetVariableValueSerializer(serializers.Serializer):
bk_biz_id = serializers.IntegerField(label=_("业务ID"))
type = serializers.ChoiceField(label=_("查询类型"), choices=["dimension", "host", "module", "set", "index_set"])
type = serializers.ChoiceField(label=_("查询类型"), choices=["dimension", "host", "module", "set"])
params = serializers.DictField(label=_("查询参数"))


Expand Down
Loading

0 comments on commit 350ef73

Please sign in to comment.