Skip to content

Commit

Permalink
Skip not-bind VRRP IP for control_plain calculation
Browse files Browse the repository at this point in the history
1. Fix #216 is partially reverted (changes in check_paas).
2. VRRP IP with maintenance-type: "not bind" is now skipped when calculating control_plain.
3. Added new patch which migrates the cluster from not bind VRRP IP.
4. Added ability to deviate enrichment of cluster to simplify patch development.
5. Removed unnecessary template haproxy_mntc.cfg.j2
6. Added more robust verification of haproxy and VRRP configuration.
7. Other minor fixes and refactoring.
8. Added unit tests for control_plain detection.
  • Loading branch information
ilia1243 committed Oct 25, 2022
1 parent 58776c4 commit 3b8f04a
Show file tree
Hide file tree
Showing 10 changed files with 239 additions and 221 deletions.
26 changes: 12 additions & 14 deletions kubemarine/core/cluster.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
import fabric
import yaml

from kubemarine.core import log, defaults, utils
from kubemarine.core import log, utils
from kubemarine.core.connections import ConnectionPool, Connections
from kubemarine.core.environment import Environment
from kubemarine.core.group import NodeGroup
Expand Down Expand Up @@ -58,20 +58,12 @@ def __init__(self, inventory: dict, context: dict, procedure_inventory: dict = N
# connection pool should be created every time, because it is relied on partially enriched inventory
self._connection_pool = ConnectionPool(self)

def enrich(self, nodes_context: dict = None, custom_enrichment_fns: List[str] = None):
# if nodes context is explicitly supplied, let's copy it first.
if nodes_context is not None:
self.context['nodes'] = deepcopy(nodes_context['nodes'])
self.context['os'] = deepcopy(nodes_context['os'])

def enrich(self, custom_enrichment_fns: List[str] = None):
# do not make dumps for custom enrichment functions, because result is generally undefined
make_dumps = custom_enrichment_fns is None
self._inventory = defaults.enrich_inventory(self, self.raw_inventory,
make_dumps=make_dumps, custom_fns=custom_enrichment_fns)

# detect nodes context automatically, after enrichment is done to ensure that node groups are initialized
if nodes_context is None:
self._detect_nodes_context()
from kubemarine.core import defaults
self._inventory = defaults.enrich_inventory(
self, self.raw_inventory, make_dumps=make_dumps, custom_fns=custom_enrichment_fns)

@property
def inventory(self) -> dict:
Expand Down Expand Up @@ -109,6 +101,9 @@ def get_addresses_from_node_names(self, node_names: List[str]) -> dict:
}
return result

def get_node(self, host: Union[str, fabric.connection.Connection]) -> dict:
    """Return the full configuration dict of the node identified by the given host address or connection."""
    single_node_group = self.make_group([host])
    return single_node_group.get_first_member(provide_node_configs=True)

def make_group_from_nodes(self, node_names: List[str]) -> NodeGroup:
addresses = self.get_addresses_from_node_names(node_names)
ips = []
Expand Down Expand Up @@ -159,7 +154,8 @@ def get_facts_enrichment_fns(self):
"kubemarine.core.defaults.calculate_nodegroups"
]

def _detect_nodes_context(self) -> None:
def detect_nodes_context(self) -> dict:
"""The method should fetch only node specific information that is not changed during Kubemarine run"""
self.log.debug('Start detecting nodes context...')

for node in self.nodes['all'].get_ordered_members_list(provide_node_configs=True):
Expand All @@ -181,6 +177,7 @@ def _detect_nodes_context(self) -> None:
self.log.verbose('OS family check finished')

self.log.debug('Detecting nodes context finished!')
return {k: deepcopy(self.context[k]) for k in ('nodes', 'os')}

def _gather_facts_after(self):
self.log.debug('Gathering facts after tasks execution started...')
Expand Down Expand Up @@ -363,6 +360,7 @@ def cache_package_versions(self):
def dump_finalized_inventory(self):
self._gather_facts_after()
# TODO: rewrite the following lines as deenrichment functions like common enrichment mechanism
from kubemarine.core import defaults
from kubemarine.procedures import remove_node
from kubemarine import controlplane
prepared_inventory = remove_node.remove_node_finalize_inventory(self, self.inventory)
Expand Down
12 changes: 8 additions & 4 deletions kubemarine/core/defaults.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,17 +15,19 @@
import re
from importlib import import_module
from copy import deepcopy
from typing import Optional

import yaml

from kubemarine.core.cluster import KubernetesCluster
from kubemarine.core.errors import KME
from kubemarine import jinja
from kubemarine.core import utils
from kubemarine.core.yaml_merger import default_merger
from kubemarine import controlplane
from kubemarine import controlplane

# All enrichment procedures should not connect to any node.
# The information about nodes should be collected within KubernetesCluster#_detect_nodes_context().
# The information about nodes should be collected within KubernetesCluster#detect_nodes_context().
DEFAULT_ENRICHMENT_FNS = [
"kubemarine.kubernetes.add_node_enrichment",
"kubemarine.kubernetes.remove_node_enrichment",
Expand Down Expand Up @@ -266,7 +268,7 @@ def apply_registry_endpoints(inventory, cluster):
return registry_mirror_address, containerd_endpoints, thirdparties_address


def append_controlplain(inventory, cluster):
def append_controlplain(inventory, cluster: Optional[KubernetesCluster]):

if inventory.get('control_plain', {}).get('internal') and inventory.get('control_plain', {}).get('external'):
if cluster:
Expand All @@ -290,7 +292,9 @@ def append_controlplain(inventory, cluster):
if internal_address is None:
internal_address = item
internal_address_source = 'vrrp_ip[%s]' % i
else:
# todo remove p1_migrate_not_bind_vrrp_fix after next release
elif item.get('params', {}).get('maintenance-type', False) != 'not bind' \
or (cluster and not cluster.context.get('p1_migrate_not_bind_vrrp_fix', True)):
if internal_address is None or item.get('control_endpoint', False):
internal_address = item['ip']
internal_address_source = 'vrrp_ip[%s]' % i
Expand Down
87 changes: 63 additions & 24 deletions kubemarine/core/resources.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,13 +11,14 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from copy import deepcopy
from typing import Optional

import yaml
import ruamel.yaml

from kubemarine.core import utils, cluster as c, log, errors, static
from kubemarine.core.yaml_merger import default_merger


class DynamicResources:
Expand All @@ -34,6 +35,13 @@ def __init__(self, context: dict, silent=False):
self._raw_inventory = None
self._formatted_inventory = None
self._procedure_inventory = None

self._nodes_context = None
"""
The nodes_context variable should hold node specific information that is not changed during Kubemarine run.
The variable should be initialized on demand and only once.
"""

self._cluster = None

args: dict = context['execution_arguments']
Expand Down Expand Up @@ -101,43 +109,74 @@ def recreate_inventory(self):

self._raw_inventory = None
self._formatted_inventory = None
# no need to clear _nodes_context as it should not change after cluster is reinitialized.
self._cluster = None

def cluster_if_initialized(self) -> Optional[c.KubernetesCluster]:
return self._cluster

def cluster(self) -> c.KubernetesCluster:
"""Returns already initialized cluster object or initializes new cluster object."""
"""Returns already initialized cluster object or initializes new real cluster object."""
if self._cluster is None:
log = self.logger()
self._cluster = self._create_cluster(self.context)

return self._cluster

def create_deviated_cluster(self, deviated_context: dict):
    """
    Create a new cluster instance with the specified deviation of context params.

    The method's work should minimize work with network and avoid RW work with the filesystem.
    The cluster instance is useful to develop a patch in case the enrichment procedure is changed
    and it is necessary to compare the result of the old and new algorithm of enrichment.
    It should not be used in tasks.

    :param deviated_context: dictionary to override context params.
    :return: newly created and enriched cluster instance.
    """
    sample_context = deepcopy(self.context)
    default_merger.merge(sample_context, deviated_context)
    # Disable all side effects on the filesystem for the sample cluster.
    sample_context['preserve_inventory'] = False
    args = sample_context['execution_arguments']
    args['disable_dump'] = True
    # pop() with a default instead of del: the argument may be absent
    # (sibling code guards it with `if 'ansible_inventory_location' in args:`),
    # and del would raise KeyError in that case.
    args.pop('ansible_inventory_location', None)
    return self._create_cluster(sample_context)

def _create_cluster(self, context):
    """
    Instantiate and enrich a new cluster object from the given context.

    The supplied context is deep-copied and merged with the lazily detected
    nodes context before the cluster instance is created, so the caller's
    context dict is never mutated.

    :param context: execution context to build the cluster from.
    :return: fully enriched cluster instance.
    :raises errors.FailException: if enrichment of the inventory fails.
    """
    log = self.logger()
    # Work on a copy: merging the nodes context below must not leak into the caller's dict.
    context = deepcopy(context)
    default_merger.merge(context, self._get_nodes_context())
    try:
        cluster = self._new_cluster_instance(context)
        cluster.enrich()
    except Exception as exc:
        # Wrap any enrichment failure into the project-level failure type.
        raise errors.FailException("Failed to proceed inventory file", exc)

    if not self._silent:
        # Summarize the loaded inventory: per-role node counts and their IPs.
        log.debug("Inventory file loaded:")
        for role in cluster.roles:
            log.debug("  %s %i" % (role, len(cluster.ips[role])))
            for ip in cluster.ips[role]:
                log.debug("    %s" % ip)

    args = context['execution_arguments']
    # The ansible inventory location is optional; generate the file only when requested.
    if 'ansible_inventory_location' in args:
        utils.make_ansible_inventory(args['ansible_inventory_location'], cluster)

    return cluster

def _get_nodes_context(self):
if self._nodes_context is None:
try:
# temporary cluster instance to detect initial nodes context.
light_cluster = self._create_cluster()
light_cluster = self._new_cluster_instance(self.context)
light_cluster.enrich(custom_enrichment_fns=light_cluster.get_facts_enrichment_fns())

# main cluster instance to be used in flow
cluster = self._create_cluster()
cluster.enrich(nodes_context=light_cluster.context)

self._cluster = cluster
self._nodes_context = light_cluster.detect_nodes_context()
except Exception as exc:
raise errors.FailException("Failed to proceed inventory file", exc)

if not self._silent:
log.debug("Inventory file loaded:")
for role in self._cluster.roles:
log.debug(" %s %i" % (role, len(self._cluster.ips[role])))
for ip in self._cluster.ips[role]:
log.debug(" %s" % ip)

args = self.context['execution_arguments']
if 'ansible_inventory_location' in args:
utils.make_ansible_inventory(args['ansible_inventory_location'], self._cluster)

return self._cluster
return self._nodes_context

def _create_cluster(self):
return _provide_cluster(self.raw_inventory(), self.context,
def _new_cluster_instance(self, context: dict):
return _provide_cluster(self.raw_inventory(), context,
procedure_inventory=self.procedure_inventory(),
logger=self.logger())

Expand Down
14 changes: 5 additions & 9 deletions kubemarine/demo.py
Original file line number Diff line number Diff line change
Expand Up @@ -207,8 +207,8 @@ def __init__(self, context, raw_inventory: dict, procedure_inventory: dict = Non
self._fake_shell = fake_shell if fake_shell else FakeShell()
self._fake_fs = fake_fs if fake_fs else FakeFS()

def _create_cluster(self):
return FakeKubernetesCluster(self.raw_inventory(), self.context,
def _new_cluster_instance(self, context: dict):
return FakeKubernetesCluster(self.raw_inventory(), context,
procedure_inventory=self.procedure_inventory(),
logger=self.logger(),
fake_shell=self._fake_shell, fake_fs=self._fake_fs)
Expand Down Expand Up @@ -387,10 +387,6 @@ def new_cluster(inventory, procedure=None, fake=True, context: dict = None,
elif os_name in ['ubuntu', 'debian']:
os_family = 'debian'

nodes_context = {
"nodes": {}
}

for node in inventory['nodes']:
node_context = {
'name': node['name'],
Expand All @@ -409,17 +405,17 @@ def new_cluster(inventory, procedure=None, fake=True, context: dict = None,
connect_to = node['internal_address']
if node.get('address'):
connect_to = node['address']
nodes_context['nodes'][connect_to] = node_context
context['nodes'][connect_to] = node_context

nodes_context['os'] = os_family
context['os'] = os_family

# It is possible to disable FakeCluster and create real cluster Object for some business case
if fake:
cluster = FakeKubernetesCluster(inventory, context)
else:
cluster = KubernetesCluster(inventory, context)

cluster.enrich(nodes_context=nodes_context)
cluster.enrich()
return cluster


Expand Down
Loading

0 comments on commit 3b8f04a

Please sign in to comment.