Current File: /usr/bin/kcarectl
#!/usr/libexec/platform-python
# Copyright (c) Cloud Linux GmbH & Cloud Linux Software, Inc
# Licensed under CLOUD LINUX LICENSE AGREEMENT
# http://cloudlinux.com/docs/LICENCE.TXT
from __future__ import print_function
import ast
import base64
import errno
import functools
import glob
import hashlib
import json
import logging
import logging.handlers
import os
import platform
import random
import re
import shutil
import socket
import ssl
import subprocess
import sys
import tempfile
import time
import traceback
import warnings
import fnmatch
import zlib
import textwrap
from argparse import ArgumentParser
from datetime import datetime
from contextlib import contextmanager
# The scanner interface should skip us
os.environ['KCARE_SCANNER_INTERFACE_DO_NOTHING'] = '1'
if os.path.isdir('/usr/libexec/kcare/python'): # pragma: no cover
sys.path.insert(0, '/usr/libexec/kcare/python')
import kcsig_verify # noqa: E402
warnings.filterwarnings('ignore', category=DeprecationWarning)
PY2 = sys.version_info[0] == 2
if PY2: # pragma: no py3 cover
from ConfigParser import ConfigParser
import httplib
from urllib import urlencode
from urllib import quote as urlquote
from urllib2 import HTTPError, URLError, Request as StdRequest, urlopen as std_urlopen
class Request(StdRequest):
def __init__(self, *args, **kwargs):
method = kwargs.pop('method', None)
StdRequest.__init__(self, *args, **kwargs)
if method == 'HEAD':
# Older versions of mypy supporting 2.x do not infer type of
# the `method` variable correctly here. Cast to str explicitly.
self.get_method = lambda: str(method) # type: ignore[assignment]
else: # pragma: no py2 cover
from urllib.parse import quote as urlquote
from configparser import ConfigParser
from http import client as httplib
from urllib.error import HTTPError, URLError
from urllib.parse import urlencode
from urllib.request import Request, urlopen as std_urlopen
if False: # pragma: no cover
from typing import Optional, Dict, Tuple, Any, List, Union, Set # noqa: F401
kcarelog = logging.getLogger('kcare') # mocked: tests/unit
kcarelog.setLevel(logging.DEBUG)
_CONFIG_OPTIONS = set() # type: Set[str]
FLAGS = ['keep-registration', 'manage-libcare']
BLACKLIST_FILE = 'kpatch.blacklist'
CACHE_ENTRIES = 3
CONFIG = '/etc/sysconfig/kcare/kcare.conf'
LOG_FILE = '/var/log/kcarectl.log'
CPANEL_GID = 99
EFFECTIVE_LATEST = 'v2'
KC_PATCH_VERSION = '2'
EXPECTED_PREFIX = ('12h', '24h', '48h', 'test')
FIXUPS_FILE = 'kpatch.fixups'
FREEZER_BLACKLIST = '/etc/sysconfig/kcare/freezer.modules.blacklist'
GPG_BIN = '/usr/bin/gpgv'
GPG_KEY_DIR = '/etc/pki/kcare-gpg/'
KCDOCTOR = '/usr/libexec/kcare/kcdoctor.sh'
KERNEL_VERSION_FILE = '/proc/version'
KMOD_BIN = 'kcare.ko'
KPATCH_CTL = '/usr/libexec/kcare/kpatch_ctl'
LEVEL = None # a level to 'stick on' (if 0 then use latest level)
LIBCARE_CLIENT = '/usr/libexec/kcare/libcare-client'
LIBCARE_DISABLED = False
LIBCARE_SOCKET = (
"/run/libcare/libcare.sock",
"/var/run/libcare.sock",
)
LIBCARE_SOCKET_TIMEOUT = 10
LIBCARE_PATCHES = '/var/cache/kcare/libcare_patches'
LIBCARE_CVE_LIST = '/var/cache/kcare/libcare_cvelist'
KCORE_OUTPUT_SIZE = 100 # 100 MB
KMSG_OUTPUT = True
PATCH_BIN = 'kpatch.bin'
PATCH_CACHE = '/var/cache/kcare'
KDUMPS_DIR = PATCH_CACHE + '/dumps'
PATCH_DONE = '.done'
PATCH_INFO = 'kpatch.info'
PATCH_LATEST = ('latest.v2',)
PATCH_METHOD = ''
PATCH_SERVER = 'https://patches.kernelcare.com'
REGISTRATION_API_URL = 'https://cln.cloudlinux.com/api/kcare'
SYSCTL_CONFIG = '/etc/sysconfig/kcare/sysctl.conf'
TEST_PREFIX = ''
VERSION = '2.84-2.el8'
VIRTWHAT = '/usr/libexec/kcare/virt-what'
SYSTEMCTL = '/usr/bin/systemctl'
USERSPACE_PATCHES = None
IM360_LICENSE_FILE = '/var/imunify360/license.json'
SYSTEMID = '/etc/sysconfig/kcare/systemid'
ALMA_SYSTEMID = '/etc/sysconfig/kcare/systemid.almacare'
# false positives by bandit based on the `TOKEN` keyword in name
AUTH_TOKEN_HEADER = 'Kc-Auth-Token' # nosec hardcoded_password_string
AUTH_TOKEN_DUMP_PATH = '/etc/sysconfig/kcare/auth_token' # nosec hardcoded_password_string
CACHE_KEY_HEADER = 'Kc-Cache-Key'
CACHE_KEY_DUMP_PATH = '/etc/sysconfig/kcare/cache_key'
# urlopen retry options
RETRY_DELAY = 3
RETRY_MAX_DELAY = 30
RETRY_BACKOFF = 2
RETRY_COUNT = 4
UNLOAD_RETRY_DELAY = 10
UPDATE_MODE_MANUAL = 'manual' # update is launched manually by `kcarectl -u`
UPDATE_MODE_AUTO = 'auto' # update is launched by cron
UPDATE_MODE_SMART = 'smart' # update is launched by kcare daemon
CHECK_CLN_LICENSE_STATUS = True
VERSION_RE = re.compile(r'^(\d+[.]\d+[-]\d+)')
BLACKLIST_RE = re.compile('==BLACKLIST==\n(.*)==END BLACKLIST==\n', re.DOTALL)
CONFLICTING_MODULES_RE = re.compile('(kpatch.*|ksplice.*|kpatch_livepatch.*)')
KCARE_UNAME_FILE = '/proc/kcare/effective_version'
POLICY_REMOTE = 'REMOTE'
POLICY_LOCAL = 'LOCAL'
POLICY_LOCAL_FIRST = 'LOCAL_FIRST'
UPDATE_POLICY = POLICY_REMOTE
AUTO_UPDATE = True
LIB_AUTO_UPDATE = True
UPDATE_FROM_LOCAL = False
USE_SIGNATURE = True
SIG = '.sig'
SIG2 = '.sig2'
SIG_JSON = '.json-sig'
USE_CONTENT_FILE = False # type: Optional[bool]
CONTENT_FILE = 'release.content.json'
IGNORE_UNKNOWN_KERNEL = False
LOAD_KCARE_SYSCTL = True
KPATCH_DEBUG = False
CHECK_SSL_CERTS = True
PATCH_TYPE = ''
PREV_PATCH_TYPE = 'default'
BEFORE_UPDATE_COMMAND = None
AFTER_UPDATE_COMMAND = None
PRINT_DEBUG = 0
PRINT_INFO = 1
PRINT_WARN = 2
PRINT_ERROR = 3
PRINT_CRITICAL = 4
PRINT_LEVEL = PRINT_INFO
SILENCE_ERRORS = True
STATUS_CHANGE_GAP = 4 * 60 * 60 + 5 * 60 # 4 hours 5 minutes
STATUS_CHANGE_GAP_DELAY = 5 * 60 # 5 minutes
SUCCESS_TIMEOUT = 5 * 60
REPORT_FQDN = False
FORCE_GID = None
# helper vars for tests
SKIP_SYSTEMCTL_CHECK = False
CONFIG_TARGETS_MAPPING = {
'REGISTRATION_URL': 'REGISTRATION_API_URL',
'PREFIX': 'TEST_PREFIX',
'UPDATE_SYSCTL_CONFIG': 'LOAD_KCARE_SYSCTL',
'PATCH_LEVEL': 'LEVEL',
'STICKY_PATCH': 'STICKY',
}
ntype = type('')
btype = type(b'')
utype = type(u'')
def bstr(data, encoding='latin1'): # pragma: no py2 cover
if type(data) is utype:
data = data.encode(encoding)
return data
def ustr(data, encoding='latin1'): # pragma: no py2 cover
if type(data) is btype:
data = data.decode(encoding)
return data
def nstr(data, encoding='utf-8'): # pragma: no py2 cover
if type(data) is ntype:
return data
elif type(data) is btype:
return data.decode(encoding)
else:
return data.encode(encoding) # pragma: no py3 cover
if PY2: # pragma: no py3 cover
# json.loads returns unicode strings and they can contaminate following
# calls. This function converts all unicode strings into native strings.
def _convert(data):
dtype = type(data)
if dtype is utype:
return data.encode('utf-8')
elif dtype is list:
return [_convert(it) for it in data]
elif dtype is dict:
return dict((_convert(k), _convert(v)) for k, v in data.items())
return data
def json_loads_nstr(json_str):
return _convert(json.loads(json_str))
else: # pragma: no py2 cover
json_loads_nstr = json.loads
class SafeExceptionWrapper(Exception):
def __init__(self, inner, etype=None, details=None):
self.inner = inner
self.etype = etype
self.details = details
def format_exception_without_details():
etype, value, tb = sys.exc_info()
details_sanitized = ''
if isinstance(value, OSError) and not isinstance(value, URLError):
try:
# reconstruct for safety, it may be any IO-related subclass
details_sanitized = "[Errno %i] %s: '%s'" % (value.errno, os.strerror(value.errno), value.filename)
except (AttributeError, TypeError):
pass
elif isinstance(value, KeyError):
details_sanitized = '%s' % value
elif isinstance(value, SafeExceptionWrapper):
etype = value.etype or type(value.inner)
details_sanitized = value.details or ('%s' % value.inner)
distro = get_distro()
return {
'agent_version': VERSION,
'distro': distro[0],
'distro_version': distro[1],
'error': getattr(etype, '__name__', str(etype)),
'details': details_sanitized,
'traceback': ''.join(traceback.format_tb(tb, 100)),
}
def get_freezer_blacklist():
result = set()
if os.path.isfile(FREEZER_BLACKLIST):
f = open(FREEZER_BLACKLIST, 'r')
for line in f:
result.add(line.rstrip())
f.close()
return result
def _apply_ptype(ptype, filename):
name_parts = filename.split('.')
if ptype:
filename = '.'.join([name_parts[0], ptype, name_parts[-1]])
else:
filename = '.'.join([name_parts[0], name_parts[-1]])
return filename
def apply_ptype(ptype):
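"""Rewrite the patch file name globals (PATCH_BIN, PATCH_INFO, BLACKLIST_FILE, FIXUPS_FILE, PATCH_DONE) to embed the given patch type."""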
global PATCH_BIN, PATCH_INFO, BLACKLIST_FILE, FIXUPS_FILE, PATCH_DONE
PATCH_BIN = _apply_ptype(ptype, PATCH_BIN)
PATCH_INFO = _apply_ptype(ptype, PATCH_INFO)
BLACKLIST_FILE = _apply_ptype(ptype, BLACKLIST_FILE)
FIXUPS_FILE = _apply_ptype(ptype, FIXUPS_FILE)
PATCH_DONE = _apply_ptype(ptype, PATCH_DONE)
def _printlvl(message, level, file=None):
if level >= PRINT_LEVEL:
print(message, file=file)
def logdebug(message):
_printlvl(message, PRINT_DEBUG)
kcarelog.debug(message)
def loginfo(message):
_printlvl(message, PRINT_INFO)
kcarelog.info(message)
def logerror(message, print_msg=True):
if print_msg:
_printlvl(message, PRINT_ERROR, file=sys.stderr)
kcarelog.error(message)
def logexc(message):
if PRINT_ERROR >= PRINT_LEVEL:
traceback.print_exc()
kcarelog.exception(message)
def send_exc():
if UPDATE_FROM_LOCAL: # pragma: no cover
return
trace = json.dumps(format_exception_without_details())
url = get_patch_server_url('/api/kcarectl-trace') + '?trace=' + nstr(base64.urlsafe_b64encode(bstr(trace)))
request = http_request(url, get_http_auth_string())
try:
_urlopen(request)
except Exception:
# import traceback
# traceback.print_exc()
# we are really not interested in the exception here
pass
def _timestmap_str():
return str(int(time.time()))
def cached(fn):
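"""Memoize fn results keyed by its positional arguments.
The wrapper exposes `.clear()` to drop the cache and `.orig` with the
unwrapped function (used in tests).
"""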
cache = {} # type: Dict[Tuple[Any, ...], Any]
@functools.wraps(fn)
def inner(*args):
try:
return cache[args]
except KeyError:
pass
result = cache[args] = fn(*args)
return result
inner.clear = cache.clear # type: ignore[attr-defined]
inner.orig = fn # type: ignore[attr-defined]
return inner
def nohup_fork(func, sleep=None): # pragma: no cover
"""
Run func in a fork in its own process group
(will stay alive after the kcarectl process dies).
:param func: function to execute
:param sleep: optional delay in seconds before calling func
:return:
"""
pid = os.fork()
if pid != 0:
os.waitpid(pid, 0)
return
os.setsid()
pid = os.fork()
if pid != 0:
os._exit(0)
# close standard files to release TTY
os.close(0)
# redirect stdout/stderr into the log file
with open(LOG_FILE, 'a') as fd:
os.dup2(fd.fileno(), 1)
os.dup2(fd.fileno(), 2)
if sleep:
time.sleep(sleep)
try:
func()
except Exception:
kcarelog.exception('Wait exception')
os._exit(1)
os._exit(0)
def restore_selinux_context(dname):
if is_selinux_enabled():
# Try to restore selinux context
cmd = [find_cmd('restorecon', '/usr/sbin', '/sbin'), '-R', dname]
code, _, stderr = run_command(cmd, catch_stdout=True, catch_stderr=True)
if code:
logerror("SELinux context restoration for {0} failed with {1}: {2}".format(dname, code, stderr), print_msg=False)
def atomic_write(fname, content, ensure_dir=False, mode='w'):
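"""Write content to fname atomically: write a `.tmp` sibling, fsync it, then rename over the target (optionally creating the parent directory)."""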
tmp_fname = fname + '.tmp'
dname = os.path.dirname(tmp_fname)
if ensure_dir and not os.path.exists(dname):
os.makedirs(dname)
with open(tmp_fname, mode) as f:
f.write(content)
f.flush()
os.fsync(f.fileno())
os.rename(tmp_fname, fname)
def _read_file(fname, mode, default):
if not os.path.exists(fname):
return default
with open(fname, mode) as f:
return f.read()
def read_file(fname, default=None): # type: (str, Optional[str]) -> str
result = _read_file(fname, 'r', default) # type: str
return result
def read_file_bin(fname, default=None): # type: (str, Optional[bytes]) -> bytes
result = _read_file(fname, 'rb', default) # type: bytes
return result
def touch_anchor():
"""Check the fact that there was a failed patching attempt.
If anchor file not exists we should create an anchor with
timestamp and schedule its deletion at $timeout.
If anchor exists and its timestamp more than $timeout from now
we should raise an error.
"""
anchor_filepath = os.path.join(PATCH_CACHE, '.kcareprev.lock')
if os.path.isfile(anchor_filepath):
with open(anchor_filepath, 'r') as afile:
try:
timestamp = int(afile.read())
# anchor was created quite recently
# that means that something went wrong
if timestamp + SUCCESS_TIMEOUT > time.time():
raise PreviousPatchFailedException(timestamp, anchor_filepath)
except ValueError:
pass
atomic_write(anchor_filepath, _timestmap_str()) # write a new timestamp
def commit_update(state_data):
"""
See touch_anchor() for detailed explanation of anchor mechanics.
See KPT-730 for details about action registration.
:param state_data: dict with current level, kernel_id etc.
"""
try:
os.remove(os.path.join(PATCH_CACHE, '.kcareprev.lock'))
except OSError:
pass
register_action('done', state_data)
# reset module cache to allow server_info to get fresh data
get_loaded_modules.clear()
try:
get_latest_patch_level(reason='done')
except Exception:
kcarelog.exception('Cannot send update info!')
def save_to_file(response, dst):
parent_dir = os.path.dirname(dst)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
with open(dst, 'wb') as f:
shutil.copyfileobj(response, f)
f.flush()
os.fsync(f.fileno())
def clean_directory(directory, exclude_path=None, keep_n=CACHE_ENTRIES, pattern=None):
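"""Keep only the `keep_n` most recently modified entries in `directory` (optionally filtered by `pattern`) and remove the rest; entries matching `exclude_path` are never removed."""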
if not os.path.exists(directory):
return
data = []
items = os.listdir(directory)
if pattern is not None:
items = fnmatch.filter(items, pattern)
for item in items:
full_path = os.path.join(directory, item)
if full_path != exclude_path:
data.append((os.stat(full_path).st_mtime, full_path))
data.sort(reverse=True)
for _, entry in data[keep_n:]:
if os.path.isfile(entry) or os.path.islink(entry):
os.remove(entry)
else:
shutil.rmtree(entry)
def clear_cache(khash, plevel):
clean_directory(os.path.join(PATCH_CACHE, 'patches'), exclude_path=get_cache_path(khash, plevel, ''))
def clear_all_cache():
clean_directory(os.path.join(PATCH_CACHE, 'modules'), keep_n=0)
clean_directory(os.path.join(PATCH_CACHE, 'patches'), keep_n=0)
os.unlink(CACHE_KEY_DUMP_PATH)
def get_cache_path(khash, plevel, fname):
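"""Build a path inside PATCH_CACHE/patches/<prefix>-<khash>-<plevel>-<ptype>, optionally pointing at fname."""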
prefix = TEST_PREFIX or 'none'
ptype = PATCH_TYPE or 'default'
patch_dir = '-'.join([prefix, khash, str(plevel), ptype])
result = (PATCH_CACHE, 'patches', patch_dir) # type: Tuple[str, ...]
if fname:
result += (fname,)
return os.path.join(*result)
class BaseKernelPatchLevel(int):
def cache_path(self, *parts):
return get_cache_path(self.khash, str(self), *parts) # type: ignore[attr-defined]
class KernelPatchLevel(BaseKernelPatchLevel):
def __new__(cls, khash, level, baseurl, release=None):
return super(cls, cls).__new__(cls, level)
def __init__(self, khash, level, baseurl, release=None):
self.level = level
self.khash = khash
self.baseurl = baseurl
self.release = release
def kmod_url(self, *parts):
return get_patch_server_url(self.baseurl, self.khash, *parts)
def file_url(self, *parts):
return get_patch_server_url(self.baseurl, self.khash, str(self), *parts)
class LegacyKernelPatchLevel(BaseKernelPatchLevel):
def __new__(cls, khash, level):
try:
return super(cls, cls).__new__(cls, level)
except ValueError as exc:
# common error with this class
raise SafeExceptionWrapper(exc)
def __init__(self, khash, level):
self.level = level
self.khash = khash
self.baseurl = None
def kmod_url(self, *parts):
if 'patches.kernelcare.com' in PATCH_SERVER:
return get_kernel_prefixed_url(self.khash, str(self), *parts)
# ePortal workaround, it doesn't support leveled links to kmod
return get_kernel_prefixed_url(self.khash, *parts)
def file_url(self, *parts):
return get_kernel_prefixed_url(self.khash, str(self), *parts)
def upgrade(self, baseurl):
return KernelPatchLevel(self.khash, int(self), baseurl)
class UserspacePatchLevel(int):
def __new__(cls, libname, buildid, level, baseurl=None):
return super(cls, cls).__new__(cls, level)
def __init__(self, libname, buildid, level, baseurl=None):
self.level = level
self.libname = libname
self.buildid = buildid
self.baseurl = baseurl
def cache_path(self, *parts):
return get_userspace_cache_path(self.libname, self.buildid, str(self), *parts)
def get_current_level_path(khash, fname):
prefix = TEST_PREFIX or 'none'
module_dir = '-'.join([prefix, khash])
result = (PATCH_CACHE, 'modules', module_dir) # type: Tuple[str, ...]
if fname:
result += (fname,)
return os.path.join(*result)
def save_cache_latest(khash, patch_level):
atomic_write(get_current_level_path(khash, 'latest'), str(patch_level), ensure_dir=True)
def get_cache_latest(khash):
path_with_latest = get_current_level_path(khash, 'latest')
if os.path.isfile(path_with_latest):
try:
pl = int(open(path_with_latest, 'r').read().strip())
return LegacyKernelPatchLevel(khash, pl)
except (ValueError, TypeError):
pass
def check_gpg_signature(file_path, signature): # mocked: tests/unit
"""
Check a file signature using the gpg tool.
If the signature is wrong, BadSignatureException is raised.
:param file_path: path to the file whose signature will be checked
:param signature: a file with the signature
:return: None if the signature is valid
:raises: BadSignatureException
"""
check_gpg_bin()
if signature.endswith(SIG_JSON):
root_keys = os.path.join(GPG_KEY_DIR, 'root-keys.json')
try:
kcsig_verify.verify(signature, file_path, root_keys)
except kcsig_verify.Error as e:
raise BadSignatureException('Bad Signature: {0}: {1}'.format(file_path, str(e)))
else:
keyring = os.path.join(GPG_KEY_DIR, 'kcare_pub2.key' if signature.endswith(SIG2) else 'kcare_pub.key')
cmd = [GPG_BIN, '--keyring', keyring, signature, file_path]
code, stdout, stderr = run_command(cmd, catch_stdout=True, catch_stderr=True)
if code:
raise BadSignatureException('Bad Signature: {0}\n{1}\n{2}'.format(file_path, stdout, stderr))
class CertificateError(ValueError):
pass
class KcareError(Exception):
"""Base kernelcare exception which will be considered as expected
error and the full traceback will not be shown.
"""
pass
class NotFound(HTTPError):
pass
class UnknownKernelException(KcareError):
def __init__(self):
Exception.__init__(
self,
'New kernel detected ({0} {1} {2}).\nThere are no updates for this kernel yet.'.format(
get_distro()[0], platform.release(), get_kernel_hash()
),
)
class UnableToGetLicenseException(KcareError):
def __init__(self, code):
Exception.__init__(self, 'Unknown Issue when getting trial license. Error code: ' + str(code))
class ApplyPatchError(KcareError):
def __init__(self, code, freezer_style, level, patch_file, *args, **kwargs):
super(ApplyPatchError, self).__init__(*args, **kwargs)
self.code = code
self.freezer_style = freezer_style
self.level = level
self.patch_file = patch_file
self.distro = get_distro()[0]
self.release = platform.release()
def __str__(self):
return 'Unable to apply patch ({0} {1} {2} {3} {4}, {5})'.format(
self.patch_file,
self.level,
self.code,
self.distro,
self.release,
', '.join([str(i) for i in self.freezer_style]),
)
class AlreadyTrialedException(KcareError):
def __init__(self, ip, created, *args, **kwargs):
super(AlreadyTrialedException, self).__init__(*args, **kwargs)
self.created = created[0 : created.index('T')]
self.ip = ip
def __str__(self):
return 'The IP {0} was already used for a trial license on {1}'.format(self.ip, self.created)
class BadSignatureException(KcareError):
pass
# KCARE-509
class PreviousPatchFailedException(KcareError):
def __init__(self, timestamp, anchor, *args, **kwargs):
super(PreviousPatchFailedException, self).__init__(*args, **kwargs)
self.timestamp = timestamp
self.anchor = anchor
def __str__(self):
message = (
'It seems the latest patch, applied at {0}, crashed, '
'and further attempts are suspended. '
'To force patching, remove the `{1}` file'
)
return message.format(self.timestamp, self.anchor)
class NoLibcareLicenseException(KcareError):
pass
def http_request(url, auth_string, auth_token=None, method=None):
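"""Build a Request, adding Basic auth and the Kc-Auth-Token header unless updating from local patches."""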
request = Request(url, method=method)
if not UPDATE_FROM_LOCAL and auth_string:
request.add_header('Authorization', 'Basic {0}'.format(auth_string))
if not UPDATE_FROM_LOCAL and auth_token:
request.add_header(AUTH_TOKEN_HEADER, auth_token)
return request
def print_cln_http_error(ex, url=None, stdout=True):
url = url or '<route cannot be logged>'
logerror('Unable to fetch {0}. Please try again later (error: {1})'.format(url, str(ex)), stdout)
def parse_response_date(str_raw):
# Try to split it by T
str_date, sep, _ = str_raw.partition('T')
# No success - split by space
if not sep:
str_date, _, _ = str_raw.partition(' ')
return datetime.strptime(str_date, '%Y-%m-%d')
def set_monitoring_key_for_ip_license(key):
url = REGISTRATION_API_URL + '/nagios/register_key.plain?key={0}'.format(key)
try:
response = urlopen(url)
res = data_as_dict(nstr(response.read()))
code = int(res['code'])
if code == 0:
print('Key successfully registered')
elif code == 1:
print('Wrong key format or size')
elif code == 2:
print('No KernelCare license for that IP')
else:
print('Unknown error {0}'.format(code))
return code
except HTTPError as e:
print_cln_http_error(e, url)
return -1
@contextmanager
def execute_hooks():
if BEFORE_UPDATE_COMMAND:
run_command(BEFORE_UPDATE_COMMAND, shell=True)
try:
yield
finally:
if AFTER_UPDATE_COMMAND:
run_command(AFTER_UPDATE_COMMAND, shell=True)
def update_config(**kwargs):
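"""Rewrite CONFIG in place: replace existing `PROP = value` lines, append missing ones, and drop the line when the value is None."""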
cf = open(CONFIG)
lines = cf.readlines()
cf.close()
for prop, value in kwargs.items():
updated = False
prop_eq = prop + '='
prop_sp = prop + ' '
for i in range(len(lines)):
if lines[i].startswith(prop_eq) or lines[i].startswith(prop_sp):
if value is None:
del lines[i]
else:
lines[i] = prop + ' = ' + str(value) + '\n'
updated = True
break
if not updated:
lines.append(prop + ' = ' + str(value) + '\n')
atomic_write(CONFIG, ''.join(lines))
def plugin_info(fmt=None):
"""
The output will consist of:
Ignore output up to the line with "--START--"
Line 1: show if update is needed:
0 - updated to latest,
1 - update available,
2 - unknown kernel,
3 - kernel doesn't need patches,
4 - no license, cannot determine
Line 2: licensing message (can be skipped, can be more than one line)
Line 3: LICENSE: CODE: 1: license present, 2: trial license present, 0: no license
Line 4: Update mode (True - auto-update, False - no auto-update)
Line 5: Effective kernel version
Line 6: Real kernel version
Line 7: Patchset Installed # --> If None, no patchset installed
Line 8: Uptime (in seconds)
If *fmt* is 'json', return the results in JSON format.
Any other output means an error retrieving info
:return:
"""
pli = _patch_level_info()
update_code = pli.code
loaded_pl = pli.applied_lvl
license_info_result = license_info()
if fmt == 'json':
results = {
'updateCode': str(update_code),
'autoUpdate': AUTO_UPDATE,
'effectiveKernel': kcare_uname(),
'realKernel': platform.release(),
'loadedPatchLevel': loaded_pl,
'uptime': int(get_uptime()),
'license': license_info_result,
}
print('--START--')
print(json.dumps(results))
else:
print('--START--')
print(str(update_code))
print('LICENSE: ' + str(license_info_result))
print(AUTO_UPDATE)
print(kcare_uname())
print(platform.release())
print(loaded_pl)
print(get_uptime())
def touch_status_gap_file(filename='.kcarestatus'):
status_filepath = os.path.join(PATCH_CACHE, filename)
atomic_write(status_filepath, _timestmap_str())
def status_gap_passed(filename='.kcarestatus'):
status_filepath = os.path.join(PATCH_CACHE, filename)
if os.path.isfile(status_filepath):
with open(status_filepath, 'r') as sfile:
try:
timestamp = int(sfile.read())
if int(timestamp) + STATUS_CHANGE_GAP + STATUS_CHANGE_GAP_DELAY > time.time():
return False
except Exception:
pass
return True
def get_update_status():
current_level = loaded_patch_level()
try:
latest_patch_level = get_latest_patch_level(reason='info')
except UnknownKernelException:
return 0 if IGNORE_UNKNOWN_KERNEL else 3
if current_level is None:
return 1
if current_level >= latest_patch_level:
return 0
return 2 if status_gap_passed() else 0
def clear_libcare_cache(clbl):
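"""Decorator: after the wrapped call, always ask libcare to clear its cache; failures are logged but not raised."""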
def wrapper(*args, **kwargs):
try:
return clbl(*args, **kwargs)
finally:
try:
libcare_client('clearcache')
except Exception as err:
# We don't want to show the error to the user but want to see it in the logs
logerror("Libcare cache clearing failed: '{0}'".format(err), print_msg=False)
return wrapper
@clear_libcare_cache
def get_userspace_update_status():
try:
failed, _, libs_not_patched, _ = check_userspace_updates()
except KcareError:
return 3
if failed:
return 3
if libs_not_patched:
return 1
return 2 if status_gap_passed(filename='.libcarestatus') else 0
def license_info():
server_id = get_serverid()
if server_id:
url = REGISTRATION_API_URL + '/check.plain?server_id={0}'.format(server_id)
try:
response = urlopen(url)
content = nstr(response.read())
res = data_as_dict(content)
if not res or not res.get('code'):
print('Unexpected CLN response: {0}'.format(content))
return 1
code = int(res['code'])
if code == 0:
print('Key-based valid license found')
return 1
else:
license_type = _get_license_info_by_ip(key_checked=1)
if license_type == 0:
print('No valid key-based license found')
return license_type
except HTTPError as e:
print_cln_http_error(e, url)
return 0
else:
return _get_license_info_by_ip()
def _get_license_info_by_ip(key_checked=0):
url = REGISTRATION_API_URL + '/check.plain'
try:
response = urlopen(url)
content = nstr(response.read())
res = data_as_dict(content)
if res['success'].lower() == 'true':
code = int(res['code'])
if code == 0:
print('Valid license found for IP {0}'.format(res['ip']))
return 1 # valid license
if code == 1:
ip = res['ip']
expires_str = parse_response_date(res['expire_date']).strftime('%Y-%m-%d')
print('You have a trial license for the IP {0} that will expire on {1}'.format(ip, expires_str))
return 2 # trial license
if code == 2 and key_checked == 0:
ip = res['ip']
expires_str = parse_response_date(res['expire_date']).strftime('%Y-%m-%d')
print('Your trial license for the IP {0} expired on {1}'.format(ip, expires_str))
if code == 3 and key_checked == 0:
if 'ip' in res:
print("The IP {0} hasn't been licensed".format(res['ip']))
else:
print("This server hasn't been licensed")
else:
message = res.get('message', '')
print('Error retrieving license info: {0}'.format(message))
except HTTPError as e:
print_cln_http_error(e, url)
except KeyError as key:
print('Unexpected CLN response, cannot find {0} key:\n{1}'.format(key, content.strip()))
return 0 # no valid license
def register_trial():
trial_mark = os.path.join(PATCH_CACHE, 'trial-requested')
if os.path.exists(trial_mark):
return
try:
response = urlopen(REGISTRATION_API_URL + '/trial.plain')
res = data_as_dict(nstr(response.read()))
try:
if res['success'].lower() == 'true':
atomic_write(trial_mark, '', ensure_dir=True)
if res['expired'] == 'true':
raise AlreadyTrialedException(res['ip'], res['created'])
loginfo('Requesting trial license for IP {0}. Please wait...'.format(res['ip']))
return None
elif res['success'] == 'na':
atomic_write(trial_mark, '', ensure_dir=True)
raise KcareError('Invalid License')
else:
# TODO: make sane exception messages
raise UnableToGetLicenseException(-1) # Invalid response?
except KeyError as ke:
raise UnableToGetLicenseException(ke)
except HTTPError as e:
raise UnableToGetLicenseException(e.code)
@cached
def get_uptime():
if os.path.isfile('/proc/uptime'):
f = open('/proc/uptime', 'r')
line = f.readline()
result = str(int(float(line.split()[0])))
f.close()
return result
return '-1'
@cached
def get_virt():
if os.path.isfile(VIRTWHAT):
return check_output([VIRTWHAT]).strip()
return 'no-virt-what' # pragma: no cover
def get_last_stop():
"""Returns timestamp from PATCH_CACHE/stoped.at if its exsits"""
stopped_at_filename = os.path.join(PATCH_CACHE, 'stopped.at')
if os.path.exists(stopped_at_filename):
with open(stopped_at_filename, 'r') as fh:
value = fh.read().rstrip()
try:
int(value)
except ValueError:
return str(int(os.path.getctime(stopped_at_filename)))
except Exception: # pragma: no cover, it should not happen
return 'error'
return value
return '-1'
def get_distro():
if sys.version_info[:2] < (3, 6): # pragma: no py3 cover
return platform.linux_distribution()
else: # pragma: no distro cover
import distro
return distro.linux_distribution(full_distribution_name=False)
def edf_fallback_ptype():
distro, version = get_distro()[:2]
# From talk with @kolshanov
if distro == 'CloudLinux' and version.startswith('7.'):
return 'extra'
else:
return ''
def strip_version_timestamp(version):
match = VERSION_RE.match(version)
return match and match.group(1) or version
def server_info(reason, now=None):
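"""Collect host and agent metadata (kernel, distro, uptime, patch level, kdump status, ...) reported to the patch server together with the update reason."""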
data = dict() # type: Dict[str, Any]
data['ts'] = int(now or time.time())
data['reason'] = reason
data['machine'] = platform.machine()
data['processor'] = platform.processor()
data['release'] = platform.release()
data['system'] = platform.system()
data['version'] = platform.version()
distro = get_distro()
data['distro'] = distro[0]
data['distro_version'] = distro[1]
data['euname'] = kcare_uname()
data['kcare_version'] = strip_version_timestamp(VERSION)
data['last_stop'] = get_last_stop()
data['node'] = get_hostname()
data['uptime'] = get_uptime()
data['virt'] = get_virt()
data['proxy'] = proxy_is_used()
description = parse_patch_description(loaded_patch_description())
data['ltimestamp'] = description['last-update']
data['patch_level'] = description['patch-level']
data['patch_type'] = description['patch-type']
data['kmod'] = get_current_kmod_version() or ''
data['kdump_status'] = kdump_status()
try:
data['kdump_ts'] = kdumps_latest_event_timestamp()
except Exception:
# Not critical data
pass
server_id = get_serverid()
if server_id:
data['server_id'] = server_id
state = get_state()
if state is not None:
data['state'] = state
return data
def get_state():
state_file = os.path.join(PATCH_CACHE, 'kcare.state')
if os.path.exists(state_file):
with open(state_file, 'r') as f:
state = f.read()
try:
return ast.literal_eval(state)
except (SyntaxError, ValueError, TypeError):
pass
def server_lib_info(reason, patch_level, now=None):
data = dict() # type: Dict[str, Any]
data['ts'] = int(now or time.time())
data['reason'] = reason
data['patch_level'] = patch_level
distro = get_distro()
data['distro'] = distro[0]
data['distro_version'] = distro[1]
data['machine'] = platform.machine()
data['kcare_version'] = strip_version_timestamp(VERSION)
data['node'] = get_hostname()
data['uptime'] = get_uptime()
data['virt'] = get_virt()
stop_ts = 0.0
if os.path.exists('/var/lib/libcare/stop'):
stop_ts = os.path.getctime('/var/lib/libcare/stop')
data['stop_ts'] = stop_ts
return data
def based_server_info(reason):
return nstr(base64.b16encode(bstr(str(server_info(reason)))))
def encode_server_lib_info(info):
data = json.dumps(info, ensure_ascii=False, separators=(',', ':'))
return nstr(base64.urlsafe_b64encode(zlib.compress(bstr(data, 'utf-8'))))
def get_http_auth_string():
server_id = get_serverid()
if server_id:
return nstr(base64.b64encode(bstr('{0}:{1}'.format(server_id, 'kernelcare'))))
return None
# addr -> resolved_peer_addr map
CONNECTION_STICKY_MAP = {} # type: Dict[Tuple[str, int], Tuple[Optional[str], int]]
def sticky_connect(self):
"""Function remembers IP address of host connected to
and uses it for later connections.
Replaces stdlib version of httplib.HTTPConnection.connect
"""
addr = self.host, self.port
resolved_addr = CONNECTION_STICKY_MAP.get(addr, addr) # type: Tuple[Optional[str], int]
self.sock = socket.create_connection(resolved_addr, self.timeout)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if addr not in CONNECTION_STICKY_MAP:
CONNECTION_STICKY_MAP[addr] = self.sock.getpeername()[:2]
if self._tunnel_host:
self._tunnel()
httplib.HTTPConnection.connect = sticky_connect # type: ignore[method-assign,assignment]
# python >= 2.7.9 stdlib (with ssl.HAS_SNI) is able to process https requests on its own;
# for earlier versions manual checks should be done
if not getattr(ssl, 'HAS_SNI', None): # pragma: no cover unit
try:
import distutils.version
import OpenSSL.SSL
if distutils.version.StrictVersion(OpenSSL.__version__) < distutils.version.StrictVersion('0.13'): # type: ignore[attr-defined]
raise ImportError('No pyOpenSSL module with SNI ability.')
except ImportError:
pass
else:
def dummy_verify_callback(*args):
# OpenSSL.SSL.Context.set_verify() requires callback
# where additional checks could be done;
# here is a dummy callback and a hostname check is made externally
# to provide original exception from match_hostname()
return True
PureHTTPSConnection = httplib.HTTPSConnection
class SSLSock(object):
def __init__(self, sock):
self._ssl_conn = sock
self._makefile_refs = 0
def makefile(self, *args):
self._makefile_refs += 1
return socket._fileobject(self._ssl_conn, *args, close=True) # type: ignore[attr-defined]
def close(self):
if not self._makefile_refs and self._ssl_conn:
self._ssl_conn.close()
self._ssl_conn = None
def sendall(self, *args):
return self._ssl_conn.sendall(*args)
class PyOpenSSLHTTPSConnection(httplib.HTTPSConnection):
def connect(self):
httplib.HTTPConnection.connect(self)
# workaround to make pyopenssl negotiate modern TLS (SSLv2/SSLv3 disabled)
ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
ctx.set_options(OpenSSL.SSL.OP_NO_SSLv2 | OpenSSL.SSL.OP_NO_SSLv3)
if CHECK_SSL_CERTS:
ctx.set_verify(OpenSSL.SSL.VERIFY_PEER, dummy_verify_callback)
else:
ctx.set_verify(OpenSSL.SSL.VERIFY_NONE, dummy_verify_callback)
ctx.set_default_verify_paths()
conn = OpenSSL.SSL.Connection(ctx, self.sock)
conn.set_connect_state()
# self._tunnel_host is the original hostname when a proxy is used
server_host = self._tunnel_host or self.host # type: ignore[attr-defined]
conn.set_tlsext_host_name(server_host.encode())
conn.do_handshake()
if CHECK_SSL_CERTS:
match_hostname(conn.get_peer_certificate(), server_host)
self.sock = SSLSock(conn)
httplib.HTTPSConnection = PyOpenSSLHTTPSConnection # type: ignore[misc]
def _urlopen(url, *args, **kwargs): # mocked: tests/unit
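"""urlopen wrapper that adds the KC-Version/KC-Patch-Version headers, optionally disables SSL certificate checks, and maps HTTP 404 and missing local files to NotFound."""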
if hasattr(url, 'get_full_url'):
request_url = url.get_full_url()
else:
request_url = url
url = Request(url)
headers = kwargs.pop('headers', {})
headers.update(
{
'KC-Version': VERSION,
'KC-Patch-Version': KC_PATCH_VERSION,
}
)
for header, value in headers.items():
url.add_header(header, value)
logdebug("Requesting url: `{0}`. Headers: {1}".format(request_url, headers))
try:
# bandit warns about use of file: in urlopen which can happen here but is secure
if not CHECK_SSL_CERTS and getattr(ssl, 'HAS_SNI', None): # pragma: no cover unit
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
kwargs['context'] = ctx
return std_urlopen(url, *args, **kwargs) # nosec B310
return std_urlopen(url, *args, **kwargs) # nosec B310
except HTTPError as ex:
if ex.code == 404:
raise NotFound(ex.url, ex.code, ex.msg, ex.hdrs, ex.fp) # type: ignore[attr-defined]
# HTTPError is a URLError descendant and contains URL, raise it as is
raise
except URLError as ex:
# For local patches, OSError (No such file) should be interpreted as Not Found (404).
# It is done with an explicit errno check because implementing it with "duck-typing"
# would mess with the error context.
if ex.args and hasattr(ex.args[0], 'errno') and ex.args[0].errno == errno.ENOENT:
raise NotFound(url, 404, str(ex), None, None) # type: ignore[arg-type]
# there is no information about URL in the base URLError class, add it and raise
ex.reason = 'Request for `{0}` failed: {1}'.format(request_url, ex)
ex.url = request_url # type: ignore[attr-defined]
raise
def check_exc(*exc_list):
def inner(e, state):
return isinstance(e, exc_list)
return inner
def retry(check_retry, count=None, delay=None, backoff=None):
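"""Decorator factory: retry the wrapped call up to `count` times while
`check_retry(exc, state)` returns True, sleeping between attempts with a
randomized exponential backoff capped at RETRY_MAX_DELAY, then make one
final attempt. A minimal (hypothetical) usage sketch:

    @retry(check_exc(URLError), count=2, delay=1)
    def fetch():
        return urlopen(some_url)
"""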
if delay is None:
delay = RETRY_DELAY
if count is None:
count = RETRY_COUNT
if backoff is None:
backoff = RETRY_BACKOFF
state = {} # type: Dict[str, Any]
def decorator(fn):
def inner(*args, **kwargs):
ldelay = delay
for _ in range(count):
try:
return fn(*args, **kwargs)
except Exception as ex:
if not check_retry(ex, state):
raise
time.sleep(ldelay)
# bandit warns about using random.uniform for security which is not the case here
ldelay = min(ldelay * random.uniform(1, backoff), RETRY_MAX_DELAY) # nosec B311
# last try
return fn(*args, **kwargs)
return inner
return decorator
def check_urlopen_retry(e, state):
if isinstance(e, HTTPError):
return e.code >= 500
elif isinstance(e, URLError):
return True
def check_auth_retry(e, state):
if isinstance(e, HTTPError):
if e.code in (403, 401):
return handle_forbidden(state)
return e.code >= 500
elif isinstance(e, URLError):
return True
def is_local_url(url):
if hasattr(url, 'get_full_url'):
url = url.get_full_url()
return url.startswith('file:')
def urlopen(url, *args, **kwargs):
if is_local_url(url):
return _urlopen(url, *args, **kwargs)
return retry(check_urlopen_retry)(_urlopen)(url, *args, **kwargs)
def urlopen_auth(url, *args, **kwargs):
method = kwargs.pop('method', None)
if kwargs.pop('check_license', True):
check = check_auth_retry
else:
check = check_urlopen_retry
if is_local_url(url):
return _urlopen(url, *args, **kwargs)
request = http_request(url, get_http_auth_string(), get_auth_token(), method=method)
return retry(check)(_urlopen)(request, *args, **kwargs)
def handle_forbidden(state):
"""In case of 403 error we should check what's happen.
Case #1. We are trying to register unlicensed machine and should try to register trial.
Case #2. We have a valid license but access restrictions on server are not consistent yet
and we had to try later.
"""
if 'license' in state:
# license has already been checked and is valid, no need to ask CLN again
return True
if CHECK_CLN_LICENSE_STATUS:
server_id = get_serverid()
if server_id:
url = REGISTRATION_API_URL + '/check.plain' + '?server_id={0}'.format(server_id)
else:
url = REGISTRATION_API_URL + '/check.plain'
try:
# do not retry in case of 500 from CLN!
# otherwise, CLN will die in pain because of too many requests
content = nstr(_urlopen(url).read())
info = data_as_dict(content)
except URLError as ex:
print_cln_http_error(ex, url, stdout=False)
return
if not info or not info.get('code'):
kcarelog.error('Unexpected CLN response: {0}'.format(content))
return
if info['code'] in ['0', '1']:
# license is fine: 0 - valid license, 1 - valid trial license;
# looks like htpasswd not updated yet;
# mark state as licensed to avoid repeated requests to CLN
state['license'] = True
logerror('Unable to access server. Retrying...')
return True
else:
register_trial()
def wrap_with_cache_key(clbl):
"""Enrish request with a cache key, and save it if responce had."""
def wrapper(*args, **kwargs):
cache_key = get_cache_key()
if cache_key is not None:
if 'headers' not in kwargs:
kwargs['headers'] = {}
kwargs['headers'][CACHE_KEY_HEADER] = cache_key
resp = clbl(*args, **kwargs)
new_cache_key = resp.headers.get(CACHE_KEY_HEADER)
if new_cache_key is not None and new_cache_key != cache_key:
atomic_write(CACHE_KEY_DUMP_PATH, new_cache_key)
return resp
return wrapper
def fetch_patch_level(reason, mode=UPDATE_MODE_MANUAL):
if LEVEL is not None:
return LegacyKernelPatchLevel(KHASH, int(LEVEL))
for latest in PATCH_LATEST:
if UPDATE_FROM_LOCAL:
url = get_kernel_prefixed_url(KHASH, latest)
else:
url = get_kernel_prefixed_url(KHASH, stickyfy(latest, mode)) + '?' + based_server_info(reason)
try:
response = wrap_with_cache_key(urlopen_auth)(url, check_license=False)
set_config_from_patchserver(response.headers)
pl = nstr(response.read()).strip()
if pl and pl.startswith('{'):
latest_info = json_loads_nstr(pl)
return KernelPatchLevel(KHASH, latest_info['level'], latest_info['baseurl'], latest_info['release'])
return LegacyKernelPatchLevel(KHASH, int(pl))
except NotFound:
pass
except HTTPError as ex:
# No license - no access
if ex.code in (403, 401):
raise KcareError('KC licence is required')
raise
raise UnknownKernelException()
def get_signature_extensions():
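"""Prefer the .sig2 signature until KCARE_SIG2_EXPIRE_AT (default 2024-01-01) has passed, after which only .sig and .json-sig are tried."""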
sig2_expire_at_str = os.getenv('KCARE_SIG2_EXPIRE_AT', '2024-01-01')
sig2_expire_at_dt = datetime.strptime(sig2_expire_at_str, '%Y-%m-%d')
if datetime.utcnow() > sig2_expire_at_dt:
return [SIG, SIG_JSON]
return [SIG2, SIG, SIG_JSON]
def fetch_signature(url, dst, auth=False):
urlopen_local = urlopen
if auth:
urlopen_local = urlopen_auth
sig_exts = get_signature_extensions()
for sig_ext in sig_exts:
try:
signature = urlopen_local(url + sig_ext)
break
except NotFound as nf:
if sig_ext == sig_exts[-1]:
raise nf # pragma: no cover
sig_dst = dst + sig_ext
save_to_file(signature, sig_dst)
return sig_dst
def selinux_safe_tmpname(fname):
head, tail = os.path.split(fname)
return os.path.join(head, 'tmp.' + tail)
# BadSignatureException is the only side effect of an interrupted connection,
# so retry the file download in this case
@retry(check_exc(BadSignatureException), count=3, delay=0)
def fetch_url(url, dst, check_signature=False, hash_checker=None):
response = urlopen_auth(url)
tmp = selinux_safe_tmpname(dst)
save_to_file(response, tmp)
if hash_checker:
hash_checker.check(url, tmp)
if check_signature and not hash_checker:
signature = fetch_signature(url, tmp, auth=True)
check_gpg_signature(tmp, signature)
os.rename(tmp, dst)
return response
def probe_patch(level, ptype):
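"""Probe whether a patch binary exists for this level and patch type: try a HEAD request first, and fall back to fetching the .sig file with GET when the HEAD request fails with something other than 404."""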
bin_url = level.file_url(_apply_ptype(ptype, PATCH_BIN))
kcarelog.info('Probing patch URL: {0}'.format(bin_url))
try:
urlopen_auth(bin_url, check_license=False, method='HEAD')
return True
except NotFound:
kcarelog.info('{0} is not available: 404'.format(bin_url))
return False
except Exception as ex: # Fallback to GET in case of any error.
kcarelog.debug('HEAD request for {0} raised an error, fallback to the GET request: {1}'.format(bin_url, str(ex)))
url = level.file_url(_apply_ptype(ptype, PATCH_BIN) + SIG)
kcarelog.info('Probing patch URL: {0}'.format(url))
try:
urlopen_auth(url, check_license=False)
except NotFound:
kcarelog.info('{0} is not available: 404'.format(url))
return False
except URLError as ex:
kcarelog.info('{0} is not available: {1}'.format(url, str(ex)))
return True
class HashChecker(object):
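"""Verify downloaded files against the sha256 checksums listed in the release content file."""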
def __init__(self, baseurl, content_file):
self.content_file = content_file
self.url_prefix = get_patch_server_url(baseurl).rstrip('/') + '/'
self.hashes = json.loads(read_file(content_file))['files']
def check(self, url, fname):
cfname = url[len(self.url_prefix) :]
if cfname not in self.hashes:
raise KcareError('Invalid checksum: {0} not found in content file {1}'.format(cfname, self.content_file))
hsh = hashlib.sha256(read_file_bin(fname)).hexdigest()
expected_hsh = self.hashes[cfname]['sha256']
if hsh != expected_hsh:
raise BadSignatureException(
'Invalid checksum: {0} has invalid checksum {1}, expected {2}'.format(fname, hsh, expected_hsh)
)
def set_config_from_patchserver(headers):
global _CONFIG_OPTIONS
global USE_CONTENT_FILE
use_content_file_flag = headers.get('KC-Flag-Use-Content-File')
if use_content_file_flag is not None and 'USE_CONTENT_FILE' not in _CONFIG_OPTIONS:
USE_CONTENT_FILE = bool(int(use_content_file_flag))
kcarelog.info('patchserver config override: USE_CONTENT_FILE with %s', USE_CONTENT_FILE)
@cached
def get_hash_checker(level):
if not USE_CONTENT_FILE:
return None
if not level.baseurl:
return None
dst = level.cache_path(CONTENT_FILE)
if not os.path.exists(dst):
try:
fetch_url(get_patch_server_url(level.baseurl, CONTENT_FILE), dst, USE_SIGNATURE)
except NotFound:
return None
return HashChecker(level.baseurl, dst)
def fetch_and_verify_kernel_file(level, name):
if name == KMOD_BIN:
url = level.kmod_url(KMOD_BIN)
else:
url = level.file_url(name)
dst = level.cache_path(name)
return fetch_url(url, dst, USE_SIGNATURE, hash_checker=get_hash_checker(level))
class PatchFetcher(object):
def __init__(self, patch_level=None):
self.patch_level = patch_level # LegacyKernelPatchLevel or KernelPatchLevel
def _fetch(self, name):
return fetch_and_verify_kernel_file(self.patch_level, name)
def is_patch_fetched(self):
patch_files = (
self.patch_level.cache_path(PATCH_DONE),
self.patch_level.cache_path(PATCH_BIN),
self.patch_level.cache_path(PATCH_INFO),
self.patch_level.cache_path(KMOD_BIN),
)
return all(os.path.isfile(it) for it in patch_files)
def fetch_patch(self):
if self.patch_level is None:
raise ValueError("Cannot fetch patch as no patch level is set")
if not self.patch_level: # level is 0, do nothing
return self.patch_level
if self.is_patch_fetched():
loginfo('Updates already downloaded')
return self.patch_level
loginfo('Downloading updates')
# try to upgrade patch level
if isinstance(self.patch_level, LegacyKernelPatchLevel):
try:
resp = urlopen_auth(self.patch_level.file_url(PATCH_BIN), method='HEAD')
except NotFound:
pass
else:
baseurl = resp.headers.get('KC-Base-Url', None)
if baseurl:
self.patch_level = self.patch_level.upgrade(nstr(baseurl))
try:
self._fetch(PATCH_BIN)
except NotFound:
raise KcareError(
'The `{0}` patch level is not found for `{1}` patch type. '
'Please select valid patch type or patch level'.format(self.patch_level, PATCH_TYPE or 'default')
)
self._fetch(PATCH_INFO)
self._fetch(KMOD_BIN)
self.extract_blacklist()
atomic_write(self.patch_level.cache_path(PATCH_DONE), b'', mode='wb')
restore_selinux_context(PATCH_CACHE)
return self.patch_level
def extract_blacklist(self):
buf = open(self.patch_level.cache_path(PATCH_INFO), 'r').read()
if buf:
mo = BLACKLIST_RE.search(buf)
if mo:
atomic_write(self.patch_level.cache_path(BLACKLIST_FILE), mo.group(1))
def fetch_fixups(self, level):
"""
Download fixup files for the given patch level
:param level: download fixups for this patch level (usually the level of the loaded patch)
:return: None
"""
if level is None:
return
try:
# never use cache for fixup files, must be downloaded from scratch
resp = fetch_and_verify_kernel_file(level, FIXUPS_FILE)
except NotFound:
return
# Upgrade level to a new format with baseurl to fetch fixup files
baseurl = resp.headers.get('KC-Base-Url', None)
if baseurl:
level = level.upgrade(nstr(baseurl))
fixups_fname = level.cache_path(FIXUPS_FILE)
with open(fixups_fname, 'r') as f:
fixups = set([fixup.strip() for fixup in f.readlines()])
for fixup in fixups:
fetch_and_verify_kernel_file(level, fixup)
restore_selinux_context(PATCH_CACHE)
def get_kernel_hash():
f = open(KERNEL_VERSION_FILE, 'rb')
try:
# sha1 is not used for security, turn off bandit warning
# bandit issues a warning that B324 has no test when `nosec B324` is
# set here. Using broad `nosec` here to bypass the warning.
return hashlib.sha1(f.read()).hexdigest() # nosec B324
finally:
f.close()
def kcare_check():
pli = _patch_level_info()
print(pli.msg)
if pli.code == PLI.PATCH_NEED_UPDATE:
sys.exit(0)
else:
sys.exit(1)
def show_generic_info():
pli = _patch_level_info()
kcare_info = _kcare_patch_info_json(pli)
try:
libcare_info = _libcare_patch_info()
except KcareError:
libcare_info = {}
state = get_state()
latest_update = "Unknown"
if state is not None:
latest_update = datetime.fromtimestamp(state['ts']).strftime('%Y-%m-%d')
effective_version = kcare_uname()
kernel_vulnerabilities = len(kcare_info.get('patches', []))
userspace_vulnerabilities = sum(len(rec.get('patches', [])) for rec in libcare_info)
patch_level = loaded_patch_level()
if not patch_level:
print("KernelCare live patching is disabled")
else:
print("KernelCare live patching is active")
print(" - Last updated on {0}".format(latest_update))
print(" - Effective kernel version {0}".format(effective_version))
if kernel_vulnerabilities > 0:
print(" - {0} kernel vulnerabilities live patched".format(kernel_vulnerabilities))
if userspace_vulnerabilities > 0:
print(" - {0} userspace vulnerabilities live patched".format(userspace_vulnerabilities))
if kernel_vulnerabilities + userspace_vulnerabilities == 0:
print(" - This system has no applied patches")
print("Type kcarectl --patch-info to learn more")
def kcare_latest_patch_info(is_json=False):
"""
Retrieve the latest patch info and output it to STDOUT, so it is easy
to get the list of CVEs in use. More info at
https://cloudlinux.atlassian.net/browse/KCARE-952
:return: None
"""
try:
latest = get_latest_patch_level(reason='info', policy=POLICY_REMOTE)
if not latest:
raise UnknownKernelException
url = latest.file_url(PATCH_INFO)
patch_info = nstr(urlopen_auth(url).read())
if is_json:
patches, result = [], {}
for chunk in patch_info.split('\n\n'):
data = data_as_dict(chunk)
if data and 'kpatch-name' in data:
patches.append(data)
else:
result.update(data)
result['patches'] = patches
patch_info = json.dumps(result)
print(patch_info)
except HTTPError as e:
print_cln_http_error(e, e.url)
return 1
except UnknownKernelException:
print('No patches available')
return 0
def _kcare_patch_info_json(pli):
result = {'message': pli.msg}
if pli.applied_lvl is not None:
patch_info = _kcare_patch_info(pli)
patches = []
for chunk in patch_info.split('\n\n'):
data = data_as_dict(chunk)
if data and 'kpatch-name' in data:
patches.append(data)
else:
result.update(data)
result['patches'] = patches
return result
def _kcare_patch_info(pli):
khash = get_kernel_hash()
cache_path = get_cache_path(khash, pli.applied_lvl, PATCH_INFO)
if not os.path.isfile(cache_path):
raise KcareError(
"Can't find information due to the absent patch information file."
" Please, run /usr/bin/kcarectl --update and try again."
)
info = open(cache_path, 'r').read()
if info:
info = BLACKLIST_RE.sub('', info)
return info
def patch_info(is_json=False):
pli = _patch_level_info()
if not is_json:
if pli.code != 0:
print(pli.msg)
if pli.applied_lvl is None:
return
print(_kcare_patch_info(pli))
else:
print(json.dumps(_kcare_patch_info_json(pli), sort_keys=True))
UNAME_LABEL = 'uname: '
def is_uname_char(c):
return str.isalnum(c) or c in '.-_+'
def parse_uname(patch_level):
khash = get_kernel_hash()
f = open(get_cache_path(khash, patch_level, PATCH_INFO), 'r')
try:
for line in f.readlines():
if line.startswith(UNAME_LABEL):
return ''.join(filter(is_uname_char, line[len(UNAME_LABEL) :].strip()))
finally:
f.close()
return ''
def kcare_uname_su():
patch_level = loaded_patch_level()
if not patch_level:
return platform.release()
return parse_uname(patch_level)
def is_same_patch(new_patch_file): # mocked: tests/unit
args = [KPATCH_CTL, 'file-info', new_patch_file]
new_patch_info = check_output(args)
current_patch_info = _patch_info()
build_time_label = 'kpatch-build-time'
return get_patch_value(new_patch_info, build_time_label) == get_patch_value(current_patch_info, build_time_label)
def kcare_update_effective_version(new_version):
if os.path.exists(KCARE_UNAME_FILE):
try:
f = open(KCARE_UNAME_FILE, 'w')
f.write(new_version)
f.close()
return True
except Exception:
pass
return False
def kcare_uname():
if os.path.exists(KCARE_UNAME_FILE):
return open(KCARE_UNAME_FILE, 'r').read().strip()
else:
# TODO: talk to @kolshanov about runtime results from KPATCH_CTL info
# (euname from kpatch-description -- not from kpatch.info file)
return kcare_uname_su()
def kcare_need_update(applied_level, new_level):
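"""Return True when new_level should be applied: level 0 and down-patching are skipped, and an equal level is re-applied only when the patch build time differs."""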
if new_level == 0:
return False
# ignore down-patching
if applied_level and new_level < applied_level:
return False
if applied_level != new_level:
return True
new_patch_file = get_cache_path(KHASH, new_level, PATCH_BIN)
if not is_same_patch(new_patch_file):
return True
return False
class FakeSecHead(object):
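"""Wrap a section-less config file so ConfigParser can parse it by prepending a fake `[asection]` header."""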
def __init__(self, fp):
self.fp = fp
self.sechead = '[asection]\n' # type: Optional[str]
def readline(self): # pragma: no py3 cover
if self.sechead:
try:
return self.sechead
finally:
self.sechead = None
else:
return self.fp.readline()
def __iter__(self): # pragma: no py2 cover
if self.sechead:
yield self.sechead
self.sechead = None
for line in self.fp:
yield line
def get_proxy_from_env(scheme):
if scheme == 'http':
return os.getenv('http_proxy') or os.getenv('HTTP_PROXY')
elif scheme == 'https':
return os.getenv('https_proxy') or os.getenv('HTTPS_PROXY')
def proxy_is_used():
return bool(get_proxy_from_env('http')) or bool(get_proxy_from_env('https'))
def get_config_settings():
global _CONFIG_OPTIONS
_CONFIG_OPTIONS.clear()
result = {}
cp = ConfigParser(defaults={'HTTP_PROXY': '', 'HTTPS_PROXY': ''})
try:
config = FakeSecHead(open(CONFIG))
if PY2: # pragma: no py3 cover
cp.readfp(config)
else: # pragma: no py2 cover
cp.read_file(config)
except Exception:
return {}
def bool_converter(value):
return value.upper() in ('1', 'TRUE', 'YES', 'Y')
def read_var(name, default=None, convert=None):
target = CONFIG_TARGETS_MAPPING.get(name)
try:
value = cp.get('asection', name)
except Exception:
value = default
if value is not None:
if convert:
value = convert(value)
config_option_name = target or name
result[config_option_name] = value
_CONFIG_OPTIONS.add(config_option_name)
for scheme, variable in [('http', 'HTTP_PROXY'), ('https', 'HTTPS_PROXY')]:
# environment settings take precedence over kcare.config ones
if not get_proxy_from_env(scheme):
proxy = cp.get('asection', variable)
if proxy:
os.environ[variable] = proxy
read_var('UPDATE_POLICY', convert=str.upper)
read_var('PATCH_METHOD', convert=str.upper)
read_var('PATCH_SERVER', convert=lambda v: v.rstrip('/'))
read_var('AUTO_UPDATE', convert=bool_converter)
read_var('LIB_AUTO_UPDATE', convert=bool_converter)
read_var('REGISTRATION_URL', convert=lambda v: v.rstrip('/'))
read_var('PREFIX')
read_var('IGNORE_UNKNOWN_KERNEL', convert=bool_converter)
read_var('UPDATE_SYSCTL_CONFIG', convert=bool_converter)
read_var('CHECK_SSL_CERTS', convert=bool_converter)
read_var('PATCH_TYPE', convert=str.lower)
read_var('PREV_PATCH_TYPE', convert=str.lower)
read_var('PATCH_LEVEL', convert=lambda v: v or None)
read_var('STICKY_PATCH', convert=str.upper)
read_var('STICKY_PATCHSET')
read_var('AUTO_STICKY_PATCHSET')
read_var('UPDATE_DELAY')
read_var('AUTO_UPDATE_DELAY')
read_var('REPORT_FQDN', convert=bool_converter)
read_var('SILENCE_ERRORS', convert=bool_converter)
read_var('FORCE_GID')
read_var('LIBCARE_DISABLED', convert=bool_converter)
read_var('BEFORE_UPDATE_COMMAND', convert=lambda v: v.strip())
read_var('AFTER_UPDATE_COMMAND', convert=lambda v: v.strip())
read_var('KMSG_OUTPUT', convert=bool_converter)
read_var('KCORE_OUTPUT_SIZE', convert=int)
read_var('KDUMPS_DIR', convert=lambda v: v.rstrip('/'))
read_var('USERSPACE_PATCHES', convert=lambda v: [ptch.strip().lower() for ptch in v.split(',')])
read_var('STATUS_CHANGE_GAP', convert=int)
read_var('USE_CONTENT_FILE', convert=bool_converter)
return result
def update_config_params(params):
config = get_config_settings()
params_for_update = {}
pattern = re.compile(r'^([^=]+)=([^=]*)$')
for param in params:
match = pattern.match(param)
if match:
key, value = match.groups()
if not value:
value = None
else:
raise SystemExit('Invalid parameter format: %s. Format should be KEY=VALUE' % param)
params_for_update[key] = value
# params with other targets in the config, see get_config_settings()
# reversed mapping is used to get the parameter name as in config file, not as variable name
# python 2.6 does not support dict comprehension
config_targets_reverse_mapping = dict((v, k) for k, v in CONFIG_TARGETS_MAPPING.items())
# python 2.6 does not support set comprehension
possible_params = set([config_targets_reverse_mapping.get(k, k) for k in config.keys()])
config_keys_diff = set(params_for_update.keys()) - possible_params
if not config_keys_diff:
update_config(**params_for_update)
else:
raise SystemExit('Unknown parameter: %s' % ', '.join(config_keys_diff))
def update_sysctl():
if LOAD_KCARE_SYSCTL:
if not (os.path.isfile(SYSCTL_CONFIG) and os.access(SYSCTL_CONFIG, os.R_OK)):
kcarelog.warning('File {0} does not exist or has no read access'.format(SYSCTL_CONFIG))
return
code, _, _ = run_command(['/sbin/sysctl', '-q', '-p', SYSCTL_CONFIG], catch_stdout=True)
if code != 0:
kcarelog.warning('Unable to load kcare sysctl.conf: {0}'.format(code))
def is_cpanel():
return os.path.isfile('/usr/local/cpanel/cpanel')
def is_secure_boot(): # mocked: tests/unit/test_load_kmod.py
efivars_location = "/sys/firmware/efi/efivars/"
if not os.path.exists(efivars_location):
return False
for filename in os.listdir(efivars_location):
if filename.startswith('SecureBoot'):
varfile = os.path.join(efivars_location, filename)
# Get last byte
with open(varfile, 'rb') as vfd:
return vfd.read()[-1:] == b'\x01'
return False
def inside_vz_container(): # mocked: tests/unit/test_load_kmod.py
return os.path.exists('/proc/vz/veinfo') and not os.path.exists('/proc/vz/version')
def inside_lxc_container(): # mocked: tests/unit/test_load_kmod.py
return '/lxc/' in open('/proc/1/cgroup').read()
def inside_docker_container(): # mocked: tests/unit/test_load_kmod.py
return os.path.isfile('/.dockerenv')
def edit_sysctl_conf(remove, append):
"""Update SYSCTL_CONFIG accordingly the edits"""
# Create if it does not exist
if not os.path.isfile(SYSCTL_CONFIG):
open(SYSCTL_CONFIG, 'a').close()
# Check kcare sysctl path and read access
if not os.access(SYSCTL_CONFIG, os.R_OK):
kcarelog.warning('File {0} has no read access'.format(SYSCTL_CONFIG))
return
with open(SYSCTL_CONFIG, 'r+') as sysctl:
lines = sysctl.readlines()
sysctl.seek(0)
for line in lines:
# Do not rewrite lines that should be deleted
if not any(line.startswith(r) for r in remove):
sysctl.write(line)
# Write additional lines
for a in append:
sysctl.write(a + '\n')
sysctl.truncate()
def run_command(command, catch_stdout=False, catch_stderr=False, shell=False): # mocked: tests/unit/conftest.py
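"""Run a command with subprocess, optionally capturing stdout/stderr, log the result at debug level, and return (exit_code, stdout, stderr)."""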
stdout = subprocess.PIPE if catch_stdout else None
stderr = subprocess.PIPE if catch_stderr else None
# We need to eventually keep shell=True as it might break customer's hooks, skip this bandit check.
p = subprocess.Popen(command, stdout=stdout, stderr=stderr, shell=shell) # nosec B602
stdout_captured, stderr_captured = p.communicate()
code = p.returncode
if stdout_captured is not None:
stdout_captured = nstr(stdout_captured)
if stderr is not None:
stderr_captured = nstr(stderr_captured)
logdebug(
textwrap.dedent(
"""
Call result for `{cmd}`:
exit code {exit_code}
=== STDOUT ===
{stdout}
=== STDERR ===
{stderr}
=== END ===
"""
).format(exit_code=p.returncode, stdout=stdout_captured, stderr=stderr_captured, cmd=' '.join(command))
)
return code, stdout_captured, stderr_captured
def check_output(args):
_, stdout, _ = run_command(args, catch_stdout=True)
return stdout
@cached
def get_loaded_modules():
try:
return [line.split()[0] for line in open('/proc/modules')]
except (OSError, IOError) as ex:
logerror('Error getting loaded modules list: ' + str(ex), print_msg=False)
return []
def detect_conflicting_modules(modules):
for module in modules:
if CONFLICTING_MODULES_RE.match(module):
raise KcareError("Detected '{0}' kernel module loaded. Please unload that module first".format(module))
def get_current_kmod_version():
kmod_version_file = '/sys/module/kcare/version'
if not os.path.exists(kmod_version_file):
return
with open(kmod_version_file, 'r') as f:
version = f.read().strip()
return version
def is_kmod_version_changed(khash, plevel):
old_version = get_current_kmod_version()
if not old_version:
return True
new_version = check_output(['/sbin/modinfo', '-F', 'version', get_cache_path(khash, plevel, KMOD_BIN)]).strip()
return old_version != new_version
def get_kcare_kmod_link():
return '/lib/modules/{0}/extra/kcare.ko'.format(platform.uname()[2])
def kmod_is_signed():
level = get_latest_patch_level(reason='info')
kmod_file = get_cache_path(KHASH, level, KMOD_BIN)
if not os.path.isfile(kmod_file):
return
with open(kmod_file, 'rb') as vfd:
return vfd.read()[-28:] == b'~Module signature appended~\n'
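# Note (informational): signed Linux kernel modules end with the plain-text trailer
# "~Module signature appended~\n"; kmod_is_signed() above simply checks for that magic.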
def load_kmod(kmod, **kwargs):
cmd = ['/sbin/insmod', kmod]
for key, value in kwargs.items():
cmd.append('{0}={1}'.format(key, value))
code, _, _ = run_command(cmd, catch_stdout=True)
if code != 0:
raise KcareError('Unable to load kmod ({0} {1}). Try running with the `--check-compatibility` flag.'.format(kmod, code))
def check_compatibility():
if is_secure_boot() and not kmod_is_signed():
raise KcareError('Secure boot is enabled. Not supported by KernelCare.')
if inside_vz_container() or inside_lxc_container() or inside_docker_container():
raise KcareError('You are running inside a container. KernelCare should be executed on the host side instead.')
def get_kmod_params(kcare_link):
stdout = check_output(["/sbin/modinfo", "-F", "parm", kcare_link])
available_params = []
for line in stdout.split('\n'):
if line.strip():
param_name, _, _ = line.partition(':')
available_params.append(param_name)
return available_params
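# Illustration (sketch; the parameter names here are only examples): `modinfo -F parm`
# prints one "name:description" line per module parameter, so a line such as
# "kpatch_debug:enable debug output (int)" contributes 'kpatch_debug' to the result.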
def load_kcare_kmod(khash, level):
# To make the `kdump` service work, we need to copy
# `kcare.ko` into `/lib/modules/$(uname -r)/extra/kcare.ko`
# and call `/sbin/depmod`
kcare_link = get_kcare_kmod_link()
kcare_file = get_cache_path(khash, level, KMOD_BIN)
try:
shutil.copy(kcare_file, kcare_link)
except Exception:
kcare_link = kcare_file
kmod_args = {
'kpatch_debug': 1 if KPATCH_DEBUG else 0,
'kmsg_output': 1 if KMSG_OUTPUT else 0,
'kcore_output': KCORE_OUTPUT_SIZE,
'kdumps_dir': KDUMPS_DIR if isinstance(KDUMPS_DIR, str) else "",
}
available_kmod_args = get_kmod_params(kcare_link)
kmod_args = dict((k, v) for k, v in kmod_args.items() if k in available_kmod_args)
if len(KDUMPS_DIR) and not os.path.exists(KDUMPS_DIR):
os.makedirs(KDUMPS_DIR)
load_kmod(kcare_link, **kmod_args)
code, _, stderr = run_command(['/sbin/depmod'], catch_stdout=True, catch_stderr=True)
if code:
# We don't want to show the error to the user but want to see it in the logs
logerror('Running of `depmod` failed with {0}: {1}'.format(code, stderr), print_msg=False)
def unload_kmod(modname):
code, _, _ = run_command(['/sbin/rmmod', modname], catch_stdout=True)
if code != 0:
raise KcareError('Unable to unload {0} kmod {1}'.format(modname, code))
def apply_fixups(khash, current_level, modules):
loaded = []
for mod in ['vmlinux'] + modules:
modpath = get_cache_path(khash, current_level, 'fixup_{0}.ko'.format(mod))
if os.path.exists(modpath):
load_kmod(modpath)
loaded.append('fixup_{0}'.format(mod))
return loaded
def remove_fixups(fixups):
for mod in fixups:
try:
unload_kmod(mod)
except Exception:
kcarelog.exception('Exception while unloading module %s.' % mod)
def get_freezer_style(freezer, modules):
if freezer:
method = freezer
elif PATCH_METHOD:
method = PATCH_METHOD
elif get_freezer_blacklist().intersection(modules):
# blacklist module found, use smart freezer
# xxx: this branch can be safely removed once the smart freezer works by default
return 'freeze_conflict', freezer, PATCH_METHOD, True
else:
# user doesn't provide patch method and no conflicting modules loaded
return 'default', freezer, PATCH_METHOD, False
# non default patch method, translate it into form accepted by kpatch_ctl
patch_method_map = {
'NONE': 'freeze_none',
'NOFREEZE': 'freeze_none',
'FULL': 'freeze_all',
'FREEZE': 'freeze_all',
'SMART': 'freeze_conflict',
}
method = method.upper()
if method in patch_method_map:
method = patch_method_map[method]
else:
raise KcareError('Unable to detect freezer style ({0}, {1}, {2}, {3})'.format(method, freezer, PATCH_METHOD, False))
return method, freezer, PATCH_METHOD, False
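# Illustrative behaviour of the mapping above (sketch): with no --freezer flag, no
# PATCH_METHOD and no blacklisted modules loaded the result is ('default', '', '', False);
# an explicit freezer such as 'smart'/'full'/'none' is upper-cased and translated to
# 'freeze_conflict'/'freeze_all'/'freeze_none'; when a blacklisted module is loaded and
# no method is given, the smart freezer is forced and the final flag is True.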
def kcare_load(khash, level, mode, freezer='', use_anchor=False):
state_data = {'khash': khash, 'future': level, 'mode': mode}
register_action('start', state_data)
current_level = loaded_patch_level()
modules = get_loaded_modules()
detect_conflicting_modules(modules)
# get the freezer style at the beginning to prevent any further work in case of an exception
freezer_style = get_freezer_style(freezer, modules)
patch_file = get_cache_path(khash, level, PATCH_BIN)
save_cache_latest(khash, level)
description = '{0}-{1}:{2};{3}'.format(
level, PATCH_TYPE, _timestmap_str(), parse_uname(level) # future server_info['ltimestamp']
)
kmod_loaded = 'kcare' in modules
kmod_changed = kmod_loaded and is_kmod_version_changed(khash, level)
patch_loaded = current_level is not None
same_patch = patch_loaded and is_same_patch(patch_file) and kcare_update_effective_version(description)
state_data.update({'current': current_level, 'kmod_changed': kmod_changed})
if same_patch:
register_action('done', state_data)
return
if patch_loaded:
register_action('fxp', state_data)
fixups = apply_fixups(khash, current_level, modules)
register_action('unpatch', state_data)
kpatch_ctl_unpatch(freezer_style)
register_action('unfxp', state_data)
remove_fixups(fixups)
if kmod_changed:
register_action('unload', state_data)
unload_kmod('kcare')
kmod_loaded = False
if not kmod_loaded:
register_action('load', state_data)
load_kcare_kmod(khash, level)
if use_anchor: # KCARE-509
touch_anchor()
register_action('patch', state_data)
kpatch_ctl_patch(patch_file, khash, level, description, freezer_style)
update_sysctl()
loginfo('Patch level {0} applied. Effective kernel version {1}'.format(level, kcare_uname()))
# Update last status check timestamp
touch_status_gap_file()
# do final actions when update is considered as successful
register_action('wait', state_data)
nohup_fork(lambda: commit_update(state_data), sleep=SUCCESS_TIMEOUT)
def kpatch_ctl_patch(patch_file, khash, level, description, freezer_style):
args = [KPATCH_CTL]
blacklist_file = get_cache_path(khash, level, BLACKLIST_FILE)
if os.path.exists(blacklist_file):
args.extend(['-b', blacklist_file])
args.extend(['patch', '-d', description])
args.extend(['-m', freezer_style[0]])
args.append(patch_file)
code, _, _ = run_command(args, catch_stdout=True)
if code != 0:
raise ApplyPatchError(code, freezer_style, level, patch_file)
def kpatch_ctl_unpatch(freezer_style):
code, _, _ = run_command([KPATCH_CTL, 'unpatch', '-m', freezer_style[0]], catch_stdout=True)
if code != 0:
raise KcareError('Error unpatching [{0}] {1}'.format(code, str(freezer_style)))
def register_action(action, state_data):
state_data['action'] = action
state_data['ts'] = int(time.time())
atomic_write(os.path.join(PATCH_CACHE, 'kcare.state'), str(state_data))
def kcare_unload(freezer='', force=False):
current_level = loaded_patch_level()
pf = PatchFetcher()
try:
pf.fetch_fixups(current_level)
except Exception as err:
if not force:
raise KcareError(
"Unable to retrieve fixups: '{0}'. The unloading of patches has been "
"interrupted. To proceed without fixups, use the --force flag.".format(err)
)
modules = get_loaded_modules()
freezer_style = get_freezer_style(freezer, modules)
with execute_hooks():
if 'kcare' in modules:
need_unpatch = current_level is not None
if need_unpatch:
fixups = apply_fixups(KHASH, current_level, modules)
code, _, _ = run_command([KPATCH_CTL, 'unpatch', '-m', freezer_style[0]], catch_stdout=True)
remove_fixups(fixups)
if code != 0:
raise KcareError('Error unpatching [{0}] {1}'.format(code, str(freezer_style)))
# Unload the kcare module and retry once after 10 seconds if it failed.
# The kernel module could be loaded even if no patch is applied.
retry(check_exc(KcareError), count=1, delay=UNLOAD_RETRY_DELAY)(unload_kmod)('kcare')
kmod_link = get_kcare_kmod_link()
if os.path.isfile(kmod_link):
os.unlink(kmod_link)
def kcare_info(is_json):
pli = _patch_level_info()
if is_json:
return _kcare_info_json(pli)
else:
if pli.code != 0:
return pli.msg
if pli.applied_lvl is not None:
return _patch_info()
def _kcare_info_json(pli):
result = {'message': pli.msg}
if pli.applied_lvl is not None:
result.update(data_as_dict(_patch_info()))
result.update(parse_patch_description(result.get('kpatch-description')))
result['kpatch-state'] = pli.state
return json.dumps(result)
def _patch_info():
return check_output([KPATCH_CTL, 'info'])
def data_as_dict(data):
result = {}
data = data.splitlines()
for line in data:
if line:
key, delimiter, value = line.partition(':')
if delimiter:
result[key] = value.strip()
return result
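# Illustration: data_as_dict('success: true\nmessage: ok') returns
# {'success': 'true', 'message': 'ok'}; values are stripped and lines without ':' are skipped.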
def get_patch_value(info, label):
return data_as_dict(info).get(label)
def loaded_patch_description():
if 'kcare' not in get_loaded_modules():
return None
# example: 28-:1532349972;4.4.0-128.154
# (patch level: number)-(patch type: free/extra/empty):(timestamp);(effective kernel version from kpatch.info)
return get_patch_value(_patch_info(), 'kpatch-description')
def parse_patch_description(desc):
result = {'patch-level': None, 'patch-type': 'default', 'last-update': '', 'kernel-version': ''}
if not desc:
return result
level_type_timestamp, _, kernel = desc.partition(';')
level_type, _, timestamp = level_type_timestamp.partition(':')
patch_level, _, patch_type = level_type.partition('-')
# need to return patch_level=None not to break old code
# TODO: refactor all loaded_patch_level() usages to work with empty string instead of None
result['patch-level'] = patch_level or None
result['patch-type'] = patch_type or 'default'
result['last-update'] = timestamp
result['kernel-version'] = kernel
return result
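# Illustration: parse_patch_description('28-:1532349972;4.4.0-128.154') returns
# {'patch-level': '28', 'patch-type': 'default', 'last-update': '1532349972',
#  'kernel-version': '4.4.0-128.154'}; a description such as '28-extra:...' yields
# patch-type 'extra'.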
def loaded_patch_level(): # mocked: tests/unit
pl = parse_patch_description(loaded_patch_description())['patch-level']
if pl:
try:
int(pl)
except ValueError as e:
raise SafeExceptionWrapper(e, 'Unexpected patch state', _patch_info())
return LegacyKernelPatchLevel(KHASH, pl)
class PLI:
PATCH_LATEST = 0
PATCH_NEED_UPDATE = 1
PATCH_UNAVALIABLE = 2
PATCH_NOT_NEEDED = 3
def __init__(self, code, msg, remote_lvl, applied_lvl, state):
self.code = code
self.msg = msg
self.remote_lvl = remote_lvl
self.applied_lvl = applied_lvl
self.state = state
def _patch_level_info():
current_patch_level = loaded_patch_level()
try:
# this line raises UnknownKernel from the bottom of this try
new_patch_level = get_latest_patch_level(reason='info')
if current_patch_level:
if kcare_need_update(current_patch_level, new_patch_level):
code, msg, state = (
PLI.PATCH_NEED_UPDATE,
"Update available, run 'kcarectl --update'.",
'applied',
)
else:
code, msg, state = (
PLI.PATCH_LATEST,
'The latest patch is applied.',
'applied',
)
else:
# no patch applied
if new_patch_level == 0:
code, msg, state = (
PLI.PATCH_NOT_NEEDED,
"This kernel doesn't require any patches.",
'unset',
)
else:
code, msg, state = (
PLI.PATCH_NEED_UPDATE,
"No patches applied, but some are available, run 'kcarectl --update'.",
'unset',
)
info = PLI(code, msg, new_patch_level, current_patch_level, state)
except UnknownKernelException:
code = PLI.PATCH_UNAVALIABLE
if STICKY:
msg = (
'Invalid sticky patch tag {0} for kernel ({1} {2}). '
'Please check /etc/sysconfig/kcare/kcare.conf '
'STICKY_PATCH settings'.format(STICKY, get_distro()[0], platform.release())
)
else:
msg = 'New kernel detected ({0} {1} {2}).\nThere are no updates for this kernel yet.'.format(
get_distro()[0], platform.release(), get_kernel_hash()
)
info = PLI(code, msg, None, None, 'unavailable')
return info
def check_gpg_bin():
if not os.path.isfile(GPG_BIN):
raise KcareError('No {0} present. Please install gnupg'.format(GPG_BIN))
@cached
def find_cmd(name, *path):
for it in path:
fname = os.path.join(it, name)
if os.path.isfile(fname):
return fname
raise Exception('{0} could not be found at {1}'.format(name, path))
def rm_serverid():
os.unlink(SYSTEMID)
def set_server_id(server_id):
atomic_write(SYSTEMID, 'server_id={0}\n'.format(server_id))
def set_auth_token(auth_token):
if not auth_token:
return
atomic_write(AUTH_TOKEN_DUMP_PATH, auth_token)
def unregister(silent=False):
url = None
try:
server_id = get_serverid()
if server_id is None:
if not silent:
logerror('Error unregistering server: cannot find server id')
return
url = REGISTRATION_API_URL + '/unregister_server.plain?server_id={0}'.format(server_id)
response = urlopen(url)
content = nstr(response.read())
res = data_as_dict(content)
if res['success'] == 'true':
rm_serverid()
if not silent:
loginfo('Server was unregistered')
elif not silent:
logerror(content)
logerror('Error unregistering server: ' + res['message'])
except HTTPError as e:
if not silent:
print_cln_http_error(e, url)
def register_retry(url): # pragma: no cover unit
print('Register auto-retry has been enabled, the system can be registered later')
pid = os.fork()
if pid > 0:
return
os.setsid()
pid = os.fork()
import sys
if pid > 0:
sys.exit(0)
sys.stdout.flush()
si = open('/dev/null', 'r')
so = open('/dev/null', 'a+')
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(so.fileno(), sys.stderr.fileno())
while True:
time.sleep(60 * 60 * 2)
code, server_id, auth_token = try_register(url)
if code == 0 and server_id:
set_server_id(server_id)
set_auth_token(auth_token)
sys.exit(0)
def tag_server(tag):
"""
Request to tag server from ePortal. See KCARE-947 for more info
:param tag: String used to tag the server
:return: 0 on success, -1 on wrong server id, other values otherwise
"""
url = None
try:
# TODO: is it ok to send request in case when no server_id found? (machine is not registered in ePortal)
server_id = get_serverid()
query = urlencode([('server_id', server_id), ('tag', tag)])
url = REGISTRATION_API_URL + '/tag_server.plain?{0}'.format(query)
response = urlopen(url)
res = data_as_dict(nstr(response.read()))
return int(res['code'])
except HTTPError as e:
print_cln_http_error(e, url)
return -3
except URLError as ue:
print_cln_http_error(ue, url)
return -4
except Exception as ee:
logerror('Internal Error {0}'.format(ee))
return -5
def try_register(url):
try:
response = urlopen(url)
auth_token = response.headers.get(AUTH_TOKEN_HEADER, None)
res = data_as_dict(nstr(response.read()))
return int(res['code']), res.get('server_id'), auth_token
except (HTTPError, URLError) as e:
print_cln_http_error(e, url)
return None, None, None
except Exception:
kcarelog.exception('Exception while trying to register URL %s' % url)
return None, None, None
@cached
def get_hostname():
# KCARE-1165 If fqdn gathering is forced
if REPORT_FQDN:
try:
# getaddrinfo() -> [(family, socktype, proto, canonname, sockaddr), ...]
hostname = socket.getaddrinfo(socket.gethostname(), 0, 0, 0, 0, socket.AI_CANONNAME)[0][3]
except socket.gaierror as ge:
logerror(ge)
hostname = platform.node()
else:
hostname = platform.node()
return hostname
def register(key, retry=False):
try:
unregister(True)
except Exception:
kcarelog.exception('Exception while trying to unregister URL before register.')
hostname = get_hostname()
query = urlencode([('hostname', hostname), ('key', key)])
url = '{0}/register_server.plain?{1}'.format(REGISTRATION_API_URL, query)
code, server_id, auth_token = try_register(url)
if code == 0:
set_server_id(server_id)
set_auth_token(auth_token)
loginfo('Server Registered')
return 0
elif code == 1:
logerror('Account Locked')
elif code == 2:
logerror('Invalid Key')
elif code == 3:
logerror(
'You have reached maximum registered servers for this key. '
'Please go to your CLN account, remove unused servers and try again.'
)
elif code == 4:
logerror('IP is not allowed. Please change allowed IP ranges for the key in KernelCare Key tab in CLN')
elif code == 5:
logerror('This IP was already used for trial, you cannot use it for trial again')
elif code == 6:
logerror('This IP was banned. Please contact support for more information at https://www.kernelcare.com/support/')
else:
logerror('Unknown Error {0}'.format(code))
if retry: # pragma: no cover
register_retry(url)
return 0
return code or -1
def kcdoctor():
doctor_url = get_patch_server_url("doctor.sh")
logdebug("Requesting doctor script from `{0}`".format(doctor_url))
doctor_filename = KCDOCTOR
with tempfile.NamedTemporaryFile() as doctor_dst:
try:
signature = fetch_signature(doctor_url, doctor_dst.name)
save_to_file(urlopen(doctor_url), doctor_dst.name)
check_gpg_signature(doctor_dst.name, signature)
doctor_filename = doctor_dst.name
except Exception as err:
logerror('Kcare doctor error: {0}. Fallback to the local one.'.format(err))
code, _, stderr = run_command(['bash', doctor_filename, PATCH_SERVER], catch_stderr=True)
if code:
raise KcareError("Script failed with '{0}' {1}".format(stderr, code))
def _systemid():
if not os.path.exists(SYSTEMID):
return None
with open(SYSTEMID, 'r') as fd:
for line in fd:
param, _, value = line.partition('=')
if param.strip() == 'server_id':
return value.strip()
raise KcareError('Unable to parse {0}.'.format(SYSTEMID))
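# Illustration (hypothetical value): a SYSTEMID file containing 'server_id=abc123'
# yields 'abc123'; a file without a server_id line raises KcareError.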
def _alma_systemid():
if not os.path.exists(ALMA_SYSTEMID):
return None
with open(ALMA_SYSTEMID, 'r') as f:
return f.readline().strip()
def _im360_systemid():
if not os.path.exists(IM360_LICENSE_FILE):
return None
data = {}
with open(IM360_LICENSE_FILE) as f:
content = f.read()
if content:
try:
data = json_loads_nstr(content)
except Exception:
pass # we are not interested in why the lic file can't be parsed
return data.get('id')
@cached
def get_serverid():
"""Get server_id or None if not present.
Lookup order: SYSTEMID then IM360_LICENSE_FILE then ALMA_SYSTEMID
"""
return _systemid() or _im360_systemid() or _alma_systemid()
def _try_to_read(filename):
if not os.path.exists(filename):
return
with open(filename) as f:
return f.read().strip()
@cached
def get_auth_token():
return _try_to_read(AUTH_TOKEN_DUMP_PATH)
@cached
def get_cache_key():
return _try_to_read(CACHE_KEY_DUMP_PATH)
KHASH = get_kernel_hash()
def get_kernel_prefixed_url(*parts):
return get_patch_server_url(TEST_PREFIX, *parts)
def get_patch_server_url(*parts): # type: (*str) -> str
return '/'.join(it.strip('/') for it in filter(None, (PATCH_SERVER,) + parts))
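# Illustration: with the default PATCH_SERVER, get_patch_server_url('test', 'doctor.sh')
# yields 'https://patches.kernelcare.com/test/doctor.sh'; empty parts (e.g. an unset
# TEST_PREFIX) are dropped by filter(None, ...).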
def check_new_kc_version():
url = get_patch_server_url('{0}-new-version'.format(EFFECTIVE_LATEST))
try:
urlopen(url)
except URLError:
return False
loginfo('A new version of the KernelCare package is available. To continue to get kernel updates, please install the new version')
return True
# mocked: tests/unit/test_patch_level_info.py
def get_latest_patch_level(reason, policy=POLICY_REMOTE, mode=UPDATE_MODE_MANUAL):
"""
Get patch level to apply.
:param reason: what was the source of request (update, info etc.)
:param policy: REMOTE -- get latest patch_level from patchserver,
LOCAL -- use cached latest,
LOCAL_FIRST -- if cached level is None get latest from patchserver, use cache otherwise
:return: patch_level string
"""
cached_level = get_cache_latest(KHASH)
consider_remote_ex = policy == POLICY_REMOTE or (policy == POLICY_LOCAL_FIRST and cached_level is None)
try:
remote_level = fetch_patch_level(reason, mode)
except Exception as ex:
if consider_remote_ex:
raise
else:
kcarelog.warning('Unable to send data: {0}'.format(ex))
if policy == POLICY_REMOTE:
level = remote_level
else:
level = cached_level
if cached_level is None:
if policy == POLICY_LOCAL:
level = LegacyKernelPatchLevel(KHASH, 0)
elif policy == POLICY_LOCAL_FIRST:
level = remote_level
else:
raise KcareError('Unknown policy, choose one of: REMOTE, LOCAL, LOCAL_FIRST')
return level
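# Behaviour sketch of the policy handling above:
#   POLICY_REMOTE      -- always use the level reported by the patch server; network errors propagate.
#   POLICY_LOCAL       -- the server is still contacted (failures only logged), but the cached level
#                         is used, falling back to level 0 when nothing is cached.
#   POLICY_LOCAL_FIRST -- use the cached level when present, otherwise fall back to the server response.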
def update_patch_type(ptype):
global PATCH_TYPE
if ptype == 'edf':
# The only way a user can get here is by calling kcarectl --set-patch-type;
# we don't support this anyway and can silently ignore it
return
if ptype != 'default':
PATCH_TYPE = ptype
else:
PATCH_TYPE = ''
if probe_patch(fetch_patch_level(reason='probe'), PATCH_TYPE):
update_config(PATCH_TYPE=PATCH_TYPE)
if PATCH_TYPE in ('free', 'extra') and is_cpanel():
gid = FORCE_GID or CPANEL_GID
edit_sysctl_conf(
('fs.enforce_symlinksifowner', 'fs.symlinkown_gid'),
('fs.enforce_symlinksifowner=1', 'fs.symlinkown_gid={0}'.format(gid)),
)
loginfo("'{0}' patch type selected".format(ptype))
else:
raise KcareError("'{0}' patch type is unavailable for your kernel".format(ptype))
def do_update(freezer, mode, policy=POLICY_REMOTE):
"""
:param mode: UPDATE_MODE_MANUAL, UPDATE_MODE_AUTO or UPDATE_MODE_SMART
:param policy: REMOTE -- download latest and patches from patchserver,
LOCAL -- use cached files,
LOCAL_FIRST -- download latest and patches if cached level is None, use cache in other cases
:param freezer: freezer mode
"""
if policy == POLICY_REMOTE:
check_new_kc_version()
try:
level = get_latest_patch_level(reason='update', policy=policy, mode=mode)
except UnknownKernelException as e:
if mode in (UPDATE_MODE_AUTO, UPDATE_MODE_SMART) and IGNORE_UNKNOWN_KERNEL:
msg = str(e)
kcarelog.warning(msg)
return
raise
current_level = loaded_patch_level()
pf = PatchFetcher(level)
pf.fetch_patch()
if not kcare_need_update(applied_level=current_level, new_level=level):
loginfo('No updates are needed for this kernel')
return
# Rotate crash report dumps
try:
clean_directory(KDUMPS_DIR, keep_n=3, pattern="kcore*.dump")
clean_directory(KDUMPS_DIR, keep_n=3, pattern="kmsg*.log")
except Exception:
kcarelog.exception('Error during crash reporter cleanup')
# take into account AUTO_UPDATE config setting in case of `--auto-update` cli option
if mode != UPDATE_MODE_AUTO or AUTO_UPDATE:
with execute_hooks():
pf.fetch_fixups(current_level)
kcare_load(KHASH, level, mode, freezer, use_anchor=mode == UPDATE_MODE_SMART)
clear_cache(KHASH, level)
"""
This is needed to support sticky keys as per
https://cloudlinux.atlassian.net/browse/KCARE-953
"""
STICKY = False
UPDATE_DELAY = None
AUTO_UPDATE_DELAY = None
STICKY_PATCHSET = None
AUTO_STICKY_PATCHSET = None
def get_sticky(mode):
count = sum((bool(STICKY), bool(UPDATE_DELAY or AUTO_UPDATE_DELAY), bool(STICKY_PATCHSET or AUTO_STICKY_PATCHSET)))
if count > 1:
raise KcareError(
'Invalid configuration: conflicting settings STICKY_PATCH,'
' [AUTO_]UPDATE_DELAY or [AUTO_]STICKY_PATCHSET. There should be only one of them'
)
if STICKY:
return STICKY
if mode != UPDATE_MODE_MANUAL:
delay = AUTO_UPDATE_DELAY or UPDATE_DELAY
patchset = AUTO_STICKY_PATCHSET or STICKY_PATCHSET
else:
delay = UPDATE_DELAY
patchset = STICKY_PATCHSET
if delay:
return delay
if patchset:
return 'release-' + patchset
def _stickyfy(prefix, fname):
return prefix + '.' + fname
def stickyfy(file, mode):
"""
Used to add sticky prefix to satisfy KCARE-953
:param file: name of the file to stickify
:return: stickified file.
"""
s = get_sticky(mode)
if not s:
return file
if s != 'KEY':
return _stickyfy(s, file)
server_id = get_serverid()
if not server_id:
kcarelog.info('Patch set to STICKY=KEY, but server is not registered with the key')
sys.exit(-4)
try:
response = urlopen(REGISTRATION_API_URL + '/sticky_patch.plain?server_id={0}'.format(server_id))
except HTTPError as e:
print_cln_http_error(e, e.url)
sys.exit(-5)
res = data_as_dict(nstr(response.read()))
code = int(res['code'])
if code == 0:
return _stickyfy(res['prefix'], file)
elif code == 1:
return file
elif code == 2:
kcarelog.info('Server ID is not recognized. Please check if the server is registered')
sys.exit(-1)
kcarelog.info('Error: ' + res['message'])
sys.exit(-3)
def get_kcare_handler(level):
kcare_formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
if os.getuid() == 0:
kcare_handler = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes=1024**2, backupCount=2) # type: logging.Handler
# We need at least INFO level logs at all times
kcare_handler.setLevel(min(level, logging.INFO))
kcare_handler.setFormatter(kcare_formatter)
return kcare_handler
else:
kcare_handler = logging.StreamHandler()
kcare_handler.setLevel(level)
kcare_handler.setFormatter(kcare_formatter)
return kcare_handler
def get_syslog_handler():
syslog_formatter = logging.Formatter('kcare %(levelname)s: %(message)s')
syslog_handler = logging.handlers.SysLogHandler(address='/dev/log', facility=logging.handlers.SysLogHandler.LOG_USER)
syslog_handler.setLevel(logging.INFO)
syslog_handler.setFormatter(syslog_formatter)
return syslog_handler
def initialize_logging(level):
kcarelog.handlers[:] = []
try:
kcare_handler = get_kcare_handler(level)
kcarelog.addHandler(kcare_handler)
except Exception as ex:
kcarelog.exception(ex)
if os.path.exists('/dev/log'):
try:
syslog_handler = get_syslog_handler()
kcarelog.addHandler(syslog_handler)
except Exception as ex:
kcarelog.exception(ex)
#################################
# from python 2.7.17 ssl stdlib #
#################################
def _dnsname_match(dn, hostname, max_wildcards=1): # pragma: no cover
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
pieces = dn.split(r'.')
leftmost = pieces[0]
remainder = pieces[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError('too many wildcards in certificate DNS name: ' + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
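# Illustration: '*.example.com' matches 'www.example.com' but not 'a.b.example.com'
# (a wildcard spans a single left-most label only), and at most max_wildcards '*'
# characters are accepted per fragment.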
# match_hostname tweaked to get dns names from pyopenssl x509 cert object
def match_hostname(cert, hostname): # pragma: no cover
# Reject an empty certificate up front, before it is inspected below.
if not cert:
raise ValueError(
'empty or no certificate, match_hostname needs a '
'SSL socket or SSL context with either '
'CERT_OPTIONAL or CERT_REQUIRED'
)
san = []
for i in range(cert.get_extension_count()):
e = cert.get_extension(i)
if e.get_short_name() == 'subjectAltName':
san = [it.strip().split(':', 1) for it in str(e).split(',')]
dnsnames = []
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
cn = cert.get_subject().commonName
if _dnsname_match(cn, hostname):
return
dnsnames.append(cn)
if len(dnsnames) > 1:
raise CertificateError("hostname {0} doesn't match either of {1}".format(hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname {0} doesn't match {1}".format(hostname, dnsnames[0]))
else:
raise CertificateError('no appropriate commonName or subjectAltName fields were found')
#####################
# end of ssl stdlib #
#####################
# --- {{{ libcare ---
def get_userspace_cache_path(libname, *parts):
return os.path.join(PATCH_CACHE, 'userspace', libname, *parts)
LIBNAME_MAP = {'mysqld': 'db', 'mariadbd': 'db', 'postgres': 'db', 'qemu-kvm': 'qemu', 'qemu-system-x86_64': 'qemu'}
USERSPACE_MAP = {
'db': ['mysqld', 'mariadbd', 'postgres'],
'qemu': ['qemu-kvm', 'qemu-system-x86_64'],
'libs': ['libc', 'libssl'],
}
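# Illustration of the mappings above: patches for 'mysqld'/'mariadbd'/'postgres' are fetched
# from the 'db' URL namespace and cached under the 'db' userspace cache directory, qemu
# binaries under 'qemu', and anything not listed in LIBNAME_MAP falls back to the 'u' URL
# namespace and the 'libs' cache directory (see fetch_userspace_patch below).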
def fetch_userspace_patch(libname, build_id, patch_level=None):
prefix = TEST_PREFIX or 'main'
libname = urlquote(libname)
url = get_patch_server_url(LIBNAME_MAP.get(libname, 'u'), prefix, libname, build_id, 'latest.v1')
url += '?info=' + encode_server_lib_info(server_lib_info('update', patch_level))
cache_dst = LIBNAME_MAP.get(libname, 'libs')
try:
response = wrap_with_cache_key(urlopen_auth)(url, check_license=False)
except NotFound:
# There is no latest info, so we need to clear cache for corresponding
# build_id to prevent updates by "-ctl" utility.
shutil.rmtree(get_userspace_cache_path(cache_dst, build_id), ignore_errors=True)
raise
set_config_from_patchserver(response.headers)
meta = json_loads_nstr(nstr(response.read()))
level = UserspacePatchLevel(cache_dst, build_id, meta['level'], meta.get('baseurl'))
plevel = str(meta['level'])
patch_path = get_userspace_cache_path(cache_dst, build_id, plevel, 'patch.tar.gz')
if not os.path.exists(patch_path):
url = get_patch_server_url(meta['patch_url'])
try:
fetch_url(url, patch_path, check_signature=USE_SIGNATURE, hash_checker=get_hash_checker(level))
except HTTPError as ex:
# No license - no access
if ex.code in (403, 401):
raise NoLibcareLicenseException('KC+ licence is required')
raise
dst = get_userspace_cache_path(cache_dst, build_id, plevel)
cmd = ['tar', 'xf', patch_path, '-C', dst, '--no-same-owner']
code, stdout, stderr = run_command(cmd, catch_stdout=True, catch_stderr=True)
if code:
raise KcareError("Patches unpacking error: '{0}' '{1}' {2}".format(stderr, stdout, code))
link_name = get_userspace_cache_path(cache_dst, build_id, 'latest')
if not os.path.islink(link_name) and os.path.isdir(link_name):
shutil.rmtree(link_name)
os.symlink(plevel, link_name + '.tmp')
os.rename(link_name + '.tmp', link_name)
def set_libcare_status(enabled):
global LIBCARE_DISABLED
LIBCARE_DISABLED = not enabled
if not enabled:
libcare_server_stop()
update_config(LIBCARE_DISABLED=('FALSE' if enabled else 'YES'))
if enabled:
libcare_server_start()
kcarelog.info('libcare service is ' + ('enabled' if enabled else 'disabled'))
def _libcare_info(patched=True, limit=None):
regexp = '|'.join("({0})".format(proc) for proc in sorted(limit or []))
cmd = ['info', '-j']
if not patched:
cmd += ['-l', '-r', regexp]
try:
lines = libcare_client(*cmd)
except Exception as err:
raise KcareError("Gathering userspace libraries info error: '{0}'".format(err))
result = []
for line in lines.split('\n'):
if line:
try:
result.append(json.loads(line))
except ValueError:
# We have to do that because the socket's output isn't separated into stderr and stdout,
# so there is a chance that some lines will not be JSON
pass
# FIXME: remove this line when library names are separated to a lower
# level from process name and pid
result = [{'comm': line.pop('comm'), 'pid': line.pop('pid'), 'libs': line} for line in result]
for line in result:
line['libs'] = dict((k, v) for k, v in line['libs'].items() if ('patchlvl' in v or not patched))
return result
def _get_patches_info(info):
patches = set()
for rec in info:
for _, data in rec['libs'].items():
patches.add((data['buildid'], data['patchlvl']))
result = []
for cache_dst in USERSPACE_MAP:
for build_id, patchlvl in patches:
patch_info_filename = get_userspace_cache_path(cache_dst, build_id, str(patchlvl), 'info.json')
if os.path.isfile(patch_info_filename):
with open(patch_info_filename, 'r') as fd:
result.append(json.load(fd))
return result
def _libcare_patch_info():
return _get_patches_info(_libcare_info())
@clear_libcare_cache
def libcare_patch_info():
result = _libcare_patch_info()
if not result:
logerror("No patched processes.")
return json.dumps({'result': result})
@clear_libcare_cache
def libcare_info():
result = _libcare_info()
if not result:
logerror("No patched processes.")
return json.dumps({'result': result})
def _libcare_version():
result = {}
for rec in _libcare_patch_info():
result[rec.get('package')] = rec.get('latest-version', '')
return result
def libcare_version(libname):
for package, version in _libcare_version().items():
if libname.startswith(package):
return version
return ''
def libcare_client_format(params):
return b''.join(bstr(p) + b'\0' for p in params) + b'\0'
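# Illustration: libcare_client_format(('info', '-j')) produces b'info\x00-j\x00\x00' --
# each argument is NUL-terminated and the whole message ends with an extra NUL.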
def get_available_libcare_socket():
for libcare_socket in LIBCARE_SOCKET:
if os.path.exists(libcare_socket):
return libcare_socket
raise KcareError("Libcare socket is not found.")
def libcare_client(*params):
if LIBCARE_DISABLED:
raise KcareError('Libcare is disabled.')
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
sock.settimeout(LIBCARE_SOCKET_TIMEOUT)
res = b''
try:
sock.connect(get_available_libcare_socket())
cmd = libcare_client_format(params)
logdebug("Libcare socket send: {cmd}".format(cmd=params))
sock.sendall(cmd)
while True:
data = sock.recv(4096)
if not data:
break
res += data
result = res.decode('utf-8', 'replace')
logdebug("Libcare socket recieved: {result}".format(result=result))
return result
finally:
sock.close()
def libcare_patch_apply(limit):
for dst in limit:
try:
libcare_client('storage', get_userspace_cache_path(dst))
except Exception as err:
raise KcareError("Userspace storage switching error: '{0}'".format(err))
try:
libcare_client('update')
except Exception as err:
raise KcareError("Userspace patch applying error: '{0}'".format(err))
def refresh_applied_patches_list(clbl):
def save_current_state(info):
'''KPT-1543 Save info about applied patches'''
versions, cves = '', ''
try:
if info is None:
info = _libcare_info()
packages = {}
cves_list = []
for rec in _get_patches_info(info):
packages[rec.get('package')] = rec.get('latest-version', '')
for patch in rec.get('patches', []):
cves_list.append(patch.get('cve'))
versions = '\n'.join([' '.join(rec) for rec in packages.items()])
cves = '\n'.join(cves_list)
finally:
atomic_write(LIBCARE_PATCHES, versions, ensure_dir=True)
atomic_write(LIBCARE_CVE_LIST, cves, ensure_dir=True)
def wrapper(*args, **kwargs):
info = None
try:
info = clbl(*args, **kwargs)
return info
finally:
save_current_state(info)
return wrapper
@clear_libcare_cache
@refresh_applied_patches_list
def libcare_unload():
try:
libcare_client('unload')
except Exception as err:
raise KcareError("Userspace patch unloading error: '{0}'".format(err))
@cached
def is_selinux_enabled():
if os.path.isfile('/usr/sbin/selinuxenabled'):
code, _, _ = run_command(['/usr/sbin/selinuxenabled'])
else:
return False
return code == 0
@cached
def kdump_status():
if SKIP_SYSTEMCTL_CHECK or os.path.isfile(SYSTEMCTL):
_, stdout, _ = run_command([SYSTEMCTL, 'is-active', 'kdump'], catch_stdout=True, catch_stderr=True)
return stdout.strip()
return 'systemd-absent'
@cached
def kdumps_latest_event_timestamp():
kdump_path = "/var/crash"
result = None
if os.path.isfile("/etc/kdump.conf"):
with open("/etc/kdump.conf", 'r') as kdump_conf:
for line in kdump_conf:
line = line.strip()
if line.startswith('path '):
_, kdump_path = line.split(None, 1)
if os.path.isdir(kdump_path):
vmcore_list = glob.glob(os.path.join(kdump_path, '*/vmcore'))
if vmcore_list:
result = max(os.path.getctime(it) for it in vmcore_list)
return result
def is_selinux_module_present(semodule_name):
code, out, err = run_command(['/usr/sbin/semodule', '-l'], catch_stdout=True)
if code:
raise KcareError("SELinux modules list gathering error: '{0}' {1}".format(err, code))
for line in out.split('\n'):
if semodule_name in line:
return True
return False
def skip_if_no_selinux_module(clbl):
def wrapper(*args, **kwargs):
if is_selinux_enabled() and not is_selinux_module_present('libcare'):
raise KcareError('SELinux is enabled and kernelcare-selinux is not installed.')
return clbl(*args, **kwargs)
return wrapper
def _get_userspace_procs(info):
result = {} # type: Dict[str, List[Tuple[int, str]]]
for item in info:
for libname, rec in item['libs'].items():
if rec.get('patchlvl'):
if libname not in result:
result[libname] = []
result[libname].append((item['pid'], item['comm']))
return result
def _get_userspace_libs(info):
result = set()
for item in info:
for libname, rec in item['libs'].items():
result.add((libname, rec['buildid'], rec.get('patchlvl', 0)))
return result
def check_userspace_updates(limit=None):
if not limit:
limit = []
for libs in USERSPACE_MAP.values():
limit.extend(libs)
data_before = _libcare_info(patched=False, limit=limit)
before = _get_userspace_procs(data_before)
failed = something_found = False
libs_not_patched = True
for rec in _get_userspace_libs(data_before):
# Download and unpack patches
libname, build_id, patchlvl = rec
try:
fetch_userspace_patch(libname, build_id, patchlvl)
something_found = True
if patchlvl != 0:
libs_not_patched = False
except (NotFound, NoLibcareLicenseException):
pass
except AlreadyTrialedException:
raise
except KcareError as ex:
failed = True
logerror(str(ex))
touch_status_gap_file(filename='.libcarestatus')
return failed, something_found, libs_not_patched, before
@skip_if_no_selinux_module
@clear_libcare_cache
@refresh_applied_patches_list
def do_userspace_update(mode=UPDATE_MODE_MANUAL, limit=None):
"""Patch userspace processes to the latest version."""
# Auto-update means a cron-initiated run; if the LIB_AUTO_UPDATE
# flag is not set in the config, nothing will happen.
if mode == UPDATE_MODE_AUTO and not LIB_AUTO_UPDATE:
return None
if limit is None:
limit = list(USERSPACE_MAP.keys())
process_filter = []
for userspace_patch in limit:
process_filter.extend(USERSPACE_MAP.get(userspace_patch, []))
if not process_filter:
# Unknown limits were defined. Do nothing
loginfo('No such userspace patches: {0}'.format(limit))
return None
failed, something_found, _, before = check_userspace_updates(limit=process_filter)
if failed:
raise KcareError('There were errors while downloading (unpacking) patches.')
if not something_found:
loginfo('No patches were found.')
return None
restore_selinux_context(os.path.join(PATCH_CACHE, 'userspace'))
try:
# Batch apply for all collected patches
libcare_patch_apply(limit)
# TODO: clear the userspace cache. We need the same logic as for the kernel; let's do
# it later to reduce this patch size.
except KcareError as ex:
logerror(str(ex))
raise KcareError('There were errors while applying patches.')
data_after = _libcare_info()
after = _get_userspace_procs(data_after)
if not any(list(item['libs'] for item in data_after)):
# No patches were applied
return None
# Report how many processes were actually patched, based on a before/after diff
logdebug("Patched before: {before}".format(before=before))
logdebug("Patched after: {after}".format(after=after))
uniq_procs_after = set(v for items in after.values() for v in items)
uniq_procs_before = set(v for items in before.values() for v in items)
diff = uniq_procs_after - uniq_procs_before
overall = sum(len(v) for v in after.values())
loginfo(
"The patches have been successfully applied to {count} newly "
"discovered processes. The overall amount of applied patches "
"is {overall}.".format(count=len(diff), overall=overall)
)
for k, v in after.items():
loginfo("Object `{0}` is patched for {1} processes.".format(k, len(v)))
return data_after
def libcare_server_started():
"""Assume that whenever the service is not running, we did not patch anything."""
try:
cmd = [find_cmd('service', '/usr/sbin/', '/sbin/'), 'libcare', 'status']
except Exception: # pragma: no cover unit
return False
code, _, _ = run_command(cmd, catch_stdout=True, catch_stderr=True)
return code == 0
def libcare_server_stop():
try:
cmd = [find_cmd('service', '/usr/sbin/', '/sbin/'), 'libcare', 'stop']
except Exception: # pragma: no cover unit
return
run_command(cmd)
def libcare_server_start():
# we should reset the libcare service status here and restart libcare.socket;
# they can be in a failed state and prevent connection to the socket
if SKIP_SYSTEMCTL_CHECK or os.path.exists(SYSTEMCTL):
run_command([SYSTEMCTL, 'reset-failed', 'libcare'])
run_command([SYSTEMCTL, 'restart', 'libcare.socket'])
else:
try:
cmd = [find_cmd('service', '/usr/sbin/', '/sbin/'), 'libcare', 'start']
except Exception: # pragma: no cover unit
return
run_command(cmd)
# --- end libcare }}} ---
def main():
parser = ArgumentParser(description='Manage KernelCare patches for your kernel')
parser.add_argument('--debug', help='', action='store_true')
parser.add_argument(
'-i',
'--info',
help='Display information about KernelCare. Use with --json parameter to get result in JSON format.',
action='store_true',
)
parser.add_argument('-u', '--update', help='Download latest patches and apply them to the current kernel', action='store_true')
parser.add_argument('--unload', help='Unload patches', action='store_true')
parser.add_argument('--smart-update', help='Patch kernel based on UPDATE POLICY settings', action='store_true')
parser.add_argument('--auto-update', help='Check if update is available, if so -- update', action='store_true')
parser.add_argument(
'--local', help='Update from a server local directory; accepts a path where patches are located', metavar='PATH'
)
parser.add_argument('--patch-info', help='Return the list of applied patches', action='store_true')
parser.add_argument('--freezer', help='Freezer type: full (default), smart, none', metavar='freezer')
parser.add_argument('--nofreeze', help="[deprecated] Don't freeze tasks before patching", action='store_true')
parser.add_argument('--uname', help='Return safe kernel version', action='store_true')
parser.add_argument('--license-info', help='Return current license info', action='store_true')
parser.add_argument('--status', help='Return status of updates', action='store_true')
parser.add_argument('--register', help='Register using KernelCare Key', metavar='KEY')
parser.add_argument(
'--register-autoretry', help='Retry registering indefinitely if failed on the first attempt', action='store_true'
)
parser.add_argument('--unregister', help='Unregister from KernelCare (for key-based servers only)', action='store_true')
parser.add_argument('--check', help='Check if new update available', action='store_true')
parser.add_argument(
'--latest-patch-info',
help='Return patch info for the latest available patch. Use with --json parameter to get result in JSON format.',
action='store_true',
)
parser.add_argument('--test', help='[deprecated] Use --prefix=test instead', action='store_true')
parser.add_argument('--tag', help='Tag server with custom metadata, for ePortal users only', metavar='TAG')
parser.add_argument(
'--prefix',
help='Patch source prefix used to test different builds by downloading builds from different locations based on prefix',
metavar='PREFIX',
)
parser.add_argument('--nosignature', help='Do not check signature', action='store_true')
parser.add_argument(
'--set-monitoring-key', help='Set monitoring key for IP based licenses. 16 to 32 characters, alphanumeric only', metavar='KEY'
)
parser.add_argument('--doctor', help='Submits a vitals report to CloudLinux for analysis and bug-fixes', action='store_true')
parser.add_argument('--enable-auto-update', help='Enable auto updates', action='store_true')
parser.add_argument('--disable-auto-update', help='Disable auto updates', action='store_true')
parser.add_argument(
'--plugin-info',
help='Provides the information shown in control panel plugins for KernelCare. '
'Use with --json parameter to get result in JSON format.',
action='store_true',
)
parser.add_argument(
'--json',
help="Return '--plugin-info', '--latest-patch-info', '--patch-info' and '--info' results in JSON format",
action='store_true',
)
parser.add_argument('--version', help='Return the current version of KernelCare', action='store_true')
parser.add_argument('--kpatch-debug', help='Enable the debug mode', action='store_true')
parser.add_argument('--no-check-cert', help='Disable the patch server SSL certificates checking', action='store_true')
parser.add_argument(
'--set-patch-level',
help='Set the patch level to be applied. To select the latest patch level, set -1',
action='store',
type=int,
default=None,
required=False,
)
parser.add_argument('--check-compatibility', help='Check compatibility.', action='store_true')
parser.add_argument('--clear-cache', help='Clear all cached files', action='store_true')
exclusive_group = parser.add_mutually_exclusive_group()
exclusive_group.add_argument(
'--set-patch-type', help="Set patch type feed. To select default feed use 'default' option", action='store'
)
exclusive_group.add_argument('--edf-enabled', help='Enable exploit detection framework', action='store_true')
exclusive_group.add_argument('--edf-disabled', help='Disable exploit detection framework', action='store_true')
parser.add_argument(
'--set-sticky-patch',
help='Set patch to stick to date in DDMMYY format, or retrieve it from KEY if set to KEY. Leave empty to unstick',
action='store',
default=None,
required=False,
)
parser.add_argument(
'-q', '--quiet', help='Suppress messages, provide only errors and warnings to stderr', action='store_true', required=False
)
parser.add_argument('--has-flags', help='Check agent features')
parser.add_argument('--force', help='Force the action and ignore several restrictions.', action="store_true")
parser.add_argument('--set-config', help='Change configuration option', action='append', metavar='KEY=VALUE')
if not LIBCARE_DISABLED:
parser.add_argument(
'--disable-libcare', help='Disable libcare services', dest='enable_libcare', action='store_const', const=False
)
parser.add_argument(
'--enable-libcare', help='Enable libcare services', dest='enable_libcare', action='store_const', const=True
)
parser.add_argument(
'--lib-update', help='Download latest patches and apply them to the current userspace libraries', action='store_true'
)
parser.add_argument('--lib-unload', '--userspace-unload', help='Unload userspace patches', action='store_true')
parser.add_argument('--lib-auto-update', help='Check if update is available, if so -- update', action='store_true')
parser.add_argument('--lib-info', '--userspace-info', help='Display information about KernelCare+.', action='store_true')
parser.add_argument(
'--lib-patch-info', '--userspace-patch-info', help='Return the list of applied userspace patches', action='store_true'
)
parser.add_argument('--lib-version', '--userspace-version', help='Return safe package version', metavar='PACKAGENAME')
parser.add_argument(
'--userspace-update',
metavar='USERSPACE_PATCHES',
nargs='?',
const="",
help='Download latest patches and apply them to the corresponding userspace processes',
)
parser.add_argument(
'--userspace-auto-update',
help='Download latest patches and apply them to the corresponding userspace processes',
action='store_true',
)
parser.add_argument('--userspace-status', help='Return status of userspace updates', action='store_true')
args = parser.parse_args()
globals().update(get_config_settings())
global PATCH_TYPE, FLAGS
if not LIBCARE_DISABLED:
FLAGS += ['libcare-enabled']
if args.has_flags is not None:
if set(filter(None, args.has_flags.split(','))).issubset(FLAGS):
return 0
else:
return 1
# do not remove args.auto_update!
# once added to a machine, kcare-cron is never changed by package updates;
# old clients have no -q option in their cron,
# so the auto_update default silent mode must be kept forever
global PRINT_LEVEL
if args.quiet or args.auto_update:
if SILENCE_ERRORS:
PRINT_LEVEL = PRINT_CRITICAL
else:
PRINT_LEVEL = PRINT_ERROR
elif args.debug:
PRINT_LEVEL = PRINT_DEBUG
if not args.uname:
if os.getuid() != 0:
print('Please run as root', file=sys.stderr)
return 1
level = logging.INFO
if args.quiet:
level = logging.WARNING
elif args.debug:
level = logging.DEBUG
# should be after root role check to create a log file with correct rights
initialize_logging(level)
if args.clear_cache:
clear_all_cache()
if args.set_patch_level:
global LEVEL
if args.set_patch_level >= 0:
LEVEL = str(args.set_patch_level)
update_config(PATCH_LEVEL=LEVEL)
else:
LEVEL = None
update_config(PATCH_LEVEL='')
if args.set_sticky_patch is not None:
update_config(STICKY_PATCH=args.set_sticky_patch)
global STICKY
STICKY = args.set_sticky_patch
if args.nosignature:
global USE_SIGNATURE
USE_SIGNATURE = False
if args.no_check_cert:
global CHECK_SSL_CERTS
CHECK_SSL_CERTS = False
if args.kpatch_debug:
global KPATCH_DEBUG
KPATCH_DEBUG = True
if args.check_compatibility:
check_compatibility()
# EDF do nothing
if args.edf_enabled:
warnings.warn('Flag --edf-enabled has been deprecated and will be not available in future releases.', DeprecationWarning)
elif args.edf_disabled:
if PATCH_TYPE == 'edf':
args.set_patch_type = ('' if PREV_PATCH_TYPE == 'edf' else PREV_PATCH_TYPE) or 'default'
args.update = True
global TEST_PREFIX
if args.prefix:
TEST_PREFIX = args.prefix
if args.test:
warnings.warn('Flag --test has been deprecated and will be not available in future releases.', DeprecationWarning)
TEST_PREFIX = 'test'
TEST_PREFIX = TEST_PREFIX.strip('/')
if TEST_PREFIX and TEST_PREFIX not in EXPECTED_PREFIX:
kcarelog.warning('Prefix `{0}` is not one of the expected prefixes: {1}.'.format(TEST_PREFIX, ' '.join(EXPECTED_PREFIX)))
if args.local:
global UPDATE_FROM_LOCAL
UPDATE_FROM_LOCAL = True
global PATCH_SERVER
PATCH_SERVER = 'file:' + args.local
if args.set_patch_type:
update_patch_type(args.set_patch_type)
if PATCH_TYPE == 'edf':
PATCH_TYPE = edf_fallback_ptype()
warnings.warn('edf patches are deprecated. Fallback to {0}'.format(PATCH_TYPE or 'default'), DeprecationWarning)
apply_ptype(PATCH_TYPE)
if args.doctor:
kcdoctor()
return
if args.plugin_info:
if args.json:
plugin_info(fmt='json')
else:
plugin_info()
return
if args.enable_auto_update:
update_config(AUTO_UPDATE='YES')
return
if args.disable_auto_update:
update_config(AUTO_UPDATE='NO')
return
if args.set_config:
update_config_params(args.set_config)
return
if args.set_monitoring_key:
return set_monitoring_key_for_ip_license(args.set_monitoring_key)
if args.unregister:
unregister()
if args.register:
if PATCH_TYPE == 'free':
update_config(PATCH_TYPE='extra')
return register(args.register, args.register_autoretry)
if args.license_info:
# license_info returns zero if no valid license found and non-zero otherwise
if license_info() != 0:
return 0
else:
return 1
if args.tag is not None:
return tag_server(args.tag)
if args.version:
print(VERSION)
if getattr(args, 'enable_libcare', None) is not None:
set_libcare_status(args.enable_libcare)
return 0
if not LIBCARE_DISABLED:
if args.userspace_status:
return get_userspace_update_status()
if args.lib_update:
if do_userspace_update() is not None:
loginfo('Userspace patches are applied.')
if args.lib_auto_update:
do_userspace_update(mode=UPDATE_MODE_AUTO)
elif args.lib_unload:
libcare_unload()
loginfo('Userspace patches are unloaded.')
if args.lib_info:
print(libcare_info())
if args.lib_patch_info:
print(libcare_patch_info())
if args.lib_version and libcare_server_started():
print(libcare_version(args.lib_version))
if args.userspace_update is not None:
if args.userspace_update == '':
# Get from config or defaults
limit = USERSPACE_PATCHES or list(USERSPACE_MAP.keys())
else:
limit = [ptch.strip().lower() for ptch in args.userspace_update.split(',')]
if do_userspace_update(limit=sorted(limit)) is not None:
loginfo('Userspace patches are applied.')
if args.userspace_auto_update:
do_userspace_update(mode=UPDATE_MODE_AUTO, limit=None)
if args.info:
print(kcare_info(is_json=args.json))
freezer = ''
if args.nofreeze:
warnings.warn('Flag --nofreeze has been deprecated and will be not available in future releases.', DeprecationWarning)
freezer = 'none'
if args.freezer:
freezer = args.freezer
if args.smart_update:
do_update(freezer, mode=UPDATE_MODE_SMART, policy=UPDATE_POLICY)
if args.update:
do_update(freezer, mode=UPDATE_MODE_MANUAL)
loginfo('Kernel is safe')
if args.uname:
print(kcare_uname())
if args.unload:
kcare_unload(freezer, force=args.force)
loginfo('KernelCare protection disabled. Your kernel might not be safe')
if args.auto_update:
global CHECK_CLN_LICENSE_STATUS
CHECK_CLN_LICENSE_STATUS = False
# wait to prevent spikes at the beginning of each minute KPT-1874
# bandit warns about using random.uniform for security which is not the case here
time.sleep(random.uniform(0, 60)) # nosec B311
do_update(freezer, mode=UPDATE_MODE_AUTO)
if args.patch_info:
patch_info(is_json=args.json)
if args.status:
return get_update_status()
if args.latest_patch_info:
kcare_latest_patch_info(is_json=args.json)
if args.check:
kcare_check()
# No args were provided
if len(sys.argv) == 1:
show_generic_info()
if __name__ == '__main__': # pragma: no cover unit
try:
sys.exit(main())
except URLError as err:
logerror('{0}: {1}'.format(err, getattr(err, 'url', 'unknown')))
except KcareError as err:
logerror(str(err))
sys.exit(1)
except Exception as err:
if isinstance(err, SafeExceptionWrapper):
logexc(err.inner)
else:
logexc(err)
send_exc()
sys.exit(1)