# See included LICENSE file for additional license and copyright information.
from io import BytesIO
import datetime
import errno
import glob
import grp
import json
import logging
import os
import os.path
import posixpath
import pycurl
import random
import re
import select
import socket
import stat
import subprocess
import sys
import textwrap
import time
import traceback
import yaml # noqa: E402
import shutil
try:
from yaml import CSafeLoader as yaml_loader
from yaml import CSafeDumper as yaml_dumper
except ImportError:
from yaml import SafeLoader as yaml_loader
from yaml import SafeDumper as yaml_dumper
from uptrack import version # noqa: E402
from six.moves.urllib import parse # noqa: E402
try:
import gconf
have_gconf = True
except ImportError:
have_gconf = False
if sys.version_info < (3, 2):
from six.moves.configparser import SafeConfigParser as ConfigParser
from six.moves.configparser import Error as ConfigParserError
else:
from configparser import ConfigParser
from configparser import Error as ConfigParserError
__version__ = version.version
STATUS_FILE_FORMAT_VERSION = "2"
USERAGENT = 'Uptrack/' + __version__
BUG_EMAIL = 'Oracle support'
USE_SERVER_RESOLVER = True
UPTRACK_CONFIG_FILE = '/etc/uptrack/uptrack.conf'
UPTRACK_UUID_FILE = '/var/lib/uptrack/uuid'
UPTRACK_SERIAL_FILE = '/var/lib/uptrack/serial'
UPTRACK_CACHE_DIR = "/var/cache/uptrack"
UPTRACK_EM_STATUS_DIR = "/var/run/ksplice-em"
UPTRACK_EM_STATUS = "/var/run/ksplice-em/kernel_status"
UPDATE_REPO_URL = "https://updates.ksplice.com/update-repository"
UPDATE_REPO_PATH = "/usr/lib/uptrack-repository"
UPTRACK_OFFLINE_DIR = "ksplice/kernel-offline"
# We can't put this under /var/cache/uptrack, because we want
# it to be world-readable.
UPTRACK_EFFECTIVE_KERNEL_FILE = '/var/lib/uptrack/effective_kernel'
# TODO: We should eventually replace occurrences of these with more
# specific codes (below)
ERROR_GENERIC_ERROR = 1
# This value, in Uptrack.Result.code, indicates that the error was due
# to a network failure.
ERROR_NO_NETWORK = 10
# Uptrack threw an unhandled exception
ERROR_INTERNAL_ERROR = 11
# User answered "no" to the confirmation prompt
ERROR_USER_NO_CONFIRM = 12
# Running kernel is not supported by Uptrack
ERROR_UNSUPPORTED = 13
# The user's access key was invalid
ERROR_INVALID_KEY = 14
# The Uptrack client is too old to manage the updates
ERROR_TOO_OLD_INSTALL = 15
# The Uptrack client is too old to even parse packages.yml
ERROR_TOO_OLD_PARSE = 16
# Your subscription to the Ksplice Uptrack service has expired
ERROR_EXPIRED = 17
# The Uptrack server returned an internal error
ERROR_INTERNAL_SERVER_ERROR = 18
# The machine has not yet been activated for use with the Uptrack service.
ERROR_MACHINE_NOT_ACTIVATED = 19
# The user's access key is missing
ERROR_MISSING_KEY = 20
# The sysfs filesystem isn't mounted at /sys
ERROR_SYS_NOT_MOUNTED = 21
# The Uptrack server returned malformed YAML
ERROR_MALFORMED_YAML = 22
dir_perms = [
(UPTRACK_CACHE_DIR, 0, grp.getgrnam("adm")[2],
stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP),
]
def is_offline():
test_path = os.path.join('/usr/lib', UPTRACK_OFFLINE_DIR, 'enabled')
if os.path.exists(test_path):
logging.debug('Running offline uptrack.')
return True
logging.debug('Running online uptrack.')
return False
def server_is_oracle(config):
try:
url = config.remoteroot
# The client test suite patches UPDATE_REPO_URL to localhost, so we
# can't just match against (ksplice|oracle).com. In production this
# test is also problematic because we don't know in advance what the
# domain name of an Oracle updates server will be, as different OCI
# realms use different domains. We get around this by checking whether
# the OCI domain has been set; if it has, we assume an Oracle domain.
# If the user bypasses the region-local updates server for whatever
# reason, they fall back to one of the servers in the check below.
if url == UPDATE_REPO_URL or config.oci_domain is not None:
return True
netloc = parse.urlparse(url)[1]
return bool(re.match(r'.*\.(ksplice|oracle|oraclecloud)\.com(:[0-9]+)?$', netloc))
except Exception:
return False
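# For illustration (hypothetical hosts, config.oci_domain unset):
#   remoteroot = 'https://updates.ksplice.com/update-repository'              -> True
#   remoteroot = 'https://updates.ksplice.uk-london-1.oci.oraclecloud.com/'   -> True
#   remoteroot = 'https://mirror.example.org/uptrack'                         -> False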
def mkdirp(dir):
"""
Essentially, mkdir -p
"""
for name, uid, gid, mode in dir_perms:
if dir.startswith(name):
try:
os.makedirs(name)
except OSError as e:
if e.errno != errno.EEXIST:
raise
else:
os.chmod(name, mode)
os.chown(name, uid, gid)
try:
os.makedirs(dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def write_file(path, data):
fh = open(path, 'w')
try:
fh.write(data)
finally:
fh.close()
def write_file_binary(path, data):
with open(path, 'wb') as fh:
fh.write(data)
# Accept a mode argument so that callers can pass 'rb' if they need binary IO.
def read_file(path, mode='r'):
fh = open(path, mode)
try:
return fh.read()
finally:
fh.close()
def yaml_load(stream, **kwargs):
return yaml.load(stream, Loader=yaml_loader, **kwargs)
def yaml_dump(obj, stream=None, **kwargs):
return yaml.dump(obj, stream, Dumper=yaml_dumper, **kwargs)
def getConfigBooleanOrDie(config, section, option, default):
"""
Return the value of a boolean config option, or `default` if no value is
given.
Raise a ResultException on invalid (non-boolean) values.
"""
if config.has_option(section, option):
try:
return config.getboolean(section, option)
except ValueError as e:
msg = """Unable to read %s setting from %s.
%s
Please check that %s is set to 'yes' or 'no' and try again.""" % (
option, UPTRACK_CONFIG_FILE, e, option)
raise ResultException(1, msg)
else:
return default
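# For example (mirroring the call made in setMisc() below), with
# "[Settings]\ndebug_to_server = yes" in uptrack.conf:
#
#     getConfigBooleanOrDie(config, 'Settings', 'debug_to_server', True)  # -> True
#
# A non-boolean value such as "maybe" raises ResultException using the
# message template above.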
def queryRealArch(userarch):
try:
p = subprocess.Popen(['setarch', 'linux64', 'uname', '-m'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out = p.communicate()[0].decode('utf-8').strip()
if p.returncode == 0:
return out
p = subprocess.Popen(['setarch', 'x86_64', 'uname', '-m'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out = p.communicate()[0].decode('utf-8').strip()
if p.returncode == 0:
return out
except (subprocess.CalledProcessError, OSError):
logging.debug("Unable to determine the kernel architecture")
logging.debug(traceback.format_exc())
return userarch
def getUname():
"""
Gets the uname, but lies a little, since the arch field is
actually governed by 'personality' and not the real architecture.
Note that this returns both the architecture Uptrack is being run
under, as well as the architecture of the kernel itself (i.e.
'uname -m' and 'linux64 uname -m').
"""
sysname, hostname, release, version, userarch = os.uname()
arch = queryRealArch(userarch)
if arch in ['i686', 'i586', 'i486']:
arch = 'i386'
if userarch in ['i686', 'i586', 'i486']:
userarch = 'i386'
uname = (sysname, hostname, release, version, arch, userarch)
return uname
__curl = None
def initCurl(config=None):
"""Initialize the shared cURL object for getCurl().
"""
global __curl
if __curl is None:
__curl = pycurl.Curl()
__curl.setopt(pycurl.USERAGENT, USERAGENT)
__curl.setopt(pycurl.OPT_FILETIME, 1)
__curl.setopt(pycurl.FOLLOWLOCATION, 1)
__curl.setopt(pycurl.MAXREDIRS, 5)
__curl.setopt(pycurl.ENCODING, '')
if config and config.ssl_ca_certs:
for type, value in config.ssl_ca_certs:
__curl.setopt(type, value)
__curl.setopt(pycurl.CONNECTTIMEOUT, 30)
__curl.setopt(pycurl.TIMEOUT, 600)
if config and config.proxy is not None:
__curl.setopt(pycurl.PROXY, config.proxy)
if config and getattr(config, 'verbose', 0) > 1:
__curl.setopt(pycurl.VERBOSE, 1)
def getCurl():
"""Return a shared cURL object for use by Uptrack.
For performance, this always returns the same cURL object, in
order to allow libcURL to reuse connections as much as
possible. In order for this to work properly, callers should
always explicitly set the HTTP method they desire before calling
`.perform()`, and should reset any other unusual properties they
set on the cURL object to a reasonable default value when they're
done.
Needless to say, this is not thread-safe.
You must call initCurl() before using this function.
"""
return __curl
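# A minimal usage sketch (the URL is a placeholder); this mirrors the
# pattern download() below uses with the shared handle:
#
#     initCurl(config)
#     c = getCurl()
#     c.setopt(pycurl.URL, 'https://updates.ksplice.com/example'.encode('ISO-8859-1'))
#     c.setopt(pycurl.HTTPGET, 1)              # pick the HTTP method explicitly
#     body = BytesIO()
#     c.setopt(pycurl.WRITEFUNCTION, body.write)
#     c.perform()
#     rcode = c.getinfo(pycurl.RESPONSE_CODE)  # then reset any unusual options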
def my_cmp(x, y):
"""
Replacement for built-in function cmp that was removed in Python 3
Compare the two objects x and y and return an integer according to
the outcome. The return value is negative if x < y, zero if x == y
and strictly positive if x > y.
From https://portingguide.readthedocs.io/en/latest/comparisons.html
"""
return (x > y) - (x < y)
def verrevcmp(a, b):
"""Emulates dpkg's verrevcmp() in lib/vercmp.c."""
def order(x):
if x == '~':
return -1
if x.isdigit():
return 0
if not x:
return 0
if x.isalpha():
return ord(x)
return ord(x) + 256
def num(s):
if not s:
return 0
return int(s)
while a or b:
while (a and not a[0].isdigit()) or (b and not b[0].isdigit()):
d = my_cmp(order(a[:1]), order(b[:1]))
if d:
return d
a = a[1:]
b = b[1:]
an, a = re.match('^([0-9]*)(.*)', a).groups()
bn, b = re.match('^([0-9]*)(.*)', b).groups()
d = my_cmp(num(an), num(bn))
if d:
return d
return 0
def parseversion(v):
"""Emulates dpkg's parseversion(), in lib/parsehelp.c."""
if ':' in v:
epochstr, rest = v.split(':', 1)
epoch = int(epochstr)
else:
epoch = 0
rest = v
if '-' in rest:
version, revision = rest.split('-', 1)
else:
version, revision = rest, ''
return epoch, version, revision
def compareversions(a, b):
"""Emulates dpkg --compare-versions. Returns -1, 0, 1 like cmp()."""
ae, av, ar = parseversion(a)
be, bv, br = parseversion(b)
return my_cmp(ae, be) or verrevcmp(av, bv) or verrevcmp(ar, br)
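# Worked examples of the dpkg ordering implemented above (illustrative only):
#
#     compareversions('1.0-1', '1.0-2')      # -> -1 (smaller Debian revision)
#     compareversions('2:0.9-1', '1:1.0-1')  # ->  1 (epoch dominates)
#     compareversions('1.0~rc1-1', '1.0-1')  # -> -1 ('~' sorts before anything)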
class Result(object):
def __init__(self, code=0, message=''):
self.code = code
self.message = message
self.succeeded = []
self.failed = []
self.debug = None
self.alert = None
self.desupported = None
self.tray_icon_error = None
self.newkernel = False
self.uptrack_log = None
def resultFromPycurl(config, e):
if e.args[0] in [pycurl.E_COULDNT_RESOLVE_HOST,
pycurl.E_COULDNT_CONNECT,
pycurl.E_OPERATION_TIMEOUTED]:
msg = ("Could not connect to the Ksplice Uptrack server. "
"A network connection is needed to ensure you have "
"the latest list of updates to install. "
"Please check your Internet connection and try again. "
"If this computer does not have direct access to the Internet, "
"you will need to configure an https proxy in %s." % UPTRACK_CONFIG_FILE)
elif e.args[0] == pycurl.E_COULDNT_RESOLVE_PROXY:
msg = ("Could not resolve your proxy server (%s) while trying to "
"connect to the Ksplice Uptrack server. You should check that "
"this machine can directly connect to the proxy server configured "
"in %s." % (config.proxy, UPTRACK_CONFIG_FILE))
elif e.args[0] == pycurl.E_URL_MALFORMAT:
msg = ("Malformed URL <%s> for Uptrack server. Please correct the "
"value of Network.update_repo_url in %s." %
(config.remoteroot, UPTRACK_CONFIG_FILE))
elif e.args[0] == pycurl.E_SSL_CACERT:
msg = "Could not verify the Ksplice Uptrack server's SSL certificate. "
if server_is_oracle(config):
msg += ("Check your network configuration, and contact %s for "
"assistance if you are unable to resolve this error." %
(BUG_EMAIL,))
else:
msg += ("You may need to update ssl_ca_cert_file or "
"ssl_ca_cert_dir in %s with the path to an appropriate "
"CA. Please consult %s for assistance if you are "
"unable to resolve this error." %
(UPTRACK_CONFIG_FILE, BUG_EMAIL))
else:
msg = ("Unexpected error communicating with the Ksplice Uptrack server. "
"Please check your network connection and try again. "
"If this error re-occurs, contact %s. " %
(BUG_EMAIL,))
msg = textwrap.fill(msg) + "\n\n(Network error: " + e.args[1] + ")"
return Result(ERROR_NO_NETWORK, msg)
class ResultException(Exception):
def __init__(self, code, message):
# We can't use super here because Exception is an old-style
# class in python 2.4
Exception.__init__(self, code, message)
self.result = Result(code, message)
server_error_exception = ResultException(ERROR_INTERNAL_SERVER_ERROR, """\
The Ksplice Uptrack service has experienced a transient error. Please
wait a few minutes and try again. If this error persists, please
contact %s for assistance.""" % (BUG_EMAIL,))
class ActionResult(object):
def __init__(self, update, command):
self.code = 0
self.message = ''
self.update = update
self.command = command
self.abort_code = None
self.stack_check_processes = None
self.nomatch_modules = None
self.locked_modules = []
self.usedby_modules = []
self.depmod_needed = False
self.debug = ''
self.core_version = update.getCoreVersion()
def asDict(self):
d = {}
d['Command'] = self.command
d['ID'] = self.update.id
d['Name'] = self.update.name
d['Message'] = self.message
d['Abort'] = self.abort_code
d['Core Version'] = self.core_version
d['Stack Check'] = self.stack_check_processes
d['Nonmatching Modules'] = self.nomatch_modules
d['Locked Modules'] = self.locked_modules
d['UsedBy Modules'] = self.usedby_modules
return d
def getKernelDict():
sysname, _, release, version, userarch = os.uname()
return {'Sysname': sysname, 'Release': release,
'Version': version, 'UserArchitecture': userarch}
class Status(object):
def __init__(self, statusdir):
self.statusdir = statusdir
self.statusloc = os.path.join(statusdir, 'status')
self.resultsloc = os.path.join(statusdir, 'results')
self.upgradeloc = os.path.join(statusdir, 'upgrade_plan')
self.stamploc = os.path.join(statusdir, 'results.server-stamp')
# An explanation of return values:
# - 'None' means status or results file does not exist
# - If x is returned, x['Result']['Code'] will be populated
# with an error code and if the error code is nonzero,
# x['Result']['Message'] will have an error message.
# - If the error code is 2, then the upgrade plan is not available
# - If the error code is 3, then the installed updates are not available.
def readStatus(self):
try:
f = open(self.statusloc)
status = yaml_load(f)
f.close()
except IOError as e:
if e.errno == errno.EACCES:
if os.path.exists('/etc/debian_version'):
recommendation = 'sudo adduser $USER adm'
else:
recommendation = 'gpasswd -a <your username> adm (as root)'
status = {}
status['Result'] = {}
status['Result']['Code'] = 3
status['Result']['Message'] = \
("Permission denied reading the status file. You need to be in the adm "
"group in order to use the the Ksplice Uptrack Manager; you can add yourself by running\n\n"
"%s\n\nYou will need to log out and back in "
"for this change to take effect." % recommendation)
return status
elif e.errno == errno.ENOENT:
return None
else:
status = {}
status['Result'] = {}
status['Result']['Code'] = 3
status['Result']['Message'] = "Error reading status file (%s): %s\n" % \
(self.statusloc, os.strerror(e.errno))
return status
try:
f = open(self.upgradeloc)
upgrade = yaml_load(f)
f.close()
status.update(upgrade)
except IOError as e:
if e.errno == errno.ENOENT:
status['Plan'] = []
else:
status['Plan'] = []
status['Result'] = {}
status['Result']['Code'] = 2
status['Result']['Message'] = "Error reading upgrade plan (%s): %s\n" % \
(self.upgradeloc, os.strerror(e.errno))
return status
try:
f = open(self.resultsloc)
results = yaml_load(f)
f.close()
status.update(results)
except IOError as e:
status['Result'] = {}
if e.errno == errno.ENOENT:
status['Result']['Code'] = 0
else:
status['Result']['Code'] = 1
status['Result']['Message'] = "Error reading results file (%s): %s\n" % \
(self.resultsloc, os.strerror(e.errno))
return status
def _writeFile(self, contents, file):
dir = os.path.dirname(file)
if not os.path.isdir(dir):
os.makedirs(dir)
f = open(file, 'w')
yaml_dump(contents, f, version=(1, 1),
explicit_start=True, explicit_end=True)
f.close()
def addIdentity(config, d, local_status=None):
d['Client'] = {}
d['Client']['Hostname'] = getattr(config, 'hostname', None)
d['Client']['FullHostname'] = getattr(config, 'fullhostname', None)
d['Client']['Key'] = config.accesskey
d['Client']['UUID'] = config.uuid
if config.newuuid:
d['Client']['NewUUID'] = config.newuuid
if config.olduuid:
d['Client']['OldUUID'] = config.olduuid
d['Client']['CPUInfo'] = config.cpuinfo
d['Client']['UptrackVersion'] = __version__
try:
d['Client']['Uptime'] = read_file('/proc/uptime').split()[0]
except IOError:
logging.debug(traceback.format_exc())
d['Client']['Uptime'] = -1
try:
d['Client']['RebootsSaved'] = len(open(os.path.join(config.localroot,
'reboots_saved')).readlines())
except IOError as e:
if e.errno == errno.ENOENT:
d['Client']['RebootsSaved'] = 0
else:
d['Client']['RebootsSaved'] = -1
logging.debug(traceback.format_exc())
if inVirtualBox():
d['Client']['VirtualBox'] = True
d['Client']['VMInfo'] = config.vminfo
if 'IP' in config.localip:
d['Client']['LocalIP'] = config.localip['IP']
else:
d['Client']['LocalIP_error'] = config.localip['Error']
d['Client']['Config'] = {}
d['Client']['Config']['Autoinstall'] = getattr(config, 'cron_autoinstall', False)
if getattr(config, 'init', None) is not None:
d['Client']['Config']['Init'] = getattr(config, 'init')
d['Client']['Config']['Cron'] = getattr(config, 'cron', False)
d['Client']['MmapMinAddr'] = getMmapMinAddr()
serial_stat = getattr(config, 'serial_stat', None)
if serial_stat is not None:
d['Client']['SerialStat'] = serial_stat
d['Client']['Tools'] = {}
for key, path in [('Depmod', '/sbin/depmod'), ('Modprobe', '/sbin/modprobe')]:
val = {}
try:
val['Stat'] = tuple(os.stat(path))
except OSError:
val['Stat'] = ()
try:
val['Link'] = os.readlink(path)
except OSError:
val['Link'] = ''
d['Client']['Tools'][key] = val
d['Kernel'] = {}
d['Kernel']['Sysname'] = config.sysname
d['Kernel']['Release'] = config.release
d['Kernel']['Version'] = config.version
d['Kernel']['Architecture'] = config.arch
d['Kernel']['UserArchitecture'] = config.userarch
if config.run_uuid:
d['RunUUID'] = config.run_uuid
else:
d['RunUUID_error'] = config.run_uuid_error
if local_status is not None:
effective = local_status.getEffective()
if effective is not None:
effective = effective['PackageVersion']
d['ClientEffectiveKernel'] = effective
addIdentity = staticmethod(addIdentity)
def writeStatus(self, local, new_client, installed_updates, public=False):
status = {}
status['Status format version'] = STATUS_FILE_FORMAT_VERSION
status['Time'] = datetime.datetime.now(datetime.timezone.utc)
# EM status only contains info about updates, no client keys etc
if not public:
self.addIdentity(local.client_config, status, local_status=local)
status['Updates'] = {}
status['Updates']['Installed'] = []
installed_sorted = sorted(installed_updates, key=lambda u: u.order)
for u in installed_sorted:
d = {}
d['ID'] = u.id
d['Name'] = u.name
status['Updates']['Installed'].append(d)
status['New client'] = new_client
if public:
outfile = UPTRACK_EM_STATUS
else:
outfile = self.statusloc
mkdirp(os.path.dirname(outfile))
self._writeFile(status, outfile)
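# For reference, the public (EM) status document written above has roughly
# this shape (a trimmed sketch; IDs, names, and the timestamp are
# placeholders, and key order follows the YAML dumper):
#
#     ---
#     New client: false
#     Status format version: '2'
#     Time: 2025-01-01 00:00:00+00:00
#     Updates:
#       Installed:
#       - ID: abcd1234
#         Name: 'CVE-XXXX-YYYY: example description'
#     ...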
def writeResults(self, local, res):
results = {}
results['Results format version'] = STATUS_FILE_FORMAT_VERSION
results['Time'] = datetime.datetime.now(datetime.timezone.utc)
self.addIdentity(local.client_config, results, local_status=local)
results['Result'] = {}
results['Result']['Succeeded'] = []
for action in res.succeeded:
d = action.asDict()
results['Result']['Succeeded'].append(d)
results['Result']['Failed'] = []
for action in res.failed:
d = action.asDict()
results['Result']['Failed'].append(d)
results['Result']['Code'] = res.code
results['Result']['Message'] = res.message
if res.debug is not None:
results['Debug'] = res.debug
if res.uptrack_log is not None:
results['UptrackLog'] = res.uptrack_log
if res.alert is not None:
results['Result']['Alert'] = res.alert
if res.desupported is not None:
results['Result']['Desupported'] = res.desupported
if res.tray_icon_error is not None:
results['Result']['TrayIconError'] = res.tray_icon_error
if res.newkernel:
results['Result']['New Kernel'] = True
if local.client_config.uninstall:
results['Result']['Uninstalled'] = True
self._writeFile(results, self.resultsloc)
def writePlan(self, name, actions, public=False):
plan = {}
plan[name.title() + ' plan format version'] = STATUS_FILE_FORMAT_VERSION
plan['Time'] = datetime.datetime.now(datetime.timezone.utc)
plan['Plan'] = [dict([(k, act[k]) for k in
('Command', 'ID', 'Name', 'EffectiveKernel') if k in act])
for act in actions]
if public:
outfile = os.path.join(UPTRACK_EM_STATUS_DIR, 'kernel_' + name + '_plan')
else:
outfile = os.path.join(self.statusdir, name + '_plan')
self._writeFile(plan, outfile)
def writeUpgradePlan(self, plan):
self.writePlan('upgrade', plan)
self.writePlan('upgrade', plan, public=True)
def writeInitPlan(self, plan):
self.writePlan('init', plan)
def writeRemovePlan(self, plan):
self.writePlan('remove', plan)
def writeEffectiveKernel(self, effective, ids):
out = {'EffectiveKernel': effective, 'OriginalKernel': getKernelDict(), 'Installed': ids}
self._writeFile(out, UPTRACK_EFFECTIVE_KERNEL_FILE)
def sendResultToServer(self, config):
try:
# Results file might not exist if this is the first time
# uptrack is run and there is nothing to report (e.g. 'show')
contents = read_file(self.resultsloc)
except IOError:
return
results_time = yaml_load(contents)['Time']
try:
stamp_time = yaml_load(read_file(self.stamploc))
if stamp_time >= results_time:
return
except (IOError, yaml.YAMLError, TypeError):
pass
status_url = posixpath.join(config.remote,
parse.quote('result'))
c = getCurl()
c.setopt(pycurl.URL, status_url.encode('ISO-8859-1'))
c.setopt(pycurl.HTTPPOST, [('result', contents)])
c.setopt(pycurl.WRITEFUNCTION, lambda data: None)
c.perform()
yaml_dump(results_time, open(self.stamploc, 'w'))
class LocalStatus(object):
def __init__(self, config, remote_repo, logger):
self.client_config = config
self.statusdir = config.local
self.installed = set()
self.new_client = False
self.effective_kernel = None
self.remote_repo = remote_repo
self.logger = logger
def getInstalledIDs(self):
installed_ids = []
for f in glob.glob('/sys/module/ksplice_*/ksplice'):
if read_file(os.path.join(f, 'stage')).strip() == 'applied':
installed_ids.append(re.match('^/sys/module/ksplice_(.*)/ksplice$',
f).group(1))
for f in glob.glob('/sys/kernel/ksplice/*/stage'):
if read_file(f).strip() == 'applied':
installed_ids.append(re.match('^/sys/kernel/ksplice/(.*)/stage$',
f).group(1))
return installed_ids
def setEffective(self, effective):
sysname, arch, release, version = effective[0].split('/')
self.effective_kernel = {'Sysname': sysname, 'Architecture': arch,
'Release': release, 'Version': version, 'PackageVersion': effective[1]}
def getEffective(self):
"""Returns the effective kernel, either as set in this run or as
loaded from disk. Returns None if the effective kernel cannot
be determined."""
if self.effective_kernel is not None:
return self.effective_kernel
try:
f = open(UPTRACK_EFFECTIVE_KERNEL_FILE, 'r')
effective = yaml_load(f)
f.close()
except (IOError, yaml.YAMLError):
return None
try:
# Check that we booted into the same kernel as when the effective kernel
# data was written.
if getKernelDict() != effective['OriginalKernel']:
return None
# Check that we have the same updates loaded now as then.
were_installed = set(effective['Installed'])
now_installed = set(self.getInstalledIDs())
if were_installed != now_installed:
return None
self.effective_kernel = effective['EffectiveKernel']
return self.effective_kernel
except (TypeError, KeyError):
return None
def getInstalledUpdates(self):
list_installed = []
for id in self.getInstalledIDs():
u = self.remote_repo.idToUpdate(id)
if u:
list_installed.append(u)
self.installed = set(list_installed)
return self.installed
def unpackPlan(self, plan):
"""Augment a plan we read or downloaded with some extra info."""
for act in plan:
act['Update'] = self.remote_repo.idToUpdate(act['ID'])
act['Name'] = act['Update'].name
def readPlan(self, which_plan):
f = open(os.path.join(self.statusdir, which_plan + '_plan'), "r")
actions = yaml_load(f)['Plan']
f.close()
self.unpackPlan(actions)
return actions
def writeOutStatus(self, res, upgrade_plan, init_plan, remove_plan):
s = Status(self.statusdir)
logging.debug("Writing status to file.")
try:
# Call getEffective in case the file already has an effective
# version, which we have neither loaded nor updated.
self.getEffective()
installed = self.getInstalledUpdates()
s.writeStatus(self, self.new_client, installed)
s.writeStatus(self, self.new_client, installed, public=True)
if res is not None:
if res.code != 0:
res.uptrack_log = self.logger.getDebugLog()
s.writeResults(self, res)
if upgrade_plan is not None:
s.writeUpgradePlan(upgrade_plan)
if init_plan is not None:
s.writeInitPlan(init_plan)
if remove_plan is not None:
s.writeRemovePlan(remove_plan)
if self.effective_kernel is not None:
s.writeEffectiveKernel(self.effective_kernel, [u.id for u in installed])
except Exception:
logging.warning("Unable to write out status files")
logging.debug(traceback.format_exc())
return False
if (not self.client_config.in_offline_mode and not self.client_config.allow_no_net and
(not res or not res.code or res.code not in
(ERROR_NO_NETWORK,
ERROR_INVALID_KEY,
ERROR_MISSING_KEY))):
logging.debug("Sending result to server.")
try:
if res is not None:
s.sendResultToServer(self.client_config)
except Exception:
logging.warning("Unable to send status to management server")
logging.debug(traceback.format_exc())
return False
return True
def readInitPlan(self):
return self.readPlan('init')
def readRemovePlan(self):
return self.readPlan('remove')
def readUpgradePlan(self):
return self.readPlan('upgrade')
class PackageList(object):
def __init__(self, text, pl=None):
if pl is None:
pl = yaml_load(text)
self.package_list_yaml = pl
self.error = None
self.protocolVersion = None
self.kspliceToolsApiVersion = None
self.release = None
self.version = None
self.arch = None
self.clientVersionToInstall = '0'
self.clientVersionToParse = '0'
self.protocolVersion = pl['Protocol version']
self.kspliceToolsApiVersion = pl['Client']['Ksplice Tools API version']
kern = pl['Kernel']
self.release, self.version, self.arch = \
kern['Release'], kern['Version'], kern['Architecture']
self.clientVersionToParse = pl['Client'].get('Version to Parse', '0')
self.clientVersionToInstall = pl['Client'].get('Version to Install', '0')
self.ids = []
self.packageData = {}
for item in pl['Updates']:
self.ids.append(item['ID'])
self.packageData[item['ID']] = item
def download(c, url, filename, in_offline_mode, ifmodified=True, bytesio=None):
"""Downloads a file to disk with PycURL.
`c` - A pycurl.Curl() object. You probably want getCurl().
`url` - URL to download.
`filename` - Filename to download to.
`in_offline_mode` - If true, treat `url` as a path into the local offline
repository and copy it to `filename` instead of fetching it over the network.
`ifmodified` - If `filename` exists, only re-download it if the server's
copy of `url` is newer (i.e., do the If-Modified-Since / 304
Not Modified thing).
`bytesio` - A (c)BytesIO object that will be used to read content
from the server. This can be useful if a caller needs the
content of the response even if the server doesn't return
a 200 OK.
Returns the HTTP response code; if you want more information, use
c.getinfo().
Raises non-ENOENT errors from os.stat, and any error from pycurl.
"""
if in_offline_mode:
try:
shutil.copy2(parse.unquote(url), filename)
except IOError as e:
if e.errno == errno.EACCES:
return 403
else:
return 404
except Exception:
return 500
return 200
try:
if ifmodified:
try:
t = int(os.stat(filename).st_mtime)
c.setopt(pycurl.TIMEVALUE, t)
c.setopt(pycurl.TIMECONDITION, pycurl.TIMECONDITION_IFMODSINCE)
except OSError as e:
if e.errno != errno.ENOENT:
raise
if bytesio:
stream = bytesio
else:
stream = BytesIO()
c.setopt(pycurl.URL, url.encode('ISO-8859-1'))
c.setopt(pycurl.HTTPGET, 1)
c.setopt(pycurl.WRITEFUNCTION, stream.write)
c.perform()
rcode = c.getinfo(pycurl.RESPONSE_CODE)
if rcode == 200:
mkdirp(os.path.dirname(filename))
try:
write_file_binary(filename, stream.getvalue())
except Exception as e:
# If the entire file didn't get written, try not to leave a
# partial copy
try:
os.remove(filename)
except OSError as ee:
if ee.errno != errno.ENOENT:
raise ee
raise e
t = c.getinfo(pycurl.INFO_FILETIME)
if t > 0:
os.utime(filename, (t, t))
elif rcode >= 400 and rcode != 404:
logging.debug("The server returned error code %d:", rcode)
logging.debug(stream.getvalue())
return rcode
finally:
c.setopt(pycurl.TIMECONDITION, pycurl.TIMECONDITION_NONE)
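# A typical call (sketch only; the URL and destination are placeholders),
# reusing the shared handle from getCurl():
#
#     initCurl(config)
#     rcode = download(getCurl(),
#                      'https://updates.ksplice.com/example/packages.yml',
#                      os.path.join(UPTRACK_CACHE_DIR, 'packages.yml'),
#                      in_offline_mode=False)
#     if rcode == 304:
#         pass        # cached copy is still current (If-Modified-Since)
#     elif rcode != 200:
#         pass        # treat anything else as a failed download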
class BaseUptrackConfig(object):
def __init__(self):
self.sysname, self.orig_hostname, self.release, self.version, self.arch, self.userarch = getUname()
self.hostname = None
config = ConfigParser()
try:
config.read([UPTRACK_CONFIG_FILE])
except ConfigParserError as e:
raise ResultException(1, "Unable to parse config file: " + e.message)
self.config = config
self.setMisc()
self.setProxy()
self.setSSL()
self.setOCIRegion()
self.setRemoteRoot()
self.setLocalPaths()
self.setCPUInfo()
self.setModules()
self.setVMInfo()
self.setIP()
self.removableModules = None
def setCPUInfo(self):
sockets = {}
processors = 0
try:
for line in open("/proc/cpuinfo").readlines():
if line.startswith("physical id"):
pid = line.split(":")[1][1:]
if pid in sockets:
sockets[pid] += 1
else:
sockets[pid] = 1
if line.startswith("processor\t"):
processors += 1
except IOError:
logging.debug(traceback.format_exc())
self.cpuinfo = [0, 0]
else:
if sockets == {}:
# Virtual machine with no physical processors
self.cpuinfo = [0, processors]
else:
self.cpuinfo = [len(sockets.keys()), sum(sockets.values())]
def setModules(self):
self.modules = []
try:
for line in open("/proc/modules").readlines():
(name, size) = line.split()[0:2]
if name.startswith("ksplice"):
continue
self.modules.append([name, size])
except IOError:
logging.debug(traceback.format_exc())
self.modules.sort()
def newUUID(self):
uuid = None
try:
proc = subprocess.Popen(['uuidgen'], stdout=subprocess.PIPE)
uuid = proc.communicate()[0].decode('utf-8').strip()
except subprocess.CalledProcessError:
raise ResultException(1, "Unable to generate a new Uptrack UUID.")
try:
mkdirp(os.path.dirname(UPTRACK_UUID_FILE))
write_file(UPTRACK_UUID_FILE, uuid + "\n")
except (IOError, OSError) as e:
raise ResultException(1, "Unable to write the Uptrack UUID file " +
UPTRACK_UUID_FILE + ":\n " + str(e))
return uuid
def regenerateCron(self):
p = subprocess.Popen(['/usr/lib/uptrack/regenerate-crontab'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output = p.communicate()[0].decode('utf-8')
if p.returncode != 0:
logging.debug("Error regenerating crontab.")
logging.debug(output)
try:
os.unlink(os.path.join(self.localroot, 'backoff-counter'))
os.unlink(os.path.join(self.localroot, 'backoff'))
except OSError:
pass
def updateBackoff(self, backoff):
try:
old = read_file(self.localroot + '/backoff')
except IOError:
old = None
if old != str(backoff) + '\n':
write_file(self.localroot + '/backoff', str(backoff) + '\n')
write_file(self.localroot + '/backoff-counter',
str(random.randrange(0, backoff)) + '\n')
def configureHostname(self):
"""
Adjust `hostname` if hostname_override_file is set, and set `fullhostname`.
"""
if self.config.has_option('Settings', 'hostname_override_file'):
hostname_override_file = self.config.get('Settings', 'hostname_override_file')
try:
self.fullhostname = self.hostname = read_file(hostname_override_file).strip()
if not self.hostname:
logging.error("You must supply a non-empty hostname.")
logging.error("Please check the hostname_override_file option in /etc/uptrack/uptrack.conf.")
sys.exit(1)
except (IOError, OSError):
logging.error("Unable to read hostname from %s." % (hostname_override_file,))
logging.error("Please check the hostname_override_file option in /etc/uptrack/uptrack.conf.")
sys.exit(1)
else:
self.hostname = self.orig_hostname
try:
self.fullhostname = socket.gethostbyaddr(self.hostname)[0]
except socket.error:
self.fullhostname = ''
def setMisc(self):
self.lockfile = "/var/lib/uptrack/lock"
self.accesskey = ""
if self.config.has_option('Auth', 'accesskey'):
self.accesskey = self.config.get('Auth', 'accesskey')
self.uuid = None
self.newuuid = None
self.olduuid = None
self.debug_to_server = getConfigBooleanOrDie(
self.config, 'Settings', 'debug_to_server', True)
self.use_hw_uuid = getConfigBooleanOrDie(
self.config, 'Auth', 'use_hw_uuid', False)
self.no_rmmod = getConfigBooleanOrDie(
self.config, 'Settings', 'no_rmmod', False)
self.run_uuid = None
self.run_uuid_error = None
try:
p = subprocess.Popen(['uuidgen'], stdout=subprocess.PIPE)
self.run_uuid = p.communicate()[0].decode('utf-8').strip()
except subprocess.CalledProcessError:
self.run_uuid_error = traceback.format_exc()
self.rh_derivative = os.path.exists('/etc/redhat-release')
def initWithLock(self):
# Note! This is not called by __init__, because UptrackConfig is not
# initialized under the repository lock. This must be called separately
# once the lock is held.
self.serial = 0
self.serial_stat = None
uuid = None
if self.use_hw_uuid:
uuid = self.vminfo.get('uuid', '').lower() or None
if uuid == '00000000-0000-0000-0000-000000000000':
uuid = None
if uuid is None:
try:
uuid = read_file(UPTRACK_UUID_FILE).strip()
try:
self.serial = int(read_file(UPTRACK_SERIAL_FILE).strip())
except ValueError:
self.serial_stat = tuple(os.stat(UPTRACK_SERIAL_FILE))
except (IOError, OSError):
pass
if not uuid:
uuid = self.newUUID()
self.setUUID(uuid)
self.configureHostname()
def incrementSerial(self):
""" Increment self.serial and write the result to disk.
Returns the previous serial number.
"""
old = self.serial
self.serial += 1
try:
tmp_serial_file = UPTRACK_SERIAL_FILE + ".tmp"
write_file(tmp_serial_file, "%d\n" % (self.serial,))
os.rename(tmp_serial_file, UPTRACK_SERIAL_FILE)
except (IOError, OSError) as e:
logging.debug("Unable to store new serial", exc_info=True)
raise ResultException(1,
"Unable to increment the Uptrack serial number (%s):\n%s"
% (UPTRACK_SERIAL_FILE, e))
return old
def setProxy(self):
""" Set self.proxy based on config and the environment.
Set self.proxy to the value of a proxy server to use to talk to the
Uptrack server, based on the config file, the environment, and the
global GConf database if available.
Upon return, self.proxy will be set in one of three ways:
- None: No proxy setting was detected. Uptrack will let pycurl attempt
to choose a proxy based on its own defaults.
- '': The user explicitly requested that no proxy be used. Uptrack will
force pycurl not to use a proxy.
- Any other string: The URL of an HTTPS proxy server to use with
the CONNECT method.
In order to allow the user to explicitly specify "no proxy" globally, we
accept the value 'none' (case insensitive) in the Network.https_proxy
setting in uptrack.conf, and translate it to self.proxy = ''. An empty
setting is taken to be unset, and will result in self.proxy being None.
(Note that, confusingly, this means that "Network.https_proxy = none"
corresponds to self.proxy = '', and vice versa.)
"""
self.proxy = None
if self.config.has_option('Network', 'https_proxy'):
proxy = self.config.get('Network', 'https_proxy').strip()
if proxy:
if proxy.lower() == 'none':
self.proxy = ''
else:
self.proxy = proxy
return
for key in ['https_proxy', 'HTTPS_PROXY', 'http_proxy']:
if key in os.environ:
self.proxy = os.environ[key]
return
# default to True to preserve behavior of old config files
enable_gconf = getConfigBooleanOrDie(
self.config, 'Network', 'gconf_proxy_lookup', True)
if not (have_gconf and enable_gconf):
return
try:
client = gconf.client_get_default()
if client.get_bool('/system/http_proxy/use_http_proxy'):
host = client.get_string('/system/http_proxy/host')
port = client.get_int('/system/http_proxy/port')
self.proxy = 'http://' + host + ":" + str(port)
except Exception:
pass
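# Examples of the mapping described in the setProxy() docstring
# (uptrack.conf values are illustrative):
#
#     [Network]
#     https_proxy = https://proxy.example.com:3128   -> self.proxy = that URL
#     https_proxy = none                             -> self.proxy = '' (force no proxy)
#     https_proxy =                                  -> self.proxy stays None;
#                                                       environment/GConf consulted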
def setSSL(self):
self.ssl_ca_certs = []
if self.config.has_option('Network', 'ssl_ca_cert_file'):
self.ssl_ca_certs.append((pycurl.CAINFO,
self.config.get('Network', 'ssl_ca_cert_file')))
if self.config.has_option('Network', 'ssl_ca_cert_dir'):
self.ssl_ca_certs.append((pycurl.CAPATH,
self.config.get('Network', 'ssl_ca_cert_dir')))
def setOCIRegion(self):
self.oci_region = None
self.oci_domain = None
def setRemoteRoot(self):
self.remoteroot = None
def setUUID(self, uuid):
self.uuid = uuid
def setLocalPaths(self):
self.localroot = UPTRACK_CACHE_DIR
self.local = os.path.join(self.localroot,
self.sysname,
self.arch,
self.release,
self.version)
def setVMInfo(self):
if not hasattr(self, 'vminfo'):
self.vminfo = getVMInfo()
def setIP(self):
"""
Set localip to a dictionary of the form {"IP": "X.X.X.X"}.
If the suppress_ip config option is enabled, set a dummy
address. Otherwise, try to get it from the 'ip' command. Upon failure,
set localip to an error dict of the form {"Error": "error_msg"} instead.
"""
if getConfigBooleanOrDie(self.config, 'Settings', 'suppress_ip', False):
self.localip = {"IP": "0.0.0.0"}
return
try:
uri = self.remoteroot
if self.proxy:
uri = self.proxy
split_url = parse.urlsplit(uri)
# Curl accepts a proxy without leading http(s)://, which
# requires special processing here.
if self.proxy and '://' not in uri:
split_url = parse.urlsplit("http://" + uri)
if split_url.hostname:
remoteip = socket.gethostbyname(split_url.hostname)
p = subprocess.Popen(['ip', 'route', 'get', remoteip],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
routedata = p.communicate()[0].decode('utf-8')
if ' src ' in routedata:
self.localip = {'IP': routedata[routedata.index(' src '):].split()[1]}
else:
self.localip = {'Error': "Could not parse IP address from route data (%s)" % routedata}
else:
self.localip = {'Error': "Could not parse hostname out of remote or proxy (%s)" % uri}
except Exception as e:
self.localip = {'Error': "%s (host = %s, uri = %s)" % (str(e), split_url.hostname, uri)}
class OnlineUptrackConfig(BaseUptrackConfig):
def __init__(self):
self.in_offline_mode = False
super(OnlineUptrackConfig, self).__init__()
def setOCIRegion(self):
# OCI provides a metadata service that can be used to determine the region we
# are running in; however, we don't want to incur the cost of that timeout for
# everybody who runs uptrack outside of OCI. OCI sets the `chassis_asset_tag`
# to "OracleCloud.com", so we use that as a fast first-tier check to decide
# whether to move on to the slower HTTP request for the actual data.
self.oci_region = None
self.oci_domain = None
# Don't bother looking if the bypass flag is set.
if getConfigBooleanOrDie(self.config, 'Network', 'bypass_oci_region_repo', False):
logging.debug('Bypassing OCI region check')
return
try:
with open('/sys/devices/virtual/dmi/id/chassis_asset_tag', 'rb') as f:
contents = f.read().decode('utf-8').strip()
if 'oraclecloud' not in contents.lower():
logging.debug('Chassis tag is "%s". Not an OCI system.' % contents)
return
except Exception:
logging.debug('Unable to find chassis_asset_tag. Not an OCI system.')
return
try:
logging.debug('We are an OCI system. Determining region.')
# __curl hasn't been initialized yet. :(
stream = BytesIO()
c = pycurl.Curl()
c.setopt(pycurl.USERAGENT, USERAGENT)
c.setopt(pycurl.OPT_FILETIME, 1)
c.setopt(pycurl.FOLLOWLOCATION, 1)
c.setopt(pycurl.MAXREDIRS, 5)
c.setopt(pycurl.ENCODING, '')
c.setopt(pycurl.URL, 'http://169.254.169.254/opc/v2/instance/'.encode('ISO-8859-1'))
c.setopt(pycurl.HTTPGET, 1)
c.setopt(pycurl.WRITEFUNCTION, stream.write)
c.setopt(pycurl.TIMEOUT, 1)
c.setopt(pycurl.HTTPHEADER, ['Authorization: Bearer Oracle'])
c.perform()
rcode = c.getinfo(pycurl.RESPONSE_CODE)
if rcode == 200:
metadata_string = stream.getvalue()
metadata = json.loads(metadata_string)
try:
self.oci_region = metadata['regionInfo']['regionIdentifier']
self.oci_domain = metadata['regionInfo']['realmDomainComponent']
logging.debug('OCI host running in %s (domain %s)', self.oci_region,
self.oci_domain)
except KeyError:
logging.debug('Unable to determine OCI region.')
logging.debug('Metadata contents:\n%s' % metadata_string)
else:
logging.debug('Metadata response: %s', rcode)
except Exception:
logging.debug('', exc_info=1)
def setRemoteRoot(self):
self.remoteroot = UPDATE_REPO_URL
if self.config.has_option("Network", "update_repo_url"):
remote = self.config.get("Network", "update_repo_url").strip()
if remote:
self.remoteroot = remote
# When an OCI region has been set, we always want to use that. This
# implementation will ensure that, even if you have a region-specific URL
# configured (say, updates.ksplice.uk-london-1.oci.oraclecloud.com), you will
# always get the local region's URL. That may be an issue if you've
# configured one region's host, and then you move the instance to another
# region.
if self.oci_region:
url = parse.urlparse(self.remoteroot)
# Why check the hostname here? Because UPDATE_REPO_URL gets patched to
# localhost for testing (see the comment in server_is_oracle()), we
# verify that the configured host really is an Oracle domain before
# replacing it. We could eliminate this check by having the code that
# does the patching also set `bypass_oci_region_repo` in
# /etc/uptrack/uptrack.conf.
if re.match(r'.*\.(ksplice|oracle|oraclecloud)\.com(:[0-9]+)?$', url[1]):
region_host = 'updates.ksplice.%s.oci.%s' % (self.oci_region, self.oci_domain)
self.remoteroot = parse.urlunparse(('https', region_host) + url[2:])
logging.debug('Region-local remote root set to %s', self.remoteroot)
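# For example (region and domain values are illustrative): with
# oci_region = 'uk-london-1' and oci_domain = 'oraclecloud.com', a remoteroot of
# https://updates.ksplice.com/update-repository is rewritten to
# https://updates.ksplice.uk-london-1.oci.oraclecloud.com/update-repository.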
def setUUID(self, uuid):
super(OnlineUptrackConfig, self).setUUID(uuid)
self.remote = posixpath.join(self.remoteroot,
parse.quote(self.accesskey),
"+uuid", parse.quote(self.uuid))
class OfflineUptrackConfig(BaseUptrackConfig):
def __init__(self):
self.in_offline_mode = True
super(OfflineUptrackConfig, self).__init__()
if self.config.has_option('Settings', 'effective_version'):
self.effective_version = self.config.get('Settings', 'effective_version')
else:
self.effective_version = None
def setRemoteRoot(self):
self.remoteroot = UPDATE_REPO_PATH
def setUUID(self, uuid):
super(OfflineUptrackConfig, self).setUUID(uuid)
self.remote = self.remoteroot
def inVirtualBox():
# PCI ID 0x80ee is VirtualBox virtual devices
# http://pci-ids.ucw.cz/read/PC/80ee
try:
for line in open('/proc/bus/pci/devices', 'r'):
fields = line.split()
if fields[1][0:4] == '80ee':
return True
except (IOError, IndexError):
pass
return False
class TimeoutException(Exception):
pass
def call_timed(command, output_timeout):
devnull = open('/dev/null', 'w')
proc = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=devnull)
if select.select([proc.stdout], [], [], output_timeout)[0]:
exit_code = proc.wait()
if exit_code == 0:
return proc.stdout.read().decode('utf-8').strip()
raise subprocess.CalledProcessError(exit_code, command)
else:
proc.kill()
raise TimeoutException
def getVMInfo():
"""Find the UUID of this machine and of any VMs it is hosting."""
vminfo = {}
devnull = open('/dev/null', 'w')
# On a Xen paravirt domU, you get the UUID from /sys/hypervisor/uuid.
# On most other systems (dom0, HVM domU, bare hardware, most other
# virtualization systems) you get the UUID from DMI, but accessing DMI
# fails on a Xen paravirt domU. So we check /sys/hypervisor first.
# Reading /sys/hypervisor/uuid hangs if xenstored hasn't started yet.
# See https://bugzilla.redhat.com/show_bug.cgi?id=225203
# So instead we spin off a child process to do the read, such that
# it's okay if it hangs.
try:
vminfo['uuid'] = call_timed(['cat', '/sys/hypervisor/uuid'], 1)
except (OSError, subprocess.CalledProcessError) as e:
vminfo['xen_error'] = str(e)
except TimeoutException:
vminfo['xen_error'] = 'Read of /sys/hypervisor/uuid timed out; is xenstored running?'
if vminfo.get('uuid') == '00000000-0000-0000-0000-000000000000':
vminfo['type'] = 'Xen dom0'
del vminfo['uuid']
if xenstored_is_running():
try:
vminfo['children'] = call_timed(['xenstore-list', '/vm'], 1).split('\n')
try:
vminfo['children'].remove('00000000-0000-0000-0000-000000000000')
except ValueError:
pass
except (OSError, subprocess.CalledProcessError) as e:
vminfo['xen_error'] = str(e)
except TimeoutException:
vminfo['xen_error'] = 'xenstore-list /vm timed out; is xenstored running?'
else:
vminfo['xen_error'] = "xenstored isn't running"
elif 'uuid' in vminfo:
vminfo['type'] = 'Xen paravirt domU'
# Checks for other virtualization systems would go here
if 'uuid' not in vminfo:
try:
# Bare metal, or Xen HVM domU, or VMware, or KVM
with open('/sys/class/dmi/id/product_uuid', 'r') as uuid:
vminfo['uuid'] = uuid.read().rstrip()
with open('/sys/class/dmi/id/product_name', 'r') as name:
vminfo.setdefault('type', name.read().rstrip())
except (IOError, OSError, subprocess.CalledProcessError) as e:
vminfo['dmidecode_error'] = str(e)
try:
vminfo['num_containers'] = len(open("/proc/vz/veinfo").readlines())
except IOError:
vminfo['num_containers'] = 0
return vminfo
def xenstored_is_running():
timeout = 15
while timeout > 0:
if proc_is_running("xenstored"):
return True
time.sleep(1)
timeout -= 1
return False
def proc_is_running(name):
for proc in process_list():
if name == os.path.basename(proc):
return True
return False
def process_list():
processes = set()
pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
for pid in pids:
try:
cmdline = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read()
# Decode so the names compare equal to the str passed to proc_is_running().
name = cmdline.decode('utf-8', 'replace').split('\x00')
if name:
name = name[0].split()
if name:
processes.add(name[0])
except IOError:
continue
return processes
def getMmapMinAddr():
"""Return the value of `mmap_min_addr` on this machine."""
try:
mmap_min_addr = read_file('/proc/sys/vm/mmap_min_addr').strip()
except IOError:
mmap_min_addr = None
return mmap_min_addr