Keep listening for events forever.
Args:
timeout_ms (int): How long to poll the Home Server for before
retrying.
exception_handler (func(exception)): Optional exception handler
function which can be used to handle exceptions in the caller
thread.
bad_sync_timeout (int): Base time to wait after an error before
retrying. Will be increased according to exponential backoff.
|
def listen_forever(self, timeout_ms=30000, exception_handler=None,
bad_sync_timeout=5):
_bad_sync_timeout = bad_sync_timeout
self.should_listen = True
    while self.should_listen:
try:
self._sync(timeout_ms)
_bad_sync_timeout = bad_sync_timeout
# TODO: we should also handle MatrixHttpLibError for retry in case no response
except MatrixRequestError as e:
logger.warning("A MatrixRequestError occured during sync.")
if e.code >= 500:
logger.warning("Problem occured serverside. Waiting %i seconds",
bad_sync_timeout)
sleep(bad_sync_timeout)
_bad_sync_timeout = min(_bad_sync_timeout * 2,
self.bad_sync_timeout_limit)
elif exception_handler is not None:
exception_handler(e)
else:
raise
except Exception as e:
logger.exception("Exception thrown during sync")
if exception_handler is not None:
exception_handler(e)
else:
raise
| 239,845 |
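Usage note: a minimal sketch of how a caller might drive this loop, assuming `client` is an already logged-in MatrixClient instance (the handler name is illustrative):

def on_sync_error(exception):
    # Non-5xx MatrixRequestErrors and unexpected exceptions are routed here
    # instead of killing the listener loop.
    print('Sync failed: %r' % exception)

client.listen_forever(timeout_ms=30000, exception_handler=on_sync_error)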
Start a listener thread to listen for events in the background.
Args:
    timeout_ms (int): How long to poll the Home Server for before
retrying.
exception_handler (func(exception)): Optional exception handler
function which can be used to handle exceptions in the caller
thread.
|
def start_listener_thread(self, timeout_ms=30000, exception_handler=None):
try:
thread = Thread(target=self.listen_forever,
args=(timeout_ms, exception_handler))
thread.daemon = True
self.sync_thread = thread
self.should_listen = True
thread.start()
    except RuntimeError as e:
        logger.error("Error: unable to start thread. %s", str(e))
| 239,846 |
Upload content to the home server and receive an MXC URL.
Args:
content (bytes): The data of the content.
content_type (str): The mimetype of the content.
filename (str): Optional. Filename of the content.
Raises:
    MatrixUnexpectedResponse: If the homeserver gave a strange response.
MatrixRequestError: If the upload failed for some reason.
|
def upload(self, content, content_type, filename=None):
try:
response = self.api.media_upload(content, content_type, filename)
if "content_uri" in response:
return response["content_uri"]
else:
raise MatrixUnexpectedResponse(
"The upload was successful, but content_uri wasn't found."
)
except MatrixRequestError as e:
raise MatrixRequestError(
code=e.code,
content="Upload failed: %s" % e
)
| 239,848 |
Remove the mapping of a room alias.
Args:
    room_alias (str): The alias to be removed.
Returns:
bool: True if the alias is removed, False otherwise.
|
def remove_room_alias(self, room_alias):
try:
self.api.remove_room_alias(room_alias)
return True
except MatrixRequestError:
return False
| 239,851 |
Update data on one-time keys count and upload new ones if necessary.
Args:
counts (dict): Counts of keys currently on the HS for each key type.
|
def update_one_time_key_counts(self, counts):
self.one_time_keys_manager.server_counts = counts
if self.one_time_keys_manager.should_upload():
logger.info('Uploading new one-time keys.')
self.upload_one_time_keys()
| 239,859 |
Signs a JSON object.
NOTE: The object is modified in-place and the return value can be ignored.
As specified, this is done by encoding the JSON object without ``signatures`` or
keys grouped as ``unsigned``, using canonical encoding.
Args:
json (dict): The JSON object to sign.
Returns:
The same JSON object, with a ``signatures`` key added. It is formatted as
``"signatures": ed25519:<device_id>: <base64_signature>``.
|
def sign_json(self, json):
signatures = json.pop('signatures', {})
unsigned = json.pop('unsigned', None)
signature_base64 = self.olm_account.sign(encode_canonical_json(json))
key_id = 'ed25519:{}'.format(self.device_id)
signatures.setdefault(self.user_id, {})[key_id] = signature_base64
json['signatures'] = signatures
if unsigned:
json['unsigned'] = unsigned
return json
| 239,860 |
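Usage note: a sketch of the in-place effect of sign_json, assuming `machine` is an object exposing the method above (names and values are illustrative):

device_keys = {'user_id': '@alice:example.org', 'device_id': 'ABCDEFG'}
machine.sign_json(device_keys)
# device_keys is now:
# {'user_id': '@alice:example.org',
#  'device_id': 'ABCDEFG',
#  'signatures': {'@alice:example.org': {'ed25519:ABCDEFG': '<base64_signature>'}}}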
Get this user's display name.
Args:
room (Room): Optional. When specified, return the display name of the user
in this room.
Returns:
The display name. Defaults to the user ID if not set.
|
def get_display_name(self, room=None):
if room:
try:
return room.members_displaynames[self.user_id]
except KeyError:
return self.user_id
if not self.displayname:
self.displayname = self.api.get_display_name(self.user_id)
return self.displayname or self.user_id
| 239,863 |
Set this user's display name.
Args:
display_name (str): Display Name
|
def set_display_name(self, display_name):
self.displayname = display_name
return self.api.set_display_name(self.user_id, display_name)
| 239,864 |
Add a row to the table.
Arguments:
row - row of data, should be a list with as many elements as the table
has fields
|
def add_row(self, row):
if self._field_names and len(row) != len(self._field_names):
raise Exception("Row has incorrect number of values, (actual) %d!=%d (expected)" %(len(row),len(self._field_names)))
if not self._field_names:
self.field_names = [("Field %d" % (n+1)) for n in range(0,len(row))]
self._rows.append(list(row))
| 240,102 |
Delete a row from the table.
Arguments:
row_index - The index of the row you want to delete. Indexing starts at 0.
|
def del_row(self, row_index):
if row_index > len(self._rows)-1:
raise Exception("Cant delete row at index %d, table only has %d rows!" % (row_index, len(self._rows)))
del self._rows[row_index]
| 240,103 |
Add a column to the table.
Arguments:
fieldname - name of the field to contain the new column of data
column - column of data, should be a list with as many elements as the
table has rows
align - desired alignment for this column - "l" for left, "c" for centre and "r" for right
valign - desired vertical alignment for new columns - "t" for top, "m" for middle and "b" for bottom
|
def add_column(self, fieldname, column, align="c", valign="t"):
if len(self._rows) in (0, len(column)):
self._validate_align(align)
self._validate_valign(valign)
self._field_names.append(fieldname)
self._align[fieldname] = align
self._valign[fieldname] = valign
for i in range(0, len(column)):
if len(self._rows) < i+1:
self._rows.append([])
self._rows[i].append(column[i])
else:
raise Exception("Column length %d does not match number of rows %d!" % (len(column), len(self._rows)))
| 240,104 |
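Usage note: a sketch of the row and column methods above on a PrettyTable-style object; the constructor and data are illustrative:

table = PrettyTable()
table.field_names = ['City', 'Population']
table.add_row(['Oslo', 634293])
table.add_row(['Bergen', 271949])
table.add_column('Country', ['Norway', 'Norway'], align='l')
table.del_row(0)  # drops the Oslo row; indexing starts at 0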
validate checks a kubernetes resource definition
Args:
definition (dict): resource definition
version (str): version of kubernetes to validate against
strict (bool): whether unexpected additional properties should be considered errors
Returns:
warnings (list), errors (list): warnings are missing validations, errors are validation failures
|
def validate(self, definition, version=None, strict=False):
if not HAS_KUBERNETES_VALIDATE:
raise KubernetesValidateMissing()
errors = list()
warnings = list()
try:
if version is None:
try:
version = self.version['kubernetes']['gitVersion']
except KeyError:
version = kubernetes_validate.latest_version()
kubernetes_validate.validate(definition, version, strict)
except kubernetes_validate.utils.ValidationError as e:
errors.append("resource definition validation error at %s: %s" % ('.'.join([str(item) for item in e.path]), e.message)) # noqa: B306
    except VersionNotSupportedError:
errors.append("Kubernetes version %s is not supported by kubernetes-validate" % version)
except kubernetes_validate.utils.SchemaNotFoundError as e:
warnings.append("Could not find schema for object kind %s with API version %s in Kubernetes version %s (possibly Custom Resource?)" %
(e.kind, e.api_version, e.version))
return warnings, errors
| 240,130 |
A safety net.
Decorator for functions that are only allowed to return True or raise
an exception.
Args:
f: A function whose only expected return value is True.
Returns:
    A wrapped function whose only guaranteed return value is True.
|
def returns_true_or_raises(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
ret = f(*args, **kwargs)
if ret is not True:
raise RuntimeError("Unexpected return value %r" % ret)
return True
return wrapped
| 241,921 |
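Usage note: a sketch of the decorator in action; verify_checksum and checksum are hypothetical names:

@returns_true_or_raises
def verify_checksum(data, expected):
    if checksum(data) != expected:  # checksum() is a stand-in helper
        raise ValueError('checksum mismatch')
    return True

# Any return value other than True from the wrapped function is converted
# into a RuntimeError, so callers never see a silent falsy result.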
Performs a request, and checks that the status is OK, and that the
content-type matches expectations.
Args:
url: URL to request
method: either 'get' or 'post'
expected_content_type: prefix to match response content-type against
**kwargs: passed to the request method directly.
Raises:
    RuntimeError: if the status is not OK, or the response content type
        does not match expected_content_type.
|
def request_and_check(self, url, method='get',
expected_content_type=None, **kwargs):
assert method in ['get', 'post']
result = self.driver.request(method, url, **kwargs)
if result.status_code != requests.codes.ok:
raise RuntimeError('Error requesting %r, status = %d' %
(url, result.status_code))
if expected_content_type is not None:
content_type = result.headers.get('content-type', '')
if not re.match(expected_content_type, content_type):
raise RuntimeError(
'Error requesting %r, content type %r does not match %r' %
(url, content_type, expected_content_type))
return result
| 242,310 |
Enable the list of network interfaces.
Args:
interfaces: list of string, the output device names to enable.
logger: logger object, used to write to SysLog and serial port.
dhclient_script: string, the path to a dhclient script used by dhclient.
|
def EnableNetworkInterfaces(
self, interfaces, logger, dhclient_script=None):
interfaces_to_up = [i for i in interfaces if i != 'eth0']
if interfaces_to_up:
logger.info('Enabling the Ethernet interfaces %s.', interfaces_to_up)
self._Dhcpcd(interfaces_to_up, logger)
| 242,924 |
Use dhcpcd to activate the interfaces.
Args:
interfaces: list of string, the output device names to enable.
logger: logger object, used to write to SysLog and serial port.
|
def _Dhcpcd(self, interfaces, logger):
for interface in interfaces:
dhcpcd = ['/sbin/dhcpcd']
try:
subprocess.check_call(dhcpcd + ['-x', interface])
except subprocess.CalledProcessError:
# Dhcpcd not yet running for this device.
logger.info('Dhcpcd not yet running for interface %s.', interface)
try:
subprocess.check_call(dhcpcd + [interface])
except subprocess.CalledProcessError:
# The interface is already active.
logger.warning('Could not activate interface %s.', interface)
| 242,925 |
Constructor.
Args:
logger: logger object, used to write to SysLog and serial port.
|
def __init__(self, logger=logging):
self.logger = logger
self.interfaces = self._CreateInterfaceMap()
| 242,926 |
Context manager for creating a temporary directory.
Args:
prefix: string, the prefix for the temporary directory.
run_dir: string, the base directory location of the temporary directory.
Yields:
string, the temporary directory created.
|
@contextlib.contextmanager
def _CreateTempDir(prefix, run_dir=None):
temp_dir = tempfile.mkdtemp(prefix=prefix + '-', dir=run_dir)
try:
yield temp_dir
finally:
shutil.rmtree(temp_dir)
| 242,929 |
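Usage note: a sketch of the context manager above; the prefix and file name are illustrative:

import os

with _CreateTempDir('startup') as temp_dir:
    path = os.path.join(temp_dir, 'scratch.txt')
    with open(path, 'w') as f:
        f.write('scratch data')
# temp_dir and its contents have been removed at this point.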
Constructor.
Args:
script_type: string, the metadata script type to run.
default_shell: string, the default shell to execute the script.
run_dir: string, the base directory location of the temporary directory.
debug: bool, True if debug output should write to the console.
|
def __init__(
self, script_type, default_shell=None, run_dir=None, debug=False):
self.script_type = script_type
self.default_shell = default_shell
name = '%s-script' % self.script_type
facility = logging.handlers.SysLogHandler.LOG_DAEMON
self.logger = logger.Logger(name=name, debug=debug, facility=facility)
self.retriever = script_retriever.ScriptRetriever(self.logger, script_type)
self.executor = script_executor.ScriptExecutor(
self.logger, script_type, default_shell=default_shell)
self._RunScripts(run_dir=run_dir)
| 242,931 |
Retrieve metadata scripts and execute them.
Args:
run_dir: string, the base directory location of the temporary directory.
|
def _RunScripts(self, run_dir=None):
with _CreateTempDir(self.script_type, run_dir=run_dir) as dest_dir:
try:
self.logger.info('Starting %s scripts.', self.script_type)
script_dict = self.retriever.GetScripts(dest_dir)
self.executor.RunScripts(script_dict)
finally:
self.logger.info('Finished running %s scripts.', self.script_type)
| 242,932 |
Constructor.
Args:
debug: bool, True if debug output should write to the console.
|
def __init__(self, debug=False):
self.debug = debug
facility = logging.handlers.SysLogHandler.LOG_DAEMON
self.logger = logger.Logger(
name='instance-setup', debug=self.debug, facility=facility)
self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
self.metadata_dict = None
self.instance_config = instance_config.InstanceConfig(logger=self.logger)
if self.instance_config.GetOptionBool('InstanceSetup', 'network_enabled'):
self.metadata_dict = self.watcher.GetMetadata()
instance_config_metadata = self._GetInstanceConfig()
self.instance_config = instance_config.InstanceConfig(
logger=self.logger, instance_config_metadata=instance_config_metadata)
if self.instance_config.GetOptionBool('InstanceSetup', 'set_host_keys'):
host_key_types = self.instance_config.GetOptionString(
'InstanceSetup', 'host_key_types')
self._SetSshHostKeys(host_key_types=host_key_types)
if self.instance_config.GetOptionBool('InstanceSetup', 'set_boto_config'):
self._SetupBotoConfig()
if self.instance_config.GetOptionBool(
'InstanceSetup', 'optimize_local_ssd'):
self._RunScript('google_optimize_local_ssd')
if self.instance_config.GetOptionBool('InstanceSetup', 'set_multiqueue'):
self._RunScript('google_set_multiqueue')
try:
self.instance_config.WriteConfig()
except (IOError, OSError) as e:
self.logger.warning(str(e))
| 242,934 |
Run a script and log the streamed script output.
Args:
script: string, the file location of an executable script.
|
def _RunScript(self, script):
process = subprocess.Popen(
script, shell=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
while True:
for line in iter(process.stdout.readline, b''):
self.logger.info(line.decode('utf-8').rstrip('\n'))
if process.poll() is not None:
break
| 242,936 |
Generate a new SSH key.
Args:
key_type: string, the type of the SSH key.
key_dest: string, a file location to store the SSH key.
|
def _GenerateSshKey(self, key_type, key_dest):
    # Create a temporary file to save the created keys.
with tempfile.NamedTemporaryFile(prefix=key_type, delete=True) as temp:
temp_key = temp.name
command = ['ssh-keygen', '-t', key_type, '-f', temp_key, '-N', '', '-q']
try:
self.logger.info('Generating SSH key %s.', key_dest)
subprocess.check_call(command)
except subprocess.CalledProcessError:
self.logger.warning('Could not create SSH key %s.', key_dest)
return
shutil.move(temp_key, key_dest)
shutil.move('%s.pub' % temp_key, '%s.pub' % key_dest)
file_utils.SetPermissions(key_dest, mode=0o600)
file_utils.SetPermissions('%s.pub' % key_dest, mode=0o644)
| 242,937 |
Regenerates SSH host keys when the VM is restarted with a new IP address.
Booting a VM from an image with a known SSH key allows a number of attacks.
This function will regenerate the host key whenever the IP address
changes. This applies the first time the instance is booted, and each time
the disk is used to boot a new instance.
Args:
host_key_types: string, a comma separated list of host key types.
|
def _SetSshHostKeys(self, host_key_types=None):
section = 'Instance'
instance_id = self._GetInstanceId()
if instance_id != self.instance_config.GetOptionString(
section, 'instance_id'):
self.logger.info('Generating SSH host keys for instance %s.', instance_id)
file_regex = re.compile(r'ssh_host_(?P<type>[a-z0-9]*)_key\Z')
key_dir = '/etc/ssh'
key_files = [f for f in os.listdir(key_dir) if file_regex.match(f)]
key_types = host_key_types.split(',') if host_key_types else []
key_types_files = ['ssh_host_%s_key' % key_type for key_type in key_types]
for key_file in set(key_files) | set(key_types_files):
key_type = file_regex.match(key_file).group('type')
key_dest = os.path.join(key_dir, key_file)
self._GenerateSshKey(key_type, key_dest)
self._StartSshd()
self.instance_config.SetOption(section, 'instance_id', str(instance_id))
| 242,939 |
Constructor.
Args:
logger: logger object, used to write to SysLog and serial port.
script_type: string, the metadata script type to run.
|
def __init__(self, logger, script_type):
self.logger = logger
self.script_type = script_type
self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
| 242,941 |
Download a Google Storage URL using an authentication token.
If the token cannot be fetched, fallback to unauthenticated download.
Args:
url: string, the URL to download.
dest_dir: string, the path to a directory for storing metadata scripts.
Returns:
string, the path to the file storing the metadata script.
|
def _DownloadAuthUrl(self, url, dest_dir):
dest_file = tempfile.NamedTemporaryFile(dir=dest_dir, delete=False)
dest_file.close()
dest = dest_file.name
self.logger.info(
'Downloading url from %s to %s using authentication token.', url, dest)
if not self.token:
response = self.watcher.GetMetadata(
self.token_metadata_key, recursive=False, retry=False)
if not response:
self.logger.info(
'Authentication token not found. Attempting unauthenticated '
'download.')
return self._DownloadUrl(url, dest_dir)
self.token = '%s %s' % (
response.get('token_type', ''), response.get('access_token', ''))
try:
request = urlrequest.Request(url)
request.add_unredirected_header('Metadata-Flavor', 'Google')
request.add_unredirected_header('Authorization', self.token)
content = urlrequest.urlopen(request).read().decode('utf-8')
except (httpclient.HTTPException, socket.error, urlerror.URLError) as e:
self.logger.warning('Could not download %s. %s.', url, str(e))
return None
    with open(dest, 'w') as f:
f.write(content)
return dest
| 242,942 |
Download a script from a given URL.
Args:
url: string, the URL to download.
dest_dir: string, the path to a directory for storing metadata scripts.
Returns:
string, the path to the file storing the metadata script.
|
def _DownloadUrl(self, url, dest_dir):
dest_file = tempfile.NamedTemporaryFile(dir=dest_dir, delete=False)
dest_file.close()
dest = dest_file.name
self.logger.info('Downloading url from %s to %s.', url, dest)
try:
urlretrieve.urlretrieve(url, dest)
return dest
except (httpclient.HTTPException, socket.error, urlerror.URLError) as e:
self.logger.warning('Could not download %s. %s.', url, str(e))
except Exception as e:
self.logger.warning('Exception downloading %s. %s.', url, str(e))
return None
| 242,943 |
Download the contents of the URL to the destination.
Args:
url: string, the URL to download.
dest_dir: string, the path to a directory for storing metadata scripts.
Returns:
string, the path to the file storing the metadata script.
|
def _DownloadScript(self, url, dest_dir):
# Check for the preferred Google Storage URL format:
# gs://<bucket>/<object>
if url.startswith(r'gs://'):
# Convert the string into a standard URL.
url = re.sub('^gs://', 'https://storage.googleapis.com/', url)
return self._DownloadAuthUrl(url, dest_dir)
header = r'http[s]?://'
domain = r'storage\.googleapis\.com'
# Many of the Google Storage URLs are supported below.
    # It is preferred that customers specify their object using
    # its gs://<bucket>/<object> URL.
bucket = r'(?P<bucket>[a-z0-9][-_.a-z0-9]*[a-z0-9])'
# Accept any non-empty string that doesn't contain a wildcard character
obj = r'(?P<obj>[^\*\?]+)'
# Check for the Google Storage URLs:
# http://<bucket>.storage.googleapis.com/<object>
# https://<bucket>.storage.googleapis.com/<object>
gs_regex = re.compile(r'\A%s%s\.%s/%s\Z' % (header, bucket, domain, obj))
match = gs_regex.match(url)
if match:
return self._DownloadAuthUrl(url, dest_dir)
# Check for the other possible Google Storage URLs:
# http://storage.googleapis.com/<bucket>/<object>
# https://storage.googleapis.com/<bucket>/<object>
#
# The following are deprecated but checked:
# http://commondatastorage.googleapis.com/<bucket>/<object>
# https://commondatastorage.googleapis.com/<bucket>/<object>
gs_regex = re.compile(
r'\A%s(commondata)?%s/%s/%s\Z' % (header, domain, bucket, obj))
match = gs_regex.match(url)
if match:
return self._DownloadAuthUrl(url, dest_dir)
# Unauthenticated download of the object.
return self._DownloadUrl(url, dest_dir)
| 242,944 |
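Usage note: an illustrative summary of how the checks above route different URL forms, assuming `retriever` is a ScriptRetriever instance; the bucket and object names are made up:

# gs://my-bucket/startup.sh                               -> rewritten, authenticated
# https://my-bucket.storage.googleapis.com/startup.sh     -> authenticated
# https://storage.googleapis.com/my-bucket/startup.sh     -> authenticated
# https://commondatastorage.googleapis.com/my-bucket/obj  -> authenticated (deprecated form)
# https://example.com/startup.sh                          -> unauthenticated fallback
retriever._DownloadScript('gs://my-bucket/startup.sh', '/tmp/scripts')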
Retrieve the scripts from attribute metadata.
Args:
attribute_data: dict, the contents of the attributes metadata.
dest_dir: string, the path to a directory for storing metadata scripts.
Returns:
dict, a dictionary mapping metadata keys to files storing scripts.
|
def _GetAttributeScripts(self, attribute_data, dest_dir):
script_dict = {}
attribute_data = attribute_data or {}
metadata_key = '%s-script' % self.script_type
metadata_value = attribute_data.get(metadata_key)
if metadata_value:
self.logger.info('Found %s in metadata.', metadata_key)
with tempfile.NamedTemporaryFile(
mode='w', dir=dest_dir, delete=False) as dest:
dest.write(metadata_value.lstrip())
script_dict[metadata_key] = dest.name
metadata_key = '%s-script-url' % self.script_type
metadata_value = attribute_data.get(metadata_key)
if metadata_value:
self.logger.info('Found %s in metadata.', metadata_key)
script_dict[metadata_key] = self._DownloadScript(
metadata_value, dest_dir)
return script_dict
| 242,945 |
Retrieve the scripts to execute.
Args:
dest_dir: string, the path to a directory for storing metadata scripts.
Returns:
    dict, a dictionary mapping metadata keys to the retrieved script files.
|
def GetScripts(self, dest_dir):
metadata_dict = self.watcher.GetMetadata() or {}
try:
instance_data = metadata_dict['instance']['attributes']
except KeyError:
instance_data = None
self.logger.warning('Instance attributes were not found.')
try:
project_data = metadata_dict['project']['attributes']
except KeyError:
project_data = None
self.logger.warning('Project attributes were not found.')
return (self._GetAttributeScripts(instance_data, dest_dir)
or self._GetAttributeScripts(project_data, dest_dir))
| 242,946 |
Constructor.
Args:
logger: logger object, used to write to SysLog and serial port.
script_type: string, the type of the script we are running.
default_shell: string, the default shell to execute the script.
|
def __init__(self, logger, script_type, default_shell=None):
self.logger = logger
self.script_type = script_type
self.default_shell = default_shell or '/bin/bash'
| 242,947 |
Add executable permissions to a file.
Args:
metadata_script: string, the path to the executable file.
|
def _MakeExecutable(self, metadata_script):
mode = os.stat(metadata_script).st_mode
os.chmod(metadata_script, mode | stat.S_IEXEC)
| 242,948 |
Run a script and log the streamed script output.
Args:
    metadata_key: string, the key specifying the metadata script.
metadata_script: string, the file location of an executable script.
|
def _RunScript(self, metadata_key, metadata_script):
process = subprocess.Popen(
metadata_script, shell=True,
executable=self.default_shell,
stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
while True:
for line in iter(process.stdout.readline, b''):
message = line.decode('utf-8', 'replace').rstrip('\n')
if message:
self.logger.info('%s: %s', metadata_key, message)
if process.poll() is not None:
break
self.logger.info('%s: Return code %s.', metadata_key, process.returncode)
| 242,949 |
Run the metadata scripts; execute a URL script first if one is provided.
Args:
script_dict: a dictionary mapping metadata keys to script files.
|
def RunScripts(self, script_dict):
metadata_types = ['%s-script-url', '%s-script']
metadata_keys = [key % self.script_type for key in metadata_types]
metadata_keys = [key for key in metadata_keys if script_dict.get(key)]
if not metadata_keys:
self.logger.info('No %s scripts found in metadata.', self.script_type)
for metadata_key in metadata_keys:
metadata_script = script_dict.get(metadata_key)
self._MakeExecutable(metadata_script)
self._RunScript(metadata_key, metadata_script)
| 242,950 |
Constructor.
Args:
config_file: string, the location of the config file.
config_header: string, the message to write at the top of the config.
|
def __init__(self, config_file=None, config_header=None):
self.config_file = config_file or CONFIG
self.config_header = config_header
self.config = parser.Parser()
self.config.read(self.config_file)
| 242,951 |
Create a file header in the config.
Args:
    fp: file object, the file handle used to write the header.
|
def _AddHeader(self, fp):
text = textwrap.wrap(
textwrap.dedent(self.config_header), break_on_hyphens=False)
fp.write('\n'.join(['# ' + line for line in text]))
fp.write('\n\n')
| 242,952 |
Get the value of an option in the config file.
Args:
section: string, the section of the config file to check.
option: string, the option to retrieve the value of.
Returns:
string, the value of the option or None if the option doesn't exist.
|
def GetOptionString(self, section, option):
if self.config.has_option(section, option):
return self.config.get(section, option)
else:
return None
| 242,953 |
Get the value of an option in the config file.
Args:
section: string, the section of the config file to check.
option: string, the option to retrieve the value of.
Returns:
bool, True if the option is enabled or not set.
|
def GetOptionBool(self, section, option):
return (not self.config.has_option(section, option)
or self.config.getboolean(section, option))
| 242,954 |
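Usage note: a sketch of the default-true semantics, assuming `config` is an instance of this config manager:

config.GetOptionBool('InstanceSetup', 'set_host_keys')  # True when the option is absent
config.SetOption('InstanceSetup', 'set_host_keys', 'false')
config.GetOptionBool('InstanceSetup', 'set_host_keys')  # now False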
Set the value of an option in the config file.
Args:
section: string, the section of the config file to check.
option: string, the option to set the value of.
    value: string, the value to set for the option.
overwrite: bool, True to overwrite an existing value in the config file.
|
def SetOption(self, section, option, value, overwrite=True):
if not overwrite and self.config.has_option(section, option):
return
if not self.config.has_section(section):
self.config.add_section(section)
self.config.set(section, option, str(value))
| 242,955 |
Write the config values to a given file.
Args:
config_file: string, the file location of the config file to write.
|
def WriteConfig(self, config_file=None):
config_file = config_file or self.config_file
config_name = os.path.splitext(os.path.basename(config_file))[0]
config_lock = (
'%s/lock/google_%s.lock' % (constants.LOCALSTATEDIR, config_name))
with file_utils.LockFile(config_lock):
with open(config_file, 'w') as config_fp:
if self.config_header:
self._AddHeader(config_fp)
self.config.write(config_fp)
| 242,956 |
Get a logging object with handlers for sending logs to SysLog.
Args:
name: string, the name of the logger which will be added to log entries.
debug: bool, True if debug output should write to the console.
facility: int, an encoding of the SysLog handler's facility and priority.
Returns:
logging object, an object for logging entries.
|
def Logger(name, debug=False, facility=None):
logger = logging.getLogger(name)
logger.handlers = []
logger.addHandler(logging.NullHandler())
logger.propagate = False
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(name + ': %(levelname)s %(message)s')
if debug:
# Create a handler for console logging.
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
if facility:
# Create a handler for sending logs to SysLog.
syslog_handler = logging.handlers.SysLogHandler(
address=constants.SYSLOG_SOCKET, facility=facility)
syslog_handler.setLevel(logging.INFO)
syslog_handler.setFormatter(formatter)
logger.addHandler(syslog_handler)
return logger
| 242,957 |
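Usage note: a sketch of constructing a logger with both console and SysLog output; the daemon name is illustrative:

import logging.handlers

facility = logging.handlers.SysLogHandler.LOG_DAEMON
log = Logger(name='example-daemon', debug=True, facility=facility)
log.info('Daemon started.')   # goes to console and SysLog
log.debug('Verbose detail.')  # console only; the SysLog handler is INFO and above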
Configure a Linux user account.
Args:
user: string, the name of the Linux user account to create.
Returns:
bool, True if user creation succeeded.
|
def _AddUser(self, user):
self.logger.info('Creating a new user account for %s.', user)
command = self.useradd_cmd.format(user=user)
try:
subprocess.check_call(command.split(' '))
except subprocess.CalledProcessError as e:
self.logger.warning('Could not create user %s. %s.', user, str(e))
return False
else:
self.logger.info('Created user account %s.', user)
return True
| 242,960 |
Update group membership for a Linux user.
Args:
user: string, the name of the Linux user account.
    groups: list, the names of the groups the user should be added to.
Returns:
bool, True if user update succeeded.
|
def _UpdateUserGroups(self, user, groups):
groups = ','.join(groups)
self.logger.debug('Updating user %s with groups %s.', user, groups)
command = self.usermod_cmd.format(user=user, groups=groups)
try:
subprocess.check_call(command.split(' '))
except subprocess.CalledProcessError as e:
self.logger.warning('Could not update user %s. %s.', user, str(e))
return False
else:
self.logger.debug('Updated user account %s.', user)
return True
| 242,961 |
Update the authorized keys file for a Linux user with a list of SSH keys.
Args:
user: string, the name of the Linux user account.
ssh_keys: list, the SSH key strings associated with the user.
Raises:
IOError, raised when there is an exception updating a file.
OSError, raised when setting permissions or writing to a read-only
file system.
|
def _UpdateAuthorizedKeys(self, user, ssh_keys):
pw_entry = self._GetUser(user)
if not pw_entry:
return
uid = pw_entry.pw_uid
gid = pw_entry.pw_gid
home_dir = pw_entry.pw_dir
ssh_dir = os.path.join(home_dir, '.ssh')
# Not all sshd's support multiple authorized_keys files so we have to
# share one with the user. We add each of our entries as follows:
# # Added by Google
# authorized_key_entry
authorized_keys_file = os.path.join(ssh_dir, 'authorized_keys')
# Do not write to the authorized keys file if it is a symlink.
if os.path.islink(ssh_dir) or os.path.islink(authorized_keys_file):
self.logger.warning(
'Not updating authorized keys for user %s. File is a symlink.', user)
return
# Create home directory if it does not exist. This can happen if _GetUser
# (getpwnam) returns non-local user info (e.g., from LDAP).
if not os.path.exists(home_dir):
file_utils.SetPermissions(home_dir, mode=0o755, uid=uid, gid=gid,
mkdir=True)
# Create ssh directory if it does not exist.
file_utils.SetPermissions(ssh_dir, mode=0o700, uid=uid, gid=gid, mkdir=True)
# Create entry in the authorized keys file.
prefix = self.logger.name + '-'
with tempfile.NamedTemporaryFile(
mode='w', prefix=prefix, delete=True) as updated_keys:
updated_keys_file = updated_keys.name
if os.path.exists(authorized_keys_file):
lines = open(authorized_keys_file).readlines()
else:
lines = []
google_lines = set()
for i, line in enumerate(lines):
if line.startswith(self.google_comment):
google_lines.update([i, i+1])
# Write user's authorized key entries.
for i, line in enumerate(lines):
if i not in google_lines and line:
line += '\n' if not line.endswith('\n') else ''
updated_keys.write(line)
# Write the Google authorized key entries at the end of the file.
# Each entry is preceded by '# Added by Google'.
for ssh_key in ssh_keys:
ssh_key += '\n' if not ssh_key.endswith('\n') else ''
updated_keys.write('%s\n' % self.google_comment)
updated_keys.write(ssh_key)
# Write buffered data to the updated keys file without closing it and
# update the Linux user's authorized keys file.
updated_keys.flush()
shutil.copy(updated_keys_file, authorized_keys_file)
file_utils.SetPermissions(
authorized_keys_file, mode=0o600, uid=uid, gid=gid)
| 242,962 |
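For reference, the resulting authorized_keys layout looks like the following sketch: user-managed entries are preserved verbatim, previously Google-added pairs are dropped, and the current Google-managed keys are appended, each preceded by the marker comment (key material shortened):

ssh-rsa AAAA... user-added-key
# Added by Google
ssh-ed25519 AAAA... google-managed-key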
Update sudoer group membership for a Linux user account.
Args:
user: string, the name of the Linux user account.
sudoer: bool, True if the user should be a sudoer.
Returns:
bool, True if user update succeeded.
|
def _UpdateSudoer(self, user, sudoer=False):
if sudoer:
self.logger.info('Adding user %s to the Google sudoers group.', user)
command = self.gpasswd_add_cmd.format(
user=user, group=self.google_sudoers_group)
else:
self.logger.info('Removing user %s from the Google sudoers group.', user)
command = self.gpasswd_remove_cmd.format(
user=user, group=self.google_sudoers_group)
try:
subprocess.check_call(command.split(' '))
except subprocess.CalledProcessError as e:
self.logger.warning('Could not update user %s. %s.', user, str(e))
return False
else:
        self.logger.debug('Updated Google sudoers group membership for user %s.', user)
return True
| 242,963 |
Remove a Linux user account's authorized keys file to prevent login.
Args:
user: string, the Linux user account to remove access.
|
def _RemoveAuthorizedKeys(self, user):
pw_entry = self._GetUser(user)
if not pw_entry:
return
home_dir = pw_entry.pw_dir
authorized_keys_file = os.path.join(home_dir, '.ssh', 'authorized_keys')
if os.path.exists(authorized_keys_file):
try:
os.remove(authorized_keys_file)
except OSError as e:
message = 'Could not remove authorized keys for user %s. %s.'
self.logger.warning(message, user, str(e))
| 242,964 |
Set the list of configured Google user accounts.
Args:
users: list, the username strings of the Linux accounts.
|
def SetConfiguredUsers(self, users):
prefix = self.logger.name + '-'
with tempfile.NamedTemporaryFile(
mode='w', prefix=prefix, delete=True) as updated_users:
updated_users_file = updated_users.name
for user in users:
updated_users.write(user + '\n')
updated_users.flush()
if not os.path.exists(self.google_users_dir):
os.makedirs(self.google_users_dir)
shutil.copy(updated_users_file, self.google_users_file)
file_utils.SetPermissions(self.google_users_file, mode=0o600, uid=0, gid=0)
| 242,966 |
Update a Linux user with authorized SSH keys.
Args:
user: string, the name of the Linux user account.
ssh_keys: list, the SSH key strings associated with the user.
Returns:
bool, True if the user account updated successfully.
|
def UpdateUser(self, user, ssh_keys):
if not bool(USER_REGEX.match(user)):
self.logger.warning('Invalid user account name %s.', user)
return False
if not self._GetUser(user):
# User does not exist. Attempt to create the user and add them to the
# appropriate user groups.
if not (self._AddUser(user)
and self._UpdateUserGroups(user, self.groups)):
return False
# Add the user to the google sudoers group.
if not self._UpdateSudoer(user, sudoer=True):
return False
# Don't try to manage account SSH keys with a shell set to disable
# logins. This helps avoid problems caused by operator and root sharing
# a home directory in CentOS and RHEL.
pw_entry = self._GetUser(user)
if pw_entry and os.path.basename(pw_entry.pw_shell) == 'nologin':
message = 'Not updating user %s. User set `nologin` as login shell.'
self.logger.debug(message, user)
return True
try:
self._UpdateAuthorizedKeys(user, ssh_keys)
except (IOError, OSError) as e:
message = 'Could not update the authorized keys file for user %s. %s.'
self.logger.warning(message, user, str(e))
return False
else:
return True
| 242,967 |
Remove a Linux user account.
Args:
user: string, the Linux user account to remove.
|
def RemoveUser(self, user):
self.logger.info('Removing user %s.', user)
if self.remove:
command = self.userdel_cmd.format(user=user)
try:
subprocess.check_call(command.split(' '))
except subprocess.CalledProcessError as e:
self.logger.warning('Could not remove user %s. %s.', user, str(e))
else:
self.logger.info('Removed user account %s.', user)
self._RemoveAuthorizedKeys(user)
self._UpdateSudoer(user, sudoer=False)
| 242,968 |
Constructor.
Args:
logger: logger object, used to write to SysLog and serial port.
|
def __init__(self, logger):
self.logger = logger
self.oslogin_installed = True
self.update_time = 0
| 242,969 |
Run the OS Login control script.
Args:
    params: list, the params to pass to the script.
Returns:
int, the return code from the call, or None if the script is not found.
|
def _RunOsLoginControl(self, params):
try:
return subprocess.call([constants.OSLOGIN_CONTROL_SCRIPT] + params)
except OSError as e:
if e.errno == errno.ENOENT:
return None
else:
raise
| 242,970 |
Check the status of OS Login, or of OS Login two factor authentication.
Args:
    two_factor: bool, True to check the two factor authentication status.
Returns:
    bool, True if the feature is enabled; None if OS Login is not installed.
|
def _GetStatus(self, two_factor=False):
params = ['status']
if two_factor:
params += ['--twofactor']
retcode = self._RunOsLoginControl(params)
if retcode is None:
if self.oslogin_installed:
self.logger.warning('OS Login not installed.')
self.oslogin_installed = False
return None
# Prevent log spam when OS Login is not installed.
self.oslogin_installed = True
if not os.path.exists(constants.OSLOGIN_NSS_CACHE):
return False
return not retcode
| 242,971 |
Update whether OS Login is enabled and update NSS cache if necessary.
Args:
oslogin_desired: bool, enable OS Login if True, disable if False.
two_factor_desired: bool, enable two factor if True, disable if False.
Returns:
int, the return code from updating OS Login, or None if not present.
|
def UpdateOsLogin(self, oslogin_desired, two_factor_desired=False):
oslogin_configured = self._GetStatus(two_factor=False)
if oslogin_configured is None:
return None
two_factor_configured = self._GetStatus(two_factor=True)
# Two factor can only be enabled when OS Login is enabled.
two_factor_desired = two_factor_desired and oslogin_desired
if oslogin_desired:
params = ['activate']
if two_factor_desired:
params += ['--twofactor']
# OS Login is desired and not enabled.
if not oslogin_configured:
self.logger.info('Activating OS Login.')
return self._RunOsLoginControl(params) or self._RunOsLoginNssCache()
# Enable two factor authentication.
if two_factor_desired and not two_factor_configured:
self.logger.info('Activating OS Login two factor authentication.')
return self._RunOsLoginControl(params) or self._RunOsLoginNssCache()
# Deactivate two factor authentication.
if two_factor_configured and not two_factor_desired:
self.logger.info('Reactivating OS Login with two factor disabled.')
return (self._RunOsLoginControl(['deactivate'])
or self._RunOsLoginControl(params))
# OS Login features are already enabled. Update the cache if appropriate.
current_time = time.time()
if current_time - self.update_time > NSS_CACHE_DURATION_SEC:
self.update_time = current_time
return self._RunOsLoginNssCache()
elif oslogin_configured:
self.logger.info('Deactivating OS Login.')
return (self._RunOsLoginControl(['deactivate'])
or self._RemoveOsLoginNssCache())
# No action was needed.
return 0
| 242,974 |
Configure the network interfaces using dhclient.
Args:
interfaces: list of string, the output device names to enable.
logger: logger object, used to write to SysLog and serial port.
dhclient_script: string, the path to a dhclient script used by dhclient.
|
def CallDhclient(
interfaces, logger, dhclient_script=None):
logger.info('Enabling the Ethernet interfaces %s.', interfaces)
dhclient_command = ['dhclient']
if dhclient_script and os.path.exists(dhclient_script):
dhclient_command += ['-sf', dhclient_script]
try:
subprocess.check_call(dhclient_command + ['-x'] + interfaces)
subprocess.check_call(dhclient_command + interfaces)
except subprocess.CalledProcessError:
logger.warning('Could not enable interfaces %s.', interfaces)
| 242,975 |
Sync clock using hwclock.
Args:
logger: logger object, used to write to SysLog and serial port.
|
def CallHwclock(logger):
command = ['/sbin/hwclock', '--hctosys']
try:
subprocess.check_call(command)
except subprocess.CalledProcessError:
logger.warning('Failed to sync system time with hardware clock.')
else:
logger.info('Synced system time with hardware clock.')
| 242,976 |
Sync clock using ntpdate.
Args:
logger: logger object, used to write to SysLog and serial port.
|
def CallNtpdate(logger):
ntpd_inactive = subprocess.call(['service', 'ntpd', 'status'])
try:
if not ntpd_inactive:
subprocess.check_call(['service', 'ntpd', 'stop'])
subprocess.check_call(
'ntpdate `awk \'$1=="server" {print $2}\' /etc/ntp.conf`', shell=True)
if not ntpd_inactive:
subprocess.check_call(['service', 'ntpd', 'start'])
except subprocess.CalledProcessError:
logger.warning('Failed to sync system time with ntp server.')
else:
logger.info('Synced system time with ntp server.')
| 242,977 |
Constructor.
Args:
dhclient_script: string, the path to a dhclient script used by dhclient.
dhcp_command: string, a command to enable Ethernet interfaces.
debug: bool, True if debug output should write to the console.
|
def __init__(self, dhclient_script=None, dhcp_command=None, debug=False):
self.dhclient_script = dhclient_script or '/sbin/google-dhclient-script'
self.dhcp_command = dhcp_command
facility = logging.handlers.SysLogHandler.LOG_DAEMON
self.logger = logger.Logger(
name='network-setup', debug=debug, facility=facility)
self.distro_utils = distro_utils.Utils(debug=debug)
| 242,978 |
Enable the list of network interfaces.
Args:
interfaces: list of string, the output device names to enable.
|
def EnableNetworkInterfaces(self, interfaces):
# The default Ethernet interface is enabled by default. Do not attempt to
# enable interfaces if only one interface is specified in metadata.
if not interfaces or set(interfaces) == self.interfaces:
return
self.logger.info('Ethernet interfaces: %s.', interfaces)
self.interfaces = set(interfaces)
if self.dhcp_command:
try:
subprocess.check_call([self.dhcp_command])
except subprocess.CalledProcessError:
self.logger.warning('Could not enable Ethernet interfaces.')
return
# Distro-specific setup for network interfaces.
self.distro_utils.EnableNetworkInterfaces(
interfaces, self.logger, dhclient_script=self.dhclient_script)
| 242,979 |
Enable the list of network interfaces.
Args:
interfaces: list of string, the output device names to enable.
logger: logger object, used to write to SysLog and serial port.
dhclient_script: string, the path to a dhclient script used by dhclient.
|
def EnableNetworkInterfaces(
self, interfaces, logger, dhclient_script=None):
helpers.CallDhclient(interfaces, logger, dhclient_script=dhclient_script)
| 242,980 |
Constructor.
Args:
project_id: string, the project ID to use in the config file.
debug: bool, True if debug output should write to the console.
|
def __init__(self, project_id=None, debug=False):
self.logger = logger.Logger(name='boto-setup', debug=debug)
self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
self._CreateConfig(project_id)
| 242,981 |
Create the boto config to support standalone GSUtil.
Args:
project_id: string, the project ID to use in the config file.
|
def _CreateConfig(self, project_id):
project_id = project_id or self._GetNumericProjectId()
# Our project doesn't support service accounts.
if not project_id:
return
self.boto_config_header %= (
self.boto_config_script, self.boto_config_template)
config = config_manager.ConfigManager(
config_file=self.boto_config_template,
config_header=self.boto_config_header)
boto_dir = os.path.dirname(self.boto_config_script)
config.SetOption('GSUtil', 'default_project_id', project_id)
config.SetOption('GSUtil', 'default_api_version', '2')
config.SetOption('GoogleCompute', 'service_account', 'default')
config.SetOption('Plugin', 'plugin_directory', boto_dir)
config.WriteConfig(config_file=self.boto_config)
| 242,983 |
Constructor.
Inherit from the ConfigManager class. Read the template for instance
defaults and write new sections and options. This prevents package
updates from overriding user set defaults.
Args:
logger: logger object, used to write to SysLog and serial port.
instance_config_metadata: string, a config file specified in metadata.
|
def __init__(self, logger=logging, instance_config_metadata=None):
self.logger = logger
self.instance_config_metadata = instance_config_metadata
self.instance_config_header %= (
self.instance_config_script, self.instance_config_template)
# User provided instance configs should always take precedence.
super(InstanceConfig, self).__init__(
config_file=self.instance_config_template,
config_header=self.instance_config_header)
# Use the instance config settings from metadata if specified. Then use
# settings in an instance config file if one exists. If a config
# file does not already exist, try to use the distro provided defaults. If
# no file exists, use the default configuration settings.
config_files = [self.instance_config, self.instance_config_distro]
config_defaults = []
if self.instance_config_metadata:
config = parser.Parser()
try:
config.read_file(stringio.StringIO(self.instance_config_metadata))
except parser.Error as e:
self.logger.error('Error parsing metadata configs: %s', str(e))
else:
config_defaults.append(
dict((s, dict(config.items(s))) for s in config.sections()))
for config_file in config_files:
if os.path.exists(config_file):
config = parser.Parser()
try:
config.read(config_file)
except parser.Error as e:
self.logger.error('Error parsing config file: %s', str(e))
else:
config_defaults.append(
dict((s, dict(config.items(s))) for s in config.sections()))
config_defaults.append(self.instance_config_options)
for defaults in config_defaults:
for section, options in sorted(defaults.items()):
for option, value in sorted(options.items()):
super(InstanceConfig, self).SetOption(
section, option, value, overwrite=False)
| 242,984 |
Constructor.
Args:
logger: logger object, used to write to SysLog and serial port.
proto_id: string, the routing protocol identifier for Google IP changes.
|
def __init__(self, logger, proto_id=None):
self.logger = logger
self.proto_id = proto_id or '66'
| 242,985 |
Create a dictionary of parameters to append to the ip route command.
Args:
**kwargs: dict, the string parameters to update in the ip route command.
Returns:
dict, the string parameters to append to the ip route command.
|
def _CreateRouteOptions(self, **kwargs):
options = {
'proto': self.proto_id,
'scope': 'host',
}
options.update(kwargs)
return options
| 242,986 |
Run a command with ip route and return the response.
Args:
args: list, the string ip route command args to execute.
options: dict, the string parameters to append to the ip route command.
Returns:
string, the standard output from the ip route command execution.
|
def _RunIpRoute(self, args=None, options=None):
args = args or []
options = options or {}
command = ['ip', 'route']
command.extend(args)
for item in options.items():
command.extend(item)
try:
process = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
except OSError as e:
self.logger.warning('Exception running %s. %s.', command, str(e))
else:
if process.returncode:
message = 'Non-zero exit status running %s. %s.'
self.logger.warning(message, command, stderr.strip())
else:
return stdout.decode('utf-8', 'replace')
return ''
| 242,987 |
Parse and validate forwarded IP addresses.
Args:
forwarded_ips: list, the IP address strings to parse.
Returns:
list, the valid IP address strings.
|
def ParseForwardedIps(self, forwarded_ips):
addresses = []
forwarded_ips = forwarded_ips or []
for ip in forwarded_ips:
if ip and (IP_REGEX.match(ip) or IP_ALIAS_REGEX.match(ip)):
addresses.append(ip[:-3] if ip.endswith('/32') else ip)
else:
self.logger.warning('Could not parse IP address: "%s".', ip)
return addresses
| 242,988 |
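Usage note: a sketch of the expected behavior, assuming IP_REGEX matches bare IPv4 addresses and IP_ALIAS_REGEX matches CIDR ranges (the actual patterns are defined elsewhere in the module):

forwarding.ParseForwardedIps(['10.0.0.1', '10.0.0.2/32', 'not-an-ip'])
# -> ['10.0.0.1', '10.0.0.2']  (a trailing /32 is stripped; junk is logged and skipped)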
Retrieve the list of configured forwarded IP addresses.
Args:
interface: string, the output device to query.
interface_ip: string, current interface ip address.
Returns:
list, the IP address strings.
|
def GetForwardedIps(self, interface, interface_ip=None):
args = ['ls', 'table', 'local', 'type', 'local']
options = self._CreateRouteOptions(dev=interface)
result = self._RunIpRoute(args=args, options=options)
result = re.sub(r'local\s', r'', result)
return self.ParseForwardedIps(result.split())
| 242,989 |
Configure a new IP address on the network interface.
Args:
address: string, the IP address to configure.
interface: string, the output device to use.
|
def AddForwardedIp(self, address, interface):
address = address if IP_ALIAS_REGEX.match(address) else '%s/32' % address
args = ['add', 'to', 'local', address]
options = self._CreateRouteOptions(dev=interface)
self._RunIpRoute(args=args, options=options)
| 242,990 |
Parse and validate forwarded IP addresses.
Args:
forwarded_ips: list, the IP address strings to parse.
Returns:
list, the valid IP address strings.
|
def ParseForwardedIps(self, forwarded_ips):
addresses = []
forwarded_ips = forwarded_ips or []
for ip in forwarded_ips:
if ip and (IP_REGEX.match(ip) or IP_ALIAS_REGEX.match(ip)):
addresses.extend([str(addr) for addr in list(netaddr.IPNetwork(ip))])
else:
self.logger.warning('Could not parse IP address: "%s".', ip)
return addresses
| 242,991 |
Retrieve the list of configured forwarded IP addresses.
Args:
interface: string, the output device to query.
interface_ip: string, current interface ip address.
Returns:
list, the IP address strings.
|
def GetForwardedIps(self, interface, interface_ip=None):
try:
ips = netifaces.ifaddresses(interface)
ips = ips[netifaces.AF_INET]
except (ValueError, IndexError):
return []
forwarded_ips = []
for ip in ips:
if ip['addr'] != interface_ip:
full_addr = '%s/%d' % (ip['addr'], netaddr.IPAddress(ip['netmask']).netmask_bits())
forwarded_ips.append(full_addr)
return self.ParseForwardedIps(forwarded_ips)
| 242,992 |
Configure a new IP address on the network interface.
Args:
address: string, the IP address to configure.
interface: string, the output device to use.
|
def AddForwardedIp(self, address, interface):
for ip in list(netaddr.IPNetwork(address)):
self._RunIfconfig(args=[interface, 'alias', '%s/32' % str(ip)])
| 242,993 |
Delete an IP address on the network interface.
Args:
    address: string, the IP address to remove.
interface: string, the output device to use.
|
def RemoveForwardedIp(self, address, interface):
ip = netaddr.IPNetwork(address)
self._RunIfconfig(args=[interface, '-alias', str(ip.ip)])
| 242,994 |
Constructor.
Args:
debug: bool, True if debug output should write to the console.
|
def __init__(self, debug=False):
facility = logging.handlers.SysLogHandler.LOG_DAEMON
self.logger = logger.Logger(
name='google-clock-skew', debug=debug, facility=facility)
self.distro_utils = distro_utils.Utils(debug=debug)
self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
try:
with file_utils.LockFile(LOCKFILE):
self.logger.info('Starting Google Clock Skew daemon.')
self.watcher.WatchMetadata(
self.HandleClockSync, metadata_key=self.drift_token,
recursive=False)
except (IOError, OSError) as e:
self.logger.warning(str(e))
| 242,999 |
Called when clock drift token changes.
Args:
response: string, the metadata response with the new drift token value.
|
def HandleClockSync(self, response):
self.logger.info('Clock drift token has changed: %s.', response)
self.distro_utils.HandleClockSync(self.logger)
| 243,000 |
Enable the list of network interfaces.
Args:
interfaces: list of string, the output device names to enable.
logger: logger object, used to write to SysLog and serial port.
dhclient_script: string, the path to a dhclient script used by dhclient.
|
def EnableNetworkInterfaces(
self, interfaces, logger, dhclient_script=None):
# Should always exist in EL 7.
if os.path.exists(self.network_path):
self._DisableNetworkManager(interfaces, logger)
helpers.CallDhclient(interfaces, logger)
| 243,001 |
Disable network manager management on a list of network interfaces.
Args:
    interfaces: list of string, the output device names to enable.
logger: logger object, used to write to SysLog and serial port.
|
def _DisableNetworkManager(self, interfaces, logger):
for interface in interfaces:
interface_config = os.path.join(
self.network_path, 'ifcfg-%s' % interface)
if os.path.exists(interface_config):
self._ModifyInterface(
interface_config, 'DEVICE', interface, replace=False)
self._ModifyInterface(
interface_config, 'NM_CONTROLLED', 'no', replace=True)
else:
with open(interface_config, 'w') as interface_file:
interface_content = [
'# Added by Google.',
'BOOTPROTO=none',
'DEFROUTE=no',
'DEVICE=%s' % interface,
'IPV6INIT=no',
'NM_CONTROLLED=no',
'NOZEROCONF=yes',
'',
]
interface_file.write('\n'.join(interface_content))
logger.info('Created config file for interface %s.', interface)
| 243,002 |
Write a value to a config file if not already present.
Args:
interface_config: string, the path to a config file.
config_key: string, the configuration key to set.
config_value: string, the value to set for the configuration key.
replace: bool, replace the configuration option if already present.
|
def _ModifyInterface(
self, interface_config, config_key, config_value, replace=False):
config_entry = '%s=%s' % (config_key, config_value)
    if config_key not in open(interface_config).read():
with open(interface_config, 'a') as config:
config.write('%s\n' % config_entry)
elif replace:
for line in fileinput.input(interface_config, inplace=True):
print(re.sub(r'%s=.*' % config_key, config_entry, line.rstrip()))
| 243,003 |
Parse the SSH key data into a user map.
Args:
account_data: string, the metadata server SSH key attributes data.
Returns:
dict, a mapping of the form: {'username': ['sshkey1, 'sshkey2', ...]}.
|
def _ParseAccountsData(self, account_data):
if not account_data:
return {}
lines = [line for line in account_data.splitlines() if line]
user_map = {}
for line in lines:
if not all(ord(c) < 128 for c in line):
self.logger.info('SSH key contains non-ascii character: %s.', line)
continue
split_line = line.split(':', 1)
if len(split_line) != 2:
self.logger.info('SSH key is not a complete entry: %s.', split_line)
continue
user, key = split_line
if self._HasExpired(key):
self.logger.debug('Expired SSH key for user %s: %s.', user, key)
continue
if user not in user_map:
user_map[user] = []
user_map[user].append(key)
    self.logger.debug('User accounts: %s.', user_map)
return user_map
| 243,007 |
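Usage note: a sketch of the expected metadata format, one 'username:ssh-key' entry per line (keys shortened, `accounts` is the daemon instance):

account_data = ('alice:ssh-rsa AAAA... alice@host\n'
                'bob:ssh-ed25519 AAAA... bob@host')
accounts._ParseAccountsData(account_data)
# -> {'alice': ['ssh-rsa AAAA... alice@host'],
#     'bob': ['ssh-ed25519 AAAA... bob@host']}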
Get dictionaries for instance and project attributes.
Args:
metadata_dict: json, the deserialized contents of the metadata server.
Returns:
tuple, two dictionaries for instance and project attributes.
|
def _GetInstanceAndProjectAttributes(self, metadata_dict):
metadata_dict = metadata_dict or {}
try:
instance_data = metadata_dict['instance']['attributes']
except KeyError:
instance_data = {}
self.logger.warning('Instance attributes were not found.')
try:
project_data = metadata_dict['project']['attributes']
except KeyError:
project_data = {}
self.logger.warning('Project attributes were not found.')
return instance_data, project_data
| 243,008 |
Get the user accounts specified in metadata server contents.
Args:
metadata_dict: json, the deserialized contents of the metadata server.
Returns:
dict, a mapping of the form: {'username': ['sshkey1, 'sshkey2', ...]}.
|
def _GetAccountsData(self, metadata_dict):
instance_data, project_data = self._GetInstanceAndProjectAttributes(
metadata_dict)
valid_keys = [instance_data.get('sshKeys'), instance_data.get('ssh-keys')]
block_project = instance_data.get('block-project-ssh-keys', '').lower()
if block_project != 'true' and not instance_data.get('sshKeys'):
valid_keys.append(project_data.get('ssh-keys'))
valid_keys.append(project_data.get('sshKeys'))
accounts_data = '\n'.join([key for key in valid_keys if key])
return self._ParseAccountsData(accounts_data)
| 243,009 |
Provision and update Linux user accounts based on account metadata.
Args:
update_users: dict, authorized users mapped to their public SSH keys.
|
def _UpdateUsers(self, update_users):
for user, ssh_keys in update_users.items():
if not user or user in self.invalid_users:
continue
configured_keys = self.user_ssh_keys.get(user, [])
if set(ssh_keys) != set(configured_keys):
if not self.utils.UpdateUser(user, ssh_keys):
self.invalid_users.add(user)
else:
self.user_ssh_keys[user] = ssh_keys[:]
| 243,010 |
Deprovision Linux user accounts that do not appear in account metadata.
Args:
remove_users: list, the username strings of the Linux accounts to remove.
|
def _RemoveUsers(self, remove_users):
for username in remove_users:
self.utils.RemoveUser(username)
self.user_ssh_keys.pop(username, None)
self.invalid_users -= set(remove_users)
| 243,011 |
Get the value of the enable-oslogin metadata key.
Args:
metadata_dict: json, the deserialized contents of the metadata server.
Returns:
bool, True if OS Login is enabled for VM access.
|
def _GetEnableOsLoginValue(self, metadata_dict):
instance_data, project_data = self._GetInstanceAndProjectAttributes(
metadata_dict)
instance_value = instance_data.get('enable-oslogin')
project_value = project_data.get('enable-oslogin')
value = instance_value or project_value or ''
return value.lower() == 'true'
| 243,012 |
Called when there are changes to the contents of the metadata server.
Args:
result: json, the deserialized contents of the metadata server.
|
def HandleAccounts(self, result):
self.logger.debug('Checking for changes to user accounts.')
configured_users = self.utils.GetConfiguredUsers()
enable_oslogin = self._GetEnableOsLoginValue(result)
enable_two_factor = self._GetEnableTwoFactorValue(result)
if enable_oslogin:
desired_users = {}
self.oslogin.UpdateOsLogin(True, two_factor_desired=enable_two_factor)
else:
desired_users = self._GetAccountsData(result)
self.oslogin.UpdateOsLogin(False)
remove_users = sorted(set(configured_users) - set(desired_users.keys()))
self._UpdateUsers(desired_users)
self._RemoveUsers(remove_users)
self.utils.SetConfiguredUsers(desired_users.keys())
| 243,013 |
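The reconciliation above reduces to a set difference; a sketch with hypothetical users:

configured_users = ['alice', 'bob']                   # provisioned earlier
desired_users = {'alice': ['ssh-rsa AAAA... alice']}  # parsed from metadata
remove_users = sorted(set(configured_users) - set(desired_users.keys()))
print(remove_users)  # ['bob'] is deprovisioned; 'alice' is updated in place.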
Set the appropriate SELinux context, if SELinux tools are installed.
Calls /sbin/restorecon on the provided path to set the SELinux context as
specified by policy. This call does not operate recursively.
Only some OS configurations use SELinux. It is therefore acceptable for
restorecon to be missing, in which case we do nothing.
Args:
path: string, the path on which to fix the SELinux context.
|
def _SetSELinuxContext(path):
restorecon = '/sbin/restorecon'
if os.path.isfile(restorecon) and os.access(restorecon, os.X_OK):
subprocess.call([restorecon, path])
| 243,014 |
Set the permissions and ownership of a path.
Args:
path: string, the path whose ownership and permissions are to be set.
mode: octal string, the permissions to set on the path.
uid: int, the owner ID to be set for the path.
gid: int, the group ID to be set for the path.
mkdir: bool, True if the directory needs to be created.
|
def SetPermissions(path, mode=None, uid=None, gid=None, mkdir=False):
  if mkdir and not os.path.exists(path):
    # Note: the mode passed to os.mkdir is masked by the process umask.
    os.mkdir(path, mode or 0o777)
elif mode:
os.chmod(path, mode)
if uid and gid:
os.chown(path, uid, gid)
_SetSELinuxContext(path)
| 243,015 |
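Hypothetical usage (the path is made up; chown requires root, and restorecon runs only if SELinux tools are installed):

SetPermissions('/var/lib/example-dir', mode=0o755, uid=0, gid=0, mkdir=True)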
Lock the provided file descriptor.
Args:
fd: int, the file descriptor of the file to lock.
path: string, the name of the file to lock.
blocking: bool, True to block until the lock is available; False to raise IOError immediately if the file is already locked.
Raises:
IOError, raised from flock while attempting to lock a file.
|
def Lock(fd, path, blocking):
operation = fcntl.LOCK_EX if blocking else fcntl.LOCK_EX | fcntl.LOCK_NB
try:
fcntl.flock(fd, operation)
except IOError as e:
if e.errno == errno.EWOULDBLOCK:
raise IOError('Exception locking %s. File already locked.' % path)
else:
raise IOError('Exception locking %s. %s.' % (path, str(e)))
| 243,016 |
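A quick demonstration of the non-blocking path, assuming Linux flock semantics (descriptors from separate open calls are treated independently, so the second exclusive lock attempt fails with EWOULDBLOCK even within one process):

import os

fd1 = os.open('/tmp/demo.lock', os.O_CREAT)
Lock(fd1, '/tmp/demo.lock', blocking=False)  # acquires the lock
fd2 = os.open('/tmp/demo.lock', os.O_CREAT)
try:
  Lock(fd2, '/tmp/demo.lock', blocking=False)
except IOError as e:
  print(e)  # Exception locking /tmp/demo.lock. File already locked.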
Release the lock on the file.
Args:
fd: int, the file descriptor of the file to unlock.
path: string, the name of the file to unlock.
Raises:
IOError, raised from flock while attempting to release a file lock.
|
def Unlock(fd, path):
try:
fcntl.flock(fd, fcntl.LOCK_UN | fcntl.LOCK_NB)
except IOError as e:
if e.errno == errno.EWOULDBLOCK:
raise IOError('Exception unlocking %s. Locked by another process.' % path)
else:
raise IOError('Exception unlocking %s. %s.' % (path, str(e)))
| 243,017 |
Interface to flock-based file locking to prevent concurrent executions.
Args:
path: string, the name of the file to lock.
blocking: bool, True to block until the lock is available; False to raise IOError immediately if the file is already locked.
Yields:
None, yields when a lock on the file is obtained.
Raises:
IOError, raised from flock locking operations on a file.
OSError, raised from file operations.
|
@contextlib.contextmanager  # generator-based lock guard; needs `import contextlib`
def LockFile(path, blocking=False):
fd = os.open(path, os.O_CREAT)
try:
Lock(fd, path, blocking)
yield
finally:
try:
Unlock(fd, path)
finally:
os.close(fd)
| 243,018 |
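Because LockFile yields, it only works in a `with` statement once wrapped with contextlib.contextmanager (the decorator added above); hypothetical usage guarding a critical section:

with LockFile('/var/lock/example.lock'):
  pass  # Only one process at a time enters; with blocking=False, others raise IOError.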
Constructor.
Args:
logger: logger object, used to write to SysLog and serial port.
timeout: int, timeout in seconds for metadata requests.
|
def __init__(self, logger=None, timeout=60):
self.etag = 0
self.logger = logger or logging
self.timeout = timeout
| 243,021 |
Performs a GET request with the metadata headers.
Args:
metadata_url: string, the URL to perform a GET request on.
params: dictionary, the query parameters in the GET request.
timeout: int, timeout in seconds for metadata requests.
Returns:
HTTP response from the GET request.
Raises:
urlerror.HTTPError: raises when the GET request fails.
|
def _GetMetadataRequest(self, metadata_url, params=None, timeout=None):
headers = {'Metadata-Flavor': 'Google'}
params = urlparse.urlencode(params or {})
url = '%s?%s' % (metadata_url, params)
request = urlrequest.Request(url, headers=headers)
request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({}))
timeout = timeout or self.timeout
  # Pad the socket timeout slightly so the server-side wait (timeout_sec)
  # can expire and respond before the client connection gives up.
  return request_opener.open(request, timeout=timeout * 1.1)
| 243,022 |
Update the etag from an API response.
Args:
response: HTTP response object whose headers may include an etag.
Returns:
bool, True if the etag in the response header was updated.
|
def _UpdateEtag(self, response):
etag = response.headers.get('etag', self.etag)
etag_updated = self.etag != etag
self.etag = etag
return etag_updated
| 243,023 |
Request the contents of metadata server and deserialize the response.
Args:
metadata_key: string, the metadata key to watch for changes.
recursive: bool, True if we should recursively watch for metadata changes.
wait: bool, True if we should wait for a metadata change.
timeout: int, timeout in seconds for returning metadata output.
Returns:
json, the deserialized contents of the metadata server.
|
def _GetMetadataUpdate(
self, metadata_key='', recursive=True, wait=True, timeout=None):
  # A trailing slash requests the whole subtree under metadata_key.
  metadata_key = os.path.join(metadata_key, '') if recursive else metadata_key
metadata_url = os.path.join(METADATA_SERVER, metadata_key)
params = {
'alt': 'json',
'last_etag': self.etag,
'recursive': recursive,
'timeout_sec': timeout or self.timeout,
'wait_for_change': wait,
}
while True:
response = self._GetMetadataRequest(
metadata_url, params=params, timeout=timeout)
etag_updated = self._UpdateEtag(response)
if wait and not etag_updated and not timeout:
# Retry until the etag is updated.
continue
else:
# One of the following are true:
# - Waiting for change is not required.
# - The etag is updated.
# - The user specified a request timeout.
break
return json.loads(response.read().decode('utf-8'))
| 243,024 |
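An illustrative reconstruction of the long-poll URL the loop above requests. The host and etag value are assumptions for the example (the module's METADATA_SERVER constant is defined outside this excerpt), and plain urllib.parse stands in for the six-style urlparse alias:

import urllib.parse

metadata_server = 'http://metadata.google.internal/computeMetadata/v1'
params = {
    'alt': 'json',
    'last_etag': '8ba4fde',
    'recursive': True,
    'timeout_sec': 60,
    'wait_for_change': True,
}
print('%s/?%s' % (metadata_server, urllib.parse.urlencode(params)))
# http://metadata.google.internal/computeMetadata/v1/?alt=json&last_etag=...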
Wait for a successful metadata response.
Args:
metadata_key: string, the metadata key to watch for changes.
recursive: bool, True if we should recursively watch for metadata changes.
wait: bool, True if we should wait for a metadata change.
timeout: int, timeout in seconds for returning metadata output.
retry: bool, True if we should retry on failure.
Returns:
json, the deserialized contents of the metadata server.
|
def _HandleMetadataUpdate(
self, metadata_key='', recursive=True, wait=True, timeout=None,
retry=True):
exception = None
while True:
try:
return self._GetMetadataUpdate(
metadata_key=metadata_key, recursive=recursive, wait=wait,
timeout=timeout)
    except (httpclient.HTTPException, socket.error, urlerror.URLError) as e:
      # Log only when the exception type changes, so repeated failures
      # during retries do not flood the logs.
      if not isinstance(e, type(exception)):
        exception = e
        self.logger.error('GET request error retrieving metadata. %s.', e)
if retry:
continue
else:
break
| 243,025 |
Watch for changes to the contents of the metadata server.
Args:
handler: callable, a function to call with the updated metadata contents.
metadata_key: string, the metadata key to watch for changes.
recursive: bool, True if we should recursively watch for metadata changes.
timeout: int, timeout in seconds for returning metadata output.
|
def WatchMetadata(
self, handler, metadata_key='', recursive=True, timeout=None):
while True:
response = self._HandleMetadataUpdate(
metadata_key=metadata_key, recursive=recursive, wait=True,
timeout=timeout)
try:
handler(response)
except Exception as e:
self.logger.exception('Exception calling the response handler. %s.', e)
| 243,026 |
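Hypothetical usage, assuming the methods above belong to a MetadataWatcher class (the class definition is not shown in this excerpt). The handler fires once per etag change, and handler exceptions are logged without stopping the loop:

def on_change(response):
  print('metadata changed:', sorted(response.keys()))

watcher = MetadataWatcher()
# Blocks forever; typically run from a dedicated daemon process.
watcher.WatchMetadata(on_change, metadata_key='instance/attributes')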
Retrieve the contents of metadata server for a metadata key.
Args:
metadata_key: string, the metadata key to retrieve.
recursive: bool, True if we should recursively retrieve metadata contents.
timeout: int, timeout in seconds for returning metadata output.
retry: bool, True if we should retry on failure.
Returns:
json, the deserialized contents of the metadata server or None if error.
|
def GetMetadata(
self, metadata_key='', recursive=True, timeout=None, retry=True):
return self._HandleMetadataUpdate(
metadata_key=metadata_key, recursive=recursive, wait=False,
timeout=timeout, retry=retry)
| 243,027 |
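By contrast, a one-shot, non-blocking read (wait=False internally) that gives up after the first failure, again assuming the hypothetical MetadataWatcher class:

metadata = MetadataWatcher().GetMetadata(timeout=5, retry=False)
if metadata is None:
  print('metadata server unreachable')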
Constructor.
Args:
proto_id: string, the routing protocol identifier for Google IP changes.
debug: bool, True if debug output should write to the console.
|
def __init__(self, proto_id=None, debug=False):
facility = logging.handlers.SysLogHandler.LOG_DAEMON
self.logger = logger.Logger(
name='google-ip-forwarding', debug=debug, facility=facility)
self.ip_forwarding_utils = ip_forwarding_utils.IpForwardingUtils(
logger=self.logger, proto_id=proto_id)
| 243,028 |
Log the planned IP address changes.
Args:
configured: list, the IP address strings already configured.
desired: list, the IP address strings that will be configured.
to_add: list, the forwarded IP address strings to configure.
to_remove: list, the forwarded IP address strings to delete.
interface: string, the output device to modify.
|
def _LogForwardedIpChanges(
self, configured, desired, to_add, to_remove, interface):
if not to_add and not to_remove:
return
self.logger.info(
'Changing %s IPs from %s to %s by adding %s and removing %s.',
interface, configured or None, desired or None, to_add or None,
to_remove or None)
| 243,029 |
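The to_add and to_remove arguments are typically the two set differences between the configured and desired address lists; a sketch with made-up addresses:

configured = ['10.0.0.1', '10.0.0.2']
desired = ['10.0.0.2', '10.0.0.3']
to_add = sorted(set(desired) - set(configured))     # ['10.0.0.3']
to_remove = sorted(set(configured) - set(desired))  # ['10.0.0.1']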
Configure the forwarded IP address on the network interface.
Args:
forwarded_ips: list, the forwarded IP address strings to configure.
interface: string, the output device to use.
|
def _AddForwardedIps(self, forwarded_ips, interface):
for address in forwarded_ips:
self.ip_forwarding_utils.AddForwardedIp(address, interface)
| 243,030 |
Remove the forwarded IP addresses from the network interface.
Args:
forwarded_ips: list, the forwarded IP address strings to delete.
interface: string, the output device to use.
|
def _RemoveForwardedIps(self, forwarded_ips, interface):
for address in forwarded_ips:
self.ip_forwarding_utils.RemoveForwardedIp(address, interface)
| 243,031 |