Please provide a description of the function:def get_keypair_fn(): keypair_name = get_keypair_name() account = get_account_number() region = get_region() fn = f'{PRIVATE_KEY_LOCATION}/{keypair_name}-{account}-{region}.pem' return fn
[ "Location of .pem file for current keypair" ]
Please provide a description of the function:def lookup_image(wildcard): ec2 = get_ec2_resource() filter_ = {'Name': 'name', 'Values': [wildcard]} images = list(ec2.images.filter(Filters=[filter_])) # Note, can add filtering by Owners as follows # images = list(ec2.images.filter_(Filters = [filter_], Owners=['self', 'amazon'])) assert len(images) <= 1, "Multiple images match " + str(wildcard) assert len(images) > 0, "No images match " + str(wildcard) return images[0]
[ "Returns unique ec2.Image whose name matches wildcard\n lookup_ami('pytorch*').name => ami-29fa\n \n https://boto3.readthedocs.io/en/latest/reference/services/ec2.html#image\n\n Assert fails if multiple images match or no images match.\n " ]
Please provide a description of the function:def lookup_instance(name: str, instance_type: str = '', image_name: str = '', states: tuple = ('running', 'stopped', 'initializing')): ec2 = get_ec2_resource() instances = ec2.instances.filter( Filters=[{'Name': 'instance-state-name', 'Values': states}]) prefix = get_prefix() username = get_username() # look for an existing instance matching job, ignore instances launched # by different user or under different resource name result = [] for i in instances.all(): instance_name = get_name(i) if instance_name != name: continue seen_prefix, seen_username = parse_key_name(i.key_name) if prefix != seen_prefix: print(f"Found {name} launched under {seen_prefix}, ignoring") continue if username != seen_username: print(f"Found {name} launched by {seen_username}, ignoring") continue if instance_type: assert i.instance_type == instance_type, f"Found existing instance for job {name} but different instance type ({i.instance_type}) than requested ({instance_type}), terminate {name} first or use new task name." if image_name: assert i.image.name == image_name, f"Found existing instance for job {name} but launched with different image ({i.image.name}) than requested ({image_name}), terminate {name} first or use new task name." result.append(i) assert len(result) < 2, f"Found two instances with name {name}" if not result: return None else: return result[0]
[ "Looks up AWS instance for given instance name, like\n simple.worker. If no instance found in current AWS environment, returns None. " ]
Please provide a description of the function:def ssh_to_task(task) -> paramiko.SSHClient: username = task.ssh_username hostname = task.public_ip ssh_key_fn = get_keypair_fn() print(f"ssh -i {ssh_key_fn} {username}@{hostname}") pkey = paramiko.RSAKey.from_private_key_file(ssh_key_fn) ssh_client = paramiko.SSHClient() ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) assert ssh_client counter = 1 while True: try: ssh_client.connect(hostname=hostname, username=username, pkey=pkey) if counter % 11 == 0: # occasionally re-obtain public ip, machine could've gotten restarted hostname = task.public_ip break except Exception as e: print( f'{task.name}: Exception connecting to {hostname} via ssh (could be a timeout): {e}') time.sleep(RETRY_INTERVAL_SEC) return ssh_client
[ "Create ssh connection to task's machine\n\n returns Paramiko SSH client connected to host.\n\n " ]
Please provide a description of the function:def validate_aws_name(name): assert len(name) <= 127 # disallow unicode characters to avoid pain assert name == name.encode('ascii').decode('ascii') assert aws_name_regexp.match(name)
[ "Validate resource name using AWS name restrictions from # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions" ]
Please provide a description of the function:def delete_efs_by_id(efs_id): start_time = time.time() efs_client = get_efs_client() sys.stdout.write("deleting %s ... " % (efs_id,)) while True: try: response = efs_client.delete_file_system(FileSystemId=efs_id) if is_good_response(response): print("succeeded") break time.sleep(RETRY_INTERVAL_SEC) except Exception as e: print("Failed with %s" % (e,)) if time.time() - start_time - RETRY_INTERVAL_SEC < RETRY_TIMEOUT_SEC: print("Retrying in %s sec" % (RETRY_INTERVAL_SEC,)) time.sleep(RETRY_INTERVAL_SEC) else: print("Giving up") break
[ "Deletion sometimes fails, try several times." ]
Please provide a description of the function:def extract_attr_for_match(items, **kwargs): # find the value of attribute to return query_arg = None for arg, value in kwargs.items(): if value == -1: assert query_arg is None, "Only single query arg (-1 valued) is allowed" query_arg = arg result = [] filterset = set(kwargs.keys()) for item in items: match = True assert filterset.issubset( item.keys()), "Filter set contained %s which was not in record %s" % ( filterset.difference(item.keys()), item) for arg in item: if arg == query_arg: continue if arg in kwargs: if item[arg] != kwargs[arg]: match = False break if match: result.append(item[query_arg]) assert len(result) <= 1, "%d values matched %s, only allow 1" % ( len(result), kwargs) if result: return result[0] return None
[ "Helper method to get attribute value for an item matching some criterion.\n Specify target criteria value as dict, with target attribute having value -1\n\n Example:\n to extract state of vpc matching given vpc id\n\n response = [{'State': 'available', 'VpcId': 'vpc-2bb1584c'}]\n extract_attr_for_match(response, State=-1, VpcId='vpc-2bb1584c') #=> 'available'" ]
Please provide a description of the function:def get_instance_property(instance, property_name): name = get_name(instance) while True: try: value = getattr(instance, property_name) if value is not None: break print(f"retrieving {property_name} on {name} produced None, retrying") time.sleep(RETRY_INTERVAL_SEC) instance.reload() continue except Exception as e: print(f"retrieving {property_name} on {name} failed with {e}, retrying") time.sleep(RETRY_INTERVAL_SEC) try: instance.reload() except Exception: pass continue return value
[ "Retrieves property of an instance, keeps retrying until getting a non-None" ]
Please provide a description of the function:def get_name(tags_or_instance_or_id): ec2 = get_ec2_resource() if hasattr(tags_or_instance_or_id, 'tags'): tags = tags_or_instance_or_id.tags elif isinstance(tags_or_instance_or_id, str): tags = ec2.Instance(tags_or_instance_or_id).tags elif tags_or_instance_or_id is None: return EMPTY_NAME else: assert isinstance(tags_or_instance_or_id, Iterable), "expected iterable of tags" tags = tags_or_instance_or_id if not tags: return EMPTY_NAME names = [entry['Value'] for entry in tags if entry['Key'] == 'Name'] if not names: return '' if len(names) > 1: assert False, "have more than one name: " + str(names) return names[0]
[ "Helper utility to extract name out of tags dictionary or intancce.\n [{'Key': 'Name', 'Value': 'nexus'}] -> 'nexus'\n \n Assert fails if there's more than one name.\n Returns '' if there's less than one name.\n " ]
Please provide a description of the function:def wait_until_available(resource): while True: resource.load() if resource.state == 'available': break time.sleep(RETRY_INTERVAL_SEC)
[ "Waits until interval state becomes 'available'" ]
Please provide a description of the function:def maybe_create_placement_group(name='', max_retries=10): if not name: return client = get_ec2_client() while True: try: client.describe_placement_groups(GroupNames=[name]) print("Reusing placement_group group: " + name) break # no Exception means group name was found except Exception: print("Creating placement_group group: " + name) try: _response = client.create_placement_group(GroupName=name, Strategy='cluster') except Exception: # because of race can get InvalidPlacementGroup.Duplicate pass counter = 0 while True: try: res = client.describe_placement_groups(GroupNames=[name]) res_entry = res['PlacementGroups'][0] if res_entry['State'] == 'available': assert res_entry['Strategy'] == 'cluster' break except Exception as e: print("Got exception: %s" % (e,)) counter += 1 if counter >= max_retries: assert False, f'Failed to create placement_group group {name} in {max_retries} attempts' time.sleep(RETRY_INTERVAL_SEC)
[ "Creates placement_group group or reuses existing one. Crash if unable to create\n placement_group group. If name is empty, ignores request." ]
Please provide a description of the function:def lookup_instances(fragment, verbose=True, filter_by_key=True): def vprint(*args): if verbose: print(*args) region = get_region() client = get_ec2_client() ec2 = get_ec2_resource() response = client.describe_instances() assert is_good_response(response) instance_list = [] for instance in ec2.instances.all(): if instance.state['Name'] != 'running': continue name = get_name(instance) if (fragment in name or fragment in str(instance.public_ip_address) or fragment in str(instance.id) or fragment in str(instance.private_ip_address)): instance_list.append((util.toseconds(instance.launch_time), instance)) sorted_instance_list = reversed(sorted(instance_list, key=itemgetter(0))) filtered_instance_list = [] # filter by key vprint("Using region ", region) for (ts, instance) in sorted_instance_list: if filter_by_key and instance.key_name != get_keypair_name(): vprint(f"Got key {instance.key_name}, expected {get_keypair_name()}") continue filtered_instance_list.append(instance) return filtered_instance_list
[ "Returns ec2.Instance object whose name contains fragment, in reverse order of launching (ie,\n most recent intance first). Optionally filters by key, only including instances launched with\n key_name matching current username.\n\n args:\n verbose: print information about all matching instances found\n\n filter_by_key if True, ignore instances that are not launched with current\n user's default key\n " ]
Please provide a description of the function:def create_spot_instances(launch_specs, spot_price=26, expiration_mins=15):
    ec2c = get_ec2_client()
    num_tasks = launch_specs.get('MinCount') or 1  # don't KeyError when MinCount is absent
    if 'MinCount' in launch_specs:
        del launch_specs['MinCount']
    if 'MaxCount' in launch_specs:
        del launch_specs['MaxCount']
    tags = None  # initialize so the final `if tags:` can't hit an unbound name
    if 'TagSpecifications' in launch_specs:
        try:
            tags = launch_specs['TagSpecifications'][0]['Tags']
        except (KeyError, IndexError):
            pass
        del launch_specs['TagSpecifications']
    import pytz  # datetime is not timezone aware, use pytz to fix
    import datetime as dt
    now = dt.datetime.utcnow().replace(tzinfo=pytz.utc)
    spot_args = {}
    spot_args['LaunchSpecification'] = launch_specs
    spot_args['SpotPrice'] = str(spot_price)
    spot_args['InstanceCount'] = num_tasks
    spot_args['ValidUntil'] = now + dt.timedelta(minutes=expiration_mins)
    try:
        spot_requests = ec2c.request_spot_instances(**spot_args)
    except Exception as e:
        assert False, f"Spot instance request failed (out of capacity?), error was {e}"
    spot_requests = spot_requests['SpotInstanceRequests']
    instance_ids = wait_on_fulfillment(ec2c, spot_requests)
    print('Instances fulfilled...')
    ec2 = get_ec2_resource()
    instances = list(ec2.instances.filter(
        Filters=[{'Name': 'instance-id', 'Values': list(filter(None, instance_ids))}]))
    if not all(instance_ids):
        for i in instances:
            i.terminate()
        raise RuntimeError('Failed to create spot instances:', instance_ids)
    if tags:
        for i in instances:
            i.create_tags(Tags=tags)
    return instances
[ "\n args:\n spot_price: default is $26 which is right above p3.16xlarge on demand price\n expiration_mins: this request only valid for this many mins from now\n " ]
Please provide a description of the function:def is_chief(task: backend.Task, run_name: str): global run_task_dict if run_name not in run_task_dict: return True task_list = run_task_dict[run_name] assert task in task_list, f"Task {task.name} doesn't belong to run {run_name}" return task_list[0] == task
[ "Returns True if task is chief task in the corresponding run" ]
Please provide a description of the function:def ossystem(cmd): p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) (stdout, stderr) = p.communicate() return stdout.decode('ascii')
[ "Like os.system, but returns output of command as string." ]
Please provide a description of the function:def make_task( name: str = '', run_name: str = '', install_script: str = '', instance_type: str = '', image_name: str = '', disk_size: int = 0, preemptible=None, logging_task: backend.Task = None, create_resources=True, spot=False ) -> Task: ncluster_globals.task_launched = True def log(*_args): if logging_task: logging_task.log(*_args) else: util.log(*_args) # if name not specified, use name which is the same across script invocations for given image/instance-type name = ncluster_globals.auto_assign_task_name_if_needed(name, instance_type, image_name) if not instance_type: instance_type = os.environ.get('NCLUSTER_INSTANCE', 't3.micro') log("Using instance " + instance_type) _set_aws_environment() if create_resources: _maybe_create_resources(logging_task=logging_task) else: pass run: Run = ncluster_globals.get_run_object(run_name) placement_group = '' if u.instance_supports_placement_groups(instance_type) and run: placement_group = run.placement_group log(f"Launching into placement_group group {placement_group}") u.maybe_create_placement_group(run.placement_group) if not image_name: image_name = os.environ.get('NCLUSTER_IMAGE', GENERIC_SMALL_IMAGE) log("Using image " + image_name) if preemptible is None: preemptible = os.environ.get('NCLUSTER_PREEMPTIBLE', False) preemptible = bool(preemptible) if preemptible: log("Using preemptible instances") image = u.lookup_image(image_name) keypair = u.get_keypair() security_group = u.get_security_group() ec2 = u.get_ec2_resource() instance = u.lookup_instance(name, instance_type, image_name) _maybe_start_instance(instance) _maybe_wait_for_initializing_instance(instance) # create the instance if not present if instance: log(f"Reusing {instance}") else: log(f"Allocating {instance_type} for task {name}") args = {'ImageId': image.id, 'InstanceType': instance_type, 'MinCount': 1, 'MaxCount': 1, 'SecurityGroupIds': [security_group.id], 'KeyName': keypair.name} args['TagSpecifications'] = [{ 'ResourceType': 'instance', 'Tags': [{ 'Key': 'Name', 'Value': name }] }] # subnet = u.get_subnet() # args['NetworkInterfaces'] = [{'SubnetId': subnet.id, # 'DeviceIndex': 0, # 'AssociatePublicIpAddress': True, # 'Groups': [security_group.id]}] # placement_specs = {'AvailabilityZone': u.get_zone()} placement_specs = {} if placement_group: placement_specs['GroupName'] = placement_group args['Placement'] = placement_specs args['Monitoring'] = {'Enabled': True} if disk_size: assert disk_size > 0 ebs = { 'VolumeSize': disk_size, 'VolumeType': 'gp2', } args['BlockDeviceMappings'] = [{ 'DeviceName': '/dev/sda1', 'Ebs': ebs }] # Use high throughput disk (0.065/iops-month = about $1/hour) if 'NCLUSTER_AWS_FAST_ROOTDISK' in os.environ: assert not disk_size, f"Specified both disk_size {disk_size} and $NCLUSTER_AWS_FAST_ROOTDISK, they are incompatible as $NCLUSTER_AWS_FAST_ROOTDISK hardwired disk size" ebs = { 'VolumeSize': 500, 'VolumeType': 'io1', 'Iops': 11500 } args['BlockDeviceMappings'] = [{ 'DeviceName': '/dev/sda1', 'Ebs': ebs }] instances = [] try: if spot: instances = u.create_spot_instances(args) else: instances = ec2.create_instances(**args) except Exception as e: log(f"Instance creation for {name} failed with ({e})") log( "You can change availability zone using export NCLUSTER_ZONE=...") log("Terminating") os.kill(os.getpid(), signal.SIGINT) # sys.exit() doesn't work inside thread assert instances, f"ec2.create_instances returned {instances}" log(f"Allocated {len(instances)} instances") instance = instances[0] task = 
Task(name, instance=instance, install_script=install_script, image_name=image_name, instance_type=instance_type) ncluster_globals.register_task(task, run_name) return task
[ "\n Create task on AWS.\n\n Automatically places it in singleton Run/singleton Job objects, see Run/Job/Task hierarchy for details\n https://docs.google.com/document/d/1Gg4T243cYrDUW1YDCikmqp7fzSQDU3rZxOkJr9ohhs8/edit#heading=h.j4td4oixogib\n\n\n Args:\n disk_size: default size of root disk, in GBs\n create_resources: whether this task will handle resource creation\n name: see ncluster.make_task\n run_name: see ncluster.make_task\n install_script: see ncluster.make_task\n instance_type: instance type to use, defaults to $NCLUSTER_INSTANCE or t3.micro if unset\n image_name: name of image, ie, \"Deep Learning AMI (Ubuntu) Version 12.0\", defaults to $NCLUSTER_IMAGE or amzn2-ami-hvm-2.0.20180622.1-x86_64-gp2 if unset\n preemptible: use cheaper preemptible/spot instances\n logging_task: partially initialized Task object, use it for logging\n\n Returns:\n\n " ]
Please provide a description of the function:def make_job( name: str = '', run_name: str = '', num_tasks: int = 1, install_script: str = '', instance_type: str = '', image_name: str = '', create_resources=True, **kwargs) -> Job: assert num_tasks > 0, f"Can't create job with {num_tasks} tasks" assert name.count( '.') <= 1, "Job name has too many .'s (see ncluster design: Run/Job/Task hierarchy for convention)" # dummy tasks for logging tasks = [backend.Task(f"{i}.{name}") for i in range(num_tasks)] _set_aws_environment(tasks[0]) if create_resources: _maybe_create_resources(tasks[0]) name = ncluster_globals.auto_assign_job_name_if_needed(name) run_name = ncluster_globals.auto_assign_run_name_if_needed(run_name) _run = ncluster_globals.create_run_if_needed(run_name, make_run) job = Job(name=name, tasks=tasks, run_name=run_name, **kwargs) exceptions = [] # make tasks in parallel def make_task_fn(i: int): try: tasks[i] = make_task(f"{i}.{name}", run_name=run_name, install_script=install_script, instance_type=instance_type, image_name=image_name, logging_task=tasks[i], create_resources=False, # handle resources in job already **kwargs) except Exception as e: exceptions.append(e) util.log("Creating threads") threads = [threading.Thread(name=f'make_task_{i}', target=make_task_fn, args=[i]) for i in range(num_tasks)] for thread in threads: thread.start() for thread in threads: thread.join() print("Exception are ", exceptions) if exceptions: raise exceptions[0] job.tasks = tasks # double check that all instances are in the same placement_group group # this can happen if some instances from previous smaller run are getting reused placement_dict = {task.instance.placement_group: task.name for task in job.tasks} # TODO: make placement_group group name derived from run, to make it deterministic # on individual instance restarts if len(placement_dict) > 1: util.log("Job tasks are spread over multiple placement_group groups") pprint.pprint(placement_dict) raise RuntimeError( f"Got instance spread over multiple placement_group groups: {placement_dict}. Must terminate all instances in run {run_name} and try again.") return job
[ "\n Args:\n create_resources: if True, will create resources if necessary\n name: see backend.make_task\n run_name: see backend.make_task\n num_tasks: number of tasks to launch\n install_script: see make_task\n instance_type: see make_task\n image_name: see make_task\n\n Returns:\n\n " ]
Please provide a description of the function:def _maybe_start_instance(instance): if not instance: return if instance.state['Name'] == 'stopped': instance.start() while True: print(f"Waiting for {instance} to start.") instance.reload() if instance.state['Name'] == 'running': break time.sleep(10)
[ "Starts instance if it's stopped, no-op otherwise." ]
Please provide a description of the function:def _maybe_wait_for_initializing_instance(instance): if not instance: return if instance.state['Name'] == 'initializing': while True: print(f"Waiting for {instance} to leave state 'initializing'.") instance.reload() if instance.state['Name'] == 'running': break time.sleep(10)
[ "Starts instance if it's stopped, no-op otherwise." ]
Please provide a description of the function:def _maybe_create_resources(logging_task: Task = None): def log(*args): if logging_task: logging_task.log(*args) else: util.log(*args) def should_create_resources(): prefix = u.get_prefix() if u.get_keypair_name() not in u.get_keypair_dict(): log(f"Missing {u.get_keypair_name()} keypair, creating resources") return True vpcs = u.get_vpc_dict() if prefix not in vpcs: log(f"Missing {prefix} vpc, creating resources") return True vpc = vpcs[prefix] gateways = u.get_gateway_dict(vpc) if prefix not in gateways: log(f"Missing {prefix} gateway, creating resources") return True return False try: # this locking is approximate, still possible for threads to slip through if os.path.exists(AWS_LOCK_FN): pid, ts, lock_taskname = open(AWS_LOCK_FN).read().split('-') ts = int(ts) log(f"waiting for aws resource creation, another resource initiation was " f"initiated {int(time.time()-ts)} seconds ago by " f"{lock_taskname}, delete lock file " f"{AWS_LOCK_FN} if this is an error") while True: if os.path.exists(AWS_LOCK_FN): log(f"waiting for lock file {AWS_LOCK_FN} to get deleted " f"initiated {int(time.time()-ts)} seconds ago by ") time.sleep(2) continue else: break return with open(AWS_LOCK_FN, 'w') as f: f.write( f'{os.getpid()}-{int(time.time())}-{logging_task.name if logging_task else ""}') if not should_create_resources(): util.log("Resources already created, no-op") os.remove(AWS_LOCK_FN) return create_lib.create_resources() finally: if os.path.exists(AWS_LOCK_FN): os.remove(AWS_LOCK_FN)
[ "Use heuristics to decide to possibly create resources", "Check if gateway, keypair, vpc exist." ]
Please provide a description of the function:def _set_aws_environment(task: Task = None): current_zone = os.environ.get('NCLUSTER_ZONE', '') current_region = os.environ.get('AWS_DEFAULT_REGION', '') def log(*args): if task: task.log(*args) else: util.log(*args) if current_region and current_zone: assert current_zone.startswith( current_region), f'Current zone "{current_zone}" ($NCLUSTER_ZONE) is not ' \ f'in current region "{current_region} ($AWS_DEFAULT_REGION)' assert u.get_session().region_name == current_region # setting from ~/.aws # zone is set, set region from zone if current_zone and not current_region: current_region = current_zone[:-1] os.environ['AWS_DEFAULT_REGION'] = current_region # neither zone nor region not set, use default setting for region # if default is not set, use NCLUSTER_DEFAULT_REGION if not current_region: current_region = u.get_session().region_name if not current_region: log(f"No default region available, using {NCLUSTER_DEFAULT_REGION}") current_region = NCLUSTER_DEFAULT_REGION os.environ['AWS_DEFAULT_REGION'] = current_region # zone not set, use first zone of the region # if not current_zone: # current_zone = current_region + 'a' # os.environ['NCLUSTER_ZONE'] = current_zone log(f"Using account {u.get_account_number()}, region {current_region}, " f"zone {current_zone}")
[ "Sets up AWS environment from NCLUSTER environment variables" ]
Please provide a description of the function:def join(self, ignore_errors=False): assert self._status_fn, "Asked to join a task which hasn't had any commands executed on it" check_interval = 0.2 status_fn = self._status_fn if not self.wait_for_file(status_fn, max_wait_sec=30): self.log(f"Retrying waiting for {status_fn}") while not self.exists(status_fn): self.log(f"Still waiting for {self._cmd}") self.wait_for_file(status_fn, max_wait_sec=30) contents = self.read(status_fn) # if empty wait a bit to allow for race condition if len(contents) == 0: time.sleep(check_interval) contents = self.read(status_fn) status = int(contents.strip()) self.last_status = status if status != 0: extra_msg = '(ignoring error)' if ignore_errors else '(failing)' if util.is_set('NCLUSTER_RUN_WITH_OUTPUT_ON_FAILURE') or True: self.log( f"Start failing output {extra_msg}: \n{'*'*80}\n\n '{self.read(self._out_fn)}'") self.log(f"\n{'*'*80}\nEnd failing output") if not ignore_errors: raise RuntimeError(f"Command {self._cmd} returned status {status}") else: self.log(f"Warning: command {self._cmd} returned status {status}") return status
[ "Waits until last executed command completed." ]
Please provide a description of the function:def _run_with_output_on_failure(self, cmd, non_blocking=False, ignore_errors=False, max_wait_sec=365 * 24 * 3600, check_interval=0.2) -> str: if not self._can_run: assert False, "Using .run before initialization finished" if '\n' in cmd: assert False, "Don't support multi-line for run2" cmd = cmd.strip() if cmd.startswith('#'): # ignore empty/commented out lines return '' self.run_counter += 1 self.log("tmux> %s", cmd) self._cmd = cmd self._cmd_fn = f'{self.remote_scratch}/{self.run_counter}.cmd' self._status_fn = f'{self.remote_scratch}/{self.run_counter}.status' self._out_fn = f'{self.remote_scratch}/{self.run_counter}.out' cmd = util.shell_strip_comment(cmd) assert '&' not in cmd, f"cmd {cmd} contains &, that breaks things" # modify command to dump shell success status into file self.file_write(self._cmd_fn, cmd + '\n') # modified_cmd = f'{cmd} > {out_fn} 2>&1; echo $? > {status_fn}' # https://stackoverflow.com/a/692407/419116 # $cmd > >(tee -a fn) 2> >(tee -a fn >&2) modified_cmd = f'{cmd} > >(tee -a {self._out_fn}) 2> >(tee -a {self._out_fn} >&2); echo $? > {self._status_fn}' modified_cmd = shlex.quote(modified_cmd) start_time = time.time() tmux_window = self.tmux_session + ':' + str(self.tmux_window_id) tmux_cmd = f"tmux send-keys -t {tmux_window} {modified_cmd} Enter" self._run_raw(tmux_cmd, ignore_errors=ignore_errors) if non_blocking: return 0 if not self.wait_for_file(self._status_fn, max_wait_sec=60): self.log(f"Retrying waiting for {self._status_fn}") elapsed_time = time.time() - start_time while not self.exists(self._status_fn) and elapsed_time < max_wait_sec: self.log(f"Still waiting for {cmd}") self.wait_for_file(self._status_fn, max_wait_sec=60) elapsed_time = time.time() - start_time contents = self.read(self._status_fn) # if empty wait a bit to allow for race condition if len(contents) == 0: time.sleep(check_interval) contents = self.read(self._status_fn) status = int(contents.strip()) self.last_status = status if status != 0: extra_msg = '(ignoring error)' if ignore_errors else '(failing)' self.log( f"Start failing output {extra_msg}: \n{'*'*80}\n\n '{self.read(self._out_fn)}'") self.log(f"\n{'*'*80}\nEnd failing output") if not ignore_errors: raise RuntimeError(f"Command {cmd} returned status {status}") else: self.log(f"Warning: command {cmd} returned status {status}") return self.read(self._out_fn)
[ "Experimental version of run propagates error messages to client. This command will be default \"run\" eventually" ]
Please provide a description of the function:def _run_raw(self, cmd: str, ignore_errors=False) -> Tuple[str, str]: # self._log("run_ssh: %s"%(cmd,)) stdin, stdout, stderr = u.call_with_retries(self.ssh_client.exec_command, command=cmd, get_pty=True) stdout_str = stdout.read().decode() stderr_str = stderr.read().decode() if stdout.channel.recv_exit_status() != 0: if not ignore_errors: self.log(f"command ({cmd}) failed with --->") self.log("failing stdout: " + stdout_str) self.log("failing stderr: " + stderr_str) assert False, "_run_raw failed (see logs for error)" return stdout_str, stderr_str
[ "Runs given cmd in the task using current SSH session, returns\n stdout/stderr as strings. Because it blocks until cmd is done, use it for\n short cmds. Silently ignores failing commands.\n\n This is a barebones method to be used during initialization that have\n minimal dependencies (no tmux)\n " ]
Please provide a description of the function:def upload(self, local_fn: str, remote_fn: str = '', dont_overwrite: bool = False) -> None:
    # support wildcard through glob
    if '*' in local_fn:
        for local_subfn in glob.glob(local_fn):
            self.upload(local_subfn)
        return
    if '#' in local_fn:  # hashes also give problems from shell commands
        self.log(f"skipping backup file {local_fn}")  # fixed: was missing the f-string prefix
        return
    if not self.sftp:
        self.sftp = u.call_with_retries(self.ssh_client.open_sftp,
                                        'self.ssh_client.open_sftp')

    def maybe_fix_mode(local_fn_, remote_fn_):
        mode = oct(os.stat(local_fn_)[stat.ST_MODE])[-3:]
        if '7' in mode:
            self.log(f"Making {remote_fn_} executable with mode {mode}")
            # use raw run, in case tmux is unavailable
            self._run_raw(f"chmod {mode} {remote_fn_}")

    # augmented SFTP client that can transfer directories, from
    # https://stackoverflow.com/a/19974994/419116
    def _put_dir(source, target):
        def _safe_mkdir(path, mode=511, ignore_existing=True):
            try:
                self.sftp.mkdir(path, mode)
            except IOError:
                if ignore_existing:
                    pass
                else:
                    raise
        assert os.path.isdir(source)
        _safe_mkdir(target)
        for item in os.listdir(source):
            if os.path.isfile(os.path.join(source, item)):
                self.sftp.put(os.path.join(source, item), os.path.join(target, item))
                maybe_fix_mode(os.path.join(source, item), os.path.join(target, item))
            else:
                _safe_mkdir(f'{target}/{item}')
                _put_dir(f'{source}/{item}', f'{target}/{item}')

    if not remote_fn:
        remote_fn = os.path.basename(local_fn)
    self.log('uploading ' + local_fn + ' to ' + remote_fn)
    remote_fn = remote_fn.replace('~', self.homedir)
    if '/' in remote_fn:
        remote_dir = os.path.dirname(remote_fn)
        assert self.exists(remote_dir), f"Remote dir {remote_dir} doesn't exist"
    if dont_overwrite and self.exists(remote_fn):
        self.log("Remote file %s exists, skipping" % (remote_fn,))
        return
    assert os.path.exists(local_fn), f"{local_fn} not found"
    if os.path.isdir(local_fn):
        _put_dir(local_fn, remote_fn)
    else:
        assert os.path.isfile(local_fn), "%s is not a file" % (local_fn,)
        # this crashes with IOError when upload failed
        if self.exists(remote_fn) and self.isdir(remote_fn):
            remote_fn = remote_fn + '/' + os.path.basename(local_fn)
        self.sftp.put(localpath=local_fn, remotepath=remote_fn)
        maybe_fix_mode(local_fn, remote_fn)
[ "Uploads file to remote instance. If location not specified, dumps it\n into default directory. If remote location has files or directories with the\n same name, behavior is undefined.", "Makes remote file execute for locally executable files", " Uploads the contents of the source directory to the target path.", " Augments mkdir by adding an option to not fail if the folder exists asdf asdf asdf as" ]
Please provide a description of the function:def switch_window(self, window_id: int): # windows are numbered sequentially 0, 1, 2, ... # create any missing windows and make them point to the same directory if window_id not in self.tmux_available_window_ids: for i in range(max(self.tmux_available_window_ids) + 1, window_id + 1): self._run_raw(f'tmux new-window -t {self.tmux_session} -d') self.tmux_available_window_ids.append(i) self.tmux_window_id = window_id
[ "\n Switches currently active tmux window for given task. 0 is the default window\n Args:\n window_id: integer id of tmux window to use\n " ]
Please provide a description of the function:def _replace_lines(fn, startswith, new_line):
    new_lines = []
    for line in open(fn):
        if line.startswith(startswith):
            new_lines.append(new_line)
        else:
            new_lines.append(line.rstrip('\n'))  # drop the trailing newline; join() below re-adds it
    with open(fn, 'w') as f:
        f.write('\n'.join(new_lines))
[ "Replace lines starting with starts_with in fn with new_line." ]
Please provide a description of the function:def now_micros(absolute=False) -> int: micros = int(time.time() * 1e6) if absolute: return micros return micros - EPOCH_MICROS
[ "Return current micros since epoch as integer." ]
Please provide a description of the function:def now_millis(absolute=False) -> int: millis = int(time.time() * 1e3) if absolute: return millis return millis - EPOCH_MICROS // 1000
[ "Return current millis since epoch as integer." ]
Please provide a description of the function:def install_pdb_handler(): import signal import pdb def handler(_signum, _frame): pdb.set_trace() signal.signal(signal.SIGQUIT, handler)
[ "Make CTRL+\\ break into gdb." ]
Please provide a description of the function:def shell_add_echo(script): new_script = "" for cmd in script.split('\n'): cmd = cmd.strip() if not cmd: continue new_script += "echo \\* " + shlex.quote(cmd) + "\n" new_script += cmd + "\n" return new_script
[ "Goes over each line script, adds \"echo cmd\" in front of each cmd.\n\n ls a\n\n becomes\n\n echo * ls a\n ls a\n " ]
Please provide a description of the function:def random_id(k=5): # https://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits-in-python return ''.join(random.choices(string.ascii_lowercase + string.digits, k=k))
[ "Random id to use for AWS identifiers." ]
Please provide a description of the function:def alphanumeric_hash(s: str, size=5): import hashlib import base64 hash_object = hashlib.md5(s.encode('ascii')) s = base64.b32encode(hash_object.digest()) result = s[:size].decode('ascii').lower() return result
[ "Short alphanumeric string derived from hash of given string" ]
Please provide a description of the function:def reverse_taskname(name: str) -> str: components = name.split('.') assert len(components) <= 3 return '.'.join(components[::-1])
[ "\n Reverses components in the name of task. Reversed convention is used for filenames since\n it groups log/scratch files of related tasks together\n\n 0.somejob.somerun -> somerun.somejob.0\n 0.somejob -> somejob.0\n somename -> somename\n\n Args:\n name: name of task\n\n " ]
Please provide a description of the function:def is_bash_builtin(cmd): # from compgen -b bash_builtins = ['alias', 'bg', 'bind', 'alias', 'bg', 'bind', 'break', 'builtin', 'caller', 'cd', 'command', 'compgen', 'complete', 'compopt', 'continue', 'declare', 'dirs', 'disown', 'echo', 'enable', 'eval', 'exec', 'exit', 'export', 'false', 'fc', 'fg', 'getopts', 'hash', 'help', 'history', 'jobs', 'kill', 'let', 'local', 'logout', 'mapfile', 'popd', 'printf', 'pushd', 'pwd', 'read', 'readarray', 'readonly', 'return', 'set', 'shift', 'shopt', 'source', 'suspend', 'test', 'times', 'trap', 'true', 'type', 'typeset', 'ulimit', 'umask', 'unalias', 'unset', 'wait'] toks = cmd.split() if toks and toks[0] in bash_builtins: return True return False
[ "Return true if command is invoking bash built-in\n " ]
Please provide a description of the function:def is_set(name): val = os.environ.get(name, '0') assert val == '0' or val == '1', f"env var {name} has value {val}, expected 0 or 1" return val == '1'
[ "Helper method to check if given property is set" ]
Please provide a description of the function:def assert_script_in_current_directory(): script = sys.argv[0] assert os.path.abspath(os.path.dirname(script)) == os.path.abspath( '.'), f"Change into directory of script {script} and run again."
[ "Assert fail if current directory is different from location of the script" ]
Please provide a description of the function:def push_ctx(app=None): if app is not None: ctx = app.test_request_context() ctx.fixtures_request_context = True ctx.push() if _app_ctx_stack is not None: _app_ctx_stack.top.fixtures_app_context = True # Make sure that we have an application in the current context if (_app_ctx_stack is None or _app_ctx_stack.top is None) and _request_ctx_stack.top is None: raise AssertionError('A Flask application must be specified for Fixtures to work.')
[ "Creates new test context(s) for the given app\n\n If the app is not None, it overrides any existing app and/or request\n context. In other words, we will use the app that was passed in to create\n a new test request context on the top of the stack. If, however, nothing\n was passed in, we will assume that another app and/or request context is\n already in place and use that to run the test suite. If no app or request\n context can be found, an AssertionError is emitted to let the user know\n that they must somehow specify an application for testing.\n\n " ]
Please provide a description of the function:def pop_ctx(): if getattr(_request_ctx_stack.top, 'fixtures_request_context', False): _request_ctx_stack.pop() if _app_ctx_stack is not None and getattr(_app_ctx_stack.top, 'fixtures_app_context', False): _app_ctx_stack.pop()
[ "Removes the test context(s) from the current stack(s)\n " ]
Please provide a description of the function:def load_fixtures(db, fixtures): conn = db.engine.connect() metadata = db.metadata for fixture in fixtures: if 'model' in fixture: module_name, class_name = fixture['model'].rsplit('.', 1) module = importlib.import_module(module_name) model = getattr(module, class_name) for fields in fixture['records']: obj = model(**fields) db.session.add(obj) db.session.commit() elif 'table' in fixture: table = Table(fixture['table'], metadata) conn.execute(table.insert(), fixture['records']) else: raise ValueError("Fixture missing a 'model' or 'table' field: {0}".format(json.dumps(fixture)))
[ "Loads the given fixtures into the database.\n " ]
Please provide a description of the function:def setup_handler(setup_fixtures_fn, setup_fn): def handler(obj): setup_fixtures_fn(obj) setup_fn(obj) return handler
[ "Returns a function that adds fixtures handling to the setup method.\n\n Makes sure that fixtures are setup before calling the given setup method.\n " ]
Please provide a description of the function:def teardown_handler(teardown_fixtures_fn, teardown_fn): def handler(obj): teardown_fn(obj) teardown_fixtures_fn(obj) return handler
[ "Returns a function that adds fixtures handling to the teardown method.\n\n Calls the given teardown method first before calling the fixtures teardown.\n " ]
Please provide a description of the function:def get_child_fn(attrs, names, bases): def call_method(obj, method): # The __get__ method takes an instance and an owner which changes # depending on the calling object. If the calling object is a class, # the instance is None and the owner will be the object itself. If the # calling object is an instance, the instance will be the calling object # and the owner will be its class. For more info on the __get__ method, # see http://docs.python.org/2/reference/datamodel.html#object.__get__. if isinstance(obj, type): instance = None owner = obj else: instance = obj owner = obj.__class__ method.__get__(instance, owner)() # Create a default function that calls the default method on each parent default_name = names[0] def default_fn(obj): for cls in bases: if hasattr(cls, default_name): call_method(obj, getattr(cls, default_name)) default_fn.__name__ = default_name # Get all of the functions in the child class that match the list of names fns = [(name, attrs[name]) for name in names if name in attrs] # Raise an error if more than one setup/teardown method is found if len(fns) > 1: raise RuntimeError("Cannot have more than one setup or teardown method per context (class or test).") # If one setup/teardown function was found, return it elif len(fns) == 1: name, fn = fns[0] def child_fn(obj): call_method(obj, fn) child_fn.__name__ = name return child_fn # Otherwise, return the default function else: return default_fn
[ "Returns a function from the child class that matches one of the names.\n\n Searches the child class's set of methods (i.e., the attrs dict) for all\n the functions matching the given list of names. If more than one is found,\n an exception is raised, if one is found, it is returned, and if none are\n found, a function that calls the default method on each parent class is\n returned.\n\n ", "Calls a method as either a class method or an instance method.\n " ]
Please provide a description of the function:def print_msg(msg, header, file=sys.stdout): DEFAULT_MSG_BLOCK_WIDTH = 60 # Calculate the length of the boarder on each side of the header and the # total length of the bottom boarder side_boarder_length = (DEFAULT_MSG_BLOCK_WIDTH - (len(header) + 2)) // 2 msg_block_width = side_boarder_length * 2 + (len(header) + 2) # Create the top and bottom boarders side_boarder = '#' * side_boarder_length top_boarder = '{0} {1} {2}'.format(side_boarder, header, side_boarder) bottom_boarder = '#' * msg_block_width def pad(line, length): padding_length = length - len(line) left_padding = ' ' * (padding_length//2) right_padding = ' ' * (padding_length - len(left_padding)) return '{0} {1} {2}'.format(left_padding, line, right_padding) words = msg.split(' ') lines = [] line = '' for word in words: if len(line + ' ' + word) <= msg_block_width - 4: line = (line + ' ' + word).strip() else: lines.append('#{0}#'.format(pad(line, msg_block_width - 4))) line = word lines.append('#{0}#'.format(pad(line, msg_block_width - 4))) # Print the full message print(file=file) print(top_boarder, file=file) print('#{0}#'.format(pad('', msg_block_width - 4)), file=file) for line in lines: print(line, file=file) print('#{0}#'.format(pad('', msg_block_width - 4)), file=file) print(bottom_boarder, file=file) print(file=file)
[ "Prints a boardered message to the screen", "Returns a string padded and centered by the given length" ]
Please provide a description of the function:def can_persist_fixtures(): # If we're running python 2.7 or greater, we're fine if sys.hexversion >= 0x02070000: return True # Otherwise, nose and py.test support the setUpClass and tearDownClass # methods, so if we're using either of those, go ahead and run the tests filename = inspect.stack()[-1][1] executable = os.path.split(filename)[1] return executable in ('py.test', 'nosetests')
[ "Returns True if it's possible to persist fixtures across tests.\n\n Flask-Fixtures uses the setUpClass and tearDownClass methods to persist\n fixtures across tests. These methods were added to unittest.TestCase in\n python 2.7. So, we can only persist fixtures when using python 2.7.\n However, the nose and py.test libraries add support for these methods\n regardless of what version of python we're running, so if we're running\n with either of those libraries, return True to persist fixtures.\n\n " ]
Please provide a description of the function:def get(self, count=None, since_id=None, silent=False): if not silent: print('Retrieving photos from Twitter API...') self.auth_user = self.verify_credentials().screen_name self.since_ids = read_since_ids(self.users) for user in self.users: if self.increment: since_id = self.since_ids.get(user) photos = self.load(user=user, count=count, since_id=since_id, num=self.num) self.photos[user] = photos[:self.num] self._total += len(self.photos[user]) if not photos and user in self.max_ids: del self.max_ids[user] return self.photos
[ "\n Get all photos from the user or members of the list\n :param count: Number of tweets to try and retrieve. If None, return\n all photos since `since_id`\n :param since_id: An integer specifying the oldest tweet id\n " ]
Please provide a description of the function:def read_since_ids(users): since_ids = {} for user in users: if config.has_option(SECTIONS['INCREMENTS'], user): since_ids[user] = config.getint(SECTIONS['INCREMENTS'], user) + 1 return since_ids
[ "\n Read max ids of the last downloads\n\n :param users: A list of users\n\n Return a dictionary mapping users to ids\n " ]
Please provide a description of the function:def set_max_ids(max_ids): config.read(CONFIG) for user, max_id in max_ids.items(): config.set(SECTIONS['INCREMENTS'], user, str(max_id)) with open(CONFIG, 'w') as f: config.write(f)
[ "\n Set max ids of the current downloads\n\n :param max_ids: A dictionary mapping users to ids\n " ]
Please provide a description of the function:def hash_bytes(buf): sha256 = hashlib.sha256() sha256.update(buf) return 'sha256:' + sha256.hexdigest()
[ "\n Hash bytes using the same method the registry uses (currently SHA-256).\n\n :param buf: Bytes to hash\n :type buf: binary str\n\n :rtype: str\n :returns: Hex-encoded hash of file's content (prefixed by ``sha256:``)\n " ]
Please provide a description of the function:def hash_file(filename): sha256 = hashlib.sha256() with open(filename, 'rb') as f: for chunk in iter(lambda: f.read(8192), b''): sha256.update(chunk) return 'sha256:' + sha256.hexdigest()
[ "\n Hash a file using the same method the registry uses (currently SHA-256).\n\n :param filename: Name of file to hash\n :type filename: str\n\n :rtype: str\n :returns: Hex-encoded hash of file's content (prefixed by ``sha256:``)\n " ]
Please provide a description of the function:def authenticate(self, username=None, password=None, actions=None, response=None, authorization=None): # pylint: disable=too-many-arguments,too-many-locals if response is None: with warnings.catch_warnings(): _ignore_warnings(self) response = self._sessions[0].get(self._base_url, verify=self._tlsverify) if response.ok: return None # pylint: disable=no-member if response.status_code != requests.codes.unauthorized: raise exceptions.DXFUnexpectedStatusCodeError(response.status_code, requests.codes.unauthorized) if self._insecure: raise exceptions.DXFAuthInsecureError() parsed = www_authenticate.parse(response.headers['www-authenticate']) if username is not None and password is not None: headers = { 'Authorization': 'Basic ' + base64.b64encode(_to_bytes_2and3(username + ':' + password)).decode('utf-8') } elif authorization is not None: headers = { 'Authorization': authorization } else: headers = {} if 'bearer' in parsed: info = parsed['bearer'] if actions and self._repo: scope = 'repository:' + self._repo + ':' + ','.join(actions) elif 'scope' in info: scope = info['scope'] else: scope = '' url_parts = list(urlparse.urlparse(info['realm'])) query = urlparse.parse_qs(url_parts[4]) query.update({ 'service': info['service'], 'scope': scope }) url_parts[4] = urlencode(query, True) url_parts[0] = 'https' if self._auth_host: url_parts[1] = self._auth_host auth_url = urlparse.urlunparse(url_parts) with warnings.catch_warnings(): _ignore_warnings(self) r = self._sessions[0].get(auth_url, headers=headers, verify=self._tlsverify) _raise_for_status(r) rjson = r.json() self.token = rjson['access_token'] if 'access_token' in rjson else rjson['token'] return self._token self._headers = headers return None
[ "\n Authenticate to the registry using a username and password,\n an authorization header or otherwise as the anonymous user.\n\n :param username: User name to authenticate as.\n :type username: str\n\n :param password: User's password.\n :type password: str\n\n :param actions: If you know which types of operation you need to make on the registry, specify them here. Valid actions are ``pull``, ``push`` and ``*``.\n :type actions: list\n\n :param response: When the ``auth`` function you passed to :class:`DXFBase`'s constructor is called, it is passed a HTTP response object. Pass it back to :meth:`authenticate` to have it automatically detect which actions are required.\n :type response: requests.Response\n\n :param authorization: ``Authorization`` header value.\n :type authorization: str\n\n :rtype: str\n :returns: Authentication token, if the registry supports bearer tokens. Otherwise ``None``, and HTTP Basic auth is used (if the registry requires authentication).\n " ]
Please provide a description of the function:def list_repos(self, batch_size=None, iterate=False): it = PaginatingResponse(self, '_base_request', '_catalog', 'repositories', params={'n': batch_size}) return it if iterate else list(it)
[ "\n List all repositories in the registry.\n\n :param batch_size: Number of repository names to ask the server for at a time.\n :type batch_size: int\n\n :param iterate: Whether to return iterator over the names or a list of all the names.\n :type iterate: bool\n\n :rtype: list or iterator of strings\n :returns: Repository names.\n " ]
Please provide a description of the function:def push_blob(self, filename=None, progress=None, data=None, digest=None, check_exists=True): # pylint: disable=too-many-arguments if filename is None: dgst = digest else: dgst = hash_file(filename) if check_exists: try: self._request('head', 'blobs/' + dgst) return dgst except requests.exceptions.HTTPError as ex: # pylint: disable=no-member if ex.response.status_code != requests.codes.not_found: raise r = self._request('post', 'blobs/uploads/') upload_url = r.headers['Location'] url_parts = list(urlparse.urlparse(upload_url)) query = urlparse.parse_qs(url_parts[4]) query.update({'digest': dgst}) url_parts[4] = urlencode(query, True) url_parts[0] = 'http' if self._insecure else 'https' upload_url = urlparse.urlunparse(url_parts) if filename is None: data = _ReportingChunks(dgst, data, progress) if progress else data self._base_request('put', upload_url, data=data) else: with open(filename, 'rb') as f: data = _ReportingFile(dgst, f, progress) if progress else f self._base_request('put', upload_url, data=data) return dgst
[ "\n Upload a file to the registry and return its (SHA-256) hash.\n\n The registry is content-addressable so the file's content (aka blob)\n can be retrieved later by passing the hash to :meth:`pull_blob`.\n\n :param filename: File to upload.\n :type filename: str\n\n :param data: Data to upload if ``filename`` isn't given. The data is uploaded in chunks and you must also pass ``digest``.\n :type data: Generator or iterator\n\n :param digest: Hash of the data to be uploaded in ``data``, if specified.\n :type digest: str (hex-encoded SHA-256, prefixed by ``sha256:``)\n\n :param progress: Optional function to call as the upload progresses. The function will be called with the hash of the file's content (or ``digest``), the blob just read from the file (or chunk from ``data``) and if ``filename`` is specified the total size of the file.\n :type progress: function(dgst, chunk, size)\n\n :param check_exists: Whether to check if a blob with the same hash already exists in the registry. If so, it won't be uploaded again.\n :type check_exists: bool\n\n :rtype: str\n :returns: Hash of file's content.\n " ]
Please provide a description of the function:def pull_blob(self, digest, size=False, chunk_size=None): if chunk_size is None: chunk_size = 8192 r = self._request('get', 'blobs/' + digest, stream=True) class Chunks(object): # pylint: disable=too-few-public-methods def __iter__(self): sha256 = hashlib.sha256() for chunk in r.iter_content(chunk_size): sha256.update(chunk) yield chunk dgst = 'sha256:' + sha256.hexdigest() if dgst != digest: raise exceptions.DXFDigestMismatchError(dgst, digest) return (Chunks(), long(r.headers['content-length'])) if size else Chunks()
[ "\n Download a blob from the registry given the hash of its content.\n\n :param digest: Hash of the blob's content (prefixed by ``sha256:``).\n :type digest: str\n\n :param size: Whether to return the size of the blob too.\n :type size: bool\n\n :param chunk_size: Number of bytes to download at a time. Defaults to 8192.\n :type chunk_size: int\n\n :rtype: iterator\n :returns: If ``size`` is falsey, a byte string iterator over the blob's content. If ``size`` is truthy, a tuple containing the iterator and the blob's size.\n " ]
Please provide a description of the function:def blob_size(self, digest): r = self._request('head', 'blobs/' + digest) return long(r.headers['content-length'])
[ "\n Return the size of a blob in the registry given the hash of its content.\n\n :param digest: Hash of the blob's content (prefixed by ``sha256:``).\n :type digest: str\n\n :rtype: long\n :returns: Whether the blob exists.\n " ]
Please provide a description of the function:def set_manifest(self, alias, manifest_json): self._request('put', 'manifests/' + alias, data=manifest_json, headers={'Content-Type': _schema2_mimetype})
[ "\n Give a name (alias) to a manifest.\n\n :param alias: Alias name\n :type alias: str\n\n :param manifest_json: A V2 Schema 2 manifest JSON string\n :type digests: list\n " ]
Please provide a description of the function:def set_alias(self, alias, *digests): # pylint: disable=too-many-locals try: manifest_json = self.make_manifest(*digests) self.set_manifest(alias, manifest_json) return manifest_json except requests.exceptions.HTTPError as ex: # pylint: disable=no-member if ex.response.status_code != requests.codes.bad_request: raise manifest_json = self.make_unsigned_manifest(alias, *digests) signed_json = _sign_manifest(manifest_json) self._request('put', 'manifests/' + alias, data=signed_json) return signed_json
[ "\n Give a name (alias) to a set of blobs. Each blob is specified by\n the hash of its content.\n\n :param alias: Alias name\n :type alias: str\n\n :param digests: List of blob hashes (prefixed by ``sha256:``).\n :type digests: list of strings\n\n :rtype: str\n :returns: The registry manifest used to define the alias. You almost definitely won't need this.\n " ]
Please provide a description of the function:def get_manifest_and_response(self, alias): r = self._request('get', 'manifests/' + alias, headers={'Accept': _schema2_mimetype + ', ' + _schema1_mimetype}) return r.content.decode('utf-8'), r
[ "\n Request the manifest for an alias and return the manifest and the\n response.\n\n :param alias: Alias name.\n :type alias: str\n\n :rtype: tuple\n :returns: Tuple containing the manifest as a string (JSON) and the `requests.Response <http://docs.python-requests.org/en/master/api/#requests.Response>`_\n " ]
Please provide a description of the function:def get_alias(self, alias=None, manifest=None, verify=True, sizes=False, dcd=None): # pylint: disable=too-many-arguments return self._get_alias(alias, manifest, verify, sizes, dcd, False)
[ "\n Get the blob hashes assigned to an alias.\n\n :param alias: Alias name. You almost definitely will only need to pass this argument.\n :type alias: str\n\n :param manifest: If you previously obtained a manifest, specify it here instead of ``alias``. You almost definitely won't need to do this.\n :type manifest: str\n\n :param verify: (v1 schema only) Whether to verify the integrity of the alias definition in the registry itself. You almost definitely won't need to change this from the default (``True``).\n :type verify: bool\n\n :param sizes: Whether to return sizes of the blobs along with their hashes\n :type sizes: bool\n\n :param dcd: (if ``manifest`` is specified) The Docker-Content-Digest header returned when getting the manifest. If present, this is checked against the manifest.\n :type dcd: str\n\n :rtype: list\n :returns: If ``sizes`` is falsey, a list of blob hashes (strings) which are assigned to the alias. If ``sizes`` is truthy, a list of (hash,size) tuples for each blob.\n " ]
Please provide a description of the function:def get_digest(self, alias=None, manifest=None, verify=True, dcd=None): return self._get_alias(alias, manifest, verify, False, dcd, True)
[ "\n (v2 schema only) Get the hash of an alias's configuration blob.\n\n For an alias created using ``dxf``, this is the hash of the first blob\n assigned to the alias.\n\n For a Docker image tag, this is the same as\n ``docker inspect alias --format='{{.Id}}'``.\n\n :param alias: Alias name. You almost definitely will only need to pass this argument.\n :type alias: str\n\n :param manifest: If you previously obtained a manifest, specify it here instead of ``alias``. You almost definitely won't need to do this.\n :type manifest: str\n\n :param verify: (v1 schema only) Whether to verify the integrity of the alias definition in the registry itself. You almost definitely won't need to change this from the default (``True``).\n :type verify: bool\n\n :param dcd: (if ``manifest`` is specified) The Docker-Content-Digest header returned when getting the manifest. If present, this is checked against the manifest.\n :type dcd: str\n\n :rtype: str\n :returns: Hash of the alias's configuration blob.\n " ]
Please provide a description of the function:def _get_dcd(self, alias): # https://docs.docker.com/registry/spec/api/#deleting-an-image # Note When deleting a manifest from a registry version 2.3 or later, # the following header must be used when HEAD or GET-ing the manifest # to obtain the correct digest to delete: # Accept: application/vnd.docker.distribution.manifest.v2+json return self._request( 'head', 'manifests/{}'.format(alias), headers={'Accept': _schema2_mimetype}, ).headers.get('Docker-Content-Digest')
[ "\n Get the Docker-Content-Digest header for an alias.\n\n :param alias: Alias name.\n :type alias: str\n\n :rtype: str\n :returns: DCD header for the alias.\n " ]
Please provide a description of the function:def del_alias(self, alias): dcd = self._get_dcd(alias) dgsts = self.get_alias(alias) self._request('delete', 'manifests/{}'.format(dcd)) return dgsts
[ "\n Delete an alias from the registry. The blobs it points to won't be deleted. Use :meth:`del_blob` for that.\n\n .. Note::\n On private registry, garbage collection might need to be run manually; see:\n https://docs.docker.com/registry/garbage-collection/\n\n :param alias: Alias name.\n :type alias: str\n\n :rtype: list\n :returns: A list of blob hashes (strings) which were assigned to the alias.\n " ]
Please provide a description of the function:def from_base(cls, base, repo): # pylint: disable=protected-access r = cls(base._host, repo, base._auth, base._insecure, base._auth_host, base._tlsverify) r._token = base._token r._headers = base._headers r._sessions = [base._sessions[0]] return r
[ "\n Create a :class:`DXF` object which uses the same host, settings and\n session as an existing :class:`DXFBase` object.\n\n :param base: Existing :class:`DXFBase` object.\n :type base: :class:`DXFBase`\n\n :param repo: Name of the repository to access on the registry. Typically this is of the form ``username/reponame`` but for your own registries you don't actually have to stick to that.\n :type repo: str\n\n :returns: :class:`DXF` object which shares configuration and session with ``base`` but which can also be used to operate on the ``repo`` repository.\n :rtype: :class:`DXF`\n " ]
Please provide a description of the function:def get_name(self, name_case=DdlParseBase.NAME_CASE.original): if name_case == self.NAME_CASE.lower: return self._name.lower() elif name_case == self.NAME_CASE.upper: return self._name.upper() else: return self._name
[ "\n Get Name converted case\n\n :param name_case: name case type\n * DdlParse.NAME_CASE.original : Return to no convert\n * DdlParse.NAME_CASE.lower : Return to lower\n * DdlParse.NAME_CASE.upper : Return to upper\n\n :return: name\n " ]
Please provide a description of the function:def constraint(self): constraint_arr = [] if self._not_null: constraint_arr.append("PRIMARY KEY" if self._pk else "NOT NULL") if self._unique: constraint_arr.append("UNIQUE") return " ".join(constraint_arr)
[ "Constraint string" ]
Please provide a description of the function:def bigquery_data_type(self): # BigQuery data type = {source_database: [data type, ...], ...} BQ_DATA_TYPE_DIC = OrderedDict() BQ_DATA_TYPE_DIC["STRING"] = {None: [re.compile(r"(CHAR|TEXT|CLOB|JSON|UUID)")]} BQ_DATA_TYPE_DIC["INTEGER"] = {None: [re.compile(r"INT|SERIAL|YEAR")]} BQ_DATA_TYPE_DIC["FLOAT"] = {None: [re.compile(r"(FLOAT|DOUBLE)"), "REAL", "MONEY"]} BQ_DATA_TYPE_DIC["DATETIME"] = { None: ["DATETIME", "TIMESTAMP", "TIMESTAMP WITHOUT TIME ZONE"], self.DATABASE.oracle: ["DATE"] } BQ_DATA_TYPE_DIC["TIMESTAMP"] = {None: ["TIMESTAMPTZ", "TIMESTAMP WITH TIME ZONE"]} BQ_DATA_TYPE_DIC["DATE"] = {None: ["DATE"]} BQ_DATA_TYPE_DIC["TIME"] = {None: ["TIME"]} BQ_DATA_TYPE_DIC["BOOLEAN"] = {None: [re.compile(r"BOOL")]} for bq_type, conditions in BQ_DATA_TYPE_DIC.items(): for source_db, source_datatypes in conditions.items(): for source_datatype in source_datatypes: if isinstance(source_datatype, str): if self._data_type == source_datatype \ and ( self._source_database == source_db or (self._source_database is not None and source_db is None)): return bq_type elif re.search(source_datatype, self._data_type) \ and ( self._source_database == source_db or (self._source_database is not None and source_db is None)): return bq_type if self._data_type in ["NUMERIC", "NUMBER", "DECIMAL"]: if self._scale is not None: return "FLOAT" if self._data_type == "NUMBER" \ and self._source_database == self.DATABASE.oracle \ and self._length is None: return "FLOAT" return "INTEGER" raise ValueError("Unknown data type : '{}'".format(self._data_type))
[ "Get BigQuery Legacy SQL data type" ]
Please provide a description of the function:def to_bigquery_field(self, name_case=DdlParseBase.NAME_CASE.original): col_name = self.get_name(name_case) mode = self.bigquery_mode if self.array_dimensional <= 1: # no or one dimensional array data type type = self.bigquery_legacy_data_type else: # multiple dimensional array data type type = "RECORD" fields = OrderedDict() fields_cur = fields for i in range(1, self.array_dimensional): is_last = True if i == self.array_dimensional - 1 else False fields_cur['fields'] = [OrderedDict()] fields_cur = fields_cur['fields'][0] fields_cur['name'] = "dimension_{}".format(i) fields_cur['type'] = self.bigquery_legacy_data_type if is_last else "RECORD" fields_cur['mode'] = self.bigquery_mode if is_last else "REPEATED" col = OrderedDict() col['name'] = col_name col['type'] = type col['mode'] = mode if self.array_dimensional > 1: col['fields'] = fields['fields'] return json.dumps(col)
[ "Generate BigQuery JSON field define" ]
Please provide a description of the function:def to_bigquery_fields(self, name_case=DdlParseBase.NAME_CASE.original): bq_fields = [] for col in self.values(): bq_fields.append(col.to_bigquery_field(name_case)) return "[{}]".format(",".join(bq_fields))
[ "\n Generate BigQuery JSON fields define\n\n :param name_case: name case type\n * DdlParse.NAME_CASE.original : Return to no convert\n * DdlParse.NAME_CASE.lower : Return to lower\n * DdlParse.NAME_CASE.upper : Return to upper\n\n :return: BigQuery JSON fields define\n " ]
Please provide a description of the function:def to_bigquery_fields(self, name_case=DdlParseBase.NAME_CASE.original): return self._columns.to_bigquery_fields(name_case)
[ "\n Generate BigQuery JSON fields define\n\n :param name_case: name case type\n * DdlParse.NAME_CASE.original : Return to no convert\n * DdlParse.NAME_CASE.lower : Return to lower\n * DdlParse.NAME_CASE.upper : Return to upper\n\n :return: BigQuery JSON fields define\n " ]
Please provide a description of the function:def to_bigquery_ddl(self, name_case=DdlParseBase.NAME_CASE.original):
        if self.schema is None:
            dataset = "dataset"
        elif name_case == self.NAME_CASE.lower:
            dataset = self.schema.lower()
        elif name_case == self.NAME_CASE.upper:
            dataset = self.schema.upper()
        else:
            dataset = self.schema

        cols_defs = []
        for col in self.columns.values():
            col_name = col.get_name(name_case)

            if col.array_dimensional < 1:
                # no array data type
                type = col.bigquery_standard_data_type
                not_null = " NOT NULL" if col.not_null else ""

            else:
                # one or multiple dimensional array data type
                type_front = "ARRAY<"
                type_back = ">"
                for i in range(1, col.array_dimensional):
                    type_front += "STRUCT<dimension_{} ARRAY<".format(i)
                    type_back += ">>"

                type = "{}{}{}".format(type_front, col.bigquery_standard_data_type, type_back)
                not_null = ""

            cols_defs.append("{name} {type}{not_null}".format(
                name=col_name,
                type=type,
                not_null=not_null,
            ))

        return textwrap.dedent(
        ).format(
            dataset=dataset,
            table=self.get_name(name_case),
            columns_define=",\n ".join(cols_defs),
        )
[ "\n Generate BigQuery CREATE TABLE statements\n\n :param name_case: name case type\n * DdlParse.NAME_CASE.original : Return to no convert\n * DdlParse.NAME_CASE.lower : Return to lower\n * DdlParse.NAME_CASE.upper : Return to upper\n\n :return: BigQuery CREATE TABLE statements\n ", "\\\n #standardSQL\n CREATE TABLE `project.{dataset}.{table}`\n (\n {colmns_define}\n )" ]
Please provide a description of the function:def parse(self, ddl=None, source_database=None): if ddl is not None: self._ddl = ddl if source_database is not None: self.source_database = source_database if self._ddl is None: raise ValueError("DDL is not specified") ret = self._DDL_PARSE_EXPR.parseString(self._ddl) # print(ret.dump()) if "schema" in ret: self._table.schema = ret["schema"] self._table.name = ret["table"] self._table.is_temp = True if "temp" in ret else False for ret_col in ret["columns"]: if ret_col.getName() == "column": # add column col = self._table.columns.append( column_name=ret_col["name"], data_type_array=ret_col["type"], array_brackets=ret_col['array_brackets'] if "array_brackets" in ret_col else None) if "constraint" in ret_col: col.constraint = ret_col["constraint"] elif ret_col.getName() == "constraint": # set column constraint for col_name in ret_col["constraint_columns"]: col = self._table.columns[col_name] if ret_col["type"] == "PRIMARY KEY": col.not_null = True col.primary_key = True elif ret_col["type"] in ["UNIQUE", "UNIQUE KEY"]: col.unique = True elif ret_col["type"] == "NOT NULL": col.not_null = True return self._table
[ "\n Parse DDL script.\n\n :param ddl: DDL script\n :return: DdlParseTable, Parsed table define info.\n " ]
Please provide a description of the function:def launch(program, sock, stderr=True, cwd=None, env=None): if stderr is True: err = sock # redirect to socket elif stderr is False: err = open(os.devnull, 'wb') # hide elif stderr is None: err = None # redirect to console p = subprocess.Popen(program, shell=type(program) not in (list, tuple), stdin=sock, stdout=sock, stderr=err, cwd=cwd, env=env, close_fds=True) sock.close() return p
[ "\n A static method for launching a process that is connected to a given\n socket. Same rules from the Process constructor apply.\n " ]
Please provide a description of the function:def respond(self, packet, peer, flags=0): self.sock.sendto(packet, flags, peer)
[ "\n Send a message back to a peer.\n\n :param packet: The data to send\n :param peer: The address to send to, as a tuple (host, port)\n :param flags: Any sending flags you want to use for some reason\n " ]
Please provide a description of the function:def _parse_target(target, listen, udp, ipv6): if isinstance(target, str): if target.startswith('nc '): out_host = None out_port = None try: opts, pieces = getopt.getopt(target.split()[1:], 'u46lp:', []) except getopt.GetoptError as exc: raise ValueError(exc) for opt, arg in opts: if opt == '-u': udp = True elif opt == '-4': ipv6 = False elif opt == '-6': ipv6 = True elif opt == '-l': listen = True elif opt == '-p': out_port = int(arg) else: assert False, "unhandled option" if not pieces: pass elif len(pieces) == 1: if listen and pieces[0].isdigit(): out_port = int(pieces[0]) else: out_host = pieces[0] elif len(pieces) == 2 and pieces[1].isdigit(): out_host = pieces[0] out_port = int(pieces[1]) else: raise ValueError("Bad cmdline: %s" % target) if out_host is None: if listen: out_host = '::' if ipv6 else '0.0.0.0' else: raise ValueError("Missing address: %s" % target) if out_port is None: raise ValueError("Missing port: %s" % target) if _is_ipv6_addr(out_host): ipv6 = True return (out_host, out_port), listen, udp, ipv6 elif PROTOCAL_RE.match(target) is not None: parsed = urlparse(target) port = None try: scheme_udp, scheme_ipv6, scheme_port = KNOWN_SCHEMES[parsed.scheme] except KeyError: raise ValueError("Unknown scheme: %s" % parsed.scheme) if scheme_udp is not None: udp = scheme_udp if scheme_ipv6 is not None: ipv6 = scheme_ipv6 if scheme_port is not None: port = scheme_port if parsed.netloc.startswith('['): addr, extra = parsed.netloc[1:].split(']', 1) if extra.startswith(':'): port = int(extra[1:]) else: if ':' in parsed.netloc: addr, port = parsed.netloc.split(':', 1) port = int(port) else: addr = parsed.netloc if addr is None or port is None: raise ValueError("Can't parse addr/port from %s" % target) if _is_ipv6_addr(addr): ipv6 = True return (addr, port), listen, udp, ipv6 else: if target.startswith('['): addr, extra = target[1:].split(']', 1) if extra.startswith(':'): port = int(extra[1:]) else: port = None else: if ':' in target: addr, port = target.split(':', 1) port = int(port) else: addr = target port = None if port is None: raise ValueError("No port given: %s" % target) if _is_ipv6_addr(addr): ipv6 = True return (addr, port), listen, udp, ipv6 elif isinstance(target, (int, long)): if listen: out_port = target else: raise ValueError("Can't deal with number as connection address") return ('::' if ipv6 else '0.0.0.0', out_port), listen, udp, ipv6 elif isinstance(target, tuple): if len(target) >= 1 and isinstance(target[0], str) and _is_ipv6_addr(target[0]): ipv6 = True return target, listen, udp, ipv6 else: raise ValueError("Can't parse target: %r" % target)
[ "\n Takes the basic version of the user args and extract as much data as\n possible from target. Returns a tuple that is its arguments but\n sanitized.\n " ]
Please provide a description of the function:def _connect(self, target, listen, udp, ipv6, retry): ty = socket.SOCK_DGRAM if udp else socket.SOCK_STREAM fam = socket.AF_INET6 if ipv6 else socket.AF_INET self.sock = socket.socket(fam, ty) if listen: self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.sock.bind(target) if not udp: self.sock.listen(1) conn, addr = self.sock.accept() self.sock.close() self.sock = conn self.peer = addr else: self.buf, self.peer = self.sock.recvfrom(1024) self.sock.connect(self.peer) self._log_recv(self.buf, False) if self.verbose: self._print_verbose('Connection from %s accepted' % str(self.peer)) else: while True: try: self.sock.connect(target) except (socket.gaierror, socket.herror) as exc: raise NetcatError('Could not connect to %r: %r' \ % (target, exc)) except socket.error as exc: if retry: time.sleep(0.2) else: raise NetcatError('Could not connect to %r: %r' \ % (target, exc)) else: break self.peer = target
[ "\n Takes target/listen/udp/ipv6 and sets self.sock and self.peer\n " ]
Please provide a description of the function:def close(self): if self._sock_send is not None: self._sock_send.close() return self.sock.close()
[ "\n Close the socket.\n " ]
Please provide a description of the function:def shutdown(self, how=socket.SHUT_RDWR): if self._sock_send is not None: self._sock_send.shutdown(how) return self.sock.shutdown(how)
[ "\n Send a shutdown signal for both reading and writing, or whatever\n socket.SHUT_* constant you like.\n\n Shutdown differs from closing in that it explicitly changes the state of\n the socket resource to closed, whereas closing will only decrement the\n number of peers on this end of the socket, since sockets can be a\n resource shared by multiple peers on a single OS. When the number of\n peers reaches zero, the socket is closed, but not deallocated, so you\n still need to call close. (except that this is python and close is\n automatically called on the deletion of the socket)\n\n http://stackoverflow.com/questions/409783/socket-shutdown-vs-socket-close\n " ]
Please provide a description of the function:def shutdown_rd(self): if self._sock_send is not None: self.sock.close() else: return self.shutdown(socket.SHUT_RD)
[ "\n Send a shutdown signal for reading - you may no longer read from this\n socket.\n " ]
Please provide a description of the function:def shutdown_wr(self): if self._sock_send is not None: self._sock_send.close() else: return self.shutdown(socket.SHUT_WR)
[ "\n Send a shutdown signal for writing - you may no longer write to this\n socket.\n " ]
Please provide a description of the function:def _recv_predicate(self, predicate, timeout='default', raise_eof=True): if timeout == 'default': timeout = self._timeout self.timed_out = False start = time.time() try: while True: cut_at = predicate(self.buf) if cut_at > 0: break if timeout is not None: time_elapsed = time.time() - start if time_elapsed > timeout: raise socket.timeout self._settimeout(timeout - time_elapsed) data = self._recv(4096) self._log_recv(data, False) self.buf += data if not data: if raise_eof: raise NetcatError("Connection dropped!") cut_at = len(self.buf) break except KeyboardInterrupt: self._print_header('\n======== Connection interrupted! ========') raise except socket.timeout: self.timed_out = True if self._raise_timeout: raise NetcatTimeout() return b'' except socket.error as exc: raise NetcatError('Socket error: %r' % exc) self._settimeout(self._timeout) ret = self.buf[:cut_at] self.buf = self.buf[cut_at:] self._log_recv(ret, True) return ret
[ "\n Receive until predicate returns a positive integer.\n The returned number is the size to return.\n " ]
Please provide a description of the function:def recv(self, n=4096, timeout='default'): self._print_recv_header( '======== Receiving {0}B{timeout_text} ========', timeout, n) return self._recv_predicate(lambda s: min(n, len(s)), timeout)
[ "\n Receive at most n bytes (default 4096) from the socket\n\n Aliases: read, get\n " ]
Please provide a description of the function:def recv_until(self, s, max_size=None, timeout='default'): self._print_recv_header( '======== Receiving until {0}{timeout_text} ========', timeout, repr(s)) if max_size is None: max_size = 2 ** 62 def _predicate(buf): try: return min(buf.index(s) + len(s), max_size) except ValueError: return 0 if len(buf) < max_size else max_size return self._recv_predicate(_predicate, timeout)
[ "\n Recieve data from the socket until the given substring is observed.\n Data in the same datagram as the substring, following the substring,\n will not be returned and will be cached for future receives.\n\n Aliases: read_until, readuntil, recvuntil\n " ]
Please provide a description of the function:def recv_all(self, timeout='default'): self._print_recv_header('======== Receiving until close{timeout_text} ========', timeout) return self._recv_predicate(lambda s: 0, timeout, raise_eof=False)
[ "\n Return all data recieved until connection closes.\n\n Aliases: read_all, readall, recvall\n " ]
Please provide a description of the function:def recv_exactly(self, n, timeout='default'): self._print_recv_header( '======== Receiving until exactly {0}B{timeout_text} ========', timeout, n) return self._recv_predicate(lambda s: n if len(s) >= n else 0, timeout)
[ "\n Recieve exactly n bytes\n\n Aliases: read_exactly, readexactly, recvexactly\n " ]
Please provide a description of the function:def send(self, s): self._print_header('======== Sending ({0}) ========'.format(len(s))) self._log_send(s) out = len(s) while s: s = s[self._send(s):] return out
[ "\n Sends all the given data to the socket.\n\n Aliases: write, put, sendall, send_all\n " ]
Please provide a description of the function:def interact(self, insock=sys.stdin, outsock=sys.stdout): self._print_header('======== Beginning interactive session ========') if hasattr(outsock, 'buffer'): outsock = outsock.buffer # pylint: disable=no-member self.timed_out = False save_verbose = self.verbose self.verbose = 0 try: if self.buf: outsock.write(self.buf) outsock.flush() self.buf = b'' while True: readable_socks = select(self.sock, insock) for readable in readable_socks: if readable is insock: data = os.read(insock.fileno(), 4096) self.send(data) if not data: raise NetcatError else: data = self.recv(timeout=None) outsock.write(data) outsock.flush() if not data: raise NetcatError except KeyboardInterrupt: self.verbose = save_verbose self._print_header('\n======== Connection interrupted! ========') raise except (socket.error, NetcatError): self.verbose = save_verbose self._print_header('\n======== Connection dropped! ========') finally: self.verbose = save_verbose
[ "\n Connects the socket to the terminal for user interaction.\n Alternate input and output files may be specified.\n\n This method cannot be used with a timeout.\n\n Aliases: interactive, interaction\n " ]
Please provide a description of the function:def recv_line(self, max_size=None, timeout='default', ending=None): if ending is None: ending = self.LINE_ENDING return self.recv_until(ending, max_size, timeout)
[ "\n Recieve until the next newline , default \"\\\\n\". The newline string can\n be changed by changing ``nc.LINE_ENDING``. The newline will be returned\n as part of the string.\n\n Aliases: recvline, readline, read_line, readln, recvln\n " ]
Please provide a description of the function:def send_line(self, line, ending=None): if ending is None: ending = self.LINE_ENDING return self.send(line + ending)
[ "\n Write the string to the wire, followed by a newline. The newline string\n can be changed by changing ``nc.LINE_ENDING``.\n\n Aliases: sendline, writeline, write_line, writeln, sendln\n " ]
Please provide a description of the function:def is_active(self, timperiods): now = int(time.time()) timperiod = timperiods[self.modulation_period] if not timperiod or timperiod.is_time_valid(now): return True return False
[ "\n Know if this result modulation is active now\n\n :return: True is we are in the period, otherwise False\n :rtype: bool\n " ]
Please provide a description of the function:def module_return(self, return_code, timeperiods):
        # Only if in modulation_period or modulation_period == None
        if self.is_active(timeperiods):
            # Try to change the exit code only if a new one is defined
            if self.exit_code_modulation is not None:
                # First, check the exit_codes_match
                if return_code in self.exit_codes_match:
                    return_code = self.exit_code_modulation

        return return_code
[ "Module the exit code if necessary ::\n\n * modulation_period is legit\n * exit_code_modulation\n * return_code in exit_codes_match\n\n :param return_code: actual code returned by the check\n :type return_code: int\n :return: return_code modulated if necessary (exit_code_modulation)\n :rtype: int\n " ]
Please provide a description of the function:def object(self, o_type, o_name=None): o_found = self._get_object(o_type=o_type, o_name=o_name) if not o_found: return {'_status': u'ERR', '_message': u'Required %s not found.' % o_type} return o_found
[ "Get an object from the scheduler.\n\n The result is a serialized object which is a Json structure containing:\n - content: the serialized object content\n - __sys_python_module__: the python class of the returned object\n\n The Alignak unserialize function of the alignak.misc.serialization package allows\n to restore the initial object.\n\n .. code-block:: python\n\n from alignak.misc.serialization import unserialize\n from alignak.objects.hostgroup import Hostgroup\n raw_data = req.get(\"http://127.0.0.1:7768/object/hostgroup/allhosts\")\n print(\"Got: %s / %s\" % (raw_data.status_code, raw_data.content))\n assert raw_data.status_code == 200\n object = raw_data.json()\n group = unserialize(object, True)\n assert group.__class__ == Hostgroup\n assert group.get_name() == 'allhosts'\n\n As an example:\n {\n \"__sys_python_module__\": \"alignak.objects.hostgroup.Hostgroup\",\n \"content\": {\n \"uuid\": \"32248642-97dd-4f39-aaa2-5120112a765d\",\n \"name\": \"\",\n \"hostgroup_name\": \"allhosts\",\n \"use\": [],\n \"tags\": [],\n \"alias\": \"All Hosts\",\n \"notes\": \"\",\n \"definition_order\": 100,\n \"register\": true,\n \"unknown_members\": [],\n \"notes_url\": \"\",\n \"action_url\": \"\",\n\n \"imported_from\": \"unknown\",\n \"conf_is_correct\": true,\n \"configuration_errors\": [],\n \"configuration_warnings\": [],\n \"realm\": \"\",\n \"downtimes\": {},\n \"hostgroup_members\": [],\n \"members\": [\n \"553d47bc-27aa-426c-a664-49c4c0c4a249\",\n \"f88093ca-e61b-43ff-a41e-613f7ad2cea2\",\n \"df1e2e13-552d-43de-ad2a-fe80ad4ba979\",\n \"d3d667dd-f583-4668-9f44-22ef3dcb53ad\"\n ]\n }\n }\n\n :param o_type: searched object type\n :type o_type: str\n :param o_name: searched object name (or uuid)\n :type o_name: str\n :return: serialized object information\n :rtype: str\n " ]
Please provide a description of the function:def dump(self, o_name=None, details=False, raw=False):
        # pylint: disable=too-many-locals, too-many-branches
        def get_host_info(host, services, details=False, raw=False):
            # pylint: disable=too-many-branches
            __props__ = [
                'last_check', 'state_id', 'state', 'state_type',
                'is_problem', 'is_impact', 'output'
            ]
            if details:
                __props__ = __props__ + [
                    'uuid', 'address', 'alias', 'business_impact', 'tags',
                    'customs', 'parents', 'long_output', 'perf_data',
                    'check_period', 'active_checks_enabled', 'passive_checks_enabled',
                    'check_freshness', 'freshness_threshold', 'freshness_state',
                    'get_overall_state', 'overall_state_id', 'state_id', 'state',
                    'state_type', 'passive_check', 'acknowledged', 'downtimed',
                    'next_check', 'last_time_up', 'last_time_down',
                    'last_time_ok', 'last_time_warning', 'last_time_critical',
                    'last_time_unknown', 'last_time_unreachable'
                ]

            host_data = OrderedDict({'type': 'host',
                                     'host': host.get_name(),
                                     'name': host.get_name()})
            __header__ = ['type', 'host', 'name']
            for key in __props__:
                if hasattr(host, key):
                    __header__.append(key)
                    if isinstance(getattr(host, key), Callable):
                        host_data[key] = getattr(host, key)(services)
                    elif isinstance(getattr(host, key), set):
                        host_data[key] = list(getattr(host, key))
                    else:
                        host_data[key] = getattr(host, key)
            if raw:
                host_data['_header_host'] = __header__

            host_data['services'] = []
            __header__ = ['type', 'host', 'name']
            for service in host.services:
                service = services[service]
                service_data = OrderedDict({'type': 'service',
                                            'host': host.get_name(),
                                            'name': service.get_name()})
                for key in __props__:
                    if hasattr(service, key):
                        if key not in __header__:
                            __header__.append(key)
                        if isinstance(getattr(service, key), Callable):
                            # Call on the service itself, not on the services collection
                            service_data[key] = getattr(service, key)()
                        elif isinstance(getattr(service, key), set):
                            service_data[key] = list(getattr(service, key))
                        else:
                            service_data[key] = getattr(service, key)
                host_data['services'].append(service_data)
            if raw:
                host_data['_header_service'] = __header__

            return host_data

        if details is not False:
            details = bool(details)
        if raw is not False:
            raw = bool(raw)

        ls = []
        try:
            hosts = self._get_objects('host')
            services = self._get_objects('service')
            if o_name is None:
                for host in hosts:
                    ls.append(get_host_info(host, services, details=details, raw=raw))
            else:
                # Perhaps we got an host uuid...
                host = hosts.find_by_name(o_name)
                if o_name in hosts:
                    host = hosts[o_name]
                if host:
                    # Honor the requested details level for a single host too
                    ls.append(get_host_info(host, services, details=details, raw=raw))
        except Exception as exp:  # pylint: disable=broad-except
            # format_exc() returns a string; print_exc() would return None here
            return str(exp) + " / " + traceback.format_exc()

        if o_name and not host:
            return {'_status': u'ERR', '_message': u'Required host (%s) not found.' % o_name}

        if raw and ls:
            raw_ls_hosts = []
            _header_host = ['type', 'host', 'name']
            raw_ls_services = []
            _header_service = ['type', 'host', 'name']
            for item in ls:
                if len(item['_header_host']) > len(_header_host):
                    _header_host = item['_header_host']
                if len(item['_header_service']) > len(_header_service):
                    _header_service = item['_header_service']
                item.pop('_header_host')
                item.pop('_header_service')

                services = []
                if 'services' in item:
                    services = item.pop('services')

                # Write host line
                raw_ls_hosts.append(';'.join("%s" % val for val in list(item.values())))

                for service in services:
                    raw_ls_services.append(
                        ';'.join("%s" % val for val in list(service.values())))

            raw_ls_hosts.insert(0, ';'.join(_header_host))
            raw_ls_services.insert(0, ';'.join(_header_service))
            return [raw_ls_hosts, raw_ls_services]

        return ls
[ "Dump an host (all hosts) from the scheduler.\n\n This gets the main host information from the scheduler. If details is set, then some\n more information are provided. This will not get all the host known attributes but only\n a reduced set that will inform about the host and its services status\n\n If raw is set the information are provided in two string lists formated as CSV strings.\n The first list element contains the hosts information and the second one contains the\n services information.\n\n If an host name is provided, this function will get only this host information, else\n all the scheduler hosts are returned.\n\n As an example (raw format):\n [\n [ # Host information\n \"type;host;name;last_check;state_id;state;state_type;is_problem;is_impact;output\",\n \"BR_host;host;BR_host;1532451511;0;UP;HARD;False;False;Host assumed to be UP\"\n ],\n [ # Services information\n \"type;host;name;last_check;state_id;state;state_type;is_problem;is_impact;output\",\n \"BR_host;service;dummy_critical;1532451490;2;CRITICAL;SOFT;False;False;\n BR_host-dummy_critical-2\",\n \"BR_host;service;BR_Simple_And;0;0;OK;HARD;False;False;\",\n \"BR_host;service;dummy_unreachable;1532451501;4;UNREACHABLE;SOFT;False;False;\n BR_host-dummy_unreachable-4\",\n \"BR_host;service;dummy_no_output;1532451495;0;OK;HARD;False;False;\n Service internal check result: 0\",\n \"BR_host;service;dummy_unknown;1532451475;3;UNKNOWN;SOFT;False;False;\n BR_host-dummy_unknown-3\",\n \"BR_host;service;dummy_echo;1532451501;0;OK;HARD;False;False;\",\n \"BR_host;service;dummy_warning;1532451492;1;WARNING;SOFT;False;False;\n BR_host-dummy_warning-1\",\n \"BR_host;service;dummy_random;1532451496;2;CRITICAL;SOFT;False;False;\n Service internal check result: 2\",\n \"BR_host;service;dummy_ok;1532451492;0;OK;HARD;False;False;BR_host\"\n ]\n ]\n\n As an example (json format):\n {\n is_impact: false,\n name: \"BR_host\",\n state: \"UP\",\n last_check: 1532451811,\n state_type: \"HARD\",\n host: \"BR_host\",\n output: \"Host assumed to be UP\",\n services: [\n {\n is_impact: false,\n name: \"dummy_critical\",\n state: \"CRITICAL\",\n last_check: 1532451790,\n state_type: \"HARD\",\n host: \"BR_host\",\n output: \"BR_host-dummy_critical-2\",\n state_id: 2,\n type: \"service\",\n is_problem: true\n },\n {\n is_impact: true,\n name: \"BR_Simple_And\",\n state: \"WARNING\",\n last_check: 1532451775,\n state_type: \"SOFT\",\n host: \"BR_host\",\n output: \"\",\n state_id: 1,\n type: \"service\",\n is_problem: false\n },\n ....\n ....\n },\n state_id: 0,\n type: \"host\",\n is_problem: false\n }\n\n :param o_name: searched host name (or uuid)\n :type o_name: str\n :param details: less or more details\n :type details: bool\n :param raw: json or raw text format\n :type raw: bool\n :return: list of host and services information\n :rtype: list\n ", "Get the host information\n\n :return: None\n " ]
Please provide a description of the function:def monitoring_problems(self): if self.app.type != 'scheduler': return {'_status': u'ERR', '_message': u"This service is only available for a scheduler daemon"} res = self.identity() res.update(self.app.get_monitoring_problems()) return res
[ "Get Alignak scheduler monitoring status\n\n Returns an object with the scheduler livesynthesis\n and the known problems\n\n :return: scheduler live synthesis\n :rtype: dict\n " ]
Please provide a description of the function:def _wait_new_conf(self): # Stop the scheduling loop self.app.sched.stop_scheduling() super(SchedulerInterface, self)._wait_new_conf()
[ "Ask the scheduler to drop its configuration and wait for a new one.\n\n This overrides the default method from GenericInterface\n\n :return: None\n " ]
Please provide a description of the function:def _initial_broks(self, broker_name): with self.app.conf_lock: logger.info("A new broker just connected : %s", broker_name) return self.app.sched.fill_initial_broks(broker_name)
[ "Get initial_broks from the scheduler\n\n This is used by the brokers to prepare the initial status broks\n\n This do not send broks, it only makes scheduler internal processing. Then the broker\n must use the *_broks* API to get all the stuff\n\n :param broker_name: broker name, used to filter broks\n :type broker_name: str\n :return: None\n " ]
Please provide a description of the function:def _broks(self, broker_name): logger.debug("Getting broks for %s from the scheduler", broker_name) for broker_link in list(self.app.brokers.values()): if broker_name == broker_link.name: break else: logger.warning("Requesting broks for an unknown broker: %s", broker_name) return {} # Now get the broks for this specific broker with self.app.broks_lock: res = self.app.get_broks(broker_name) return serialize(res, True)
[ "Get the broks from a scheduler, used by brokers\n\n This is used by the brokers to get the broks list of a scheduler\n\n :param broker_name: broker name, used to filter broks\n :type broker_name: str\n :return: serialized brok list\n :rtype: dict\n " ]
Please provide a description of the function:def _checks(self, do_checks=False, do_actions=False, poller_tags=None, reactionner_tags=None, worker_name='none', module_types=None): if poller_tags is None: poller_tags = ['None'] if reactionner_tags is None: reactionner_tags = ['None'] if module_types is None: module_types = ['fork'] do_checks = (do_checks == 'True') do_actions = (do_actions == 'True') res = self.app.sched.get_to_run_checks(do_checks, do_actions, poller_tags, reactionner_tags, worker_name, module_types) return serialize(res, True)
[ "Get checks from scheduler, used by poller or reactionner when they are\n in active mode (passive = False)\n\n This function is not intended for external use. Let the poller and reactionner\n manage all this stuff by themselves ;)\n\n :param do_checks: used for poller to get checks\n :type do_checks: bool\n :param do_actions: used for reactionner to get actions\n :type do_actions: bool\n :param poller_tags: poller tags to filter on this poller\n :type poller_tags: list\n :param reactionner_tags: reactionner tags to filter on this reactionner\n :type reactionner_tags: list\n :param worker_name: Worker name asking (so that the scheduler add it to actions objects)\n :type worker_name: str\n :param module_types: Module type to filter actions/checks\n :type module_types: list\n :return: serialized check/action list\n :rtype: str\n " ]
Please provide a description of the function:def put_results(self): res = cherrypy.request.json who_sent = res['from'] results = res['results'] results = unserialize(results, no_load=True) if results: logger.debug("Got some results: %d results from %s", len(results), who_sent) else: logger.debug("-> no results") for result in results: logger.debug("-> result: %s", result) # Append to the scheduler result queue self.app.sched.waiting_results.put(result) return True
[ "Put results to scheduler, used by poller or reactionner when they are\n in active mode (passive = False)\n\n This function is not intended for external use. Let the poller and reactionner\n manage all this stuff by themselves ;)\n\n :param from: poller/reactionner identification\n :type from: str\n :param results: list of actions results\n :type results: list\n :return: True\n :rtype: bool\n " ]
Please provide a description of the function:def _run_external_commands(self): commands = cherrypy.request.json with self.app.lock: self.app.sched.run_external_commands(commands['cmds'])
[ "Post external_commands to scheduler (from arbiter)\n Wrapper to to app.sched.run_external_commands method\n\n :return: None\n " ]
Please provide a description of the function:def _get_objects(self, o_type): if o_type not in [t for t in self.app.sched.pushed_conf.types_creations]: return None try: _, _, strclss, _, _ = self.app.sched.pushed_conf.types_creations[o_type] o_list = getattr(self.app.sched, strclss) except Exception: # pylint: disable=broad-except return None return o_list
[ "Get an object list from the scheduler\n\n Returns None if the required object type (`o_type`) is not known or an exception is raised.\n Else returns the objects list\n\n :param o_type: searched object type\n :type o_type: str\n :return: objects list\n :rtype: alignak.objects.item.Items\n " ]