text_prompt
stringlengths 100
17.7k
⌀ | code_prompt
stringlengths 7
9.86k
⌀ |
---|---|
<SYSTEM_TASK:>
Generate .pypirc config with the given credentials.
<END_TASK>
<USER_TASK:>
Description:
def configure(username, password):
    # type: (str, str) -> None
    """ Generate .pypirc config with the given credentials.

    Example:

        $ peltak pypi configure my_pypi_user my_pypi_pass
    """
    # Import lazily so the CLI stays fast when this command is not used.
    from peltak.extra.pypi import logic as pypi_logic

    pypi_logic.gen_pypirc(username, password)
<SYSTEM_TASK:>
Returns translation of string passed.
<END_TASK>
<USER_TASK:>
Description:
def tr(self, subdomain: str, string_to_translate: str = "") -> str:
    """Returns translation of string passed.

    :param str subdomain: subpart of strings dictionary.
        Must be one of self.translations.keys() i.e. 'restrictions'
    :param str string_to_translate: string you want to translate
    :return: the translation, or "String not found" when the string has
        no entry in the subdomain dictionary
    :raises ValueError: when subdomain is not a known subdomain
    """
    # Membership test on the dict directly; ".keys()" was redundant.
    if subdomain not in self.translations:
        raise ValueError(
            "'{}' is not a correct subdomain."
            " Must be one of {}".format(subdomain, self.translations.keys())
        )
    # The subdomain is guaranteed present here, so the original's dead
    # `else: pass` and unreachable "Subdomain not found" fallback dict
    # have been removed.
    return self.translations[subdomain].get(string_to_translate, "String not found")
<SYSTEM_TASK:>
This is a duplicate of the view code for DRF to stop future
<END_TASK>
<USER_TASK:>
Description:
def optout_saved(sender, instance, **kwargs):
    """
    This is a duplicate of the view code for DRF to stop future
    internal Django implementations breaking.
    """
    # Nothing to do when the opt-out already carries an identity.
    if instance.identity is not None:
        return
    # Resolve the identity from the opt-out's address details.
    matches = Identity.objects.filter_by_addr(
        instance.address_type, instance.address
    )
    # Only link when the address resolves to exactly one identity.
    if matches.count() == 1:
        instance.identity = matches[0]
<SYSTEM_TASK:>
Given an interface number, gets the AWS elastic network
<END_TASK>
<USER_TASK:>
Description:
def get_eni_id(self, interface=1):
    """Given an interface number, gets the AWS elastic network
    interface associated with the interface.

    :param interface: Integer associated to the interface/device number
    :return: String Elastic Network Interface ID or None if not found
    :raises OSError, AWSAPIError, EC2UtilError
    """
    log = logging.getLogger(self.cls_logger + '.get_eni_id')

    # Get the instance-id
    if self.instance_id is None:
        msg = 'Instance ID not found for this machine'
        log.error(msg)
        raise OSError(msg)
    log.info('Found instance ID: {i}'.format(i=self.instance_id))

    log.debug('Querying EC2 instances...')
    try:
        response = self.client.describe_instances(
            DryRun=False,
            InstanceIds=[self.instance_id]
        )
    except ClientError as exc:
        # NOTE: converted from the Python-2-only "raise X, msg, trace"
        # statement to the call form already used elsewhere in this class.
        msg = 'Unable to query EC2 for instances\n{e}'.format(e=str(exc))
        log.error(msg)
        raise AWSAPIError(msg)
    log.debug('Found instance info: {r}'.format(r=response))

    # Find the ENI ID attached at the requested device index
    log.info('Looking for the ENI ID to alias...')
    eni_id = None
    try:
        for reservation in response['Reservations']:
            for instance in reservation['Instances']:
                if instance['InstanceId'] == self.instance_id:
                    for network_interface in instance['NetworkInterfaces']:
                        if network_interface['Attachment']['DeviceIndex'] == interface:
                            eni_id = network_interface['NetworkInterfaceId']
    except KeyError:
        msg = 'ENI ID not found in AWS response for interface: {i}'.format(i=interface)
        log.error(msg)
        raise EC2UtilError(msg)
    log.info('Found ENI ID: {e}'.format(e=eni_id))
    return eni_id
<SYSTEM_TASK:>
Adds an IP address as a secondary IP address
<END_TASK>
<USER_TASK:>
Description:
def add_secondary_ip(self, ip_address, interface=1):
    """Adds an IP address as a secondary IP address

    :param ip_address: String IP address to add as a secondary IP
    :param interface: Integer associated to the interface/device number
    :return: None
    :raises: AWSAPIError, EC2UtilError
    """
    log = logging.getLogger(self.cls_logger + '.add_secondary_ip')

    # Get the ENI ID for the requested interface
    eni_id = self.get_eni_id(interface)

    # Verify the ENI ID was found
    if eni_id is None:
        msg = 'Unable to find the corresponding ENI ID for interface: {i}'.format(i=interface)
        log.error(msg)
        raise EC2UtilError(msg)
    log.info('Found ENI ID: {e}'.format(e=eni_id))

    # Assign the secondary IP address
    log.info('Attempting to assign the secondary IP address...')
    try:
        self.client.assign_private_ip_addresses(
            NetworkInterfaceId=eni_id,
            PrivateIpAddresses=[
                ip_address,
            ],
            AllowReassignment=True
        )
    except ClientError as exc:
        # Python-3-compatible raise (was the py2-only "raise X, msg, trace")
        msg = 'Unable to assign secondary IP address\n{e}'.format(e=str(exc))
        log.error(msg)
        raise AWSAPIError(msg)
    log.info('Successfully added secondary IP address {s} to ENI ID {e} on interface {i}'.format(
        s=ip_address, e=eni_id, i=interface))
<SYSTEM_TASK:>
Given an elastic IP address and an interface number, associates the
<END_TASK>
<USER_TASK:>
Description:
def associate_elastic_ip(self, allocation_id, interface=1, private_ip=None):
    """Given an elastic IP address and an interface number, associates the
    elastic IP to the interface number on this host.

    :param allocation_id: String ID for the elastic IP
    :param interface: Integer associated to the interface/device number
    :param private_ip: String IP address of the private IP address to
        assign
    :return: None
    :raises: OSError, AWSAPIError, EC2UtilError
    """
    log = logging.getLogger(self.cls_logger + '.associate_elastic_ip')

    if private_ip is None:
        # Fixed missing space between "IP" and "address" in the log message
        log.info('No private IP address provided, getting the primary IP '
                 'address on interface {i}...'.format(i=interface))
        private_ip = get_ip_addresses()['eth{i}'.format(i=interface)]

    log.info('Associating Elastic IP {e} on interface {i} on IP {p}'.format(
        e=allocation_id, i=interface, p=private_ip))

    # Get the ENI ID
    log.info('Getting the ENI ID for interface: {i}'.format(i=interface))
    eni_id = self.get_eni_id(interface)

    # Verify the ENI ID was found
    if eni_id is None:
        msg = 'Unable to find the corresponding ENI ID for interface: {i}'.format(i=interface)
        log.error(msg)
        raise OSError(msg)
    log.info('Found ENI ID: {e}'.format(e=eni_id))

    # Associate the elastic IP (log message previously said "secondary IP",
    # a copy-paste from add_secondary_ip)
    log.info('Attempting to associate the elastic IP address...')
    try:
        response = self.client.associate_address(
            NetworkInterfaceId=eni_id,
            AllowReassociation=True,
            AllocationId=allocation_id,
            PrivateIpAddress=private_ip
        )
    except ClientError as exc:
        msg = 'Unable to attach elastic IP address {a} to interface {i}\n{e}'.format(
            a=allocation_id, i=interface, e=str(exc))
        log.error(msg)
        raise AWSAPIError(msg)

    code = response['ResponseMetadata']['HTTPStatusCode']
    if code != 200:
        msg = 'associate_address returned invalid code: {c}'.format(c=code)
        log.error(msg)
        raise AWSAPIError(msg)
    log.info('Successfully associated elastic IP address ID {a} to interface {i} on ENI ID {e}'.format(
        a=allocation_id, i=interface, e=eni_id))
<SYSTEM_TASK:>
Allocates an elastic IP address
<END_TASK>
<USER_TASK:>
Description:
def allocate_elastic_ip(self):
    """Allocates an elastic IP address

    :return: Dict with allocation ID and Public IP that were created
    :raises: AWSAPIError, EC2UtilError
    """
    log = logging.getLogger(self.cls_logger + '.allocate_elastic_ip')

    # Attempt to allocate a new elastic IP
    log.info('Attempting to allocate an elastic IP...')
    try:
        response = self.client.allocate_address(
            DryRun=False,
            Domain='vpc'
        )
    except ClientError as exc:
        # Python-3-compatible raise (was the py2-only "raise X, msg, trace")
        msg = 'Unable to allocate a new elastic IP address\n{e}'.format(e=str(exc))
        log.error(msg)
        raise AWSAPIError(msg)

    allocation_id = response['AllocationId']
    public_ip = response['PublicIp']
    log.info('Allocated Elastic IP with ID {a} and Public IP address {p}'.
             format(a=allocation_id, p=public_ip))

    # Verify the Address was allocated successfully
    log.info('Verifying the elastic IP address was allocated and is available '
             'for use...')
    ready = False
    # Back-off schedule: 60 checks at 2s, 60 at 5s, 18 at 10s (~10 minutes)
    verification_timer = [2]*60 + [5]*60 + [10]*18
    for wait_time in verification_timer:
        try:
            self.client.describe_addresses(
                DryRun=False,
                AllocationIds=[allocation_id]
            )
        except ClientError as exc:
            log.info('Elastic IP address {p} with Allocation ID {a} is not available for use, trying again in '
                     '{w} sec...\n{e}'.format(p=public_ip, a=allocation_id, w=wait_time, e=str(exc)))
            time.sleep(wait_time)
        else:
            log.info('Elastic IP {p} with Allocation ID {a} is available for use'.format(
                p=public_ip, a=allocation_id))
            ready = True
            break
    if ready:
        return {'AllocationId': allocation_id, 'PublicIp': public_ip}
    msg = 'Unable to verify existence of new Elastic IP {p} with Allocation ID: {a}'.format(
        p=public_ip, a=allocation_id)
    log.error(msg)
    raise EC2UtilError(msg)
<SYSTEM_TASK:>
Returns the elastic IP info for this instance any are
<END_TASK>
<USER_TASK:>
Description:
def get_elastic_ips(self):
    """Returns the elastic IP info for this instance any are
    attached

    :return: (dict) Info about the Elastic IPs, or None when the
        instance ID cannot be determined or no Elastic IPs are attached
    :raises AWSAPIError
    """
    log = logging.getLogger(self.cls_logger + '.get_elastic_ips')
    instance_id = get_instance_id()
    if instance_id is None:
        log.error('Unable to get the Instance ID for this machine')
        return
    log.info('Found Instance ID: {i}'.format(i=instance_id))

    log.info('Querying AWS for info about instance ID {i}...'.format(i=instance_id))
    try:
        instance_info = self.client.describe_instances(DryRun=False, InstanceIds=[instance_id])
    except ClientError as exc:
        # Python-3-compatible raise (was the py2-only "raise X, msg, trace")
        msg = 'Unable to query AWS to get info for instance {i}\n{e}'.format(i=instance_id, e=exc)
        log.error(msg)
        raise AWSAPIError(msg)

    # Get the list of Public/Elastic IPs for this instance
    public_ips = []
    for network_interface in instance_info['Reservations'][0]['Instances'][0]['NetworkInterfaces']:
        network_interface_id = network_interface['NetworkInterfaceId']
        log.info('Checking ENI: {n}...'.format(n=network_interface_id))
        try:
            public_ips.append(network_interface['Association']['PublicIp'])
        except KeyError:
            # ENIs without an 'Association' entry simply have no public IP
            log.info('No Public IP found for Network Interface ID: {n}'.format(n=network_interface_id))
        else:
            log.info('Found public IP for Network Interface ID {n}: {p}'.format(
                n=network_interface_id, p=network_interface['Association']['PublicIp']))

    # Return if no Public/Elastic IPs found
    if not public_ips:
        log.info('No Elastic IPs found for this instance: {i}'.format(i=instance_id))
        return
    log.info('Found Public IPs: {p}'.format(p=public_ips))

    # Get info for each Public/Elastic IP
    try:
        address_info = self.client.describe_addresses(DryRun=False, PublicIps=public_ips)
    except ClientError as exc:
        msg = 'Unable to query AWS to get info for addresses {p}\n{e}'.format(p=public_ips, e=str(exc))
        log.error(msg)
        raise AWSAPIError(msg)
    if not address_info:
        msg = 'No address info return for Public IPs: {p}'.format(p=public_ips)
        log.error(msg)
        raise AWSAPIError(msg)
    return address_info
<SYSTEM_TASK:>
For each attached Elastic IP, disassociate it
<END_TASK>
<USER_TASK:>
Description:
def disassociate_elastic_ips(self):
    """For each attached Elastic IP, disassociate it

    :return: None
    :raises AWSAPIError
    """
    log = logging.getLogger(self.cls_logger + '.disassociate_elastic_ips')

    try:
        address_info = self.get_elastic_ips()
    except AWSAPIError:
        # Python-3-compatible raise (was the py2-only "raise X, msg, trace")
        msg = 'Unable to determine Elastic IPs on this EC2 instance'
        log.error(msg)
        raise AWSAPIError(msg)

    # Return if no elastic IPs were found
    if not address_info:
        log.info('No elastic IPs found to disassociate')
        return

    # Disassociate each Elastic IP
    for address in address_info['Addresses']:
        association_id = address['AssociationId']
        public_ip = address['PublicIp']
        log.info('Attempting to disassociate address {p} from Association ID: {a}'.format(
            p=public_ip, a=association_id))
        try:
            self.client.disassociate_address(PublicIp=public_ip, AssociationId=association_id)
        except ClientError:
            msg = 'There was a problem disassociating Public IP {p} from Association ID {a}'.format(
                p=public_ip, a=association_id)
            log.error(msg)
            raise AWSAPIError(msg)
        else:
            log.info('Successfully disassociated Public IP: {p}'.format(p=public_ip))
<SYSTEM_TASK:>
Lists security groups in the VPC. If vpc_id is not provided, use self.vpc_id
<END_TASK>
<USER_TASK:>
Description:
def list_security_groups_in_vpc(self, vpc_id=None):
    """Lists security groups in the VPC. If vpc_id is not provided, use self.vpc_id

    :param vpc_id: (str) VPC ID to list security groups for
    :return: (list) Security Group info
    :raises: AWSAPIError, EC2UtilError
    """
    log = logging.getLogger(self.cls_logger + '.list_security_groups_in_vpc')

    # BUGFIX: the original if/else raised whenever a vpc_id argument WAS
    # provided (condition inverted). Fall back to self.vpc_id only when no
    # argument was given, and raise only when neither is available.
    if vpc_id is None:
        vpc_id = self.vpc_id
    if vpc_id is None:
        msg = 'Unable to determine VPC ID to use to create the Security Group'
        log.error(msg)
        raise EC2UtilError(msg)

    # Create a filter on the VPC ID
    filters = [
        {
            'Name': 'vpc-id',
            'Values': [vpc_id]
        }
    ]

    # Get a list of security groups in the VPC
    log.info('Querying for a list of security groups in VPC ID: {v}'.format(v=vpc_id))
    try:
        security_groups = self.client.describe_security_groups(DryRun=False, Filters=filters)
    except ClientError as exc:
        msg = 'Unable to query AWS for a list of security groups in VPC ID: {v}\n{e}'.format(
            v=vpc_id, e=str(exc))
        log.error(msg)
        raise AWSAPIError(msg)
    return security_groups
<SYSTEM_TASK:>
Revokes all ingress rules for a security group by ID
<END_TASK>
<USER_TASK:>
Description:
def revoke_security_group_ingress(self, security_group_id, ingress_rules):
    """Revokes all ingress rules for a security group by ID

    :param security_group_id: (str) Security Group ID
    :param ingress_rules: (list) List of IP permissions (see AWS API docs re: IpPermissions)
    :return: None
    :raises: AWSAPIError, EC2UtilError
    """
    log = logging.getLogger(self.cls_logger + '.revoke_security_group_ingress')
    log.info('Revoking ingress rules from security group: {g}'.format(g=security_group_id))
    try:
        self.client.revoke_security_group_ingress(
            DryRun=False,
            GroupId=security_group_id,
            IpPermissions=ingress_rules)
    except ClientError as exc:
        # Log before raising, matching the other methods in this class;
        # also converted the py2-only "raise X, msg, trace" form.
        msg = 'Unable to remove existing Security Group rules for port from Security Group: {g}\n{e}'.format(
            g=security_group_id, e=str(exc))
        log.error(msg)
        raise AWSAPIError(msg)
<SYSTEM_TASK:>
Launches an EC2 instance with the specified parameters, intended to launch
<END_TASK>
<USER_TASK:>
Description:
def launch_instance(self, ami_id, key_name, subnet_id, security_group_id=None, security_group_list=None,
                    user_data_script_path=None, instance_type='t2.small', root_device_name='/dev/xvda'):
    """Launches an EC2 instance with the specified parameters, intended to launch
    an instance for creation of a CONS3RT template.

    :param ami_id: (str) ID of the AMI to launch from
    :param key_name: (str) Name of the key-pair to use
    :param subnet_id: (str) ID of the VPC subnet to attach the instance to
    :param security_group_id: (str) ID of the security group; appended to
        security_group_list if provided
    :param security_group_list: (list) of security group IDs; if not provided
        the default will be applied
    :param user_data_script_path: (str) Path to the user-data script to run
    :param instance_type: (str) Instance Type (e.g. t2.micro)
    :param root_device_name: (str) The device name for the root volume
    :return: (dict) with keys InstanceId and InstanceInfo
    :raises: EC2UtilError
    """
    log = logging.getLogger(self.cls_logger + '.launch_instance')
    log.info('Launching with AMI ID: {a}'.format(a=ami_id))
    log.info('Launching with Key Pair: {k}'.format(k=key_name))

    if security_group_list:
        if not isinstance(security_group_list, list):
            raise EC2UtilError('security_group_list must be a list')
    # Merge the single security group ID into the list form
    if security_group_id and security_group_list:
        security_group_list.append(security_group_id)
    elif security_group_id and not security_group_list:
        security_group_list = [security_group_id]
    log.info('Launching with security group list: {s}'.format(s=security_group_list))

    # Read the user-data script if one was provided and exists on disk
    user_data = None
    if user_data_script_path is not None:
        if os.path.isfile(user_data_script_path):
            with open(user_data_script_path, 'r') as f:
                user_data = f.read()

    monitoring = {'Enabled': False}
    block_device_mappings = [
        {
            'DeviceName': root_device_name,
            'Ebs': {
                'VolumeSize': 100,
                'DeleteOnTermination': True
            }
        }
    ]
    log.info('Attempting to launch the EC2 instance now...')
    # NOTE(review): UserData/SecurityGroupIds may be None here, which boto3
    # rejects with a parameter-validation error -- confirm callers always
    # pass a user-data script and at least one security group.
    try:
        response = self.client.run_instances(
            DryRun=False,
            ImageId=ami_id,
            MinCount=1,
            MaxCount=1,
            KeyName=key_name,
            SecurityGroupIds=security_group_list,
            UserData=user_data,
            InstanceType=instance_type,
            Monitoring=monitoring,
            SubnetId=subnet_id,
            InstanceInitiatedShutdownBehavior='stop',
            BlockDeviceMappings=block_device_mappings
        )
    except ClientError as exc:
        # Python-3-compatible raise (was the py2-only "raise X, msg, trace")
        msg = '{n}: There was a problem launching the EC2 instance\n{e}'.format(
            n=exc.__class__.__name__, e=str(exc))
        log.error(msg)
        raise EC2UtilError(msg)
    instance_id = response['Instances'][0]['InstanceId']
    output = {
        'InstanceId': instance_id,
        'InstanceInfo': response['Instances'][0]
    }
    return output
<SYSTEM_TASK:>
Describes the EC2 instances
<END_TASK>
<USER_TASK:>
Description:
def get_ec2_instances(self):
    """Describes the EC2 instances

    :return: dict containing EC2 instance data
    :raises: EC2UtilError
    """
    log = logging.getLogger(self.cls_logger + '.get_ec2_instances')
    log.info('Describing EC2 instances...')
    try:
        response = self.client.describe_instances()
    except ClientError as exc:
        # Python-3-compatible raise (was the py2-only "raise X, msg, trace")
        msg = '{n}: There was a problem describing EC2 instances\n{e}'.format(
            n=exc.__class__.__name__, e=str(exc))
        log.error(msg)
        raise EC2UtilError(msg)
    return response
<SYSTEM_TASK:>
Build project documentation.
<END_TASK>
<USER_TASK:>
Description:
def docs_cli(ctx, recreate, gen_index, run_doctests):
    # type: (click.Context, bool, bool, bool) -> None
    """ Build project documentation.
    This command will run sphinx-refdoc first to generate the reference
    documentation for the code base. Then it will run sphinx to generate the
    final docs. You can configure the directory that stores the docs source
    (index.rst, conf.py, etc.) using the DOC_SRC_PATH conf variable. In case you
    need it, the sphinx build directory is located in ``BUILD_DIR/docs``.
    The reference documentation will be generated for all directories listed
    under 'REFDOC_PATHS conf variable. By default it is empty so no reference
    docs are generated.
    Sample Config::
    \b
        build_dir: '.build'
        docs:
          path: 'docs'
          reference:
            - 'src/mypkg'
    Examples::
    \b
        $ peltak docs                        # Generate docs for the project
        $ peltak docs --no-index             # Skip main reference index
        $ peltak docs --recreate --no-index  # Build docs from clean slate
    """
    # Only act when invoked directly, not when dispatching to a subcommand.
    if not ctx.invoked_subcommand:
        from peltak.logic import docs
        docs.docs(recreate, gen_index, run_doctests)
<SYSTEM_TASK:>
Convert a value to a Unicode object for matching with a query.
<END_TASK>
<USER_TASK:>
Description:
def as_string(value):
    """Convert a value to a Unicode object for matching with a query.
    None becomes the empty string. Bytestrings are silently decoded.
    """
    # `buffer` only exists on Python 2; the conditional expression keeps it
    # from being evaluated on Python 3.
    buffer_types = (buffer, memoryview) if six.PY2 else memoryview  # noqa: F821
    if value is None:
        return u''
    if isinstance(value, buffer_types):
        return bytes(value).decode('utf8', 'ignore')
    if isinstance(value, bytes):
        return value.decode('utf8', 'ignore')
    return six.text_type(value)
<SYSTEM_TASK:>
Attempts to decode a bytestring path to a unicode object for the
<END_TASK>
<USER_TASK:>
Description:
def displayable_path(path, separator=u'; '):
    """Attempts to decode a bytestring path to a unicode object for the
    purpose of displaying it to the user. If the `path` argument is a
    list or a tuple, the elements are joined with `separator`.
    """
    # Collections are rendered element-by-element, recursively.
    if isinstance(path, (list, tuple)):
        return separator.join(displayable_path(part) for part in path)
    if isinstance(path, six.text_type):
        return path
    if not isinstance(path, bytes):
        # A non-string object: just get its unicode representation.
        return six.text_type(path)
    try:
        return path.decode(_fsencoding(), 'ignore')
    except (UnicodeError, LookupError):
        return path.decode('utf8', 'ignore')
<SYSTEM_TASK:>
Get version storage for the given version file.
<END_TASK>
<USER_TASK:>
Description:
def get_version_storage():
    # type: () -> VersionStorage
    """ Get version storage for the given version file.

    The storage engine used depends on the extension of the *version_file*.
    """
    version_file = conf.get_path('version_file', 'VERSION')
    # Dispatch on the file name: package.json and *.py get dedicated
    # storages, everything else is treated as a raw version file.
    if version_file.endswith('package.json'):
        return NodeVersionStorage(version_file)
    if version_file.endswith('.py'):
        return PyVersionStorage(version_file)
    return RawVersionStorage(version_file)
<SYSTEM_TASK:>
Write the project version to .py file.
<END_TASK>
<USER_TASK:>
Description:
def write(self, version):
    # type: (str) -> None
    """ Write the project version to .py file.

    This will regex search in the file for a
    ``__version__ = VERSION_STRING`` and substitute the version string
    for the new version.
    """
    with open(self.version_file) as fp:
        current = fp.read()
    # Substitute the existing __version__ assignment in place.
    replacement = "__version__ = '{}'".format(version)
    updated = RE_PY_VERSION.sub(replacement, current)
    fs.write_file(self.version_file, updated)
<SYSTEM_TASK:>
Auxiliary function that specifies the logarithmic gray scale.
<END_TASK>
<USER_TASK:>
Description:
def loggray(x, a, b):
"""Auxiliary function that specifies the logarithmic gray scale.
a and b are the cutoffs.""" |
linval = 10.0 + 990.0 * (x-float(a))/(b-a)
return (np.log10(linval)-1.0)*0.5 * 255.0 |
<SYSTEM_TASK:>
I robustly rebin your image by a given factor.
<END_TASK>
<USER_TASK:>
Description:
def rebin(self, factor):
    """
    I robustly rebin your image by a given factor.
    You simply specify a factor, and I will eventually take care of a crop to bring
    the image to integer-multiple-of-your-factor dimensions.
    Note that if you crop your image before, you must directly crop to compatible dimensions !
    We update the binfactor, this allows you to draw on the image later, still using the
    original pixel coordinates.
    Here we work on the numpy array.
    """
    # Converted py2-only print statements / raise syntax to forms valid on
    # both Python 2 and 3.
    if self.pilimage is not None:
        raise RuntimeError("Cannot rebin anymore, PIL image already exists !")
    if type(factor) is not int:
        raise RuntimeError("Rebin factor must be an integer !")
    if factor < 1:
        return
    origshape = np.asarray(self.numpyarray.shape)
    neededshape = origshape - (origshape % factor)
    if not (origshape == neededshape).all():
        if self.verbose:
            print("Rebinning %ix%i : I have to crop from %s to %s" % (factor, factor, origshape, neededshape))
        self.crop(0, neededshape[0], 0, neededshape[1])
    else:
        if self.verbose:
            print("Rebinning %ix%i : I do not need to crop" % (factor, factor))
    # Integer division (the original relied on py2 "/" truncation, which
    # would produce floats under py3 true division).
    self.numpyarray = rebin(self.numpyarray, neededshape // factor)  # module-level rebin function
    self.binfactor = int(self.binfactor * factor)
<SYSTEM_TASK:>
Auxiliary method to make a draw object if not yet done.
<END_TASK>
<USER_TASK:>
Description:
def makedraw(self):
    """Auxiliary method to make a draw object if not yet done.
    This is also called by changecolourmode, when we go from L to RGB, to get a new draw object.
    """
    # Identity comparison with None (the original used "== None").
    if self.draw is None:
        self.draw = imdw.Draw(self.pilimage)
<SYSTEM_TASK:>
The inverse operation of rebin, applied on the PIL image.
<END_TASK>
<USER_TASK:>
Description:
def upsample(self, factor):
    """
    The inverse operation of rebin, applied on the PIL image.
    Do this before writing text or drawing on the image !
    The coordinates will be automatically converted for you
    """
    self.checkforpilimage()
    # Converted py2-only raise/print syntax to py2/py3-compatible forms.
    if type(factor) is not int:
        raise RuntimeError("Upsample factor must be an integer !")
    if self.verbose:
        print("Upsampling by a factor of %i" % factor)
    self.pilimage = self.pilimage.resize((self.pilimage.size[0] * factor, self.pilimage.size[1] * factor))
    self.upsamplefactor = factor
    # Any existing draw object refers to the old image; force a rebuild.
    self.draw = None
<SYSTEM_TASK:>
Most elementary drawing, single pixel, used mainly for testing purposes.
<END_TASK>
<USER_TASK:>
Description:
def drawpoint(self, x, y, colour=None):
    """
    Most elementary drawing, single pixel, used mainly for testing purposes.
    Coordinates are those of your initial image !
    """
    self.checkforpilimage()
    colour = self.defaultcolour(colour)
    self.changecolourmode(colour)
    self.makedraw()
    # Convert from original-image coordinates to PIL coordinates.
    pixel_xy = self.pilcoords((x, y))
    self.draw.point(pixel_xy, fill=colour)
<SYSTEM_TASK:>
We write a title, centered below the image.
<END_TASK>
<USER_TASK:>
Description:
def writetitle(self, titlestring, colour=None):
    """
    We write a title, centered below the image.
    """
    self.checkforpilimage()
    colour = self.defaultcolour(colour)
    self.changecolourmode(colour)
    self.makedraw()
    self.loadtitlefont()
    imgwidth = self.pilimage.size[0]
    imgheight = self.pilimage.size[1]
    # Horizontally center the text, 30px above the bottom edge.
    textwidth = self.draw.textsize(titlestring, font=self.titlefont)[0]
    textxpos = imgwidth / 2.0 - textwidth / 2.0
    textypos = imgheight - 30
    self.draw.text((textxpos, textypos), titlestring, fill=colour, font=self.titlefont)
    if self.verbose:
        # py2-only print statement converted to the function form
        print("I've written a title on the image.")
<SYSTEM_TASK:>
We add a longer chunk of text on the upper left corner of the image.
<END_TASK>
<USER_TASK:>
Description:
def writeinfo(self, linelist, colour=None):
    """
    We add a longer chunk of text on the upper left corner of the image.
    Provide linelist, a list of strings that will be written one below the other.
    """
    self.checkforpilimage()
    colour = self.defaultcolour(colour)
    self.changecolourmode(colour)
    self.makedraw()
    self.loadinfofont()
    for i, line in enumerate(linelist):
        # 12px line height plus 5px spacing, starting 5px from the top.
        topspacing = 5 + (12 + 5) * i
        self.draw.text((10, topspacing), line, fill=colour, font=self.infofont)
    if self.verbose:
        # py2-only print statement converted to the function form
        print("I've written some info on the image.")
<SYSTEM_TASK:>
Calls drawcircle and writelable for an list of stars.
<END_TASK>
<USER_TASK:>
Description:
def drawstarslist(self, dictlist, r=10, colour=None):
    """
    Calls drawcircle and writelabel for a list of stars.
    Provide a list of dictionaries, where each dictionary contains "name", "x", and "y".
    """
    self.checkforpilimage()
    colour = self.defaultcolour(colour)
    self.changecolourmode(colour)
    self.makedraw()
    for star in dictlist:
        # drawcircle also writes the label when `label` is given, so no
        # separate writelabel call is needed (removed dead commented code).
        self.drawcircle(star["x"], star["y"], r=r, colour=colour, label=star["name"])
    if self.verbose:
        # py2-only print statement converted to the function form
        print("I've drawn %i stars." % len(dictlist))
<SYSTEM_TASK:>
Writes the PIL image into a png.
<END_TASK>
<USER_TASK:>
Description:
def tonet(self, outfile):
    """
    Writes the PIL image into a png.
    We do not want to flip the image at this stage, as you might have written on it !
    """
    self.checkforpilimage()
    if self.verbose:
        # py2-only print statement converted to the function form
        print("Writing image to %s...\n%i x %i pixels, mode %s" % (
            outfile, self.pilimage.size[0], self.pilimage.size[1], self.pilimage.mode))
    self.pilimage.save(outfile, "PNG")
<SYSTEM_TASK:>
Sets up the CORS headers response based on the settings used for the API.
<END_TASK>
<USER_TASK:>
Description:
def cors_setup(self, request):
    """
    Sets up the CORS headers response based on the settings used for the API.

    :param request: <pyramid.request.Request>
    """
    def apply_cors(req, response):
        # Pre-flight requests get every configured CORS option, with the
        # snake_case option names converted to HTTP header casing
        # (access_control_allow_origin -> Access-Control-Allow-Origin).
        if req.method.lower() == 'options':
            header_map = {}
            for key, value in self.cors_options.items():
                header_name = '-'.join(part.capitalize() for part in key.split('_'))
                header_map[header_name] = value
            response.headers.update(header_map)
        else:
            # Regular requests only get the origin (and optional expose) headers.
            origin = self.cors_options.get('access_control_allow_origin', '*')
            response.headers['Access-Control-Allow-Origin'] = origin
            expose_headers = self.cors_options.get('access_control_expose_headers', '')
            if expose_headers:
                response.headers['Access-Control-Expose-Headers'] = expose_headers

    # setup the CORS supported response
    request.add_response_callback(apply_cors)
<SYSTEM_TASK:>
Returns a new service for the given request.
<END_TASK>
<USER_TASK:>
Description:
def factory(self, request, parent=None, name=None):
    """
    Returns a new service for the given request.

    :param request: <pyramid.request.Request> incoming request being routed
    :param parent: unused here; accepted for traversal-factory compatibility
    :param name: optional service name override; defaults to the first
        traversal segment
    :return: dict mapping the service name to the resolved service, or an
        empty dict at the root path (documentation view)
    :raises HTTPNotFound: when no route pattern and no registered service
        matches the request
    """
    traverse = request.matchdict['traverse']

    # show documentation at the root path
    if not traverse:
        return {}
    else:
        service = {}
        name = name or traverse[0]

        # look for direct pattern matches
        traversed = '/' + '/'.join(traverse)
        service_type = None
        service_object = None

        for route, endpoint in self.routes:
            result = route.match(traversed)
            if result is not None:
                # A registered route pattern matched: rewrite the matchdict
                # with the route's captured parameters and record the endpoint.
                request.matchdict = result
                request.endpoint = endpoint
                break
        else:
            # No pattern matched: fall back to the named-service registry.
            try:
                service_type, service_object = self.services[name]
            except KeyError:
                raise HTTPNotFound()

        if service_type:
            if isinstance(service_type, Endpoint):
                # An endpoint registered directly; expose it as-is.
                service[name] = service_type
            elif service_object is None:
                # A factory/class that only takes the request.
                service[name] = service_type(request)
            else:
                # A service class bound to a backing object (module/class).
                service[name] = service_type(request, service_object)

        request.api_service = service
        return service
<SYSTEM_TASK:>
Exposes a given service to this API.
<END_TASK>
<USER_TASK:>
Description:
def register(self, service, name=''):
    """
    Exposes a given service to this API.

    :param service: the object to expose -- an ApiFactory, a module, a
        class, an endpoint-decorated callable, a dict scope, or a list of
        any of these
    :param name: optional name to expose the service under; when blank a
        name is derived from the service itself
    :raises RuntimeError: when the object is not a recognized service type
    """
    # expose a sub-factory
    if isinstance(service, ApiFactory):
        self.services[name] = (service.factory, None)

    # expose a module dynamically as a service
    elif inspect.ismodule(service):
        name = name or service.__name__.split('.')[-1]

        # exclude endpoints with patterns: pattern-based endpoints are
        # registered as routes, not looked up by name
        for obj in vars(service).values():
            endpoint = getattr(obj, 'endpoint', None)
            if isinstance(endpoint, Endpoint) and endpoint.pattern:
                route = Route('', endpoint.pattern)
                self.routes.append((route, endpoint))

        self.services[name] = (ModuleService, service)

    # expose a class dynamically as a service
    elif inspect.isclass(service):
        name = name or service.__name__
        self.services[name] = (ClassService, service)

    # expose an endpoint directly (routed when it has a pattern, named otherwise)
    elif isinstance(getattr(service, 'endpoint', None), Endpoint):
        if service.endpoint.pattern:
            route = Route('', service.endpoint.pattern)
            self.routes.append((route, service.endpoint))
        else:
            self.services[service.endpoint.name] = (service.endpoint, None)

    # expose a scope: register each value, skipping unregisterable entries
    elif isinstance(service, dict):
        for srv in service.values():
            try:
                self.register(srv)
            except RuntimeError:
                pass

    # expose a list of services, skipping unregisterable entries
    elif isinstance(service, list):
        for srv in service:
            try:
                self.register(srv)
            except RuntimeError:
                pass

    # anything else is not a recognizable service
    else:
        raise RuntimeError('Invalid service provide: {0} ({1}).'.format(service, type(service)))
<SYSTEM_TASK:>
Serves this API from the inputted root path
<END_TASK>
<USER_TASK:>
Description:
def serve(self, config, path, route_name=None, permission=None, **view_options):
    """
    Serves this API from the inputted root path

    :param config: pyramid Configurator used to register the route and views
    :param path: root URL path to mount this API under
    :param route_name: optional explicit route name; derived from path otherwise
    :param permission: base permission stored on this API
    :param view_options: extra keyword arguments forwarded to config.add_view
        for the main processing view
    """
    route_name = route_name or path.replace('/', '.').strip('.')
    path = path.strip('/') + '*traverse'

    self.route_name = route_name
    self.base_permission = permission

    # configure the route and the path
    config.add_route(route_name, path, factory=self.factory)
    config.add_view(
        self.handle_standard_error,
        route_name=route_name,
        renderer='json2',
        context=StandardError
    )  # NOTE: removed a stray trailing comma that made this a no-op tuple
    config.add_view(
        self.handle_http_error,
        route_name=route_name,
        renderer='json2',
        context=HTTPException
    )
    config.add_view(
        self.process,
        route_name=route_name,
        renderer='json2',
        **view_options
    )
<SYSTEM_TASK:>
Adds extra new lines to the top, bottom or both of a String
<END_TASK>
<USER_TASK:>
Description:
def padd(text, padding="top", size=1):
    """ Adds extra new lines to the top, bottom or both of a String
    @text: #str text to pad
    @padding: #str 'top', 'bottom' or 'all'
    @size: #int number of new lines
    -> #str padded @text
    ..
        from vital.debug import *
        padd("Hello world")
        # -> '\\nHello world'
        padd("Hello world", size=5, padding="all")
        # -> '\\n\\n\\n\\n\\nHello world\\n\\n\\n\\n\\n'
    ..
    """
    if not padding:
        # Guard clause: the original fell through with empty pads and its
        # final `return text` was unreachable dead code.
        return text
    padding = padding.lower()
    pad_all = padding == 'all'
    # "\n" * size replaces the original join-over-range construction.
    padding_top = "\n" * size if (padding == 'top' or pad_all) else ""
    padding_bottom = "\n" * size if (padding == 'bottom' or pad_all) else ""
    return "{}{}{}".format(padding_top, text, padding_bottom)
<SYSTEM_TASK:>
Colorizes text for terminal outputs
<END_TASK>
<USER_TASK:>
Description:
def colorize(text, color="BLUE", close=True):
    """ Colorizes text for terminal outputs
    @text: #str to colorize
    @color: #str color from :mod:colors
    @close: #bool whether or not to reset the color
    -> #str colorized @text
    ..
        from vital.debug import colorize
        colorize("Hello world", "blue")
        # -> '\x1b[0;34mHello world\x1b[1;m'
        colorize("Hello world", "blue", close=False)
        # -> '\x1b[0;34mHello world'
    ..
    """
    if not color:
        return text
    # Resolve the ANSI escape for the requested color name.
    prefix = getattr(colors, color.upper())
    suffix = colors.RESET if close else ""
    return prefix + uncolorize(str(text)) + suffix
<SYSTEM_TASK:>
Bolds text for terminal outputs
<END_TASK>
<USER_TASK:>
Description:
def bold(text, close=True):
    """ Bolds text for terminal outputs

        @text: #str to bold
        @close: #bool whether or not to reset the bold flag

        -> #str bolded @text
        ..
            from vital.debug import bold
            bold("Hello world")
            # -> '\x1b[1mHello world\x1b[1;m'
        ..
    """
    # Direct attribute access instead of the needless getattr() indirection
    return colors.BOLD + str(text) + (colors.RESET if close else "")
<SYSTEM_TASK:>
Gets the class object which defined a given method
<END_TASK>
<USER_TASK:>
Description:
def get_class_that_defined_method(meth):
    """ Gets the class object which defined a given method

        @meth: a class method (bound method or plain function)

        -> owner class object, or None when it cannot be determined
    """
    # Bound methods: walk the MRO of the instance's class and return the
    # first class whose __dict__ holds this exact method object.
    if inspect.ismethod(meth):
        for cls in inspect.getmro(meth.__self__.__class__):
            if cls.__dict__.get(meth.__name__) is meth:
                return cls
        meth = meth.__func__  # fallback to __qualname__ parsing
    # Plain functions: recover the defining class by resolving the
    # qualified name (e.g. 'Outer.method' -> 'Outer') in the function's
    # module; '<locals>' segments are cut off first since those scopes
    # cannot be resolved by name.
    if inspect.isfunction(meth):
        cls = getattr(
            inspect.getmodule(meth),
            meth.__qualname__.split('.<locals>', 1)[0].rsplit('.', 1)[0])
        if isinstance(cls, type):
            return cls
    return None
<SYSTEM_TASK:>
Formats the object name in a pretty way
<END_TASK>
<USER_TASK:>
Description:
def format_obj_name(obj, delim="<>"):
    """ Formats the object name in a pretty way

        @obj: any python object
        @delim: 2-char #str used to wrap the parent object name

        -> #str formatted name
        ..
            from vital.debug import format_obj_name
            format_obj_name(vital.debug.Timer)
            # -> 'Timer<vital.debug>'
            format_obj_name(vital.debug.Timer.time)
            # -> 'time<vital.debug.Timer>'
        ..
    """
    pname = ""
    parent_name = get_parent_name(obj)
    if parent_name:
        # Reuse the already-computed parent name instead of calling
        # get_parent_name() a second time
        pname = "{}{}{}".format(delim[0], parent_name, delim[1])
    return "{}{}".format(get_obj_name(obj), pname)
<SYSTEM_TASK:>
`Creates prettier object representations`
<END_TASK>
<USER_TASK:>
Description:
def preprX(*attributes, address=True, full_name=False,
           pretty=False, keyless=False, **kwargs):
    """ `Creates prettier object representations`

        @*attributes: (#str) instance attributes within the object you
            wish to display. Attributes can be recursive
            e.g. |one.two.three| for access to |self.one.two.three|
        @address: (#bool) |True| to include the memory address
        @full_name: (#bool) |True| to include the full path to the
            object vs. the qualified name
        @pretty: (#bool) |True| to allow bolding and coloring
        @keyless: (#bool) |True| to display the values of @attributes
            without their attribute names

        -> a callable suitable for assignment as a class __repr__
        ..
            class Foo(object):
                def __init__(self, bar, baz=None):
                    self.bar = bar
                    self.baz = baz
                __repr__ = prepr('bar', 'baz', address=False)

            foo = Foo('foobar')
            repr(foo)
        ..
        |<Foo:bar=`foobar`, baz=None>|
    """
    def _format(obj, attribute):
        # Render one attribute as 'name=value' (or bare repr when
        # @keyless).  Missing attributes return None and are filtered
        # out by the caller; a keyless None value is skipped as well.
        try:
            if keyless:
                val = getattr_in(obj, attribute)
                if val is not None:
                    return repr(val)
            else:
                return '%s=%s' % (attribute,
                                  repr(getattr_in(obj, attribute)))
        except AttributeError:
            return None

    def prep(obj, address=address, full_name=full_name, pretty=pretty,
             keyless=keyless, **kwargs):
        # The actual __repr__ implementation bound on the host class.
        if address:
            address = ":%s" % hex(id(obj))
        else:
            address = ""
        data = list(filter(lambda x: x is not None,
                           map(lambda a: _format(obj, a), attributes)))
        if data:
            data = ':%s' % ', '.join(data)
        else:
            data = ''
        return stdout_encode("<%s%s%s>" % (get_obj_name(obj), data, address))
    return prep
<SYSTEM_TASK:>
-> a randomized domain-like name
<END_TASK>
<USER_TASK:>
Description:
def randdomain(self):
    """ -> a randomized domain-like name (1-2 dot-separated labels,
        lowercased) """
    labels = [
        rand_readable(3, 6, use=self.random, density=3)
        for _ in range(self.random.randint(1, 2))
    ]
    return '.'.join(labels).lower()
<SYSTEM_TASK:>
Recursively converts lists to tuples
<END_TASK>
<USER_TASK:>
Description:
def _to_tuple(self, _list):
""" Recursively converts lists to tuples """ |
result = list()
for l in _list:
if isinstance(l, list):
result.append(tuple(self._to_tuple(l)))
else:
result.append(l)
return tuple(result) |
<SYSTEM_TASK:>
Generates random values for sequence-like objects
<END_TASK>
<USER_TASK:>
Description:
def sequence(self, struct, size=1000, tree_depth=1, append_callable=None):
    """ Generates random values for sequence-like objects

        @struct: the sequence-like type you want to fill with random data
        @size: #int number of random values to include in each @tree_depth
        @tree_depth: #int tree dimensions size, i.e.
            1=|(value1, value2)|
            2=|((value1, value2), (value1, value2))|
        @append_callable: #str name of the method which appends/adds data
            to the sequence-like structure - e.g. |'append'|
            NOTE(review): the code resolves this with getattr() on the
            freshly created structure, so a string attribute name is what
            actually works here, not a bound method - confirm with callers

        -> random @struct
    """
    if not tree_depth:
        # Depth exhausted: emit a single random leaf value.
        return self._map_type()
    _struct = struct()
    # Resolve the adder: default to .append, otherwise look the method
    # up by name on the newly created structure.
    add_struct = _struct.append if not append_callable \
        else getattr(_struct, append_callable)
    for x in range(size):
        add_struct(self.sequence(
            struct, size, tree_depth-1, append_callable))
    return _struct
<SYSTEM_TASK:>
Generates random values for dict-like objects
<END_TASK>
<USER_TASK:>
Description:
def mapping(self, struct, key_depth=1000, tree_depth=1,
            update_callable=None):
    """ Generates random values for dict-like objects

        @struct: the dict-like type you want to fill with random data
        @key_depth: #int number of random keys to include in each
            @tree_depth
        @tree_depth: #int tree dimensions size, i.e.
            1=|{key: value}|
            2=|{key: {key: value}, key2: {key2: value2}}|
        @update_callable: #str name of the method which updates data in
            the dict-like structure - e.g. |'update'|
            NOTE(review): the code resolves this with getattr() on the
            freshly created structure, so a string attribute name is what
            actually works here, not a bound method - confirm with callers

        -> random @struct
    """
    if not tree_depth:
        # Depth exhausted: emit a single random leaf value.
        return self._map_type()
    _struct = struct()
    # Resolve the updater: default to .update, otherwise look the method
    # up by name on the newly created structure.
    add_struct = _struct.update if not update_callable \
        else getattr(_struct, update_callable)
    for x in range(key_depth):
        add_struct({
            # self.randstr presumably yields a fresh random string key on
            # each access - TODO confirm it is a property
            self.randstr: self.mapping(
                struct, key_depth, tree_depth-1, update_callable)
        })
    return _struct
<SYSTEM_TASK:>
Length of the highest index in chars = justification size
<END_TASK>
<USER_TASK:>
Description:
def _format_numeric_sequence(self, _sequence, separator="."):
    """ Formats a sequence one item per line with right-justified numeric
        index prefixes.

        @_sequence: the sequence to format
        @separator: #str placed between the index and the item

        -> #str formatted output
    """
    if not _sequence:
        # Empty/falsy input: just show it, colorized.
        return colorize(_sequence, "purple")
    # NOTE(review): unreachable None fallback - the falsy guard above has
    # already returned for None input.
    _sequence = _sequence if _sequence is not None else self.obj
    minus = (2 if self._depth > 0 else 0)
    # Length of the highest index in chars = justification size
    just_size = len(str(len(_sequence)))
    out = []
    add_out = out.append
    for i, item in enumerate(_sequence):
        # Widen the justification while pretty-printing the (possibly
        # nested) item, then restore it.
        self._incr_just_size(just_size+minus)
        add_out(self._numeric_prefix(
            i, self.pretty(item, display=False),
            just=just_size, color="blue", separator=separator))
        self._decr_just_size(just_size+minus)
    if not self._depth:
        # Top level: pad the result with a leading newline.
        return padd("\n".join(out) if out else str(out), padding="top")
    else:
        return "\n".join(out) if out else str(out)
<SYSTEM_TASK:>
Formats object names in a pretty fashion
<END_TASK>
<USER_TASK:>
Description:
def objname(self, obj=None):
    """ Formats object names in a pretty fashion (quoted, blue-colorized,
        falling back to self.obj when @obj is not given) """
    target = obj or self.obj
    pretty_name = self.pretty_objname(target, color=None)
    return "'{}'".format(colorize(pretty_name, "blue"))
<SYSTEM_TASK:>
Determines the type of the object and maps it to the correct
<END_TASK>
<USER_TASK:>
Description:
def _format_obj(self, item=None):
    """ Determines the type of the object and maps it to the correct
        formatter method (self.number, self.dict, self.list, ...).

        @item: the object to format

        -> formatted representation of @item
    """
    # Order here matters, odd behavior with tuples
    if item is None:
        return getattr(self, 'number')(item)
    elif isinstance(item, self.str_):
        #: String
        return item + " "
    elif isinstance(item, bytes):
        #: Bytes
        return getattr(self, 'bytes')(item)
    elif isinstance(item, self.numeric_):
        #: Float, int, etc.
        return getattr(self, 'number')(item)
    elif isinstance(item, self.dict_):
        #: Dict
        return getattr(self, 'dict')(item)
    elif isinstance(item, self.list_):
        #: List
        return getattr(self, 'list')(item)
    elif isinstance(item, tuple):
        #: Tuple
        return getattr(self, 'tuple')(item)
    elif isinstance(item, types.GeneratorType):
        #: Generator
        return getattr(self, 'generator')(item)
    elif isinstance(item, self.set_):
        #: Set
        return getattr(self, 'set')(item)
    elif isinstance(item, deque):
        #: Deque
        return getattr(self, 'deque')(item)
    elif isinstance(item, Sequence):
        #: Sequence
        return getattr(self, 'sequence')(item)
    #: Any other object
    return getattr(self, 'object')(item)
<SYSTEM_TASK:>
Pretty prints object name
<END_TASK>
<USER_TASK:>
Description:
def pretty_objname(self, obj=None, maxlen=50, color="boldcyan"):
    """ Pretty prints object name

        @obj: the object whose name you want to pretty print
        @maxlen: #int maximum length of an object name to print
        @color: your choice of :mod:colors or |None|

        -> #str pretty object name
        ..
            from vital.debug import Look
            print(Look.pretty_objname(dict))
            # -> 'dict\x1b[1;36m<builtins>\x1b[1;m'
        ..
    """
    # presumably lambda_sub strips lambda markers from the parent name -
    # TODO confirm; empty string when the object has no parent
    parent_name = lambda_sub("", get_parent_name(obj) or "")
    objname = get_obj_name(obj)
    if color:
        # Colorize the '<parent>' suffix, deliberately leaving the color
        # escape open so truncation below cannot cut the reset code.
        objname += colorize("<{}>".format(parent_name), color, close=False)
    else:
        objname += "<{}>".format(parent_name)
    # Truncate over-long names, appending an ellipsis + closing bracket.
    objname = objname if len(objname) < maxlen else \
        objname[:(maxlen-1)]+"…>"
    if color:
        # Close the still-open color escape from above.
        objname += colors.RESET
    return objname
<SYSTEM_TASK:>
Outputs the message to the terminal
<END_TASK>
<USER_TASK:>
Description:
def _print_message(self, flag_message=None, color=None, padding=None,
                   reverse=False):
    """ Outputs the message to the terminal

        @flag_message: optional #str flag printed before (or after, when
            @reverse) the buffered message
        @color: flag color; only applied when self.pretty is set
        @padding: padding spec forwarded to padd() for the flag
        @reverse: #bool |True| to print the message before the flag

        Side effect: clears the buffered self.message list after printing.
    """
    if flag_message:
        flag_message = stdout_encode(flag(flag_message,
                                          color=color if self.pretty else None,
                                          show=False))
        if not reverse:
            print(padd(flag_message, padding),
                  self.format_messages(self.message))
        else:
            print(self.format_messages(self.message),
                  padd(flag_message, padding))
    else:
        print(self.format_messages(self.message))
    self.message = []
<SYSTEM_TASK:>
Calculates and colorizes the percent difference between @best
<END_TASK>
<USER_TASK:>
Description:
def _pct_diff(self, best, other):
    """ Calculates and colorizes (red, right-justified to 10 chars) the
        percent difference between @best and @other
    """
    pct = round(((best - other) / best) * 100, 2)
    return colorize("{}%".format(pct).rjust(10), "red")
<SYSTEM_TASK:>
An LRU cache for asyncio coroutines in Python 3.5
<END_TASK>
<USER_TASK:>
Description:
def async_lru(size=100):
    """ An LRU cache for asyncio coroutines

        @size: #int maximum number of cached results

        -> decorator for |async def| coroutine functions
        ..
            @async_lru(1024)
            async def slow_coroutine(*args, **kwargs):
                return await some_other_slow_coroutine()
        ..
    """
    cache = collections.OrderedDict()

    def decorator(fn):
        # Modernized: @asyncio.coroutine / `yield from` were removed in
        # Python 3.11; native async/await is equivalent on 3.5+.
        @wraps(fn)
        async def memoizer(*args, **kwargs):
            # NOTE: kwargs ordering affects the key, so identical calls
            # with differently-ordered kwargs may be cached separately.
            key = str((args, kwargs))
            try:
                # Cache hit: move the entry to the most-recently-used end.
                result = cache.pop(key)
                cache[key] = result
            except KeyError:
                # Cache miss: evict the LRU entry when full, then compute.
                if len(cache) >= size:
                    cache.popitem(last=False)
                result = cache[key] = await fn(*args, **kwargs)
            return result
        return memoizer
    return decorator
<SYSTEM_TASK:>
Print sys message to stdout.
<END_TASK>
<USER_TASK:>
Description:
def info(msg, *args, **kw):
    # type: (str, *Any, **Any) -> None
    """ Print sys message to stdout (rendered in green).

    System messages should inform about the flow of the script. These should
    be major milestones during the build.

    Args:
        msg (str): Message, optionally with ``str.format`` placeholders.
        *args: Positional arguments for ``msg.format()``.
        **kw: Keyword arguments for ``msg.format()``.
    """
    # Truthiness check instead of the un-idiomatic len(args) or len(kw)
    if args or kw:
        msg = msg.format(*args, **kw)

    shell.cprint('-- <32>{}<0>'.format(msg))
<SYSTEM_TASK:>
Per step status messages
<END_TASK>
<USER_TASK:>
Description:
def err(msg, *args, **kw):
    # type: (str, *Any, **Any) -> None
    """ Print an emphasized/error message to stdout (rendered in red).

    Use this locally in a command definition to highlight more important
    information.

    Args:
        msg (str): Message, optionally with ``str.format`` placeholders.
        *args: Positional arguments for ``msg.format()``.
        **kw: Keyword arguments for ``msg.format()``.
    """
    # Truthiness check instead of the un-idiomatic len(args) or len(kw)
    if args or kw:
        msg = msg.format(*args, **kw)

    shell.cprint('-- <31>{}<0>'.format(msg))
<SYSTEM_TASK:>
Return the BranchDetails for the current branch.
<END_TASK>
<USER_TASK:>
Description:
def current_branch():
    # type: () -> BranchDetails
    """ Return the BranchDetails for the current branch.

    Return:
        BranchDetails: The details of the current branch.
    """
    branch_name = shell.run(
        'git symbolic-ref --short HEAD',
        capture=True,
        never_pretend=True
    ).stdout.strip()
    return BranchDetails.parse(branch_name)
<SYSTEM_TASK:>
Get the name of the branches that this commit belongs to.
<END_TASK>
<USER_TASK:>
Description:
def commit_branches(sha1):
    # type: (str) -> List[str]
    """ Get the name of the branches that this commit belongs to.

    Args:
        sha1 (str): The sha1 of the commit to look up.

    Returns:
        list[str]: Names of the branches containing the commit.
    """
    cmd = 'git branch --contains {}'.format(sha1)
    out = shell.run(
        cmd,
        capture=True,
        never_pretend=True
    ).stdout.strip()
    # Parse one branch per line, stripping the '*' marker git prints next
    # to the current branch.  The previous .split() on whitespace returned
    # that '*' as a bogus branch name; this now matches how branches() and
    # CommitDetails.branches parse the same output.
    return [x.strip('* \t\n') for x in out.splitlines()]
<SYSTEM_TASK:>
Try to guess the base branch for the current branch.
<END_TASK>
<USER_TASK:>
Description:
def guess_base_branch():
    # type: () -> Optional[str]
    """ Try to guess the base branch for the current branch.

    Do not trust this guess. git makes it pretty much impossible to guess
    the base branch reliably so this function implements few heuristics that
    will work on most common use cases but anything a bit crazy will probably
    trip this function.

    Returns:
        Optional[str]: The name of the base branch for the current branch if
            guessable or **None** if can't guess.
    """
    my_branch = current_branch(refresh=True).name

    curr = latest_commit()
    if len(curr.branches) > 1:
        # We're possibly at the beginning of the new branch (currently both
        # on base and new branch).
        other = [x for x in curr.branches if x != my_branch]
        if len(other) == 1:
            return other[0]
        return None
    else:
        # We're on one branch
        parent = curr

        # Walk up the history while the commits still belong to our branch;
        # the first commit shared with exactly one other branch names the
        # base branch.
        while parent and my_branch in parent.branches:
            curr = parent

            if len(curr.branches) > 1:
                other = [x for x in curr.branches if x != my_branch]
                if len(other) == 1:
                    return other[0]
                return None

            parents = [p for p in curr.parents if my_branch in p.branches]
            num_parents = len(parents)

            if num_parents > 2:
                # More than two parent, give up
                return None

            if num_parents == 2:
                # This is a merge commit: follow the parent that lives only
                # on our branch.
                for p in parents:
                    if p.branches == [my_branch]:
                        parent = p
                        break
            elif num_parents == 1:
                parent = parents[0]
            elif num_parents == 0:
                # Reached a root commit - stop walking.
                parent = None

        return None
<SYSTEM_TASK:>
Return the author of the given commit.
<END_TASK>
<USER_TASK:>
Description:
def commit_author(sha1=''):
    # type: (str) -> Author
    """ Return the author of the given commit.

    Args:
        sha1 (str):
            The sha1 of the commit to query. If not given, it will return the
            author of the current (latest) commit.

    Returns:
        Author: A named tuple ``(name, email)`` with the commit author details.
    """
    with conf.within_proj_dir():
        cmd = 'git show -s --format="%an||%ae" {}'.format(sha1)
        output = shell.run(cmd, capture=True, never_pretend=True).stdout
        author_name, author_email = output.split('||')
        return Author(author_name, author_email)
<SYSTEM_TASK:>
Return a list of unstaged files in the project repository.
<END_TASK>
<USER_TASK:>
Description:
def unstaged():
    # type: () -> List[str]
    """ Return a list of unstaged files in the project repository.

    Returns:
        list[str]: The list of files with unstaged changes in the project
            git repo (porcelain lines whose first status column is blank).
    """
    with conf.within_proj_dir():
        status = shell.run(
            'git status --porcelain',
            capture=True,
            never_pretend=True
        ).stdout

        return [
            line[3:].strip()
            for line in status.split(os.linesep)
            if line.strip() and line[0] == ' '
        ]
<SYSTEM_TASK:>
Return a list of patterns in the project .gitignore
<END_TASK>
<USER_TASK:>
Description:
def ignore():
    # type: () -> List[str]
    """ Return a list of patterns in the project .gitignore

    Collects patterns from the project ``.gitignore``, the repo-local
    ``.git/info/exclude`` and the user's ``core.excludesfile``.

    Returns:
        list[str]: List of patterns set to be ignored by git.
    """
    def parse_line(line):  # pylint: disable=missing-docstring
        # Decode if necessary
        if not isinstance(line, string_types):
            line = line.decode('utf-8')

        # Strip trailing comment and surrounding whitespace
        return line.split('#', 1)[0].strip()

    ignore_files = [
        conf.proj_path('.gitignore'),
        conf.proj_path('.git/info/exclude'),
        config().get('core.excludesfile')
    ]

    result = []
    for ignore_file in ignore_files:
        if not (ignore_file and os.path.exists(ignore_file)):
            continue

        with open(ignore_file) as fp:
            for raw_line in fp.readlines():
                pattern = parse_line(raw_line)
                if pattern:
                    result.append(pattern)

    return result
<SYSTEM_TASK:>
Return a list of branches in the current repo.
<END_TASK>
<USER_TASK:>
Description:
def branches():
    # type: () -> List[str]
    """ Return a list of branches in the current repo.

    Returns:
        list[str]: A list of branches in the current repo (with the '*'
            current-branch marker stripped).
    """
    output = shell.run(
        'git branch',
        capture=True,
        never_pretend=True
    ).stdout.strip()
    return [line.strip('* \t\n') for line in output.splitlines()]
<SYSTEM_TASK:>
Tag the current commit.
<END_TASK>
<USER_TASK:>
Description:
def tag(name, message, author=None):
    # type: (str, str, Author) -> None
    """ Tag the current commit.

    Args:
        name (str):
            The tag name.
        message (str):
            The tag message. Same as ``-m`` parameter in ``git tag``.
        author (Author):
            The commit author. Will default to the author of the commit.
    """
    cmd = (
        'git -c "user.name={author.name}" -c "user.email={author.email}" '
        'tag -a "{name}" -m "{message}"'
    ).format(
        author=author or latest_commit().author,
        name=name,
        # Escape quotes/backticks so the message survives shell quoting.
        message=message.replace('"', '\\"').replace('`', '\\`'),
    )
    shell.run(cmd)
<SYSTEM_TASK:>
Returns all tags in the repo.
<END_TASK>
<USER_TASK:>
Description:
def tags():
    # type: () -> List[str]
    """ Returns all tags in the repo.

    Returns:
        list[str]: List of all tags in the repo, sorted as versions
            (``v:refname`` sorting is performed by git itself).
    """
    output = shell.run(
        'git tag --sort=v:refname',
        capture=True,
        never_pretend=True
    ).stdout
    return output.strip().splitlines()
<SYSTEM_TASK:>
Verify if the given branch exists.
<END_TASK>
<USER_TASK:>
Description:
def verify_branch(branch_name):
    # type: (str) -> bool
    """ Verify if the given branch exists.

    Args:
        branch_name (str):
            The name of the branch to check.

    Returns:
        bool: **True** if a branch with name *branch_name* exists, **False**
            otherwise.
    """
    try:
        shell.run(
            'git rev-parse --verify {}'.format(branch_name),
            never_pretend=True
        )
    except IOError:
        # shell.run raises IOError when git exits non-zero.
        return False
    return True
<SYSTEM_TASK:>
Return branches protected by deletion.
<END_TASK>
<USER_TASK:>
Description:
def protected_branches():
    # type: () -> list[str]
    """ Return branches protected by deletion.

    By default those are master and devel branches as configured in pelconf.

    Returns:
        list[str]: Names of important branches that should not be deleted.
    """
    defaults = (
        conf.get('git.master_branch', 'master'),
        conf.get('git.devel_branch', 'develop'),
    )
    return conf.get('git.protected_branches', defaults)
<SYSTEM_TASK:>
List of all branches this commit is a part of.
<END_TASK>
<USER_TASK:>
Description:
def branches(self):
    # type: () -> List[str]
    """ List of all branches this commit is a part of (lazily computed
        and cached on the instance). """
    if self._branches is None:
        out = shell.run(
            'git branch --contains {}'.format(self.sha1),
            capture=True,
            never_pretend=True
        ).stdout.strip()
        self._branches = [line.strip('* \t\n') for line in out.splitlines()]
    return self._branches
<SYSTEM_TASK:>
Parents of the this commit.
<END_TASK>
<USER_TASK:>
Description:
def parents(self):
    # type: () -> List[CommitDetails]
    """ Parents of this commit (lazily resolved and cached). """
    if self._parents is None:
        self._parents = [
            CommitDetails.get(parent_sha1)
            for parent_sha1 in self.parents_sha1
        ]
    return self._parents
<SYSTEM_TASK:>
Return this commits number.
<END_TASK>
<USER_TASK:>
Description:
def number(self):
    # type: () -> int
    """ Return this commits number.

    This is the same as the total number of commits in history up until
    this commit.

    This value can be useful in some CI scenarios as it allows to track
    progress on any given branch (although there can be two commits with the
    same number existing on different branches).

    Returns:
        int: The commit number/index.
    """
    log_output = shell.run(
        'git log --oneline {}'.format(self.sha1),
        capture=True,
        never_pretend=True
    ).stdout.strip()
    return len(log_output.splitlines())
<SYSTEM_TASK:>
Return details about a given commit.
<END_TASK>
<USER_TASK:>
Description:
def get(cls, sha1=''):
    # type: (str) -> CommitDetails
    """ Return details about a given commit.

    Args:
        sha1 (str):
            The sha1 of the commit to query. If not given, it will return
            the details for the latest commit.

    Returns:
        CommitDetails: Commit details. You can use the instance of the
            class to query git tree further.
    """
    fmt = '%H||%an||%ae||%s||%b||%P'
    with conf.within_proj_dir():
        cmd = 'git show -s --format="{}" {}'.format(fmt, sha1)
        result = shell.run(cmd, capture=True, never_pretend=True).stdout

    sha1, name, email, title, desc, parents = result.split('||')
    return CommitDetails(
        sha1=sha1,
        author=Author(name, email),
        title=title,
        desc=desc,
        parents_sha1=parents.split(),
    )
<SYSTEM_TASK:>
Sets the log level for cons3rt assets
<END_TASK>
<USER_TASK:>
Description:
def set_log_level(cls, log_level):
    """Sets the log level for cons3rt assets

    This method sets the logging level for cons3rt assets using
    pycons3rt. The loglevel is read in from a deployment property
    called loglevel and set appropriately.

    :param log_level: (str) one of DEBUG/INFO/WARN/WARNING/ERROR,
        case-insensitive
    :return: True if log level was set, False otherwise.
    """
    log = logging.getLogger(cls.cls_logger + '.set_log_level')
    log.info('Attempting to set the log level...')
    if log_level is None:
        log.info('Arg loglevel was None, log level will not be updated.')
        return False
    # NOTE: was isinstance(log_level, basestring), which raises NameError
    # on Python 3; str covers all text input there.
    if not isinstance(log_level, str):
        log.error('Passed arg loglevel must be a string')
        return False
    log_level = log_level.upper()
    log.info('Attempting to set log level to: %s...', log_level)
    # Dict dispatch replaces the long if/elif chain; WARNING intentionally
    # maps to logging.WARN exactly as before (they are the same value).
    levels = {
        'DEBUG': logging.DEBUG,
        'INFO': logging.INFO,
        'WARN': logging.WARN,
        'WARNING': logging.WARN,
        'ERROR': logging.ERROR,
    }
    if log_level not in levels:
        log.error('Could not set log level, this is not a valid log level: %s', log_level)
        return False
    cls._logger.setLevel(levels[log_level])
    log.info('pycons3rt loglevel set to: %s', log_level)
    return True
<SYSTEM_TASK:>
Finds an in @obj via a period-delimited string @name.
<END_TASK>
<USER_TASK:>
Description:
def getattr_in(obj, name):
    """ Finds an attribute in @obj via a period-delimited string @name.

        @obj: (#object)
        @name: (#str) |.|-separated keys to search @obj in
        ..
            obj.deep.attr = 'deep value'
            getattr_in(obj, 'deep.attr')
        ..
        |'deep value'|
    """
    target = obj
    for attr in name.split('.'):
        target = getattr(target, attr)
    return target
<SYSTEM_TASK:>
Imports a module, class or method from string and unwraps it
<END_TASK>
<USER_TASK:>
Description:
def import_from(name):
    """ Imports a module, class or method from string and unwraps it
        if wrapped by functools

        @name: (#str) name of the python object

        -> imported object (unwrapped), or @name itself when it is not a
           non-empty string or could not be imported
    """
    obj = name
    if isinstance(name, str) and len(name):
        try:
            # First attempt: pydoc.locate resolves full dotted paths.
            obj = locate(name)
            assert obj is not None
        except (AttributeError, TypeError, AssertionError, ErrorDuringImport):
            try:
                # Fallback: import the parent module and fetch the last
                # component as an attribute.
                name = name.split(".")
                attr = name[-1]
                name = ".".join(name[:-1])
                mod = importlib.import_module(name)
                obj = getattr(mod, attr)
            except (SyntaxError, AttributeError, ImportError, ValueError):
                try:
                    # Last resort: treat the last two components as
                    # attribute-of-attribute (e.g. module.Class.method);
                    # `attr` still holds the final component from the
                    # previous attempt.
                    name = name.split(".")
                    attr_sup = name[-1]
                    name = ".".join(name[:-1])
                    mod = importlib.import_module(name)
                    obj = getattr(getattr(mod, attr_sup), attr)
                except:
                    # We give up.
                    pass
    obj = unwrap_obj(obj)
    return obj
<SYSTEM_TASK:>
Gets the actual object from a decorated or wrapped function
<END_TASK>
<USER_TASK:>
Description:
def unwrap_obj(obj):
    """ Gets the actual object from a decorated or wrapped function

        @obj: (#object) the object to unwrap

        -> the unwrapped object, or @obj unchanged when no unwrapping
           step applies
    """
    try:
        # Properties: unwrap to the getter function.
        obj = obj.fget
    except (AttributeError, TypeError):
        pass
    try:
        # Cached properties
        if obj.func.__doc__ == obj.__doc__:
            obj = obj.func
    except AttributeError:
        pass
    try:
        # Setter/Getters
        # NOTE(review): on a plain property, .getter is a bound method,
        # not the wrapped function - presumably this targets descriptors
        # exposing a .getter attribute; confirm intent.
        obj = obj.getter
    except AttributeError:
        pass
    try:
        # Wrapped Funcs (follows __wrapped__ chains set by functools.wraps)
        obj = inspect.unwrap(obj)
    except:
        pass
    return obj
<SYSTEM_TASK:>
Load configuration from file.
<END_TASK>
<USER_TASK:>
Description:
def load():
    # type: () -> None
    """ Load configuration from file.

    This will search the directory structure upwards to find the project root
    (directory containing ``pelconf.py`` file). Once found it will import the
    config file which should initialize all the configuration (using
    `peltak.core.conf.init()` function).

    You can also have both yaml (configuration) and python (custom commands)
    living together. Just remember that calling `conf.init()` will overwrite
    the config defined in YAML.
    """
    with within_proj_dir():
        # YAML config first, then the python config (which may override it).
        loaders = (
            ('pelconf.yaml', load_yaml_config),
            ('pelconf.py', load_py_config),
        )
        for conf_file, loader in loaders:
            if os.path.exists(conf_file):
                loader(conf_file)
<SYSTEM_TASK:>
Load a YAML configuration.
<END_TASK>
<USER_TASK:>
Description:
def load_yaml_config(conf_file):
    # type: (str) -> None
    """ Load a YAML configuration.

    This will not update the configuration but replace it entirely
    (the module-level ``g_config`` is overwritten).

    Args:
        conf_file (str):
            Path to the YAML config. This function will not check the file name
            or extension and will just crash if the given file does not exist or
            is not a valid YAML file.
    """
    global g_config

    with open(conf_file) as fp:
        # Initialize config
        g_config = util.yaml_load(fp)

        # Add src_dir to sys.paths if it's set. This is only done with YAML
        # configs, py configs have to do this manually.
        src_dir = get_path('src_dir', None)
        if src_dir is not None:
            sys.path.insert(0, src_dir)

        # Import any command modules declared in the config so they
        # register themselves with the CLI.
        for cmd in get('commands', []):
            _import(cmd)
<SYSTEM_TASK:>
Import configuration from a python file.
<END_TASK>
<USER_TASK:>
Description:
def load_py_config(conf_file):
    # type: (str) -> None
    """ Import configuration from a python file.

    This will just import the file into python. Sky is the limit. The file
    has to deal with the configuration all by itself (i.e. call conf.init()).
    You will also need to add your src directory to sys.paths if it's not the
    current working directory. This is done automatically if you use yaml
    config as well.

    Args:
        conf_file (str):
            Path to the py module config. This function will not check the file
            name or extension and will just crash if the given file does not
            exist or is not a valid python file.
    """
    if sys.version_info >= (3, 5):
        from importlib import util

        spec = util.spec_from_file_location('pelconf', conf_file)
        mod = util.module_from_spec(spec)
        spec.loader.exec_module(mod)

    elif sys.version_info >= (3, 3):
        from importlib import machinery

        loader = machinery.SourceFileLoader('pelconf', conf_file)
        _ = loader.load_module()

    else:
        # Python 2 fallback.  The previous `elif sys.version_info <= (3, 0)`
        # silently did nothing on Python 3.1/3.2; a plain else covers them.
        import imp

        imp.load_source('pelconf', conf_file)
<SYSTEM_TASK:>
Load template from file.
<END_TASK>
<USER_TASK:>
Description:
def load_template(filename):
    # type: (str) -> str
    """ Load template from file.

    The templates are part of the package and must be included as
    ``package_data`` in project ``setup.py``.

    Args:
        filename (str):
            The template path. Relative to `peltak` package directory.

    Returns:
        str: The content of the chosen template.
    """
    template_path = os.path.join(PKG_DIR, 'templates', filename)
    with open(template_path) as fp:
        return fp.read()
<SYSTEM_TASK:>
Return an absolute path to the given project relative path.
<END_TASK>
<USER_TASK:>
Description:
def within_proj_dir(path='.'):
    # type: (Optional[str]) -> str
    """ Context manager: temporarily chdir into a project-relative path.

    :param path:
        Project relative path that will be converted to the system wide
        absolute path.
    :return:
        Yields with the working directory changed to the project path;
        restores the previous working directory on exit.
    """
    curr_dir = os.getcwd()

    os.chdir(proj_path(path))
    try:
        yield
    finally:
        # Restore the original cwd even when the body raises - previously
        # an exception would leave the process stuck in the project dir.
        os.chdir(curr_dir)
<SYSTEM_TASK:>
Get config value with the given name and optional default.
<END_TASK>
<USER_TASK:>
Description:
def get(name, *default):
    # type: (str, Any) -> Any
    """ Get config value with the given name and optional default.

    Args:
        name (str):
            The name of the config value, with dots separating nested keys.
        *default (Any):
            If given and the key does not exist, this will be returned
            instead. If it's not given and the config value does not exist,
            AttributeError will be raised.

    Returns:
        The requested config value, or `default` when given and the value
        is missing.

    Raises:
        AttributeError: If the value does not exist and `default` was not
            given.
    """
    global g_config

    node = g_config
    for key in name.split('.'):
        # The membership test (rather than try/except indexing) is kept
        # deliberately: it mirrors the original lookup semantics exactly.
        if key in node:
            node = node[key]
        elif default:
            return default[0]
        else:
            raise AttributeError("Config value '{}' does not exist".format(
                name
            ))

    return node
<SYSTEM_TASK:>
Get config value as path relative to the project directory.
<END_TASK>
<USER_TASK:>
Description:
def get_path(name, *default):
    # type: (str, Any) -> Any
    """ Get config value as path relative to the project directory.

    This allows easily defining the project configuration within the fabfile
    as always relative to that fabfile.

    Args:
        name (str):
            The name of the config value containing the path.
        *default (Any):
            If given and the key does not exist, this will be returned
            instead. If it's not given and the config value does not exist,
            AttributeError will be raised.

    Returns:
        The requested config value converted to an absolute project path,
        or None when the stored value is None.

    Raises:
        AttributeError: If the value does not exist and `default` was not
            given.
    """
    raw_value = get(name, *default)
    return None if raw_value is None else proj_path(raw_value)
<SYSTEM_TASK:>
Find the project path by going up the file tree.
<END_TASK>
<USER_TASK:>
Description:
def _find_proj_root():
# type: () -> Optional[str]
""" Find the project path by going up the file tree.
This will look in the current directory and upwards for the pelconf file
(.yaml or .py)
""" |
proj_files = frozenset(('pelconf.py', 'pelconf.yaml'))
curr = os.getcwd()
while curr.startswith('/') and len(curr) > 1:
if proj_files & frozenset(os.listdir(curr)):
return curr
else:
curr = os.path.dirname(curr)
return None |
<SYSTEM_TASK:>
Verify the specified bucket exists
<END_TASK>
<USER_TASK:>
Description:
def validate_bucket(self):
    """Verify the specified bucket exists

    This method validates that the bucket name passed in the S3Util
    constructor actually exists, retrying on transient 500/503 errors.

    :return: None
    :raises S3UtilError: when the bucket does not exist, is unreachable
        after all retries, or returns an unexpected error code
    """
    log = logging.getLogger(self.cls_logger + '.validate_bucket')
    log.info('Attempting to get bucket: {b}'.format(b=self.bucket_name))
    max_tries = 10
    count = 1
    while count <= max_tries:
        log.info('Attempting to connect to S3 bucket %s, try %s of %s',
                 self.bucket_name, count, max_tries)
        try:
            self.s3client.head_bucket(Bucket=self.bucket_name)
        except ClientError as e:
            error_code = int(e.response['Error']['Code'])
            log.debug(
                'Connecting to bucket %s produced response code: %s',
                self.bucket_name, error_code)
            if error_code == 404:
                msg = 'Error 404 response indicates that bucket {b} does not ' \
                      'exist:\n{e}'.format(b=self.bucket_name, e=str(e))
                log.error(msg)
                # Python 3 raise syntax; was py2-only `raise E, msg, trace`
                # which is a SyntaxError on Python 3.
                raise S3UtilError(msg) from e
            elif error_code == 500 or error_code == 503:
                if count >= max_tries:
                    msg = 'S3 bucket is not accessible at this time: {b}\n{e}'.format(
                        b=self.bucket_name, e=str(e))
                    log.error(msg)
                    raise S3UtilError(msg) from e
                else:
                    # Message now matches the actual 5-second sleep below
                    # (previously claimed "2 sec"); log.warn is deprecated.
                    log.warning('AWS returned error code 500 or 503, re-trying in 5 sec...')
                    time.sleep(5)
                    count += 1
                    continue
            else:
                msg = 'Connecting to S3 bucket {b} returned code: {c}\n{e}'.\
                    format(b=self.bucket_name, c=error_code, e=str(e))
                log.error(msg)
                raise S3UtilError(msg) from e
        else:
            log.info('Found bucket: %s', self.bucket_name)
            return
<SYSTEM_TASK:>
Private method for downloading from S3
<END_TASK>
<USER_TASK:>
Description:
def __download_from_s3(self, key, dest_dir):
"""Private method for downloading from S3
This private helper method takes a key and the full path to
the destination directory, assumes that the args have been
validated by the public caller methods, and attempts to
download the specified key to the dest_dir.
:param key: (str) S3 key for the file to be downloaded
:param dest_dir: (str) Full path destination directory
:return: (str) Downloaded file destination if the file was
downloaded successfully, None otherwise.
""" |
log = logging.getLogger(self.cls_logger + '.__download_from_s3')
# Destination filename is the last path component of the S3 key
filename = key.split('/')[-1]
# NOTE(review): str.split never yields None, so this guard is unreachable
# dead code; left as-is to preserve behavior byte-for-byte.
if filename is None:
    log.error('Could not determine the filename from key: %s', key)
    return None
destination = dest_dir + '/' + filename
log.info('Attempting to download %s from bucket %s to destination %s',
         key, self.bucket_name, destination)
# Retry loop: up to max_tries attempts with a 5-second delay between tries
max_tries = 10
count = 1
while count <= max_tries:
    log.info('Attempting to download file %s, try %s of %s', key,
             count, max_tries)
    try:
        self.s3client.download_file(
            Bucket=self.bucket_name, Key=key, Filename=destination)
    except ClientError:
        if count >= max_tries:
            # Retries exhausted: re-raise as S3UtilError with the original
            # traceback (Python 2 three-argument raise)
            _, ex, trace = sys.exc_info()
            msg = 'Unable to download key {k} from S3 bucket {b}:\n{e}'.format(
                k=key, b=self.bucket_name, e=str(ex))
            log.error(msg)
            raise S3UtilError, msg, trace
        else:
            log.warn('Download failed, re-trying...')
            count += 1
            time.sleep(5)
            continue
    else:
        # Success: return the local path of the downloaded file
        log.info('Successfully downloaded %s from S3 bucket %s to: %s',
                 key,
                 self.bucket_name,
                 destination)
        return destination
<SYSTEM_TASK:>
Downloads a file by key from the specified S3 bucket
<END_TASK>
<USER_TASK:>
Description:
def download_file_by_key(self, key, dest_dir):
"""Downloads a file by key from the specified S3 bucket
This method takes the full 'key' as the arg, and attempts to
download the file to the specified dest_dir as the destination
directory. This method sets the downloaded filename to be the
same as it is on S3.
:param key: (str) S3 key for the file to be downloaded.
:param dest_dir: (str) Full path destination directory
:return: (str) Downloaded file destination if the file was
downloaded successfully, None otherwise.
""" |
log = logging.getLogger(self.cls_logger + '.download_file_by_key')
if not isinstance(key, basestring):
log.error('key argument is not a string')
return None
if not isinstance(dest_dir, basestring):
log.error('dest_dir argument is not a string')
return None
if not os.path.isdir(dest_dir):
log.error('Directory not found on file system: %s', dest_dir)
return None
try:
dest_path = self.__download_from_s3(key, dest_dir)
except S3UtilError:
raise
return dest_path |
<SYSTEM_TASK:>
Downloads a file by regex from the specified S3 bucket
<END_TASK>
<USER_TASK:>
Description:
def download_file(self, regex, dest_dir):
"""Downloads a file by regex from the specified S3 bucket
This method takes a regular expression as the arg, and attempts
to download the file to the specified dest_dir as the
destination directory. This method sets the downloaded filename
to be the same as it is on S3.
:param regex: (str) Regular expression matching the S3 key for
the file to be downloaded.
:param dest_dir: (str) Full path destination directory
:return: (str) Downloaded file destination if the file was
downloaded successfully, None otherwise.
""" |
log = logging.getLogger(self.cls_logger + '.download_file')
if not isinstance(regex, basestring):
log.error('regex argument is not a string')
return None
if not isinstance(dest_dir, basestring):
log.error('dest_dir argument is not a string')
return None
if not os.path.isdir(dest_dir):
log.error('Directory not found on file system: %s', dest_dir)
return None
key = self.find_key(regex)
if key is None:
log.warn('Could not find a matching S3 key for: %s', regex)
return None
return self.__download_from_s3(key, dest_dir) |
<SYSTEM_TASK:>
Attempts to find a single S3 key based on the passed regex
<END_TASK>
<USER_TASK:>
Description:
def find_key(self, regex):
"""Attempts to find a single S3 key based on the passed regex
Given a regular expression, this method searches the S3 bucket
for a matching key, and returns it if exactly 1 key matches.
Otherwise, None is returned.
:param regex: (str) Regular expression for an S3 key
:return: (str) Full length S3 key matching the regex, None
otherwise
""" |
log = logging.getLogger(self.cls_logger + '.find_key')
if not isinstance(regex, basestring):
log.error('regex argument is not a string')
return None
log.info('Looking up a single S3 key based on regex: %s', regex)
matched_keys = []
for item in self.bucket.objects.all():
log.debug('Checking if regex matches key: %s', item.key)
match = re.search(regex, item.key)
if match:
matched_keys.append(item.key)
if len(matched_keys) == 1:
log.info('Found matching key: %s', matched_keys[0])
return matched_keys[0]
elif len(matched_keys) > 1:
log.info('Passed regex matched more than 1 key: %s', regex)
return None
else:
log.info('Passed regex did not match any key: %s', regex)
return None |
<SYSTEM_TASK:>
Uploads a file using the passed S3 key
<END_TASK>
<USER_TASK:>
Description:
def upload_file(self, filepath, key):
"""Uploads a file using the passed S3 key
This method uploads a file specified by the filepath to S3
using the provided S3 key.
:param filepath: (str) Full path to the file to be uploaded
:param key: (str) S3 key to be set for the upload
:return: True if upload is successful, False otherwise.
""" |
log = logging.getLogger(self.cls_logger + '.upload_file')
log.info('Attempting to upload file %s to S3 bucket %s as key %s...',
filepath, self.bucket_name, key)
if not isinstance(filepath, basestring):
log.error('filepath argument is not a string')
return False
if not isinstance(key, basestring):
log.error('key argument is not a string')
return False
if not os.path.isfile(filepath):
log.error('File not found on file system: %s', filepath)
return False
try:
self.s3client.upload_file(
Filename=filepath, Bucket=self.bucket_name, Key=key)
except ClientError as e:
log.error('Unable to upload file %s to bucket %s as key %s:\n%s',
filepath, self.bucket_name, key, e)
return False
else:
log.info('Successfully uploaded file to S3 bucket %s as key %s',
self.bucket_name, key)
return True |
<SYSTEM_TASK:>
Deletes the specified key
<END_TASK>
<USER_TASK:>
Description:
def delete_key(self, key_to_delete):
"""Deletes the specified key
:param key_to_delete:
:return:
""" |
log = logging.getLogger(self.cls_logger + '.delete_key')
log.info('Attempting to delete key: {k}'.format(k=key_to_delete))
try:
self.s3client.delete_object(Bucket=self.bucket_name, Key=key_to_delete)
except ClientError:
_, ex, trace = sys.exc_info()
log.error('ClientError: Unable to delete key: {k}\n{e}'.format(k=key_to_delete, e=str(ex)))
return False
else:
log.info('Successfully deleted key: {k}'.format(k=key_to_delete))
return True |
<SYSTEM_TASK:>
Print error and exit if the current branch is not of a given type.
<END_TASK>
<USER_TASK:>
Description:
def assert_branch_type(branch_type):
# type: (str) -> None
""" Print error and exit if the current branch is not of a given type.
Args:
branch_type (str):
The branch type. This assumes the branch is in the '<type>/<title>`
format.
""" |
branch = git.current_branch(refresh=True)
if branch.type != branch_type:
if context.get('pretend', False):
log.info("Would assert that you're on a <33>{}/*<32> branch",
branch_type)
else:
log.err("Not on a <33>{}<31> branch!", branch_type)
fmt = ("The branch must follow <33>{required_type}/<name><31>"
"format and your branch is called <33>{name}<31>.")
log.err(fmt, required_type=branch_type, name=branch.name)
sys.exit(1) |
<SYSTEM_TASK:>
Delete the given branch.
<END_TASK>
<USER_TASK:>
Description:
def git_branch_delete(branch_name):
# type: (str) -> None
""" Delete the given branch.
Args:
branch_name (str):
Name of the branch to delete.
""" |
if branch_name not in git.protected_branches():
log.info("Deleting branch <33>{}", branch_name)
shell.run('git branch -d {}'.format(branch_name)) |
<SYSTEM_TASK:>
Rename the current branch
<END_TASK>
<USER_TASK:>
Description:
def git_branch_rename(new_name):
# type: (str) -> None
""" Rename the current branch
Args:
new_name (str):
New name for the current branch.
""" |
curr_name = git.current_branch(refresh=True).name
if curr_name not in git.protected_branches():
log.info("Renaming branch from <33>{}<32> to <33>{}".format(
curr_name, new_name
))
shell.run('git branch -m {}'.format(new_name)) |
<SYSTEM_TASK:>
Checkout or create a given branch
<END_TASK>
<USER_TASK:>
Description:
def git_checkout(branch_name, create=False):
# type: (str, bool) -> None
""" Checkout or create a given branch
Args:
branch_name (str):
The name of the branch to checkout or create.
create (bool):
If set to **True** it will create the branch instead of checking it
out.
""" |
log.info("Checking out <33>{}".format(branch_name))
# '-b' makes git create the branch instead of switching to it
flag = '-b' if create else ''
shell.run('git checkout {} {}'.format(flag, branch_name))
<SYSTEM_TASK:>
Return the base branch for the current branch.
<END_TASK>
<USER_TASK:>
Description:
def get_base_branch():
# type: () -> str
""" Return the base branch for the current branch.
This function will first try to guess the base branch and if it can't it
will let the user choose the branch from the list of all local branches.
Returns:
str: The name of the branch the current branch is based on.
""" |
base_branch = git.guess_base_branch()
if base_branch is None:
log.info("Can't guess the base branch, you have to pick one yourself:")
base_branch = choose_branch()
return base_branch |
<SYSTEM_TASK:>
Show the user a menu to pick a branch from the existing ones.
<END_TASK>
<USER_TASK:>
Description:
def choose_branch(exclude=None):
# type: (List[str]) -> str
""" Show the user a menu to pick a branch from the existing ones.
Args:
exclude (list[str]):
List of branch names to exclude from the menu. By default it will
exclude master and develop branches. To show all branches pass an
empty array here.
Returns:
str: The name of the branch chosen by the user. If the user inputs an
invalid choice, he will be asked again (and again) until he picks a
a valid branch.
""" |
if exclude is None:
master = conf.get('git.master_branch', 'master')
develop = conf.get('git.devel_branch', 'develop')
exclude = {master, develop}
branches = list(set(git.branches()) - exclude)
# Print the menu
for i, branch_name in enumerate(branches):
shell.cprint('<90>[{}] <33>{}'.format(i + 1, branch_name))
# Get a valid choice from the user
choice = 0
while choice < 1 or choice > len(branches):
prompt = "Pick a base branch from the above [1-{}]".format(
len(branches)
)
choice = click.prompt(prompt, value_proc=int)
if not (1 <= choice <= len(branches)):
fmt = "Invalid choice {}, you must pick a number between {} and {}"
log.err(fmt.format(choice, 1, len(branches)))
return branches[choice - 1] |
<SYSTEM_TASK:>
Yields random values from @seq until @seq is empty
<END_TASK>
<USER_TASK:>
Description:
def randrange(seq):
""" Yields random values from @seq until @seq is empty """ |
seq = seq.copy()
choose = rng().choice
remove = seq.remove
for x in range(len(seq)):
y = choose(seq)
remove(y)
yield y |
<SYSTEM_TASK:>
Validate the ip_address
<END_TASK>
<USER_TASK:>
Description:
def validate_ip_address(ip_address):
"""Validate the ip_address
:param ip_address: (str) IP address
:return: (bool) True if the ip_address is valid
""" |
# Validate the IP address
log = logging.getLogger(mod_logger + '.validate_ip_address')
if not isinstance(ip_address, basestring):
log.warn('ip_address argument is not a string')
return False
# Ensure there are 3 dots
num_dots = 0
for c in ip_address:
if c == '.':
num_dots += 1
if num_dots != 3:
log.info('Not a valid IP address: {i}'.format(i=ip_address))
return False
# Use the socket module to test
try:
socket.inet_aton(ip_address)
except socket.error as e:
log.info('Not a valid IP address: {i}\n{e}'.format(i=ip_address, e=e))
return False
else:
log.info('Validated IP address: %s', ip_address)
return True |
<SYSTEM_TASK:>
Uses the ip addr command to enumerate IP addresses by device
<END_TASK>
<USER_TASK:>
Description:
def ip_addr():
"""Uses the ip addr command to enumerate IP addresses by device
:return: (dict) Containing device: ip_address
""" |
log = logging.getLogger(mod_logger + '.ip_addr')
log.debug('Running the ip addr command...')
ip_addr_output = {}
command = ['ip', 'addr']
try:
ip_addr_result = run_command(command, timeout_sec=20)
except CommandError:
_, ex, trace = sys.exc_info()
msg = 'There was a problem running command: {c}'.format(c=' '.join(command))
raise CommandError, msg, trace
ip_addr_lines = ip_addr_result['output'].split('\n')
for line in ip_addr_lines:
line = line.strip()
if line.startswith('inet6'):
continue
elif line.startswith('inet'):
parts = line.split()
try:
ip_address = parts[1].strip().split('/')[0]
except KeyError:
continue
else:
if not validate_ip_address(ip_address):
continue
else:
for part in parts:
part = part.strip()
if part.strip().startswith('eth') or part.strip().startswith('eno') or \
part.strip().startswith('ens'):
device = part
ip_addr_output[device] = ip_address
return ip_addr_output |
<SYSTEM_TASK:>
Configures the source IP address for a Linux interface
<END_TASK>
<USER_TASK:>
Description:
def set_source_ip_for_interface(source_ip_address, desired_source_ip_address, device_num=0):
"""Configures the source IP address for a Linux interface
:param source_ip_address: (str) Source IP address to change
:param desired_source_ip_address: (str) IP address to configure as the source in outgoing packets
:param device_num: (int) Integer interface device number to configure
:return: None
:raises: TypeError, ValueError, OSError
""" |
log = logging.getLogger(mod_logger + '.set_source_ip_for_interface')
# Type-check both IP arguments (Python 2 basestring covers str/unicode)
if not isinstance(source_ip_address, basestring):
    msg = 'arg source_ip_address must be a string'
    log.error(msg)
    raise TypeError(msg)
if not isinstance(desired_source_ip_address, basestring):
    msg = 'arg desired_source_ip_address must be a string'
    log.error(msg)
    raise TypeError(msg)
# Both arguments must be syntactically valid IPv4 addresses
if not validate_ip_address(ip_address=source_ip_address):
    msg = 'The arg source_ip_address was found to be an invalid IP address. Please pass a valid IP address'
    log.error(msg)
    raise ValueError(msg)
if not validate_ip_address(ip_address=desired_source_ip_address):
    msg = 'The arg desired_source_ip_address was found to be an invalid IP address. Please pass a valid IP address'
    log.error(msg)
    raise ValueError(msg)
# Determine the device name based on the device_num
log.debug('Attempting to determine the device name based on the device_num arg...')
# EAFP: try to interpret device_num as an integer index; a string that is
# not a number is treated as the full device name (e.g. 'ens192')
try:
    int(device_num)
except ValueError:
    if isinstance(device_num, basestring):
        device_name = device_num
        log.info('Provided device_num is not an int, assuming it is the full device name: {d}'.format(
            d=device_name))
    else:
        raise TypeError('device_num arg must be a string or int')
else:
    device_name = 'eth{n}'.format(n=str(device_num))
    log.info('Provided device_num is an int, assuming device name is: {d}'.format(d=device_name))
# Build the command
# iptables -t nat -I POSTROUTING -o eth0 -s ${RA_ORIGINAL_IP} -j SNAT --to-source
command = ['iptables', '-t', 'nat', '-I', 'POSTROUTING', '-o', device_name, '-s',
           source_ip_address, '-j', 'SNAT', '--to-source', desired_source_ip_address]
log.info('Running command: {c}'.format(c=command))
try:
    result = run_command(command, timeout_sec=20)
except CommandError:
    # Re-raise as OSError, preserving the original traceback (py2 raise)
    _, ex, trace = sys.exc_info()
    msg = 'There was a problem running iptables command: {c}\n{e}'.format(c=' '.join(command), e=str(ex))
    log.error(msg)
    raise OSError, msg, trace
# A non-zero exit code from iptables is treated as a failure
if int(result['code']) != 0:
    msg = 'The iptables command produced an error with exit code: {c}, and output:\n{o}'.format(
        c=result['code'], o=result['output'])
    log.error(msg)
    raise OSError(msg)
log.info('Successfully configured the source IP for {d} to be: {i}'.format(
    d=device_name, i=desired_source_ip_address))
<SYSTEM_TASK:>
Generate shell color opcodes from a pretty coloring syntax.
<END_TASK>
<USER_TASK:>
Description:
def fmt(msg, *args, **kw):
# type: (str, *Any, **Any) -> str
""" Generate shell color opcodes from a pretty coloring syntax. """ |
global is_tty
if len(args) or len(kw):
msg = msg.format(*args, **kw)
opcode_subst = '\x1b[\\1m' if is_tty else ''
return re.sub(r'<(\d{1,2})>', opcode_subst, msg) |
<SYSTEM_TASK:>
Send the cons3rt agent log file
<END_TASK>
<USER_TASK:>
Description:
def send_cons3rt_agent_logs(self):
"""Send the cons3rt agent log file
:return:
""" |
log = logging.getLogger(self.cls_logger + '.send_cons3rt_agent_logs')
if self.cons3rt_agent_log_dir is None:
    # NOTE(review): message reads 'There is not ...' — likely meant
    # 'There is no ...'; left unchanged here to keep code byte-identical.
    log.warn('There is not CONS3RT agent log directory on this system')
    return
log.debug('Searching for log files in directory: {d}'.format(d=self.cons3rt_agent_log_dir))
# Email each regular file found in the agent log directory
for item in os.listdir(self.cons3rt_agent_log_dir):
    item_path = os.path.join(self.cons3rt_agent_log_dir, item)
    if os.path.isfile(item_path):
        log.info('Sending email with cons3rt agent log file: {f}'.format(f=item_path))
        try:
            self.send_text_file(text_file=item_path)
        except (TypeError, OSError, AssetMailerError):
            # Wrap any failure as AssetMailerError, preserving the original
            # traceback (Python 2 three-argument raise)
            _, ex, trace = sys.exc_info()
            msg = '{n}: There was a problem sending CONS3RT agent log file: {f}\n{e}'.format(
                n=ex.__class__.__name__, f=item_path, e=str(ex))
            raise AssetMailerError, msg, trace
        else:
            log.info('Successfully sent email with file: {f}'.format(f=item_path))
<SYSTEM_TASK:>
Performs URL encoding for passwords
<END_TASK>
<USER_TASK:>
Description:
def encode_password(password):
"""Performs URL encoding for passwords
:param password: (str) password to encode
:return: (str) encoded password
""" |
log = logging.getLogger(mod_logger + '.password_encoder')
log.debug('Encoding password: {p}'.format(p=password))
encoded_password = ''
for c in password:
encoded_password += encode_character(char=c)
log.debug('Encoded password: {p}'.format(p=encoded_password))
return encoded_password |
<SYSTEM_TASK:>
Returns URL encoding for a single character
<END_TASK>
<USER_TASK:>
Description:
def encode_character(char):
    """Return the URL (percent) encoding for a single character.

    :param char: (str) single character to encode
    :return: (str) URL-encoded character, or the character unchanged when
        it requires no encoding
    """
    # Table-driven lookup replaces the long if/elif chain.
    # Fixed: space previously mapped to '%7F' (DEL); per percent-encoding
    # rules a space encodes as '%20'.
    encodings = {
        '!': '%21', '"': '%22', '#': '%23', '$': '%24', '%': '%25',
        '&': '%26', "'": '%27', '(': '%28', ')': '%29', '*': '%2A',
        '+': '%2B', ',': '%2C', '-': '%2D', '.': '%2E', '/': '%2F',
        ':': '%3A', ';': '%3B', '<': '%3C', '=': '%3D', '>': '%3E',
        '?': '%3F', '@': '%40', '[': '%5B', '\\': '%5C', ']': '%5D',
        '^': '%5E', '_': '%5F', '`': '%60', '{': '%7B', '|': '%7C',
        '}': '%7D', '~': '%7E', ' ': '%20',
    }
    return encodings.get(char, char)
<SYSTEM_TASK:>
Get different tags as dicts ready to use as dropdown lists.
<END_TASK>
<USER_TASK:>
Description:
def search_tags_as_filters(tags):
    """Parse API tags into dicts ready to use as dropdown lists.

    :param dict tags: mapping of tag code (e.g. "keyword:isogeo:water")
        to its display label
    :return: dict with one {label: tag_code} sub-dict per tag family,
        plus a ``compliance`` flag (1 when any conformity tag is present)
    """
    # One dict per tag family, each mapping display label -> tag code
    actions = {}
    contacts = {}
    formats = {}
    inspire = {}
    keywords = {}
    licenses = {}
    md_types = dict()
    owners = defaultdict(str)
    srs = {}
    unused = {}
    # 0/1 flags
    compliance = 0
    type_dataset = 0
    # Dispatch each tag to its family based on its prefix.
    # (Removed: leftover debug print of the tag count and an unused counter.)
    for tag in sorted(tags.keys()):
        if tag.startswith("action"):
            actions[tags.get(tag, tag)] = tag
        elif tag.startswith("conformity"):
            # presence of any conformity tag marks INSPIRE compliance
            compliance = 1
        elif tag.startswith("contact"):
            contacts[tags.get(tag)] = tag
        elif tag.startswith("format"):
            formats[tags.get(tag)] = tag
        elif tag.startswith("keyword:inspire"):
            # must be tested before the generic "keyword:isogeo" prefix
            inspire[tags.get(tag)] = tag
        elif tag.startswith("keyword:isogeo"):
            keywords[tags.get(tag)] = tag
        elif tag.startswith("license"):
            licenses[tags.get(tag)] = tag
        elif tag.startswith("owner"):
            owners[tags.get(tag)] = tag
        elif tag.startswith("coordinate-system"):
            srs[tags.get(tag)] = tag
        elif tag.startswith("type"):
            md_types[tags.get(tag)] = tag
            if tag in ("type:vector-dataset", "type:raster-dataset"):
                type_dataset += 1
        else:
            # tag family not handled by any filter dropdown
            unused[tags.get(tag)] = tag
    # When both dataset types are present, expose a combined "all datasets"
    # filter that the API does not return itself
    if type_dataset == 2:
        md_types["Donnée"] = "type:dataset"
    return {
        "actions": actions,
        "compliance": compliance,
        "contacts": contacts,
        "formats": formats,
        "inspire": inspire,
        "keywords": keywords,
        "licenses": licenses,
        "owners": owners,
        "srs": srs,
        "types": md_types,
        "unused": unused,
    }
<SYSTEM_TASK:>
Generates identicon image based on passed data.
<END_TASK>
<USER_TASK:>
Description:
def image(request, data):
"""
Generates identicon image based on passed data.
Arguments:
data - Data which should be used for generating an identicon. This data
will be used in order to create a digest which is used for generating the
identicon. If the data passed is a hex digest already, the digest will be
used as-is.
Returns:
Identicon image in raw format.
""" |
# Get image width, height, padding, and format from GET parameters, or
# fall-back to default values from settings.
try:
width = int(request.GET.get("w", PYDENTICON_WIDTH))
except ValueError:
raise SuspiciousOperation("Identicon width must be a positive integer.")
try:
height = int(request.GET.get("h", PYDENTICON_HEIGHT))
except ValueError:
raise SuspiciousOperation("Identicon height must be a positive integer.")
output_format = request.GET.get("f", PYDENTICON_FORMAT)
try:
padding = [int(p) for p in request.GET["p"].split(",")]
except KeyError:
padding = PYDENTICON_PADDING
except ValueError:
raise SuspiciousOperation("Identicon padding must consist out of 4 positive integers separated with commas.")
if "i" in request.GET:
inverted = request.GET.get("i")
if inverted.lower() == "true":
inverted = True
elif inverted.lower() == "false":
inverted = False
else:
raise SuspiciousOperation("Inversion parameter must be a boolean (true/false).")
else:
inverted = PYDENTICON_INVERT
# Validate the input parameters.
if not isinstance(width, int) or width <= 0:
raise SuspiciousOperation("Identicon width must be a positive integer.")
if not isinstance(height, int) or height <= 0:
raise SuspiciousOperation("Identicon height must be a positive integer.")
if not all([isinstance(p, int) and p >= 0 for p in padding]) or len(padding) != 4:
raise SuspiciousOperation("Padding must be a 4-element tuple consisting out of positive integers.")
# Set-up correct content type based on requested identicon format.
if output_format == "png":
content_type = "image/png"
elif output_format == "ascii":
content_type = "text/plain"
else:
raise SuspiciousOperation("Unsupported identicon format requested - '%s' % output_format")
# Initialise a generator.
generator = Generator(PYDENTICON_ROWS, PYDENTICON_COLUMNS,
foreground = PYDENTICON_FOREGROUND, background = PYDENTICON_BACKGROUND,
digest = PYDENTICON_DIGEST)
# Generate the identicion.
content = generator.generate(data, width, height, padding=padding, output_format=output_format, inverted=inverted)
# Create and return the response.
response = HttpResponse(content, content_type=content_type)
return response |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.