<SYSTEM_TASK:> Convenience method allowing easy dumping to and loading from json. <END_TASK> <USER_TASK:> Description: def json(self, json_string=None): """ Convenience method allowing easy dumping to and loading from json. """
if json_string is not None:
    return self.__init__(loads(json_string))

dump = self
if isinstance(self, HAR.log):
    dump = {"log": dump}

return dumps(dump, default=lambda x: dict(x))
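A minimal round-trip sketch; the dict passed to the constructor is illustrative and assumes `HAR.log` accepts parsed JSON data:

    har = HAR.log({"version": "1.2", "creator": {}, "entries": []})
    serialized = har.json()    # dump: returns a JSON string wrapped as {"log": ...}
    har.json(serialized)       # load: re-initializes the object from that string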
<SYSTEM_TASK:> Start the Gateway Search Request and return the address information <END_TASK> <USER_TASK:> Description: def start_search(self): """ Start the Gateway Search Request and return the address information :rtype: (string,int) :return: a tuple(string(IP),int(Port) when found or None when timeout occurs """
self._asyncio_loop = asyncio.get_event_loop()

# Creating Broadcast Receiver
coroutine_listen = self._asyncio_loop.create_datagram_endpoint(
    lambda: self.KNXSearchBroadcastReceiverProtocol(
        self._process_response,
        self._timeout_handling,
        self._timeout,
        self._asyncio_loop
    ),
    local_addr=(self._broadcast_ip_address, 0)
)
self._listener_transport, listener_protocol = \
    self._asyncio_loop.run_until_complete(coroutine_listen)

# We are ready to fire the broadcast message
coroutine_broadcaster = self._asyncio_loop.create_datagram_endpoint(
    lambda: self.KNXSearchBroadcastProtocol(
        self._asyncio_loop,
        self._listener_transport.get_extra_info('sockname')[1]),
    remote_addr=(self._broadcast_address, self._broadcast_port))
self._broadcaster_transport, broadcast_protocol = \
    self._asyncio_loop.run_until_complete(coroutine_broadcaster)

# Waiting for all Broadcast receive or timeout
self._asyncio_loop.run_forever()

# Got Response or Timeout
if self._resolved_gateway_ip_address is None and \
        self._resolved_gateway_ip_port is None:
    LOGGER.debug("Gateway not found!")
    return None
else:
    LOGGER.debug("Gateway found at %s:%s",
                 self._resolved_gateway_ip_address,
                 self._resolved_gateway_ip_port)
    return self._resolved_gateway_ip_address, \
        self._resolved_gateway_ip_port
<SYSTEM_TASK:> Processing the incoming UDP Datagram from the Broadcast Socket <END_TASK> <USER_TASK:> Description: def _process_response(self, received_data): """ Processing the incoming UDP Datagram from the Broadcast Socket :param received_data: UDP Datagram Package to Process :type received_data: Byte """
resp = bytearray(received_data)
self._resolved_gateway_ip_address = str.format(
    "{}.{}.{}.{}", resp[8], resp[9], resp[10], resp[11])
# Use '!H' (unsigned short); '!h' would produce negative numbers
# for ports above 32767.
self._resolved_gateway_ip_port = struct.unpack(
    '!H', bytes(resp[12:14]))[0]
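To illustrate the byte offsets the parser relies on, here is a hand-built datagram (the first 8 bytes are dummy header filler; only the IP and port fields matter for this method):

    import struct

    datagram = bytes(bytearray(8) + bytearray([192, 168, 1, 10]) + struct.pack('!H', 3671))
    resp = bytearray(datagram)
    assert "{}.{}.{}.{}".format(resp[8], resp[9], resp[10], resp[11]) == "192.168.1.10"
    assert struct.unpack('!H', bytes(resp[12:14]))[0] == 3671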
<SYSTEM_TASK:> append binary data to an upload <END_TASK> <USER_TASK:> Description: def append(self, fdata, offset, query='/content/uploads'): """ append binary data to an upload `fdata` - binary data to send to pulp `offset` - the amount of previously-uploaded data """
query = '%s/%s/%s/' % (query, self.uid, offset)

_r = self.connector.put(query, fdata, log_data=False,
                        auto_create_json_str=False)
juicer.utils.Log.log_notice("Appending to: %s" % query)
juicer.utils.Log.log_debug("Continuing upload with append. "
                           "PUT returned with data: %s" % str(_r.content))
return _r.status_code
<SYSTEM_TASK:> import the completed upload into pulp <END_TASK> <USER_TASK:> Description: def import_upload(self, nvrea, ftype='rpm', rpm_name='', desc=None, htype='md5', lic=None, group=None, vendor=None, req=None): """ import the completed upload into pulp `ftype` - the type of the upload `rpm_name` - the name of the uploaded rpm `desc` - description of the rpm `htype` - checksum type `lic` - license used in the packaged software `group` - package group `vendor` - software vendor `req` - dependencies """
query = '/repositories/%s/actions/import_upload/' % self.repoid
data = {'upload_id': self.uid,
        'unit_type_id': ftype,
        'unit_key': {
            'name': rpm_name,
            'version': nvrea[1],
            'release': nvrea[2],
            'epoch': nvrea[3],
            'arch': nvrea[4],
            'checksumtype': htype,
            'checksum': self.cksum,
        },
        'unit_metadata': {
            'filename': self.pkg_name,
            'license': lic if lic else '',
            'requires': req if req else '',
            # 'type': ftype,
            'description': desc if desc else '',
            # 'size': self.size,
            'vendor': vendor if vendor else '',
            'relativepath': self.pkg_name,
        }}
_r = self.connector.post(query, data)

if _r.status_code not in [Constants.PULP_POST_OK,
                          Constants.PULP_POST_ACCEPTED]:
    juicer.utils.Log.log_error(
        "Import error importing '%s'... server said: \n %s",
        (self.pkg_name, juicer.utils.load_json_str(_r.content)))
    _r.raise_for_status()
juicer.utils.Log.log_debug("Finalized upload id %s" % self.uid)
<SYSTEM_TASK:> pulp leaves droppings if you don't specifically tell it <END_TASK> <USER_TASK:> Description: def clean_upload(self, query='/content/uploads/'): """ pulp leaves droppings if you don't specifically tell it to clean up after itself. use this to do so. """
query = query + self.uid + '/'
_r = self.connector.delete(query)

if _r.status_code == Constants.PULP_DELETE_OK:
    juicer.utils.Log.log_info("Cleaned up after upload request.")
else:
    _r.raise_for_status()
<SYSTEM_TASK:> Check that the config file is present and readable. If not, <END_TASK> <USER_TASK:> Description: def _user_config_file(): """ Check that the config file is present and readable. If not, copy a template in place. """
config_file = Constants.USER_CONFIG

if os.path.exists(config_file) and os.access(config_file, os.R_OK):
    return config_file
elif os.path.exists(config_file) and not os.access(config_file, os.R_OK):
    raise IOError("Can not read %s" % config_file)
else:
    shutil.copy(Constants.EXAMPLE_USER_CONFIG, config_file)
    raise JuicerConfigError("Default config file created.\nCheck man 5 juicer.conf.")
<SYSTEM_TASK:> return a pymongo db connection for interacting with cart objects <END_TASK> <USER_TASK:> Description: def cart_db(): """ return a pymongo db connection for interacting with cart objects """
config = _config_file()
_config_test(config)

juicer.utils.Log.log_debug("Establishing cart connection:")
cart_con = MongoClient(dict(config.items(config.sections()[0]))['cart_host'])
cart_db = cart_con.carts

return cart_db
<SYSTEM_TASK:> Connect to mongo and store your cart in the specified collection. <END_TASK> <USER_TASK:> Description: def upload_cart(cart, collection): """ Connect to mongo and store your cart in the specified collection. """
cart_cols = cart_db()
cart_json = read_json_document(cart.cart_file())

try:
    cart_id = cart_cols[collection].save(cart_json)
except MongoErrors.AutoReconnect:
    raise JuicerConfigError("Error saving cart to `cart_host`. "
                            "Ensure that this node is the master.")
return cart_id
<SYSTEM_TASK:> Give back an array of dicts with the connection <END_TASK> <USER_TASK:> Description: def get_login_info(): """ Give back an array of dicts with the connection information for all the environments. """
connections = {}
_defaults = {}
_defaults['start_in'] = ''
_defaults['rpm_sign_plugin'] = ''

config = _config_file()
_config_test(config)

juicer.utils.Log.log_debug("Loading connection information:")
for section in config.sections():
    cfg = dict(config.items(section))

    connections[section] = Connectors(cfg)

    if 'start_in' in cfg:
        _defaults['start_in'] = cfg['start_in']

    if 'rpm_sign_plugin' in cfg:
        _defaults['rpm_sign_plugin'] = cfg['rpm_sign_plugin']

    juicer.utils.Log.log_debug("[%s] username: %s, base_url: %s" %
                               (section, cfg['username'], cfg['base_url']))

_defaults['environments'] = config.sections()

return (connections, _defaults)
<SYSTEM_TASK:> Return defined environments from config file for default <END_TASK> <USER_TASK:> Description: def get_environments(): """ Return defined environments from config file for default environment values. """
config = _config_file()

juicer.utils.Log.log_debug("Reading environment sections:")
environments = config.sections()

juicer.utils.Log.log_debug("Read environment sections: %s",
                           ', '.join(environments))
return environments
<SYSTEM_TASK:> determine if two environments are on the same host. returns <END_TASK> <USER_TASK:> Description: def env_same_host(env1, env2): """ determine if two environments are on the same host. returns true or false """
config = _config_file()

h1 = dict(config.items(env1))['base_url']
h2 = dict(config.items(env2))['base_url']

return h1 == h2
<SYSTEM_TASK:> Given an environment, return the next environment in the <END_TASK> <USER_TASK:> Description: def get_next_environment(env): """ Given an environment, return the next environment in the promotion hierarchy """
config = _config_file()

juicer.utils.Log.log_debug("Finding next environment...")

if env not in config.sections():
    raise JuicerConfigError("%s is not a server configured in juicer.conf" % env)

section = dict(config.items(env))

if 'promotes_to' not in section.keys():
    err = ("Environment `%s` has no entry for `promotes_to`\n"
           "Check man 5 juicer.conf." % env)
    raise JuicerConfigError(err)

return section['promotes_to']
<SYSTEM_TASK:> Given a connection and a repoid, return the url of the repository <END_TASK> <USER_TASK:> Description: def pulp_repo_path(connection, repoid): """ Given a connection and a repoid, return the url of the repository """
dl_base = connection.base_url.replace('/pulp/api/v2', '/pulp/repos')
_m = re.match('(.*)-(.*)', repoid)
repo = _m.group(1)
env = _m.group(2)

return "%s/%s/%s" % (dl_base, env, repo)
<SYSTEM_TASK:> Determine if user exists in specified environment. <END_TASK> <USER_TASK:> Description: def user_exists_p(login, connector): """ Determine if user exists in specified environment. """
url = '/users/' + login + '/'
_r = connector.get(url)

return (_r.status_code == Constants.PULP_GET_OK)
<SYSTEM_TASK:> Flatten an arbitrary depth nested list. <END_TASK> <USER_TASK:> Description: def flatten(x): """ Flatten an arbitrary depth nested list. """
# Lifted from: http://stackoverflow.com/a/406822/263969
result = []
for el in x:
    if hasattr(el, "__iter__") and not isinstance(el, basestring):
        result.extend(flatten(el))
    else:
        result.append(el)
return result
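A quick sanity check (strings stay whole because of the `basestring` guard):

    flatten([1, [2, [3, [4, 5]]], "six"])   # -> [1, 2, 3, 4, 5, 'six']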
<SYSTEM_TASK:> `title` - Name of the file to write. <END_TASK> <USER_TASK:> Description: def write_json_document(title, body): """ `title` - Name of the file to write. `body` - Python datastructure representing the document. This method handles transforming the body into a proper json string, and then writing the file to disk. """
if not title.endswith('.json'):
    title += '.json'

json_body = create_json_str(body)

if os.path.exists(title):
    juicer.utils.Log.log_warn("Cart file '%s' already exists, "
                              "overwriting with new data." % title)

f = open(title, 'w')
f.write(json_body)
f.flush()
f.close()
<SYSTEM_TASK:> Reads in a json document and returns a native python <END_TASK> <USER_TASK:> Description: def read_json_document(title): """ Reads in a json document and returns a native python datastructure. """
if not title.endswith('.json'):
    juicer.utils.Log.log_warn("File name (%s) does not end with '.json', "
                              "appending it automatically." % title)
    title += '.json'

if not os.path.exists(title):
    raise IOError("Could not find file: '%s'" % title)

f = open(title, 'r')
doc = f.read()
f.close()

return load_json_str(doc)
<SYSTEM_TASK:> `search_base` - The directory to begin walking down. <END_TASK> <USER_TASK:> Description: def find_pattern(search_base, pattern='*.rpm'): """ `search_base` - The directory to begin walking down. `pattern` - File pattern to match for. This is a generator which yields the full path to files (one at a time) which match the given glob (`pattern`). """
# Stolen from http://rosettacode.org/wiki/Walk_a_directory/Recursively#Python
if (not os.path.isdir(search_base)) and os.path.exists(search_base):
    # Adapt the algorithm to gracefully handle non-directory search paths
    yield search_base
else:
    for root, dirs, files in os.walk(search_base):
        for filename in fnmatch.filter(files, pattern):
            yield os.path.join(root, filename)
<SYSTEM_TASK:> Filter a list of packages into local and remotes. <END_TASK> <USER_TASK:> Description: def filter_package_list(package_list): """ Filter a list of packages into local and remotes. """
remote_pkgs = []
local_pkgs = []

possible_remotes = filter(lambda i: not os.path.exists(i), package_list)
juicer.utils.Log.log_debug("Considering %s possible remotes" %
                           len(possible_remotes))

for item in possible_remotes:
    remote_pkgs.extend(juicer.utils.Remotes.assemble_remotes(item))
juicer.utils.Log.log_notice("Remote packages: %s" % str(remote_pkgs))

possible_locals = filter(os.path.exists, package_list)
possible_locals = filter(is_rpm, possible_locals)
juicer.utils.Log.log_debug("Considering %s possible locals" %
                           len(possible_locals))

for item in possible_locals:
    for match in find_pattern(item):
        local_pkgs.append(match)
juicer.utils.Log.log_notice("Local packages: %s" % str(local_pkgs))

filtered_package_list = dedupe(remote_pkgs + local_pkgs)
return filtered_package_list
<SYSTEM_TASK:> `returns_output` - Returns all print output in a list. <END_TASK> <USER_TASK:> Description: def mute(returns_output=False): """ `returns_output` - Returns all print output in a list. Capture or ignore all print output generated by a function. Usage: output = mute(returns_output=True)(module.my_func)(args) """
def decorator(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        saved_stdout = sys.stdout
        sys.stdout = cStringIO.StringIO()
        try:
            out = func(*args, **kwargs)
            if returns_output:
                out = sys.stdout.getvalue().strip().split()
        finally:
            sys.stdout = saved_stdout
        return out
    return wrapper
return decorator
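A small demonstration (Python 2, matching the module's use of cStringIO); note that with `returns_output=True` the wrapper returns the captured words, not the function's own return value:

    def noisy(n):
        print "working on %s" % n
        return n * 2

    silenced = mute()(noisy)
    value = silenced(21)                           # returns 42; prints nothing
    words = mute(returns_output=True)(noisy)(21)   # -> ['working', 'on', '21']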
<SYSTEM_TASK:> Download the file `url` and save it to the local disk as <END_TASK> <USER_TASK:> Description: def save_url_as(url, save_as): """ Download the file `url` and save it to the local disk as `save_as`. """
remote = requests.get(url, verify=False)
if not remote.status_code == Constants.PULP_GET_OK:
    raise JuicerPulpError("A %s error occurred trying to get %s" %
                          (remote.status_code, url))

with open(save_as, 'wb') as data:
    data.write(remote.content)
<SYSTEM_TASK:> return a str containing a link to the rpm in the pulp repository <END_TASK> <USER_TASK:> Description: def remote_url(connector, env, repo, filename): """ return a str containing a link to the rpm in the pulp repository """
dl_base = connector.base_url.replace('/pulp/api/v2', '/pulp/repos')

repoid = '%s-%s' % (repo, env)
_r = connector.get('/repositories/%s/' % repoid)
if not _r.status_code == Constants.PULP_GET_OK:
    # maybe the repo name is the repoid
    _r = connector.get('/repositories/%s/' % repo)
    if not _r.status_code == Constants.PULP_GET_OK:
        raise JuicerPulpError("%s was not found as a repoid. Status code %s "
                              "returned by pulp" % (repoid, _r.status_code))

repo = juicer.utils.load_json_str(_r.content)['display_name']

link = '%s/%s/%s/%s' % (dl_base, env, repo, filename)
return link
<SYSTEM_TASK:> Hand back the hdr - duh - if the pkg is foobar hand back None <END_TASK> <USER_TASK:> Description: def return_hdr(ts, package): """ Hand back the hdr - duh - if the pkg is foobar hand back None Shamelessly stolen from Seth Vidal http://yum.baseurl.org/download/misc/checksig.py """
try:
    fdno = os.open(package, os.O_RDONLY)
except OSError:
    hdr = None
    return hdr

ts.setVSFlags(~(rpm.RPMVSF_NOMD5 | rpm.RPMVSF_NEEDPAYLOAD))
try:
    hdr = ts.hdrFromFdno(fdno)
except rpm.error:
    # re-raise the original error, preserving the traceback
    raise

if type(hdr) != rpm.hdr:
    hdr = None
ts.setVSFlags(0)
os.close(fdno)
return hdr
<SYSTEM_TASK:> hand back signature information and an error code <END_TASK> <USER_TASK:> Description: def get_sig_info(hdr): """ hand back signature information and an error code Shamelessly stolen from Seth Vidal http://yum.baseurl.org/download/misc/checksig.py """
string = '%|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|'
siginfo = hdr.sprintf(string)
if siginfo != '(none)':
    error = 0
    sigtype, sigdate, sigid = siginfo.split(',')
else:
    error = 101
    sigtype = 'MD5'
    sigdate = 'None'
    sigid = 'None'

infotuple = (sigtype, sigdate, sigid)
return error, infotuple
<SYSTEM_TASK:> check if rpm has a signature, we don't care if it's valid or not <END_TASK> <USER_TASK:> Description: def check_sig(package): """ check if rpm has a signature, we don't care if it's valid or not at the moment Shamelessly stolen from Seth Vidal http://yum.baseurl.org/download/misc/checksig.py """
rpmroot = '/'

ts = rpm.TransactionSet(rpmroot)
sigerror = 0
ts.setVSFlags(0)
hdr = return_hdr(ts, package)
sigerror, (sigtype, sigdate, sigid) = get_sig_info(hdr)

if sigid == 'None':
    keyid = 'None'
else:
    keyid = sigid[-8:]

if keyid != 'None':
    return True
else:
    return False
<SYSTEM_TASK:> Query information about the RPM at `rpm_path`. <END_TASK> <USER_TASK:> Description: def rpm_info(rpm_path): """ Query information about the RPM at `rpm_path`. """
ts = rpm.TransactionSet()
ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)

rpm_info = {}
rpm_fd = open(rpm_path, 'rb')
pkg = ts.hdrFromFdno(rpm_fd)
rpm_info['name'] = pkg['name']
rpm_info['version'] = pkg['version']
rpm_info['release'] = pkg['release']
rpm_info['epoch'] = 0
rpm_info['arch'] = pkg['arch']
rpm_info['nvrea'] = tuple((rpm_info['name'], rpm_info['version'],
                           rpm_info['release'], rpm_info['epoch'],
                           rpm_info['arch']))
# Checksum the file contents; hashing the path string itself would
# produce a meaningless digest.
rpm_fd.seek(0)
rpm_info['cksum'] = hashlib.md5(rpm_fd.read()).hexdigest()
rpm_info['size'] = os.path.getsize(rpm_path)
rpm_info['package_basename'] = os.path.basename(rpm_path)
rpm_fd.close()
return rpm_info
<SYSTEM_TASK:> upload an rpm into pulp <END_TASK> <USER_TASK:> Description: def upload_rpm(rpm_path, repoid, connector, callback=None): """upload an rpm into pulp rpm_path: path to an rpm connector: the connector to use for interacting with pulp callback: Optional callback to call after an RPM is uploaded. Callback should accept one argument, the name of the RPM which was uploaded """
ts = rpm.TransactionSet()
ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)

info = rpm_info(rpm_path)
pkg_name = info['name']
nvrea = info['nvrea']
cksum = info['cksum']
size = info['size']
package_basename = info['package_basename']

juicer.utils.Log.log_notice("Expected amount to seek: %s "
                            "(package size by os.path.getsize)" % size)

# initiate upload
upload = juicer.utils.Upload.Upload(package_basename, cksum, size,
                                    repoid, connector)

# create a status bar
pbar = ProgressBar(size)

# read in rpm
total_seeked = 0
rpm_fd = open(rpm_path, 'rb')
rpm_fd.seek(0)
while total_seeked < size:
    rpm_data = rpm_fd.read(Constants.UPLOAD_AT_ONCE)
    last_offset = total_seeked
    total_seeked += len(rpm_data)
    juicer.utils.Log.log_notice("Seeked %s data... (total seeked: %s)" %
                                (len(rpm_data), total_seeked))
    upload_code = upload.append(fdata=rpm_data, offset=last_offset)
    if upload_code != Constants.PULP_PUT_OK:
        juicer.utils.Log.log_error("Upload failed.")
    pbar.update(len(rpm_data))
pbar.finish()
rpm_fd.close()

juicer.utils.Log.log_notice("Seeked total data: %s" % total_seeked)

# finalize upload
rpm_id = upload.import_upload(nvrea=nvrea, rpm_name=pkg_name)

juicer.utils.Log.log_debug("RPM upload complete. New 'packageid': %s" % rpm_id)

# clean up working dir
upload.clean_upload()

# Run callbacks?
if callback:
    try:
        juicer.utils.Log.log_debug("Calling upload callback: %s" % str(callback))
        callback(pkg_name)
    except Exception:
        juicer.utils.Log.log_error("Exception raised in callback: %s",
                                   str(callback))

return rpm_id
<SYSTEM_TASK:> accesses mongodb and return a cart spec stored there <END_TASK> <USER_TASK:> Description: def download_cart(cart_name, env): """ accesses mongodb and return a cart spec stored there """
cart_con = cart_db()
carts = cart_con[env]

return carts.find_one({'_id': cart_name})
<SYSTEM_TASK:> returns a dict object representing a cart stored in pulp <END_TASK> <USER_TASK:> Description: def get_cart(base_url, env, cart_name): """ returns a dict object representing a cart stored in pulp base_url: a str for the base_url (eg: http://sweet.pulp.repo/pulp/api/) env: a str with the name of the environment (eg: prod) cart_name: a str with the name of the cart to get """
base_url = base_url.replace('/pulp/api/', '/pulp/repos')
url = '%s/%s/carts/%s.json' % (base_url, env, cart_name)

rsock = urllib2.urlopen(url)
data = rsock.read()
rsock.close()

return load_json_str(data)
<SYSTEM_TASK:> returns a list of carts containing a package with the specified name <END_TASK> <USER_TASK:> Description: def search_carts(env, pkg_name, repos): """ returns a list of carts containing a package with the specified name env: the name of an environment from the juicer config pkg_name: the name of the package for which to search repos: a list of repos in which to search for the package """
db = cart_db()
carts = db[env]

# Accumulate matches across every repo, not just the last one searched.
found_carts = []

for repo in repos:
    field = 'repos_items.%s' % repo
    value = '.*%s.*' % pkg_name

    for cart in carts.find({field: {'$regex': value}}):
        found_carts.append(cart)
return found_carts
<SYSTEM_TASK:> Wrap `msg` in bars to create a header effect <END_TASK> <USER_TASK:> Description: def header(msg): """ Wrap `msg` in bars to create a header effect """
# Accounting for '| ' and ' |'
width = len(msg) + 4

s = []
s.append('-' * width)
s.append("| %s |" % msg)
s.append('-' * width)

return '\n'.join(s)
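For example (Python 2 print statement, matching the surrounding code):

    print header("Hello")
    # ---------
    # | Hello |
    # ---------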
<SYSTEM_TASK:> Raises exception if the repo references undefined environments <END_TASK> <USER_TASK:> Description: def repo_in_defined_envs(repo, all_envs): """Raises exception if the repo references undefined environments"""
remaining_envs = set(repo['env']) - set(all_envs)
if remaining_envs:
    raise JuicerRepoInUndefinedEnvs(
        "Repo def %s references undefined environments: %s" %
        (repo['name'], ", ".join(list(remaining_envs))))
else:
    return True
<SYSTEM_TASK:> Compare a juicer repo def with a given pulp definition. Compute and <END_TASK> <USER_TASK:> Description: def repo_def_matches_reality(juicer_def, pulp_def): """Compare a juicer repo def with a given pulp definition. Compute and return the update necessary to make `pulp_def` match `juicer_def`. `juicer_def` - A JuicerRepo() object representing a juicer repository `pulp_def` - A PulpRepo() object representing a pulp repository """
return juicer.common.Repo.RepoDiff(juicer_repo=juicer_def, pulp_repo=pulp_def)
<SYSTEM_TASK:> Return `n` size lists from a given list `l` <END_TASK> <USER_TASK:> Description: def chunk_list(l, n): """Return `n` size lists from a given list `l`"""
return [l[i:i + n] for i in range(0, len(l), n)]
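For example, the final chunk simply carries the remainder:

    chunk_list([1, 2, 3, 4, 5], 2)   # -> [[1, 2], [3, 4], [5]]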
<SYSTEM_TASK:> Read config file and return config items as a dict <END_TASK> <USER_TASK:> Description: def readconf(conffile, section_name=None, log_name=None, defaults=None, raw=False): """ Read config file and return config items as a dict :param conffile: path to config file, or a file-like object (hasattr readline) :param section_name: config section to read (will return all sections if not defined) :param log_name: name to be used with logging (will use section_name if not defined) :param defaults: dict of default values to pre-populate the config with :returns: dict of config items """
if defaults is None:
    defaults = {}
if raw:
    c = RawConfigParser(defaults)
else:
    c = ConfigParser(defaults)
if hasattr(conffile, 'readline'):
    c.readfp(conffile)
else:
    if not c.read(conffile):
        print("Unable to read config file %s" % conffile)
        sys.exit(1)
if section_name:
    if c.has_section(section_name):
        conf = dict(c.items(section_name))
    else:
        print("Unable to find %s config section in %s" %
              (section_name, conffile))
        sys.exit(1)
    if "log_name" not in conf:
        if log_name is not None:
            conf['log_name'] = log_name
        else:
            conf['log_name'] = section_name
else:
    conf = {}
    for s in c.sections():
        conf.update({s: dict(c.items(s))})
    if 'log_name' not in conf:
        conf['log_name'] = log_name
conf['__file__'] = conffile
return conf
<SYSTEM_TASK:> Make a group. <END_TASK> <USER_TASK:> Description: def group_factory(bridge, number, name, led_type): """ Make a group. :param bridge: Member of this bridge. :param number: Group number (1-4). :param name: Name of group. :param led_type: Either `RGBW`, `WRGB`, `RGBWW`, `WHITE`, `DIMMER` or `BRIDGE_LED`. :returns: New group. """
if led_type in [RGBW, BRIDGE_LED]:
    return RgbwGroup(bridge, number, name, led_type)
elif led_type == RGBWW:
    return RgbwwGroup(bridge, number, name)
elif led_type == WHITE:
    return WhiteGroup(bridge, number, name)
elif led_type == DIMMER:
    return DimmerGroup(bridge, number, name)
elif led_type == WRGB:
    return WrgbGroup(bridge, number, name)
else:
    raise ValueError('Invalid LED type: %s' % led_type)
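A usage sketch; the `bridge` object here is assumed to be an already-constructed bridge instance:

    # Hypothetical bridge instance; group_factory just passes it through
    kitchen = group_factory(bridge, 1, 'Kitchen', RGBW)
    hallway = group_factory(bridge, 2, 'Hallway', WHITE)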
<SYSTEM_TASK:> Add a group. <END_TASK> <USER_TASK:> Description: def add_group(self, number, name, led_type): """ Add a group. :param number: Group number (1-4). :param name: Group name. :param led_type: Either `RGBW`, `WRGB`, `RGBWW`, `WHITE`, `DIMMER` or `BRIDGE_LED`. :returns: Added group. """
group = group_factory(self, number, name, led_type)
self.groups.append(group)
return group
<SYSTEM_TASK:> Send a command to the physical bridge. <END_TASK> <USER_TASK:> Description: def send(self, command, reps=REPS, wait=MIN_WAIT): """ Send a command to the physical bridge. :param command: A Command instance. :param reps: Number of repetitions. :param wait: Wait time in seconds. """
# Enqueue the command.
self._command_queue.put((command, reps, wait))
# Wait before accepting another command.
# This keeps individual groups relatively synchronized.
sleep = reps * wait * self.active
if command.select and self._selected_number != command.group_number:
    sleep += SELECT_WAIT
time.sleep(sleep)
<SYSTEM_TASK:> Consume commands from the queue. <END_TASK> <USER_TASK:> Description: def _consume(self): """ Consume commands from the queue. The command is repeated according to the configured value. Wait after each command is sent. The bridge socket is a shared resource. It must only be used by one thread at a time. Note that this can and will delay commands if multiple groups are attempting to communicate at the same time on the same bridge. """
while not self.is_closed:
    # Get command from queue.
    msg = self._command_queue.get()

    # Closed
    if msg is None:
        return

    # Use the lock so we are sure is_ready is not changed during
    # execution and the socket is not in use
    with self._lock:
        # Check if bridge is ready
        if self.is_ready:
            (command, reps, wait) = msg

            # Select group if a different group is currently selected.
            if command.select and self._selected_number != command.group_number:
                if self._send_raw(command.select_command.get_bytes(self)):
                    self._selected_number = command.group_number
                    time.sleep(SELECT_WAIT)
                else:
                    # Stop sending on socket error
                    self.is_ready = False

            # Repeat command as necessary.
            for _ in range(reps):
                if self.is_ready:
                    if self._send_raw(command.get_bytes(self)):
                        time.sleep(wait)
                    else:
                        # Stop sending on socket error
                        self.is_ready = False

    # Wait if bridge is not ready, we're only reading is_ready, no lock needed
    if not self.is_ready and not self.is_closed:
        # For older bridges, always try again, there's no keep-alive thread
        if self.version < 6:
            # Give the reconnect some time
            time.sleep(RECONNECT_TIME)
            self.is_ready = True
<SYSTEM_TASK:> Send keep alive messages continuously to bridge. <END_TASK> <USER_TASK:> Description: def _keep_alive(self): """ Send keep alive messages continuously to bridge. """
send_next_keep_alive_at = 0
while not self.is_closed:
    if not self.is_ready:
        self._reconnect()
        continue

    if time.monotonic() > send_next_keep_alive_at:
        command = KEEP_ALIVE_COMMAND_PREAMBLE + [self.wb1, self.wb2]
        self._send_raw(command)
        need_response_by = time.monotonic() + KEEP_ALIVE_TIME

    # Wait for responses
    timeout = max(0, need_response_by - time.monotonic())
    ready = select.select([self._socket], [], [], timeout)
    if ready[0]:
        try:
            response = bytearray(12)
            self._socket.recv_into(response)
            if response[:5] == bytearray(KEEP_ALIVE_RESPONSE_PREAMBLE):
                send_next_keep_alive_at = need_response_by
        except (socket.error, socket.timeout):
            with self._lock:
                self.is_ready = False
    elif send_next_keep_alive_at < need_response_by:
        # Acquire the lock to make sure we don't change self.is_ready
        # while _consume() is sending commands
        with self._lock:
            self.is_ready = False
<SYSTEM_TASK:> Closes the connection to the bridge. <END_TASK> <USER_TASK:> Description: def close(self): """ Closes the connection to the bridge. """
self.is_closed = True
self.is_ready = False
self._command_queue.put(None)
<SYSTEM_TASK:> Create a new CEMIMessage initialized from the given CEMI data. <END_TASK> <USER_TASK:> Description: def from_body(cls, cemi): """Create a new CEMIMessage initialized from the given CEMI data."""
# TODO: check that length matches
message = cls()
message.code = cemi[0]
offset = cemi[1]

message.ctl1 = cemi[2 + offset]
message.ctl2 = cemi[3 + offset]

message.src_addr = cemi[4 + offset] * 256 + cemi[5 + offset]
message.dst_addr = cemi[6 + offset] * 256 + cemi[7 + offset]

message.mpdu_len = cemi[8 + offset]

tpci_apci = cemi[9 + offset] * 256 + cemi[10 + offset]
apci = tpci_apci & 0x3ff

# for APCI codes see KNX Standard 03/03/07 Application layer
# table Application Layer control field
if apci & 0x080:
    # Group write
    message.cmd = CEMIMessage.CMD_GROUP_WRITE
elif apci == 0:
    message.cmd = CEMIMessage.CMD_GROUP_READ
elif apci & 0x40:
    message.cmd = CEMIMessage.CMD_GROUP_RESPONSE
else:
    message.cmd = CEMIMessage.CMD_UNKNOWN

apdu = cemi[10 + offset:]
if len(apdu) != message.mpdu_len:
    raise KNXException(
        "APDU LEN should be {} but is {}".format(
            message.mpdu_len, len(apdu)))

if len(apdu) == 1:
    message.data = [apci & 0x2f]
else:
    message.data = cemi[11 + offset:]

return message
<SYSTEM_TASK:> Initialize the CEMI frame with the given destination address. <END_TASK> <USER_TASK:> Description: def init_group(self, dst_addr=1): """Initialize the CEMI frame with the given destination address."""
self.code = 0x11
# frametype 1, repeat 1, system broadcast 1, priority 3, ack-req 0,
# confirm-flag 0
self.ctl1 = 0xbc
self.ctl2 = 0xe0  # dst addr type 1, hop count 6, extended frame format
self.src_addr = 0
self.dst_addr = dst_addr
<SYSTEM_TASK:> Initialize the CEMI frame for a group write operation. <END_TASK> <USER_TASK:> Description: def init_group_write(self, dst_addr=1, data=None, dptsize=0): """Initialize the CEMI frame for a group write operation."""
self.init_group(dst_addr)

# unnumbered data packet, group write
self.tpci_apci = 0x00 * 256 + 0x80

self.dptsize = dptsize
if data is None:
    self.data = [0]
else:
    self.data = data
<SYSTEM_TASK:> Initialize the CEMI frame for a group read operation. <END_TASK> <USER_TASK:> Description: def init_group_read(self, dst_addr=1): """Initialize the CEMI frame for a group read operation."""
self.init_group(dst_addr)
self.tpci_apci = 0x00  # unnumbered data packet, group read
self.data = [0]
<SYSTEM_TASK:> Convert the CEMI frame object to its byte representation. <END_TASK> <USER_TASK:> Description: def to_body(self): """Convert the CEMI frame object to its byte representation."""
body = [self.code, 0x00, self.ctl1, self.ctl2,
        (self.src_addr >> 8) & 0xff, (self.src_addr >> 0) & 0xff,
        (self.dst_addr >> 8) & 0xff, (self.dst_addr >> 0) & 0xff]

if self.dptsize == 0 and (len(self.data) == 1) and ((self.data[0] & 0xC0) == 0):
    # less than 6 bit of data, pack into APCI byte
    body.extend([1, (self.tpci_apci >> 8) & 0xff,
                 ((self.tpci_apci >> 0) & 0xff) + self.data[0]])
else:
    body.extend([1 + len(self.data), (self.tpci_apci >> 8) & 0xff,
                 (self.tpci_apci >> 0) & 0xff])
    body.extend(self.data)

return body
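To make the small-payload branch concrete: a 1-bit group write (e.g. switching a light on) fits entirely into the APCI byte, so the body carries no separate data octet. A quick check using the methods above (the group address is arbitrary):

    cemi = CEMIMessage()
    cemi.init_group_write(dst_addr=0x0901, data=[1])
    body = cemi.to_body()
    assert body[-1] == 0x81   # 0x80 (group write APCI) + 0x01 (payload) packed together
    assert body[8] == 1       # APDU length byte: one octet, despite carrying data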
<SYSTEM_TASK:> Disconnect an open tunnel connection <END_TASK> <USER_TASK:> Description: def disconnect(self): """Disconnect an open tunnel connection"""
if self.connected and self.channel:
    logging.debug("Disconnecting KNX/IP tunnel...")

    frame = KNXIPFrame(KNXIPFrame.DISCONNECT_REQUEST)
    frame.body = self.hpai_body()

    # TODO: I believe increasing the sequence number is not necessary
    # on the control channel during disconnect???
    if self.seq < 0xff:
        self.seq += 1
    else:
        self.seq = 0

    self.control_socket.sendto(
        bytes(frame.to_frame()), (self.remote_ip, self.remote_port))
    # TODO: Implement the Disconnect_Response handling from Gateway
    # Control Channel > Client Control Channel
else:
    logging.debug("Disconnect - no connection, nothing to do")

self.channel = None
self.connected = False
<SYSTEM_TASK:> Check the state of the connection using connection state request. <END_TASK> <USER_TASK:> Description: def check_connection_state(self): """Check the state of the connection using connection state request. This sends a CONNECTION_STATE_REQUEST. This method will only return True, if the connection is established and no error code is returned from the KNX/IP gateway """
if not self.connected:
    self.connection_state = -1
    return False

frame = KNXIPFrame(KNXIPFrame.CONNECTIONSTATE_REQUEST)
frame.body = self.hpai_body()

# Send maximum 3 connection state requests with a 10 second timeout
res = False
self.connection_state = 0

maximum_retry = 3
for retry_counter in range(0, maximum_retry):
    logging.debug("Heartbeat: Send connection state request")

    # Suggestion:
    # Carve the Control Socket out of the KNXIPTunnel
    # Class and Public only the Send and Receive
    # function and Implement in there the Heartbeat so we
    # can block when other Functions want to send
    self.control_socket.settimeout(10)  # Kind of a quirk

    try:
        self.control_socket.sendto(bytes(frame.to_frame()),
                                   (self.remote_ip, self.remote_port))
        receive = self.control_socket.recv(1024)
    except socket.timeout:
        logging.info("Heartbeat: No response, Retry Counter %d/%d",
                     retry_counter, maximum_retry)
        continue  # try the next attempt until the retries are exhausted

    frame = KNXIPFrame.from_frame(receive)
    if frame.service_type_id == KNXIPFrame.CONNECTIONSTATE_RESPONSE:
        if frame.body[1] == KNXIPFrame.E_NO_ERROR:
            logging.debug("Heartbeat: Successful")
            res = True
            break
        if frame.body[1] == KNXIPFrame.E_CONNECTION_ID:
            logging.error(
                "Heartbeat: Response No active "
                "connection found for Channel:%d ", self.channel)
        if frame.body[1] == KNXIPFrame.E_DATA_CONNECTION:
            logging.error(
                "Heartbeat: Response Data Connection Error Response "
                "for Channel:%d ", self.channel)
        # Assuming E_KNX_CONNECTION is defined alongside E_DATA_CONNECTION,
        # per the KNXnet/IP error codes; it matches this log message.
        if frame.body[1] == KNXIPFrame.E_KNX_CONNECTION:
            logging.error(
                "Heartbeat: Response KNX Sub Network Error Response "
                "for Channel:%d ", self.channel)
    else:
        logging.error("Heartbeat: Invalid Response!")

if self.connection_state != 0:
    logging.info("Heartbeat: Connection state was %s",
                 self.connection_state)
    res = False

if not res:
    if self.connection_state == 0:
        self.connection_state = -1
    self.disconnect()
    return False

return True
<SYSTEM_TASK:> Create a body with HPAI information. <END_TASK> <USER_TASK:> Description: def hpai_body(self): """ Create a body with HPAI information. This is used for disconnect and connection state requests. """
body = []

# ============ IP Body ==========
body.extend([self.channel])  # Communication Channel Id
body.extend([0x00])  # Reserved

# =========== Client HPAI ===========
body.extend([0x08])  # HPAI Length
body.extend([0x01])  # Host Protocol
# Tunnel Client Socket IP
body.extend(ip_to_array(self.control_socket.getsockname()[0]))
# Tunnel Client Socket Port
body.extend(int_to_array(self.control_socket.getsockname()[1]))

return body
<SYSTEM_TASK:> Sends a tunneling request based on the given CEMI data. <END_TASK> <USER_TASK:> Description: def send_tunnelling_request(self, cemi, auto_connect=True): """Sends a tunneling request based on the given CEMI data. This method does not wait for an acknowledge or result frame. """
if not self.connected:
    if auto_connect:
        if not self.connect():
            raise KNXException("KNX tunnel not reconnected")
    else:
        raise KNXException("KNX tunnel not connected")

frame = KNXIPFrame(KNXIPFrame.TUNNELING_REQUEST)
# Connection header see KNXnet/IP 4.4.6 TUNNELLING_REQUEST
body = [0x04, self.channel, self.seq, 0x00]
if self.seq < 0xff:
    self.seq += 1
else:
    self.seq = 0
body.extend(cemi.to_body())
frame.body = body
self.data_server.socket.sendto(
    frame.to_frame(), (self.remote_ip, self.remote_port))

# See KNX specification 3.8.4 chapter 2.6 "Frame confirmation"
# Send KNX packet 2 times if not acknowledged and close
# the connection if no ack is received
res = self.ack_semaphore.acquire(blocking=True, timeout=1)

# Resend package if not acknowledged after 1 second
if not res:
    self.data_server.socket.sendto(
        frame.to_frame(), (self.remote_ip, self.remote_port))
    res = self.ack_semaphore.acquire(blocking=True, timeout=1)

    # disconnect and reconnect if not acknowledged
    if not res:
        self.disconnect()
        self.connect()

return res
<SYSTEM_TASK:> Send a group read to the KNX bus and return the result. <END_TASK> <USER_TASK:> Description: def group_read(self, addr, use_cache=True, timeout=1): """Send a group read to the KNX bus and return the result."""
if use_cache:
    res = self.value_cache.get(addr)
    if res:
        logging.debug(
            "Got value of group address %s from cache: %s", addr, res)
        return res

cemi = CEMIMessage()
cemi.init_group_read(addr)
with self._lock:
    # There might be old messages in the result queue, remove them
    self.result_queue.queue.clear()
    self.send_tunnelling_request(cemi)
    # Wait for the result
    try:
        res = self.result_queue.get(block=True, timeout=timeout)
    except queue.Empty:
        return None
    self.result_queue.task_done()
    return res
<SYSTEM_TASK:> Send a group write to the given address. <END_TASK> <USER_TASK:> Description: def group_write(self, addr, data, dptsize=0): """Send a group write to the given address. The method does not check if the address exists and the write request is valid. """
cemi = CEMIMessage()
cemi.init_group_write(addr, data, dptsize)

with self._lock:
    self.send_tunnelling_request(cemi)
    # Workaround for lost KNX packets
    if self._write_delay:
        time.sleep(self._write_delay)
<SYSTEM_TASK:> Toggle the value of an 1-bit group address. <END_TASK> <USER_TASK:> Description: def group_toggle(self, addr, use_cache=True): """Toggle the value of an 1-bit group address. If the object has a value != 0, it will be set to 0, otherwise to 1 """
data = self.group_read(addr, use_cache)
if len(data) != 1:
    problem = "Can't toggle a {}-octet group address {}".format(
        len(data), addr)
    logging.error(problem)
    raise KNXException(problem)

if data[0] == 0:
    self.group_write(addr, [1])
elif data[0] == 1:
    self.group_write(addr, [0])
else:
    problem = "Can't toggle group address {} as value is {}".format(
        addr, data[0])
    logging.error(problem)
    raise KNXException(problem)
<SYSTEM_TASK:> Adds a listener to messages received on a specific address <END_TASK> <USER_TASK:> Description: def register_listener(self, address, func): """Adds a listener to messages received on a specific address If some KNX messages will be received from the KNX bus, this listener will be called func(address, data). There can be multiple listeners for a given address """
try:
    listeners = self.address_listeners[address]
except KeyError:
    listeners = []
    self.address_listeners[address] = listeners

if func not in listeners:
    listeners.append(func)

return True
<SYSTEM_TASK:> Removes a listener function for a given address <END_TASK> <USER_TASK:> Description: def unregister_listener(self, address, func): """Removes a listener function for a given address Remove the listener for the given address. Returns true if the listener was found and removed, false otherwise """
# Use .get() so an address that was never registered returns False
# instead of raising a KeyError, as the docstring promises.
listeners = self.address_listeners.get(address)
if listeners is None:
    return False

if func in listeners:
    listeners.remove(func)
    return True

return False
<SYSTEM_TASK:> Process a message received from the KNX bus. <END_TASK> <USER_TASK:> Description: def received_message(self, address, data): """Process a message received from the KNX bus."""
self.value_cache.set(address, data)
if self.notify:
    self.notify(address, data)

try:
    listeners = self.address_listeners[address]
except KeyError:
    listeners = []

for listener in listeners:
    listener(address, data)
<SYSTEM_TASK:> Convert a float to a 2 byte KNX float value <END_TASK> <USER_TASK:> Description: def float_to_knx2(floatval): """Convert a float to a 2 byte KNX float value"""
if floatval < -671088.64 or floatval > 670760.96:
    raise KNXException("float {} out of valid range".format(floatval))

floatval = floatval * 100

i = 0
for i in range(0, 15):
    exp = pow(2, i)
    if ((floatval / exp) >= -2048) and ((floatval / exp) < 2047):
        break

if floatval < 0:
    sign = 1
    mantisse = int(2048 + (floatval / exp))
else:
    sign = 0
    mantisse = int(floatval / exp)

return [(sign << 7) + (i << 3) + (mantisse >> 8),
        mantisse & 0xff]
<SYSTEM_TASK:> Convert a KNX 2 byte float object to a float <END_TASK> <USER_TASK:> Description: def knx2_to_float(knxdata): """Convert a KNX 2 byte float object to a float"""
if len(knxdata) != 2:
    raise KNXException("Can only convert a 2 Byte object to float")

data = knxdata[0] * 256 + knxdata[1]
sign = data >> 15
exponent = (data >> 11) & 0x0f
mantisse = float(data & 0x7ff)
if sign == 1:
    mantisse = -2048 + mantisse

return mantisse * pow(2, exponent) / 100
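The two helpers are inverses up to the DPT 9 resolution of 0.01 at the chosen exponent; a quick round trip:

    raw = float_to_knx2(21.5)   # -> [0x0c, 0x33]: sign 0, exponent 1, mantissa 1075
    assert abs(knx2_to_float(raw) - 21.5) < 0.01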
<SYSTEM_TASK:> Converts a time and day-of-week to a KNX time object <END_TASK> <USER_TASK:> Description: def time_to_knx(timeval, dow=0): """Converts a time and day-of-week to a KNX time object"""
knxdata = [0, 0, 0]
knxdata[0] = ((dow & 0x07) << 5) + timeval.hour
knxdata[1] = timeval.minute
knxdata[2] = timeval.second

return knxdata
<SYSTEM_TASK:> Converts a KNX time to a tuple of a time object and the day of week <END_TASK> <USER_TASK:> Description: def knx_to_time(knxdata): """Converts a KNX time to a tuple of a time object and the day of week"""
if len(knxdata) != 3:
    raise KNXException("Can only convert a 3 Byte object to time")

dow = knxdata[0] >> 5
res = time(knxdata[0] & 0x1f, knxdata[1], knxdata[2])

return [res, dow]
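Round-tripping a time value with the day of week (here Monday = 1, following the KNX DPT 10 convention of packing the day into the top three bits of the hour byte):

    from datetime import time

    knx = time_to_knx(time(13, 30, 0), dow=1)   # -> [0x2d, 30, 0]
    restored, dow = knx_to_time(knx)
    assert restored == time(13, 30, 0) and dow == 1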
<SYSTEM_TASK:> Convert an 8 byte KNX time and date object to its components <END_TASK> <USER_TASK:> Description: def knx_to_datetime(knxdata): """Convert an 8 byte KNX time and date object to its components"""
if len(knxdata) != 8:
    raise KNXException("Can only convert an 8 Byte object to datetime")

year = knxdata[0] + 1900
month = knxdata[1]
day = knxdata[2]
hour = knxdata[3] & 0x1f
minute = knxdata[4]
second = knxdata[5]

return datetime(year, month, day, hour, minute, second)
<SYSTEM_TASK:> Set the group saturation. <END_TASK> <USER_TASK:> Description: def saturation(self, saturation): """ Set the group saturation. :param saturation: Saturation in decimal percent (0.0-1.0). """
if saturation < 0 or saturation > 1:
    raise ValueError("Saturation must be a percentage "
                     "represented as decimal 0-1.0")

self._saturation = saturation
self._update_color()

if saturation == 0:
    self.white()
else:
    cmd = self.command_set.saturation(saturation)
    self.send(cmd)
<SYSTEM_TASK:> Downloads a file from the given url, displays <END_TASK> <USER_TASK:> Description: def download_file_with_progress_bar(url): """Downloads a file from the given url, displays a progress bar. Returns a io.BytesIO object """
request = requests.get(url, stream=True)
if request.status_code == 404:
    msg = ('there was a 404 error trying to reach {} \nThis probably '
           'means the requested version does not exist.'.format(url))
    logger.error(msg)
    sys.exit()

total_size = int(request.headers["Content-Length"])
chunk_size = 1024
bars = int(total_size / chunk_size)

bytes_io = io.BytesIO()
pbar = tqdm(request.iter_content(chunk_size=chunk_size), total=bars,
            unit="kb", leave=False)
for chunk in pbar:
    bytes_io.write(chunk)
return bytes_io
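Typical usage, writing the returned buffer to disk (the URL and filename here are illustrative, not real endpoints):

    # Hypothetical URL for illustration only
    data = download_file_with_progress_bar('https://example.com/drivers/chromedriver.zip')
    with open('chromedriver.zip', 'wb') as f:
        f.write(data.getvalue())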
<SYSTEM_TASK:> Remove driver executables from the specified outputdir. <END_TASK> <USER_TASK:> Description: def clean(outputdir, drivers=None): """Remove driver executables from the specified outputdir. drivers can be a list of drivers to filter which executables to remove. Specify a version using an equal sign i.e.: 'chrome=2.2' """
if drivers:
    # Generate a list of tuples: [(driver_name, requested_version)]
    # If driver string does not contain a version, the second element
    # of the tuple is None.
    # Example:
    #   [('driver_a', '2.2'), ('driver_b', None)]
    drivers_split = [helpers.split_driver_name_and_version(x)
                     for x in drivers]
    file_data = [(helpers.normalize_driver_name(x[0]), x[1])
                 for x in drivers_split]
else:
    file_data = [(x, None) for x in config.ALL_DRIVERS]

files = [file for file in os.listdir(outputdir)
         if os.path.isfile(os.path.join(outputdir, file))]
for file in files:
    for data in file_data:
        prefix, version = data
        starts_with = file.startswith(prefix)
        version_match = 'N/A'
        if version is not None:
            file_version = helpers.extract_version_from_filename(file)
            if file_version == version:
                version_match = True
            else:
                version_match = False
        if starts_with and version_match in [True, 'N/A']:
            filepath = os.path.join(outputdir, file)
            try:
                os.remove(filepath)
            except OSError:
                pass
            finally:
                logger.info('removed {}'.format(file))
            break
<SYSTEM_TASK:> `cart` - Release cart to push items from <END_TASK> <USER_TASK:> Description: def push(self, cart, env=None, callback=None): """ `cart` - Release cart to push items from `callback` - Optional callback to call if juicer.utils.upload_rpm succeeds Pushes the items in a release cart to the pre-release environment. """
juicer.utils.Log.log_debug("Initializing push of cart '%s'" % cart.cart_name) if not env: env = self._defaults['start_in'] cart.current_env = env self.sign_cart_for_env_maybe(cart, env) self.upload(env, cart, callback) return True
<SYSTEM_TASK:> `cart` - Release cart to publish in json format <END_TASK> <USER_TASK:> Description: def publish(self, cart, env=None): """ `cart` - Release cart to publish in json format Publish a release cart in JSON format to the pre-release environment. """
juicer.utils.Log.log_debug("Initializing publish of cart '%s'" % cart.cart_name) if not env: env = self._defaults['start_in'] cart_id = juicer.utils.upload_cart(cart, env) juicer.utils.Log.log_debug('%s uploaded with an id of %s' % (cart.cart_name, cart_id)) return True
<SYSTEM_TASK:> `cart_name` - Name of this release cart <END_TASK> <USER_TASK:> Description: def create_manifest(self, cart_name, manifests): """ `cart_name` - Name of this release cart `manifests` - a list of manifest files """
cart = juicer.common.Cart.Cart(cart_name)

for manifest in manifests:
    cart.add_from_manifest(manifest, self.connectors)

cart.save()
return cart
<SYSTEM_TASK:> search for a package stored in a pulp repo <END_TASK> <USER_TASK:> Description: def search(self, pkg_name=None, search_carts=False, query='/content/units/rpm/search/'): """ search for a package stored in a pulp repo `pkg_name` - substring in the name of the package `search_carts` - whether or not to return carts that include the listed package """
# this data block is... yeah. searching in pulp v2 is painful
#
# https://pulp-dev-guide.readthedocs.org/en/latest/rest-api/content/retrieval.html#search-for-units
# https://pulp-dev-guide.readthedocs.org/en/latest/rest-api/conventions/criteria.html#search-criteria
#
# those are the API docs for searching
data = {
    'criteria': {
        'filters': {'filename': {'$regex': ".*%s.*" % pkg_name}},
        'sort': [['name', 'ascending']],
        'fields': ['name', 'description', 'version', 'release',
                   'arch', 'filename']
    },
    'include_repos': 'true'
}

repos = []

juicer.utils.Log.log_info('Packages:')
for env in self.args.environment:
    juicer.utils.Log.log_debug("Querying %s server" % env)
    _r = self.connectors[env].post(query, data)

    if not _r.status_code == Constants.PULP_POST_OK:
        juicer.utils.Log.log_debug("Expected PULP_POST_OK, got %s",
                                   _r.status_code)
        _r.raise_for_status()

    juicer.utils.Log.log_info('%s:' % str.upper(env))

    pkg_list = juicer.utils.load_json_str(_r.content)

    for package in pkg_list:
        # if the package is in a repo, show a link to the package
        # in said repo; otherwise, show nothing
        if len(package['repository_memberships']) > 0:
            target = package['repository_memberships'][0]

            _r = self.connectors[env].get('/repositories/%s/' % target)
            if not _r.status_code == Constants.PULP_GET_OK:
                raise JuicerPulpError("%s was not found as a repoid. "
                                      "A %s status code was returned" %
                                      (target, _r.status_code))
            repo = juicer.utils.load_json_str(_r.content)['display_name']
            repos.append(repo)

            link = juicer.utils.remote_url(self.connectors[env], env,
                                           repo, package['filename'])
        else:
            link = ''

        juicer.utils.Log.log_info('%s\t%s\t%s\t%s' %
                                  (package['name'], package['version'],
                                   package['release'], link))

if search_carts:
    # if the package is in a cart, show the cart name
    juicer.utils.Log.log_info('\nCarts:')

    for env in self.args.environment:
        carts = juicer.utils.search_carts(env, pkg_name, repos)
        for cart in carts:
            juicer.utils.Log.log_info(cart['_id'])
<SYSTEM_TASK:> `carts` - A list of cart names <END_TASK> <USER_TASK:> Description: def merge(self, carts=None, new_cart_name=None): """ `carts` - A list of cart names `new_cart_name` - Resultant cart name Merge the contents of N carts into a new cart TODO: Sanity check that each cart in `carts` exists. Try 'juicer pull'ing carts that can't be located locally. Then cry like a baby and error out. """
if new_cart_name is not None:
    cart_name = new_cart_name
else:
    cart_name = carts[0]

result_cart = juicer.common.Cart.Cart(cart_name)
items_hash = {}

for cart in carts:
    # 1. Grab items from each cart and shove them into result_cart
    tmpcart = juicer.common.Cart.Cart(cart, autoload=True)
    for repo, items in tmpcart.iterrepos():
        if str(repo) in [str(key) for key in items_hash.keys()]:
            items_hash[str(repo)] += [str(item) for item in items]
        else:
            items_hash[str(repo)] = [str(item) for item in items]

# 2. Remove duplicates
for key in items_hash.keys():
    items_hash[key] = list(set(items_hash[key]))
    # 3. Wrap it up
    result_cart[key] = items_hash[key]

result_cart.save()
# You can not fail at merging carts?
return True
<SYSTEM_TASK:> `cartname` - Name of cart <END_TASK> <USER_TASK:> Description: def pull(self, cartname=None, env=None): """ `cartname` - Name of cart Pull remote cart from the pre-release (base) environment """
if not env:
    env = self._defaults['start_in']

juicer.utils.Log.log_debug("Initializing pulling cart: %s ...", cartname)

cart_file = os.path.join(juicer.common.Cart.CART_LOCATION, cartname)
cart_file += '.json'

cart_check = juicer.utils.download_cart(cartname, env)
if cart_check is None:
    print 'error: cart \'%s\' does not exist' % cartname
    return None
else:
    # reuse the cart we already downloaded instead of fetching it again
    juicer.utils.write_json_document(cart_file, cart_check)
    return cart_check
<SYSTEM_TASK:> `name` - name of cart <END_TASK> <USER_TASK:> Description: def promote(self, cart_name): """ `name` - name of cart Promote a cart from its current environment to the next in the chain. """
cart = juicer.common.Cart.Cart(cart_name=cart_name, autoload=True,
                               autosync=True)

old_env = cart.current_env

cart.current_env = juicer.utils.get_next_environment(cart.current_env)

# figure out what needs to be done to promote packages. If
# packages are going between environments that are on the same
# host and we don't need to sign them just associate with both
# repos.
if juicer.utils.env_same_host(old_env, cart.current_env) and \
        (self.connectors[old_env].requires_signature ==
         self.connectors[cart.current_env].requires_signature):
    juicer.utils.Log.log_info("Envs %s and %s exist on the same host, "
                              "calling remote associate action" %
                              (old_env, cart.current_env))
    juicer.utils.Log.log_info("Promoting %s from %s to %s" %
                              (cart_name, old_env, cart.current_env))

    # iterate through packages and associate to new repo
    for repo, items in cart.iterrepos():
        query = '/repositories/%s-%s/actions/associate/' % (repo, cart.current_env)

        for item in items:
            source_repo_id = '%s-%s' % (repo, old_env)
            data = {
                'source_repo_id': str(source_repo_id),
                'criteria': {
                    'type_ids': ['rpm'],
                    'filters': {
                        'unit': {
                            'filename': str(item.path.split('/')[-1])
                        }
                    }
                }
            }

            _r = self.connectors[cart.current_env].post(query, data)
            if _r.status_code != Constants.PULP_POST_ACCEPTED:
                raise JuicerPulpError("Package association call was not "
                                      "accepted. Terminating!")
            else:
                # association was accepted so publish destination repo
                con = self.connectors[cart.current_env]
                con.post('/repositories/%s-%s/actions/publish/' %
                         (repo, cart.current_env),
                         {'id': 'yum_distributor'})

                # also update the item's remote path
                filename = item.path.split('/')[-1]
                item.update('%s/%s' % (
                    juicer.utils.pulp_repo_path(
                        con, '%s-%s' % (repo, cart.current_env)),
                    filename))

    # we didn't bomb out yet so let the user know what's up
    juicer.utils.Log.log_info("Package association calls were accepted. "
                              "Trusting that your packages existed in %s" %
                              old_env)

    # we can save and publish here because upload does this too...
    cart.save()
    self.publish(cart)
else:
    juicer.utils.Log.log_debug("Syncing down rpms...")
    cart.sync_remotes()
    self.sign_cart_for_env_maybe(cart, cart.current_env)

    juicer.utils.Log.log_info("Promoting %s from %s to %s" %
                              (cart_name, old_env, cart.current_env))
    for repo in cart.repos():
        juicer.utils.Log.log_debug("Promoting %s to %s in %s" %
                                   (cart[repo], repo, cart.current_env))

    # reiterating that upload will save and publish the cart
    self.upload(cart.current_env, cart)
<SYSTEM_TASK:> Sign the items to upload, if the env requires a signature. <END_TASK> <USER_TASK:> Description: def sign_cart_for_env_maybe(self, cart, env=None): """ Sign the items to upload, if the env requires a signature. `cart` - Cart to sign `env` - The cart is signed if env has the property: requires_signature = True Will attempt to load the rpm_sign_plugin defined in ~/.config/juicer/config, which must be a plugin inheriting from juicer.common.RpmSignPlugin. If available, we'll call cart.sign_items() with a reference to the rpm_sign_plugin.sign_rpms method. """
if self.connectors[env].requires_signature:
    cart.sync_remotes(force=True)
    juicer.utils.Log.log_notice("%s requires RPM signatures", env)
    juicer.utils.Log.log_notice("Checking for rpm_sign_plugin definition ...")

    module_name = self._defaults['rpm_sign_plugin']

    if self._defaults['rpm_sign_plugin']:
        juicer.utils.Log.log_notice("Found rpm_sign_plugin definition: %s",
                                    self._defaults['rpm_sign_plugin'])
        juicer.utils.Log.log_notice("Attempting to load ...")

        try:
            rpm_sign_plugin = __import__(module_name, fromlist=[module_name])
            juicer.utils.Log.log_notice("Successfully loaded %s ...",
                                        module_name)
            plugin_object = getattr(rpm_sign_plugin,
                                    module_name.split('.')[-1])
            signer = plugin_object()
            cart.sign_items(signer.sign_rpms)
        except ImportError as e:
            juicer.utils.Log.log_notice("there was a problem using %s ... "
                                        "error: %s", module_name, e)
            raise JuicerRpmSignPluginError(
                "could not load rpm_sign_plugin: %s; "
                "additional information: %s" % (module_name, e))

        if not juicer.utils.rpms_signed_p([item.path for item in cart.items()]):
            raise JuicerNotSignedError('RPMs have not been signed.')
    else:
        raise JuicerConfigError("Did not find an rpm_sign_plugin in config "
                                "file but the %s environment requires "
                                "signed RPMs." % env)
    return True
else:
    return None
<SYSTEM_TASK:> `repo` - Repo name. <END_TASK> <USER_TASK:> Description: def publish_repo(self, repo, env): """ `repo` - Repo name. `env` - Environment. Publish a repository. This action regenerates metadata. """
_r = self.connectors[env].post('/repositories/%s-%s/actions/publish/' %
                               (repo, env), {'id': 'yum_distributor'})
if _r.status_code != Constants.PULP_POST_ACCEPTED:
    _r.raise_for_status()
else:
    juicer.utils.Log.log_info("`%s` published in `%s`" % (repo, env))
<SYSTEM_TASK:> `repo_name` - name of the repository to prune <END_TASK> <USER_TASK:> Description: def prune_repo(self, repo_name=None, daycount=None, envs=[], query='/repositories/'): """ `repo_name` - name of the repository to prune """
orphan_query = '/content/orphans/rpm/'

t = datetime.datetime.now() - datetime.timedelta(days=daycount)

juicer.utils.Log.log_debug("Prune Repo: %s", repo_name)
juicer.utils.Log.log_debug("Pruning packages created before %s" % t)

for env in self.args.envs:
    if not juicer.utils.repo_exists_p(repo_name, self.connectors[env], env):
        juicer.utils.Log.log_info("repo `%s` doesn't exist in %s... skipping!",
                                  (repo_name, env))
        continue
    else:
        url = "%s%s-%s/actions/unassociate/" % (query, repo_name, env)
        # FIXME: this should be at least 90 days ago
        data = {'created': t.strftime('%Y-%m-%d%H:%M:%S')}
        juicer.utils.Log.log_debug("unassociate data: %s" % data)
        _r = self.connectors[env].post(url, data)

        if _r.status_code == Constants.PULP_POST_ACCEPTED:
            pub_query = '/repositories/%s-%s/actions/publish/' % (repo_name, env)
            pub_data = {'id': 'yum_distributor'}
            _r = self.connectors[env].post(pub_query, pub_data)
            if _r.status_code == Constants.PULP_POST_ACCEPTED:
                juicer.utils.Log.log_info("pruned repo `%s` in %s",
                                          repo_name, env)
        else:
            _r.raise_for_status()

        # after pruning, remove orphaned packages
        _r = self.connectors[env].get(orphan_query)
        if _r.status_code == Constants.PULP_POST_OK:
            if len(juicer.utils.load_json_str(_r.content)) > 0:
                __r = self.connectors[env].delete(orphan_query)
                if __r.status_code == Constants.PULP_DELETE_ACCEPTED:
                    juicer.utils.Log.log_debug(
                        "deleted orphaned rpms in %s." % env)
                else:
                    juicer.utils.Log.log_error(
                        "unable to delete orphaned rpms in %s. "
                        "a %s error was returned", (env, __r.status_code))
        else:
            juicer.utils.Log.log_error("unable to get a list of orphaned "
                                       "rpms. encountered a %s error." %
                                       _r.status_code)
<SYSTEM_TASK:> `cartname` - name of the cart to delete <END_TASK> <USER_TASK:> Description: def delete(self, cartname): """ `cartname` - name of the cart to delete Delete a cart both from your local filesystem and the mongo database """
cart = juicer.common.Cart.Cart(cart_name=cartname) cart.implode(self._defaults['start_in'])
<SYSTEM_TASK:> Reset the instance <END_TASK> <USER_TASK:> Description: def reset(self): """Reset the instance - reset rows and header """
self._hline_string = None self._row_size = None self._header = [] self._rows = []
<SYSTEM_TASK:> Set the desired columns width <END_TASK> <USER_TASK:> Description: def set_cols_width(self, array): """Set the desired columns width - the elements of the array should be integers, specifying the width of each column. For example: [10, 20, 5] """
self._check_row_size(array)
try:
    array = map(int, array)
    if min(array) <= 0:
        raise ValueError
except ValueError:
    sys.stderr.write("Wrong argument in column width specification\n")
    raise
self._width = array
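A short usage sketch, assuming these methods belong to texttable's Texttable class (which the surrounding helpers suggest):

table = Texttable()
table.set_cols_width([10, 20, 5])   # three columns: 10, 20 and 5 chars wide
# every later header or row must then contain exactly three cells,
# otherwise _check_row_size() raises ArraySizeError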
<SYSTEM_TASK:> Check that the specified array fits the previous rows size <END_TASK> <USER_TASK:> Description: def _check_row_size(self, array): """Check that the specified array fits the previous rows size """
if self._row_size is None:
    self._row_size = len(array)
elif self._row_size != len(array):
    raise ArraySizeError("array should contain %d elements"
                         % self._row_size)
<SYSTEM_TASK:> Check if alignment has been specified, set default one if not <END_TASK> <USER_TASK:> Description: def _check_align(self): """Check if alignment has been specified, set default one if not """
if not hasattr(self, "_align"): self._align = ["l"]*self._row_size if not hasattr(self, "_valign"): self._valign = ["t"]*self._row_size
<SYSTEM_TASK:> Transition between two values. <END_TASK> <USER_TASK:> Description: def transition(value, maximum, start, end): """ Transition between two values. :param value: Current iteration. :param maximum: Maximum number of iterations. :param start: Start value. :param end: End value. :returns: Transitional value. """
# float() guards against integer truncation under Python 2
return round(start + (end - start) * float(value) / maximum, 2)
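Two worked values for transition():

print transition(5, 10, 0.0, 1.0)   # halfway from 0.0 to 1.0 -> 0.5
print transition(2, 4, 10, 20)      # two of four steps from 10 to 20 -> 15.0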
<SYSTEM_TASK:> Steps between two values. <END_TASK> <USER_TASK:> Description: def steps(current, target, max_steps): """ Steps between two values. :param current: Current value (0.0-1.0). :param target: Target value (0.0-1.0). :param max_steps: Maximum number of steps. """
if current < 0 or current > 1.0:
    raise ValueError("current value %s is out of bounds (0.0-1.0)" % current)
if target < 0 or target > 1.0:
    raise ValueError("target value %s is out of bounds (0.0-1.0)" % target)
return int(abs((current * max_steps) - (target * max_steps)))
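And a couple of worked values for steps():

print steps(0.2, 0.8, 10)    # |2 - 8| -> 6 steps
print steps(1.0, 0.0, 255)   # a full fade across 255 steps -> 255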
<SYSTEM_TASK:> Check if email exists on Stormpath. <END_TASK> <USER_TASK:> Description: def clean_email(self): """Check if email exists on Stormpath. The email address is unique across all Stormpath applications. The username is only unique within a Stormpath application. """
try: accounts = APPLICATION.accounts.search({'email': self.cleaned_data['email']}) if len(accounts): msg = 'User with that email already exists.' raise forms.ValidationError(msg) except Error as e: raise forms.ValidationError(str(e)) return self.cleaned_data['email']
<SYSTEM_TASK:> Check if passwords match and are valid. <END_TASK> <USER_TASK:> Description: def clean_new_password2(self): """Check if passwords match and are valid."""
password1 = self.cleaned_data.get('new_password1') password2 = self.cleaned_data.get('new_password2') try: directory = APPLICATION.default_account_store_mapping.account_store directory.password_policy.strength.validate_password(password2) except ValueError as e: raise forms.ValidationError(str(e)) if password1 and password2: if password1 != password2: raise forms.ValidationError("The two passwords didn't match.") return password2
<SYSTEM_TASK:> Helper function for creating a provider directory <END_TASK> <USER_TASK:> Description: def create_provider_directory(provider, redirect_uri): """Helper function for creating a provider directory"""
directory = CLIENT.directories.create({
    'name': APPLICATION.name + '-' + provider,
    'provider': {
        'client_id': settings.STORMPATH_SOCIAL[provider.upper()]['client_id'],
        'client_secret': settings.STORMPATH_SOCIAL[provider.upper()]['client_secret'],
        'redirect_uri': redirect_uri,
        'provider_id': provider,
    },
})

APPLICATION.account_store_mappings.create({
    'application': APPLICATION,
    'account_store': directory,
    'list_index': 99,
    'is_default_account_store': False,
    'is_default_group_store': False,
})
<SYSTEM_TASK:> Draws the invoice header <END_TASK> <USER_TASK:> Description: def draw_header(canvas): """ Draws the invoice header """
canvas.setStrokeColorRGB(0.9, 0.5, 0.2) canvas.setFillColorRGB(0.2, 0.2, 0.2) canvas.setFont('Helvetica', 16) canvas.drawString(18 * cm, -1 * cm, 'Invoice') canvas.drawInlineImage(settings.INV_LOGO, 1 * cm, -1 * cm, 250, 16) canvas.setLineWidth(4) canvas.line(0, -1.25 * cm, 21.7 * cm, -1.25 * cm)
<SYSTEM_TASK:> Draws the business address <END_TASK> <USER_TASK:> Description: def draw_address(canvas): """ Draws the business address """
business_details = (
    u'COMPANY NAME LTD',
    u'STREET',
    u'TOWN',
    u'COUNTY',
    u'POSTCODE',
    u'COUNTRY',
    u'',
    u'',
    u'Phone: +00 (0) 000 000 000',
    u'Email: [email protected]',
    u'Website: www.example.com',
    u'Reg No: 00000000'
)
canvas.setFont('Helvetica', 9)
textobject = canvas.beginText(13 * cm, -2.5 * cm)
for line in business_details:
    textobject.textLine(line)
canvas.drawText(textobject)
<SYSTEM_TASK:> Draws the invoice footer <END_TASK> <USER_TASK:> Description: def draw_footer(canvas): """ Draws the invoice footer """
note = ( u'Bank Details: Street address, Town, County, POSTCODE', u'Sort Code: 00-00-00 Account No: 00000000 (Quote invoice number).', u'Please pay via bank transfer or cheque. All payments should be made in CURRENCY.', u'Make cheques payable to Company Name Ltd.', ) textobject = canvas.beginText(1 * cm, -27 * cm) for line in note: textobject.textLine(line) canvas.drawText(textobject)
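A minimal sketch of how the three drawing helpers could be combined into a one-page PDF. The negative y coordinates above imply the origin sits at the top-left corner, so the sketch translates it there first; the output filename and A4 page size are assumptions, and draw_header() additionally needs a Django settings module that defines INV_LOGO:

from reportlab.lib.pagesizes import A4
from reportlab.lib.units import cm
from reportlab.pdfgen.canvas import Canvas

c = Canvas('invoice.pdf', pagesize=A4)
c.translate(0, 29.7 * cm)   # move origin to top-left; negative y then grows down the page
draw_header(c)
draw_address(c)
draw_footer(c)
c.showPage()
c.save()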
<SYSTEM_TASK:> Return man page content for the given `cmdln.Cmdln` subclass name. <END_TASK> <USER_TASK:> Description: def mkmanpage(name): """Return man page content for the given `cmdln.Cmdln` subclass name."""
mod_name, class_name = name.rsplit('.', 1)
# fromlist makes __import__ return the leaf module even for dotted paths
mod = __import__(mod_name, fromlist=[class_name])
inst = getattr(mod, class_name)()
sections = cmdln.man_sections_from_cmdln(inst)
sys.stdout.write(''.join(sections))
<SYSTEM_TASK:> Put files and directories under version control, scheduling <END_TASK> <USER_TASK:> Description: def do_add(self, subcmd, opts, *args): """Put files and directories under version control, scheduling them for addition to repository. They will be added in next commit. usage: add PATH... ${cmd_option_list} """
print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args)
<SYSTEM_TASK:> Output the content of specified files or <END_TASK> <USER_TASK:> Description: def do_blame(self, subcmd, opts, *args): """Output the content of specified files or URLs with revision and author information in-line. usage: blame TARGET... ${cmd_option_list} """
print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args)
<SYSTEM_TASK:> Output the content of specified files or URLs. <END_TASK> <USER_TASK:> Description: def do_cat(self, subcmd, opts, *args): """Output the content of specified files or URLs. usage: cat TARGET... ${cmd_option_list} """
print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args)
<SYSTEM_TASK:> Check out a working copy from a repository. <END_TASK> <USER_TASK:> Description: def do_checkout(self, subcmd, opts, *args): """Check out a working copy from a repository. usage: checkout URL... [PATH] Note: If PATH is omitted, the basename of the URL will be used as the destination. If multiple URLs are given each will be checked out into a sub-directory of PATH, with the name of the sub-directory being the basename of the URL. ${cmd_option_list} """
print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args)
<SYSTEM_TASK:> Recursively clean up the working copy, removing locks, resuming <END_TASK> <USER_TASK:> Description: def do_cleanup(self, subcmd, opts, *args): """Recursively clean up the working copy, removing locks, resuming unfinished operations, etc. usage: cleanup [PATH...] ${cmd_option_list} """
print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args)
<SYSTEM_TASK:> Send changes from your working copy to the repository. <END_TASK> <USER_TASK:> Description: def do_commit(self, subcmd, opts, *args): """Send changes from your working copy to the repository. usage: commit [PATH...] A log message must be provided, but it can be empty. If it is not given by a --message or --file option, an editor will be started. ${cmd_option_list} """
print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args)
<SYSTEM_TASK:> Duplicate something in working copy or repository, remembering history. <END_TASK> <USER_TASK:> Description: def do_copy(self, subcmd, opts, *args): """Duplicate something in working copy or repository, remembering history. usage: copy SRC DST SRC and DST can each be either a working copy (WC) path or URL: WC -> WC: copy and schedule for addition (with history) WC -> URL: immediately commit a copy of WC to URL URL -> WC: check out URL into WC, schedule for addition URL -> URL: complete server-side copy; used to branch & tag ${cmd_option_list} """
print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args)
<SYSTEM_TASK:> Remove files and directories from version control. <END_TASK> <USER_TASK:> Description: def do_delete(self, subcmd, opts, *args): """Remove files and directories from version control. usage: 1. delete PATH... 2. delete URL... 1. Each item specified by a PATH is scheduled for deletion upon the next commit. Files, and directories that have not been committed, are immediately removed from the working copy. PATHs that are, or contain, unversioned or modified items will not be removed unless the --force option is given. 2. Each item specified by a URL is deleted from the repository via an immediate commit. ${cmd_option_list} """
print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args)
<SYSTEM_TASK:> Display the differences between two paths. <END_TASK> <USER_TASK:> Description: def do_diff(self, subcmd, opts, *args): """Display the differences between two paths. usage: 1. diff [-r N[:M]] [TARGET[@REV]...] 2. diff [-r N[:M]] --old=OLD-TGT[@OLDREV] [--new=NEW-TGT[@NEWREV]] \ [PATH...] 3. diff OLD-URL[@OLDREV] NEW-URL[@NEWREV] 1. Display the changes made to TARGETs as they are seen in REV between two revisions. TARGETs may be working copy paths or URLs. N defaults to BASE if any TARGET is a working copy path, otherwise it must be specified. M defaults to the current working version if any TARGET is a working copy path, otherwise it defaults to HEAD. 2. Display the differences between OLD-TGT as it was seen in OLDREV and NEW-TGT as it was seen in NEWREV. PATHs, if given, are relative to OLD-TGT and NEW-TGT and restrict the output to differences for those paths. OLD-TGT and NEW-TGT may be working copy paths or URL[@REV]. NEW-TGT defaults to OLD-TGT if not specified. -r N makes OLDREV default to N, -r N:M makes OLDREV default to N and NEWREV default to M. 3. Shorthand for 'svn diff --old=OLD-URL[@OLDREV] --new=NEW-URL[@NEWREV]' Use just 'svn diff' to display local modifications in a working copy. ${cmd_option_list} """
print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args)
<SYSTEM_TASK:> Commit an unversioned file or tree into the repository. <END_TASK> <USER_TASK:> Description: def do_import(self, subcmd, opts, *args): """Commit an unversioned file or tree into the repository. usage: import [PATH] URL Recursively commit a copy of PATH to URL. If PATH is omitted '.' is assumed. Parent directories are created as necessary in the repository. ${cmd_option_list} """
print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args)
<SYSTEM_TASK:> Display information about a file or directory. <END_TASK> <USER_TASK:> Description: def do_info(self, subcmd, opts, *args): """Display information about a file or directory. usage: info [PATH...] Print information about each PATH (default: '.'). ${cmd_option_list} """
print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args)