<SYSTEM_TASK:> Returns the first function passed as an argument to the second, <END_TASK> <USER_TASK:> Description: def wrap(self, wrapper): """ Returns the first function passed as an argument to the second, allowing you to adjust arguments, run code before and after, and conditionally execute the original function. """
def wrapped(*args, **kwargs):
    if kwargs:
        kwargs["object"] = self.obj
    else:
        args = list(args)
        args.insert(0, self.obj)
    return wrapper(*args, **kwargs)

return self._wrap(wrapped)
<SYSTEM_TASK:> Returns a function that is the composition of a list of functions, each <END_TASK> <USER_TASK:> Description: def compose(self, *args): """ Returns a function that is the composition of a list of functions, each consuming the return value of the function that follows. """
args = list(args)

def composed(*ar, **kwargs):
    lastRet = self.obj(*ar, **kwargs)
    for i in args:
        lastRet = i(lastRet)
    return lastRet

return self._wrap(composed)
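A standalone sketch of the same composition semantics, independent of the wrapper class (the names here are illustrative only): the wrapped callable runs first, then each function in `args` consumes the previous return value in order.

def compose_plain(first, *rest):
    # Apply `first`, then pipe the result through `rest` left to right.
    def composed(*args):
        result = first(*args)
        for fn in rest:
            result = fn(result)
        return result
    return composed

shout = compose_plain(str.strip, str.upper, lambda s: s + "!")
print(shout("  hello "))  # HELLO!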
<SYSTEM_TASK:> Returns a function that will only be executed after being <END_TASK> <USER_TASK:> Description: def after(self, func): """ Returns a function that will only be executed after being called N times. """
ns = self.Namespace()
ns.times = self.obj
if ns.times <= 0:
    return func()

def work_after(*args):
    if ns.times <= 1:
        return func(*args)
    ns.times -= 1

return self._wrap(work_after)
<SYSTEM_TASK:> Invert the keys and values of an object. <END_TASK> <USER_TASK:> Description: def invert(self): """ Invert the keys and values of an object. The values must be serializable. """
keys = self._clean.keys()
inverted = {}
for key in keys:
    inverted[self.obj[key]] = key
return self._wrap(inverted)
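The inversion above assumes the values are hashable; when values repeat, the last key seen wins. A quick standalone illustration of that behavior (hypothetical data, no wrapper involved):

obj = {"Moe": "Moses", "Larry": "Louis", "Curly": "Jerome"}
inverted = dict((v, k) for k, v in obj.items())
print(inverted)  # {'Moses': 'Moe', 'Louis': 'Larry', 'Jerome': 'Curly'}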
<SYSTEM_TASK:> Return a sorted list of the function names available on the object. <END_TASK> <USER_TASK:> Description: def functions(self): """ Return a sorted list of the function names available on the object. """
names = []
for i, k in enumerate(self.obj):
    if _(self.obj[k]).isCallable():
        names.append(k)
return self._wrap(sorted(names))
<SYSTEM_TASK:> Return a copy of the object only containing the <END_TASK> <USER_TASK:> Description: def pick(self, *args): """ Return a copy of the object only containing the whitelisted properties. """
ns = self.Namespace()
ns.result = {}

def by(key, *args):
    if key in self.obj:
        ns.result[key] = self.obj[key]

_.each(self._flatten(args, True, []), by)
return self._wrap(ns.result)
<SYSTEM_TASK:> Fill in a given object with default properties. <END_TASK> <USER_TASK:> Description: def defaults(self, *args): """ Fill in a given object with default properties. """
ns = self.Namespace
ns.obj = self.obj

def by(source, *ar):
    for i, prop in enumerate(source):
        if prop not in ns.obj:
            ns.obj[prop] = source[prop]

_.each(args, by)
return self._wrap(ns.obj)
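A standalone illustration of the fill-in semantics with hypothetical data: keys already present win, missing keys are copied from each defaults dict in turn.

iceCream = {"flavor": "chocolate"}
filled = dict(iceCream)
for source in ({"flavor": "vanilla", "sprinkles": "lots"},):
    for key, value in source.items():
        filled.setdefault(key, value)  # only fills keys that are missing
print(filled)  # {'flavor': 'chocolate', 'sprinkles': 'lots'}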
<SYSTEM_TASK:> Invokes interceptor with the obj, and then returns obj. <END_TASK> <USER_TASK:> Description: def tap(self, interceptor): """ Invokes interceptor with the obj, and then returns obj. The primary purpose of this method is to "tap into" a method chain, in order to perform operations on intermediate results within the chain. """
interceptor(self.obj)
return self._wrap(self.obj)
<SYSTEM_TASK:> Is a given array, string, or object empty? <END_TASK> <USER_TASK:> Description: def isEmpty(self): """ Is a given array, string, or object empty? An "empty" object has no enumerable own-properties. """
if self.obj is None:
    return True

if self._clean.isString():
    ret = self.obj.strip() == ""
elif self._clean.isDict():
    ret = len(self.obj.keys()) == 0
else:
    ret = len(self.obj) == 0

return self._wrap(ret)
<SYSTEM_TASK:> Check if the given object is a file <END_TASK> <USER_TASK:> Description: def isFile(self): """ Check if the given object is a file """
try:
    filetype = file
except NameError:
    filetype = io.IOBase

return self._wrap(type(self.obj) is filetype)
<SYSTEM_TASK:> Javascript's join implementation <END_TASK> <USER_TASK:> Description: def join(self, glue=" "): """ Javascript's join implementation """
j = glue.join([str(x) for x in self.obj])
return self._wrap(j)
<SYSTEM_TASK:> If the value of the named property is a function then invoke it; <END_TASK> <USER_TASK:> Description: def result(self, property, *args): """ If the value of the named property is a function then invoke it; otherwise, return it. """
if self.obj is None:
    return self._wrap(self.obj)

if hasattr(self.obj, property):
    value = getattr(self.obj, property)
else:
    value = self.obj.get(property)

if _.isCallable(value):
    return self._wrap(value(*args))

return self._wrap(value)
<SYSTEM_TASK:> Add your own custom functions to the Underscore object, ensuring that <END_TASK> <USER_TASK:> Description: def mixin(self): """ Add your own custom functions to the Underscore object, ensuring that they're correctly added to the OOP wrapper as well. """
methods = self.obj
for i, k in enumerate(methods):
    setattr(underscore, k, methods[k])

self.makeStatic()
return self._wrap(self.obj)
<SYSTEM_TASK:> Escape a string for HTML interpolation. <END_TASK> <USER_TASK:> Description: def escape(self): """ Escape a string for HTML interpolation. """
# & must be handled first
self.obj = self.obj.replace("&", self._html_escape_table["&"])
for i, k in enumerate(self._html_escape_table):
    v = self._html_escape_table[k]
    if k != "&":
        self.obj = self.obj.replace(k, v)
return self._wrap(self.obj)
<SYSTEM_TASK:> Within an interpolation, evaluation, or escaping, remove HTML escaping <END_TASK> <USER_TASK:> Description: def unescape(self): """ Within an interpolation, evaluation, or escaping, remove HTML escaping that had been previously added. """
for i, k in enumerate(self._html_escape_table):
    v = self._html_escape_table[k]
    self.obj = self.obj.replace(v, k)
return self._wrap(self.obj)
<SYSTEM_TASK:> returns the object instead of instance <END_TASK> <USER_TASK:> Description: def value(self): """ returns the object instead of instance """
if self._wrapped is not self.Null:
    return self._wrapped
else:
    return self.obj
<SYSTEM_TASK:> Provide static access to underscore class <END_TASK> <USER_TASK:> Description: def makeStatic(): """ Provide static access to underscore class """
p = lambda value: inspect.ismethod(value) or inspect.isfunction(value)
for eachMethod in inspect.getmembers(underscore, predicate=p):
    m = eachMethod[0]
    if not hasattr(_, m):
        def caller(a):
            def execute(*args):
                if len(args) == 1:
                    r = getattr(underscore(args[0]), a)()
                elif len(args) > 1:
                    rargs = args[1:]
                    r = getattr(underscore(args[0]), a)(*rargs)
                else:
                    r = getattr(underscore([]), a)()
                return r
            return execute
        _.__setattr__(m, caller(m))

# put the class itself as a parameter so that we can use it on outside
_.__setattr__("underscore", underscore)
_.templateSettings = {}
<SYSTEM_TASK:> Initialise and configure the app, database, scheduler, etc. <END_TASK> <USER_TASK:> Description: def init(): """Initialise and configure the app, database, scheduler, etc. This should be called once at application startup or at tests startup (and not e.g. called once for each test case). """
global _users, _names
_configure_app(app)
_users, _names = _init_login_manager(app)
_configure_logger()
init_scheduler(app.config.get('SQLALCHEMY_DATABASE_URI'))
db.init(app.config.get('SQLALCHEMY_DATABASE_URI'))
<SYSTEM_TASK:> Initialise and configure the login manager. <END_TASK> <USER_TASK:> Description: def _init_login_manager(app_): """Initialise and configure the login manager."""
login_manager = flogin.LoginManager()
login_manager.setup_app(app_)
login_manager.anonymous_user = Anonymous
login_manager.login_view = "login"

users = {app_.config['USERNAME']: User('Admin', 0)}
names = dict((int(v.get_id()), k) for k, v in users.items())

@login_manager.user_loader
def load_user(userid):
    userid = int(userid)
    name = names.get(userid)
    return users.get(name)

return users, names
<SYSTEM_TASK:> Configure the given logger for production deployment. <END_TASK> <USER_TASK:> Description: def _configure_logger_for_production(logger): """Configure the given logger for production deployment. Logs to stderr and file, and emails errors to admins. """
stderr_handler = logging.StreamHandler(sys.stderr)
stderr_handler.setLevel(logging.INFO)
if 'STDERR' in app.config:
    logger.addHandler(stderr_handler)

file_handler = logging.handlers.RotatingFileHandler(
    app.config.get('LOG_FILE'), maxBytes=67108864, backupCount=5)
file_handler.setLevel(logging.INFO)
if 'LOG_FILE' in app.config:
    logger.addHandler(file_handler)

mail_handler = logging.handlers.SMTPHandler(
    '127.0.0.1',
    app.config.get('FROM_EMAIL'),
    app.config.get('ADMINS', []),
    'CKAN Service Error')
mail_handler.setLevel(logging.ERROR)
if 'FROM_EMAIL' in app.config:
    logger.addHandler(mail_handler)
<SYSTEM_TASK:> Initialise and configure the scheduler. <END_TASK> <USER_TASK:> Description: def init_scheduler(db_uri): """Initialise and configure the scheduler."""
global scheduler
scheduler = apscheduler.Scheduler()
scheduler.misfire_grace_time = 3600
scheduler.add_jobstore(
    sqlalchemy_store.SQLAlchemyJobStore(url=db_uri), 'default')
scheduler.add_listener(
    job_listener,
    events.EVENT_JOB_EXECUTED | events.EVENT_JOB_MISSED | events.EVENT_JOB_ERROR)
return scheduler
<SYSTEM_TASK:> Initialise the database. <END_TASK> <USER_TASK:> Description: def init(uri, echo=False): """Initialise the database. Initialise the sqlalchemy engine, metadata and table objects that we use to connect to the database. Create the database and the database tables themselves if they don't already exist. :param uri: the sqlalchemy database URI :type uri: string :param echo: whether or not to have the sqlalchemy engine log all statements to stdout :type echo: bool """
global ENGINE, _METADATA, JOBS_TABLE, METADATA_TABLE, LOGS_TABLE
ENGINE = sqlalchemy.create_engine(uri, echo=echo, convert_unicode=True)
_METADATA = sqlalchemy.MetaData(ENGINE)
JOBS_TABLE = _init_jobs_table()
METADATA_TABLE = _init_metadata_table()
LOGS_TABLE = _init_logs_table()
_METADATA.create_all(ENGINE)
<SYSTEM_TASK:> Return the job with the given job_id as a dict. <END_TASK> <USER_TASK:> Description: def get_job(job_id): """Return the job with the given job_id as a dict. The dict also includes any metadata or logs associated with the job. Returns None instead of a dict if there's no job with the given job_id. The keys of a job dict are: "job_id": The unique identifier for the job (unicode) "job_type": The name of the job function that will be executed for this job (unicode) "status": The current status of the job, e.g. "pending", "complete", or "error" (unicode) "data": Any output data returned by the job if it has completed successfully. This may be any JSON-serializable type, e.g. None, a string, a dict, etc. "error": If the job failed with an error this will be a dict with a "message" key whose value is a string error message. The dict may also have other keys specific to the particular type of error. If the job did not fail with an error then "error" will be None. "requested_timestamp": The time at which the job was requested (string) "finished_timestamp": The time at which the job finished (string) "sent_data": The input data for the job, provided by the client site. This may be any JSON-serializable type, e.g. None, a string, a dict, etc. "result_url": The callback URL that CKAN Service Provider will post the result to when the job finishes (unicode) "api_key": The API key that CKAN Service Provider will use when posting the job result to the result_url (unicode or None). A None here doesn't mean that there was no API key: CKAN Service Provider deletes the API key from the database after it has posted the result to the result_url. "job_key": The key that users must provide (in the Authorization header of the HTTP request) to be authorized to modify the job (unicode). For example requests to the CKAN Service Provider API need this to get the status or output data of a job or to delete a job. If you login to CKAN Service Provider as an administrator then you can administer any job without providing its job_key. "metadata": Any custom metadata associated with the job (dict) "logs": Any logs associated with the job (list) """
# Avoid SQLAlchemy "Unicode type received non-unicode bind param value"
# warnings.
if job_id:
    job_id = unicode(job_id)

result = ENGINE.execute(
    JOBS_TABLE.select().where(JOBS_TABLE.c.job_id == job_id)).first()
if not result:
    return None

# Turn the result into a dictionary representation of the job.
result_dict = {}
for field in result.keys():
    value = getattr(result, field)
    if value is None:
        result_dict[field] = value
    elif field in ('sent_data', 'data', 'error'):
        result_dict[field] = json.loads(value)
    elif isinstance(value, datetime.datetime):
        result_dict[field] = value.isoformat()
    else:
        result_dict[field] = unicode(value)

result_dict['metadata'] = _get_metadata(job_id)
result_dict['logs'] = _get_logs(job_id)

return result_dict
<SYSTEM_TASK:> Add a new job with status "pending" to the jobs table. <END_TASK> <USER_TASK:> Description: def add_pending_job(job_id, job_key, job_type, api_key, data=None, metadata=None, result_url=None): """Add a new job with status "pending" to the jobs table. All code that adds jobs to the jobs table should go through this function. Code that adds to the jobs table manually should be refactored to use this function. May raise unspecified exceptions from Python core, SQLAlchemy or JSON! TODO: Document and unit test these! :param job_id: a unique identifier for the job, used as the primary key in ckanserviceprovider's "jobs" database table :type job_id: unicode :param job_key: the key required to administer the job via the API :type job_key: unicode :param job_type: the name of the job function that will be executed for this job :type job_key: unicode :param api_key: the client site API key that ckanserviceprovider will use when posting the job result to the result_url :type api_key: unicode :param data: The input data for the job (called sent_data elsewhere) :type data: Any JSON-serializable type :param metadata: A dict of arbitrary (key, value) metadata pairs to be stored along with the job. The keys should be strings, the values can be strings or any JSON-encodable type. :type metadata: dict :param result_url: the callback URL that ckanserviceprovider will post the job result to when the job has finished :type result_url: unicode """
if not data:
    data = {}
data = json.dumps(data)

# Turn strings into unicode to stop SQLAlchemy
# "Unicode type received non-unicode bind param value" warnings.
if job_id:
    job_id = unicode(job_id)
if job_type:
    job_type = unicode(job_type)
if result_url:
    result_url = unicode(result_url)
if api_key:
    api_key = unicode(api_key)
if job_key:
    job_key = unicode(job_key)
data = unicode(data)

if not metadata:
    metadata = {}

conn = ENGINE.connect()
trans = conn.begin()
try:
    conn.execute(JOBS_TABLE.insert().values(
        job_id=job_id,
        job_type=job_type,
        status='pending',
        requested_timestamp=datetime.datetime.now(),
        sent_data=data,
        result_url=result_url,
        api_key=api_key,
        job_key=job_key))

    # Insert any (key, value) metadata pairs that the job has into the
    # metadata table.
    inserts = []
    for key, value in metadata.items():
        type_ = 'string'
        if not isinstance(value, basestring):
            value = json.dumps(value)
            type_ = 'json'

        # Turn strings into unicode to stop SQLAlchemy
        # "Unicode type received non-unicode bind param value" warnings.
        key = unicode(key)
        value = unicode(value)

        inserts.append(
            {"job_id": job_id,
             "key": key,
             "value": value,
             "type": type_}
        )
    if inserts:
        conn.execute(METADATA_TABLE.insert(), inserts)
    trans.commit()
except Exception:
    trans.rollback()
    raise
finally:
    conn.close()
<SYSTEM_TASK:> Validate and return the given error object. <END_TASK> <USER_TASK:> Description: def _validate_error(error): """Validate and return the given error object. Based on the given error object, return either None or a dict with a "message" key whose value is a string (the dict may also have any other keys that it wants). The given "error" object can be: - None, in which case None is returned - A string, in which case a dict like this will be returned: {"message": error_string} - A dict with a "message" key whose value is a string, in which case the dict will be returned unchanged :param error: the error object to validate :raises InvalidErrorObjectError: If the error object doesn't match any of the allowed types """
if error is None:
    return None
elif isinstance(error, basestring):
    return {"message": error}
else:
    try:
        message = error["message"]
        if isinstance(message, basestring):
            return error
        else:
            raise InvalidErrorObjectError(
                "error['message'] must be a string")
    except (TypeError, KeyError):
        raise InvalidErrorObjectError(
            "error must be either a string or a dict with a message key")
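A hedged usage sketch, assuming `_validate_error` and `InvalidErrorObjectError` are importable from this module (the extra "job_id" key is hypothetical):

assert _validate_error(None) is None
assert _validate_error("boom") == {"message": "boom"}
assert _validate_error({"message": "boom", "job_id": "x"})["message"] == "boom"
try:
    _validate_error({"no_message_key": 1})  # missing "message" -> rejected
except InvalidErrorObjectError as err:
    print(err)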
<SYSTEM_TASK:> Update the database row for the given job_id with the given job_dict. <END_TASK> <USER_TASK:> Description: def _update_job(job_id, job_dict): """Update the database row for the given job_id with the given job_dict. All functions that update rows in the jobs table do it by calling this helper function. job_dict is a dict with values corresponding to the database columns that should be updated, e.g.: {"status": "complete", "data": ...} """
# Avoid SQLAlchemy "Unicode type received non-unicode bind param value"
# warnings.
if job_id:
    job_id = unicode(job_id)

if "error" in job_dict:
    job_dict["error"] = _validate_error(job_dict["error"])
    job_dict["error"] = json.dumps(job_dict["error"])
    # Avoid SQLAlchemy "Unicode type received non-unicode bind param value"
    # warnings.
    job_dict["error"] = unicode(job_dict["error"])

# Avoid SQLAlchemy "Unicode type received non-unicode bind param value"
# warnings.
if "data" in job_dict:
    job_dict["data"] = unicode(job_dict["data"])

ENGINE.execute(
    JOBS_TABLE.update()
    .where(JOBS_TABLE.c.job_id == job_id)
    .values(**job_dict))
<SYSTEM_TASK:> Mark a job as completed successfully. <END_TASK> <USER_TASK:> Description: def mark_job_as_completed(job_id, data=None): """Mark a job as completed successfully. :param job_id: the job_id of the job to be updated :type job_id: unicode :param data: the output data returned by the job :type data: any JSON-serializable type (including None) """
update_dict = {
    "status": "complete",
    "data": json.dumps(data),
    "finished_timestamp": datetime.datetime.now(),
}
_update_job(job_id, update_dict)
<SYSTEM_TASK:> Mark a job as failed with an error. <END_TASK> <USER_TASK:> Description: def mark_job_as_errored(job_id, error_object): """Mark a job as failed with an error. :param job_id: the job_id of the job to be updated :type job_id: unicode :param error_object: the error returned by the job :type error_object: either a string or a dict with a "message" key whose value is a string """
update_dict = {
    "status": "error",
    "error": error_object,
    "finished_timestamp": datetime.datetime.now(),
}
_update_job(job_id, update_dict)
<SYSTEM_TASK:> Return any metadata for the given job_id from the metadata table. <END_TASK> <USER_TASK:> Description: def _get_metadata(job_id): """Return any metadata for the given job_id from the metadata table."""
# Avoid SQLAlchemy "Unicode type received non-unicode bind param value"
# warnings.
job_id = unicode(job_id)

results = ENGINE.execute(
    METADATA_TABLE.select().where(
        METADATA_TABLE.c.job_id == job_id)).fetchall()
metadata = {}
for row in results:
    value = row['value']
    if row['type'] == 'json':
        value = json.loads(value)
    metadata[row['key']] = value
return metadata
<SYSTEM_TASK:> Return any logs for the given job_id from the logs table. <END_TASK> <USER_TASK:> Description: def _get_logs(job_id): """Return any logs for the given job_id from the logs table."""
# Avoid SQLAlchemy "Unicode type received non-unicode bind param value"
# warnings.
job_id = unicode(job_id)

results = ENGINE.execute(
    LOGS_TABLE.select().where(LOGS_TABLE.c.job_id == job_id)).fetchall()
results = [dict(result) for result in results]
for result in results:
    result.pop("job_id")
return results
<SYSTEM_TASK:> Searches the given attributes for a match against the given pattern and <END_TASK> <USER_TASK:> Description: def check_node_attributes(pattern, node, *attributes): """ Searches the given attributes for a match against the given pattern and returns True if any of them matches. """
for attribute_name in attributes:
    attribute = node.get(attribute_name)
    if attribute is not None and pattern.search(attribute):
        return True

return False
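A minimal sketch of how this helper is typically driven (assumes `lxml` is installed; the regex here is illustrative, not the library's real CLS_UNLIKELY pattern):

import re
from lxml.html import fragment_fromstring

node = fragment_fromstring('<div class="sidebar promo" id="main">x</div>')
pattern = re.compile("sidebar|comment", re.IGNORECASE)
# check_node_attributes(pattern, node, "class", "id") would return True here,
# because the "class" attribute matches the pattern.
print(bool(pattern.search(node.get("class"))))  # True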
<SYSTEM_TASK:> Generates a hash_id for the node in question. <END_TASK> <USER_TASK:> Description: def generate_hash_id(node): """ Generates a hash_id for the node in question. :param node: lxml etree node """
try:
    content = tostring(node)
except Exception:
    logger.exception("Generating of hash failed")
    content = to_bytes(repr(node))

hash_id = md5(content).hexdigest()
return hash_id[:8]
<SYSTEM_TASK:> Computes the ratio for text in given node and text in links <END_TASK> <USER_TASK:> Description: def get_link_density(node, node_text=None): """ Computes the ratio for text in given node and text in links contained in the node. It is computed from number of characters in the texts. :parameter Element node: HTML element in which links density is computed. :parameter string node_text: Text content of given node if it was obtained before. :returns float: Returns value of computed 0 <= density <= 1, where 0 means no links and 1 means that node contains only links. """
if node_text is None:
    node_text = node.text_content()
node_text = normalize_whitespace(node_text.strip())

text_length = len(node_text)
if text_length == 0:
    return 0.0

links_length = sum(map(_get_normalized_text_length, node.findall(".//a")))
# Give 50 bonus chars worth of length for each img.
# Tweaking this 50 down a notch should help if we hit false positives.
img_bonuses = 50 * len(node.findall(".//img"))
links_length = max(0, links_length - img_bonuses)

return links_length / text_length
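To make the ratio concrete, here is a self-contained re-computation on a small fragment (ignoring the whitespace normalization and the image bonus for brevity; assumes `lxml` is installed):

from lxml.html import fragment_fromstring

node = fragment_fromstring(
    '<div>Some article text here <a href="#">tiny link</a></div>')
text_length = float(len(node.text_content()))
links_length = sum(len(a.text_content()) for a in node.findall(".//a"))
print(links_length / text_length)  # roughly 0.28: mostly text, one small link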
<SYSTEM_TASK:> Short helper for checking unlikely status. <END_TASK> <USER_TASK:> Description: def is_unlikely_node(node): """ Short helper for checking unlikely status. If the class or id are in the unlikely list, and there's not also a class/id in the likely list then it might need to be removed. """
unlikely = check_node_attributes(CLS_UNLIKELY, node, "class", "id")
maybe = check_node_attributes(CLS_MAYBE, node, "class", "id")

return bool(unlikely and not maybe and node.tag != "body")
<SYSTEM_TASK:> Given a list of potential nodes, find some initial scores to start <END_TASK> <USER_TASK:> Description: def score_candidates(nodes): """Given a list of potential nodes, find some initial scores to start"""
MIN_HIT_LENTH = 25
candidates = {}

for node in nodes:
    logger.debug("* Scoring candidate %s %r", node.tag, node.attrib)

    # if the node has no parent it knows of then it ends up creating a
    # body & html tag to parent the html fragment
    parent = node.getparent()
    if parent is None:
        logger.debug("Skipping candidate - parent node is 'None'.")
        continue

    grand = parent.getparent()
    if grand is None:
        logger.debug("Skipping candidate - grand parent node is 'None'.")
        continue

    # if paragraph is < `MIN_HIT_LENTH` characters don't even count it
    inner_text = node.text_content().strip()
    if len(inner_text) < MIN_HIT_LENTH:
        logger.debug(
            "Skipping candidate - inner text < %d characters.",
            MIN_HIT_LENTH)
        continue

    # initialize readability data for the parent
    # add parent node if it isn't in the candidate list
    if parent not in candidates:
        candidates[parent] = ScoredNode(parent)

    if grand not in candidates:
        candidates[grand] = ScoredNode(grand)

    # add a point for the paragraph itself as a base
    content_score = 1

    if inner_text:
        # add 0.25 points for any commas within this paragraph
        commas_count = inner_text.count(",")
        content_score += commas_count * 0.25
        logger.debug("Bonus points for %d commas.", commas_count)

        # subtract 0.5 points for each double quote within this paragraph
        double_quotes_count = inner_text.count('"')
        content_score += double_quotes_count * -0.5
        logger.debug(
            "Penalty points for %d double-quotes.", double_quotes_count)

        # for every 100 characters in this paragraph, add another point
        # up to 3 points
        length_points = len(inner_text) / 100
        content_score += min(length_points, 3.0)
        logger.debug("Bonus points for length of text: %f", length_points)

    # add the score to the parent
    logger.debug(
        "Bonus points for parent %s %r with score %f: %f",
        parent.tag, parent.attrib, candidates[parent].content_score,
        content_score)
    candidates[parent].content_score += content_score

    # the grand node gets half
    logger.debug(
        "Bonus points for grand %s %r with score %f: %f",
        grand.tag, grand.attrib, candidates[grand].content_score,
        content_score / 2.0)
    candidates[grand].content_score += content_score / 2.0

    if node not in candidates:
        candidates[node] = ScoredNode(node)
    candidates[node].content_score += content_score

for candidate in candidates.values():
    adjustment = 1.0 - get_link_density(candidate.node)
    candidate.content_score *= adjustment
    logger.debug(
        "Link density adjustment for %s %r: %f",
        candidate.node.tag, candidate.node.attrib, adjustment)

return candidates
<SYSTEM_TASK:> Actually, this function creates a time stamp vector <END_TASK> <USER_TASK:> Description: def getTime(self): """ Actually, this function creates a time stamp vector based on the number of samples and sample rate. """
T = 1 / float(self.samp[self.nrates - 1])
endtime = self.endsamp[self.nrates - 1] * T
t = numpy.linspace(0, endtime, self.endsamp[self.nrates - 1])
return t
<SYSTEM_TASK:> Returns the COMTRADE ID of a given channel number. <END_TASK> <USER_TASK:> Description: def getAnalogID(self,num): """ Returns the COMTRADE ID of a given channel number. The number to be given is the same of the COMTRADE header. """
listidx = self.An.index(num)  # Get the position of the channel number.
return self.Ach_id[listidx]
<SYSTEM_TASK:> Reads the COMTRADE ID of a given channel number. <END_TASK> <USER_TASK:> Description: def getDigitalID(self,num): """ Reads the COMTRADE ID of a given channel number. The number to be given is the same of the COMTRADE header. """
listidx = self.Dn.index(num)  # Get the position of the channel number.
return self.Dch_id[listidx]
<SYSTEM_TASK:> Returns the type of the channel 'num' based <END_TASK> <USER_TASK:> Description: def getAnalogType(self,num): """ Returns the type of the channel 'num' based on its unit stored in the Comtrade header file. Returns 'V' for a voltage channel and 'I' for a current channel. """
listidx = self.An.index(num)
unit = self.uu[listidx]

if unit == 'kV' or unit == 'V':
    return 'V'
elif unit == 'A' or unit == 'kA':
    return 'I'
else:
    print 'Unknown channel type'
    return 0
<SYSTEM_TASK:> Reads the contents of the Comtrade .dat file and store them in a <END_TASK> <USER_TASK:> Description: def ReadDataFile(self): """ Reads the contents of the Comtrade .dat file and store them in a private variable. For accessing a specific channel data, see methods getAnalogData and getDigitalData. """
if os.path.isfile(self.filename[0:-4] + '.dat'):
    filename = self.filename[0:-4] + '.dat'
elif os.path.isfile(self.filename[0:-4] + '.DAT'):
    filename = self.filename[0:-4] + '.DAT'
else:
    print "Data file not found."
    return 0

self.filehandler = open(filename, 'rb')
self.DatFileContent = self.filehandler.read()
# END READING .dat FILE.
self.filehandler.close()  # Close file.
return 1
<SYSTEM_TASK:> Returns an array of numbers containing the data values of the channel <END_TASK> <USER_TASK:> Description: def getAnalogChannelData(self,ChNumber): """ Returns an array of numbers containing the data values of the channel number "ChNumber". ChNumber is the number of the channal as in .cfg file. """
if not self.DatFileContent:
    print "No data file content. Use the method ReadDataFile first"
    return 0
if ChNumber > self.A:
    print "Channel number greater than the total number of channels."
    return 0

# Formatting string for struct module:
str_struct = "ii%dh" % (self.A + int(numpy.ceil(float(self.D) / float(16))))
# Number of bytes per sample:
NB = 4 + 4 + self.A * 2 + int(numpy.ceil(float(self.D) / float(16))) * 2
# Number of samples:
N = self.getNumberOfSamples()
# Empty column vector:
values = numpy.empty((N, 1))

ch_index = self.An.index(ChNumber)

# Reading the values from DatFileContent string:
for i in range(N):
    data = struct.unpack(str_struct, self.DatFileContent[i * NB:(i * NB) + NB])
    # The first two numbers are the sample index and timestamp.
    values[i] = data[ChNumber + 1]

values = values * self.a[ch_index]  # a factor
values = values + self.b[ch_index]  # b factor

return values
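The per-sample record layout is two 32-bit ints (sample index, timestamp) followed by one 16-bit int per analog channel plus packed digital words; a tiny standalone sketch of that unpacking pattern (made-up bytes, not a real COMTRADE file):

import struct

record = struct.pack("ii2h", 1, 1000, 42, -7)  # two analog channels, no digital words
sample = struct.unpack("ii2h", record)
print(sample)         # (1, 1000, 42, -7): index, timestamp, ch1, ch2
print(sample[1 + 1])  # channel 1 value, matching data[ChNumber + 1] above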
<SYSTEM_TASK:> Builds a base document with the body as root. <END_TASK> <USER_TASK:> Description: def build_base_document(dom, return_fragment=True): """ Builds a base document with the body as root. :param dom: Parsed lxml tree (Document Object Model). :param bool return_fragment: If True only <div> fragment is returned. Otherwise full HTML document is returned. """
body_element = dom.find(".//body")

if body_element is None:
    fragment = fragment_fromstring('<div id="readabilityBody"/>')
    fragment.append(dom)
else:
    body_element.tag = "div"
    body_element.set("id", "readabilityBody")
    fragment = body_element

return document_from_fragment(fragment, return_fragment)
<SYSTEM_TASK:> Looks through siblings for content that might also be related. <END_TASK> <USER_TASK:> Description: def check_siblings(candidate_node, candidate_list): """ Looks through siblings for content that might also be related. Things like preambles, content split by ads that we removed, etc. """
candidate_css = candidate_node.node.get("class")
potential_target = candidate_node.content_score * 0.2
sibling_target_score = potential_target if potential_target > 10 else 10
parent = candidate_node.node.getparent()
siblings = parent.getchildren() if parent is not None else []

for sibling in siblings:
    append = False
    content_bonus = 0

    if sibling is candidate_node.node:
        append = True

    # Give a bonus if sibling nodes and top candidates have the exact
    # same class name
    if candidate_css and sibling.get("class") == candidate_css:
        content_bonus += candidate_node.content_score * 0.2

    if sibling in candidate_list:
        adjusted_score = \
            candidate_list[sibling].content_score + content_bonus

        if adjusted_score >= sibling_target_score:
            append = True

    if sibling.tag == "p":
        link_density = get_link_density(sibling)
        content = sibling.text_content()
        content_length = len(content)

        if content_length > 80 and link_density < 0.25:
            append = True
        elif content_length < 80 and link_density == 0:
            if ". " in content:
                append = True

    if append:
        logger.debug(
            "Sibling appended: %s %r", sibling.tag, sibling.attrib)
        if sibling.tag not in ("div", "p"):
            # We have a node that isn't a common block level element, like
            # a form or td tag. Turn it into a div so it doesn't get
            # filtered out later by accident.
            sibling.tag = "div"

        if candidate_node.node != sibling:
            candidate_node.node.append(sibling)

return candidate_node
<SYSTEM_TASK:> Cleans up the final document we return as the readable article. <END_TASK> <USER_TASK:> Description: def clean_document(node): """Cleans up the final document we return as the readable article."""
if node is None or len(node) == 0:
    return None

logger.debug("\n\n-------------- CLEANING DOCUMENT -----------------")
to_drop = []

for n in node.iter():
    # clean out any in-line style properties
    if "style" in n.attrib:
        n.set("style", "")

    # remove embedded objects unless it's wanted video
    if n.tag in ("object", "embed") and not ok_embedded_video(n):
        logger.debug("Dropping node %s %r", n.tag, n.attrib)
        to_drop.append(n)

    # clean headings with bad css or high link density
    if n.tag in ("h1", "h2", "h3", "h4") and get_class_weight(n) < 0:
        logger.debug("Dropping <%s>, it's insignificant", n.tag)
        to_drop.append(n)

    if n.tag in ("h3", "h4") and get_link_density(n) > 0.33:
        logger.debug("Dropping <%s>, it's insignificant", n.tag)
        to_drop.append(n)

    # drop block element without content and children
    if n.tag in ("div", "p"):
        text_content = shrink_text(n.text_content())
        if len(text_content) < 5 and not n.getchildren():
            logger.debug(
                "Dropping %s %r without content.", n.tag, n.attrib)
            to_drop.append(n)

    # finally try out the conditional cleaning of the target node
    if clean_conditionally(n):
        to_drop.append(n)

drop_nodes_with_parents(to_drop)

return node
<SYSTEM_TASK:> Remove the clean_el if it looks like bad content based on rules. <END_TASK> <USER_TASK:> Description: def clean_conditionally(node): """Remove the clean_el if it looks like bad content based on rules."""
if node.tag not in ('form', 'table', 'ul', 'div', 'p'):
    return  # this is not the tag we are looking for

weight = get_class_weight(node)
# content_score = LOOK up the content score for this node we found
# before else default to 0
content_score = 0

if weight + content_score < 0:
    logger.debug('Dropping conditional node')
    logger.debug('Weight + score < 0')
    return True

commas_count = node.text_content().count(',')
if commas_count < 10:
    logger.debug(
        "There are %d commas so we're processing more.", commas_count)

    # If there are not very many commas, and the number of
    # non-paragraph elements is more than paragraphs or other ominous
    # signs, remove the element.
    p = len(node.findall('.//p'))
    img = len(node.findall('.//img'))
    li = len(node.findall('.//li')) - 100
    inputs = len(node.findall('.//input'))

    embed = 0
    embeds = node.findall('.//embed')
    for e in embeds:
        if ok_embedded_video(e):
            embed += 1
    link_density = get_link_density(node)
    content_length = len(node.text_content())

    remove_node = False

    if li > p and node.tag != 'ul' and node.tag != 'ol':
        logger.debug('Conditional drop: li > p and not ul/ol')
        remove_node = True
    elif inputs > p / 3.0:
        logger.debug('Conditional drop: inputs > p/3.0')
        remove_node = True
    elif content_length < 25 and (img == 0 or img > 2):
        logger.debug('Conditional drop: len < 25 and 0/>2 images')
        remove_node = True
    elif weight < 25 and link_density > 0.2:
        logger.debug(
            'Conditional drop: weight small (%f) and link is dense (%f)',
            weight, link_density)
        remove_node = True
    elif weight >= 25 and link_density > 0.5:
        logger.debug('Conditional drop: weight big but link heavy')
        remove_node = True
    elif (embed == 1 and content_length < 75) or embed > 1:
        logger.debug(
            'Conditional drop: embed w/o much content or many embed')
        remove_node = True

    if remove_node:
        logger.debug('Node will be removed: %s %r %s', node.tag,
                     node.attrib, node.text_content()[:30])

    return remove_node

return False
<SYSTEM_TASK:> Finds candidate nodes for the readable version of the article. <END_TASK> <USER_TASK:> Description: def find_candidates(document): """ Finds candidate nodes for the readable version of the article. Here we're going to remove unlikely nodes, find scores on the rest, clean up and return the final best match. """
nodes_to_score = set()
should_remove = set()

for node in document.iter():
    if is_unlikely_node(node):
        logger.debug(
            "We should drop unlikely: %s %r", node.tag, node.attrib)
        should_remove.add(node)
    elif is_bad_link(node):
        logger.debug(
            "We should drop bad link: %s %r", node.tag, node.attrib)
        should_remove.add(node)
    elif node.tag in SCORABLE_TAGS:
        nodes_to_score.add(node)

return score_candidates(nodes_to_score), should_remove
<SYSTEM_TASK:> Helper to determine if the node is link that is useless. <END_TASK> <USER_TASK:> Description: def is_bad_link(node): """ Helper to determine if the node is link that is useless. We've hit articles with many multiple links that should be cleaned out because they're just there to pollute the space. See tests for examples. """
if node.tag != "a": return False name = node.get("name") href = node.get("href") if name and not href: return True if href: href_parts = href.split("#") if len(href_parts) == 2 and len(href_parts[1]) > 25: return True return False
<SYSTEM_TASK:> Generates list of candidates from the DOM. <END_TASK> <USER_TASK:> Description: def candidates(self): """Generates list of candidates from the DOM."""
dom = self.dom
if dom is None or len(dom) == 0:
    return None

candidates, unlikely_candidates = find_candidates(dom)
drop_nodes_with_parents(unlikely_candidates)

return candidates
<SYSTEM_TASK:> If we fail to find a good candidate we need to find something else. <END_TASK> <USER_TASK:> Description: def _handle_no_candidates(self): """ If we fail to find a good candidate we need to find something else. """
# since we've not found a good candidate we should help this along
if self.dom is not None and len(self.dom):
    dom = prep_article(self.dom)
    dom = build_base_document(dom, self._return_fragment)
    return self._remove_orphans(
        dom.get_element_by_id("readabilityBody"))
else:
    logger.info("No document to use.")
    return build_error_document(self._return_fragment)
<SYSTEM_TASK:> Converts a bytes stream containing an HTML page into Unicode. <END_TASK> <USER_TASK:> Description: def decode_html(html): """ Converts a bytes stream containing an HTML page into Unicode. Tries to guess the character encoding from the meta tag or by using the "chardet" library. """
if isinstance(html, unicode):
    return html

match = CHARSET_META_TAG_PATTERN.search(html)
if match:
    declared_encoding = match.group(1).decode("ASCII")
    # proceed unknown encoding as if it wasn't found at all
    with ignored(LookupError):
        return html.decode(declared_encoding, "ignore")

# try to enforce UTF-8 firstly
with ignored(UnicodeDecodeError):
    return html.decode("utf8")

text = TAG_MARK_PATTERN.sub(to_bytes(" "), html)
diff = text.decode("utf8", "ignore").encode("utf8")
sizes = len(diff), len(text)

# 99% of text is UTF-8
if abs(len(text) - len(diff)) < max(sizes) * 0.01:
    return html.decode("utf8", "ignore")

# try detect encoding
encoding = "utf8"
encoding_detector = chardet.detect(text)
if encoding_detector["encoding"]:
    encoding = encoding_detector["encoding"]

return html.decode(encoding, "ignore")
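The chardet fallback at the end can be sketched standalone (assumes the `chardet` package is installed; the sample bytes are hypothetical):

import chardet

raw = u"caf\u00e9 na\u00efve".encode("latin-1")
guess = chardet.detect(raw)["encoding"] or "utf8"
# chardet may report ISO-8859-1 or windows-1252 here; both decode this sample.
print(raw.decode(guess, "ignore"))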
<SYSTEM_TASK:> Nodes have properties, which are facts like the <END_TASK> <USER_TASK:> Description: def _parse_properties(self): """Nodes have properties, which are facts like the name, description, url etc. Loop through each of them and set it as attributes on this company so that we can make calls like company.name person.description """
props_dict = self.data.get('properties', {})
for prop_name in self.KNOWN_PROPERTIES:
    if prop_name in props_dict:
        setattr(self, prop_name, props_dict.get(prop_name))
    else:
        setattr(self, prop_name, None)
<SYSTEM_TASK:> Nodes have Relationships, and similarly to properties, <END_TASK> <USER_TASK:> Description: def _parse_relationship(self): """Nodes have Relationships, and similarly to properties, we set it as an attribute on the Organization so we can make calls like company.current_team person.degrees """
rs_dict = self.data.get('relationships', {})
for rs_name in self.KNOWN_RELATIONSHIPS:
    if rs_name in rs_dict:
        setattr(
            self, rs_name, Relationship(rs_name, rs_dict.get(rs_name)))
    else:
        # fill in other relationships with None values
        setattr(self, rs_name, NoneRelationshipSingleton)
<SYSTEM_TASK:> Reset time and counts. <END_TASK> <USER_TASK:> Description: def open(self): """ Reset time and counts. """
self.startTime = datetime.datetime.now()
self.offset = 0
return self
<SYSTEM_TASK:> Update self and parent with intermediate progress. <END_TASK> <USER_TASK:> Description: def update(self, sent): """ Update self and parent with intermediate progress. """
self.offset = sent
now = datetime.datetime.now()
elapsed = (now - self.startTime).total_seconds()
if elapsed > 0:
    mbps = (sent * 8 / (10 ** 6)) / elapsed
else:
    mbps = None
self._display(sent, now, self.name, mbps)
<SYSTEM_TASK:> Display intermediate progress. <END_TASK> <USER_TASK:> Description: def _display(self, sent, now, chunk, mbps): """ Display intermediate progress. """
if self.parent is not None:
    self.parent._display(self.parent.offset + sent, now, chunk, mbps)
    return

elapsed = now - self.startTime
if sent > 0 and self.total is not None and sent <= self.total:
    eta = (self.total - sent) * elapsed.total_seconds() / sent
    eta = datetime.timedelta(seconds=eta)
else:
    eta = None

self.output.write(
    "\r %s: Sent %s%s%s ETA: %s (%s) %s%20s\r" % (
        elapsed,
        util.humanize(sent),
        "" if self.total is None else " of %s" % (util.humanize(self.total),),
        "" if self.total is None else " (%d%%)" % (int(100 * sent / self.total),),
        eta,
        "" if not mbps else "%.3g Mbps " % (mbps,),
        chunk or "",
        " ",
    )
)
self.output.flush()
<SYSTEM_TASK:> Stop overwriting display, or update parent. <END_TASK> <USER_TASK:> Description: def close(self): """ Stop overwriting display, or update parent. """
if self.parent:
    self.parent.update(self.parent.offset + self.offset)
    return
self.output.write("\n")
self.output.flush()
<SYSTEM_TASK:> Return friendly abbreviated string for uuid. <END_TASK> <USER_TASK:> Description: def _printUUID(uuid, detail='word'): """ Return friendly abbreviated string for uuid. """
if not isinstance(detail, int):
    detail = detailNum[detail]

if detail > detailNum['word']:
    return uuid

if uuid is None:
    return None

return "%s...%s" % (uuid[:4], uuid[-4:])
<SYSTEM_TASK:> Return logging function. <END_TASK> <USER_TASK:> Description: def skipDryRun(logger, dryRun, level=logging.DEBUG): """ Return logging function. When logging function called, will return True if action should be skipped. Log will indicate if skipped because of dry run. """
# This is an undocumented "feature" of logging module:
# logging.log() requires a numeric level
# logging.getLevelName() maps names to numbers
if not isinstance(level, int):
    level = logging.getLevelName(level)

return (
    functools.partial(_logDryRun, logger, level)
    if dryRun
    else functools.partial(logger.log, level)
)
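`_logDryRun` itself is not shown in this section; presumably it logs the message and returns True so callers skip the real action. A minimal standalone sketch of the pattern under that assumption:

import functools
import logging

def _logDryRun(logger, level, fmt, *args):
    # Presumed behavior: log the would-be action and tell the caller to skip it.
    logger.log(level, fmt + " (dry run)", *args)
    return True

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("demo")

doIt = functools.partial(_logDryRun, log, logging.INFO)  # the dryRun=True branch
if doIt("Would delete %s", "/some/volume"):
    pass  # action skipped because of dry run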
<SYSTEM_TASK:> Return list of all volumes in this Store's selected directory. <END_TASK> <USER_TASK:> Description: def listVolumes(self): """ Return list of all volumes in this Store's selected directory. """
for (vol, paths) in self.paths.items():
    for path in paths:
        if path.startswith('/'):
            continue
        if path == '.':
            continue
        if self.userVolume is not None and os.path.basename(path) != self.userVolume:
            continue
        yield vol
        break
<SYSTEM_TASK:> Get a path appropriate for sending the volume from this Store. <END_TASK> <USER_TASK:> Description: def getSendPath(self, volume): """ Get a path appropriate for sending the volume from this Store. The path may be relative or absolute in this Store. """
try:
    return self._fullPath(next(iter(self.getPaths(volume))))
except StopIteration:
    return None
<SYSTEM_TASK:> From a set of source paths, recommend a destination path. <END_TASK> <USER_TASK:> Description: def selectReceivePath(self, paths): """ From a set of source paths, recommend a destination path. The paths are relative or absolute, in a source Store. The result will be absolute, suitable for this destination Store. """
logger.debug("%s", paths) if not paths: path = os.path.basename(self.userPath) + '/Anon' try: # Relative paths are preferred path = [p for p in paths if not p.startswith("/")][0] except IndexError: # If no relative path, just use the first path path = os.path.relpath(list(paths)[0], self.userPath) return self._fullPath(path)
<SYSTEM_TASK:> Return fullPath relative to Store directory. <END_TASK> <USER_TASK:> Description: def _relativePath(self, fullPath): """ Return fullPath relative to Store directory. Return fullPath if fullPath is not inside directory. Return None if fullPath is outside our scope. """
if fullPath is None:
    return None

assert fullPath.startswith("/"), fullPath

path = os.path.relpath(fullPath, self.userPath)

if not path.startswith("../"):
    return path
elif self.ignoreExtraVolumes:
    return None
else:
    return fullPath
<SYSTEM_TASK:> Send this difference to the dest Store. <END_TASK> <USER_TASK:> Description: def sendTo(self, dest, chunkSize): """ Send this difference to the dest Store. """
vol = self.toVol
paths = self.sink.getPaths(vol)

if self.sink == dest:
    logger.info("Keep: %s", self)
    self.sink.keep(self)
else:
    # Log, but don't skip yet, so we can log more detailed skipped actions later
    skipDryRun(logger, dest.dryrun, 'INFO')("Xfer: %s", self)

    receiveContext = dest.receive(self, paths)
    sendContext = self.sink.send(self)

    # try:
    #     receiveContext.metadata['btrfsVersion'] = self.btrfsVersion
    # except AttributeError:
    #     pass

    transfer(sendContext, receiveContext, chunkSize)

if vol.hasInfo():
    infoContext = dest.receiveVolumeInfo(paths)

    if infoContext is None:
        # vol.writeInfo(sys.stdout)
        pass
    else:
        with infoContext as stream:
            vol.writeInfo(stream)
<SYSTEM_TASK:> Write one line of diff information. <END_TASK> <USER_TASK:> Description: def writeInfoLine(self, stream, fromUUID, size): """ Write one line of diff information. """
if size is None or fromUUID is None:
    return

if not isinstance(size, int):
    logger.warning("Bad size: %s", size)
    return

stream.write(str("%s\t%s\t%d\n" % (
    self.uuid,
    fromUUID,
    size,
)))
<SYSTEM_TASK:> Write information about diffs into a file stream for use later. <END_TASK> <USER_TASK:> Description: def writeInfo(self, stream): """ Write information about diffs into a file stream for use later. """
for (fromUUID, size) in Diff.theKnownSizes[self.uuid].iteritems():
    self.writeInfoLine(stream, fromUUID, size)
<SYSTEM_TASK:> Will have information to write. <END_TASK> <USER_TASK:> Description: def hasInfo(self): """ Will have information to write. """
count = len([None
             for (fromUUID, size)
             in Diff.theKnownSizes[self.uuid].iteritems()
             if size is not None and fromUUID is not None
             ])
return count > 0
<SYSTEM_TASK:> Read previously-written information about diffs. <END_TASK> <USER_TASK:> Description: def readInfo(stream): """ Read previously-written information about diffs. """
try:
    for line in stream:
        (toUUID, fromUUID, size) = line.split()
        try:
            size = int(size)
        except Exception:
            logger.warning("Bad size: %s", size)
            continue
        logger.debug("diff info: %s %s %d", toUUID, fromUUID, size)
        Diff.theKnownSizes[toUUID][fromUUID] = size
except Exception as error:
    logger.warn("Can't read .bs info file (%s)", error)
<SYSTEM_TASK:> Test whether edge is in this sink. <END_TASK> <USER_TASK:> Description: def hasEdge(self, diff): """ Test whether edge is in this sink. """
return diff.toVol in [d.toVol for d in self.diffs[diff.fromVol]]
<SYSTEM_TASK:> Returns dict with fullpath, to, from. <END_TASK> <USER_TASK:> Description: def _parseKeyName(self, name): """ Returns dict with fullpath, to, from. """
if name.endswith(Store.theInfoExtension):
    return {'type': 'info'}

match = self.keyPattern.match(name)
if not match:
    return None

match = match.groupdict()
match.update(type='diff')

return match
<SYSTEM_TASK:> Return a human-readable string for number. <END_TASK> <USER_TASK:> Description: def humanize(number): """ Return a human-readable string for number. """
# units = ('bytes', 'KB', 'MB', 'GB', 'TB')
# base = 1000
units = ('bytes', 'KiB', 'MiB', 'GiB', 'TiB')
base = 1024

if number is None:
    return None

pow = int(math.log(number, base)) if number > 0 else 0
pow = min(pow, len(units) - 1)
mantissa = number / (base ** pow)
return "%.4g %s" % (mantissa, units[pow])
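A quick usage sketch, assuming this helper is importable (note that under Python 2 without `from __future__ import division` the mantissa is floored to an int):

print(humanize(0))        # '0 bytes'
print(humanize(1536))     # '1.5 KiB' with true division
print(humanize(10 ** 9))  # '953.7 MiB'
print(humanize(None))     # None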
<SYSTEM_TASK:> Return a context manager for stream that will store a diff. <END_TASK> <USER_TASK:> Description: def receive(self, path, diff, showProgress=True): """ Return a context manager for stream that will store a diff. """
directory = os.path.dirname(path)

cmd = ["btrfs", "receive", "-e", directory]

if Store.skipDryRun(logger, self.dryrun)("Command: %s", cmd):
    return None

if not os.path.exists(directory):
    os.makedirs(directory)

process = subprocess.Popen(
    cmd,
    stdin=subprocess.PIPE,
    stderr=subprocess.PIPE,
    stdout=DEVNULL,
)

_makeNice(process)

return _Writer(process, process.stdin, path, diff, showProgress)
<SYSTEM_TASK:> Return all diffs used in optimal network. <END_TASK> <USER_TASK:> Description: def iterDiffs(self): """ Return all diffs used in optimal network. """
nodes = self.nodes.values()
nodes.sort(key=lambda node: self._height(node))
for node in nodes:
    yield node.diff
<SYSTEM_TASK:> Get rid of all intermediate nodes that aren't needed. <END_TASK> <USER_TASK:> Description: def _prune(self): """ Get rid of all intermediate nodes that aren't needed. """
done = False
while not done:
    done = True
    for node in [node for node in self.nodes.values() if node.intermediate]:
        if not [dep for dep in self.nodes.values() if dep.previous == node.volume]:
            # logger.debug("Removing unnecessary node %s", node)
            del self.nodes[node.volume]
            done = False
<SYSTEM_TASK:> This will convert all attributes that are lists with only one string item into simple strings. It seems that LDAP always returns lists, even when it doesn <END_TASK> <USER_TASK:> Description: def __compress_attributes(self, dic): """ This will convert all attributes that are lists with only one string item into simple strings. It seems that LDAP always returns lists, even when it doesn't make sense. :param dic: :return: """
result = {}
for k, v in dic.iteritems():
    if isinstance(v, types.ListType) and len(v) == 1:
        if k not in ('msExchMailboxSecurityDescriptor',
                     'msExchSafeSendersHash',
                     'msExchBlockedSendersHash',
                     'replicationSignature',
                     'msExchSafeRecipientsHash',
                     'sIDHistory',
                     'msRTCSIP-UserRoutingGroupId',
                     'mSMQDigests',
                     'mSMQSignCertificates',
                     'msExchMasterAccountSid',
                     'msExchPreviousAccountSid',
                     'msExchUMPinChecksum',
                     'userSMIMECertificate',
                     'userCertificate',
                     'userCert',
                     'msExchDisabledArchiveGUID',
                     'msExchUMPinChecksum',
                     'msExchUMSpokenName',
                     'objectSid',
                     'objectGUID',
                     'msExchArchiveGUID',
                     'thumbnailPhoto',
                     'msExchMailboxGuid'):
            try:
                result[k] = v[0].decode('utf-8')
            except Exception as e:
                logging.error("Failed to decode attribute: %s -- %s" % (k, e))
                result[k] = v[0]
return result
<SYSTEM_TASK:> Mark this volume to be kept in path. <END_TASK> <USER_TASK:> Description: def _keepVol(self, vol): """ Mark this volume to be kept in path. """
if vol is None:
    return

if vol in self.extraVolumes:
    del self.extraVolumes[vol]
    return

if vol not in self.paths:
    raise Exception("%s not in %s" % (vol, self))

paths = [os.path.basename(path) for path in self.paths[vol]]
newPath = self.selectReceivePath(paths)

if self._skipDryRun(logger, 'INFO')("Copy %s to %s", vol, newPath):
    return

self.butterVolumes[vol.uuid].copy(newPath)
<SYSTEM_TASK:> Write specified key arguments into data structure. <END_TASK> <USER_TASK:> Description: def write(self, keyArgs): """ Write specified key arguments into data structure. """
# bytearray doesn't work with fcntl
args = array.array('B', (0,) * self.size)
self._struct.pack_into(args, 0, *list(self.yieldArgs(keyArgs)))
return args
<SYSTEM_TASK:> Take a flat arglist, and pop relevent values and return as a value or tuple. <END_TASK> <USER_TASK:> Description: def popValue(self, argList): """ Take a flat arglist, and pop relevent values and return as a value or tuple. """
# return self._Tuple(*[name for (name, typeObj) in self._types.items()])
return self._Tuple(*[typeObj.popValue(argList)
                     for (name, typeObj) in self._types.items()])
<SYSTEM_TASK:> Read and advance. <END_TASK> <USER_TASK:> Description: def read(self, structure): """ Read and advance. """
start = self.offset
self.skip(structure.size)
return structure.read(self.buf, start)
<SYSTEM_TASK:> Read next chunk as another buffer. <END_TASK> <USER_TASK:> Description: def readBuffer(self, newLength): """ Read next chunk as another buffer. """
result = Buffer(self.buf, self.offset, newLength)
self.skip(newLength)
return result
<SYSTEM_TASK:> Return full paths from linux root. <END_TASK> <USER_TASK:> Description: def linuxPaths(self): """ Return full paths from linux root. The first path returned will be the path through the top-most mount. (Usually the root). """
for ((dirTree, dirID, dirSeq), (dirPath, name)) in self.links.items():
    for path in self.fileSystem.volumes[dirTree].linuxPaths:
        yield path + "/" + dirPath + name

if self.fullPath in self.fileSystem.mounts:
    yield self.fileSystem.mounts[self.fullPath]
<SYSTEM_TASK:> Delete this subvolume from the filesystem. <END_TASK> <USER_TASK:> Description: def destroy(self): """ Delete this subvolume from the filesystem. """
path = next(iter(self.linuxPaths))
directory = _Directory(os.path.dirname(path))
with directory as device:
    device.SNAP_DESTROY(name=str(os.path.basename(path)), )
<SYSTEM_TASK:> Make another snapshot of this into dirName. <END_TASK> <USER_TASK:> Description: def copy(self, path): """ Make another snapshot of this into dirName. """
directoryPath = os.path.dirname(path)
if not os.path.exists(directoryPath):
    os.makedirs(directoryPath)

logger.debug('Create copy of %s in %s', os.path.basename(path), directoryPath)

with self._snapshot() as source, _Directory(directoryPath) as dest:
    dest.SNAP_CREATE_V2(
        flags=BTRFS_SUBVOL_RDONLY,
        name=str(os.path.basename(path)),
        fd=source.fd,
    )

with SnapShot(path) as destShot:
    flags = destShot.SUBVOL_GETFLAGS()
    destShot.SUBVOL_SETFLAGS(flags=flags.flags & ~BTRFS_SUBVOL_RDONLY)

    destShot.SET_RECEIVED_SUBVOL(
        uuid=self.received_uuid or self.uuid,
        stransid=self.sent_gen or self.current_gen,
        stime=timeOrNone(self.info.stime) or timeOrNone(self.info.ctime) or 0,
        flags=0,
    )

    destShot.SUBVOL_SETFLAGS(flags=flags.flags)
<SYSTEM_TASK:> Subvolumes contained in this mount. <END_TASK> <USER_TASK:> Description: def subvolumes(self): """ Subvolumes contained in this mount. """
self.SYNC()

self._getDevices()
self._getRoots()
self._getMounts()
self._getUsage()

volumes = self.volumes.values()
volumes.sort(key=(lambda v: v.fullPath))

return volumes
<SYSTEM_TASK:> Zero and recalculate quota sizes to subvolume sizes will be correct. <END_TASK> <USER_TASK:> Description: def _rescanSizes(self, force=True): """ Zero and recalculate quota sizes to subvolume sizes will be correct. """
status = self.QUOTA_CTL(cmd=BTRFS_QUOTA_CTL_ENABLE).status
logger.debug("CTL Status: %s", hex(status))

status = self.QUOTA_RESCAN_STATUS()
logger.debug("RESCAN Status: %s", status)

if not status.flags:
    if not force:
        return
    self.QUOTA_RESCAN()

logger.warn("Waiting for btrfs quota usage scan...")
self.QUOTA_RESCAN_WAIT()
<SYSTEM_TASK:> Get a tag-length-value encoded attribute. <END_TASK> <USER_TASK:> Description: def TLV_GET(attrs, attrNum, format): """ Get a tag-length-value encoded attribute. """
attrView = attrs[attrNum]
if format == 's':
    format = str(attrView.len) + format

try:
    (result,) = struct.unpack_from(format, attrView.buf, attrView.offset)
except TypeError:
    # Working around struct.unpack_from issue #10212
    (result,) = struct.unpack_from(
        format, str(bytearray(attrView.buf)), attrView.offset)

return result
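A standalone sketch of the unpack-from pattern used here, with a made-up attribute view rather than real netlink data (the `AttrView` namedtuple is hypothetical):

import struct
from collections import namedtuple

AttrView = namedtuple("AttrView", ["buf", "offset", "len"])
buf = bytearray(struct.pack("8sq", b"subvol01", 12345))
name_attr = AttrView(buf=buf, offset=0, len=8)

fmt = str(name_attr.len) + "s"  # 's' gets the view's length prefixed, as above
(value,) = struct.unpack_from(fmt, bytes(buf), name_attr.offset)
print(value)  # b'subvol01'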
<SYSTEM_TASK:> Put a tag-length-value encoded attribute. <END_TASK> <USER_TASK:> Description: def TLV_PUT(attrs, attrNum, format, value): """ Put a tag-length-value encoded attribute. """
attrView = attrs[attrNum]
if format == 's':
    format = str(attrView.len) + format
struct.pack_into(format, attrView.buf, attrView.offset, value)
<SYSTEM_TASK:> Label a method as a command with name. <END_TASK> <USER_TASK:> Description: def command(name, mode): """ Label a method as a command with name. """
def decorator(fn):
    commands[name] = fn.__name__
    _Client._addMethod(fn.__name__, name, mode)
    return fn
return decorator
<SYSTEM_TASK:> Serialize to a dictionary. <END_TASK> <USER_TASK:> Description: def diff(self, diff): """ Serialize to a dictionary. """
if diff is None:
    return None
return dict(
    toVol=diff.toUUID,
    fromVol=diff.fromUUID,
    size=diff.size,
    sizeIsEstimated=diff.sizeIsEstimated,
)
<SYSTEM_TASK:> Open connection to remote host. <END_TASK> <USER_TASK:> Description: def _open(self): """ Open connection to remote host. """
if self._process is not None:
    return

cmd = [
    'ssh',
    self._host,
    'sudo',
    'buttersink',
    '--server',
    '--mode',
    self._mode,
    self._directory,
]

logger.debug("Connecting with: %s", cmd)

self._process = subprocess.Popen(
    cmd,
    stdin=subprocess.PIPE,
    stderr=sys.stderr,
    # stdout=sys.stdout,
    stdout=subprocess.PIPE,
)

version = self.version()
logger.info("Remote version: %s", version)
<SYSTEM_TASK:> Close connection to remote host. <END_TASK> <USER_TASK:> Description: def _close(self): """ Close connection to remote host. """
if self._process is None:
    return

self.quit()

self._process.stdin.close()

logger.debug("Waiting for ssh process to finish...")
self._process.wait()  # Wait for ssh session to finish.
# self._process.terminate()
# self._process.kill()

self._process = None
<SYSTEM_TASK:> Run the server. Returns with system error code. <END_TASK> <USER_TASK:> Description: def run(self): """ Run the server. Returns with system error code. """
normalized = os.path.normpath(self.path) + ("/" if self.path.endswith("/") else "")
if self.path != normalized:
    sys.stderr.write("Please use full path '%s'" % (normalized,))
    return -1

self.butterStore = ButterStore.ButterStore(None, self.path, self.mode, dryrun=False)
# self.butterStore.ignoreExtraVolumes = True

self.toObj = _Arg2Obj(self.butterStore)
self.toDict = _Obj2Dict()

self.running = True

with self.butterStore:
    with self:
        while self.running:
            self._processCommand()

return 0
<SYSTEM_TASK:> Send parseable json result of command. <END_TASK> <USER_TASK:> Description: def _sendResult(self, result): """ Send parseable json result of command. """
# logger.debug("Result: %s", result)

try:
    result = json.dumps(result)
except Exception as error:
    result = json.dumps(self._errorInfo(command, error))

sys.stdout.write(result)
sys.stdout.write("\n")
sys.stdout.flush()
<SYSTEM_TASK:> Receive a btrfs diff. <END_TASK> <USER_TASK:> Description: def receive(self, path, diffTo, diffFrom): """ Receive a btrfs diff. """
diff = self.toObj.diff(diffTo, diffFrom)
self._open(self.butterStore.receive(diff, [path, ]))
<SYSTEM_TASK:> Map namedtuples given a pair of key, value lists. <END_TASK> <USER_TASK:> Description: def load_lists(keys=[], values=[], name='NT'): """ Map namedtuples given a pair of key, value lists. """
mapping = dict(zip(keys, values))
return mapper(mapping, _nt_name=name)
<SYSTEM_TASK:> Convert mappings to namedtuples recursively. <END_TASK> <USER_TASK:> Description: def mapper(mapping, _nt_name='NT'): """ Convert mappings to namedtuples recursively. """
if isinstance(mapping, Mapping) and not isinstance(mapping, AsDict):
    for key, value in list(mapping.items()):
        mapping[key] = mapper(value)
    return namedtuple_wrapper(_nt_name, **mapping)
elif isinstance(mapping, list):
    return [mapper(item) for item in mapping]
return mapping
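A hedged usage sketch, assuming `mapper` and its `namedtuple_wrapper` helper are importable from this module (the company data is hypothetical):

company = mapper({"name": "Acme", "address": {"city": "Oslo", "zip": "0150"}})
print(company.name)          # 'Acme'
print(company.address.city)  # 'Oslo' -- the nested mapping became a namedtuple too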
<SYSTEM_TASK:> Use ignore to prevent a mapping from being mapped to a namedtuple. <END_TASK> <USER_TASK:> Description: def ignore(mapping): """ Use ignore to prevent a mapping from being mapped to a namedtuple. """
if isinstance(mapping, Mapping):
    return AsDict(mapping)
elif isinstance(mapping, list):
    return [ignore(item) for item in mapping]
return mapping
<SYSTEM_TASK:> If DIR_PATH does not exist, makes it. Failing that, raises Exception. <END_TASK> <USER_TASK:> Description: def ensure_dir(dir_path): """ If DIR_PATH does not exist, makes it. Failing that, raises Exception. Returns True if dir already existed; False if it had to be made. """
exists = dir_exists(dir_path)
if not exists:
    try:
        os.makedirs(dir_path)
    except (Exception, RuntimeError), e:
        raise Exception("Unable to create directory %s. Cause %s" %
                        (dir_path, e))
return exists
<SYSTEM_TASK:> Validates OpenSSL to ensure it has TLS_FALLBACK_SCSV supported <END_TASK> <USER_TASK:> Description: def validate_openssl(): """ Validates OpenSSL to ensure it has TLS_FALLBACK_SCSV supported """
try:
    open_ssl_exe = which("openssl")
    if not open_ssl_exe:
        raise Exception("No openssl exe found in path")

    try:
        # execute an invalid command to get output with available options
        # since openssl does not have a --help option unfortunately
        execute_command([open_ssl_exe, "s_client", "invalidDummyCommand"])
    except subprocess.CalledProcessError as e:
        if "fallback_scsv" not in e.output:
            raise Exception("openssl does not support TLS_FALLBACK_SCSV")
except Exception as e:
    raise MongoctlException("Unsupported OpenSSL. %s" % e)
<SYSTEM_TASK:> Validates the member document against current rs conf <END_TASK> <USER_TASK:> Description: def validate_against_current_config(self, current_rs_conf): """ Validates the member document against current rs conf 1- If there is a member in current config with _id equals to my id then ensure hosts addresses resolve to the same host 2- If there is a member in current config with host resolving to my host then ensure that if my id is et then it must equal member._id """
# if rs is not configured yet then there is nothing to validate
if not current_rs_conf:
    return

my_host = self.get_host()
current_member_confs = current_rs_conf['members']
err = None

for curr_mem_conf in current_member_confs:
    if (self.id and
            self.id == curr_mem_conf['_id'] and
            not is_same_address(my_host, curr_mem_conf['host'])):
        err = ("Member config is not consistent with current rs "
               "config. \n%s\n. Both have the same _id but addresses"
               " '%s' and '%s' do not resolve to the same host." %
               (document_pretty_string(curr_mem_conf),
                my_host, curr_mem_conf['host']))
    elif (is_same_address(my_host, curr_mem_conf['host']) and
          self.id and
          self.id != curr_mem_conf['_id']):
        err = ("Member config is not consistent with current rs "
               "config. \n%s\n. Both addresses"
               " '%s' and '%s' resolve to the same host but _ids '%s'"
               " and '%s' are not equal." %
               (document_pretty_string(curr_mem_conf),
                my_host, curr_mem_conf['host'],
                self.id, curr_mem_conf['_id']))

if err:
    raise MongoctlException("Invalid member configuration:\n%s \n%s" %
                            (self, err))
<SYSTEM_TASK:> Returns the best secondary member to be used for dumping <END_TASK> <USER_TASK:> Description: def get_dump_best_secondary(self, max_repl_lag=None): """ Returns the best secondary member to be used for dumping best = passives with least lags, if no passives then least lag """
secondary_lag_tuples = []

primary_member = self.get_primary_member()
if not primary_member:
    raise MongoctlException("Unable to determine primary member for"
                            " cluster '%s'" % self.id)

master_status = primary_member.get_server().get_member_rs_status()
if not master_status:
    raise MongoctlException("Unable to determine replicaset status for"
                            " primary member '%s'" %
                            primary_member.get_server().id)

for member in self.get_members():
    if member.get_server().is_secondary():
        repl_lag = member.get_server().get_repl_lag(master_status)
        if max_repl_lag and repl_lag > max_repl_lag:
            log_info("Excluding member '%s' because its repl lag "
                     "(in seconds) %s is more than max %s." %
                     (member.get_server().id, repl_lag, max_repl_lag))
            continue
        secondary_lag_tuples.append((member, repl_lag))

def best_secondary_comp(x, y):
    x_mem, x_lag = x
    y_mem, y_lag = y

    if x_mem.is_passive():
        if y_mem.is_passive():
            return x_lag - y_lag
        else:
            return -1
    elif y_mem.is_passive():
        return 1
    else:
        return x_lag - y_lag

if secondary_lag_tuples:
    secondary_lag_tuples.sort(best_secondary_comp)
    return secondary_lag_tuples[0][0]