<SYSTEM_TASK:> Read one character from stdin. <END_TASK> <USER_TASK:> Description: def getch(self): """ Read one character from stdin. If stdin is not a tty, or if `getch_enabled` is set to False, input is read as one line. :return: unicode: """
ch = self._get_one_char()
if self.keep_input_clean:
    self.clear_input_buffer()
try:
    # accept only unicode characters (for Python 2)
    uch = to_unicode(ch, 'ascii')
except UnicodeError:
    return ''
return uch if self._check_key_repeat(uch) else ''
<SYSTEM_TASK:> Read line from stdin. <END_TASK> <USER_TASK:> Description: def gets(self): """ Read line from stdin. The trailing newline will be omitted. :return: string: """
ret = self.stdin.readline()
if ret == '':
    raise EOFError  # To break out of EOF loop
return ret.rstrip('\n')
<SYSTEM_TASK:> Creates the subparser for this particular command <END_TASK> <USER_TASK:> Description: def create_parser(self, subparsers): """ Creates the subparser for this particular command """
self.parser = subparsers.add_parser(self.name, help=self.help, parents=self.parents)
self.add_arguments()
self.parser.set_defaults(func=self.handle)
return self.parser
<SYSTEM_TASK:> Definition and addition of all arguments. <END_TASK> <USER_TASK:> Description: def add_arguments(self): """ Definition and addition of all arguments. """
if self.parser is None:
    raise TypeError("Parser cannot be None, has create_parser been called?")
for keys, kwargs in self.args.items():
    if not isinstance(keys, tuple):
        keys = (keys,)
    self.parser.add_argument(*keys, **kwargs)
<SYSTEM_TASK:> Close the connection to the AMQP compliant broker. <END_TASK> <USER_TASK:> Description: def close(self): """ Close the connection to the AMQP compliant broker. """
if self.channel is not None:
    self.channel.close()
# open() stores the connection in self._connection; the original
# self.__connection attribute would never have been set.
if self._connection is not None:
    self._connection.close()
<SYSTEM_TASK:> Open a connection to the AMQP compliant broker. <END_TASK> <USER_TASK:> Description: def open(self): """ Open a connection to the AMQP compliant broker. """
self._connection = amqp.Connection(
    host='%s:%s' % (self.hostname, self.port),
    userid=self.username,
    password=self.password,
    virtual_host=self.virtual_host,
    insist=False)
self.channel = self._connection.channel()
<SYSTEM_TASK:> Publish the specified object that the function automatically converts <END_TASK> <USER_TASK:> Description: def publish(self, message_type, message_payload): """ Publish the specified object that the function automatically converts into a JSON string representation. This function uses the lowercased class name of the service as the AMQP routing key. For instance, if the class ``ExampleService`` inherits from the base class ``BaseService``, the methods of an instance of this class will publish messages using the routing key named ``exampleservice``. @param message_type: string representing the type of the event, more likely ``on_something_happened``. @param message_payload: an object to convert into a JSON string representation and to publish. """
payload = json.dumps(jsonpickle.Pickler(unpicklable=False).flatten(message_payload))
message = amqp.Message(payload)
message.properties["delivery_mode"] = 2

name = 'majormode.%s.%s.%s' % (settings.ENVIRONMENT_STAGE,
                               self.service_name.lower(),
                               message_type.lower())

self.channel.queue_declare(queue=name, durable=True, exclusive=False, auto_delete=False)
self.channel.exchange_declare(exchange=name, type="direct", durable=True, auto_delete=False)
self.channel.queue_bind(queue=name, exchange=name, routing_key=name)
self.channel.basic_publish(message, exchange=name, routing_key=name)
<SYSTEM_TASK:> Daemonize the program, i.e. make it run in the "background", detach <END_TASK> <USER_TASK:> Description: def daemonize(): """ Daemonize the program, i.e. make it run in the "background", detach it from its controlling terminal and from its controlling process group session. NOTES: - This function also calls umask(0) and chdir("/") - stdin, stdout, and stderr are redirected from/to /dev/null SEE ALSO: http://www.unixguide.net/unix/programming/1.7.shtml """
try:
    pid = os.fork()
    if pid > 0:
        os._exit(0)  # pylint: disable-msg=W0212
except OSError as e:
    log.exception("first fork() failed: %d (%s)", e.errno, e.strerror)
    sys.exit(1)

os.setsid()
os.umask(0)
os.chdir("/")

try:
    pid = os.fork()
    if pid > 0:
        os._exit(0)  # pylint: disable-msg=W0212
except OSError as e:
    log.exception("second fork() failed: %d (%s)", e.errno, e.strerror)
    sys.exit(1)

try:
    devnull_fd = os.open(os.devnull, os.O_RDWR)
    for stdf in (sys.__stdout__, sys.__stderr__):
        try:
            stdf.flush()
        except Exception:  # pylint: disable-msg=W0703,W0704
            pass
    for stdf in (sys.__stdin__, sys.__stdout__, sys.__stderr__):
        try:
            os.dup2(devnull_fd, stdf.fileno())
        except OSError:  # pylint: disable-msg=W0704
            pass
except Exception:  # pylint: disable-msg=W0703
    log.exception("error during file descriptor redirection")
<SYSTEM_TASK:> Redirect a system stream to a specified file. <END_TASK> <USER_TASK:> Description: def redirect_stream(system_stream, target_stream): """ Redirect a system stream to a specified file. `system_stream` is a standard system stream such as ``sys.stdout``. `target_stream` is an open file object that should replace the corresponding system stream object. If `target_stream` is ``None``, defaults to opening the operating system's null device and using its file descriptor. """
if target_stream is None:
    target_fd = os.open(os.devnull, os.O_RDWR)
else:
    target_fd = target_stream.fileno()
os.dup2(target_fd, system_stream.fileno())
<SYSTEM_TASK:> Forces UTF-8 on stdout and stderr; in some crazy environments, <END_TASK> <USER_TASK:> Description: def ensure_utf8_streams(): """Forces UTF-8 on stdout and stderr; in some crazy environments, they use 'ascii' encoding by default """
def ensure_utf8_stream(stream):
    if not isinstance(stream, io.StringIO):
        stream = getwriter("utf-8")(getattr(stream, "buffer", stream))
        stream.encoding = "utf-8"
    return stream

sys.stdout, sys.stderr = (ensure_utf8_stream(s) for s in (sys.stdout, sys.stderr))
<SYSTEM_TASK:> If list2 does not start with list1, we can't really check, so return 0 <END_TASK> <USER_TASK:> Description: def compare_parts(list1, list2): """ If list2 does not start with list1, we can't really check, so return 0. """
for i, item in enumerate(list1):
    if item != list2[i]:
        return 0
if len(list2) > len(list1):
    return ISDIR
else:
    return ISFILE
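A short usage sketch of the convention above; `ISDIR` and `ISFILE` are assumed to be sentinel constants defined elsewhere in the module, and the path-component lists are hypothetical:

compare_parts(['usr', 'lib'], ['usr', 'lib'])            # ISFILE: exact match
compare_parts(['usr', 'lib'], ['usr', 'lib', 'python'])  # ISDIR: list1 is a proper prefix
compare_parts(['usr', 'lib'], ['usr', 'local'])          # 0: list2 does not start with list1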
<SYSTEM_TASK:> This parallel fetcher uses gevent to fetch multiple URLs concurrently <END_TASK> <USER_TASK:> Description: def fetch_event(urls): """ This parallel fetcher uses gevent to fetch multiple URLs concurrently. """
rs = (grequests.get(u) for u in urls)
return [content.json() for content in grequests.map(rs)]
<SYSTEM_TASK:> Simple case for sending some e-mail using a template. <END_TASK> <USER_TASK:> Description: def sendTemplate(mailer, sender, recipient, template, context, hook=_nop): """ Simple case for sending some e-mail using a template. """
headers, parts = template.evaluate(context)
headers["From"] = sender
headers["To"] = recipient
hook(headers, parts)
content = mime.buildMessage(headers, parts)
return mailer.send(sender, recipient, content)
<SYSTEM_TASK:> Download and return paths of all platform-specific binaries <END_TASK> <USER_TASK:> Description: def get_binaries(): """Download and return paths of all platform-specific binaries"""
paths = []
for arp in [False, True]:
    paths.append(get_binary(arp=arp))
return paths
<SYSTEM_TASK:> Implement the backward propagation using the instructions above. <END_TASK> <USER_TASK:> Description: def backward_propagation(parameters, cache, X, Y): """ Implement the backward propagation using the instructions above. Arguments: parameters -- python dictionary containing our parameters cache -- a dictionary containing "Z1", "A1", "Z2" and "A2". X -- input data of shape (2, number of examples) Y -- "true" labels vector of shape (1, number of examples) Returns: grads -- python dictionary containing your gradients with respect to different parameters """
m = X.shape[1]

# First, retrieve W1 and W2 from the dictionary "parameters".
W1 = parameters["W1"]
W2 = parameters["W2"]

# Retrieve also A1 and A2 from dictionary "cache".
A1 = cache["A1"]
A2 = cache["A2"]

# Backward propagation: calculate dW1, db1, dW2, db2.
dZ2 = A2 - Y
dW2 = 1.0 / m * np.dot(dZ2, A1.T)
db2 = 1.0 / m * np.sum(dZ2, axis=1, keepdims=True)
dZ1 = W2.T * dZ2 * (1 - np.power(A1, 2))
dW1 = 1.0 / m * np.dot(dZ1, X.T)
db1 = 1.0 / m * np.sum(dZ1, axis=1, keepdims=True)

grads = {"dW1": dW1,
         "db1": db1,
         "dW2": dW2,
         "db2": db2}

return grads
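One subtlety worth noting: `W2.T * dZ2` relies on NumPy broadcasting. `W2.T` has shape (n_h, 1) and `dZ2` has shape (1, m), so the elementwise product broadcasts to (n_h, m) and coincides with the matrix product `np.dot(W2.T, dZ2)`. A quick self-contained check with illustrative shapes:

import numpy as np

n_h, m = 4, 10
W2 = np.random.randn(1, n_h)
dZ2 = np.random.randn(1, m)
assert np.allclose(W2.T * dZ2, np.dot(W2.T, dZ2))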
<SYSTEM_TASK:> Updates parameters using the gradient descent update rule given above <END_TASK> <USER_TASK:> Description: def update_parameters(parameters, grads, learning_rate=1.2): """ Updates parameters using the gradient descent update rule given above Arguments: parameters -- python dictionary containing your parameters grads -- python dictionary containing your gradients Returns: parameters -- python dictionary containing your updated parameters """
# Retrieve each parameter from the dictionary "parameters"
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]

# Retrieve each gradient from the dictionary "grads"
dW1 = grads["dW1"]
db1 = grads["db1"]
dW2 = grads["dW2"]
db2 = grads["db2"]

# Update rule for each parameter
W1 -= learning_rate * dW1
b1 -= learning_rate * db1
W2 -= learning_rate * dW2
b2 -= learning_rate * db2

parameters = {"W1": W1,
              "b1": b1,
              "W2": W2,
              "b2": b2}

return parameters
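A minimal self-contained check of the update rule on toy shapes (names mirror the exercise; the values are illustrative):

import numpy as np

parameters = {"W1": np.ones((4, 2)), "b1": np.zeros((4, 1)),
              "W2": np.ones((1, 4)), "b2": np.zeros((1, 1))}
grads = {"dW1": 0.1 * np.ones((4, 2)), "db1": np.zeros((4, 1)),
         "dW2": 0.1 * np.ones((1, 4)), "db2": np.zeros((1, 1))}

updated = update_parameters(parameters, grads, learning_rate=1.2)
# each weight moves from 1.0 to 1.0 - 1.2 * 0.1 = 0.88
assert np.allclose(updated["W1"], 0.88)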
<SYSTEM_TASK:> Using the learned parameters, predicts a class for each example in X <END_TASK> <USER_TASK:> Description: def predict(parameters, X): """ Using the learned parameters, predicts a class for each example in X Arguments: parameters -- python dictionary containing your parameters X -- input data of size (n_x, m) Returns predictions -- vector of predictions of our model (red: 0 / blue: 1) """
# Computes probabilities using forward propagation,
# and classifies to 0/1 using 0.5 as the threshold.
A2, cache = forward_propagation(X, parameters)
predictions = np.array([1 if i > 0.5 else 0 for i in A2[0]])

return predictions
<SYSTEM_TASK:> Send email; the 163 mailbox service is recommended, as it is the one tested. <END_TASK> <USER_TASK:> Description: def send_email(sender, pw, to, subject, content, files=None, service='163'): """Send email; the 163 mailbox service is recommended, as it is the one tested. :param sender: str email address of sender :param pw: str password for sender :param to: str email addressee :param subject: str subject of email :param content: str content of email :param files: list path list of attachments :param service: str smtp server address, optional is ['163', 'qq'] :return: None """
se = EmailSender(from_=sender, pw=pw, service=service)
se.send_email(to=to, subject=subject, content=content, files=files)
se.quit()
<SYSTEM_TASK:> Load an object from a module by name <END_TASK> <USER_TASK:> Description: def load_object_by_name(object_name): """Load an object from a module by name"""
mod_name, attr = object_name.rsplit('.', 1)
mod = import_module(mod_name)
return getattr(mod, attr)
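For example, with a standard-library attribute path (a sketch; `import_module` comes from `importlib` in the snippet's module):

import os

join = load_object_by_name('os.path.join')
assert join is os.path.join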
<SYSTEM_TASK:> Benchmark for 1000 objects with 2 fields. <END_TASK> <USER_TASK:> Description: def bench_serpy(): """Benchmark for 1000 objects with 2 fields. """
class FooSerializer(serpy.DictSerializer):
    """The serializer schema definition."""
    # Use a Field subclass like IntField if you need more validation.
    attr_2 = serpy.IntField()
    attr_1 = serpy.StrField()

return [FooSerializer(obj).data for obj in object_loader()]
<SYSTEM_TASK:> Return the decomposition of the specified locale into a language code <END_TASK> <USER_TASK:> Description: def decompose_locale(locale, strict=True): """ Return the decomposition of the specified locale into a language code and a country code. @param locale: a string representation of a locale, i.e., an ISO 639-3 alpha-3 code (or alpha-2 code), optionally followed by a dash character ``-`` and an ISO 3166-1 alpha-2 code. If ``None`` is passed, the function returns the default locale, i.e., standard English ``('eng', None)``. @param strict: indicate whether the string representation of a locale has to be strictly compliant with RFC 4646, or whether a Java-style locale (character ``_`` instead of ``-``) is accepted. @return: a tuple ``(language_code, country_code)``, where the first code represents an ISO 639-3 alpha-3 code (or alpha-2 code), and the second code an ISO 3166-1 alpha-2 code. """
if locale is None:
    return ('eng', None)

match = REGEX_LOCALE.match(locale)
if match is None:
    if strict:
        raise Locale.MalformedLocaleException()

    match = REGEX_JAVA_LOCALE.match(locale)
    if match is None:
        raise Locale.MalformedLocaleException()

(_, locale_language_code, locale_country_code, language_code) = match.groups()

return (locale_language_code, locale_country_code) if language_code is None \
    else (language_code, None)
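Illustrative calls, with results inferred from the docstring rather than from the regular expressions (which live elsewhere in the module):

decompose_locale(None)                    # ('eng', None) -- default locale
decompose_locale('eng-US')                # ('eng', 'US')
decompose_locale('eng_US', strict=False)  # ('eng', 'US') -- Java-style separator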
<SYSTEM_TASK:> Load `drops` from the given dropin. <END_TASK> <USER_TASK:> Description: def load_drops(self, dropin): """Load `drops` from the given dropin. Args: dropin (string): path of a dropin, e.g. dropin.auth Returns: An iterable containing the drops objects in the given dropin This method loads drops objects by convention. For example, assume we want to load drops type `models` from dropin `dropin.articles`. The drops are discovered with the following sequence:: import dropin.articles drops = dropin.articles.models If anything goes wrong, the next try is:: import dropin.articles.models as drops If the current drops object has the attribute **__drops__**:: drops = drops.__drops__ If the current drops object is a callable:: drops = drops() If no drops were found, an empty list is returned. """
obj = load_object(dropin)
try:
    drops = getattr(obj, self.drops_type)
except AttributeError:
    try:
        drops = load_object('%s.%s' % (dropin, self.drops_type))
    except ImportError:
        drops = None

if hasattr(drops, '__drops__'):
    drops = drops.__drops__

if callable(drops):
    drops = drops(self.app)

return drops or []
<SYSTEM_TASK:> Register the `drops` in given `dropin` to a flask app. <END_TASK> <USER_TASK:> Description: def register_drops(self, dropin): """Register the `drops` in given `dropin` to a flask app. Args: app (Flask): the flask app to be initialized dropin (string): path of a python module or object, e.g. dropin.auth This is the only method that a drops loader **must** implement. The default behavior in the base loader is to store all the drops objects in the app's extensions dict. For example, the drops with type `models` will be stored in a list which is accessible through:: app.extensions['dropin']['models'] or through a DropInManager instance which provides a simple proxy to the dropin extension of `current_app`:: dropin = DropInManager() dropin.models Whereas the BlueprintsLoader overrides this method to actually register the blueprints to the app. """
drops = self.app.extensions['dropin'].setdefault(self.drops_type, [])
drops.extend(self.load_drops(dropin))
<SYSTEM_TASK:> Merge the `others` schema into this instance. <END_TASK> <USER_TASK:> Description: def merge(cls, *others): """ Merge the `others` schema into this instance. The values will all be read from the provider of the original object. """
for other in others:
    for k, v in other:
        setattr(cls, k, BoundValue(cls, k, v.value))
<SYSTEM_TASK:> Validate against NodeType. <END_TASK> <USER_TASK:> Description: def field_value(self, value): """Validate against NodeType. """
if not self.is_array:
    return self.field_type(value)
if isinstance(value, (list, tuple, set)):
    return [self.field_type(item) for item in value]
return self.field_type(value)
<SYSTEM_TASK:> Validate value before actual instance setting based on type. <END_TASK> <USER_TASK:> Description: def is_valid(self, value): """Validate value before actual instance setting based on type. Args: value (object): The value object for validation. Returns: True if value validation succeeds else False. """
if not self.is_array:
    return self._valid(value)
if isinstance(value, (list, set, tuple)):
    return all(self._valid(item) for item in value)
return self._valid(value)
<SYSTEM_TASK:> A simple dialog <END_TASK> <USER_TASK:> Description: def simple(type, short, long=None, parent=None, buttons=gtk.BUTTONS_OK, default=None, **kw): """A simple dialog :param type: The type of dialog :param short: The short description :param long: The long description :param parent: The parent Window to make this dialog transient to :param buttons: A buttons enum :param default: A default response """
if buttons == gtk.BUTTONS_OK:
    default = gtk.RESPONSE_OK
return _message_dialog(type, short, long, parent=parent,
                       buttons=buttons, default=default, **kw)
<SYSTEM_TASK:> An open dialog. <END_TASK> <USER_TASK:> Description: def open_filechooser(title, parent=None, patterns=None, folder=None, filter=None, multiple=False, _before_run=None, action=None): """An open dialog. :param parent: window or None :param patterns: file match patterns :param folder: initial folder :param filter: file filter Use of filter and patterns at the same time is invalid. """
assert not (patterns and filter)

if multiple:
    if action is not None and action != gtk.FILE_CHOOSER_ACTION_OPEN:
        raise ValueError('`multiple` is only valid for the action '
                         '`gtk.FILE_CHOOSER_ACTION_OPEN`.')
    action = gtk.FILE_CHOOSER_ACTION_OPEN
else:
    assert action is not None

filechooser = gtk.FileChooserDialog(title, parent, action,
                                    (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                                     gtk.STOCK_OPEN, gtk.RESPONSE_OK))

if multiple:
    filechooser.set_select_multiple(True)

if patterns or filter:
    if not filter:
        filter = gtk.FileFilter()
        for pattern in patterns:
            filter.add_pattern(pattern)
    filechooser.set_filter(filter)

filechooser.set_default_response(gtk.RESPONSE_OK)

if folder:
    filechooser.set_current_folder(folder)

try:
    if _before_run is not None:
        _before_run(filechooser)
    response = filechooser.run()
    if response not in (gtk.RESPONSE_OK, gtk.RESPONSE_NONE):
        return
    if multiple:
        return filechooser.get_filenames()
    else:
        return filechooser.get_filename()
finally:
    _destroy(filechooser)
<SYSTEM_TASK:> Count the total number of NaNs in every column <END_TASK> <USER_TASK:> Description: def count_nans(df): """ Count the total number of NaNs in every column Parameters -------------------- df pd.DataFrame Returns -------------------- nas_df pd.DataFrame """
cols = df.columns
res = []
for col in cols:
    length = len(df[col])
    not_nas = len(df[col].dropna())
    nas = length - not_nas
    rate = round(nas / length, 4)
    # add unique value
    uv = len(df[col].unique())
    res_ = (col, nas, not_nas, rate, uv)
    res.append(res_)

nas_df = pd.DataFrame(res, columns=['Column', 'NaNs', 'Not_NaNs', 'Rate', 'UV'])
return nas_df
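A quick demonstration on a toy frame (`pd` and `np` are the usual pandas/numpy aliases the snippet assumes; the printed layout is approximate):

import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1, np.nan, 3], 'b': ['x', 'y', 'y']})
print(count_nans(df))
#   Column  NaNs  Not_NaNs    Rate  UV
# 0      a     1         2  0.3333   3
# 1      b     0         3  0.0000   2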
<SYSTEM_TASK:> Attempt to find the project root, returns None if not found <END_TASK> <USER_TASK:> Description: def find_project_dir(path=os.getcwd()): """Attempt to find the project root, returns None if not found"""
path_split = os.path.split(path)
while path_split[1]:
    if in_armstrong_project(path):
        return path
    path = path_split[0]
    path_split = os.path.split(path)

# we ran out of parents
return None
<SYSTEM_TASK:> Prepare headers object for request (add cache information) <END_TASK> <USER_TASK:> Description: def prepare_headers(self, headers, cache_info=None): """ Prepare headers object for request (add cache information). :param headers: Headers object :type headers: dict :param cache_info: Cache information to add :type cache_info: floscraper.models.CacheInfo :return: Prepared headers :rtype: dict """
if self.use_advanced and cache_info:
    hkeys = headers.keys()
    if cache_info.access_time and "If-Modified-Since" not in hkeys:
        headers['If-Modified-Since'] = cache_info.access_time.strftime(
            "%a, %d %b %Y %H:%M:%S GMT"
        )
    if cache_info.etag and "If-None-Match" not in hkeys:
        headers['If-None-Match'] = cache_info.etag
return headers
<SYSTEM_TASK:> Try to retrieve url from cache if available <END_TASK> <USER_TASK:> Description: def get(self, url, ignore_access_time=False): """ Try to retrieve url from cache if available :param url: Url to retrieve :type url: str | unicode :param ignore_access_time: Should ignore the access time :type ignore_access_time: bool :return: (data, CacheInfo) None, None -> not found in cache None, CacheInfo -> found, but is expired data, CacheInfo -> found in cache :rtype: (None | str | unicode, None | floscraper.models.CacheInfo) """
key = hashlib.md5(url).hexdigest()
accessed = self._cache_meta_get(key)

if not accessed:
    # not previously cached
    self.debug("From inet {}".format(url))
    return None, None

if isinstance(accessed, dict):
    cached = CacheInfo.from_dict(accessed)
else:
    cached = CacheInfo(accessed)

now = now_utc()

if now - cached.access_time > self.duration and not ignore_access_time:
    # cached expired -> remove
    self.debug("From inet (expired) {}".format(url))
    return None, cached

try:
    res = self._cache_get(key)
except:
    self.exception("Failed to read cache")
    self.debug("From inet (failure) {}".format(url))
    return None, None

self.debug("From cache {}".format(url))
return res, cached
<SYSTEM_TASK:> Basic authority parser that splits authority into component parts <END_TASK> <USER_TASK:> Description: def split_authority(authority): """ Basic authority parser that splits authority into component parts >>> split_authority("user:password@host:port") ('user', 'password', 'host', 'port') """
if '@' in authority:
    userinfo, hostport = authority.split('@', 1)
else:
    userinfo, hostport = None, authority

if userinfo and ':' in userinfo:
    user, passwd = userinfo.split(':', 1)
else:
    user, passwd = userinfo, None

if hostport and ':' in hostport:
    host, port = hostport.split(':', 1)
else:
    host, port = hostport, None

if not host:
    host = None

return (user, passwd, host, port)
<SYSTEM_TASK:> Parse the URI. <END_TASK> <USER_TASK:> Description: def parse(self, uri, defaults=None): """Parse the URI. uri is the uri to parse. defaults is a scheme-dependent list of values to use if there is no value for that part in the supplied URI. The return value is a tuple of scheme-dependent length. """
return tuple([self.scheme_of(uri)] + list(self.parser_for(uri)(defaults).parse(uri)))
<SYSTEM_TASK:> Join the parts of a URI back together to form a valid URI. <END_TASK> <USER_TASK:> Description: def unparse(self, pieces, defaults=None): """Join the parts of a URI back together to form a valid URI. pieces is a tuple of URI pieces. The scheme must be in pieces[0] so that the rest of the pieces can be interpreted. """
return self.parser_for(pieces[0])(defaults).unparse(pieces)
<SYSTEM_TASK:> Return the Parser object used to parse a particular URI. <END_TASK> <USER_TASK:> Description: def parser_for(self, uri): """Return the Parser object used to parse a particular URI. Parser objects are required to have only 'parse' and 'unparse' methods. """
return self._parsers.get(self.scheme_of(uri), DefaultURIParser)
<SYSTEM_TASK:> Extract electric field from simulation file "V_0Ereim.dat" <END_TASK> <USER_TASK:> Description: def load_field(wdir, shape_grid): """Extract electric field from simulation file "V_0Ereim.dat" Parameters ---------- wdir: str or pathlib.Path path to the working directory shape_grid: tuple of ints the shape of the simulation data grid Notes ----- These are the files present in the working directory bhfield.log: log file containing scattering coeffs etc (as well as definitions of the output fields) bhdebug.log: extra information (checking roundoff errors etc) E_0allf.dat: E2 = EFSQ[dimensionless] = (relative) squared amplitude of complex E-field; Ec*(Ec^*) / E0**2 E_1core.dat: E2 inside the core E_2coat.dat: E2 inside the coating E_3exte.dat: E2 at the outside of the sphere U_*.dat: U = UABS[F m-1 s-1] is the (relative) absorbed energy per unit volume and time; Ua [W m-3] / E0**2 EU_zax.txt: E2(0,0,z) and U(0,0,z) along z-axis; it may be blank if the grid does not include such points V_0Eelli.dat: vector electric field; vibration ellipse (major & minor axes), ellipticity, azimuth[deg], p-a angle (phi)[deg], handedness angle[deg] & handedness V_0Ereim.dat: vector electric field; snapshots [Re (t=0), Im (t=period/4)] V_0Helli.dat: vector magnetic field; vibration ellipse (major & minor axes), ellipticity, azimuth[deg], p-a angle (phi)[deg], E-&H-phase dif[deg], handedness angle[deg] & handedness V_0Hreim.dat: vector magnetic field; snapshots [Re (t=0), Im (t=period/4)] V_0Poynt.dat: Poynting vector <S>, EH angle, optical irradiance (intensity) (norm<S>), I(plane), -div<S> (1st-3rd), UABS & DIVSR V_1*.dat: vector fields inside the core V_2*.dat: vector fields inside the coating V_3*.dat: vector fields at the outside of the sphere """
wdir = pathlib.Path(wdir)
check_simulation(wdir)

field_file = wdir / "V_0Ereim.dat"
a = np.loadtxt(field_file)

assert shape_grid[0] == int(shape_grid[0]), "resulting x-size is not an integer"
assert shape_grid[1] == int(shape_grid[1]), "resulting y-size is not an integer"

Ex = a[:, 3] + 1j * a[:, 6]
# Ey = a[:, 4] + 1j * a[:, 7]
# Ez = a[:, 5] + 1j * a[:, 8]

Exend = Ex.reshape((shape_grid[1], shape_grid[0])).transpose()
return Exend
<SYSTEM_TASK:> Check bhdebug.txt to make sure that you specify enough digits to <END_TASK> <USER_TASK:> Description: def check_simulation(wdir): """ Check bhdebug.txt to make sure that you specify enough digits to overcome roundoff errors. """
wdir = pathlib.Path(wdir)
field = wdir / "V_0Ereim.dat"
if not (field.exists() and field.stat().st_size > 130):
    msg = "Output {} does not exist or is too small!".format(field)
    raise BHFIELDExecutionError(msg)
<SYSTEM_TASK:> Verify the response message. <END_TASK> <USER_TASK:> Description: def verify_response(self, response_json, signed_id_name='transactionid'): """ Verify the response message. :param response_json: :param signed_id_name: :return: """
auth_json = response_json.get('auth', {})
nonce = auth_json.get('nonce', '')
timestamp = auth_json.get('timestamp', '')
signature = binascii.unhexlify(auth_json.get('signature', ''))
signed_id = response_json.get(signed_id_name, '')

return self.verify_signature(signature=signature, nonce=nonce,
                             timestamp=timestamp, signed_id=signed_id)
<SYSTEM_TASK:> Compare the current place object to another passed to the comparison <END_TASK> <USER_TASK:> Description: def __eq__(self, other): """ Compare the current place object to another passed to the comparison method. The two place objects must have the same identification, even if some of their attributes might be different. @param other: a ``Place`` instance to compare with the current place object. @return: ``True`` if the given place corresponds to the current place; ``False`` otherwise. """
return self.place_id and other.place_id \
    and self.place_id == other.place_id
<SYSTEM_TASK:> Build a ``Place`` instance from the specified JSON object. <END_TASK> <USER_TASK:> Description: def from_json(payload): """ Build a ``Place`` instance from the specified JSON object. @param payload: JSON representation of a place:: { "area_id": string, "address": { component_type: string, ... } \ [ { "locale": string, component_type: string, ... }, ... ], "category": string, "contacts": [ [ name:string, value:string, is_primary:boolean ], ... ], "cover_photo_id": string, "cover_photo_url": string, "boundaries": [ vertex, ... ], "locale": string, "location": coordinates, "timezone": integer } where: * ``area_id`` (optional): identification of the geographic area where the place is located in. This parameter is optional if the parameter ``address`` is passed. Both ``area_id`` and ``address`` can be passed to the function. If this parameter is passed to the function, it takes precedence over any administrative division of the same level or larger that would be defined as part of the address components. * ``address`` (required): postal address of the place, composed of one or more address components, which textual information is written in the specified locale. An address component is defined with a component type and its value. The component type is a string representation of an item of the enumeration ``AddressComponentType``. * ``category_id`` (optional): category qualifying this place. * ``contacts`` (optional): list of properties such as e-mail addresses, phone numbers, etc., in respect of the electronic business card specification (vCard). The contact information is represented by a list of tuples of the following form:: [ [ name:ContantPropertyName, value:string[, is_primary:boolean] ], ... ] where: * ``name`` (required): type of this contact information, which can be one of these standard names in respect of the electronic business card specification (vCard). * ``value`` (required): value of this contact information representing by a string, such as ``+84.01272170781``, the formatted value for a telephone number property. * ``is_primary`` (optional): indicate whether this contact information is the primary for this place. By default, the first contact information of a given type is the primary of this place. * ``cover_photo_id``: identification of the cover photo of the place, if any defined. * ``cover_photo_url``: Uniform Resource Locator (URL) that specifies the location of the cover photo of the place, if any defined. The client application can use this URL and append the query parameter ``size`` to specify a given pixel resolution of this photo, such as ``thumbnail``, ``small``, ``medium``, ``large``. * ``boundaries`` (optional): a collection of one or more polygons that delimit the topological space of the place. All of the polygons are within the spatial reference system. It corresponds to an array of vertices. There must be at least three vertices. Each vertex is a tuple composed of a longitude, a latitude, and a altitude:: [ longitude, latitude, altitude ] Note that the first and last vertices must not be identical; a polygon is "auto-closed" between the first and last vertices. * ``locale`` (required): locale of the textual information that describes this place. A locale corresponds to a tag respecting RFC 4646, i.e., a ISO 639-3 alpha-3 code element optionally followed by a dash character ``-`` and a ISO 3166-1 alpha-2 code (referencing the country that this language might be specific to). 
For example: ``eng`` (which denotes a standard English), ``eng-US`` (which denotes an American English). * ``location`` (optional): geographic coordinates of the location of the place represented with the following JSON structure:: { "accuracy": decimal "altitude": decimal, "latitude": decimal, "longitude": decimal, } where: * ``accuracy`` (optional): accuracy of the place's position in meters. * ``altitude`` (optional): altitude in meters of the place. * ``latitude`` (required): latitude-angular distance, expressed in decimal degrees (WGS84 datum), measured from the center of the Earth, of a point north or south of the Equator corresponding to the place's location. * ``longitude`` (required): longitude-angular distance, expressed in decimal degrees (WGS84 datum), measured from the center of the Earth, of a point east or west of the Prime Meridian corresponding to the place's location. .. note:: The parameter ``location`` is ignored when the parameter ``boundaries`` is provided. The platform computes the coordinates of the geometric center (centroid) of the polygon representing the boundaries of the place. It corresponds to the arithmetic mean ("average") position of all the points in the shape. * ``timezone`` (required): time zone at the place's location. It is the difference between the time at this location and UTC (Coordinated Universal Time). UTC is also known as GMT or Greenwich Mean Time or Zulu Time. @note: the name of the place corresponds to the address component ``recipient_name``. @return: a ``Place`` instance or ``None`` if the JSON payload is nil. """
if payload is None:
    return None

return Place(
    [(float(lon), float(lat), float(alt))
     for (lon, lat, alt) in payload['boundaries']]
    if payload.get('boundaries')
    else GeoPoint.from_json(payload['location']),
    address=payload.get('address') and (
        Place.__parse_address__(payload['address'])
        if isinstance(payload['address'], dict)
        else [Place.__parse_address__(address) for address in payload['address']]),
    category_id=cast.string_to_uuid(payload.get('category_id')),
    area_id=cast.string_to_uuid(payload.get('area_id')),
    contacts=payload.get('contacts') and [
        (cast.string_to_enum(contact[0], ContactPropertyName),  # name
         contact[1],                                            # value
         contact[2] if len(contact) >= 3 else None,             # is_primary
         contact[3] if len(contact) == 4 else None)             # is_verified
        for contact in payload['contacts']],
    cover_photo_id=cast.string_to_uuid(payload.get('cover_photo_id')),
    cover_photo_url=payload.get('cover_photo_url'),
    # `or` (not `and`): fall back to DEFAULT_LOCALE when no locale is given
    locale=cast.string_to_locale(payload.get('locale')) or DEFAULT_LOCALE,
    object_status=payload.get("object_status"),
    place_id=cast.string_to_uuid(payload.get('place_id')),
    timezone=payload.get('timezone'))
<SYSTEM_TASK:> An int, float, long, bool, string, or None literal with the given <END_TASK> <USER_TASK:> Description: def ex_literal(val): """An int, float, long, bool, string, or None literal with the given value. """
if val is None:
    return ast.Name('None', ast.Load())
elif isinstance(val, bool):
    # checked before int, since bool is a subclass of int; str(val)
    # yields the 'True'/'False' name (bytes(val) would not on Python 3)
    return ast.Name(str(val), ast.Load())
elif isinstance(val, int):
    return ast.Num(val)
elif isinstance(val, str):
    return ast.Str(val)
raise TypeError(u'no literal for {0}'.format(type(val)))
<SYSTEM_TASK:> Assign an expression into a single variable. The expression may <END_TASK> <USER_TASK:> Description: def ex_varassign(name, expr): """Assign an expression into a single variable. The expression may either be an `ast.expr` object or a value to be used as a literal. """
if not isinstance(expr, ast.expr):
    expr = ex_literal(expr)
return ast.Assign([ex_lvalue(name)], expr)
<SYSTEM_TASK:> A function-call expression with only positional parameters. The <END_TASK> <USER_TASK:> Description: def ex_call(func, args): """A function-call expression with only positional parameters. The function may be an expression or the name of a function. Each argument may be an expression or a value to be used as a literal. """
if isinstance(func, str):
    func = ex_rvalue(func)

args = list(args)
for i in range(len(args)):
    if not isinstance(args[i], ast.expr):
        args[i] = ex_literal(args[i])

if sys.version_info[:2] < (3, 5):
    return ast.Call(func, args, [], None, None)
else:
    return ast.Call(func, args, [])
<SYSTEM_TASK:> Compile a list of statements as the body of a function and return <END_TASK> <USER_TASK:> Description: def compile_func(arg_names, statements, name='_the_func', debug=False): """Compile a list of statements as the body of a function and return the resulting Python function. If `debug`, then print out the bytecode of the compiled function. """
func_def = ast.FunctionDef(
    name=name,
    args=ast.arguments(
        args=[ast.arg(arg=n, annotation=None) for n in arg_names],
        kwonlyargs=[],
        kw_defaults=[],
        defaults=[ex_literal(None) for _ in arg_names],
    ),
    body=statements,
    decorator_list=[],
)

mod = ast.Module([func_def])
ast.fix_missing_locations(mod)

prog = compile(mod, '<generated>', 'exec')

# Debug: show bytecode.
if debug:
    dis.dis(prog)
    for const in prog.co_consts:
        if isinstance(const, types.CodeType):
            dis.dis(const)

the_locals = {}
exec(prog, {}, the_locals)
return the_locals[name]
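A hedged usage sketch, relying on the `ex_literal` helper defined earlier. Note it assumes the interpreter this snippet targets: Python 3.8+ additionally requires the `posonlyargs` and `type_ignores` AST fields, which the function above does not set.

import ast

body = [ast.Return(ex_literal(42))]
const42 = compile_func([], body, name='const42')
assert const42() == 42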
<SYSTEM_TASK:> Parse a top-level template string Expression. Any extraneous text <END_TASK> <USER_TASK:> Description: def _parse(template): """Parse a top-level template string Expression. Any extraneous text is considered literal text. """
parser = Parser(template)
parser.parse_expression()

parts = parser.parts
remainder = parser.string[parser.pos:]
if remainder:
    parts.append(remainder)
return Expression(parts)
<SYSTEM_TASK:> Evaluate the symbol in the environment, returning a Unicode <END_TASK> <USER_TASK:> Description: def evaluate(self, env): """Evaluate the symbol in the environment, returning a Unicode string. """
if self.ident in env.values:
    # Substitute for a value.
    return env.values[self.ident]
else:
    # Keep original text.
    return self.original
<SYSTEM_TASK:> Evaluate the function call in the environment, returning a <END_TASK> <USER_TASK:> Description: def evaluate(self, env): """Evaluate the function call in the environment, returning a Unicode string. """
if self.ident in env.functions:
    arg_vals = [expr.evaluate(env) for expr in self.args]
    try:
        out = env.functions[self.ident](*arg_vals)
    except Exception as exc:
        # Function raised exception! Maybe inlining the name of
        # the exception will help debug.
        return u'<%s>' % str(exc)
    return str(out)
else:
    return self.original
<SYSTEM_TASK:> Evaluate the entire expression in the environment, returning <END_TASK> <USER_TASK:> Description: def evaluate(self, env): """Evaluate the entire expression in the environment, returning a Unicode string. """
out = []
for part in self.parts:
    if isinstance(part, str):
        out.append(part)
    else:
        out.append(part.evaluate(env))
return u''.join(map(str, out))
<SYSTEM_TASK:> Compile the expression to a list of Python AST expressions, a <END_TASK> <USER_TASK:> Description: def translate(self): """Compile the expression to a list of Python AST expressions, a set of variable names used, and a set of function names. """
expressions = []
varnames = set()
funcnames = set()
for part in self.parts:
    if isinstance(part, str):
        expressions.append(ex_literal(part))
    else:
        e, v, f = part.translate()
        expressions.extend(e)
        varnames.update(v)
        funcnames.update(f)
return expressions, varnames, funcnames
<SYSTEM_TASK:> Parse a list of arguments starting at ``pos``, returning a <END_TASK> <USER_TASK:> Description: def parse_argument_list(self): """Parse a list of arguments starting at ``pos``, returning a list of Expression objects. Does not modify ``parts``. Should leave ``pos`` pointing to a } character or the end of the string. """
# Try to parse a subexpression in a subparser.
expressions = []

while self.pos < len(self.string):
    subparser = Parser(self.string[self.pos:], in_argument=True)
    subparser.parse_expression()

    # Extract and advance past the parsed expression.
    expressions.append(Expression(subparser.parts))
    self.pos += subparser.pos

    if self.pos >= len(self.string) or \
            self.string[self.pos] == GROUP_CLOSE:
        # Argument list terminated by EOF or closing brace.
        break

    # Only other way to terminate an expression is with ,.
    # Continue to the next argument.
    assert self.string[self.pos] == ARG_SEP
    self.pos += 1

return expressions
<SYSTEM_TASK:> Return True if the value contains only printable characters, False otherwise <END_TASK> <USER_TASK:> Description: def is_print(value, space=True, tab=False, crlf=False): """ Return True if the value contains only printable characters, False otherwise. """
if not isinstance(value, basestring):
    return False

regex = r'\x00-\x08\x0B\x0C\x0E-\x1F\x7F'
if not space:
    regex += r'\x20'
if tab:
    regex += r'\x09'
if crlf:
    regex += r'\x0A\x0D'

# re.search (not re.match) so that a control character anywhere in the
# string, not only at its start, marks the value as non-printable
return re.search(r'[' + regex + ']', value, re.U) is None
<SYSTEM_TASK:> Fill file at @path with @lines then flush all buffers <END_TASK> <USER_TASK:> Description: def file_writelines_flush_sync(path, lines): """ Fill file at @path with @lines then flush all buffers (Python and system buffers) """
fp = open(path, 'w')
try:
    fp.writelines(lines)
    flush_sync_file_object(fp)
finally:
    fp.close()
<SYSTEM_TASK:> Recursively create some directories if needed so that the directory where <END_TASK> <USER_TASK:> Description: def file_w_create_directories(filepath): """ Recursively create some directories if needed so that the directory where @filepath must be written exists, then open it in "w" mode and return the file object. """
dirname = os.path.dirname(filepath)
if dirname and dirname != os.path.curdir and not os.path.isdir(dirname):
    os.makedirs(dirname)
return open(filepath, 'w')
<SYSTEM_TASK:> Capture existing preauth. <END_TASK> <USER_TASK:> Description: def capture_sale(self, transaction_id, capture_amount, message=None): """ Capture existing preauth. :param transaction_id: :param capture_amount: :param message: :return: status code """
request_data = {
    "amount": self.base.convert_decimal_to_hundreds(capture_amount),
    "currency": self.currency,
    "message": message
}

url = "%s%s%s/capture" % (self.api_endpoint,
                          constants.TRANSACTION_STATUS_ENDPOINT,
                          transaction_id)
username = self.base.get_username()
password = self.base.get_password(username=username, request_url=url)

response = requests.put(url, json=request_data,
                        auth=HTTPBasicAuth(username=username, password=password))

if response.status_code == 404:
    raise TransactionDoesNotExist('Wrong transaction ID!')

if not self.base.verify_response(response.json()):
    raise SignatureValidationException('Server signature verification has failed')

response_json = response.json()
return response_json.get('status')
<SYSTEM_TASK:> Returns the folder where the code of the caller's caller lives <END_TASK> <USER_TASK:> Description: def caller_folder(): """ Returns the folder where the code of the caller's caller lives """
import inspect

caller_file = inspect.stack()[2][1]
if os.path.exists(caller_file):
    return os.path.abspath(os.path.dirname(caller_file))
else:
    return os.path.abspath(os.getcwd())
<SYSTEM_TASK:> Response as dict <END_TASK> <USER_TASK:> Description: def to_dict(self): """ Response as dict :return: response :rtype: dict """
cache_info = None
if self.cache_info:
    cache_info = self.cache_info.to_dict()
return {
    'cache_info': cache_info,
    'html': self.html,
    'scraped': self.scraped,
    'raw': self.raw
}
<SYSTEM_TASK:> Response from dict <END_TASK> <USER_TASK:> Description: def from_dict(d): """ Response from dict :param d: Dict to load :type d: dict :return: response :rtype: Response """
if d is None:
    return None
return Response(
    d.get('html'),
    CacheInfo.from_dict(d.get('cache_info')),
    d.get('scraped'),
    d.get('raw')
)
<SYSTEM_TASK:> Return a cv image of the board or empty board if not provided. <END_TASK> <USER_TASK:> Description: def _create_board_image_cv(self, board=None): """Return a cv image of the board or empty board if not provided."""
board = board or base.Board()  # empty board by default
tile_h, tile_w = self._TILE_SHAPE[0:2]
board_shape = tile_h * 8, tile_w * 8, 3
board_image = numpy.zeros(board_shape, dtype=numpy.uint8)
# place each tile on the image
for (row, col), tile in board.positions_with_tile():
    tile_image = self._tile_images[tile._type]
    t, l = row * tile_h, col * tile_w
    b, r = t + tile_h, l + tile_w
    board_image[t:b, l:r] = tile_image
return board_image
<SYSTEM_TASK:> Add a white tile border to indicate the swap. <END_TASK> <USER_TASK:> Description: def _draw_swap_cv(self, board_image, swap): """Add a white tile border to indicate the swap."""
tile_h, tile_w = self._TILE_SHAPE[0:2]
# get a single bounding box
(row_1, col_1), (row_2, col_2) = swap
t = tile_h * min(row_1, row_2)
b = tile_h * (1 + max(row_1, row_2))
l = tile_w * min(col_1, col_2)
r = tile_w * (1 + max(col_1, col_2))
top_left = (l, t)
bottom_right = (r, b)
data.cv2.rectangle(board_image, top_left, bottom_right,
                   color=(255, 255, 255), thickness=4)
<SYSTEM_TASK:> Present the results if they have become available or timed out. <END_TASK> <USER_TASK:> Description: def _scheduled_check_for_summaries(self): """Present the results if they have become available or timed out."""
if self._analysis_process is None:
    return

# handle time out
timed_out = time.time() - self._analyze_start_time > self.time_limit
if timed_out:
    self._handle_results('Analysis timed out but managed\n'
                         ' to get lower turn results.',
                         'Analysis timed out with no results.')
    return

# handle standard completion
try:
    self._analysis_process.join(0.001)
except AssertionError:
    pass  # if some timing issue with closed process, just continue
if not self._analysis_process.is_alive():
    self._handle_results('Completed analysis.',
                         'Unable to find the game on screen.')
    return

# finally, if it's still alive, then come back later
self._base.after(self._POLL_PERIOD_MILLISECONDS,
                 self._scheduled_check_for_summaries)
<SYSTEM_TASK:> Get the next summary and present it. <END_TASK> <USER_TASK:> Description: def _next(self): """Get the next summary and present it."""
self.summaries.rotate(-1)
current_summary = self.summaries[0]
self._update_summary(current_summary)
<SYSTEM_TASK:> Get the previous summary and present it. <END_TASK> <USER_TASK:> Description: def _previous(self): """Get the previous summary and present it."""
self.summaries.rotate()
current_summary = self.summaries[0]
self._update_summary(current_summary)
<SYSTEM_TASK:> Update the message area with blank or a message. <END_TASK> <USER_TASK:> Description: def _update_notification(self, message=None): """Update the message area with blank or a message."""
if message is None:
    message = ''
message_label = self._parts['notification label']
message_label.config(text=message)
self._base.update()
<SYSTEM_TASK:> Update all parts of the summary or clear when no summary. <END_TASK> <USER_TASK:> Description: def _update_summary(self, summary=None): """Update all parts of the summary or clear when no summary."""
board_image_label = self._parts['board image label']
# get content for update or use blanks when no summary
if summary:
    # make a board image with the swap drawn on it
    # board, action, text = summary.board, summary.action, summary.text
    board_image_cv = self._create_board_image_cv(summary.board)
    self._draw_swap_cv(board_image_cv, summary.action)
    board_image_tk = self._convert_cv_to_tk(board_image_cv)
    text = ''
    if summary.score is not None:
        text += 'Score: {:3.1f}'.format(summary.score)
    if (summary.mana_drain_leaves is not None) and \
            (summary.total_leaves is not None):
        text += ' Mana Drains: {}/{}'.format(summary.mana_drain_leaves,
                                             summary.total_leaves)
else:
    # clear any stored state image and use the blank
    board_image_tk = board_image_label._blank_image
    text = ''
# update the UI parts with the content
board_image_label._board_image = board_image_tk
board_image_label.config(image=board_image_tk)
# update the summary text
summary_label = self._parts['summary label']
summary_label.config(text=text)
# refresh the UI
self._base.update()
<SYSTEM_TASK:> Issue the request. <END_TASK> <USER_TASK:> Description: def send(self): """Issue the request. Uses httplib2.Http support for handling redirects. Returns an httplib2.Response, which may be augmented by the proc_response() method. Note that the default implementation of proc_response() causes an appropriate exception to be raised if the response code is >= 400. """
# Pre-process the request
try:
    self.procstack.proc_request(self)
except exc.ShortCircuit as e:
    self._debug("Request pre-processing short-circuited")

    # Short-circuited; we have an (already processed) response
    return e.response

self._debug("Sending %r request to %r (body %r, headers %r)",
            self.method, self.url, self.body, self.headers)

# Issue the request
(resp, content) = self.client.request(self.url, self.method, self.body,
                                      self.headers, self.max_redirects)

# Save the body in the response
resp.body = content

# Do any processing on the response that's desired
try:
    self.proc_response(resp)
except:
    # Process the exception
    result = self.procstack.proc_exception(*sys.exc_info())
    if not result:
        # Not handled, re-raise it
        raise
    else:
        # Handled and we have a fully post-processed response
        return result

# Return the response, post-processing it
return self.procstack.proc_response(resp)
<SYSTEM_TASK:> Process response hook. <END_TASK> <USER_TASK:> Description: def proc_response(self, resp): """Process response hook. Process non-redirect responses received by the send() method. May augment the response. The default implementation causes an exception to be raised if the response status code is >= 400. """
# Raise exceptions for error responses
if resp.status >= 400:
    e = exc.exception_map.get(resp.status, exc.HTTPException)
    self._debug("  Response was a %d fault, raising %s",
                resp.status, e.__name__)
    raise e(resp)
<SYSTEM_TASK:> Add the label argument by default, no need to specify it in args. <END_TASK> <USER_TASK:> Description: def add_arguments(self): """ Add the label argument by default, no need to specify it in args. """
super(LabelCommand, self).add_arguments()
self.parser.add_argument('labels', metavar=self.label, nargs="+")
<SYSTEM_TASK:> Decorator for EnumValue rich comparison methods. <END_TASK> <USER_TASK:> Description: def _comparator(func): """ Decorator for EnumValue rich comparison methods. """
def comparator_wrapper(self, other):
    try:
        # [PATCH] The code was originally the following:
        #
        #     assert self.enumtype == other.enumtype
        #     result = func(self.index, other.index)
        #
        # The first statement causes an issue when serializing/unserializing
        # objects from/to memcached using pylibmc, which builds a new
        # instance of the enumeration. Two items are then stated different
        # while semantically the same.
        #
        # These two lines are replaced by the following, which relies on the
        # fact that developers are not likely to name two items of distinct
        # enumerations the same way, and less likely to compare two items of
        # two distinct enumerations.
        #
        # (Daniel CAUNE; [email protected]; 2012-05-11)
        result = func(self.key, other.key)
    except (AssertionError, AttributeError):
        result = NotImplemented

    return result

comparator_wrapper.__name__ = func.__name__
comparator_wrapper.__doc__ = getattr(float, func.__name__).__doc__

return comparator_wrapper
<SYSTEM_TASK:> Return a new enumeration object extended with the specified items. <END_TASK> <USER_TASK:> Description: def extend(self, *keys, **kwargs): """ Return a new enumeration object extended with the specified items. """
this = copy.deepcopy(self)

value_type = kwargs.get('value_type', EnumValue)

if not keys:
    raise EnumEmptyError()

keys = tuple(keys)
values = [None] * len(keys)

for i, key in enumerate(keys):
    value = value_type(this, i, key)
    values[i] = value
    try:
        super(Enum, this).__setattr__(key, value)
    except TypeError:
        raise EnumBadKeyError(key)

this.__dict__['_keys'] = this.__dict__['_keys'] + keys
this.__dict__['_values'] += values

return this
<SYSTEM_TASK:> Generate folders to best match metadata. <END_TASK> <USER_TASK:> Description: def options(self, data): """Generate folders to best match metadata. The results will be a single, perfectly matched folder, or the two nearest neighbours of an imperfect match. :param dict data: metadata matching criteria. This method is a generator. It yields :py:class:`turberfield.dialogue.model.SceneScript.Folder` objects. """
if self.mapping_key(data) in self.keys:
    yield next(i for i in self.folders if i.metadata == data)
else:
    index = bisect.bisect_left(self.keys, self.mapping_key(data))
    posns = sorted(set([max(0, index - 1), index]))
    yield from (self.folders[i] for i in posns)
<SYSTEM_TASK:> Create a widget for a schema item <END_TASK> <USER_TASK:> Description: def widget_for(element): """Create a widget for a schema item """
view_type = _view_type_for_element(element)
if view_type is None:
    raise KeyError('No view type for %r' % element)
builder = view_widgets.get(view_type)
if builder is None:
    raise KeyError('No widget type for %r' % view_type)
return builder(element)
<SYSTEM_TASK:> Optical path difference projection of a dielectric sphere <END_TASK> <USER_TASK:> Description: def projection(radius=5e-6, sphere_index=1.339, medium_index=1.333, wavelength=550e-9, pixel_size=1e-7, grid_size=(80, 80), center=(39.5, 39.5)): """Optical path difference projection of a dielectric sphere Parameters ---------- radius: float Radius of the sphere [m] sphere_index: float Refractive index of the sphere medium_index: float Refractive index of the surrounding medium wavelength: float Vacuum wavelength of the imaging light [m] pixel_size: float Pixel size [m] grid_size: tuple of floats Resulting image size in x and y [px] center: tuple of floats Center position in image coordinates [px] Returns ------- qpi: qpimage.QPImage Quantitative phase data set """
# grid
x = np.arange(grid_size[0]).reshape(-1, 1)
y = np.arange(grid_size[1]).reshape(1, -1)
cx, cy = center

# sphere location
rpx = radius / pixel_size
r = rpx**2 - (x - cx)**2 - (y - cy)**2

# distance
z = np.zeros_like(r)
rvalid = r > 0
z[rvalid] = 2 * np.sqrt(r[rvalid]) * pixel_size

# phase = delta_n * 2PI * z / wavelength
phase = (sphere_index - medium_index) * 2 * np.pi * z / wavelength

meta_data = {"pixel size": pixel_size,
             "wavelength": wavelength,
             "medium index": medium_index,
             "sim center": center,
             "sim radius": radius,
             "sim index": sphere_index,
             "sim model": "projection",
             }

qpi = qpimage.QPImage(data=phase, which_data="phase",
                      meta_data=meta_data)
return qpi
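A hedged usage sketch, assuming the `qpimage` package the function builds on (where `qpi.pha` is the phase-image accessor):

qpi = projection(radius=5e-6, grid_size=(80, 80))
phase = qpi.pha        # 2D numpy array of phase in radians
print(phase.max())     # peak phase at the sphere center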
<SYSTEM_TASK:> Get the first toplevel widget in a gtk.Builder hierarchy. <END_TASK> <USER_TASK:> Description: def get_first_builder_window(builder): """Get the first toplevel widget in a gtk.Builder hierarchy. This is mostly used for guessing purposes, and an explicit naming is always going to be a better situation. """
for obj in builder.get_objects():
    if isinstance(obj, gtk.Window):
        # first window
        return obj
<SYSTEM_TASK:> Get the toplevel widget from a gtk.Builder file. <END_TASK> <USER_TASK:> Description: def get_builder_toplevel(self, builder): """Get the toplevel widget from a gtk.Builder file. The main view implementation first searches for the widget named as self.toplevel_name (which defaults to "main". If this is missing, or not a gtk.Window, the first toplevel window found in the gtk.Builder is used. """
toplevel = builder.get_object(self.toplevel_name)
if not gobject.type_is_a(toplevel, gtk.Window):
    toplevel = None
if toplevel is None:
    toplevel = get_first_builder_window(builder)
return toplevel
<SYSTEM_TASK:> Cut unicode string from left to fit a given width. <END_TASK> <USER_TASK:> Description: def unicode_left(s, width): """Cut unicode string from left to fit a given width."""
i = 0
j = 0
for ch in s:
    j += __unicode_width_mapping[east_asian_width(ch)]
    if width < j:
        break
    i += 1
return s[:i]
<SYSTEM_TASK:> Cut unicode string from right to fit a given width. <END_TASK> <USER_TASK:> Description: def unicode_right(s, width): """Cut unicode string from right to fit a given width."""
i = len(s)
j = 0
for ch in reversed(s):
    j += __unicode_width_mapping[east_asian_width(ch)]
    if width < j:
        break
    i -= 1
return s[i:]
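Illustrative behavior, assuming `__unicode_width_mapping` assigns width 2 to East-Asian wide/fullwidth characters and 1 otherwise:

unicode_left(u'abcdef', 3)   # 'abc'
unicode_left(u'あいう', 4)   # 'あい' -- each CJK character occupies width 2
unicode_right(u'あいう', 4)  # 'いう'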
<SYSTEM_TASK:> Schema Validation class factory. <END_TASK> <USER_TASK:> Description: def schema_factory(schema_name, **schema_nodes): """Schema Validation class factory. Args: schema_name(str): The namespace of the schema. schema_nodes(dict): The attr_names / SchemaNodes mapping of schema. Returns: A Schema class. Raises: SchemaError, for bad attribute setting initialization. Examples: >>> from schema_factory import FloatNode, StringNode, SchemaNode >>> >>> PointSchema = schema_factory( ... schema_name='point', ... lat=FloatNode(), ... lng=FloatNode(), ... ) ... >>> point = PointSchema(lat=34, lng=29.01) >>> print(point.to_dict) OrderedDict([('lat', 34.0), ('lng', 29.01)]) >>> point2 = PointSchema(lat='34', lng='0') >>> print(point2.to_dict) OrderedDict([('lat', 34.0), ('lng', 0.0)]) >>> RegionSchema = schema_factory( ... schema_name='Region', ... name=StringNode(), ... country_code=StringNode( required=True, validators=[lambda x: len(x) == 2]), ... location=SchemaNode(PointSchema, required=False, default=None), ... keywords=StringNode(array=True, required=False, default=[]) ... ) ... >>> region = RegionSchema(name='Athens', country_code='gr', location={'lat': 32.7647, 'lng': 27.03}) >>> print(region) <RegionSchema instance, attributes:['country_code', 'keywords', 'location', 'name']> >>> region.keywords [] >>> region2 = RegionSchema(name='Athens') Traceback (most recent call last): ... schema_factory.errors.SchemaError: Missing Required Attributes: {'country_code'} >>> region3 = RegionSchema(name='Athens', country_code='gr', location={'lat': 32.7647, 'lng': 27.03}, ... foo='bar') Traceback (most recent call last): ... schema_factory.errors.SchemaError: Invalid Attributes RegionSchema for {'foo'}. >>> region4 = RegionSchema(name='Athens', country_code='gr', keywords=['Acropolis', 'Mousaka', 434132]) """
schema_dict = dict()
schema_dict.update(schema_nodes)

def cls_repr(self):  # pragma: no cover
    return "<{} instance at: 0x{:x}>".format(self.__class__, id(self))

def cls_str(self):  # pragma: no cover
    return "<{} instance, attributes:{}>".format(
        self.__class__.__name__,
        self.schema_nodes
    )

def cls_init(self, **kwargs):
    kwargs_set = set(kwargs)

    if not self.required.issubset(kwargs_set):
        raise SchemaError('Missing Required Attributes: {}'.format(
            self.required.difference(kwargs_set)
        ))

    if not set(kwargs).issubset(set(self.schema_nodes)):
        raise SchemaError('Invalid Attributes {} for {}.'.format(
            self.__class__.__name__,
            set(kwargs).difference(set(self.schema_nodes))
        ))

    for attr_name in kwargs:
        setattr(self, attr_name, kwargs[attr_name])

def to_dict(self):
    return OrderedDict([(k, getattr(self, k)) for k in self.schema_nodes])

schema_dict['to_dict'] = property(to_dict)
schema_dict['__init__'] = cls_init
schema_dict['__repr__'] = cls_repr
schema_dict['__str__'] = cls_str

return SchemaType('{}Schema'.format(schema_name.title()), (), schema_dict)
<SYSTEM_TASK:> Serialize Nodes and attributes <END_TASK> <USER_TASK:> Description: def serialize(self, *fields): """Serialize Nodes and attributes """
if fields: if not set(fields).issubset(self.data_nodes): raise SchemaError('Invalid field for serialization: {}'.format(set(fields).difference(self.data_nodes))) return OrderedDict([(k, getattr(self, k)) for k in fields]) return OrderedDict([(k, getattr(self, k)) for k in self.data_nodes])
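A usage sketch, reusing the PointSchema example from the factory docstring above; that `data_nodes` resolves to the declared node names is an assumption:

point = PointSchema(lat=34, lng=29.01)
print(point.serialize())        # OrderedDict([('lat', 34.0), ('lng', 29.01)])
print(point.serialize('lng'))   # OrderedDict([('lng', 29.01)])
point.serialize('altitude')     # raises SchemaError: Invalid field for serialization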
<SYSTEM_TASK:> Run all available sanitizers across a configuration. <END_TASK> <USER_TASK:> Description: def sanitize(configuration, error_fn): """ Run all available sanitizers across a configuration. Arguments: configuration - a full project configuration error_fn - A function to call if a sanitizer check fails. The function takes two arguments: the name of the failed check and a description of the problem; provide specifics if possible, including the component, the part of the configuration that presents an issue, etc. """
for name, sanitize_fn in _SANITIZERS.items(): sanitize_fn(configuration, lambda warning, n=name: error_fn(n, warning))
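A sketch of registering and running a check. The shape of the module-internal _SANITIZERS registry (name -> callable) is an assumption, as is the example check itself:

def _no_empty_depends(configuration, error_fn):
    # Hypothetical check: every component must declare a non-empty depends value.
    for name in configuration:
        if not configuration[name].get('depends'):
            error_fn("component '{}' has no depends".format(name))

_SANITIZERS['no-empty-depends'] = _no_empty_depends

config = {'libfoo': {'depends': ''}, 'app': {'depends': 'libfoo'}}
sanitize(config, lambda check, msg: print('{} [{}]'.format(msg, check)))
# -> component 'libfoo' has no depends [no-empty-depends]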
<SYSTEM_TASK:> Convert slice to integer, based on sign and endian flags. <END_TASK> <USER_TASK:> Description: def s2n(self, offset, length, signed=0): """ Convert slice to integer, based on sign and endian flags. Usually this offset is assumed to be relative to the beginning of the start of the EXIF information. For some cameras that use relative tags, this offset may be relative to some other starting point. """
self.file.seek(self.offset + offset) sliced = self.file.read(length) if self.endian == 'I': val = s2n_intel(sliced) else: val = s2n_motorola(sliced) # Sign extension? if signed: msb = 1 << (8 * length - 1) if val & msb: val -= (msb << 1) return val
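The sign-extension branch deserves a worked example: for a 1-byte value 0xFF read as signed, msb is 0x80; since 0xFF & 0x80 is set, the code subtracts 0x100, giving -1 (two's complement). A standalone sketch of the same arithmetic:

def sign_extend(val, length):
    # Mirrors the signed branch of s2n: reinterpret `length` bytes of
    # `val` as a two's-complement integer.
    msb = 1 << (8 * length - 1)
    if val & msb:
        val -= (msb << 1)
    return val

assert sign_extend(0xFF, 1) == -1
assert sign_extend(0x7F, 1) == 127
assert sign_extend(0xFFFE, 2) == -2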
<SYSTEM_TASK:> Convert offset to string. <END_TASK> <USER_TASK:> Description: def n2s(self, offset, length): """Convert offset to string."""
s = '' for dummy in range(length): if self.endian == 'I': s += chr(offset & 0xFF) else: s = chr(offset & 0xFF) + s offset = offset >> 8 return s
<SYSTEM_TASK:> Return the list of IFDs in the header. <END_TASK> <USER_TASK:> Description: def list_ifd(self): """Return the list of IFDs in the header."""
i = self._first_ifd() ifds = [] while i: ifds.append(i) i = self._next_ifd(i) return ifds
<SYSTEM_TASK:> Extract uncompressed TIFF thumbnail. <END_TASK> <USER_TASK:> Description: def extract_tiff_thumbnail(self, thumb_ifd): """ Extract uncompressed TIFF thumbnail. Take advantage of the pre-existing layout in the thumbnail IFD as much as possible. """
thumb = self.tags.get('Thumbnail Compression') if not thumb or thumb.printable != 'Uncompressed TIFF': return entries = self.s2n(thumb_ifd, 2) # this is header plus offset to IFD ... if self.endian == 'M': tiff = 'MM\x00*\x00\x00\x00\x08' else: tiff = 'II*\x00\x08\x00\x00\x00' # ... plus thumbnail IFD data plus a null "next IFD" pointer self.file.seek(self.offset + thumb_ifd) tiff += self.file.read(entries * 12 + 2) + '\x00\x00\x00\x00' # fix up large value offset pointers into data area for i in range(entries): entry = thumb_ifd + 2 + 12 * i tag = self.s2n(entry, 2) field_type = self.s2n(entry + 2, 2) type_length = FIELD_TYPES[field_type][0] count = self.s2n(entry + 4, 4) old_offset = self.s2n(entry + 8, 4) # start of the 4-byte pointer area in entry ptr = i * 12 + 18 # remember strip offsets location if tag == 0x0111: strip_off = ptr strip_len = count * type_length # is it in the data area? if count * type_length > 4: # update offset pointer (nasty "strings are immutable" crap) # should be able to say "tiff[ptr:ptr+4]=newoff" newoff = len(tiff) tiff = tiff[:ptr] + self.n2s(newoff, 4) + tiff[ptr + 4:] # remember strip offsets location if tag == 0x0111: strip_off = newoff strip_len = 4 # get original data and store it self.file.seek(self.offset + old_offset) tiff += self.file.read(count * type_length) # add pixel strips and update strip offset info old_offsets = self.tags['Thumbnail StripOffsets'].values old_counts = self.tags['Thumbnail StripByteCounts'].values for i in range(len(old_offsets)): # update offset pointer (more nasty "strings are immutable" crap) offset = self.n2s(len(tiff), strip_len) tiff = tiff[:strip_off] + offset + tiff[strip_off + strip_len:] strip_off += strip_len # add pixel strip to end self.file.seek(self.offset + old_offsets[i]) tiff += self.file.read(old_counts[i]) self.tags['TIFFThumbnail'] = tiff
<SYSTEM_TASK:> Decode the variable length encoded camera info section. <END_TASK> <USER_TASK:> Description: def _canon_decode_camera_info(self, camera_info_tag): """ Decode the variable length encoded camera info section. """
model = self.tags.get('Image Model', None) if not model: return model = str(model.values) camera_info_tags = None for (model_name_re, tag_desc) in makernote.canon.CAMERA_INFO_MODEL_MAP.items(): if re.search(model_name_re, model): camera_info_tags = tag_desc break else: return # We are assuming here that these are all unsigned bytes (Byte or # Unknown) if camera_info_tag.field_type not in (1, 7): return camera_info = struct.pack('<%dB' % len(camera_info_tag.values), *camera_info_tag.values) # Look for each data value and decode it appropriately. for offset, tag in camera_info_tags.items(): tag_format = tag[1] tag_size = struct.calcsize(tag_format) if len(camera_info) < offset + tag_size: continue packed_tag_value = camera_info[offset:offset + tag_size] tag_value = struct.unpack(tag_format, packed_tag_value)[0] tag_name = tag[0] if len(tag) > 2: if callable(tag[2]): tag_value = tag[2](tag_value) else: tag_value = tag[2].get(tag_value, tag_value) logger.debug(" %s %s", tag_name, tag_value) self.tags['MakerNote ' + tag_name] = IfdTag(str(tag_value), None, 0, None, None, None)
<SYSTEM_TASK:> Create the user interface <END_TASK> <USER_TASK:> Description: def create_ui(self): """Create the user interface create_ui is a method called during the Delegate's initialisation process, to create, add to, or modify any UI created by GtkBuilder files. """
self.entry = gtk.Entry() self.widget.add(self.entry)
<SYSTEM_TASK:> Generate hash for given file <END_TASK> <USER_TASK:> Description: def get_file_hash(file_path, block_size=1024, hasher=None): """ Generate hash for given file :param file_path: Path to file :type file_path: str :param block_size: Size of block to be read at once (default: 1024) :type block_size: int :param hasher: Use specific hasher, defaults to md5 (default: None) :type hasher: _hashlib.HASH :return: Hash of file :rtype: str """
if hasher is None: hasher = hashlib.md5() with open(file_path, 'rb') as f: while True: buffer = f.read(block_size) if len(buffer) <= 0: break hasher.update(buffer) return hasher.hexdigest()
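Usage is straightforward; the default is MD5, and any hashlib object can be swapped in. The file name below is purely illustrative:

import hashlib

print(get_file_hash('archive.bin'))                           # MD5 hex digest
print(get_file_hash('archive.bin', hasher=hashlib.sha256()))  # SHA-256 instead
print(get_file_hash('archive.bin', block_size=4096))          # larger read chunks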
<SYSTEM_TASK:> Get list of this plugin's resources and a hash to check for file changes <END_TASK> <USER_TASK:> Description: def resource_get_list(self): """ Get list of this plugin's resources and a hash to check for file changes (It is recommended to keep an in-memory representation of this struct and not to generate it upon each request) :return: List of supported resources and hashes :rtype: list[(unicode, unicode)] """
if not self._resources: return self.resource_update_list() res = [] with self._resource_lock: for key in self._resources: res.append((key, self._resources[key]['hash'])) return res
<SYSTEM_TASK:> Update internal struct of resource/hash list and get diff <END_TASK> <USER_TASK:> Description: def resource_update_list(self, reset=False): """ Update internal struct of resource/hash list and get diff (Warning: Resource names have to be unique!!) :param reset: Should resources be rebuilt from scratch (default: False) :type reset: bool :return: List of resources and hashes that changed :rtype: list[(unicode, unicode)] """
if not self._resource_path: raise PluginException("No resource path set") if not os.path.isdir(self._resource_path): raise PluginException( u"Resource path directory '{}' not found".format( self._resource_path ) ) res = [] with self._resource_lock: if reset: self._resources = {} old = dict(self._resources) for dirname, dirnames, filenames in os.walk(self._resource_path): for file_name in filenames: file_ext = os.path.splitext(file_name)[1].lower()[1:] if file_ext not in self._resource_file_types: self.debug(u"Skipping '{}'".format(file_name)) continue file_path = os.path.join(dirname, file_name) try: file_hash = get_file_hash(file_path) except: self.exception( u"Failed to hash '{}'".format(file_path) ) continue self._resources[file_name] = { 'name': file_name, 'path': file_path, 'hash': file_hash, 'checked': datetime.datetime.utcnow() } # generate diff for key in self._resources: resource = self._resources[key] if key not in old or old[key]['hash'] != resource['hash']: # new file or hash changed res.append((key, resource['hash'])) return res
<SYSTEM_TASK:> Safely calls the method with the given methname on the given <END_TASK> <USER_TASK:> Description: def _safe_call(obj, methname, *args, **kwargs): """ Safely calls the method with the given methname on the given object. Remaining positional and keyword arguments are passed to the method. The return value is None, if the method is not available, or the return value of the method. """
meth = getattr(obj, methname, None) if meth is None or not callable(meth): return return meth(*args, **kwargs)
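A small self-contained illustration of the guard behaviour:

class Greeter(object):
    def greet(self, name):
        return 'hello, {}'.format(name)

assert _safe_call(Greeter(), 'greet', 'world') == 'hello, world'
assert _safe_call(Greeter(), 'missing', 'world') is None  # absent method: silently None
assert _safe_call(object(), 'greet', 'world') is None     # same for a bare object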
<SYSTEM_TASK:> Post-process a response through all processors in the stack, <END_TASK> <USER_TASK:> Description: def proc_response(self, resp, startidx=None): """ Post-process a response through all processors in the stack, in reverse order. For convenience, returns the response passed to the method. The startidx argument is an internal interface only used by the proc_request() and proc_exception() methods to process a response through a subset of response processors. """
# If we're empty, bail out early if not self: return resp # Select appropriate starting index; the last valid index is len(self) - 1 if startidx is None: startidx = len(self) - 1 for idx in range(startidx, -1, -1): _safe_call(self[idx], 'proc_response', resp) # Return the response we were passed return resp
<SYSTEM_TASK:> Find a build cache somewhere in a parent directory. <END_TASK> <USER_TASK:> Description: def find_config(): """Find a build cache somewhere in a parent directory."""
previous = "" current = os.getcwd() while previous != current: check_path = os.path.join(current, "build.cache") if os.path.isfile(check_path): return check_path else: previous = current current = os.path.dirname(current) raise Exception("Can't find build cache")
<SYSTEM_TASK:> Load a build cache, updating it if necessary. <END_TASK> <USER_TASK:> Description: def update_cache(force=False, cache_file=None): """ Load a build cache, updating it if necessary. A cache is considered outdated if any of its inputs have changed. Arguments force -- Consider a cache outdated regardless of whether its inputs have been modified. cache_file -- Path to the build cache; if None, the cache is located by searching parent directories. """
if not cache_file: cache_file = find_config() cache_config = devpipeline_configure.parser.read_config(cache_file) cache = devpipeline_configure.cache._CachedConfig(cache_config, cache_file) if force or _is_outdated(cache_file, cache): cache = devpipeline_configure.config.process_config( cache_config.get("DEFAULT", "dp.build_config"), os.path.dirname(cache_file), "build.cache", profiles=cache_config.get("DEFAULT", "dp.profile_name", fallback=None), overrides=cache_config.get("DEFAULT", "dp.overrides", fallback=None), ) devpipeline_core.sanitizer.sanitize( cache, lambda n, m: print("{} [{}]".format(m, n)) ) return cache
<SYSTEM_TASK:> Function called by the WSGI server. <END_TASK> <USER_TASK:> Description: def app(environ, start_response): """Function called by the WSGI server."""
r = HttpRequestHandler(environ, start_response, Router).dispatch() return r
<SYSTEM_TASK:> Decorator to create a Job from a function. <END_TASK> <USER_TASK:> Description: def make_job(job_name, **kwargs): """ Decorator to create a Job from a function. Give a job name and add extra fields to the job. @make_job("ExecuteDecJob", command=mongoengine.StringField(required=True), output=mongoengine.StringField(default=None)) def execute(job: Job): job.log_info('ExecuteJob %s - Executing command...' % job.uuid) result = subprocess.run(job.command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) job.output = result.stdout.decode('utf-8') + " " + result.stderr.decode('utf-8') """
def wraps(func): kwargs['process'] = func job = type(job_name, (Job,), kwargs) globals()[job_name] = job return job return wraps
<SYSTEM_TASK:> Print unicode or bytes universally. <END_TASK> <USER_TASK:> Description: def print_safe(str_or_bytes, encoding='utf-8', errors='ignore', output=sys.stdout, newline='\n'): """ Print unicode or bytes universally. :param str_or_bytes: string to print :param encoding: character encoding :param errors: error handling scheme. Refer to codecs.register_error. :param output: output file handler :param newline: line terminator appended after the text """
writer = output.buffer if hasattr(output, 'buffer') else output # When the input type is bytes, verify it can be decoded with the specified encoding. decoded = str_or_bytes if is_unicode(str_or_bytes) else to_unicode(str_or_bytes, encoding, errors) encoded = to_bytes(decoded, encoding, errors) writer.write(encoded + to_bytes(newline, encoding, errors)) output.flush()
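A usage sketch; both text and bytes are accepted, and the encoding/errors pair controls the output bytes (assuming the to_unicode/to_bytes helpers referenced by the body):

import sys

print_safe(u'caf\u00e9')                    # text in, UTF-8 bytes out
print_safe(b'caf\xc3\xa9')                  # bytes are decode-checked first
print_safe(u'caf\u00e9', encoding='ascii')  # errors='ignore' drops the accent
print_safe(u'to stderr', output=sys.stderr)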
<SYSTEM_TASK:> expects "msg_info" to have the field 'files_containers_id' <END_TASK> <USER_TASK:> Description: def set_verified(self, msg_info): """ expects "msg_info" to have the field 'files_containers_id' This call already executes "update_last_checked_time" so it doesn't need to be called separately """
assert hasattr(msg_info, 'files_containers_id') with self._session_resource as session: session.execute( update(FilesDestinations) .where(FilesDestinations.file_containers_id == msg_info.files_containers_id) .values(verification_info=msg_info.msg_id) ) self.update_last_checked_time(msg_info)
<SYSTEM_TASK:> returns 0 if it doesn't correspond to an uploaded container <END_TASK> <USER_TASK:> Description: def is_uploaded_container(self, msg_info): """ returns 0 if it doesn't correspond to an uploaded container -1 if it corresponds to an uploaded container but it is corrupted 1 if it corresponds to an uploaded container and is OK """
results = { 'BAD': -1, 'NOT_FCB': 0, 'OK': 1 } for part in msg_info.msg_body.walk(): if part.is_multipart(): continue """ if part.get('Content-Disposition') is None: print("no content dispo") continue """ if part.get_content_type() == 'text/plain': if self._is_content_from_fcb(part.get_payload()): self._log.debug("Body detected as FCB: %s", part.get_payload()) else: self._log.debug("Body doesn't match FCB: %s", part.get_payload()) continue attachment_name = self._get_attachment_name(part) if not attachment_name: self._log.debug("Couldn't get attachment name. Will ignore the part.") continue files_container = self._get_files_container_by_name(attachment_name) if files_container: sha1_in_db = files_container.sha1 msg_info.files_containers_id = files_container.id tmp_file = FileInfo(os.path.join(tempfile.gettempdir(), "downloaded.tmp")) fp = open(tmp_file.path, 'wb') fp.write(part.get_payload(decode=1)) fp.flush() fp.close() if tmp_file.sha1 == sha1_in_db: self._log.info("File container '%s' verified!", attachment_name) result = results['OK'] else: self._log.error("File container '%s' doesn't match the sha1 sum. Expected '%s' but got '%s'", attachment_name, sha1_in_db, tmp_file.sha1) result = results['BAD'] os.remove(tmp_file.path) return result else: self._log.debug("Attached file '%s' not found in DB. Will ignore this mail.", attachment_name) return results['NOT_FCB']
<SYSTEM_TASK:> Set up a ComboBox or ComboBoxEntry based on a list of strings. <END_TASK> <USER_TASK:> Description: def combobox_set_model_from_list(cb, items): """Set up a ComboBox or ComboBoxEntry based on a list of strings."""
cb.clear() model = gtk.ListStore(str) for i in items: model.append([i]) cb.set_model(model) if type(cb) == gtk.ComboBoxEntry: cb.set_text_column(0) elif type(cb) == gtk.ComboBox: cell = gtk.CellRendererText() cb.pack_start(cell, True) cb.add_attribute(cell, 'text', 0)
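A usage sketch under PyGTK 2, which the gtk calls above imply:

combo = gtk.ComboBox()
combobox_set_model_from_list(combo, ['low', 'medium', 'high'])
combo.set_active(1)  # preselect 'medium'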
<SYSTEM_TASK:> Create an adapter which uses only the last construct. <END_TASK> <USER_TASK:> Description: def LastOf(*subcons): """ Create an adapter which uses only the last construct. If the first argument is a string, it will be used as the name. """
name = "seq" if isinstance(subcons[0], six.string_types): name = subcons[0] subcons = subcons[1:] return IndexingAdapter(Sequence(name, *subcons), -1)