text_prompt
stringlengths
100
17.7k
code_prompt
stringlengths
7
9.86k
def run(self, num_of_processes=multiprocessing.cpu_count()):
    """
    Applies the wordification methodology on the target table.

    :param num_of_processes: number of processes
    """
    # Class + wordification on every example of the main table.
    pool = multiprocessing.Pool(num_of_processes)
    index_chunks = chunks(list(range(len(self.target_table))), num_of_processes)
    # NOTE(review): the pool is created but the chunks below are processed
    # serially in this process — confirm whether wordify_examples was meant
    # to be dispatched via pool.map/apply_async.
    for ex_idxs in index_chunks:
        self.resulting_documents.extend(
            wordify_examples(self.name_to_table, self.connecting_tables,
                             self.context, self.index_by_value,
                             self.target_table.name, self.word_att_length,
                             ex_idxs))
    pool.close()
    pool.join()
    # Collect the class value of every target-table example, in order.
    for example in self.target_table:
        self.resulting_classes.append(example.get_class())
def calculate_weights(self, measure='tfidf'):
    """
    Counts word frequency and calculates tf-idf values for words in every
    document.

    :param measure: example weights approach (one of ``tfidf, binary, tf``).
    """
    # TODO replace with scipy sparse matrices (and calculate with scikit)
    if measure == 'tfidf':
        self.calculate_idf()

    for doc_idx, document in enumerate(self.resulting_documents):
        # Term frequency of every word within this document.
        train_word_count = defaultdict(int)
        self.tf_idfs[doc_idx] = {}
        for word in document:
            train_word_count[word] += 1

        for word in document:
            if measure == "binary":
                tf = 1
                idf = 1
            else:
                tf = train_word_count[word]
                # Words unknown to the idf table get no weight at all.
                idf = 1 if measure == "tf" else self.idf.get(word)
            # FIX: was `idf != None` (non-idiomatic); also replaced the
            # `self.idf[word] if word in self.idf else None` double lookup
            # with dict.get, and dropped the unused `from math import log`.
            if idf is not None:
                self.tf_idfs[doc_idx][word] = tf * idf
def prune(self, minimum_word_frequency_percentage=1):
    """
    Filter out words that occur in too few documents.

    :param minimum_word_frequency_percentage: keep only words that appear
        in at least this percentage of documents.
    """
    # Document-count threshold a word must reach to survive pruning.
    threshold = (minimum_word_frequency_percentage / 100.
                 * len(self.resulting_documents))
    self.resulting_documents = [
        [word for word in document
         if self.word_in_how_many_documents[word] >= threshold]
        for document in self.resulting_documents
    ]
def wordify(self):
    """
    Constructs string of all documents.

    :return: document representation of the dataset, one line per document
    :rtype: str
    """
    string_documents = []
    for klass, document in zip(self.resulting_classes, self.resulting_documents):
        # BUG FIX: words were joined with an empty separator, which fused
        # all the words of a document into a single token; use spaces so
        # the bag-of-words stays word-separated.
        string_documents.append("!" + str(klass) + " " + ' '.join(document))
    return '\n'.join(string_documents)
def binary_value_or_stdin(value):
    """
    Return fsencoded value or read raw data from stdin if value is None.
    """
    if value is None:
        # No explicit value given: consume all raw bytes from stdin.
        raw_reader = io.open(sys.stdin.fileno(), mode='rb', closefd=False)
        return raw_reader.read()
    if six.PY3:
        return os.fsencode(value)
    # Python 2: bytes and str are the same type; pass through untouched.
    return value
def get_html_color(self):
    """
    Get a string representing the color, using HTML notation (#rrggbb).
    """
    c = self.color
    return "#%02x%02x%02x" % (int(c.red), int(c.green), int(c.blue))
def forget(self, label_name):
    """
    Forget training for label 'label_name'.
    """
    # Drop the in-memory classifier, then wipe its on-disk training data.
    self._bayes.pop(label_name)
    target_dir = self._get_baye_dir(label_name)
    logger.info("Deleting label training {} : {}".format(
        label_name, target_dir
    ))
    rm_rf(target_dir)
def rename(self, old_label_name, new_label_name):
    """
    Take into account that a label has been renamed.
    """
    assert old_label_name != new_label_name
    # Invalidate the in-memory classifier; it will be reloaded on demand.
    self._bayes.pop(old_label_name)
    old_baye_dir = self._get_baye_dir(old_label_name)
    new_baye_dir = self._get_baye_dir(new_label_name)
    logger.info("Renaming label training {} -> {} : {} -> {}".format(
        old_label_name, new_label_name, old_baye_dir, new_baye_dir
    ))
    os.rename(old_baye_dir, new_baye_dir)
def __make_thumbnail(self, width, height):
    """
    Create the page's thumbnail.
    """
    (img_w, img_h) = self.size
    # Scale down by whichever dimension overflows the requested box most,
    # preserving the aspect ratio.
    factor = max(float(img_w) / width, float(img_h) / height)
    img_w /= factor
    img_h /= factor
    return self.get_image((round(img_w), round(img_h)))
def get_thumbnail(self, width, height):
    """
    Thumbnail with a memory cache.
    """
    # Try to reuse the thumbnail cached on disk first.
    thumb_path = self._get_thumb_path()
    try:
        doc_file_path = self.get_doc_file_path()
        # Only trust the cached thumbnail if it is newer than the document.
        if (self.fs.exists(thumb_path) and
                self.fs.getmtime(doc_file_path) < self.fs.getmtime(thumb_path)):
            with self.fs.open(thumb_path, 'rb') as fd:
                thumbnail = PIL.Image.open(fd)
                thumbnail.load()

            if thumbnail.size[0] == width or thumbnail.size[1] == height:
                # fills the specified area
                return thumbnail

            logger.warning(
                "[%s] Unexpected thumbnail size: %s instead of %s ;"
                " Updating thumbnail ...",
                str(self.doc.docid), str(thumbnail.size),
                str((width, height))
            )
    except Exception as exc:
        logger.warning(
            "[%s] Failed to check doc and thumbnail mdate. Forcing update"
            " of the thumbnail",
            str(self.doc.docid), exc_info=exc
        )

    # Cache miss (or stale/odd-sized cache): rebuild and persist it.
    logger.info("[%s] Updating thumbnail ...", str(self.doc.docid))
    thumbnail = self.__make_thumbnail(width, height)
    with self.fs.open(thumb_path, 'wb') as fd:
        thumbnail.save(fd, format="JPEG")
    return thumbnail
def __get_keywords(self):
    """
    Get all the keywords related of this page.

    Returns:
        An iterator of strings.
    """
    for line in self.text:
        for word in split_words(line):
            yield word
def strip_accents(string):
    """
    Strip all the accents from the string.
    """
    # NFD decomposition splits accented letters into base letter +
    # combining mark ('Mn'); dropping the marks strips the accents.
    decomposed = unicodedata.normalize('NFD', string)
    return u''.join(ch for ch in decomposed
                    if unicodedata.category(ch) != 'Mn')
def rm_rf(path):
    """
    Act as 'rm -rf' in the shell.
    """
    if os.path.isfile(path):
        os.unlink(path)
    elif os.path.isdir(path):
        # Walk bottom-up so directories are empty by the time we rmdir them.
        for root, dirs, files in os.walk(path, topdown=False):
            for filename in files:
                filepath = os.path.join(root, filename)
                logger.info("Deleting file %s" % filepath)
                os.unlink(filepath)
            for dirname in dirs:
                dirpath = os.path.join(root, dirname)
                if os.path.islink(dirpath):
                    # Never descend into links; just remove the link itself.
                    logger.info("Deleting link %s" % dirpath)
                    os.unlink(dirpath)
                else:
                    logger.info("Deleting dir %s" % dirpath)
                    os.rmdir(dirpath)
        logger.info("Deleting dir %s", path)
        os.rmdir(path)
def image2surface(img):
    """
    Convert a PIL image into a Cairo surface.
    """
    if not CAIRO_AVAILABLE:
        raise Exception("Cairo not available(). image2surface() cannot work.")

    # TODO(Jflesch): Python 3 problem
    # cairo.ImageSurface.create_for_data() raises NotImplementedYet ...
    # img.putalpha(256)
    # (width, height) = img.size
    # imgd = img.tobytes('raw', 'BGRA')
    # imga = array.array('B', imgd)
    # stride = width * 4
    # return cairo.ImageSurface.create_for_data(
    #     imga, cairo.FORMAT_ARGB32, width, height, stride)

    # So we fall back to round-tripping through an in-memory PNG:
    global g_lock
    with g_lock:
        img_io = io.BytesIO()
        img.save(img_io, format="PNG")
        img_io.seek(0)
        return cairo.ImageSurface.create_from_png(img_io)
def _get_text(self):
    """
    Get the text corresponding to this page (one string per line of boxes).
    """
    lines = []
    for line in self.boxes:
        # Each word is prefixed by a space, matching the original format
        # (lines therefore start with a leading space).
        lines.append(u"".join(u" " + box.content for box in line.word_boxes))
    return lines
def __get_img(self):
    """
    Returns an image object corresponding to the page.
    """
    with self.fs.open(self.__img_path, 'rb') as fd:
        img = PIL.Image.open(fd)
        # Force the pixel data to be read before the file is closed.
        img.load()
        return img
def change_index(self, offset=0):
    """
    Move the page number by a given offset. Beware to not let any hole
    in the page numbers when doing this. Make sure also that the wanted
    number is available.
    Will also change the page number of the current object.

    :param offset: signed offset to add to the current page number.
    """
    # Remember the current on-disk locations before renumbering.
    src = {}
    src["box"] = self.__get_box_path()
    src["img"] = self.__get_img_path()
    src["thumb"] = self._get_thumb_path()

    page_nb = self.page_nb + offset
    logger.info("--> Moving page %d (+%d) to index %d"
                % (self.page_nb, offset, page_nb))
    self.page_nb = page_nb

    # Paths after renumbering.
    dst = {}
    dst["box"] = self.__get_box_path()
    dst["img"] = self.__get_img_path()
    dst["thumb"] = self._get_thumb_path()

    for key in src.keys():
        if self.fs.exists(src[key]):
            if self.fs.exists(dst[key]):
                logger.error("Error: file already exists: %s" % dst[key])
                # BUG FIX: was `assert(0)`, which is silently stripped when
                # Python runs with -O; raise the same exception type
                # explicitly so the collision is never ignored.
                raise AssertionError("file already exists: %s" % dst[key])
            self.fs.rename(src[key], dst[key])
def destroy(self):
    """
    Delete the page. May delete the whole document if it's actually the
    last page.
    """
    logger.info("Destroying page: %s" % self)
    if self.doc.nb_pages <= 1:
        # Last page: the whole document goes with it.
        self.doc.destroy()
        return

    doc_pages = self.doc.pages[:]
    current_doc_nb_pages = self.doc.nb_pages

    # Remove this page's files from disk.
    for path in (self.__get_box_path(),
                 self.__get_img_path(),
                 self._get_thumb_path()):
        if self.fs.exists(path):
            self.fs.unlink(path)

    # Shift every following page back by one to close the numbering hole.
    for page_nb in range(self.page_nb + 1, current_doc_nb_pages):
        doc_pages[page_nb].change_index(offset=-1)
def discretize(self, intervals, slope_thresh=1500, cents_thresh=50):
    """
    This function takes the pitch data and returns it quantized to given
    set of intervals. All transactions must happen in cent scale.

    slope_thresh is the bound beyond which the pitch contour is said to
    transit from one svara to another. It is specified in cents/sec.

    cents_thresh is a limit within which two pitch values are considered
    the same. This is what pushes the quantization limit.

    The function returns quantized pitch data.
    """
    # eps = np.finfo(float).eps
    # pitch = median_filter(pitch, 7)+eps
    self.pitch = median_filter(self.pitch, 7)
    pitch_quantized = np.zeros(len(self.pitch))
    # NOTE(review): the endpoints are assigned the *index* of the nearest
    # interval while the loop below assigns interval *values* — confirm
    # whether intervals[...] was intended here as well.
    pitch_quantized[0] = utils.find_nearest_index(intervals, self.pitch[0])
    pitch_quantized[-1] = utils.find_nearest_index(intervals, self.pitch[-1])

    # BUG FIX: xrange is Python-2-only; range behaves identically for
    # iteration on both Python 2 and 3.
    for i in range(1, len(self.pitch) - 1):
        if self.pitch[i] == -10000:
            # -10000 marks unvoiced/invalid samples; propagate as-is.
            pitch_quantized[i] = -10000
            continue
        slope_back = abs((self.pitch[i] - self.pitch[i - 1]) /
                         (self.timestamps[i] - self.timestamps[i - 1]))
        slope_front = abs((self.pitch[i + 1] - self.pitch[i]) /
                          (self.timestamps[i + 1] - self.timestamps[i]))
        if slope_front < slope_thresh or slope_back < slope_thresh:
            # Contour is (locally) stable: snap to the nearest interval
            # if it is close enough in cents.
            ind = utils.find_nearest_index(intervals, self.pitch[i])
            cents_diff = abs(self.pitch[i] - intervals[ind])
            if cents_diff <= cents_thresh:
                pitch_quantized[i] = intervals[ind]
            else:
                pitch_quantized[i] = -10000
        else:
            # Transition between svaras: mark as invalid.
            pitch_quantized[i] = -10000

    self.pitch = pitch_quantized
def assume(self, other):
    """
    Assume the identity of another target. This can be useful to make the
    global target assume the identity of an ELF executable.

    Arguments:
        other(:class:`Target`): The target whose identity to assume.

    Example:
        >>> from pwny import *
        >>> target.assume(ELF('my-executable'))
    """
    # Copy the four private identity fields verbatim.
    for attr in ('_arch', '_bits', '_endian', '_mode'):
        setattr(self, attr, getattr(other, attr))
def has_read_permission(self, request, path):
    """
    Just return True if the user is an authenticated staff member.
    Extensions could base the permissions on the path too.
    """
    user = request.user
    # Anonymous users never get access.
    if not user.is_authenticated():
        return False
    # Superusers and staff members get identical (full) read access.
    if user.is_superuser or user.is_staff:
        return True
    return False
def encode(string):
    """
    Encode the given string as an OID: length, then one ordinal per char.

    >>> import snmp_passpersist as snmp
    >>> snmp.PassPersist.encode("hello")
    '5.104.101.108.108.111'
    >>>
    """
    ordinals = ".".join(str(ord(ch)) for ch in string)
    return "%s." % (len(string)) + ordinals
def cut_oid(self, full_oid):
    """
    Remove the base OID from the given string; return None when the
    given OID is not under the base.

    >>> import snmp_passpersist as snmp
    >>> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
    >>> pp.cut_oid(".1.3.6.1.3.53.8.28.12")
    '28.12'
    """
    if full_oid.startswith(self.base_oid.rstrip('.')):
        return full_oid[len(self.base_oid):]
    return None
def add_oid_entry(self, oid, type, value, label=None):
    """General function to add an oid entry to the MIB subtree."""
    if self.debug:
        print('DEBUG: %s %s %s %s' % (oid, type, value, label))
    # Everything is stored stringified, as the pass_persist protocol
    # is line/text based.
    entry = {'type': str(type), 'value': str(value)}
    if label is not None:
        entry['label'] = str(label)
    self.pending[oid] = entry
def add_oid(self, oid, value, label=None):
    """Short helper to add an object ID value to the MIB subtree."""
    # Delegates with the SNMP OBJECTID type tag.
    self.add_oid_entry(oid, 'OBJECTID', value, label=label)
def add_int(self, oid, value, label=None):
    """Short helper to add an integer value to the MIB subtree."""
    # Delegates with the SNMP INTEGER type tag.
    self.add_oid_entry(oid, 'INTEGER', value, label=label)
def add_oct(self, oid, value, label=None):
    """Short helper to add an octet value to the MIB subtree."""
    # Delegates with the SNMP OCTET type tag.
    self.add_oid_entry(oid, 'OCTET', value, label=label)
def add_str(self, oid, value, label=None):
    """Short helper to add a string value to the MIB subtree."""
    # Delegates with the SNMP STRING type tag.
    self.add_oid_entry(oid, 'STRING', value, label=label)
def add_ip(self, oid, value, label=None):
    """Short helper to add an IP address value to the MIB subtree."""
    # Delegates with the SNMP IPADDRESS type tag.
    self.add_oid_entry(oid, 'IPADDRESS', value, label=label)
def add_cnt_32bit(self, oid, value, label=None):
    """Short helper to add a 32 bit counter value to the MIB subtree."""
    # Truncate the integer to the Counter32 range (modulo 2**32).
    self.add_oid_entry(oid, 'Counter32', int(value) % 4294967296, label=label)
def add_cnt_64bit(self, oid, value, label=None):
    """Short helper to add a 64 bit counter value to the MIB subtree."""
    # BUG FIX: the modulus was 18446744073709551615 (2**64 - 1), an
    # off-by-one that maps the legal counter value 2**64-1 to 0 and is
    # inconsistent with add_cnt_32bit (which correctly uses 2**32).
    # Truncate to the Counter64 range with modulo 2**64.
    self.add_oid_entry(oid, 'Counter64', int(value) % 18446744073709551616, label=label)
def add_gau(self, oid, value, label=None):
    """Short helper to add a gauge value to the MIB subtree."""
    # Delegates with the SNMP GAUGE type tag.
    self.add_oid_entry(oid, 'GAUGE', value, label=label)
def add_tt(self, oid, value, label=None):
    """Short helper to add a timeticks value to the MIB subtree."""
    # Delegates with the SNMP TIMETICKS type tag.
    self.add_oid_entry(oid, 'TIMETICKS', value, label=label)
def main_passpersist(self):
    """
    Main function that handle SNMP's pass_persist protocol, called by
    the start method. Direct call is unnecessary.
    """
    line = sys.stdin.readline().strip()
    if not line:
        raise EOFError()

    if 'PING' in line:
        print("PONG")
    elif 'getnext' in line:
        # NB: must be tested before 'get', which is a substring of it.
        oid = self.cut_oid(sys.stdin.readline().strip())
        if oid is None:
            print("NONE")
        elif oid == "":
            # Fallback to the first entry
            print(self.get_first())
        else:
            print(self.get_next(oid))
    elif 'get' in line:
        oid = self.cut_oid(sys.stdin.readline().strip())
        if oid is None:
            print("NONE")
        else:
            print(self.get(oid))
    elif 'set' in line:
        oid = sys.stdin.readline().strip()
        typevalue = sys.stdin.readline().strip()
        self.set(oid, typevalue)
    elif 'DUMP' in line:
        # Just for debugging
        from pprint import pprint
        pprint(self.data)
    else:
        print("NONE")
    sys.stdout.flush()
def main_update(self):
    """
    Main function called by the updater thread.
    Direct call is unnecessary.
    """
    # Renice updater thread to limit overload
    try:
        os.nice(1)
    except AttributeError:
        pass  # os.nice is not available on windows

    time.sleep(self.refresh)

    try:
        while True:
            # We pick a timestamp to take in account the time used by update()
            timestamp = time.time()

            # Update data with user's defined function
            self.update()

            # We use this trick because we cannot use signals in a backoffice
            # threads and alarm() mess up with readline() in the main thread.
            delay = (timestamp + self.refresh) - time.time()
            if delay > 0:
                time.sleep(self.refresh if delay > self.refresh else delay)

            # Commit change exactly every 'refresh' seconds, whatever
            # update() takes long. Commited values are a bit old, but for
            # RRD, punctuals values are better than
            # fresh-but-not-time-constants values.
            self.commit()
    except Exception as e:
        # Surface the failure to the main thread, then let it propagate.
        self.error = e
        raise
def set(self, oid, typevalue):
    """
    Call the default or user setter function if available.

    :param oid: OID being set (already a string).
    :param typevalue: "TYPE value" line as received on stdin.
    """
    type_ = typevalue.split()[0]
    # FIX: the value was extracted with typevalue.lstrip(type_), but
    # str.lstrip() strips a *set of characters*, not a prefix, which is a
    # classic footgun. Slice the type token off explicitly instead.
    value = typevalue[len(type_):].strip().strip('"')
    ret_value = self.get_setter(oid)(oid, type_, value)
    if ret_value:
        if ret_value in ErrorValues or ret_value == 'DONE':
            print(ret_value)
        elif ret_value is True:
            print('DONE')
        elif ret_value is False:
            print(Error.NotWritable)
        else:
            raise RuntimeError("wrong return value: %s" % str(ret_value))
    else:
        print(Error.NotWritable)
def start(self, user_func, refresh):
    """
    Start the SNMP's protocol handler and the updater thread.

    :param user_func: reference to an update function, ran every
        'refresh' seconds.
    :param refresh: update period, in seconds.
    """
    self.update = user_func
    self.refresh = refresh
    self.error = None

    # First load
    self.update()
    self.commit()

    # Start updater thread
    up = threading.Thread(None, self.main_update, "Updater")
    up.daemon = True
    up.start()

    # Main loop: do not serve data if the Updater thread has died.
    # BUG FIX: isAlive() was removed in Python 3.9; is_alive() exists
    # since Python 2.6 and behaves identically.
    while up.is_alive():
        try:
            self.main_passpersist()
        except:
            # NOTE(review): Thread._Thread__stop() is a CPython 2 private
            # API that no longer exists on Python 3 — a portable shutdown
            # (e.g. a stop flag checked by main_update) should replace it
            # before running under Python 3.
            up._Thread__stop()
            raise
def _get_members_of_type(obj, member_type):
    """
    Finds members of a certain type in obj.

    :param obj: A model instance or class.
    :param member_type: The type of the member we are trying to find.
    :rtype: A :class:`list` of (name, member) pairs of ``member_type``
        found in ``obj``.
    """
    # Work on the class, not the instance.
    if not issubclass(type(obj), ModelBase):
        obj = obj.__class__

    found = []
    for key in dir(obj):
        try:
            attr = getattr(obj, key)
        except AttributeError as e:
            # Some descriptors raise on class access; fall back to the
            # raw class dict.
            try:
                attr = obj.__dict__[key]
            except KeyError:
                raise AttributeError(INTROSPECTION_ERROR % (e, obj, member_type))
        # Exact type match (deliberately not isinstance).
        if type(attr) is member_type:
            found.append((key, attr))
    return found
def next(self):
    """
    Provide the next element of the list (Python 2 iterator protocol).
    """
    try:
        page = self.page_list[self.idx]
    except IndexError:
        # Past the end: signal exhaustion.
        raise StopIteration()
    self.idx += 1
    return page
def _get_nb_pages(self):
    """
    Compute the number of pages in the document. It basically counts
    how many JPG files there are in the document.
    """
    try:
        filelist = self.fs.listdir(self.path)
        count = 0
        for filepath in filelist:
            filename = self.fs.basename(filepath)
            # Keep only page images: right extension, not a thumbnail,
            # and carrying the page-file prefix.
            # NOTE(review): the [-4:] slice assumes EXT_IMG is 3 chars
            # long — confirm.
            if (filename[-4:].lower() != "." + ImgPage.EXT_IMG
                    or (filename[-10:].lower() == "." + ImgPage.EXT_THUMB)
                    or (filename[:len(ImgPage.FILE_PREFIX)].lower() !=
                        ImgPage.FILE_PREFIX)):
                continue
            count += 1
        return count
    except IOError as exc:
        logger.debug("Exception while trying to get the number of"
                     " pages of '%s': %s", self.docid, exc)
        return 0
    except OSError as exc:
        # A missing directory simply means zero pages; anything else
        # is a real error.
        if exc.errno != errno.ENOENT:
            logger.error("Exception while trying to get the number of"
                         " pages of '%s': %s", self.docid, exc)
            raise
        return 0
def steal_page(self, page):
    """
    Steal a page from another document.
    """
    if page.doc == self:
        # Already ours; nothing to do.
        return
    self.fs.mkdir_p(self.path)

    stolen = ImgPage(self, self.nb_pages)
    logger.info("%s --> %s" % (str(page), str(stolen)))
    stolen._steal_content(page)
def recursion_depth(key):
    """
    A context manager used to guard recursion depth for some function.
    Multiple functions can be kept separately because it will be counted
    per key.

    Any exceptions raised in the recursive function will reset the counter,
    because the stack will be unwinded.

    usage::

        with recursion_depth('some_function_name') as recursion_level:
            if recursion_level > getattr(settings, 'RECURSION_LIMIT',
                                         sys.getrecursionlimit() / 10):
                raise Exception("Too deep")
            # do some recursive dangerous things.

    :param key: The key under which the recursion depth is kept.
    """
    try:
        # BUG FIX: the counter was stored under the literal attribute name
        # 'key' (RECURSION_LEVEL_DICT.key), so every function shared one
        # single counter instead of one per key as documented. Use the
        # `key` argument via getattr/setattr.
        if not getattr(RECURSION_LEVEL_DICT, key, False):
            setattr(RECURSION_LEVEL_DICT, key, 0)
        setattr(RECURSION_LEVEL_DICT, key, getattr(RECURSION_LEVEL_DICT, key) + 1)
        yield getattr(RECURSION_LEVEL_DICT, key)
        setattr(RECURSION_LEVEL_DICT, key, getattr(RECURSION_LEVEL_DICT, key) - 1)
    except Exception as e:
        # The stack unwinds through here on any failure: reset the counter.
        setattr(RECURSION_LEVEL_DICT, key, 0)
        raise e
def first_match(predicate, lst):
    """
    Return the first non-None value of ``predicate`` applied to the items
    of ``lst``; None if the predicate returns None for every item.

    >>> def return_if_even(x):
    ...     if x % 2 == 0:
    ...         return x
    ...     return None
    >>> first_match(return_if_even, [1, 3, 4, 7])
    4
    >>> first_match(return_if_even, [1, 3, 5, 7])

    :param predicate: a function that returns None or a value.
    :param lst: A list of items that can serve as input to ``predicate``.
    :rtype: whatever ``predicate`` returns instead of None. (or None).
    """
    results = (predicate(item) for item in lst)
    return next((res for res in results if res is not None), None)
def bases_walker(cls):
    """
    Loop through all bases of cls, depth-first, pre-order.

    :param cls: The class in which we want to loop through the base classes.
    """
    # Iterative DFS equivalent of the recursive walk: push bases in
    # reverse so they pop in declaration order.
    stack = list(reversed(cls.__bases__))
    while stack:
        base = stack.pop()
        yield base
        stack.extend(reversed(base.__bases__))
def iterread(self, table):
    """Iteratively read data from a GTFS table. Returns namedtuples."""
    self.log('Reading: %s' % table)
    # Entity class for this table.
    cls = self.FACTORIES[table]
    f = self._open(table)
    # csv reader; prefer unicodecsv when available (handles the utf-8 BOM).
    if unicodecsv:
        data = unicodecsv.reader(f, encoding='utf-8-sig')
    else:
        data = csv.reader(f)
    # BUG FIX: data.next() is Python-2-only; the builtin next() works on
    # both Python 2.6+ and Python 3.
    header = next(data)
    headerlen = len(header)
    ent = collections.namedtuple(
        'EntityNamedTuple',
        map(str, header)
    )
    for row in data:
        if len(row) == 0:
            continue
        # Get rid of extra spaces.
        row = [i.strip() for i in row]
        # pad to length if necessary... :(
        if len(row) < headerlen:
            row += [''] * (headerlen - len(row))
        yield cls.from_row(ent._make(row), self)
    f.close()
def write(self, filename, entities, sortkey=None, columns=None):
    """Write entities out to filename in csv format.

    Note: this doesn't write directly into a Zip archive, because this
    behavior is difficult to achieve with Zip archives. Use make_zip()
    to create a new GTFS Zip archive.

    :param filename: destination path; must not already exist.
    :param entities: iterable of dict-like entities to write.
    :param sortkey: optional key to sort the entities by.
    :param columns: optional explicit column list; defaults to the union
        of all entity keys, sorted.
    """
    if os.path.exists(filename):
        raise IOError('File exists: %s' % filename)
    # Make sure we have all the entities loaded.
    if sortkey:
        entities = sorted(entities, key=lambda x: x[sortkey])
    if not columns:
        columns = set()
        for entity in entities:
            columns |= set(entity.keys())
        columns = sorted(columns)
    # Write the csv file.
    # BUG FIX: unicodecsv is optional elsewhere (see iterread), but this
    # method used it unconditionally and broke when it is unavailable;
    # fall back to the stdlib csv module like the reader does.
    writer_module = unicodecsv if unicodecsv else csv
    with open(filename, 'wb') as f:
        writer = writer_module.writer(f)  # , encoding='utf-8-sig'
        writer.writerow(columns)
        for entity in entities:
            writer.writerow([entity.get(column) for column in columns])
def make_zip(self, filename, files=None, path=None, clone=None, compress=True):
    """Create a Zip archive.

    Provide any of the following:
        files - A list of files
        path  - A directory of .txt files
        clone - Copy any files from a zip archive not specified above

    Duplicate files will be ignored. The 'files' argument will be used
    first, then files found in the specified 'path', then in the
    specified 'clone' archive.
    """
    if filename and os.path.exists(filename):
        raise IOError('File exists: %s' % filename)
    files = files or []
    arcnames = []
    if path and os.path.isdir(path):
        files += glob.glob(os.path.join(path, '*.txt'))
    compress_level = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED

    # Write files.
    self.log("Creating zip archive: %s" % filename)
    zf = zipfile.ZipFile(filename, 'a', compression=compress_level)
    for f in files:
        base = os.path.basename(f)
        if base in arcnames:
            # First occurrence wins.
            self.log('... skipping: %s' % f)
        else:
            self.log('... adding: %s' % f)
            arcnames.append(base)
            zf.write(f, base)

    # Clone from existing zip archive.
    if clone and os.path.exists(clone):
        zc = zipfile.ZipFile(clone)
        for f in zc.namelist():
            base = os.path.basename(f)
            if os.path.splitext(base)[-1] != '.txt':
                pass  # self.log('... skipping from clone: %s' % f)
            elif base in arcnames:
                self.log('... skipping from clone: %s' % f)
            else:
                self.log('... adding from clone: %s' % f)
                arcnames.append(base)
                with zc.open(f) as i:
                    data = i.read()
                zf.writestr(base, data)
    zf.close()
def shapes(self):
    """Return the route shapes as a dictionary."""
    # Todo: Cache?
    if self._shapes:
        return self._shapes
    # Group the shape points together by shape_id.
    self.log("Generating shapes...")
    grouped = collections.defaultdict(entities.ShapeLine)
    for point in self.read('shapes'):
        grouped[point['shape_id']].add_child(point)
    self._shapes = grouped
    return self._shapes
def alloc_data(self, value):
    """
    Allocate a piece of data that will be included in the shellcode body.

    Arguments:
        value(...): The value to add to the shellcode. Can be bytes or
            string type.

    Returns:
        ~pwnypack.types.Offset: The offset used to address the data.
    """
    if isinstance(value, six.binary_type):
        return self._alloc_data(value)
    if isinstance(value, six.text_type):
        # Text is stored UTF-8 encoded and NUL terminated.
        return self._alloc_data(value.encode('utf-8') + b'\0')
    raise TypeError('No idea how to encode %s' % repr(value))
def compile(self, ops):
    """
    Translate a list of operations into its assembler source.

    Arguments:
        ops(list): A list of shellcode operations.

    Returns:
        str: The assembler source code that implements the shellcode.
    """
    def _compile():
        code = []
        for op in ops:
            if isinstance(op, SyscallInvoke):
                code.extend(self.syscall(op))
            elif isinstance(op, LoadRegister):
                code.extend(self.reg_load(op.register, op.value))
            elif isinstance(op, str):
                code.extend(op.split('\n'))
            else:
                raise ValueError('No idea how to assemble "%s"' % repr(op))
        return ['\t%s' % line for line in code]

    # We do 2 passes to make sure all data is allocated so buffers point
    # at the right offset.
    _compile()
    return '\n'.join(self.finalize(self.data_finalizer(_compile(), self.data))) + '\n'
<SYSTEM_TASK:> Assemble a list of operations into executable code. <END_TASK> <USER_TASK:> Description: def assemble(self, ops): """ Assemble a list of operations into executable code. Arguments: ops(list): A list of shellcode operations. Returns: bytes: The executable code that implements the shellcode. """
return pwnypack.asm.asm(self.compile(ops), target=self.target)
<SYSTEM_TASK:> For a given value, the function finds the nearest value <END_TASK> <USER_TASK:> Description: def find_nearest_index(arr, value): """For a given value, the function finds the nearest value in the array and returns its index."""
arr = np.array(arr) index = (abs(arr-value)).argmin() return index
<SYSTEM_TASK:> Shut down the socket immediately. <END_TASK> <USER_TASK:> Description: def kill(self): """ Shut down the socket immediately. """
self._socket.shutdown(socket.SHUT_RDWR) self._socket.close()
<SYSTEM_TASK:> Read until the channel is closed. <END_TASK> <USER_TASK:> Description: def read_eof(self, echo=None): """ Read until the channel is closed. Args: echo(bool): Whether to write the read data to stdout. Returns: bytes: The read data. """
d = b'' while True: try: d += self.read(1, echo) except EOFError: return d
<SYSTEM_TASK:> Read until a certain string is encountered. <END_TASK> <USER_TASK:> Description: def read_until(self, s, echo=None): """ Read until a certain string is encountered. Args: s(bytes): The string to wait for. echo(bool): Whether to write the read data to stdout. Returns: bytes: The data up to and including *s*. Raises: EOFError: If the channel was closed. """
s_len = len(s) buf = self.read(s_len, echo) while buf[-s_len:] != s: buf += self.read(1, echo) return buf
<SYSTEM_TASK:> Write data to channel. <END_TASK> <USER_TASK:> Description: def write(self, data, echo=None): """ Write data to channel. Args: data(bytes): The data to write to the channel. echo(bool): Whether to echo the written data to stdout. Raises: EOFError: If the channel was closed before all data was sent. """
if echo or (echo is None and self.echo): sys.stdout.write(data.decode('latin1')) sys.stdout.flush() self.channel.write(data)
<SYSTEM_TASK:> Write a byte sequences to the channel and terminate it with carriage <END_TASK> <USER_TASK:> Description: def writeline(self, line=b'', sep=b'\n', echo=None): """ Write a byte sequences to the channel and terminate it with carriage return and line feed. Args: line(bytes): The line to send. sep(bytes): The separator to use after each line. echo(bool): Whether to echo the written data to stdout. Raises: EOFError: If the channel was closed before all data was sent. """
self.writelines([line], sep, echo)
<SYSTEM_TASK:> Interact with the socket. This will send all keyboard input to the <END_TASK> <USER_TASK:> Description: def interact(self): """ Interact with the socket. This will send all keyboard input to the socket and input from the socket to the console until an EOF occurs. """
sockets = [sys.stdin, self.channel] while True: ready = select.select(sockets, [], [])[0] if sys.stdin in ready: line = sys.stdin.readline().encode('latin1') if not line: break self.write(line) if self.channel in ready: self.read(1, echo=True)
<SYSTEM_TASK:> Return all the importer objects that can handle the specified files. <END_TASK> <USER_TASK:> Description: def get_possible_importers(file_uris, current_doc=None): """ Return all the importer objects that can handle the specified files. Possible imports may vary depending on the currently active document """
importers = [] for importer in IMPORTERS: if importer.can_import(file_uris, current_doc): importers.append(importer) return importers
<SYSTEM_TASK:> Check that the specified file looks like a directory containing many <END_TASK> <USER_TASK:> Description: def can_import(self, file_uris, current_doc=None): """ Check that the specified file looks like a directory containing many pdf files """
if len(file_uris) <= 0: return False try: for file_uri in file_uris: file_uri = self.fs.safe(file_uri) for child in self.fs.recurse(file_uri): if self.check_file_type(child): return True except GLib.GError: pass return False
<SYSTEM_TASK:> Check that the specified file looks like an image supported by PIL <END_TASK> <USER_TASK:> Description: def can_import(self, file_uris, current_doc=None): """ Check that the specified file looks like an image supported by PIL """
if len(file_uris) <= 0: return False for file_uri in file_uris: file_uri = self.fs.safe(file_uri) if not self.check_file_type(file_uri): return False return True
<SYSTEM_TASK:> Perform cyclical exclusive or operations on ``data``. <END_TASK> <USER_TASK:> Description: def xor(key, data): """ Perform cyclical exclusive or operations on ``data``. The ``key`` can be a an integer *(0 <= key < 256)* or a byte sequence. If the key is smaller than the provided ``data``, the ``key`` will be repeated. Args: key(int or bytes): The key to xor ``data`` with. data(bytes): The data to perform the xor operation on. Returns: bytes: The result of the exclusive or operation. Examples: >>> from pwny import * >>> xor(5, b'ABCD') b'DGFA' >>> xor(5, b'DGFA') b'ABCD' >>> xor(b'pwny', b'ABCDEFGHIJKLMNOPQRSTUVWXYZ') b'15-=51)19=%5=9!)!%=-%!9!)-' >>> xor(b'pwny', b'15-=51)19=%5=9!)!%=-%!9!)-') b'ABCDEFGHIJKLMNOPQRSTUVWXYZ' """
if type(key) is int: key = six.int2byte(key) key_len = len(key) return b''.join( six.int2byte(c ^ six.indexbytes(key, i % key_len)) for i, c in enumerate(six.iterbytes(data)) )
<SYSTEM_TASK:> Apply a caesar cipher to a string. <END_TASK> <USER_TASK:> Description: def caesar(shift, data, shift_ranges=('az', 'AZ')): """ Apply a caesar cipher to a string. The caesar cipher is a substition cipher where each letter in the given alphabet is replaced by a letter some fixed number down the alphabet. If ``shift`` is ``1``, *A* will become *B*, *B* will become *C*, etc... You can define the alphabets that will be shift by specifying one or more shift ranges. The characters will than be shifted within the given ranges. Args: shift(int): The shift to apply. data(str): The string to apply the cipher to. shift_ranges(list of str): Which alphabets to shift. Returns: str: The string with the caesar cipher applied. Examples: >>> caesar(16, 'Pwnypack') 'Fmdofqsa' >>> caesar(-16, 'Fmdofqsa') 'Pwnypack' >>> caesar(16, 'PWNYpack', shift_ranges=('AZ',)) 'FMDOpack' >>> caesar(16, 'PWNYpack', shift_ranges=('Az',)) '`g^iFqsA' """
alphabet = dict( (chr(c), chr((c - s + shift) % (e - s + 1) + s)) for s, e in map(lambda r: (ord(r[0]), ord(r[-1])), shift_ranges) for c in range(s, e + 1) ) return ''.join(alphabet.get(c, c) for c in data)
<SYSTEM_TASK:> Convert bytes to their hexadecimal representation, optionally joined by a <END_TASK> <USER_TASK:> Description: def enhex(d, separator=''): """ Convert bytes to their hexadecimal representation, optionally joined by a given separator. Args: d(bytes): The data to convert to hexadecimal representation. separator(str): The separator to insert between hexadecimal tuples. Returns: str: The hexadecimal representation of ``d``. Examples: >>> from pwny import * >>> enhex(b'pwnypack') '70776e797061636b' >>> enhex(b'pwnypack', separator=' ') '70 77 6e 79 70 61 63 6b' """
v = binascii.hexlify(d).decode('ascii') if separator: return separator.join( v[i:i+2] for i in range(0, len(v), 2) ) else: return v
<SYSTEM_TASK:> Xor a value with a key. <END_TASK> <USER_TASK:> Description: def xor_app(parser, cmd, args): # pragma: no cover """ Xor a value with a key. """
parser.add_argument( '-d', '--dec', help='interpret the key as a decimal integer', dest='type', action='store_const', const=int ) parser.add_argument( '-x', '--hex', help='interpret the key as an hexadecimal integer', dest='type', action='store_const', const=lambda v: int(v, 16) ) parser.add_argument('key', help='the key to xor the value with') parser.add_argument('value', help='the value to xor, read from stdin if omitted', nargs='?') args = parser.parse_args(args) if args.type is not None: args.key = args.type(args.key) return xor(args.key, pwnypack.main.binary_value_or_stdin(args.value))
<SYSTEM_TASK:> Caesar crypt a value with a key. <END_TASK> <USER_TASK:> Description: def caesar_app(parser, cmd, args): # pragma: no cover """ Caesar crypt a value with a key. """
parser.add_argument('shift', type=int, help='the shift to apply') parser.add_argument('value', help='the value to caesar crypt, read from stdin if omitted', nargs='?') parser.add_argument( '-s', '--shift-range', dest='shift_ranges', action='append', help='specify a character range to shift (defaults to a-z, A-Z)' ) args = parser.parse_args(args) if not args.shift_ranges: args.shift_ranges = ['az', 'AZ'] return caesar(args.shift, pwnypack.main.string_value_or_stdin(args.value), args.shift_ranges)
<SYSTEM_TASK:> encode a series of key=value pairs into a query string. <END_TASK> <USER_TASK:> Description: def enurlform_app(parser, cmd, args): # pragma: no cover """ encode a series of key=value pairs into a query string. """
parser.add_argument('values', help='the key=value pairs to URL encode', nargs='+') args = parser.parse_args(args) return enurlform(dict(v.split('=', 1) for v in args.values))
<SYSTEM_TASK:> decode a query string into its key value pairs. <END_TASK> <USER_TASK:> Description: def deurlform_app(parser, cmd, args): # pragma: no cover """ decode a query string into its key value pairs. """
parser.add_argument('value', help='the query string to decode') args = parser.parse_args(args) return ' '.join('%s=%s' % (key, value) for key, values in deurlform(args.value).items() for value in values)
<SYSTEM_TASK:> perform frequency analysis on a value. <END_TASK> <USER_TASK:> Description: def frequency_app(parser, cmd, args): # pragma: no cover """ perform frequency analysis on a value. """
parser.add_argument('value', help='the value to analyse, read from stdin if omitted', nargs='?') args = parser.parse_args(args) data = frequency(six.iterbytes(pwnypack.main.binary_value_or_stdin(args.value))) return '\n'.join( '0x%02x (%c): %d' % (key, chr(key), value) if key >= 32 and chr(key) in string.printable else '0x%02x ---: %d' % (key, value) for key, value in data.items() )
<SYSTEM_TASK:> Update the circuit breaker with an error event. <END_TASK> <USER_TASK:> Description: def error(self, err=None): """Update the circuit breaker with an error event."""
if self.state == 'half-open':
    # Cap the failure counter so the back-off exponent stays bounded.
    self.test_fail_count = min(self.test_fail_count + 1, 16)
self.errors.append(self.clock())
if len(self.errors) > self.maxfail:
    # Sliding window: if the oldest of the last maxfail+1 errors is
    # within one time unit, the error rate is too high -> open.
    # (Renamed local from ``time`` to avoid shadowing the time module.)
    elapsed = self.clock() - self.errors.pop(0)
    if elapsed < self.time_unit:
        if elapsed == 0:
            # Guard against division by zero for simultaneous errors.
            elapsed = 0.0001
        self.log.debug('error rate: %f errors per second' % (
            float(self.maxfail) / elapsed))
        self.open(err)
<SYSTEM_TASK:> Return a circuit breaker for the given ID. <END_TASK> <USER_TASK:> Description: def context(self, id): """Return a circuit breaker for the given ID."""
# Lazily create one breaker per ID; subsequent calls with the same ID
# reuse it so failure history is shared.
# NOTE(review): parameter ``id`` shadows the builtin of the same name.
if id not in self.circuits:
    # Each breaker gets a child logger named after its ID and inherits
    # the manager's thresholds and back-off configuration.
    self.circuits[id] = self.factory(self.clock, self.log.getChild(id),
                                     self.error_types, self.maxfail,
                                     self.reset_timeout, self.time_unit,
                                     backoff_cap=self.backoff_cap,
                                     with_jitter=self.with_jitter)
return self.circuits[id]
<SYSTEM_TASK:> Start an interactive python interpreter with pwny imported globally. <END_TASK> <USER_TASK:> Description: def shell(_parser, cmd, args): # pragma: no cover """ Start an interactive python interpreter with pwny imported globally. """
parser = argparse.ArgumentParser( prog=_parser.prog, description=_parser.description, ) group = parser.add_mutually_exclusive_group() group.set_defaults(shell=have_bpython and 'bpython' or (have_IPython and 'ipython' or 'python')) if have_bpython: group.add_argument( '--bpython', action='store_const', dest='shell', const='bpython', help='Use the bpython interpreter' ) if have_IPython: group.add_argument( '--ipython', action='store_const', dest='shell', const='ipython', help='Use the IPython interpreter' ) group.add_argument( '--python', action='store_const', dest='shell', const='python', help='Use the default python interpreter' ) args = parser.parse_args(args) import pwny pwny_locals = dict( (key, getattr(pwny, key)) for key in dir(pwny) if not key.startswith('__') and not key == 'shell' ) if args.shell == 'bpython': from bpython import embed embed(pwny_locals, banner=BANNER) elif args.shell == 'ipython': from IPython import start_ipython start_ipython( argv=['--ext=pwnypack.ipython_ext'], ) else: import code code.interact(BANNER, local=pwny_locals)
<SYSTEM_TASK:> Return the sorted StopTimes for this trip. <END_TASK> <USER_TASK:> Description: def stop_sequence(self): """Return the sorted StopTimes for this trip."""
return sorted( self.stop_times(), key=lambda x:int(x.get('stop_sequence')) )
<SYSTEM_TASK:> Returns a list of tuples. The first item in the tuple is the directional <END_TASK> <USER_TASK:> Description: def getGestureAndSegments(points): """ Returns a list of tuples. The first item in the tuple is the directional integer, and the second item is a tuple of integers for the start and end indexes of the points that make up the stroke. """
strokes, strokeSegments = _identifyStrokes(points) return list(zip(strokes, strokeSegments))
<SYSTEM_TASK:> Returns the Levenshtein Distance between two strings, `s1` and `s2` as an <END_TASK> <USER_TASK:> Description: def levenshteinDistance(s1, s2): """ Returns the Levenshtein Distance between two strings, `s1` and `s2` as an integer. http://en.wikipedia.org/wiki/Levenshtein_distance The Levenshtein Distance (aka edit distance) is how many changes (i.e. insertions, deletions, substitutions) have to be made to convert one string into another. For example, the Levenshtein distance between "kitten" and "sitting" is 3, since the following three edits change one into the other, and there is no way to do it with fewer than three edits: kitten -> sitten -> sittin -> sitting """
singleLetterMapping = {DOWNLEFT: '1', DOWN:'2', DOWNRIGHT:'3', LEFT:'4', RIGHT:'6', UPLEFT:'7', UP:'8', UPRIGHT:'9'} len1 = len([singleLetterMapping[letter] for letter in s1]) len2 = len([singleLetterMapping[letter] for letter in s2]) matrix = list(range(len1 + 1)) * (len2 + 1) for i in range(len2 + 1): matrix[i] = list(range(i, i + len1 + 1)) for i in range(len2): for j in range(len1): if s1[j] == s2[i]: matrix[i+1][j+1] = min(matrix[i+1][j] + 1, matrix[i][j+1] + 1, matrix[i][j]) else: matrix[i+1][j+1] = min(matrix[i+1][j] + 1, matrix[i][j+1] + 1, matrix[i][j] + 1) return matrix[len2][len1]
<SYSTEM_TASK:> This is a factory function that creates a form for a model with internationalised <END_TASK> <USER_TASK:> Description: def make_localised_form(model, form, exclude=None): """ This is a factory function that creates a form for a model with internationalised fields. The model should be decorated with the L10N decorator. """
newfields = {} for localized_field in model.localized_fields: # get the descriptor, which contains the form field default_field_descriptor = getattr(model, localized_field) # See if we've got overridden fields in a custom form. if hasattr(form, 'declared_fields'): form_field = form.declared_fields.get( localized_field, default_field_descriptor.form_field) else: form_field = default_field_descriptor.form_field # wrap the widget to show the origin of the value; # either database, catalog or fallback. if type(form_field.widget) is not WidgetWrapper: form_field.widget = WidgetWrapper(form_field.widget) newfields[localized_field] = form_field if hasattr(form, 'Meta'): setattr(form.Meta, 'model', model) else: newfields['Meta'] = type('Meta', tuple(), {'model':model}) newfields['localized_fields'] = model.localized_fields return ModelFormMetaclass(model.__name__, (LocalisedForm, form), newfields)
<SYSTEM_TASK:> Override save method to also save the localised fields. <END_TASK> <USER_TASK:> Description: def save(self, commit=True): """ Override save method to also save the localised fields. """
# set the localised fields for localized_field in self.instance.localized_fields: setattr(self.instance, localized_field, self.cleaned_data[localized_field]) return super(LocalisedForm, self).save(commit)
<SYSTEM_TASK:> Validates the uniqueness of fields, but also handles the localized_fields. <END_TASK> <USER_TASK:> Description: def validate_unique(self): """ Validates the uniqueness of fields, but also handles the localized_fields. """
form_errors = []
try:
    super(LocalisedForm, self).validate_unique()
except ValidationError as e:
    form_errors += e.messages

# Add unique validation for the localized fields.
localized_fields_checks = self._get_localized_field_checks()
bad_fields = set()

field_errors, global_errors = self._perform_unique_localized_field_checks(
    localized_fields_checks)
# BUG FIX: set.union() returns a NEW set and leaves the receiver
# untouched, so the offending fields were never recorded and never
# stripped from cleaned_data.  update() mutates in place.
bad_fields.update(field_errors)
form_errors.extend(global_errors)

# Drop values that failed the uniqueness checks so they are not used.
for field_name in bad_fields:
    del self.cleaned_data[field_name]
if form_errors:
    # Raise the unique-together errors since they are considered
    # form-wide.
    raise ValidationError(form_errors)
<SYSTEM_TASK:> Get the checks we must perform for the localized fields. <END_TASK> <USER_TASK:> Description: def _get_localized_field_checks(self): """ Get the checks we must perform for the localized fields. """
localized_fields_checks = [] for localized_field in self.instance.localized_fields: if self.cleaned_data.get(localized_field) is None: continue f = getattr(self.instance.__class__, localized_field, None) if f and f.unique: if f.unique: local_name = get_real_fieldname(localized_field, self.language) localized_fields_checks.append((localized_field, local_name)) return localized_fields_checks
<SYSTEM_TASK:> Do the checks for the localized fields. <END_TASK> <USER_TASK:> Description: def _perform_unique_localized_field_checks(self, unique_checks): """ Do the checks for the localized fields. """
bad_fields = set() form_errors = [] for (field_name, local_field_name) in unique_checks: lookup_kwargs = {} lookup_value = self.cleaned_data[field_name] # ModelChoiceField will return an object instance rather than # a raw primary key value, so convert it to a pk value before # using it in a lookup. lookup_value = getattr(lookup_value, 'pk', lookup_value) lookup_kwargs[str(local_field_name)] = lookup_value qs = self.instance.__class__._default_manager.filter(**lookup_kwargs) # Exclude the current object from the query if we are editing an # instance (as opposed to creating a new one) if self.instance.pk is not None: qs = qs.exclude(pk=self.instance.pk) # This cute trick with extra/values is the most efficient way to # tell if a particular query returns any results. if qs.extra(select={'a': 1}).values('a').order_by(): self._errors[field_name] = ErrorList([self.unique_error_message([field_name])]) bad_fields.add(field_name) return bad_fields, form_errors
<SYSTEM_TASK:> look for dependency that setuptools cannot check or that are too painful to <END_TASK> <USER_TASK:> Description: def find_missing_modules(): """ look for dependency that setuptools cannot check or that are too painful to install with setuptools """
missing_modules = [] for module in MODULES: try: __import__(module[1]) except ImportError: missing_modules.append(module) return missing_modules
<SYSTEM_TASK:> If ``request.session was modified``, or if the configuration is to save <END_TASK> <USER_TASK:> Description: def process_response(self, request, response): """ If ``request.session was modified``, or if the configuration is to save the session every time, save the changes and set a session cookie. """
try:
    modified = request.session.modified
except AttributeError:
    # No session attached to this request (e.g. session middleware not
    # active) -- nothing to persist.
    pass
else:
    if modified or settings.SESSION_SAVE_EVERY_REQUEST:
        if request.session.get_expire_at_browser_close():
            # Session cookie: no max-age/expiry -> dropped on browser close.
            max_age = None
            expires = None
        else:
            # Persistent cookie: expiry derived from the session's age.
            max_age = request.session.get_expiry_age()
            expires_time = time.time() + max_age
            expires = cookie_date(expires_time)
        # Save the session data and refresh the client cookie.
        request.session.save()
        response.set_cookie(settings.SESSION_COOKIE_NAME,
                request.session.session_key, max_age=max_age,
                expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,
                path=settings.SESSION_COOKIE_PATH,
                secure=settings.SESSION_COOKIE_SECURE or None)
return response
<SYSTEM_TASK:> Returns all documents of the collection <END_TASK> <USER_TASK:> Description: def all(cls, collection, skip=None, limit=None): """ Returns all documents of the collection :param collection Collection instance :param skip The number of documents to skip in the query :param limit The maximal amount of documents to return. The skip is applied before the limit restriction. :returns Document list """
kwargs = { 'skip': skip, 'limit': limit, } return cls._construct_query(name='all', collection=collection, multiple=True, **kwargs)
<SYSTEM_TASK:> This will find all documents in the collection that match the specified example object, <END_TASK> <USER_TASK:> Description: def update_by_example(cls, collection, example_data, new_value, keep_null=False, wait_for_sync=None, limit=None): """ This will find all documents in the collection that match the specified example object, and partially update the document body with the new value specified. Note that document meta-attributes such as _id, _key, _from, _to etc. cannot be replaced. Note: the limit attribute is not supported on sharded collections. Using it will result in an error. Returns result dict of the request. :param collection Collection instance :param example_data An example document that all collection documents are compared against. :param new_value A document containing all the attributes to update in the found documents. :param keep_null This parameter can be used to modify the behavior when handling null values. Normally, null values are stored in the database. By setting the keepNull parameter to false, this behavior can be changed so that all attributes in data with null values will be removed from the updated document. :param wait_for_sync if set to true, then all removal operations will instantly be synchronised to disk. If this is not specified, then the collection's default sync behavior will be applied. :param limit an optional value that determines how many documents to update at most. If limit is specified but is less than the number of documents in the collection, it is undefined which of the documents will be updated. :returns dict """
kwargs = { 'newValue': new_value, 'options': { 'keepNull': keep_null, 'waitForSync': wait_for_sync, 'limit': limit, } } return cls._construct_query(name='update-by-example', collection=collection, example=example_data, result=False, **kwargs)
<SYSTEM_TASK:> This will find all documents in the collection that match the specified example object. <END_TASK> <USER_TASK:> Description: def remove_by_example(cls, collection, example_data, wait_for_sync=None, limit=None): """ This will find all documents in the collection that match the specified example object. Note: the limit attribute is not supported on sharded collections. Using it will result in an error. The options attributes waitForSync and limit can given yet without an ecapsulation into a json object. But this may be deprecated in future versions of arango Returns result dict of the request. :param collection Collection instance :param example_data An example document that all collection documents are compared against. :param wait_for_sync if set to true, then all removal operations will instantly be synchronised to disk. If this is not specified, then the collection's default sync behavior will be applied. :param limit an optional value that determines how many documents to replace at most. If limit is specified but is less than the number of documents in the collection, it is undefined which of the documents will be replaced. """
kwargs = { 'options': { 'waitForSync': wait_for_sync, 'limit': limit, } } return cls._construct_query(name='remove-by-example', collection=collection, example=example_data, result=False, **kwargs)
<SYSTEM_TASK:> This will find all documents matching a given example, using the specified hash index. <END_TASK> <USER_TASK:> Description: def get_by_example_hash(cls, collection, index_id, example_data, allow_multiple=False, skip=None, limit=None): """ This will find all documents matching a given example, using the specified hash index. :param collection Collection instance :param index_id ID of the index which should be used for the query :param example_data The example document :param allow_multiple If the query can return multiple documents :param skip The number of documents to skip in the query :param limit The maximal amount of documents to return. The skip is applied before the limit restriction. :returns Single document / Document list """
kwargs = { 'index': index_id, 'skip': skip, 'limit': limit, } return cls._construct_query(name='by-example-hash', collection=collection, example=example_data, multiple=allow_multiple, **kwargs)
<SYSTEM_TASK:> This will find all documents matching a given example, using the specified skiplist index. <END_TASK> <USER_TASK:> Description: def get_by_example_skiplist(cls, collection, index_id, example_data, allow_multiple=True, skip=None, limit=None): """ This will find all documents matching a given example, using the specified skiplist index. :param collection Collection instance :param index_id ID of the index which should be used for the query :param example_data The example document :param allow_multiple If the query can return multiple documents :param skip The number of documents to skip in the query :param limit The maximal amount of documents to return. The skip is applied before the limit restriction. :returns Single document / Document list """
kwargs = { 'index': index_id, 'skip': skip, 'limit': limit, } return cls._construct_query(name='by-example-skiplist', collection=collection, example=example_data, multiple=allow_multiple, **kwargs)
<SYSTEM_TASK:> This will find all documents within a given range. In order to execute a range query, a <END_TASK> <USER_TASK:> Description: def range(cls, collection, attribute, left, right, closed, index_id, skip=None, limit=None): """ This will find all documents within a given range. In order to execute a range query, a skip-list index on the queried attribute must be present. :param collection Collection instance :param attribute The attribute path to check :param left The lower bound :param right The upper bound :param closed If true, use interval including left and right, otherwise exclude right, but include left :param index_id ID of the index which should be used for the query :param skip The number of documents to skip in the query :param limit The maximal amount of documents to return. The skip is applied before the limit restriction. :returns Document list """
kwargs = { 'index': index_id, 'attribute': attribute, 'left': left, 'right': right, 'closed': closed, 'skip': skip, 'limit': limit, } return cls._construct_query(name='range', collection=collection, multiple=True, **kwargs)
<SYSTEM_TASK:> This will find all documents from the collection that match the fulltext query specified in query. <END_TASK> <USER_TASK:> Description: def fulltext(cls, collection, attribute, example_text, index_id, skip=None, limit=None): """ This will find all documents from the collection that match the fulltext query specified in query. In order to use the fulltext operator, a fulltext index must be defined for the collection and the specified attribute. :param collection Collection instance :param attribute The attribute path to check :param example_text Text which should be used to search :param index_id ID of the index which should be used for the query :param skip The number of documents to skip in the query :param limit The maximal amount of documents to return. The skip is applied before the limit restriction. :returns Document list """
kwargs = { 'index': index_id, 'attribute': attribute, 'query': example_text, 'skip': skip, 'limit': limit, } return cls._construct_query(name='fulltext', collection=collection, multiple=True, **kwargs)
<SYSTEM_TASK:> The default will find at most 100 documents near the given coordinate. <END_TASK> <USER_TASK:> Description: def near(cls, collection, latitude, longitude, index_id, distance=None, skip=None, limit=None): """ The default will find at most 100 documents near the given coordinate. The returned list is sorted according to the distance, with the nearest document being first in the list. If there are near documents of equal distance, documents are chosen randomly from this set until the limit is reached. In order to use the near operator, a geo index must be defined for the collection. This index also defines which attribute holds the coordinates for the document. If you have more then one geo-spatial index, you can use the geo field to select a particular index. :param collection Collection instance :param latitude The latitude of the coordinate :param longitude The longitude of the coordinate :param index_id ID of the index which should be used for the query :param distance If given, the attribute key used to return the distance to the given coordinate. If specified, distances are returned in meters. :param skip The number of documents to skip in the query :param limit The maximal amount of documents to return. The skip is applied before the limit restriction. :returns Document list """
kwargs = { 'geo': index_id, 'latitude': latitude, 'longitude': longitude, 'distance': distance, 'skip': skip, 'limit': limit, } return cls._construct_query(name='near', collection=collection, multiple=True, **kwargs)
<SYSTEM_TASK:> Get the pwm values for a specific state of the led. <END_TASK> <USER_TASK:> Description: def _get_pwm_values(self, brightness=None, color=None): """ Get the pwm values for a specific state of the led. If a state argument is omitted, current value is used. :param brightness: The brightness of the state. :param color: The color of the state. :return: The pwm values. """
if brightness is None: brightness = self.brightness if color is None: color = self.color return [(x / 255) * brightness for x in self._rgb_to_rgbw(color)]
<SYSTEM_TASK:> Return an AMQEndpoint instance configured with the given AMQP uri. <END_TASK> <USER_TASK:> Description: def from_uri(cls, reactor, uri): """Return an AMQEndpoint instance configured with the given AMQP uri. @see: https://www.rabbitmq.com/uri-spec.html """
uri = URI.fromBytes(uri.encode(), defaultPort=5672) kwargs = {} host = uri.host.decode() if "@" in host: auth, host = uri.netloc.decode().split("@") username, password = auth.split(":") kwargs.update({"username": username, "password": password}) vhost = uri.path.decode() if len(vhost) > 1: vhost = vhost[1:] # Strip leading "/" kwargs["vhost"] = vhost params = parse_qs(uri.query) kwargs.update({name.decode(): value[0].decode() for name, value in params.items()}) if "heartbeat" in kwargs: kwargs["heartbeat"] = int(kwargs["heartbeat"]) return cls(reactor, host, uri.port, **kwargs)
<SYSTEM_TASK:> Apply the current stage of the transition based on current time. <END_TASK> <USER_TASK:> Description: def step(self): """Apply the current stage of the transition based on current time."""
# A cancelled or completed transition is a no-op.
if self.cancelled or self.finished:
    return

# Nothing to animate -- finish immediately.
if not self.pwm_stages:
    self._finish()
    return

if self.duration == 0:
    # Instant transition: jump straight to the final stage.
    progress = 1
else:
    # Fraction of the transition elapsed, clamped to [0, 1].
    run_time = time.time() - self._start_time
    progress = max(0, min(1, run_time / self.duration))

# Map progress onto a stage index and push its pwm values to the driver.
self.stage_index = math.ceil((len(self.pwm_stages) - 1) * progress)
stage = self.pwm_stages[self.stage_index]
self._driver.set_pwm(stage)

if progress == 1:
    self._finish()
<SYSTEM_TASK:> Mark transition as finished and execute callback. <END_TASK> <USER_TASK:> Description: def _finish(self): """Mark transition as finished and execute callback."""
# Flag completion first so pollers of `finished` observe it before the
# callback runs, then wake anyone blocked on the finish event.
self.finished = True
if self._callback:
    self._callback(self)
self._finish_event.set()
<SYSTEM_TASK:> Synchronize local and remote representations. <END_TASK> <USER_TASK:> Description: def sync(self, graph_commons): """Synchronize local and remote representations."""
if self['id'] is None: return remote_graph = graph_commons.graphs(self['id']) # TODO: less forceful, more elegant self.edges = remote_graph.edges self.nodes = remote_graph.nodes self.node_types = remote_graph.node_types self.edge_types = remote_graph.edge_types self._edges = dict((edge['id'], edge) for edge in self.edges) self._nodes = dict((node['id'], node) for node in self.nodes) self._node_types = dict((t['id'], t) for t in self.node_types) self._edge_types = dict((t['id'], t) for t in self.edge_types)
<SYSTEM_TASK:> Queue a transition for execution. <END_TASK> <USER_TASK:> Description: def execute(self, transition): """ Queue a transition for execution. :param transition: The transition """
# Queue the transition, then make sure a worker thread is draining the
# queue.  NOTE: Thread.isAlive() and Thread.setDaemon() were deprecated
# and removed in Python 3.9 — use is_alive() and the daemon attribute.
self._transitions.append(transition)
if self._thread is None or not self._thread.is_alive():
    self._thread = threading.Thread(target=self._transition_loop)
    # Daemonize so a pending transition never blocks interpreter exit.
    self._thread.daemon = True
    self._thread.start()
<SYSTEM_TASK:> Execute all queued transitions step by step. <END_TASK> <USER_TASK:> Description: def _transition_loop(self): """Execute all queued transitions step by step."""
# Run until every queued transition has finished.
while self._transitions:
    start = time.time()
    # Iterate over a snapshot: removing an item from a list while
    # iterating that same list skips the element that follows it,
    # which would delay finished transitions by a whole cycle.
    for transition in list(self._transitions):
        transition.step()
        if transition.finished:
            self._transitions.remove(transition)
    # Throttle the loop so consecutive steps are at least
    # MIN_STEP_TIME apart.
    elapsed = time.time() - start
    time.sleep(max(0, self.MIN_STEP_TIME - elapsed))
<SYSTEM_TASK:> Update the pwm values of the driver according to the current state. <END_TASK> <USER_TASK:> Description: def _update_pwm(self): """Update the pwm values of the driver according to the current state."""
# Push scaled state values when the led is on, all-zero duty cycles when
# it is off.
pwm_values = (self._get_pwm_values() if self._is_on
              else [0] * len(self._driver.pins))
self._driver.set_pwm(pwm_values)
<SYSTEM_TASK:> Transition to the specified state of the led. <END_TASK> <USER_TASK:> Description: def transition(self, duration, is_on=None, **kwargs): """ Transition to the specified state of the led. If another transition is already running, it is aborted. :param duration: The duration of the transition. :param is_on: The on-off state to transition to. :param kwargs: The state to transition to. """
self._cancel_active_transition() dest_state = self._prepare_transition(is_on, **kwargs) total_steps = self._transition_steps(**dest_state) state_stages = [self._transition_stage(step, total_steps, **dest_state) for step in range(total_steps)] pwm_stages = [self._get_pwm_values(**stage) for stage in state_stages] callback = partial(self._transition_callback, is_on) self._active_transition = Transition(self._driver, duration, state_stages, pwm_stages, callback) TransitionManager().execute(self._active_transition) return self._active_transition
<SYSTEM_TASK:> Callback that is called when a transition has ended. <END_TASK> <USER_TASK:> Description: def _transition_callback(self, is_on, transition): """ Callback that is called when a transition has ended. :param is_on: The on-off state to transition to. :param transition: The transition that has ended. """
# Persist the transition's final stage as the led's new state.
if transition.state_stages:
    final_state = transition.state_stages[transition.stage_index]
    if self.is_on and is_on is False:
        # Turning off: remember the pre-transition brightness so the
        # led comes back at the same level when switched on again.
        final_state['brightness'] = self.brightness
    self.set(is_on=is_on, cancel_transition=False, **final_state)
self._active_transition = None