Dataset columns:
text_prompt: string (157 to 13.1k characters), the task description with function signature and docstring.
code_prompt: string (7 to 19.8k characters), the corresponding implementation.
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_context_data(self, **kwargs): """ Adds `next` to the context. This makes sure that the `next` parameter doesn't get lost if the form was submitted invalid. """
ctx = super(UserMediaImageViewMixin, self).get_context_data(**kwargs)
ctx.update({
    'action': self.action,
    'next': self.next,
})
return ctx
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_success_url(self): """ Returns the success URL. This is either the given `next` URL parameter or the content object's `get_absolute_url` method's return value. """
if self.next:
    return self.next
if self.object and self.object.content_object:
    return self.object.content_object.get_absolute_url()
raise Exception(
    'No content object given. Please provide ``next`` in your POST'
    ' data')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dispatch(self, request, *args, **kwargs): """Adds useful objects to the class and performs security checks."""
self._add_next_and_user(request)
self.content_object = None
self.content_type = None
self.object_id = kwargs.get('object_id', None)
if kwargs.get('content_type'):
    # Check if the user forged the URL and posted a non-existent
    # content type
    try:
        self.content_type = ContentType.objects.get(
            model=kwargs.get('content_type'))
    except ContentType.DoesNotExist:
        raise Http404
if self.content_type:
    # Check if the user forged the URL and tries to append the image
    # to an object that does not exist
    try:
        self.content_object = \
            self.content_type.get_object_for_this_type(
                pk=self.object_id)
    except ObjectDoesNotExist:
        raise Http404
if self.content_object and hasattr(self.content_object, 'user'):
    # Check if the user forged the URL and tries to append the image
    # to an object that does not belong to him
    if not self.content_object.user == self.user:
        raise Http404
return super(CreateImageView, self).dispatch(request, *args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_queryset(self): """ Makes sure that a user can only delete his own images, even when he forges the request URL. """
queryset = super(DeleteImageView, self).get_queryset()
queryset = queryset.filter(user=self.user)
return queryset
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_queryset(self): """ Makes sure that a user can only edit his own images, even when he forges the request URL. """
queryset = super(UpdateImageView, self).get_queryset()
queryset = queryset.filter(user=self.user)
return queryset
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _delete_images(self, instance): """Deletes all user media images of the given instance."""
UserMediaImage.objects.filter(
    content_type=ContentType.objects.get_for_model(instance),
    object_id=instance.pk,
    user=instance.user,
).delete()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clean_image(self): """ It seems like in Django 1.5 something has changed. When Django tries to validate the form, it checks if the generated filename fit into the max_length. But at this point, self.instance.user is not yet set so our filename generation function cannot create the new file path because it needs the user id. Setting self.instance.user at this point seems to work as a workaround. """
self.instance.user = self.user
data = self.cleaned_data.get('image')
return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_image_file_path(instance, filename): """Returns a unique filename for images."""
ext = filename.split('.')[-1]
filename = '%s.%s' % (uuid.uuid4(), ext)
return os.path.join(
    'user_media', str(instance.user.pk), 'images', filename)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def image_post_delete_handler(sender, instance, **kwargs): """ Makes sure that an image is also deleted from the media directory. This should prevent a load of "dead" image files on disc. """
for f in glob.glob('{}/{}*'.format(instance.image.storage.location,
                                   instance.image.name)):
    if not os.path.isdir(f):
        instance.image.storage.delete(f)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def box_coordinates(self): """Returns a thumbnail's coordinates."""
if (
        self.thumb_x is not None and
        self.thumb_y is not None and
        self.thumb_x2 is not None and
        self.thumb_y2 is not None
):
    return (
        int(self.thumb_x),
        int(self.thumb_y),
        int(self.thumb_x2),
        int(self.thumb_y2),
    )
return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def large_size(self, as_string=True): """Returns a thumbnail's large size."""
size = getattr(settings, 'USER_MEDIA_THUMB_SIZE_LARGE', (150, 150))
if as_string:
    return u'{}x{}'.format(size[0], size[1])
return size
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def crop_box(im, box=False, **kwargs): """Uses box coordinates to crop an image without resizing it first."""
if box:
    im = im.crop(box)
return im
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def load_germanet(host = None, port = None, database_name = 'germanet'):
    '''
    Loads a GermaNet instance connected to the given MongoDB instance.

    Arguments:
    - `host`: the hostname of the MongoDB instance
    - `port`: the port number of the MongoDB instance
    - `database_name`: the name of the GermaNet database on the
      MongoDB instance
    '''
    client = MongoClient(host, port)
    germanet_db = client[database_name]
    return GermaNet(germanet_db)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def cache_size(self, new_value):
    '''
    Set the cache size used to reduce the number of database access
    operations.
    '''
    if type(new_value) == int and 0 < new_value:
        if self._lemma_cache is not None:
            self._lemma_cache = repoze.lru.LRUCache(new_value)
            self._synset_cache = repoze.lru.LRUCache(new_value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def all_lemmas(self):
    '''
    A generator over all the lemmas in the GermaNet database.
    '''
    for lemma_dict in self._mongo_db.lexunits.find():
        yield Lemma(self, lemma_dict)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def lemmas(self, lemma, pos = None):
    '''
    Looks up lemmas in the GermaNet database.

    Arguments:
    - `lemma`:
    - `pos`:
    '''
    if pos is not None:
        if pos not in SHORT_POS_TO_LONG:
            return None
        pos = SHORT_POS_TO_LONG[pos]
        lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma,
                                                    'category': pos})
    else:
        lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma})
    return sorted([Lemma(self, lemma_dict) for lemma_dict in lemma_dicts])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def all_synsets(self):
    '''
    A generator over all the synsets in the GermaNet database.
    '''
    for synset_dict in self._mongo_db.synsets.find():
        yield Synset(self, synset_dict)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def synsets(self, lemma, pos = None):
    '''
    Looks up synsets in the GermaNet database.

    Arguments:
    - `lemma`:
    - `pos`:
    '''
    return sorted(set(lemma_obj.synset
                      for lemma_obj in self.lemmas(lemma, pos)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def synset(self, synset_repr):
    '''
    Looks up a synset in GermaNet using its string representation.

    Arguments:
    - `synset_repr`: a unicode string containing the lemma, part of
      speech, and sense number of the first lemma of the synset

    >>> gn.synset(u'funktionieren.v.2')
    Synset(funktionieren.v.2)
    '''
    parts = synset_repr.split('.')
    if len(parts) != 3:
        return None
    lemma, pos, sensenum = parts
    if not sensenum.isdigit() or pos not in SHORT_POS_TO_LONG:
        return None
    sensenum = int(sensenum, 10)
    pos = SHORT_POS_TO_LONG[pos]
    lemma_dict = self._mongo_db.lexunits.find_one({'orthForm': lemma,
                                                   'category': pos,
                                                   'sense': sensenum})
    if lemma_dict:
        return Lemma(self, lemma_dict).synset
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_synset_by_id(self, mongo_id):
    '''
    Builds a Synset object from the database entry with the given
    ObjectId.

    Arguments:
    - `mongo_id`: a bson.objectid.ObjectId object
    '''
    cache_hit = None
    if self._synset_cache is not None:
        cache_hit = self._synset_cache.get(mongo_id)
    if cache_hit is not None:
        return cache_hit
    synset_dict = self._mongo_db.synsets.find_one({'_id': mongo_id})
    if synset_dict is not None:
        synset = Synset(self, synset_dict)
        if self._synset_cache is not None:
            self._synset_cache.put(mongo_id, synset)
        return synset
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_lemma_by_id(self, mongo_id):
    '''
    Builds a Lemma object from the database entry with the given
    ObjectId.

    Arguments:
    - `mongo_id`: a bson.objectid.ObjectId object
    '''
    cache_hit = None
    if self._lemma_cache is not None:
        cache_hit = self._lemma_cache.get(mongo_id)
    if cache_hit is not None:
        return cache_hit
    lemma_dict = self._mongo_db.lexunits.find_one({'_id': mongo_id})
    if lemma_dict is not None:
        lemma = Lemma(self, lemma_dict)
        if self._lemma_cache is not None:
            self._lemma_cache.put(mongo_id, lemma)
        return lemma
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def find_germanet_xml_files(xml_path): ''' Globs the XML files contained in the given directory and sorts them into sections for import into the MongoDB database. Arguments: - `xml_path`: the path to the directory containing the GermaNet XML files ''' xml_files = sorted(glob.glob(os.path.join(xml_path, '*.xml'))) # sort out the lexical files lex_files = [xml_file for xml_file in xml_files if re.match(r'(adj|nomen|verben)\.', os.path.basename(xml_file).lower())] xml_files = sorted(set(xml_files) - set(lex_files)) if not lex_files: print('ERROR: cannot find lexical information files') # sort out the GermaNet relations file gn_rels_file = [xml_file for xml_file in xml_files if os.path.basename(xml_file).lower() == 'gn_relations.xml'] xml_files = sorted(set(xml_files) - set(gn_rels_file)) if not gn_rels_file: print('ERROR: cannot find relations file gn_relations.xml') gn_rels_file = None else: if 1 < len(gn_rels_file): print ('WARNING: more than one relations file gn_relations.xml, ' 'taking first match') gn_rels_file = gn_rels_file[0] # sort out the wiktionary paraphrase files wiktionary_files = [xml_file for xml_file in xml_files if re.match(r'wiktionaryparaphrases-', os.path.basename(xml_file).lower())] xml_files = sorted(set(xml_files) - set(wiktionary_files)) if not wiktionary_files: print('WARNING: cannot find wiktionary paraphrase files') # sort out the interlingual index file ili_files = [xml_file for xml_file in xml_files if os.path.basename(xml_file).lower().startswith( 'interlingualindex')] xml_files = sorted(set(xml_files) - set(ili_files)) if not ili_files: print('WARNING: cannot find interlingual index file') if xml_files: print('WARNING: unrecognised xml files:', xml_files) return lex_files, gn_rels_file, wiktionary_files, ili_files
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def read_relation_file(filename): ''' Reads the GermaNet relation file ``gn_relations.xml`` which lists all the relations holding between lexical units and synsets. Arguments: - `filename`: ''' with open(filename, 'rb') as input_file: doc = etree.parse(input_file) lex_rels = [] con_rels = [] assert doc.getroot().tag == 'relations' for child in doc.getroot(): if child.tag == 'lex_rel': if 0 < len(child): print('<lex_rel> has unexpected child node') child_dict = dict(child.items()) warn_attribs('', child, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD) if child_dict['dir'] not in LEX_REL_DIRS: print('unrecognized <lex_rel> dir', child_dict['dir']) if child_dict['dir'] == 'both' and 'inv' not in child_dict: print('<lex_rel> has dir=both but does not specify inv') lex_rels.append(child_dict) elif child.tag == 'con_rel': if 0 < len(child): print('<con_rel> has unexpected child node') child_dict = dict(child.items()) warn_attribs('', child, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD) if child_dict['dir'] not in CON_REL_DIRS: print('unrecognised <con_rel> dir', child_dict['dir']) if (child_dict['dir'] in ['both', 'revert'] and 'inv' not in child_dict): print('<con_rel> has dir={0} but does not specify inv'.format( child_dict['dir'])) con_rels.append(child_dict) else: print('unrecognised child of <relations>', child) continue return lex_rels, con_rels
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def read_paraphrase_file(filename): ''' Reads in a GermaNet wiktionary paraphrase file and returns its contents as a list of dictionary structures. Arguments: - `filename`: ''' with open(filename, 'rb') as input_file: doc = etree.parse(input_file) assert doc.getroot().tag == 'wiktionaryParaphrases' paraphrases = [] for child in doc.getroot(): if child.tag == 'wiktionaryParaphrase': paraphrase = child warn_attribs('', paraphrase, PARAPHRASE_ATTRIBS) if 0 < len(paraphrase): print('unrecognised child of <wiktionaryParaphrase>', list(paraphrase)) paraphrase_dict = dict(paraphrase.items()) if paraphrase_dict['edited'] not in MAP_YESNO_TO_BOOL: print('<paraphrase> attribute "edited" has unexpected value', paraphrase_dict['edited']) else: paraphrase_dict['edited'] = MAP_YESNO_TO_BOOL[ paraphrase_dict['edited']] if not paraphrase_dict['wiktionarySenseId'].isdigit(): print('<paraphrase> attribute "wiktionarySenseId" has ' 'non-integer value', paraphrase_dict['edited']) else: paraphrase_dict['wiktionarySenseId'] = \ int(paraphrase_dict['wiktionarySenseId'], 10) paraphrases.append(paraphrase_dict) else: print('unknown child of <wiktionaryParaphrases>', child) return paraphrases
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def insert_lexical_information(germanet_db, lex_files):
    '''
    Reads in the given lexical information files and inserts their
    contents into the given MongoDB database.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `lex_files`: a list of paths to XML files containing lexical
      information
    '''
    # drop the database collections if they already exist
    germanet_db.lexunits.drop()
    germanet_db.synsets.drop()
    # inject data from XML files into the database
    for lex_file in lex_files:
        synsets = read_lexical_file(lex_file)
        for synset in synsets:
            synset = dict((SYNSET_KEY_REWRITES.get(key, key), value)
                          for (key, value) in synset.items())
            lexunits = synset['lexunits']
            synset['lexunits'] = germanet_db.lexunits.insert(lexunits)
            synset_id = germanet_db.synsets.insert(synset)
            for lexunit in lexunits:
                lexunit['synset'] = synset_id
                lexunit['category'] = synset['category']
                germanet_db.lexunits.save(lexunit)
    # index the two collections by id
    germanet_db.synsets.create_index('id')
    germanet_db.lexunits.create_index('id')
    # also index lexunits by lemma, lemma-pos, and lemma-pos-sensenum
    germanet_db.lexunits.create_index([('orthForm', DESCENDING)])
    germanet_db.lexunits.create_index([('orthForm', DESCENDING),
                                       ('category', DESCENDING)])
    germanet_db.lexunits.create_index([('orthForm', DESCENDING),
                                       ('category', DESCENDING),
                                       ('sense', DESCENDING)])
    print('Inserted {0} synsets, {1} lexical units.'.format(
        germanet_db.synsets.count(),
        germanet_db.lexunits.count()))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def insert_lemmatisation_data(germanet_db):
    '''
    Creates the lemmatiser collection in the given MongoDB instance
    using the data derived from the Projekt deutscher Wortschatz.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    # drop the database collection if it already exists
    germanet_db.lemmatiser.drop()

    num_lemmas = 0
    input_file = gzip.open(os.path.join(os.path.dirname(__file__),
                                        LEMMATISATION_FILE))
    for line in input_file:
        line = line.decode('iso-8859-1').strip().split('\t')
        assert len(line) == 2
        germanet_db.lemmatiser.insert(dict(list(zip(('word', 'lemma'), line))))
        num_lemmas += 1
    input_file.close()
    # index the collection on 'word'
    germanet_db.lemmatiser.create_index('word')
    print('Inserted {0} lemmatiser entries.'.format(num_lemmas))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def insert_infocontent_data(germanet_db): ''' For every synset in GermaNet, inserts count information derived from SDEWAC. Arguments: - `germanet_db`: a pymongo.database.Database object ''' gnet = germanet.GermaNet(germanet_db) # use add one smoothing gn_counts = defaultdict(lambda: 1.) total_count = 1 input_file = gzip.open(os.path.join(os.path.dirname(__file__), WORD_COUNT_FILE)) num_lines_read = 0 num_lines = 0 for line in input_file: line = line.decode('utf-8').strip().split('\t') num_lines += 1 if len(line) != 3: continue count, pos, word = line num_lines_read += 1 count = int(count) synsets = set(gnet.synsets(word, pos)) if not synsets: continue # Although Resnik (1995) suggests dividing count by the number # of synsets, Patwardhan et al (2003) argue against doing # this. count = float(count) / len(synsets) for synset in synsets: total_count += count paths = synset.hypernym_paths scount = float(count) / len(paths) for path in paths: for ss in path: gn_counts[ss._id] += scount print('Read {0} of {1} lines from count file.'.format(num_lines_read, num_lines)) print('Recorded counts for {0} synsets.'.format(len(gn_counts))) print('Total count is {0}'.format(total_count)) input_file.close() # update all the synset records in GermaNet num_updates = 0 for synset in germanet_db.synsets.find(): synset['infocont'] = gn_counts[synset['_id']] / total_count germanet_db.synsets.save(synset) num_updates += 1 print('Updated {0} synsets.'.format(num_updates))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def compute_max_min_depth(germanet_db):
    '''
    For every part of speech in GermaNet, computes the maximum
    min_depth in that hierarchy.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    '''
    gnet = germanet.GermaNet(germanet_db)
    max_min_depths = defaultdict(lambda: -1)
    for synset in gnet.all_synsets():
        min_depth = synset.min_depth
        if max_min_depths[synset.category] < min_depth:
            max_min_depths[synset.category] = min_depth
    if germanet_db.metainfo.count() == 0:
        germanet_db.metainfo.insert({})
    metainfo = germanet_db.metainfo.find_one()
    metainfo['max_min_depths'] = max_min_depths
    germanet_db.metainfo.save(metainfo)
    print('Computed maximum min_depth for all parts of speech:')
    print(u', '.join(u'{0}: {1}'.format(k, v) for (k, v) in
                     sorted(max_min_depths.items())).encode('utf-8'))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def AssignVar(self, value): """Assign a value to this Value."""
self.value = value
# Call OnAssignVar on options.
[option.OnAssignVar() for option in self.options]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _CheckLine(self, line): """Passes the line through each rule until a match is made. Args: line: A string, the current input line. """
for rule in self._cur_state:
    matched = self._CheckRule(rule, line)
    if matched:
        for value in matched.groupdict():
            self._AssignVar(matched, value)
        if self._Operations(rule):
            # Not a Continue so check for state transition.
            if rule.new_state:
                if rule.new_state not in ('End', 'EOF'):
                    self._cur_state = self.states[rule.new_state]
                self._cur_state_name = rule.new_state
            break
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _make_graphite_api_points_list(influxdb_data): """Make graphite-api data points dictionary from Influxdb ResultSet data"""
_data = {}
for key in influxdb_data.keys():
    _data[key[0]] = [(datetime.datetime.fromtimestamp(float(d['time'])),
                      d['value'])
                     for d in influxdb_data.get_points(key[0])]
return _data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _setup_logger(self, level, log_file): """Setup log level and log file if set"""
if logger.handlers:
    return
level = getattr(logging, level.upper())
logger.setLevel(level)
formatter = logging.Formatter(
    '[%(levelname)s] %(asctime)s - %(module)s.%(funcName)s() - %(message)s')
handler = logging.StreamHandler()
logger.addHandler(handler)
handler.setFormatter(formatter)
if not log_file:
    return
try:
    handler = TimedRotatingFileHandler(log_file)
except IOError:
    logger.error("Could not write to %s, falling back to stdout",
                 log_file)
else:
    logger.addHandler(handler)
    handler.setFormatter(formatter)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def walk_subclasses(root): """Does not yield the input class"""
classes = [root]
visited = set()
while classes:
    cls = classes.pop()
    if cls is type or cls in visited:
        continue
    classes.extend(cls.__subclasses__())
    visited.add(cls)
    if cls is not root:
        yield cls
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def new_expiry(days=DEFAULT_PASTE_LIFETIME_DAYS): """Return an expiration `days` in the future"""
now = delorean.Delorean()
return now + datetime.timedelta(days=days)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sync(obj, engine): """Mark the object as having been persisted at least once. Store the latest snapshot of all marked values."""
snapshot = Condition()
# Only expect values (or lack of a value) for columns that have been explicitly set
for column in sorted(_obj_tracking[obj]["marked"], key=lambda col: col.dynamo_name):
    value = getattr(obj, column.name, None)
    value = engine._dump(column.typedef, value)
    condition = column == value
    # The renderer shouldn't try to dump the value again.
    # We're dumping immediately in case the value is mutable,
    # such as a set or (many) custom data types.
    condition.dumped = True
    snapshot &= condition
_obj_tracking[obj]["snapshot"] = snapshot
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def printable_name(column, path=None): """Provided for debug output when rendering conditions. User.name[3]["foo"][0]["bar"] -> name[3].foo[0].bar """
pieces = [column.name]
path = path or path_of(column)
for segment in path:
    if isinstance(segment, str):
        pieces.append(segment)
    else:
        pieces[-1] += "[{}]".format(segment)
return ".".join(pieces)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def iter_conditions(condition): """Yield all conditions within the given condition. If the root condition is and/or/not, it is not yielded (unless a cyclic reference to it is found)."""
conditions = list()
visited = set()
# Has to be split out, since we don't want to visit the root (for cyclic conditions)
# but we don't want to yield it (if it's non-cyclic) because this only yields inner conditions
if condition.operation in {"and", "or"}:
    conditions.extend(reversed(condition.values))
elif condition.operation == "not":
    conditions.append(condition.values[0])
else:
    conditions.append(condition)
while conditions:
    condition = conditions.pop()
    if condition in visited:
        continue
    visited.add(condition)
    yield condition
    if condition.operation in {"and", "or", "not"}:
        conditions.extend(reversed(condition.values))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def iter_columns(condition): """ Yield all columns in the condition or its inner conditions. Unwraps proxies when the condition's column (or any of its values) include paths. """
# Like iter_conditions, this can't live in each condition without going possibly infinite on the
# recursion, or passing the visited set through every call.  That makes the signature ugly, so we
# take care of it here.  Luckily, it's pretty easy to leverage iter_conditions and just unpack the
# actual columns.
visited = set()
for condition in iter_conditions(condition):
    if condition.operation in ("and", "or", "not"):
        continue
    # Non-meta conditions always have a column, and each of values has the potential to be a column.
    # Comparison will only have a list of len 1, but it's simpler to just iterate values and check each

    # unwrap proxies created for paths
    column = proxied(condition.column)

    # special case for None
    # this could also have skipped on isinstance(condition, Condition)
    # but this is slightly more flexible for users to create their own None-sentinel Conditions
    if column is None:
        continue
    if column not in visited:
        visited.add(column)
        yield column
    for value in condition.values:
        if isinstance(value, ComparisonMixin):
            if value not in visited:
                visited.add(value)
                yield value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _value_ref(self, column, value, *, dumped=False, inner=False): """inner=True uses column.typedef.inner_type instead of column.typedef"""
ref = ":v{}".format(self.next_index) # Need to dump this value if not dumped: typedef = column.typedef for segment in path_of(column): typedef = typedef[segment] if inner: typedef = typedef.inner_typedef value = self.engine._dump(typedef, value) self.attr_values[ref] = value self.counts[ref] += 1 return ref, value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pop_refs(self, *refs): """Decrement the usage of each ref by 1. If this was the last use of a ref, remove it from attr_names or attr_values. """
for ref in refs:
    name = ref.name
    count = self.counts[name]
    # Not tracking this ref
    if count < 1:
        continue
    # Someone else is using this ref
    elif count > 1:
        self.counts[name] -= 1
    # Last reference
    else:
        logger.debug("popping last usage of {}".format(ref))
        self.counts[name] -= 1
        if ref.type == "value":
            del self.attr_values[name]
        else:
            # Clean up both name indexes
            path_segment = self.attr_names[name]
            del self.attr_names[name]
            del self.name_attr_index[path_segment]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def render(self, obj=None, condition=None, atomic=False, update=False, filter=None, projection=None, key=None): """Main entry point for rendering multiple expressions. All parameters are optional, except obj when atomic or update are True. :param obj: *(Optional)* An object to render an atomic condition or update expression for. Required if update or atomic are true. Default is False. :param condition: *(Optional)* Rendered as a "ConditionExpression" for a conditional operation. If atomic is True, the two are rendered in an AND condition. Default is None. :type condition: :class:`~bloop.conditions.BaseCondition` :param bool atomic: *(Optional)* True if an atomic condition should be created for ``obj`` and rendered as a "ConditionExpression". Default is False. :param bool update: *(Optional)* True if an "UpdateExpression" should be rendered for ``obj``. Default is False. :param filter: *(Optional)* A filter condition for a query or scan, rendered as a "FilterExpression". Default is None. :type filter: :class:`~bloop.conditions.BaseCondition` :param projection: *(Optional)* A set of Columns to include in a query or scan, redered as a "ProjectionExpression". Default is None. :type projection: set :class:`~bloop.models.Column` :param key: *(Optional)* A key condition for queries, rendered as a "KeyConditionExpression". Default is None. :type key: :class:`~bloop.conditions.BaseCondition` """
if (atomic or update) and not obj:
    raise InvalidCondition("An object is required to render atomic conditions or updates without an object.")

if filter:
    self.render_filter_expression(filter)
if projection:
    self.render_projection_expression(projection)
if key:
    self.render_key_expression(key)

# Condition requires a bit of work, because either one can be empty/false
condition = (condition or Condition()) & (get_snapshot(obj) if atomic else Condition())
if condition:
    self.render_condition_expression(condition)

if update:
    self.render_update_expression(obj)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _unpack(self, record, key, expected): """Replaces the attr dict at the given key with an instance of a Model"""
attrs = record.get(key)
if attrs is None:
    return
obj = unpack_from_dynamodb(
    attrs=attrs,
    expected=expected,
    model=self.model,
    engine=self.engine
)
object_loaded.send(self.engine, engine=self.engine, obj=obj)
record[key] = obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reformat_record(record): """Repack a record into a cleaner structure for consumption."""
return { "key": record["dynamodb"].get("Keys", None), "new": record["dynamodb"].get("NewImage", None), "old": record["dynamodb"].get("OldImage", None), "meta": { "created_at": record["dynamodb"]["ApproximateCreationDateTime"], "event": { "id": record["eventID"], "type": record["eventName"].lower(), "version": record["eventVersion"] }, "sequence_number": record["dynamodb"]["SequenceNumber"], } }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def token(self): """JSON-serializable representation of the current Shard state. The token is enough to rebuild the Shard as part of rebuilding a Stream. :returns: Shard state as a json-friendly dict :rtype: dict """
if self.iterator_type in RELATIVE_ITERATORS:
    logger.warning("creating shard token at non-exact location \"{}\"".format(self.iterator_type))
token = {
    "stream_arn": self.stream_arn,
    "shard_id": self.shard_id,
    "iterator_type": self.iterator_type,
    "sequence_number": self.sequence_number,
}
if self.parent:
    token["parent"] = self.parent.shard_id
if not self.iterator_type:
    del token["iterator_type"]
if not self.sequence_number:
    del token["sequence_number"]
return token
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def jump_to(self, *, iterator_type, sequence_number=None): """Move to a new position in the shard using the standard parameters to GetShardIterator. :param str iterator_type: "trim_horizon", "at_sequence", "after_sequence", "latest" :param str sequence_number: *(Optional)* Sequence number to use with at/after sequence. Default is None. """
# Just a simple wrapper; let the caller handle RecordsExpired
self.iterator_id = self.session.get_shard_iterator(
    stream_arn=self.stream_arn,
    shard_id=self.shard_id,
    iterator_type=iterator_type,
    sequence_number=sequence_number)
self.iterator_type = iterator_type
self.sequence_number = sequence_number
self.empty_responses = 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_children(self): """If the Shard doesn't have any children, tries to find some from DescribeStream. If the Shard is open this won't find any children, so an empty response doesn't mean the Shard will **never** have children. """
# Child count is fixed the first time any of the following happen:
# 0 :: stream closed or throughput decreased
# 1 :: shard was open for ~4 hours
# 2 :: throughput increased

if self.children:
    return self.children

# ParentShardId -> [Shard, ...]
by_parent = collections.defaultdict(list)
# ShardId -> Shard
by_id = {}
for shard in self.session.describe_stream(
        stream_arn=self.stream_arn,
        first_shard=self.shard_id)["Shards"]:
    parent_list = by_parent[shard.get("ParentShardId")]
    shard = Shard(
        stream_arn=self.stream_arn,
        shard_id=shard["ShardId"],
        parent=shard.get("ParentShardId"),
        session=self.session)
    parent_list.append(shard)
    by_id[shard.shard_id] = shard

# Find this shard when looking up shards by ParentShardId
by_id[self.shard_id] = self

# Insert this shard's children, then handle its child's descendants etc.
to_insert = collections.deque(by_parent[self.shard_id])
while to_insert:
    shard = to_insert.popleft()
    # ParentShardId -> Shard
    shard.parent = by_id[shard.parent]
    shard.parent.children.append(shard)
    # Continue for any shards that have this shard as their parent
    to_insert.extend(by_parent[shard.shard_id])

return self.children
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_records(self): """Get the next set of records in this shard. An empty list doesn't guarantee the shard is exhausted. :returns: A list of reformatted records. May be empty. """
# Won't be able to find new records.
if self.exhausted:
    return []

# Already caught up, just the one call please.
if self.empty_responses >= CALLS_TO_REACH_HEAD:
    return self._apply_get_records_response(self.session.get_stream_records(self.iterator_id))

# Up to 5 calls to try and find a result
while self.empty_responses < CALLS_TO_REACH_HEAD and not self.exhausted:
    records = self._apply_get_records_response(self.session.get_stream_records(self.iterator_id))
    if records:
        return records

return []
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bind(self, model, *, skip_table_setup=False): """Create backing tables for a model and its non-abstract subclasses. :param model: Base model to bind. Can be abstract. :param skip_table_setup: Don't create or verify the table in DynamoDB. Default is False. :raises bloop.exceptions.InvalidModel: if ``model`` is not a subclass of :class:`~bloop.models.BaseModel`. """
# Make sure we're looking at models
validate_is_model(model)

concrete = set(filter(lambda m: not m.Meta.abstract, walk_subclasses(model)))
if not model.Meta.abstract:
    concrete.add(model)
logger.debug("binding non-abstract models {}".format(
    sorted(c.__name__ for c in concrete)
))

# create_table doesn't block until ACTIVE or validate.
# It also doesn't throw when the table already exists, making it safe
# to call multiple times for the same unbound model.
if skip_table_setup:
    logger.info("skip_table_setup is True; not trying to create tables or validate models during bind")
else:
    self.session.clear_cache()

is_creating = {}

for model in concrete:
    table_name = self._compute_table_name(model)
    before_create_table.send(self, engine=self, model=model)
    if not skip_table_setup:
        if table_name in is_creating:
            continue
        creating = self.session.create_table(table_name, model)
        is_creating[table_name] = creating

for model in concrete:
    if not skip_table_setup:
        table_name = self._compute_table_name(model)
        if is_creating[table_name]:
            # polls until table is active
            self.session.describe_table(table_name)
            if model.Meta.ttl:
                self.session.enable_ttl(table_name, model)
            if model.Meta.backups and model.Meta.backups["enabled"]:
                self.session.enable_backups(table_name, model)
        self.session.validate_table(table_name, model)
        model_validated.send(self, engine=self, model=model)
    model_bound.send(self, engine=self, model=model)

logger.info("successfully bound {} models to the engine".format(len(concrete)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete(self, *objs, condition=None, atomic=False): """Delete one or more objects. :param objs: objects to delete. :param condition: only perform each delete if this condition holds. :param bool atomic: only perform each delete if the local and DynamoDB versions of the object match. :raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met. """
objs = set(objs)
validate_not_abstract(*objs)
for obj in objs:
    self.session.delete_item({
        "TableName": self._compute_table_name(obj.__class__),
        "Key": dump_key(self, obj),
        **render(self, obj=obj, atomic=atomic, condition=condition)
    })
    object_deleted.send(self, engine=self, obj=obj)
logger.info("successfully deleted {} objects".format(len(objs)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(self, *objs, consistent=False): """Populate objects from DynamoDB. :param objs: objects to load. :param bool consistent: Use `strongly consistent reads`__ if True. Default is False. :raises bloop.exceptions.MissingKey: if any object doesn't provide a value for a key column. :raises bloop.exceptions.MissingObjects: if one or more objects aren't loaded. __ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html """
get_table_name = self._compute_table_name
objs = set(objs)
validate_not_abstract(*objs)

table_index, object_index, request = {}, {}, {}

for obj in objs:
    table_name = get_table_name(obj.__class__)
    key = dump_key(self, obj)
    index = index_for(key)

    if table_name not in object_index:
        table_index[table_name] = list(sorted(key.keys()))
        object_index[table_name] = {}
        request[table_name] = {"Keys": [], "ConsistentRead": consistent}

    if index not in object_index[table_name]:
        request[table_name]["Keys"].append(key)
        object_index[table_name][index] = set()
    object_index[table_name][index].add(obj)

response = self.session.load_items(request)

for table_name, list_of_attrs in response.items():
    for attrs in list_of_attrs:
        key_shape = table_index[table_name]
        key = extract_key(key_shape, attrs)
        index = index_for(key)

        for obj in object_index[table_name].pop(index):
            unpack_from_dynamodb(
                attrs=attrs, expected=obj.Meta.columns, engine=self, obj=obj)
            object_loaded.send(self, engine=self, obj=obj)
        if not object_index[table_name]:
            object_index.pop(table_name)

if object_index:
    not_loaded = set()
    for index in object_index.values():
        for index_set in index.values():
            not_loaded.update(index_set)
    logger.info("loaded {} of {} objects".format(len(objs) - len(not_loaded), len(objs)))
    raise MissingObjects("Failed to load some objects.", objects=not_loaded)
logger.info("successfully loaded {} objects".format(len(objs)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self, *objs, condition=None, atomic=False): """Save one or more objects. :param objs: objects to save. :param condition: only perform each save if this condition holds. :param bool atomic: only perform each save if the local and DynamoDB versions of the object match. :raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met. """
objs = set(objs)
validate_not_abstract(*objs)
for obj in objs:
    self.session.save_item({
        "TableName": self._compute_table_name(obj.__class__),
        "Key": dump_key(self, obj),
        **render(self, obj=obj, atomic=atomic, condition=condition, update=True)
    })
    object_saved.send(self, engine=self, obj=obj)
logger.info("successfully saved {} objects".format(len(objs)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def backing_type_for(value): """Returns the DynamoDB backing type for a given python value's type :: 4 -> 'N' ['x', 3] -> 'L' {2, 4} -> 'SS' """
if isinstance(value, str):
    vtype = "S"
elif isinstance(value, bytes):
    vtype = "B"
# NOTE: numbers.Number check must come **AFTER** bool check since isinstance(True, numbers.Number)
elif isinstance(value, bool):
    vtype = "BOOL"
elif isinstance(value, numbers.Number):
    vtype = "N"
elif isinstance(value, dict):
    vtype = "M"
elif isinstance(value, list):
    vtype = "L"
elif isinstance(value, set):
    if not value:
        vtype = "SS"  # doesn't matter, Set(x) should dump an empty set the same for all x
    else:
        inner = next(iter(value))
        if isinstance(inner, str):
            vtype = "SS"
        elif isinstance(inner, bytes):
            vtype = "BS"
        elif isinstance(inner, numbers.Number):
            vtype = "NS"
        else:
            raise ValueError(f"Unknown set type for inner value {inner!r}")
else:
    raise ValueError(f"Can't dump unexpected type {type(value)!r} for value {value!r}")
return vtype
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stream_replicate(): """Monitor changes in approximately real-time and replicate them"""
stream = primary.stream(SomeDataBlob, "trim_horizon")
next_heartbeat = pendulum.now()
while True:
    now = pendulum.now()
    if now >= next_heartbeat:
        stream.heartbeat()
        next_heartbeat = now.add(minutes=10)
    record = next(stream)
    if record is None:
        continue
    if record["new"] is not None:
        replica.save(record["new"])
    else:
        replica.delete(record["old"])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _move_stream_endpoint(coordinator, position): """Move to the "trim_horizon" or "latest" of the entire stream."""
# 0) Everything will be rebuilt from DescribeStream.
stream_arn = coordinator.stream_arn
coordinator.roots.clear()
coordinator.active.clear()
coordinator.buffer.clear()

# 1) Build a Dict[str, Shard] of the current Stream from a DescribeStream call
current_shards = coordinator.session.describe_stream(stream_arn=stream_arn)["Shards"]
current_shards = unpack_shards(current_shards, stream_arn, coordinator.session)

# 2) Roots are any shards without parents.
coordinator.roots.extend(shard for shard in current_shards.values() if not shard.parent)

# 3.0) Stream trim_horizon is the combined trim_horizon of all roots.
if position == "trim_horizon":
    for shard in coordinator.roots:
        shard.jump_to(iterator_type="trim_horizon")
    coordinator.active.extend(coordinator.roots)
# 3.1) Stream latest is the combined latest of all shards without children.
else:
    for root in coordinator.roots:
        for shard in root.walk_tree():
            if not shard.children:
                shard.jump_to(iterator_type="latest")
                coordinator.active.append(shard)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _move_stream_token(coordinator, token): """Move to the Stream position described by the token. The following rules are applied when interpolation is required: - If a shard does not exist (past the trim_horizon) it is ignored. If that shard had children, its children are also checked against the existing shards. - If none of the shards in the token exist, then InvalidStream is raised. - If a Shard expects its iterator to point to a SequenceNumber that is now past that Shard's trim_horizon, the Shard instead points to trim_horizon. """
stream_arn = coordinator.stream_arn = token["stream_arn"] # 0) Everything will be rebuilt from the DescribeStream masked by the token. coordinator.roots.clear() coordinator.active.clear() coordinator.closed.clear() coordinator.buffer.clear() # Injecting the token gives us access to the standard shard management functions token_shards = unpack_shards(token["shards"], stream_arn, coordinator.session) coordinator.roots = [shard for shard in token_shards.values() if not shard.parent] coordinator.active.extend(token_shards[shard_id] for shard_id in token["active"]) # 1) Build a Dict[str, Shard] of the current Stream from a DescribeStream call current_shards = coordinator.session.describe_stream(stream_arn=stream_arn)["Shards"] current_shards = unpack_shards(current_shards, stream_arn, coordinator.session) # 2) Trying to find an intersection with the actual Stream by walking each root shard's tree. # Prune any Shard with no children that's not part of the actual Stream. # Raise InvalidStream if the entire token is pruned. unverified = collections.deque(coordinator.roots) while unverified: shard = unverified.popleft() if shard.shard_id not in current_shards: logger.info("Unknown or expired shard \"{}\" - pruning from stream token".format(shard.shard_id)) coordinator.remove_shard(shard, drop_buffered_records=True) unverified.extend(shard.children) # 3) Everything was pruned, so the token describes an unknown stream. if not coordinator.roots: raise InvalidStream("This token has no relation to the actual Stream.") # 4) Now that everything's verified, grab new iterators for the coordinator's active Shards. for shard in coordinator.active: try: if shard.iterator_type is None: # Descendant of an unknown shard shard.iterator_type = "trim_horizon" # Move back to the token's specified position shard.jump_to(iterator_type=shard.iterator_type, sequence_number=shard.sequence_number) except RecordsExpired: # This token shard's sequence_number is beyond the trim_horizon. # The next closest record is at trim_horizon. msg = "SequenceNumber \"{}\" in shard \"{}\" beyond trim horizon: jumping to trim_horizon" logger.info(msg.format(shard.sequence_number, shard.shard_id)) shard.jump_to(iterator_type="trim_horizon")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def advance_shards(self): """Poll active shards for records and insert them into the buffer. Rotate exhausted shards. Returns immediately if the buffer isn't empty. """
# Don't poll shards when there are pending records.
if self.buffer:
    return

# 0) Collect new records from all active shards.
record_shard_pairs = []
for shard in self.active:
    records = next(shard)
    if records:
        record_shard_pairs.extend((record, shard) for record in records)
self.buffer.push_all(record_shard_pairs)

self.migrate_closed_shards()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def heartbeat(self): """Keep active shards with "trim_horizon", "latest" iterators alive by advancing their iterators."""
for shard in self.active:
    if shard.sequence_number is None:
        records = next(shard)
        # Success!  This shard now has an ``at_sequence`` iterator
        if records:
            self.buffer.push_all((record, shard) for record in records)
self.migrate_closed_shards()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def token(self): """JSON-serializable representation of the current Stream state. Use :func:`Engine.stream(YourModel, token) <bloop.engine.Engine.stream>` to create an identical stream, or :func:`stream.move_to(token) <bloop.stream.Stream.move_to>` to move an existing stream to this position. :returns: Stream state as a json-friendly dict :rtype: dict """
# 0) Trace roots and active shards
active_ids = []
shard_tokens = []
for root in self.roots:
    for shard in root.walk_tree():
        shard_tokens.append(shard.token)
        # dedupe, stream_arn will be in the root token
        shard_tokens[-1].pop("stream_arn")
active_ids.extend((shard.shard_id for shard in self.active))

# 1) Inject closed shards
for shard in self.closed.keys():
    active_ids.append(shard.shard_id)
    shard_tokens.append(shard.token)
    shard_tokens[-1].pop("stream_arn")

return {
    "stream_arn": self.stream_arn,
    "active": active_ids,
    "shards": shard_tokens
}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_shard(self, shard, drop_buffered_records=False): """Remove a Shard from the Coordinator. Drops all buffered records from the Shard. If the Shard is active or a root, it is removed and any children promoted to those roles. :param shard: The shard to remove :type shard: :class:`~bloop.stream.shard.Shard` :param bool drop_buffered_records: Whether records from this shard should be removed. Default is False. """
try:
    self.roots.remove(shard)
except ValueError:
    # Wasn't a root Shard
    pass
else:
    self.roots.extend(shard.children)

try:
    self.active.remove(shard)
except ValueError:
    # Wasn't an active Shard
    pass
else:
    self.active.extend(shard.children)

if drop_buffered_records:
    # TODO can this be improved?  Gets expensive for high-volume streams with large buffers
    heap = self.buffer.heap
    # Clear buffered records from the shard.  Each record is (ordering, record, shard)
    to_remove = [x for x in heap if x[2] is shard]
    for x in to_remove:
        heap.remove(x)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def move_to(self, position): """Set the Coordinator to a specific endpoint or time, or load state from a token. :param position: "trim_horizon", "latest", :class:`~datetime.datetime`, or a :attr:`Coordinator.token <bloop.stream.coordinator.Coordinator.token>` """
if isinstance(position, collections.abc.Mapping):
    move = _move_stream_token
elif hasattr(position, "timestamp") and callable(position.timestamp):
    move = _move_stream_time
elif isinstance(position, str) and position.lower() in ["latest", "trim_horizon"]:
    move = _move_stream_endpoint
else:
    raise InvalidPosition("Don't know how to move to position {!r}".format(position))
move(self, position)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def push(self, record, shard): """Push a new record into the buffer :param dict record: new record :param shard: Shard the record came from :type shard: :class:`~bloop.stream.shard.Shard` """
heapq.heappush(self.heap, heap_item(self.clock, record, shard))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def unpack_from_dynamodb(*, attrs, expected, model=None, obj=None, engine=None, context=None, **kwargs): """Push values by dynamo_name into an object"""
context = context or {"engine": engine}
engine = engine or context.get("engine", None)
if not engine:
    raise ValueError("You must provide engine or a context with an engine.")
if model is None and obj is None:
    raise ValueError("You must provide a model or obj to unpack.")
if model is not None and obj is not None:
    raise ValueError("Only specify model or obj.")
if model:
    obj = model.Meta.init()

for column in expected:
    value = attrs.get(column.dynamo_name, None)
    value = engine._load(column.typedef, value, context=context, **kwargs)
    setattr(obj, column.name, value)
return obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def setdefault(obj, field, default): """Set an object's field to default if it doesn't have a value"""
setattr(obj, field, getattr(obj, field, default))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bind_column(model, name, column, force=False, recursive=False, copy=False) -> Column: """Bind a column to the model with the given name. This method is primarily used during BaseModel.__init_subclass__, although it can be used to easily attach a new column to an existing model: .. code-block:: python import bloop.models class User(BaseModel): id = Column(String, hash_key=True) email = Column(String, dynamo_name="e") bound = bloop.models.bind_column(User, "email", email) assert bound is email # rebind with force, and use a copy bound = bloop.models.bind_column(User, "email", email, force=True, copy=True) assert bound is not email If an existing index refers to this column, it will be updated to point to the new column using :meth:`~bloop.models.refresh_index`, including recalculating the index projection. Meta attributes including ``Meta.columns``, ``Meta.hash_key``, etc. will be updated if necessary. If ``name`` or the column's ``dynamo_name`` conflicts with an existing column or index on the model, raises :exc:`~bloop.exceptions.InvalidModel` unless ``force`` is True. If ``recursive`` is ``True`` and there are existing subclasses of ``model``, a copy of the column will attempt to bind to each subclass. The recursive calls will not force the bind, and will always use a new copy. If ``copy`` is ``True`` then a copy of the provided column is used. This uses a shallow copy via :meth:`~bloop.models.Column.__copy__`. :param model: The model to bind the column to. :param name: The name to bind the column as. In effect, used for ``setattr(model, name, column)`` :param column: The column to bind to the model. :param force: Unbind existing columns or indexes with the same name or dynamo_name. Default is False. :param recursive: Bind to each subclass of this model. Default is False. :param copy: Use a copy of the column instead of the column directly. Default is False. :return: The bound column. This is a new column when ``copy`` is True, otherwise the input column. """
if not subclassof(model, BaseModel): raise InvalidModel(f"{model} is not a subclass of BaseModel") meta = model.Meta if copy: column = copyfn(column) # TODO elif column.model is not None: logger.warning(f"Trying to rebind column bound to {column.model}") column._name = name safe_repr = unbound_repr(column) # Guard against name, dynamo_name collisions; if force=True, unbind any matches same_dynamo_name = ( util.index(meta.columns, "dynamo_name").get(column.dynamo_name) or util.index(meta.indexes, "dynamo_name").get(column.dynamo_name) ) same_name = ( meta.columns_by_name.get(column.name) or util.index(meta.indexes, "name").get(column.name) ) if column.hash_key and column.range_key: raise InvalidModel(f"Tried to bind {safe_repr} as both a hash and range key.") if force: if same_name: unbind(meta, name=column.name) if same_dynamo_name: unbind(meta, dynamo_name=column.dynamo_name) else: if same_name: raise InvalidModel( f"The column {safe_repr} has the same name as an existing column " f"or index {same_name}. Did you mean to bind with force=True?") if same_dynamo_name: raise InvalidModel( f"The column {safe_repr} has the same dynamo_name as an existing " f"column or index {same_name}. Did you mean to bind with force=True?") if column.hash_key and meta.hash_key: raise InvalidModel( f"Tried to bind {safe_repr} but {meta.model} " f"already has a different hash_key: {meta.hash_key}") if column.range_key and meta.range_key: raise InvalidModel( f"Tried to bind {safe_repr} but {meta.model} " f"already has a different range_key: {meta.range_key}") # success! # -------------------------------- column.model = meta.model meta.columns.add(column) meta.columns_by_name[name] = column setattr(meta.model, name, column) if column.hash_key: meta.hash_key = column meta.keys.add(column) if column.range_key: meta.range_key = column meta.keys.add(column) try: for index in meta.indexes: refresh_index(meta, index) except KeyError as e: raise InvalidModel( f"Binding column {column} removed a required column for index {unbound_repr(index)}") from e if recursive: for subclass in util.walk_subclasses(meta.model): try: bind_column(subclass, name, column, force=False, recursive=False, copy=True) except InvalidModel: pass return column
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bind_index(model, name, index, force=False, recursive=True, copy=False) -> Index: """Bind an index to the model with the given name. This method is primarily used during BaseModel.__init_subclass__, although it can be used to easily attach a new index to an existing model: .. code-block:: python import bloop.models class User(BaseModel): id = Column(String, hash_key=True) email = Column(String, dynamo_name="e") by_email = GlobalSecondaryIndex(projection="keys", hash_key="email") bound = bloop.models.bind_index(User, "by_email", by_email) assert bound is by_email # rebind with force, and use a copy bound = bloop.models.bind_index(User, "by_email", by_email, force=True, copy=True) assert bound is not by_email If ``name`` or the index's ``dynamo_name`` conflicts with an existing column or index on the model, raises :exc:`~bloop.exceptions.InvalidModel` unless ``force`` is True. If ``recursive`` is ``True`` and there are existing subclasses of ``model``, a copy of the index will attempt to bind to each subclass. The recursive calls will not force the bind, and will always use a new copy. If ``copy`` is ``True`` then a copy of the provided index is used. This uses a shallow copy via :meth:`~bloop.models.Index.__copy__`. :param model: The model to bind the index to. :param name: The name to bind the index as. In effect, used for ``setattr(model, name, index)`` :param index: The index to bind to the model. :param force: Unbind existing columns or indexes with the same name or dynamo_name. Default is False. :param recursive: Bind to each subclass of this model. Default is True. :param copy: Use a copy of the index instead of the index directly. Default is False. :return: The bound index. This is a new index when ``copy`` is True, otherwise the input index. """
if not subclassof(model, BaseModel): raise InvalidModel(f"{model} is not a subclass of BaseModel") meta = model.Meta if copy: index = copyfn(index) # TODO elif index.model is not None: logger.warning(f"Trying to rebind index bound to {index.model}") index._name = name safe_repr = unbound_repr(index) # Guard against name, dynamo_name collisions; if force=True, unbind any matches same_dynamo_name = ( util.index(meta.columns, "dynamo_name").get(index.dynamo_name) or util.index(meta.indexes, "dynamo_name").get(index.dynamo_name) ) same_name = ( meta.columns_by_name.get(index.name) or util.index(meta.indexes, "name").get(index.name) ) if isinstance(index, LocalSecondaryIndex) and not meta.range_key: raise InvalidModel("An LSI requires the Model to have a range key.") if force: if same_name: unbind(meta, name=index.name) if same_dynamo_name: unbind(meta, dynamo_name=index.dynamo_name) else: if same_name: raise InvalidModel( f"The index {safe_repr} has the same name as an existing index " f"or column {same_name}. Did you mean to bind with force=True?") if same_dynamo_name: raise InvalidModel( f"The index {safe_repr} has the same dynamo_name as an existing " f"index or column {same_name}. Did you mean to bind with force=True?") # success! # -------------------------------- index.model = meta.model meta.indexes.add(index) setattr(meta.model, name, index) if isinstance(index, LocalSecondaryIndex): meta.lsis.add(index) if isinstance(index, GlobalSecondaryIndex): meta.gsis.add(index) try: refresh_index(meta, index) except KeyError as e: raise InvalidModel("Index expected a hash or range key that does not exist") from e if recursive: for subclass in util.walk_subclasses(meta.model): try: bind_index(subclass, name, index, force=False, recursive=False, copy=True) except InvalidModel: pass return index
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def refresh_index(meta, index) -> None: """Recalculate the projection, hash_key, and range_key for the given index. :param meta: model.Meta to find columns by name :param index: The index to refresh """
# All projections include model + index keys projection_keys = set.union(meta.keys, index.keys) proj = index.projection mode = proj["mode"] if mode == "keys": proj["included"] = projection_keys elif mode == "all": proj["included"] = meta.columns elif mode == "include": # pragma: no branch if all(isinstance(p, str) for p in proj["included"]): proj["included"] = set(meta.columns_by_name[n] for n in proj["included"]) else: proj["included"] = set(proj["included"]) proj["included"].update(projection_keys) if proj["strict"]: proj["available"] = proj["included"] else: proj["available"] = meta.columns
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def unbind(meta, name=None, dynamo_name=None) -> None: """Unconditionally remove any columns or indexes bound to the given name or dynamo_name. .. code-block:: python import bloop.models class User(BaseModel): id = Column(String, hash_key=True) email = Column(String, dynamo_name="e") by_email = GlobalSecondaryIndex(projection="keys", hash_key=email) for dynamo_name in ("id", "e", "by_email"): bloop.models.unbind(User.Meta, dynamo_name=dynamo_name) assert not User.Meta.columns assert not User.Meta.indexes assert not User.Meta.keys .. warning:: This method does not pre- or post- validate the model with the requested changes. You are responsible for ensuring the model still has a hash key, that required columns exist for each index, etc. :param meta: model.Meta to remove the columns or indexes from :param name: column or index name to unbind by. Default is None. :param dynamo_name: column or index name to unbind by. Default is None. """
if name is not None: columns = {x for x in meta.columns if x.name == name} indexes = {x for x in meta.indexes if x.name == name} elif dynamo_name is not None: columns = {x for x in meta.columns if x.dynamo_name == dynamo_name} indexes = {x for x in meta.indexes if x.dynamo_name == dynamo_name} else: raise RuntimeError("Must provide name= or dynamo_name= to unbind from meta") # Nothing in bloop should allow name or dynamo_name # collisions to exist, so this is either a bug or # the user manually hacked up meta. assert len(columns) <= 1 assert len(indexes) <= 1 assert not (columns and indexes) if columns: [column] = columns meta.columns.remove(column) # If these don't line up, there's likely a bug in bloop # or the user manually hacked up columns_by_name expect_same = meta.columns_by_name[column.name] assert expect_same is column meta.columns_by_name.pop(column.name) if column in meta.keys: meta.keys.remove(column) if meta.hash_key is column: meta.hash_key = None if meta.range_key is column: meta.range_key = None delattr(meta.model, column.name) if indexes: [index] = indexes meta.indexes.remove(index) if index in meta.gsis: meta.gsis.remove(index) if index in meta.lsis: meta.lsis.remove(index) delattr(meta.model, index.name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _dump(cls, obj, *, context, **kwargs): """ obj -> dict """
if obj is None: return None dump = context["engine"]._dump filtered = filter( lambda item: item[1] is not None, (( column.dynamo_name, dump(column.typedef, getattr(obj, column.name, None), context=context, **kwargs) ) for column in cls.Meta.columns)) return dict(filtered) or None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_valid_superset(actual_projection, index): """Returns True if the actual index is a valid superset of the expected index"""
projection_type = actual_projection["ProjectionType"] if projection_type == "ALL": return True meta = index.model.Meta # all index types provide index keys and model keys provides = set.union(meta.keys, index.keys) if projection_type == "KEYS_ONLY": pass elif projection_type == "INCLUDE": # pragma: no branch (unknown projections break loud) by_dynamo_name = {column.dynamo_name: column for column in meta.columns} provides.update( by_dynamo_name[name] for name in actual_projection["NonKeyAttributes"] if name in by_dynamo_name # ignore columns the projection provides if the model doesn't care about them ) else: logger.info(f"unexpected index ProjectionType '{projection_type}'") return False expects = index.projection["included"] return provides.issuperset(expects)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_item(self, item): """Save an object to DynamoDB. :param item: Unpacked into kwargs for :func:`boto3.DynamoDB.Client.update_item`. :raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met. """
try: self.dynamodb_client.update_item(**item) except botocore.exceptions.ClientError as error: handle_constraint_violation(error)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete_item(self, item): """Delete an object in DynamoDB. :param item: Unpacked into kwargs for :func:`boto3.DynamoDB.Client.delete_item`. :raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met. """
try: self.dynamodb_client.delete_item(**item) except botocore.exceptions.ClientError as error: handle_constraint_violation(error)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_items(self, items): """Loads any number of items in chunks, handling continuation tokens. :param items: Unpacked in chunks into "RequestItems" for :func:`boto3.DynamoDB.Client.batch_get_item`. """
loaded_items = {} requests = collections.deque(create_batch_get_chunks(items)) while requests: request = requests.pop() try: response = self.dynamodb_client.batch_get_item(RequestItems=request) except botocore.exceptions.ClientError as error: raise BloopException("Unexpected error while loading items.") from error # Accumulate results for table_name, table_items in response.get("Responses", {}).items(): loaded_items.setdefault(table_name, []).extend(table_items) # Push additional request onto the deque. # "UnprocessedKeys" is {} if this request is done if response["UnprocessedKeys"]: requests.append(response["UnprocessedKeys"]) return loaded_items
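For context, DynamoDB's ``batch_get_item`` accepts a bounded number of keys per request, which is why ``load_items`` consumes pre-chunked requests. The helper below is only a minimal sketch of such chunking, assuming a 100-key limit and input of the form ``{table_name: {"Keys": [...], ...}}``; the real ``create_batch_get_chunks`` used above may differ.

.. code-block:: python

    def create_batch_get_chunks(items, chunk_size=100):
        """Sketch: yield "RequestItems" dicts holding at most chunk_size keys in total."""
        buffer, count = {}, 0
        for table_name, request in items.items():
            for key in request["Keys"]:
                if count == chunk_size:
                    # current chunk is full; emit it and start a new one
                    yield buffer
                    buffer, count = {}, 0
                table = buffer.setdefault(
                    table_name,
                    {"Keys": [], "ConsistentRead": request.get("ConsistentRead", False)})
                table["Keys"].append(key)
                count += 1
        if count:
            yield buffer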
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_table(self, table_name, model): """Create the model's table. Returns True if the table is being created, False otherwise. Does not wait for the table to create, and does not validate an existing table. Will not raise "ResourceInUseException" if the table exists or is being created. :param str table_name: The name of the table to create for the model. :param model: The :class:`~bloop.models.BaseModel` to create the table for. :return: True if the table is being created, False if the table exists :rtype: bool """
table = create_table_request(table_name, model) try: self.dynamodb_client.create_table(**table) is_creating = True except botocore.exceptions.ClientError as error: handle_table_exists(error, model) is_creating = False return is_creating
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def describe_table(self, table_name): """ Polls until the table is ready, then returns the description from the call that found it ready. The returned dict is standardized to ensure all fields are present, even when empty or across different DynamoDB API versions. TTL information is also inserted. :param table_name: The name of the table to describe :return: The (sanitized) result of DescribeTable["Table"] :rtype: dict """
if table_name in self._tables: return self._tables[table_name] status, description = None, {} calls = 0 while status is not ready: calls += 1 try: description = self.dynamodb_client.describe_table(TableName=table_name)["Table"] except botocore.exceptions.ClientError as error: raise BloopException("Unexpected error while describing table.") from error status = simple_table_status(description) logger.debug("describe_table: table \"{}\" was in ACTIVE state after {} calls".format(table_name, calls)) try: ttl = self.dynamodb_client.describe_time_to_live(TableName=table_name) except botocore.exceptions.ClientError as error: raise BloopException("Unexpected error while describing ttl.") from error try: backups = self.dynamodb_client.describe_continuous_backups(TableName=table_name) except botocore.exceptions.ClientError as error: raise BloopException("Unexpected error while describing continuous backups.") from error description["TimeToLiveDescription"] = { "AttributeName": _read_field(ttl, None, "TimeToLiveDescription", "AttributeName"), "TimeToLiveStatus": _read_field(ttl, None, "TimeToLiveDescription", "TimeToLiveStatus"), } description["ContinuousBackupsDescription"] = { "ContinuousBackupsStatus": _read_field( backups, None, "ContinuousBackupsDescription", "ContinuousBackupsStatus"), } table = self._tables[table_name] = sanitize_table_description(description) return table
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate_table(self, table_name, model): """Polls until a creating table is ready, then verifies the description against the model's requirements. The model may have a subset of all GSIs and LSIs on the table, but the key structure must be exactly the same. The table must have a stream if the model expects one, but not the other way around. When read or write units are not specified for the model or any GSI, the existing values will always pass validation. :param str table_name: The name of the table to validate the model against. :param model: The :class:`~bloop.models.BaseModel` to validate the table of. :raises bloop.exceptions.TableMismatch: When the table does not meet the constraints of the model. """
actual = self.describe_table(table_name) if not compare_tables(model, actual): raise TableMismatch("The expected and actual tables for {!r} do not match.".format(model.__name__)) # Fill in values that Meta doesn't know ahead of time (such as arns). # These won't be populated unless Meta explicitly cares about the value if model.Meta.stream: stream_arn = model.Meta.stream["arn"] = actual["LatestStreamArn"] logger.debug(f"Set {model.__name__}.Meta.stream['arn'] to '{stream_arn}' from DescribeTable response") if model.Meta.ttl: ttl_enabled = actual["TimeToLiveDescription"]["TimeToLiveStatus"].lower() == "enabled" model.Meta.ttl["enabled"] = ttl_enabled logger.debug(f"Set {model.__name__}.Meta.ttl['enabled'] to '{ttl_enabled}' from DescribeTable response") # Fill in meta values that the table didn't care about (eg. billing=None) if model.Meta.encryption is None: sse_enabled = actual["SSEDescription"]["Status"].lower() == "enabled" model.Meta.encryption = {"enabled": sse_enabled} logger.debug( f"Set {model.__name__}.Meta.encryption['enabled'] to '{sse_enabled}' from DescribeTable response") if model.Meta.backups is None: backups = actual["ContinuousBackupsDescription"]["ContinuousBackupsStatus"] == "ENABLED" model.Meta.backups = {"enabled": backups} logger.debug(f"Set {model.__name__}.Meta.backups['enabled'] to '{backups}' from DescribeTable response") if model.Meta.billing is None: billing_mode = { "PAY_PER_REQUEST": "on_demand", "PROVISIONED": "provisioned" }[actual["BillingModeSummary"]["BillingMode"]] model.Meta.billing = {"mode": billing_mode} logger.debug(f"Set {model.__name__}.Meta.billing['mode'] to '{billing_mode}' from DescribeTable response") if model.Meta.read_units is None: read_units = model.Meta.read_units = actual["ProvisionedThroughput"]["ReadCapacityUnits"] logger.debug( f"Set {model.__name__}.Meta.read_units to {read_units} from DescribeTable response") if model.Meta.write_units is None: write_units = model.Meta.write_units = actual["ProvisionedThroughput"]["WriteCapacityUnits"] logger.debug( f"Set {model.__name__}.Meta.write_units to {write_units} from DescribeTable response") # Replace any ``None`` values for read_units, write_units in GSIs with their actual values gsis = {index["IndexName"]: index for index in actual["GlobalSecondaryIndexes"]} for index in model.Meta.gsis: read_units = gsis[index.dynamo_name]["ProvisionedThroughput"]["ReadCapacityUnits"] write_units = gsis[index.dynamo_name]["ProvisionedThroughput"]["WriteCapacityUnits"] if index.read_units is None: index.read_units = read_units logger.debug( f"Set {model.__name__}.{index.name}.read_units to {read_units} from DescribeTable response") if index.write_units is None: index.write_units = write_units logger.debug( f"Set {model.__name__}.{index.name}.write_units to {write_units} from DescribeTable response")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_hash_key(query_on, key): """Only allows == against query_on.hash_key"""
return ( isinstance(key, BaseCondition) and (key.operation == "==") and (key.column is query_on.hash_key) )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_range_key(query_on, key): """BeginsWith, Between, or any Comparison except '!=' against query_on.range_key"""
return ( isinstance(key, BaseCondition) and key.operation in ("begins_with", "between", "<", ">", "<=", ">=", "==") and key.column is query_on.range_key )
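A rough illustration of both validators, using a hypothetical ``Tweet`` model (the column names and the ``begins_with``/``between`` condition syntax are assumptions for this sketch; ``Tweet.Meta`` stands in for anything exposing ``hash_key`` and ``range_key``, such as an index):

.. code-block:: python

    from bloop import BaseModel, Column, String

    class Tweet(BaseModel):
        account = Column(String, hash_key=True)
        created = Column(String, range_key=True)

    query_on = Tweet.Meta  # exposes .hash_key and .range_key

    assert check_hash_key(query_on, Tweet.account == "numberoverzero")
    assert not check_hash_key(query_on, Tweet.account.begins_with("number"))  # only '==' allowed

    assert check_range_key(query_on, Tweet.created.between("2019", "2020"))
    assert not check_range_key(query_on, Tweet.created != "2019")             # '!=' is excluded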
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def count(self): """Number of items that have been loaded from DynamoDB so far, including buffered items."""
if self.request["Select"] == "COUNT": while not self.exhausted: next(self, None) return self._count
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def scanned(self): """Number of items that DynamoDB evaluated, before any filter was applied."""
if self.request["Select"] == "COUNT": while not self.exhausted: next(self, None) return self._scanned
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reset(self): """Reset to the initial state, clearing the buffer and zeroing count and scanned."""
self.buffer.clear() self._count = 0 self._scanned = 0 self._exhausted = False self.request.pop("ExclusiveStartKey", None)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def prepare(self): """ Create a new PreparedTransaction that can be committed. This is called automatically when exiting the transaction as a context: .. code-block:: python # automatically calls commit when exiting with engine.transaction() as tx: ... :return: a PreparedTransaction that can be committed """
tx = PreparedTransaction() tx.prepare( engine=self.engine, mode=self.mode, items=self._items, ) return tx
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def prepare(self, engine, mode, items) -> None: """ Create a unique transaction id and dump the items into a cached request object. """
self.tx_id = str(uuid.uuid4()).replace("-", "") self.engine = engine self.mode = mode self.items = items self._prepare_request()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def commit(self) -> None: """ Commit the transaction with a fixed transaction id. A read transaction can call commit() any number of times, while a write transaction can only use the same tx_id for 10 minutes from the first call. """
now = datetime.now(timezone.utc) if self.first_commit_at is None: self.first_commit_at = now if self.mode == "r": response = self.engine.session.transaction_read(self._request) elif self.mode == "w": if now - self.first_commit_at > MAX_TOKEN_LIFETIME: raise TransactionTokenExpired response = self.engine.session.transaction_write(self._request, self.tx_id) else: raise ValueError(f"unrecognized mode {self.mode}") self._handle_response(response)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(self, *objs) -> "ReadTransaction": """ Add one or more objects to be loaded in this transaction. At most 10 items can be loaded in the same transaction. All objects will be loaded each time you call commit(). :param objs: Objects to add to the set that are loaded in this transaction. :return: this transaction for chaining :raises bloop.exceptions.MissingObjects: if one or more objects aren't loaded. """
self._extend([TxItem.new("get", obj) for obj in objs]) return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check(self, obj, condition) -> "WriteTransaction": """ Add a condition which must be met for the transaction to commit. While the condition is checked against the provided object, that object will not be modified. It is only used to provide the hash and range key to apply the condition to. At most 10 items can be checked, saved, or deleted in the same transaction. The same idempotency token will be used for a single prepared transaction, which allows you to safely call commit on the PreparedTransaction object multiple times. :param obj: The object to use for the transaction condition. This object will not be modified. :param condition: A condition on an object which must hold for the transaction to commit. :return: this transaction for chaining """
self._extend([TxItem.new("check", obj, condition)]) return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self, *objs, condition=None, atomic=False) -> "WriteTransaction": """ Add one or more objects to be saved in this transaction. At most 10 items can be checked, saved, or deleted in the same transaction. The same idempotency token will be used for a single prepared transaction, which allows you to safely call commit on the PreparedTransaction object multiple times. :param objs: Objects to add to the set that are updated in this transaction. :param condition: A condition for these objects which must hold for the transaction to commit. :param bool atomic: only commit the transaction if the local and DynamoDB versions of the object match. :return: this transaction for chaining """
self._extend([TxItem.new("save", obj, condition, atomic) for obj in objs]) return self
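Putting the pieces together, a write transaction might be used roughly as follows. The ``User`` model, its objects, and ``engine`` are hypothetical here, and it is assumed that ``engine.transaction(mode="w")`` returns a ``WriteTransaction`` whose context exit calls ``prepare()`` and ``commit()`` as described above:

.. code-block:: python

    user = User(id="u1", email="user@example.com")
    other = User(id="u2")

    with engine.transaction(mode="w") as tx:
        # only save if email was not already set
        tx.save(user, condition=User.email.is_(None))
        # guard on another item without modifying it
        tx.check(other, condition=User.id.is_not(None))

    # or prepare explicitly and commit within the 10 minute token lifetime
    prepared = engine.transaction(mode="w").save(user).prepare()
    prepared.commit()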
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def encode(self, cube_dimensions): """ Produces a numpy array of integers which encode the supplied cube dimensions. """
return np.asarray([getattr(cube_dimensions[d], s) for d in self._dimensions for s in self._schema], dtype=np.int32)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def decode(self, descriptor): """ Produce a list of dictionaries, one for each dimension in this transcoder """
i = iter(descriptor) n = len(self._schema) # Add the name key to our schema schema = self._schema + ('name',) # For each dimensions, generator takes n items off iterator # wrapping the descriptor, making a tuple with the dimension # name appended tuple_gen = (tuple(itertools.islice(i, n)) + (d, ) for d in self._dimensions) # Generate dictionary by mapping schema keys to generated tuples return [{ k: v for k, v in zip(schema, t) } for t in tuple_gen]
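A round-trip sketch of the transcoder: assuming an instance built with dimensions ``('ntime', 'nchan')`` and a schema of ``('global_size', 'lower_extent', 'upper_extent')`` (names chosen for illustration, not taken from the snippet), ``encode`` flattens the per-dimension attributes into an int32 array and ``decode`` rebuilds one dict per dimension with its name appended:

.. code-block:: python

    from collections import namedtuple

    Dim = namedtuple("Dim", ["global_size", "lower_extent", "upper_extent"])
    cube_dims = {"ntime": Dim(100, 0, 10), "nchan": Dim(64, 0, 64)}

    descriptor = transcoder.encode(cube_dims)
    # array([100, 0, 10, 64, 0, 64], dtype=int32)

    print(transcoder.decode(descriptor))
    # [{'global_size': 100, 'lower_extent': 0, 'upper_extent': 10, 'name': 'ntime'},
    #  {'global_size': 64, 'lower_extent': 0, 'upper_extent': 64, 'name': 'nchan'}]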
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dl_cub(cub_url, cub_archive_name): """ Download cub archive from cub_url and store it in cub_archive_name """
with open(cub_archive_name, 'wb') as f: remote_file = urllib2.urlopen(cub_url) meta = remote_file.info() # The server may provide us with the size of the file. cl_header = meta.getheaders("Content-Length") remote_file_size = int(cl_header[0]) if len(cl_header) > 0 else None # Initialise variables local_file_size = 0 block_size = 128*1024 # Do the download while True: data = remote_file.read(block_size) if not data: break f.write(data) local_file_size += len(data) if (remote_file_size is not None and not local_file_size == remote_file_size): log.warn("Local file size '{}' " "does not match remote '{}'".format( local_file_size, remote_file_size)) remote_file.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sha_hash_file(filename): """ Compute the SHA1 hash of filename """
hash_sha = hashlib.sha1() with open(filename, 'rb') as f: for chunk in iter(lambda: f.read(1024*1024), b""): hash_sha.update(chunk) return hash_sha.hexdigest()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def install_cub(mb_inc_path): """ Downloads and installs cub into mb_inc_path """
cub_url = 'https://github.com/NVlabs/cub/archive/1.6.4.zip' cub_sha_hash = '0d5659200132c2576be0b3959383fa756de6105d' cub_version_str = 'Current release: v1.6.4 (12/06/2016)' cub_zip_file = 'cub.zip' cub_zip_dir = 'cub-1.6.4' cub_unzipped_path = os.path.join(mb_inc_path, cub_zip_dir) cub_new_unzipped_path = os.path.join(mb_inc_path, 'cub') cub_header = os.path.join(cub_new_unzipped_path, 'cub', 'cub.cuh') cub_readme = os.path.join(cub_new_unzipped_path, 'README.md' ) # Check for a reasonably valid install cub_installed, _ = is_cub_installed(cub_readme, cub_header, cub_version_str) if cub_installed: log.info("NVIDIA cub installation found " "at '{}'".format(cub_new_unzipped_path)) return log.info("No NVIDIA cub installation found") # Do we already have a valid cub zip file have_valid_cub_file = (os.path.exists(cub_zip_file) and os.path.isfile(cub_zip_file) and sha_hash_file(cub_zip_file) == cub_sha_hash) if have_valid_cub_file: log.info("Valid NVIDIA cub archive found '{}'".format(cub_zip_file)) # Download if we don't have a valid file else: log.info("Downloading cub archive '{}'".format(cub_url)) dl_cub(cub_url, cub_zip_file) cub_file_sha_hash = sha_hash_file(cub_zip_file) # Compare against our supplied hash if cub_sha_hash != cub_file_sha_hash: msg = ('Hash of file %s downloaded from %s ' 'is %s and does not match the expected ' 'hash of %s. Please manually download ' 'as per the README.md instructions.') % ( cub_zip_file, cub_url, cub_file_sha_hash, cub_sha_hash) raise InstallCubException(msg) # Unzip into montblanc/include/cub with zipfile.ZipFile(cub_zip_file, 'r') as zip_file: # Remove any existing installs shutil.rmtree(cub_unzipped_path, ignore_errors=True) shutil.rmtree(cub_new_unzipped_path, ignore_errors=True) # Unzip zip_file.extractall(mb_inc_path) # Rename. cub_unzipped_path is mb_inc_path/cub_zip_dir shutil.move(cub_unzipped_path, cub_new_unzipped_path) log.info("NVIDIA cub archive unzipped into '{}'".format( cub_new_unzipped_path)) there, reason = is_cub_installed(cub_readme, cub_header, cub_version_str) if not there: raise InstallCubException(reason)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_tensorflow_extension(nvcc_settings, device_info): """ Create an extension that builds the custom tensorflow ops """
import tensorflow as tf import glob use_cuda = (bool(nvcc_settings['cuda_available']) and tf.test.is_built_with_cuda()) # Source and includes source_path = os.path.join('montblanc', 'impl', 'rime', 'tensorflow', 'rime_ops') sources = glob.glob(os.path.join(source_path, '*.cpp')) # Header dependencies depends = glob.glob(os.path.join(source_path, '*.h')) # Include directories tf_inc = tf.sysconfig.get_include() include_dirs = [os.path.join('montblanc', 'include'), source_path] include_dirs += [tf_inc, os.path.join(tf_inc, "external", "nsync", "public")] # Libraries library_dirs = [tf.sysconfig.get_lib()] libraries = ['tensorflow_framework'] extra_link_args = ['-fPIC', '-fopenmp', '-g0'] # Macros define_macros = [ ('_MWAITXINTRIN_H_INCLUDED', None), ('_FORCE_INLINES', None), ('_GLIBCXX_USE_CXX11_ABI', 0)] # Common flags flags = ['-std=c++11'] gcc_flags = flags + ['-g0', '-fPIC', '-fopenmp', '-O2'] gcc_flags += ['-march=native', '-mtune=native'] nvcc_flags = flags + [] # Add cuda specific build information, if it is available if use_cuda: # CUDA source files sources += glob.glob(os.path.join(source_path, '*.cu')) # CUDA include directories include_dirs += nvcc_settings['include_dirs'] # CUDA header dependencies depends += glob.glob(os.path.join(source_path, '*.cuh')) # CUDA libraries library_dirs += nvcc_settings['library_dirs'] libraries += nvcc_settings['libraries'] # Flags nvcc_flags += ['-x', 'cu'] nvcc_flags += ['--compiler-options', '"-fPIC"'] # --gpu-architecture=sm_xy flags nvcc_flags += cuda_architecture_flags(device_info) # Ideally this would be set in define_macros, but # this must be set differently for gcc and nvcc nvcc_flags += ['-DGOOGLE_CUDA=%d' % int(use_cuda)] return Extension(tensorflow_extension_name, sources=sources, include_dirs=include_dirs, depends=depends, library_dirs=library_dirs, libraries=libraries, define_macros=define_macros, # this syntax is specific to this build system # we're only going to use certain compiler args with nvcc and not with gcc # the implementation of this trick is in customize_compiler_for_nvcc() above extra_compile_args={ 'gcc': gcc_flags, 'nvcc': nvcc_flags }, extra_link_args=extra_link_args, )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def updated_dimensions(self): """ Inform montblanc about dimension sizes """
return [("ntime", args.ntime), # Timesteps ("nchan", args.nchan), # Channels ("na", args.na), # Antenna ("npsrc", len(lm_coords))]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def point_lm(self, context): """ Supply point source lm coordinates to montblanc """
# Shape (npsrc, 2) (ls, us), _ = context.array_extents(context.name) return np.asarray(lm_coords[ls:us], dtype=context.dtype)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def point_stokes(self, context): """ Supply point source stokes parameters to montblanc """
# Shape (npsrc, ntime, 4) (ls, us), (lt, ut), (l, u) = context.array_extents(context.name) data = np.empty(context.shape, context.dtype) data[ls:us,:,l:u] = np.asarray(lm_stokes)[ls:us,None,:] return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def uvw(self, context): """ Supply UVW antenna coordinates to montblanc """
# Shape (ntime, na, 3) (lt, ut), (la, ua), (l, u) = context.array_extents(context.name) # Create empty UVW coordinates data = np.empty(context.shape, context.dtype) data[:,:,0] = np.arange(la+1, ua+1) # U = antenna index data[:,:,1] = 0 # V = 0 data[:,:,2] = 0 # W = 0 return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def nr_of_baselines(na, auto_correlations=False): """ Compute the number of baselines for the given number of antenna. Can specify whether auto-correlations should be taken into account """
m = (na-1) if auto_correlations is False else (na+1) return (na*m)//2
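As a quick worked example, 7 antennas give 7*6//2 = 21 baselines without auto-correlations and 7*8//2 = 28 with them:

.. code-block:: python

    assert nr_of_baselines(7) == 21
    assert nr_of_baselines(7, auto_correlations=True) == 28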
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def nr_of_antenna(nbl, auto_correlations=False): """ Compute the number of antenna for the given number of baselines. Can specify whether auto-correlations should be taken into account """
t = 1 if auto_correlations is False else -1 return int(t + math.sqrt(1 + 8*nbl)) // 2
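This inverts the previous formula; for instance sqrt(1 + 8*21) = 13 and (1 + 13) // 2 = 7, so the two helpers round-trip:

.. code-block:: python

    assert nr_of_antenna(21) == 7
    assert nr_of_antenna(28, auto_correlations=True) == 7
    assert nr_of_antenna(nr_of_baselines(64)) == 64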
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def array_bytes(shape, dtype): """ Estimates the memory in bytes required for an array of the supplied shape and dtype """
return np.product(shape)*np.dtype(dtype).itemsize
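For example, a (1000, 1000) array of complex128 visibilities needs 1000*1000*16 = 16,000,000 bytes:

.. code-block:: python

    import numpy as np

    assert array_bytes((1000, 1000), np.complex128) == 16000000
    assert array_bytes((10, 3), np.float64) == 240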
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def random_like(ary=None, shape=None, dtype=None): """ Returns a random array of the same shape and type as the supplied array argument, or the supplied shape and dtype """
if ary is not None: shape, dtype = ary.shape, ary.dtype elif shape is None or dtype is None: raise ValueError(( 'random_like(ary, shape, dtype) must be supplied ' 'with either an array argument, or the shape and dtype ' 'of the desired random array.')) if np.issubdtype(dtype, np.complexfloating): return (np.random.random(size=shape) + \ np.random.random(size=shape)*1j).astype(dtype) else: return np.random.random(size=shape).astype(dtype)
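Usage sketch: passing an array copies its shape and dtype, while shape and dtype can also be supplied explicitly; complex dtypes receive random real and imaginary parts:

.. code-block:: python

    import numpy as np

    vis = np.empty((10, 4), dtype=np.complex64)
    noise = random_like(vis)
    assert noise.shape == (10, 4) and noise.dtype == np.complex64

    gains = random_like(shape=(5, 2, 2), dtype=np.float32)
    assert gains.dtype == np.float32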