<SYSTEM_TASK:> Deactivate public report for this check. <END_TASK> <USER_TASK:> Description: def removePublicReport(self): """Deactivate public report for this check. Returns status message"""
response = self.pingdom.request('DELETE', 'reports.public/%s' % self.id)
return response.json()['message']
<SYSTEM_TASK:> Extract the dependencies from the bundle and its sub-bundles. <END_TASK> <USER_TASK:> Description: def extract_deps(bundles, log=None): """Extract the dependencies from the bundle and its sub-bundles."""
def _flatten(bundle):
    deps = []
    if hasattr(bundle, 'npm'):
        deps.append(bundle.npm)
    for content in bundle.contents:
        if isinstance(content, BundleBase):
            deps.extend(_flatten(content))
    return deps

flatten_deps = []
for bundle in bundles:
    flatten_deps.extend(_flatten(bundle))

packages = defaultdict(list)
for dep in flatten_deps:
    for pkg, version in dep.items():
        packages[pkg].append(version)

deps = {}
for package, versions in packages.items():
    deps[package] = semver.max_satisfying(versions, '*', True)
    if log and len(versions) > 1:
        log('Warn: {0} version {1} resolved to: {2}'.format(
            repr(package), versions, repr(deps[package])
        ))
return deps
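A brief usage sketch, assuming hypothetical webassets-style bundles whose npm attributes map package names to semver ranges (the bundle names are invented for illustration):

# Hypothetical bundles; each `npm` dict maps package -> semver range.
# extract_deps walks them (and nested bundles) and resolves duplicate
# packages with semver.max_satisfying.
deps = extract_deps([css_bundle, js_bundle], log=print)
# e.g. {'jquery': '1.9.1', 'bootstrap': '3.0.0'}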
<SYSTEM_TASK:> Make a semantic version from a Python PEP 440 version. <END_TASK> <USER_TASK:> Description: def make_semver(version_str): """Make a semantic version from a Python PEP 440 version. Semantic versioning does not handle post-releases. """
v = parse_version(version_str)
major = v._version.release[0]
try:
    minor = v._version.release[1]
except IndexError:
    minor = 0
try:
    patch = v._version.release[2]
except IndexError:
    patch = 0
prerelease = []
if v._version.pre:
    prerelease.append(''.join(str(x) for x in v._version.pre))
if v._version.dev:
    prerelease.append(''.join(str(x) for x in v._version.dev))
prerelease = '.'.join(prerelease)
# Create semver
version = '{0}.{1}.{2}'.format(major, minor, patch)
if prerelease:
    version += '-{0}'.format(prerelease)
if v.local:
    version += '+{0}'.format(v.local)
return version
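A few illustrative conversions, assuming parse_version comes from pkg_resources (as the _version attribute access suggests):

print(make_semver('2.1'))            # '2.1.0' (missing parts padded with 0)
print(make_semver('1.0.dev3'))       # '1.0.0-dev3' (dev release as prerelease)
print(make_semver('1.2.3a1+local'))  # '1.2.3-a1+local' (pre and local parts kept)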
<SYSTEM_TASK:> Calculate the max number of items that an option can store in the pool at any given time. <END_TASK> <USER_TASK:> Description: def get_max_size(pool, num_option, item_length): """ Calculate the max number of items that an option can store in the pool at any given time. This is to limit the pool size to POOL_SIZE. Args: pool (dict): answer pool num_option (int): total number of options available for the question item_length (int): the length of the item Returns: int: the max number of items that an option can have """
max_items = POOL_SIZE / item_length
# existing items plus the space reserved for the per-option minimum. If an
# option has 1 item, POOL_OPTION_MIN_SIZE - 1 spaces are still reserved for it.
existing = POOL_OPTION_MIN_SIZE * num_option + sum(
    [max(0, len(pool.get(i, {})) - POOL_OPTION_MIN_SIZE)
     for i in xrange(num_option)])
return int(max_items - existing)
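A worked example with hypothetical values for the module-level constants (POOL_SIZE and POOL_OPTION_MIN_SIZE are not shown in this listing):

# Suppose POOL_SIZE = 100, POOL_OPTION_MIN_SIZE = 5, item_length = 2,
# num_option = 4, and an empty pool:
#   max_items = 100 / 2 = 50
#   existing  = 5 * 4 + 0 = 20   (reserved minimum per option, no overflow)
#   result    = 50 - 20 = 30 more items may be stored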
<SYSTEM_TASK:> submit a student answer to the answer pool <END_TASK> <USER_TASK:> Description: def offer_answer(pool, answer, rationale, student_id, algo, options): """ submit a student answer to the answer pool The answer may be selected to stay in the pool depending on the selection algorithm Args: pool (dict): answer pool Answer pool format: { option1_index: { 'student_id': { can store algorithm specific info here }, ... } option2_index: ... } answer (int): the option the student selected rationale (str): the rationale text student_id (str): student identifier algo (str): the selection algorithm options (dict): the options available in the question Raises: UnknownChooseAnswerAlgorithm: when we don't know the algorithm """
if algo['name'] == 'simple':
    offer_simple(pool, answer, rationale, student_id, options)
elif algo['name'] == 'random':
    offer_random(pool, answer, rationale, student_id, options)
else:
    raise UnknownChooseAnswerAlgorithm()
<SYSTEM_TASK:> The simple selection algorithm. <END_TASK> <USER_TASK:> Description: def offer_simple(pool, answer, rationale, student_id, options): """ The simple selection algorithm. This algorithm randomly selects an answer from the pool to discard and adds the new one when the pool reaches the limit """
existing = pool.setdefault(answer, {})
if len(existing) >= get_max_size(pool, len(options), POOL_ITEM_LENGTH_SIMPLE):
    student_id_to_remove = random.choice(existing.keys())
    del existing[student_id_to_remove]
existing[student_id] = {}
pool[answer] = existing
<SYSTEM_TASK:> The random selection algorithm. The same as the simple algorithm <END_TASK> <USER_TASK:> Description: def offer_random(pool, answer, rationale, student_id, options): """ The random selection algorithm. The same as the simple algorithm """
offer_simple(pool, answer, rationale, student_id, options)
<SYSTEM_TASK:> This validator checks if the answers include all possible options <END_TASK> <USER_TASK:> Description: def validate_seeded_answers_simple(answers, options, algo): """ This validator checks if the answers include all possible options Args: answers (str): the answers to be checked options (dict): all options that should exist in the answers algo (str): selection algorithm Returns: None if everything is good. Otherwise, the missing option error message. """
seen_options = {}
for answer in answers:
    if answer:
        key = options[answer['answer']].get('text')
        if options[answer['answer']].get('image_url'):
            key += options[answer['answer']].get('image_url')
        seen_options.setdefault(key, 0)
        seen_options[key] += 1

missing_options = []
index = 1
for option in options:
    key = option.get('text') + option.get('image_url') \
        if option.get('image_url') else option.get('text')
    if option.get('text') != 'n/a':
        if seen_options.get(key, 0) == 0:
            missing_options.append(_('Option ') + str(index))
        index += 1

if missing_options:
    return {'seed_error': _('Missing option seed(s): ') + ', '.join(missing_options)}

return None
<SYSTEM_TASK:> Validate answers based on the selection algorithm <END_TASK> <USER_TASK:> Description: def validate_seeded_answers(answers, options, algo): """ Validate answers based on the selection algorithm This is called when the instructor sets up the tool and provides seeded answers to the question. This function validates whether the instructor provided enough seeds for a given algorithm, e.g. we require 1 seed for each option in the simple algorithm and at least 1 seed for the random algorithm. Otherwise, the first student won't be able to see any answers on the second step, where they are supposed to compare and review other students' answers. Args: answers (list): list of dict that contain seeded answers options (dict): all options that should exist in the answers algo (str): selection algorithm Returns: None if successful, otherwise error message """
if algo['name'] == 'simple':
    return validate_seeded_answers_simple(answers, options, algo)
elif algo['name'] == 'random':
    return validate_seeded_answers_random(answers)
else:
    raise UnknownChooseAnswerAlgorithm()
<SYSTEM_TASK:> Select other students' answers from the answer pool or seeded answers based on the selection algorithm <END_TASK> <USER_TASK:> Description: def get_other_answers(pool, seeded_answers, get_student_item_dict, algo, options): """ Select other students' answers from the answer pool or seeded answers based on the selection algorithm Args: pool (dict): answer pool, format: { option1_index: { student_id: { can store algorithm specific info here } }, option2_index: { student_id: { ... } } } seeded_answers (list): seeded answers from instructor [ {'answer': 0, 'rationale': 'rationale A'}, {'answer': 1, 'rationale': 'rationale B'}, ] get_student_item_dict (callable): get student item dict function to return student item dict algo (str): selection algorithm options (dict): answer options for the question Returns: dict: answers based on the selection algorithm """
# "#" means the number of responses returned should be the same as the number of options. num_responses = len(options) \ if 'num_responses' not in algo or algo['num_responses'] == "#" \ else int(algo['num_responses']) if algo['name'] == 'simple': return get_other_answers_simple(pool, seeded_answers, get_student_item_dict, num_responses) elif algo['name'] == 'random': return get_other_answers_random(pool, seeded_answers, get_student_item_dict, num_responses) else: raise UnknownChooseAnswerAlgorithm()
<SYSTEM_TASK:> Get answers from others with the simple algorithm, which picks one answer for each option. <END_TASK> <USER_TASK:> Description: def get_other_answers_simple(pool, seeded_answers, get_student_item_dict, num_responses): """ Get answers from others with the simple algorithm, which picks one answer for each option. Args: see `get_other_answers` num_responses (int): the number of responses to be returned. This value may not be respected if there are not enough answers to return Returns: dict: answers based on the selection algorithm """
ret = []
# clean up answers so that all keys are int
pool = {int(k): v for k, v in pool.items()}
total_in_pool = len(seeded_answers)
merged_pool = convert_seeded_answers(seeded_answers)
student_id = get_student_item_dict()['student_id']
# merge the dictionaries in the answer dictionary
for key in pool:
    total_in_pool += len(pool[key])
    # if student_id has a value, we assume the student just submitted an
    # answer, so remove it from the total number in the pool
    if student_id in pool[key].keys():
        total_in_pool -= 1
    if key in merged_pool:
        merged_pool[key].update(pool[key].items())
    else:
        merged_pool[key] = pool[key]

# remember which option+student_id is selected, so that we don't have
# duplicates in the result
selected = []

# loop until we have enough answers to return
while len(ret) < min(num_responses, total_in_pool):
    for option, students in merged_pool.items():
        student = student_id
        i = 0
        while (student == student_id or (str(option) + student) in selected) \
                and i < 100:
            # retry until we get a different student whose answer is not
            # already selected, giving up after 100 retries. We are supposed
            # to find a different student's answer or a seeded one in a few
            # tries, as we have at least one seeded answer for each option in
            # the algo; the counter only exists to break the loop if the
            # pool is too small.
            student = random.choice(students.keys())
            i += 1
        selected.append(str(option) + student)
        if student.startswith('seeded'):
            # seeded answer, get the rationale from local
            rationale = students[student]
        else:
            student_item = get_student_item_dict(student)
            submission = sas_api.get_answers_for_student(student_item)
            rationale = submission.get_rationale(0)
        ret.append({'option': option, 'rationale': rationale})
        # check if we have enough answers
        if len(ret) >= min(num_responses, total_in_pool):
            break

return {"answers": ret}
<SYSTEM_TASK:> Get answers from others with the random algorithm, which randomly selects answers from the pool. <END_TASK> <USER_TASK:> Description: def get_other_answers_random(pool, seeded_answers, get_student_item_dict, num_responses): """ Get answers from others with the random algorithm, which randomly selects answers from the pool. Student may get three answers for option 1 or one answer for option 1 and two answers for option 2. Args: see `get_other_answers` num_responses (int): the number of responses to be returned. This value may not be respected if there are not enough answers to return Returns: dict: answers based on the selection algorithm """
ret = []
# clean up answers so that all keys are int
pool = {int(k): v for k, v in pool.items()}
seeded = {'seeded' + str(index): answer
          for index, answer in enumerate(seeded_answers)}
merged_pool = seeded.keys()
for key in pool:
    merged_pool += pool[key].keys()

# shuffle
random.shuffle(merged_pool)

# get student identifier
student_id = get_student_item_dict()['student_id']

for student in merged_pool:
    if len(ret) >= num_responses:
        # have enough answers
        break
    elif student == student_id:
        # this is the student's answer so don't return it
        continue

    if student.startswith('seeded'):
        option = seeded[student]['answer']
        rationale = seeded[student]['rationale']
    else:
        student_item = get_student_item_dict(student)
        submission = sas_api.get_answers_for_student(student_item)
        rationale = submission.get_rationale(0)
        option = submission.get_vote(0)
    ret.append({'option': option, 'rationale': rationale})

return {"answers": ret}
<SYSTEM_TASK:> Convert seeded answers into the format that can be merged into student answers. <END_TASK> <USER_TASK:> Description: def convert_seeded_answers(answers): """ Convert seeded answers into the format that can be merged into student answers. Args: answers (list): seeded answers Returns: dict: seeded answers with student answers format: { 0: { 'seeded0': 'rationaleA' } 1: { 'seeded1': 'rationaleB' } } """
converted = {}
for index, answer in enumerate(answers):
    converted.setdefault(answer['answer'], {})
    converted[answer['answer']]['seeded' + str(index)] = answer['rationale']
return converted
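A usage example taken directly from the format given in the docstring:

seeds = [{'answer': 0, 'rationale': 'rationale A'},
         {'answer': 1, 'rationale': 'rationale B'}]
convert_seeded_answers(seeds)
# -> {0: {'seeded0': 'rationale A'}, 1: {'seeded1': 'rationale B'}}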
<SYSTEM_TASK:> Mark the unit of work as failed in the database and update the listener <END_TASK> <USER_TASK:> Description: def mark(self): """ Mark the unit of work as failed in the database and update the listener so as to skip it next time. """
self.reliableListener.lastRun = extime.Time()
BatchProcessingError(
    store=self.reliableListener.store,
    processor=self.reliableListener.processor,
    listener=self.reliableListener.listener,
    item=self.workUnit,
    error=self.failure.getErrorMessage())
<SYSTEM_TASK:> Add the given Item to the set which will be notified of Items <END_TASK> <USER_TASK:> Description: def addReliableListener(self, listener, style=iaxiom.LOCAL): """ Add the given Item to the set which will be notified of Items available for processing. Note: Each Item is processed synchronously. Adding too many listeners to a single batch processor will cause the L{step} method to block while it sends notification to each listener. @param listener: An Item instance which provides a C{processItem} method. @return: An Item representing L{listener}'s persistent tracking state. """
existing = self.store.findUnique(
    _ReliableListener,
    attributes.AND(_ReliableListener.processor == self,
                   _ReliableListener.listener == listener),
    default=None)
if existing is not None:
    return existing
for work in self.store.query(self.workUnitType,
                             sort=self.workUnitType.storeID.descending,
                             limit=1):
    forwardMark = work.storeID
    backwardMark = work.storeID + 1
    break
else:
    forwardMark = 0
    backwardMark = 0
if self.scheduled is None:
    self.scheduled = extime.Time()
    iaxiom.IScheduler(self.store).schedule(self, self.scheduled)
return _ReliableListener(store=self.store,
                         processor=self,
                         listener=listener,
                         forwardMark=forwardMark,
                         backwardMark=backwardMark,
                         style=style)
<SYSTEM_TASK:> Remove a previously added listener. <END_TASK> <USER_TASK:> Description: def removeReliableListener(self, listener): """ Remove a previously added listener. """
self.store.query(
    _ReliableListener,
    attributes.AND(_ReliableListener.processor == self,
                   _ReliableListener.listener == listener)).deleteFromStore()
self.store.query(
    BatchProcessingError,
    attributes.AND(BatchProcessingError.processor == self,
                   BatchProcessingError.listener == listener)).deleteFromStore()
<SYSTEM_TASK:> Return an iterable of the listeners which have been added to <END_TASK> <USER_TASK:> Description: def getReliableListeners(self): """ Return an iterable of the listeners which have been added to this batch processor. """
for rellist in self.store.query(_ReliableListener,
                                _ReliableListener.processor == self):
    yield rellist.listener
<SYSTEM_TASK:> Called to indicate that a new item of the type monitored by this batch <END_TASK> <USER_TASK:> Description: def itemAdded(self): """ Called to indicate that a new item of the type monitored by this batch processor is being added to the database. If this processor is not already scheduled to run, this will schedule it. It will also start the batch process if it is not yet running and there are any registered remote listeners. """
localCount = self.store.query(
    _ReliableListener,
    attributes.AND(_ReliableListener.processor == self,
                   _ReliableListener.style == iaxiom.LOCAL),
    limit=1).count()
remoteCount = self.store.query(
    _ReliableListener,
    attributes.AND(_ReliableListener.processor == self,
                   _ReliableListener.style == iaxiom.REMOTE),
    limit=1).count()

if localCount and self.scheduled is None:
    self.scheduled = extime.Time()
    iaxiom.IScheduler(self.store).schedule(self, self.scheduled)
if remoteCount:
    batchService = iaxiom.IBatchService(self.store, None)
    if batchService is not None:
        batchService.start()
<SYSTEM_TASK:> Invoke the given bound item method in the batch process. <END_TASK> <USER_TASK:> Description: def call(self, itemMethod): """ Invoke the given bound item method in the batch process. Return a Deferred which fires when the method has been invoked. """
item = itemMethod.im_self
method = itemMethod.im_func.func_name
return self.batchController.getProcess().addCallback(
    CallItemMethod(storepath=item.store.dbdir,
                   storeid=item.storeID,
                   method=method).do)
<SYSTEM_TASK:> Run tasks until stopService is called. <END_TASK> <USER_TASK:> Description: def processWhileRunning(self): """ Run tasks until stopService is called. """
work = self.step()
for result, more in work:
    yield result
    if not self.running:
        break
    if more:
        delay = 0.1
    else:
        delay = 10.0
    yield task.deferLater(reactor, delay, lambda: None)
<SYSTEM_TASK:> find every column in every sheet and put it in a new sheet or book. <END_TASK> <USER_TASK:> Description: def getcols(sheetMatch=None,colMatch="Decay"): """find every column in every sheet and put it in a new sheet or book."""
book = BOOK()
if sheetMatch is None:
    matchingSheets = book.sheetNames
    print('all %d sheets selected' % (len(matchingSheets)))
else:
    matchingSheets = [x for x in book.sheetNames if sheetMatch in x]
    print('%d of %d sheets selected matching "%s"' % (
        len(matchingSheets), len(book.sheetNames), sheetMatch))
matchingSheetsWithCol = []
for sheetName in matchingSheets:
    i = book.sheetNames.index(sheetName)  # index of that sheet
    for j, colName in enumerate(book.sheets[i].colDesc):
        if colMatch in colName:
            matchingSheetsWithCol.append((sheetName, j))
            break
    else:
        print(" no match in [%s]%s" % (book.bookName, sheetName))
print("%d of %d of those have your column" % (
    len(matchingSheetsWithCol), len(matchingSheets)))
for item in matchingSheetsWithCol:
    print(item, item[0], item[1])
<SYSTEM_TASK:> Return a dictionary representing the namespace which should be <END_TASK> <USER_TASK:> Description: def namespace(self): """ Return a dictionary representing the namespace which should be available to the user. """
self._ns = {
    'db': self.store,
    'store': store,
    'autocommit': False,
}
return self._ns
<SYSTEM_TASK:> Create a new account in the given store. <END_TASK> <USER_TASK:> Description: def addAccount(self, siteStore, username, domain, password): """ Create a new account in the given store. @param siteStore: A site Store to which login credentials will be added. @param username: Local part of the username for the credentials to add. @param domain: Domain part of the username for the credentials to add. @param password: Password for the credentials to add. @rtype: L{LoginAccount} @return: The added account. """
for ls in siteStore.query(userbase.LoginSystem):
    break
else:
    ls = self.installOn(siteStore)
try:
    acc = ls.addAccount(username, domain, password)
except userbase.DuplicateUser:
    raise usage.UsageError("An account by that name already exists.")
return acc
<SYSTEM_TASK:> Create some instances of a particular type in a store. <END_TASK> <USER_TASK:> Description: def createSomeItems(store, itemType, values, counter): """ Create some instances of a particular type in a store. """
for i in counter:
    itemType(store=store, **values)
<SYSTEM_TASK:> save the instance or create a new one.. <END_TASK> <USER_TASK:> Description: def save(self, commit=True): """save the instance or create a new one.."""
# walk through the document fields
for field_name, field in iter_valid_fields(self._meta):
    setattr(self.instance, field_name, self.cleaned_data.get(field_name))
if commit:
    self.instance.save()
return self.instance
<SYSTEM_TASK:> Collect all the items that should be deleted when an item or items <END_TASK> <USER_TASK:> Description: def dependentItems(store, tableClass, comparisonFactory): """ Collect all the items that should be deleted when an item or items of a particular item type are deleted. @param tableClass: An L{Item} subclass. @param comparisonFactory: A one-argument callable taking an attribute and returning an L{iaxiom.IComparison} describing the items to collect. @return: An iterable of items to delete. """
for cascadingAttr in (_cascadingDeletes.get(tableClass, []) +
                      _cascadingDeletes.get(None, [])):
    for cascadedItem in store.query(cascadingAttr.type,
                                    comparisonFactory(cascadingAttr)):
        yield cascadedItem
<SYSTEM_TASK:> Generate a dummy subclass of Item that will have the given attributes, <END_TASK> <USER_TASK:> Description: def declareLegacyItem(typeName, schemaVersion, attributes, dummyBases=()): """ Generate a dummy subclass of Item that will have the given attributes, and the base Item methods, but no methods of its own. This is for use with upgrading. @param typeName: a string, the Axiom TypeName to have attributes for. @param schemaVersion: an int, the (old) version of the schema this is a proxy for. @param attributes: a dict mapping {columnName: attr instance} describing the schema of C{typeName} at C{schemaVersion}. @param dummyBases: a sequence of 4-tuples of (baseTypeName, baseSchemaVersion, baseAttributes, baseBases) representing the dummy bases of this legacy class. """
if (typeName, schemaVersion) in _legacyTypes:
    return _legacyTypes[typeName, schemaVersion]
if dummyBases:
    realBases = [declareLegacyItem(*A) for A in dummyBases]
else:
    realBases = (Item,)
attributes = attributes.copy()
attributes['__module__'] = 'item_dummy'
attributes['__legacy__'] = True
attributes['typeName'] = typeName
attributes['schemaVersion'] = schemaVersion
result = type(str('DummyItem<%s,%d>' % (typeName, schemaVersion)),
              realBases, attributes)
assert result is not None, 'wtf, %r' % (type,)
_legacyTypes[(typeName, schemaVersion)] = result
return result
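A minimal sketch of declaring a legacy schema; the type name and attributes are invented, and text/integer stand in for the usual Axiom attribute classes:

from axiom.attributes import integer, text

# Hypothetical old 'person' schema at version 1.
oldPerson = declareLegacyItem(
    'person', 1,
    dict(name=text(), age=integer()))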
<SYSTEM_TASK:> Class decorator for indicating a powerup's powerup interfaces. <END_TASK> <USER_TASK:> Description: def empowerment(iface, priority=0): """ Class decorator for indicating a powerup's powerup interfaces. The class will also be declared as implementing the interface. @type iface: L{zope.interface.Interface} @param iface: The powerup interface. @type priority: int @param priority: The priority the powerup will be installed at. """
def _deco(cls):
    cls.powerupInterfaces = (
        tuple(getattr(cls, 'powerupInterfaces', ())) +
        ((iface, priority),))
    implementer(iface)(cls)
    return cls
return _deco
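A hypothetical powerup class using the decorator; IFoo and the attribute are assumptions for illustration:

@empowerment(IFoo, priority=10)
class FooPowerup(Item):
    """Declared to implement IFoo; installed on IFoo at priority 10."""
    dummy = integer()  # hypothetical attribute; Items need at least one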
<SYSTEM_TASK:> Remove a powerup. <END_TASK> <USER_TASK:> Description: def powerDown(self, powerup, interface=None): """ Remove a powerup. If no interface is specified, and the type of the object being installed has a "powerupInterfaces" attribute (containing either a sequence of interfaces, or a sequence of (interface, priority) tuples), the target will be powered down with this object on those interfaces. If this object has a "__getPowerupInterfaces__" method, it will be called with an iterable of (interface, priority) tuples. The iterable of (interface, priority) tuples it returns will then be uninstalled. (Note particularly that if powerups are added or removed to the collection described above between calls to powerUp and powerDown, more or fewer powerups will be removed than were installed.) """
if interface is None:
    for interface, priority in powerup._getPowerupInterfaces():
        self.powerDown(powerup, interface)
else:
    for cable in self.store.query(
            _PowerupConnector,
            AND(_PowerupConnector.item == self,
                _PowerupConnector.interface == unicode(qual(interface)),
                _PowerupConnector.powerup == powerup)):
        cable.deleteFromStore()
        return
    raise ValueError("Not powered up for %r with %r" % (interface, powerup))
<SYSTEM_TASK:> Return an iterator of the interfaces for which the given powerup is <END_TASK> <USER_TASK:> Description: def interfacesFor(self, powerup): """ Return an iterator of the interfaces for which the given powerup is installed on this object. This is not implemented for in-memory powerups. It will probably fail in an unpredictable, implementation-dependent way if used on one. """
pc = _PowerupConnector
for iface in self.store.query(pc,
                              AND(pc.item == self,
                                  pc.powerup == powerup)).getColumn('interface'):
    yield namedAny(iface)
<SYSTEM_TASK:> Collect powerup interfaces this object declares that it can be <END_TASK> <USER_TASK:> Description: def _getPowerupInterfaces(self): """ Collect powerup interfaces this object declares that it can be installed on. """
powerupInterfaces = getattr(self.__class__, "powerupInterfaces", ())
pifs = []
for x in powerupInterfaces:
    if isinstance(x, type(Interface)):
        # just an interface
        pifs.append((x, 0))
    else:
        # an interface and a priority
        pifs.append(x)
m = getattr(self, "__getPowerupInterfaces__", None)
if m is not None:
    pifs = m(pifs)
    try:
        pifs = [(i, p) for (i, p) in pifs]
    except ValueError:
        raise ValueError("return value from %r.__getPowerupInterfaces__"
                         " not an iterable of 2-tuples" % (self,))
return pifs
<SYSTEM_TASK:> Prepare each attribute in my schema for insertion into a given store, <END_TASK> <USER_TASK:> Description: def _schemaPrepareInsert(self, store): """ Prepare each attribute in my schema for insertion into a given store, either by upgrade or by creation. This makes sure all references point to this store and all relative paths point to this store's files directory. """
for name, atr in self.getSchema():
    atr.prepareInsert(self, store)
<SYSTEM_TASK:> Create and return a new instance from a row from the store. <END_TASK> <USER_TASK:> Description: def existingInStore(cls, store, storeID, attrs): """Create and return a new instance from a row from the store."""
self = cls.__new__(cls)
self.__justCreated = False
self.__subinit__(__store=store,
                 storeID=storeID,
                 __everInserted=True)
schema = self.getSchema()
assert len(schema) == len(attrs), "invalid number of attributes"
for data, (name, attr) in zip(attrs, schema):
    attr.loaded(self, data)
self.activate()
return self
<SYSTEM_TASK:> return all persistent class attributes <END_TASK> <USER_TASK:> Description: def getSchema(cls): """ return all persistent class attributes """
schema = []
for name, atr in cls.__attributes__:
    atr = atr.__get__(None, cls)
    if isinstance(atr, SQLAttribute):
        schema.append((name, atr))
cls.getSchema = staticmethod(lambda schema=schema: schema)
return schema
<SYSTEM_TASK:> Called after the database is brought into a consistent state with this <END_TASK> <USER_TASK:> Description: def committed(self): """ Called after the database is brought into a consistent state with this object. """
if self.__deleting:
    self.deleted()
    if not self.__legacy__:
        self.store.objectCache.uncache(self.storeID, self)
    self.__store = None
self.__justCreated = False
<SYSTEM_TASK:> Register a callable which can perform a schema upgrade between two <END_TASK> <USER_TASK:> Description: def registerUpgrader(upgrader, typeName, oldVersion, newVersion): """ Register a callable which can perform a schema upgrade between two particular versions. @param upgrader: A one-argument callable which will upgrade an object. It is invoked with an instance of the old version of the object. @param typeName: The database typename for which this is an upgrader. @param oldVersion: The version from which this will upgrade. @param newVersion: The version to which this will upgrade. This must be exactly one greater than C{oldVersion}. """
# assert (typeName, oldVersion, newVersion) not in _upgradeRegistry, "duplicate upgrader"
# ^ this makes the tests blow up so it's just disabled for now; perhaps we
# should have a specific test mode
# assert newVersion - oldVersion == 1, "read the doc string"
assert isinstance(typeName, str), "read the doc string"
_upgradeRegistry[typeName, oldVersion] = upgrader
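A sketch of registering a one-step upgrader; the 'person' type and its attribute are hypothetical, and upgradeVersion is the usual Axiom helper for copying an old item forward:

def upgradePerson1to2(old):
    # copy the surviving attribute; new attributes get their defaults
    return old.upgradeVersion('person', 1, 2, name=old.name)

registerUpgrader(upgradePerson1to2, 'person', 1, 2)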
<SYSTEM_TASK:> Does the given table have an explicit oid column? <END_TASK> <USER_TASK:> Description: def _hasExplicitOid(store, table): """ Does the given table have an explicit oid column? """
return any(info[1] == 'oid'
           for info in store.querySchemaSQL(
               'PRAGMA *DATABASE*.table_info({})'.format(table)))
<SYSTEM_TASK:> Upgrade a table to have an explicit oid. <END_TASK> <USER_TASK:> Description: def _upgradeTableOid(store, table, createTable, postCreate=lambda: None): """ Upgrade a table to have an explicit oid. Must be called in a transaction to avoid corrupting the database. """
if _hasExplicitOid(store, table):
    return
store.executeSchemaSQL(
    'ALTER TABLE *DATABASE*.{0} RENAME TO {0}_temp'.format(table))
createTable()
store.executeSchemaSQL(
    'INSERT INTO *DATABASE*.{0} '
    'SELECT oid, * FROM *DATABASE*.{0}_temp'.format(table))
store.executeSchemaSQL('DROP TABLE *DATABASE*.{0}_temp'.format(table))
postCreate()
<SYSTEM_TASK:> Upgrade the system tables to use explicit oid columns. <END_TASK> <USER_TASK:> Description: def upgradeSystemOid(store): """ Upgrade the system tables to use explicit oid columns. """
store.transact(
    _upgradeTableOid, store, 'axiom_types',
    lambda: store.executeSchemaSQL(CREATE_TYPES))
store.transact(
    _upgradeTableOid, store, 'axiom_objects',
    lambda: store.executeSchemaSQL(CREATE_OBJECTS),
    lambda: store.executeSchemaSQL(CREATE_OBJECTS_IDX))
<SYSTEM_TASK:> Upgrade a store to use explicit oid columns. <END_TASK> <USER_TASK:> Description: def upgradeExplicitOid(store): """ Upgrade a store to use explicit oid columns. This allows VACUUMing the database without corrupting it. This requires copying all of axiom_objects and axiom_types, as well as all item tables that have not yet been upgraded. Consider VACUUMing the database afterwards to reclaim space. """
upgradeSystemOid(store)
for typename, version in store.querySchemaSQL(LATEST_TYPES):
    cls = _typeNameToMostRecentClass[typename]
    if cls.schemaVersion != version:
        remaining = store.querySQL(
            'SELECT oid FROM {} LIMIT 1'.format(
                store._tableNameFor(typename, version)))
        if len(remaining) == 0:
            # Nothing to upgrade
            continue
        else:
            raise RuntimeError(
                '{}:{} not fully upgraded to {}'.format(
                    typename, version, cls.schemaVersion))
    store.transact(
        _upgradeTableOid, store,
        store._tableNameOnlyFor(typename, version),
        lambda: store._justCreateTable(cls),
        lambda: store._createIndexesFor(cls, []))
<SYSTEM_TASK:> Check that all of the accumulated old Item types have a way to get <END_TASK> <USER_TASK:> Description: def checkUpgradePaths(self): """ Check that all of the accumulated old Item types have a way to get from their current version to the latest version. @raise axiom.errors.NoUpgradePathAvailable: for any, and all, Items that do not have a valid upgrade path """
cantUpgradeErrors = []

for oldVersion in self._oldTypesRemaining:
    # We have to be able to get from oldVersion.schemaVersion to
    # the most recent type.
    currentType = _typeNameToMostRecentClass.get(
        oldVersion.typeName, None)

    if currentType is None:
        # There isn't a current version of this type; it's entirely
        # legacy, will be upgraded by deleting and replacing with
        # something else.
        continue

    typeInQuestion = oldVersion.typeName
    upgver = oldVersion.schemaVersion

    while upgver < currentType.schemaVersion:
        # Do we have enough of the schema present to upgrade?
        if (typeInQuestion, upgver) not in _upgradeRegistry:
            cantUpgradeErrors.append(
                "No upgrader present for %s (%s) from %d to %d" % (
                    typeInQuestion, qual(currentType), upgver,
                    upgver + 1))

        # Is there a type available for each upgrader version?
        if upgver + 1 != currentType.schemaVersion:
            if (typeInQuestion, upgver + 1) not in _legacyTypes:
                cantUpgradeErrors.append(
                    "Type schema required for upgrade missing:"
                    " %s version %d" % (
                        typeInQuestion, upgver + 1))
        upgver += 1

if cantUpgradeErrors:
    raise NoUpgradePathAvailable('\n '.join(cantUpgradeErrors))
<SYSTEM_TASK:> Upgrade a legacy item. <END_TASK> <USER_TASK:> Description: def upgradeItem(self, thisItem): """ Upgrade a legacy item. @raise axiom.errors.UpgraderRecursion: If the given item is already in the process of being upgraded. """
sid = thisItem.storeID
if sid in self._currentlyUpgrading:
    raise UpgraderRecursion()
self._currentlyUpgrading[sid] = thisItem
try:
    return upgradeAllTheWay(thisItem)
finally:
    self._currentlyUpgrading.pop(sid)
<SYSTEM_TASK:> Upgrade the entire store in batches, yielding after each batch. <END_TASK> <USER_TASK:> Description: def upgradeBatch(self, n): """ Upgrade the entire store in batches, yielding after each batch. @param n: Number of upgrades to perform per transaction @type n: C{int} @raise axiom.errors.ItemUpgradeError: if an item upgrade failed @return: A generator that yields after each batch upgrade. This needs to be consumed for upgrading to actually take place. """
store = self.store

def _doBatch(itemType):
    upgradedAnything = False

    for theItem in store.query(itemType, limit=n):
        upgradedAnything = True
        try:
            self.upgradeItem(theItem)
        except:
            f = Failure()
            raise ItemUpgradeError(
                f, theItem.storeID, itemType,
                _typeNameToMostRecentClass[itemType.typeName])

    return upgradedAnything

if self.upgradesPending:
    didAny = False

    while self._oldTypesRemaining:
        t0 = self._oldTypesRemaining[0]
        upgradedAnything = store.transact(_doBatch, t0)
        if not upgradedAnything:
            self._oldTypesRemaining.pop(0)
            if didAny:
                msg("%s finished upgrading %s" % (store.dbdir.path, qual(t0)))
            continue
        elif not didAny:
            didAny = True
            msg("%s beginning upgrade..." % (store.dbdir.path,))
        yield None

    if didAny:
        msg("%s completely upgraded." % (store.dbdir.path,))
<SYSTEM_TASK:> Obtains the lvm, vg_t and lv_t handle. Usually you would never need to use this <END_TASK> <USER_TASK:> Description: def open(self): """ Obtains the lvm, vg_t and lv_t handle. Usually you would never need to use this method unless you are doing operations using the ctypes function wrappers in conversion.py *Raises:* * HandleError """
self.vg.open()
self.__lvh = lvm_lv_from_uuid(self.vg.handle, self.uuid)
if not bool(self.__lvh):
    raise HandleError("Failed to initialize LV Handle.")
<SYSTEM_TASK:> Returns the logical volume name. <END_TASK> <USER_TASK:> Description: def name(self): """ Returns the logical volume name. """
self.open()
name = lvm_lv_get_name(self.__lvh)
self.close()
return name
<SYSTEM_TASK:> Returns True if the logical volume is active, False otherwise. <END_TASK> <USER_TASK:> Description: def is_active(self): """ Returns True if the logical volume is active, False otherwise. """
self.open()
active = lvm_lv_is_active(self.__lvh)
self.close()
return bool(active)
<SYSTEM_TASK:> Returns True if the logical volume is suspended, False otherwise. <END_TASK> <USER_TASK:> Description: def is_suspended(self): """ Returns True if the logical volume is suspended, False otherwise. """
self.open()
susp = lvm_lv_is_suspended(self.__lvh)
self.close()
return bool(susp)
<SYSTEM_TASK:> Returns the logical volume size in the given units. Default units are MiB. <END_TASK> <USER_TASK:> Description: def size(self, units="MiB"): """ Returns the logical volume size in the given units. Default units are MiB. *Args:* * units (str): Unit label ('MiB', 'GiB', etc...). Default is MiB. """
self.open()
size = lvm_lv_get_size(self.__lvh)
self.close()
return size_convert(size, units)
<SYSTEM_TASK:> Deactivates the logical volume. <END_TASK> <USER_TASK:> Description: def deactivate(self): """ Deactivates the logical volume. *Raises:* * HandleError """
self.open()
d = lvm_lv_deactivate(self.handle)
self.close()
if d != 0:
    raise CommitError("Failed to deactivate LV.")
<SYSTEM_TASK:> Obtains the lvm, vg_t and pv_t handle. Usually you would never need to use this <END_TASK> <USER_TASK:> Description: def open(self): """ Obtains the lvm, vg_t and pv_t handle. Usually you would never need to use this method unless you are doing operations using the ctypes function wrappers in conversion.py *Raises:* * HandleError """
self.vg.open()
self.__pvh = lvm_pv_from_uuid(self.vg.handle, self.uuid)
if not bool(self.__pvh):
    raise HandleError("Failed to initialize PV Handle.")
<SYSTEM_TASK:> Returns the physical volume device path. <END_TASK> <USER_TASK:> Description: def name(self): """ Returns the physical volume device path. """
self.open()
name = lvm_pv_get_name(self.handle)
self.close()
return name
<SYSTEM_TASK:> Returns the physical volume mda count. <END_TASK> <USER_TASK:> Description: def mda_count(self): """ Returns the physical volume mda count. """
self.open()
mda = lvm_pv_get_mda_count(self.handle)
self.close()
return mda
<SYSTEM_TASK:> Returns the physical volume size in the given units. Default units are MiB. <END_TASK> <USER_TASK:> Description: def size(self, units="MiB"): """ Returns the physical volume size in the given units. Default units are MiB. *Args:* * units (str): Unit label ('MiB', 'GiB', etc...). Default is MiB. """
self.open()
size = lvm_pv_get_size(self.handle)
self.close()
return size_convert(size, units)
<SYSTEM_TASK:> Returns the device size in the given units. Default units are MiB. <END_TASK> <USER_TASK:> Description: def dev_size(self, units="MiB"): """ Returns the device size in the given units. Default units are MiB. *Args:* * units (str): Unit label ('MiB', 'GiB', etc...). Default is MiB. """
self.open()
size = lvm_pv_get_dev_size(self.handle)
self.close()
return size_convert(size, units)
<SYSTEM_TASK:> Returns the free size in the given units. Default units are MiB. <END_TASK> <USER_TASK:> Description: def free(self, units="MiB"): """ Returns the free size in the given units. Default units are MiB. *Args:* * units (str): Unit label ('MiB', 'GiB', etc...). Default is MiB. """
self.open()
size = lvm_pv_get_free(self.handle)
self.close()
return size_convert(size, units)
<SYSTEM_TASK:> A wrapper function to validate formdata against mongoengine-field <END_TASK> <USER_TASK:> Description: def mongoengine_validate_wrapper(old_clean, new_clean): """ A wrapper function to validate formdata against mongoengine-field validator and raise a proper django.forms ValidationError if there are any problems. """
def inner_validate(value):
    value = old_clean(value)
    try:
        new_clean(value)
        return value
    except ValidationError, e:
        raise forms.ValidationError(e)
return inner_validate
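A hedged sketch of how the wrapper might be wired up, assuming a django form field and a mongoengine document field (the variable names are assumptions):

# Run the form field's own clean first, then the document field's
# validate, translating failures into form errors.
form_field.clean = mongoengine_validate_wrapper(
    form_field.clean, doc_field.validate)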
<SYSTEM_TASK:> walk through the available valid fields.. <END_TASK> <USER_TASK:> Description: def iter_valid_fields(meta): """walk through the available valid fields.."""
# fetch field configuration and always add the id_field as exclude
meta_fields = getattr(meta, 'fields', ())
meta_exclude = getattr(meta, 'exclude', ())
meta_exclude += (meta.document._meta.get('id_field'),)
# walk through meta_fields or through the document fields to keep
# meta_fields order in the form
if meta_fields:
    for field_name in meta_fields:
        field = meta.document._fields.get(field_name)
        if field:
            yield (field_name, field)
else:
    for field_name, field in meta.document._fields.iteritems():
        # skip excluded fields
        if field_name not in meta_exclude:
            yield (field_name, field)
<SYSTEM_TASK:> Remove this object from the target, as well as any dependencies <END_TASK> <USER_TASK:> Description: def uninstallFrom(self, target): """ Remove this object from the target, as well as any dependencies that it automatically installed which were not explicitly "pinned" by calling "install", and raising an exception if anything still depends on this. """
# did this class powerup on any interfaces? powerdown if so.
target.powerDown(self)

for dc in self.store.query(_DependencyConnector,
                           _DependencyConnector.target == target):
    if dc.installee is self:
        dc.deleteFromStore()

for item in installedUniqueRequirements(self, target):
    uninstallFrom(item, target)

callback = getattr(self, "uninstalled", None)
if callback is not None:
    callback()
<SYSTEM_TASK:> If this item is installed on another item, return the install <END_TASK> <USER_TASK:> Description: def installedOn(self): """ If this item is installed on another item, return the install target. Otherwise return None. """
try:
    return self.store.findUnique(
        _DependencyConnector,
        _DependencyConnector.installee == self).target
except ItemNotFound:
    return None
<SYSTEM_TASK:> Return an iterable of things installed on the target that <END_TASK> <USER_TASK:> Description: def installedDependents(self, target): """ Return an iterable of things installed on the target that require this item. """
for dc in self.store.query(_DependencyConnector,
                           _DependencyConnector.target == target):
    depends = dependentsOf(dc.installee.__class__)
    if self.__class__ in depends:
        yield dc.installee
<SYSTEM_TASK:> Return an iterable of things installed on the target that this item <END_TASK> <USER_TASK:> Description: def installedUniqueRequirements(self, target): """ Return an iterable of things installed on the target that this item requires and are not required by anything else. """
myDepends = dependentsOf(self.__class__)
# XXX optimize?
for dc in self.store.query(_DependencyConnector,
                           _DependencyConnector.target == target):
    if dc.installee is self:
        # we're checking all the others, not ourself
        continue
    depends = dependentsOf(dc.installee.__class__)
    if self.__class__ in depends:
        raise DependencyError(
            "%r cannot be uninstalled from %r, "
            "%r still depends on it" % (self, target, dc.installee))

    for cls in myDepends[:]:
        # If one of my dependencies is required by somebody
        # else, leave it alone
        if cls in depends:
            myDepends.remove(cls)

for dc in self.store.query(_DependencyConnector,
                           _DependencyConnector.target == target):
    if (dc.installee.__class__ in myDepends
            and not dc.explicitlyInstalled):
        yield dc.installee
<SYSTEM_TASK:> Return an iterable of things installed on the target that this <END_TASK> <USER_TASK:> Description: def installedRequirements(self, target): """ Return an iterable of things installed on the target that this item requires. """
myDepends = dependentsOf(self.__class__)
for dc in self.store.query(_DependencyConnector,
                           _DependencyConnector.target == target):
    if dc.installee.__class__ in myDepends:
        yield dc.installee
<SYSTEM_TASK:> Format a schema mismatch for human consumption. <END_TASK> <USER_TASK:> Description: def _diffSchema(diskSchema, memorySchema): """ Format a schema mismatch for human consumption. @param diskSchema: The on-disk schema. @param memorySchema: The in-memory schema. @rtype: L{bytes} @return: A description of the schema differences. """
diskSchema = set(diskSchema)
memorySchema = set(memorySchema)
diskOnly = diskSchema - memorySchema
memoryOnly = memorySchema - diskSchema
diff = []
if diskOnly:
    diff.append('Only on disk:')
    diff.extend(map(repr, diskOnly))
if memoryOnly:
    diff.append('Only in memory:')
    diff.extend(map(repr, memoryOnly))
return '\n'.join(diff)
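An illustrative call with made-up schema tuples:

print(_diffSchema([('name', 'TEXT')],
                  [('name', 'TEXT'), ('age', 'INTEGER')]))
# Only in memory:
# ('age', 'INTEGER')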
<SYSTEM_TASK:> Close this file and commit it to its permanent location. <END_TASK> <USER_TASK:> Description: def close(self): """ Close this file and commit it to its permanent location. @return: a Deferred which fires when the file has been moved (and backed up to tertiary storage, if necessary). """
now = time.time()
try:
    file.close(self)
    _mkdirIfNotExists(self._destpath.dirname())
    self.finalpath = self._destpath
    os.rename(self.name, self.finalpath.path)
    os.utime(self.finalpath.path, (now, now))
except:
    return defer.fail()
return defer.succeed(self.finalpath)
<SYSTEM_TASK:> Generate the SQL string which follows the "FROM" string and before the <END_TASK> <USER_TASK:> Description: def _computeFromClause(self, tables): """ Generate the SQL string which follows the "FROM" string and before the "WHERE" string in the final SQL statement. """
tableAliases = []
self.fromClauseParts = []
for table in tables:
    # The indirect calls to store.getTableName() will create the tables
    # if needed. (XXX That's bad, actually. They should get created
    # some other way if necessary. -exarkun)
    tableName = table.getTableName(self.store)
    tableAlias = table.getTableAlias(self.store, tuple(tableAliases))
    if tableAlias is None:
        self.fromClauseParts.append(tableName)
    else:
        tableAliases.append(tableAlias)
        self.fromClauseParts.append('%s AS %s' % (tableName, tableAlias))

self.sortClauseParts = []
for attr, direction in self.sort.orderColumns():
    assert direction in ('ASC', 'DESC'), "%r not in ASC,DESC" % (direction,)
    if attr.type not in tables:
        raise ValueError(
            "Ordering references type excluded from comparison")
    self.sortClauseParts.append(
        '%s %s' % (attr.getColumnName(self.store), direction))
<SYSTEM_TASK:> Return a generator which yields the massaged results of this query with <END_TASK> <USER_TASK:> Description: def _selectStuff(self, verb='SELECT'): """ Return a generator which yields the massaged results of this query with a particular SQL verb. For an attribute query, massaged results are of the type of that attribute. For an item query, they are items of the type the query is supposed to return. @param verb: a str containing the SQL verb to execute. This really must be some variant of 'SELECT', the only two currently implemented being 'SELECT' and 'SELECT DISTINCT'. """
sqlResults = self._runQuery(verb, self._queryTarget)
for row in sqlResults:
    yield self._massageData(row)
<SYSTEM_TASK:> This method is deprecated, a holdover from when queries were iterators, <END_TASK> <USER_TASK:> Description: def next(self): """ This method is deprecated, a holdover from when queries were iterators, rather than iterables. @return: one element of massaged data. """
if self._selfiter is None:
    warnings.warn(
        "Calling 'next' directly on a query is deprecated. "
        "Perhaps you want to use iter(query).next(), or something "
        "more expressive like store.findFirst or store.findOrCreate?",
        DeprecationWarning, stacklevel=2)
    self._selfiter = self.__iter__()
return self._selfiter.next()
<SYSTEM_TASK:> Split up the work of gathering a result set into multiple smaller <END_TASK> <USER_TASK:> Description: def paginate(self, pagesize=20): """ Split up the work of gathering a result set into multiple smaller 'pages', allowing very large queries to be iterated without blocking for long periods of time. While simply iterating C{paginate()} is very similar to iterating a query directly, using this method allows the work to obtain the results to be performed on demand, over a series of different transactions. @param pagesize: the number of results gathered in each chunk of work. (This is mostly for testing paginate's implementation.) @type pagesize: L{int} @return: an iterable which yields all the results of this query. """
sort = self.sort
oc = list(sort.orderColumns())
if not oc:
    # You can't have an unsorted pagination.
    sort = self.tableClass.storeID.ascending
    oc = list(sort.orderColumns())
if len(oc) != 1:
    raise RuntimeError("%d-column sorts not supported yet with paginate"
                       % (len(oc),))
sortColumn = oc[0][0]
if oc[0][1] == 'ASC':
    sortOp = operator.gt
else:
    sortOp = operator.lt
if _isColumnUnique(sortColumn):
    # This is the easy case. There is never a tie to be broken, so we
    # can just remember our last value and yield from there. Right now
    # this only happens when the column is a storeID, but hopefully in
    # the future we will have more of this.
    tiebreaker = None
else:
    tiebreaker = self.tableClass.storeID

tied = lambda a, b: (sortColumn.__get__(a) == sortColumn.__get__(b))

def _AND(a, b):
    if a is None:
        return b
    return attributes.AND(a, b)

results = list(self.store.query(self.tableClass, self.comparison,
                                sort=sort, limit=pagesize + 1))
while results:
    if len(results) == 1:
        # XXX TODO: reject 0 pagesize. If the length of the result set
        # is 1, there's no next result to test for a tie with, so we
        # must be at the end, and we should just yield the result and
        # finish.
        yield results[0]
        return
    for resultidx in range(len(results) - 1):
        # check for a tie.
        result = results[resultidx]
        nextResult = results[resultidx + 1]
        if tied(result, nextResult):
            # Yield any ties first, in the appropriate order.
            lastTieBreaker = tiebreaker.__get__(result)
            # Note that this query is _NOT_ limited: currently large
            # ties will generate arbitrarily large amounts of work.
            trq = self.store.query(
                self.tableClass,
                _AND(self.comparison,
                     sortColumn == sortColumn.__get__(result)))
            tiedResults = list(trq)
            tiedResults.sort(key=lambda rslt:
                             (sortColumn.__get__(result),
                              tiebreaker.__get__(result)))
            for result in tiedResults:
                yield result
            # re-start the query here ('result' is set to the
            # appropriate value by the inner loop)
            break
        else:
            yield result
    lastSortValue = sortColumn.__get__(result)  # hooray namespace pollution
    results = list(self.store.query(
        self.tableClass,
        _AND(self.comparison,
             sortOp(sortColumn, sortColumn.__get__(result))),
        sort=sort, limit=pagesize + 1))
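A usage sketch (FooItem is hypothetical); each page is fetched by a separate query, so very large result sets can be walked without one long blocking call:

for foo in store.query(FooItem).paginate(pagesize=100):
    process(foo)  # work happens between page fetches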
<SYSTEM_TASK:> Convert a row into an Item instance by loading cached items or <END_TASK> <USER_TASK:> Description: def _massageData(self, row): """ Convert a row into an Item instance by loading cached items or creating new ones based on query results. @param row: an n-tuple, where n is the number of columns specified by my item type. @return: an instance of the type specified by this query. """
result = self.store._loadedItem(self.tableClass, row[0], row[1:])
assert result.store is not None, "result %r has funky store" % (result,)
return result
<SYSTEM_TASK:> Delete all the Items which are found by this query. <END_TASK> <USER_TASK:> Description: def deleteFromStore(self): """ Delete all the Items which are found by this query. """
if (self.limit is None
        and not isinstance(self.sort, attributes.UnspecifiedOrdering)):
    # The ORDER BY is pointless here, and SQLite complains about it.
    return self.cloneQuery(sort=None).deleteFromStore()

# We can do this the fast way or the slow way.
# If there's a 'deleted' callback on the Item type or 'deleteFromStore'
# is overridden, we have to do it the slow way.
deletedOverridden = (
    self.tableClass.deleted.im_func is not item.Item.deleted.im_func)
deleteFromStoreOverridden = (
    self.tableClass.deleteFromStore.im_func is not
    item.Item.deleteFromStore.im_func)

if deletedOverridden or deleteFromStoreOverridden:
    for it in self:
        it.deleteFromStore()
else:
    # Find other item types whose instances need to be deleted
    # when items of the type in this query are deleted, and
    # remove them from the store.
    def itemsToDelete(attr):
        return attr.oneOf(self.getColumn("storeID"))

    if not item.allowDeletion(self.store, self.tableClass, itemsToDelete):
        raise errors.DeletionDisallowed(
            'Cannot delete item; '
            'has referents with whenDeleted == reference.DISALLOW')

    for it in item.dependentItems(self.store, self.tableClass, itemsToDelete):
        it.deleteFromStore()

    # actually run the DELETE for the items in this query.
    self._runQuery('DELETE', "")
<SYSTEM_TASK:> Convert a row into a tuple of Item instances, by slicing it <END_TASK> <USER_TASK:> Description: def _massageData(self, row): """ Convert a row into a tuple of Item instances, by slicing it according to the number of columns for each instance, and then proceeding as for ItemQuery._massageData. @param row: an n-tuple, where n is the total number of columns specified by all the item types in this query. @return: a tuple of instances of the types specified by this query. """
offset = 0
resultBits = []
for i, tableClass in enumerate(self.tableClass):
    numAttrs = self.schemaLengths[i]
    result = self.store._loadedItem(self.tableClass[i],
                                    row[offset],
                                    row[offset + 1:offset + numAttrs])
    assert result.store is not None, "result %r has funky store" % (result,)
    resultBits.append(result)
    offset += numAttrs
return tuple(resultBits)
<SYSTEM_TASK:> Clone the original query which this distinct query wraps, and return a new <END_TASK> <USER_TASK:> Description: def cloneQuery(self, limit=_noItem, sort=_noItem): """ Clone the original query which this distinct query wraps, and return a new wrapper around that clone. """
newq = self.query.cloneQuery(limit=limit, sort=sort)
return self.__class__(newq)
<SYSTEM_TASK:> Count the number of distinct results of the wrapped query. <END_TASK> <USER_TASK:> Description: def count(self): """ Count the number of distinct results of the wrapped query. @return: an L{int} representing the number of distinct results. """
if not self.query.store.autocommit:
    self.query.store.checkpoint()
target = ', '.join([
    tableClass.storeID.getColumnName(self.query.store)
    for tableClass in self.query.tableClass])
sql, args = self.query._sqlAndArgs('SELECT DISTINCT', target)
sql = 'SELECT COUNT(*) FROM (' + sql + ')'
result = self.query.store.querySQL(sql, args)
assert len(result) == 1, 'more than one result: %r' % (result,)
return result[0][0] or 0
<SYSTEM_TASK:> Return the sum of all the values returned by this query. If there are <END_TASK> <USER_TASK:> Description: def sum(self): """ Return the sum of all the values returned by this query. If there are no results, the sum is 0. Note: for non-numeric column types the result of this method will be nonsensical. @return: a number. """
res = self._runQuery('SELECT', 'SUM(%s)' % (self._queryTarget,)) or [(0,)]
assert len(res) == 1, "more than one result: %r" % (res,)
dbval = res[0][0] or 0
return self.attribute.outfilter(dbval, _FakeItemForFilter(self.store))
<SYSTEM_TASK:> Open a new file somewhere in this Store's file area. <END_TASK> <USER_TASK:> Description: def newFile(self, *path): """ Open a new file somewhere in this Store's file area. @param path: a sequence of path segments. @return: an L{AtomicFile}. """
assert len(path) > 0, "newFile requires a nonzero number of segments"
if self.dbdir is None:
    if self.filesdir is None:
        raise RuntimeError("This in-memory store has no file directory")
    else:
        tmpbase = self.filesdir
else:
    tmpbase = self.dbdir
tmpname = tmpbase.child('temp').child(str(tempCounter.next()) + ".tmp")
return AtomicFile(tmpname.path, self.newFilePath(*path))
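A short usage sketch; the path segments are invented, and close() (shown earlier) hands back a Deferred firing with the final path:

f = store.newFile('exports', 'report.txt')
f.write('hello')
d = f.close()  # atomically renames the temp file into place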
<SYSTEM_TASK:> Note that this database contains old versions of a particular type. <END_TASK> <USER_TASK:> Description: def _prepareOldVersionOf(self, typename, version, persistedSchema): """ Note that this database contains old versions of a particular type. Create the appropriate dummy item subclass and queue the type to be upgraded. @param typename: The I{typeName} associated with the schema for which to create a dummy item class. @param version: The I{schemaVersion} of the old version of the schema for which to create a dummy item class. @param persistedSchema: A mapping giving information about all schemas stored in the database, used to create the attributes of the dummy item class. """
appropriateSchema = persistedSchema[typename, version]
# create actual attribute objects
dummyAttributes = {}
for (attribute, sqlType, indexed, pythontype, docstring) in appropriateSchema:
    atr = pythontype(indexed=indexed, doc=docstring)
    dummyAttributes[attribute] = atr
dummyBases = []
oldType = declareLegacyItem(
    typename, version, dummyAttributes, dummyBases)
self._upgradeManager.queueTypeUpgrade(oldType)
return oldType
<SYSTEM_TASK:> Create multiple items in the store without loading <END_TASK> <USER_TASK:> Description: def batchInsert(self, itemType, itemAttributes, dataRows): """ Create multiple items in the store without loading corresponding Python objects into memory. The items' C{stored} callback will not be called. Example:: myData = [(37, u"Fred", u"Wichita"), (28, u"Jim", u"Fresno"), (43, u"Betty", u"Dubuque")] myStore.batchInsert(FooItem, [FooItem.age, FooItem.name, FooItem.city], myData) @param itemType: an Item subclass to create instances of. @param itemAttributes: an iterable of attributes on the Item subclass. @param dataRows: an iterable of iterables, each the same length as C{itemAttributes} and containing data corresponding to each attribute in it. @return: None. """
class FakeItem:
    pass

_NEEDS_DEFAULT = object()  # token for lookup failure

fakeOSelf = FakeItem()
fakeOSelf.store = self
sql = itemType._baseInsertSQL(self)
indices = {}
schema = [attr for (name, attr) in itemType.getSchema()]
for i, attr in enumerate(itemAttributes):
    indices[attr] = i
for row in dataRows:
    oid = self.store.executeSchemaSQL(
        _schema.CREATE_OBJECT, [self.store.getTypeID(itemType)])
    insertArgs = [oid]
    for attr in schema:
        i = indices.get(attr, _NEEDS_DEFAULT)
        if i is _NEEDS_DEFAULT:
            pyval = attr.default
        else:
            pyval = row[i]
        dbval = attr._convertPyval(fakeOSelf, pyval)
        insertArgs.append(dbval)
    self.executeSQL(sql, insertArgs)
<SYSTEM_TASK:> Retrieve the fully qualified name of the table holding items <END_TASK> <USER_TASK:> Description: def getTableName(self, tableClass): """ Retrieve the fully qualified name of the table holding items of a particular class in this store. If the table does not exist in the database, it will be created as a side-effect. @param tableClass: an Item subclass @raises axiom.errors.ItemClassesOnly: if an object other than a subclass of Item is passed. @return: a string """
if not (isinstance(tableClass, type) and issubclass(tableClass, item.Item)):
    raise errors.ItemClassesOnly("Only subclasses of Item have table names.")

if tableClass not in self.typeToTableNameCache:
    self.typeToTableNameCache[tableClass] = self._tableNameFor(
        tableClass.typeName, tableClass.schemaVersion)
    # make sure the table exists
    self.getTypeID(tableClass)
return self.typeToTableNameCache[tableClass]
<SYSTEM_TASK:> Retrieve the typeID associated with a particular table in the <END_TASK> <USER_TASK:> Description: def getTypeID(self, tableClass): """ Retrieve the typeID associated with a particular table in the in-database schema for this Store. A typeID is an opaque integer representing the Item subclass, and the associated table in this Store's SQLite database. @param tableClass: a subclass of Item @return: an integer """
key = (tableClass.typeName, tableClass.schemaVersion) if key in self.typenameAndVersionToID: return self.typenameAndVersionToID[key] return self.transact(self._maybeCreateTable, tableClass, key)
<SYSTEM_TASK:> Execute the table creation DDL for an Item subclass. <END_TASK> <USER_TASK:> Description: def _justCreateTable(self, tableClass): """ Execute the table creation DDL for an Item subclass. Indexes are *not* created. @type tableClass: type @param tableClass: an Item subclass """
sqlstr = [] sqlarg = [] # needs to be calculated including version tableName = self._tableNameFor(tableClass.typeName, tableClass.schemaVersion) sqlstr.append("CREATE TABLE %s (" % tableName) # The column is named "oid" instead of "storeID" for backwards # compatibility with the implicit oid/rowid column in old Stores. sqlarg.append("oid INTEGER PRIMARY KEY") for nam, atr in tableClass.getSchema(): sqlarg.append("\n%s %s" % (atr.getShortColumnName(self), atr.sqltype)) sqlstr.append(', '.join(sqlarg)) sqlstr.append(')') self.createSQL(''.join(sqlstr))
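For orientation, and heavily hedged since the real table name comes from _tableNameFor() and the column types from each attribute's sqltype, the emitted DDL for a hypothetical two-attribute item might resemble:

# Illustrative DDL only -- actual names and types depend on the schema:
# CREATE TABLE item_fooitem_v1 (
#     oid INTEGER PRIMARY KEY,
#     [age] INTEGER,
#     [name] TEXT
# )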
<SYSTEM_TASK:> Retrieve an item by its storeID, and return it. <END_TASK> <USER_TASK:> Description: def getItemByID(self, storeID, default=_noItem, autoUpgrade=True): """ Retrieve an item by its storeID, and return it. Note: most of the failure modes of this method are catastrophic and should not be handled by application code. The only one that application programmers should be concerned with is L{errors.ItemNotFound}. They are listed for educational purposes. @param storeID: an L{int} which identifies an item in this store. @param default: if passed, return this value rather than raising in the case where no Item is found. @raise TypeError: if storeID is not an integer. @raise UnknownItemType: if the storeID refers to an item row in the database, but the corresponding type information is not available to Python. @raise RuntimeError: if the found item's class version is higher than the current application is aware of. (In other words, if you have upgraded a database to a new schema and then attempt to open it with a previous version of the code.) @raise errors.ItemNotFound: if no item existed with the given storeID. @return: an Item, or the given default, if it was passed and no row corresponding to the given storeID can be located in the database. """
if not isinstance(storeID, (int, long)): raise TypeError("storeID *must* be an int or long, not %r" % ( type(storeID).__name__,)) if storeID == STORE_SELF_ID: return self try: return self.objectCache.get(storeID) except KeyError: pass log.msg(interface=iaxiom.IStatEvent, stat_cache_misses=1, key=storeID) results = self.querySchemaSQL(_schema.TYPEOF_QUERY, [storeID]) assert (len(results) in [1, 0]),\ "Database panic: more than one result for TYPEOF!" if results: typename, module, version = results[0] useMostRecent = False moreRecentAvailable = False # The schema may have changed since the last time I saw the # database. Let's look to see if this is suspiciously broken... if _typeIsTotallyUnknown(typename, version): # Another process may have created it - let's re-up the schema # and see what we get. self._startup() # OK, all the modules have been loaded now, everything # verified. if _typeIsTotallyUnknown(typename, version): # If there is STILL no inkling of it anywhere, we are # almost certainly boned. Let's tell the user in a # structured way, at least. raise errors.UnknownItemType( "cannot load unknown schema/version pair: %r %r - id: %r" % (typename, version, storeID)) if typename in _typeNameToMostRecentClass: moreRecentAvailable = True mostRecent = _typeNameToMostRecentClass[typename] if mostRecent.schemaVersion < version: raise RuntimeError("%s:%d - was found in the database and most recent %s is %d" % (typename, version, typename, mostRecent.schemaVersion)) if mostRecent.schemaVersion == version: useMostRecent = True if useMostRecent: T = mostRecent else: T = self.getOldVersionOf(typename, version) # for the moment we're going to assume no inheritance attrs = self.querySQL(T._baseSelectSQL(self), [storeID]) if len(attrs) == 0: if default is _noItem: raise errors.ItemNotFound( 'No results for known-to-be-good object') return default elif len(attrs) > 1: raise errors.DataIntegrityError( 'Too many results for {:d}'.format(storeID)) attrs = attrs[0] x = T.existingInStore(self, storeID, attrs) if moreRecentAvailable and (not useMostRecent) and autoUpgrade: # upgradeVersion will do caching as necessary, we don't have to # cache here. (It must, so that app code can safely call # upgradeVersion and get a consistent object out of it.) x = self.transact(self._upgradeManager.upgradeItem, x) elif not x.__legacy__: # We loaded the most recent version of an object self.objectCache.cache(storeID, x) return x if default is _noItem: raise errors.ItemNotFound(storeID) return default
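A minimal usage sketch (the storeID value and handler are illustrative):

# Prefer a default over catching errors.ItemNotFound when a miss is an
# expected case; 4321 and handleMissing are hypothetical.
item = store.getItemByID(4321, default=None)
if item is None:
    handleMissing()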
<SYSTEM_TASK:> For use with auto-committing statements such as CREATE TABLE or CREATE <END_TASK> <USER_TASK:> Description: def createSQL(self, sql, args=()): """ For use with auto-committing statements such as CREATE TABLE or CREATE INDEX. """
before = time.time() self._execSQL(sql, args) after = time.time() if after - before > 2.0: log.msg('Extremely long CREATE: %s' % (after - before,)) log.msg(sql)
<SYSTEM_TASK:> Run my runnable, and reschedule or delete myself based on its result. <END_TASK> <USER_TASK:> Description: def invokeRunnable(self): """ Run my runnable, and reschedule or delete myself based on its result. Must be run in a transaction. """
runnable = self.runnable if runnable is None: self.deleteFromStore() else: try: self.running = True newTime = runnable.run() finally: self.running = False self._rescheduleFromRun(newTime)
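The contract relied on here: run() returns either a Time at which to fire again, or None to unschedule. A hedged sketch of a conforming runnable (Poller and its attribute are hypothetical):

from datetime import timedelta
from epsilon.extime import Time
from axiom.item import Item
from axiom.attributes import integer

class Poller(Item):
    polls = integer(default=0)  # hypothetical attribute

    def run(self):
        self.polls += 1
        # Return a Time to reschedule, or None to stop recurring.
        return Time() + timedelta(seconds=60)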
<SYSTEM_TASK:> Remove the given item from the schedule. <END_TASK> <USER_TASK:> Description: def unscheduleFirst(self, runnable): """ Remove the given item from the schedule. If runnable is scheduled to run multiple times, only the temporally first is removed. """
for evt in self.store.query(TimedEvent, TimedEvent.runnable == runnable, sort=TimedEvent.time.ascending): evt.deleteFromStore() break
<SYSTEM_TASK:> Return an iterable of the times at which the given item is scheduled to <END_TASK> <USER_TASK:> Description: def scheduledTimes(self, runnable): """ Return an iterable of the times at which the given item is scheduled to run. """
events = self.store.query( TimedEvent, TimedEvent.runnable == runnable) return (event.time for event in events if not event.running)
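Usage sketch (scheduler and poller are illustrative names):

# Pending fire times come back as epsilon.extime.Time instances;
# events currently mid-run are filtered out, as above.
for when in scheduler.scheduledTimes(poller):
    print(when.asPOSIXTimestamp())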
<SYSTEM_TASK:> Start calling persistent timed events whose time has come. <END_TASK> <USER_TASK:> Description: def startService(self): """ Start calling persistent timed events whose time has come. """
super(_SiteScheduler, self).startService() self._transientSchedule(self.now(), self.now())
<SYSTEM_TASK:> If this service's store is attached to its parent, ask the parent to <END_TASK> <USER_TASK:> Description: def _transientSchedule(self, when, now): """ If this service's store is attached to its parent, ask the parent to schedule this substore to tick at the given time. @param when: The time at which to tick. @type when: L{epsilon.extime.Time} @param now: Present for signature compatibility with L{_SiteScheduler._transientSchedule}, but ignored otherwise. """
if self.store.parent is not None: subStore = self.store.parent.getItemByID(self.store.idInParent) hook = self.store.parent.findOrCreate( _SubSchedulerParentHook, subStore=subStore) hook._schedule(when)
<SYSTEM_TASK:> Remove the components in the site store for this SubScheduler. <END_TASK> <USER_TASK:> Description: def migrateDown(self): """ Remove the components in the site store for this SubScheduler. """
subStore = self.store.parent.getItemByID(self.store.idInParent) ssph = self.store.parent.findUnique( _SubSchedulerParentHook, _SubSchedulerParentHook.subStore == subStore, default=None) if ssph is not None: te = self.store.parent.findUnique(TimedEvent, TimedEvent.runnable == ssph, default=None) if te is not None: te.deleteFromStore() ssph.deleteFromStore()
<SYSTEM_TASK:> Recreate the hooks in the site store to trigger this SubScheduler. <END_TASK> <USER_TASK:> Description: def migrateUp(self): """ Recreate the hooks in the site store to trigger this SubScheduler. """
te = self.store.findFirst(TimedEvent, sort=TimedEvent.time.descending) if te is not None: self._transientSchedule(te.time, None)
<SYSTEM_TASK:> Tokenize a text, index a term matrix, and build out a graph. <END_TASK> <USER_TASK:> Description: def build_graph(path, term_depth=1000, skim_depth=10, d_weights=False, **kwargs): """ Tokenize a text, index a term matrix, and build out a graph. Args: path (str): The file path. term_depth (int): Consider the N most frequent terms. skim_depth (int): Connect each word to the N closest siblings. d_weights (bool): If true, give "close" nodes low weights. Returns: Skimmer: The indexed graph. """
# Tokenize text. click.echo('\nTokenizing text...') t = Text.from_file(path) click.echo('Extracted %d tokens' % len(t.tokens)) m = Matrix() # Index the term matrix. click.echo('\nIndexing terms:') m.index(t, t.most_frequent_terms(term_depth), **kwargs) g = Skimmer() # Construct the network. click.echo('\nGenerating graph:') g.build(t, m, skim_depth, d_weights) return g
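A hedged usage sketch (the path and depths are illustrative):

# 'war-and-peace.txt' is a hypothetical corpus file.
g = build_graph('war-and-peace.txt', term_depth=500, skim_depth=5)
g.draw_spring()  # render it with the helper documented next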
<SYSTEM_TASK:> Render a spring layout. <END_TASK> <USER_TASK:> Description: def draw_spring(self, **kwargs): """ Render a spring layout. """
nx.draw_spring( self.graph, with_labels=True, font_size=10, edge_color='#dddddd', node_size=0, **kwargs ) plt.show()
<SYSTEM_TASK:> 1. For each term in the passed matrix, score its KDE similarity with <END_TASK> <USER_TASK:> Description: def build(self, text, matrix, skim_depth=10, d_weights=False): """ 1. For each term in the passed matrix, score its KDE similarity with all other indexed terms. 2. With the ordered stack of similarities in hand, skim off the top X pairs and add them as edges. Args: text (Text): The source text instance. matrix (Matrix): An indexed term matrix. skim_depth (int): The number of siblings for each term. d_weights (bool): If true, give "close" words low edge weights. """
for anchor in bar(matrix.keys): n1 = text.unstem(anchor) # Heaviest pair scores: pairs = matrix.anchored_pairs(anchor).items() for term, weight in list(pairs)[:skim_depth]: # If edges represent distance, use the complement of the raw # score, so that similar words are connected by "short" edges. if d_weights: weight = 1-weight n2 = text.unstem(term) # NetworkX does not handle numpy types when writing graphml, # so we cast the weight to a regular float. self.graph.add_edge(n1, n2, weight=float(weight))
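One consequence worth making concrete: with d_weights=True, similar terms sit on low-weight ("short") edges, so weighted shortest paths trace chains of semantically close words. A sketch (the node labels are illustrative):

import networkx as nx

# 'war' and 'peace' are hypothetical unstemmed node labels in g.graph.
path = nx.shortest_path(g.graph, source='war', target='peace', weight='weight')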
<SYSTEM_TASK:> Gets a named section from the configuration source. <END_TASK> <USER_TASK:> Description: def get_settings(self, section=None, defaults=None): """ Gets a named section from the configuration source. :param section: a :class:`str` representing the section you want to retrieve from the configuration source. If ``None`` this will fall back to the :attr:`plaster.PlasterURL.fragment`. :param defaults: a :class:`dict` that will get passed to :class:`configparser.ConfigParser` and will populate the ``DEFAULT`` section. :return: A :class:`plaster_pastedeploy.ConfigDict` of key/value pairs. """
# This is a partial reimplementation of # ``paste.deploy.loadwsgi.ConfigLoader:get_context`` which supports # "set" and "get" options and filters out any other globals section = self._maybe_get_default_name(section) if self.filepath is None: return {} parser = self._get_parser(defaults) defaults = parser.defaults() try: raw_items = parser.items(section) except NoSectionError: return {} local_conf = OrderedDict() get_from_globals = {} for option, value in raw_items: if option.startswith("set "): name = option[4:].strip() defaults[name] = value elif option.startswith("get "): name = option[4:].strip() get_from_globals[name] = value # insert a value into local_conf to preserve the order local_conf[name] = None else: # annoyingly pastedeploy filters out all defaults unless # "get foo" is used to pull it in if option in defaults: continue local_conf[option] = value for option, global_option in get_from_globals.items(): local_conf[option] = defaults[global_option] return ConfigDict(local_conf, defaults, self)
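A hedged usage sketch via the plaster front end (file and section names are illustrative):

import plaster

# 'development.ini' and 'app:main' are hypothetical.
settings = plaster.get_settings('development.ini', section='app:main')
print(settings.get('sqlalchemy.url'))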
<SYSTEM_TASK:> Reads the configuration source and finds and loads a WSGI <END_TASK> <USER_TASK:> Description: def get_wsgi_app(self, name=None, defaults=None): """ Reads the configuration source and finds and loads a WSGI application defined by the entry with name ``name`` per the PasteDeploy configuration format and loading mechanism. :param name: The named WSGI app to find, load and return. Defaults to ``None`` which becomes ``main`` inside :func:`paste.deploy.loadapp`. :param defaults: The ``global_conf`` that will be used during app instantiation. :return: A WSGI application. """
name = self._maybe_get_default_name(name) defaults = self._get_defaults(defaults) return loadapp( self.pastedeploy_spec, name=name, relative_to=self.relative_to, global_conf=defaults, )
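Sketch of loading and serving an app through this loader (the URI is illustrative; get_wsgi_server is documented below):

import plaster

# 'development.ini' is hypothetical; protocols narrows loader lookup.
loader = plaster.get_loader('development.ini', protocols=['wsgi'])
app = loader.get_wsgi_app()        # section 'main' by default
server = loader.get_wsgi_server()
server(app)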
<SYSTEM_TASK:> Reads the configuration source and finds and loads a WSGI server <END_TASK> <USER_TASK:> Description: def get_wsgi_server(self, name=None, defaults=None): """ Reads the configuration source and finds and loads a WSGI server defined by the server entry with the name ``name`` per the PasteDeploy configuration format and loading mechanism. :param name: The named WSGI server to find, load and return. Defaults to ``None`` which becomes ``main`` inside :func:`paste.deploy.loadserver`. :param defaults: The ``global_conf`` that will be used during server instantiation. :return: A WSGI server runner callable which accepts a WSGI app. """
name = self._maybe_get_default_name(name) defaults = self._get_defaults(defaults) return loadserver( self.pastedeploy_spec, name=name, relative_to=self.relative_to, global_conf=defaults, )
<SYSTEM_TASK:> Reads the configuration source and finds and loads a WSGI filter <END_TASK> <USER_TASK:> Description: def get_wsgi_filter(self, name=None, defaults=None): """Reads the configuration source and finds and loads a WSGI filter defined by the filter entry with the name ``name`` per the PasteDeploy configuration format and loading mechanism. :param name: The named WSGI filter to find, load and return. Defaults to ``None`` which becomes ``main`` inside :func:`paste.deploy.loadfilter`. :param defaults: The ``global_conf`` that will be used during filter instantiation. :return: A callable that can filter a WSGI application. """
name = self._maybe_get_default_name(name) defaults = self._get_defaults(defaults) return loadfilter( self.pastedeploy_spec, name=name, relative_to=self.relative_to, global_conf=defaults, )
<SYSTEM_TASK:> Checks a name and determines whether to use the default name. <END_TASK> <USER_TASK:> Description: def _maybe_get_default_name(self, name): """Checks a name and determines whether to use the default name. :param name: The current name to check. :return: Either None or a :class:`str` representing the name. """
if name is None and self.uri.fragment: name = self.uri.fragment return name
<SYSTEM_TASK:> Set cursor position on the color corresponding to the hue value. <END_TASK> <USER_TASK:> Description: def set(self, hue): """Set cursor position on the color corresponding to the hue value."""
x = hue / 360. * self.winfo_width() self.coords('cursor', x, 0, x, self.winfo_height()) self._variable.set(hue)
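The inverse mapping a click binding might use, assuming the same linear pixel-to-hue scale (_on_click is a hypothetical handler):

def _on_click(self, event):
    # Clamp to the widget, then invert the x = hue/360 * width mapping.
    x = min(max(event.x, 0), self.winfo_width())
    self.set(360. * x / self.winfo_width())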
<SYSTEM_TASK:> Returns the type of the page with that name. <END_TASK> <USER_TASK:> Description: def getPageType(name,number=False): """Returns the type of the page with that name. If that name doesn't exist, None is returned. Args: name (str): name of the page to get the type of number (bool): if True, return the numeric type code as a string (e.g., a graph is "3") if False, return words where appropriate (e.g., "graph") Returns: string describing the type of object the page is """
if not name in pageNames(): return None pageType=PyOrigin.Pages(name).GetType() if number: return str(pageType) if pageType==1: return "matrix" if pageType==2: return "book" if pageType==3: return "graph" if pageType==4: return "layout" if pageType==5: return "notes"
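Usage sketch (the page names are illustrative):

# "Graph1" and "Book1" are hypothetical page names.
if getPageType("Graph1") == "graph":
    print("Graph1 is a graph window")
print(getPageType("Book1", number=True))  # "2", returned as a string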
<SYSTEM_TASK:> Prints every page in the project to the console. <END_TASK> <USER_TASK:> Description: def listEverything(matching=False): """Prints every page in the project to the console. Args: matching (str, optional): if given, only return names with this string in it """
pages=pageNames() if matching: pages=[x for x in pages if matching in x] for i,page in enumerate(pages): pages[i]="%s%s (%s)"%(pageFolder(page),page,getPageType(page)) print("\n".join(sorted(pages)))