<SYSTEM_TASK:> Extract a bag-of-words for a corpus of Twitter lists pertaining to a Twitter user. <END_TASK> <USER_TASK:> Description: def user_twitter_list_bag_of_words(twitter_list_corpus, sent_tokenize, _treebank_word_tokenize, tagger, lemmatizer, lemmatize, stopset, first_cap_re, all_cap_re, digits_punctuation_whitespace_re, pos_set): """ Extract a bag-of-words for a corpus of Twitter lists pertaining to a Twitter user.
Inputs: - twitter_list_corpus: A python list of Twitter lists in json format.
- lemmatize: A string containing one of the following: "porter", "snowball" or "wordnet".
- The remaining arguments are preconfigured NLP resources: tokenizers, a POS tagger, a lemmatizer, a stopword set, compiled regular expressions and a part-of-speech set.
Output: - bag_of_words: A bag-of-words in python dictionary format.
- lemma_to_keywordbag_total: Aggregated python dictionary that maps stems/lemmas to original topic keywords. """
# Extract a bag-of-words from a list of Twitter lists. # May result in empty sets list_of_keyword_sets, list_of_lemma_to_keywordbags = clean_list_of_twitter_list(twitter_list_corpus, sent_tokenize, _treebank_word_tokenize, tagger, lemmatizer, lemmatize, stopset, first_cap_re, all_cap_re, digits_punctuation_whitespace_re, pos_set) # Reduce keyword sets. bag_of_words = reduce_list_of_bags_of_words(list_of_keyword_sets) # Reduce lemma to keywordbag maps. lemma_to_keywordbag_total = defaultdict(lambda: defaultdict(int)) for lemma_to_keywordbag in list_of_lemma_to_keywordbags: for lemma, keywordbag in lemma_to_keywordbag.items(): for keyword, multiplicity in keywordbag.items(): lemma_to_keywordbag_total[lemma][keyword] += multiplicity return bag_of_words, lemma_to_keywordbag_total
<SYSTEM_TASK:> Returns a generator of n-length chunks of an input iterable, with appropriate padding at the end. <END_TASK> <USER_TASK:> Description: def grouper(iterable, n, pad_value=None): """ Returns a generator of n-length chunks of an input iterable, with appropriate padding at the end.
Example: grouper('abcdefg', 3, 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x')
Inputs: - iterable: The source iterable that needs to be chunkified.
- n: The size of the chunks.
- pad_value: The value with which the last chunk will be padded.
Output: - chunk_gen: A generator of n-length chunks of an input iterable. """
chunk_gen = (chunk for chunk in zip_longest(*[iter(iterable)]*n, fillvalue=pad_value)) return chunk_gen
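A quick usage sketch of grouper as defined above (it relies on itertools.zip_longest being imported in the module):

from itertools import zip_longest  # required by the body above

list(grouper('abcdefg', 3, pad_value='x'))
# -> [('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'x', 'x')]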
<SYSTEM_TASK:> A python generator that yields 100-length sub-list chunks. <END_TASK> <USER_TASK:> Description: def chunks(iterable, n): """ A python generator that yields n-length sub-list chunks (intended for 100-id Twitter API batches).
Inputs: - iterable: The input list that is to be separated in chunks.
- n: The chunk size; should be set to 100, unless the Twitter API changes.
Yields: - sub_list: List chunks of length at most n. """
for i in np.arange(0, len(iterable), n): yield iterable[i:i+n]
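A short usage sketch for chunks, matching the docstring's Twitter use case (assumes numpy is imported as np, which the body above requires):

user_ids = list(range(250))
batches = list(chunks(user_ids, 100))
# len(batches) == 3; the final batch holds the remaining 50 ids, with no
# padding (unlike grouper above, which pads the last chunk).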
<SYSTEM_TASK:> A generator of n-length chunks of an input iterable <END_TASK> <USER_TASK:> Description: def split_every(iterable, n): # TODO: Remove this, or make it return a generator. """ A generator of n-length chunks of an input iterable """
i = iter(iterable) piece = list(islice(i, n)) while piece: yield piece piece = list(islice(i, n))
<SYSTEM_TASK:> Tries to figure out which type of property value should be merged and <END_TASK> <USER_TASK:> Description: def merge_properties(item_properties, prop_name, merge_value): """ Tries to figure out which type of property value should be merged and invokes the right function. Returns the new properties if the merge was successful, otherwise False. """
existing_value = item_properties.get(prop_name, None) if not existing_value: # A node without existing values for the property item_properties[prop_name] = merge_value else: if type(merge_value) is int or type(merge_value) is str: item_properties[prop_name] = existing_value + merge_value elif type(merge_value) is list: item_properties[prop_name] = merge_list(existing_value, merge_value) else: return False return item_properties
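A brief usage sketch of merge_properties under the rules above (merge_list is assumed to be the sibling helper the body calls):

props = {'count': 3, 'tags': ['a']}
merge_properties(props, 'count', 2)      # ints are summed: props['count'] == 5
merge_properties(props, 'tags', ['b'])   # lists are delegated to merge_list
merge_properties(props, 'label', 'x')    # a missing key is simply set
merge_properties(props, 'count', {})     # unsupported type for an existing key -> False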
<SYSTEM_TASK:> Fetch census estimates from table. <END_TASK> <USER_TASK:> Description: def fetch_state_data(self, states): """ Fetch census estimates from table. """
print("Fetching census data") for table in CensusTable.objects.all(): api = self.get_series(table.series) for variable in table.variables.all(): estimate = "{}_{}".format(table.code, variable.code) print( ">> Fetching {} {} {}".format( table.year, table.series, estimate ) ) for state in tqdm(states): self.get_state_estimates_by_state( api=api, table=table, variable=variable, estimate=estimate, state=state, ) self.get_county_estimates_by_state( api=api, table=table, variable=variable, estimate=estimate, state=state, ) self.get_district_estimates_by_state( api=api, table=table, variable=variable, estimate=estimate, state=state, )
<SYSTEM_TASK:> Returns True if there is at least one annotation by a given name, otherwise False. <END_TASK> <USER_TASK:> Description: def has(self, name): """ Returns True if there is at least one annotation by a given name, otherwise False. """
for a in self.all_annotations: if a.name == name: return True return False
<SYSTEM_TASK:> Get the first annotation by a given name. <END_TASK> <USER_TASK:> Description: def get_first(self, name): """ Get the first annotation by a given name. """
for a in self.all_annotations: if a.name == name: return a return None
<SYSTEM_TASK:> Get all the annotations with a given name. <END_TASK> <USER_TASK:> Description: def get_all(self, name): """ Get all the annotations with a given name. """
return [annot for annot in self.all_annotations if annot.name == name]
<SYSTEM_TASK:> Return the first value of a particular param by name if it exists, otherwise the default value. <END_TASK> <USER_TASK:> Description: def first_value_of(self, name, default_value = None): """ Return the first value of a particular param by name if it exists, otherwise the default value. """
vals = self.values_of(name)
        if vals is None:
            return default_value
        if type(vals) is list:
            # Guard against an empty list before indexing.
            return vals[0] if vals else default_value
        return vals
<SYSTEM_TASK:> Returns the packages used for HaTeMiLe for Python. <END_TASK> <USER_TASK:> Description: def get_packages(): """ Returns the packages used for HaTeMiLe for Python. :return: The packages used for HaTeMiLe for Python. :rtype: list(str) """
packages = find_packages(exclude=['tests']) packages.append('') packages.append('js') packages.append(LOCALES_DIRECTORY) for directory in os.listdir(LOCALES_DIRECTORY): packages.append(LOCALES_DIRECTORY + '.' + directory) return packages
<SYSTEM_TASK:> Returns the packages with static files of HaTeMiLe for Python. <END_TASK> <USER_TASK:> Description: def get_package_data(): """ Returns the packages with static files of HaTeMiLe for Python. :return: The packages with static files of HaTeMiLe for Python. :rtype: dict(str, list(str)) """
package_data = { '': ['*.xml'], 'js': ['*.js'], LOCALES_DIRECTORY: ['*'] } for directory in os.listdir(LOCALES_DIRECTORY): package_data[LOCALES_DIRECTORY + '.' + directory] = ['*.json'] return package_data
<SYSTEM_TASK:> Returns the content of 'requirements.txt' in a list. <END_TASK> <USER_TASK:> Description: def get_requirements(): """ Returns the content of 'requirements.txt' in a list. :return: The content of 'requirements.txt'. :rtype: list(str) """
requirements = [] with open( os.path.join(BASE_DIRECTORY, 'requirements.txt'), 'r', encoding='utf-8' ) as requirements_file: lines = requirements_file.readlines() for line in lines: requirements.append(line.strip()) return requirements
<SYSTEM_TASK:> Easy way to query by session id <END_TASK> <USER_TASK:> Description: def where_session_id(cls, session_id): """ Easy way to query by session id """
try: session = cls.query.filter_by(session_id=session_id).one() return session except (NoResultFound, MultipleResultsFound): return None
<SYSTEM_TASK:> Count sessions with user_id <END_TASK> <USER_TASK:> Description: def count(cls, user_id): """ Count sessions with user_id """
return cls.query.with_entities( cls.user_id).filter_by(user_id=user_id).count()
<SYSTEM_TASK:> Returns the current code branch <END_TASK> <USER_TASK:> Description: def get_branch(): """ Returns the current code branch """
if os.getenv('GIT_BRANCH'): # Travis branch = os.getenv('GIT_BRANCH') elif os.getenv('BRANCH_NAME'): # Jenkins 2 branch = os.getenv('BRANCH_NAME') else: branch = check_output( "git rev-parse --abbrev-ref HEAD".split(" ") ).decode('utf-8').strip() return branch.replace("/", "_")
<SYSTEM_TASK:> Returns the current code version <END_TASK> <USER_TASK:> Description: def get_version(): """ Returns the current code version """
try: return check_output( "git describe --tags".split(" ") ).decode('utf-8').strip() except CalledProcessError: return check_output( "git rev-parse --short HEAD".split(" ") ).decode('utf-8').strip()
<SYSTEM_TASK:> Returns the sha of the last completed jenkins build for this project. <END_TASK> <USER_TASK:> Description: def jenkins_last_build_sha(): """ Returns the sha of the last completed jenkins build for this project. Expects JOB_URL in environment """
job_url = os.getenv('JOB_URL')
    job_json_url = "{0}/api/json".format(job_url)
    response = urllib.urlopen(job_json_url)
    job_data = json.loads(response.read())

    last_completed_build_url = job_data['lastCompletedBuild']['url']
    last_complete_build_json_url = "{0}/api/json".format(last_completed_build_url)

    response = urllib.urlopen(last_complete_build_json_url)
    last_completed_build = json.loads(response.read())

    # The build JSON is a dict, not a list; the SCM data lives in one of the
    # entries of its "actions" list, so find the entry that carries it.
    for action in last_completed_build['actions']:
        if 'lastBuiltRevision' in action:
            return action['lastBuiltRevision']['SHA1']
<SYSTEM_TASK:> Returns a list of the files changed between two commits <END_TASK> <USER_TASK:> Description: def get_changed_files_from(old_commit_sha, new_commit_sha): """ Returns a list of the files changed between two commits """
# diff-tree prints one path per line; split so the return value is the
    # list that the docstring promises.
    return check_output(
        "git diff-tree --no-commit-id --name-only -r {0}..{1}".format(
            old_commit_sha, new_commit_sha
        ).split(" ")
    ).decode('utf-8').strip().splitlines()
<SYSTEM_TASK:> A generator that opens a file containing many json tweets and yields all the tweets contained inside. <END_TASK> <USER_TASK:> Description: def extract_snow_tweets_from_file_generator(json_file_path): """ A generator that opens a file containing many json tweets and yields all the tweets contained inside. Input: - json_file_path: The path of a json file containing a tweet in each line. Yields: - tweet: A tweet in python dictionary (json) format. """
with open(json_file_path, "r", encoding="utf-8") as fp: for file_line in fp: tweet = json.loads(file_line) yield tweet
<SYSTEM_TASK:> A generator that returns all SNOW tweets stored on disk. <END_TASK> <USER_TASK:> Description: def extract_all_snow_tweets_from_disk_generator(json_folder_path): """ A generator that returns all SNOW tweets stored on disk.
Input: - json_folder_path: The path of the folder containing the raw data.
Yields: - tweet: A tweet in python dictionary (json) format. """
# Get a generator with all file paths in the folder json_file_path_generator = (json_folder_path + "/" + name for name in os.listdir(json_folder_path)) for path in json_file_path_generator: for tweet in extract_snow_tweets_from_file_generator(path): yield tweet
<SYSTEM_TASK:> Saves file f to full_path and sets permissions. <END_TASK> <USER_TASK:> Description: def save_file(f, full_path): """ Saves file f to full_path and sets directory/file permissions. """
make_dirs_for_file_path(full_path, mode=dju_settings.DJU_IMG_CHMOD_DIR) with open(full_path, 'wb') as t: f.seek(0) while True: buf = f.read(dju_settings.DJU_IMG_RW_FILE_BUFFER_SIZE) if not buf: break t.write(buf) os.chmod(full_path, dju_settings.DJU_IMG_CHMOD_FILE)
<SYSTEM_TASK:> Checks if img_id has real file on filesystem. <END_TASK> <USER_TASK:> Description: def is_img_id_exists(img_id): """ Checks if img_id has real file on filesystem. """
main_rel_path = get_relative_path_from_img_id(img_id) main_path = media_path(main_rel_path) return os.path.isfile(main_path)
<SYSTEM_TASK:> Checks if img_id is valid. <END_TASK> <USER_TASK:> Description: def is_img_id_valid(img_id): """ Checks if img_id is valid. """
# The fourth positional argument of re.sub is `count`, not `flags`,
    # so the flag must be passed by keyword.
    t = re.sub(r'[^a-z0-9_:\-\.]', '', img_id, flags=re.IGNORECASE)
    t = re.sub(r'\.+', '.', t)
    if img_id != t or img_id.count(':') != 1:
        return False
    profile, base_name = img_id.split(':', 1)
    if not profile or not base_name:
        return False
    try:
        get_profile_configs(profile)
    except ValueError:
        return False
    return True
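A minimal demonstration of why the flags fix above matters: passing re.IGNORECASE (whose integer value is 2) positionally is interpreted as the count argument, silently capping the substitution at two replacements:

import re

re.sub(r'[a-z]', '', 'abcdef', re.IGNORECASE)        # 'cdef': only 2 chars removed
re.sub(r'[a-z]', '', 'abcdef', flags=re.IGNORECASE)  # '': all chars removed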
<SYSTEM_TASK:> Remove tmp prefix from file path or url. <END_TASK> <USER_TASK:> Description: def remove_tmp_prefix_from_file_path(file_path): """ Remove tmp prefix from file path or url. """
path, filename = os.path.split(file_path) return os.path.join(path, remove_tmp_prefix_from_filename(filename)).replace('\\', '/')
<SYSTEM_TASK:> Removes tmp prefix from filename and rename main and variant files. <END_TASK> <USER_TASK:> Description: def make_permalink(img_id): """ Removes tmp prefix from filename and rename main and variant files. Returns img_id without tmp prefix. """
profile, filename = img_id.split(':', 1)
    new_img_id = profile + ':' + remove_tmp_prefix_from_filename(filename)
    urls = get_files_by_img_id(img_id)
    if urls is None:
        return urls
    move_list = {(urls['main'], remove_tmp_prefix_from_file_path(urls['main']))}
    # .items() works on both Python 2.7 and 3; .iteritems() is Python 2 only.
    for var_label, var_file_path in urls['variants'].items():
        move_list.add((var_file_path, remove_tmp_prefix_from_file_path(var_file_path)))
    for file_path_from, file_path_to in move_list:
        os.rename(media_path(file_path_from), media_path(file_path_to))
    return new_img_id
<SYSTEM_TASK:> Unified method to make request to the Github API <END_TASK> <USER_TASK:> Description: def request(self, method, url, **kwargs): """ Unified method to make request to the Github API :param method: HTTP Method to use :param url: URL to reach :param kwargs: dictionary of arguments (params for URL parameters, data for post/put data) :return: Response """
if "data" in kwargs: kwargs["data"] = json.dumps(kwargs["data"]) kwargs["headers"] = { 'Content-Type': 'application/json', 'Authorization': 'token %s' % self.__token__, } req = make_request( method, url, **kwargs ) self.logger.debug( "Request::{}::{}".format(method, url), extra={ "request": kwargs, "response": {"headers": req.headers, "code": req.status_code, "data": req.content} } ) return req
<SYSTEM_TASK:> Decide the name of the default branch given the file and the configuration <END_TASK> <USER_TASK:> Description: def default_branch(self, file): """ Decide the name of the default branch given the file and the configuration :param file: File with informations about it :return: Branch Name """
if isinstance(self.__default_branch__, str): return self.__default_branch__ elif self.__default_branch__ == GithubProxy.DEFAULT_BRANCH.NO: return self.master_upstream else: return file.sha[:8]
<SYSTEM_TASK:> Initialize the application and register the blueprint <END_TASK> <USER_TASK:> Description: def init_app(self, app): """ Initialize the application and register the blueprint :param app: Flask Application :return: Blueprint of the current nemo app :rtype: flask.Blueprint """
self.app = app
        self.__blueprint__ = Blueprint(
            self.__name__,
            self.__name__,
            url_prefix=self.__prefix__,
        )

        for url, name, methods in self.__urls__:
            self.blueprint.add_url_rule(
                url,
                view_func=getattr(self, name),
                endpoint=name.replace("r_", ""),
                methods=methods
            )

        # register_blueprint returns None, so do not rebind self.app to its result.
        self.app.register_blueprint(self.blueprint)

        return self.blueprint
<SYSTEM_TASK:> Create a new file on github <END_TASK> <USER_TASK:> Description: def put(self, file): """ Create a new file on github :param file: File to create :return: File or self.ProxyError """
input_ = { "message": file.logs, "author": file.author.dict(), "content": file.base64, "branch": file.branch } uri = "{api}/repos/{origin}/contents/{path}".format( api=self.github_api_url, origin=self.origin, path=file.path ) data = self.request("PUT", uri, data=input_) if data.status_code == 201: file.pushed = True return file else: decoded_data = json.loads(data.content.decode("utf-8")) return self.ProxyError( data.status_code, (decoded_data, "message"), step="put", context={ "uri": uri, "params": input_ } )
<SYSTEM_TASK:> Check on github if a file exists <END_TASK> <USER_TASK:> Description: def get(self, file): """ Check on github if a file exists :param file: File to check status of :return: File with new information, including blob, or Error :rtype: File or self.ProxyError """
uri = "{api}/repos/{origin}/contents/{path}".format( api=self.github_api_url, origin=self.origin, path=file.path ) params = { "ref": file.branch } data = self.request("GET", uri, params=params) # We update the file blob because it exists and we need it for update if data.status_code == 200: data = json.loads(data.content.decode("utf-8")) file.blob = data["sha"] elif data.status_code == 404: pass else: decoded_data = json.loads(data.content.decode("utf-8")) return self.ProxyError( data.status_code, (decoded_data, "message"), step="get", context={ "uri": uri, "params": params } ) return file
<SYSTEM_TASK:> Make an update query on Github API for given file <END_TASK> <USER_TASK:> Description: def update(self, file): """ Make an update query on Github API for given file :param file: File to update, with its content :return: File with new information, including success (or Error) """
params = { "message": file.logs, "author": file.author.dict(), "content": file.base64, "sha": file.blob, "branch": file.branch } uri = "{api}/repos/{origin}/contents/{path}".format( api=self.github_api_url, origin=self.origin, path=file.path ) data = self.request("PUT", uri, data=params) if data.status_code == 200: file.pushed = True return file else: reply = json.loads(data.content.decode("utf-8")) return self.ProxyError( data.status_code, (reply, "message"), step="update", context={ "uri": uri, "params": params } )
<SYSTEM_TASK:> Create a pull request <END_TASK> <USER_TASK:> Description: def pull_request(self, file): """ Create a pull request :param file: File to push through pull request :return: URL of the PullRequest or Proxy Error """
uri = "{api}/repos/{upstream}/pulls".format( api=self.github_api_url, upstream=self.upstream, path=file.path ) params = { "title": "[Proxy] {message}".format(message=file.logs), "body": "", "head": "{origin}:{branch}".format(origin=self.origin.split("/")[0], branch=file.branch), "base": self.master_upstream } data = self.request("POST", uri, data=params) if data.status_code == 201: return json.loads(data.content.decode("utf-8"))["html_url"] else: reply = json.loads(data.content.decode("utf-8")) return self.ProxyError( data.status_code, reply["message"], step="pull_request", context={ "uri": uri, "params": params } )
<SYSTEM_TASK:> Check if a reference exists <END_TASK> <USER_TASK:> Description: def get_ref(self, branch, origin=None): """ Check if a reference exists :param branch: The branch to check if it exists :return: Sha of the branch if it exists, False if it does not exist, self.ProxyError if it went wrong """
if not origin: origin = self.origin uri = "{api}/repos/{origin}/git/refs/heads/{branch}".format( api=self.github_api_url, origin=origin, branch=branch ) data = self.request("GET", uri) if data.status_code == 200: data = json.loads(data.content.decode("utf-8")) if isinstance(data, list): # No addresses matches, we get search results which stars with {branch} return False # Otherwise, we get one record return data["object"]["sha"] elif data.status_code == 404: return False else: decoded_data = json.loads(data.content.decode("utf-8")) return self.ProxyError( data.status_code, (decoded_data, "message"), step="get_ref", context={ "uri": uri } )
<SYSTEM_TASK:> Make a branch on github <END_TASK> <USER_TASK:> Description: def make_ref(self, branch): """ Make a branch on github :param branch: Name of the branch to create :return: Sha of the branch or self.ProxyError """
master_sha = self.get_ref(self.master_upstream) if not isinstance(master_sha, str): return self.ProxyError( 404, "The default branch from which to checkout is either not available or does not exist", step="make_ref" ) params = { "ref": "refs/heads/{branch}".format(branch=branch), "sha": master_sha } uri = "{api}/repos/{origin}/git/refs".format( api=self.github_api_url, origin=self.origin ) data = self.request("POST", uri, data=params) if data.status_code == 201: data = json.loads(data.content.decode("utf-8")) return data["object"]["sha"] else: decoded_data = json.loads(data.content.decode("utf-8")) return self.ProxyError( data.status_code, (decoded_data, "message"), step="make_ref", context={ "uri": uri, "params": params } )
<SYSTEM_TASK:> Check sent sha against the salted hash of the content <END_TASK> <USER_TASK:> Description: def check_sha(self, sha, content): """ Check sent sha against the salted hash of the content :param sha: SHA sent through fproxy-secure-hash header :param content: Base 64 encoded Content :return: Boolean indicating equality """
rightful_sha = sha256(bytes("{}{}".format(content, self.secret), "utf-8")).hexdigest() return sha == rightful_sha
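A client-side sketch of producing the fproxy-secure-hash header that check_sha verifies; the secret value here is hypothetical and must match the proxy's configured secret:

from hashlib import sha256

content = 'PGRpdj5oZWxsbzwvZGl2Pg=='  # base64-encoded payload, as sent in the request body
secret = 'shared-secret'              # hypothetical shared secret
header_value = sha256(bytes('{}{}'.format(content, secret), 'utf-8')).hexdigest()
# send header_value as the fproxy-secure-hash request header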
<SYSTEM_TASK:> Patch reference on the origin master branch <END_TASK> <USER_TASK:> Description: def patch_ref(self, sha): """ Patch reference on the origin master branch :param sha: Sha to use for the branch :return: Status of success :rtype: str or self.ProxyError """
uri = "{api}/repos/{origin}/git/refs/heads/{branch}".format( api=self.github_api_url, origin=self.origin, branch=self.master_fork ) data = { "sha": sha, "force": True } reply = self.request( "PATCH", uri, data=data ) if reply.status_code == 200: dic = json.loads(reply.content.decode("utf-8")) return dic["object"]["sha"] else: dic = json.loads(reply.content.decode("utf-8")) return self.ProxyError( reply.status_code, (dic, "message"), step="patch", context={ "uri": uri, "data": data } )
<SYSTEM_TASK:> Function which receives the data from Perseids <END_TASK> <USER_TASK:> Description: def r_receive(self, filename): """ Function which receives the data from Perseids - Check the branch does not exist - Make the branch if needed - Receive PUT from Perseids - Check if content exist - Update/Create content - Open Pull Request - Return PR link to Perseids It can take a "branch" URI parameter for the name of the branch :param filename: Path for the file :return: JSON Response with status_code 201 if successful. """
########################################### # Retrieving data ########################################### content = request.data.decode("utf-8") # Content checking if not content: error = self.ProxyError(300, "Content is missing") return error.response() author_name = request.args.get("author_name", self.default_author.name) author_email = request.args.get("author_email", self.default_author.email) author = Author(author_name, author_email) date = request.args.get("date", datetime.datetime.now().date().isoformat()) logs = request.args.get("logs", "{} updated {}".format(author.name, filename)) self.logger.info("Receiving query from {}".format(author_name), extra={"IP": request.remote_addr}) ########################################### # Checking data security ########################################### secure_sha = None if "fproxy-secure-hash" in request.headers: secure_sha = request.headers["fproxy-secure-hash"] if not secure_sha or not self.check_sha(secure_sha, content): error = self.ProxyError(300, "Hash does not correspond with content") return error.response() ########################################### # Setting up data ########################################### file = File( path=filename, content=content, author=author, date=date, logs=logs ) file.branch = request.args.get("branch", self.default_branch(file)) ########################################### # Ensuring branch exists ########################################### branch_status = self.get_ref(file.branch) if isinstance(branch_status, self.ProxyError): # If we have an error from github API return branch_status.response() elif not branch_status: # If it does not exist # We create a branch branch_status = self.make_ref(file.branch) # If branch creation did not work if isinstance(branch_status, self.ProxyError): return branch_status.response() ########################################### # Pushing files ########################################### # Check if file exists # It feeds file.blob parameter, which tells us the sha of the file if it exists file = self.get(file) if isinstance(file, self.ProxyError): # If we have an error from github API return file.response() # If it has a blob set up, it means we can update given file if file.blob: file = self.update(file) # Otherwise, we create it else: file = self.put(file) if isinstance(file, self.ProxyError): return file.response() ########################################### # Making pull request ########################################### pr_url = self.pull_request(file) if isinstance(pr_url, self.ProxyError): return pr_url.response() reply = { "status": "success", "message": "The workflow was well applied", "pr_url": pr_url } data = jsonify(reply) data.status_code = 201 return data
<SYSTEM_TASK:> Updates a fork Master <END_TASK> <USER_TASK:> Description: def r_update(self): """ Updates a fork Master - Check the ref of the origin repository - Patch reference of fork repository - Return status to Perseids :return: JSON Response with status_code 201 if successful. """
# Getting Master Branch
        upstream = self.get_ref(self.master_upstream, origin=self.upstream)
        if isinstance(upstream, bool):
            # Use self.ProxyError, consistent with the rest of the class.
            return (self.ProxyError(
                404,
                "Upstream Master branch '{0}' does not exist".format(self.master_upstream),
                step="get_upstream_ref"
            )).response()
        elif isinstance(upstream, self.ProxyError):
            return upstream.response()

        # Patching
        new_sha = self.patch_ref(upstream)
        if isinstance(new_sha, self.ProxyError):
            return new_sha.response()

        self.logger.info("Updated repository {} to sha {}".format(self.origin, new_sha), extra={"former_sha": upstream})
        return jsonify({
            "status": "success",
            "commit": new_sha
        })
<SYSTEM_TASK:> delete by user_id <END_TASK> <USER_TASK:> Description: def delete_where_user_id(cls, user_id): """ Delete by user_id """
result = cls.where_user_id(user_id) if result is None: return None result.delete() return True
<SYSTEM_TASK:> Will eventually load information for Apple_Boot volume. <END_TASK> <USER_TASK:> Description: def load(self, filename, offset): """Will eventually load information for Apple_Boot volume. Not yet implemented"""
try: self.offset = offset # self.fd = open(filename, 'rb') # self.fd.close() except IOError as e: print(e)
<SYSTEM_TASK:> shortcut for resolving from root container <END_TASK> <USER_TASK:> Description: def resolve(accessor: hexdi.core.clstype) -> __gentype__.T: """ shortcut for resolving from root container :param accessor: accessor for resolving object :return: resolved object of requested type """
return hexdi.core.get_root_container().resolve(accessor=accessor)
<SYSTEM_TASK:> shortcut for bind_type on root container <END_TASK> <USER_TASK:> Description: def bind_type(type_to_bind: hexdi.core.restype, accessor: hexdi.core.clstype, lifetime_manager: hexdi.core.ltype): """ shortcut for bind_type on root container :param type_to_bind: type that will be resolved by accessor :param accessor: accessor for resolving object :param lifetime_manager: type of lifetime manager for this binding """
hexdi.core.get_root_container().bind_type(type_to_bind, accessor, lifetime_manager)
<SYSTEM_TASK:> shortcut for bind_type with PermanentLifeTimeManager on root container <END_TASK> <USER_TASK:> Description: def bind_permanent(type_to_bind: hexdi.core.restype, accessor: hexdi.core.clstype): """ shortcut for bind_type with PermanentLifeTimeManager on root container :param type_to_bind: type that will be resolved by accessor :param accessor: accessor for resolving object """
hexdi.core.get_root_container().bind_type(type_to_bind, accessor, lifetime.PermanentLifeTimeManager)
<SYSTEM_TASK:> shortcut for bind_type with PerResolveLifeTimeManager on root container <END_TASK> <USER_TASK:> Description: def bind_transient(type_to_bind: hexdi.core.restype, accessor: hexdi.core.clstype): """ shortcut for bind_type with PerResolveLifeTimeManager on root container :param type_to_bind: type that will be resolved by accessor :param accessor: accessor for resolving object """
hexdi.core.get_root_container().bind_type(type_to_bind, accessor, lifetime.PerResolveLifeTimeManager)
<SYSTEM_TASK:> Sets up the system repository with the given repository type. <END_TASK> <USER_TASK:> Description: def setup_system_repository(self, repository_type, reset_on_start, repository_class=None): """ Sets up the system repository with the given repository type. :param str repository_type: Repository type to use for the SYSTEM repository. :param bool reset_on_start: Flag to indicate whether stored system resources should be discarded on startup. :param repository_class: class to use for the system repository. If not given, the registered class for the given type will be used. """
# Set up the system entity repository (this does not join the # transaction and is in autocommit mode). cnf = dict(messaging_enable=True, messaging_reset_on_start=reset_on_start) system_repo = self.new(repository_type, name=REPOSITORY_DOMAINS.SYSTEM, repository_class=repository_class, configuration=cnf) self.set(system_repo)
<SYSTEM_TASK:> Convenience method to initialize all repositories that have not been <END_TASK> <USER_TASK:> Description: def initialize_all(self): """ Convenience method to initialize all repositories that have not been initialized yet. """
for repo in itervalues_(self.__repositories): if not repo.is_initialized: repo.initialize()
<SYSTEM_TASK:> Used to cache the bundle definitions rather than loading from config every time they're used <END_TASK> <USER_TASK:> Description: def get_bundles(): """ Used to cache the bundle definitions rather than loading from config every time they're used """
global _cached_bundles if not _cached_bundles: _cached_bundles = BundleManager() for bundle_conf in bundles_settings.BUNDLES: _cached_bundles[bundle_conf[0]] = Bundle(bundle_conf) return _cached_bundles
<SYSTEM_TASK:> Used to cache the bundle versions rather than loading them from the bundle versions file every time they're used <END_TASK> <USER_TASK:> Description: def get_bundle_versions(): """ Used to cache the bundle versions rather than loading them from the bundle versions file every time they're used """
global _cached_versions if not bundles_settings.BUNDLES_VERSION_FILE: _cached_versions = {} if _cached_versions is None: locs = {} try: execfile(bundles_settings.BUNDLES_VERSION_FILE, locs) _cached_versions = locs['BUNDLES_VERSIONS'] except IOError: _cached_versions = {} return _cached_versions
<SYSTEM_TASK:> Return the URL of the bundle file <END_TASK> <USER_TASK:> Description: def get_url(self, version=None): """ Return the URL of the bundle file """
if self.fixed_bundle_url: return self.fixed_bundle_url return '%s.%s.%s' % (os.path.join(self.bundle_url_root, self.bundle_filename), version or self.get_version(), self.bundle_type)
<SYSTEM_TASK:> Return a list of file urls - will return a single item if settings.USE_BUNDLES is True <END_TASK> <USER_TASK:> Description: def get_file_urls(self): """ Return a list of file urls - will return a single item if settings.USE_BUNDLES is True """
if self.use_bundle: return [self.get_url()] return [bundle_file.file_url for bundle_file in self.files]
<SYSTEM_TASK:> Returns a batch instance after exporting a batch of txs. <END_TASK> <USER_TASK:> Description: def export_batch(self): """Returns a batch instance after exporting a batch of txs. """
batch = self.batch_cls( model=self.model, history_model=self.history_model, using=self.using ) if batch.items: try: json_file = self.json_file_cls(batch=batch, path=self.path) json_file.write() except JSONDumpFileError as e: raise TransactionExporterError(e) batch.close() return batch return None
<SYSTEM_TASK:> Ensure key is either in schema's attributes or already set on self. <END_TASK> <USER_TASK:> Description: def _check_key(self, key): """ Ensure key is either in schema's attributes or already set on self. """
self.setup_schema() if key not in self._attrs and key not in self: raise KeyError(key)
<SYSTEM_TASK:> Download a HiRISE EDR set of .IMG files to the CWD <END_TASK> <USER_TASK:> Description: def hirise_edr(self, pid, chunk_size=1024*1024): """ Download a HiRISE EDR set of .IMG files to the CWD
You must know the full id to specify the filter to use, i.e.:
PSP_XXXXXX_YYYY will download every EDR IMG file available
PSP_XXXXXX_YYYY_R will download every EDR RED filter IMG file
PSP_XXXXXX_YYYY_BG12_0 will download only the BG12_0
A wildcard is automatically appended to the end of the provided pid.
pid: product ID of the HiRISE EDR, partial IDs ok
chunk_size: Chunk size in bytes to use in download """
productid = "{}*".format(pid)
        query = {"target"    : "mars",
                 "query"     : "product",
                 "results"   : "f",
                 "output"    : "j",
                 "pt"        : "EDR",
                 "iid"       : "HiRISE",
                 "ihid"      : "MRO",
                 "productid" : productid}
        # Query the ODE
        products = query_ode(self.ode_url, query)
        # Validate query results with conditions for this particular query
        if len(products) > 30:
            print("Error: Too many products selected in query, make PID more specific")
            sys.exit(1)
        if not isinstance(products, list):
            print("Error: Too few responses from server to be a full HiRISE EDR")
        else:
            # proceed to download
            for product in products:
                download_edr_img_files(product, self.https, chunk_size)
<SYSTEM_TASK:> Verifies NTFS filesystem signature. <END_TASK> <USER_TASK:> Description: def detect(self, filename, offset, standalone=False): """Verifies NTFS filesystem signature. Returns: bool: True if filesystem signature at offset 0x03 \ matches 'NTFS ', False otherwise. """
r = RawStruct( filename=filename, offset=offset + SIG_OFFSET, length=SIG_SIZE) oem_id = r.data if oem_id == b"NTFS ": return True return False
<SYSTEM_TASK:> Load the action from configuration <END_TASK> <USER_TASK:> Description: def load(cls, v): """Load the action from configuration"""
if v is None: return [] if isinstance(v, list): return [ Action(s) for s in v ] elif isinstance(v, str): return [Action(v)] else: raise ParseError("Couldn't parse action: %r" % v)
<SYSTEM_TASK:> Create a graphviz .dot representation of the automaton. <END_TASK> <USER_TASK:> Description: def make_dot(self, filename_or_stream, auts): """Create a graphviz .dot representation of the automaton."""
if isinstance(filename_or_stream, str):
            # file() is Python 2 only; open() works on both 2 and 3.
            stream = open(filename_or_stream, 'w')
        else:
            stream = filename_or_stream
        dot = DotFile(stream)
        for aut in auts:
            dot.start(aut.name)
            dot.node('shape=Mrecord width=1.5')
            for st in aut.states:
                label = st.name
                if st.entering:
                    label += '|%s' % '\\l'.join(str(st) for st in st.entering)
                if st.leaving:
                    label += '|%s' % '\\l'.join(str(st) for st in st.leaving)
                label = '{%s}' % label
                dot.state(st.name, label=label)
            for st in aut.states:
                for tr in st.transitions:
                    dot.transition(tr.s_from.name, tr.s_to.name, tr.when)
            dot.end()
        dot.finish()
<SYSTEM_TASK:> Create a bucket, directory, or empty file. <END_TASK> <USER_TASK:> Description: def create(self, url): """Create a bucket, directory, or empty file."""
bucket, obj_key = _parse_url(url) if not bucket: raise InvalidURL(url, "You must specify a bucket and (optional) path") if obj_key: target = "/".join((bucket, obj_key)) else: target = bucket return self.call("CreateBucket", bucket=target)
<SYSTEM_TASK:> Destroy a bucket, directory, or file. Specifying recursive=True <END_TASK> <USER_TASK:> Description: def destroy(self, url, recursive=False): """Destroy a bucket, directory, or file. Specifying recursive=True recursively deletes all subdirectories and files."""
bucket, obj_key = _parse_url(url) if not bucket: raise InvalidURL(url, "You must specify a bucket and (optional) path") if obj_key: target = "/".join((bucket, obj_key)) else: target = bucket if recursive: for obj in self.get(url, delimiter=''): self.destroy(obj['url']) return self.call("DeleteBucket", bucket=target)
<SYSTEM_TASK:> Copy a local file to an S3 location. <END_TASK> <USER_TASK:> Description: def upload(self, local_path, remote_url): """Copy a local file to an S3 location."""
bucket, key = _parse_url(remote_url) with open(local_path, 'rb') as fp: return self.call("PutObject", bucket=bucket, key=key, body=fp)
<SYSTEM_TASK:> Copy S3 data to a local file. <END_TASK> <USER_TASK:> Description: def download(self, remote_url, local_path, buffer_size=8 * 1024): """Copy S3 data to a local file."""
bucket, key = _parse_url(remote_url) response_file = self.call("GetObject", bucket=bucket, key=key)['Body'] with open(local_path, 'wb') as fp: buf = response_file.read(buffer_size) while buf: fp.write(buf) buf = response_file.read(buffer_size)
<SYSTEM_TASK:> Copy an S3 object to another S3 location. <END_TASK> <USER_TASK:> Description: def copy(self, src_url, dst_url): """Copy an S3 object to another S3 location."""
src_bucket, src_key = _parse_url(src_url) dst_bucket, dst_key = _parse_url(dst_url) if not dst_bucket: dst_bucket = src_bucket params = { 'copy_source': '/'.join((src_bucket, src_key)), 'bucket': dst_bucket, 'key': dst_key, } return self.call("CopyObject", **params)
<SYSTEM_TASK:> Copy a single S3 object to another S3 location, then delete the <END_TASK> <USER_TASK:> Description: def move(self, src_url, dst_url): """Copy a single S3 object to another S3 location, then delete the original object."""
self.copy(src_url, dst_url) self.destroy(src_url)
<SYSTEM_TASK:> get_shard_names returns an array containing the names of the shards <END_TASK> <USER_TASK:> Description: def get_shard_names(self): """ get_shard_names returns an array containing the names of the shards in the cluster. This is determined with num_shards and shard_name_format """
results = [] for shard_num in range(0, self.num_shards()): shard_name = self.get_shard_name(shard_num) results.append(shard_name) return results
<SYSTEM_TASK:> get_canonical_key_id is used by get_canonical_key, see the comment <END_TASK> <USER_TASK:> Description: def get_canonical_key_id(self, key_id): """ get_canonical_key_id is used by get_canonical_key, see the comment for that method for more explanation. Keyword arguments: key_id -- the key id (e.g. '12345') returns the canonical key id (e.g. '12') """
shard_num = self.get_shard_num_by_key_id(key_id) return self._canonical_keys[shard_num]
<SYSTEM_TASK:> get_shard_by_num returns the shard at index shard_num. <END_TASK> <USER_TASK:> Description: def get_shard_by_num(self, shard_num): """ get_shard_by_num returns the shard at index shard_num. Keyword arguments: shard_num -- The shard index Returns a redis.StrictRedis connection or raises a ValueError. """
if shard_num < 0 or shard_num >= self.num_shards(): raise ValueError("requested invalid shard# {0}".format(shard_num)) return self._shards[shard_num]
<SYSTEM_TASK:> _get_key_id_from_key returns the key id from a key, if found. otherwise <END_TASK> <USER_TASK:> Description: def _get_key_id_from_key(self, key): """ _get_key_id_from_key returns the key id from a key, if found. otherwise it just returns the key to be used as the key id. Keyword arguments: key -- The key to derive the ID from. If curly braces are found in the key, then the contents of the curly braces are used as the key id for the key. Returns the key id portion of the key, or the whole key if no hash tags are present. """
key_id = key regex = '{0}([^{1}]*){2}'.format(self._hash_start, self._hash_stop, self._hash_stop) m = re.search(regex, key) if m is not None: # Use what's inside the hash tags as the key id, if present. # Otherwise the whole key will be used as the key id. key_id = m.group(1) return key_id
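A usage sketch of the hash-tag convention above (tr is a hypothetical TwemRedis instance using the default '{' and '}' hash markers); keys sharing a tag yield the same key id and therefore land on the same shard:

tr._get_key_id_from_key('user:{12345}:profile')   # -> '12345'
tr._get_key_id_from_key('user:{12345}:sessions')  # -> '12345' (same shard)
tr._get_key_id_from_key('plainkey')               # -> 'plainkey' (whole key used)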
<SYSTEM_TASK:> A canonical key id is the lowest integer key id that maps to <END_TASK> <USER_TASK:> Description: def compute_canonical_key_ids(self, search_amplifier=100): """ A canonical key id is the lowest integer key id that maps to a particular shard. The mapping to canonical key ids depends on the number of shards. Returns a dictionary mapping from shard number to canonical key id. This method will throw an exception if it fails to compute all of the canonical key ids. """
canonical_keys = {}
        num_shards = self.num_shards()
        # Guarantees enough iterations to find all keys without running forever
        num_iterations = (num_shards**2) * search_amplifier
        for key_id in range(1, num_iterations):
            shard_num = self.get_shard_num_by_key(str(key_id))
            if shard_num in canonical_keys:
                continue
            canonical_keys[shard_num] = str(key_id)
            if len(canonical_keys) == num_shards:
                break
        if len(canonical_keys) != num_shards:
            # Use str.format placeholders consistently (the original mixed
            # %-style placeholders with .format()).
            raise ValueError("Failed to compute enough keys. "
                             "Wanted {0}, got {1} (search_amp={2}).".format(
                                 num_shards, len(canonical_keys),
                                 search_amplifier))
        return canonical_keys
<SYSTEM_TASK:> keys wrapper that queries every shard. This is an expensive <END_TASK> <USER_TASK:> Description: def keys(self, args): """ keys wrapper that queries every shard. This is an expensive operation. This method should be invoked on a TwemRedis instance as if it were being invoked directly on a StrictRedis instance. """
results = {} # TODO: parallelize for shard_num in range(0, self.num_shards()): shard = self.get_shard_by_num(shard_num) results[shard_num] = shard.keys(args) return results
<SYSTEM_TASK:> mget wrapper that batches keys per shard and execute as few <END_TASK> <USER_TASK:> Description: def mget(self, args): """ mget wrapper that batches keys per shard and execute as few mgets as necessary to fetch the keys from all the shards involved. This method should be invoked on a TwemRedis instance as if it were being invoked directly on a StrictRedis instance. """
key_map = collections.defaultdict(list) results = {} for key in args: shard_num = self.get_shard_num_by_key(key) key_map[shard_num].append(key) # TODO: parallelize for shard_num in key_map.keys(): shard = self.get_shard_by_num(shard_num) results[shard_num] = shard.mget(key_map[shard_num]) return results
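A usage sketch for the batched mget above (tr is a hypothetical TwemRedis instance; the shard numbers shown are illustrative): one mget is issued per shard involved, and results come back grouped by shard number, preserving per-shard key order:

results = tr.mget(['user:{1}:name', 'user:{7}:name', 'user:{1}:email'])
# e.g. {3: ['alice', 'alice@example.org'], 5: ['bob']}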
<SYSTEM_TASK:> mset wrapper that batches keys per shard and execute as few <END_TASK> <USER_TASK:> Description: def mset(self, args): """ mset wrapper that batches keys per shard and execute as few msets as necessary to set the keys in all the shards involved. This method should be invoked on a TwemRedis instance as if it were being invoked directly on a StrictRedis instance. """
key_map = collections.defaultdict(dict) result_count = 0 for key in args.keys(): value = args[key] shard_num = self.get_shard_num_by_key(key) key_map[shard_num][key] = value # TODO: parallelize for shard_num in key_map.keys(): shard = self.get_shard_by_num(shard_num) result_count += shard.mset(key_map[shard_num]) return result_count
<SYSTEM_TASK:> Generator for sequential numeric IDs. <END_TASK> <USER_TASK:> Description: def id_generator(start=0): """ Generator for sequential numeric IDs. """
count = start
    while True:
        send_value = (yield count)
        if send_value is not None:
            if send_value < count:
                raise ValueError('Values from ID generator must increase '
                                 'monotonically (current value: %d; value '
                                 'sent to generator: %d).'
                                 % (count, send_value))
            count = send_value
        else:
            count += 1
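A usage sketch of id_generator, including the send() fast-forward path:

gen = id_generator(start=5)
next(gen)     # 5
next(gen)     # 6
gen.send(10)  # 10: jumping the counter forward is allowed
next(gen)     # 11
gen.send(3)   # raises ValueError: values must increase monotonically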
<SYSTEM_TASK:> Truncates the message to the given limit length. The beginning and the <END_TASK> <USER_TASK:> Description: def truncate(message, limit=500): """ Truncates the message to the given limit length. The beginning and the end of the message are left untouched. """
if len(message) > limit: trc_msg = ''.join([message[:limit // 2 - 2], ' .. ', message[len(message) - limit // 2 + 2:]]) else: trc_msg = message return trc_msg
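A quick check of the trimming arithmetic above: the result keeps limit//2 - 2 characters from each end plus the 4-character ' .. ' separator, so the output length equals the limit:

msg = truncate('x' * 600, limit=500)
len(msg)  # 500: 248 leading chars + ' .. ' + 248 trailing chars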
<SYSTEM_TASK:> Construct a route to be parsed into flask App <END_TASK> <USER_TASK:> Description: def make_route(self, route) -> dict: """ Construct a route to be parsed into flask App """
middleware = route['middleware'] if 'middleware' in route else None # added to ALL requests to support xhr cross-site requests route['methods'].append('OPTIONS') return { 'url': route['url'], 'name': route['name'], 'methods': route['methods'], 'middleware': middleware, 'callback': { 'module': route['function'].__module__, 'class': route['function'].__qualname__.rsplit('.', 1)[0], 'function': route['function'].__name__ } }
<SYSTEM_TASK:> diffusion driver are the underlying `dW` of each process `X` in a SDE like `dX = m dt + s dW` <END_TASK> <USER_TASK:> Description: def diffusion_driver(self): """ diffusion driver are the underlying `dW` of each process `X` in a SDE like `dX = m dt + s dW` :return list(StochasticProcess): """
if self._diffusion_driver is None: return self, if isinstance(self._diffusion_driver, list): return tuple(self._diffusion_driver) if isinstance(self._diffusion_driver, tuple): return self._diffusion_driver return self._diffusion_driver,
<SYSTEM_TASK:> Ensures all historical model codenames exist in Django's Permission <END_TASK> <USER_TASK:> Description: def reset_codenames(self, dry_run=None, clear_existing=None): """Ensures all historical model codenames exist in Django's Permission model. """
self.created_codenames = []
        self.updated_names = []
        # "view" only exists from Django 2.1 onwards, so start without it
        # and append conditionally (the original listed it twice).
        actions = ["add", "change", "delete"]
        if django.VERSION >= (2, 1):
            actions.append("view")
        for app in django_apps.get_app_configs():
            for model in app.get_models():
                try:
                    getattr(model, model._meta.simple_history_manager_attribute)
                except AttributeError:
                    pass
                else:
                    self.update_or_create(
                        model, dry_run=dry_run, clear_existing=clear_existing
                    )
        if dry_run:
            print("This is a dry-run. No modifications were made.")
        if self.created_codenames:
            print("The following historical permission.codenames were added:")
            pprint(self.created_codenames)
        else:
            print("No historical permission.codenames were added.")
        if self.updated_names:
            print("The following historical permission.names were updated:")
            pprint(self.updated_names)
        else:
            print("No historical permission.names were updated.")
<SYSTEM_TASK:> Checks if the given attribute name is a member attribute of the given <END_TASK> <USER_TASK:> Description: def is_resource_class_member_attribute(rc, attr_name): """ Checks if the given attribute name is a member attribute of the given registered resource. """
attr = get_resource_class_attribute(rc, attr_name) return attr.kind == RESOURCE_ATTRIBUTE_KINDS.MEMBER
<SYSTEM_TASK:> Checks if the given attribute name is a collection attribute of the given <END_TASK> <USER_TASK:> Description: def is_resource_class_collection_attribute(rc, attr_name): """ Checks if the given attribute name is a collection attribute of the given registered resource. """
attr = get_resource_class_attribute(rc, attr_name) return attr.kind == RESOURCE_ATTRIBUTE_KINDS.COLLECTION
<SYSTEM_TASK:> This function passes a running window along the length of the given <END_TASK> <USER_TASK:> Description: def window(iterable, n=2, cast=tuple): """ This function passes a running window along the length of the given iterable. By default, the return value is a tuple, but the cast parameter can be used to change the final result. """
it = iter(iterable)
    # islice avoids a StopIteration escaping from the generator expression
    # (a RuntimeError on Python 3.7+) when the iterable is shorter than n.
    win = deque(islice(it, n), maxlen=n)
    if len(win) < n:
        raise ValueError('Window size was greater than iterable length')
    yield cast(win)
    append = win.append
    for e in it:
        append(e)
        yield cast(win)
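A usage sketch for window with both the default tuple cast and a custom one:

list(window([1, 2, 3, 4], n=2))          # [(1, 2), (2, 3), (3, 4)]
list(window('abcd', n=3, cast=''.join))  # ['abc', 'bcd']
list(window([1], n=2))                   # raises ValueError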
<SYSTEM_TASK:> Add a number of months to the given date <END_TASK> <USER_TASK:> Description: def add_months(self, value: int) -> datetime: """ Add a number of months to the given date """
self.value = self.value + relativedelta(months=value) return self.value
<SYSTEM_TASK:> Initializes from the given date value <END_TASK> <USER_TASK:> Description: def from_date(self, value: date) -> datetime: """ Initializes from the given date value """
assert isinstance(value, date) #self.value = datetime.combine(value, time.min) self.value = datetime(value.year, value.month, value.day) return self.value
<SYSTEM_TASK:> Returns the day name <END_TASK> <USER_TASK:> Description: def get_day_name(self) -> str: """ Returns the day name """
weekday = self.value.isoweekday() - 1 return calendar.day_name[weekday]
<SYSTEM_TASK:> Provides end of the month for the given date <END_TASK> <USER_TASK:> Description: def end_of_month(self) -> datetime: """ Provides end of the month for the given date """
# Increase month by 1, result = self.value + relativedelta(months=1) # take the 1st day of the (next) month, result = result.replace(day=1) # subtract one day result = result - relativedelta(days=1) self.value = result return self.value
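A concrete trace of the end-of-month arithmetic above (assumes Datum wraps a datetime in .value and exposes set_value, as shown elsewhere in this section):

d = Datum()
d.set_value(datetime(2019, 2, 10))
d.end_of_month()  # 2019-02-10 -> 2019-03-10 -> 2019-03-01 -> 2019-02-28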
<SYSTEM_TASK:> Checks if the date is at the end of the month <END_TASK> <USER_TASK:> Description: def is_end_of_month(self) -> bool: """ Checks if the date is at the end of the month """
end_of_month = Datum()
        # Compare against the end of this value's month, not (implicitly)
        # today's: seed the helper instance with the current value first.
        end_of_month.set_value(self.value)
        end_of_month.end_of_month()
        return self.value == end_of_month.value
<SYSTEM_TASK:> Sets the day value <END_TASK> <USER_TASK:> Description: def set_day(self, day: int) -> datetime: """ Sets the day value """
self.value = self.value.replace(day=day) return self.value
<SYSTEM_TASK:> Sets the current value <END_TASK> <USER_TASK:> Description: def set_value(self, value: datetime): """ Sets the current value """
assert isinstance(value, datetime) self.value = value
<SYSTEM_TASK:> Returns start of day <END_TASK> <USER_TASK:> Description: def start_of_day(self) -> datetime: """ Returns start of day """
self.value = datetime(self.value.year, self.value.month, self.value.day) return self.value
<SYSTEM_TASK:> Subtracts dates from the given value <END_TASK> <USER_TASK:> Description: def subtract_days(self, days: int) -> datetime: """ Subtracts dates from the given value """
self.value = self.value - relativedelta(days=days) return self.value
<SYSTEM_TASK:> Subtracts number of weeks from the current value <END_TASK> <USER_TASK:> Description: def subtract_weeks(self, weeks: int) -> datetime: """ Subtracts number of weeks from the current value """
self.value = self.value - timedelta(weeks=weeks) return self.value
<SYSTEM_TASK:> Subtracts a number of months from the current value <END_TASK> <USER_TASK:> Description: def subtract_months(self, months: int) -> datetime: """ Subtracts a number of months from the current value """
self.value = self.value - relativedelta(months=months) return self.value
<SYSTEM_TASK:> Set the value to yesterday <END_TASK> <USER_TASK:> Description: def yesterday(self) -> datetime: """ Set the value to yesterday """
self.value = datetime.today() - timedelta(days=1) return self.value
<SYSTEM_TASK:> This method parses a UUID protobuf message type from its component <END_TASK> <USER_TASK:> Description: def get_uuid_string(low=None, high=None, **x): """This method parses a UUID protobuf message type from its component 'high' and 'low' longs into a standard formatted UUID string Args: x (dict): containing keys, 'low' and 'high' corresponding to the UUID protobuf message type Returns: str: UUID formatted string """
if low is None or high is None: return None x = ''.join([parse_part(low), parse_part(high)]) return '-'.join([x[:8], x[8:12], x[12:16], x[16:20], x[20:32]])
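A usage sketch of the regrouping above, assuming parse_part renders each long as a zero-padded 16-character hex string (that rendering, including its byte order, is an assumption about parse_part):

get_uuid_string(low=0x1234567890abcdef, high=0xfedcba0987654321)
# joined hex: '1234567890abcdeffedcba0987654321'
# -> '12345678-90ab-cdef-fedc-ba0987654321'  (8-4-4-4-12 grouping)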
<SYSTEM_TASK:> search Zenodo record for string `search` <END_TASK> <USER_TASK:> Description: def search(self, search): """search Zenodo record for string `search` :param search: string to search :return: Record[] results """
search = search.replace('/', ' ') # zenodo can't handle '/' in search query params = {'q': search} return self._get_records(params)
<SYSTEM_TASK:> get list from QueryDict and remove blank values from the list. <END_TASK> <USER_TASK:> Description: def qdict_get_list(qdict, k): """ get list from QueryDict and remove blank values from the list. """
pks = qdict.getlist(k) return [e for e in pks if e]
<SYSTEM_TASK:> AJAX view adapted from django-progressbarupload <END_TASK> <USER_TASK:> Description: def upload_progress(request): """ AJAX view adapted from django-progressbarupload Return the upload progress and total length values """
# Initialize both lookups so a missing header cannot raise NameError.
    progress_id = None
    if 'X-Progress-ID' in request.GET:
        progress_id = request.GET['X-Progress-ID']
    elif 'X-Progress-ID' in request.META:
        progress_id = request.META['X-Progress-ID']
    logfilename = None
    if 'logfilename' in request.GET:
        logfilename = request.GET['logfilename']
    elif 'logfilename' in request.META:
        logfilename = request.META['logfilename']
    cache_key = "%s_%s" % (request.META['REMOTE_ADDR'], progress_id)
    data = cache.get(cache_key)
    if not data and logfilename:
        data = cache.get(logfilename.replace(' ', '_'))
    return HttpResponse(json.dumps(data))
<SYSTEM_TASK:> Set foreground- and background colors and intensity. <END_TASK> <USER_TASK:> Description: def set_color(self, fg=None, bg=None, intensify=False, target=sys.stdout): """Set foreground- and background colors and intensity."""
raise NotImplementedError
<SYSTEM_TASK:> Adds the given entity to this cache. <END_TASK> <USER_TASK:> Description: def add(self, entity): """ Adds the given entity to this cache. :param entity: Entity to add. :type entity: Object implementing :class:`everest.interfaces.IEntity`. :raises ValueError: If the ID of the entity to add is ``None`` (unless the `allow_none_id` constructor argument was set). """
do_append = self.__check_new(entity) if do_append: self.__entities.append(entity)
<SYSTEM_TASK:> Removes the given entity from this cache. <END_TASK> <USER_TASK:> Description: def remove(self, entity): """ Removes the given entity from this cache. :param entity: Entity to remove. :type entity: Object implementing :class:`everest.interfaces.IEntity`. :raises KeyError: If the given entity is not in this cache. :raises ValueError: If the ID of the given entity is `None`. """
self.__id_map.pop(entity.id, None) self.__slug_map.pop(entity.slug, None) self.__entities.remove(entity)
<SYSTEM_TASK:> Retrieve entities from this cache, possibly after filtering, ordering <END_TASK> <USER_TASK:> Description: def retrieve(self, filter_expression=None, order_expression=None, slice_key=None): """ Retrieve entities from this cache, possibly after filtering, ordering and slicing. """
ents = iter(self.__entities) if not filter_expression is None: ents = filter_expression(ents) if not order_expression is None: # Ordering always involves a copy and conversion to a list, so # we have to wrap in an iterator. ents = iter(order_expression(ents)) if not slice_key is None: ents = islice(ents, slice_key.start, slice_key.stop) return ents
<SYSTEM_TASK:> Based on the user-related lists I have downloaded, annotate the users. <END_TASK> <USER_TASK:> Description: def extract_user_keywords_generator(twitter_lists_gen, lemmatizing="wordnet"): """ Based on the user-related lists I have downloaded, annotate the users.
Inputs: - twitter_lists_gen: A python generator that yields a user Twitter id and a generator of Twitter lists.
- lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet".
Yields: - user_twitter_id: A Twitter user id.
- user_annotation: A python dictionary that contains two dicts:
* bag_of_lemmas: Maps lemmas to multiplicity.
* lemma_to_keywordbag: A python dictionary that maps stems/lemmas to original topic keywords. """
#################################################################################################################### # Extract keywords serially. #################################################################################################################### for user_twitter_id, twitter_lists_list in twitter_lists_gen: if twitter_lists_list is not None: if "lists" in twitter_lists_list.keys(): twitter_lists_list = twitter_lists_list["lists"] bag_of_lemmas, lemma_to_keywordbag = user_twitter_list_bag_of_words(twitter_lists_list, lemmatizing) for lemma, keywordbag in lemma_to_keywordbag.items(): lemma_to_keywordbag[lemma] = dict(keywordbag) lemma_to_keywordbag = dict(lemma_to_keywordbag) user_annotation = dict() user_annotation["bag_of_lemmas"] = bag_of_lemmas user_annotation["lemma_to_keywordbag"] = lemma_to_keywordbag yield user_twitter_id, user_annotation