| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 31-13.1k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q3700
|
PickleInterface.recv
|
train
|
def recv(self, blocking=True):
"""Receive the next object from the socket"""
|
python
|
{
"resource": ""
}
|
q3701
|
PickleInterface.try_recv
|
train
|
def try_recv(self):
"""Return None immediately if nothing is waiting"""
try:
lenstr = self.sock.recv(4, socket.MSG_DONTWAIT)
except socket.error:
return None
if len(lenstr) < 4:
|
python
|
{
"resource": ""
}
|
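The preview cuts `try_recv` off right after the length check. Purely as an illustrative sketch (not the dataset's actual code), a non-blocking, length-prefixed pickle receive in the same spirit could look like this, assuming a connected socket and a 4-byte little-endian length header:

```python
import pickle
import socket
import struct

def try_recv(sock):
    """Return the next pickled object, or None if no complete length prefix is waiting."""
    try:
        # non-blocking read of the 4-byte length header (MSG_DONTWAIT is POSIX-only)
        lenstr = sock.recv(4, socket.MSG_DONTWAIT)
    except socket.error:
        return None
    if len(lenstr) < 4:
        return None
    (length,) = struct.unpack("<I", lenstr)  # assumed little-endian framing
    payload = b""
    while len(payload) < length:
        payload += sock.recv(length - len(payload))
    return pickle.loads(payload)
```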
q3702
|
PickleInterface._get_next_obj
|
train
|
def _get_next_obj(self, length):
"""Assumes we've already read the object length"""
|
python
|
{
"resource": ""
}
|
q3703
|
Dut.get_modules
|
train
|
def get_modules(self, type_name):
'''Getting modules by type name.
Parameters
----------
type_name : string
Type name of the modules to be returned.
Returns
-------
List of modules of given type name else empty
|
python
|
{
"resource": ""
}
|
q3704
|
HardwareLayer.wait_for_ready
|
train
|
def wait_for_ready(self, timeout=None, times=None, delay=None, delay_between=None, abort=None):
'''Determine the ready state of the device and wait until the device is ready.
Parameters
----------
timeout : int, float
The maximum amount of time to wait in seconds. Reaching the timeout will raise a RuntimeError.
times : int
Maximum number of times reading the ready state.
delay : int, float
The number of seconds to sleep before checks. Defaults to 0.
delay_between : int, float
The number of seconds to sleep between each check. Defaults to 0.
abort : Threading.Event
Breaking the loop from other threads.
Returns
-------
True if state is ready, else False.
'''
if delay:
try:
sleep(delay)
except IOError: # negative values
pass
if timeout is not None:
|
python
|
{
"resource": ""
}
|
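Only the first lines of `wait_for_ready` are shown. A minimal, generic sketch of the polling loop the docstring describes, with a hypothetical `is_ready` callable standing in for the device read:

```python
from time import sleep, time

def wait_for_ready(is_ready, timeout=None, times=None, delay=None, delay_between=None, abort=None):
    """Poll is_ready() until it returns True, a limit is reached, or abort is set."""
    if delay:
        sleep(max(delay, 0))  # initial delay; negative values are treated as zero
    start = time()
    checks = 0
    while not (abort and abort.is_set()):
        if is_ready():
            return True
        checks += 1
        if times is not None and checks >= times:
            break
        if timeout is not None and time() - start > timeout:
            raise RuntimeError("timeout while waiting for ready state")
        if delay_between:
            sleep(max(delay_between, 0))
    return False
```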
q3705
|
SiUSBDevice.DownloadXilinx
|
train
|
def DownloadXilinx(self, bitfile):
"""We hijack this call to perform the socket connect"""
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
python
|
{
"resource": ""
}
|
q3706
|
SussProber.set_position
|
train
|
def set_position(self, x, y, speed=None):
''' Move chuck to absolute position in um'''
if speed:
self._intf.write('MoveChuckSubsite %1.1f %1.1f R Y %d' %
|
python
|
{
"resource": ""
}
|
q3707
|
SussProber.move_position
|
train
|
def move_position(self, dx, dy, speed=None):
''' Move chuck relative to actual position in um'''
if speed:
self._intf.write('MoveChuckPosition %1.1f %1.1f R Y %d'
|
python
|
{
"resource": ""
}
|
q3708
|
SussProber.get_die
|
train
|
def get_die(self):
''' Move chuck to wafer map chip index'''
reply = self._intf.query('ReadMapPosition')
|
python
|
{
"resource": ""
}
|
q3709
|
TrackRegister.clear
|
train
|
def clear(self):
'Clear tracks in memory - all zero'
|
python
|
{
"resource": ""
}
|
q3710
|
sitcp_fifo.set_data
|
train
|
def set_data(self, data):
''' Sending data via TCP.
Parameters
----------
data : array
Array of unsigned integers (32 bit).
'''
|
python
|
{
"resource": ""
}
|
q3711
|
Pixel.program_global_reg
|
train
|
def program_global_reg(self):
"""
Send the global register to the chip.
Loads the values of self['GLOBAL_REG'] onto the chip.
Includes enabling the clock, and loading the Control (CTR)
and DAC shadow registers.
"""
self._clear_strobes()
gr_size = len(self['GLOBAL_REG'][:]) # get the size
self['SEQ']['SHIFT_IN'][0:gr_size] = self['GLOBAL_REG'][:] # this will be shifted out
self['SEQ']['GLOBAL_SHIFT_EN'][0:gr_size] = bitarray(gr_size * '1') # this is to
|
python
|
{
"resource": ""
}
|
q3712
|
Pixel.program_pixel_reg
|
train
|
def program_pixel_reg(self, enable_receiver=True):
"""
Send the pixel register to the chip and store the output.
Loads the values of self['PIXEL_REG'] onto the chip.
Includes enabling the clock, and loading the Control (CTR)
and DAC shadow registers.
if(enable_receiver), stores the output (by byte) in
|
python
|
{
"resource": ""
}
|
q3713
|
BaseRemoteWorkflowProxy.dump_submission_data
|
train
|
def dump_submission_data(self):
"""
Dumps the current submission data to the submission file.
"""
# renew the dashboard config
self.submission_data["dashboard_config"] = self.dashboard.get_persistent_config()
|
python
|
{
"resource": ""
}
|
q3714
|
BaseRemoteWorkflowProxy.run
|
train
|
def run(self):
"""
Actual run method that starts the processing of jobs and initiates the status polling, or
performs job cancelling or cleaning, depending on the task parameters.
"""
task = self.task
self._outputs = self.output()
# create the job dashboard interface
self.dashboard = task.create_job_dashboard() or NoJobDashboard()
# read submission data and reset some values
submitted = not task.ignore_submission and self._outputs["submission"].exists()
if submitted:
self.submission_data.update(self._outputs["submission"].load(formatter="json"))
task.tasks_per_job = self.submission_data.tasks_per_job
self.dashboard.apply_config(self.submission_data.dashboard_config)
# when the branch outputs, i.e. the "collection" exists, just create dummy control outputs
if "collection" in self._outputs and self._outputs["collection"].exists():
self.touch_control_outputs()
# cancel jobs?
elif self._cancel_jobs:
if submitted:
self.cancel()
# cleanup jobs?
elif self._cleanup_jobs:
if submitted:
self.cleanup()
# submit and/or wait while polling
else:
# maybe set a tracking url
tracking_url = self.dashboard.create_tracking_url()
if tracking_url:
task.set_tracking_url(tracking_url)
# ensure the output directory exists
if not submitted:
self._outputs["submission"].parent.touch()
# at this point, when the status file exists, it is considered outdated
if "status" in self._outputs:
self._outputs["status"].remove()
try:
# instantiate the configured job file factory, not kwargs yet
self.job_file_factory = self.create_job_file_factory()
# submit
if not submitted:
|
python
|
{
"resource": ""
}
|
q3715
|
BaseRemoteWorkflowProxy.cancel
|
train
|
def cancel(self):
"""
Cancels running jobs. The job ids are read from the submission file which has to exist
for obvious reasons.
"""
task = self.task
# get job ids from submission data
job_ids = [
d["job_id"] for d in self.submission_data.jobs.values()
if d["job_id"] not in (self.submission_data.dummy_job_id, None)
]
if not job_ids:
return
# cancel jobs
task.publish_message("going to cancel {} jobs".format(len(job_ids)))
errors = self.job_manager.cancel_batch(job_ids)
# print errors
if errors:
print("{} error(s) occured while cancelling {} job(s) of task {}:".format(
len(errors), len(job_ids), task.task_id))
tmpl = " {}"
for i, err in enumerate(errors):
print(tmpl.format(err))
if i +
|
python
|
{
"resource": ""
}
|
q3716
|
BaseRemoteWorkflowProxy.cleanup
|
train
|
def cleanup(self):
"""
Cleans up jobs on the remote run location. The job ids are read from the submission file
which has to exist for obvious reasons.
"""
task = self.task
# get job ids from submission data
job_ids = [
d["job_id"] for d in self.submission_data.jobs.values()
if d["job_id"] not in (self.submission_data.dummy_job_id, None)
]
if not job_ids:
return
# cleanup jobs
task.publish_message("going to cleanup {} jobs".format(len(job_ids)))
errors = self.job_manager.cleanup_batch(job_ids)
# print errors
if errors:
print("{} error(s) occured
|
python
|
{
"resource": ""
}
|
q3717
|
BaseRemoteWorkflowProxy.touch_control_outputs
|
train
|
def touch_control_outputs(self):
"""
Creates and saves dummy submission and status files. This method is called in case the
collection of branch task outputs exists.
"""
task = self.task
# create the parent directory
self._outputs["submission"].parent.touch()
# get all branch indexes and chunk them by tasks_per_job
branch_chunks = list(iter_chunks(task.branch_map.keys(), task.tasks_per_job))
# submission output
if not self._outputs["submission"].exists():
submission_data = self.submission_data.copy()
# set dummy submission data
submission_data.jobs.clear()
for i, branches in enumerate(branch_chunks):
job_num = i + 1
|
python
|
{
"resource": ""
}
|
q3718
|
TeeStream._flush
|
train
|
def _flush(self):
"""
Flushes all registered consumer streams.
"""
for consumer in self.consumers:
|
python
|
{
"resource": ""
}
|
q3719
|
get_voms_proxy_user
|
train
|
def get_voms_proxy_user():
"""
Returns the owner of the voms proxy.
"""
out = _voms_proxy_info(["--identity"])[1].strip()
|
python
|
{
"resource": ""
}
|
q3720
|
JobArguments.get_args
|
train
|
def get_args(self):
"""
Returns the list of encoded job arguments. The order of this list corresponds to the
arguments expected by the job wrapper script.
"""
return [
self.task_cls.__module__,
self.task_cls.__name__,
|
python
|
{
"resource": ""
}
|
q3721
|
patch_all
|
train
|
def patch_all():
"""
Runs all patches. This function ensures that a second invocation has no effect.
"""
global _patched
if _patched:
|
python
|
{
"resource": ""
}
|
q3722
|
patch_worker_run_task
|
train
|
def patch_worker_run_task():
"""
Patches the ``luigi.worker.Worker._run_task`` method to store the worker id and the id of its
first task in the task. This information is required by the sandboxing mechanism.
"""
_run_task = luigi.worker.Worker._run_task
def run_task(self, task_id):
task = self._scheduled_tasks[task_id]
task._worker_id = self._id
task._worker_task = self._first_task
try:
_run_task(self, task_id)
finally:
|
python
|
{
"resource": ""
}
|
q3723
|
patch_worker_factory
|
train
|
def patch_worker_factory():
"""
Patches the ``luigi.interface._WorkerSchedulerFactory`` to include sandboxing information when
creating a worker instance.
"""
def create_worker(self, scheduler, worker_processes, assistant=False):
worker = luigi.worker.Worker(scheduler=scheduler, worker_processes=worker_processes,
|
python
|
{
"resource": ""
}
|
q3724
|
patch_keepalive_run
|
train
|
def patch_keepalive_run():
"""
Patches the ``luigi.worker.KeepAliveThread.run`` to immediately stop the keep-alive thread when
running within a sandbox.
"""
_run = luigi.worker.KeepAliveThread.run
|
python
|
{
"resource": ""
}
|
q3725
|
run
|
train
|
def run():
"""
Entry point to the law cli. Sets up all parsers, parses all arguments, and executes the
requested subprogram.
"""
# setup the main parser and sub parsers
parser = ArgumentParser(prog="law", description="The law command line tool.")
sub_parsers = parser.add_subparsers(help="subcommands", dest="command")
# add main arguments
parser.add_argument("--version", "-V", action="version", version=law.__version__)
# setup all progs
mods = {}
for prog in progs:
|
python
|
{
"resource": ""
}
|
q3726
|
BaseWorkflowProxy.output
|
train
|
def output(self):
"""
Returns the default workflow outputs in an ordered dictionary. At the moment this is just
the collection of outputs of the branch tasks, stored with the key ``"collection"``.
"""
if self.task.target_collection_cls is not None:
|
python
|
{
"resource": ""
}
|
q3727
|
ImzMLParser.get_physical_coordinates
|
train
|
def get_physical_coordinates(self, i):
"""
For a pixel index i, return the real-world coordinates in nanometers.
This is equivalent to multiplying the image coordinates of the given pixel with the pixel size.
:param i: the pixel index
:return: a tuple of x and y coordinates.
:rtype: Tuple[float]
:raises KeyError: if the .imzML file does not specify the attributes "pixel size x" and "pixel size y"
"""
try:
pixel_size_x = self.imzmldict["pixel size x"]
|
python
|
{
"resource": ""
}
|
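The row above stops mid-`try`. A self-contained sketch of the computation the docstring describes (image coordinates multiplied by the pixel size), with the parser's attributes passed in explicitly since the real class is not shown in full:

```python
def get_physical_coordinates(coordinates, imzmldict, i):
    """Return (x, y) real-world coordinates for pixel index i (illustrative sketch)."""
    try:
        pixel_size_x = imzmldict["pixel size x"]
        pixel_size_y = imzmldict["pixel size y"]
    except KeyError:
        raise KeyError("imzML metadata is missing 'pixel size x' or 'pixel size y'")
    image_x, image_y = coordinates[i][:2]
    return image_x * pixel_size_x, image_y * pixel_size_y
```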
q3728
|
ImzMLParser.getspectrum
|
train
|
def getspectrum(self, index):
"""
Reads the spectrum at specified index from the .ibd file.
:param index:
Index of the desired spectrum in the .imzML file
Output:
mz_array: numpy.ndarray
Sequence of m/z values representing the horizontal axis of the desired
|
python
|
{
"resource": ""
}
|
q3729
|
ImzMLWriter._read_mz
|
train
|
def _read_mz(self, mz_offset, mz_len, mz_enc_len):
'''reads a mz array from the currently open ibd file'''
self.ibd.seek(mz_offset)
|
python
|
{
"resource": ""
}
|
q3730
|
ImzMLWriter.addSpectrum
|
train
|
def addSpectrum(self, mzs, intensities, coords, userParams=[]):
"""
Add a mass spectrum to the file.
:param mz:
mz array
:param intensities:
intensity array
:param coords:
* 2-tuple of x and y position OR
* 3-tuple of x, y, and z position
note some applications want coords to be 1-indexed
"""
# must be rounded now to allow comparisons to later data
# but don't waste CPU time in continuous mode since the data will not be used anyway
if self.mode != "continuous" or self.first_mz is None:
mzs = self.mz_compression.rounding(mzs)
intensities = self.intensity_compression.rounding(intensities)
if self.mode == "continuous":
if self.first_mz is None:
self.first_mz = self._encode_and_write(mzs, self.mz_dtype, self.mz_compression)
mz_data = self.first_mz
elif self.mode == "processed":
mz_data = self._encode_and_write(mzs, self.mz_dtype, self.mz_compression)
elif self.mode == "auto":
mz_data = self._get_previous_mz(mzs)
else:
raise TypeError("Unknown mode: %s"
|
python
|
{
"resource": ""
}
|
q3731
|
initialise
|
train
|
def initialise():
"""
Detects, prompts and initialises the project.
Stores project and tool configuration in the `changes` module.
"""
global settings, project_settings
# Global changes settings
|
python
|
{
"resource": ""
}
|
q3732
|
build_distributions
|
train
|
def build_distributions(context):
"""Builds package distributions"""
rmtree('dist', ignore_errors=True)
build_package_command = 'python setup.py clean sdist bdist_wheel'
result = shell.dry_run(build_package_command, context.dry_run)
packages = Path('dist').files() if not context.dry_run else "nothing"
|
python
|
{
"resource": ""
}
|
q3733
|
install_package
|
train
|
def install_package(context):
"""Attempts to install the sdist and wheel."""
if not context.dry_run and build_distributions(context):
with util.mktmpdir() as tmp_dir:
venv.create_venv(tmp_dir=tmp_dir)
for distribution in Path('dist').files():
try:
venv.install(distribution, tmp_dir)
log.info('Successfully installed %s', distribution)
if context.test_command and verification.run_test_command(context):
log.info(
|
python
|
{
"resource": ""
}
|
q3734
|
upload_package
|
train
|
def upload_package(context):
"""Uploads your project packages to pypi with twine."""
if not context.dry_run and build_distributions(context):
upload_args = 'twine upload '
upload_args += ' '.join(Path('dist').files())
if context.pypi:
upload_args += ' -r %s' % context.pypi
upload_result = shell.dry_run(upload_args, context.dry_run)
if not context.dry_run and not upload_result:
|
python
|
{
"resource": ""
}
|
q3735
|
install_from_pypi
|
train
|
def install_from_pypi(context):
"""Attempts to install your package from pypi."""
tmp_dir = venv.create_venv()
install_cmd = '%s/bin/pip install %s' % (tmp_dir, context.module_name)
package_index = 'pypi'
if context.pypi:
install_cmd += '-i %s' % context.pypi
package_index = context.pypi
try:
result = shell.dry_run(install_cmd, context.dry_run)
if not context.dry_run and not result:
log.error(
'Failed to install %s from %s', context.module_name, package_index
)
|
python
|
{
"resource": ""
}
|
q3736
|
report_and_raise
|
train
|
def report_and_raise(probe_name, probe_result, failure_msg):
"""Logs the probe result and raises on failure"""
|
python
|
{
"resource": ""
}
|
q3737
|
probe_project
|
train
|
def probe_project(python_module):
"""
Check if the project meets `changes` requirements.
Complain and exit otherwise.
"""
log.info('Checking project for changes requirements.')
return (
has_tools()
and has_setup()
|
python
|
{
"resource": ""
}
|
q3738
|
publish
|
train
|
def publish(context):
"""Publishes the project"""
commit_version_change(context)
if context.github:
# github token
project_settings = project_config(context.module_name)
if not project_settings['gh_token']:
click.echo('You need a GitHub token for changes to create a release.')
click.pause(
'Press [enter] to launch the GitHub "New personal access '
'token" page, to create a token for changes.'
)
click.launch('https://github.com/settings/tokens/new')
project_settings['gh_token'] = click.prompt('Enter your changes token')
store_settings(context.module_name, project_settings)
description = click.prompt('Describe this release')
upload_url = create_github_release(
context, project_settings['gh_token'], description
|
python
|
{
"resource": ""
}
|
q3739
|
perform_release
|
train
|
def perform_release(context):
"""Executes the release process."""
try:
run_tests()
if not context.skip_changelog:
generate_changelog(context)
increment_version(context)
build_distributions(context)
install_package(context)
|
python
|
{
"resource": ""
}
|
q3740
|
extract_attribute
|
train
|
def extract_attribute(module_name, attribute_name):
"""Extract metatdata property from a module"""
with open('%s/__init__.py' % module_name) as input_file:
for line in input_file:
|
python
|
{
"resource": ""
}
|
q3741
|
replace_attribute
|
train
|
def replace_attribute(module_name, attribute_name, new_value, dry_run=True):
"""Update a metadata attribute"""
init_file = '%s/__init__.py' % module_name
_, tmp_file = tempfile.mkstemp()
with open(init_file) as input_file:
with open(tmp_file, 'w')
|
python
|
{
"resource": ""
}
|
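The `replace_attribute` snippet breaks off inside the nested `with`. A hedged sketch of how such a rewrite is commonly finished, writing to a temp file and moving it into place only when not in dry-run mode (the exact attribute line format is an assumption):

```python
import os
import shutil
import tempfile

def replace_attribute(module_name, attribute_name, new_value, dry_run=True):
    """Rewrite lines starting with attribute_name in <module>/__init__.py (sketch)."""
    init_file = '%s/__init__.py' % module_name
    fd, tmp_file = tempfile.mkstemp()
    os.close(fd)
    with open(init_file) as input_file, open(tmp_file, 'w') as output_file:
        for line in input_file:
            if line.startswith(attribute_name):
                line = "%s = '%s'\n" % (attribute_name, new_value)  # assumed format
            output_file.write(line)
    if not dry_run:
        shutil.move(tmp_file, init_file)
```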
q3742
|
has_attribute
|
train
|
def has_attribute(module_name, attribute_name):
"""Is this attribute present?"""
init_file = '%s/__init__.py' % module_name
return any(
|
python
|
{
"resource": ""
}
|
q3743
|
choose_labels
|
train
|
def choose_labels(alternatives):
"""
Prompt the user select several labels from the provided alternatives.
At least one label must be selected.
:param list alternatives: Sequence of options that are available to select from
:return: Several selected labels
"""
if not alternatives:
raise ValueError
if not isinstance(alternatives, list):
raise TypeError
choice_map = OrderedDict(
('{}'.format(i), value) for i, value in enumerate(alternatives, 1)
)
# prepend a termination option
input_terminator = '0'
choice_map.update({input_terminator: '<done>'})
choice_map.move_to_end('0', last=False)
choice_indexes = choice_map.keys()
choice_lines = ['{} - {}'.format(*c) for c in choice_map.items()]
prompt = '\n'.join(
(
'Select labels:',
'\n'.join(choice_lines),
'Choose from {}'.format(', '.join(choice_indexes)),
)
)
user_choices = set()
user_choice = None
while not user_choice == input_terminator:
if user_choices:
note('Selected labels: [{}]'.format(', '.join(user_choices)))
|
python
|
{
"resource": ""
}
|
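The selection loop above is truncated before the prompt itself. Since the surrounding project clearly uses `click`, a plausible continuation might prompt with `click.Choice`; the helper below is hypothetical and not the dataset's code:

```python
import click

def prompt_for_labels(choice_map, prompt, input_terminator='0'):
    """Collect labels until the terminator is entered; require at least one selection."""
    user_choices = set()
    while True:
        choice = click.prompt(prompt, type=click.Choice(list(choice_map)), default=input_terminator)
        if choice == input_terminator:
            if user_choices:
                return user_choices
            click.echo('Please select at least one label.')
            continue
        user_choices.add(choice_map[choice])
```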
q3744
|
stage
|
train
|
def stage(draft, discard, repo_directory, release_name, release_description):
"""
Stages a release
"""
with work_in(repo_directory):
if discard:
|
python
|
{
"resource": ""
}
|
q3745
|
generate_changelog
|
train
|
def generate_changelog(context):
"""Generates an automatic changelog from your commit messages."""
changelog_content = [
'\n## [%s](%s/compare/%s...%s)\n\n'
% (
context.new_version,
context.repo_url,
context.current_version,
context.new_version,
)
]
git_log_content = None
git_log = 'log --oneline --no-merges --no-color'.split(' ')
try:
git_log_tag = git_log + ['%s..master' % context.current_version]
git_log_content = git(git_log_tag)
log.debug('content: %s' % git_log_content)
except Exception:
log.warn('Error diffing previous version, initial release')
git_log_content = git(git_log)
git_log_content = replace_sha_with_commit_link(context.repo_url, git_log_content)
# turn change log entries into
|
python
|
{
"resource": ""
}
|
q3746
|
extract
|
train
|
def extract(dictionary, keys):
"""
Extract only the specified keys from a dict
:param dictionary: source dictionary
:param keys: list of keys to extract
:return dict:
|
python
|
{
"resource": ""
}
|
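The body of `extract` is not shown; the docstring describes a plain key filter, for which a one-line dict comprehension is the usual sketch:

```python
def extract(dictionary, keys):
    """Return a new dict containing only the requested keys (missing keys are skipped)."""
    return {key: dictionary[key] for key in keys if key in dictionary}

# e.g. extract({'a': 1, 'b': 2}, ['a']) -> {'a': 1}
```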
q3747
|
tag_and_push
|
train
|
def tag_and_push(context):
"""Tags your git repo with the new version number"""
tag_option = '--annotate'
if probe.has_signing_key(context):
tag_option = '--sign'
shell.dry_run(
|
python
|
{
"resource": ""
}
|
q3748
|
dry_run
|
train
|
def dry_run(command, dry_run):
"""Executes a shell command unless the dry run option is set"""
if not dry_run:
cmd_parts = command.split(' ')
# http://plumbum.readthedocs.org/en/latest/local_commands.html#run-and-popen
|
python
|
{
"resource": ""
}
|
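The `dry_run` snippet stops right after splitting the command; its comment points at plumbum. A stdlib-only stand-in using `subprocess` (an explicit substitution, not the project's implementation):

```python
import subprocess

def dry_run(command, dry_run):
    """Execute a shell command unless dry_run is set; return True on success or when skipped."""
    if dry_run:
        return True
    cmd_parts = command.split(' ')
    result = subprocess.run(cmd_parts, capture_output=True, text=True)
    return result.returncode == 0
```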
q3749
|
increment
|
train
|
def increment(version, major=False, minor=False, patch=True):
"""
Increment a semantic version
:param version: str of the version to increment
:param major: bool specifying major level version increment
:param minor: bool specifying minor level version increment
:param patch: bool specifying patch level version increment
:return: str of the incremented version
"""
version = semantic_version.Version(version)
|
python
|
{
"resource": ""
}
|
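`increment` is cut off right after parsing the version with `semantic_version.Version`. A dependency-free sketch of the same bump logic, assuming a plain MAJOR.MINOR.PATCH string (a major bump resets minor and patch, a minor bump resets patch):

```python
def increment(version, major=False, minor=False, patch=True):
    """Increment a MAJOR.MINOR.PATCH version string (plain-stdlib sketch)."""
    major_n, minor_n, patch_n = (int(part) for part in version.split('.')[:3])
    if major:
        return f'{major_n + 1}.0.0'
    if minor:
        return f'{major_n}.{minor_n + 1}.0'
    return f'{major_n}.{minor_n}.{patch_n + 1}'

# e.g. increment('1.2.3', minor=True) -> '1.3.0'
```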
q3750
|
_recursive_gh_get
|
train
|
def _recursive_gh_get(href, items):
"""Recursively get list of GitHub objects.
See https://developer.github.com/v3/guides/traversing-with-pagination/
"""
response = _request('GET', href)
|
python
|
{
"resource": ""
}
|
q3751
|
main
|
train
|
def main(github_token, github_api_url, progress):
"""A CLI to easily manage GitHub releases, assets and references."""
global progress_reporter_cls
progress_reporter_cls.reportProgress = sys.stdout.isatty() and progress
if progress_reporter_cls.reportProgress:
|
python
|
{
"resource": ""
}
|
q3752
|
_update_release_sha
|
train
|
def _update_release_sha(repo_name, tag_name, new_release_sha, dry_run):
"""Update the commit associated with a given release tag.
Since updating a tag commit is not directly possible, this function
does the following steps:
* set the release tag to ``<tag_name>-tmp`` and associate it
with ``new_release_sha``.
* delete tag ``refs/tags/<tag_name>``.
* update the release tag to ``<tag_name>`` and associate it
with ``new_release_sha``.
"""
if new_release_sha is None:
return
refs = get_refs(repo_name, tags=True, pattern="refs/tags/%s" % tag_name)
if not refs:
return
assert len(refs) == 1
# If sha associated with "<tag_name>" is up-to-date, we are done.
previous_release_sha = refs[0]["object"]["sha"]
if previous_release_sha == new_release_sha:
return
tmp_tag_name = tag_name + "-tmp"
# If any, remove leftover temporary tag "<tag_name>-tmp"
refs = get_refs(repo_name, tags=True, pattern="refs/tags/%s" % tmp_tag_name)
if refs:
assert len(refs) == 1
time.sleep(0.1)
gh_ref_delete(repo_name,
"refs/tags/%s" % tmp_tag_name, dry_run=dry_run)
# Update "<tag_name>" release by associating it with the "<tag_name>-tmp"
# and "<new_release_sha>". It will create the temporary tag.
time.sleep(0.1)
patch_release(repo_name, tag_name,
|
python
|
{
"resource": ""
}
|
q3753
|
complexidade
|
train
|
def complexidade(obj):
"""
Returns a value that indicates project health. Currently FinancialIndicator
is used as this value, but in the future it may be computed from other
indicators as well
|
python
|
{
"resource": ""
}
|
q3754
|
details
|
train
|
def details(project):
"""
Project detail endpoint,
Returns project pronac, name,
and indicators with details
"""
indicators = project.indicator_set.all()
indicators_detail = [(indicator_details(i)
for i in indicators)][0]
if not indicators:
indicators_detail = [
{'FinancialIndicator':
{'valor': 0.0,
|
python
|
{
"resource": ""
}
|
q3755
|
indicator_details
|
train
|
def indicator_details(indicator):
"""
Return a dictionary with all metrics in FinancialIndicator;
if there are no values for that indicator, it is filled with default values
"""
metrics = format_metrics_json(indicator)
metrics_list = set(indicator.metrics
|
python
|
{
"resource": ""
}
|
q3756
|
Metrics.get_metric
|
train
|
def get_metric(self, pronac, metric):
"""
Get metric for the project with the given pronac number.
Usage:
>>> metrics.get_metric(pronac_id, 'finance.approved_funds')
"""
assert isinstance(metric, str)
assert '.' in metric, 'metric must declare
|
python
|
{
"resource": ""
}
|
q3757
|
execute_project_models_sql_scripts
|
train
|
def execute_project_models_sql_scripts(force_update=False):
"""
Used to get project information from the MinC database
and convert it to this application's Project models.
Uses bulk_create if the database is clean
"""
# TODO: Remove except and use ignore_conflicts
# on bulk_create when django 2.2. is released
with open(MODEL_FILE, "r") as file_content:
query = file_content.read()
db = db_connector()
query_result = db.execute_pandas_sql_query(query)
db.close()
try:
projects = Project.objects.bulk_create(
(Project(**vals) for vals in query_result.to_dict("records")),
# ignore_conflicts=True available on django 2.2.
)
indicators = [FinancialIndicator(project=p) for p in projects]
FinancialIndicator.objects.bulk_create(indicators)
except IntegrityError:
# happens when there are duplicated projects
LOG("Projects bulk_create failed, creating one by one...")
with transaction.atomic():
|
python
|
{
"resource": ""
}
|
q3758
|
create_finance_metrics
|
train
|
def create_finance_metrics(metrics: list, pronacs: list):
"""
Creates metrics, creating an Indicator if it doesn't already exist.
Metrics are created for projects that are in pronacs and saved in the
database.
args:
metrics: list of names of metrics that will be calculated
pronacs: pronacs in dataset that is used to calculate those metrics
"""
missing = missing_metrics(metrics, pronacs)
print(f"There are {len(missing)} missing metrics!")
processors = mp.cpu_count()
print(f"Using {processors} processors to calculate metrics!")
indicators_qs = FinancialIndicator.objects.filter(
project_id__in=[p for p, _ in missing]
)
indicators = {i.project_id: i for i in indicators_qs}
pool = mp.Pool(processors)
results = [
pool.apply_async(create_metric,
|
python
|
{
"resource": ""
}
|
q3759
|
load_project_metrics
|
train
|
def load_project_metrics():
"""
Create project metrics for financial indicator
Updates them if they already exist
"""
all_metrics = FinancialIndicator.METRICS
for key in all_metrics:
df = getattr(data, key)
pronac = 'PRONAC'
|
python
|
{
"resource": ""
}
|
q3760
|
new_providers
|
train
|
def new_providers(pronac, dt):
"""
Return the percentage of providers of a project
that are new to the providers database.
"""
info = data.providers_info
df = info[info['PRONAC'] == pronac]
providers_count = data.providers_count.to_dict()[0]
new_providers = []
segment_id = None
for _, row in df.iterrows():
cnpj = row['nrCNPJCPF']
cnpj_count = providers_count.get(cnpj, 0)
segment_id = row['idSegmento']
if cnpj_count <= 1:
item_id = row['idPlanilhaAprovacao']
item_name = row['Item']
provider_name = row['nmFornecedor']
new_provider = {
'nome': provider_name,
'cnpj': cnpj,
'itens': {
item_id: {
'nome': item_name,
'tem_comprovante': True
}
}
}
|
python
|
{
"resource": ""
}
|
q3761
|
average_percentage_of_new_providers
|
train
|
def average_percentage_of_new_providers(providers_info, providers_count):
"""
Return the average percentage of new providers
per segment and the average percentage of all projects.
"""
segments_percentages = {}
all_projects_percentages = []
providers_count = providers_count.to_dict()[0]
for _, items in providers_info.groupby('PRONAC'):
cnpj_array = items['nrCNPJCPF'].unique()
new_providers = 0
for cnpj in cnpj_array:
cnpj_count = providers_count.get(cnpj, 0)
if cnpj_count <= 1:
new_providers += 1
segment_id = items.iloc[0]['idSegmento']
new_providers_percent = new_providers / cnpj_array.size
segments_percentages.setdefault(segment_id, [])
|
python
|
{
"resource": ""
}
|
q3762
|
providers_count
|
train
|
def providers_count(df):
"""
Returns total occurrences of each provider
in the database.
"""
providers_count = {}
cnpj_array = df.values
for a in cnpj_array:
cnpj = a[0]
occurrences = providers_count.get(cnpj, 0)
|
python
|
{
"resource": ""
}
|
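The counting loop in `providers_count` is truncated at the increment. A minimal sketch of the same tally over a flat sequence of provider ids (the dataframe handling is left out):

```python
def providers_count(cnpj_values):
    """Count how many times each provider id (CNPJ/CPF) occurs."""
    counts = {}
    for cnpj in cnpj_values:
        counts[cnpj] = counts.get(cnpj, 0) + 1
    return counts
```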
q3763
|
get_providers_info
|
train
|
def get_providers_info(pronac):
"""
Return all info about providers of a
project with the given pronac.
"""
df = data.providers_info
|
python
|
{
"resource": ""
}
|
q3764
|
get_info
|
train
|
def get_info(df, group, info=['mean', 'std']):
"""
Aggregate mean and std with the given group.
"""
|
python
|
{
"resource": ""
}
|
q3765
|
get_salic_url
|
train
|
def get_salic_url(item, prefix, df_values=None):
"""
Mount a salic url for the given item.
"""
url_keys = {
'pronac': 'idPronac',
'uf': 'uf',
'product': 'produto',
'county': 'idmunicipio',
'item_id': 'idPlanilhaItem',
'stage': 'etapa',
}
if df_values:
values = [item[v] for v in df_values]
url_values = dict(
zip(url_keys.keys(), values)
)
else:
url_values = {
"pronac": item["idPronac"],
|
python
|
{
"resource": ""
}
|
q3766
|
has_receipt
|
train
|
def has_receipt(item):
"""
Verify if an item has a receipt.
"""
pronac_id = str(item['idPronac'])
item_id = str(item["idPlanilhaItens"])
|
python
|
{
"resource": ""
}
|
q3767
|
get_segment_projects
|
train
|
def get_segment_projects(segment_id):
"""
Returns all projects from a segment.
"""
df = data.all_items
return (
|
python
|
{
"resource": ""
}
|
q3768
|
receipt
|
train
|
def receipt(df):
"""
Return a dataframe to verify if an item has a receipt.
"""
mutated_df = df[['IdPRONAC', 'idPlanilhaItem']].astype(str)
|
python
|
{
"resource": ""
}
|
q3769
|
update_models
|
train
|
def update_models(ctx, f=False):
"""
Updates local django db projects models using salic database from
MinC
"""
if f:
|
python
|
{
"resource": ""
}
|
q3770
|
FinancialIndicatorManager.create_indicator
|
train
|
def create_indicator(self, project, is_valid, metrics_list):
"""
Creates FinancialIndicator object for a project, calculating
metrics and indicator value
"""
project = Project.objects.get(pronac=project)
indicator, _ = (FinancialIndicator
.objects.update_or_create(project=project))
indicator.is_valid = is_valid
if indicator.is_valid:
p_metrics = metrics_calc.get_project(project.pronac)
for metric_name in metrics_list:
print("calculando a metrica ",
|
python
|
{
"resource": ""
}
|
q3771
|
item_prices
|
train
|
def item_prices(pronac, data):
"""
Verify if a project is an outlier compared
to the other projects in its segment, based
on the price of bought items.
"""
threshold = 0.1
outlier_info = get_outliers_percentage(pronac)
|
python
|
{
"resource": ""
}
|
q3772
|
is_outlier
|
train
|
def is_outlier(df, item_id, segment_id, price):
"""
Verify if an item is an outlier compared to the
other occurrences of the same item, based on its price.
Args:
item_id: idPlanilhaItens
segment_id: idSegmento
price: VlUnitarioAprovado
"""
if (segment_id, item_id) not in df.index:
|
python
|
{
"resource": ""
}
|
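The check itself is missing from the preview. Assuming `df` is indexed by `(idSegmento, idPlanilhaItens)` and aggregates `mean` and `std` of the approved unit price (as the neighbouring rows suggest), a mean-plus-k-sigma sketch could read:

```python
def is_outlier(df, item_id, segment_id, price, k=1.5):
    """Flag a price that exceeds mean + k * std for its (segment, item) pair."""
    if (segment_id, item_id) not in df.index:
        return False  # no reference statistics for this item
    stats = df.loc[(segment_id, item_id)]
    return price > stats['mean'] + k * stats['std']  # k is an assumed threshold
```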
q3773
|
aggregated_relevant_items
|
train
|
def aggregated_relevant_items(raw_df):
"""
Aggregation to calculate mean and std.
"""
df = (
raw_df[['idSegmento', 'idPlanilhaItens', 'VlUnitarioAprovado']]
.groupby(by=['idSegmento', 'idPlanilhaItens'])
|
python
|
{
"resource": ""
}
|
q3774
|
relevant_items
|
train
|
def relevant_items(df):
"""
Dataframe with items used by cultural projects,
filtered by date and price.
"""
start_date = datetime(2013, 1, 1)
df['DataProjeto'] = pd.to_datetime(df['DataProjeto'])
# get only projects
|
python
|
{
"resource": ""
}
|
q3775
|
items_with_price
|
train
|
def items_with_price(raw_df):
"""
Dataframe with price as number.
"""
df = (
raw_df
[['PRONAC', 'idPlanilhaAprovacao', 'Item',
'idPlanilhaItens', 'VlUnitarioAprovado',
'idSegmento', 'DataProjeto', 'idPronac',
|
python
|
{
"resource": ""
}
|
q3776
|
get_outliers_percentage
|
train
|
def get_outliers_percentage(pronac):
"""
Returns the percentage of items
of the project that are outliers.
"""
items = (
data.items_with_price
.groupby(['PRONAC'])
.get_group(pronac)
)
df = data.aggregated_relevant_items
outlier_items = {}
url_prefix = '/prestacao-contas/analisar/comprovante'
for _, item in items.iterrows():
item_id = item['idPlanilhaItens']
price = item['VlUnitarioAprovado']
segment_id = item['idSegmento']
item_name = item['Item']
if is_outlier(df, item_id, segment_id, price):
outlier_items[item_id] = {
'name': item_name,
|
python
|
{
"resource": ""
}
|
q3777
|
common_items
|
train
|
def common_items(df):
"""
Returns the items that are common to all the segments,
in the format | idSegmento | idPlanilhaItens |.
"""
percentage = 0.1
return (
df
.groupby(['idSegmento', 'idPlanilhaItens'])
.count()
.rename(columns={'PRONAC': 'itemOccurrences'})
.sort_values('itemOccurrences', ascending=False)
|
python
|
{
"resource": ""
}
|
q3778
|
common_items_percentage
|
train
|
def common_items_percentage(pronac, seg_common_items):
"""
Returns the percentage of items in a project that are
common in the cultural segment.
"""
if len(seg_common_items) == 0:
return 0
|
python
|
{
"resource": ""
}
|
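Only the guard clause of `common_items_percentage` survives the preview. A hedged sketch that takes the two item collections directly (the real function resolves them from the pronac):

```python
def common_items_percentage(project_items, seg_common_items):
    """Fraction of a project's items that appear in the segment's common-item list."""
    if len(seg_common_items) == 0:
        return 0
    project_items = set(project_items)
    if not project_items:
        return 0
    return len(project_items & set(seg_common_items)) / len(project_items)
```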
q3779
|
common_items_metrics
|
train
|
def common_items_metrics(all_items, common_items):
"""
Calculates the percentage of common items for each project
in each segment and calculates the mean and std of this percentage
for each segment.
"""
segments = common_items.index.unique()
metrics = {}
for seg in segments:
seg_common_items = segment_common_items(seg)
|
python
|
{
"resource": ""
}
|
q3780
|
get_project_items
|
train
|
def get_project_items(pronac):
"""
Returns all items from a project.
"""
df = data.all_items
return (
|
python
|
{
"resource": ""
}
|
q3781
|
segment_common_items
|
train
|
def segment_common_items(segment_id):
"""
Returns all the common items in a segment.
"""
df = data.common_items
return (
|
python
|
{
"resource": ""
}
|
q3782
|
add_info_to_uncommon_items
|
train
|
def add_info_to_uncommon_items(filtered_items, uncommon_items):
"""
Add extra info to the uncommon items.
"""
result = uncommon_items
url_prefix = '/prestacao-contas/analisar/comprovante'
for _, item in filtered_items.iterrows():
item_id = item['idPlanilhaItens']
item_name = uncommon_items[item_id]
|
python
|
{
"resource": ""
}
|
q3783
|
common_items_ratio
|
train
|
def common_items_ratio(pronac, dt):
"""
Calculates the common items of projects in a cultural segment,
calculates the uncommon items of projects in a cultural segment, and
verifies if a project is an outlier compared to the other projects
in its segment.
"""
segment_id = get_segment_id(str(pronac))
metrics = data.common_items_metrics.to_dict(orient='index')[segment_id]
ratio = common_items_percentage(pronac, segment_common_items(segment_id))
# constant that defines the threshold to verify if a project
# is an outlier.
k = 1.5
threshold = metrics['mean'] - k * metrics['std']
uncommon_items = get_uncommon_items(pronac)
pronac_filter = data.all_items['PRONAC'] == pronac
uncommon_items_filter = (
data.all_items['idPlanilhaItens']
.isin(uncommon_items)
)
items_filter = (pronac_filter
|
python
|
{
"resource": ""
}
|
q3784
|
verified_funds
|
train
|
def verified_funds(pronac, dt):
"""
Responsible for detecting anomalies in a project's total verified funds.
"""
dataframe = data.planilha_comprovacao
project = dataframe.loc[dataframe['PRONAC'] == pronac]
segment_id = project.iloc[0]["idSegmento"]
pronac_funds = project[
["idPlanilhaAprovacao", "PRONAC", "vlComprovacao", "idSegmento"]
]
funds_grp = pronac_funds.drop(columns=["idPlanilhaAprovacao"]).groupby(
["PRONAC"]
)
project_funds = funds_grp.sum().loc[pronac]["vlComprovacao"]
segments_info = data.verified_funds_by_segment_agg.to_dict(orient="index")
mean = segments_info[segment_id]["mean"]
|
python
|
{
"resource": ""
}
|
q3785
|
raised_funds_by_project
|
train
|
def raised_funds_by_project(df):
"""
Raised funds organized by project.
"""
df['CaptacaoReal'] = df['CaptacaoReal'].apply(
pd.to_numeric
)
|
python
|
{
"resource": ""
}
|
q3786
|
analyzed_projects
|
train
|
def analyzed_projects(raw_df):
"""
Return all projects that were analyzed.
"""
df = raw_df[['PRONAC', 'proponenteCgcCpf']]
analyzed_projects = df.groupby('proponenteCgcCpf')[
'PRONAC'
|
python
|
{
"resource": ""
}
|
q3787
|
submitted_projects
|
train
|
def submitted_projects(raw_df):
"""
Return all submitted projects.
"""
df = raw_df.astype({'PRONAC': str, 'CgcCpf': str})
submitted_projects = df.groupby('CgcCpf')[
'PRONAC'
|
python
|
{
"resource": ""
}
|
q3788
|
raised_funds
|
train
|
def raised_funds(pronac, data):
"""
Returns the total raised funds of a project
with the given pronac and if this project is an
outlier based on this value.
"""
is_outlier, mean, std, total_raised_funds = get_outlier_info(pronac)
maximum_expected_funds = gaussian_outlier.maximum_expected_value(mean, std)
return
|
python
|
{
"resource": ""
}
|
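`raised_funds` relies on a `gaussian_outlier.maximum_expected_value` helper that is not shown. A common formulation, given here only as an assumed sketch, bounds the expected range at a few standard deviations above the mean:

```python
def maximum_expected_value(mean, std, k=3):
    """Upper bound of the expected range under a normal assumption (k is assumed)."""
    return mean + k * std

def is_outlier(value, mean, std, k=3):
    """A value above the maximum expected value is flagged as an outlier."""
    return value > maximum_expected_value(mean, std, k)
```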
q3789
|
segment_raised_funds_average
|
train
|
def segment_raised_funds_average(df):
"""
Return some info about raised funds.
"""
grouped = df.groupby('Segmento')
aggregated = grouped.agg(['mean',
|
python
|
{
"resource": ""
}
|
q3790
|
get_outlier_info
|
train
|
def get_outlier_info(pronac):
"""
Return if a project with the given
pronac is an outlier based on raised funds.
"""
df = data.planilha_captacao
raised_funds_averages = data.segment_raised_funds_average.to_dict('index')
segment_id = df[df['Pronac'] == pronac]['Segmento'].iloc[0]
mean = raised_funds_averages[segment_id]['mean']
|
python
|
{
"resource": ""
}
|
q3791
|
csv_to_pickle
|
train
|
def csv_to_pickle(path=ROOT / "raw", clean=False):
"""Convert all CSV files in path to pickle."""
for file in os.listdir(path):
base, ext = os.path.splitext(file)
if ext != ".csv":
continue
LOG(f"converting {file} to pickle")
df
|
python
|
{
"resource": ""
}
|
q3792
|
Loader.store
|
train
|
def store(self, loc, df):
"""Store dataframe in the given location.
Store some arbitrary dataframe:
>>> data.store('my_data', df)
Now recover it from the global store.
>>> data.my_data
...
|
python
|
{
"resource": ""
}
|
q3793
|
StocksInfo.close_databases
|
train
|
def close_databases(self):
""" Close all database sessions """
if self.gc_book:
self.gc_book.close()
|
python
|
{
"resource": ""
}
|
q3794
|
StocksInfo.load_stock_quantity
|
train
|
def load_stock_quantity(self, symbol: str) -> Decimal(0):
""" retrieves stock quantity """
book = self.get_gc_book()
collection = SecuritiesAggregate(book)
|
python
|
{
"resource": ""
}
|
q3795
|
StocksInfo.get_gc_book
|
train
|
def get_gc_book(self):
""" Returns the GnuCash db session """
if not self.gc_book:
gc_db = self.config.get(ConfigKeys.gnucash_book_path)
if not gc_db:
raise AttributeError("GnuCash book path not configured.")
# check if this is the abs file exists
if not os.path.isabs(gc_db):
gc_db = resource_filename(
Requirement.parse("Asset-Allocation"), gc_db)
|
python
|
{
"resource": ""
}
|
q3796
|
StocksInfo.get_symbols_with_positive_balances
|
train
|
def get_symbols_with_positive_balances(self) -> List[str]:
""" Identifies all the securities with positive balances """
from gnucash_portfolio import BookAggregate
holdings = []
with BookAggregate() as book:
# query = book.securities.query.filter(Commodity.)
holding_entities = book.securities.get_all()
for item in holding_entities:
# Check holding balance
agg = book.securities.get_aggregate(item)
|
python
|
{
"resource": ""
}
|
q3797
|
StocksInfo.__get_pricedb_session
|
train
|
def __get_pricedb_session(self):
""" Provides initialization and access to module-level session """
|
python
|
{
"resource": ""
}
|
q3798
|
add
|
train
|
def add(assetclass: int, symbol: str):
""" Add a stock to an asset class """
assert isinstance(symbol, str)
assert isinstance(assetclass, int)
symbol = symbol.upper()
app = AppAggregate()
|
python
|
{
"resource": ""
}
|
q3799
|
unallocated
|
train
|
def unallocated():
""" Identify unallocated holdings """
app = AppAggregate()
app.logger = logger
unalloc = app.find_unallocated_holdings()
if not unalloc:
|
python
|
{
"resource": ""
}
|