_id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 75-19.8k chars) | language (string, 1 class) | meta_information (dict)
---|---|---|---|---|---
q3700
|
PickleInterface.recv
|
train
|
def recv(self, blocking=True):
"""Receive the next object from the socket"""
length = struct.unpack("<I", self.sock.recv(4))[0]
return self._get_next_obj(length)
|
python
|
{
"resource": ""
}
|
q3701
|
PickleInterface.try_recv
|
train
|
def try_recv(self):
"""Return None immediately if nothing is waiting"""
try:
lenstr = self.sock.recv(4, socket.MSG_DONTWAIT)
except socket.error:
return None
if len(lenstr) < 4:
raise EOFError("Socket closed")
length = struct.unpack("<I", lenstr)[0]
return self._get_next_obj(length)
|
python
|
{
"resource": ""
}
|
q3702
|
PickleInterface._get_next_obj
|
train
|
def _get_next_obj(self, length):
"""Assumes we've already read the object length"""
data = b''
while len(data) < length:
data += self.sock.recv(length - len(data))
return pickle.loads(data)
|
python
|
{
"resource": ""
}
|
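The three PickleInterface methods above implement a simple length-prefixed protocol: a 4-byte little-endian length (struct format `"<I"`) followed by the pickled payload, reassembled in `_get_next_obj`. The sending side is not included in this table; a minimal sketch of such a method, assuming the same framing and the same `self.sock` socket attribute, could look like this:

```python
import pickle
import struct


def send(self, obj):
    """Hypothetical PickleInterface.send counterpart: pickle the object and
    prefix it with its 4-byte little-endian length, matching recv() above."""
    payload = pickle.dumps(obj)
    # sendall() retries partial writes, mirroring the read loop in _get_next_obj()
    self.sock.sendall(struct.pack("<I", len(payload)) + payload)
```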
q3703
|
Dut.get_modules
|
train
|
def get_modules(self, type_name):
'''Getting modules by type name.
Parameters
----------
type_name : string
Type name of the modules to be returned.
Returns
-------
List of modules of given type name else empty list.
'''
modules = []
for module in self:
if module.__class__.__name__ == type_name:
modules.append(module)
return modules
|
python
|
{
"resource": ""
}
|
q3704
|
HardwareLayer.wait_for_ready
|
train
|
def wait_for_ready(self, timeout=None, times=None, delay=None, delay_between=None, abort=None):
'''Determine the ready state of the device and wait until device is ready.
Parameters
----------
timeout : int, float
The maximum amount of time to wait in seconds. Reaching the timeout will raise a RuntimeError.
times : int
Maximum number of times reading the ready state.
delay : int, float
The number of seconds to sleep before checks. Defaults to 0.
delay_between : int, float
The number of seconds to sleep between each check. Defaults to 0.
abort : Threading.Event
Breaking the loop from other threads.
Returns
-------
True if state is ready, else False.
'''
if delay:
try:
sleep(delay)
except IOError: # negative values
pass
if timeout is not None:
if timeout < 0:
raise ValueError("timeout is smaller than 0")
else:
stop = time() + timeout
times_checked = 0
while not self.is_ready:
now = time()
times_checked += 1
        if abort and abort.is_set():
            return False
        if timeout is not None and stop <= now:
            raise RuntimeError('Time out while waiting for ready in %s, module %s' % (self.name, self.__class__.__module__))
        if times is not None and times_checked >= times:
            return False
if delay_between:
try:
sleep(delay_between)
except IOError: # negative values
pass
return True
|
python
|
{
"resource": ""
}
|
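wait_for_ready combines four concerns: an initial delay, a timeout, a maximum number of checks, and an abort event that another thread can set. A generic, self-contained sketch of the same polling pattern (the helper name, arguments, and defaults below are illustrative, not taken from the snippet above):

```python
from time import sleep, time


def wait_until(predicate, timeout=None, delay_between=0.05, abort=None):
    """Poll a predicate until it returns True, a timeout expires, or an
    abort event (e.g. a threading.Event) is set. Returns True on success."""
    stop = time() + timeout if timeout is not None else None
    while not predicate():
        if abort is not None and abort.is_set():
            return False
        if stop is not None and time() >= stop:
            raise RuntimeError("timed out while waiting")
        sleep(delay_between)
    return True
```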
q3705
|
SiUSBDevice.DownloadXilinx
|
train
|
def DownloadXilinx(self, bitfile):
"""We hijack this call to perform the socket connect"""
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.connect((self.simulation_host, self.simulation_port))
self._iface = PickleInterface(self._sock)
return True
|
python
|
{
"resource": ""
}
|
q3706
|
SussProber.set_position
|
train
|
def set_position(self, x, y, speed=None):
''' Move chuck to absolute position in um'''
if speed:
self._intf.write('MoveChuckSubsite %1.1f %1.1f R Y %d' % (x, y, speed))
else:
self._intf.write('MoveChuckSubsite %1.1f %1.1f R Y' % (x, y))
|
python
|
{
"resource": ""
}
|
q3707
|
SussProber.move_position
|
train
|
def move_position(self, dx, dy, speed=None):
''' Move chuck relative to actual position in um'''
if speed:
self._intf.write('MoveChuckPosition %1.1f %1.1f R Y %d' % (dx, dy, speed))
else:
self._intf.write('MoveChuckPosition %1.1f %1.1f R Y' % (dx, dy))
|
python
|
{
"resource": ""
}
|
q3708
|
SussProber.get_die
|
train
|
def get_die(self):
    ''' Read the current wafer map chip (die) index of the chuck'''
reply = self._intf.query('ReadMapPosition')
values = reply[2:].split(' ')
return (int(values[0]), int(values[1]))
|
python
|
{
"resource": ""
}
|
q3709
|
TrackRegister.clear
|
train
|
def clear(self):
'Clear tracks in memory - all zero'
for track in self._tracks:
self._tracks[track].setall(False)
|
python
|
{
"resource": ""
}
|
q3710
|
sitcp_fifo.set_data
|
train
|
def set_data(self, data):
    ''' Send data via TCP.
Parameters
----------
data : array
Array of unsigned integers (32 bit).
'''
data = array.array('B', struct.unpack("{}B".format(len(data) * 4), struct.pack("{}I".format(len(data)), *data)))
self._intf._send_tcp_data(data)
|
python
|
{
"resource": ""
}
|
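set_data above only re-expresses 32-bit unsigned integers as raw bytes in native byte order before handing them to the TCP interface. A standalone sketch of that conversion shows what actually ends up on the wire (values are arbitrary examples):

```python
import array
import struct

words = [1, 0x01020304]  # example 32-bit unsigned integers
packed = struct.pack("{}I".format(len(words)), *words)  # native-endian byte string
data = array.array('B', struct.unpack("{}B".format(len(words) * 4), packed))

# On a little-endian machine this prints: array('B', [1, 0, 0, 0, 4, 3, 2, 1])
print(data)
```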
q3711
|
Pixel.program_global_reg
|
train
|
def program_global_reg(self):
"""
Send the global register to the chip.
Loads the values of self['GLOBAL_REG'] onto the chip.
Includes enabling the clock, and loading the Control (CTR)
and DAC shadow registers.
"""
self._clear_strobes()
gr_size = len(self['GLOBAL_REG'][:]) # get the size
self['SEQ']['SHIFT_IN'][0:gr_size] = self['GLOBAL_REG'][:] # this will be shifted out
self['SEQ']['GLOBAL_SHIFT_EN'][0:gr_size] = bitarray(gr_size * '1') # this is to enable clock
self['SEQ']['GLOBAL_CTR_LD'][gr_size + 1:gr_size + 2] = bitarray("1") # load signals
self['SEQ']['GLOBAL_DAC_LD'][gr_size + 1:gr_size + 2] = bitarray("1")
# Execute the program (write bits to output pins)
# + 1 extra 0 bit so that everything ends on LOW instead of HIGH
self._run_seq(gr_size + 3)
|
python
|
{
"resource": ""
}
|
q3712
|
Pixel.program_pixel_reg
|
train
|
def program_pixel_reg(self, enable_receiver=True):
"""
Send the pixel register to the chip and store the output.
Loads the values of self['PIXEL_REG'] onto the chip.
Includes enabling the clock, and loading the Control (CTR)
and DAC shadow registers.
if(enable_receiver), stores the output (by byte) in
self['DATA'], retrievable via `chip['DATA'].get_data()`.
"""
self._clear_strobes()
    # enable the receiver; it works only if the pixel register is enabled/clocked
self['PIXEL_RX'].set_en(enable_receiver)
px_size = len(self['PIXEL_REG'][:]) # get the size
self['SEQ']['SHIFT_IN'][0:px_size] = self['PIXEL_REG'][:] # this will be shifted out
self['SEQ']['PIXEL_SHIFT_EN'][0:px_size] = bitarray(px_size * '1') # this is to enable clock
self._run_seq(px_size + 1)
|
python
|
{
"resource": ""
}
|
q3713
|
BaseRemoteWorkflowProxy.dump_submission_data
|
train
|
def dump_submission_data(self):
"""
Dumps the current submission data to the submission file.
"""
# renew the dashboard config
self.submission_data["dashboard_config"] = self.dashboard.get_persistent_config()
# write the submission data to the output file
self._outputs["submission"].dump(self.submission_data, formatter="json", indent=4)
|
python
|
{
"resource": ""
}
|
q3714
|
BaseRemoteWorkflowProxy.run
|
train
|
def run(self):
"""
Actual run method that starts the processing of jobs and initiates the status polling, or
performs job cancelling or cleaning, depending on the task parameters.
"""
task = self.task
self._outputs = self.output()
# create the job dashboard interface
self.dashboard = task.create_job_dashboard() or NoJobDashboard()
# read submission data and reset some values
submitted = not task.ignore_submission and self._outputs["submission"].exists()
if submitted:
self.submission_data.update(self._outputs["submission"].load(formatter="json"))
task.tasks_per_job = self.submission_data.tasks_per_job
self.dashboard.apply_config(self.submission_data.dashboard_config)
# when the branch outputs, i.e. the "collection" exists, just create dummy control outputs
if "collection" in self._outputs and self._outputs["collection"].exists():
self.touch_control_outputs()
# cancel jobs?
elif self._cancel_jobs:
if submitted:
self.cancel()
# cleanup jobs?
elif self._cleanup_jobs:
if submitted:
self.cleanup()
# submit and/or wait while polling
else:
# maybe set a tracking url
tracking_url = self.dashboard.create_tracking_url()
if tracking_url:
task.set_tracking_url(tracking_url)
# ensure the output directory exists
if not submitted:
self._outputs["submission"].parent.touch()
# at this point, when the status file exists, it is considered outdated
if "status" in self._outputs:
self._outputs["status"].remove()
try:
# instantiate the configured job file factory, not kwargs yet
self.job_file_factory = self.create_job_file_factory()
# submit
if not submitted:
# set the initial list of unsubmitted jobs
branches = sorted(task.branch_map.keys())
branch_chunks = list(iter_chunks(branches, task.tasks_per_job))
self.submission_data.unsubmitted_jobs = OrderedDict(
(i + 1, branches) for i, branches in enumerate(branch_chunks)
)
self.submit()
# sleep once to give the job interface time to register the jobs
post_submit_delay = self._get_task_attribute("post_submit_delay")()
if post_submit_delay:
time.sleep(post_submit_delay)
# start status polling when a) no_poll is not set, or b) the jobs were already
# submitted so that failed jobs are resubmitted after a single polling iteration
if not task.no_poll or submitted:
self.poll()
finally:
# in any event, cleanup the job file
if self.job_file_factory:
self.job_file_factory.cleanup_dir(force=False)
|
python
|
{
"resource": ""
}
|
q3715
|
BaseRemoteWorkflowProxy.cancel
|
train
|
def cancel(self):
"""
Cancels running jobs. The job ids are read from the submission file which has to exist
for obvious reasons.
"""
task = self.task
# get job ids from submission data
job_ids = [
d["job_id"] for d in self.submission_data.jobs.values()
if d["job_id"] not in (self.submission_data.dummy_job_id, None)
]
if not job_ids:
return
# cancel jobs
task.publish_message("going to cancel {} jobs".format(len(job_ids)))
errors = self.job_manager.cancel_batch(job_ids)
# print errors
if errors:
print("{} error(s) occured while cancelling {} job(s) of task {}:".format(
len(errors), len(job_ids), task.task_id))
tmpl = " {}"
for i, err in enumerate(errors):
print(tmpl.format(err))
if i + 1 >= self.show_errors:
remaining = len(errors) - self.show_errors
if remaining > 0:
print(" ... and {} more".format(remaining))
break
# inform the dashboard
for job_num, job_data in six.iteritems(self.submission_data.jobs):
task.forward_dashboard_event(self.dashboard, job_data, "action.cancel", job_num)
|
python
|
{
"resource": ""
}
|
q3716
|
BaseRemoteWorkflowProxy.cleanup
|
train
|
def cleanup(self):
"""
Cleans up jobs on the remote run location. The job ids are read from the submission file
which has to exist for obvious reasons.
"""
task = self.task
# get job ids from submission data
job_ids = [
d["job_id"] for d in self.submission_data.jobs.values()
if d["job_id"] not in (self.submission_data.dummy_job_id, None)
]
if not job_ids:
return
# cleanup jobs
task.publish_message("going to cleanup {} jobs".format(len(job_ids)))
errors = self.job_manager.cleanup_batch(job_ids)
# print errors
if errors:
print("{} error(s) occured while cleaning up {} job(s) of task {}:".format(
len(errors), len(job_ids), task.task_id))
tmpl = " {}"
for i, err in enumerate(errors):
print(tmpl.format(err))
if i + 1 >= self.show_errors:
remaining = len(errors) - self.show_errors
if remaining > 0:
print(" ... and {} more".format(remaining))
break
|
python
|
{
"resource": ""
}
|
q3717
|
BaseRemoteWorkflowProxy.touch_control_outputs
|
train
|
def touch_control_outputs(self):
"""
Creates and saves dummy submission and status files. This method is called in case the
collection of branch task outputs exists.
"""
task = self.task
# create the parent directory
self._outputs["submission"].parent.touch()
# get all branch indexes and chunk them by tasks_per_job
branch_chunks = list(iter_chunks(task.branch_map.keys(), task.tasks_per_job))
# submission output
if not self._outputs["submission"].exists():
submission_data = self.submission_data.copy()
# set dummy submission data
submission_data.jobs.clear()
for i, branches in enumerate(branch_chunks):
job_num = i + 1
submission_data.jobs[job_num] = self.submission_data_cls.job_data(branches=branches)
self._outputs["submission"].dump(submission_data, formatter="json", indent=4)
# status output
if "status" in self._outputs and not self._outputs["status"].exists():
status_data = self.status_data_cls()
# set dummy status data
for i, branches in enumerate(branch_chunks):
job_num = i + 1
status_data.jobs[job_num] = self.status_data_cls.job_data(
status=self.job_manager.FINISHED, code=0)
self._outputs["status"].dump(status_data, formatter="json", indent=4)
|
python
|
{
"resource": ""
}
|
q3718
|
TeeStream._flush
|
train
|
def _flush(self):
"""
Flushes all registered consumer streams.
"""
for consumer in self.consumers:
if not getattr(consumer, "closed", False):
consumer.flush()
|
python
|
{
"resource": ""
}
|
q3719
|
get_voms_proxy_user
|
train
|
def get_voms_proxy_user():
"""
Returns the owner of the voms proxy.
"""
out = _voms_proxy_info(["--identity"])[1].strip()
try:
return re.match(r".*\/CN\=([^\/]+).*", out.strip()).group(1)
except:
raise Exception("no valid identity found in voms proxy: {}".format(out))
|
python
|
{
"resource": ""
}
|
q3720
|
JobArguments.get_args
|
train
|
def get_args(self):
"""
Returns the list of encoded job arguments. The order of this list corresponds to the
arguments expected by the job wrapper script.
"""
return [
self.task_cls.__module__,
self.task_cls.__name__,
self.encode_list(self.task_params),
self.encode_list(self.branches),
self.encode_bool(self.auto_retry),
self.encode_list(self.dashboard_data),
]
|
python
|
{
"resource": ""
}
|
q3721
|
patch_all
|
train
|
def patch_all():
"""
Runs all patches. This function ensures that a second invocation has no effect.
"""
global _patched
if _patched:
return
_patched = True
patch_default_retcodes()
patch_worker_run_task()
patch_worker_factory()
patch_keepalive_run()
patch_cmdline_parser()
logger.debug("applied law-specific luigi patches")
|
python
|
{
"resource": ""
}
|
q3722
|
patch_worker_run_task
|
train
|
def patch_worker_run_task():
"""
Patches the ``luigi.worker.Worker._run_task`` method to store the worker id and the id of its
    first task in the task. This information is required by the sandboxing mechanism.
"""
_run_task = luigi.worker.Worker._run_task
def run_task(self, task_id):
task = self._scheduled_tasks[task_id]
task._worker_id = self._id
task._worker_task = self._first_task
try:
_run_task(self, task_id)
finally:
task._worker_id = None
task._worker_task = None
# make worker disposable when sandboxed
if os.getenv("LAW_SANDBOX_SWITCHED") == "1":
self._start_phasing_out()
luigi.worker.Worker._run_task = run_task
|
python
|
{
"resource": ""
}
|
q3723
|
patch_worker_factory
|
train
|
def patch_worker_factory():
"""
Patches the ``luigi.interface._WorkerSchedulerFactory`` to include sandboxing information when
    creating a worker instance.
"""
def create_worker(self, scheduler, worker_processes, assistant=False):
worker = luigi.worker.Worker(scheduler=scheduler, worker_processes=worker_processes,
assistant=assistant, worker_id=os.getenv("LAW_SANDBOX_WORKER_ID"))
worker._first_task = os.getenv("LAW_SANDBOX_WORKER_TASK")
return worker
luigi.interface._WorkerSchedulerFactory.create_worker = create_worker
|
python
|
{
"resource": ""
}
|
q3724
|
patch_keepalive_run
|
train
|
def patch_keepalive_run():
"""
Patches the ``luigi.worker.KeepAliveThread.run`` to immediately stop the keep-alive thread when
running within a sandbox.
"""
_run = luigi.worker.KeepAliveThread.run
def run(self):
# do not run the keep-alive loop when sandboxed
if os.getenv("LAW_SANDBOX_SWITCHED") == "1":
self.stop()
else:
_run(self)
luigi.worker.KeepAliveThread.run = run
|
python
|
{
"resource": ""
}
|
q3725
|
run
|
train
|
def run():
"""
Entry point to the law cli. Sets up all parsers, parses all arguments, and executes the
requested subprogram.
"""
# setup the main parser and sub parsers
parser = ArgumentParser(prog="law", description="The law command line tool.")
sub_parsers = parser.add_subparsers(help="subcommands", dest="command")
# add main arguments
parser.add_argument("--version", "-V", action="version", version=law.__version__)
# setup all progs
mods = {}
for prog in progs:
mods[prog] = import_module("law.cli." + prog)
mods[prog].setup_parser(sub_parsers)
# parse args and dispatch execution
if len(sys.argv) >= 2 and sys.argv[1] in forward_progs:
args = parser.parse_args(sys.argv[1:3])
else:
args = parser.parse_args()
if args.command:
mods[args.command].execute(args)
else:
parser.print_help()
|
python
|
{
"resource": ""
}
|
q3726
|
BaseWorkflowProxy.output
|
train
|
def output(self):
"""
Returns the default workflow outputs in an ordered dictionary. At the moment this is just
the collection of outputs of the branch tasks, stored with the key ``"collection"``.
"""
if self.task.target_collection_cls is not None:
cls = self.task.target_collection_cls
elif self.task.outputs_siblings:
cls = SiblingFileCollection
else:
cls = TargetCollection
targets = luigi.task.getpaths(self.task.get_branch_tasks())
collection = cls(targets, threshold=self.threshold(len(targets)))
return OrderedDict([("collection", collection)])
|
python
|
{
"resource": ""
}
|
q3727
|
ImzMLParser.get_physical_coordinates
|
train
|
def get_physical_coordinates(self, i):
"""
For a pixel index i, return the real-world coordinates in nanometers.
This is equivalent to multiplying the image coordinates of the given pixel with the pixel size.
:param i: the pixel index
:return: a tuple of x and y coordinates.
:rtype: Tuple[float]
:raises KeyError: if the .imzML file does not specify the attributes "pixel size x" and "pixel size y"
"""
try:
pixel_size_x = self.imzmldict["pixel size x"]
pixel_size_y = self.imzmldict["pixel size y"]
except KeyError:
raise KeyError("Could not find all pixel size attributes in imzML file")
image_x, image_y = self.coordinates[i][:2]
return image_x * pixel_size_x, image_y * pixel_size_y
|
python
|
{
"resource": ""
}
|
q3728
|
ImzMLParser.getspectrum
|
train
|
def getspectrum(self, index):
"""
Reads the spectrum at specified index from the .ibd file.
:param index:
Index of the desired spectrum in the .imzML file
Output:
mz_array: numpy.ndarray
Sequence of m/z values representing the horizontal axis of the desired mass
spectrum
intensity_array: numpy.ndarray
Sequence of intensity values corresponding to mz_array
"""
mz_bytes, intensity_bytes = self.get_spectrum_as_string(index)
mz_array = np.frombuffer(mz_bytes, dtype=self.mzPrecision)
intensity_array = np.frombuffer(intensity_bytes, dtype=self.intensityPrecision)
return mz_array, intensity_array
|
python
|
{
"resource": ""
}
|
q3729
|
ImzMLWriter._read_mz
|
train
|
def _read_mz(self, mz_offset, mz_len, mz_enc_len):
'''reads a mz array from the currently open ibd file'''
self.ibd.seek(mz_offset)
data = self.ibd.read(mz_enc_len)
self.ibd.seek(0, 2)
data = self.mz_compression.decompress(data)
    return tuple(np.frombuffer(data, dtype=self.mz_dtype))  # np.fromstring is deprecated for binary data
|
python
|
{
"resource": ""
}
|
q3730
|
ImzMLWriter.addSpectrum
|
train
|
def addSpectrum(self, mzs, intensities, coords, userParams=[]):
"""
Add a mass spectrum to the file.
:param mz:
mz array
:param intensities:
intensity array
:param coords:
* 2-tuple of x and y position OR
* 3-tuple of x, y, and z position
note some applications want coords to be 1-indexed
"""
# must be rounded now to allow comparisons to later data
# but don't waste CPU time in continuous mode since the data will not be used anyway
if self.mode != "continuous" or self.first_mz is None:
mzs = self.mz_compression.rounding(mzs)
intensities = self.intensity_compression.rounding(intensities)
if self.mode == "continuous":
if self.first_mz is None:
self.first_mz = self._encode_and_write(mzs, self.mz_dtype, self.mz_compression)
mz_data = self.first_mz
elif self.mode == "processed":
mz_data = self._encode_and_write(mzs, self.mz_dtype, self.mz_compression)
elif self.mode == "auto":
mz_data = self._get_previous_mz(mzs)
else:
raise TypeError("Unknown mode: %s" % self.mode)
mz_offset, mz_len, mz_enc_len = mz_data
int_offset, int_len, int_enc_len = self._encode_and_write(intensities, self.intensity_dtype, self.intensity_compression)
mz_min = np.min(mzs)
mz_max = np.max(mzs)
ix_max = np.argmax(intensities)
mz_base = mzs[ix_max]
int_base = intensities[ix_max]
int_tic = np.sum(intensities)
s = _Spectrum(coords, mz_len, mz_offset, mz_enc_len, int_len, int_offset, int_enc_len, mz_min, mz_max, mz_base, int_base, int_tic, userParams)
self.spectra.append(s)
|
python
|
{
"resource": ""
}
|
q3731
|
initialise
|
train
|
def initialise():
"""
Detects, prompts and initialises the project.
Stores project and tool configuration in the `changes` module.
"""
global settings, project_settings
# Global changes settings
settings = Changes.load()
# Project specific settings
project_settings = Project.load(GitHubRepository(auth_token=settings.auth_token))
|
python
|
{
"resource": ""
}
|
q3732
|
build_distributions
|
train
|
def build_distributions(context):
"""Builds package distributions"""
rmtree('dist', ignore_errors=True)
build_package_command = 'python setup.py clean sdist bdist_wheel'
result = shell.dry_run(build_package_command, context.dry_run)
    packages = Path('dist').files() if not context.dry_run else ['nothing']  # keep a list so the join below works in dry runs
if not result:
raise Exception('Error building packages: %s' % result)
else:
log.info('Built %s' % ', '.join(packages))
return packages
|
python
|
{
"resource": ""
}
|
q3733
|
install_package
|
train
|
def install_package(context):
"""Attempts to install the sdist and wheel."""
if not context.dry_run and build_distributions(context):
with util.mktmpdir() as tmp_dir:
venv.create_venv(tmp_dir=tmp_dir)
for distribution in Path('dist').files():
try:
venv.install(distribution, tmp_dir)
log.info('Successfully installed %s', distribution)
if context.test_command and verification.run_test_command(context):
log.info(
'Successfully ran test command: %s', context.test_command
)
except Exception as e:
raise Exception(
'Error installing distribution %s' % distribution, e
)
else:
log.info('Dry run, skipping installation')
|
python
|
{
"resource": ""
}
|
q3734
|
upload_package
|
train
|
def upload_package(context):
"""Uploads your project packages to pypi with twine."""
if not context.dry_run and build_distributions(context):
upload_args = 'twine upload '
upload_args += ' '.join(Path('dist').files())
if context.pypi:
upload_args += ' -r %s' % context.pypi
upload_result = shell.dry_run(upload_args, context.dry_run)
if not context.dry_run and not upload_result:
raise Exception('Error uploading: %s' % upload_result)
else:
log.info(
'Successfully uploaded %s:%s', context.module_name, context.new_version
)
else:
log.info('Dry run, skipping package upload')
|
python
|
{
"resource": ""
}
|
q3735
|
install_from_pypi
|
train
|
def install_from_pypi(context):
"""Attempts to install your package from pypi."""
tmp_dir = venv.create_venv()
install_cmd = '%s/bin/pip install %s' % (tmp_dir, context.module_name)
package_index = 'pypi'
if context.pypi:
        install_cmd += ' -i %s' % context.pypi
package_index = context.pypi
try:
result = shell.dry_run(install_cmd, context.dry_run)
if not context.dry_run and not result:
log.error(
'Failed to install %s from %s', context.module_name, package_index
)
else:
log.info(
'Successfully installed %s from %s', context.module_name, package_index
)
except Exception as e:
error_msg = 'Error installing %s from %s' % (context.module_name, package_index)
log.exception(error_msg)
raise Exception(error_msg, e)
|
python
|
{
"resource": ""
}
|
q3736
|
report_and_raise
|
train
|
def report_and_raise(probe_name, probe_result, failure_msg):
"""Logs the probe result and raises on failure"""
log.info('%s? %s' % (probe_name, probe_result))
if not probe_result:
raise exceptions.ProbeException(failure_msg)
else:
return True
|
python
|
{
"resource": ""
}
|
q3737
|
probe_project
|
train
|
def probe_project(python_module):
"""
Check if the project meets `changes` requirements.
Complain and exit otherwise.
"""
log.info('Checking project for changes requirements.')
return (
has_tools()
and has_setup()
and has_metadata(python_module)
and has_test_runner()
and has_readme()
and has_changelog()
)
|
python
|
{
"resource": ""
}
|
q3738
|
publish
|
train
|
def publish(context):
"""Publishes the project"""
commit_version_change(context)
if context.github:
# github token
project_settings = project_config(context.module_name)
if not project_settings['gh_token']:
click.echo('You need a GitHub token for changes to create a release.')
click.pause(
'Press [enter] to launch the GitHub "New personal access '
'token" page, to create a token for changes.'
)
click.launch('https://github.com/settings/tokens/new')
project_settings['gh_token'] = click.prompt('Enter your changes token')
store_settings(context.module_name, project_settings)
description = click.prompt('Describe this release')
upload_url = create_github_release(
context, project_settings['gh_token'], description
)
upload_release_distributions(
context,
project_settings['gh_token'],
build_distributions(context),
upload_url,
)
click.pause('Press [enter] to review and update your new release')
click.launch(
'{0}/releases/tag/{1}'.format(context.repo_url, context.new_version)
)
else:
tag_and_push(context)
|
python
|
{
"resource": ""
}
|
q3739
|
perform_release
|
train
|
def perform_release(context):
"""Executes the release process."""
try:
run_tests()
if not context.skip_changelog:
generate_changelog(context)
increment_version(context)
build_distributions(context)
install_package(context)
upload_package(context)
install_from_pypi(context)
publish(context)
except Exception:
log.exception('Error releasing')
|
python
|
{
"resource": ""
}
|
q3740
|
extract_attribute
|
train
|
def extract_attribute(module_name, attribute_name):
"""Extract metatdata property from a module"""
with open('%s/__init__.py' % module_name) as input_file:
for line in input_file:
if line.startswith(attribute_name):
return ast.literal_eval(line.split('=')[1].strip())
|
python
|
{
"resource": ""
}
|
q3741
|
replace_attribute
|
train
|
def replace_attribute(module_name, attribute_name, new_value, dry_run=True):
"""Update a metadata attribute"""
init_file = '%s/__init__.py' % module_name
_, tmp_file = tempfile.mkstemp()
with open(init_file) as input_file:
with open(tmp_file, 'w') as output_file:
for line in input_file:
if line.startswith(attribute_name):
line = "%s = '%s'\n" % (attribute_name, new_value)
output_file.write(line)
if not dry_run:
Path(tmp_file).copy(init_file)
else:
log.info(diff(tmp_file, init_file, retcode=None))
|
python
|
{
"resource": ""
}
|
q3742
|
has_attribute
|
train
|
def has_attribute(module_name, attribute_name):
"""Is this attribute present?"""
init_file = '%s/__init__.py' % module_name
return any(
[attribute_name in init_line for init_line in open(init_file).readlines()]
)
|
python
|
{
"resource": ""
}
|
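extract_attribute, replace_attribute, and has_attribute above all treat `<module_name>/__init__.py` as the metadata source. A self-contained usage sketch for extract_attribute against a throwaway package (the package name `demo_pkg` and the temporary directory are made up for illustration):

```python
import ast
import os
import tempfile


def extract_attribute(module_name, attribute_name):
    """Same logic as the snippet above: read the first matching assignment."""
    with open('%s/__init__.py' % module_name) as input_file:
        for line in input_file:
            if line.startswith(attribute_name):
                return ast.literal_eval(line.split('=')[1].strip())


workdir = tempfile.mkdtemp()
pkg_dir = os.path.join(workdir, 'demo_pkg')
os.makedirs(pkg_dir)
with open(os.path.join(pkg_dir, '__init__.py'), 'w') as f:
    f.write("__version__ = '0.1.0'\n")

os.chdir(workdir)
print(extract_attribute('demo_pkg', '__version__'))  # -> 0.1.0
```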
q3743
|
choose_labels
|
train
|
def choose_labels(alternatives):
"""
    Prompt the user to select several labels from the provided alternatives.
At least one label must be selected.
:param list alternatives: Sequence of options that are available to select from
:return: Several selected labels
"""
if not alternatives:
raise ValueError
if not isinstance(alternatives, list):
raise TypeError
choice_map = OrderedDict(
('{}'.format(i), value) for i, value in enumerate(alternatives, 1)
)
# prepend a termination option
input_terminator = '0'
choice_map.update({input_terminator: '<done>'})
choice_map.move_to_end('0', last=False)
choice_indexes = choice_map.keys()
choice_lines = ['{} - {}'.format(*c) for c in choice_map.items()]
prompt = '\n'.join(
(
'Select labels:',
'\n'.join(choice_lines),
'Choose from {}'.format(', '.join(choice_indexes)),
)
)
user_choices = set()
user_choice = None
while not user_choice == input_terminator:
if user_choices:
note('Selected labels: [{}]'.format(', '.join(user_choices)))
user_choice = click.prompt(
prompt, type=click.Choice(choice_indexes), default=input_terminator
)
done = user_choice == input_terminator
new_selection = user_choice not in user_choices
nothing_selected = not user_choices
if not done and new_selection:
user_choices.add(choice_map[user_choice])
if done and nothing_selected:
error('Please select at least one label')
user_choice = None
return user_choices
|
python
|
{
"resource": ""
}
|
q3744
|
stage
|
train
|
def stage(draft, discard, repo_directory, release_name, release_description):
"""
Stages a release
"""
with work_in(repo_directory):
if discard:
stage_command.discard(release_name, release_description)
else:
stage_command.stage(draft, release_name, release_description)
|
python
|
{
"resource": ""
}
|
q3745
|
generate_changelog
|
train
|
def generate_changelog(context):
"""Generates an automatic changelog from your commit messages."""
changelog_content = [
'\n## [%s](%s/compare/%s...%s)\n\n'
% (
context.new_version,
context.repo_url,
context.current_version,
context.new_version,
)
]
git_log_content = None
git_log = 'log --oneline --no-merges --no-color'.split(' ')
try:
git_log_tag = git_log + ['%s..master' % context.current_version]
git_log_content = git(git_log_tag)
log.debug('content: %s' % git_log_content)
except Exception:
log.warn('Error diffing previous version, initial release')
git_log_content = git(git_log)
git_log_content = replace_sha_with_commit_link(context.repo_url, git_log_content)
# turn change log entries into markdown bullet points
if git_log_content:
        for line in git_log_content[:-1]:
            if line:
                changelog_content.append('* %s\n' % line)
write_new_changelog(
context.repo_url, 'CHANGELOG.md', changelog_content, dry_run=context.dry_run
)
log.info('Added content to CHANGELOG.md')
context.changelog_content = changelog_content
|
python
|
{
"resource": ""
}
|
q3746
|
extract
|
train
|
def extract(dictionary, keys):
"""
Extract only the specified keys from a dict
:param dictionary: source dictionary
:param keys: list of keys to extract
:return dict: extracted dictionary
"""
return dict((k, dictionary[k]) for k in keys if k in dictionary)
|
python
|
{
"resource": ""
}
|
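A quick usage sketch for the extract() helper above (the dictionary and key names are made up):

```python
config = {'host': 'localhost', 'port': 8080, 'debug': True}

# Keys that are absent from the source dictionary are silently skipped.
print(extract(config, ['host', 'port', 'missing']))  # -> {'host': 'localhost', 'port': 8080}
```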
q3747
|
tag_and_push
|
train
|
def tag_and_push(context):
"""Tags your git repo with the new version number"""
tag_option = '--annotate'
if probe.has_signing_key(context):
tag_option = '--sign'
shell.dry_run(
TAG_TEMPLATE % (tag_option, context.new_version, context.new_version),
context.dry_run,
)
shell.dry_run('git push --tags', context.dry_run)
|
python
|
{
"resource": ""
}
|
q3748
|
dry_run
|
train
|
def dry_run(command, dry_run):
"""Executes a shell command unless the dry run option is set"""
if not dry_run:
cmd_parts = command.split(' ')
# http://plumbum.readthedocs.org/en/latest/local_commands.html#run-and-popen
return local[cmd_parts[0]](cmd_parts[1:])
else:
log.info('Dry run of %s, skipping' % command)
return True
|
python
|
{
"resource": ""
}
|
q3749
|
increment
|
train
|
def increment(version, major=False, minor=False, patch=True):
"""
Increment a semantic version
:param version: str of the version to increment
:param major: bool specifying major level version increment
:param minor: bool specifying minor level version increment
:param patch: bool specifying patch level version increment
:return: str of the incremented version
"""
version = semantic_version.Version(version)
if major:
version.major += 1
version.minor = 0
version.patch = 0
elif minor:
version.minor += 1
version.patch = 0
elif patch:
version.patch += 1
return str(version)
|
python
|
{
"resource": ""
}
|
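A usage sketch for increment(); the expected results assume the semantic_version package applies the attribute updates exactly as the snippet intends:

```python
print(increment('1.2.3'))               # -> '1.2.4' (patch=True is the default)
print(increment('1.2.3', minor=True))   # -> '1.3.0' (patch resets to 0)
print(increment('1.2.3', major=True))   # -> '2.0.0' (minor and patch reset to 0)
```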
q3750
|
_recursive_gh_get
|
train
|
def _recursive_gh_get(href, items):
"""Recursively get list of GitHub objects.
See https://developer.github.com/v3/guides/traversing-with-pagination/
"""
response = _request('GET', href)
response.raise_for_status()
items.extend(response.json())
if "link" not in response.headers:
return
links = link_header.parse(response.headers["link"])
rels = {link.rel: link.href for link in links.links}
if "next" in rels:
_recursive_gh_get(rels["next"], items)
|
python
|
{
"resource": ""
}
|
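_recursive_gh_get follows GitHub's `Link` pagination headers through the project's own `_request` and `link_header` helpers. For comparison only, here is an iterative sketch of the same traversal using plain `requests` (whose `Response.links` attribute parses the same header); the token handling is an assumption, not taken from the snippet above:

```python
import requests


def gh_get_all(url, token=None):
    """Follow GitHub 'rel=next' pagination links and return all JSON items."""
    headers = {"Authorization": "token %s" % token} if token else {}
    items = []
    while url:
        response = requests.get(url, headers=headers)
        response.raise_for_status()
        items.extend(response.json())
        # requests exposes the parsed Link header as response.links
        url = response.links.get("next", {}).get("url")
    return items
```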
q3751
|
main
|
train
|
def main(github_token, github_api_url, progress):
"""A CLI to easily manage GitHub releases, assets and references."""
global progress_reporter_cls
progress_reporter_cls.reportProgress = sys.stdout.isatty() and progress
if progress_reporter_cls.reportProgress:
progress_reporter_cls = _progress_bar
global _github_token_cli_arg
_github_token_cli_arg = github_token
global _github_api_url
_github_api_url = github_api_url
|
python
|
{
"resource": ""
}
|
q3752
|
_update_release_sha
|
train
|
def _update_release_sha(repo_name, tag_name, new_release_sha, dry_run):
"""Update the commit associated with a given release tag.
Since updating a tag commit is not directly possible, this function
does the following steps:
* set the release tag to ``<tag_name>-tmp`` and associate it
with ``new_release_sha``.
* delete tag ``refs/tags/<tag_name>``.
* update the release tag to ``<tag_name>`` and associate it
with ``new_release_sha``.
"""
if new_release_sha is None:
return
refs = get_refs(repo_name, tags=True, pattern="refs/tags/%s" % tag_name)
if not refs:
return
assert len(refs) == 1
# If sha associated with "<tag_name>" is up-to-date, we are done.
previous_release_sha = refs[0]["object"]["sha"]
if previous_release_sha == new_release_sha:
return
tmp_tag_name = tag_name + "-tmp"
# If any, remove leftover temporary tag "<tag_name>-tmp"
refs = get_refs(repo_name, tags=True, pattern="refs/tags/%s" % tmp_tag_name)
if refs:
assert len(refs) == 1
time.sleep(0.1)
gh_ref_delete(repo_name,
"refs/tags/%s" % tmp_tag_name, dry_run=dry_run)
# Update "<tag_name>" release by associating it with the "<tag_name>-tmp"
# and "<new_release_sha>". It will create the temporary tag.
time.sleep(0.1)
patch_release(repo_name, tag_name,
tag_name=tmp_tag_name,
target_commitish=new_release_sha,
dry_run=dry_run)
# Now "<tag_name>-tmp" references "<new_release_sha>", remove "<tag_name>"
time.sleep(0.1)
gh_ref_delete(repo_name, "refs/tags/%s" % tag_name, dry_run=dry_run)
# Finally, update "<tag_name>-tmp" release by associating it with the
# "<tag_name>" and "<new_release_sha>".
time.sleep(0.1)
patch_release(repo_name, tmp_tag_name,
tag_name=tag_name,
target_commitish=new_release_sha,
dry_run=dry_run)
# ... and remove "<tag_name>-tmp"
time.sleep(0.1)
gh_ref_delete(repo_name,
"refs/tags/%s" % tmp_tag_name, dry_run=dry_run)
|
python
|
{
"resource": ""
}
|
q3753
|
complexidade
|
train
|
def complexidade(obj):
"""
    Returns a value that indicates project health. Currently the
    FinancialIndicator value is used, but in the future it could be computed
    from other indicators as well.
"""
indicators = obj.indicator_set.all()
if not indicators:
value = 0.0
else:
value = indicators.first().value
return value
|
python
|
{
"resource": ""
}
|
q3754
|
details
|
train
|
def details(project):
"""
    Project detail endpoint.
    Returns the project's pronac, name, and indicators with details.
"""
indicators = project.indicator_set.all()
    indicators_detail = [indicator_details(i) for i in indicators]
if not indicators:
indicators_detail = [
{'FinancialIndicator':
{'valor': 0.0,
'metrics': default_metrics, }, }]
indicators_detail = convert_list_into_dict(indicators_detail)
return {'pronac': project.pronac,
'nome': project.nome,
'indicadores': indicators_detail,
}
|
python
|
{
"resource": ""
}
|
q3755
|
indicator_details
|
train
|
def indicator_details(indicator):
"""
    Return a dictionary with all metrics of a FinancialIndicator.
    If there are no values for that indicator, it is filled with default values.
"""
metrics = format_metrics_json(indicator)
metrics_list = set(indicator.metrics
.filter(name__in=metrics_name_map.keys())
.values_list('name', flat=True))
    null_metrics = default_metrics.copy()  # copy so the shared defaults are not mutated by pop() below
for keys in metrics_list:
null_metrics.pop(metrics_name_map[keys], None)
metrics.update(null_metrics)
return {type(indicator).__name__: {
'valor': indicator.value,
'metricas': metrics, },
}
|
python
|
{
"resource": ""
}
|
q3756
|
Metrics.get_metric
|
train
|
def get_metric(self, pronac, metric):
"""
Get metric for the project with the given pronac number.
Usage:
>>> metrics.get_metric(pronac_id, 'finance.approved_funds')
"""
assert isinstance(metric, str)
assert '.' in metric, 'metric must declare a namespace'
try:
func = self._metrics[metric]
return func(pronac, self._data)
except KeyError:
raise InvalidMetricError('metric does not exist')
|
python
|
{
"resource": ""
}
|
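get_metric() looks up callables in `self._metrics` under namespaced names such as `'finance.approved_funds'`. How that registry is filled is not shown in this table; one plausible sketch (the registry dict, decorator, and sample metric below are assumptions, not the project's actual code) is a module-level mapping populated by a registration decorator:

```python
_METRICS = {}


def register_metric(name):
    """Hypothetical decorator storing a metric function under 'namespace.name'."""
    assert '.' in name, 'metric must declare a namespace'

    def decorator(func):
        _METRICS[name] = func
        return func

    return decorator


@register_metric('finance.approved_funds')
def approved_funds(pronac, data):
    # placeholder computation; a real metric would derive this from `data`
    return 0.0


print(_METRICS['finance.approved_funds']('123456', data=None))  # -> 0.0
```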
q3757
|
execute_project_models_sql_scripts
|
train
|
def execute_project_models_sql_scripts(force_update=False):
"""
    Used to get project information from the MinC database
    and convert it into this application's Project models.
    Uses bulk_create if the database is clean.
"""
# TODO: Remove except and use ignore_conflicts
# on bulk_create when django 2.2. is released
with open(MODEL_FILE, "r") as file_content:
query = file_content.read()
db = db_connector()
query_result = db.execute_pandas_sql_query(query)
db.close()
try:
projects = Project.objects.bulk_create(
(Project(**vals) for vals in query_result.to_dict("records")),
# ignore_conflicts=True available on django 2.2.
)
indicators = [FinancialIndicator(project=p) for p in projects]
FinancialIndicator.objects.bulk_create(indicators)
except IntegrityError:
# happens when there are duplicated projects
LOG("Projects bulk_create failed, creating one by one...")
with transaction.atomic():
if force_update:
for item in query_result.to_dict("records"):
p, _ = Project.objects.update_or_create(**item)
FinancialIndicator.objects.update_or_create(project=p)
else:
for item in query_result.to_dict("records"):
p, _ = Project.objects.get_or_create(**item)
FinancialIndicator.objects.update_or_create(project=p)
|
python
|
{
"resource": ""
}
|
q3758
|
create_finance_metrics
|
train
|
def create_finance_metrics(metrics: list, pronacs: list):
"""
    Creates metrics, creating an Indicator if it doesn't already exist.
    Metrics are created for projects that are in pronacs and saved in the
    database.
args:
metrics: list of names of metrics that will be calculated
pronacs: pronacs in dataset that is used to calculate those metrics
"""
missing = missing_metrics(metrics, pronacs)
print(f"There are {len(missing)} missing metrics!")
processors = mp.cpu_count()
print(f"Using {processors} processors to calculate metrics!")
indicators_qs = FinancialIndicator.objects.filter(
project_id__in=[p for p, _ in missing]
)
indicators = {i.project_id: i for i in indicators_qs}
pool = mp.Pool(processors)
results = [
pool.apply_async(create_metric, args=(indicators, metric_name, pronac))
for pronac, metric_name in missing
]
calculated_metrics = [p.get() for p in results]
if calculated_metrics:
Metric.objects.bulk_create(calculated_metrics)
print("Bulk completed")
for indicator in indicators.values():
indicator.fetch_weighted_complexity()
print("Finished update indicators!")
pool.close()
print("Finished metrics calculation!")
|
python
|
{
"resource": ""
}
|
q3759
|
load_project_metrics
|
train
|
def load_project_metrics():
"""
Create project metrics for financial indicator
Updates them if already exists
"""
all_metrics = FinancialIndicator.METRICS
for key in all_metrics:
df = getattr(data, key)
pronac = 'PRONAC'
if key == 'planilha_captacao':
pronac = 'Pronac'
pronacs = df[pronac].unique().tolist()
create_finance_metrics(all_metrics[key], pronacs)
|
python
|
{
"resource": ""
}
|
q3760
|
new_providers
|
train
|
def new_providers(pronac, dt):
"""
Return the percentage of providers of a project
that are new to the providers database.
"""
info = data.providers_info
df = info[info['PRONAC'] == pronac]
providers_count = data.providers_count.to_dict()[0]
new_providers = []
segment_id = None
for _, row in df.iterrows():
cnpj = row['nrCNPJCPF']
cnpj_count = providers_count.get(cnpj, 0)
segment_id = row['idSegmento']
if cnpj_count <= 1:
item_id = row['idPlanilhaAprovacao']
item_name = row['Item']
provider_name = row['nmFornecedor']
new_provider = {
'nome': provider_name,
'cnpj': cnpj,
'itens': {
item_id: {
'nome': item_name,
'tem_comprovante': True
}
}
}
new_providers.append(new_provider)
providers_amount = len(df['nrCNPJCPF'].unique())
new_providers_amount = len(new_providers)
new_providers_percentage = new_providers_amount / providers_amount
averages = data.average_percentage_of_new_providers.to_dict()
segments_average = averages['segments_average_percentage']
all_projects_average = list(averages['all_projects_average'].values())[0]
if new_providers:
new_providers.sort(key=lambda provider: provider['nome'])
return {
'lista_de_novos_fornecedores': new_providers,
'valor': new_providers_amount,
'new_providers_percentage': new_providers_percentage,
'is_outlier': new_providers_percentage > segments_average[segment_id],
'segment_average_percentage': segments_average[segment_id],
'all_projects_average_percentage': all_projects_average,
}
|
python
|
{
"resource": ""
}
|
q3761
|
average_percentage_of_new_providers
|
train
|
def average_percentage_of_new_providers(providers_info, providers_count):
"""
Return the average percentage of new providers
per segment and the average percentage of all projects.
"""
segments_percentages = {}
all_projects_percentages = []
providers_count = providers_count.to_dict()[0]
for _, items in providers_info.groupby('PRONAC'):
cnpj_array = items['nrCNPJCPF'].unique()
new_providers = 0
for cnpj in cnpj_array:
cnpj_count = providers_count.get(cnpj, 0)
if cnpj_count <= 1:
new_providers += 1
segment_id = items.iloc[0]['idSegmento']
new_providers_percent = new_providers / cnpj_array.size
segments_percentages.setdefault(segment_id, [])
segments_percentages[segment_id].append(new_providers_percent)
all_projects_percentages.append(new_providers_percent)
segments_average_percentage = {}
for segment_id, percentages in segments_percentages.items():
mean = np.mean(percentages)
segments_average_percentage[segment_id] = mean
return pd.DataFrame.from_dict({
'segments_average_percentage': segments_average_percentage,
'all_projects_average': np.mean(all_projects_percentages)
})
|
python
|
{
"resource": ""
}
|
q3762
|
providers_count
|
train
|
def providers_count(df):
"""
Returns total occurrences of each provider
in the database.
"""
providers_count = {}
cnpj_array = df.values
for a in cnpj_array:
cnpj = a[0]
occurrences = providers_count.get(cnpj, 0)
providers_count[cnpj] = occurrences + 1
return pd.DataFrame.from_dict(providers_count, orient='index')
|
python
|
{
"resource": ""
}
|
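providers_count() builds the occurrence table by hand from `df.values`. Assuming the dataframe holds the provider identifiers in its first column, pandas' `value_counts` yields an equivalent single-column frame (up to row order):

```python
import pandas as pd


def providers_count_vc(df):
    """Occurrence count per provider using value_counts on the first column."""
    counts = df.iloc[:, 0].value_counts()
    return counts.to_frame(name=0)  # keep the column label 0, as in the original
```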
q3763
|
get_providers_info
|
train
|
def get_providers_info(pronac):
"""
Return all info about providers of a
project with the given pronac.
"""
df = data.providers_info
grouped = df.groupby('PRONAC')
return grouped.get_group(pronac)
|
python
|
{
"resource": ""
}
|
q3764
|
get_info
|
train
|
def get_info(df, group, info=['mean', 'std']):
"""
Aggregate mean and std with the given group.
"""
agg = df.groupby(group).agg(info)
agg.columns = agg.columns.droplevel(0)
return agg
|
python
|
{
"resource": ""
}
|
q3765
|
get_salic_url
|
train
|
def get_salic_url(item, prefix, df_values=None):
"""
Mount a salic url for the given item.
"""
url_keys = {
'pronac': 'idPronac',
'uf': 'uf',
'product': 'produto',
'county': 'idmunicipio',
'item_id': 'idPlanilhaItem',
'stage': 'etapa',
}
if df_values:
values = [item[v] for v in df_values]
url_values = dict(
zip(url_keys.keys(), values)
)
else:
url_values = {
"pronac": item["idPronac"],
"uf": item["UfItem"],
"product": item["idProduto"],
"county": item["cdCidade"],
"item_id": item["idPlanilhaItens"],
"stage": item["cdEtapa"],
}
item_data = [(value, url_values[key]) for key, value in url_keys.items()]
url = prefix
for k, v in item_data:
url += f'/{str(k)}/{str(v)}'
return url
|
python
|
{
"resource": ""
}
|
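A usage sketch for get_salic_url() with a made-up item record (all values are illustrative only):

```python
item = {
    "idPronac": 123456,
    "UfItem": "DF",
    "idProduto": 1,
    "cdCidade": 5300108,
    "idPlanilhaItens": 789,
    "cdEtapa": 2,
}

url = get_salic_url(item, '/prestacao-contas/analisar/comprovante')
print(url)
# -> /prestacao-contas/analisar/comprovante/idPronac/123456/uf/DF/produto/1/idmunicipio/5300108/idPlanilhaItem/789/etapa/2
```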
q3766
|
has_receipt
|
train
|
def has_receipt(item):
"""
    Verify if an item has a receipt.
"""
pronac_id = str(item['idPronac'])
item_id = str(item["idPlanilhaItens"])
combined_id = f'{pronac_id}/{item_id}'
return combined_id in data.receipt.index
|
python
|
{
"resource": ""
}
|
q3767
|
get_segment_projects
|
train
|
def get_segment_projects(segment_id):
"""
Returns all projects from a segment.
"""
df = data.all_items
return (
df[df['idSegmento'] == str(segment_id)]
.drop_duplicates(["PRONAC"])
.values
)
|
python
|
{
"resource": ""
}
|
q3768
|
receipt
|
train
|
def receipt(df):
"""
    Return a dataframe used to verify whether an item has a receipt.
"""
mutated_df = df[['IdPRONAC', 'idPlanilhaItem']].astype(str)
    mutated_df['pronac_planilha_itens'] = (
        mutated_df['IdPRONAC'] + '/' + mutated_df['idPlanilhaItem']
    )
return (
mutated_df
.set_index(['pronac_planilha_itens'])
)
|
python
|
{
"resource": ""
}
|
q3769
|
update_models
|
train
|
def update_models(ctx, f=False):
"""
Updates local django db projects models using salic database from
MinC
"""
if f:
manage(ctx, 'create_models_from_sql --force True', env={})
else:
manage(ctx, 'create_models_from_sql', env={})
|
python
|
{
"resource": ""
}
|
q3770
|
FinancialIndicatorManager.create_indicator
|
train
|
def create_indicator(self, project, is_valid, metrics_list):
"""
Creates FinancialIndicator object for a project, calculating
metrics and indicator value
"""
project = Project.objects.get(pronac=project)
indicator, _ = (FinancialIndicator
.objects.update_or_create(project=project))
indicator.is_valid = is_valid
if indicator.is_valid:
p_metrics = metrics_calc.get_project(project.pronac)
for metric_name in metrics_list:
print("calculando a metrica ", metric_name)
x = getattr(p_metrics.finance, metric_name)
print("do projeto: ", project)
Metric.objects.create_metric(metric_name, x, indicator)
indicator.fetch_weighted_complexity()
return indicator
|
python
|
{
"resource": ""
}
|
q3771
|
item_prices
|
train
|
def item_prices(pronac, data):
"""
Verify if a project is an outlier compared
    to the other projects in its segment, based
on the price of bought items.
"""
threshold = 0.1
outlier_info = get_outliers_percentage(pronac)
outlier_info['is_outlier'] = outlier_info['percentage'] > threshold
outlier_info['maximum_expected'] = threshold * outlier_info['total_items']
return outlier_info
|
python
|
{
"resource": ""
}
|
q3772
|
is_outlier
|
train
|
def is_outlier(df, item_id, segment_id, price):
"""
    Verify if an item is an outlier compared to the
    other occurrences of the same item, based on its price.
Args:
item_id: idPlanilhaItens
segment_id: idSegmento
price: VlUnitarioAprovado
"""
if (segment_id, item_id) not in df.index:
return False
mean = df.loc[(segment_id, item_id)]['mean']
std = df.loc[(segment_id, item_id)]['std']
return gaussian_outlier.is_outlier(
x=price, mean=mean, standard_deviation=std
)
|
python
|
{
"resource": ""
}
|
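is_outlier() above, like several other metrics in this table, delegates the final decision to a `gaussian_outlier` module exposing `is_outlier(x, mean, standard_deviation)` and `maximum_expected_value(mean, std)`. That module is not included here; a minimal sketch of what it plausibly does, assuming a fixed z-score cutoff (the constant K below is an assumption, not taken from the project):

```python
K = 3  # assumed z-score cutoff; the actual project may use a different constant


def is_outlier(x, mean, standard_deviation):
    """Flag values more than K standard deviations above the mean."""
    return x > mean + K * standard_deviation


def maximum_expected_value(mean, standard_deviation):
    """Largest value still considered normal under the same cutoff."""
    return mean + K * standard_deviation
```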
q3773
|
aggregated_relevant_items
|
train
|
def aggregated_relevant_items(raw_df):
"""
    Aggregation to calculate the mean and std.
"""
df = (
raw_df[['idSegmento', 'idPlanilhaItens', 'VlUnitarioAprovado']]
.groupby(by=['idSegmento', 'idPlanilhaItens'])
.agg([np.mean, lambda x: np.std(x, ddof=0)])
)
df.columns = df.columns.droplevel(0)
return (
df
.rename(columns={'<lambda>': 'std'})
)
|
python
|
{
"resource": ""
}
|
q3774
|
relevant_items
|
train
|
def relevant_items(df):
"""
Dataframe with items used by cultural projects,
filtered by date and price.
"""
start_date = datetime(2013, 1, 1)
df['DataProjeto'] = pd.to_datetime(df['DataProjeto'])
# get only projects newer than start_date
# and items with price > 0
df = df[df.DataProjeto >= start_date]
df = df[df.VlUnitarioAprovado > 0.0]
return df
|
python
|
{
"resource": ""
}
|
q3775
|
items_with_price
|
train
|
def items_with_price(raw_df):
"""
Dataframe with price as number.
"""
df = (
raw_df
[['PRONAC', 'idPlanilhaAprovacao', 'Item',
'idPlanilhaItens', 'VlUnitarioAprovado',
'idSegmento', 'DataProjeto', 'idPronac',
'UfItem', 'idProduto', 'cdCidade', 'cdEtapa']]
).copy()
df['VlUnitarioAprovado'] = df['VlUnitarioAprovado'].apply(pd.to_numeric)
return df
|
python
|
{
"resource": ""
}
|
q3776
|
get_outliers_percentage
|
train
|
def get_outliers_percentage(pronac):
"""
Returns the percentage of items
of the project that are outliers.
"""
items = (
data.items_with_price
.groupby(['PRONAC'])
.get_group(pronac)
)
df = data.aggregated_relevant_items
outlier_items = {}
url_prefix = '/prestacao-contas/analisar/comprovante'
for _, item in items.iterrows():
item_id = item['idPlanilhaItens']
price = item['VlUnitarioAprovado']
segment_id = item['idSegmento']
item_name = item['Item']
if is_outlier(df, item_id, segment_id, price):
outlier_items[item_id] = {
'name': item_name,
'salic_url': get_salic_url(item, url_prefix),
'has_receipt': has_receipt(item)
}
total_items = items.shape[0]
outliers_amount = len(outlier_items)
percentage = outliers_amount / total_items
return {
'items': outlier_items,
'valor': outliers_amount,
'total_items': total_items,
'percentage': percentage,
'is_outlier': outliers_amount > 0,
}
|
python
|
{
"resource": ""
}
|
q3777
|
common_items
|
train
|
def common_items(df):
"""
    Returns the items that are common in all the segments,
    in the format | idSegmento | idPlanilhaItens |.
"""
percentage = 0.1
return (
df
.groupby(['idSegmento', 'idPlanilhaItens'])
.count()
.rename(columns={'PRONAC': 'itemOccurrences'})
.sort_values('itemOccurrences', ascending=False)
.reset_index(['idSegmento', 'idPlanilhaItens'])
.groupby('idSegmento')
.apply(lambda x: x[None: max(2, int(len(x) * percentage))])
.reset_index(['idSegmento'], drop=True)
.set_index(['idSegmento'])
)
|
python
|
{
"resource": ""
}
|
q3778
|
common_items_percentage
|
train
|
def common_items_percentage(pronac, seg_common_items):
"""
Returns the percentage of items in a project that are
common in the cultural segment.
"""
if len(seg_common_items) == 0:
return 0
project_items = get_project_items(pronac).values[:, 0]
project_items_amount = len(project_items)
if project_items_amount == 0:
return 1
common_found_items = sum(
seg_common_items.isin(project_items)['idPlanilhaItens']
)
return common_found_items / project_items_amount
|
python
|
{
"resource": ""
}
|
q3779
|
common_items_metrics
|
train
|
def common_items_metrics(all_items, common_items):
"""
Calculates the percentage of common items for each project
in each segment and calculates the mean and std of this percentage
for each segment.
"""
segments = common_items.index.unique()
metrics = {}
for seg in segments:
seg_common_items = segment_common_items(seg)
projects = get_segment_projects(seg)
metric_values = []
for proj in projects:
pronac = proj[0]
percentage = common_items_percentage(pronac, seg_common_items)
metric_values.append(percentage)
metrics[seg] = {
'mean': np.mean(metric_values),
'std': np.std(metric_values)
}
return pd.DataFrame.from_dict(metrics, orient='index')
|
python
|
{
"resource": ""
}
|
q3780
|
get_project_items
|
train
|
def get_project_items(pronac):
"""
Returns all items from a project.
"""
df = data.all_items
return (
df[df['PRONAC'] == pronac]
.drop(columns=['PRONAC', 'idSegmento'])
)
|
python
|
{
"resource": ""
}
|
q3781
|
segment_common_items
|
train
|
def segment_common_items(segment_id):
"""
Returns all the common items in a segment.
"""
df = data.common_items
return (
df
.loc[str(segment_id)]
.reset_index(drop=1)
.drop(columns=["itemOccurrences"])
)
|
python
|
{
"resource": ""
}
|
q3782
|
add_info_to_uncommon_items
|
train
|
def add_info_to_uncommon_items(filtered_items, uncommon_items):
"""
Add extra info to the uncommon items.
"""
result = uncommon_items
url_prefix = '/prestacao-contas/analisar/comprovante'
for _, item in filtered_items.iterrows():
item_id = item['idPlanilhaItens']
item_name = uncommon_items[item_id]
result[item_id] = {
'name': item_name,
'salic_url': get_salic_url(item, url_prefix),
            'has_receipt': has_receipt(item)
}
return result
|
python
|
{
"resource": ""
}
|
q3783
|
common_items_ratio
|
train
|
def common_items_ratio(pronac, dt):
"""
    Calculates the common and uncommon items on projects in a cultural
    segment and verifies whether a project is an outlier compared to the
    other projects in its segment.
"""
segment_id = get_segment_id(str(pronac))
metrics = data.common_items_metrics.to_dict(orient='index')[segment_id]
ratio = common_items_percentage(pronac, segment_common_items(segment_id))
# constant that defines the threshold to verify if a project
# is an outlier.
k = 1.5
threshold = metrics['mean'] - k * metrics['std']
uncommon_items = get_uncommon_items(pronac)
pronac_filter = data.all_items['PRONAC'] == pronac
uncommon_items_filter = (
data.all_items['idPlanilhaItens']
.isin(uncommon_items)
)
items_filter = (pronac_filter & uncommon_items_filter)
filtered_items = (
data
.all_items[items_filter]
.drop_duplicates(subset='idPlanilhaItens')
)
uncommon_items = add_info_to_uncommon_items(filtered_items, uncommon_items)
return {
'is_outlier': ratio < threshold,
'valor': ratio,
'maximo_esperado': metrics['mean'],
'desvio_padrao': metrics['std'],
'items_incomuns': uncommon_items,
'items_comuns_que_o_projeto_nao_possui': get_common_items_not_present(pronac),
}
|
python
|
{
"resource": ""
}
|
q3784
|
verified_funds
|
train
|
def verified_funds(pronac, dt):
"""
    Responsible for detecting anomalies in a project's total verified funds.
"""
dataframe = data.planilha_comprovacao
project = dataframe.loc[dataframe['PRONAC'] == pronac]
segment_id = project.iloc[0]["idSegmento"]
pronac_funds = project[
["idPlanilhaAprovacao", "PRONAC", "vlComprovacao", "idSegmento"]
]
funds_grp = pronac_funds.drop(columns=["idPlanilhaAprovacao"]).groupby(
["PRONAC"]
)
project_funds = funds_grp.sum().loc[pronac]["vlComprovacao"]
segments_info = data.verified_funds_by_segment_agg.to_dict(orient="index")
mean = segments_info[segment_id]["mean"]
std = segments_info[segment_id]["std"]
is_outlier = gaussian_outlier.is_outlier(project_funds, mean, std)
maximum_expected_funds = gaussian_outlier.maximum_expected_value(mean, std)
return {
"is_outlier": is_outlier,
"valor": project_funds,
"maximo_esperado": maximum_expected_funds,
"minimo_esperado": 0,
}
|
python
|
{
"resource": ""
}
|
q3785
|
raised_funds_by_project
|
train
|
def raised_funds_by_project(df):
"""
Raised funds organized by project.
"""
df['CaptacaoReal'] = df['CaptacaoReal'].apply(
pd.to_numeric
)
return (
df[['Pronac', 'CaptacaoReal']]
.groupby(['Pronac'])
.sum()
)
|
python
|
{
"resource": ""
}
|
q3786
|
analyzed_projects
|
train
|
def analyzed_projects(raw_df):
"""
    Return all projects that were analyzed.
"""
df = raw_df[['PRONAC', 'proponenteCgcCpf']]
analyzed_projects = df.groupby('proponenteCgcCpf')[
'PRONAC'
].agg(['unique', 'nunique'])
analyzed_projects.columns = ['pronac_list', 'num_pronacs']
return analyzed_projects
|
python
|
{
"resource": ""
}
|
q3787
|
submitted_projects
|
train
|
def submitted_projects(raw_df):
"""
Return all submitted projects.
"""
df = raw_df.astype({'PRONAC': str, 'CgcCpf': str})
submitted_projects = df.groupby('CgcCpf')[
'PRONAC'
].agg(['unique', 'nunique'])
submitted_projects.columns = ['pronac_list', 'num_pronacs']
return submitted_projects
|
python
|
{
"resource": ""
}
|
q3788
|
raised_funds
|
train
|
def raised_funds(pronac, data):
"""
Returns the total raised funds of a project
with the given pronac and if this project is an
outlier based on this value.
"""
is_outlier, mean, std, total_raised_funds = get_outlier_info(pronac)
maximum_expected_funds = gaussian_outlier.maximum_expected_value(mean, std)
return {
'is_outlier': is_outlier,
'total_raised_funds': total_raised_funds,
'maximum_expected_funds': maximum_expected_funds
}
|
python
|
{
"resource": ""
}
|
q3789
|
segment_raised_funds_average
|
train
|
def segment_raised_funds_average(df):
"""
    Return the mean and standard deviation of raised funds per segment.
"""
grouped = df.groupby('Segmento')
aggregated = grouped.agg(['mean', 'std'])
aggregated.columns = aggregated.columns.droplevel(0)
return aggregated
|
python
|
{
"resource": ""
}
|
q3790
|
get_outlier_info
|
train
|
def get_outlier_info(pronac):
"""
Return if a project with the given
pronac is an outlier based on raised funds.
"""
df = data.planilha_captacao
raised_funds_averages = data.segment_raised_funds_average.to_dict('index')
segment_id = df[df['Pronac'] == pronac]['Segmento'].iloc[0]
mean = raised_funds_averages[segment_id]['mean']
std = raised_funds_averages[segment_id]['std']
project_raised_funds = get_project_raised_funds(pronac)
outlier = gaussian_outlier.is_outlier(project_raised_funds, mean, std)
return (outlier, mean, std, project_raised_funds)
|
python
|
{
"resource": ""
}
|
q3791
|
csv_to_pickle
|
train
|
def csv_to_pickle(path=ROOT / "raw", clean=False):
"""Convert all CSV files in path to pickle."""
for file in os.listdir(path):
base, ext = os.path.splitext(file)
if ext != ".csv":
continue
LOG(f"converting {file} to pickle")
df = pd.read_csv(path / file, low_memory=True)
WRITE_DF(df, path / (base + "." + FILE_EXTENSION), **WRITE_DF_OPTS)
if clean:
os.remove(path / file)
LOG(f"removed {file}")
|
python
|
{
"resource": ""
}
|
q3792
|
Loader.store
|
train
|
def store(self, loc, df):
"""Store dataframe in the given location.
Store some arbitrary dataframe:
>>> data.store('my_data', df)
Now recover it from the global store.
>>> data.my_data
...
"""
path = "%s.%s" % (self._root / "processed" / loc, FILE_EXTENSION)
WRITE_DF(df, path, **WRITE_DF_OPTS)
self._cache[loc] = df
|
python
|
{
"resource": ""
}
|
q3793
|
StocksInfo.close_databases
|
train
|
def close_databases(self):
""" Close all database sessions """
if self.gc_book:
self.gc_book.close()
if self.pricedb_session:
self.pricedb_session.close()
|
python
|
{
"resource": ""
}
|
q3794
|
StocksInfo.load_stock_quantity
|
train
|
def load_stock_quantity(self, symbol: str) -> Decimal:
""" retrieves stock quantity """
book = self.get_gc_book()
collection = SecuritiesAggregate(book)
sec = collection.get_aggregate_for_symbol(symbol)
quantity = sec.get_quantity()
return quantity
|
python
|
{
"resource": ""
}
|
q3795
|
StocksInfo.get_gc_book
|
train
|
def get_gc_book(self):
""" Returns the GnuCash db session """
if not self.gc_book:
gc_db = self.config.get(ConfigKeys.gnucash_book_path)
if not gc_db:
raise AttributeError("GnuCash book path not configured.")
        # if the path is not absolute, resolve it relative to the package resources
if not os.path.isabs(gc_db):
gc_db = resource_filename(
Requirement.parse("Asset-Allocation"), gc_db)
if not os.path.exists(gc_db):
raise ValueError(f"Invalid GnuCash book path {gc_db}")
self.gc_book = open_book(gc_db, open_if_lock=True)
return self.gc_book
|
python
|
{
"resource": ""
}
|
q3796
|
StocksInfo.get_symbols_with_positive_balances
|
train
|
def get_symbols_with_positive_balances(self) -> List[str]:
""" Identifies all the securities with positive balances """
from gnucash_portfolio import BookAggregate
holdings = []
with BookAggregate() as book:
# query = book.securities.query.filter(Commodity.)
holding_entities = book.securities.get_all()
for item in holding_entities:
# Check holding balance
agg = book.securities.get_aggregate(item)
balance = agg.get_num_shares()
if balance > Decimal(0):
holdings.append(f"{item.namespace}:{item.mnemonic}")
else:
self.logger.debug(f"0 balance for {item}")
# holdings = map(lambda x: , holding_entities)
return holdings
|
python
|
{
"resource": ""
}
|
q3797
|
StocksInfo.__get_pricedb_session
|
train
|
def __get_pricedb_session(self):
""" Provides initialization and access to module-level session """
from pricedb import dal
if not self.pricedb_session:
self.pricedb_session = dal.get_default_session()
return self.pricedb_session
|
python
|
{
"resource": ""
}
|
q3798
|
add
|
train
|
def add(assetclass: int, symbol: str):
""" Add a stock to an asset class """
assert isinstance(symbol, str)
assert isinstance(assetclass, int)
symbol = symbol.upper()
app = AppAggregate()
new_item = app.add_stock_to_class(assetclass, symbol)
print(f"Record added: {new_item}.")
|
python
|
{
"resource": ""
}
|
q3799
|
unallocated
|
train
|
def unallocated():
""" Identify unallocated holdings """
app = AppAggregate()
app.logger = logger
unalloc = app.find_unallocated_holdings()
if not unalloc:
print(f"No unallocated holdings.")
for item in unalloc:
print(item)
|
python
|
{
"resource": ""
}
|