Columns: function (string, lengths 11 to 56k), repo_name (string, lengths 5 to 60), features (sequence)
def __init__(self): vstruct.VStruct.__init__(self) self.DisableAffinity = v_uint32()
atlas0fd00m/CanCat
[ 169, 34, 169, 6, 1428517921 ]
def __init__(self): vstruct.VStruct.__init__(self) self.ListSize = v_uint32() self.InterfaceType = v_uint32() self.BusNumber = v_uint32() self.SlotNumber = v_uint32() self.Reserved = vstruct.VArray([ v_uint32() for i in xrange(3) ]) self.AlternativeLists = v_uint32() self.List = vstruct.VArray([ IO_RESOURCE_LIST() for i in xrange(1) ])
atlas0fd00m/CanCat
[ 169, 34, 169, 6, 1428517921 ]
def bin2asc(data: bytes): """ Encode binary data as ascii. If it is a large data set, then use a list of hex characters. """ if len(data) > 30: res = [] for part in chunks(data): res.append(binascii.hexlify(part).decode("ascii")) return res else: return binascii.hexlify(data).decode("ascii")
windelbouwman/ppci-mirror
[ 280, 28, 280, 61, 1480248069 ]
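The bin2asc record above hex-encodes binary data, switching to a list of hex chunks for large payloads. A minimal sketch of that behaviour, assuming a chunks() helper that yields fixed-size slices (the real helper lives elsewhere in ppci):

import binascii

def chunks(data: bytes, size: int = 16):
    # Assumed helper: yield successive fixed-size slices of the input.
    for i in range(0, len(data), size):
        yield data[i:i + size]

def bin2asc(data: bytes):
    # Small payloads become one hex string; large ones a list of hex chunks.
    if len(data) > 30:
        return [binascii.hexlify(part).decode("ascii") for part in chunks(data)]
    return binascii.hexlify(data).decode("ascii")

print(bin2asc(b"\x01\x02\x03"))      # '010203'
print(bin2asc(bytes(range(40))))     # list of hex strings, 16 bytes per chunk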
def setUp(self): self.set_filename('chart_format23.xlsx')
jmcnamara/XlsxWriter
[ 3172, 594, 3172, 18, 1357261626 ]
def test_lipop1(self): if not self.params['lipop1_bin']: self.params['lipop1_bin'] = 'LipoP' lipop1.annotate(self.params, self.proteins) self.expected_output = { u'SPy_0252': True, u'SPy_2077': False, u'SPy_0317': True, u'tr|Q9HYX8': True, } for seqid in self.expected_output: self.assertEqual( self.expected_output[seqid], self.proteins[seqid]['is_lipop']) self.assertEqual(self.proteins[u'tr|Q9HYX8']['lipop_cleave_position'], 19) self.assertIn('lipop_im_retention_signal', self.proteins[u'tr|Q9HYX8']) self.assertTrue( self.proteins[u'tr|Q9HYX8']['lipop_im_retention_signal'])
boscoh/inmembrane
[ 9, 9, 9, 3, 1320018818 ]
def contact(self, sponsor): # comma-separated emails in mailto: should work: https://www.ietf.org/rfc/rfc2368.txt # but the commas need to be URL-quoted return format_html( u'<a href="mailto:{}">{}</a>', quote(u','.join(sponsor.contact_emails)), sponsor.contact_name )
PyCon/pycon
[ 156, 98, 156, 58, 1370354058 ]
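The contact column above builds a multi-recipient mailto: link and URL-quotes the comma-joined addresses, as RFC 2368 requires. A plain-Python sketch of the same idea without Django's format_html (the addresses are invented):

from urllib.parse import quote

emails = ["a@example.com", "b@example.com"]            # illustrative addresses
href = "mailto:" + quote(",".join(emails))             # quote() escapes the commas (and '@')
print('<a href="{}">Sponsor contact</a>'.format(href))
# <a href="mailto:a%40example.com%2Cb%40example.com">Sponsor contact</a>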
def get_form(self, *args, **kwargs): # @@@ kinda ugly but using choices= on NullBooleanField is broken form = super(SponsorAdmin, self).get_form(*args, **kwargs) form.base_fields["active"].widget.choices = [ (u"1", _(u"unreviewed")), (u"2", _(u"approved")), (u"3", _(u"rejected")) ] applicant_qs = form.base_fields['applicant'].queryset applicant_qs = applicant_qs.order_by('first_name', 'last_name', 'pk') form.base_fields['applicant'].queryset = applicant_qs return form
PyCon/pycon
[ 156, 98, 156, 58, 1370354058 ]
def func_generator(ben): def column_func(obj): return getattr(obj, ben['field_name']) column_func.short_description = ben['column_title'] column_func.boolean = True column_func.admin_order_field = ben['field_name'] return column_func
PyCon/pycon
[ 156, 98, 156, 58, 1370354058 ]
def save_related(self, request, form, formsets, change): super(SponsorAdmin, self).save_related(request, form, formsets, change) obj = form.instance obj.save()
PyCon/pycon
[ 156, 98, 156, 58, 1370354058 ]
def levels(self, benefit): return u", ".join(l.level.name for l in benefit.benefit_levels.all())
PyCon/pycon
[ 156, 98, 156, 58, 1370354058 ]
def benefits(self, obj): return ', '.join(obj.benefit_levels.values_list('benefit__name', flat=True))
PyCon/pycon
[ 156, 98, 156, 58, 1370354058 ]
def benefits(self, obj): return ', '.join(obj.benefit_packages.values_list('benefit__name', flat=True))
PyCon/pycon
[ 156, 98, 156, 58, 1370354058 ]
def test_list_several(monkeypatch): monkeypatch.setenv("foo", "bar,baz,barf") assert parsenvy.list("foo") == ["bar", "baz", "barf"]
nkantar/Parsenvy
[ 35, 9, 35, 11, 1487911029 ]
def test_list_one_comma(monkeypatch): monkeypatch.setenv("foo", ",") assert parsenvy.list("foo") == ["", ""]
nkantar/Parsenvy
[ 35, 9, 35, 11, 1487911029 ]
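The two parsenvy tests above pin down how list() splits a comma-separated environment variable, including the edge case where a bare comma yields two empty strings. A minimal function satisfying those tests (a sketch, not the library's actual implementation):

import os

def list_env(name):
    # Split a comma-separated environment variable into a list of strings.
    value = os.environ.get(name)
    if value is None:
        return None
    return value.split(",")

os.environ["foo"] = "bar,baz,barf"
assert list_env("foo") == ["bar", "baz", "barf"]
os.environ["foo"] = ","
assert list_env("foo") == ["", ""]   # a lone comma splits into two empty strings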
def __init__(self): self.reporters = []
drallensmith/neat-python
[ 32, 10, 32, 3, 1498176757 ]
def remove(self, reporter): self.reporters.remove(reporter)
drallensmith/neat-python
[ 32, 10, 32, 3, 1498176757 ]
def end_generation(self, config, population, species_set): for r in self.reporters: r.end_generation(config, population, species_set)
drallensmith/neat-python
[ 32, 10, 32, 3, 1498176757 ]
def post_reproduction(self, config, population, species): for r in self.reporters: r.post_reproduction(config, population, species)
drallensmith/neat-python
[ 32, 10, 32, 3, 1498176757 ]
def found_solution(self, config, generation, best): for r in self.reporters: r.found_solution(config, generation, best)
drallensmith/neat-python
[ 32, 10, 32, 3, 1498176757 ]
def info(self, msg): for r in self.reporters: r.info(msg)
drallensmith/neat-python
[ 32, 10, 32, 3, 1498176757 ]
def start_generation(self, generation): pass
drallensmith/neat-python
[ 32, 10, 32, 3, 1498176757 ]
def post_evaluate(self, config, population, species, best_genome): pass
drallensmith/neat-python
[ 32, 10, 32, 3, 1498176757 ]
def complete_extinction(self): pass
drallensmith/neat-python
[ 32, 10, 32, 3, 1498176757 ]
def species_stagnant(self, sid, species): pass
drallensmith/neat-python
[ 32, 10, 32, 3, 1498176757 ]
def __init__(self, show_species_detail): self.show_species_detail = show_species_detail self.generation = None self.generation_start_time = None self.generation_times = [] self.num_extinctions = 0
drallensmith/neat-python
[ 32, 10, 32, 3, 1498176757 ]
def end_generation(self, config, population, species_set): ng = len(population) ns = len(species_set.species) if self.show_species_detail: print('Population of {0:d} members in {1:d} species:'.format(ng, ns)) sids = list(iterkeys(species_set.species)) sids.sort() print(" ID age size fitness adj fit stag") print(" ==== === ==== ======= ======= ====") for sid in sids: s = species_set.species[sid] a = self.generation - s.created n = len(s.members) f = "--" if s.fitness is None else "{:.1f}".format(s.fitness) af = "--" if s.adjusted_fitness is None else "{:.3f}".format(s.adjusted_fitness) st = self.generation - s.last_improved print( " {: >4} {: >3} {: >4} {: >7} {: >7} {: >4}".format(sid, a, n, f, af, st)) else: print('Population of {0:d} members in {1:d} species'.format(ng, ns)) elapsed = time.time() - self.generation_start_time self.generation_times.append(elapsed) self.generation_times = self.generation_times[-10:] average = sum(self.generation_times) / len(self.generation_times) print('Total extinctions: {0:d}'.format(self.num_extinctions)) if len(self.generation_times) > 1: print("Generation time: {0:.3f} sec ({1:.3f} average)".format(elapsed, average)) else: print("Generation time: {0:.3f} sec".format(elapsed))
drallensmith/neat-python
[ 32, 10, 32, 3, 1498176757 ]
def complete_extinction(self): self.num_extinctions += 1 print('All species extinct.')
drallensmith/neat-python
[ 32, 10, 32, 3, 1498176757 ]
def species_stagnant(self, sid, species): if self.show_species_detail: print("\nSpecies {0} with {1} members is stagnated: removing it".format(sid, len(species.members)))
drallensmith/neat-python
[ 32, 10, 32, 3, 1498176757 ]
def point_in_hull(point, hull, tolerance=1e-12): return all((np.dot(eq[:-1], point) + eq[-1] <= tolerance) for eq in hull.equations)
ethz-asl/segmatch
[ 972, 390, 972, 67, 1474386165 ]
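point_in_hull tests the point against every facet equation of the hull: the point is inside (or on the boundary) when a·x + b <= tolerance holds for all facets. A quick usage sketch with scipy's ConvexHull, whose equations attribute stores exactly those [a, b] rows:

import numpy as np
from scipy.spatial import ConvexHull

def point_in_hull(point, hull, tolerance=1e-12):
    # Inside the hull iff every half-space inequality is satisfied.
    return all(np.dot(eq[:-1], point) + eq[-1] <= tolerance for eq in hull.equations)

pts = np.array([[0, 0], [1, 0], [0, 1], [1, 1]], dtype=float)
hull = ConvexHull(pts)
print(point_in_hull(np.array([0.5, 0.5]), hull))   # True
print(point_in_hull(np.array([2.0, 2.0]), hull))   # False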
def time_to_hms(delta): ''' Convert some time in seconds to a tuple of (hours, minutes, seconds). ''' m, s = divmod(delta, 60) h, m = divmod(m, 60) return (h, m, s)
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
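A quick check of the divmod chain in time_to_hms above: 3725 seconds is 1h 2m 5s.

delta = 3725
m, s = divmod(delta, 60)   # 62 minutes, 5 seconds
h, m = divmod(m, 60)       # 1 hour, 2 minutes
print((h, m, s))           # (1, 2, 5)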
def check_foreground(): ''' Function that returns True if the process executing this module is in the foreground. If in the background, this module won't print to stderr/stdout. ''' if(not platform.system().lower().startswith('linux')): return True try: if(os.getpgrp() == os.tcgetpgrp(sys.stdout.fileno())): return True except OSError: pass return False
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
def not_zero(i): ''' Function that returns True if i is not zero; equivalent to lambda i: i != 0. ''' return i != 0
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
def __init__( self, command, dependencies=[], targets=[], cpu=1, name='Anonymous_Task', stderr=None, stdout=None, error_check=not_zero, max_wall_time=float('inf')): ''' The __init__ for the Task object has nine parameters described below: command - a string representation of the cmd line command to be executed. dependencies - a list containing Task objects, Supervisor objects, or unit functions. A call to self.checkDependencies will return True if and only if all Task objects in dependencies are finished, all Supervisor objects in dependencies are finished, and all unit functions return True when called. targets - a list of strings that are the paths to the output files generated by running the task. cpu - the number of threads or cpu intensity of the command. This value is used by Supervisor objects. Default=1 name - A string that will be used for representing the task for more convenient logging stderr - String path to the location where stderr from the command should be written. Default is sys.stderr stdout - String path to the location where stdout from the command should be written. Default is sys.stdout error_check - a function that accepts a single int parameter, the exit code from running command. The function should return True if the exit code implies an error was encountered while running command, else False. Default = lambda i: i != 0 max_wall_time - the amount of time, in minutes, that the command should be allowed to run before being stopped by a call to self.finished(). Default = float('inf') ''' if(stderr is not None): f = open(stderr, 'a') f.close() if(stdout is not None): f = open(stdout, 'a') f.close() self.command = command self.dependencies = dependencies self.cpu = cpu self.name = name self.stdout = stdout self.stderr = stderr self.targets = targets self.error_check = error_check self.opened_files = [] self.process = None self.exit_code = None self.soft_finished_status = False self.start_time = time.time() self.max_wall_time = max_wall_time
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
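A hedged usage sketch for the Task API documented above, assuming the Task class defined in these records is in scope; the command and file names are invented for illustration:

# Hypothetical usage of the Task class above.
align = Task(
    command='bowtie2 -x idx -U reads.fq -S out.sam',   # invented command line
    targets=['out.sam'],
    cpu=4,
    name='align_reads',
    stdout='align.log',
    stderr='align.err',
)
align.run(delay=2)        # start the subprocess, then poll finished() every 2 seconds
print(align.skipable())   # True once all targets exist and dependencies are satisfied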
def checkDependencies(self): ''' Method will check all dependencies of this object. self.dependencies is a list containing Task objects, Supervisor objects, or unit functions. A call to self.checkDependencies will return True if and only if all Task objects in dependencies are finished, all Supervisor objects in dependencies are finished, and all unit functions return True when called. ''' for d in self.dependencies: if(isinstance(d, Task) or isinstance(d, Supervisor)): try: if(not d.finished()): return False except (Task.ExitCodeException, Task.TaskException): return False elif(callable(d)): if(not d()): return False else: error_message = ( 'Unable to check dependencies of task {0!s}. \nTask ' 'dependencies must not contain anything that is not ' 'a function or a Task. ').format(self.name) raise self.TaskException(error_message) return True
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
def start(self): ''' Method used to start the execution of this Task. self.command will be executed using the subprocess.Popen constructor with shell=True. ''' if(self.stderr is not None): err = open(self.stderr, 'w', 1) err.write(str(self.command) + '\n\n') self.opened_files.append(err) else: err = None if(self.stdout is not None): out = open(self.stdout, 'w', 1) out.write(str(self.command) + '\n\n') self.opened_files.append(out) else: out = None self.start_time = time.time() temp = subprocess.Popen(self.command, shell=True, stdout=out, stderr=err) self.process = temp
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
def finished(self): ''' A method that will check if this Task has finished executing, and check to ensure that execution was successful. If this Task hasn't been started and hasn't been skipped, it will return False. Otherwise, this method will query the subprocess created by self.start(). If the subprocess is still running, this method will return False. If the subprocess has set its exit code, the exit code will be checked by self.error_check, then this method will check to make sure that all targets of this task were created. If all checks are passed, self.finished will return True. In the event that execution of this task fails for some reason, all targets will be renamed to be [t+'.partial' for t in targets] to indicate that the output is unlikely to be complete. ''' if(self.soft_finished_status): return True if(self.process is None): return False self.process.poll() exit_code = self.process.returncode if(exit_code is None): cur_run_time = float(time.time() - self.start_time) / 60 if(cur_run_time > self.max_wall_time): err_mess = ('Task {0!s} has been running for greater than its maximum wall time, ' '{1!s}m, and has been aborted. This is likely an external error and ' 'trying again is recommended.').format(self.name, self.max_wall_time) self.killRun() raise self.TaskException(err_mess) return False
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
def run(self, delay=1): ''' A convenience method that allows for the execution of a task serially. This task will be started by a call to self.start(), then every delay seconds, a call to self.finished() will check to see if the command has finished executing. self.run() will return upon completion of the command. ''' self.start() while(not self.finished()): time.sleep(delay)
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
def killRun(self): ''' Calling this method will safely stop the execution of this task. ''' try: self.process.kill() except: pass self.close_files() self.rename_targets()
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
def close_files(self): ''' This method is responsible for closing all log files opened by a call to self.start. ''' for f in self.opened_files: f.close() self.opened_files = []
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
def rename_targets(self): ''' This method will check for each t in self.targets, if t exists. If it does, t will be renamed to be t+".partial". This method is called whenever this task halts unexpectedly or in error. ''' for f in self.targets: if(os.path.exists(f)): os.rename(f, f + '.partial')
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
def skipable(self): ''' Method used by Supervisor objects to determine if this task needs to be executed. This method will return True if this task is skippable and so does not need to be executed. A task is skippable if and only if all dependencies are satisfied, all dependencies are themselves skippable, and all targets exist. ''' if(self.exit_code is not None): return False if(self.soft_finished_status): return True for t in self.dependencies: if(isinstance(t, Task) or isinstance(t, Supervisor)): if(not t.skipable()): return False if(self.targets == []): return False for t in self.targets: if(not os.path.exists(t)): return False self.soft_finished_status = True return True
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
def __str__(self): return self.name
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
def __repr__(self): return self.__str__()
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
def __init__( self, tasks=[], dependencies=[], cpu=float('inf'), name='Supervisor', delay=1, force_run=False, email=None, email_interval=30, log=None): ''' The __init__ for the Supervisor object has nine parameters described below. tasks - A list of Task or Supervisor objects that this Supervisor will manage. Default = [] dependencies - A list of dependencies for this Supervisor. This works in the same way as the dependencies for a Task object. Default = [] cpu - The cpu cap for this supervisor. When you run this supervisor, the supervisor will run the maximum number of Tasks that it can in parallel such that the sum of Task.cpu for each of those tasks is below this supervisor's cpu. Default = float('inf') name - A handy way to keep track of your supervisor if you are using multiple. name will be used during exception help messages and when printing the supervisor. Default = 'Supervisor' delay - While running tasks, this supervisor will wait delay seconds in between each execution cycle. Default = 1 force_run - A flag that determines whether the Supervisor will attempt to skip execution of tasks that it thinks are skippable. A task is skippable if Task.skipable() returns True. Default = False email - This supervisor will send emails updating you on execution of the pipeline if an email is given. This supervisor will send an email when the pipeline finishes, and will send emails at most once per email_interval minutes while running. Default = None email_interval - The delay between emails. Sent by the Supervisor. Only relevant if email is used. log - The path to the log file that the Supervisor will generate. The log file is plain text and details the execution of each task. Default = name+'.run_log' ''' self.cpu = cpu self.name = name self.delay = delay self.dependencies = dependencies self.force_run = force_run self.email = email self.email_interval = email_interval * 60 self.last_email = time.time() self.log_path = log if(log is not None) else name + '.run_log' self.log_str = '' self.task_map = {} self.task_status = {} self.errors = [] self.targets = [] self.tasks = set() for t in tasks: self.add_task(t)
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
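A minimal sketch of how the Supervisor described above ties tasks together, assuming the Task and Supervisor classes from these records are in scope; commands and file names are invented:

fastqc = Task(command='fastqc reads.fq', targets=['reads_fastqc.html'], cpu=1, name='fastqc')
trim = Task(command='trim reads.fq > trimmed.fq', targets=['trimmed.fq'], cpu=2,
            name='trim', dependencies=[fastqc])

sup = Supervisor(tasks=[fastqc, trim], cpu=4, name='qc_pipeline', delay=1)
sup.run()   # runs fastqc first, then trim once its dependency has finished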
def run(self): ''' Use this function to get a Supervisor to run all tasks that it is managing. Execution occurs inside of a large loop with multiple steps. First, this supervisor checks all running tasks to see if they are finished. Those tasks that are finished are handled and removed from processing. Next the Supervisor checks to see if any tasks can be started by checking their dependencies and making sure executing the task won't result in the Supervisor exceeding the cpu_cap. Then, if force_run is False, the supervisor will check to see if the task can be skipped. If the task can be skipped, it will be, otherwise the task will be started. The loop breaks when there are no tasks left that can be executed and no tasks currently being executed. ''' self.log_file = open(self.log_path, 'w', 1) self.last_email = time.time() self.tasks_to_run = set([t for t in self.tasks]) self.tasks_running = set() cur_cpu = 0 signal.signal(signal.SIGTERM, lambda *x: self.killRun()) try: # execute run while(len(self.tasks_to_run) > 0 or len(self.tasks_running) > 0): size_tasks_to_run = len(self.tasks_to_run) # handling finished tasks for t in [task for task in self.tasks_running]: temp = self.task_status[t] try: if(t.finished()): self.tasks_running.remove(t) cur_cpu -= t.cpu temp['stop'] = int(time.time()) temp['state'] = Supervisor.STATE_FINISHED temp['exit_code'] = t.exit_code h, m, s = time_to_hms(temp['stop'] - temp['start']) temp['message'] = 'Completed in {0!s}h {1!s}m {2!s}s'.format(h, m, s) self.log(t.name+':'+self.task_status[t]['state']+':'+time.asctime()+'\n\n') except (Task.ExitCodeException, Task.TaskException) as inst: cur_cpu -= t.cpu self.tasks_running.remove(t) self.errors.append(inst) temp['stop'] = int(time.time()) temp['state'] = Supervisor.STATE_ERR temp['exit_code'] = t.exit_code h, m, s = time_to_hms(temp['stop'] - temp['start']) temp['message'] = 'Failed in {0!s}h {1!s}m {2!s}s'.format(h, m, s) self.log(t.name+':'+self.task_status[t]['state']+':'+time.asctime()+'\n\n') self.__removeTaskPath__(t) # starting execution of tasks for t in [task for task in self.tasks_to_run]: temp = self.task_status[t] if(t.cpu+cur_cpu > self.cpu): continue elif(not t.checkDependencies()): continue elif(not self.force_run and t.skipable()): temp['state'] = Supervisor.STATE_SKIPPED self.log(t.name+':'+temp['state']+'\n') self.tasks_to_run.remove(t) continue else: cur_cpu += t.cpu self.tasks_running.add(t) self.tasks_to_run.remove(t) t.start() temp['state'] = Supervisor.STATE_RUNNING temp['start'] = int(time.time()) self.log(t.name+':'+self.task_status[t]['state']+':'+time.asctime()+'\n\n') self.log_file.write(t.command+'\n\n') tasks_to_run_delta = size_tasks_to_run - len(self.tasks_to_run) if(len(self.tasks_running) == 0 and tasks_to_run_delta == 0): break time.sleep(self.delay) # handle all errors if(self.errors != [] or (len(self.tasks_running) == 0 and len(self.tasks_to_run) != 0)): err_str = '\n\n' if(len(self.tasks_running) == 0 and len(self.tasks_to_run) != 0): err_str += 'Unable to resolve dependencies during execution of '+self.name err_str += '. The following tasks could not be executed:\n' err_str += '\n'.join(['\t'+t.name for t in self.tasks_to_run]) if(self.errors != []): err_str += '\nEncountered an unexpected Error in the following tasks:\n' for t in self.task_status: if(self.task_status[t]['state'] == Supervisor.STATE_ERR): err_str+= '\tName - {0!s} : Message - {1!s} : Exit Code - {2!s}\n'.format( t.name,self.task_status[t]['message'],self.task_status[t]['exit_code']) removed_tasks = [] for t in self.task_status: if(self.task_status[t]['state']==Supervisor.STATE_REMOVED): removed_tasks.append('\tName - {0!s} : Message - {1!s} : Exit Code- {2!s}\n'.format( t.name,self.task_status[t]['message'],self.task_status[t]['exit_code'])) if(len(removed_tasks)>0): err_str+='\nAs a result of the above errors, the following tasks could not be executed:\n' err_str+=''.join(removed_tasks) err_str+='\nErrors Reported:\n' for e in self.errors: err_str+=str(e)+'\n' raise Exception(err_str) except BaseException as inst: self.killRun() self.log_file.write(str(inst)) raise finally: self.send_email('', subject='MMT Finished')
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
def __removeTaskPath__(self, task): ''' Helper function that removes tasks that can no longer be executed from the supervisor's list of tasks that need to be executed. Basically, anything downstream of task is removed. ''' flag = True removed = set([task]) check_intersection = lambda s, l : any(e in s for e in l) while(flag): flag = False for t in [t for t in self.tasks_to_run]: sup_deps = [d for d in t.dependencies if(isinstance(d, Supervisor))] sup_checks = [check_intersection(removed, s.tasks) for s in sup_deps] if(check_intersection(removed, t.dependencies) or any(sup_checks)): temp = self.task_status[t] self.tasks_to_run.remove(t) removed.add(t) temp['message'] = 'Never Started' temp['state'] = Supervisor.STATE_REMOVED flag = True
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
def log(self, message): ''' Convenience function that allows the Supervisor to write messages to stdout and the supervisor's log file ''' self.log_str += message self.log_file.write(message) if(check_foreground()): print(message) if(time.time() > self.last_email + self.email_interval): self.last_email = time.time() self.send_email(self.log_str, 'MMT Running Update') self.log_str = ''
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
def finished(self): ''' A method called from within run. If a task is dependent on this supervisor, then this supervisor will query all of its tasks to determine if the supervisor is finished executing. A Supervisor is finished if and only if all of the Supervisor's tasks are finished. ''' for t in self.tasks: if(not t.finished()): return False return True
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
def skipable(self): ''' A method called from within run. If a task is dependent on this supervisor, then this supervisor will query all of its tasks to determine if the supervisor doesn't need to be executed. A Supervisor is skippable if and only if all of the Supervisor's tasks are skippable. ''' for t in self.tasks: if(not t.skipable()): return False return True
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
def send_email(self, message, subject=''): ''' A convenience function that allows for the sending of emails to the Supervisor's self.email address. If self.email is None, the method does nothing. Otherwise, Supervisor will start a subprocess that sends the email with the specified subject and message to self.email. subject defaults to ''. The function will always return None. ''' if(self.email is None): return else: # message = ''.join([c if(c!='\n') else '\\n'for c in message]) cmd = "echo '{0!s}' | mail -s '{1!s}' '{2!s}'".format(message, subject, self.email) subprocess.call(cmd, shell=True)
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
def killRun(self): ''' safely stops all running tasks and halts the run. ''' for t in self.tasks_running: t.killRun() self.running = []
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
def add_task(self, task): ''' Allows for a task or supervisor to be added to this supervisor. A supervisor is added to this supervisor by adding all tasks that the supervisor managed. ''' if(isinstance(task, Supervisor)): for t in task.tasks: t.dependencies.extend(task.dependencies) self.add_task(t) if(isinstance(task, Task)): if(task.cpu > self.cpu): err_mess = ('Task {0!s} has a higher cpu than this supervisor, {1!s}.\nYou ' 'must increase this supervisor\'s cpu or decrease {0!s}\'s cpu.') err_mess = err_mess.format(task.name, self.name) raise Exception(err_mess) if(task.name in self.task_map): warnings.warn('A task named '+task.name+' already exists.') self.task_map[task.name] = task self.tasks.add(task) self.task_status[task] = {'state': Supervisor.STATE_INITIALIZED, 'exit_code': None, 'message': None, 'start': None, 'stop': None} self.targets.extend(task.targets)
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
def __str__(self): return self.name
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
def __repr__(self): return self.__str__()
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
def t2_dep(): t2.command = 'python ..\\..\\test.py 8' return True
bluegenes/MakeMyTranscriptome
[ 9, 5, 9, 10, 1425336439 ]
def extractChuunihimeWordpressCom(item): ''' Parser for 'chuunihime.wordpress.com' ''' vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title']) if not (chp or vol) or "preview" in item['title'].lower(): return None tagmap = [ ('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel'), ] for tagname, name, tl_type in tagmap: if tagname in item['tags']: return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type) return False
fake-name/ReadableWebProxy
[ 191, 16, 191, 3, 1437712243 ]
def _calculate_divisions( df, partition_col, repartition, npartitions, upsample=1.0, partition_size=128e6,
blaze/dask
[ 10792, 1599, 10792, 913, 1420397400 ]
def sort_values( df, by, npartitions=None, ascending=True, na_position="last", upsample=1.0, partition_size=128e6, sort_function=None, sort_function_kwargs=None, **kwargs,
blaze/dask
[ 10792, 1599, 10792, 913, 1420397400 ]
def set_index( df, index, npartitions=None, shuffle=None, compute=False, drop=True, upsample=1.0, divisions=None, partition_size=128e6, **kwargs,
blaze/dask
[ 10792, 1599, 10792, 913, 1420397400 ]
def remove_nans(divisions): """Remove nans from divisions These sometime pop up when we call min/max on an empty partition Examples -------- >>> remove_nans((np.nan, 1, 2)) [1, 1, 2] >>> remove_nans((1, np.nan, 2)) [1, 2, 2] >>> remove_nans((1, 2, np.nan)) [1, 2, 2] """ divisions = list(divisions) for i in range(len(divisions) - 2, -1, -1): if pd.isnull(divisions[i]): divisions[i] = divisions[i + 1] for i in range(len(divisions) - 1, -1, -1): if not pd.isnull(divisions[i]): for j in range(i + 1, len(divisions)): divisions[j] = divisions[i] break return divisions
blaze/dask
[ 10792, 1599, 10792, 913, 1420397400 ]
def shuffle( df, index, shuffle=None, npartitions=None, max_branch=32, ignore_index=False, compute=None,
blaze/dask
[ 10792, 1599, 10792, 913, 1420397400 ]
def rearrange_by_divisions( df, column, divisions, max_branch=None, shuffle=None, ascending=True, na_position="last", duplicates=True,
blaze/dask
[ 10792, 1599, 10792, 913, 1420397400 ]
def rearrange_by_column( df, col, npartitions=None, max_branch=None, shuffle=None, compute=None, ignore_index=False,
blaze/dask
[ 10792, 1599, 10792, 913, 1420397400 ]
def __init__(self, buffer=True, tempdir=None): self.tempdir = tempdir or config.get("temporary_directory", None) self.buffer = buffer self.compression = config.get("dataframe.shuffle-compression", None)
blaze/dask
[ 10792, 1599, 10792, 913, 1420397400 ]
def __call__(self, *args, **kwargs): import partd path = tempfile.mkdtemp(suffix=".partd", dir=self.tempdir) try: partd_compression = ( getattr(partd.compressed, self.compression) if self.compression else None ) except AttributeError as e: raise ImportError( "Not able to import and load {} as compression algorithm." "Please check if the library is installed and supported by Partd.".format( self.compression ) ) from e file = partd.File(path) partd.file.cleanup_files.append(path) # Envelope partd file with compression, if set and available if partd_compression: file = partd_compression(file) if self.buffer: return partd.PandasBlocks(partd.Buffer(partd.Dict(), file)) else: return partd.PandasBlocks(file)
blaze/dask
[ 10792, 1599, 10792, 913, 1420397400 ]
def _noop(x, cleanup_token): """ A task that does nothing. """ return x
blaze/dask
[ 10792, 1599, 10792, 913, 1420397400 ]
def partitioning_index(df, npartitions): """ Computes a deterministic index mapping each record to a partition. Identical rows are mapped to the same partition. Parameters ---------- df : DataFrame/Series/Index npartitions : int The number of partitions to group into. Returns ------- partitions : ndarray An array of int64 values mapping each record to a partition. """ return hash_object_dispatch(df, index=False) % int(npartitions)
blaze/dask
[ 10792, 1599, 10792, 913, 1420397400 ]
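partitioning_index above hashes whole rows (via dask's hash_object_dispatch) and folds the hashes into npartitions buckets, so identical rows always land in the same partition. The same idea can be sketched with pandas' row-hashing utility (an illustration, not dask's exact dispatch path):

import pandas as pd
from pandas.util import hash_pandas_object

def partitioning_index(df, npartitions):
    # Deterministic per-row hash, ignoring the index, folded into npartitions buckets.
    return hash_pandas_object(df, index=False).values % int(npartitions)

df = pd.DataFrame({"a": [1, 2, 1, 2], "b": ["x", "y", "x", "y"]})
print(partitioning_index(df, 3))   # rows 0 and 2 (identical) land in the same bucket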
def cleanup_partd_files(p, keys): """ Cleanup the files in a partd.File dataset. Parameters ---------- p : partd.Interface File or Encode wrapping a file should be OK. keys: List Just for scheduling purposes, not actually used. """ import partd if isinstance(p, partd.Encode): maybe_file = p.partd else: maybe_file = p if isinstance(maybe_file, partd.File): path = maybe_file.path else: path = None if path: shutil.rmtree(path, ignore_errors=True)
blaze/dask
[ 10792, 1599, 10792, 913, 1420397400 ]
def set_partitions_pre(s, divisions, ascending=True, na_position="last"): try: if ascending: partitions = divisions.searchsorted(s, side="right") - 1 else: partitions = len(divisions) - divisions.searchsorted(s, side="right") - 1 except TypeError: # `searchsorted` fails if `s` contains nulls and strings partitions = np.empty(len(s), dtype="int32") not_null = s.notna() if ascending: partitions[not_null] = divisions.searchsorted(s[not_null], side="right") - 1 else: partitions[not_null] = ( len(divisions) - divisions.searchsorted(s[not_null], side="right") - 1 ) partitions[(partitions < 0) | (partitions >= len(divisions) - 1)] = ( len(divisions) - 2 if ascending else 0 ) partitions[s.isna().values] = len(divisions) - 2 if na_position == "last" else 0 return partitions
blaze/dask
[ 10792, 1599, 10792, 913, 1420397400 ]
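set_partitions_pre above assigns each value to a partition by binary-searching the sorted division boundaries; a value equal to a boundary falls into the partition that starts there, and the final clamp keeps out-of-range values in the edge partitions. A small worked example of that searchsorted step (ascending case, no nulls):

import numpy as np

divisions = np.array([0, 10, 20, 30])          # three partitions: [0,10), [10,20), [20,30]
values = np.array([3, 10, 25, 29, 30])

parts = divisions.searchsorted(values, side="right") - 1
parts[(parts < 0) | (parts >= len(divisions) - 1)] = len(divisions) - 2
print(parts)   # [0 1 2 2 2] -- 30 is clamped into the last partition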
def shuffle_group_get(g_head, i): g, head = g_head if i in g: return g[i] else: return head
blaze/dask
[ 10792, 1599, 10792, 913, 1420397400 ]
def ensure_cleanup_on_exception(p): """Ensure a partd.File is cleaned up. We have several tasks referring to a `partd.File` instance. We want to ensure that the file is cleaned up if and only if there's an exception in the tasks using the `partd.File`. """ try: yield except Exception: # the function (e.g. shuffle_group_3) had an internal exception. # We'll cleanup our temporary files and re-raise. try: p.drop() except Exception: logger.exception("ignoring exception in ensure_cleanup_on_exception") raise
blaze/dask
[ 10792, 1599, 10792, 913, 1420397400 ]
def set_index_post_scalar(df, index_name, drop, column_dtype): df2 = df.drop("_partitions", axis=1).set_index(index_name, drop=drop) df2.columns = df2.columns.astype(column_dtype) return df2
blaze/dask
[ 10792, 1599, 10792, 913, 1420397400 ]
def drop_overlap(df, index): return df.drop(index) if index in df.index else df
blaze/dask
[ 10792, 1599, 10792, 913, 1420397400 ]
def fix_overlap(ddf, overlap): """Ensures that the upper bound on each partition of ddf (except the last) is exclusive""" name = "fix-overlap-" + tokenize(ddf, overlap) n = len(ddf.divisions) - 1 dsk = {(name, i): (ddf._name, i) for i in range(n)} frames = [] for i in overlap: # `frames` is a list of data from previous partitions that we may want to # move to partition i. Here, we add "overlap" from the previous partition # (i-1) to this list. frames.append((get_overlap, (ddf._name, i - 1), ddf.divisions[i])) # Make sure that any data added from partition i-1 to `frames` is removed # from partition i-1. dsk[(name, i - 1)] = (drop_overlap, dsk[(name, i - 1)], ddf.divisions[i]) # We do not want to move "overlap" from the previous partition (i-1) into # this partition (i) if the data from this partition will need to be moved # to the next partition (i+1) anyway. If we concatenate data too early, # we may lose rows (https://github.com/dask/dask/issues/6972). if i == ddf.npartitions - 2 or ddf.divisions[i] != ddf.divisions[i + 1]: frames.append((ddf._name, i)) dsk[(name, i)] = (methods.concat, frames) frames = [] graph = HighLevelGraph.from_collections(name, dsk, dependencies=[ddf]) return new_dd_object(graph, name, ddf._meta, ddf.divisions)
blaze/dask
[ 10792, 1599, 10792, 913, 1420397400 ]
def sync_user_profile(sender, instance, created, **kwargs): # pylint: disable=unused-argument """ Signal handler create/update a DiscussionUser every time a profile is created/updated """ if not settings.FEATURES.get('OPEN_DISCUSSIONS_USER_SYNC', False): return transaction.on_commit(lambda: tasks.sync_discussion_user.delay(instance.user_id))
mitodl/micromasters
[ 28, 16, 28, 318, 1456876397 ]
def add_staff_as_moderator(sender, instance, created, **kwargs): # pylint: disable=unused-argument """ Signal handler add user as moderator when his staff role on program is added """ if not settings.FEATURES.get('OPEN_DISCUSSIONS_USER_SYNC', False): return if instance.role not in Role.permission_to_roles[Permissions.CAN_CREATE_FORUMS]: return transaction.on_commit( lambda: tasks.add_user_as_moderator_to_channel.delay( instance.user_id, instance.program_id, ) )
mitodl/micromasters
[ 28, 16, 28, 318, 1456876397 ]
def is_done(self): from os.path import exists if self.options['files'].is_dependency(): return False for f in self.options["files"].raw(): if exists(f): return False return True
thasso/pyjip
[ 19, 8, 19, 18, 1375120760 ]
def get_command(self): return "bash", "for file in ${files}; do rm -f $file; done"
thasso/pyjip
[ 19, 8, 19, 18, 1375120760 ]
def print_demo_lengths(demos): num_frames_per_episode = [len(demo[2]) for demo in demos] logger.info('Demo length: {:.3f}+-{:.3f}'.format( np.mean(num_frames_per_episode), np.std(num_frames_per_episode)))
mila-iqia/babyai
[ 575, 129, 575, 9, 1538503104 ]
def generate_demos_cluster(): demos_per_job = args.episodes // args.jobs demos_path = utils.get_demos_path(args.demos, args.env, 'agent') job_demo_names = [os.path.realpath(demos_path + '.shard{}'.format(i)) for i in range(args.jobs)] for demo_name in job_demo_names: job_demos_path = utils.get_demos_path(demo_name) if os.path.exists(job_demos_path): os.remove(job_demos_path) command = [args.job_script] command += sys.argv[1:] for i in range(args.jobs): cmd_i = list(map(str, command + ['--seed', args.seed + i * demos_per_job] + ['--demos', job_demo_names[i]] + ['--episodes', demos_per_job] + ['--jobs', 0] + ['--valid-episodes', 0])) logger.info('LAUNCH COMMAND') logger.info(cmd_i) output = subprocess.check_output(cmd_i) logger.info('LAUNCH OUTPUT') logger.info(output.decode('utf-8')) job_demos = [None] * args.jobs while True: jobs_done = 0 for i in range(args.jobs): if job_demos[i] is None or len(job_demos[i]) < demos_per_job: try: logger.info("Trying to load shard {}".format(i)) job_demos[i] = utils.load_demos(utils.get_demos_path(job_demo_names[i])) logger.info("{} demos ready in shard {}".format( len(job_demos[i]), i)) except Exception: logger.exception("Failed to load the shard") if job_demos[i] and len(job_demos[i]) == demos_per_job: jobs_done += 1 logger.info("{} out of {} shards done".format(jobs_done, args.jobs)) if jobs_done == args.jobs: break logger.info("sleep for 60 seconds") time.sleep(60) # Training demos all_demos = [] for demos in job_demos: all_demos.extend(demos) utils.save_demos(all_demos, demos_path)
mila-iqia/babyai
[ 575, 129, 575, 9, 1538503104 ]
def test_simple(self): f = mod.get_function('simple') result = interp.run(f, args=[10.0]) assert result == 100.0, result
flypy/pykit
[ 26, 5, 26, 1, 1371216754 ]
def test_exceptions(self): f = mod.get_function('raise') try: result = interp.run(f) except interp.UncaughtException as e: exc, = e.args assert isinstance(exc, TypeError), exc else: assert False, result
flypy/pykit
[ 26, 5, 26, 1, 1371216754 ]
def _meta_from_array(x, columns=None, index=None, meta=None): """Create empty DataFrame or Series which has correct dtype""" if x.ndim > 2: raise ValueError( "from_array does not input more than 2D array, got" " array with shape %r" % (x.shape,) ) if index is not None: if not isinstance(index, Index): raise ValueError("'index' must be an instance of dask.dataframe.Index") index = index._meta if meta is None: meta = pd.DataFrame() if getattr(x.dtype, "names", None) is not None: # record array has named columns if columns is None: columns = list(x.dtype.names) elif np.isscalar(columns): raise ValueError("For a struct dtype, columns must be a list.") elif not all(i in x.dtype.names for i in columns): extra = sorted(set(columns).difference(x.dtype.names)) raise ValueError(f"dtype {x.dtype} doesn't have fields {extra}") fields = x.dtype.fields dtypes = [fields[n][0] if n in fields else "f8" for n in columns] elif x.ndim == 1: if np.isscalar(columns) or columns is None: return meta._constructor_sliced( [], name=columns, dtype=x.dtype, index=index ) elif len(columns) == 1: return meta._constructor( np.array([], dtype=x.dtype), columns=columns, index=index ) raise ValueError( "For a 1d array, columns must be a scalar or single element list" ) else: if np.isnan(x.shape[1]): raise ValueError("Shape along axis 1 must be known") if columns is None: columns = list(range(x.shape[1])) if x.ndim == 2 else [0] elif len(columns) != x.shape[1]: raise ValueError( "Number of column names must match width of the array. " f"Got {len(columns)} names for {x.shape[1]} columns" ) dtypes = [x.dtype] * len(columns) data = {c: np.array([], dtype=dt) for (c, dt) in zip(columns, dtypes)} return meta._constructor(data, columns=columns, index=index)
blaze/dask
[ 10792, 1599, 10792, 913, 1420397400 ]
def from_pandas(data, npartitions=None, chunksize=None, sort=True, name=None): """ Construct a Dask DataFrame from a Pandas DataFrame This splits an in-memory Pandas dataframe into several parts and constructs a dask.dataframe from those parts on which Dask.dataframe can operate in parallel. By default, the input dataframe will be sorted by the index to produce cleanly-divided partitions (with known divisions). To preserve the input ordering, make sure the input index is monotonically-increasing. The ``sort=False`` option will also avoid reordering, but will not result in known divisions. Note that, despite parallelism, Dask.dataframe may not always be faster than Pandas. We recommend that you stay with Pandas for as long as possible before switching to Dask.dataframe. Parameters ---------- data : pandas.DataFrame or pandas.Series The DataFrame/Series with which to construct a Dask DataFrame/Series npartitions : int, optional The number of partitions of the index to create. Note that depending on the size and index of the dataframe, the output may have fewer partitions than requested. chunksize : int, optional The number of rows per index partition to use. sort: bool Sort the input by index first to obtain cleanly divided partitions (with known divisions). If False, the input will not be sorted, and all divisions will be set to None. Default is True. name: string, optional An optional keyname for the dataframe. Defaults to hashing the input Returns ------- dask.DataFrame or dask.Series A dask DataFrame/Series partitioned along the index Examples -------- >>> from dask.dataframe import from_pandas >>> df = pd.DataFrame(dict(a=list('aabbcc'), b=list(range(6))), ... index=pd.date_range(start='20100101', periods=6)) >>> ddf = from_pandas(df, npartitions=3) >>> ddf.divisions # doctest: +NORMALIZE_WHITESPACE (Timestamp('2010-01-01 00:00:00', freq='D'), Timestamp('2010-01-03 00:00:00', freq='D'), Timestamp('2010-01-05 00:00:00', freq='D'), Timestamp('2010-01-06 00:00:00', freq='D')) >>> ddf = from_pandas(df.a, npartitions=3) # Works with Series too! >>> ddf.divisions # doctest: +NORMALIZE_WHITESPACE (Timestamp('2010-01-01 00:00:00', freq='D'), Timestamp('2010-01-03 00:00:00', freq='D'), Timestamp('2010-01-05 00:00:00', freq='D'), Timestamp('2010-01-06 00:00:00', freq='D')) Raises ------ TypeError If something other than a ``pandas.DataFrame`` or ``pandas.Series`` is passed in. See Also -------- from_array : Construct a dask.DataFrame from an array that has record dtype read_csv : Construct a dask.DataFrame from a CSV file """ if isinstance(getattr(data, "index", None), pd.MultiIndex): raise NotImplementedError("Dask does not support MultiIndex Dataframes.") if not has_parallel_type(data): raise TypeError("Input must be a pandas DataFrame or Series") if (npartitions is None) == (chunksize is None): raise ValueError("Exactly one of npartitions and chunksize must be specified.") nrows = len(data) if chunksize is None: chunksize = int(ceil(nrows / npartitions)) name = name or ("from_pandas-" + tokenize(data, chunksize)) if not nrows: return new_dd_object({(name, 0): data}, name, data, [None, None]) if sort and not data.index.is_monotonic_increasing: data = data.sort_index(ascending=True) if sort: divisions, locations = sorted_division_locations( data.index, chunksize=chunksize ) else: locations = list(range(0, nrows, chunksize)) + [len(data)] divisions = [None] * len(locations) dsk = { (name, i): data.iloc[start:stop] for i, (start, stop) in enumerate(zip(locations[:-1], locations[1:])) } return new_dd_object(dsk, name, data, divisions)
blaze/dask
[ 10792, 1599, 10792, 913, 1420397400 ]
def dataframe_from_ctable(x, slc, columns=None, categories=None, lock=lock): """Get DataFrame from bcolz.ctable Parameters ---------- x: bcolz.ctable slc: slice columns: list of column names or None >>> import bcolz >>> x = bcolz.ctable([[1, 2, 3, 4], [10, 20, 30, 40]], names=['a', 'b']) >>> dataframe_from_ctable(x, slice(1, 3)) a b 1 2 20 2 3 30 >>> dataframe_from_ctable(x, slice(1, 3), columns=['b']) b 1 20 2 30 >>> dataframe_from_ctable(x, slice(1, 3), columns='b') 1 20 2 30 Name: b, dtype: int... """ import bcolz if columns is None: columns = x.dtype.names if isinstance(columns, tuple): columns = list(columns) x = x[columns] if type(slc) is slice: start = slc.start stop = slc.stop if slc.stop < len(x) else len(x) else: start = slc[0].start stop = slc[0].stop if slc[0].stop < len(x) else len(x) idx = pd.Index(range(start, stop)) if lock: lock.acquire() try: if isinstance(x, bcolz.ctable): chunks = [x[name][slc] for name in columns] if categories is not None: chunks = [ pd.Categorical.from_codes( np.searchsorted(categories[name], chunk), categories[name], True ) if name in categories else chunk for name, chunk in zip(columns, chunks) ] result = pd.DataFrame( dict(zip(columns, chunks)), columns=columns, index=idx ) elif isinstance(x, bcolz.carray): chunk = x[slc] if categories is not None and columns and columns in categories: chunk = pd.Categorical.from_codes( np.searchsorted(categories[columns], chunk), categories[columns], True, ) result = pd.Series(chunk, name=columns, index=idx) finally: if lock: lock.release() return result
blaze/dask
[ 10792, 1599, 10792, 913, 1420397400 ]
def _link(token, result): """A dummy function to link results together in a graph We use this to enforce an artificial sequential ordering on tasks that don't explicitly pass around a shared resource """ return None
blaze/dask
[ 10792, 1599, 10792, 913, 1420397400 ]
def to_bag(df, index=False, format="tuple"): """Create Dask Bag from a Dask DataFrame Parameters ---------- index : bool, optional If True, the elements are tuples of ``(index, value)``, otherwise they're just the ``value``. Default is False. format : {"tuple", "dict"},optional Whether to return a bag of tuples or dictionaries. Examples -------- >>> bag = df.to_bag() # doctest: +SKIP """ from ...bag.core import Bag if not isinstance(df, (DataFrame, Series)): raise TypeError("df must be either DataFrame or Series") name = "to_bag-" + tokenize(df, index, format) dsk = { (name, i): (_df_to_bag, block, index, format) for (i, block) in enumerate(df.__dask_keys__()) } dsk.update(df.__dask_optimize__(df.__dask_graph__(), df.__dask_keys__())) return Bag(dsk, name, df.npartitions)
blaze/dask
[ 10792, 1599, 10792, 913, 1420397400 ]
def from_delayed( dfs, meta=None, divisions=None, prefix="from-delayed", verify_meta=True
blaze/dask
[ 10792, 1599, 10792, 913, 1420397400 ]
def sorted_division_locations(seq, npartitions=None, chunksize=None): """Find division locations and values in sorted list Examples -------- >>> L = ['A', 'B', 'C', 'D', 'E', 'F'] >>> sorted_division_locations(L, chunksize=2) (['A', 'C', 'E', 'F'], [0, 2, 4, 6]) >>> sorted_division_locations(L, chunksize=3) (['A', 'D', 'F'], [0, 3, 6]) >>> L = ['A', 'A', 'A', 'A', 'B', 'B', 'B', 'C'] >>> sorted_division_locations(L, chunksize=3) (['A', 'B', 'C'], [0, 4, 8]) >>> sorted_division_locations(L, chunksize=2) (['A', 'B', 'C'], [0, 4, 8]) >>> sorted_division_locations(['A'], chunksize=2) (['A', 'A'], [0, 1]) """ if (npartitions is None) == (chunksize is None): raise ValueError("Exactly one of npartitions and chunksize must be specified.") if npartitions: chunksize = ceil(len(seq) / npartitions) positions = [0] values = [seq[0]] for pos in range(0, len(seq), chunksize): if pos <= positions[-1]: continue while pos + 1 < len(seq) and seq[pos - 1] == seq[pos]: pos += 1 values.append(seq[pos]) if pos == len(seq) - 1: pos += 1 positions.append(pos) if positions[-1] != len(seq): positions.append(len(seq)) values.append(seq[-1]) return values, positions
blaze/dask
[ 10792, 1599, 10792, 913, 1420397400 ]
def _render_plugin(plugin, context, renderer=None): if renderer: content = renderer.render_plugin( instance=plugin, context=context, editable=False, ) else: content = plugin.render_plugin(context) return content
aldryn/aldryn-search
[ 48, 71, 48, 25, 1373443182 ]
def get_plugin_index_data(base_plugin, request): text_bits = [] instance, plugin_type = base_plugin.get_plugin_instance() if instance is None or instance.plugin_type in EXCLUDED_PLUGINS: # this is an empty plugin or excluded from search return text_bits search_fields = getattr(instance, 'search_fields', []) if hasattr(instance, 'search_fulltext'): # check if the plugin instance has search enabled search_contents = instance.search_fulltext elif hasattr(base_plugin, 'search_fulltext'): # now check in the base plugin instance (CMSPlugin) search_contents = base_plugin.search_fulltext elif hasattr(plugin_type, 'search_fulltext'): # last check in the plugin class (CMSPluginBase) search_contents = plugin_type.search_fulltext else: # disabled if there's search fields defined, # otherwise it's enabled. search_contents = not bool(search_fields) if search_contents: context = RequestContext(request) updates = {} engine = Engine.get_default() for processor in engine.template_context_processors: updates.update(processor(context.request)) context.dicts[context._processors_index] = updates try: # django-cms>=3.5 renderer = request.toolbar.content_renderer except AttributeError: # django-cms>=3.4 renderer = context.get('cms_content_renderer') plugin_contents = _render_plugin(instance, context, renderer) if plugin_contents: text_bits = get_cleaned_bits(plugin_contents) else: values = (get_field_value(instance, field) for field in search_fields) for value in values: cleaned_bits = get_cleaned_bits(value or '') text_bits.extend(cleaned_bits) return text_bits
aldryn/aldryn-search
[ 48, 71, 48, 25, 1373443182 ]
def main(): for model_name, ( select_config, feature_filename, ) in config_feature_file_pairs.items(): print("running {} to create model files".format(select_config)) # have to put tmp_output_dir into yaml file select_config = TEST_CONFIGS / select_config feature_file = sorted(FEATURE_FILES_DST.glob(feature_filename)) if len(feature_file) != 1: raise ValueError( "found more than one feature file with search {}:\n{}".format( feature_filename, feature_file ) ) else: # call `resolve` to get full path to model file, so pytest fixtures find it from inside tmp directories feature_file = feature_file[0].resolve() replace_dict = { "feature_file": ("replace with feature_file", str(feature_file)), "output_dir": ("replace with tmp_output_dir", str(MODEL_FILES_DST)), } select_config_rewritten = rewrite_config( select_config, str(MODEL_FILES_DST), replace_dict ) select_output_before = [ select_output_dir for select_output_dir in sorted(MODEL_FILES_DST.glob("*select*output*")) if select_output_dir.is_dir() ] hvc.select(select_config_rewritten) select_output_after = [ select_output_dir for select_output_dir in sorted(MODEL_FILES_DST.glob("*select*output*")) if select_output_dir.is_dir() ] select_output_dir = [ after for after in select_output_after if after not in select_output_before ] if len(select_output_dir) != 1: raise ValueError( "incorrect number of outputs when looking for select " "ouput dirs:\n{}".format(select_output_dir) ) else: select_output_dir = select_output_dir[0] # arbitrarily grab the last .model and associated .meta file model_file = sorted(select_output_dir.glob("*/*.model"))[-1] # call `resolve` to get full path to model file, so pytest fixtures find it from inside tmp directories model_file_dst = MODEL_FILES_DST.joinpath(model_name + ".model").resolve() shutil.move(src=model_file, dst=model_file_dst) meta_file = sorted(select_output_dir.glob("*/*.meta"))[-1] meta_file_dst = MODEL_FILES_DST.joinpath(model_name + ".meta") shutil.move(src=str(meta_file), dst=str(meta_file_dst)) # need to change 'model_filename' in .meta file meta_file = joblib.load(meta_file_dst) meta_file["model_filename"] = os.path.abspath(model_file_dst) joblib.dump(meta_file, meta_file_dst) # clean up -- delete all the other model files, directory, and config shutil.rmtree(select_output_dir) os.remove(select_config_rewritten)
NickleDave/hybrid-vocal-classifier
[ 23, 8, 23, 44, 1483596187 ]
def data(N=100, period=1, theta=[10, 2, 3], dy=1, rseed=0): """Generate some data for testing""" rng = np.random.RandomState(rseed) t = 20 * period * rng.rand(N) omega = 2 * np.pi / period y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t) dy = dy * (0.5 + rng.rand(N)) y += dy * rng.randn(N) return t, y, dy
jakevdp/lombscargle
[ 4, 2, 4, 2, 1458772994 ]
def test_output_shapes(method, shape, data): t, y, dy = data freq = np.asarray(np.random.rand(*shape)) freq.flat = np.arange(1, freq.size + 1) PLS = lombscargle(t, y, frequency=freq, fit_bias=False, method=method) assert_equal(PLS.shape, shape)
jakevdp/lombscargle
[ 4, 2, 4, 2, 1458772994 ]
def test_units_match(method, t_unit, frequency_unit, y_unit, data): t, y, dy = data dy = dy.mean() # scipy only supports constant errors t = t * t_unit y = y * y_unit dy = dy * y_unit frequency = np.linspace(0.5, 1.5, 10) * frequency_unit PLS = lombscargle(t, y, frequency=frequency, fit_bias=False, method=method) assert_equal(PLS.unit, units.dimensionless_unscaled) PLS = lombscargle(t, y, dy, frequency=frequency, fit_bias=False, method=method) assert_equal(PLS.unit, units.dimensionless_unscaled)
jakevdp/lombscargle
[ 4, 2, 4, 2, 1458772994 ]
def test_units_mismatch(method, data): t, y, dy = data dy = dy.mean() # scipy only supports constant errors t = t * units.second y = y * units.mag frequency = np.linspace(0.5, 1.5, 10) # this should fail because frequency and 1/t units do not match with pytest.raises(ValueError) as err: lombscargle(t, y, frequency=frequency, method=method, fit_bias=False) assert str(err.value).startswith('Units of frequency not equivalent') # this should fail because dy and y units do not match with pytest.raises(ValueError) as err: lombscargle(t, y, dy, frequency / t.unit, method=method, fit_bias=False) assert str(err.value).startswith('Units of y not equivalent')
jakevdp/lombscargle
[ 4, 2, 4, 2, 1458772994 ]
def test_common_interface(method, center_data, freq, data): t, y, dy = data if freq is None: freq_expected = LombScargle(t, y, dy).autofrequency(t) else: freq_expected = freq expected_PLS = lombscargle_slow(t, y, dy=None, frequency=freq_expected, fit_bias=False, center_data=center_data) PLS = lombscargle(t, y, frequency=freq, method=method, fit_bias=False, center_data=center_data) if method in ['fastchi2', 'fast', 'auto']: atol = 0.005 else: atol = 0 assert_allclose(PLS, expected_PLS, atol=atol)
jakevdp/lombscargle
[ 4, 2, 4, 2, 1458772994 ]
def test_object_interface_power(data, method, center_data, fit_bias, freq): t, y, dy = data if method == 'scipy' and fit_bias: return if method == 'scipy': dy = None expected_PLS = lombscargle(t, y, dy, frequency=freq, method=method, fit_bias=fit_bias, center_data=center_data) ls = LombScargle(t, y, dy, fit_bias=fit_bias, center_data=center_data) PLS = ls.power(freq, method=method) assert_allclose(PLS, expected_PLS)
jakevdp/lombscargle
[ 4, 2, 4, 2, 1458772994 ]
def test_object_interface_autopower(data, method, center_data, fit_bias): t, y, dy = data if method == 'scipy' and fit_bias: return if method == 'scipy': dy = None ls = LombScargle(t, y, dy, fit_bias=fit_bias, center_data=center_data) freq, PLS = ls.autopower(method=method) expected_PLS = lombscargle(t, y, dy, freq, method=method, fit_bias=fit_bias, center_data=center_data) # TODO: test frequency output? assert_allclose(PLS, expected_PLS)
jakevdp/lombscargle
[ 4, 2, 4, 2, 1458772994 ]
def test_object_interface_model(fit_bias, freq): rand = np.random.RandomState(0) t = 10 * rand.rand(40) params = 10 * rand.rand(3) y = np.zeros_like(t) if fit_bias: y += params[0] y += params[1] * np.sin(2 * np.pi * freq * (t - params[2])) ls = LombScargle(t, y, center_data=False, fit_bias=fit_bias) y_fit = ls.model(t, freq) assert_allclose(y_fit, y)
jakevdp/lombscargle
[ 4, 2, 4, 2, 1458772994 ]