Columns: function (string, 11–56k chars), repo_name (string, 5–60 chars), features (sequence)
def get_next_by_order(self, **kwargs):
    """
    Retrieves next object by order.
    """
    return self._get_next_or_previous_by_order(True, **kwargs)
cccs-web/mezzanine
[ 2, 1, 2, 1, 1404268714 ]
def is_editable(self, request):
    """
    Restrict in-line editing to the object's owner and superusers.
    """
    return request.user.is_superuser or request.user.id == self.user_id
cccs-web/mezzanine
[ 2, 1, 2, 1, 1404268714 ]
def create_site_permission(sender, **kw):
    sender_name = "%s.%s" % (sender._meta.app_label, sender._meta.object_name)
    if sender_name.lower() != user_model_name.lower():
        return
    user = kw["instance"]
    if user.is_staff and not user.is_superuser:
        perm, created = SitePermission.objects.get_or_create(user=user)
        if created or perm.sites.count() < 1:
            perm.sites.add(current_site_id())
cccs-web/mezzanine
[ 2, 1, 2, 1, 1404268714 ]
def load_(self, h):
    return 1
albertz/music-player
[ 483, 61, 483, 16, 1345772141 ]
def testMethods(self):
    self.assertResultIsBOOL(TestCIPluginInterfaceHelper.load_)
albertz/music-player
[ 483, 61, 483, 16, 1345772141 ]
def findIndex(grid, charElem):
    for i in range(len(grid)):
        for j in range(len(grid[i])):
            if grid[i][j] == charElem:
                return [i, j]
    return [-1, -1]
tejasnikumbh/Algorithms
[ 6, 5, 6, 1, 1417624809 ]
def genSurr(grid, i, j):
    validIndices = []
    surrIndices = [(1, 0), (-1, 0), (0, 1), (0, -1)]
    if len(grid) == 0:
        return -1
    else:
        # Number of rows and columns in grid
        ROWS = len(grid)
        COLS = len(grid[0])
        for (a, b) in surrIndices:
            xIndex = i + a
            yIndex = j + b
            if xIndex >= ROWS or xIndex < 0:
                continue
            if yIndex >= COLS or yIndex < 0:
                continue
            validIndices.append((xIndex, yIndex))
    return validIndices
tejasnikumbh/Algorithms
[ 6, 5, 6, 1, 1417624809 ]
def genValidSurr(grid, surr, validChars, visitedSet):
    validSet = []
    for point in surr:
        indexI = point[0]
        indexJ = point[1]
        gridPoint = grid[indexI][indexJ]
        if (gridPoint in validChars) and not (point in visitedSet):
            validSet.append(point)
    return validSet
tejasnikumbh/Algorithms
[ 6, 5, 6, 1, 1417624809 ]
def dfsPathSearch(grid, startIndex, goalIndex, pathSoFar, visitedNodes):
    # Marking the current node as explored
    visitedNodes.add(startIndex)
    # Base case of recursion in case we want to stop
    # after a certain condition
    if startIndex == goalIndex:
        return True
    else:
        # Recursive steps
        # Generate all valid surrounding points
        s = genSurr(grid, startIndex[0], startIndex[1])
        validChars = set()
        validChars.add('.')
        validChars.add('*')
        sValid = genValidSurr(grid, s, validChars, visitedNodes)
        # Return False in case no valid surrounding point is found
        if len(sValid) == 0:
            return False
        # Iterate through all valid surrounding points
        for point in sValid:
            pathExists = dfsPathSearch(grid, point, goalIndex,
                                       pathSoFar, visitedNodes)
            if pathExists:
                # If there was more than one choice here, increment
                # wand use by one
                pathSoFar.append(point)
                return True
        # Return False if no point in the valid surroundings
        # can generate a path to the goal
        return False
tejasnikumbh/Algorithms
[ 6, 5, 6, 1, 1417624809 ]
def parseGrid(stream, r, c):
    grid = [[] for x in range(r)]
    for i in range(r):
        grid[i] = list(stream.readline().rstrip())
    return grid
tejasnikumbh/Algorithms
[ 6, 5, 6, 1, 1417624809 ]
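A minimal driver for the four helpers above (a sketch, not part of the repo; the grid and the 'M' start and '*' goal markers are hypothetical, mirroring the wizard-maze style puzzle these functions appear to solve). Note that dfsPathSearch stores indices in a set, so the list returned by findIndex is converted to a hashable tuple first.

# Hypothetical usage of findIndex/genSurr/genValidSurr/dfsPathSearch.
grid = [list("M.."),
        list("##."),
        list("..*")]
start = tuple(findIndex(grid, 'M'))  # lists are unhashable, so use tuples
goal = tuple(findIndex(grid, '*'))
pathSoFar = []
found = dfsPathSearch(grid, start, goal, pathSoFar, set())
print(found, pathSoFar)  # True, path cells in reverse discovery order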
def get_installed_pypackages():
    return {p.project_name.lower(): p for p in pkg_resources.working_set}
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
def warning(message, **kwargs):
    kwargs["fg"] = kwargs.get("fg", "red")
    click.secho(u"warning: {}".format(message), **kwargs)
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
def fatal(message, **kwargs):
    error(message, level="fatal", **kwargs)
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
def get(url, session=None, silent=not debug, **kwargs):
    """Retrieve a given URL and log response.

    :param session: a :class:`requests.Session` object.
    :param silent: if **True**, response status and URL will not be printed.
    """
    session = session or requests
    kwargs["verify"] = kwargs.get("verify", True)
    r = session.get(url, **kwargs)
    if not silent:
        status_code = click.style(
            str(r.status_code),
            fg="green" if r.status_code in (200, 304) else "red")
        click.echo(status_code + " " + url)
    if r.status_code == 404:
        raise PackageNotFoundError
    return r
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
def get_dir_from_zipfile(zip_file, fallback=None):
    """Return the name of the root folder in a zip file.

    :param zip_file: a :class:`zipfile.ZipFile` instance.
    :param fallback: if `None`, the name of the zip file is used. This is
        returned if the zip file contains more than one top-level
        directory, or none at all.
    """
    fallback = fallback or zip_file.filename
    directories = [name for name in zip_file.namelist()
                   if name.endswith("/") and len(PurePath(name).parts) == 1]
    # Only a single top-level directory is unambiguous; otherwise use the
    # fallback, as the docstring promises (this also covers the empty case).
    return directories[0] if len(directories) == 1 else fallback
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
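A usage sketch for get_dir_from_zipfile (the archive name is hypothetical, not from the repo). GitHub zipballs, for instance, wrap everything in a single "user-repo-sha/" root folder, which is what this helper recovers.

import zipfile

with zipfile.ZipFile("pkg.zip") as zf:
    root = get_dir_from_zipfile(zf, fallback="pkg")
    print(root)  # e.g. "user-repo-abc123/", or "pkg" if ambiguous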
def on_cleanup_error(function, path, excinfo):
    click.secho("warning: failed to remove file or directory: {}\n"
                "please delete it manually.".format(path), fg="red")
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
def __init__(self, package, version):
    """Construct a new requirement.

    :param package: the package name.
    :param version: a semver compatible version specification.
    """
    self.package = package
    self.version = version
    if self.version and not re.match(r"[<=>~]", version[:2]):
        self.version = "=={}".format(self.version)
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
def coerce(cls, string):
    """Create a :class:`Requirement` object from a given package spec."""
    match = re.match(cls.spec_regex, string)
    if not match:
        raise InvalidRequirementSpecError("could not parse requirement")
    package = match.group(1)
    if all(match.group(2, 3)):
        version = "".join(match.group(2, 3))
    else:
        version = None
    return cls(package, version)
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
def __eq__(self, other):
    return (isinstance(other, self.__class__) and
            other.package == self.package)
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
def __str__(self):
    return "".join([self.package, self.version or ""])
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
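The pinning behaviour of Requirement is easiest to see with a few hypothetical specs (a sketch assuming the surrounding class definition, not a test from the repo):

assert str(Requirement("flask", "0.10.1")) == "flask==0.10.1"  # bare version gets pinned
assert str(Requirement("flask", ">=0.10")) == "flask>=0.10"    # explicit operator passes through
assert str(Requirement("flask", None)) == "flask"              # no version at all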
def __init__(self, filename=None):
    self.filename = None
    if filename:
        self.load(filename)
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
def load(self, requirements_file=None):
    """Load or reload requirements from a requirements.txt file.

    :param requirements_file: if not given, the filename used from
        initialization will be read again.
    """
    if requirements_file is None:
        requirements_file = self.filename
        if requirements_file is None:
            raise ValueError("no filename provided")
    elif isinstance(requirements_file, text_type):
        requirements_file = Path(requirements_file)
    self.clear()
    with requirements_file.open() as f:
        self.loads(f.read())
    if isinstance(requirements_file, (text_type, Path)):
        self.filename = requirements_file
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
def remove(self, elem):
    """Remove a requirement.

    :param elem: a string or :class:`Requirement` instance.
    """
    if isinstance(elem, text_type):
        for requirement in self:
            if requirement.package == elem:
                return super(Requirements, self).remove(requirement)
    return super(Requirements, self).remove(elem)
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
def __repr__(self):
    # self.filename may still be None, in which case show an empty name.
    return "<Requirements({})>".format(
        self.filename.name if self.filename else "")
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
def __init__(self, name, filename=None):
    self.name = name
    super(NamedRequirements, self).__init__(filename=filename)
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
def __init__(self, groups=None):
    super(GroupedRequirements, self).__init__(NamedRequirements)
    self.groups = groups or self.default_groups
    self.filename = None
    self.create_default_groups()
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
def create_default_groups(self):
    for group in self.groups:
        group = group.replace(" ", "_").lower()
        self[group] = NamedRequirements(group)
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
def load(self, filename, create_if_missing=True):
    filename = Path(filename)
    if not filename.exists() and create_if_missing:
        self.load_pip_requirements()
        with filename.open("w") as f:
            f.write(yaml.dump(self.serialized, default_flow_style=False,
                              encoding=None))
        self.filename = filename
        return self.save(filename)
    with filename.open() as f:
        for group, requirements in yaml.load(f.read()).items():
            for requirement in requirements:
                self[group].add(Requirement.coerce(requirement))
    self.filename = filename
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
def serialized(self):
    to_ret = {}
    for group, requirements in self.items():
        to_ret[group] = [str(requirement) for requirement in requirements]
    return to_ret
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
def yaml(self):
    return yaml.dump(self.serialized, default_flow_style=False,
                     encoding=None)
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
def get_package_url(cls, package, session=None, silent=False):
    response = get("{}/packages/{}".format(cls.bower_base_uri, package))
    return response.json().get("url", None)
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
def clean_semver(cls, version_spec):
    # NB: flags must be passed by keyword; as a positional argument it
    # would be interpreted as re.sub's ``count`` parameter.
    return re.sub(r"([<>=~])\s+?v?", r"\1", version_spec,
                  flags=re.IGNORECASE)
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
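What clean_semver buys, on hypothetical inputs: whitespace and a leading "v" after a comparison operator are stripped so the spec becomes semver-parseable (a sketch assuming Bower exposes this as a classmethod, as the cls parameter suggests).

assert Bower.clean_semver(">= v1.2.3") == ">=1.2.3"
assert Bower.clean_semver("~ 2.0.0") == "~2.0.0"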
def __init__(self, assets_dir=None, requirements_file="requirements.yml"):
    self.assets_dir = assets_dir or Path(".") / "assets"
    self.requirements = GroupedRequirements()
    self.requirements.load(requirements_file)
    self.temp_dir = mkdtemp()
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
def get_bower_package(self, url, dest=None, version=None,
                      process_deps=True):
    dest = dest or Path(".") / "assets"
    parsed_url = urlparse(url)
    if parsed_url.scheme == "git" or parsed_url.path.endswith(".git"):
        if parsed_url.netloc == "github.com":
            user, repo = parsed_url.path[1:-4].split("/")
            response = get(github_api_uri +
                           "/repos/{}/{}/tags".format(user, repo))
            tags = response.json()
            target = None
            if not len(tags):
                click.secho("fatal: no tags exist for {}/{}".format(
                    user, repo), fg="red")
                raise InvalidPackageError
            if version is None:
                target = tags[0]
            else:
                for tag in tags:
                    if semver.match(tag["name"],
                                    Bower.clean_semver(version)):
                        target = tag
                        break
            if not target:
                click.secho(
                    "fatal: failed to find matching tag for "
                    "{user}/{repo} {version}".format(
                        user=user, repo=repo, version=version),
                    fg="red")
                raise VersionNotFoundError
            click.secho("installing {}/{}#{}".format(
                user, repo, target["name"]), fg="green")
            return self.get_bower_package(
                url=target["zipball_url"], dest=dest, version=version)
        raise NotImplementedError
        # Unreachable below this point.
        click.echo("git clone {url}".format(url=url))
        cmd = envoy.run('git clone {url} "{dest}"'.format(
            url=url, dest=dest))
    elif parsed_url.scheme in ("http", "https"):
        zip_dest = download_file(url, dest=self.temp_dir,
                                 label="{dest_basename}",
                                 expected_extension="zip")
        with zipfile.ZipFile(zip_dest, "r") as pkg:
            return self.extract_bower_zipfile(pkg, dest,
                                              expected_version=version)
            # pkg.extractall(str(dest))
    else:
        click.secho("protocol currently unsupported :(")
        sys.exit(1)
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
def install_pip(self, package, save=True, save_dev=False):
    """Installs a pip package.

    :param save: if `True`, pins the package to the Hydrogen requirements
        YAML file.
    :param save_dev: if `True`, pins the package as a development
        dependency to the Hydrogen requirements YAML file.
    :returns: a **single** :class:`Requirement` object, representing the
        installed version of the given package.
    """
    requirement = Requirement.coerce(package)
    click.echo("pip install " + requirement.package)
    cmd = envoy.run("pip install {}".format(str(requirement)))
    if cmd.status_code == 0:
        installed_packages = get_installed_pypackages()
        package = installed_packages[requirement.package]
        requirement.version = "=={}".format(package.version)
        if save:
            self.requirements["all"].add(requirement)
        if save_dev:
            self.requirements["dev"].add(requirement)
        if save or save_dev:
            self.requirements.save()
        return requirement
    else:
        fatal(cmd.std_err)
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
def main(ctx):
    which = "where" if sys.platform == "win32" else "which"
    if envoy.run(which + " git").status_code != 0:
        click.secho("fatal: git not found in PATH", fg="red")
        sys.exit(1)
    ctx.obj = Hydrogen()
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
def freeze(h, output_yaml, resolve, groups):
    """Output installed packages."""
    if not groups:
        groups = filter(lambda group: not group.lower().startswith("bower"),
                        h.requirements.keys())
    else:
        groups = [text_type.strip(group) for group in groups.split(",")]
    if output_yaml:
        for requirements in h.requirements.values():
            for requirement in requirements:
                if resolve and not requirement.version:
                    requirement.load_installed_version()
        click.echo(h.requirements.yaml)
    else:
        for group in groups:
            if not h.requirements[group]:
                continue
            click.echo("# {}".format(group))
            for requirement in h.requirements[group]:
                if resolve and not requirement.version:
                    requirement.load_installed_version()
                click.echo(str(requirement))
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
def install(h, pip, groups, save, save_dev, packages):
    """Install a pip or bower package."""
    if groups:
        groups = [text_type.strip(group) for group in groups.split(",")]
    else:
        groups = h.requirements.keys()
    if not packages:
        for group in groups:
            if group not in h.requirements:
                warning("{} not in requirements".format(group))
                continue
            install = (h.install_bower if group.startswith("bower")
                       else h.install_pip)
            for requirement in h.requirements[group]:
                install(str(requirement), save=False, save_dev=False)
    if pip:
        for package in packages:
            h.install_pip(package, save=save, save_dev=save_dev)
    else:
        for package in packages:
            h.install_bower(package, save=save, save_dev=save_dev)
darvid/hydrogen
[ 3, 1, 3, 1, 1415991326 ]
def read(fname):
    # Use a context manager so the file handle is closed promptly.
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
ntucker/django-aloha-edit
[ 21, 5, 21, 1, 1367876699 ]
def __str__(self):
    return "%s: %s" % (self.get_type_display(), self.value)
Kniyl/mezzanine
[ 1, 2, 1, 1, 1398427594 ]
def __str__(self):
    return "%s: %s" % (self.user_name, self.text)
Kniyl/mezzanine
[ 1, 2, 1, 1, 1398427594 ]
def is_holiday(date1, holidays=getattr(settings, 'SCHOOL_HOLIDAYS', [])):
    for date_start, date_end in holidays:
        if isinstance(date_end, str):
            # Single-day holidays appear to store a placeholder string
            # as date_end, so only compare against the start date.
            if date1.date() == date_start.date():
                return True
        elif date_start.date() <= date1.date() <= date_end.date():
            return True
    return False
unicefuganda/edtrac
[ 7, 3, 7, 3, 1324013652 ]
def previous_calendar_week(t=None):
    """
    For education monitoring, a week runs between Thursdays;
    Thursday marks the beginning of a new week of data submission.
    Data for a new week is accepted until Wednesday evening of the
    following week.
    """
    d = t or datetime.datetime.now()
    if not d.weekday() == 3:
        # last Thursday == next Thursday minus 7 days
        last_thursday = d + (datetime.timedelta((3 - d.weekday()) % 7) -
                             datetime.timedelta(days=7))
    else:
        last_thursday = d
    end_date = last_thursday + datetime.timedelta(days=6)
    return (last_thursday.date(), end_date)
unicefuganda/edtrac
[ 7, 3, 7, 3, 1324013652 ]
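A worked example on a hypothetical date (not from the repo): for Monday 2011-12-19, the reporting week began on the previous Thursday and runs through the following Wednesday. Note the function returns a date for the start but a datetime for the end.

import datetime

monday = datetime.datetime(2011, 12, 19)          # weekday() == 0
start, end = previous_calendar_week(monday)
assert start == datetime.date(2011, 12, 15)       # the last Thursday
assert end.date() == datetime.date(2011, 12, 21)  # Wednesday of the next week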
def get_polls(**kwargs):
    script_polls = ScriptStep.objects.values_list('poll', flat=True).exclude(poll=None)
    return Poll.objects.exclude(pk__in=script_polls).annotate(Count('responses'))
unicefuganda/edtrac
[ 7, 3, 7, 3, 1324013652 ]
def list_poll_responses(poll, **kwargs):
    """
    Pass a poll queryset and you get a dict mapping locations to responses
    (quite handy for the charts).

    Dependencies: Contact and Location must be in your module; this lists
    all Poll responses by district.
    """
    # forceful import
    from poll.models import Poll
    to_ret = {}

    # narrowed down to 3 districts (and up to 14 districts)
    DISTRICT = ['Kaabong', 'Kabarole', 'Kyegegwa', 'Kotido']
    if not kwargs:
        # if no other arguments are provided
        for location in Location.objects.filter(name__in=DISTRICT):
            to_ret[location.__unicode__()] = compute_average_percentage(
                [msg.message.text for msg in poll.responses.filter(
                    contact__in=Contact.objects.filter(
                        reporting_location=location))])
        return to_ret
    else:
        # filter by number of weeks
        # TODO: more elegant solution to coincide with actual school term weeks
        date_filter = kwargs['weeks']  # the date, given in weeks
        date_now = datetime.datetime.now()
        date_diff = date_now - datetime.timedelta(weeks=date_filter)
        all_emis_reports = EmisReporter.objects.filter(
            reporting_location__in=[loc for loc in Location.objects.filter(
                name__in=DISTRICT)])
        for location in Location.objects.filter(name__in=DISTRICT):
            to_ret[location.__unicode__()] = compute_average_percentage(
                [msg.message.text for msg in poll.responses.filter(
                    date__gte=date_diff,
                    contact__in=Contact.objects.filter(
                        reporting_location=location))])
        return to_ret
unicefuganda/edtrac
[ 7, 3, 7, 3, 1324013652 ]
def __init__(self, sample=[], population=False):
    """Statistics class initializer method."""
    # Raise an exception if the data set is empty.
    if not sample:
        raise StatisticsException("Empty data set!: %s" % sample)
    # The data set (a list). Note: it is sorted in place below, so the
    # caller's list is mutated.
    self.sample = sample
    # Sample/Population variance determination flag.
    self.population = population
    self.N = len(self.sample)
    self.sum = float(sum(self.sample))
    self.min = min(self.sample)
    self.max = max(self.sample)
    self.range = self.max - self.min
    self.mean = self.sum / self.N
    # In-place sort (list is now in ascending order).
    self.sample.sort()
    self.__getMode()
    # Instance identification attribute.
    self.identification = id(self)
unicefuganda/edtrac
[ 7, 3, 7, 3, 1324013652 ]
def __getVariance(self):
    """Determine the measure of the spread of the data set about the mean.

    Sample variance is determined by default; population variance can be
    determined by setting the population attribute to True.
    """
    x = 0  # Summation variable.
    # Subtract the mean from each data item, square the difference,
    # and sum all the squared deviations.
    for item in self.sample:
        x += (item - self.mean) ** 2.0
    try:
        if not self.population:
            # Divide sum of squares by N-1 (sample variance).
            self.variance = x / (self.N - 1)
        else:
            # Divide sum of squares by N (population variance).
            self.variance = x / self.N
    except ZeroDivisionError:
        self.variance = 0
unicefuganda/edtrac
[ 7, 3, 7, 3, 1324013652 ]
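A small usage sketch for the Statistics class (assumes the full class body, including the __getMode helper and the StatisticsException type referenced above; the sample values are hypothetical).

s = Statistics([4.0, 2.0, 5.0, 8.0, 6.0])
print(s.N, s.mean, s.range)   # 5 5.0 6.0
s._Statistics__getVariance()  # name-mangled private helper
print(s.variance)             # 5.0 (sample variance, N-1 in the denominator)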
def extract_key_count(list, key=None):
    """
    A utility function written to count the number of times a `key` would
    appear in, for example, a categorized poll.

    Illustrative example:
    >>> extract_key_count([{'category__name': 'yes', 'value': 7}], key='yes')
    7
    """
    if list and key:
        # go through a list of dictionaries
        for dict in list:
            if dict.get('category__name') == key:
                return dict.get('value')
    else:
        return 0
unicefuganda/edtrac
[ 7, 3, 7, 3, 1324013652 ]
def main(app):
    target_dir = os.path.join(app.builder.srcdir, 'book_figures')
    source_dir = os.path.abspath(app.builder.srcdir + '/../' + 'examples')
    try:
        plot_gallery = eval(app.builder.config.plot_gallery)
    except TypeError:
        plot_gallery = bool(app.builder.config.plot_gallery)
    if not os.path.exists(source_dir):
        os.makedirs(source_dir)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    EB = ExampleBuilder(source_dir, target_dir,
                        execute_files=plot_gallery,
                        contents_file='contents.txt',
                        dir_info_file='README.rst',
                        dir_footer_file='FOOTER.rst',
                        sphinx_tag_base='book_fig',
                        template_example=RST_TEMPLATE)
    EB.run()
cigroup-ol/metaopt
[ 21, 3, 21, 11, 1382447731 ]
def __init__(self, name, code, url_namespace='anysign', **kwargs):
    """Configure backend."""
    #: Human-readable name.
    self.name = name
    #: Machine-readable name. Should be lowercase alphanumeric only, i.e.
    #: PEP-8 compliant.
    self.code = code
    #: Namespace for URL resolution.
    self.url_namespace = url_namespace
novafloss/django-anysign
[ 22, 9, 22, 9, 1402504862 ]
def get_signer_url(self, signer):
    """Return URL where signer signs document.

    Raise ``NotImplementedError`` in case the backend does not support
    "signer view" feature.

    Default implementation reverses :meth:`get_signer_url_name` with
    ``signer.pk`` as argument.
    """
    return reverse(self.get_signer_url_name(), args=[signer.pk])
novafloss/django-anysign
[ 22, 9, 22, 9, 1402504862 ]
def get_signer_return_url(self, signer):
    """Return absolute URL where signer is redirected after signing.

    The URL must be **absolute** because it is typically used by external
    signature service: the signer uses external web UI to sign the
    document(s) and then the signature service redirects the signer to
    (this) `Django` website.

    Raise ``NotImplementedError`` in case the backend does not support
    "signer return view" feature.

    Default implementation reverses :meth:`get_signer_return_url_name`
    with ``signer.pk`` as argument.
    """
    return reverse(
        self.get_signer_return_url_name(),
        args=[signer.pk])
novafloss/django-anysign
[ 22, 9, 22, 9, 1402504862 ]
def get_signature_callback_url(self, signature):
    """Return URL where backend can post signature notifications.

    Raise ``NotImplementedError`` in case the backend does not support
    "signature callback url" feature.

    Default implementation reverses
    :meth:`get_signature_callback_url_name` with ``signature.pk`` as
    argument.
    """
    return reverse(
        self.get_signature_callback_url_name(),
        args=[signature.pk])
novafloss/django-anysign
[ 22, 9, 22, 9, 1402504862 ]
def _ValidateFileReplicationRule(rule):
    """Raises an error if a FileReplicationRule is invalid.

    For example, checks that if REPLICATION_TYPE_FILTER,
    destination_fields are specified.

    Args:
        rule: (FileReplicationRule) The rule to validate.
    """
    if rule.file_type == replication_config_pb2.FILE_TYPE_JSON:
        if rule.replication_type != replication_config_pb2.REPLICATION_TYPE_FILTER:
            raise ValueError(
                'Rule for JSON source %s must use REPLICATION_TYPE_FILTER.' %
                rule.source_path)
    elif rule.file_type == replication_config_pb2.FILE_TYPE_OTHER:
        if rule.replication_type != replication_config_pb2.REPLICATION_TYPE_COPY:
            raise ValueError('Rule for source %s must use REPLICATION_TYPE_COPY.' %
                             rule.source_path)
    else:
        raise NotImplementedError('Replicate not implemented for file type %s' %
                                  rule.file_type)

    if rule.replication_type == replication_config_pb2.REPLICATION_TYPE_COPY:
        if rule.destination_fields.paths:
            raise ValueError(
                'Rule with REPLICATION_TYPE_COPY cannot use destination_fields.')
    elif rule.replication_type == replication_config_pb2.REPLICATION_TYPE_FILTER:
        if not rule.destination_fields.paths:
            raise ValueError(
                'Rule with REPLICATION_TYPE_FILTER must use destination_fields.')
    else:
        raise NotImplementedError(
            'Replicate not implemented for replication type %s' %
            rule.replication_type)

    if os.path.isabs(rule.source_path) or os.path.isabs(rule.destination_path):
        raise ValueError(
            'Only paths relative to the source root are allowed. In rule: %s' %
            rule)
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def __init__(self, *args, **kwargs):
    super().__init__(os.path.join('languages', 'java'), install=True,
                     *args, **kwargs)
jimporter/bfg9000
[ 68, 20, 68, 13, 1424839632 ]
def test_install(self):
    self.build('install')
    self.assertDirectory(self.installdir, [
        os.path.join(self.libdir, 'program.jar'),
    ])

    os.chdir(self.srcdir)
    cleandir(self.builddir)
    self.assertOutput(
        ['java', '-jar', os.path.join(self.libdir, 'program.jar')],
        'hello from java!\n'
    )
jimporter/bfg9000
[ 68, 20, 68, 13, 1424839632 ]
def __init__(self, *args, **kwargs):
    super().__init__(os.path.join('languages', 'java'),
                     extra_env={'JAVAC': os.getenv('GCJ', 'gcj')},
                     *args, **kwargs)
jimporter/bfg9000
[ 68, 20, 68, 13, 1424839632 ]
def __init__(self, *args, **kwargs):
    super().__init__(os.path.join('languages', 'java_library'),
                     install=True, *args, **kwargs)
jimporter/bfg9000
[ 68, 20, 68, 13, 1424839632 ]
def test_install(self):
    self.build('install')
    self.assertDirectory(self.installdir, [
        os.path.join(self.libdir, 'lib.jar'),
        os.path.join(self.libdir, 'program.jar'),
    ])

    os.chdir(self.srcdir)
    cleandir(self.builddir)
    self.assertOutput(
        ['java', '-jar', os.path.join(self.libdir, 'program.jar')],
        'hello from library!\n'
    )
jimporter/bfg9000
[ 68, 20, 68, 13, 1424839632 ]
def dict_diff(a, b):
    # Map each key present in both dicts whose values differ to the
    # (a-value, b-value) pair.
    diff = dict()
    for k in a:
        if k in b:
            if b[k] != a[k]:
                diff[k] = (a[k], b[k])
    return diff
cmand/scamper
[ 19, 15, 19, 1, 1427748624 ]
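dict_diff in action on hypothetical header fields, the way tracebox-style tooling might use it: only keys present in both dicts with differing values are reported, as (a-value, b-value) pairs.

tx = {'IP:TTL': 64, 'TCP::Window': 5840, 'TCP::SPort': 443}
rx = {'IP:TTL': 57, 'TCP::Window': 5840}
assert dict_diff(tx, rx) == {'IP:TTL': (64, 57)}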
def __init__(self, wartsfile, verbose=False):
    super(WartsTraceBoxReader, self).__init__(wartsfile, verbose)
cmand/scamper
[ 19, 15, 19, 1, 1427748624 ]
def next_object(self):
    # read warts object header
    self.header = self.fd.read(8)
    # sanity check
    if len(self.header) != 8:
        return None
    (magic, typ, length) = struct.unpack('!HHI', self.header)
    if self.verbose:
        print("Magic: %02X Obj: %02X Len: %02x" % (magic, typ, length))
    assert magic == obj_type['MAGIC']
    # read remainder of object
    data = self.fd.read(length)
    if typ == obj_type['LIST']:
        return WartsList(data, verbose=self.verbose)
    elif typ == obj_type['CYCLESTART']:
        return WartsCycle(data, verbose=self.verbose)
    elif typ == obj_type['CYCLE']:
        return WartsCycle(data, verbose=self.verbose)
    elif typ == obj_type['CYCLE_STOP']:
        return WartsCycleStop(data, verbose=self.verbose)
    elif typ == TRACEBOXTYPE:
        return WartsTraceBox(data, verbose=self.verbose)
    else:
        print("Unsupported object: %02x Len: %d" % (typ, length))
        assert False
cmand/scamper
[ 19, 15, 19, 1, 1427748624 ]
def __init__(self, data, verbose=False):
    super(WartsTraceBox, self).__init__(TRACEBOXTYPE, verbose)
    self.data = data
    self.flagdata = data
    self.pkts = []
    self.flag_defines = [
        ('listid', unpack_uint32_t),
        ('cycleid', unpack_uint32_t),
        ('userid', unpack_uint32_t),
        ('srcaddr', self.unpack_address),
        ('dstaddr', self.unpack_address),
        ('sport', unpack_uint16_t),
        ('dport', unpack_uint16_t),
        ('start', read_timeval),
        ('result', unpack_uint16_t),
        ('rtt', unpack_uint8_t),
        ('qtype', unpack_uint8_t),
        ('udp', unpack_uint8_t),
        ('printmode', unpack_uint8_t),
        ('pktc16', unpack_uint16_t),
        ('pktc', unpack_uint32_t),
    ]
    flag_bytes = self.read_flags()
    if self.verbose:
        print("TB Params: %s" % self.flags)
    offset = flag_bytes
    for i in range(self.flags['pktc']):
        pkt = WartsTraceBoxPkt(data[offset:], self.referenced_address,
                               self.verbose)
        self.pkts.append(pkt.flags)
        offset += pkt.flag_bytes
        if self.verbose:
            print("Pkt %d: %s" % (i + 1, pkt.flags))
cmand/scamper
[ 19, 15, 19, 1, 1427748624 ]
def __init__(self, data, refs, verbose=False):
    super(WartsTraceBoxPkt, self).__init__(TRACEBOXTYPE, verbose)
    self.update_ref(refs)
    self.flagdata = data
    self.flag_defines = [
        ('dir', unpack_uint8_t),
        ('time', read_timeval),
        ('len', unpack_uint16_t),
        ('data', self.read_pass),
    ]
    self.flag_bytes = self.read_flags()
    datalen = self.flags['len']
    self.flags['data'] = self.read_tracebox_pkt(
        data[self.flag_bytes:self.flag_bytes + datalen])
    self.flag_bytes += self.flags['len']
cmand/scamper
[ 19, 15, 19, 1, 1427748624 ]
def read_tracebox_pkt(self, data):
    fields = dict()
    ip = dpkt.ip.IP(data)
    fields['hop'] = socket.inet_ntoa(ip.src)
    if ip.p == dpkt.ip.IP_PROTO_ICMP:
        # This is a reply from a hop.
        fields['hop'] = socket.inet_ntoa(ip.src)
        icmp = ip.data
        # icmp.data is of type dpkt.icmp.TimeExceed,
        # so icmp.data.data is a dpkt.ip.IP (the quoted packet).
        ip = icmp.data.data
    fields['IP::Version'] = ip.v
    fields['IP::IHL'] = ip.hl
    dscp = (ip.tos & 0xFC) >> 2
    ecn = (ip.tos & 0x03)
    fields['IP::DiffServicesCP'] = hex(dscp)
    fields['IP::ECN'] = hex(ecn)
    fields['IP:Length'] = hex(ip.len)
    fields['IP:ID'] = ip.id
    # In the IP flags field, DF is bit 1 and MF is bit 0; dpkt's df/mf
    # properties each yield 0 or 1.
    flags = (ip.df << 1) + ip.mf
    fields['IP:Flags'] = hex(flags)
    fields['IP:FragmentOffset'] = ip.offset
    fields['IP:TTL'] = ip.ttl
    fields['IP::Protocol'] = ip.p
    fields['IP::Checksum'] = hex(ip.sum)
    fields['IP::SourceAddr'] = socket.inet_ntoa(ip.src)
    fields['IP::DestAddr'] = socket.inet_ntoa(ip.dst)
    if ip.p == dpkt.ip.IP_PROTO_TCP:
        tcp = ip.data
        if not isinstance(tcp, dpkt.tcp.TCP):
            # Partial quote: pad the truncated header so dpkt can parse it.
            z = struct.pack('12sB', ip.data, 0x50) + struct.pack('7B', *([0] * 7))
            tcp = dpkt.tcp.TCP(z)
        if len(ip.data) >= 4:
            fields['TCP::SPort'] = hex(tcp.sport)
            fields['TCP::DPort'] = hex(tcp.dport)
        if len(ip.data) >= 8:
            fields['TCP::SeqNumber'] = hex(tcp.seq)
        if len(ip.data) >= 12:
            fields['TCP::AckNumber'] = hex(tcp.ack)
        if len(ip.data) >= 16:
            fields['TCP::Offset'] = hex(tcp.off)
            fields['TCP::Flags'] = hex(tcp.flags)
            fields['TCP::Window'] = hex(tcp.win)
        if len(ip.data) == 20:
            fields['TCP::Checksum'] = hex(tcp.sum)
            fields['TCP::UrgentPtr'] = hex(tcp.urp)
        if len(ip.data) >= 20:
            if len(tcp.opts) > 0:
                opts = dpkt.tcp.parse_opts(tcp.opts)
                for o, d in opts:
                    if o == dpkt.tcp.TCP_OPT_EOL:
                        fields['TCP::OPT_EOL'] = d
                    elif o == dpkt.tcp.TCP_OPT_NOP:
                        fields['TCP::OPT_NOP'] = d
                    elif o == dpkt.tcp.TCP_OPT_MSS:
                        fields['TCP::OPT_MSS'] = d
                    elif o == dpkt.tcp.TCP_OPT_WSCALE:
                        fields['TCP::OPT_WSCALE'] = d
                    elif o == dpkt.tcp.TCP_OPT_SACKOK:
                        fields['TCP::OPT_SACKOK'] = d
                    elif o == dpkt.tcp.TCP_OPT_SACK:
                        fields['TCP::OPT_SACK'] = d
                    elif o == dpkt.tcp.TCP_OPT_TIMESTAMP:
                        fields['TCP::OPT_TIMESTAMP'] = d
    return fields
cmand/scamper
[ 19, 15, 19, 1, 1427748624 ]
def test_unicode_title():
    get_beyonce = GetCurrentContent("Beyoncé Knowles")
    assert get_beyonce()
mahmoud/wapiti
[ 36, 11, 36, 6, 1358639129 ]
def test_web_request():
    url = 'http://upload.wikimedia.org/wikipedia/commons/d/d2/Mcgregor.jpg'
    get_photo = base.WebRequestOperation(url)
    res = get_photo()
    text = res[0]
    assert len(text) == 16408
mahmoud/wapiti
[ 36, 11, 36, 6, 1358639129 ]
def test_missing_revisions():
    get_revs = GetPageRevisionInfos('Coffee_lololololol')
    rev_list = get_revs()
    # Should return 'missing' and negative pageid
    assert len(rev_list) == 0
mahmoud/wapiti
[ 36, 11, 36, 6, 1358639129 ]
def calculate_distance_boundary(r, mu, r_inner, r_outer):
    """
    Calculate distance to shell boundary in cm.

    Parameters
    ----------
    r : float
        radial coordinate of the RPacket
    mu : float
        cosine of the direction of movement
    r_inner : float
        inner radius of current shell
    r_outer : float
        outer radius of current shell
    """
    delta_shell = 0
    if mu > 0.0:
        # direction outward
        distance = math.sqrt(r_outer * r_outer + ((mu * mu - 1.0) * r * r)) - (
            r * mu
        )
        delta_shell = 1
    else:
        # going inward
        check = r_inner * r_inner + (r * r * (mu * mu - 1.0))
        if check >= 0.0:
            # hit inner boundary
            distance = -r * mu - math.sqrt(check)
            delta_shell = -1
        else:
            # miss inner boundary
            distance = math.sqrt(
                r_outer * r_outer + ((mu * mu - 1.0) * r * r)
            ) - (r * mu)
            delta_shell = 1
    return distance, delta_shell
tardis-sn/tardis
[ 173, 342, 173, 166, 1323325219 ]
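A quick numeric check with hypothetical values (not a test from the repo): a packet at r = 1.5 moving straight outward (mu = 1) in a shell spanning [1.0, 2.0] should travel r_outer minus r and step out one shell.

distance, delta_shell = calculate_distance_boundary(1.5, 1.0, 1.0, 2.0)
assert abs(distance - 0.5) < 1e-12
assert delta_shell == 1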
def calculate_distance_line( r_packet, comov_nu, is_last_line, nu_line, time_explosion
tardis-sn/tardis
[ 173, 342, 173, 166, 1323325219 ]
def calculate_distance_line_full_relativity( nu_line, nu, time_explosion, r_packet
tardis-sn/tardis
[ 173, 342, 173, 166, 1323325219 ]
def select_numeric(df):
    return df.select_dtypes(exclude=['object'])
Featuretools/featuretools
[ 6538, 841, 6538, 165, 1504908917 ]
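A one-frame illustration of select_numeric (hypothetical data): string columns are object-dtype and get dropped, while numeric columns survive.

import pandas as pd

frame = pd.DataFrame({"age": [33, 25], "zip_code": ["60091", "13244"]})
assert list(select_numeric(frame).columns) == ["age"]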
def es():
    es = load_mock_customer(n_customers=15,
                            n_products=15,
                            n_sessions=75,
                            n_transactions=1000,
                            random_seed=0,
                            return_entityset=True)
    return es
Featuretools/featuretools
[ 6538, 841, 6538, 165, 1504908917 ]
def df(es):
    df = es['customers'].df
    df['target'] = np.random.randint(1, 3, df.shape[0])  # 1 or 2 values
    return df
Featuretools/featuretools
[ 6538, 841, 6538, 165, 1504908917 ]
def pipeline(es):
    pipeline = Pipeline(steps=[
        ('ft', DFSTransformer(entityset=es,
                              target_entity="customers",
                              max_features=20)),
        ("numeric", FunctionTransformer(select_numeric, validate=False)),
        ('imp', SimpleImputer()),
        ('et', ExtraTreesClassifier(n_estimators=10))
    ])
    return pipeline
Featuretools/featuretools
[ 6538, 841, 6538, 165, 1504908917 ]
def test_sklearn_estimator(df, pipeline):
    # Using with estimator
    pipeline.fit(df['customer_id'].values, y=df.target.values) \
        .predict(df['customer_id'].values)
    result = pipeline.score(df['customer_id'].values, df.target.values)
    assert isinstance(result, float)

    # Pickling / Unpickling Pipeline
    # TODO fix this
    # s = pickle.dumps(pipeline)
    # pipe_pickled = pickle.loads(s)
    # result = pipe_pickled.score(df['customer_id'].values, df.target.values)
    # assert isinstance(result, float)
Featuretools/featuretools
[ 6538, 841, 6538, 165, 1504908917 ]
def test_sklearn_gridsearchcv(df, pipeline):
    # Using with GridSearchCV
    params = {
        'et__max_depth': [5, 10]
    }
    grid = GridSearchCV(estimator=pipeline,
                        param_grid=params,
                        cv=3)
    grid.fit(df['customer_id'].values, df.target.values)
    assert len(grid.predict(df['customer_id'].values)) == 15
Featuretools/featuretools
[ 6538, 841, 6538, 165, 1504908917 ]
def {% if function_name is defined %}{{ function_name | lower() }}{% else %}main{% endif %}():
ReconCell/smacha
[ 15, 2, 15, 1, 1501314993 ]
def time_func(func, data, iterations):
    start = time.time()
    while iterations:
        iterations -= 1
        func(data)
    return time.time() - start
thedrow/cyrapidjson
[ 7, 3, 7, 5, 1451993583 ]
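Usage sketch for time_func (a hypothetical workload, not one of the benchmark fixtures): total wall-clock seconds for 1000 serializations of the same payload.

import json

payload = {"a": list(range(100))}
elapsed = time_func(json.dumps, payload, 1000)
print("json.dumps x1000: %0.3fs" % elapsed)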
def test_json_serialization(name, serialize, deserialize, benchmark):
    ser_data, des_data = benchmark(run_client_test,
                                   name, serialize, deserialize)
    msg = "\n%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
        name, ser_data, des_data, ser_data + des_data
    )
    print(msg)
thedrow/cyrapidjson
[ 7, 3, 7, 5, 1451993583 ]
def test_json_unicode_strings(name, serialize, deserialize, benchmark):
    print("\nArray with 256 unicode strings:")
    ser_data, des_data = benchmark(run_client_test,
                                   name, serialize, deserialize,
                                   data=unicode_strings,
                                   iterations=5000,
                                   )
    msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
        name, ser_data, des_data, ser_data + des_data
    )
    print(msg)
thedrow/cyrapidjson
[ 7, 3, 7, 5, 1451993583 ]
def test_json_ascii_strings(name, serialize, deserialize, benchmark):
    print("\nArray with 256 ascii strings:")
    ser_data, des_data = benchmark(run_client_test,
                                   name, serialize, deserialize,
                                   data=strings,
                                   )
    msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
        name, ser_data, des_data, ser_data + des_data
    )
    print(msg)
thedrow/cyrapidjson
[ 7, 3, 7, 5, 1451993583 ]
def test_json_booleans(name, serialize, deserialize, benchmark):
    print("\nArray with 256 True's:")
    ser_data, des_data = benchmark(run_client_test,
                                   name, serialize, deserialize,
                                   data=booleans,
                                   )
    msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
        name, ser_data, des_data, ser_data + des_data
    )
    print(msg)
thedrow/cyrapidjson
[ 7, 3, 7, 5, 1451993583 ]
def test_json_list_of_dictionaries(name, serialize, deserialize, benchmark):
    print("\nArray of 100 dictionaries:")
    ser_data, des_data = benchmark(run_client_test,
                                   name, serialize, deserialize,
                                   data=list_dicts,
                                   iterations=5,
                                   )
    msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
        name, ser_data, des_data, ser_data + des_data
    )
    print(msg)
thedrow/cyrapidjson
[ 7, 3, 7, 5, 1451993583 ]
def test_json_dictionary_of_lists(name, serialize, deserialize, benchmark):
    print("\nDictionary of 100 Arrays:")
    ser_data, des_data = benchmark(run_client_test,
                                   name, serialize, deserialize,
                                   data=dict_lists,
                                   iterations=5,
                                   )
    msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
        name, ser_data, des_data, ser_data + des_data
    )
    print(msg)
thedrow/cyrapidjson
[ 7, 3, 7, 5, 1451993583 ]
def test_json_medium_complex_objects(name, serialize, deserialize, benchmark):
    print("\n256 Medium Complex objects:")
    ser_data, des_data = benchmark(run_client_test,
                                   name, serialize, deserialize,
                                   data=medium_complex,
                                   iterations=50000,
                                   )
    msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
        name, ser_data, des_data, ser_data + des_data
    )
    print(msg)
thedrow/cyrapidjson
[ 7, 3, 7, 5, 1451993583 ]
def finalize_options(self):
    _build_ext.finalize_options(self)
    # Prevent numpy from thinking it is still in its setup process:
    __builtins__.__NUMPY_SETUP__ = False
    import numpy
    self.include_dirs.append(numpy.get_include())
visualfabriq/bqueryd
[ 5, 1, 5, 5, 1480679165 ]
def read(*parts):
    """
    Build an absolute path from *parts* and return the contents of the
    resulting file. Assume UTF-8 encoding.
    """
    with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
        return f.read()
visualfabriq/bqueryd
[ 5, 1, 5, 5, 1480679165 ]
def view(self, dtype=None, typ=None):
    return self
theislab/anndata
[ 355, 126, 355, 257, 1502460606 ]
def adata():
    adata = ad.AnnData(np.zeros((100, 100)))
    adata.obsm["o"] = np.zeros((100, 50))
    adata.varm["o"] = np.zeros((100, 50))
    return adata
theislab/anndata
[ 355, 126, 355, 257, 1502460606 ]
def adata_parameterized(request):
    return gen_adata(shape=(200, 300), X_type=request.param)
theislab/anndata
[ 355, 126, 355, 257, 1502460606 ]
def matrix_type(request):
    return request.param
theislab/anndata
[ 355, 126, 355, 257, 1502460606 ]
def mapping_name(request):
    return request.param
theislab/anndata
[ 355, 126, 355, 257, 1502460606 ]
def test_views():
    X = np.array(X_list)
    adata = ad.AnnData(X, obs=obs_dict, var=var_dict, uns=uns_dict, dtype="int32")

    assert adata[:, 0].is_view
    assert adata[:, 0].X.tolist() == np.reshape([1, 4, 7], (3, 1)).tolist()

    adata[:2, 0].X = [0, 0]
    assert adata[:, 0].X.tolist() == np.reshape([0, 0, 7], (3, 1)).tolist()

    adata_subset = adata[:2, [0, 1]]
    assert adata_subset.is_view
    # now transition to actual object
    adata_subset.obs["foo"] = range(2)
    assert not adata_subset.is_view
    assert adata_subset.obs["foo"].tolist() == list(range(2))
theislab/anndata
[ 355, 126, 355, 257, 1502460606 ]
def test_set_obsm_key(adata):
    init_hash = joblib.hash(adata)

    orig_obsm_val = adata.obsm["o"].copy()
    subset_obsm = adata[:50]
    assert subset_obsm.is_view
    subset_obsm.obsm["o"] = np.ones((50, 20))
    assert not subset_obsm.is_view
    assert np.all(adata.obsm["o"] == orig_obsm_val)

    assert init_hash == joblib.hash(adata)
theislab/anndata
[ 355, 126, 355, 257, 1502460606 ]
def test_set_obs(adata, subset_func):
    init_hash = joblib.hash(adata)

    subset = adata[subset_func(adata.obs_names), :]
    new_obs = pd.DataFrame(
        dict(a=np.ones(subset.n_obs), b=np.ones(subset.n_obs)),
        index=subset.obs_names,
    )
    assert subset.is_view
    subset.obs = new_obs
    assert not subset.is_view
    assert np.all(subset.obs == new_obs)
    assert joblib.hash(adata) == init_hash
theislab/anndata
[ 355, 126, 355, 257, 1502460606 ]
def test_drop_obs_column():
    adata = ad.AnnData(np.array(X_list), obs=obs_dict, dtype="int32")

    subset = adata[:2]
    assert subset.is_view
    # returns a copy of obs
    assert subset.obs.drop(columns=["oanno1"]).columns.tolist() == ["oanno2", "oanno3"]
    assert subset.is_view
    # would modify obs, so it should actualize subset and not modify adata
    subset.obs.drop(columns=["oanno1"], inplace=True)
    assert not subset.is_view
    assert subset.obs.columns.tolist() == ["oanno2", "oanno3"]
    assert adata.obs.columns.tolist() == ["oanno1", "oanno2", "oanno3"]
theislab/anndata
[ 355, 126, 355, 257, 1502460606 ]
def test_set_varm(adata):
    init_hash = joblib.hash(adata)

    dim0_size = np.random.randint(2, adata.shape[1] - 1)
    dim1_size = np.random.randint(1, 99)
    orig_varm_val = adata.varm["o"].copy()
    subset_idx = np.random.choice(adata.var_names, dim0_size, replace=False)

    subset = adata[:, subset_idx]
    assert subset.is_view
    subset.varm = dict(o=np.ones((dim0_size, dim1_size)))
    assert not subset.is_view
    assert np.all(orig_varm_val == adata.varm["o"])  # Checking for mutation
    assert np.all(subset.varm["o"] == np.ones((dim0_size, dim1_size)))

    subset = adata[:, subset_idx]
    subset_hash = joblib.hash(subset)
    with pytest.raises(ValueError):
        subset.varm = dict(o=np.ones((dim0_size + 1, dim1_size)))
    with pytest.raises(ValueError):
        subset.varm = dict(o=np.ones((dim0_size - 1, dim1_size)))
    # subset should not be changed by failed setting
    assert subset_hash == joblib.hash(subset)
    assert init_hash == joblib.hash(adata)
theislab/anndata
[ 355, 126, 355, 257, 1502460606 ]
def test_not_set_subset_X(matrix_type, subset_func):
    adata = ad.AnnData(matrix_type(asarray(sparse.random(20, 20))))
    init_hash = joblib.hash(adata)
    orig_X_val = adata.X.copy()
    while True:
        subset_idx = slice_subset(adata.obs_names)
        if len(adata[subset_idx, :]) > 2:
            break
    subset = adata[subset_idx, :]

    subset = adata[:, subset_idx]
    internal_idx = _normalize_index(
        subset_func(np.arange(subset.X.shape[1])), subset.var_names
    )

    assert subset.is_view
    subset.X[:, internal_idx] = 1
    assert not subset.is_view
    assert not np.any(asarray(adata.X != orig_X_val))

    assert init_hash == joblib.hash(adata)
theislab/anndata
[ 355, 126, 355, 257, 1502460606 ]
def test_set_subset_obsm(adata, subset_func):
    init_hash = joblib.hash(adata)
    orig_obsm_val = adata.obsm["o"].copy()

    while True:
        subset_idx = slice_subset(adata.obs_names)
        if len(adata[subset_idx, :]) > 2:
            break
    subset = adata[subset_idx, :]

    internal_idx = _normalize_index(
        subset_func(np.arange(subset.obsm["o"].shape[0])), subset.obs_names
    )

    assert subset.is_view
    subset.obsm["o"][internal_idx] = 1
    assert not subset.is_view
    assert np.all(adata.obsm["o"] == orig_obsm_val)

    assert init_hash == joblib.hash(adata)
theislab/anndata
[ 355, 126, 355, 257, 1502460606 ]