Dataset schema: function (string, lengths 11 to 56k), repo_name (string, lengths 5 to 60), features (sequence)
def resolve(self, user, notify=True):
    self.resolved_at = timezone.now()
    self.resolved_by = user
    self.save(update_fields=('resolved_at', 'resolved_by'))

    if notify:
        chat.send((u'Issue <https://allmychanges.com{url}|#{issue_id}> '
                   u'for {namespace}/{name} was resolved by {username}.').format(
                       url=reverse('issue-detail', pk=self.id),
                       issue_id=self.id,
                       namespace=self.changelog.namespace,
                       name=self.changelog.name,
                       username=user.username))

    if self.type == 'auto-paused':
        changelog = self.changelog
        with log.fields(changelog_id=changelog.id):
            log.info('Resuming changelog updates')
            changelog.resume()

            if notify:
                chat.send(
                    u'Autopaused package {namespace}/{name} was resumed by {username}.'.format(
                        namespace=changelog.namespace,
                        name=changelog.name,
                        username=user.username))
AllMyChanges/allmychanges.com
[ 3, 1, 3, 18, 1398338343 ]
def merge(user, light_user):
    entries = LightModerator.objects.filter(light_user=light_user)
    for entry in entries:
        with log.fields(username=user.username,
                        light_user=light_user):
            log.info('Transforming light moderator into the permanent')
            Moderator.objects.create(
                changelog=entry.changelog,
                user=user,
                from_light_user=light_user)
    entries.delete()
AllMyChanges/allmychanges.com
[ 3, 1, 3, 18, 1398338343 ]
def remove_stale_moderators():
    LightModerator.objects.filter(
        created_at__lte=timezone.now() - datetime.timedelta(1)).delete()
AllMyChanges/allmychanges.com
[ 3, 1, 3, 18, 1398338343 ]
def namespace(self): return self.changelog.namespace
AllMyChanges/allmychanges.com
[ 3, 1, 3, 18, 1398338343 ]
def name(self): return self.changelog.name
AllMyChanges/allmychanges.com
[ 3, 1, 3, 18, 1398338343 ]
def description(self): return self.changelog.description
AllMyChanges/allmychanges.com
[ 3, 1, 3, 18, 1398338343 ]
def set_processing_status(self, status, level=logging.INFO):
    self.log.append(status)
    self.processing_status = status[:PROCESSING_STATUS_LENGTH]
    self.updated_at = timezone.now()
    self.save(update_fields=('processing_status', 'updated_at', 'log'))

    key = 'preview-processing-status:{0}'.format(self.id)
    cache.set(key, status, 10 * 60)
AllMyChanges/allmychanges.com
[ 3, 1, 3, 18, 1398338343 ]
def schedule_update(self):
    self.set_status('processing')
    self.set_processing_status('Waiting in the queue')
    self.versions.all().delete()
    update_preview_task.delay(self.pk)
AllMyChanges/allmychanges.com
[ 3, 1, 3, 18, 1398338343 ]
def create(self, *args, **kwargs):
    version = super(VersionManager, self).create(*args, **kwargs)
    changelog = kwargs.get('changelog')
    if changelog:
        version.associate_with_free_tags()
    return version
AllMyChanges/allmychanges.com
[ 3, 1, 3, 18, 1398338343 ]
def unreleased(self): return self.filter(unreleased=True)
AllMyChanges/allmychanges.com
[ 3, 1, 3, 18, 1398338343 ]
def __unicode__(self): return self.number
AllMyChanges/allmychanges.com
[ 3, 1, 3, 18, 1398338343 ]
def post_tweet(self):
    if not settings.TWITTER_CREDS:
        return

    if self.unreleased:
        raise RuntimeError('Unable to tweet about unreleased version')

    if self.tweet_id:
        return  # because we already posted a tweet

    ch = self.changelog
    image_url = settings.BASE_URL + ch.get_absolute_url() \
        + '?snap=1&version=' + self.number
    filename = sha1(image_url).hexdigest() + '.png'
    full_path = os.path.join(settings.SNAPSHOTS_ROOT, filename)
    result = envoy.run(
        '{root}/makescreenshot --width 590 --height 600 {url} {path}'.format(
            root=settings.PROJECT_ROOT,
            url=image_url,
            path=full_path))

    if result.status_code != 0:
        with log.fields(
                status_code=result.status_code,
                std_out=result.std_out,
                std_err=result.std_err):
            log.error('Unable to make a screenshot')
            raise RuntimeError('Unable to make a screenshot')

    with open(full_path, 'rb') as f:
        from requests_oauthlib import OAuth1
        auth = OAuth1(*settings.TWITTER_CREDS)

        response = requests.post(
            'https://upload.twitter.com/1.1/media/upload.json',
            auth=auth,
            files={'media': ('screenshot.png', f.read(), 'image/png')})
        media_id = response.json()['media_id_string']

        url = settings.BASE_URL + self.get_absolute_url()
        text = '{number} of {namespace}/{name} was released: {url} #{namespace} #{name} #release'.format(
            number=self.number,
            namespace=ch.namespace,
            name=ch.name,
            url=url)
        response = requests.post(
            'https://api.twitter.com/1.1/statuses/update.json',
            auth=auth,
            data={'status': text,
                  'media_ids': media_id})

        if response.status_code == 200:
            self.tweet_id = response.json()['id_str']
            self.save(update_fields=('tweet_id',))

    return full_path
AllMyChanges/allmychanges.com
[ 3, 1, 3, 18, 1398338343 ]
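The tweet in post_tweet goes out in two steps: the screenshot is first uploaded to the media endpoint to obtain a media_id_string, and only then is the status posted with that id attached. A stripped-down sketch of just that flow, assuming TWITTER_CREDS is the usual four-part OAuth1 tuple (the values below are placeholders):

import requests
from requests_oauthlib import OAuth1

# Placeholder credentials: (consumer_key, consumer_secret,
# access_token, access_token_secret).
TWITTER_CREDS = ('key', 'secret', 'token', 'token_secret')
auth = OAuth1(*TWITTER_CREDS)

# Step 1: upload the image; the response carries the media id.
with open('screenshot.png', 'rb') as f:
    media = requests.post(
        'https://upload.twitter.com/1.1/media/upload.json',
        auth=auth,
        files={'media': ('screenshot.png', f.read(), 'image/png')}).json()

# Step 2: reference the uploaded media from the new status.
requests.post(
    'https://api.twitter.com/1.1/statuses/update.json',
    auth=auth,
    data={'status': 'example release tweet',
          'media_ids': media['media_id_string']})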
def associate_with_free_tags(self):
    # associate free tags with this version
    for tag in self.changelog.tags.filter(version_number=self.number):
        tag.version = self
        tag.save(update_fields=('version',))
AllMyChanges/allmychanges.com
[ 3, 1, 3, 18, 1398338343 ]
def get_absolute_url(self):
    # the name shouldn't contain any unicode or nonascii letters nor spaces;
    # otherwise, we need to encode to utf-8 and quote_plus it.
    return self.changelog.get_absolute_url() + '#' + self.name
AllMyChanges/allmychanges.com
[ 3, 1, 3, 18, 1398338343 ]
def merge(user, light_user):
    entries = UserHistoryLog.objects.filter(user=None,
                                            light_user=light_user)
    if entries.count() > 0:
        with log.fields(username=user.username,
                        num_entries=entries.count(),
                        light_user=light_user):
            log.info('Merging user history logs')
            entries.update(user=user)
AllMyChanges/allmychanges.com
[ 3, 1, 3, 18, 1398338343 ]
def write(user, light_user, action, description):
    user = user if user is not None and user.is_authenticated() else None
    return UserHistoryLog.objects.create(user=user,
                                         light_user=light_user,
                                         action=action,
                                         description=description)
AllMyChanges/allmychanges.com
[ 3, 1, 3, 18, 1398338343 ]
def new_code_for(user):
    hash = md5(str(time.time()) + settings.SECRET_KEY).hexdigest()
    try:
        code = user.email_verification_code
        code.hash = hash
        code.save()
    except EmailVerificationCode.DoesNotExist:
        code = EmailVerificationCode.objects.create(
            user=user,
            hash=hash)
    return code
AllMyChanges/allmychanges.com
[ 3, 1, 3, 18, 1398338343 ]
def save(self, *args, **kwargs):
    super(AutocompleteData, self).save(*args, **kwargs)
    if self.words.count() == 0:
        self.add_words()
AllMyChanges/allmychanges.com
[ 3, 1, 3, 18, 1398338343 ]
def __init__(self, action: str = None) -> None: super().__init__(prefix, action)
cloudtools/awacs
[ 386, 98, 386, 14, 1364415387 ]
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
    super().__init__(
        service=prefix, resource=resource, region=region, account=account
    )
cloudtools/awacs
[ 386, 98, 386, 14, 1364415387 ]
def test_parse_rank_score():
    ## GIVEN a rank score string on genmod format
    rank_scores_info = "123:10"
    variant_score = 10.0
    family_id = "123"
    ## WHEN parsing the rank score
    parsed_rank_score = parse_rank_score(rank_scores_info, family_id)
    ## THEN assert that the correct rank score is parsed
    assert variant_score == parsed_rank_score
Clinical-Genomics/scout
[ 122, 41, 122, 149, 1412930641 ]
def options(opt):
    opt.load('hwaf-base', tooldir=_heptooldir)
    opt.add_option(
        '--with-cmake',
        default=None,
        help="Look for CMake at the given path")
    return
hwaf/hwaf
[ 10, 1, 10, 14, 1355159308 ]
def find_cmake(ctx, **kwargs):
hwaf/hwaf
[ 10, 1, 10, 14, 1355159308 ]
def scoop_not_functional_check():
    if scoop is not None and scoop.IS_RUNNING:
        print('SCOOP mode functional!')
        return False
    else:
        print('SCOOP NOT running!')
        return True
SmokinCaterpillar/pypet
[ 85, 18, 85, 2, 1379437560 ]
def set_mode(self):
    super(MultiprocSCOOPNetqueueTest, self).set_mode()
    self.mode = pypetconstants.WRAP_MODE_NETQUEUE
    self.multiproc = True
    self.freeze_input = False
    self.ncores = 4
    self.gc_interval = 3
    self.niceness = check_nice(1)
    self.use_pool = False
    self.use_scoop = True
    self.graceful_exit = False
SmokinCaterpillar/pypet
[ 85, 18, 85, 2, 1379437560 ]
def test_niceness(self): pass
SmokinCaterpillar/pypet
[ 85, 18, 85, 2, 1379437560 ]
def set_mode(self):
    super(MultiprocSCOOPSortLocalTest, self).set_mode()
    self.mode = pypetconstants.WRAP_MODE_LOCAL
    self.freeze_input = False
    self.multiproc = True
    self.ncores = 4
    self.use_pool = False
    self.use_scoop = True
    self.graceful_exit = False
SmokinCaterpillar/pypet
[ 85, 18, 85, 2, 1379437560 ]
def test_graceful_exit(self): pass
SmokinCaterpillar/pypet
[ 85, 18, 85, 2, 1379437560 ]
def set_mode(self):
    super(MultiprocFrozenSCOOPLocalTest, self).set_mode()
    self.mode = pypetconstants.WRAP_MODE_LOCAL
    self.multiproc = True
    self.freeze_input = True
    self.ncores = 4
    self.gc_interval = 3
    self.niceness = check_nice(1)
    self.use_pool = False
    self.use_scoop = True
    self.graceful_exit = False
SmokinCaterpillar/pypet
[ 85, 18, 85, 2, 1379437560 ]
def test_niceness(self): pass
SmokinCaterpillar/pypet
[ 85, 18, 85, 2, 1379437560 ]
def set_mode(self):
    super(MultiprocFrozenSCOOPSortNetlockTest, self).set_mode()
    self.mode = pypetconstants.WRAP_MODE_NETLOCK
    self.freeze_input = True
    self.multiproc = True
    self.ncores = 4
    self.use_pool = False
    self.use_scoop = True
    self.port = (10000, 60000)
    self.graceful_exit = False
SmokinCaterpillar/pypet
[ 85, 18, 85, 2, 1379437560 ]
def test_graceful_exit(self): pass
SmokinCaterpillar/pypet
[ 85, 18, 85, 2, 1379437560 ]
def set_mode(self):
    super(MultiprocFrozenSCOOPSortNetqueueTest, self).set_mode()
    self.mode = pypetconstants.WRAP_MODE_NETQUEUE
    self.freeze_input = True
    self.multiproc = True
    self.ncores = 4
    self.use_pool = False
    self.use_scoop = True
    self.graceful_exit = False
    # self.port = 'tcp://127.0.0.1:22334'
SmokinCaterpillar/pypet
[ 85, 18, 85, 2, 1379437560 ]
def test_graceful_exit(self): pass
SmokinCaterpillar/pypet
[ 85, 18, 85, 2, 1379437560 ]
def set_mode(self):
    super(MultiprocSCOOPNetlockTest, self).set_mode()
    self.mode = pypetconstants.WRAP_MODE_NETLOCK
    self.multiproc = True
    self.freeze_input = False
    self.ncores = 4
    self.gc_interval = 3
    self.niceness = check_nice(1)
    self.use_pool = False
    self.use_scoop = True
    self.port = None
    self.timeout = 1099.99
    self.graceful_exit = False
    # self.port = 'tcp://127.0.0.1:22334'
SmokinCaterpillar/pypet
[ 85, 18, 85, 2, 1379437560 ]
def test_niceness(self): pass
SmokinCaterpillar/pypet
[ 85, 18, 85, 2, 1379437560 ]
def __init__(self, nrows, ncols, width, height, layout):
    # style args can include height/width, nrows, ncols, shared, ...
    self.nrows = nrows
    self.ncols = ncols
    self.width = width
    self.height = height
    self.layout = layout

    # get .canvas and .axes
    self.get_tree_dims()
    self.get_canvas_and_axes()
eaton-lab/toytree
[ 133, 26, 133, 11, 1491841399 ]
def get_tree_dims(self):
    """get height and width if not set by user"""
    if self.ncols * self.nrows < 4:
        minx = 250
        miny = 250
    else:
        minx = 200
        miny = 140

    # wider than tall
    if self.layout in ("d", "u"):
        self.width = (
            self.width if self.width
            else min(750, minx * self.ncols)
        )
        self.height = (
            self.height if self.height
            else min(750, miny * self.nrows)
        )
    else:
        self.height = (
            self.height if self.height
            else min(750, minx * self.nrows)
        )
        self.width = (
            self.width if self.width
            else min(750, miny * self.ncols)
        )
eaton-lab/toytree
[ 133, 26, 133, 11, 1491841399 ]
def __init__(self, tree, axes, style):
    # args includes axes
    self.tree = tree
    self.axes = axes
    self.style = style
    self.canvas = None
    self.external_axis = False

    # get the longest name for dimension fitting
    self.lname = 0
    if not all([i is None for i in self.style.tip_labels]):
        self.lname = max([len(str(i)) for i in self.style.tip_labels])

    # ntips and shape to fit with provided args
    self.get_dims_from_tree_size()

    # fills canvas and axes
    self.get_canvas_and_axes()

    # expand the domain/extents for the text
    # self.fit_tip_labels()

    # ticks for tree and scalebar
    self.add_axes_style()
eaton-lab/toytree
[ 133, 26, 133, 11, 1491841399 ]
def get_canvas_and_axes(self):
    """ """
    if self.axes is not None:
        self.canvas = None
        self.external_axis = True
    else:
        self.canvas = toyplot.Canvas(
            height=self.style.height,
            width=self.style.width,
        )
        self.axes = self.canvas.cartesian(
            padding=self.style.padding
        )
eaton-lab/toytree
[ 133, 26, 133, 11, 1491841399 ]
def scores_to_probs(scores):
    scores = numpy.array(scores)
    scores -= scores.max()
    probs = numpy.exp(scores, out=scores)
    probs /= probs.sum()
    return probs
forcedotcom/distributions
[ 32, 24, 32, 16, 1360952494 ]
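scores_to_probs is the classic numerically stable softmax: subtracting the maximum score before exponentiating cannot change the normalized result, but it keeps exp from overflowing on large log-scores. A quick illustration with made-up values:

import numpy

# Log-scores around 1000 would overflow a naive exp();
# after shifting by the max they become [-2, -1, 0].
scores = numpy.array([1000.0, 1001.0, 1002.0])
shifted = scores - scores.max()
probs = numpy.exp(shifted)
probs /= probs.sum()
print(probs)  # approximately [0.090, 0.245, 0.665]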
def print_histogram(probs, counts):
    WIDTH = 60.0
    max_count = max(counts)
    print '{: >8} {: >8}'.format('Prob', 'Count')
    for prob, count in sorted(zip(probs, counts), reverse=True):
        width = int(round(WIDTH * count / max_count))
        print '{: >8.3f} {: >8d} {}'.format(prob, count, '-' * width)
forcedotcom/distributions
[ 32, 24, 32, 16, 1360952494 ]
def unif01_goodness_of_fit(samples, plot=False):
    """
    Bin uniformly distributed samples and apply Pearson's chi^2 test.
    """
    samples = numpy.array(samples, dtype=float)
    assert samples.min() >= 0.0
    assert samples.max() <= 1.0
    bin_count = int(round(len(samples) ** 0.333))
    assert bin_count >= 7, 'WARNING imprecise test, use more samples'
    probs = numpy.ones(bin_count, dtype=numpy.float) / bin_count
    counts = numpy.zeros(bin_count, dtype=numpy.int)
    for sample in samples:
        counts[int(bin_count * sample)] += 1
    return multinomial_goodness_of_fit(probs, counts, len(samples), plot=plot)
forcedotcom/distributions
[ 32, 24, 32, 16, 1360952494 ]
def discrete_goodness_of_fit(
        samples,
        probs_dict,
        truncate_beyond=8,
        plot=False):
    """
    Transform arbitrary discrete data to multinomial
    and assess goodness of fit via Pearson's chi^2 test.
    """
    assert len(samples) > 100, 'WARNING imprecision; use more samples'
    counts = defaultdict(lambda: 0)
    for sample in samples:
        assert sample in probs_dict
        counts[sample] += 1
    items = [(prob, counts.get(i, 0)) for i, prob in probs_dict.iteritems()]
    items.sort(reverse=True)
    truncated = (truncate_beyond and truncate_beyond < len(items))
    if truncated:
        items = items[:truncate_beyond]
    probs = [prob for prob, count in items]
    counts = [count for prob, count in items]
    return multinomial_goodness_of_fit(
        probs, counts, len(samples), truncated=truncated, plot=plot)
forcedotcom/distributions
[ 32, 24, 32, 16, 1360952494 ]
def schemamigration():
    # turn ``schemamigration.py --initial`` into
    # ``manage.py schemamigration cmsplugin_disqus --initial`` and set up
    # the environment
    from django.conf import settings
    from django.core.management import ManagementUtility

    settings.configure(
        INSTALLED_APPS=INSTALLED_APPS,
        ROOT_URLCONF=ROOT_URLCONF,
        DATABASES=DATABASES,
        TEMPLATE_CONTEXT_PROCESSORS=TEMPLATE_CONTEXT_PROCESSORS
    )

    argv = list(sys.argv)
    argv.insert(1, 'schemamigration')
    argv.insert(2, 'djangocms_inherit')

    utility = ManagementUtility(argv)
    utility.execute()
divio/djangocms-inherit
[ 3, 18, 3, 8, 1391521515 ]
def __init__(self, cost_withGradients):
    super(CostModel, self).__init__()

    self.cost_type = cost_withGradients

    # --- Set-up evaluation cost
    if self.cost_type is None:
        self.cost_withGradients = constant_cost_withGradients
        self.cost_type = 'Constant cost'
    elif self.cost_type == 'evaluation_time':
        self.cost_model = GPModel()
        self.cost_withGradients = self._cost_gp_withGradients
        self.num_updates = 0
    else:
        self.cost_withGradients = cost_withGradients
        self.cost_type = 'User defined cost'
SheffieldML/GPyOpt
[ 858, 251, 858, 104, 1407923905 ]
def _cost_gp_withGradients(self, x):
    """
    Predicts the time cost and its gradient of evaluating the function at x.
    """
    m, _, dmdx, _ = self.cost_model.predict_withGradients(x)
    return np.exp(m), np.exp(m) * dmdx
SheffieldML/GPyOpt
[ 858, 251, 858, 104, 1407923905 ]
def __init__(self, root, transforms=None):
    super().__init__(root=root)
    self.transforms = transforms

    self._flow_list = []
    self._image_list = []
pytorch/vision
[ 13447, 6560, 13447, 884, 1478733103 ]
def _read_flow(self, file_name):
    # Return the flow or a tuple with the flow and the valid_flow_mask
    # if _has_builtin_flow_mask is True
    pass
pytorch/vision
[ 13447, 6560, 13447, 884, 1478733103 ]
def __len__(self): return len(self._image_list)
pytorch/vision
[ 13447, 6560, 13447, 884, 1478733103 ]
def __init__(self, root, split="train", pass_name="clean", transforms=None): super().__init__(root=root, transforms=transforms) verify_str_arg(split, "split", valid_values=("train", "test")) verify_str_arg(pass_name, "pass_name", valid_values=("clean", "final", "both")) passes = ["clean", "final"] if pass_name == "both" else [pass_name] root = Path(root) / "Sintel" flow_root = root / "training" / "flow" for pass_name in passes: split_dir = "training" if split == "train" else split image_root = root / split_dir / pass_name for scene in os.listdir(image_root): image_list = sorted(glob(str(image_root / scene / "*.png"))) for i in range(len(image_list) - 1): self._image_list += [[image_list[i], image_list[i + 1]]] if split == "train": self._flow_list += sorted(glob(str(flow_root / scene / "*.flo")))
pytorch/vision
[ 13447, 6560, 13447, 884, 1478733103 ]
def _read_flow(self, file_name): return _read_flo(file_name)
pytorch/vision
[ 13447, 6560, 13447, 884, 1478733103 ]
def __init__(self, root, split="train", transforms=None): super().__init__(root=root, transforms=transforms) verify_str_arg(split, "split", valid_values=("train", "test")) root = Path(root) / "KittiFlow" / (split + "ing") images1 = sorted(glob(str(root / "image_2" / "*_10.png"))) images2 = sorted(glob(str(root / "image_2" / "*_11.png"))) if not images1 or not images2: raise FileNotFoundError( "Could not find the Kitti flow images. Please make sure the directory structure is correct." ) for img1, img2 in zip(images1, images2): self._image_list += [[img1, img2]] if split == "train": self._flow_list = sorted(glob(str(root / "flow_occ" / "*_10.png")))
pytorch/vision
[ 13447, 6560, 13447, 884, 1478733103 ]
def _read_flow(self, file_name): return _read_16bits_png_with_flow_and_valid_mask(file_name)
pytorch/vision
[ 13447, 6560, 13447, 884, 1478733103 ]
def __init__(self, root, split="train", transforms=None): super().__init__(root=root, transforms=transforms) verify_str_arg(split, "split", valid_values=("train", "val")) root = Path(root) / "FlyingChairs" images = sorted(glob(str(root / "data" / "*.ppm"))) flows = sorted(glob(str(root / "data" / "*.flo"))) split_file_name = "FlyingChairs_train_val.txt" if not os.path.exists(root / split_file_name): raise FileNotFoundError( "The FlyingChairs_train_val.txt file was not found - please download it from the dataset page (see docstring)." ) split_list = np.loadtxt(str(root / split_file_name), dtype=np.int32) for i in range(len(flows)): split_id = split_list[i] if (split == "train" and split_id == 1) or (split == "val" and split_id == 2): self._flow_list += [flows[i]] self._image_list += [[images[2 * i], images[2 * i + 1]]]
pytorch/vision
[ 13447, 6560, 13447, 884, 1478733103 ]
def _read_flow(self, file_name): return _read_flo(file_name)
pytorch/vision
[ 13447, 6560, 13447, 884, 1478733103 ]
def __init__(self, root, split="train", pass_name="clean", camera="left", transforms=None): super().__init__(root=root, transforms=transforms) verify_str_arg(split, "split", valid_values=("train", "test")) split = split.upper() verify_str_arg(pass_name, "pass_name", valid_values=("clean", "final", "both")) passes = { "clean": ["frames_cleanpass"], "final": ["frames_finalpass"], "both": ["frames_cleanpass", "frames_finalpass"], }[pass_name] verify_str_arg(camera, "camera", valid_values=("left", "right", "both")) cameras = ["left", "right"] if camera == "both" else [camera] root = Path(root) / "FlyingThings3D" directions = ("into_future", "into_past") for pass_name, camera, direction in itertools.product(passes, cameras, directions): image_dirs = sorted(glob(str(root / pass_name / split / "*/*"))) image_dirs = sorted(Path(image_dir) / camera for image_dir in image_dirs) flow_dirs = sorted(glob(str(root / "optical_flow" / split / "*/*"))) flow_dirs = sorted(Path(flow_dir) / direction / camera for flow_dir in flow_dirs) if not image_dirs or not flow_dirs: raise FileNotFoundError( "Could not find the FlyingThings3D flow images. " "Please make sure the directory structure is correct." ) for image_dir, flow_dir in zip(image_dirs, flow_dirs): images = sorted(glob(str(image_dir / "*.png"))) flows = sorted(glob(str(flow_dir / "*.pfm"))) for i in range(len(flows) - 1): if direction == "into_future": self._image_list += [[images[i], images[i + 1]]] self._flow_list += [flows[i]] elif direction == "into_past": self._image_list += [[images[i + 1], images[i]]] self._flow_list += [flows[i + 1]]
pytorch/vision
[ 13447, 6560, 13447, 884, 1478733103 ]
def _read_flow(self, file_name): return _read_pfm(file_name)
pytorch/vision
[ 13447, 6560, 13447, 884, 1478733103 ]
def __init__(self, root, split="train", transforms=None): super().__init__(root=root, transforms=transforms) verify_str_arg(split, "split", valid_values=("train", "test")) root = Path(root) / "hd1k" if split == "train": # There are 36 "sequences" and we don't want seq i to overlap with seq i + 1, so we need this for loop for seq_idx in range(36): flows = sorted(glob(str(root / "hd1k_flow_gt" / "flow_occ" / f"{seq_idx:06d}_*.png"))) images = sorted(glob(str(root / "hd1k_input" / "image_2" / f"{seq_idx:06d}_*.png"))) for i in range(len(flows) - 1): self._flow_list += [flows[i]] self._image_list += [[images[i], images[i + 1]]] else: images1 = sorted(glob(str(root / "hd1k_challenge" / "image_2" / "*10.png"))) images2 = sorted(glob(str(root / "hd1k_challenge" / "image_2" / "*11.png"))) for image1, image2 in zip(images1, images2): self._image_list += [[image1, image2]] if not self._image_list: raise FileNotFoundError( "Could not find the HD1K images. Please make sure the directory structure is correct." )
pytorch/vision
[ 13447, 6560, 13447, 884, 1478733103 ]
def __getitem__(self, index):
    """Return example at given index.

    Args:
        index(int): The index of the example to retrieve

    Returns:
        tuple: A 4-tuple with ``(img1, img2, flow, valid_flow_mask)`` where
        ``valid_flow_mask`` is a numpy boolean mask of shape (H, W) indicating
        which flow values are valid. The flow is a numpy array of shape
        (2, H, W) and the images are PIL images. ``flow`` and
        ``valid_flow_mask`` are None if ``split="test"``.
    """
    return super().__getitem__(index)
pytorch/vision
[ 13447, 6560, 13447, 884, 1478733103 ]
def _read_16bits_png_with_flow_and_valid_mask(file_name):
    flow_and_valid = _read_png_16(file_name).to(torch.float32)
    flow, valid_flow_mask = flow_and_valid[:2, :, :], flow_and_valid[2, :, :]
    flow = (flow - 2 ** 15) / 64  # This conversion is explained somewhere on the kitti archive
    valid_flow_mask = valid_flow_mask.bool()

    # For consistency with other datasets, we convert to numpy
    return flow.numpy(), valid_flow_mask.numpy()
pytorch/vision
[ 13447, 6560, 13447, 884, 1478733103 ]
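The helper above assumes the KITTI sparse-flow encoding: the u and v components sit in the first two channels of a 16-bit PNG as 64 * flow + 2**15, and the third channel flags valid pixels. A small decode check under that assumption, with hypothetical raw pixel values:

import torch

# Hypothetical raw 16-bit values for one 2x2 flow channel.
raw = torch.tensor([[2 ** 15, 2 ** 15 + 64],
                    [2 ** 15 - 128, 2 ** 15]], dtype=torch.float32)
flow = (raw - 2 ** 15) / 64
print(flow)  # [[0., 1.], [-2., 0.]] pixel displacements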
def detectSignature(filename):
    # the list contains all the accesses
    rlist = AccList()
    wlist = AccList()
    accList = AccList()

    # all lines with "accList" are commented out because the figure
    # drawing using accList is replaced with rlist and wlist

    # open the trace file
    f = open(filename, 'r')

    # skip the first several lines;
    # maybe the skipped lines are table heads
    for i in range(int(sig._format_prop['skip_lines'])):
        line = f.readline()
yinyanlong/iosig
[ 8, 3, 8, 5, 1393557818 ]
def generateCSVs(single_trace_filename):
    """Generate the Read/Write Bandwidth figures"""
    trace_path, trace_filename = os.path.split(single_trace_filename)

    # the list contains all the accesses
    rlist = AccList()
    wlist = AccList()
    rlistEmpty = 1
    wlistEmpty = 1

    total_read_count = 0
    total_write_count = 0
    total_read_time = 0.0
    total_write_time = 0.0

    # Create and empty each CSV file, write the CSV title line
    output = os.path.join(sig._out_path, trace_filename + ".read.rate.csv")
    f = open(output, 'w')
    f.write("Time,Rate\n")
    f.close()

    output = os.path.join(sig._out_path, trace_filename + ".write.rate.csv")
    f = open(output, 'w')
    f.write("Time,Rate\n")
    f.close()

    output = os.path.join(sig._out_path, trace_filename + ".read.interval.csv")
    f = open(output, 'w')
    f.write("Begin,End\n")
    f.close()

    output = os.path.join(sig._out_path, trace_filename + ".write.interval.csv")
    f = open(output, 'w')
    f.write("Begin,End\n")
    f.close()

    output = os.path.join(sig._out_path, trace_filename + ".read.hole.sizes.csv")
    f = open(output, 'w')
    f.write("Time,Size\n")
    f.close()

    # open the trace file
    f = open(single_trace_filename, 'r')

    # skip the first several lines;
    # maybe the skipped lines are table heads
    for i in range(int(sig._format_prop['skip_lines'])):
        line = f.readline()
yinyanlong/iosig
[ 8, 3, 8, 5, 1393557818 ]
def __init__(self, name: str, get_converter: t.Optional[t.Callable] = None) -> None:
    self.__name__ = name
    self.get_converter = get_converter
mitsuhiko/flask
[ 62144, 15482, 62144, 6, 1270552319 ]
def __set__(self, obj: t.Any, value: t.Any) -> None: obj.config[self.__name__] = value
mitsuhiko/flask
[ 62144, 15482, 62144, 6, 1270552319 ]
def __init__(self, root_path: str, defaults: t.Optional[dict] = None) -> None:
    dict.__init__(self, defaults or {})
    self.root_path = root_path
mitsuhiko/flask
[ 62144, 15482, 62144, 6, 1270552319 ]
def from_pyfile(self, filename: str, silent: bool = False) -> bool:
    """Updates the values in the config from a Python file.  This function
    behaves as if the file was imported as module with the
    :meth:`from_object` function.

    :param filename: the filename of the config.  This can either be an
                     absolute filename or a filename relative to the
                     root path.
    :param silent: set to ``True`` if you want silent failure for missing
                   files.

    .. versionadded:: 0.7
       `silent` parameter.
    """
    filename = os.path.join(self.root_path, filename)
    d = types.ModuleType("config")
    d.__file__ = filename
    try:
        with open(filename, mode="rb") as config_file:
            exec(compile(config_file.read(), filename, "exec"), d.__dict__)
    except OSError as e:
        if silent and e.errno in (errno.ENOENT, errno.EISDIR, errno.ENOTDIR):
            return False
        e.strerror = f"Unable to load configuration file ({e.strerror})"
        raise
    self.from_object(d)
    return True
mitsuhiko/flask
[ 62144, 15482, 62144, 6, 1270552319 ]
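from_pyfile executes the file as a throwaway module and hands it to from_object, which copies only the uppercase attributes into the config. A hedged usage sketch (the filename and keys below are illustrative, not from the source):

# settings.py (illustrative contents):
#     DEBUG = True
#     SECRET_KEY = "dev"
#     _internal = "skipped"  # lowercase names are ignored by from_object

from flask import Flask

app = Flask(__name__)
app.config.from_pyfile("settings.py")  # resolved relative to root_path
print(app.config["DEBUG"])             # True
print("_internal" in app.config)       # False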
def from_file( self, filename: str, load: t.Callable[[t.IO[t.Any]], t.Mapping], silent: bool = False,
mitsuhiko/flask
[ 62144, 15482, 62144, 6, 1270552319 ]
def from_json(self, filename: str, silent: bool = False) -> bool:
    """Update the values in the config from a JSON file. The loaded
    data is passed to the :meth:`from_mapping` method.

    :param filename: The path to the JSON file. This can be an
        absolute path or relative to the config root path.
    :param silent: Ignore the file if it doesn't exist.

    .. deprecated:: 2.0.0
        Will be removed in Flask 2.1. Use :meth:`from_file` instead.
        This was removed early in 2.0.0, was added back in 2.0.1.

    .. versionadded:: 0.11
    """
    import warnings
    from . import json

    warnings.warn(
        "'from_json' is deprecated and will be removed in Flask"
        " 2.1. Use 'from_file(path, json.load)' instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    return self.from_file(filename, json.load, silent=silent)
mitsuhiko/flask
[ 62144, 15482, 62144, 6, 1270552319 ]
def get_namespace( self, namespace: str, lowercase: bool = True, trim_namespace: bool = True
mitsuhiko/flask
[ 62144, 15482, 62144, 6, 1270552319 ]
def new_flow():
    '''Create a new flow

    :status 200: Render the new flow template
    :status 302: Try to create a new flow using the
        :py:class:`~purchasing.conductor.forms.NewFlowForm`, redirect to
        the flows list view if successful
    '''
    stages = Stage.choices_factory()
    form = NewFlowForm(stages=stages)
    if form.validate_on_submit():
        stage_order = []
        for entry in form.stage_order.entries:
            # try to evaluate the return value as an ID
            try:
                stage_id = int(entry.data)
            # otherwise it's a new stage
            except ValueError:
                new_stage = Stage.create(name=entry.data)
                stage_id = new_stage.id
            stage_order.append(stage_id)
        Flow.create(flow_name=form.flow_name.data, stage_order=stage_order)
        flash('Flow created successfully!', 'alert-success')
        return redirect(url_for('conductor.flows_list'))
    return render_template('conductor/flows/new.html', stages=stages, form=form)
codeforamerica/pittsburgh-purchasing-suite
[ 18, 9, 18, 6, 1429573929 ]
def flows_list():
    '''List all flows

    :status 200: Render the all flows list template
    '''
    flows = Flow.query.order_by(Flow.flow_name).all()
    active, archived = [], []
    for flow in flows:
        if flow.is_archived:
            archived.append(flow)
        else:
            active.append(flow)

    return render_template(
        'conductor/flows/browse.html',
        active=active,
        archived=archived
    )
codeforamerica/pittsburgh-purchasing-suite
[ 18, 9, 18, 6, 1429573929 ]
def begin(self): self.append({'cbs': [], 'dirty': False})
Suor/django-cacheops
[ 1796, 209, 1796, 21, 1307190347 ]
def rollback(self): self.pop()
Suor/django-cacheops
[ 1796, 209, 1796, 21, 1307190347 ]
def mark_dirty(self): self[-1]['dirty'] = True
Suor/django-cacheops
[ 1796, 209, 1796, 21, 1307190347 ]
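Taken together, begin, rollback, and mark_dirty treat the state object as a stack of transaction frames: each begin pushes a frame with its own callback list and dirty flag, so rolling back a nested savepoint discards only that frame's state. A minimal self-contained sketch of the same idea, using just the three methods shown above:

class TransactionState(list):
    def begin(self):
        self.append({'cbs': [], 'dirty': False})

    def rollback(self):
        self.pop()

    def mark_dirty(self):
        self[-1]['dirty'] = True

state = TransactionState()
state.begin()        # outer transaction
state.begin()        # nested savepoint
state.mark_dirty()   # only the savepoint frame is dirty
state.rollback()     # discard the savepoint and its flag
print(state)         # [{'cbs': [], 'dirty': False}]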
def __init__(self):
    super(TransactionStates, self).__init__()
    self._states = defaultdict(TransactionState)
Suor/django-cacheops
[ 1796, 209, 1796, 21, 1307190347 ]
def is_dirty(self, dbs): return any(self[db].is_dirty() for db in dbs)
Suor/django-cacheops
[ 1796, 209, 1796, 21, 1307190347 ]
def queue_when_in_transaction(call):
    if transaction_states[call.using]:
        transaction_states[call.using].push((call, (), {}))
    else:
        return call()
Suor/django-cacheops
[ 1796, 209, 1796, 21, 1307190347 ]
def __enter__(self):
    entering = not transaction_states[self.using]
    transaction_states[self.using].begin()
    self._no_monkey.__enter__(self)
    if entering:
        on_commit(transaction_states[self.using].commit, self.using)
Suor/django-cacheops
[ 1796, 209, 1796, 21, 1307190347 ]
def callproc(self, procname, params=None):
    result = self._no_monkey.callproc(self, procname, params)
    if transaction_states[self.db.alias]:
        transaction_states[self.db.alias].mark_dirty()
    return result
Suor/django-cacheops
[ 1796, 209, 1796, 21, 1307190347 ]
def executemany(self, sql, param_list):
    result = self._no_monkey.executemany(self, sql, param_list)
    if transaction_states[self.db.alias] and is_sql_dirty(sql):
        transaction_states[self.db.alias].mark_dirty()
    return result
Suor/django-cacheops
[ 1796, 209, 1796, 21, 1307190347 ]
def is_sql_dirty(sql):
    # This should not happen as using bytes in Python 3 is against db protocol,
    # but some people will pass it anyway
    if isinstance(sql, bytes):
        sql = sql.decode()
    # NOTE: not using regex here for speed
    sql = sql.lower()
    for action in ('update', 'insert', 'delete'):
        p = sql.find(action)
        if p == -1:
            continue
        start, end = p - 1, p + len(action)
        if (start < 0 or sql[start] not in CHARS) \
                and (end >= len(sql) or sql[end] not in CHARS):
            return True
    else:
        return False
Suor/django-cacheops
[ 1796, 209, 1796, 21, 1307190347 ]
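The word-boundary test in is_sql_dirty relies on a CHARS constant defined elsewhere in the module; assuming it approximates the set of identifier characters, the check flags "update" as a statement keyword but not as a substring of a longer token. A hedged illustration of just that boundary logic:

import string

# Assumption: CHARS approximates the module's identifier-character set.
CHARS = set(string.ascii_lowercase + string.digits + '_')

def is_standalone(sql, action):
    sql = sql.lower()
    p = sql.find(action)
    if p == -1:
        return False
    start, end = p - 1, p + len(action)
    return ((start < 0 or sql[start] not in CHARS)
            and (end >= len(sql) or sql[end] not in CHARS))

print(is_standalone('UPDATE t SET x = 1', 'update'))     # True
print(is_standalone('SELECT * FROM updates', 'update'))  # False: inside "updates"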
def test_reverse_rfc822_datetime():
    dates = [
        ("Sat, 01 Jan 2011 00:00:00 -0000", datetime(2011, 1, 1, tzinfo=pytz.utc)),
        ("Sat, 01 Jan 2011 23:59:59 -0000", datetime(2011, 1, 1, 23, 59, 59, tzinfo=pytz.utc)),
        ("Sat, 01 Jan 2011 21:59:59 -0200", datetime(2011, 1, 1, 23, 59, 59, tzinfo=pytz.utc)),
    ]

    for date_string, expected in dates:
        yield assert_equal, inputs.datetime_from_rfc822(date_string), expected
flask-restful/flask-restful
[ 6540, 1017, 6540, 137, 1350000531 ]
def test_urls():
    urls = [
        'http://www.djangoproject.com/',
        'http://localhost/',
        'http://example.com/',
        'http://www.example.com/',
        'http://www.example.com:8000/test',
        'http://valid-with-hyphens.com/',
        'http://subdomain.example.com/',
        'http://200.8.9.10/',
        'http://200.8.9.10:8000/test',
        'http://valid-----hyphens.com/',
        'http://example.com?something=value',
        'http://example.com/index.php?something=value&another=value2',
        'http://foo:[email protected]',
        'http://foo:@example.com',
        'http://foo:@2001:db8:85a3::8a2e:370:7334',
        'http://foo2:qd1%[email protected]',
    ]

    for value in urls:
        yield assert_equal, inputs.url(value), value
flask-restful/flask-restful
[ 6540, 1017, 6540, 137, 1350000531 ]
def test_bad_urls():
    values = [
        'foo',
        'http://',
        'http://example',
        'http://example.',
        'http://.com',
        'http://invalid-.com',
        'http://-invalid.com',
        'http://inv-.alid-.com',
        'http://inv-.-alid.com',
        'foo bar baz',
        u'foo \u2713',
        'http://@foo:[email protected]',
        'http://:[email protected]',
        'http://bar:bar:[email protected]',
    ]

    for value in values:
        yield check_bad_url_raises, value
flask-restful/flask-restful
[ 6540, 1017, 6540, 137, 1350000531 ]
def check_url_error_message(value):
    try:
        inputs.url(value)
        assert False, u"inputs.url({0}) should raise an exception".format(value)
    except ValueError as e:
        assert_equal(six.text_type(e),
                     (u"{0} is not a valid URL. Did you mean: http://{0}".format(value)))
flask-restful/flask-restful
[ 6540, 1017, 6540, 137, 1350000531 ]
def test_regex_good_input():
    cases = (
        '123',
        '1234567890',
        '00000',
    )

    num_only = inputs.regex(r'^[0-9]+$')

    for value in cases:
        yield assert_equal, num_only(value), value
flask-restful/flask-restful
[ 6540, 1017, 6540, 137, 1350000531 ]
def test_regex_flags_good_input():
    cases = (
        'abcd',
        'ABCabc',
        'ABC',
    )

    case_insensitive = inputs.regex(r'^[A-Z]+$', re.IGNORECASE)

    for value in cases:
        yield assert_equal, case_insensitive(value), value
flask-restful/flask-restful
[ 6540, 1017, 6540, 137, 1350000531 ]
def test_boolean_false(self): assert_equal(inputs.boolean("False"), False)
flask-restful/flask-restful
[ 6540, 1017, 6540, 137, 1350000531 ]
def test_boolean_true(self): assert_equal(inputs.boolean("true"), True)
flask-restful/flask-restful
[ 6540, 1017, 6540, 137, 1350000531 ]
def test_boolean_upper_case(self): assert_equal(inputs.boolean("FaLSE"), False)
flask-restful/flask-restful
[ 6540, 1017, 6540, 137, 1350000531 ]
def test_boolean_with_python_bool(self):
    """Input that is already a native python `bool` should be passed
    through without extra processing."""
    assert_equal(inputs.boolean(True), True)
    assert_equal(inputs.boolean(False), False)
flask-restful/flask-restful
[ 6540, 1017, 6540, 137, 1350000531 ]
def test_date_later_than_1900(self): assert_equal(inputs.date("1900-01-01"), datetime(1900, 1, 1))
flask-restful/flask-restful
[ 6540, 1017, 6540, 137, 1350000531 ]
def test_date_input(self): assert_equal(inputs.date("2008-08-01"), datetime(2008, 8, 1))
flask-restful/flask-restful
[ 6540, 1017, 6540, 137, 1350000531 ]
def test_natural(self): assert_equal(3, inputs.natural(3))
flask-restful/flask-restful
[ 6540, 1017, 6540, 137, 1350000531 ]
def test_positive(self):
    assert_equal(1, inputs.positive(1))
    assert_equal(10000, inputs.positive(10000))
flask-restful/flask-restful
[ 6540, 1017, 6540, 137, 1350000531 ]
def test_positive_negative_input(self): assert_raises(ValueError, lambda: inputs.positive(-1))
flask-restful/flask-restful
[ 6540, 1017, 6540, 137, 1350000531 ]
def test_int_range_inclusive(self):
    int_range = inputs.int_range(1, 5)
    assert_equal(5, int_range(5))
flask-restful/flask-restful
[ 6540, 1017, 6540, 137, 1350000531 ]
def test_int_range_high(self):
    int_range = inputs.int_range(0, 5)
    assert_raises(ValueError, lambda: int_range(6))
flask-restful/flask-restful
[ 6540, 1017, 6540, 137, 1350000531 ]
def test_invalid_isointerval_error():
    try:
        inputs.iso8601interval('2013-01-01/blah')
    except ValueError as error:
        assert_equal(
            str(error),
            "Invalid argument: 2013-01-01/blah. argument must be a valid ISO8601 "
            "date/time interval.",
        )
        return

    assert False, 'Should raise a ValueError'
flask-restful/flask-restful
[ 6540, 1017, 6540, 137, 1350000531 ]