Dataset columns:
code: string, lengths 22 to 1.05M
apis: list, lengths 1 to 3.31k
extract_api: string, lengths 75 to 3.25M
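Each record that follows pairs a raw code string with the list of fully qualified apis it uses and an extract_api string holding one tuple per extracted call. The dump does not document the tuple layout, so the field names in the sketch below are assumptions inferred from the records themselves; each cell is a plain Python literal, so the standard library is enough to read it:

import ast

# One extract_api cell, copied from the first record below.
sample_cell = (
    "[((2019, 2032), 'json.loads', 'json.loads', (['s'], {}), '(s)\\n', "
    "(2029, 2032), False, 'import json\\n')]"
)

# Assumed layout per tuple (inferred, not documented): call span, qualified API
# name, name as written in the source, (positional args, keyword args), argument
# source text, argument span, whether the import is aliased, and the import line.
for record in ast.literal_eval(sample_cell):
    call_span, qualified_name, import_line = record[0], record[1], record[-1]
    print(call_span, qualified_name, import_line.strip())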
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://www.apache.org/licenses/LICENSE-2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. """ JSON Serialization/Deserialization ---------------------------------- The canonical way to do this is to define the `default` and `object_hook` parameters to the json.dumps and json.loads methods. Unfortunately, due to https://bugs.python.org/issue12657 this is not possible at the moment, as support for custom NamedTuple serialization is broken. To circumvent the issue, we pass the input value through custom encode and decode functions that map nested object terms to JSON-serializable data structures with explicit recursion. """ import json from typing import Any, Optional from ._base import encode, decode def dump_json(o: Any, indent: Optional[int] = None) -> str: """ Serializes an object to a JSON string. Parameters ---------- o The object to serialize. indent An optional number of spaces to use as an indent. Returns ------- str A string representing the object in JSON format. See Also -------- load_json Inverse function. """ return json.dumps(encode(o), indent=indent, sort_keys=True) def load_json(s: str) -> Any: """ Deserializes an object from a JSON string. Parameters ---------- s A string representing the object in JSON format. Returns ------- Any The deserialized object. See Also -------- dump_json Inverse function. """ return decode(json.loads(s))
[ "json.loads" ]
[((2019, 2032), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (2029, 2032), False, 'import json\n')]
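The module docstring in the sample above argues for explicit-recursion encode/decode functions instead of the default/object_hook hooks of json.dumps and json.loads. The library's own encode/decode come from its ._base module and are not part of this record, so the following is only an illustrative sketch of that pattern with a stand-in NamedTuple, not the upstream implementation:

import json
from typing import Any, NamedTuple

class Point(NamedTuple):
    x: int
    y: int

def encode(o: Any) -> Any:
    # Map nested objects to JSON-serializable structures with explicit recursion.
    if isinstance(o, Point):
        return {"__point__": True, "x": encode(o.x), "y": encode(o.y)}
    if isinstance(o, list):
        return [encode(v) for v in o]
    return o

def decode(o: Any) -> Any:
    # Inverse of encode: rebuild the original objects from plain structures.
    if isinstance(o, dict) and o.get("__point__"):
        return Point(decode(o["x"]), decode(o["y"]))
    if isinstance(o, list):
        return [decode(v) for v in o]
    return o

# Round trip: serialize, then recover the NamedTuple intact.
assert decode(json.loads(json.dumps(encode([Point(1, 2)])))) == [Point(1, 2)]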
import os.path as osp import sys import subprocess subprocess.call(['pip', 'install', 'cvbase']) import cvbase as cvb import torch from torch.autograd import gradcheck sys.path.append(osp.abspath(osp.join(__file__, '../../'))) from biupdownsample import biupsample_naive, BiupsampleNaive from biupdownsample import bidownsample_naive, BidownsampleNaive feat = torch.randn(2, 64, 2, 2, requires_grad=True, device='cuda:0').double() mask = torch.randn( 2, 100, 4, 4, requires_grad=True, device='cuda:0').sigmoid().double() print('Gradcheck for biupsample naive...') test = gradcheck(BiupsampleNaive(5, 4, 2), (feat, mask), atol=1e-4, eps=1e-4) print(test) feat = torch.randn( 2, 1024, 100, 100, requires_grad=True, device='cuda:0').float() mask = torch.randn( 2, 25, 200, 200, requires_grad=True, device='cuda:0').sigmoid().float() loop_num = 500 time_naive_forward = 0 time_naive_backward = 0 bar = cvb.ProgressBar(loop_num) timer = cvb.Timer() for i in range(loop_num): x = biupsample_naive(feat.clone(), mask.clone(), 5, 1, 2) torch.cuda.synchronize() time_naive_forward += timer.since_last_check() x.sum().backward(retain_graph=True) torch.cuda.synchronize() time_naive_backward += timer.since_last_check() bar.update() forward_speed = (time_naive_forward + 1e-3) * 1e3 / loop_num backward_speed = (time_naive_backward + 1e-3) * 1e3 / loop_num print('\nBiupsample naive time forward: ' f'{forward_speed} ms/iter | time backward: {backward_speed} ms/iter') # --------------------------------------------------------------- feat = torch.randn(2, 64, 4, 4, requires_grad=True, device='cuda:0').double() mask = torch.randn( 2, 16, 4, 4, requires_grad=True, device='cuda:0').double() print('Gradcheck for bidownsample naive...') test = gradcheck(BidownsampleNaive(4, 1, 1), (feat, mask), atol=1e-4, eps=1e-4) print(test) feat = torch.randn( 2, 512, 200, 200, requires_grad=True, device='cuda:0').float() mask = torch.randn( 2, 100, 100, 100, requires_grad=True, device='cuda:0').sigmoid().float() loop_num = 500 time_naive_forward = 0 time_naive_backward = 0 bar = cvb.ProgressBar(loop_num) timer = cvb.Timer() for i in range(loop_num): x = bidownsample_naive(feat.clone(), mask.clone(), 10, 1, 2) torch.cuda.synchronize() time_naive_forward += timer.since_last_check() x.sum().backward(retain_graph=True) torch.cuda.synchronize() time_naive_backward += timer.since_last_check() bar.update() forward_speed = (time_naive_forward + 1e-3) * 1e3 / loop_num backward_speed = (time_naive_backward + 1e-3) * 1e3 / loop_num print('\nBidownsample naive time forward: ' f'{forward_speed} ms/iter | time backward: {backward_speed} ms/iter')
[ "biupdownsample.BidownsampleNaive", "os.path.join", "torch.cuda.synchronize", "cvbase.Timer", "biupdownsample.BiupsampleNaive", "subprocess.call", "torch.randn", "cvbase.ProgressBar" ]
[((52, 97), 'subprocess.call', 'subprocess.call', (["['pip', 'install', 'cvbase']"], {}), "(['pip', 'install', 'cvbase'])\n", (67, 97), False, 'import subprocess\n'), ((918, 943), 'cvbase.ProgressBar', 'cvb.ProgressBar', (['loop_num'], {}), '(loop_num)\n', (933, 943), True, 'import cvbase as cvb\n'), ((952, 963), 'cvbase.Timer', 'cvb.Timer', ([], {}), '()\n', (961, 963), True, 'import cvbase as cvb\n'), ((2139, 2164), 'cvbase.ProgressBar', 'cvb.ProgressBar', (['loop_num'], {}), '(loop_num)\n', (2154, 2164), True, 'import cvbase as cvb\n'), ((2173, 2184), 'cvbase.Timer', 'cvb.Timer', ([], {}), '()\n', (2182, 2184), True, 'import cvbase as cvb\n'), ((590, 614), 'biupdownsample.BiupsampleNaive', 'BiupsampleNaive', (['(5)', '(4)', '(2)'], {}), '(5, 4, 2)\n', (605, 614), False, 'from biupdownsample import biupsample_naive, BiupsampleNaive\n'), ((1056, 1080), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1078, 1080), False, 'import torch\n'), ((1176, 1200), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1198, 1200), False, 'import torch\n'), ((1806, 1832), 'biupdownsample.BidownsampleNaive', 'BidownsampleNaive', (['(4)', '(1)', '(1)'], {}), '(4, 1, 1)\n', (1823, 1832), False, 'from biupdownsample import bidownsample_naive, BidownsampleNaive\n'), ((2280, 2304), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (2302, 2304), False, 'import torch\n'), ((2400, 2424), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (2422, 2424), False, 'import torch\n'), ((198, 226), 'os.path.join', 'osp.join', (['__file__', '"""../../"""'], {}), "(__file__, '../../')\n", (206, 226), True, 'import os.path as osp\n'), ((363, 424), 'torch.randn', 'torch.randn', (['(2)', '(64)', '(2)', '(2)'], {'requires_grad': '(True)', 'device': '"""cuda:0"""'}), "(2, 64, 2, 2, requires_grad=True, device='cuda:0')\n", (374, 424), False, 'import torch\n'), ((672, 739), 'torch.randn', 'torch.randn', (['(2)', '(1024)', '(100)', '(100)'], {'requires_grad': '(True)', 'device': '"""cuda:0"""'}), "(2, 1024, 100, 100, requires_grad=True, device='cuda:0')\n", (683, 739), False, 'import torch\n'), ((1587, 1648), 'torch.randn', 'torch.randn', (['(2)', '(64)', '(4)', '(4)'], {'requires_grad': '(True)', 'device': '"""cuda:0"""'}), "(2, 64, 4, 4, requires_grad=True, device='cuda:0')\n", (1598, 1648), False, 'import torch\n'), ((1665, 1726), 'torch.randn', 'torch.randn', (['(2)', '(16)', '(4)', '(4)'], {'requires_grad': '(True)', 'device': '"""cuda:0"""'}), "(2, 16, 4, 4, requires_grad=True, device='cuda:0')\n", (1676, 1726), False, 'import torch\n'), ((1892, 1958), 'torch.randn', 'torch.randn', (['(2)', '(512)', '(200)', '(200)'], {'requires_grad': '(True)', 'device': '"""cuda:0"""'}), "(2, 512, 200, 200, requires_grad=True, device='cuda:0')\n", (1903, 1958), False, 'import torch\n'), ((441, 503), 'torch.randn', 'torch.randn', (['(2)', '(100)', '(4)', '(4)'], {'requires_grad': '(True)', 'device': '"""cuda:0"""'}), "(2, 100, 4, 4, requires_grad=True, device='cuda:0')\n", (452, 503), False, 'import torch\n'), ((760, 825), 'torch.randn', 'torch.randn', (['(2)', '(25)', '(200)', '(200)'], {'requires_grad': '(True)', 'device': '"""cuda:0"""'}), "(2, 25, 200, 200, requires_grad=True, device='cuda:0')\n", (771, 825), False, 'import torch\n'), ((1979, 2045), 'torch.randn', 'torch.randn', (['(2)', '(100)', '(100)', '(100)'], {'requires_grad': '(True)', 'device': '"""cuda:0"""'}), "(2, 100, 100, 100, requires_grad=True, device='cuda:0')\n", (1990, 2045), False, 'import 
torch\n')]
from django.template.loader import render_to_string from django.utils.safestring import mark_safe from django.utils.translation import ugettext as _ from wagtail.admin.edit_handlers import EditHandler class FormSubmissionsPanel(EditHandler): template = "wagtailforms/edit_handlers/form_responses_panel.html" def bind_to_model(self, model): new = super().bind_to_model(model) if self.heading is None: new.heading = _('{} submissions').format(model.get_verbose_name()) return new def render(self): Submission = self.model.get_submission_class() submissions = Submission.objects.filter(page=self.instance) submission_count = submissions.count() if not submission_count: return '' return mark_safe(render_to_string(self.template, { 'self': self, 'submission_count': submission_count, 'last_submit_time': (submissions.order_by('submit_time') .last().submit_time), }))
[ "django.utils.translation.ugettext" ]
[((454, 473), 'django.utils.translation.ugettext', '_', (['"""{} submissions"""'], {}), "('{} submissions')\n", (455, 473), True, 'from django.utils.translation import ugettext as _\n')]
# -*- coding: utf-8 -*- """ Extension that adjust project file tree to include a namespace package. This extension adds a **namespace** option to :obj:`~pyscaffold.api.create_project` and provides correct values for the options **root_pkg** and **namespace_pkg** to the following functions in the action list. """ import argparse import os from os.path import isdir from os.path import join as join_path from .. import templates, utils from ..api import Extension, helpers from ..log import logger class Namespace(Extension): """Add a namespace (container package) to the generated package.""" def augment_cli(self, parser): """Add an option to parser that enables the namespace extension. Args: parser (argparse.ArgumentParser): CLI parser object """ parser.add_argument( self.flag, dest=self.name, default=None, action=create_namespace_parser(self), metavar="NS1[.NS2]", help="put your project inside a namespace package") def activate(self, actions): """Register an action responsible for adding namespace to the package. Args: actions (list): list of actions to perform Returns: list: updated list of actions """ actions = helpers.register(actions, enforce_namespace_options, after='get_default_options') actions = helpers.register(actions, add_namespace, before='apply_update_rules') return helpers.register(actions, move_old_package, after='create_structure') def create_namespace_parser(obj_ref): """Create a namespace parser. Args: obj_ref (Extension): object reference to the actual extension Returns: NamespaceParser: parser for namespace cli argument """ class NamespaceParser(argparse.Action): """Consumes the values provided, but also appends the extension function to the extensions list. """ def __call__(self, parser, namespace, values, option_string=None): namespace.extensions.append(obj_ref) # Now the extra parameters can be stored setattr(namespace, self.dest, values) # save the namespace cli argument for later obj_ref.args = values return NamespaceParser def enforce_namespace_options(struct, opts): """Make sure options reflect the namespace usage.""" opts.setdefault('namespace', None) if opts['namespace']: opts['ns_list'] = utils.prepare_namespace(opts['namespace']) opts['root_pkg'] = opts['ns_list'][0] opts['qual_pkg'] = ".".join([opts['ns_list'][-1], opts['package']]) return struct, opts def add_namespace(struct, opts): """Prepend the namespace to a given file structure Args: struct (dict): directory structure as dictionary of dictionaries opts (dict): options of the project Returns: tuple(dict, dict): directory structure as dictionary of dictionaries and input options """ if not opts['namespace']: return struct, opts namespace = opts['ns_list'][-1].split('.') base_struct = struct struct = base_struct[opts['project']]['src'] pkg_struct = struct[opts['package']] del struct[opts['package']] for sub_package in namespace: struct[sub_package] = {'__init__.py': templates.namespace(opts)} struct = struct[sub_package] struct[opts['package']] = pkg_struct return base_struct, opts def move_old_package(struct, opts): """Move old package that may be eventually created without namespace Args: struct (dict): directory structure as dictionary of dictionaries opts (dict): options of the project Returns: tuple(dict, dict): directory structure as dictionary of dictionaries and input options """ old_path = join_path(opts['project'], 'src', opts['package']) namespace_path = opts['qual_pkg'].replace('.', os.sep) target = join_path(opts['project'], 'src', namespace_path) old_exists = opts['pretend'] or isdir(old_path) 
# ^ When pretending, pretend also an old folder exists # to show a worst case scenario log to the user... if old_exists and opts['qual_pkg'] != opts['package']: if not opts['pretend']: logger.warning( '\nA folder %r exists in the project directory, and it is ' 'likely to have been generated by a PyScaffold extension or ' 'manually by one of the current project authors.\n' 'Moving it to %r, since a namespace option was passed.\n' 'Please make sure to edit all the files that depend on this ' 'package to ensure the correct location.\n', opts['package'], namespace_path) utils.move(old_path, target=target, log=True, pretend=opts['pretend']) return struct, opts
[ "os.path.isdir", "os.path.join" ]
[((4027, 4077), 'os.path.join', 'join_path', (["opts['project']", '"""src"""', "opts['package']"], {}), "(opts['project'], 'src', opts['package'])\n", (4036, 4077), True, 'from os.path import join as join_path\n'), ((4150, 4199), 'os.path.join', 'join_path', (["opts['project']", '"""src"""', 'namespace_path'], {}), "(opts['project'], 'src', namespace_path)\n", (4159, 4199), True, 'from os.path import join as join_path\n'), ((4237, 4252), 'os.path.isdir', 'isdir', (['old_path'], {}), '(old_path)\n', (4242, 4252), False, 'from os.path import isdir\n')]
# encoding: utf-8 from mock import call, patch from django.template import Template, Context from django.test import TestCase from core.models import MockModel @patch("haystack.templatetags.more_like_this.SearchQuerySet") class MoreLikeThisTagTestCase(TestCase): def render(self, template, context): # Why on Earth does Django not have a TemplateTestCase yet? t = Template(template) c = Context(context) return t.render(c) def test_more_like_this_without_limit(self, mock_sqs): mock_model = MockModel.objects.get(pk=3) template = """{% load more_like_this %}{% more_like_this entry as related_content %}{% for rc in related_content %}{{ rc.id }}{% endfor %}""" context = {'entry': mock_model} mlt = mock_sqs.return_value.more_like_this mlt.return_value = [{"id": "test_id"}] self.assertEqual("test_id", self.render(template, context)) mlt.assert_called_once_with(mock_model) def test_more_like_this_with_limit(self, mock_sqs): mock_model = MockModel.objects.get(pk=3) template = """{% load more_like_this %}{% more_like_this entry as related_content limit 5 %}{% for rc in related_content %}{{ rc.id }}{% endfor %}""" context = {'entry': mock_model} mlt = mock_sqs.return_value.more_like_this mlt.return_value.__getitem__.return_value = [{"id": "test_id"}] self.assertEqual("test_id", self.render(template, context)) mlt.assert_called_once_with(mock_model) mock_sqs.assert_has_calls([call().more_like_this(mock_model), call().more_like_this().__getitem__(slice(None, 5))], any_order=True) def test_more_like_this_for_model(self, mock_sqs): mock_model = MockModel.objects.get(pk=3) template = """{% load more_like_this %}{% more_like_this entry as related_content for "core.mock" limit 5 %}{% for rc in related_content %}{{ rc.id }}{% endfor %}""" context = {'entry': mock_model} self.render(template, context) mock_sqs.assert_has_calls([call().models().more_like_this(mock_model), call().models().more_like_this().__getitem__(slice(None, 5))], any_order=True)
[ "core.models.MockModel.objects.get", "mock.patch", "django.template.Template", "django.template.Context", "mock.call" ]
[((165, 225), 'mock.patch', 'patch', (['"""haystack.templatetags.more_like_this.SearchQuerySet"""'], {}), "('haystack.templatetags.more_like_this.SearchQuerySet')\n", (170, 225), False, 'from mock import call, patch\n'), ((388, 406), 'django.template.Template', 'Template', (['template'], {}), '(template)\n', (396, 406), False, 'from django.template import Template, Context\n'), ((419, 435), 'django.template.Context', 'Context', (['context'], {}), '(context)\n', (426, 435), False, 'from django.template import Template, Context\n'), ((544, 571), 'core.models.MockModel.objects.get', 'MockModel.objects.get', ([], {'pk': '(3)'}), '(pk=3)\n', (565, 571), False, 'from core.models import MockModel\n'), ((1057, 1084), 'core.models.MockModel.objects.get', 'MockModel.objects.get', ([], {'pk': '(3)'}), '(pk=3)\n', (1078, 1084), False, 'from core.models import MockModel\n'), ((1813, 1840), 'core.models.MockModel.objects.get', 'MockModel.objects.get', ([], {'pk': '(3)'}), '(pk=3)\n', (1834, 1840), False, 'from core.models import MockModel\n'), ((1561, 1567), 'mock.call', 'call', ([], {}), '()\n', (1565, 1567), False, 'from mock import call, patch\n'), ((1631, 1637), 'mock.call', 'call', ([], {}), '()\n', (1635, 1637), False, 'from mock import call, patch\n'), ((2131, 2137), 'mock.call', 'call', ([], {}), '()\n', (2135, 2137), False, 'from mock import call, patch\n'), ((2210, 2216), 'mock.call', 'call', ([], {}), '()\n', (2214, 2216), False, 'from mock import call, patch\n')]
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import editdistance import os import sys import torch from fairseq.data import AddTargetDataset, Dictionary, FileAudioDataset, encoders from fairseq.data.data_utils import post_process from . import LegacyFairseqTask, register_task from .. import utils from ..logging import metrics class LabelEncoder(object): def __init__(self, dictionary): self.dictionary = dictionary def __call__(self, label): return self.dictionary.encode_line( label, append_eos=False, add_if_not_exist=False ) @register_task("audio_pretraining") class AudioPretrainingTask(LegacyFairseqTask): """""" @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" parser.add_argument("data", help="path to data directory") parser.add_argument( "--sample-rate", default=16000, type=int, help="target sample rate. audio files will be up/down sampled to this rate", ) parser.add_argument( "--normalize", action="store_true", help="if set, normalizes input to have 0 mean and unit variance", ) parser.add_argument( "--max-sample-size", default=None, type=int, help="max sample size to crop to for batching. default = min sample length", ) parser.add_argument( "--min-sample-size", default=None, type=int, help="min sample size to crop to for batching. default = same as --max-sample-size", ) parser.add_argument( "--enable-padding", action="store_true", help="pad shorter samples instead of cropping", ) parser.add_argument( "--labels", type=str, default=None, help="extension of the label file to load, if any", ) # Options for reporting WER metrics during validation. Only applicable to # Seq2Seq models during fine-tuning parser.add_argument( "--eval-wer", action="store_true", help="compute WER for Seq2Seq models", ) parser.add_argument( "--eval-wer-remove-bpe", default="letter", help="remove BPE tokens before scoring (can be sentencepiece, letter, and more)", ) def __init__(self, args, source_dictionary=None, target_dictionary=None): super().__init__(args) self._target_dictionary = target_dictionary self._source_dictionary = source_dictionary self.is_ctc = args.criterion == "ctc" if getattr(self.args, "eval_wer", False): assert args.labels is not None, "eval_wer can only be set during fine-tuning" @classmethod def setup_task(cls, args, **kwargs): """Setup the task (e.g., load dictionaries). Args: args (omegaconf.DictConfig): parsed command-line arguments """ if args.labels: dict_path = os.path.join(args.data, f"dict.{args.labels}.txt") target_dictionary = Dictionary.load(dict_path) else: target_dictionary = None return cls(args, target_dictionary=target_dictionary) def load_dataset(self, split, **kwargs): """Load a given dataset split. 
Args: split (str): name of the split (e.g., train, valid, test) """ manifest = os.path.join(self.args.data, "{}.tsv".format(split)) self.datasets[split] = FileAudioDataset( manifest, sample_rate=self.args.sample_rate, max_sample_size=self.args.max_sample_size, min_sample_size=self.args.max_sample_size, min_length=self.args.min_sample_size, pad=self.args.labels is not None or self.args.enable_padding, normalize=self.args.normalize, ) if self.args.labels: label_path = os.path.join(self.args.data, f"{split}.{self.args.labels}") labels = [] with open(label_path, "r") as f: for line in f: labels.append(line) process_label = LabelEncoder(self.target_dictionary) self.datasets[split] = AddTargetDataset( self.datasets[split], labels, pad=self.target_dictionary.pad(), eos=self.target_dictionary.eos(), batch_targets=True, process_label=process_label, add_to_input=not self.is_ctc, ) @property def source_dictionary(self): return self._source_dictionary @property def target_dictionary(self): """Return the :class:`~fairseq.data.Dictionary` for the language model.""" return self._target_dictionary def max_positions(self): """Maximum input length supported by the encoder.""" return (sys.maxsize, sys.maxsize) def filter_indices_by_size( self, indices, dataset, max_positions=None, ignore_invalid_inputs=False, ): # we do not need to filter by size in this task as dataloaders take care of this return indices def valid_step(self, sample, model, criterion): loss, sample_size, logging_output = super().valid_step(sample, model, criterion) if getattr(self.args, "eval_wer", False) and not self.is_ctc: metrics = self._inference_with_wer(self.sequence_generator, sample, model) logging_output["_num_char_errors"] = metrics["num_char_errors"] logging_output["_num_chars"] = metrics["num_chars"] logging_output["_num_word_errors"] = metrics["num_word_errors"] logging_output["_num_words"] = metrics["num_words"] return loss, sample_size, logging_output def build_model(self, args): model = super().build_model(args) if getattr(args, 'eval_wer', False) and not self.is_ctc: self.sequence_generator = self.build_generator([model], args, ) self.tokenizer = encoders.build_tokenizer(args) return model def _inference_with_wer(self, generator, sample, model): def decode(toks, escape_unk=True): s = self.target_dictionary.string( toks.int().cpu(), self.args.eval_wer_remove_bpe, escape_unk=escape_unk, extra_symbols_to_ignore={generator.eos}, ) if self.tokenizer: s = self.tokenizer.decode(s) return s num_word_errors, num_char_errors = 0, 0 num_chars, num_words = 0, 0 gen_out = self.inference_step(generator, [model], sample, None) for i in range(len(gen_out)): hyp = decode(gen_out[i][0]["tokens"]) ref = decode( utils.strip_pad(sample["target"][i], self.target_dictionary.pad()), escape_unk=True, ) hyp = post_process(hyp, self.args.eval_wer_remove_bpe).strip("_") ref = post_process(ref, self.args.eval_wer_remove_bpe).strip("_") num_char_errors += editdistance.eval(hyp, ref) num_chars += len(ref) hyp_words = hyp.split("_") ref_words = ref.split("_") num_word_errors += editdistance.eval(hyp_words, ref_words) num_words += len(ref_words) return { "num_char_errors": num_char_errors, "num_chars": num_chars, "num_word_errors": num_word_errors, "num_words": num_words, } def reduce_metrics(self, logging_outputs, criterion): super().reduce_metrics(logging_outputs, criterion) zero = torch.scalar_tensor(0.) 
num_char_errors = sum(log.get("_num_char_errors", zero) for log in logging_outputs) num_chars = sum(log.get("_num_chars", zero) for log in logging_outputs) num_word_errors = sum(log.get("_num_word_errors", zero) for log in logging_outputs) num_words = sum(log.get("_num_words", zero) for log in logging_outputs) metrics.log_scalar("_num_char_errors", num_char_errors) metrics.log_scalar("_num_chars", num_chars) metrics.log_scalar("_num_word_errors", num_word_errors) metrics.log_scalar("_num_words", num_words) if num_words > 0: metrics.log_derived( "uer", lambda meters: meters["_num_char_errors"].sum * 100.0 / meters["_num_chars"].sum if meters["_num_chars"].sum > 0 else float("nan") ) metrics.log_derived( "wer", lambda meters: meters["_num_word_errors"].sum * 100.0 / meters["_num_words"].sum if meters["_num_words"].sum > 0 else float("nan") )
[ "fairseq.data.Dictionary.load", "os.path.join", "torch.scalar_tensor", "fairseq.data.FileAudioDataset", "fairseq.data.encoders.build_tokenizer", "fairseq.data.data_utils.post_process", "editdistance.eval" ]
[((3879, 4176), 'fairseq.data.FileAudioDataset', 'FileAudioDataset', (['manifest'], {'sample_rate': 'self.args.sample_rate', 'max_sample_size': 'self.args.max_sample_size', 'min_sample_size': 'self.args.max_sample_size', 'min_length': 'self.args.min_sample_size', 'pad': '(self.args.labels is not None or self.args.enable_padding)', 'normalize': 'self.args.normalize'}), '(manifest, sample_rate=self.args.sample_rate,\n max_sample_size=self.args.max_sample_size, min_sample_size=self.args.\n max_sample_size, min_length=self.args.min_sample_size, pad=self.args.\n labels is not None or self.args.enable_padding, normalize=self.args.\n normalize)\n', (3895, 4176), False, 'from fairseq.data import AddTargetDataset, Dictionary, FileAudioDataset, encoders\n'), ((8140, 8164), 'torch.scalar_tensor', 'torch.scalar_tensor', (['(0.0)'], {}), '(0.0)\n', (8159, 8164), False, 'import torch\n'), ((3370, 3420), 'os.path.join', 'os.path.join', (['args.data', 'f"""dict.{args.labels}.txt"""'], {}), "(args.data, f'dict.{args.labels}.txt')\n", (3382, 3420), False, 'import os\n'), ((3453, 3479), 'fairseq.data.Dictionary.load', 'Dictionary.load', (['dict_path'], {}), '(dict_path)\n', (3468, 3479), False, 'from fairseq.data import AddTargetDataset, Dictionary, FileAudioDataset, encoders\n'), ((4308, 4367), 'os.path.join', 'os.path.join', (['self.args.data', 'f"""{split}.{self.args.labels}"""'], {}), "(self.args.data, f'{split}.{self.args.labels}')\n", (4320, 4367), False, 'import os\n'), ((6470, 6500), 'fairseq.data.encoders.build_tokenizer', 'encoders.build_tokenizer', (['args'], {}), '(args)\n', (6494, 6500), False, 'from fairseq.data import AddTargetDataset, Dictionary, FileAudioDataset, encoders\n'), ((7551, 7578), 'editdistance.eval', 'editdistance.eval', (['hyp', 'ref'], {}), '(hyp, ref)\n', (7568, 7578), False, 'import editdistance\n'), ((7722, 7761), 'editdistance.eval', 'editdistance.eval', (['hyp_words', 'ref_words'], {}), '(hyp_words, ref_words)\n', (7739, 7761), False, 'import editdistance\n'), ((7382, 7430), 'fairseq.data.data_utils.post_process', 'post_process', (['hyp', 'self.args.eval_wer_remove_bpe'], {}), '(hyp, self.args.eval_wer_remove_bpe)\n', (7394, 7430), False, 'from fairseq.data.data_utils import post_process\n'), ((7460, 7508), 'fairseq.data.data_utils.post_process', 'post_process', (['ref', 'self.args.eval_wer_remove_bpe'], {}), '(ref, self.args.eval_wer_remove_bpe)\n', (7472, 7508), False, 'from fairseq.data.data_utils import post_process\n')]
""" All your views aka. your template endpoints go here. There are two ways to create a view. 1. Create a new Subclass inheriting from one of the flask_template_master views 2. Use the view-factory function flask_template_master.views.create_template_endpoint Each view requires an 1 (and 2 optional) things: 1. An environment: The environment provides the templates and handles all options of how templates are rendered 2. (optional) An global provider: A global provider provides variables that are accessible in all templates of the endpoint 3. (optional) An compiler: The compiler gets the rendered template and can handle a postprocessing step and controls the data that is returned. This can e.g. be used to run a Latex compilation. """ import jinja2 from flask_template_master.compiler import LatexCompiler from flask_template_master.views import BaseTemplateView, create_template_endpoint from flask_template_master import Api from flask_template_master.global_provider import DictGlobalProvider from flask_template_master.environments import LATEX_TEMPLATE_CONFIG api = Api() # create an instance of an flask-restfull API. Always required! class TestView(BaseTemplateView): """This is an example of a view created as a subclass. This is a simple view using a Dict loader to provide all template strings inline. It does not use a compile step and simply returns the rendered template string on POST. It passes one value as a global variable. This can be seen in template b. The global variable will be overwritten, if a variable with the same name is passed by the POST request """ # The environment needs to be a jinja environment with a loader ENVIRONMENT = jinja2.Environment(loader=jinja2.DictLoader({'a': '{{ test }}', 'b': '{{ test }} {{ global }}'})) GLOBAL_PROVIDER = DictGlobalProvider({'global': 'This is a global value'}) # This registers '/class_test/' for the overview and '/class_test/<template_name> for the individual templates TestView.add_as_resource(api, '/class_test/') # This is an example on how to use the factory function # Setting up the jinja2 enviroemnt using a file loader with LaTex config environment = jinja2.Environment(loader=jinja2.FileSystemLoader('./templates'), **LATEX_TEMPLATE_CONFIG) compiler = LatexCompiler() create_template_endpoint(api, '/factory_test/', environment=environment, compiler=compiler)
[ "flask_template_master.compiler.LatexCompiler", "flask_template_master.views.create_template_endpoint", "flask_template_master.global_provider.DictGlobalProvider", "flask_template_master.Api", "jinja2.DictLoader", "jinja2.FileSystemLoader" ]
[((1086, 1091), 'flask_template_master.Api', 'Api', ([], {}), '()\n', (1089, 1091), False, 'from flask_template_master import Api\n'), ((2296, 2311), 'flask_template_master.compiler.LatexCompiler', 'LatexCompiler', ([], {}), '()\n', (2309, 2311), False, 'from flask_template_master.compiler import LatexCompiler\n'), ((2312, 2407), 'flask_template_master.views.create_template_endpoint', 'create_template_endpoint', (['api', '"""/factory_test/"""'], {'environment': 'environment', 'compiler': 'compiler'}), "(api, '/factory_test/', environment=environment,\n compiler=compiler)\n", (2336, 2407), False, 'from flask_template_master.views import BaseTemplateView, create_template_endpoint\n'), ((1833, 1889), 'flask_template_master.global_provider.DictGlobalProvider', 'DictGlobalProvider', (["{'global': 'This is a global value'}"], {}), "({'global': 'This is a global value'})\n", (1851, 1889), False, 'from flask_template_master.global_provider import DictGlobalProvider\n'), ((2220, 2258), 'jinja2.FileSystemLoader', 'jinja2.FileSystemLoader', (['"""./templates"""'], {}), "('./templates')\n", (2243, 2258), False, 'import jinja2\n'), ((1739, 1809), 'jinja2.DictLoader', 'jinja2.DictLoader', (["{'a': '{{ test }}', 'b': '{{ test }} {{ global }}'}"], {}), "({'a': '{{ test }}', 'b': '{{ test }} {{ global }}'})\n", (1756, 1809), False, 'import jinja2\n')]
import math import numpy as np import torch import torch.nn.functional as F from torch import nn class SimpleMLP(nn.Module): """Simple MLP function approximator for Q-Learning.""" def __init__(self, in_dim, out_dim, hidden_units=256, num_hidden_layers=1): super().__init__() self.input_layer = nn.Sequential(nn.Linear(in_dim, hidden_units), nn.ReLU()) self.hidden_layers = nn.Sequential( *[nn.Sequential(nn.Linear(hidden_units, hidden_units), nn.ReLU()) for _ in range(num_hidden_layers - 1)] ) self.output_layer = nn.Linear(hidden_units, out_dim) def forward(self, x): x = self.input_layer(x) x = self.hidden_layers(x) return self.output_layer(x) class NoisyLinear(nn.Module): """NoisyLinear Layer""" def __init__(self, in_dim, out_dim, std_init=0.4): super(NoisyLinear, self).__init__() self.in_features = in_dim self.out_features = out_dim self.std_init = std_init self.weight_mu = nn.Parameter(torch.empty(out_dim, in_dim)) self.weight_sigma = nn.Parameter(torch.empty(out_dim, in_dim)) self.register_buffer("weight_epsilon", torch.empty(out_dim, in_dim)) self.bias_mu = nn.Parameter(torch.empty(out_dim)) self.bias_sigma = nn.Parameter(torch.empty(out_dim)) self.register_buffer("bias_epsilon", torch.empty(out_dim)) self.reset_parameters() self.sample_noise() def reset_parameters(self): mu_range = 1.0 / math.sqrt(self.in_features) self.weight_mu.data.uniform_(-mu_range, mu_range) self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.in_features)) self.bias_mu.data.uniform_(-mu_range, mu_range) self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.out_features)) def _scale_noise(self, size): x = torch.randn(size) return x.sign().mul_(x.abs().sqrt_()) def sample_noise(self): epsilon_in = self._scale_noise(self.in_features) epsilon_out = self._scale_noise(self.out_features) weight_eps = epsilon_out.ger(epsilon_in) bias_eps = epsilon_out return weight_eps, bias_eps def forward(self, inp): if self.training: weight_eps, bias_eps = self.sample_noise() return F.linear( inp, self.weight_mu + self.weight_sigma * weight_eps, self.bias_mu + self.bias_sigma * bias_eps, ) else: return F.linear(inp, self.weight_mu, self.bias_mu) class ComplexMLP(nn.Module): """MLP function approximator for Q-Learning.""" def __init__( self, in_dim, out_dim, hidden_units=256, num_hidden_layers=1, noisy=False, dueling=False, sigma_init=0.5, atoms=1, ): super().__init__() self._noisy = noisy self._dueling = dueling self._sigma_init = sigma_init self._in_dim = np.prod(in_dim) self._hidden_units = hidden_units if self._dueling: num_hidden_layers = max(num_hidden_layers - 1, 2) self._num_hidden_layers = num_hidden_layers self._out_dim = out_dim self._atoms = atoms self.init_networks() def init_networks(self): if self._noisy: self.input_layer = nn.Sequential( NoisyLinear(self._in_dim, self._hidden_units, self._sigma_init), nn.ReLU(), ) self.hidden_layers = nn.Sequential( *[ nn.Sequential( NoisyLinear(self._hidden_units, self._hidden_units, self._sigma_init), nn.ReLU(), ) for _ in range(self._num_hidden_layers - 1) ] ) else: self.input_layer = nn.Sequential(nn.Linear(self._in_dim, self._hidden_units), nn.ReLU()) self.hidden_layers = nn.Sequential( *[ nn.Sequential(nn.Linear(self._hidden_units, self._hidden_units), nn.ReLU()) for _ in range(self._num_hidden_layers - 1) ] ) if self._dueling: """In dueling, we have two heads - one for estimating advantage function and one for estimating value function. 
If `noisy` is also true, then each of these layers will be NoisyLinear()""" if self._noisy: self.output_layer_adv = nn.Sequential( NoisyLinear(self._hidden_units, self._hidden_units, self._sigma_init), nn.ReLU(), NoisyLinear( self._hidden_units, self._out_dim * self._atoms, self._sigma_init, ), ) self.output_layer_val = nn.Sequential( NoisyLinear(self._hidden_units, self._hidden_units, self._sigma_init), nn.ReLU(), NoisyLinear( self._hidden_units, 1 * self._atoms, self._sigma_init, ), ) else: self.output_layer_adv = nn.Sequential( nn.Linear(self._hidden_units, self._hidden_units, self._sigma_init), nn.ReLU(), nn.Linear( self._hidden_units, self._out_dim * self._atoms, self._sigma_init, ), ) self.output_layer_val = nn.Sequential( nn.Linear(self._hidden_units, self._hidden_units, self._sigma_init), nn.ReLU(), nn.Linear( self._hidden_units, 1 * self._atoms, self._sigma_init, ), ) else: if self._noisy: self.output_layer = NoisyLinear(self._hidden_units, self._out_dim * self._atoms, self._sigma_init) else: self.output_layer = nn.Linear(self._hidden_units, self._out_dim * self._atoms) def forward(self, x): x = torch.flatten(x, start_dim=1) x = self.input_layer(x) x = self.hidden_layers(x) if self._dueling: adv = self.output_layer_adv(x) val = self.output_layer_val(x) if len(adv.shape) == 1: x = val + adv - adv.mean(0) else: x = val + adv - adv.mean(1).unsqueeze(1).expand(x.shape[0], self._out_dim) else: x = self.output_layer(x) return x class DistributionalMLP(ComplexMLP): """Distributional MLP function approximator for Q-Learning.""" def __init__( self, in_dim, out_dim, supports, hidden_units=256, num_hidden_layers=1, noisy=True, dueling=True, sigma_init=0.5, atoms=51, ): super().__init__( in_dim, out_dim, hidden_units, num_hidden_layers, noisy, dueling, sigma_init, atoms, ) self._supports = supports def forward(self, x): x = torch.flatten(x, start_dim=1) x = self.dist(x) x = torch.sum(x * self._supports, dim=2) return x def dist(self, x): x = self.input_layer(x) x = self.hidden_layers(x) if self._dueling: adv = self.output_layer_adv(x) adv = adv.view(-1, self._out_dim, self._atoms) val = self.output_layer_val(x) val = val.view(-1, 1, self._atoms) x = val + adv - adv.mean(dim=1, keepdim=True) else: x = self.output_layer(x) x = x.view(-1, self._out_dim, self._atoms) x = F.softmax(x, dim=-1) x = x.clamp(min=1e-3) return x
[ "torch.nn.functional.linear", "numpy.prod", "torch.nn.functional.softmax", "torch.nn.ReLU", "math.sqrt", "torch.sum", "torch.nn.Linear", "torch.empty", "torch.randn", "torch.flatten" ]
[((580, 612), 'torch.nn.Linear', 'nn.Linear', (['hidden_units', 'out_dim'], {}), '(hidden_units, out_dim)\n', (589, 612), False, 'from torch import nn\n'), ((1877, 1894), 'torch.randn', 'torch.randn', (['size'], {}), '(size)\n', (1888, 1894), False, 'import torch\n'), ((3022, 3037), 'numpy.prod', 'np.prod', (['in_dim'], {}), '(in_dim)\n', (3029, 3037), True, 'import numpy as np\n'), ((6396, 6425), 'torch.flatten', 'torch.flatten', (['x'], {'start_dim': '(1)'}), '(x, start_dim=1)\n', (6409, 6425), False, 'import torch\n'), ((7491, 7520), 'torch.flatten', 'torch.flatten', (['x'], {'start_dim': '(1)'}), '(x, start_dim=1)\n', (7504, 7520), False, 'import torch\n'), ((7558, 7594), 'torch.sum', 'torch.sum', (['(x * self._supports)'], {'dim': '(2)'}), '(x * self._supports, dim=2)\n', (7567, 7594), False, 'import torch\n'), ((8096, 8116), 'torch.nn.functional.softmax', 'F.softmax', (['x'], {'dim': '(-1)'}), '(x, dim=-1)\n', (8105, 8116), True, 'import torch.nn.functional as F\n'), ((337, 368), 'torch.nn.Linear', 'nn.Linear', (['in_dim', 'hidden_units'], {}), '(in_dim, hidden_units)\n', (346, 368), False, 'from torch import nn\n'), ((370, 379), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (377, 379), False, 'from torch import nn\n'), ((1043, 1071), 'torch.empty', 'torch.empty', (['out_dim', 'in_dim'], {}), '(out_dim, in_dim)\n', (1054, 1071), False, 'import torch\n'), ((1114, 1142), 'torch.empty', 'torch.empty', (['out_dim', 'in_dim'], {}), '(out_dim, in_dim)\n', (1125, 1142), False, 'import torch\n'), ((1191, 1219), 'torch.empty', 'torch.empty', (['out_dim', 'in_dim'], {}), '(out_dim, in_dim)\n', (1202, 1219), False, 'import torch\n'), ((1257, 1277), 'torch.empty', 'torch.empty', (['out_dim'], {}), '(out_dim)\n', (1268, 1277), False, 'import torch\n'), ((1318, 1338), 'torch.empty', 'torch.empty', (['out_dim'], {}), '(out_dim)\n', (1329, 1338), False, 'import torch\n'), ((1385, 1405), 'torch.empty', 'torch.empty', (['out_dim'], {}), '(out_dim)\n', (1396, 1405), False, 'import torch\n'), ((1525, 1552), 'math.sqrt', 'math.sqrt', (['self.in_features'], {}), '(self.in_features)\n', (1534, 1552), False, 'import math\n'), ((2331, 2440), 'torch.nn.functional.linear', 'F.linear', (['inp', '(self.weight_mu + self.weight_sigma * weight_eps)', '(self.bias_mu + self.bias_sigma * bias_eps)'], {}), '(inp, self.weight_mu + self.weight_sigma * weight_eps, self.bias_mu +\n self.bias_sigma * bias_eps)\n', (2339, 2440), True, 'import torch.nn.functional as F\n'), ((2533, 2576), 'torch.nn.functional.linear', 'F.linear', (['inp', 'self.weight_mu', 'self.bias_mu'], {}), '(inp, self.weight_mu, self.bias_mu)\n', (2541, 2576), True, 'import torch.nn.functional as F\n'), ((1664, 1691), 'math.sqrt', 'math.sqrt', (['self.in_features'], {}), '(self.in_features)\n', (1673, 1691), False, 'import math\n'), ((1800, 1828), 'math.sqrt', 'math.sqrt', (['self.out_features'], {}), '(self.out_features)\n', (1809, 1828), False, 'import math\n'), ((3506, 3515), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3513, 3515), False, 'from torch import nn\n'), ((3941, 3984), 'torch.nn.Linear', 'nn.Linear', (['self._in_dim', 'self._hidden_units'], {}), '(self._in_dim, self._hidden_units)\n', (3950, 3984), False, 'from torch import nn\n'), ((3986, 3995), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3993, 3995), False, 'from torch import nn\n'), ((6297, 6355), 'torch.nn.Linear', 'nn.Linear', (['self._hidden_units', '(self._out_dim * self._atoms)'], {}), '(self._hidden_units, self._out_dim * self._atoms)\n', (6306, 6355), False, 'from torch 
import nn\n'), ((4703, 4712), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4710, 4712), False, 'from torch import nn\n'), ((5094, 5103), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5101, 5103), False, 'from torch import nn\n'), ((5400, 5467), 'torch.nn.Linear', 'nn.Linear', (['self._hidden_units', 'self._hidden_units', 'self._sigma_init'], {}), '(self._hidden_units, self._hidden_units, self._sigma_init)\n', (5409, 5467), False, 'from torch import nn\n'), ((5489, 5498), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5496, 5498), False, 'from torch import nn\n'), ((5520, 5596), 'torch.nn.Linear', 'nn.Linear', (['self._hidden_units', '(self._out_dim * self._atoms)', 'self._sigma_init'], {}), '(self._hidden_units, self._out_dim * self._atoms, self._sigma_init)\n', (5529, 5596), False, 'from torch import nn\n'), ((5787, 5854), 'torch.nn.Linear', 'nn.Linear', (['self._hidden_units', 'self._hidden_units', 'self._sigma_init'], {}), '(self._hidden_units, self._hidden_units, self._sigma_init)\n', (5796, 5854), False, 'from torch import nn\n'), ((5876, 5885), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5883, 5885), False, 'from torch import nn\n'), ((5907, 5971), 'torch.nn.Linear', 'nn.Linear', (['self._hidden_units', '(1 * self._atoms)', 'self._sigma_init'], {}), '(self._hidden_units, 1 * self._atoms, self._sigma_init)\n', (5916, 5971), False, 'from torch import nn\n'), ((453, 490), 'torch.nn.Linear', 'nn.Linear', (['hidden_units', 'hidden_units'], {}), '(hidden_units, hidden_units)\n', (462, 490), False, 'from torch import nn\n'), ((492, 501), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (499, 501), False, 'from torch import nn\n'), ((3752, 3761), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3759, 3761), False, 'from torch import nn\n'), ((4098, 4147), 'torch.nn.Linear', 'nn.Linear', (['self._hidden_units', 'self._hidden_units'], {}), '(self._hidden_units, self._hidden_units)\n', (4107, 4147), False, 'from torch import nn\n'), ((4149, 4158), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4156, 4158), False, 'from torch import nn\n')]
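SimpleMLP in the sample above maps a flat observation vector to out_dim values, one per action, so its output shape is (batch, out_dim). A minimal usage sketch, assuming the SimpleMLP class from that sample is in scope; the dimensions are purely hypothetical:

import torch

# Hypothetical dimensions: 4-dimensional observations, 2 discrete actions.
q_net = SimpleMLP(in_dim=4, out_dim=2, hidden_units=64, num_hidden_layers=2)
obs = torch.rand(8, 4)                  # batch of 8 observations
q_values = q_net(obs)                   # shape (8, 2): one Q-value per action
greedy_actions = q_values.argmax(dim=1)
print(q_values.shape, greedy_actions.shape)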
import torch from torch import nn from torch.nn import functional as F class BicubicDownSample(nn.Module): def bicubic_kernel(self, x, a=-0.50): """ This equation is exactly copied from the website below: https://clouard.users.greyc.fr/Pantheon/experiments/rescaling/index-en.html#bicubic """ abs_x = torch.abs(x) if abs_x <= 1.0: return (a + 2.0) * torch.pow(abs_x, 3.0) - (a + 3.0) * torch.pow(abs_x, 2.0) + 1 elif 1.0 < abs_x < 2.0: return a * torch.pow(abs_x, 3) - 5.0 * a * torch.pow(abs_x, 2.0) + 8.0 * a * abs_x - 4.0 * a else: return 0.0 def __init__(self, factor=4, cuda=True, padding="reflect"): super().__init__() self.factor = factor size = factor * 4 k = torch.tensor( [self.bicubic_kernel((i - torch.floor(torch.tensor(size / 2)) + 0.5) / factor) for i in range(size)], dtype=torch.float32, ) k = k / torch.sum(k) # k = torch.einsum('i,j->ij', (k, k)) k1 = torch.reshape(k, shape=(1, 1, size, 1)) self.k1 = torch.cat([k1, k1, k1], dim=0) k2 = torch.reshape(k, shape=(1, 1, 1, size)) self.k2 = torch.cat([k2, k2, k2], dim=0) self.cuda = ".cuda" if cuda else "" self.padding = padding for param in self.parameters(): param.requires_grad = False def forward(self, x, nhwc=False, clip_round=False, byte_output=False): # x = torch.from_numpy(x).type('torch.FloatTensor') filter_height = self.factor * 4 filter_width = self.factor * 4 stride = self.factor pad_along_height = max(filter_height - stride, 0) pad_along_width = max(filter_width - stride, 0) filters1 = self.k1.type("torch{}.FloatTensor".format(self.cuda)) filters2 = self.k2.type("torch{}.FloatTensor".format(self.cuda)) # compute actual padding values for each side pad_top = pad_along_height // 2 pad_bottom = pad_along_height - pad_top pad_left = pad_along_width // 2 pad_right = pad_along_width - pad_left # apply mirror padding if nhwc: x = torch.transpose(torch.transpose(x, 2, 3), 1, 2) # NHWC to NCHW # downscaling performed by 1-d convolution x = F.pad(x, (0, 0, pad_top, pad_bottom), self.padding) x = F.conv2d(input=x, weight=filters1, stride=(stride, 1), groups=3) if clip_round: x = torch.clamp(torch.round(x), 0.0, 255.0) x = F.pad(x, (pad_left, pad_right, 0, 0), self.padding) x = F.conv2d(input=x, weight=filters2, stride=(1, stride), groups=3) if clip_round: x = torch.clamp(torch.round(x), 0.0, 255.0) if nhwc: x = torch.transpose(torch.transpose(x, 1, 3), 1, 2) if byte_output: return x.type("torch.{}.ByteTensor".format(self.cuda)) else: return x
[ "torch.nn.functional.conv2d", "torch.abs", "torch.pow", "torch.transpose", "torch.tensor", "torch.round", "torch.sum", "torch.nn.functional.pad", "torch.reshape", "torch.cat" ]
[((347, 359), 'torch.abs', 'torch.abs', (['x'], {}), '(x)\n', (356, 359), False, 'import torch\n'), ((1070, 1109), 'torch.reshape', 'torch.reshape', (['k'], {'shape': '(1, 1, size, 1)'}), '(k, shape=(1, 1, size, 1))\n', (1083, 1109), False, 'import torch\n'), ((1128, 1158), 'torch.cat', 'torch.cat', (['[k1, k1, k1]'], {'dim': '(0)'}), '([k1, k1, k1], dim=0)\n', (1137, 1158), False, 'import torch\n'), ((1172, 1211), 'torch.reshape', 'torch.reshape', (['k'], {'shape': '(1, 1, 1, size)'}), '(k, shape=(1, 1, 1, size))\n', (1185, 1211), False, 'import torch\n'), ((1230, 1260), 'torch.cat', 'torch.cat', (['[k2, k2, k2]'], {'dim': '(0)'}), '([k2, k2, k2], dim=0)\n', (1239, 1260), False, 'import torch\n'), ((2344, 2395), 'torch.nn.functional.pad', 'F.pad', (['x', '(0, 0, pad_top, pad_bottom)', 'self.padding'], {}), '(x, (0, 0, pad_top, pad_bottom), self.padding)\n', (2349, 2395), True, 'from torch.nn import functional as F\n'), ((2408, 2472), 'torch.nn.functional.conv2d', 'F.conv2d', ([], {'input': 'x', 'weight': 'filters1', 'stride': '(stride, 1)', 'groups': '(3)'}), '(input=x, weight=filters1, stride=(stride, 1), groups=3)\n', (2416, 2472), True, 'from torch.nn import functional as F\n'), ((2565, 2616), 'torch.nn.functional.pad', 'F.pad', (['x', '(pad_left, pad_right, 0, 0)', 'self.padding'], {}), '(x, (pad_left, pad_right, 0, 0), self.padding)\n', (2570, 2616), True, 'from torch.nn import functional as F\n'), ((2629, 2693), 'torch.nn.functional.conv2d', 'F.conv2d', ([], {'input': 'x', 'weight': 'filters2', 'stride': '(1, stride)', 'groups': '(3)'}), '(input=x, weight=filters2, stride=(1, stride), groups=3)\n', (2637, 2693), True, 'from torch.nn import functional as F\n'), ((998, 1010), 'torch.sum', 'torch.sum', (['k'], {}), '(k)\n', (1007, 1010), False, 'import torch\n'), ((2232, 2256), 'torch.transpose', 'torch.transpose', (['x', '(2)', '(3)'], {}), '(x, 2, 3)\n', (2247, 2256), False, 'import torch\n'), ((2524, 2538), 'torch.round', 'torch.round', (['x'], {}), '(x)\n', (2535, 2538), False, 'import torch\n'), ((2745, 2759), 'torch.round', 'torch.round', (['x'], {}), '(x)\n', (2756, 2759), False, 'import torch\n'), ((2823, 2847), 'torch.transpose', 'torch.transpose', (['x', '(1)', '(3)'], {}), '(x, 1, 3)\n', (2838, 2847), False, 'import torch\n'), ((416, 437), 'torch.pow', 'torch.pow', (['abs_x', '(3.0)'], {}), '(abs_x, 3.0)\n', (425, 437), False, 'import torch\n'), ((452, 473), 'torch.pow', 'torch.pow', (['abs_x', '(2.0)'], {}), '(abs_x, 2.0)\n', (461, 473), False, 'import torch\n'), ((533, 552), 'torch.pow', 'torch.pow', (['abs_x', '(3)'], {}), '(abs_x, 3)\n', (542, 552), False, 'import torch\n'), ((565, 586), 'torch.pow', 'torch.pow', (['abs_x', '(2.0)'], {}), '(abs_x, 2.0)\n', (574, 586), False, 'import torch\n'), ((875, 897), 'torch.tensor', 'torch.tensor', (['(size / 2)'], {}), '(size / 2)\n', (887, 897), False, 'import torch\n')]
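A small usage sketch for the BicubicDownSample sample above: with factor=4 and cuda=False the module mirror-pads and applies the two separable 1-d bicubic filters, so a 256x256 input comes back as 64x64. This assumes the class definition from that sample is in scope:

import torch

downsampler = BicubicDownSample(factor=4, cuda=False)
images = torch.rand(1, 3, 256, 256) * 255.0    # NCHW batch of one RGB-like image
small = downsampler(images, clip_round=True)        # rounded and clamped to [0, 255]
print(small.shape)                                  # torch.Size([1, 3, 64, 64])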
import nose2.tools from typing import Union from app.util import has_attributes class SampleClass: pass class TestUtil: @nose2.tools.params( ('SET_VALUE', True), (None, False), ('NO_ATTRIBUTE', False), (False, True), ('', True), (0, True), ) def test_has_attributes(self, value: Union[bool, int, str, None], ans: bool) -> None: obj = SampleClass() if value != 'NO_ATTRIBUTE': setattr(obj, 'attr', value) has_attr = has_attributes(obj, 'attr') assert has_attr is ans
[ "app.util.has_attributes" ]
[((522, 549), 'app.util.has_attributes', 'has_attributes', (['obj', '"""attr"""'], {}), "(obj, 'attr')\n", (536, 549), False, 'from app.util import has_attributes\n')]
# Copyright 2022 by Autodesk, Inc. # Permission to use, copy, modify, and distribute this software in object code form # for any purpose and without fee is hereby granted, provided that the above copyright # notice appears in all copies and that both that copyright notice and the limited # warranty and restricted rights notice below appear in all supporting documentation. # # AUTODESK PROVIDES THIS PROGRAM "AS IS" AND WITH ALL FAULTS. AUTODESK SPECIFICALLY # DISCLAIMS ANY IMPLIED WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR USE. # AUTODESK, INC. DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE # UNINTERRUPTED OR ERROR FREE. from dataclasses import dataclass, field import base64 import pprint import adsk.core app = adsk.core.Application.get() def b64_url_safe_encode(string): encoded_bytes = base64.urlsafe_b64encode(string.encode("utf-8")) encoded_str = str(encoded_bytes, "utf-8") return encoded_str.rstrip("=") def b64_url_safe_decode(string): return str(base64.urlsafe_b64decode(string.lstrip('a.') + "==="), "utf-8") def link_for_url(url: str) -> str: return f"<a href={url}>{url}</a>" @dataclass class FusionData: # This should be set at creation or at least validity checked BEFORE calling this data_file: adsk.core.DataFile = field(repr=False, default=None) # THe following are computed based on current state of Fusion and are not "printed" by default hub: adsk.core.DataHub = field(repr=False, init=False) project: adsk.core.DataProject = field(repr=False, init=False) folder: adsk.core.DataFolder = field(repr=False, init=False) user: adsk.core.User = field(repr=False, init=False) # All String Properties file_name: str = field(init=False) user_email: str = field(init=False) hub_name: str = field(init=False) hub_id: str = field(init=False) hub_id_decoded: str = field(init=False) hub_team_name: str = field(init=False) project_name: str = field(init=False) project_id: str = field(init=False) project_id_decoded: str = field(init=False) folder_name: str = field(init=False) folder_id: str = field(init=False) lineage_urn: str = field(init=False) version_urn: str = field(init=False) base64_lineage_urn: str = field(init=False) base64_version_urn: str = field(init=False) open_from_web: str = field(init=False) fusion_team_url: str = field(init=False) fusion_team_link: str = field(init=False) def __post_init__(self): # THe following are computed based on current state of Fusion and are not "printed" by default self.hub = app.data.activeHub self.project = self.data_file.parentProject self.folder = self.data_file.parentFolder self.user = app.currentUser # All String Properties self.file_name: str = self.data_file.name self.user_email: str = self.user.email self.hub_name: str = self.hub.name self.hub_id: str = self.hub.id self.hub_id_decoded: str = b64_url_safe_decode(self.hub_id) self.hub_team_name: str = self.hub_id_decoded.split(':')[-1] self.project_name: str = self.project.name self.project_id: str = self.project.id self.project_id_decoded: str = b64_url_safe_decode(self.project_id) self.folder_name: str = self.folder.name self.folder_id: str = self.folder.id self.lineage_urn: str = self.data_file.id self.version_urn: str = self.data_file.versionId self.base64_lineage_urn: str = b64_url_safe_encode(self.lineage_urn) self.base64_version_urn: str = b64_url_safe_encode(self.version_urn) team_base_url: str = 'autodesk360' self.open_from_web: str = f"fusion360://userEmail={self.user_email}&" \ f"lineageUrn={self.lineage_urn}&" \ f"hubUrl=https://{self.hub_team_name}.{team_base_url}.com&" \ 
f"documentName={self.file_name}" self.fusion_team_url: str = f"https://{self.hub_team_name}.{team_base_url}.com/g/data/{self.base64_lineage_urn}" self.fusion_team_link = link_for_url(self.fusion_team_url) def str_dict(self): return {k: v for k, v in self.__dict__.items() if isinstance(v, str)} def pretty_string(self): return pprint.pformat(self.str_dict())
[ "dataclasses.field" ]
[((1312, 1343), 'dataclasses.field', 'field', ([], {'repr': '(False)', 'default': 'None'}), '(repr=False, default=None)\n', (1317, 1343), False, 'from dataclasses import dataclass, field\n'), ((1473, 1502), 'dataclasses.field', 'field', ([], {'repr': '(False)', 'init': '(False)'}), '(repr=False, init=False)\n', (1478, 1502), False, 'from dataclasses import dataclass, field\n'), ((1540, 1569), 'dataclasses.field', 'field', ([], {'repr': '(False)', 'init': '(False)'}), '(repr=False, init=False)\n', (1545, 1569), False, 'from dataclasses import dataclass, field\n'), ((1605, 1634), 'dataclasses.field', 'field', ([], {'repr': '(False)', 'init': '(False)'}), '(repr=False, init=False)\n', (1610, 1634), False, 'from dataclasses import dataclass, field\n'), ((1662, 1691), 'dataclasses.field', 'field', ([], {'repr': '(False)', 'init': '(False)'}), '(repr=False, init=False)\n', (1667, 1691), False, 'from dataclasses import dataclass, field\n'), ((1742, 1759), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (1747, 1759), False, 'from dataclasses import dataclass, field\n'), ((1782, 1799), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (1787, 1799), False, 'from dataclasses import dataclass, field\n'), ((1820, 1837), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (1825, 1837), False, 'from dataclasses import dataclass, field\n'), ((1856, 1873), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (1861, 1873), False, 'from dataclasses import dataclass, field\n'), ((1900, 1917), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (1905, 1917), False, 'from dataclasses import dataclass, field\n'), ((1943, 1960), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (1948, 1960), False, 'from dataclasses import dataclass, field\n'), ((1985, 2002), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (1990, 2002), False, 'from dataclasses import dataclass, field\n'), ((2025, 2042), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (2030, 2042), False, 'from dataclasses import dataclass, field\n'), ((2073, 2090), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (2078, 2090), False, 'from dataclasses import dataclass, field\n'), ((2114, 2131), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (2119, 2131), False, 'from dataclasses import dataclass, field\n'), ((2153, 2170), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (2158, 2170), False, 'from dataclasses import dataclass, field\n'), ((2194, 2211), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (2199, 2211), False, 'from dataclasses import dataclass, field\n'), ((2235, 2252), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (2240, 2252), False, 'from dataclasses import dataclass, field\n'), ((2283, 2300), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (2288, 2300), False, 'from dataclasses import dataclass, field\n'), ((2331, 2348), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (2336, 2348), False, 'from dataclasses import dataclass, field\n'), ((2374, 2391), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (2379, 2391), False, 'from dataclasses import dataclass, field\n'), ((2419, 2436), 'dataclasses.field', 'field', ([], {'init': '(False)'}), 
'(init=False)\n', (2424, 2436), False, 'from dataclasses import dataclass, field\n'), ((2465, 2482), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (2470, 2482), False, 'from dataclasses import dataclass, field\n')]
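The extraction record above consists almost entirely of dataclasses.field(...) calls with init=False and repr=False. As a quick illustration of what such declarations look like in source form, here is a minimal sketch; the class name, attribute names, and __post_init__ logic are assumptions made for illustration only and are not taken from the original record.

from dataclasses import dataclass, field
from typing import Optional


@dataclass
class Record:
    # regular constructor argument, hidden from repr(), defaulting to None
    payload: Optional[dict] = field(repr=False, default=None)
    # derived attributes: excluded from __init__ and filled in later,
    # for example inside __post_init__
    checksum: str = field(repr=False, init=False, default='')
    size: int = field(init=False, default=0)

    def __post_init__(self):
        self.size = len(self.payload or {})
        self.checksum = str(hash(frozenset((self.payload or {}).items())))


record = Record(payload={'a': 1})
print(record)        # payload and checksum are hidden from the repr
print(record.size)   # 1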
""" This process creates the two kafka topics to be used. The input-topic with ten partitions and the output-topic with one partition. Also preloads the kafka cluster with test data (if flag is set to true). """ import os import time import json import logging from confluent_kafka.admin import AdminClient, NewTopic from confluent_kafka import Producer # defining logger logger = logging.getLogger() logger.setLevel(logging.INFO) # reading the environement variables defined on the docker compose KAFKA_CLUSTER = os.environ.get('KAFKA_CLUSTER_CONNECT', 'localhost:9092') LOAD_DATA = os.environ.get('LOAD_SAMPLE_DATA', False) logging.info( (f'>Env variables: KAFKA_CLUSTER_CONNECT={KAFKA_CLUSTER} ' f'LOAD_SAMPLE_DATA={LOAD_DATA}')) BROKER_CONFIG = {'bootstrap.servers': KAFKA_CLUSTER} def read_json_file(file_route: str) -> dict: """ Read the json configuration file to set topics and partitions. Args: - str, the route(with name) of the configuration file. Returns: - dict, with the configurations defined on the json file. """ with open(file_route, 'r') as f: config = json.load(f) logging.info('JSON file readed.') return config def create_topics(admin: object, config: dict) -> None: """Create the kafka topics based on the configuration file. Args: - object, the admin client kafka object. - dict, json configuration of the process. Returns: None. """ # read the topic configuration and create the NewTopic objects topics = [] for k, v in config.items(): topics.append(NewTopic( v['topic_name'], num_partitions=v['partitions_quantity'], replication_factor=1 ) ) logging.info(f'Starting the creation of the topics: {topics}...') creation_response = admin.create_topics(topics) # the response has futures (which runs asynchronously) so we validate them # to see if they succeeded or not for topic, f in creation_response.items(): try: f.result() logging.info(f'Creation of the topic {topic} completed.') except Exception as e: logger.error(f'Error creating the kafka topic: {topic}. {e}') raise Exception(f'Error creating the kafka topic: {topic}. {e}') def list_topics_and_config(admin: object) -> None: """Check the topics that exists at a specifid. And displays other configs of the Kafka Cluster. Args: - object, the admin client kafka object. Returns: None. """ list_response = admin.list_topics(timeout=5) # get all the broker info logging.info('>Broker details:') for counter, broker in enumerate(list_response.brokers.items(), start=1): logging.info(f'{counter}-Broker info: {broker}') logging.info('>Topics details:') # get all the topic names for counter, topic_data in enumerate(list_response.topics.items(), start=1): logging.info(f'{counter}-Topic info: {topic_data}') def load_sample_data(topic: str, sample_data: list) -> None: """Loads the sample data to the input kafka topic. This will load data across 10 different partitions. Args: - str, the topic name where the data is going to be loaded. - list, the sample data to be loaded by the producer across all the partitions of the specified topic. Returns: None """ producer = Producer(BROKER_CONFIG) # iterate through partitions for data in sample_data: for number in data['values']: try: producer.produce(topic, str(number), None, data['partition']) except Exception as e: logger.error( f'Producer failed to produce a message to the topic. {e}') raise Exception( f'Failed to produce a message from Kakfia. 
{e}') producer.poll(0) # ensure all the delivery queue has been loaded producer.flush() logging.info('Data successfully produced and loaded to the specify topic.') def main() -> None: """Orchestrates all the process execution. From configuring the cluster topics to load the sample input data. """ configuration_file = 'topic_config.json' data_file = 'sample_data.json' time.sleep(5) actual_path = os.path.dirname(__file__) configuration_path = os.path.join(actual_path, configuration_file) data_path = os.path.join(actual_path, data_file) config = read_json_file(configuration_path) # defining the admin client needed to create topics admin = AdminClient(BROKER_CONFIG) create_topics(admin, config) # this step its only for validation purposes list_topics_and_config(admin) # start the load of the sample data to the input topic if LOAD_DATA: in_topic_name = config['in_topic_conf']['topic_name'] sample_data = read_json_file(data_path) load_sample_data(in_topic_name, sample_data) if __name__ == '__main__': main()
[ "logging.getLogger", "confluent_kafka.admin.AdminClient", "os.environ.get", "os.path.join", "time.sleep", "confluent_kafka.Producer", "os.path.dirname", "json.load", "confluent_kafka.admin.NewTopic", "logging.info" ]
[((382, 401), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (399, 401), False, 'import logging\n'), ((516, 573), 'os.environ.get', 'os.environ.get', (['"""KAFKA_CLUSTER_CONNECT"""', '"""localhost:9092"""'], {}), "('KAFKA_CLUSTER_CONNECT', 'localhost:9092')\n", (530, 573), False, 'import os\n'), ((586, 627), 'os.environ.get', 'os.environ.get', (['"""LOAD_SAMPLE_DATA"""', '(False)'], {}), "('LOAD_SAMPLE_DATA', False)\n", (600, 627), False, 'import os\n'), ((628, 737), 'logging.info', 'logging.info', (['f""">Env variables: KAFKA_CLUSTER_CONNECT={KAFKA_CLUSTER} LOAD_SAMPLE_DATA={LOAD_DATA}"""'], {}), "(\n f'>Env variables: KAFKA_CLUSTER_CONNECT={KAFKA_CLUSTER} LOAD_SAMPLE_DATA={LOAD_DATA}'\n )\n", (640, 737), False, 'import logging\n'), ((1756, 1821), 'logging.info', 'logging.info', (['f"""Starting the creation of the topics: {topics}..."""'], {}), "(f'Starting the creation of the topics: {topics}...')\n", (1768, 1821), False, 'import logging\n'), ((2652, 2684), 'logging.info', 'logging.info', (['""">Broker details:"""'], {}), "('>Broker details:')\n", (2664, 2684), False, 'import logging\n'), ((2824, 2856), 'logging.info', 'logging.info', (['""">Topics details:"""'], {}), "('>Topics details:')\n", (2836, 2856), False, 'import logging\n'), ((3442, 3465), 'confluent_kafka.Producer', 'Producer', (['BROKER_CONFIG'], {}), '(BROKER_CONFIG)\n', (3450, 3465), False, 'from confluent_kafka import Producer\n'), ((4015, 4090), 'logging.info', 'logging.info', (['"""Data successfully produced and loaded to the specify topic."""'], {}), "('Data successfully produced and loaded to the specify topic.')\n", (4027, 4090), False, 'import logging\n'), ((4323, 4336), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (4333, 4336), False, 'import time\n'), ((4355, 4380), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4370, 4380), False, 'import os\n'), ((4406, 4451), 'os.path.join', 'os.path.join', (['actual_path', 'configuration_file'], {}), '(actual_path, configuration_file)\n', (4418, 4451), False, 'import os\n'), ((4468, 4504), 'os.path.join', 'os.path.join', (['actual_path', 'data_file'], {}), '(actual_path, data_file)\n', (4480, 4504), False, 'import os\n'), ((4621, 4647), 'confluent_kafka.admin.AdminClient', 'AdminClient', (['BROKER_CONFIG'], {}), '(BROKER_CONFIG)\n', (4632, 4647), False, 'from confluent_kafka.admin import AdminClient, NewTopic\n'), ((1133, 1145), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1142, 1145), False, 'import json\n'), ((1154, 1187), 'logging.info', 'logging.info', (['"""JSON file readed."""'], {}), "('JSON file readed.')\n", (1166, 1187), False, 'import logging\n'), ((2771, 2819), 'logging.info', 'logging.info', (['f"""{counter}-Broker info: {broker}"""'], {}), "(f'{counter}-Broker info: {broker}')\n", (2783, 2819), False, 'import logging\n'), ((2976, 3027), 'logging.info', 'logging.info', (['f"""{counter}-Topic info: {topic_data}"""'], {}), "(f'{counter}-Topic info: {topic_data}')\n", (2988, 3027), False, 'import logging\n'), ((1602, 1694), 'confluent_kafka.admin.NewTopic', 'NewTopic', (["v['topic_name']"], {'num_partitions': "v['partitions_quantity']", 'replication_factor': '(1)'}), "(v['topic_name'], num_partitions=v['partitions_quantity'],\n replication_factor=1)\n", (1610, 1694), False, 'from confluent_kafka.admin import AdminClient, NewTopic\n'), ((2086, 2143), 'logging.info', 'logging.info', (['f"""Creation of the topic {topic} completed."""'], {}), "(f'Creation of the topic {topic} completed.')\n", (2098, 2143), False, 'import 
logging\n')]
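The Kafka loader above only produces messages; for completeness, here is a minimal consumer sketch that reads the preloaded numbers back. The topic name, group id, and broker address are assumptions based on the script's defaults (the docstring's "input-topic" and the fallback "localhost:9092"), not values read from topic_config.json.

from confluent_kafka import Consumer

# assumed values; the real topic name comes from topic_config.json
consumer = Consumer({
    'bootstrap.servers': 'localhost:9092',
    'group.id': 'sample-reader',
    'auto.offset.reset': 'earliest',
})
consumer.subscribe(['input-topic'])

try:
    for _ in range(100):
        msg = consumer.poll(timeout=1.0)
        if msg is None:
            continue
        if msg.error():
            print(f'Consumer error: {msg.error()}')
            continue
        print(f'partition={msg.partition()} value={msg.value().decode()}')
finally:
    consumer.close()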
#!/usr/bin/python3
import os
import sys

os.system("clear")

print('''\033[91m
CREATED BY Hironotori
''')


def slowprint(s):
    for c in s + '\n':
        sys.stdout.write(c)
        sys.stdout.flush()


slowprint(''' \033[93m
[1] apt-pkg pip-pip3    [2] apt-pkg python
[3] apt-pkg python2     [4] apt-pkg bash
[5] apt-pkg git         [6] apt-pkg perl
[7] apt-pkg nano        [8] apt-pkg curl
[9] apt-pkg openssl     [10] apt-pkg openssh
[11] apt-pkg wget       [12] apt-pkg clang
[13] apt-pkg nmap       [14] apt-pkg w3m
[15] apt-pkg ruby       [16] apt-pkg dnsutils
[17] apt-pkg coreutils  [18] apt-pkg fish
[19] apt-pkg zip        [20] apt-pkg figlet
[21] apt-pkg cowsay     [22] apt-pkg unzip
[23] apt-pkg vim        [24] apt-pkg wcalc
[25] apt-pkg bmon       [26] apt-pkg unrar
[27] apt-pkg proot      [28] apt-pkg golang
[29] apt-pkg tsu        [30] apt-pkg tor
[31] apt-pkg php
[00] Install everything
[0] Exit''')

# commands run before any installation, whatever the choice
BASE_COMMANDS = [
    "apt upgrade -y", "pkg install", "pkg upgrade",
    "apt install", "apt upgrade", "apt update", "pkg update",
]

# menu choice -> package handled by that choice
PACKAGES = {
    '2': 'python', '3': 'python2', '4': 'bash', '5': 'git', '6': 'perl',
    '7': 'nano', '8': 'curl', '9': 'openssl', '10': 'openssh', '11': 'wget',
    '12': 'clang', '13': 'nmap', '14': 'w3m', '15': 'ruby', '16': 'dnsutils',
    '17': 'coreutils', '18': 'fish', '19': 'zip', '20': 'figlet',
    '21': 'cowsay', '22': 'unzip', '23': 'vim', '24': 'wcalc', '25': 'bmon',
    '26': 'unrar', '27': 'proot', '28': 'golang', '29': 'tsu', '31': 'php',
}


def upgrade_pip():
    # choice [1]: upgrade pip/setuptools instead of installing a package
    os.system("python -m pip install --upgrade pip")
    os.system("pip3 install --upgrade setuptools pip")


def install_package(name):
    # install and upgrade the package through both pkg and apt
    os.system(f"pkg install {name} -y")
    os.system(f"apt install {name} -y")
    os.system(f"pkg upgrade {name} -y")
    os.system(f"apt upgrade {name} -y")


print(" ")
choice = input("\033[93mSelect an option : ")

if choice == '0':
    sys.exit()

if choice in PACKAGES or choice in ('1', '30', '00'):
    for command in BASE_COMMANDS:
        os.system(command)

    if choice in ('1', '00'):
        upgrade_pip()

    if choice == '00':
        # install every package from the menu
        for package in PACKAGES.values():
            install_package(package)
    elif choice in PACKAGES:
        install_package(PACKAGES[choice])

    if choice in ('30', '00'):
        # tor has no separate upgrade step in the menu
        os.system("pkg install tor")

    os.system("termux-setup-storage")
    sys.exit()
[ "sys.stdout.write", "os.system", "sys.stdout.flush", "sys.exit" ]
[((53, 71), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (62, 71), False, 'import os\n'), ((1214, 1238), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (1223, 1238), False, 'import os\n'), ((1240, 1264), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (1249, 1264), False, 'import os\n'), ((1266, 1290), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (1275, 1290), False, 'import os\n'), ((1292, 1316), 'os.system', 'os.system', (['"""apt upgrade"""'], {}), "('apt upgrade')\n", (1301, 1316), False, 'import os\n'), ((1318, 1341), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (1327, 1341), False, 'import os\n'), ((1343, 1366), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (1352, 1366), False, 'import os\n'), ((1368, 1416), 'os.system', 'os.system', (['"""python -m pip install --upgrade pip"""'], {}), "('python -m pip install --upgrade pip')\n", (1377, 1416), False, 'import os\n'), ((1417, 1467), 'os.system', 'os.system', (['"""pip3 install --upgrade setuptools pip"""'], {}), "('pip3 install --upgrade setuptools pip')\n", (1426, 1467), False, 'import os\n'), ((1469, 1502), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (1478, 1502), False, 'import os\n'), ((1504, 1514), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1512, 1514), False, 'import sys\n'), ((1564, 1588), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (1573, 1588), False, 'import os\n'), ((1590, 1614), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (1599, 1614), False, 'import os\n'), ((1616, 1640), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (1625, 1640), False, 'import os\n'), ((1642, 1666), 'os.system', 'os.system', (['"""apt upgrade"""'], {}), "('apt upgrade')\n", (1651, 1666), False, 'import os\n'), ((1668, 1691), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (1677, 1691), False, 'import os\n'), ((1693, 1716), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (1702, 1716), False, 'import os\n'), ((1718, 1752), 'os.system', 'os.system', (['"""pkg install python -y"""'], {}), "('pkg install python -y')\n", (1727, 1752), False, 'import os\n'), ((1754, 1788), 'os.system', 'os.system', (['"""pkg upgrade python -y"""'], {}), "('pkg upgrade python -y')\n", (1763, 1788), False, 'import os\n'), ((1790, 1824), 'os.system', 'os.system', (['"""apt install python -y"""'], {}), "('apt install python -y')\n", (1799, 1824), False, 'import os\n'), ((1826, 1860), 'os.system', 'os.system', (['"""apt upgrade python -y"""'], {}), "('apt upgrade python -y')\n", (1835, 1860), False, 'import os\n'), ((1862, 1895), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (1871, 1895), False, 'import os\n'), ((1897, 1907), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1905, 1907), False, 'import sys\n'), ((1957, 1981), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (1966, 1981), False, 'import os\n'), ((1983, 2007), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (1992, 2007), False, 'import os\n'), ((2009, 2033), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (2018, 2033), False, 'import os\n'), ((2035, 2059), 'os.system', 'os.system', (['"""apt upgrade"""'], {}), "('apt upgrade')\n", (2044, 
2059), False, 'import os\n'), ((2061, 2084), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (2070, 2084), False, 'import os\n'), ((2086, 2109), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (2095, 2109), False, 'import os\n'), ((2111, 2146), 'os.system', 'os.system', (['"""pkg install python2 -y"""'], {}), "('pkg install python2 -y')\n", (2120, 2146), False, 'import os\n'), ((2148, 2183), 'os.system', 'os.system', (['"""pkg upgrade python2 -y"""'], {}), "('pkg upgrade python2 -y')\n", (2157, 2183), False, 'import os\n'), ((2185, 2220), 'os.system', 'os.system', (['"""apt install python2 -y"""'], {}), "('apt install python2 -y')\n", (2194, 2220), False, 'import os\n'), ((2222, 2257), 'os.system', 'os.system', (['"""apt upgrade python2 -y"""'], {}), "('apt upgrade python2 -y')\n", (2231, 2257), False, 'import os\n'), ((2259, 2292), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (2268, 2292), False, 'import os\n'), ((2294, 2304), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2302, 2304), False, 'import sys\n'), ((2354, 2378), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (2363, 2378), False, 'import os\n'), ((2380, 2404), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (2389, 2404), False, 'import os\n'), ((2406, 2430), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (2415, 2430), False, 'import os\n'), ((2432, 2456), 'os.system', 'os.system', (['"""apt upgrade"""'], {}), "('apt upgrade')\n", (2441, 2456), False, 'import os\n'), ((2458, 2481), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (2467, 2481), False, 'import os\n'), ((2483, 2506), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (2492, 2506), False, 'import os\n'), ((2508, 2537), 'os.system', 'os.system', (['"""pkg install bash"""'], {}), "('pkg install bash')\n", (2517, 2537), False, 'import os\n'), ((2539, 2568), 'os.system', 'os.system', (['"""apt install bash"""'], {}), "('apt install bash')\n", (2548, 2568), False, 'import os\n'), ((2570, 2599), 'os.system', 'os.system', (['"""pkg upgrade bash"""'], {}), "('pkg upgrade bash')\n", (2579, 2599), False, 'import os\n'), ((2601, 2630), 'os.system', 'os.system', (['"""apt upgrade bash"""'], {}), "('apt upgrade bash')\n", (2610, 2630), False, 'import os\n'), ((2632, 2665), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (2641, 2665), False, 'import os\n'), ((2667, 2677), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2675, 2677), False, 'import sys\n'), ((2727, 2751), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (2736, 2751), False, 'import os\n'), ((2753, 2777), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (2762, 2777), False, 'import os\n'), ((2779, 2803), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (2788, 2803), False, 'import os\n'), ((2805, 2829), 'os.system', 'os.system', (['"""apt upgrade"""'], {}), "('apt upgrade')\n", (2814, 2829), False, 'import os\n'), ((2831, 2854), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (2840, 2854), False, 'import os\n'), ((2856, 2879), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (2865, 2879), False, 'import os\n'), ((2881, 2912), 'os.system', 'os.system', (['"""apt install git -y"""'], {}), "('apt install git -y')\n", 
(2890, 2912), False, 'import os\n'), ((2914, 2945), 'os.system', 'os.system', (['"""pkg install git -y"""'], {}), "('pkg install git -y')\n", (2923, 2945), False, 'import os\n'), ((2947, 2978), 'os.system', 'os.system', (['"""pkg upgrade git -y"""'], {}), "('pkg upgrade git -y')\n", (2956, 2978), False, 'import os\n'), ((2980, 3011), 'os.system', 'os.system', (['"""apt upgrade git -y"""'], {}), "('apt upgrade git -y')\n", (2989, 3011), False, 'import os\n'), ((3013, 3046), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (3022, 3046), False, 'import os\n'), ((3048, 3058), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3056, 3058), False, 'import sys\n'), ((3108, 3132), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (3117, 3132), False, 'import os\n'), ((3134, 3158), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (3143, 3158), False, 'import os\n'), ((3160, 3184), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (3169, 3184), False, 'import os\n'), ((3186, 3210), 'os.system', 'os.system', (['"""apt upgrade"""'], {}), "('apt upgrade')\n", (3195, 3210), False, 'import os\n'), ((3212, 3235), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (3221, 3235), False, 'import os\n'), ((3237, 3260), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (3246, 3260), False, 'import os\n'), ((3262, 3294), 'os.system', 'os.system', (['"""pkg install perl -y"""'], {}), "('pkg install perl -y')\n", (3271, 3294), False, 'import os\n'), ((3296, 3328), 'os.system', 'os.system', (['"""apt install perl -y"""'], {}), "('apt install perl -y')\n", (3305, 3328), False, 'import os\n'), ((3330, 3362), 'os.system', 'os.system', (['"""pkg upgrade perl -y"""'], {}), "('pkg upgrade perl -y')\n", (3339, 3362), False, 'import os\n'), ((3364, 3396), 'os.system', 'os.system', (['"""apt upgrade perl -y"""'], {}), "('apt upgrade perl -y')\n", (3373, 3396), False, 'import os\n'), ((3398, 3431), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (3407, 3431), False, 'import os\n'), ((3433, 3443), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3441, 3443), False, 'import sys\n'), ((3493, 3517), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (3502, 3517), False, 'import os\n'), ((3519, 3543), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (3528, 3543), False, 'import os\n'), ((3545, 3569), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (3554, 3569), False, 'import os\n'), ((3571, 3595), 'os.system', 'os.system', (['"""apt upgrade"""'], {}), "('apt upgrade')\n", (3580, 3595), False, 'import os\n'), ((3597, 3620), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (3606, 3620), False, 'import os\n'), ((3622, 3645), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (3631, 3645), False, 'import os\n'), ((3647, 3679), 'os.system', 'os.system', (['"""pkg install nano -y"""'], {}), "('pkg install nano -y')\n", (3656, 3679), False, 'import os\n'), ((3681, 3713), 'os.system', 'os.system', (['"""apt install nano -y"""'], {}), "('apt install nano -y')\n", (3690, 3713), False, 'import os\n'), ((3715, 3747), 'os.system', 'os.system', (['"""pkg upgrade nano -y"""'], {}), "('pkg upgrade nano -y')\n", (3724, 3747), False, 'import os\n'), ((3749, 3781), 'os.system', 'os.system', (['"""apt upgrade nano -y"""'], 
{}), "('apt upgrade nano -y')\n", (3758, 3781), False, 'import os\n'), ((3783, 3816), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (3792, 3816), False, 'import os\n'), ((3818, 3828), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3826, 3828), False, 'import sys\n'), ((3878, 3902), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (3887, 3902), False, 'import os\n'), ((3904, 3928), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (3913, 3928), False, 'import os\n'), ((3930, 3954), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (3939, 3954), False, 'import os\n'), ((3956, 3980), 'os.system', 'os.system', (['"""apt upgrade"""'], {}), "('apt upgrade')\n", (3965, 3980), False, 'import os\n'), ((3982, 4005), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (3991, 4005), False, 'import os\n'), ((4007, 4030), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (4016, 4030), False, 'import os\n'), ((4032, 4064), 'os.system', 'os.system', (['"""pkg install curl -y"""'], {}), "('pkg install curl -y')\n", (4041, 4064), False, 'import os\n'), ((4066, 4098), 'os.system', 'os.system', (['"""apt install curl -y"""'], {}), "('apt install curl -y')\n", (4075, 4098), False, 'import os\n'), ((4100, 4132), 'os.system', 'os.system', (['"""pkg upgrade curl -y"""'], {}), "('pkg upgrade curl -y')\n", (4109, 4132), False, 'import os\n'), ((4134, 4166), 'os.system', 'os.system', (['"""apt upgrade curl -y"""'], {}), "('apt upgrade curl -y')\n", (4143, 4166), False, 'import os\n'), ((4168, 4201), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (4177, 4201), False, 'import os\n'), ((4203, 4213), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4211, 4213), False, 'import sys\n'), ((4263, 4287), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (4272, 4287), False, 'import os\n'), ((4289, 4313), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (4298, 4313), False, 'import os\n'), ((4315, 4339), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (4324, 4339), False, 'import os\n'), ((4341, 4365), 'os.system', 'os.system', (['"""apt upgrate"""'], {}), "('apt upgrate')\n", (4350, 4365), False, 'import os\n'), ((4367, 4390), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (4376, 4390), False, 'import os\n'), ((4392, 4415), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (4401, 4415), False, 'import os\n'), ((4417, 4452), 'os.system', 'os.system', (['"""pkg install openssl -y"""'], {}), "('pkg install openssl -y')\n", (4426, 4452), False, 'import os\n'), ((4454, 4489), 'os.system', 'os.system', (['"""apt install openssl -y"""'], {}), "('apt install openssl -y')\n", (4463, 4489), False, 'import os\n'), ((4491, 4526), 'os.system', 'os.system', (['"""pkg upgrade openssl -y"""'], {}), "('pkg upgrade openssl -y')\n", (4500, 4526), False, 'import os\n'), ((4528, 4563), 'os.system', 'os.system', (['"""apt upgrade openssl -y"""'], {}), "('apt upgrade openssl -y')\n", (4537, 4563), False, 'import os\n'), ((4565, 4598), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (4574, 4598), False, 'import os\n'), ((4600, 4610), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4608, 4610), False, 'import sys\n'), ((4661, 4685), 'os.system', 'os.system', (['"""pkg 
install"""'], {}), "('pkg install')\n", (4670, 4685), False, 'import os\n'), ((4687, 4711), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (4696, 4711), False, 'import os\n'), ((4713, 4737), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (4722, 4737), False, 'import os\n'), ((4739, 4763), 'os.system', 'os.system', (['"""apt upgrate"""'], {}), "('apt upgrate')\n", (4748, 4763), False, 'import os\n'), ((4765, 4788), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (4774, 4788), False, 'import os\n'), ((4790, 4813), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (4799, 4813), False, 'import os\n'), ((4815, 4850), 'os.system', 'os.system', (['"""pkg install openssh -y"""'], {}), "('pkg install openssh -y')\n", (4824, 4850), False, 'import os\n'), ((4852, 4887), 'os.system', 'os.system', (['"""apt install openssh -y"""'], {}), "('apt install openssh -y')\n", (4861, 4887), False, 'import os\n'), ((4889, 4924), 'os.system', 'os.system', (['"""pkg upgrade openssh -y"""'], {}), "('pkg upgrade openssh -y')\n", (4898, 4924), False, 'import os\n'), ((4926, 4961), 'os.system', 'os.system', (['"""apt upgrade openssh -y"""'], {}), "('apt upgrade openssh -y')\n", (4935, 4961), False, 'import os\n'), ((4963, 4996), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (4972, 4996), False, 'import os\n'), ((4998, 5008), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5006, 5008), False, 'import sys\n'), ((5059, 5083), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (5068, 5083), False, 'import os\n'), ((5085, 5109), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (5094, 5109), False, 'import os\n'), ((5111, 5135), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (5120, 5135), False, 'import os\n'), ((5137, 5161), 'os.system', 'os.system', (['"""apt upgrate"""'], {}), "('apt upgrate')\n", (5146, 5161), False, 'import os\n'), ((5163, 5186), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (5172, 5186), False, 'import os\n'), ((5188, 5211), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (5197, 5211), False, 'import os\n'), ((5213, 5245), 'os.system', 'os.system', (['"""pkg install wget -y"""'], {}), "('pkg install wget -y')\n", (5222, 5245), False, 'import os\n'), ((5247, 5279), 'os.system', 'os.system', (['"""apt install wget -y"""'], {}), "('apt install wget -y')\n", (5256, 5279), False, 'import os\n'), ((5281, 5313), 'os.system', 'os.system', (['"""pkg upgrade wget -y"""'], {}), "('pkg upgrade wget -y')\n", (5290, 5313), False, 'import os\n'), ((5315, 5347), 'os.system', 'os.system', (['"""apt upgrade wget -y"""'], {}), "('apt upgrade wget -y')\n", (5324, 5347), False, 'import os\n'), ((5349, 5382), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (5358, 5382), False, 'import os\n'), ((5384, 5394), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5392, 5394), False, 'import sys\n'), ((5445, 5469), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (5454, 5469), False, 'import os\n'), ((5471, 5495), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (5480, 5495), False, 'import os\n'), ((5497, 5521), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (5506, 5521), False, 'import os\n'), ((5523, 5547), 'os.system', 
'os.system', (['"""apt upgrate"""'], {}), "('apt upgrate')\n", (5532, 5547), False, 'import os\n'), ((5549, 5572), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (5558, 5572), False, 'import os\n'), ((5574, 5597), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (5583, 5597), False, 'import os\n'), ((5599, 5632), 'os.system', 'os.system', (['"""pkg install clang -y"""'], {}), "('pkg install clang -y')\n", (5608, 5632), False, 'import os\n'), ((5634, 5667), 'os.system', 'os.system', (['"""apt install clang -y"""'], {}), "('apt install clang -y')\n", (5643, 5667), False, 'import os\n'), ((5669, 5702), 'os.system', 'os.system', (['"""pkg upgrade clang -y"""'], {}), "('pkg upgrade clang -y')\n", (5678, 5702), False, 'import os\n'), ((5704, 5737), 'os.system', 'os.system', (['"""apt upgrade clang -y"""'], {}), "('apt upgrade clang -y')\n", (5713, 5737), False, 'import os\n'), ((5739, 5772), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (5748, 5772), False, 'import os\n'), ((5774, 5784), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5782, 5784), False, 'import sys\n'), ((5835, 5859), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (5844, 5859), False, 'import os\n'), ((5861, 5885), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (5870, 5885), False, 'import os\n'), ((5887, 5911), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (5896, 5911), False, 'import os\n'), ((5913, 5937), 'os.system', 'os.system', (['"""apt upgrate"""'], {}), "('apt upgrate')\n", (5922, 5937), False, 'import os\n'), ((5939, 5962), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (5948, 5962), False, 'import os\n'), ((5964, 5987), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (5973, 5987), False, 'import os\n'), ((5989, 6021), 'os.system', 'os.system', (['"""pkg install nmap -y"""'], {}), "('pkg install nmap -y')\n", (5998, 6021), False, 'import os\n'), ((6023, 6055), 'os.system', 'os.system', (['"""apt install nmap -y"""'], {}), "('apt install nmap -y')\n", (6032, 6055), False, 'import os\n'), ((6057, 6089), 'os.system', 'os.system', (['"""pkg upgrade nmap -y"""'], {}), "('pkg upgrade nmap -y')\n", (6066, 6089), False, 'import os\n'), ((6091, 6123), 'os.system', 'os.system', (['"""apt upgrade nmap -y"""'], {}), "('apt upgrade nmap -y')\n", (6100, 6123), False, 'import os\n'), ((6125, 6158), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (6134, 6158), False, 'import os\n'), ((6160, 6170), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6168, 6170), False, 'import sys\n'), ((6221, 6245), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (6230, 6245), False, 'import os\n'), ((6247, 6271), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (6256, 6271), False, 'import os\n'), ((6273, 6297), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (6282, 6297), False, 'import os\n'), ((6299, 6323), 'os.system', 'os.system', (['"""apt upgrate"""'], {}), "('apt upgrate')\n", (6308, 6323), False, 'import os\n'), ((6325, 6348), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (6334, 6348), False, 'import os\n'), ((6350, 6373), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (6359, 6373), False, 'import os\n'), ((6375, 6406), 'os.system', 
'os.system', (['"""pkg install w3m -y"""'], {}), "('pkg install w3m -y')\n", (6384, 6406), False, 'import os\n'), ((6408, 6439), 'os.system', 'os.system', (['"""apt install w3m -y"""'], {}), "('apt install w3m -y')\n", (6417, 6439), False, 'import os\n'), ((6441, 6472), 'os.system', 'os.system', (['"""pkg upgrade w3m -y"""'], {}), "('pkg upgrade w3m -y')\n", (6450, 6472), False, 'import os\n'), ((6474, 6505), 'os.system', 'os.system', (['"""apt upgrade w3m -y"""'], {}), "('apt upgrade w3m -y')\n", (6483, 6505), False, 'import os\n'), ((6507, 6540), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (6516, 6540), False, 'import os\n'), ((6542, 6552), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6550, 6552), False, 'import sys\n'), ((6603, 6627), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (6612, 6627), False, 'import os\n'), ((6629, 6653), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (6638, 6653), False, 'import os\n'), ((6655, 6679), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (6664, 6679), False, 'import os\n'), ((6681, 6705), 'os.system', 'os.system', (['"""apt upgrate"""'], {}), "('apt upgrate')\n", (6690, 6705), False, 'import os\n'), ((6707, 6730), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (6716, 6730), False, 'import os\n'), ((6732, 6755), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (6741, 6755), False, 'import os\n'), ((6757, 6789), 'os.system', 'os.system', (['"""pkg install ruby -y"""'], {}), "('pkg install ruby -y')\n", (6766, 6789), False, 'import os\n'), ((6791, 6823), 'os.system', 'os.system', (['"""apt install ruby -y"""'], {}), "('apt install ruby -y')\n", (6800, 6823), False, 'import os\n'), ((6825, 6857), 'os.system', 'os.system', (['"""pkg upgrade ruby -y"""'], {}), "('pkg upgrade ruby -y')\n", (6834, 6857), False, 'import os\n'), ((6859, 6891), 'os.system', 'os.system', (['"""apt upgrade ruby -y"""'], {}), "('apt upgrade ruby -y')\n", (6868, 6891), False, 'import os\n'), ((6893, 6926), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (6902, 6926), False, 'import os\n'), ((6928, 6938), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6936, 6938), False, 'import sys\n'), ((6989, 7013), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (6998, 7013), False, 'import os\n'), ((7015, 7039), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (7024, 7039), False, 'import os\n'), ((7041, 7065), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (7050, 7065), False, 'import os\n'), ((7067, 7091), 'os.system', 'os.system', (['"""apt upgrate"""'], {}), "('apt upgrate')\n", (7076, 7091), False, 'import os\n'), ((7093, 7116), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (7102, 7116), False, 'import os\n'), ((7118, 7141), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (7127, 7141), False, 'import os\n'), ((7143, 7179), 'os.system', 'os.system', (['"""pkg install dnsutils -y"""'], {}), "('pkg install dnsutils -y')\n", (7152, 7179), False, 'import os\n'), ((7181, 7217), 'os.system', 'os.system', (['"""apt install dnsutils -y"""'], {}), "('apt install dnsutils -y')\n", (7190, 7217), False, 'import os\n'), ((7219, 7255), 'os.system', 'os.system', (['"""pkg upgrade dnsutils -y"""'], {}), "('pkg upgrade dnsutils -y')\n", (7228, 
7255), False, 'import os\n'), ((7257, 7293), 'os.system', 'os.system', (['"""apt upgrade dnsutils -y"""'], {}), "('apt upgrade dnsutils -y')\n", (7266, 7293), False, 'import os\n'), ((7295, 7328), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (7304, 7328), False, 'import os\n'), ((7330, 7340), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7338, 7340), False, 'import sys\n'), ((7391, 7415), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (7400, 7415), False, 'import os\n'), ((7417, 7441), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (7426, 7441), False, 'import os\n'), ((7443, 7467), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (7452, 7467), False, 'import os\n'), ((7469, 7493), 'os.system', 'os.system', (['"""apt upgrate"""'], {}), "('apt upgrate')\n", (7478, 7493), False, 'import os\n'), ((7495, 7518), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (7504, 7518), False, 'import os\n'), ((7520, 7543), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (7529, 7543), False, 'import os\n'), ((7545, 7582), 'os.system', 'os.system', (['"""pkg install coreutils -y"""'], {}), "('pkg install coreutils -y')\n", (7554, 7582), False, 'import os\n'), ((7584, 7621), 'os.system', 'os.system', (['"""apt install coreutils -y"""'], {}), "('apt install coreutils -y')\n", (7593, 7621), False, 'import os\n'), ((7623, 7660), 'os.system', 'os.system', (['"""pkg upgrade coreutils -y"""'], {}), "('pkg upgrade coreutils -y')\n", (7632, 7660), False, 'import os\n'), ((7662, 7699), 'os.system', 'os.system', (['"""apt upgrade coreutils -y"""'], {}), "('apt upgrade coreutils -y')\n", (7671, 7699), False, 'import os\n'), ((7701, 7734), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (7710, 7734), False, 'import os\n'), ((7736, 7746), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7744, 7746), False, 'import sys\n'), ((7797, 7821), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (7806, 7821), False, 'import os\n'), ((7823, 7847), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (7832, 7847), False, 'import os\n'), ((7849, 7873), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (7858, 7873), False, 'import os\n'), ((7875, 7899), 'os.system', 'os.system', (['"""apt upgrate"""'], {}), "('apt upgrate')\n", (7884, 7899), False, 'import os\n'), ((7901, 7924), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (7910, 7924), False, 'import os\n'), ((7926, 7949), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (7935, 7949), False, 'import os\n'), ((7951, 7983), 'os.system', 'os.system', (['"""pkg install fish -y"""'], {}), "('pkg install fish -y')\n", (7960, 7983), False, 'import os\n'), ((7985, 8017), 'os.system', 'os.system', (['"""apt install fish -y"""'], {}), "('apt install fish -y')\n", (7994, 8017), False, 'import os\n'), ((8019, 8051), 'os.system', 'os.system', (['"""pkg upgrade fish -y"""'], {}), "('pkg upgrade fish -y')\n", (8028, 8051), False, 'import os\n'), ((8053, 8085), 'os.system', 'os.system', (['"""apt upgrade fish -y"""'], {}), "('apt upgrade fish -y')\n", (8062, 8085), False, 'import os\n'), ((8087, 8120), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (8096, 8120), False, 'import os\n'), ((8122, 8132), 
'sys.exit', 'sys.exit', ([], {}), '()\n', (8130, 8132), False, 'import sys\n'), ((8183, 8207), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (8192, 8207), False, 'import os\n'), ((8209, 8233), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (8218, 8233), False, 'import os\n'), ((8235, 8259), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (8244, 8259), False, 'import os\n'), ((8261, 8285), 'os.system', 'os.system', (['"""apt upgrate"""'], {}), "('apt upgrate')\n", (8270, 8285), False, 'import os\n'), ((8287, 8310), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (8296, 8310), False, 'import os\n'), ((8312, 8335), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (8321, 8335), False, 'import os\n'), ((8337, 8368), 'os.system', 'os.system', (['"""pkg install zip -y"""'], {}), "('pkg install zip -y')\n", (8346, 8368), False, 'import os\n'), ((8370, 8401), 'os.system', 'os.system', (['"""apt install zip -y"""'], {}), "('apt install zip -y')\n", (8379, 8401), False, 'import os\n'), ((8403, 8434), 'os.system', 'os.system', (['"""pkg upgrade zip -y"""'], {}), "('pkg upgrade zip -y')\n", (8412, 8434), False, 'import os\n'), ((8436, 8467), 'os.system', 'os.system', (['"""apt upgrade zip -y"""'], {}), "('apt upgrade zip -y')\n", (8445, 8467), False, 'import os\n'), ((8469, 8502), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (8478, 8502), False, 'import os\n'), ((8504, 8514), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8512, 8514), False, 'import sys\n'), ((8565, 8589), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (8574, 8589), False, 'import os\n'), ((8591, 8615), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (8600, 8615), False, 'import os\n'), ((8617, 8641), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (8626, 8641), False, 'import os\n'), ((8643, 8667), 'os.system', 'os.system', (['"""apt upgrate"""'], {}), "('apt upgrate')\n", (8652, 8667), False, 'import os\n'), ((8669, 8692), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (8678, 8692), False, 'import os\n'), ((8694, 8717), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (8703, 8717), False, 'import os\n'), ((8719, 8753), 'os.system', 'os.system', (['"""pkg install figlet -y"""'], {}), "('pkg install figlet -y')\n", (8728, 8753), False, 'import os\n'), ((8755, 8789), 'os.system', 'os.system', (['"""apt install figlet -y"""'], {}), "('apt install figlet -y')\n", (8764, 8789), False, 'import os\n'), ((8791, 8825), 'os.system', 'os.system', (['"""pkg upgrade figlet -y"""'], {}), "('pkg upgrade figlet -y')\n", (8800, 8825), False, 'import os\n'), ((8827, 8861), 'os.system', 'os.system', (['"""apt upgrade figlet -y"""'], {}), "('apt upgrade figlet -y')\n", (8836, 8861), False, 'import os\n'), ((8863, 8896), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (8872, 8896), False, 'import os\n'), ((8898, 8908), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8906, 8908), False, 'import sys\n'), ((8959, 8983), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (8968, 8983), False, 'import os\n'), ((8985, 9009), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (8994, 9009), False, 'import os\n'), ((9011, 9035), 'os.system', 'os.system', 
(['"""apt install"""'], {}), "('apt install')\n", (9020, 9035), False, 'import os\n'), ((9037, 9061), 'os.system', 'os.system', (['"""apt upgrate"""'], {}), "('apt upgrate')\n", (9046, 9061), False, 'import os\n'), ((9063, 9086), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (9072, 9086), False, 'import os\n'), ((9088, 9111), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (9097, 9111), False, 'import os\n'), ((9113, 9147), 'os.system', 'os.system', (['"""pkg install cowsay -y"""'], {}), "('pkg install cowsay -y')\n", (9122, 9147), False, 'import os\n'), ((9149, 9183), 'os.system', 'os.system', (['"""apt install cowsay -y"""'], {}), "('apt install cowsay -y')\n", (9158, 9183), False, 'import os\n'), ((9185, 9219), 'os.system', 'os.system', (['"""pkg upgrade cowsay -y"""'], {}), "('pkg upgrade cowsay -y')\n", (9194, 9219), False, 'import os\n'), ((9221, 9255), 'os.system', 'os.system', (['"""apt upgrade cowsay -y"""'], {}), "('apt upgrade cowsay -y')\n", (9230, 9255), False, 'import os\n'), ((9257, 9290), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (9266, 9290), False, 'import os\n'), ((9292, 9302), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9300, 9302), False, 'import sys\n'), ((9353, 9377), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (9362, 9377), False, 'import os\n'), ((9379, 9403), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (9388, 9403), False, 'import os\n'), ((9405, 9429), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (9414, 9429), False, 'import os\n'), ((9431, 9455), 'os.system', 'os.system', (['"""apt upgrate"""'], {}), "('apt upgrate')\n", (9440, 9455), False, 'import os\n'), ((9457, 9480), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (9466, 9480), False, 'import os\n'), ((9482, 9505), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (9491, 9505), False, 'import os\n'), ((9507, 9540), 'os.system', 'os.system', (['"""pkg install unzip -y"""'], {}), "('pkg install unzip -y')\n", (9516, 9540), False, 'import os\n'), ((9542, 9575), 'os.system', 'os.system', (['"""apt install unzip -y"""'], {}), "('apt install unzip -y')\n", (9551, 9575), False, 'import os\n'), ((9577, 9610), 'os.system', 'os.system', (['"""pkg upgrade unzip -y"""'], {}), "('pkg upgrade unzip -y')\n", (9586, 9610), False, 'import os\n'), ((9612, 9645), 'os.system', 'os.system', (['"""apt upgrade unzip -y"""'], {}), "('apt upgrade unzip -y')\n", (9621, 9645), False, 'import os\n'), ((9647, 9680), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (9656, 9680), False, 'import os\n'), ((9682, 9692), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9690, 9692), False, 'import sys\n'), ((9743, 9767), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (9752, 9767), False, 'import os\n'), ((9769, 9793), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (9778, 9793), False, 'import os\n'), ((9795, 9819), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (9804, 9819), False, 'import os\n'), ((9821, 9845), 'os.system', 'os.system', (['"""apt upgrate"""'], {}), "('apt upgrate')\n", (9830, 9845), False, 'import os\n'), ((9847, 9870), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (9856, 9870), False, 'import os\n'), ((9872, 9895), 'os.system', 
'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (9881, 9895), False, 'import os\n'), ((9897, 9928), 'os.system', 'os.system', (['"""pkg install vim -y"""'], {}), "('pkg install vim -y')\n", (9906, 9928), False, 'import os\n'), ((9930, 9961), 'os.system', 'os.system', (['"""apt install vim -y"""'], {}), "('apt install vim -y')\n", (9939, 9961), False, 'import os\n'), ((9963, 9994), 'os.system', 'os.system', (['"""pkg upgrade vim -y"""'], {}), "('pkg upgrade vim -y')\n", (9972, 9994), False, 'import os\n'), ((9996, 10027), 'os.system', 'os.system', (['"""apt upgrade vim -y"""'], {}), "('apt upgrade vim -y')\n", (10005, 10027), False, 'import os\n'), ((10029, 10062), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (10038, 10062), False, 'import os\n'), ((10064, 10074), 'sys.exit', 'sys.exit', ([], {}), '()\n', (10072, 10074), False, 'import sys\n'), ((10125, 10149), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (10134, 10149), False, 'import os\n'), ((10151, 10175), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (10160, 10175), False, 'import os\n'), ((10177, 10201), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (10186, 10201), False, 'import os\n'), ((10203, 10227), 'os.system', 'os.system', (['"""apt upgrate"""'], {}), "('apt upgrate')\n", (10212, 10227), False, 'import os\n'), ((10229, 10252), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (10238, 10252), False, 'import os\n'), ((10254, 10277), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (10263, 10277), False, 'import os\n'), ((10279, 10312), 'os.system', 'os.system', (['"""pkg install wcalc -y"""'], {}), "('pkg install wcalc -y')\n", (10288, 10312), False, 'import os\n'), ((10314, 10347), 'os.system', 'os.system', (['"""apt install wcalc -y"""'], {}), "('apt install wcalc -y')\n", (10323, 10347), False, 'import os\n'), ((10349, 10382), 'os.system', 'os.system', (['"""pkg upgrade wcalc -y"""'], {}), "('pkg upgrade wcalc -y')\n", (10358, 10382), False, 'import os\n'), ((10384, 10417), 'os.system', 'os.system', (['"""apt upgrade wcalc -y"""'], {}), "('apt upgrade wcalc -y')\n", (10393, 10417), False, 'import os\n'), ((10419, 10452), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (10428, 10452), False, 'import os\n'), ((10454, 10464), 'sys.exit', 'sys.exit', ([], {}), '()\n', (10462, 10464), False, 'import sys\n'), ((10515, 10539), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (10524, 10539), False, 'import os\n'), ((10541, 10565), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (10550, 10565), False, 'import os\n'), ((10567, 10591), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (10576, 10591), False, 'import os\n'), ((10593, 10617), 'os.system', 'os.system', (['"""apt upgrate"""'], {}), "('apt upgrate')\n", (10602, 10617), False, 'import os\n'), ((10619, 10642), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (10628, 10642), False, 'import os\n'), ((10644, 10667), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (10653, 10667), False, 'import os\n'), ((10669, 10701), 'os.system', 'os.system', (['"""pkg install bmon -y"""'], {}), "('pkg install bmon -y')\n", (10678, 10701), False, 'import os\n'), ((10703, 10735), 'os.system', 'os.system', (['"""apt install 
bmon -y"""'], {}), "('apt install bmon -y')\n", (10712, 10735), False, 'import os\n'), ((10737, 10769), 'os.system', 'os.system', (['"""pkg upgrade bmon -y"""'], {}), "('pkg upgrade bmon -y')\n", (10746, 10769), False, 'import os\n'), ((10771, 10803), 'os.system', 'os.system', (['"""apt upgrade bmon -y"""'], {}), "('apt upgrade bmon -y')\n", (10780, 10803), False, 'import os\n'), ((10805, 10838), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (10814, 10838), False, 'import os\n'), ((10840, 10850), 'sys.exit', 'sys.exit', ([], {}), '()\n', (10848, 10850), False, 'import sys\n'), ((10901, 10925), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (10910, 10925), False, 'import os\n'), ((10927, 10951), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (10936, 10951), False, 'import os\n'), ((10953, 10977), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (10962, 10977), False, 'import os\n'), ((10979, 11003), 'os.system', 'os.system', (['"""apt upgrate"""'], {}), "('apt upgrate')\n", (10988, 11003), False, 'import os\n'), ((11005, 11028), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (11014, 11028), False, 'import os\n'), ((11030, 11053), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (11039, 11053), False, 'import os\n'), ((11055, 11088), 'os.system', 'os.system', (['"""pkg install unrar -y"""'], {}), "('pkg install unrar -y')\n", (11064, 11088), False, 'import os\n'), ((11090, 11123), 'os.system', 'os.system', (['"""apt install unrar -y"""'], {}), "('apt install unrar -y')\n", (11099, 11123), False, 'import os\n'), ((11125, 11158), 'os.system', 'os.system', (['"""pkg upgrade unrar -y"""'], {}), "('pkg upgrade unrar -y')\n", (11134, 11158), False, 'import os\n'), ((11160, 11193), 'os.system', 'os.system', (['"""apt upgrade unrar -y"""'], {}), "('apt upgrade unrar -y')\n", (11169, 11193), False, 'import os\n'), ((11195, 11228), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (11204, 11228), False, 'import os\n'), ((11230, 11240), 'sys.exit', 'sys.exit', ([], {}), '()\n', (11238, 11240), False, 'import sys\n'), ((11291, 11315), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (11300, 11315), False, 'import os\n'), ((11317, 11341), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (11326, 11341), False, 'import os\n'), ((11343, 11367), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (11352, 11367), False, 'import os\n'), ((11369, 11393), 'os.system', 'os.system', (['"""apt upgrate"""'], {}), "('apt upgrate')\n", (11378, 11393), False, 'import os\n'), ((11395, 11418), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (11404, 11418), False, 'import os\n'), ((11420, 11443), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (11429, 11443), False, 'import os\n'), ((11445, 11478), 'os.system', 'os.system', (['"""pkg install proot -y"""'], {}), "('pkg install proot -y')\n", (11454, 11478), False, 'import os\n'), ((11480, 11513), 'os.system', 'os.system', (['"""apt install proot -y"""'], {}), "('apt install proot -y')\n", (11489, 11513), False, 'import os\n'), ((11515, 11548), 'os.system', 'os.system', (['"""pkg upgrade proot -y"""'], {}), "('pkg upgrade proot -y')\n", (11524, 11548), False, 'import os\n'), ((11550, 11583), 'os.system', 'os.system', 
(['"""apt upgrade proot -y"""'], {}), "('apt upgrade proot -y')\n", (11559, 11583), False, 'import os\n'), ((11585, 11618), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (11594, 11618), False, 'import os\n'), ((11620, 11630), 'sys.exit', 'sys.exit', ([], {}), '()\n', (11628, 11630), False, 'import sys\n'), ((11681, 11705), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (11690, 11705), False, 'import os\n'), ((11707, 11731), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (11716, 11731), False, 'import os\n'), ((11733, 11757), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (11742, 11757), False, 'import os\n'), ((11759, 11783), 'os.system', 'os.system', (['"""apt upgrate"""'], {}), "('apt upgrate')\n", (11768, 11783), False, 'import os\n'), ((11785, 11808), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (11794, 11808), False, 'import os\n'), ((11810, 11833), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (11819, 11833), False, 'import os\n'), ((11835, 11869), 'os.system', 'os.system', (['"""pkg install golang -y"""'], {}), "('pkg install golang -y')\n", (11844, 11869), False, 'import os\n'), ((11871, 11905), 'os.system', 'os.system', (['"""apt install golang -y"""'], {}), "('apt install golang -y')\n", (11880, 11905), False, 'import os\n'), ((11907, 11941), 'os.system', 'os.system', (['"""pkg upgrade golang -y"""'], {}), "('pkg upgrade golang -y')\n", (11916, 11941), False, 'import os\n'), ((11943, 11977), 'os.system', 'os.system', (['"""apt upgrade golang -y"""'], {}), "('apt upgrade golang -y')\n", (11952, 11977), False, 'import os\n'), ((11979, 12012), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (11988, 12012), False, 'import os\n'), ((12014, 12024), 'sys.exit', 'sys.exit', ([], {}), '()\n', (12022, 12024), False, 'import sys\n'), ((12075, 12099), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (12084, 12099), False, 'import os\n'), ((12101, 12125), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (12110, 12125), False, 'import os\n'), ((12127, 12151), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (12136, 12151), False, 'import os\n'), ((12153, 12177), 'os.system', 'os.system', (['"""apt upgrate"""'], {}), "('apt upgrate')\n", (12162, 12177), False, 'import os\n'), ((12179, 12202), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (12188, 12202), False, 'import os\n'), ((12204, 12227), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (12213, 12227), False, 'import os\n'), ((12229, 12259), 'os.system', 'os.system', (['"""pkg install tsu-y"""'], {}), "('pkg install tsu-y')\n", (12238, 12259), False, 'import os\n'), ((12260, 12291), 'os.system', 'os.system', (['"""apt install tsu -y"""'], {}), "('apt install tsu -y')\n", (12269, 12291), False, 'import os\n'), ((12293, 12324), 'os.system', 'os.system', (['"""pkg upgrade tsu -y"""'], {}), "('pkg upgrade tsu -y')\n", (12302, 12324), False, 'import os\n'), ((12326, 12357), 'os.system', 'os.system', (['"""apt upgrade tsu -y"""'], {}), "('apt upgrade tsu -y')\n", (12335, 12357), False, 'import os\n'), ((12359, 12392), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (12368, 12392), False, 'import os\n'), ((12394, 12404), 'sys.exit', 
'sys.exit', ([], {}), '()\n', (12402, 12404), False, 'import sys\n'), ((12455, 12479), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (12464, 12479), False, 'import os\n'), ((12481, 12505), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (12490, 12505), False, 'import os\n'), ((12507, 12531), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (12516, 12531), False, 'import os\n'), ((12533, 12557), 'os.system', 'os.system', (['"""apt upgrate"""'], {}), "('apt upgrate')\n", (12542, 12557), False, 'import os\n'), ((12559, 12582), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (12568, 12582), False, 'import os\n'), ((12584, 12607), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (12593, 12607), False, 'import os\n'), ((12609, 12637), 'os.system', 'os.system', (['"""pkg install tor"""'], {}), "('pkg install tor')\n", (12618, 12637), False, 'import os\n'), ((12639, 12672), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (12648, 12672), False, 'import os\n'), ((12674, 12684), 'sys.exit', 'sys.exit', ([], {}), '()\n', (12682, 12684), False, 'import sys\n'), ((12735, 12759), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (12744, 12759), False, 'import os\n'), ((12761, 12785), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (12770, 12785), False, 'import os\n'), ((12787, 12811), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (12796, 12811), False, 'import os\n'), ((12813, 12837), 'os.system', 'os.system', (['"""apt upgrate"""'], {}), "('apt upgrate')\n", (12822, 12837), False, 'import os\n'), ((12839, 12862), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (12848, 12862), False, 'import os\n'), ((12864, 12887), 'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (12873, 12887), False, 'import os\n'), ((12889, 12920), 'os.system', 'os.system', (['"""pkg install php -y"""'], {}), "('pkg install php -y')\n", (12898, 12920), False, 'import os\n'), ((12922, 12953), 'os.system', 'os.system', (['"""pkg upgrade php -y"""'], {}), "('pkg upgrade php -y')\n", (12931, 12953), False, 'import os\n'), ((12955, 12986), 'os.system', 'os.system', (['"""apt install php -y"""'], {}), "('apt install php -y')\n", (12964, 12986), False, 'import os\n'), ((12988, 13019), 'os.system', 'os.system', (['"""apt upgrade php -y"""'], {}), "('apt upgrade php -y')\n", (12997, 13019), False, 'import os\n'), ((13021, 13054), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (13030, 13054), False, 'import os\n'), ((13056, 13066), 'sys.exit', 'sys.exit', ([], {}), '()\n', (13064, 13066), False, 'import sys\n'), ((13117, 13141), 'os.system', 'os.system', (['"""pkg install"""'], {}), "('pkg install')\n", (13126, 13141), False, 'import os\n'), ((13143, 13167), 'os.system', 'os.system', (['"""pkg upgrade"""'], {}), "('pkg upgrade')\n", (13152, 13167), False, 'import os\n'), ((13169, 13193), 'os.system', 'os.system', (['"""apt install"""'], {}), "('apt install')\n", (13178, 13193), False, 'import os\n'), ((13195, 13219), 'os.system', 'os.system', (['"""apt upgrate"""'], {}), "('apt upgrate')\n", (13204, 13219), False, 'import os\n'), ((13221, 13244), 'os.system', 'os.system', (['"""apt update"""'], {}), "('apt update')\n", (13230, 13244), False, 'import os\n'), ((13246, 13269), 
'os.system', 'os.system', (['"""pkg update"""'], {}), "('pkg update')\n", (13255, 13269), False, 'import os\n'), ((13271, 13319), 'os.system', 'os.system', (['"""python -m pip install --upgrade pip"""'], {}), "('python -m pip install --upgrade pip')\n", (13280, 13319), False, 'import os\n'), ((13320, 13370), 'os.system', 'os.system', (['"""pip3 install --upgrade setuptools pip"""'], {}), "('pip3 install --upgrade setuptools pip')\n", (13329, 13370), False, 'import os\n'), ((13372, 13406), 'os.system', 'os.system', (['"""pkg install python -y"""'], {}), "('pkg install python -y')\n", (13381, 13406), False, 'import os\n'), ((13408, 13442), 'os.system', 'os.system', (['"""pkg upgrade python -y"""'], {}), "('pkg upgrade python -y')\n", (13417, 13442), False, 'import os\n'), ((13444, 13478), 'os.system', 'os.system', (['"""apt install python -y"""'], {}), "('apt install python -y')\n", (13453, 13478), False, 'import os\n'), ((13480, 13514), 'os.system', 'os.system', (['"""apt upgrade python -y"""'], {}), "('apt upgrade python -y')\n", (13489, 13514), False, 'import os\n'), ((13516, 13551), 'os.system', 'os.system', (['"""pkg install python2 -y"""'], {}), "('pkg install python2 -y')\n", (13525, 13551), False, 'import os\n'), ((13553, 13588), 'os.system', 'os.system', (['"""pkg upgrade python2 -y"""'], {}), "('pkg upgrade python2 -y')\n", (13562, 13588), False, 'import os\n'), ((13590, 13625), 'os.system', 'os.system', (['"""apt install python2 -y"""'], {}), "('apt install python2 -y')\n", (13599, 13625), False, 'import os\n'), ((13627, 13662), 'os.system', 'os.system', (['"""apt upgrade python2 -y"""'], {}), "('apt upgrade python2 -y')\n", (13636, 13662), False, 'import os\n'), ((13664, 13695), 'os.system', 'os.system', (['"""pkg install php -y"""'], {}), "('pkg install php -y')\n", (13673, 13695), False, 'import os\n'), ((13697, 13728), 'os.system', 'os.system', (['"""pkg upgrade php -y"""'], {}), "('pkg upgrade php -y')\n", (13706, 13728), False, 'import os\n'), ((13730, 13761), 'os.system', 'os.system', (['"""apt install php -y"""'], {}), "('apt install php -y')\n", (13739, 13761), False, 'import os\n'), ((13763, 13794), 'os.system', 'os.system', (['"""apt upgrade php -y"""'], {}), "('apt upgrade php -y')\n", (13772, 13794), False, 'import os\n'), ((13796, 13825), 'os.system', 'os.system', (['"""pkg install bash"""'], {}), "('pkg install bash')\n", (13805, 13825), False, 'import os\n'), ((13827, 13856), 'os.system', 'os.system', (['"""apt install bash"""'], {}), "('apt install bash')\n", (13836, 13856), False, 'import os\n'), ((13858, 13887), 'os.system', 'os.system', (['"""pkg upgrade bash"""'], {}), "('pkg upgrade bash')\n", (13867, 13887), False, 'import os\n'), ((13889, 13918), 'os.system', 'os.system', (['"""apt upgrade bash"""'], {}), "('apt upgrade bash')\n", (13898, 13918), False, 'import os\n'), ((13920, 13951), 'os.system', 'os.system', (['"""apt install git -y"""'], {}), "('apt install git -y')\n", (13929, 13951), False, 'import os\n'), ((13953, 13984), 'os.system', 'os.system', (['"""pkg install git -y"""'], {}), "('pkg install git -y')\n", (13962, 13984), False, 'import os\n'), ((13986, 14017), 'os.system', 'os.system', (['"""pkg upgrade git -y"""'], {}), "('pkg upgrade git -y')\n", (13995, 14017), False, 'import os\n'), ((14019, 14050), 'os.system', 'os.system', (['"""apt upgrade git -y"""'], {}), "('apt upgrade git -y')\n", (14028, 14050), False, 'import os\n'), ((14052, 14084), 'os.system', 'os.system', (['"""pkg install perl -y"""'], {}), "('pkg install perl -y')\n", (14061, 
14084), False, 'import os\n'), ((14086, 14118), 'os.system', 'os.system', (['"""apt install perl -y"""'], {}), "('apt install perl -y')\n", (14095, 14118), False, 'import os\n'), ((14120, 14152), 'os.system', 'os.system', (['"""pkg upgrade perl -y"""'], {}), "('pkg upgrade perl -y')\n", (14129, 14152), False, 'import os\n'), ((14154, 14186), 'os.system', 'os.system', (['"""apt upgrade perl -y"""'], {}), "('apt upgrade perl -y')\n", (14163, 14186), False, 'import os\n'), ((14188, 14220), 'os.system', 'os.system', (['"""pkg install nano -y"""'], {}), "('pkg install nano -y')\n", (14197, 14220), False, 'import os\n'), ((14222, 14254), 'os.system', 'os.system', (['"""apt install nano -y"""'], {}), "('apt install nano -y')\n", (14231, 14254), False, 'import os\n'), ((14256, 14288), 'os.system', 'os.system', (['"""pkg upgrade nano -y"""'], {}), "('pkg upgrade nano -y')\n", (14265, 14288), False, 'import os\n'), ((14290, 14322), 'os.system', 'os.system', (['"""apt upgrade nano -y"""'], {}), "('apt upgrade nano -y')\n", (14299, 14322), False, 'import os\n'), ((14324, 14356), 'os.system', 'os.system', (['"""pkg install curl -y"""'], {}), "('pkg install curl -y')\n", (14333, 14356), False, 'import os\n'), ((14358, 14390), 'os.system', 'os.system', (['"""apt install curl -y"""'], {}), "('apt install curl -y')\n", (14367, 14390), False, 'import os\n'), ((14392, 14424), 'os.system', 'os.system', (['"""pkg upgrade curl -y"""'], {}), "('pkg upgrade curl -y')\n", (14401, 14424), False, 'import os\n'), ((14426, 14458), 'os.system', 'os.system', (['"""apt upgrade curl -y"""'], {}), "('apt upgrade curl -y')\n", (14435, 14458), False, 'import os\n'), ((14460, 14495), 'os.system', 'os.system', (['"""pkg install openssl -y"""'], {}), "('pkg install openssl -y')\n", (14469, 14495), False, 'import os\n'), ((14497, 14532), 'os.system', 'os.system', (['"""apt install openssl -y"""'], {}), "('apt install openssl -y')\n", (14506, 14532), False, 'import os\n'), ((14534, 14569), 'os.system', 'os.system', (['"""pkg upgrade openssl -y"""'], {}), "('pkg upgrade openssl -y')\n", (14543, 14569), False, 'import os\n'), ((14571, 14606), 'os.system', 'os.system', (['"""apt upgrade openssl -y"""'], {}), "('apt upgrade openssl -y')\n", (14580, 14606), False, 'import os\n'), ((14608, 14643), 'os.system', 'os.system', (['"""pkg install openssh -y"""'], {}), "('pkg install openssh -y')\n", (14617, 14643), False, 'import os\n'), ((14645, 14680), 'os.system', 'os.system', (['"""apt install openssh -y"""'], {}), "('apt install openssh -y')\n", (14654, 14680), False, 'import os\n'), ((14682, 14717), 'os.system', 'os.system', (['"""pkg upgrade openssh -y"""'], {}), "('pkg upgrade openssh -y')\n", (14691, 14717), False, 'import os\n'), ((14719, 14754), 'os.system', 'os.system', (['"""apt upgrade openssh -y"""'], {}), "('apt upgrade openssh -y')\n", (14728, 14754), False, 'import os\n'), ((14756, 14788), 'os.system', 'os.system', (['"""pkg install wget -y"""'], {}), "('pkg install wget -y')\n", (14765, 14788), False, 'import os\n'), ((14790, 14822), 'os.system', 'os.system', (['"""apt install wget -y"""'], {}), "('apt install wget -y')\n", (14799, 14822), False, 'import os\n'), ((14824, 14856), 'os.system', 'os.system', (['"""pkg upgrade wget -y"""'], {}), "('pkg upgrade wget -y')\n", (14833, 14856), False, 'import os\n'), ((14858, 14890), 'os.system', 'os.system', (['"""apt upgrade wget -y"""'], {}), "('apt upgrade wget -y')\n", (14867, 14890), False, 'import os\n'), ((14892, 14925), 'os.system', 'os.system', (['"""pkg install clang 
-y"""'], {}), "('pkg install clang -y')\n", (14901, 14925), False, 'import os\n'), ((14927, 14960), 'os.system', 'os.system', (['"""apt install clang -y"""'], {}), "('apt install clang -y')\n", (14936, 14960), False, 'import os\n'), ((14962, 14995), 'os.system', 'os.system', (['"""pkg upgrade clang -y"""'], {}), "('pkg upgrade clang -y')\n", (14971, 14995), False, 'import os\n'), ((14997, 15030), 'os.system', 'os.system', (['"""apt upgrade clang -y"""'], {}), "('apt upgrade clang -y')\n", (15006, 15030), False, 'import os\n'), ((15032, 15064), 'os.system', 'os.system', (['"""pkg install nmap -y"""'], {}), "('pkg install nmap -y')\n", (15041, 15064), False, 'import os\n'), ((15066, 15098), 'os.system', 'os.system', (['"""apt install nmap -y"""'], {}), "('apt install nmap -y')\n", (15075, 15098), False, 'import os\n'), ((15100, 15132), 'os.system', 'os.system', (['"""pkg upgrade nmap -y"""'], {}), "('pkg upgrade nmap -y')\n", (15109, 15132), False, 'import os\n'), ((15134, 15166), 'os.system', 'os.system', (['"""apt upgrade nmap -y"""'], {}), "('apt upgrade nmap -y')\n", (15143, 15166), False, 'import os\n'), ((15168, 15199), 'os.system', 'os.system', (['"""pkg install w3m -y"""'], {}), "('pkg install w3m -y')\n", (15177, 15199), False, 'import os\n'), ((15201, 15232), 'os.system', 'os.system', (['"""apt install w3m -y"""'], {}), "('apt install w3m -y')\n", (15210, 15232), False, 'import os\n'), ((15234, 15265), 'os.system', 'os.system', (['"""pkg upgrade w3m -y"""'], {}), "('pkg upgrade w3m -y')\n", (15243, 15265), False, 'import os\n'), ((15267, 15298), 'os.system', 'os.system', (['"""apt upgrade w3m -y"""'], {}), "('apt upgrade w3m -y')\n", (15276, 15298), False, 'import os\n'), ((15300, 15332), 'os.system', 'os.system', (['"""pkg install ruby -y"""'], {}), "('pkg install ruby -y')\n", (15309, 15332), False, 'import os\n'), ((15334, 15366), 'os.system', 'os.system', (['"""apt install ruby -y"""'], {}), "('apt install ruby -y')\n", (15343, 15366), False, 'import os\n'), ((15368, 15400), 'os.system', 'os.system', (['"""pkg upgrade ruby -y"""'], {}), "('pkg upgrade ruby -y')\n", (15377, 15400), False, 'import os\n'), ((15402, 15434), 'os.system', 'os.system', (['"""apt upgrade ruby -y"""'], {}), "('apt upgrade ruby -y')\n", (15411, 15434), False, 'import os\n'), ((15436, 15472), 'os.system', 'os.system', (['"""pkg install dnsutils -y"""'], {}), "('pkg install dnsutils -y')\n", (15445, 15472), False, 'import os\n'), ((15474, 15510), 'os.system', 'os.system', (['"""apt install dnsutils -y"""'], {}), "('apt install dnsutils -y')\n", (15483, 15510), False, 'import os\n'), ((15512, 15548), 'os.system', 'os.system', (['"""pkg upgrade dnsutils -y"""'], {}), "('pkg upgrade dnsutils -y')\n", (15521, 15548), False, 'import os\n'), ((15550, 15586), 'os.system', 'os.system', (['"""apt upgrade dnsutils -y"""'], {}), "('apt upgrade dnsutils -y')\n", (15559, 15586), False, 'import os\n'), ((15588, 15625), 'os.system', 'os.system', (['"""pkg install coreutils -y"""'], {}), "('pkg install coreutils -y')\n", (15597, 15625), False, 'import os\n'), ((15627, 15664), 'os.system', 'os.system', (['"""apt install coreutils -y"""'], {}), "('apt install coreutils -y')\n", (15636, 15664), False, 'import os\n'), ((15666, 15703), 'os.system', 'os.system', (['"""pkg upgrade coreutils -y"""'], {}), "('pkg upgrade coreutils -y')\n", (15675, 15703), False, 'import os\n'), ((15705, 15742), 'os.system', 'os.system', (['"""apt upgrade coreutils -y"""'], {}), "('apt upgrade coreutils -y')\n", (15714, 15742), False, 'import 
os\n'), ((15744, 15776), 'os.system', 'os.system', (['"""pkg install fish -y"""'], {}), "('pkg install fish -y')\n", (15753, 15776), False, 'import os\n'), ((15778, 15810), 'os.system', 'os.system', (['"""apt install fish -y"""'], {}), "('apt install fish -y')\n", (15787, 15810), False, 'import os\n'), ((15812, 15844), 'os.system', 'os.system', (['"""pkg upgrade fish -y"""'], {}), "('pkg upgrade fish -y')\n", (15821, 15844), False, 'import os\n'), ((15846, 15878), 'os.system', 'os.system', (['"""apt upgrade fish -y"""'], {}), "('apt upgrade fish -y')\n", (15855, 15878), False, 'import os\n'), ((15880, 15911), 'os.system', 'os.system', (['"""pkg install zip -y"""'], {}), "('pkg install zip -y')\n", (15889, 15911), False, 'import os\n'), ((15913, 15944), 'os.system', 'os.system', (['"""apt install zip -y"""'], {}), "('apt install zip -y')\n", (15922, 15944), False, 'import os\n'), ((15946, 15977), 'os.system', 'os.system', (['"""pkg upgrade zip -y"""'], {}), "('pkg upgrade zip -y')\n", (15955, 15977), False, 'import os\n'), ((15979, 16010), 'os.system', 'os.system', (['"""apt upgrade zip -y"""'], {}), "('apt upgrade zip -y')\n", (15988, 16010), False, 'import os\n'), ((16012, 16046), 'os.system', 'os.system', (['"""pkg install figlet -y"""'], {}), "('pkg install figlet -y')\n", (16021, 16046), False, 'import os\n'), ((16048, 16082), 'os.system', 'os.system', (['"""apt install figlet -y"""'], {}), "('apt install figlet -y')\n", (16057, 16082), False, 'import os\n'), ((16084, 16118), 'os.system', 'os.system', (['"""pkg upgrade figlet -y"""'], {}), "('pkg upgrade figlet -y')\n", (16093, 16118), False, 'import os\n'), ((16120, 16154), 'os.system', 'os.system', (['"""apt upgrade figlet -y"""'], {}), "('apt upgrade figlet -y')\n", (16129, 16154), False, 'import os\n'), ((16156, 16190), 'os.system', 'os.system', (['"""pkg install cowsay -y"""'], {}), "('pkg install cowsay -y')\n", (16165, 16190), False, 'import os\n'), ((16192, 16226), 'os.system', 'os.system', (['"""apt install cowsay -y"""'], {}), "('apt install cowsay -y')\n", (16201, 16226), False, 'import os\n'), ((16228, 16262), 'os.system', 'os.system', (['"""pkg upgrade cowsay -y"""'], {}), "('pkg upgrade cowsay -y')\n", (16237, 16262), False, 'import os\n'), ((16264, 16298), 'os.system', 'os.system', (['"""apt upgrade cowsay -y"""'], {}), "('apt upgrade cowsay -y')\n", (16273, 16298), False, 'import os\n'), ((16300, 16333), 'os.system', 'os.system', (['"""pkg install unzip -y"""'], {}), "('pkg install unzip -y')\n", (16309, 16333), False, 'import os\n'), ((16335, 16368), 'os.system', 'os.system', (['"""apt install unzip -y"""'], {}), "('apt install unzip -y')\n", (16344, 16368), False, 'import os\n'), ((16370, 16403), 'os.system', 'os.system', (['"""pkg upgrade unzip -y"""'], {}), "('pkg upgrade unzip -y')\n", (16379, 16403), False, 'import os\n'), ((16405, 16438), 'os.system', 'os.system', (['"""apt upgrade unzip -y"""'], {}), "('apt upgrade unzip -y')\n", (16414, 16438), False, 'import os\n'), ((16440, 16471), 'os.system', 'os.system', (['"""pkg install vim -y"""'], {}), "('pkg install vim -y')\n", (16449, 16471), False, 'import os\n'), ((16473, 16504), 'os.system', 'os.system', (['"""apt install vim -y"""'], {}), "('apt install vim -y')\n", (16482, 16504), False, 'import os\n'), ((16506, 16537), 'os.system', 'os.system', (['"""pkg upgrade vim -y"""'], {}), "('pkg upgrade vim -y')\n", (16515, 16537), False, 'import os\n'), ((16539, 16570), 'os.system', 'os.system', (['"""apt upgrade vim -y"""'], {}), "('apt upgrade vim -y')\n", (16548, 
16570), False, 'import os\n'), ((16572, 16605), 'os.system', 'os.system', (['"""pkg install wcalc -y"""'], {}), "('pkg install wcalc -y')\n", (16581, 16605), False, 'import os\n'), ((16607, 16640), 'os.system', 'os.system', (['"""apt install wcalc -y"""'], {}), "('apt install wcalc -y')\n", (16616, 16640), False, 'import os\n'), ((16642, 16675), 'os.system', 'os.system', (['"""pkg upgrade wcalc -y"""'], {}), "('pkg upgrade wcalc -y')\n", (16651, 16675), False, 'import os\n'), ((16677, 16710), 'os.system', 'os.system', (['"""apt upgrade wcalc -y"""'], {}), "('apt upgrade wcalc -y')\n", (16686, 16710), False, 'import os\n'), ((16712, 16744), 'os.system', 'os.system', (['"""pkg install bmon -y"""'], {}), "('pkg install bmon -y')\n", (16721, 16744), False, 'import os\n'), ((16746, 16778), 'os.system', 'os.system', (['"""apt install bmon -y"""'], {}), "('apt install bmon -y')\n", (16755, 16778), False, 'import os\n'), ((16780, 16812), 'os.system', 'os.system', (['"""pkg upgrade bmon -y"""'], {}), "('pkg upgrade bmon -y')\n", (16789, 16812), False, 'import os\n'), ((16814, 16846), 'os.system', 'os.system', (['"""apt upgrade bmon -y"""'], {}), "('apt upgrade bmon -y')\n", (16823, 16846), False, 'import os\n'), ((16848, 16881), 'os.system', 'os.system', (['"""pkg install unrar -y"""'], {}), "('pkg install unrar -y')\n", (16857, 16881), False, 'import os\n'), ((16883, 16916), 'os.system', 'os.system', (['"""apt install unrar -y"""'], {}), "('apt install unrar -y')\n", (16892, 16916), False, 'import os\n'), ((16918, 16951), 'os.system', 'os.system', (['"""pkg upgrade unrar -y"""'], {}), "('pkg upgrade unrar -y')\n", (16927, 16951), False, 'import os\n'), ((16953, 16986), 'os.system', 'os.system', (['"""apt upgrade unrar -y"""'], {}), "('apt upgrade unrar -y')\n", (16962, 16986), False, 'import os\n'), ((16988, 17021), 'os.system', 'os.system', (['"""pkg install proot -y"""'], {}), "('pkg install proot -y')\n", (16997, 17021), False, 'import os\n'), ((17023, 17056), 'os.system', 'os.system', (['"""apt install proot -y"""'], {}), "('apt install proot -y')\n", (17032, 17056), False, 'import os\n'), ((17058, 17091), 'os.system', 'os.system', (['"""pkg upgrade proot -y"""'], {}), "('pkg upgrade proot -y')\n", (17067, 17091), False, 'import os\n'), ((17093, 17126), 'os.system', 'os.system', (['"""apt upgrade proot -y"""'], {}), "('apt upgrade proot -y')\n", (17102, 17126), False, 'import os\n'), ((17128, 17162), 'os.system', 'os.system', (['"""pkg install golang -y"""'], {}), "('pkg install golang -y')\n", (17137, 17162), False, 'import os\n'), ((17164, 17198), 'os.system', 'os.system', (['"""apt install golang -y"""'], {}), "('apt install golang -y')\n", (17173, 17198), False, 'import os\n'), ((17200, 17234), 'os.system', 'os.system', (['"""pkg upgrade golang -y"""'], {}), "('pkg upgrade golang -y')\n", (17209, 17234), False, 'import os\n'), ((17236, 17270), 'os.system', 'os.system', (['"""apt upgrade golang -y"""'], {}), "('apt upgrade golang -y')\n", (17245, 17270), False, 'import os\n'), ((17272, 17302), 'os.system', 'os.system', (['"""pkg install tsu-y"""'], {}), "('pkg install tsu-y')\n", (17281, 17302), False, 'import os\n'), ((17303, 17334), 'os.system', 'os.system', (['"""apt install tsu -y"""'], {}), "('apt install tsu -y')\n", (17312, 17334), False, 'import os\n'), ((17336, 17367), 'os.system', 'os.system', (['"""pkg upgrade tsu -y"""'], {}), "('pkg upgrade tsu -y')\n", (17345, 17367), False, 'import os\n'), ((17369, 17400), 'os.system', 'os.system', (['"""apt upgrade tsu -y"""'], {}), "('apt 
upgrade tsu -y')\n", (17378, 17400), False, 'import os\n'), ((17402, 17430), 'os.system', 'os.system', (['"""pkg install tor"""'], {}), "('pkg install tor')\n", (17411, 17430), False, 'import os\n'), ((17432, 17465), 'os.system', 'os.system', (['"""termux-setup-storage"""'], {}), "('termux-setup-storage')\n", (17441, 17465), False, 'import os\n'), ((17467, 17477), 'sys.exit', 'sys.exit', ([], {}), '()\n', (17475, 17477), False, 'import sys\n'), ((1155, 1165), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1163, 1165), False, 'import sys\n'), ((1185, 1212), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (1194, 1212), False, 'import os\n'), ((1535, 1562), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (1544, 1562), False, 'import os\n'), ((1928, 1955), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (1937, 1955), False, 'import os\n'), ((2325, 2352), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (2334, 2352), False, 'import os\n'), ((2698, 2725), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (2707, 2725), False, 'import os\n'), ((3079, 3106), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (3088, 3106), False, 'import os\n'), ((3464, 3491), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (3473, 3491), False, 'import os\n'), ((3849, 3876), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (3858, 3876), False, 'import os\n'), ((4234, 4261), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (4243, 4261), False, 'import os\n'), ((4632, 4659), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (4641, 4659), False, 'import os\n'), ((5030, 5057), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (5039, 5057), False, 'import os\n'), ((5416, 5443), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (5425, 5443), False, 'import os\n'), ((5806, 5833), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (5815, 5833), False, 'import os\n'), ((6192, 6219), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (6201, 6219), False, 'import os\n'), ((6574, 6601), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (6583, 6601), False, 'import os\n'), ((6960, 6987), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (6969, 6987), False, 'import os\n'), ((7362, 7389), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (7371, 7389), False, 'import os\n'), ((7768, 7795), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (7777, 7795), False, 'import os\n'), ((8154, 8181), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (8163, 8181), False, 'import os\n'), ((8536, 8563), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (8545, 8563), False, 'import os\n'), ((8930, 8957), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (8939, 8957), False, 'import os\n'), ((9324, 9351), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (9333, 9351), False, 'import os\n'), ((9714, 9741), 'os.system', 'os.system', (['"""apt upgrade 
-y"""'], {}), "('apt upgrade -y')\n", (9723, 9741), False, 'import os\n'), ((10096, 10123), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (10105, 10123), False, 'import os\n'), ((10486, 10513), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (10495, 10513), False, 'import os\n'), ((10872, 10899), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (10881, 10899), False, 'import os\n'), ((11262, 11289), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (11271, 11289), False, 'import os\n'), ((11652, 11679), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (11661, 11679), False, 'import os\n'), ((12046, 12073), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (12055, 12073), False, 'import os\n'), ((12426, 12453), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (12435, 12453), False, 'import os\n'), ((12706, 12733), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (12715, 12733), False, 'import os\n'), ((13088, 13115), 'os.system', 'os.system', (['"""apt upgrade -y"""'], {}), "('apt upgrade -y')\n", (13097, 13115), False, 'import os\n'), ((168, 187), 'sys.stdout.write', 'sys.stdout.write', (['c'], {}), '(c)\n', (184, 187), False, 'import sys\n'), ((196, 214), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (212, 214), False, 'import sys\n')]
import pygame import random pygame.init() pygame.font.init() class Card(object): """ The Card Class """ def __init__(self, left, top, width, height, back_color, front_color, solved_color, display, font_color, text_font, value=None): self._rect = pygame.Rect(left, top, width, height) self._display = display self._back_color = back_color # color of card when face down self._front_color = front_color # color of card when face up self._solved_color = solved_color # color of card after it is matched self._font_color = font_color self._text_font = text_font self._value = value # the number we are trying to match self._unsolved = True # is set to false once matched self._hidden = True # card is face down to start self._times_seen = 0 # number of times player viewed card @property def value(self): return self._value @value.setter def value(self, value): self._value = value @property def times_seen(self): return self._times_seen def solved(self): self._unsolved = False pygame.draw.rect(self._display, self._solved_color, self._rect) def is_unsolved(self): return self._unsolved def is_clicked(self, pos): x_pos, y_pos = pos return self._rect.collidepoint(x_pos, y_pos) # did player click on this card? def is_hidden(self): return self._hidden def show_card(self): self._hidden = False self._times_seen += 1 pygame.draw.rect(self._display, self._front_color, self._rect) text_surface = self._text_font.render(self._value, True, self._font_color) self._display.blit(text_surface, (self._rect.center[0] - (text_surface.get_width() / 2), self._rect.center[1] - (text_surface.get_height() / 2))) def hide_card(self): self._hidden = True pygame.draw.rect(self._display, self._back_color, self._rect) def get_matching_card(card_list, card_to_match): """ This function returns the card that matches the one passed in """ the_matching_card = None for test_card in card_list: if test_card.value == card_to_match.value and test_card != card_to_match: the_matching_card = test_card break return the_matching_card def cards_remaining(card_list): """ this function returns the number of cards that have not been matched yet """ num_remaining = 0 for c in card_list: if c.is_unsolved(): num_remaining += 1 return num_remaining if __name__ == "__main__": display_width = 600 display_height = 600 card_font = pygame.font.SysFont('Comic Sans MS', 48) front_col = pygame.Color('white') solved_col = pygame.Color('#636363') back_col = pygame.Color('#293a32') font_col = pygame.Color('black') score_font = pygame.font.SysFont('Comic Sans MS', 24) score_txt_col = pygame.Color('#d4c38f') score_y_margin = 50 score_x_margin = 20 player_closed_app = False new_game = False cards = [] game_display = pygame.display.set_mode((display_width, display_height)) pygame.display.set_caption('Matching Game') game_display.fill(pygame.Color('#b5c9a6')) score_rect = pygame.draw.rect(game_display, pygame.Color('black'), pygame.Rect(0, 0, display_width, score_y_margin)) surf_8x8_txt = score_font.render("8 x 8", True, score_txt_col) left_pos = (game_display.get_width() - score_x_margin - surf_8x8_txt.get_width()) surf_8x8_rect = game_display.blit(surf_8x8_txt, (left_pos, (score_y_margin - surf_8x8_txt.get_height()) / 2)) surf_6x6_txt = score_font.render("6 x 6", True, score_txt_col) left_pos = left_pos - surf_6x6_txt.get_width() - score_x_margin surf_6x6_rect = game_display.blit(surf_6x6_txt, (left_pos, (score_y_margin - surf_6x6_txt.get_height()) / 2)) surf_4x4_txt = score_font.render("4 x 4", True, score_txt_col) left_pos = left_pos - surf_4x4_txt.get_width() - 
score_x_margin surf_4x4_rect = game_display.blit(surf_4x4_txt, (left_pos, (score_y_margin - surf_4x4_txt.get_height()) / 2)) surf_sel_txt = score_font.render("Select Game:", True, score_txt_col) left_pos = left_pos - surf_sel_txt.get_width() - score_x_margin game_display.blit(surf_sel_txt, (left_pos, (score_y_margin - surf_sel_txt.get_height()) / 2)) num_cols = 0 num_rows = 0 pick_1 = None # variable to hold first card selected by player score = 0 max_score = 0 # maximum score a player can get while not player_closed_app: for event in pygame.event.get(): if event.type == pygame.QUIT: player_closed_app = True if new_game: pygame.draw.rect(game_display, pygame.Color('#b5c9a6'), pygame.Rect(0, score_y_margin, display_width, display_height - score_y_margin)) total_pairs = (num_cols * num_rows) / 2 max_score = total_pairs - 1 # player gets no credit for last two cards remaining pairs = range(1, total_pairs + 1) + range(1, total_pairs + 1) # create numbered pairs # calculate the width and height of the cards and the space between them card_horz_width = int((display_width * 0.8) / num_cols) space_horz_width = int((display_width * 0.2) / (num_cols + 1)) card_vert_height = int(((display_height - score_y_margin) * 0.8) / num_rows) space_vert_height = int(((display_height - score_y_margin) * 0.2) / (num_rows + 1)) # create cards and randomly assign the numbered pairs random.random() del cards[:] for row in range(1, num_rows + 1): for col in range(1, num_cols + 1): rnd_item = random.choice(pairs) pairs.remove(rnd_item) new_card_x = ((col - 1) * card_horz_width) + (col * space_horz_width) new_card_y = ((row - 1) * card_vert_height) + (row * space_vert_height) + score_y_margin crd = Card(new_card_x, new_card_y, card_horz_width, card_vert_height, back_col, front_col, solved_col, game_display, font_col, card_font, str(rnd_item)) cards.append(crd) crd.hide_card() score = 0 new_game = False if pygame.mouse.get_pressed()[0]: if surf_4x4_rect.collidepoint(pygame.mouse.get_pos()): # start new game 4 x 4 new_game = True num_cols = 4 num_rows = 4 pygame.time.wait(200) # wait 200ms to avoid multiple new game mouse click events if surf_6x6_rect.collidepoint(pygame.mouse.get_pos()): # start new game 6 x 6 new_game = True num_cols = 6 num_rows = 6 pygame.time.wait(200) if surf_8x8_rect.collidepoint(pygame.mouse.get_pos()): # start new game 8 x 8 new_game = True num_cols = 8 num_rows = 8 pygame.time.wait(200) for crd in cards: if crd.is_clicked(pygame.mouse.get_pos()) and crd.is_hidden() and crd.is_unsolved(): crd.show_card() pygame.display.flip() if pick_1 is None: pick_1 = crd # player picked first card else: # player picked second card. if pick_1.value == crd.value: # it is a match! pick_1.solved() crd.solved() if crd.times_seen > 1 and cards_remaining(cards) > 0: score += 1 # if you have seen the matching card at least once before, you get a point elif crd.times_seen == 1 and cards_remaining(cards) > 0: max_score -= 1 # no points for luck, we just reduce the max possible score pygame.time.wait(500) # show matching values for 500ms else: # it did not match pick_1.hide_card() crd.hide_card() matching_card = get_matching_card(cards, pick_1) if matching_card.times_seen > 0: score -= 1 # player has seen the matching card before! 1 point penalty! if crd.times_seen > 1: score -= 1 # player should have known this card was not a match! 1 point penalty! 
pygame.time.wait(1500) # show card values for 1.5sec pick_1 = None # get ready for next pair of selections by player break # update score surf_wrong = score_font.render("Score = " + str(score) + " out of " + str(max_score), True, score_txt_col) pygame.draw.rect(game_display, pygame.Color('black'), pygame.Rect(score_x_margin, 0, surf_wrong.get_width() + 100, score_y_margin)) game_display.blit(surf_wrong, (score_x_margin, (score_y_margin - surf_wrong.get_height()) / 2)) pygame.display.flip() # player existed application pygame.quit() quit()
[ "random.choice", "pygame.mouse.get_pressed", "pygame.init", "pygame.quit", "pygame.event.get", "pygame.time.wait", "pygame.display.set_mode", "pygame.display.flip", "pygame.mouse.get_pos", "pygame.Rect", "pygame.draw.rect", "pygame.font.init", "pygame.display.set_caption", "pygame.Color", "random.random", "pygame.font.SysFont" ]
[((29, 42), 'pygame.init', 'pygame.init', ([], {}), '()\n', (40, 42), False, 'import pygame\n'), ((43, 61), 'pygame.font.init', 'pygame.font.init', ([], {}), '()\n', (59, 61), False, 'import pygame\n'), ((2788, 2828), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""Comic Sans MS"""', '(48)'], {}), "('Comic Sans MS', 48)\n", (2807, 2828), False, 'import pygame\n'), ((2845, 2866), 'pygame.Color', 'pygame.Color', (['"""white"""'], {}), "('white')\n", (2857, 2866), False, 'import pygame\n'), ((2884, 2907), 'pygame.Color', 'pygame.Color', (['"""#636363"""'], {}), "('#636363')\n", (2896, 2907), False, 'import pygame\n'), ((2923, 2946), 'pygame.Color', 'pygame.Color', (['"""#293a32"""'], {}), "('#293a32')\n", (2935, 2946), False, 'import pygame\n'), ((2962, 2983), 'pygame.Color', 'pygame.Color', (['"""black"""'], {}), "('black')\n", (2974, 2983), False, 'import pygame\n'), ((3002, 3042), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""Comic Sans MS"""', '(24)'], {}), "('Comic Sans MS', 24)\n", (3021, 3042), False, 'import pygame\n'), ((3063, 3086), 'pygame.Color', 'pygame.Color', (['"""#d4c38f"""'], {}), "('#d4c38f')\n", (3075, 3086), False, 'import pygame\n'), ((3223, 3279), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(display_width, display_height)'], {}), '((display_width, display_height))\n', (3246, 3279), False, 'import pygame\n'), ((3284, 3327), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Matching Game"""'], {}), "('Matching Game')\n", (3310, 3327), False, 'import pygame\n'), ((9524, 9537), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (9535, 9537), False, 'import pygame\n'), ((317, 354), 'pygame.Rect', 'pygame.Rect', (['left', 'top', 'width', 'height'], {}), '(left, top, width, height)\n', (328, 354), False, 'import pygame\n'), ((1205, 1268), 'pygame.draw.rect', 'pygame.draw.rect', (['self._display', 'self._solved_color', 'self._rect'], {}), '(self._display, self._solved_color, self._rect)\n', (1221, 1268), False, 'import pygame\n'), ((1620, 1682), 'pygame.draw.rect', 'pygame.draw.rect', (['self._display', 'self._front_color', 'self._rect'], {}), '(self._display, self._front_color, self._rect)\n', (1636, 1682), False, 'import pygame\n'), ((2024, 2085), 'pygame.draw.rect', 'pygame.draw.rect', (['self._display', 'self._back_color', 'self._rect'], {}), '(self._display, self._back_color, self._rect)\n', (2040, 2085), False, 'import pygame\n'), ((3350, 3373), 'pygame.Color', 'pygame.Color', (['"""#b5c9a6"""'], {}), "('#b5c9a6')\n", (3362, 3373), False, 'import pygame\n'), ((3424, 3445), 'pygame.Color', 'pygame.Color', (['"""black"""'], {}), "('black')\n", (3436, 3445), False, 'import pygame\n'), ((3447, 3495), 'pygame.Rect', 'pygame.Rect', (['(0)', '(0)', 'display_width', 'score_y_margin'], {}), '(0, 0, display_width, score_y_margin)\n', (3458, 3495), False, 'import pygame\n'), ((4733, 4751), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (4749, 4751), False, 'import pygame\n'), ((9464, 9485), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (9483, 9485), False, 'import pygame\n'), ((5772, 5787), 'random.random', 'random.random', ([], {}), '()\n', (5785, 5787), False, 'import random\n'), ((6548, 6574), 'pygame.mouse.get_pressed', 'pygame.mouse.get_pressed', ([], {}), '()\n', (6572, 6574), False, 'import pygame\n'), ((9226, 9247), 'pygame.Color', 'pygame.Color', (['"""black"""'], {}), "('black')\n", (9238, 9247), False, 'import pygame\n'), ((4900, 4923), 'pygame.Color', 'pygame.Color', (['"""#b5c9a6"""'], {}), 
"('#b5c9a6')\n", (4912, 4923), False, 'import pygame\n'), ((4954, 5032), 'pygame.Rect', 'pygame.Rect', (['(0)', 'score_y_margin', 'display_width', '(display_height - score_y_margin)'], {}), '(0, score_y_margin, display_width, display_height - score_y_margin)\n', (4965, 5032), False, 'import pygame\n'), ((6621, 6643), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (6641, 6643), False, 'import pygame\n'), ((6776, 6797), 'pygame.time.wait', 'pygame.time.wait', (['(200)'], {}), '(200)\n', (6792, 6797), False, 'import pygame\n'), ((6900, 6922), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (6920, 6922), False, 'import pygame\n'), ((7055, 7076), 'pygame.time.wait', 'pygame.time.wait', (['(200)'], {}), '(200)\n', (7071, 7076), False, 'import pygame\n'), ((7119, 7141), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (7139, 7141), False, 'import pygame\n'), ((7274, 7295), 'pygame.time.wait', 'pygame.time.wait', (['(200)'], {}), '(200)\n', (7290, 7295), False, 'import pygame\n'), ((5942, 5962), 'random.choice', 'random.choice', (['pairs'], {}), '(pairs)\n', (5955, 5962), False, 'import random\n'), ((7483, 7504), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (7502, 7504), False, 'import pygame\n'), ((7360, 7382), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (7380, 7382), False, 'import pygame\n'), ((8244, 8265), 'pygame.time.wait', 'pygame.time.wait', (['(500)'], {}), '(500)\n', (8260, 8265), False, 'import pygame\n'), ((8880, 8902), 'pygame.time.wait', 'pygame.time.wait', (['(1500)'], {}), '(1500)\n', (8896, 8902), False, 'import pygame\n')]
import os class config: host = 'zhangxuanyang.zhangxuanyang.ws2.hh-c.brainpp.cn' username = 'admin' port = 5672 exp_name = os.path.dirname(os.path.abspath(__file__)) exp_name = '-'.join(i for i in exp_name.split(os.path.sep) if i); test_send_pipe = exp_name + '-test-send_pipe' test_recv_pipe = exp_name + '-test-recv_pipe' net_cache = 'model_and_data/checkpoint_epoch_50.pth.tar' initial_net_cache = 'model_and_data/checkpoint_epoch_0.pth.tar' layers = 14 edges = 14 model_input_size = (1, 3, 224, 224) # Candidate operators blocks_keys = [ 'none', 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3', 'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5' ] op_num = len(blocks_keys) # Operators encoding NONE = 0 MAX_POOLING_3x3 = 1 AVG_POOL_3x3 = 2 SKIP_CONNECT = 3 SEP_CONV_3x3 = 4 SEP_CONV_5x5 = 5 DIL_CONV_3x3 = 6 DIL_CONV_5x5 = 7 time_limit=None #time_limit=0.050 speed_input_shape=[32,3,224,224] flops_limit=True max_flops=600*1e6 # max_flops=None max_epochs=20 select_num = 10 population_num = 50 mutation_num = 25 m_prob = 0.1 crossover_num = 25 momentum = 0.7 eps = 1e-5 # Enumerate all paths of a single cell paths = [[0, 2, 3, 4, 5], [0, 2, 3, 5], [0, 2, 4, 5], [0, 2, 5], [0, 3, 4, 5], [0, 3, 5], [0, 4, 5], [0, 5], [1, 2, 3, 4, 5], [1, 2, 3, 5], [1, 2, 4, 5], [1, 2, 5], [1, 3, 4, 5], [1, 3, 5], [1, 4, 5], [1, 5], [0, 2, 3, 4], [0, 2, 4], [0, 3, 4], [0, 4], [1, 2, 3, 4], [1, 2, 4], [1, 3, 4], [1, 4], [0, 2, 3], [0, 3], [1, 2, 3], [1, 3], [0, 2], [1, 2]] for i in ['exp_name']: print('{}: {}'.format(i,eval('config.{}'.format(i))))
[ "os.path.abspath" ]
[((157, 182), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (172, 182), False, 'import os\n')]
#!/usr/bin/env python3.5
from time import sleep, time
from datetime import datetime, timedelta
from pid.decorator import pidfile
#from subprocess import call
from RPi import GPIO
import requests
import json
#import config
import logging
import signal
import sys

#13: grün
#16: braun
#19: orange
#20: grün
#21: braun
#26: orange
SENSORS = [
    {
        "GPIOpinIN": 26,
        "GPIOpinOUT": 19,
        "SENSORID": 4,
        "NAME": "Garagentor"
    },
    {
        "GPIOpinIN": 20,
        "GPIOpinOUT": 13,
        "SENSORID": 2,
        "NAME": "Garagentür"
    }
]

# deConz REST API settings
APIKEY = ""         # API key for the deConz REST API
APIHOST = ""        # IP address of the deConz REST API, e.g. "192.168.1.100"
APISCHEME = "http"  # scheme for the deConz REST API, e.g. "http"

# program settings
POLL_INTERVALL = 7  # duration in seconds to wait between polls

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', filename='/var/log/deConzSensors.log')


class mySensor:
    def ping(self):
        GPIO.output(self.gpio_out, 1)
        sumOfStates = 0
        for i in range(10):  # get 10 samples of the door state
            curState = GPIO.input(self.gpio_in)
            logging.debug('current state of ' + self.name + ': ' + str(curState))
            sumOfStates += curState
            sleep(0.1)
        GPIO.output(self.gpio_out, 0)
        if sumOfStates < 5:
            if self.door_open == False:
                logging.info(self.name + ' opened')
                self.door_open = True
                setRemoteSensor(True, self.sensor_id)
        else:
            if self.door_open == True:
                logging.info(self.name + ' closed')
                setRemoteSensor(False, self.sensor_id)
                self.door_open = False
        #delta = (datetime.now() - self.open_since).seconds # delta in seconds between now and the door open state
        #logging.debug(self.name + ': delta: ' + str(delta) + ' – GPIO input ' + str(self.gpio_in))
        #if self.door_open and (delta > (2 * POLL_INTERVALL)): # only set remote sensor when we have 2 consecutive misses
        #    logging.warning(self.name + ' open')
        #    setRemoteSensor(True, self.sensor_id)
        #self.door_open = True

    #def updateLocalSettings(self, channel):
    #    logging.debug(self.name + ': Callback fired for GPIO input ' + str(channel))
    #    self.door_open = False
    #    self.open_since = datetime.now()

    def __init__(self, sensor_config):
        self.door_open = True
        self.open_since = datetime.now()
        self.gpio_in = sensor_config["GPIOpinIN"]
        self.gpio_out = sensor_config["GPIOpinOUT"]
        self.sensor_id = sensor_config["SENSORID"]
        self.name = sensor_config["NAME"]
        GPIO.setup(sensor_config["GPIOpinIN"], GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
        GPIO.setup(sensor_config["GPIOpinOUT"], GPIO.OUT, initial=GPIO.LOW)
        #GPIO.add_event_detect(sensor_config["GPIOpinIN"], GPIO.RISING, callback=self.updateLocalSettings, bouncetime=250)


def terminate(signum, frame):
    logging.info("******************** Terminating ******************** ")
    logging.debug('Signal handler called with signal ' + str(signum))
    GPIO.cleanup()
    logging.info("************************ Exit *********************** ")
    sys.exit(0)


def init():
    logging.info("******************** Starting up ******************** ")
    signal.signal(signal.SIGINT, terminate)
    signal.signal(signal.SIGTERM, terminate)
    GPIO.setmode(GPIO.BCM)
    mySensors = []
    for sensor in SENSORS:
        logging.info("adding sensor '" + sensor["NAME"] + "' at GPIO pin " + str(sensor["GPIOpinIN"]))
        mySensors.append(mySensor(sensor))
    logging.info("***************************************************** ")
    return mySensors


def setRemoteSensor(open, sensor_id):
    url = APISCHEME + "://" + APIHOST + "/api/" + APIKEY + "/sensors/" + str(sensor_id) + "/state"
    payload = {'open': str(open).lower()}
    r = requests.put(url, data=json.dumps(payload))
    r.raise_for_status()
    logging.debug('setting remote sensor ' + str(sensor_id) + ' to ' + str(open))


# creating a PID file to prevent double execution of this script
@pidfile()
def main():
    sensors = init()  # initialize everything
    while True:  # idle loop
        for sensor in sensors:
            sensor.ping()
            sleep(POLL_INTERVALL / len(sensors))  # sleep for the duration given as POLL_INTERVALL


if __name__ == '__main__':
    main()
[ "logging.basicConfig", "RPi.GPIO.cleanup", "signal.signal", "RPi.GPIO.output", "RPi.GPIO.setup", "json.dumps", "time.sleep", "datetime.datetime.now", "RPi.GPIO.input", "sys.exit", "pid.decorator.pidfile", "logging.info", "RPi.GPIO.setmode" ]
[((858, 998), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(levelname)s - %(message)s"""', 'filename': '"""/var/log/deConzSensors.log"""'}), "(level=logging.INFO, format=\n '%(asctime)s - %(levelname)s - %(message)s', filename=\n '/var/log/deConzSensors.log')\n", (877, 998), False, 'import logging\n'), ((4230, 4239), 'pid.decorator.pidfile', 'pidfile', ([], {}), '()\n', (4237, 4239), False, 'from pid.decorator import pidfile\n'), ((3081, 3151), 'logging.info', 'logging.info', (['"""******************** Terminating ******************** """'], {}), "('******************** Terminating ******************** ')\n", (3093, 3151), False, 'import logging\n'), ((3226, 3240), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (3238, 3240), False, 'from RPi import GPIO\n'), ((3245, 3315), 'logging.info', 'logging.info', (['"""************************ Exit *********************** """'], {}), "('************************ Exit *********************** ')\n", (3257, 3315), False, 'import logging\n'), ((3320, 3331), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3328, 3331), False, 'import sys\n'), ((3349, 3419), 'logging.info', 'logging.info', (['"""******************** Starting up ******************** """'], {}), "('******************** Starting up ******************** ')\n", (3361, 3419), False, 'import logging\n'), ((3424, 3463), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'terminate'], {}), '(signal.SIGINT, terminate)\n', (3437, 3463), False, 'import signal\n'), ((3468, 3508), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'terminate'], {}), '(signal.SIGTERM, terminate)\n', (3481, 3508), False, 'import signal\n'), ((3513, 3535), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (3525, 3535), False, 'from RPi import GPIO\n'), ((3732, 3802), 'logging.info', 'logging.info', (['"""***************************************************** """'], {}), "('***************************************************** ')\n", (3744, 3802), False, 'import logging\n'), ((1034, 1063), 'RPi.GPIO.output', 'GPIO.output', (['self.gpio_out', '(1)'], {}), '(self.gpio_out, 1)\n', (1045, 1063), False, 'from RPi import GPIO\n'), ((1348, 1377), 'RPi.GPIO.output', 'GPIO.output', (['self.gpio_out', '(0)'], {}), '(self.gpio_out, 0)\n', (1359, 1377), False, 'from RPi import GPIO\n'), ((2553, 2567), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2565, 2567), False, 'from datetime import datetime, timedelta\n'), ((2771, 2846), 'RPi.GPIO.setup', 'GPIO.setup', (["sensor_config['GPIOpinIN']", 'GPIO.IN'], {'pull_up_down': 'GPIO.PUD_DOWN'}), "(sensor_config['GPIOpinIN'], GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n", (2781, 2846), False, 'from RPi import GPIO\n'), ((2855, 2922), 'RPi.GPIO.setup', 'GPIO.setup', (["sensor_config['GPIOpinOUT']", 'GPIO.OUT'], {'initial': 'GPIO.LOW'}), "(sensor_config['GPIOpinOUT'], GPIO.OUT, initial=GPIO.LOW)\n", (2865, 2922), False, 'from RPi import GPIO\n'), ((1174, 1198), 'RPi.GPIO.input', 'GPIO.input', (['self.gpio_in'], {}), '(self.gpio_in)\n', (1184, 1198), False, 'from RPi import GPIO\n'), ((1329, 1339), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (1334, 1339), False, 'from time import sleep, time\n'), ((4035, 4054), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (4045, 4054), False, 'import json\n'), ((1462, 1497), 'logging.info', 'logging.info', (["(self.name + ' opened')"], {}), "(self.name + ' opened')\n", (1474, 1497), False, 'import logging\n'), ((1652, 1687), 'logging.info', 
'logging.info', (["(self.name + ' closed')"], {}), "(self.name + ' closed')\n", (1664, 1687), False, 'import logging\n')]
from shop.forms import UserForm from django.views import generic from django.urls import reverse_lazy from django.shortcuts import render, redirect, get_object_or_404 from django.contrib.auth import authenticate, login, logout from django.contrib.auth.models import auth from .models import Product, Contact, Category, Product, Order, OrderItem from django.contrib import messages from django.views.decorators.csrf import ensure_csrf_cookie from math import ceil import json from shop.models import User from django.views.decorators.csrf import csrf_exempt # from PayTm import checksum # Create your views here. from django.http import HttpResponse from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger MERCHANT_KEY = 'Your-Merchant-Key-Here' def index(request, category_slug=None): category = None categories = Category.objects.all() products = Product.objects.filter(available=True) if category_slug: category = get_object_or_404(Category, slug=category_slug) products = products.filter(category=category) page = request.GET.get('page') paginator = Paginator(products, 6) try: products = paginator.page(page) except PageNotAnInteger: products = paginator.page(1) except EmptyPage: products = paginator.page(1) if request.user: print(request.user) pass # wishlist = Wishlist.objects.filter(user=request.user) return render( request, 'shop/index.html', { 'category': category, 'categories': categories, 'products': products, # 'wishlist': wishlist } ) else: return render( request, 'shop/index.html', { 'category': category, 'categories': categories, 'products': products, } ) def searchMatch(query, item): '''return true only if query matches the item''' if query in item.description.lower() or query in item.name.lower(): return True else: return False def search(request): query = request.GET.get('search') allProds = [] catprods = Product.objects.values('category', 'id') cats = {item['category'] for item in catprods} for cat in cats: prodtemp = Product.objects.filter(category=cat) prod = [item for item in prodtemp if searchMatch(query, item)] n = len(prod) nSlides = n // 4 + ceil((n / 4) - (n // 4)) if len(prod) != 0: allProds.append([prod, range(1, nSlides), nSlides]) params = { 'products': allProds, "msg": "" } if len(allProds) == 0 or len(query) < 4: params = { 'msg': "Please make sure to enter relevant search query" } return render(request, 'shop/search.html', params) def about(request): return render(request, 'shop/about.html') def contact(request): thank = False if request.method == "POST": name = request.POST.get('name', '') email = request.POST.get('email', '') phone = request.POST.get('phone', '') desc = request.POST.get('desc', '') contact = Contact(name=name, email=email, phone=phone, desc=desc) contact.save() thank = True return render(request, 'shop/contact.html', {'thank': thank}) def tracker(request): if request.method == "POST": orderId = request.POST.get('orderId', '') email = request.POST.get('email', '') try: order = Order.objects.filter(order_id=orderId, email=email) if len(order) > 0: update = OrderUpdate.objects.filter(order_id=orderId) updates = [] for item in update: updates.append( { 'text': item.update_desc, 'time': item.timestamp } ) response = json.dumps( { "status": "success", "updates": updates, "itemsJson": order[0].items_json }, default=str ) return HttpResponse(response) else: return HttpResponse('{"status":"noitem"}') except Exception as e: return HttpResponse('{"status":"error"}') return render(request, 'shop/tracker.html') def productView(request, myid): # Fetch 
the product using the id product = Product.objects.filter(id=myid) return render(request, 'shop/prodView.html', {'product': product[0]}) def checkout(request): if request.method == "POST": items_json = request.POST.get('itemsJson', '') name = request.POST.get('name', '') amount = request.POST.get('amount', '') email = request.POST.get('email', '') address = request.POST.get('address1', '') + \ " " + request.POST.get('address2', '') city = request.POST.get('city', '') state = request.POST.get('state', '') zip_code = request.POST.get('zip_code', '') phone = request.POST.get('phone', '') order = Order( name=name, email=email, address=address, state=state, # zip_code=zip_code, # phone=phone, # amount=amount ) order.save() order_item = OrderItem( order=order, price=amount, product_id=1, ) order_item.save() thank = True # id = order.order_id return render(request, 'shop/checkout.html', {'thank':thank, 'id': order.id}) # Request paytm to transfer the amount to your account after payment by user param_dict = { 'MID': 'Your-Merchant-Id-Here', 'ORDER_ID': str(order.order_id), 'TXN_AMOUNT': str(amount), 'CUST_ID': email, 'INDUSTRY_TYPE_ID': 'Retail', 'WEBSITE': 'WEBSTAGING', 'CHANNEL_ID': 'WEB', 'CALLBACK_URL': 'http://127.0.0.1:8000/handlerequest/', } # param_dict['CHECKSUMHASH'] = checksum.generate_checksum(param_dict, MERCHANT_KEY) # return render(request, '/paytm.html', {'param_dict': param_dict}) return render(request, 'shop/checkout.html') def signup(request): if request.method == 'POST': print('psot') form = UserForm(request.POST) if form.is_valid(): user = form.save(commit=False) # commit=False tells Django that "Don't send this to database yet. # I have more things I want to do with it." # import pdb;pdb.set_trace() if form.cleaned_data['type']=='Vendor': user.is_staff = True # Set the user object here user.save() return redirect("/admin/login") else: user.is_staff = False user.save() return redirect("/login") # Now you can send it to DB else: print('in valid vin vlaidpsot') form = UserForm() print(form.errors) return render( request, 'shop/signup.html',{ 'form':form, 'errors':form.errors }) else: print('hello jasfdjlasdjfs') form = UserForm() return render( request, 'shop/signup.html',{ 'form':form }) @csrf_exempt def handlerequest(request): # paytm will send you post request here form = request.POST response_dict = {} for i in form.keys(): response_dict[i] = form[i] if i == 'CHECKSUMHASH': checksum = form[i] # verify = Checksum.verify_checksum(response_dict, MERCHANT_KEY, checksum) # if verify: # if response_dict['RESPCODE'] == '01': # print('order successful') # else: # print('order was not successful because' + response_dict['RESPMSG']) return render(request, 'shop/paymentstatus.html', {'response': response_dict}) def vendor(request): user =User.objects.get(id=request.user.id) menu = {} return render(request, 'shop/restprofile.html', {'user':user}) from django.views.generic.edit import UpdateView class UserUpdate(UpdateView): model = User fields = ['name','email','first_name','last_name'] template_name_suffix = '_update_form'
[ "django.shortcuts.render", "math.ceil", "shop.forms.UserForm", "django.http.HttpResponse", "django.shortcuts.get_object_or_404", "json.dumps", "shop.models.User.objects.get", "django.shortcuts.redirect", "django.core.paginator.Paginator" ]
[((1142, 1164), 'django.core.paginator.Paginator', 'Paginator', (['products', '(6)'], {}), '(products, 6)\n', (1151, 1164), False, 'from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n'), ((2977, 3020), 'django.shortcuts.render', 'render', (['request', '"""shop/search.html"""', 'params'], {}), "(request, 'shop/search.html', params)\n", (2983, 3020), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((3058, 3092), 'django.shortcuts.render', 'render', (['request', '"""shop/about.html"""'], {}), "(request, 'shop/about.html')\n", (3064, 3092), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((3490, 3544), 'django.shortcuts.render', 'render', (['request', '"""shop/contact.html"""', "{'thank': thank}"], {}), "(request, 'shop/contact.html', {'thank': thank})\n", (3496, 3544), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((4729, 4765), 'django.shortcuts.render', 'render', (['request', '"""shop/tracker.html"""'], {}), "(request, 'shop/tracker.html')\n", (4735, 4765), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((4900, 4962), 'django.shortcuts.render', 'render', (['request', '"""shop/prodView.html"""', "{'product': product[0]}"], {}), "(request, 'shop/prodView.html', {'product': product[0]})\n", (4906, 4962), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((6711, 6748), 'django.shortcuts.render', 'render', (['request', '"""shop/checkout.html"""'], {}), "(request, 'shop/checkout.html')\n", (6717, 6748), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((8730, 8801), 'django.shortcuts.render', 'render', (['request', '"""shop/paymentstatus.html"""', "{'response': response_dict}"], {}), "(request, 'shop/paymentstatus.html', {'response': response_dict})\n", (8736, 8801), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((8839, 8875), 'shop.models.User.objects.get', 'User.objects.get', ([], {'id': 'request.user.id'}), '(id=request.user.id)\n', (8855, 8875), False, 'from shop.models import User\n'), ((8915, 8971), 'django.shortcuts.render', 'render', (['request', '"""shop/restprofile.html"""', "{'user': user}"], {}), "(request, 'shop/restprofile.html', {'user': user})\n", (8921, 8971), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((984, 1031), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Category'], {'slug': 'category_slug'}), '(Category, slug=category_slug)\n', (1001, 1031), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((1499, 1609), 'django.shortcuts.render', 'render', (['request', '"""shop/index.html"""', "{'category': category, 'categories': categories, 'products': products}"], {}), "(request, 'shop/index.html', {'category': category, 'categories':\n categories, 'products': products})\n", (1505, 1609), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((1792, 1902), 'django.shortcuts.render', 'render', (['request', '"""shop/index.html"""', "{'category': category, 'categories': categories, 'products': products}"], {}), "(request, 'shop/index.html', {'category': category, 'categories':\n categories, 'products': products})\n", (1798, 1902), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((5985, 6056), 'django.shortcuts.render', 'render', (['request', '"""shop/checkout.html"""', "{'thank': thank, 'id': order.id}"], {}), "(request, 
'shop/checkout.html', {'thank': thank, 'id': order.id})\n", (5991, 6056), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((6856, 6878), 'shop.forms.UserForm', 'UserForm', (['request.POST'], {}), '(request.POST)\n', (6864, 6878), False, 'from shop.forms import UserForm\n'), ((8022, 8032), 'shop.forms.UserForm', 'UserForm', ([], {}), '()\n', (8030, 8032), False, 'from shop.forms import UserForm\n'), ((8049, 8100), 'django.shortcuts.render', 'render', (['request', '"""shop/signup.html"""', "{'form': form}"], {}), "(request, 'shop/signup.html', {'form': form})\n", (8055, 8100), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((2627, 2647), 'math.ceil', 'ceil', (['(n / 4 - n // 4)'], {}), '(n / 4 - n // 4)\n', (2631, 2647), False, 'from math import ceil\n'), ((7704, 7714), 'shop.forms.UserForm', 'UserForm', ([], {}), '()\n', (7712, 7714), False, 'from shop.forms import UserForm\n'), ((7767, 7841), 'django.shortcuts.render', 'render', (['request', '"""shop/signup.html"""', "{'form': form, 'errors': form.errors}"], {}), "(request, 'shop/signup.html', {'form': form, 'errors': form.errors})\n", (7773, 7841), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((4526, 4548), 'django.http.HttpResponse', 'HttpResponse', (['response'], {}), '(response)\n', (4538, 4548), False, 'from django.http import HttpResponse\n'), ((4592, 4627), 'django.http.HttpResponse', 'HttpResponse', (['"""{"status":"noitem"}"""'], {}), '(\'{"status":"noitem"}\')\n', (4604, 4627), False, 'from django.http import HttpResponse\n'), ((4680, 4714), 'django.http.HttpResponse', 'HttpResponse', (['"""{"status":"error"}"""'], {}), '(\'{"status":"error"}\')\n', (4692, 4714), False, 'from django.http import HttpResponse\n'), ((7401, 7425), 'django.shortcuts.redirect', 'redirect', (['"""/admin/login"""'], {}), "('/admin/login')\n", (7409, 7425), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((7556, 7574), 'django.shortcuts.redirect', 'redirect', (['"""/login"""'], {}), "('/login')\n", (7564, 7574), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((4214, 4319), 'json.dumps', 'json.dumps', (["{'status': 'success', 'updates': updates, 'itemsJson': order[0].items_json}"], {'default': 'str'}), "({'status': 'success', 'updates': updates, 'itemsJson': order[0].\n items_json}, default=str)\n", (4224, 4319), False, 'import json\n')]
import FWCore.ParameterSet.Config as cms
from SimMuon.GEMDigitizer.muonGEMDigis_cfi import *
from SimMuon.GEMDigitizer.muonGEMPadDigis_cfi import *
from SimMuon.GEMDigitizer.muonGEMPadDigiClusters_cfi import *
muonGEMDigiTask = cms.Task(simMuonGEMDigis, simMuonGEMPadDigis, simMuonGEMPadDigiClusters)
muonGEMDigi = cms.Sequence(muonGEMDigiTask)
[ "FWCore.ParameterSet.Config.Task", "FWCore.ParameterSet.Config.Sequence" ]
[((230, 302), 'FWCore.ParameterSet.Config.Task', 'cms.Task', (['simMuonGEMDigis', 'simMuonGEMPadDigis', 'simMuonGEMPadDigiClusters'], {}), '(simMuonGEMDigis, simMuonGEMPadDigis, simMuonGEMPadDigiClusters)\n', (238, 302), True, 'import FWCore.ParameterSet.Config as cms\n'), ((317, 346), 'FWCore.ParameterSet.Config.Sequence', 'cms.Sequence', (['muonGEMDigiTask'], {}), '(muonGEMDigiTask)\n', (329, 346), True, 'import FWCore.ParameterSet.Config as cms\n')]
# -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available. Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ # noqa """A distributed rate limiter rely on redis based on `token bucket <https://en.wikipedia.org/wiki/Token_bucket>` algorithm Usage ~~~~~ .. code-block:: python # Init a redis connection pool import redis redisdb = redis.Redis() rate = RateLimiter(redisdb, identifier='ip=127.0.0.1 path=/get_user_info/') # Allow 10 requests every 1 minute # period also accepts seconds/minutes/hours/days as key rate.add_rule(tokens=10, period={'minute': 1}) # You could add multiple rules for on limiter # rate.add_rule(tokens=200, period={'hour': 1}) print rate.acquire() # returns {'allowed': True, 'remaining_tokens': 9.0} """ import time import logging from redis import WatchError logger = logging.getLogger('root') class BaseRateLimiter(object): def __init__(self, redisdb, identifier, namespace='', tokens=None, period=None): """Init a RateLimiter class :param redisdb: a `redis.Redis` instance :param str identifier: identifier for the limiter, such as an user_id etc. :param str namespace: namespace for redis keys :param int tokens: maxium tokens for one time period :param dict period: dict, time period, such as {'minutes': 10} """ self.redisdb = redisdb self.identifier = identifier self.namespace = namespace self.rules = [] # Add rule if tokens is not None and period: self.add_rule(tokens, period) self.prepare() def prepare(self): """Prepare to work """ pass def add_rule(self, tokens, period): """Add multiple rules for this limiter, see `__init__` for parameter details """ rule = Rule(tokens, Rule.period_to_seonds(period)) self.rules.append(rule) def acquire(self, tokens=1): """Acquire for a single request :param int tokens: tokens to consume for this request, default to 1 """ if not self.rules: return {'allowed': True, 'remaining_tokens': 0} logger.debug('Start acquiring tokens by given rules, this operation may have several ' 'communications with redis.') rets = [] for rule in self.rules: logger.debug('Acquiring by single rule, rule=%s tokens=%s', rule, tokens) ret = self.acquire_by_single_rule(rule, tokens) logger.debug('Acquiring finished, result=%s', ret) if not ret['allowed']: logger.debug('Acquiring denied by given rule, rule=%s.', rule) return ret rets.append(ret) logger.debug('Acquiring successed.') return { 'allowed': True, 'remaining_tokens': min(x['remaining_tokens'] for x in rets) } class RateLimiter(BaseRateLimiter): """Rate limiter class """ def acquire_by_single_rule(self, rule, tokens=1): """Acquire an request quota from limiter :param rule: `Rule` object :param int tokens: tokens to be consumed, default 1 :returns: a dict of `allowed` and `remaining_tokens` - allowed: wheather this request is allowed - remaining_tokens: remaining_tokens for this rule's period """ rk_tokens = 'rlim::%s::tokens::%s::r%s' % (self.namespace, self.identifier, rule.to_string()) rk_last_ts = 
'rlim::%s::last_ts::%s::r%s' % (self.namespace, self.identifier, rule.to_string()) rule_ttl_seconds = rule.period_seconds + 10 try: rv_last_ts = float(self.redisdb.get(rk_last_ts)) rv_tokens = float(self.redisdb.get(rk_tokens)) except Exception: # Inintilize values if not exists rv_last_ts = time.time() rv_tokens = rule.tokens self.redisdb.set(rk_tokens, rv_tokens, ex=rule_ttl_seconds) self.redisdb.set(rk_last_ts, '%.3f' % rv_last_ts, ex=rule_ttl_seconds) # Add fresh tokens since last timestamp with self.redisdb.pipeline() as pipe: pipe.watch(rk_last_ts) # Float precision may cause this value negative # Add token by passed time senconds_passed = max(time.time() - rv_last_ts, 0) fresh_tokens = rule.fresh_tokens_by_seconds(senconds_passed) remaining_tokens = rv_tokens # Only add fresh token when it's greater than 1 # Passed time maybe less than 1, fresh_token more than 1 if fresh_tokens >= 1 and remaining_tokens < rule.tokens: # Never add let tokens more than rule.tokens fresh_tokens = min(fresh_tokens, rule.tokens - remaining_tokens) pipe.multi() pipe.incrbyfloat(rk_tokens, fresh_tokens) pipe.expire(rk_tokens, rule_ttl_seconds) pipe.set(rk_last_ts, '%.3f' % time.time(), ex=rule_ttl_seconds) # Ignore WatchError try: pipe.execute() except WatchError: pass # Remove tokens, if tokens to consume are bigger than remaining tokens, do nothing # and return Flase remaining_tokens = self.redisdb.incrbyfloat(rk_tokens, -tokens) over_limit = False if remaining_tokens < 0: remaining_tokens = self.redisdb.incrbyfloat(rk_tokens, tokens) over_limit = True return { 'allowed': not over_limit, 'remaining_tokens': max(remaining_tokens, 0) } class SimpleLimiter(BaseRateLimiter): def prepare(self): self.simple_incr = self.redisdb.register_script('''\ local current current = redis.call("incr", KEYS[1]) if tonumber(current) == 1 then redis.call("expire", KEYS[1], ARGV[1]) end return current''') def acquire_by_single_rule(self, rule, tokens=1): """Acquire an request quota from limiter :param rule: `Rule` object :param int tokens: tokens to be consumed, default 1 :returns: a dict of `allowed` and `remaining_tokens` - allowed: wheather this request is allowed - remaining_tokens: remaining_tokens for this rule's period """ # TODO: Should we use ( current timestamp / period_seconds ) as part of the redis key? 
rk_counter = 'rlim::%s::scounter::%s::r%s' % (self.namespace, self.identifier, rule.to_string()) old_cnt = self.redisdb.get(rk_counter) if old_cnt is not None and int(old_cnt) >= rule.tokens: return { 'allowed': False, 'remaining_tokens': 0.0 } new_cnt = self.simple_incr(keys=[rk_counter], args=[rule.period_seconds]) return { 'allowed': True, 'remaining_tokens': max(0, rule.tokens - new_cnt) } class Rule(object): """Rule class for RateLimiter""" time_unit_to_seconds = { 'second': 1, 'minute': 60, 'hour': 3600, 'day': 3600 * 24, } @classmethod def period_to_seonds(cls, period): for unit, seconds in cls.time_unit_to_seconds.items(): if unit in period: period_seconds = period[unit] * seconds break else: raise ValueError(('Invalid period %s given, should be ' '{"second/minute/hour/day": NUMBER}') % period) return period_seconds def __init__(self, tokens, period_seconds): self.tokens = tokens # Precision of seconds only to second self.period_seconds = int(period_seconds) if tokens < 0: logger.warn('Will not allow any acquire because given tokens < 0') def to_string(self): return "%s_%s" % (self.tokens, self.period_seconds) def fresh_tokens_by_seconds(self, seconds): return int(self.rate_per_seconds * seconds) @property def rate_per_seconds(self): return self.tokens / float(self.period_seconds) def __repr__(self): return '<Rule %s>' % self.to_string()
[ "logging.getLogger", "time.time" ]
[((1469, 1494), 'logging.getLogger', 'logging.getLogger', (['"""root"""'], {}), "('root')\n", (1486, 1494), False, 'import logging\n'), ((4501, 4512), 'time.time', 'time.time', ([], {}), '()\n', (4510, 4512), False, 'import time\n'), ((4968, 4979), 'time.time', 'time.time', ([], {}), '()\n', (4977, 4979), False, 'import time\n'), ((5643, 5654), 'time.time', 'time.time', ([], {}), '()\n', (5652, 5654), False, 'import time\n')]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Copyright 2019 <NAME>
#  MIT License (https://opensource.org/licenses/MIT)

import logging

import numpy as np
import torch

from parallel_wavegan.layers import Conv1d
from parallel_wavegan.layers import Conv1d1x1
from parallel_wavegan.layers import Conv2d
from parallel_wavegan.layers import ConvInUpsampleNetwork
from parallel_wavegan.layers import UpsampleNetwork

logging.basicConfig(
    level=logging.DEBUG, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")


def test_conv_initialization():
    conv = Conv1d(10, 10, 3, bias=True)
    np.testing.assert_array_equal(conv.bias.data.numpy(),
                                  np.zeros_like(conv.bias.data.numpy()))
    conv1x1 = Conv1d1x1(10, 10, bias=True)
    np.testing.assert_array_equal(conv1x1.bias.data.numpy(),
                                  np.zeros_like(conv1x1.bias.data.numpy()))
    kernel_size = (10, 10)
    conv2d = Conv2d(10, 10, kernel_size, bias=True)
    np.testing.assert_array_equal(conv2d.weight.data.numpy(),
                                  np.ones_like(conv2d.weight.data.numpy()) / np.prod(kernel_size))
    np.testing.assert_array_equal(conv2d.bias.data.numpy(),
                                  np.zeros_like(conv2d.bias.data.numpy()))
    kernel_size = (1, 10)
    conv2d = Conv2d(10, 10, kernel_size, bias=True)
    np.testing.assert_array_equal(conv2d.weight.data.numpy(),
                                  np.ones_like(conv2d.weight.data.numpy()) / np.prod(kernel_size))
    np.testing.assert_array_equal(conv2d.bias.data.numpy(),
                                  np.zeros_like(conv2d.bias.data.numpy()))


def test_upsample():
    length = 10
    scales = [4, 4]
    x = torch.randn(1, 10, length)
    upsample = UpsampleNetwork(scales)
    y = upsample(x)
    assert x.size(-1) * np.prod(scales) == y.size(-1)

    for aux_context_window in [0, 1, 2, 3]:
        conv_upsample = ConvInUpsampleNetwork(scales,
                                              aux_channels=x.size(1),
                                              aux_context_window=aux_context_window)
        y = conv_upsample(x)
        assert (x.size(-1) - 2 * aux_context_window) * np.prod(scales) == y.size(-1)
[ "logging.basicConfig", "parallel_wavegan.layers.UpsampleNetwork", "numpy.prod", "parallel_wavegan.layers.Conv1d1x1", "parallel_wavegan.layers.Conv1d", "parallel_wavegan.layers.Conv2d", "torch.randn" ]
[((418, 536), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"""'}), "(level=logging.DEBUG, format=\n '%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s')\n", (437, 536), False, 'import logging\n'), ((582, 610), 'parallel_wavegan.layers.Conv1d', 'Conv1d', (['(10)', '(10)', '(3)'], {'bias': '(True)'}), '(10, 10, 3, bias=True)\n', (588, 610), False, 'from parallel_wavegan.layers import Conv1d\n'), ((756, 784), 'parallel_wavegan.layers.Conv1d1x1', 'Conv1d1x1', (['(10)', '(10)'], {'bias': '(True)'}), '(10, 10, bias=True)\n', (765, 784), False, 'from parallel_wavegan.layers import Conv1d1x1\n'), ((962, 1000), 'parallel_wavegan.layers.Conv2d', 'Conv2d', (['(10)', '(10)', 'kernel_size'], {'bias': '(True)'}), '(10, 10, kernel_size, bias=True)\n', (968, 1000), False, 'from parallel_wavegan.layers import Conv2d\n'), ((1336, 1374), 'parallel_wavegan.layers.Conv2d', 'Conv2d', (['(10)', '(10)', 'kernel_size'], {'bias': '(True)'}), '(10, 10, kernel_size, bias=True)\n', (1342, 1374), False, 'from parallel_wavegan.layers import Conv2d\n'), ((1737, 1763), 'torch.randn', 'torch.randn', (['(1)', '(10)', 'length'], {}), '(1, 10, length)\n', (1748, 1763), False, 'import torch\n'), ((1779, 1802), 'parallel_wavegan.layers.UpsampleNetwork', 'UpsampleNetwork', (['scales'], {}), '(scales)\n', (1794, 1802), False, 'from parallel_wavegan.layers import UpsampleNetwork\n'), ((1140, 1160), 'numpy.prod', 'np.prod', (['kernel_size'], {}), '(kernel_size)\n', (1147, 1160), True, 'import numpy as np\n'), ((1514, 1534), 'numpy.prod', 'np.prod', (['kernel_size'], {}), '(kernel_size)\n', (1521, 1534), True, 'import numpy as np\n'), ((1847, 1862), 'numpy.prod', 'np.prod', (['scales'], {}), '(scales)\n', (1854, 1862), True, 'import numpy as np\n'), ((2215, 2230), 'numpy.prod', 'np.prod', (['scales'], {}), '(scales)\n', (2222, 2230), True, 'import numpy as np\n')]
# Copyright 2020 The Tilt Brush Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helpers for 3d printing.""" import os import re import sys import math import pprint import shutil import itertools import subprocess from collections import Counter import numpy try: from tiltbrush.tilt import Tilt except ImportError: print("You need the Tilt Brush Toolkit (https://github.com/googlevr/tilt-brush-toolkit)") print("and then put its Python directory in your PYTHONPATH.") sys.exit(1) from tbdata.brush_lookup import BrushLookup # Convert strokes for 3d printing. # True Don't touch these strokes # False Remove these strokes from the sketch # <name> Replace the brush for these strokes # names can also be guids, which is useful when the name is ambiguous BRUSH_REPLACEMENTS = [ # Good brushes ('SquarePaper', True), ('ThickGeometry', True), ('Wire', True), # Brushes that should be replaced ('TaperedMarker', 'ThickGeometry'), ('OilPaint', 'ThickGeometry'), ('Ink', 'ThickGeometry'), ('Marker', 'ThickGeometry'), ('Paper', 'ThickGeometry'), ('FlatDeprecated','ThickGeometry'), # Questionable ('Highlighter', 'ThickGeometry'), ('Light', 'Wire'), # Remove particles ('Smoke', None), ('Snow', None), ('Embers', None), ('Stars', None), # Remove animated ('Fire', None), # Remove shader-based ('Plasma', None), ('Rainbow', None), ('Streamers', None), ] # ---------------------------------------------------------------------- # Little utilities # ---------------------------------------------------------------------- def msg(text): sys.stdout.write("%-79s\r" % text[:79]) sys.stdout.flush() def msgln(text): sys.stdout.write("%-79s\n" % text[:79]) sys.stdout.flush() def rgb8_to_hsl(rgb): """Takes a rgb8 tuple, returns a hsl tuple.""" HUE_MAX = 6 r = rgb[0] / 255.0 g = rgb[1] / 255.0 b = rgb[2] / 255.0 cmin = min(r, g, b) cmax = max(r, g, b) delta = cmax - cmin h = 0 s = 0 l = (cmax + cmin) if delta != 0: if l < 0.5: s = delta / l else: s = delta / (2 - l) if r == cmax: h = (g - b) / delta elif g == cmax: h = 2 + (b - r) / delta elif b == cmax: h = 4 + (r - g) / delta return h, s, l # ---------------------------------------------------------------------- # Brush conversion # ---------------------------------------------------------------------- def get_replacements_by_guid(replacements_by_name): """Returns a lookup table that is by-guid rather than by-name.""" brush_lookup = BrushLookup.get() def guid_or_name_to_guid(guid_or_name): if guid_or_name in brush_lookup.guid_to_name: return guid_or_name elif guid_or_name in brush_lookup.name_to_guids: return brush_lookup.get_unique_guid(guid_or_name) else: raise LookupError("Not a known brush or brush guid: %r" % guid_or_name) dct = {} for before, after in replacements_by_name: before_guid = guid_or_name_to_guid(before) if after is True: after_guid = before_guid elif after is None: after_guid = None else: after_guid = guid_or_name_to_guid(after) dct[before_guid] = after_guid return dct def convert_brushes(tilt, replacements_by_name, show_removed=False): """Convert brushes to 
3d-printable versions, or remove their strokes from the tilt.""" replacements = get_replacements_by_guid(replacements_by_name) brush_lookup = BrushLookup.get() with tilt.mutable_metadata() as dct: index_to_guid = dct['BrushIndex'] # First, show us what brushes the tilt file uses used_guids = Counter() for stroke in tilt.sketch.strokes: guid = index_to_guid[stroke.brush_idx] used_guids[guid] += 1 print("Brushes used:") for guid, n in sorted(list(used_guids.items()), key=lambda p:-p[1]): print(" %5d %s" % (n, brush_lookup.guid_to_name.get(guid))) sys.stdout.flush() del used_guids index_to_new_index = {} for i, guid in enumerate(index_to_guid): name = brush_lookup.guid_to_name.get(guid, guid) try: new_guid = replacements[guid] except KeyError: print("%d: Don't know what to do with brush %s" % (i, name)) index_to_new_index[i] = i else: new_name = brush_lookup.guid_to_name.get(new_guid, new_guid) if new_guid is None: print("%d: Remove %s" % (i, name)) index_to_new_index[i] = None else: if guid == new_guid: print("%d: Keep %s" % (i, name)) elif name == new_name: print("%d: Replace %s/%s -> %s/%s" % (i, name, guid, new_name, new_guid)) else: print("%d: Replace %s -> %s" % (i, name, new_name)) try: new_idx = index_to_guid.index(new_guid) except ValueError: new_idx = len(index_to_guid) index_to_guid.append(new_guid) index_to_new_index[i] = new_idx brush_indices_to_remove = set(i for (i, new_i) in list(index_to_new_index.items()) if new_i is None) if brush_indices_to_remove: old_len = len(tilt.sketch.strokes) if show_removed: # Render in magenta instead of removing for stroke in tilt.sketch.strokes: if stroke.brush_idx in brush_indices_to_remove: stroke.brush_color = (1, 0, 1, 1) else: stroke.brush_color = stroke.brush_color else: tilt.sketch.strokes[:] = [s for s in tilt.sketch.strokes if s.brush_idx not in brush_indices_to_remove] new_len = len(tilt.sketch.strokes) print("Strokes %d -> %d" % (old_len, new_len)) for stroke in tilt.sketch.strokes: new_idx = index_to_new_index[stroke.brush_idx] # Might be none if it's a removed brush if new_idx is not None: stroke.brush_idx = new_idx # ---------------------------------------------------------------------- # Stroke simplification # ---------------------------------------------------------------------- def calculate_pos_error(cp0, cp1, middle_cps): if len(middle_cps) == 0: return 0 strip_length = cp1._dist - cp0._dist if strip_length <= 0: return 0 max_pos_error = 0 for i, cp in enumerate(middle_cps): t = (cp._dist - cp0._dist) / strip_length pos_interpolated = t * cp0._pos + (1-t) * cp1._pos pos_error = numpy.linalg.norm((pos_interpolated - cp._pos)) if pos_error > max_pos_error: max_pos_error = pos_error return max_pos_error def simplify_stroke(stroke, max_error): # Do greedy optimization of stroke. 
REQUIRED_END_CPS = 1 # or 2 keep_cps = [] toss_cps = [] # The current set of candidates to toss n = len(stroke.controlpoints) brush_size = stroke.brush_size for i, cp in enumerate(stroke.controlpoints): cp._pos = numpy.array(cp.position) if i == 0: cp._dist = 0 else: prev_cp = stroke.controlpoints[i-1] cp._dist = prev_cp._dist + numpy.linalg.norm(prev_cp._pos - cp._pos) if REQUIRED_END_CPS <= i < n - REQUIRED_END_CPS: pos_error = calculate_pos_error(keep_cps[-1], cp, toss_cps) keep = (pos_error > max_error * stroke.brush_size) #print " %3d: %s %f %f" % (i, keep, pos_error, stroke.brush_size * .2) else: keep = True #print " %3d: True (End)" % i if keep: keep_cps.append(cp) toss_cps = [] else: toss_cps.append(cp) stroke.controlpoints[:] = keep_cps def reduce_control_points(tilt, max_error): # If debug_simplify, the resulting .tilt file shows both the old and the new before_cp = 0 after_cp = 0 msg("Simplify strokes") pct = 0 n = len(tilt.sketch.strokes) for i, stroke in enumerate(tilt.sketch.strokes): new_pct = (i+1) * 100 / n if new_pct != pct: pct = new_pct removed_pct = (before_cp - after_cp) * 100 / (before_cp+1) msg("Simplify strokes: %3d%% %5d/%5d Removed %3d%%" % (pct, i, n, removed_pct)) before_cp += len(stroke.controlpoints) simplify_stroke(stroke, max_error) after_cp += len(stroke.controlpoints) msg("Simplify strokes: done") msgln("Control points: %5d -> %5d (%2d%%)" % ( before_cp, after_cp, after_cp * 100 / before_cp)) # ---------------------------------------------------------------------- # Stray strokes # ---------------------------------------------------------------------- def remove_stray_strokes(tilt, max_dist=0, replacement_brush_guid=None): """Show histograms of control point positions, to help with resizing.""" import numpy as np from math import sqrt def iter_pos(tilt): first_cp = 0 for stroke in tilt.sketch.strokes: stroke._first_cp = first_cp first_cp += len(stroke.controlpoints) for cp in stroke.controlpoints: yield cp.position positions = np.array(list(iter_pos(tilt))) if False: # Print out x/y/z histograms histograms = [np.histogram(positions[... 
, i], bins=30) for i in range(3)] for irow in range(len(histograms[0][0])+1): for axis, histogram in enumerate(histograms): try: print("%s %3d %6d " % ('xyz'[axis], histogram[1][irow], histogram[0][irow]), end=' ') except IndexError: print("%s %3d %6s " % ('xyz'[axis], histogram[1][irow], ''), end=' ') print() if max_dist > 0: # Convert replacement guid -> replacement index if replacement_brush_guid is None: replacement_brush_index = None else: with tilt.mutable_metadata() as dct: try: replacement_brush_index = dct['BrushIndex'].index(replacement_brush_guid) except ValueError: dct['BrushIndex'].append(replacement_brush_guid) replacement_brush_index = dct['BrushIndex'].index(replacement_brush_guid) # Compute Mahalanobis distance and remove strokes that fall outside # https://en.wikipedia.org/wiki/Mahalanobis_distance mean = np.mean(positions, axis=0) cov = np.cov(positions, rowvar=False) invcov = np.linalg.inv(cov) def mahalanobis_distance(v): """Return distance of row vector""" cv = (v - mean)[np.newaxis] return sqrt(cv.dot(invcov).dot(cv.T)[0, 0]) def out_of_bounds(stroke): i0 = stroke._first_cp i1 = i0 + len(stroke.controlpoints) dists = np.array(list(map(mahalanobis_distance, positions[i0 : i1]))) return np.any(dists > max_dist) msg("Finding OOB strokes") # TODO: figure out how to use np.einsum() and remove all the python-level loops oob_strokes = [ pair for pair in enumerate(tilt.sketch.strokes) if out_of_bounds(pair[1]) ] msg("") if len(oob_strokes): if replacement_brush_index is not None: for i, stroke in oob_strokes: print("Replacing out-of-bounds stroke", i) stroke.brush_idx = replacement_brush_index stroke.brush_color = (1,0,1,1) else: print("Removing %d strokes" % len(oob_strokes)) remove_indices = set(pair[0] for pair in oob_strokes) tilt.sketch.strokes[:] = [ stroke for i, stroke in enumerate(tilt.sketch.stroke) if i not in remove_indices ] # ---------------------------------------------------------------------- # Color reduction # ---------------------------------------------------------------------- def get_most_similar_factors(n): """Factorize n into two numbers. Returns the best pair, in the sense that the numbers are the closest to each other.""" i = int(n**0.5 + 0.5) while n % i != 0: i -= 1 return i, n/i def get_good_factors(n, max_aspect_ratio=None): """Factorize n into two integers that are closest to each other. If max_aspect_ratio is passed, search numbers >= n until a pair is found whose aspect ratio is <= max_aspect_ratio.""" if max_aspect_ratio is None: return get_most_similar_factors(n) for i in itertools.count(): a, b = get_most_similar_factors(n + i) if float(b)/a <= max_aspect_ratio: return a, b def rgbaf_to_rgb8(rgbaf): """Convert [r, g, b, a] floats to (r, g, b) bytes.""" return tuple(int(channel * 255) for channel in rgbaf[0:3]) def rgb8_to_rgbaf(rgb8): """Convert (r, g, b) bytes to [r, g, b, a] floats.""" lst = [channel / 255.0 for channel in rgb8] lst.append(1.0) return lst def tilt_colors_to_image(tilt, max_aspect_ratio=None, preserve_colors=()): """Returns a PIL.Image containing the colors used in the tilt. The image will have colors in roughly the same proportion as the control points in the tilt. 
preserve_colors is a list of rgb8 colors.""" import numpy as np from PIL import Image assert max_aspect_ratio is None or max_aspect_ratio > 0 preserve_colors = set(preserve_colors) def iter_rgb8_colors(tilt): for stroke in tilt.sketch.strokes: yield (rgbaf_to_rgb8(stroke.brush_color), len(stroke.controlpoints)) def by_decreasing_usage(counter_pair): # Sort function for colors return -counter_pair[1] def by_color_similarity(counter_pair): # Sort function for colors rgb8, usage = counter_pair h, s, l = rgb8_to_hsl(rgb8) return (rgb8 in preserve_colors), l counter = Counter() for color, n in iter_rgb8_colors(tilt): counter[color] += n most_used_color, amt = max(iter(counter.items()), key=lambda pair: pair[1]) for rgb8 in preserve_colors: if rgb8 not in counter: print("Ignoring: #%02x%02x%02x is not in the image" % rgb8) else: counter[rgb8] += amt / 2 # Find a "nice" width and height, possibly adjusting the number of texels num_texels = sum(counter.values()) width, height = get_good_factors(num_texels, max_aspect_ratio) if width * height != num_texels: counter[most_used_color] += width * height - num_texels assert counter[most_used_color] > 0 num_texels = sum(counter.values()) assert width * height == num_texels # Expand the colors into a 1d array, then turn into an Image colors_array = np.zeros(shape=(num_texels, 3), dtype='uint8') i = 0 # The sort used here only matters to humans when they look at the images colors_and_counts = sorted(iter(counter.items()), key=by_color_similarity) # colors_and_counts = sorted(counter.iteritems(), key=by_decreasing_usage) for (color, count) in colors_and_counts: colors_array[i:i+count] = color i += count colors_array.shape = (height, width, 3) return Image.fromarray(colors_array, mode='RGB') def get_quantized_image_pillow(im, num_colors): MAXIMUM_COVERAGE = 1 print("Falling back to old color quantization") return im.quantize(colors=num_colors, method=MAXIMUM_COVERAGE), 'pillow' def get_quantized_image_pngquant(im, num_colors): from PIL import Image import subprocess # pngquant errors out if its best solution is below this "quality" QUALITY_MIN = 0 # never error out # pngquant stops using colors when "quality" goes above this. 
# I have no real feeling for what this number means in practice QUALITY_MAX = 40 im.save('tmp_pngquant.png') try: subprocess.check_call([ 'pngquant', '--nofs', # no dithering '--force', '--quality', '%d-%d' % (QUALITY_MIN, QUALITY_MAX), '-o', 'tmp_pngquant_out.png', str(num_colors), '--', 'tmp_pngquant.png' ]) imq = Image.open('tmp_pngquant_out.png') imq.load() finally: if os.path.exists('tmp_pngquant.png'): os.unlink('tmp_pngquant.png') if os.path.exists('tmp_pngquant_out.png'): os.unlink('tmp_pngquant_out.png') return imq, 'pngquant' def get_quantized_image(im, num_colors): try: return get_quantized_image_pngquant(im, num_colors) except subprocess.CalledProcessError as e: print("Error running pngquant: %s" % e) except OSError as e: print("Missing pngquant: %s" % e) print("Download pngquant.exe it and put it in your PATH.") return get_quantized_image_pillow(im, num_colors) def simplify_colors(tilt, num_colors, preserve_colors): im = tilt_colors_to_image(tilt, max_aspect_ratio=4, preserve_colors=preserve_colors) if num_colors < 0: # Little hack to force use of pillow imq, method = get_quantized_image_pillow(im, -num_colors) else: imq, method = get_quantized_image(im, num_colors) def iter_rgb8(im): return zip(im.getdata(0), im.getdata(1), im.getdata(2)) def get_imq_color(ipixel, data=imq.getdata(), palette=imq.getpalette()): # Look up color in imq, which is awkward because it's palettized palette_entry = data[ipixel] r, g, b = palette[palette_entry * 3 : (palette_entry + 1) * 3] return (r, g, b) # Create table mapping unquantized rgb8 to quantized rgbaf old_to_new = {} idx = 0 for (old_color, group) in itertools.groupby(iter_rgb8(im)): assert old_color not in old_to_new old_to_new[old_color] = rgb8_to_rgbaf(get_imq_color(idx)) idx += len(list(group)) for stroke in tilt.sketch.strokes: stroke.brush_color = old_to_new[rgbaf_to_rgb8(stroke.brush_color)] if True: import numpy as np for old8, newf in old_to_new.items(): oldv = np.array(rgb8_to_rgbaf(old8)[0:3]) newv = np.array(newf[0:3]) err = oldv - newv err = math.sqrt(np.dot(err, err)) if err > .2: print("High color error: #%02x%02x%02x" % old8) num_colors = len(set(map(tuple, list(old_to_new.values())))) base, _ = os.path.splitext(tilt.filename) im.save('%s_%s.png' % (base, 'orig')) imq.save('%s_%s_%d.png' % (base, method, num_colors)) # ---------------------------------------------------------------------- # Split export into multiple .obj files # ---------------------------------------------------------------------- def iter_aggregated_by_color(json_filename): """Yields TiltBrushMesh instances, each of a uniform color.""" from tiltbrush.export import iter_meshes, TiltBrushMesh def by_color(m): return m.c[0] meshes = iter_meshes(json_filename) for (color, group) in itertools.groupby(sorted(meshes, key=by_color), key=by_color): yield TiltBrushMesh.from_meshes(group) def write_simple_obj(mesh, outf_name): from io import StringIO tmpf = StringIO() for v in mesh.v: tmpf.write("v %f %f %f\n" % v) for (t1, t2, t3) in mesh.tri: t1 += 1; t2 += 1; t3 += 1 tmpf.write("f %d %d %d\n" % (t1, t2, t3)) with file(outf_name, 'wb') as outf: outf.write(tmpf.getvalue()) def split_json_into_obj(json_filename): import struct output_base = os.path.splitext(json_filename)[0].replace('_out', '') meshes = list(iter_aggregated_by_color(json_filename)) meshes.sort(key=lambda m: len(m.v), reverse=True) for i, mesh in enumerate(meshes): # It's the "ignore normals" that does the most collapsing here. 
mesh.collapse_verts(ignore=('uv0', 'uv1', 'c', 't', 'n')) mesh.remove_degenerate() (r, g, b, a) = struct.unpack('4B', struct.pack('I', mesh.c[0])) assert a == 255, (r, g, b, a) hex_color = '%02x%02x%02x' % (r, g, b) outf_name = '%s %02d %s.obj' % (output_base, i, hex_color) write_simple_obj(mesh, outf_name) msgln("Wrote %s" % outf_name) # ---------------------------------------------------------------------- # Main # ---------------------------------------------------------------------- def process_tilt(filename, args): msg("Load tilt") tilt = Tilt(filename) msg("Load strokes") tilt.sketch.strokes msg("") if args.debug: msg("Clone strokes") before_strokes = [s.clone() for s in tilt.sketch.strokes] # Do this before color quantization, because it removes strokes (and their colors) if args.convert_brushes: convert_brushes(tilt, BRUSH_REPLACEMENTS) if args.remove_stray_strokes is not None: remove_stray_strokes(tilt, args.remove_stray_strokes, BrushLookup.get().get_unique_guid('Wire')) if args.pos_error_tolerance > 0: reduce_control_points(tilt, args.pos_error_tolerance) if args.simplify_colors is not None: simplify_colors(tilt, num_colors=args.simplify_colors, preserve_colors=args.preserve_colors) if args.debug: final_strokes = [] # interleave them so it renders semi-nicely... for before, after in itertools.zip_longest(before_strokes, tilt.sketch.strokes): if before is not None: for cp in before.controlpoints: cp.position[1] += 10 final_strokes.append(before) if after is not None: final_strokes.append(after) tilt.sketch.strokes[:] = final_strokes tilt.write_sketch() msgln("Wrote %s" % os.path.basename(tilt.filename)) def main(): import argparse parser = argparse.ArgumentParser(usage='''%(prog)s [ files ] Process .tilt files to get them ready for 3D printing. You should generally do the steps in this order: 1. Use --remove-stray-strokes (which actually just colors them magenta). Manually delete the strokes you don't want to keep. 2. Experiment with different values for --simplify-colors. Use --preserve-color option to force a color to remain present. 3. Use --convert-brushes and --pos-error-tolerance. 4. Load .tilt files in Tilt Brush, and export to .json 5. Convert from .json -> multiple .obj files ''') def hex_color(arg): arg = arg.lower() m = re.match(r'^#?([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})$', arg) if m is not None: return tuple(int(m.group(i), 16) for i in (1, 2, 3)) else: raise argparse.ArgumentTypeError("Must be exactly hex 6 digits: %r" % arg) parser.add_argument( '--debug', action='store_true', help='For debugging: put both the original and modified strokes in the resulting .tilt file') parser.add_argument( '--remove-stray-strokes', metavar='float', type=float, default=None, help="Replace strokes that are far away from the sketch with magenta wire. Argument is the number of standard deviations; 5.0 is a reasonable starting point.") parser.add_argument( '--simplify-colors', type=int, metavar='N', help='Simplify down to N colors. Use a negative number to try the alternate algorithm.') parser.add_argument( '--preserve-color', dest='preserve_colors', type=hex_color, action='append', default=[], help='Color to preserve, as a hex string like #ff00ff') parser.add_argument( '--convert-brushes', action='store_true', help='Convert brushes to 3d-printable ones') parser.add_argument( '--pos-error-tolerance', type=float, default=0, help='Allowable positional error when simplifying strokes, as a fraction of stroke width. If 0, do not simplify. .1 to .3 are good values. 
(default %(default)s)') parser.add_argument('-o', dest='output_file', help='Name of output file (optional)') parser.add_argument('files', type=str, nargs='+', help='File(s) to hack') args = parser.parse_args() for i, orig_filename in enumerate(args.files): if orig_filename.endswith('.tilt'): base, ext = os.path.splitext(orig_filename) if i == 0 and args.output_file is not None: working_filename = args.output_file else: working_filename = base + '_out' + ext shutil.copyfile(orig_filename, working_filename) process_tilt(working_filename, args) elif orig_filename.endswith('.json'): split_json_into_obj(orig_filename) if __name__=='__main__': main()
[ "numpy.array", "numpy.linalg.norm", "sys.exit", "numpy.cov", "numpy.mean", "os.path.exists", "numpy.histogram", "argparse.ArgumentParser", "tiltbrush.export.iter_meshes", "tiltbrush.export.TiltBrushMesh.from_meshes", "numpy.dot", "os.unlink", "io.StringIO", "sys.stdout.flush", "tiltbrush.tilt.Tilt", "os.path.splitext", "itertools.zip_longest", "re.match", "numpy.any", "struct.pack", "tbdata.brush_lookup.BrushLookup.get", "argparse.ArgumentTypeError", "shutil.copyfile", "PIL.Image.fromarray", "PIL.Image.open", "collections.Counter", "numpy.zeros", "itertools.count", "numpy.linalg.inv", "os.path.basename", "sys.stdout.write" ]
[((2232, 2271), 'sys.stdout.write', 'sys.stdout.write', (["('%-79s\\r' % text[:79])"], {}), "('%-79s\\r' % text[:79])\n", (2248, 2271), False, 'import sys\n'), ((2274, 2292), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2290, 2292), False, 'import sys\n'), ((2314, 2353), 'sys.stdout.write', 'sys.stdout.write', (["('%-79s\\n' % text[:79])"], {}), "('%-79s\\n' % text[:79])\n", (2330, 2353), False, 'import sys\n'), ((2356, 2374), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2372, 2374), False, 'import sys\n'), ((3187, 3204), 'tbdata.brush_lookup.BrushLookup.get', 'BrushLookup.get', ([], {}), '()\n', (3202, 3204), False, 'from tbdata.brush_lookup import BrushLookup\n'), ((4075, 4092), 'tbdata.brush_lookup.BrushLookup.get', 'BrushLookup.get', ([], {}), '()\n', (4090, 4092), False, 'from tbdata.brush_lookup import BrushLookup\n'), ((12463, 12480), 'itertools.count', 'itertools.count', ([], {}), '()\n', (12478, 12480), False, 'import itertools\n'), ((13754, 13763), 'collections.Counter', 'Counter', ([], {}), '()\n', (13761, 13763), False, 'from collections import Counter\n'), ((14553, 14599), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_texels, 3)', 'dtype': '"""uint8"""'}), "(shape=(num_texels, 3), dtype='uint8')\n", (14561, 14599), True, 'import numpy as np\n'), ((14982, 15023), 'PIL.Image.fromarray', 'Image.fromarray', (['colors_array'], {'mode': '"""RGB"""'}), "(colors_array, mode='RGB')\n", (14997, 15023), False, 'from PIL import Image\n'), ((18505, 18531), 'tiltbrush.export.iter_meshes', 'iter_meshes', (['json_filename'], {}), '(json_filename)\n', (18516, 18531), False, 'from tiltbrush.export import iter_meshes, TiltBrushMesh\n'), ((18738, 18748), 'io.StringIO', 'StringIO', ([], {}), '()\n', (18746, 18748), False, 'from io import StringIO\n'), ((19918, 19932), 'tiltbrush.tilt.Tilt', 'Tilt', (['filename'], {}), '(filename)\n', (19922, 19932), False, 'from tiltbrush.tilt import Tilt\n'), ((21194, 21773), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'usage': '"""%(prog)s [ files ]\n\nProcess .tilt files to get them ready for 3D printing.\n\nYou should generally do the steps in this order:\n\n1. Use --remove-stray-strokes (which actually just colors them magenta).\n Manually delete the strokes you don\'t want to keep.\n2. Experiment with different values for --simplify-colors. Use\n --preserve-color option to force a color to remain present.\n3. Use --convert-brushes and --pos-error-tolerance.\n4. Load .tilt files in Tilt Brush, and export to .json\n5. Convert from .json -> multiple .obj files\n"""'}), '(usage=\n """%(prog)s [ files ]\n\nProcess .tilt files to get them ready for 3D printing.\n\nYou should generally do the steps in this order:\n\n1. Use --remove-stray-strokes (which actually just colors them magenta).\n Manually delete the strokes you don\'t want to keep.\n2. Experiment with different values for --simplify-colors. Use\n --preserve-color option to force a color to remain present.\n3. Use --convert-brushes and --pos-error-tolerance.\n4. Load .tilt files in Tilt Brush, and export to .json\n5. 
Convert from .json -> multiple .obj files\n"""\n )\n', (21217, 21773), False, 'import argparse\n'), ((991, 1002), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (999, 1002), False, 'import sys\n'), ((4242, 4251), 'collections.Counter', 'Counter', ([], {}), '()\n', (4249, 4251), False, 'from collections import Counter\n'), ((4535, 4553), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4551, 4553), False, 'import sys\n'), ((6945, 6990), 'numpy.linalg.norm', 'numpy.linalg.norm', (['(pos_interpolated - cp._pos)'], {}), '(pos_interpolated - cp._pos)\n', (6962, 6990), False, 'import numpy\n'), ((7398, 7422), 'numpy.array', 'numpy.array', (['cp.position'], {}), '(cp.position)\n', (7409, 7422), False, 'import numpy\n'), ((10492, 10518), 'numpy.mean', 'np.mean', (['positions'], {'axis': '(0)'}), '(positions, axis=0)\n', (10499, 10518), True, 'import numpy as np\n'), ((10529, 10560), 'numpy.cov', 'np.cov', (['positions'], {'rowvar': '(False)'}), '(positions, rowvar=False)\n', (10535, 10560), True, 'import numpy as np\n'), ((10574, 10592), 'numpy.linalg.inv', 'np.linalg.inv', (['cov'], {}), '(cov)\n', (10587, 10592), True, 'import numpy as np\n'), ((15900, 15934), 'PIL.Image.open', 'Image.open', (['"""tmp_pngquant_out.png"""'], {}), "('tmp_pngquant_out.png')\n", (15910, 15934), False, 'from PIL import Image\n'), ((15968, 16002), 'os.path.exists', 'os.path.exists', (['"""tmp_pngquant.png"""'], {}), "('tmp_pngquant.png')\n", (15982, 16002), False, 'import os\n'), ((16047, 16085), 'os.path.exists', 'os.path.exists', (['"""tmp_pngquant_out.png"""'], {}), "('tmp_pngquant_out.png')\n", (16061, 16085), False, 'import os\n'), ((17970, 18001), 'os.path.splitext', 'os.path.splitext', (['tilt.filename'], {}), '(tilt.filename)\n', (17986, 18001), False, 'import os\n'), ((20770, 20828), 'itertools.zip_longest', 'itertools.zip_longest', (['before_strokes', 'tilt.sketch.strokes'], {}), '(before_strokes, tilt.sketch.strokes)\n', (20791, 20828), False, 'import itertools\n'), ((21817, 21877), 're.match', 're.match', (['"""^#?([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})$"""', 'arg'], {}), "('^#?([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})$', arg)\n", (21825, 21877), False, 'import re\n'), ((9488, 9528), 'numpy.histogram', 'np.histogram', (['positions[..., i]'], {'bins': '(30)'}), '(positions[..., i], bins=30)\n', (9500, 9528), True, 'import numpy as np\n'), ((10948, 10972), 'numpy.any', 'np.any', (['(dists > max_dist)'], {}), '(dists > max_dist)\n', (10954, 10972), True, 'import numpy as np\n'), ((16010, 16039), 'os.unlink', 'os.unlink', (['"""tmp_pngquant.png"""'], {}), "('tmp_pngquant.png')\n", (16019, 16039), False, 'import os\n'), ((16093, 16126), 'os.unlink', 'os.unlink', (['"""tmp_pngquant_out.png"""'], {}), "('tmp_pngquant_out.png')\n", (16102, 16126), False, 'import os\n'), ((17731, 17750), 'numpy.array', 'np.array', (['newf[0:3]'], {}), '(newf[0:3])\n', (17739, 17750), True, 'import numpy as np\n'), ((18629, 18661), 'tiltbrush.export.TiltBrushMesh.from_meshes', 'TiltBrushMesh.from_meshes', (['group'], {}), '(group)\n', (18654, 18661), False, 'from tiltbrush.export import iter_meshes, TiltBrushMesh\n'), ((19459, 19486), 'struct.pack', 'struct.pack', (['"""I"""', 'mesh.c[0]'], {}), "('I', mesh.c[0])\n", (19470, 19486), False, 'import struct\n'), ((21118, 21149), 'os.path.basename', 'os.path.basename', (['tilt.filename'], {}), '(tilt.filename)\n', (21134, 21149), False, 'import os\n'), ((21982, 22050), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (["('Must be exactly hex 6 digits: %r' % arg)"], 
{}), "('Must be exactly hex 6 digits: %r' % arg)\n", (22008, 22050), False, 'import argparse\n'), ((23478, 23509), 'os.path.splitext', 'os.path.splitext', (['orig_filename'], {}), '(orig_filename)\n', (23494, 23509), False, 'import os\n'), ((23669, 23717), 'shutil.copyfile', 'shutil.copyfile', (['orig_filename', 'working_filename'], {}), '(orig_filename, working_filename)\n', (23684, 23717), False, 'import shutil\n'), ((7542, 7583), 'numpy.linalg.norm', 'numpy.linalg.norm', (['(prev_cp._pos - cp._pos)'], {}), '(prev_cp._pos - cp._pos)\n', (7559, 7583), False, 'import numpy\n'), ((17797, 17813), 'numpy.dot', 'np.dot', (['err', 'err'], {}), '(err, err)\n', (17803, 17813), True, 'import numpy as np\n'), ((19059, 19090), 'os.path.splitext', 'os.path.splitext', (['json_filename'], {}), '(json_filename)\n', (19075, 19090), False, 'import os\n'), ((20379, 20396), 'tbdata.brush_lookup.BrushLookup.get', 'BrushLookup.get', ([], {}), '()\n', (20394, 20396), False, 'from tbdata.brush_lookup import BrushLookup\n')]
from multiprocessing import Pool
from multiprocessing.pool import ThreadPool
from queue import Queue

from .chunks import chunks

__all__ = 'map_parallel', 'map_multicore', 'map_multithread'


def _pool_map_stream(pool_type, pipe, fn, workers):
    assert callable(fn), fn
    assert isinstance(workers, int), workers
    assert workers > 0, workers

    p = pool_type(workers)
    job_q = Queue(maxsize=int(workers*2))
    try:
        # prefill the queue with the first chunk of jobs only; without the
        # break, inputs longer than workers*2 would block on job_q.put()
        # before any result had been consumed
        for chunk in chunks(pipe, workers*2):
            for i in chunk:
                job_q.put(p.apply_async(fn, [i]))
            break
        # steady state: pop one finished job for every new one submitted
        for i in pipe:
            yield job_q.get().get()
            job_q.put(p.apply_async(fn, [i]))
        # drain whatever is still in flight
        while not job_q.empty():
            yield job_q.get().get()
    finally:
        p.terminate()


def map_multicore(pipe, fn, workers):
    ''' This streams map operations through a Pool without needing to load
        the entire stream into a massive list first, like Pool.map normally
        requires.
    '''
    assert callable(fn), fn
    assert isinstance(workers, int), workers
    assert workers > 0, workers
    pipe = iter(pipe)
    return _pool_map_stream(Pool, **locals())


def map_multithread(pipe, fn, workers):
    ''' This streams map operations through a ThreadPool without needing to
        load the entire stream into a massive list first, like ThreadPool.map
        normally requires.
    '''
    assert callable(fn), fn
    assert isinstance(workers, int), workers
    assert workers > 0, workers
    pipe = iter(pipe)
    return _pool_map_stream(ThreadPool, **locals())


def map_parallel(pipe, fn, workers):
    ''' This streams map operations in parallel through a pool of processes
        or threads. If the OS does not allow multiprocessing or the datatypes
        are not serializable, operation reverts to ThreadPools.
    '''
    assert callable(fn), fn
    assert isinstance(workers, int), workers
    assert workers > 0, workers
    pipe = iter(pipe)
    try:
        for i in map_multicore(pipe, fn, workers):
            yield i
    except:
        # processes unavailable or items not picklable: fall back to threads
        for i in map_multithread(pipe, fn, workers):
            yield i


if __name__ == '__main__':
    import random, time

    def work(i):
        print('working on: {}'.format(i))
        time.sleep(random.random())
        print('finished: {}'.format(i))
        return i*2

    # `G` is assumed to be a streaming pipeline helper provided elsewhere in
    # this project; it is not defined or imported in this module.
    l = G( range(10) ).map( float ).map_parallel( work, 5 ).print().run()
[ "random.random" ]
[((2264, 2279), 'random.random', 'random.random', ([], {}), '()\n', (2277, 2279), False, 'import random, time\n')]
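A minimal usage sketch for the streaming map helpers defined in the code row above; the worker count and the `square` function are illustrative, not part of the original module:

# assumes map_parallel from the module above is in scope
def square(x):
    return x * x

# map_parallel is a generator: results stream out as workers finish,
# falling back to threads when the items cannot be pickled
for result in map_parallel(range(100), square, 4):
    print(result)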
""" SQLAlchemy database models. """ from datetime import datetime from depot.fields.sqlalchemy import UploadedFileField from app import db from app.util.data import many_to_many, foreign_key from app.config import TOKEN_LEN class User(db.Model): """ User model class. """ id = db.Column(db.Integer(), primary_key=True, autoincrement=True) username = db.Column(db.String(32), unique=True) email = db.Column(db.String(64), unique=True) password = db.Column(db.Binary(32)) join_date = db.Column(db.DateTime(), default=datetime.now) active = db.Column(db.Boolean(), default=False) avatar = db.Column(UploadedFileField()) self_introduction = db.Column(db.Text(), unique=True) contribution = db.Column(db.Integer(), default=0) job = db.Column(db.String(64), unique=True) class Session(db.Model): """ API session class. """ token = db.Column(db.Binary(TOKEN_LEN), primary_key=True) user, user_id = foreign_key("User", backref_name="sessions") class AbstractBaseGroup(object): """ Abstract base group class. """ pass class Group(db.Model, AbstractBaseGroup): """ Group model class. """ id = db.Column(db.Integer(), primary_key=True, autoincrement=True) name = db.Column(db.String(32), unique=True) users = many_to_many("Group", "User", backref_name="groups") introduction = db.Column(db.Text()) class Paper(db.Model): """ Paper model class. """ id = db.Column(db.Integer(), primary_key=True, autoincrement=True) title = db.Column(db.String(256), unique=False) abstract = db.Column(db.Text(), unique=False) authors = db.Column(db.String(256), unique=False) conference = db.Column(db.String(128), unique=False) publish_date = db.Column(db.DateTime(), default=datetime.now) # Accurate to the day owners = many_to_many("Paper", "User", backref_name="papers") owngroup = many_to_many("Paper", "Group", backref_name="papers") collectors = many_to_many("Paper", "User", backref_name="collect_papers") paper_file = db.Column(UploadedFileField()) class Note(db.Model): """ User model class. 
""" id = db.Column(db.Integer(), primary_key=True, autoincrement=True) title = db.Column(db.String(256), unique=False) create_time = db.Column(db.DateTime(), default=datetime.now) last_modified = db.Column(db.DateTime(), default=datetime.now) author, author_id = foreign_key("User", backref_name="notes") paper, paper_id = foreign_key("Paper", backref_name="notes") collectors = many_to_many("Note", "User", backref_name="collect_notes") owngroup = many_to_many("Note", "Group", backref_name="notes") content = db.Column(db.Text(), unique=False) annotation_file = db.Column(UploadedFileField()) class Question(db.Model): id = db.Column(db.Integer(), primary_key=True, autoincrement=True) provider, provider_id = foreign_key("User", backref_name="questions_asked") titie = db.Column(db.String(256), unique=False) description = db.Column(db.Text(), unique=False) upvotes = many_to_many("Question", "User", backref_name="questions_upvote") downvotes = many_to_many("Question", "User", backref_name="questions_downvote") create_time = db.Column(db.DateTime(), default=datetime.now) last_modified = db.Column(db.DateTime(), default=datetime.now) class Reply(db.Model): id = db.Column(db.Integer(), primary_key=True, autoincrement=True) provider, provider_id = foreign_key("User", backref_name="replies") host_question, q_id = foreign_key("Question", backref_name="replies") content = db.Column(db.Text()) upvotes = many_to_many("Reply", "User", backref_name="replies_upvote") downvotes = many_to_many("Reply", "User", backref_name="replies_downvote") create_time = db.Column(db.DateTime(), default=datetime.now) last_modified = db.Column(db.DateTime(), default=datetime.now) class Comment(db.Model): id = db.Column(db.Integer(), primary_key=True, autoincrement=True) provider, provider_id = foreign_key("User", backref_name="comments") host_question, q_id = foreign_key("Question", backref_name="comments") host_reply, r_id = foreign_key("Reply", backref_name="comments") content = db.Column(db.Text(), unique=False) create_time = db.Column(db.DateTime(), default=datetime.now) last_modified = db.Column(db.DateTime(), default=datetime.now)
[ "app.util.data.many_to_many", "app.db.Integer", "app.db.String", "app.db.DateTime", "app.db.Boolean", "depot.fields.sqlalchemy.UploadedFileField", "app.db.Binary", "app.db.Text", "app.util.data.foreign_key" ]
[((950, 994), 'app.util.data.foreign_key', 'foreign_key', (['"""User"""'], {'backref_name': '"""sessions"""'}), "('User', backref_name='sessions')\n", (961, 994), False, 'from app.util.data import many_to_many, foreign_key\n'), ((1283, 1335), 'app.util.data.many_to_many', 'many_to_many', (['"""Group"""', '"""User"""'], {'backref_name': '"""groups"""'}), "('Group', 'User', backref_name='groups')\n", (1295, 1335), False, 'from app.util.data import many_to_many, foreign_key\n'), ((1816, 1868), 'app.util.data.many_to_many', 'many_to_many', (['"""Paper"""', '"""User"""'], {'backref_name': '"""papers"""'}), "('Paper', 'User', backref_name='papers')\n", (1828, 1868), False, 'from app.util.data import many_to_many, foreign_key\n'), ((1884, 1937), 'app.util.data.many_to_many', 'many_to_many', (['"""Paper"""', '"""Group"""'], {'backref_name': '"""papers"""'}), "('Paper', 'Group', backref_name='papers')\n", (1896, 1937), False, 'from app.util.data import many_to_many, foreign_key\n'), ((1955, 2015), 'app.util.data.many_to_many', 'many_to_many', (['"""Paper"""', '"""User"""'], {'backref_name': '"""collect_papers"""'}), "('Paper', 'User', backref_name='collect_papers')\n", (1967, 2015), False, 'from app.util.data import many_to_many, foreign_key\n'), ((2396, 2437), 'app.util.data.foreign_key', 'foreign_key', (['"""User"""'], {'backref_name': '"""notes"""'}), "('User', backref_name='notes')\n", (2407, 2437), False, 'from app.util.data import many_to_many, foreign_key\n'), ((2460, 2502), 'app.util.data.foreign_key', 'foreign_key', (['"""Paper"""'], {'backref_name': '"""notes"""'}), "('Paper', backref_name='notes')\n", (2471, 2502), False, 'from app.util.data import many_to_many, foreign_key\n'), ((2520, 2578), 'app.util.data.many_to_many', 'many_to_many', (['"""Note"""', '"""User"""'], {'backref_name': '"""collect_notes"""'}), "('Note', 'User', backref_name='collect_notes')\n", (2532, 2578), False, 'from app.util.data import many_to_many, foreign_key\n'), ((2594, 2645), 'app.util.data.many_to_many', 'many_to_many', (['"""Note"""', '"""Group"""'], {'backref_name': '"""notes"""'}), "('Note', 'Group', backref_name='notes')\n", (2606, 2645), False, 'from app.util.data import many_to_many, foreign_key\n'), ((2874, 2925), 'app.util.data.foreign_key', 'foreign_key', (['"""User"""'], {'backref_name': '"""questions_asked"""'}), "('User', backref_name='questions_asked')\n", (2885, 2925), False, 'from app.util.data import many_to_many, foreign_key\n'), ((3045, 3110), 'app.util.data.many_to_many', 'many_to_many', (['"""Question"""', '"""User"""'], {'backref_name': '"""questions_upvote"""'}), "('Question', 'User', backref_name='questions_upvote')\n", (3057, 3110), False, 'from app.util.data import many_to_many, foreign_key\n'), ((3127, 3194), 'app.util.data.many_to_many', 'many_to_many', (['"""Question"""', '"""User"""'], {'backref_name': '"""questions_downvote"""'}), "('Question', 'User', backref_name='questions_downvote')\n", (3139, 3194), False, 'from app.util.data import many_to_many, foreign_key\n'), ((3450, 3493), 'app.util.data.foreign_key', 'foreign_key', (['"""User"""'], {'backref_name': '"""replies"""'}), "('User', backref_name='replies')\n", (3461, 3493), False, 'from app.util.data import many_to_many, foreign_key\n'), ((3520, 3567), 'app.util.data.foreign_key', 'foreign_key', (['"""Question"""'], {'backref_name': '"""replies"""'}), "('Question', backref_name='replies')\n", (3531, 3567), False, 'from app.util.data import many_to_many, foreign_key\n'), ((3617, 3677), 'app.util.data.many_to_many', 
'many_to_many', (['"""Reply"""', '"""User"""'], {'backref_name': '"""replies_upvote"""'}), "('Reply', 'User', backref_name='replies_upvote')\n", (3629, 3677), False, 'from app.util.data import many_to_many, foreign_key\n'), ((3694, 3756), 'app.util.data.many_to_many', 'many_to_many', (['"""Reply"""', '"""User"""'], {'backref_name': '"""replies_downvote"""'}), "('Reply', 'User', backref_name='replies_downvote')\n", (3706, 3756), False, 'from app.util.data import many_to_many, foreign_key\n'), ((4014, 4058), 'app.util.data.foreign_key', 'foreign_key', (['"""User"""'], {'backref_name': '"""comments"""'}), "('User', backref_name='comments')\n", (4025, 4058), False, 'from app.util.data import many_to_many, foreign_key\n'), ((4085, 4133), 'app.util.data.foreign_key', 'foreign_key', (['"""Question"""'], {'backref_name': '"""comments"""'}), "('Question', backref_name='comments')\n", (4096, 4133), False, 'from app.util.data import many_to_many, foreign_key\n'), ((4157, 4202), 'app.util.data.foreign_key', 'foreign_key', (['"""Reply"""'], {'backref_name': '"""comments"""'}), "('Reply', backref_name='comments')\n", (4168, 4202), False, 'from app.util.data import many_to_many, foreign_key\n'), ((297, 309), 'app.db.Integer', 'db.Integer', ([], {}), '()\n', (307, 309), False, 'from app import db\n'), ((374, 387), 'app.db.String', 'db.String', (['(32)'], {}), '(32)\n', (383, 387), False, 'from app import db\n'), ((424, 437), 'app.db.String', 'db.String', (['(64)'], {}), '(64)\n', (433, 437), False, 'from app import db\n'), ((477, 490), 'app.db.Binary', 'db.Binary', (['(32)'], {}), '(32)\n', (486, 490), False, 'from app import db\n'), ((518, 531), 'app.db.DateTime', 'db.DateTime', ([], {}), '()\n', (529, 531), False, 'from app import db\n'), ((578, 590), 'app.db.Boolean', 'db.Boolean', ([], {}), '()\n', (588, 590), False, 'from app import db\n'), ((630, 649), 'depot.fields.sqlalchemy.UploadedFileField', 'UploadedFileField', ([], {}), '()\n', (647, 649), False, 'from depot.fields.sqlalchemy import UploadedFileField\n'), ((685, 694), 'app.db.Text', 'db.Text', ([], {}), '()\n', (692, 694), False, 'from app import db\n'), ((738, 750), 'app.db.Integer', 'db.Integer', ([], {}), '()\n', (748, 750), False, 'from app import db\n'), ((783, 796), 'app.db.String', 'db.String', (['(64)'], {}), '(64)\n', (792, 796), False, 'from app import db\n'), ((890, 910), 'app.db.Binary', 'db.Binary', (['TOKEN_LEN'], {}), '(TOKEN_LEN)\n', (899, 910), False, 'from app import db\n'), ((1170, 1182), 'app.db.Integer', 'db.Integer', ([], {}), '()\n', (1180, 1182), False, 'from app import db\n'), ((1243, 1256), 'app.db.String', 'db.String', (['(32)'], {}), '(32)\n', (1252, 1256), False, 'from app import db\n'), ((1365, 1374), 'app.db.Text', 'db.Text', ([], {}), '()\n', (1372, 1374), False, 'from app import db\n'), ((1450, 1462), 'app.db.Integer', 'db.Integer', ([], {}), '()\n', (1460, 1462), False, 'from app import db\n'), ((1524, 1538), 'app.db.String', 'db.String', (['(256)'], {}), '(256)\n', (1533, 1538), False, 'from app import db\n'), ((1579, 1588), 'app.db.Text', 'db.Text', ([], {}), '()\n', (1586, 1588), False, 'from app import db\n'), ((1628, 1642), 'app.db.String', 'db.String', (['(256)'], {}), '(256)\n', (1637, 1642), False, 'from app import db\n'), ((1685, 1699), 'app.db.String', 'db.String', (['(128)'], {}), '(128)\n', (1694, 1699), False, 'from app import db\n'), ((1744, 1757), 'app.db.DateTime', 'db.DateTime', ([], {}), '()\n', (1755, 1757), False, 'from app import db\n'), ((2043, 2062), 
'depot.fields.sqlalchemy.UploadedFileField', 'UploadedFileField', ([], {}), '()\n', (2060, 2062), False, 'from depot.fields.sqlalchemy import UploadedFileField\n'), ((2136, 2148), 'app.db.Integer', 'db.Integer', ([], {}), '()\n', (2146, 2148), False, 'from app import db\n'), ((2210, 2224), 'app.db.String', 'db.String', (['(256)'], {}), '(256)\n', (2219, 2224), False, 'from app import db\n'), ((2268, 2281), 'app.db.DateTime', 'db.DateTime', ([], {}), '()\n', (2279, 2281), False, 'from app import db\n'), ((2335, 2348), 'app.db.DateTime', 'db.DateTime', ([], {}), '()\n', (2346, 2348), False, 'from app import db\n'), ((2670, 2679), 'app.db.Text', 'db.Text', ([], {}), '()\n', (2677, 2679), False, 'from app import db\n'), ((2727, 2746), 'depot.fields.sqlalchemy.UploadedFileField', 'UploadedFileField', ([], {}), '()\n', (2744, 2746), False, 'from depot.fields.sqlalchemy import UploadedFileField\n'), ((2794, 2806), 'app.db.Integer', 'db.Integer', ([], {}), '()\n', (2804, 2806), False, 'from app import db\n'), ((2948, 2962), 'app.db.String', 'db.String', (['(256)'], {}), '(256)\n', (2957, 2962), False, 'from app import db\n'), ((3006, 3015), 'app.db.Text', 'db.Text', ([], {}), '()\n', (3013, 3015), False, 'from app import db\n'), ((3223, 3236), 'app.db.DateTime', 'db.DateTime', ([], {}), '()\n', (3234, 3236), False, 'from app import db\n'), ((3290, 3303), 'app.db.DateTime', 'db.DateTime', ([], {}), '()\n', (3301, 3303), False, 'from app import db\n'), ((3370, 3382), 'app.db.Integer', 'db.Integer', ([], {}), '()\n', (3380, 3382), False, 'from app import db\n'), ((3592, 3601), 'app.db.Text', 'db.Text', ([], {}), '()\n', (3599, 3601), False, 'from app import db\n'), ((3785, 3798), 'app.db.DateTime', 'db.DateTime', ([], {}), '()\n', (3796, 3798), False, 'from app import db\n'), ((3852, 3865), 'app.db.DateTime', 'db.DateTime', ([], {}), '()\n', (3863, 3865), False, 'from app import db\n'), ((3934, 3946), 'app.db.Integer', 'db.Integer', ([], {}), '()\n', (3944, 3946), False, 'from app import db\n'), ((4227, 4236), 'app.db.Text', 'db.Text', ([], {}), '()\n', (4234, 4236), False, 'from app import db\n'), ((4280, 4293), 'app.db.DateTime', 'db.DateTime', ([], {}), '()\n', (4291, 4293), False, 'from app import db\n'), ((4347, 4360), 'app.db.DateTime', 'db.DateTime', ([], {}), '()\n', (4358, 4360), False, 'from app import db\n')]
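A minimal usage sketch for the models in the row above, assuming the `db` object from the `app` package is a Flask-SQLAlchemy instance initialised elsewhere; the import path and field values are hypothetical:

from app import db
from app.models import User  # hypothetical module path for the classes above

user = User(username="alice", email="alice@example.com", password=b"\x00" * 32)
db.session.add(user)
db.session.commit()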
import h5py
import numpy as np
import os, pdb
import glob  # used by latest_snapshot below
import tensorflow as tf

from rllab.envs.base import EnvSpec
from rllab.envs.normalized_env import normalize as normalize_env
import rllab.misc.logger as logger

from sandbox.rocky.tf.algos.trpo import TRPO
from sandbox.rocky.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy
from sandbox.rocky.tf.policies.gaussian_gru_policy import GaussianGRUPolicy
from sandbox.rocky.tf.envs.base import TfEnv
from sandbox.rocky.tf.spaces.discrete import Discrete

from hgail.algos.hgail_impl import Level
from hgail.baselines.gaussian_mlp_baseline import GaussianMLPBaseline
from hgail.critic.critic import WassersteinCritic
from hgail.envs.spec_wrapper_env import SpecWrapperEnv
from hgail.envs.vectorized_normalized_env import vectorized_normalized_env
from hgail.misc.datasets import CriticDataset, RecognitionDataset
from hgail.policies.categorical_latent_sampler import CategoricalLatentSampler
from hgail.policies.gaussian_latent_var_gru_policy import GaussianLatentVarGRUPolicy
from hgail.policies.gaussian_latent_var_mlp_policy import GaussianLatentVarMLPPolicy
from hgail.policies.latent_sampler import UniformlyRandomLatentSampler
from hgail.core.models import ObservationActionMLP
from hgail.policies.scheduling import ConstantIntervalScheduler
from hgail.recognition.recognition_model import RecognitionModel
from hgail.samplers.hierarchy_sampler import HierarchySampler
import hgail.misc.utils

from julia_env.julia_env import JuliaEnv

'''
Const

NGSIM_FILENAME_TO_ID = {
    'trajdata_i101_trajectories-0750am-0805am.txt': 1,
    'trajdata_i101_trajectories-0805am-0820am.txt': 2,
    'trajdata_i101_trajectories-0820am-0835am.txt': 3,
    'trajdata_i80_trajectories-0400-0415.txt': 4,
    'trajdata_i80_trajectories-0500-0515.txt': 5,
    'trajdata_i80_trajectories-0515-0530.txt': 6
}'''
NGSIM_FILENAME_TO_ID = {
    'trajdata_i101_trajectories-0750am-0805am.txt': 1,
    'trajdata_i101-22agents-0750am-0805am.txt': 1
}

'''
Common
'''

def maybe_mkdir(dirpath):
    if not os.path.exists(dirpath):
        os.mkdir(dirpath)

def partition_list(lst, n):
    sublists = [[] for _ in range(n)]
    for i, v in enumerate(lst):
        sublists[i % n].append(v)
    return sublists

def str2bool(v):
    if v.lower() == 'true':
        return True
    return False

def write_trajectories(filepath, trajs):
    np.savez(filepath, trajs=trajs)

def load_trajectories(filepath):
    return np.load(filepath)['trajs']

def filename2label(fn):
    s = fn.find('-') + 1
    e = fn.rfind('_')
    return fn[s:e]

def load_trajs_labels(directory, files_to_use=[0,1,2,3,4,5]):
    filenames = [
        'trajdata_i101_trajectories-0750am-0805am_trajectories.npz',
        'trajdata_i101_trajectories-0805am-0820am_trajectories.npz',
        'trajdata_i101_trajectories-0820am-0835am_trajectories.npz',
        'trajdata_i80_trajectories-0400-0415_trajectories.npz',
        'trajdata_i80_trajectories-0500-0515_trajectories.npz',
        'trajdata_i80_trajectories-0515-0530_trajectories.npz'
    ]
    filenames = [filenames[i] for i in files_to_use]
    labels = [filename2label(fn) for fn in filenames]
    filepaths = [os.path.join(directory, fn) for fn in filenames]
    trajs = [load_trajectories(fp) for fp in filepaths]
    return trajs, labels

'''
Component build functions
'''

'''
This is about as hacky as it gets, but I want to avoid editing the rllab
source code as much as possible, so it will have to do for now.

Add a reset(self, kwargs**) function to the normalizing environment
https://stackoverflow.com/questions/972/adding-a-method-to-an-existing-object-instance
'''
def normalize_env_reset_with_kwargs(self, **kwargs):
    ret = self._wrapped_env.reset(**kwargs)
    if self._normalize_obs:
        return self._apply_normalize_obs(ret)
    else:
        return ret

def add_kwargs_to_reset(env):
    normalize_env = hgail.misc.utils.extract_normalizing_env(env)
    if normalize_env is not None:
        normalize_env.reset = normalize_env_reset_with_kwargs.__get__(normalize_env)

'''end of hack, back to our regularly scheduled programming'''

# Raunak adding an input argument for multiagent video making
def build_ngsim_env(
        args,
        exp_dir='/tmp',
        alpha=0.001,
        vectorize=True,
        render_params=None,
        videoMaking=False):
    basedir = os.path.expanduser('~/.julia/v0.6/NGSIM/data')
    filepaths = [os.path.join(basedir, args.ngsim_filename)]
    if render_params is None:
        render_params = dict(
            viz_dir=os.path.join(exp_dir, 'imitate/viz'),
            zoom=5.
        )
    env_params = dict(
        trajectory_filepaths=filepaths,
        H=args.env_H,
        primesteps=args.env_primesteps,
        action_repeat=args.env_action_repeat,
        terminate_on_collision=False,
        terminate_on_off_road=False,
        render_params=render_params,
        n_envs=args.n_envs,
        n_veh=args.n_envs,
        remove_ngsim_veh=args.remove_ngsim_veh,
        reward=args.env_reward
    )
    # order matters here because multiagent is a subset of vectorized
    # i.e., if you want to run with multiagent = true, then vectorize must
    # also be true
    if args.env_multiagent:
        env_id = 'MultiagentNGSIMEnv'
        if videoMaking:
            print('RAUNAK BHATTACHARRYA VIDEO MAKER IS ON')
            env_id='MultiagentNGSIMEnvVideoMaker'
        alpha = alpha * args.n_envs
        normalize_wrapper = vectorized_normalized_env
    elif vectorize:
        env_id = 'VectorizedNGSIMEnv'
        alpha = alpha * args.n_envs
        normalize_wrapper = vectorized_normalized_env
    else:
        env_id = 'NGSIMEnv'
        normalize_wrapper = normalize_env
    print(env_params)

    env = JuliaEnv(
        env_id=env_id,
        env_params=env_params,
        using='AutoEnvs'
    )
    # get low and high values for normalizing _real_ actions
    low, high = env.action_space.low, env.action_space.high
    env = TfEnv(normalize_wrapper(env, normalize_obs=True, obs_alpha=alpha))
    add_kwargs_to_reset(env)
    return env, low, high

def build_critic(args, data, env, writer=None):
    if args.use_critic_replay_memory:
        critic_replay_memory = hgail.misc.utils.KeyValueReplayMemory(maxsize=3 * args.batch_size)
    else:
        critic_replay_memory = None

    critic_dataset = CriticDataset(
        data,
        replay_memory=critic_replay_memory,
        batch_size=args.critic_batch_size,
        flat_recurrent=args.policy_recurrent
    )

    critic_network = ObservationActionMLP(
        name='critic',
        hidden_layer_dims=args.critic_hidden_layer_dims,
        dropout_keep_prob=args.critic_dropout_keep_prob
    )
    critic = WassersteinCritic(
        obs_dim=env.observation_space.flat_dim,
        act_dim=env.action_space.flat_dim,
        dataset=critic_dataset,
        network=critic_network,
        gradient_penalty=args.gradient_penalty,
        optimizer=tf.train.RMSPropOptimizer(args.critic_learning_rate),
        n_train_epochs=args.n_critic_train_epochs,
        summary_writer=writer,
        grad_norm_rescale=args.critic_grad_rescale,
        verbose=2,
        debug_nan=True
    )
    return critic

def build_policy(args, env, latent_sampler=None):
    if args.use_infogail:
        if latent_sampler is None:
            latent_sampler = UniformlyRandomLatentSampler(
                scheduler=ConstantIntervalScheduler(k=args.scheduler_k),
                name='latent_sampler',
                dim=args.latent_dim
            )
        if args.policy_recurrent:
            policy = GaussianLatentVarGRUPolicy(
                name="policy",
                latent_sampler=latent_sampler,
                env_spec=env.spec,
                hidden_dim=args.recurrent_hidden_dim,
            )
        else:
            print("GaussianLatentVarMLPPolicy")
            policy = GaussianLatentVarMLPPolicy(
                name="policy",
                latent_sampler=latent_sampler,
                env_spec=env.spec,
                hidden_sizes=args.policy_mean_hidden_layer_dims,
                std_hidden_sizes=args.policy_std_hidden_layer_dims
            )
    else:
        if args.policy_recurrent:
            print("GaussianGRUPolicy")
            policy = GaussianGRUPolicy(
                name="policy",
                env_spec=env.spec,
                hidden_dim=args.recurrent_hidden_dim,
                output_nonlinearity=None,
                learn_std=True
            )
        else:
            print("GaussianMLPPolicy")
            policy = GaussianMLPPolicy(
                name="policy",
                env_spec=env.spec,
                hidden_sizes=args.policy_mean_hidden_layer_dims,
                std_hidden_sizes=args.policy_std_hidden_layer_dims,
                adaptive_std=True,
                output_nonlinearity=None,
                learn_std=True
            )
    return policy

def build_recognition_model(args, env, writer=None):
    if args.use_infogail:
        recognition_dataset = RecognitionDataset(
            args.batch_size,
            flat_recurrent=args.policy_recurrent
        )
        recognition_network = ObservationActionMLP(
            name='recog',
            hidden_layer_dims=args.recognition_hidden_layer_dims,
            output_dim=args.latent_dim
        )
        recognition_model = RecognitionModel(
            obs_dim=env.observation_space.flat_dim,
            act_dim=env.action_space.flat_dim,
            dataset=recognition_dataset,
            network=recognition_network,
            variable_type='categorical',
            latent_dim=args.latent_dim,
            optimizer=tf.train.AdamOptimizer(args.recognition_learning_rate),
            n_train_epochs=args.n_recognition_train_epochs,
            summary_writer=writer,
            verbose=2
        )
    else:
        recognition_model = None
    return recognition_model

def build_baseline(args, env):
    return GaussianMLPBaseline(env_spec=env.spec)

def build_reward_handler(args, writer=None):
    reward_handler = hgail.misc.utils.RewardHandler(
        use_env_rewards=args.reward_handler_use_env_rewards,
        max_epochs=args.reward_handler_max_epochs,  # epoch at which final scales are used
        critic_final_scale=args.reward_handler_critic_final_scale,
        recognition_initial_scale=0.,
        recognition_final_scale=args.reward_handler_recognition_final_scale,
        summary_writer=writer,
        normalize_rewards=True,
        critic_clip_low=-100,
        critic_clip_high=100,
    )
    return reward_handler

def build_hierarchy(args, env, writer=None):
    levels = []

    latent_sampler = UniformlyRandomLatentSampler(
        name='base_latent_sampler',
        dim=args.latent_dim,
        scheduler=ConstantIntervalScheduler(k=args.env_H)
    )
    for level_idx in [1,0]:
        # wrap env in different spec depending on level
        if level_idx == 0:
            level_env = env
        else:
            level_env = SpecWrapperEnv(
                env,
                action_space=Discrete(args.latent_dim),
                observation_space=env.observation_space
            )

        with tf.variable_scope('level_{}'.format(level_idx)):
            # recognition_model = build_recognition_model(args, level_env, writer)
            recognition_model = None
            if level_idx == 0:
                policy = build_policy(args, env, latent_sampler=latent_sampler)
            else:
                scheduler = ConstantIntervalScheduler(k=args.scheduler_k)
                policy = latent_sampler = CategoricalLatentSampler(
                    scheduler=scheduler,
                    name='latent_sampler',
                    policy_name='latent_sampler_policy',
                    dim=args.latent_dim,
                    env_spec=level_env.spec,
                    latent_sampler=latent_sampler,
                    max_n_envs=args.n_envs
                )
            baseline = build_baseline(args, level_env)
            if args.vectorize:
                force_batch_sampler = False
                if level_idx == 0:
                    sampler_args = dict(n_envs=args.n_envs)
                else:
                    sampler_args = None
            else:
                force_batch_sampler = True
                sampler_args = None

            sampler_cls = None if level_idx == 0 else HierarchySampler
            algo = TRPO(
                env=level_env,
                policy=policy,
                baseline=baseline,
                batch_size=args.batch_size,
                max_path_length=args.max_path_length,
                n_itr=args.n_itr,
                discount=args.discount,
                step_size=args.trpo_step_size,
                sampler_cls=sampler_cls,
                force_batch_sampler=force_batch_sampler,
                sampler_args=sampler_args,
                optimizer_args=dict(
                    max_backtracks=50,
                    debug_nan=True
                )
            )
            reward_handler = build_reward_handler(args, writer)
            level = Level(
                depth=level_idx,
                algo=algo,
                reward_handler=reward_handler,
                recognition_model=recognition_model,
                start_itr=0,
                end_itr=0 if level_idx == 0 else np.inf
            )
            levels.append(level)

    # by convention the order of the levels should be increasing
    # but they must be built in the reverse order
    # so reverse the list before returning it
    return list(reversed(levels))

'''
setup
'''

def latest_snapshot(exp_dir, phase='train'):
    snapshot_dir = os.path.join(exp_dir, phase, 'log')
    snapshots = glob.glob('{}/itr_*.pkl'.format(snapshot_dir))
    latest = sorted(snapshots, reverse=True)[0]
    return latest

def set_up_experiment(
        exp_name,
        phase,
        exp_home='../../data/experiments/',
        snapshot_gap=5):
    maybe_mkdir(exp_home)
    exp_dir = os.path.join(exp_home, exp_name)
    maybe_mkdir(exp_dir)
    phase_dir = os.path.join(exp_dir, phase)
    maybe_mkdir(phase_dir)
    log_dir = os.path.join(phase_dir, 'log')
    maybe_mkdir(log_dir)
    logger.set_snapshot_dir(log_dir)
    logger.set_snapshot_mode('gap')
    logger.set_snapshot_gap(snapshot_gap)
    log_filepath = os.path.join(log_dir, 'log.txt')
    logger.add_text_output(log_filepath)
    return exp_dir

'''
data utilities
'''

def compute_lengths(arr):
    sums = np.sum(np.array(arr), axis=2)
    lengths = []
    for sample in sums:
        zero_idxs = np.where(sample == 0.)[0]
        if len(zero_idxs) == 0:
            lengths.append(len(sample))
        else:
            lengths.append(zero_idxs[0])
    return np.array(lengths)

def normalize(x, clip_std_multiple=np.inf):
    mean = np.mean(x, axis=0, keepdims=True)
    x = x - mean
    std = np.std(x, axis=0, keepdims=True) + 1e-8
    up = std * clip_std_multiple
    lb = - std * clip_std_multiple
    x = np.clip(x, lb, up)
    x = x / std
    return x, mean, std

def normalize_range(x, low, high):
    low = np.array(low)
    high = np.array(high)
    mean = (high + low) / 2.
    half_range = (high - low) / 2.
    x = (x - mean) / half_range
    x = np.clip(x, -1, 1)
    return x

def load_x_feature_names(filepath, ngsim_filename):
    print(filepath)
    f = h5py.File(filepath, 'r')
    xs = []
    traj_id = NGSIM_FILENAME_TO_ID[ngsim_filename]
    # in case this needs to allow for multiple files in the future
    traj_ids = [traj_id]
    for i in traj_ids:
        if str(i) in f.keys():
            xs.append(f[str(i)])
        else:
            raise ValueError('invalid key to trajectory data: {}'.format(i))
    x = np.concatenate(xs)
    feature_names = f.attrs['feature_names']
    return x, feature_names

def load_data(
        filepath,
        act_keys=['accel', 'turn_rate_global'],
        ngsim_filename='trajdata_i101_trajectories-0750am-0805am.txt',
        debug_size=None,
        min_length=50,
        normalize_data=True,
        shuffle=False,
        act_low=-1,
        act_high=1,
        clip_std_multiple=np.inf):

    # loading varies based on dataset type
    x, feature_names = load_x_feature_names(filepath, ngsim_filename)

    # optionally keep it to a reasonable size
    if debug_size is not None:
        x = x[:debug_size]

    if shuffle:
        idxs = np.random.permutation(len(x))
        x = x[idxs]

    # compute lengths of the samples before anything else b/c this is fragile
    lengths = compute_lengths(x)

    # flatten the dataset to (n_samples, n_features)
    # taking only the valid timesteps from each sample
    # i.e., throw out timeseries information
    xs = []
    for i, l in enumerate(lengths):
        # enforce minimum length constraint
        if l >= min_length:
            xs.append(x[i,:l])
    x = np.concatenate(xs)

    # split into observations and actions
    # redundant because the environment is not able to extract actions
    obs = x
    act_idxs = [i for (i,n) in enumerate(feature_names) if n in act_keys]
    act = x[:, act_idxs]

    if normalize_data:
        # normalize it all, _no_ test / val split
        obs, obs_mean, obs_std = normalize(obs, clip_std_multiple)
        # normalize actions to between -1 and 1
        act = normalize_range(act, act_low, act_high)
    else:
        obs_mean = None
        obs_std = None

    return dict(
        observations=obs,
        actions=act,
        obs_mean=obs_mean,
        obs_std=obs_std,
    )
[ "numpy.clip", "rllab.misc.logger.add_text_output", "numpy.array", "hgail.misc.datasets.RecognitionDataset", "rllab.misc.logger.set_snapshot_mode", "rllab.misc.logger.set_snapshot_dir", "numpy.mean", "numpy.savez", "os.path.exists", "hgail.algos.hgail_impl.Level", "numpy.where", "hgail.policies.gaussian_latent_var_gru_policy.GaussianLatentVarGRUPolicy", "os.mkdir", "numpy.concatenate", "sandbox.rocky.tf.policies.gaussian_mlp_policy.GaussianMLPPolicy", "julia_env.julia_env.JuliaEnv", "tensorflow.train.AdamOptimizer", "hgail.core.models.ObservationActionMLP", "os.path.expanduser", "hgail.policies.scheduling.ConstantIntervalScheduler", "rllab.misc.logger.set_snapshot_gap", "hgail.baselines.gaussian_mlp_baseline.GaussianMLPBaseline", "h5py.File", "hgail.policies.gaussian_latent_var_mlp_policy.GaussianLatentVarMLPPolicy", "numpy.std", "hgail.policies.categorical_latent_sampler.CategoricalLatentSampler", "tensorflow.train.RMSPropOptimizer", "sandbox.rocky.tf.policies.gaussian_gru_policy.GaussianGRUPolicy", "hgail.misc.datasets.CriticDataset", "os.path.join", "numpy.load", "sandbox.rocky.tf.spaces.discrete.Discrete" ]
[((2367, 2398), 'numpy.savez', 'np.savez', (['filepath'], {'trajs': 'trajs'}), '(filepath, trajs=trajs)\n', (2375, 2398), True, 'import numpy as np\n'), ((4357, 4403), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.julia/v0.6/NGSIM/data"""'], {}), "('~/.julia/v0.6/NGSIM/data')\n", (4375, 4403), False, 'import os, pdb\n'), ((5753, 5817), 'julia_env.julia_env.JuliaEnv', 'JuliaEnv', ([], {'env_id': 'env_id', 'env_params': 'env_params', 'using': '"""AutoEnvs"""'}), "(env_id=env_id, env_params=env_params, using='AutoEnvs')\n", (5761, 5817), False, 'from julia_env.julia_env import JuliaEnv\n'), ((6354, 6487), 'hgail.misc.datasets.CriticDataset', 'CriticDataset', (['data'], {'replay_memory': 'critic_replay_memory', 'batch_size': 'args.critic_batch_size', 'flat_recurrent': 'args.policy_recurrent'}), '(data, replay_memory=critic_replay_memory, batch_size=args.\n critic_batch_size, flat_recurrent=args.policy_recurrent)\n', (6367, 6487), False, 'from hgail.misc.datasets import CriticDataset, RecognitionDataset\n'), ((6543, 6681), 'hgail.core.models.ObservationActionMLP', 'ObservationActionMLP', ([], {'name': '"""critic"""', 'hidden_layer_dims': 'args.critic_hidden_layer_dims', 'dropout_keep_prob': 'args.critic_dropout_keep_prob'}), "(name='critic', hidden_layer_dims=args.\n critic_hidden_layer_dims, dropout_keep_prob=args.critic_dropout_keep_prob)\n", (6563, 6681), False, 'from hgail.core.models import ObservationActionMLP\n'), ((9986, 10024), 'hgail.baselines.gaussian_mlp_baseline.GaussianMLPBaseline', 'GaussianMLPBaseline', ([], {'env_spec': 'env.spec'}), '(env_spec=env.spec)\n', (10005, 10024), False, 'from hgail.baselines.gaussian_mlp_baseline import GaussianMLPBaseline\n'), ((13741, 13776), 'os.path.join', 'os.path.join', (['exp_dir', 'phase', '"""log"""'], {}), "(exp_dir, phase, 'log')\n", (13753, 13776), False, 'import os, pdb\n'), ((14074, 14106), 'os.path.join', 'os.path.join', (['exp_home', 'exp_name'], {}), '(exp_home, exp_name)\n', (14086, 14106), False, 'import os, pdb\n'), ((14148, 14176), 'os.path.join', 'os.path.join', (['exp_dir', 'phase'], {}), '(exp_dir, phase)\n', (14160, 14176), False, 'import os, pdb\n'), ((14218, 14248), 'os.path.join', 'os.path.join', (['phase_dir', '"""log"""'], {}), "(phase_dir, 'log')\n", (14230, 14248), False, 'import os, pdb\n'), ((14278, 14310), 'rllab.misc.logger.set_snapshot_dir', 'logger.set_snapshot_dir', (['log_dir'], {}), '(log_dir)\n', (14301, 14310), True, 'import rllab.misc.logger as logger\n'), ((14315, 14346), 'rllab.misc.logger.set_snapshot_mode', 'logger.set_snapshot_mode', (['"""gap"""'], {}), "('gap')\n", (14339, 14346), True, 'import rllab.misc.logger as logger\n'), ((14351, 14388), 'rllab.misc.logger.set_snapshot_gap', 'logger.set_snapshot_gap', (['snapshot_gap'], {}), '(snapshot_gap)\n', (14374, 14388), True, 'import rllab.misc.logger as logger\n'), ((14408, 14440), 'os.path.join', 'os.path.join', (['log_dir', '"""log.txt"""'], {}), "(log_dir, 'log.txt')\n", (14420, 14440), False, 'import os, pdb\n'), ((14445, 14481), 'rllab.misc.logger.add_text_output', 'logger.add_text_output', (['log_filepath'], {}), '(log_filepath)\n', (14467, 14481), True, 'import rllab.misc.logger as logger\n'), ((14818, 14835), 'numpy.array', 'np.array', (['lengths'], {}), '(lengths)\n', (14826, 14835), True, 'import numpy as np\n'), ((14892, 14925), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)', 'keepdims': '(True)'}), '(x, axis=0, keepdims=True)\n', (14899, 14925), True, 'import numpy as np\n'), ((15069, 15087), 'numpy.clip', 'np.clip', (['x', 'lb', 
'up'], {}), '(x, lb, up)\n', (15076, 15087), True, 'import numpy as np\n'), ((15174, 15187), 'numpy.array', 'np.array', (['low'], {}), '(low)\n', (15182, 15187), True, 'import numpy as np\n'), ((15199, 15213), 'numpy.array', 'np.array', (['high'], {}), '(high)\n', (15207, 15213), True, 'import numpy as np\n'), ((15318, 15335), 'numpy.clip', 'np.clip', (['x', '(-1)', '(1)'], {}), '(x, -1, 1)\n', (15325, 15335), True, 'import numpy as np\n'), ((15430, 15454), 'h5py.File', 'h5py.File', (['filepath', '"""r"""'], {}), "(filepath, 'r')\n", (15439, 15454), False, 'import h5py\n'), ((15802, 15820), 'numpy.concatenate', 'np.concatenate', (['xs'], {}), '(xs)\n', (15816, 15820), True, 'import numpy as np\n'), ((16959, 16977), 'numpy.concatenate', 'np.concatenate', (['xs'], {}), '(xs)\n', (16973, 16977), True, 'import numpy as np\n'), ((2034, 2057), 'os.path.exists', 'os.path.exists', (['dirpath'], {}), '(dirpath)\n', (2048, 2057), False, 'import os, pdb\n'), ((2067, 2084), 'os.mkdir', 'os.mkdir', (['dirpath'], {}), '(dirpath)\n', (2075, 2084), False, 'import os, pdb\n'), ((2444, 2461), 'numpy.load', 'np.load', (['filepath'], {}), '(filepath)\n', (2451, 2461), True, 'import numpy as np\n'), ((3171, 3198), 'os.path.join', 'os.path.join', (['directory', 'fn'], {}), '(directory, fn)\n', (3183, 3198), False, 'import os, pdb\n'), ((4421, 4463), 'os.path.join', 'os.path.join', (['basedir', 'args.ngsim_filename'], {}), '(basedir, args.ngsim_filename)\n', (4433, 4463), False, 'import os, pdb\n'), ((9055, 9128), 'hgail.misc.datasets.RecognitionDataset', 'RecognitionDataset', (['args.batch_size'], {'flat_recurrent': 'args.policy_recurrent'}), '(args.batch_size, flat_recurrent=args.policy_recurrent)\n', (9073, 9128), False, 'from hgail.misc.datasets import CriticDataset, RecognitionDataset\n'), ((9193, 9314), 'hgail.core.models.ObservationActionMLP', 'ObservationActionMLP', ([], {'name': '"""recog"""', 'hidden_layer_dims': 'args.recognition_hidden_layer_dims', 'output_dim': 'args.latent_dim'}), "(name='recog', hidden_layer_dims=args.\n recognition_hidden_layer_dims, output_dim=args.latent_dim)\n", (9213, 9314), False, 'from hgail.core.models import ObservationActionMLP\n'), ((14570, 14583), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (14578, 14583), True, 'import numpy as np\n'), ((14953, 14985), 'numpy.std', 'np.std', (['x'], {'axis': '(0)', 'keepdims': '(True)'}), '(x, axis=0, keepdims=True)\n', (14959, 14985), True, 'import numpy as np\n'), ((6962, 7014), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['args.critic_learning_rate'], {}), '(args.critic_learning_rate)\n', (6987, 7014), True, 'import tensorflow as tf\n'), ((7604, 7737), 'hgail.policies.gaussian_latent_var_gru_policy.GaussianLatentVarGRUPolicy', 'GaussianLatentVarGRUPolicy', ([], {'name': '"""policy"""', 'latent_sampler': 'latent_sampler', 'env_spec': 'env.spec', 'hidden_dim': 'args.recurrent_hidden_dim'}), "(name='policy', latent_sampler=latent_sampler,\n env_spec=env.spec, hidden_dim=args.recurrent_hidden_dim)\n", (7630, 7737), False, 'from hgail.policies.gaussian_latent_var_gru_policy import GaussianLatentVarGRUPolicy\n'), ((7896, 8096), 'hgail.policies.gaussian_latent_var_mlp_policy.GaussianLatentVarMLPPolicy', 'GaussianLatentVarMLPPolicy', ([], {'name': '"""policy"""', 'latent_sampler': 'latent_sampler', 'env_spec': 'env.spec', 'hidden_sizes': 'args.policy_mean_hidden_layer_dims', 'std_hidden_sizes': 'args.policy_std_hidden_layer_dims'}), "(name='policy', latent_sampler=latent_sampler,\n env_spec=env.spec, 
hidden_sizes=args.policy_mean_hidden_layer_dims,\n std_hidden_sizes=args.policy_std_hidden_layer_dims)\n", (7922, 8096), False, 'from hgail.policies.gaussian_latent_var_mlp_policy import GaussianLatentVarMLPPolicy\n'), ((8287, 8423), 'sandbox.rocky.tf.policies.gaussian_gru_policy.GaussianGRUPolicy', 'GaussianGRUPolicy', ([], {'name': '"""policy"""', 'env_spec': 'env.spec', 'hidden_dim': 'args.recurrent_hidden_dim', 'output_nonlinearity': 'None', 'learn_std': '(True)'}), "(name='policy', env_spec=env.spec, hidden_dim=args.\n recurrent_hidden_dim, output_nonlinearity=None, learn_std=True)\n", (8304, 8423), False, 'from sandbox.rocky.tf.policies.gaussian_gru_policy import GaussianGRUPolicy\n'), ((8587, 8815), 'sandbox.rocky.tf.policies.gaussian_mlp_policy.GaussianMLPPolicy', 'GaussianMLPPolicy', ([], {'name': '"""policy"""', 'env_spec': 'env.spec', 'hidden_sizes': 'args.policy_mean_hidden_layer_dims', 'std_hidden_sizes': 'args.policy_std_hidden_layer_dims', 'adaptive_std': '(True)', 'output_nonlinearity': 'None', 'learn_std': '(True)'}), "(name='policy', env_spec=env.spec, hidden_sizes=args.\n policy_mean_hidden_layer_dims, std_hidden_sizes=args.\n policy_std_hidden_layer_dims, adaptive_std=True, output_nonlinearity=\n None, learn_std=True)\n", (8604, 8815), False, 'from sandbox.rocky.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy\n'), ((10809, 10848), 'hgail.policies.scheduling.ConstantIntervalScheduler', 'ConstantIntervalScheduler', ([], {'k': 'args.env_H'}), '(k=args.env_H)\n', (10834, 10848), False, 'from hgail.policies.scheduling import ConstantIntervalScheduler\n'), ((13165, 13329), 'hgail.algos.hgail_impl.Level', 'Level', ([], {'depth': 'level_idx', 'algo': 'algo', 'reward_handler': 'reward_handler', 'recognition_model': 'recognition_model', 'start_itr': '(0)', 'end_itr': '(0 if level_idx == 0 else np.inf)'}), '(depth=level_idx, algo=algo, reward_handler=reward_handler,\n recognition_model=recognition_model, start_itr=0, end_itr=0 if \n level_idx == 0 else np.inf)\n', (13170, 13329), False, 'from hgail.algos.hgail_impl import Level\n'), ((14654, 14677), 'numpy.where', 'np.where', (['(sample == 0.0)'], {}), '(sample == 0.0)\n', (14662, 14677), True, 'import numpy as np\n'), ((4545, 4581), 'os.path.join', 'os.path.join', (['exp_dir', '"""imitate/viz"""'], {}), "(exp_dir, 'imitate/viz')\n", (4557, 4581), False, 'import os, pdb\n'), ((9688, 9742), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['args.recognition_learning_rate'], {}), '(args.recognition_learning_rate)\n', (9710, 9742), True, 'import tensorflow as tf\n'), ((11547, 11592), 'hgail.policies.scheduling.ConstantIntervalScheduler', 'ConstantIntervalScheduler', ([], {'k': 'args.scheduler_k'}), '(k=args.scheduler_k)\n', (11572, 11592), False, 'from hgail.policies.scheduling import ConstantIntervalScheduler\n'), ((11635, 11850), 'hgail.policies.categorical_latent_sampler.CategoricalLatentSampler', 'CategoricalLatentSampler', ([], {'scheduler': 'scheduler', 'name': '"""latent_sampler"""', 'policy_name': '"""latent_sampler_policy"""', 'dim': 'args.latent_dim', 'env_spec': 'level_env.spec', 'latent_sampler': 'latent_sampler', 'max_n_envs': 'args.n_envs'}), "(scheduler=scheduler, name='latent_sampler',\n policy_name='latent_sampler_policy', dim=args.latent_dim, env_spec=\n level_env.spec, latent_sampler=latent_sampler, max_n_envs=args.n_envs)\n", (11659, 11850), False, 'from hgail.policies.categorical_latent_sampler import CategoricalLatentSampler\n'), ((7413, 7458), 
'hgail.policies.scheduling.ConstantIntervalScheduler', 'ConstantIntervalScheduler', ([], {'k': 'args.scheduler_k'}), '(k=args.scheduler_k)\n', (7438, 7458), False, 'from hgail.policies.scheduling import ConstantIntervalScheduler\n'), ((11098, 11123), 'sandbox.rocky.tf.spaces.discrete.Discrete', 'Discrete', (['args.latent_dim'], {}), '(args.latent_dim)\n', (11106, 11123), False, 'from sandbox.rocky.tf.spaces.discrete import Discrete\n')]
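A minimal usage sketch for the data loader defined in the row above; the HDF5 path is a placeholder, while the keyword arguments are the ones the function actually exposes:

data = load_data(
    '/path/to/ngsim.h5',  # hypothetical trajectory file
    ngsim_filename='trajdata_i101_trajectories-0750am-0805am.txt',
    min_length=50,
    clip_std_multiple=10.0,
)
print(data['observations'].shape, data['actions'].shape)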
import argparse
import os
from glob import glob

import imageio
from tqdm import tqdm

from csbdeep.utils import normalize
from stardist.models import StarDist3D


def get_image_files(root, image_folder, ext):
    # get the image and label mask paths and validate them
    image_pattern = os.path.join(root, image_folder, f'*{ext}')
    print("Looking for images with the pattern", image_pattern)
    images = glob(image_pattern)
    assert len(images) > 0, "Did not find any images"
    images.sort()

    return images


# could be done more efficiently, see
# https://github.com/hci-unihd/batchlib/blob/master/batchlib/segmentation/stardist_prediction.py
def run_prediction(image_files, model_path, root, prediction_folder):
    # load the model
    model_root, model_name = os.path.split(model_path.rstrip('/'))
    model = StarDist3D(None, name=model_name, basedir=model_root)

    res_folder = os.path.join(root, prediction_folder)
    os.makedirs(res_folder, exist_ok=True)

    # normalization parameters: lower and upper percentile used for image normalization
    # maybe these should be exposed
    lower_percentile = 1
    upper_percentile = 99.8
    ax_norm = (0, 1, 2)

    for im_file in tqdm(image_files, desc="run stardist prediction"):
        im = imageio.volread(im_file)
        im = normalize(im, lower_percentile, upper_percentile, axis=ax_norm)
        pred, _ = model.predict_instances(im)

        im_name = os.path.split(im_file)[1]
        save_path = os.path.join(res_folder, im_name)
        imageio.imsave(save_path, pred)


def predict_stardist(root, model_path, image_folder, prediction_folder, ext):
    print("Loading images")
    image_files = get_image_files(root, image_folder, ext)
    print("Found", len(image_files), "images for prediction")

    print("Start prediction ...")
    run_prediction(image_files, model_path, root, prediction_folder)
    print("Finished prediction")


def main():
    parser = argparse.ArgumentParser(description="Predict new images with a stardist model")
    parser.add_argument('root', type=str, help="Root folder with image data.")
    parser.add_argument('model_path', type=str, help="Where the model is saved.")
    parser.add_argument('--image_folder', type=str, default='images',
                        help="Name of the folder with the training images, default: images.")
    parser.add_argument('--prediction_folder', type=str, default='predictions',
                        help="Name of the folder where the predictions should be stored, default: predictions.")
    parser.add_argument('--ext', type=str, default='.tif', help="Image file extension, default: .tif")

    args = parser.parse_args()
    predict_stardist(args.root, args.model_path, args.image_folder,
                     args.prediction_folder, args.ext)


if __name__ == '__main__':
    main()
[ "os.makedirs", "argparse.ArgumentParser", "imageio.imsave", "tqdm.tqdm", "os.path.join", "csbdeep.utils.normalize", "os.path.split", "imageio.volread", "stardist.models.StarDist3D", "glob.glob" ]
[((289, 332), 'os.path.join', 'os.path.join', (['root', 'image_folder', 'f"""*{ext}"""'], {}), "(root, image_folder, f'*{ext}')\n", (301, 332), False, 'import os\n'), ((410, 429), 'glob.glob', 'glob', (['image_pattern'], {}), '(image_pattern)\n', (414, 429), False, 'from glob import glob\n'), ((829, 882), 'stardist.models.StarDist3D', 'StarDist3D', (['None'], {'name': 'model_name', 'basedir': 'model_root'}), '(None, name=model_name, basedir=model_root)\n', (839, 882), False, 'from stardist.models import StarDist3D\n'), ((901, 938), 'os.path.join', 'os.path.join', (['root', 'prediction_folder'], {}), '(root, prediction_folder)\n', (913, 938), False, 'import os\n'), ((943, 981), 'os.makedirs', 'os.makedirs', (['res_folder'], {'exist_ok': '(True)'}), '(res_folder, exist_ok=True)\n', (954, 981), False, 'import os\n'), ((1204, 1253), 'tqdm.tqdm', 'tqdm', (['image_files'], {'desc': '"""run stardist prediction"""'}), "(image_files, desc='run stardist prediction')\n", (1208, 1253), False, 'from tqdm import tqdm\n'), ((1948, 2027), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Predict new images with a stardist model"""'}), "(description='Predict new images with a stardist model')\n", (1971, 2027), False, 'import argparse\n'), ((1268, 1292), 'imageio.volread', 'imageio.volread', (['im_file'], {}), '(im_file)\n', (1283, 1292), False, 'import imageio\n'), ((1306, 1369), 'csbdeep.utils.normalize', 'normalize', (['im', 'lower_percentile', 'upper_percentile'], {'axis': 'ax_norm'}), '(im, lower_percentile, upper_percentile, axis=ax_norm)\n', (1315, 1369), False, 'from csbdeep.utils import normalize\n'), ((1481, 1514), 'os.path.join', 'os.path.join', (['res_folder', 'im_name'], {}), '(res_folder, im_name)\n', (1493, 1514), False, 'import os\n'), ((1523, 1554), 'imageio.imsave', 'imageio.imsave', (['save_path', 'pred'], {}), '(save_path, pred)\n', (1537, 1554), False, 'import imageio\n'), ((1435, 1457), 'os.path.split', 'os.path.split', (['im_file'], {}), '(im_file)\n', (1448, 1457), False, 'import os\n')]
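A minimal usage sketch for the prediction entry point in the row above; all paths and folder names are placeholders, equivalent to what the command-line interface would pass in:

predict_stardist(
    root='/data/experiment',
    model_path='/models/stardist3d',
    image_folder='images',
    prediction_folder='predictions',
    ext='.tif',
)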
import os, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import json
import logging
import yaml
import requests
import time
from actions.migrate_job_action import MigrateJobAction
from actions.send_alert_action import SendAlertAction
from actions.reboot_node_action import RebootNodeAction
from actions.uncordon_action import UncordonAction
from datetime import datetime, timedelta, timezone
from rules_abc import Rule
from utils import prometheus_util, k8s_util
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

activity_log = logging.getLogger('activity')


def _extract_node_boot_time_info(response):
    node_boot_times = {}

    if response is not None and "data" in response:
        if "result" in response["data"]:
            for m in response["data"]["result"]:
                instance = m["metric"]["instance"].split(":")[0]
                boot_datetime = datetime.utcfromtimestamp(float(m["value"][1]))
                node_boot_times[instance] = boot_datetime

    return node_boot_times


def _create_email_for_pause_resume_job(job_id, node_names, job_link, job_owner_email):
    message = MIMEMultipart()
    message['Subject'] = f'Repair Manager Alert [{job_id} paused/resumed]'
    message['To'] = job_owner_email
    body = f'''<p>As previously notified, the following node(s) require reboot due to uncorrectable ECC error:</p>
    <table border="1">'''
    for node in node_names:
        body += f'''<tr><td>{node}</td></tr>'''
    body += f'''</table><p>
    <p> Job <a href="{job_link}">{job_id}</a> has been paused/resumed so node(s) can be repaired.</p>'''
    message.attach(MIMEText(body, 'html'))
    return message


class EccRebootNodeRule(Rule):

    def __init__(self, alert, config):
        self.rule = 'ecc_rule'
        self.alert = alert
        self.config = config
        self.ecc_config = self.load_ecc_config()
        self.etcd_config = self.load_etcd_config()
        self.all_jobs_indexed_by_node = {}
        self.nodes_ready_for_action = set()
        self.jobs_ready_for_migration = {}

    def load_ecc_config(self):
        with open('/etc/RepairManager/config/ecc-config.yaml', 'r') as file:
            return yaml.safe_load(file)

    def load_etcd_config(self):
        with open('/etc/RepairManager/config/etcd.conf.yaml', 'r') as file:
            return yaml.safe_load(file)

    def check_for_rebooted_nodes_and_uncordon(self, dry_run):
        # if node has been rebooted since ecc error initially detected,
        # uncordon, remove from rule_cache, and mark as resolved
        url = f"http://{self.config['prometheus']['ip']}:{self.config['prometheus']['port']}"
        query = self.config['prometheus']['node_boot_time_query']
        reboot_times_url = prometheus_util.format_url_query(url, query)
        uncordon_action = UncordonAction()

        try:
            response = requests.get(reboot_times_url, timeout=10)
            if response:
                reboot_data = response.json()
                reboot_times = _extract_node_boot_time_info(reboot_data)

                bad_nodes = self.alert.get_rule_cache_keys(self.rule)
                for node in bad_nodes:
                    instance = self.alert.get_rule_cache(self.rule, node)["instance"]
                    time_found_string = self.alert.get_rule_cache(self.rule, node)["time_found"]
                    time_found_datetime = datetime.strptime(time_found_string, self.config['date_time_format'])
                    last_reboot_time = reboot_times[instance]
                    if last_reboot_time > time_found_datetime:
                        uncordon_action.execute(node, dry_run)
                        self.alert.remove_from_rule_cache(self.rule, node)
                        activity_log.info({"action":"marked as resolved from incorrectable ecc error","node":node})
        except:
            logging.exception(f'Error checking if nodes have rebooted')

    def check_for_nodes_with_no_jobs(self):
        # if no jobs are running on node, take action on node
        bad_nodes = self.alert.get_rule_cache_keys(self.rule)
        self.all_jobs_indexed_by_node = k8s_util.get_job_info_indexed_by_node(
            nodes=bad_nodes,
            portal_url=self.config['portal_url'],
            cluster_name=self.config['cluster_name'])

        for node in bad_nodes:
            node_has_no_jobs = node not in self.all_jobs_indexed_by_node
            node_reboot_pending = 'reboot_requested' in self.alert.get_rule_cache(self.rule, node)
            if node_has_no_jobs and not node_reboot_pending:
                logging.debug(f'node {node} has no running jobs')
                self.nodes_ready_for_action.add(node)

    def check_if_nodes_are_due_for_reboot(self):
        # if configured time has elapsed since initial detection, take action on node
        bad_nodes = self.alert.get_rule_cache_keys(self.rule)
        for node in bad_nodes:
            time_found_string = self.alert.rule_cache[self.rule][node]["time_found"]
            time_found_datetime = datetime.strptime(time_found_string, self.config['date_time_format'])
            delta = timedelta(days=self.ecc_config.get("days_until_node_reboot", 5))
            now = datetime.utcnow()
            node_reboot_pending = 'reboot_requested' in self.alert.get_rule_cache(self.rule, node)
            if now - time_found_datetime > delta and not node_reboot_pending:
                logging.debug(f'Configured time has passed for node {node}')
                self.nodes_ready_for_action.add(node)
                self.determine_jobs_to_be_migrated(node)

    def determine_jobs_to_be_migrated(self, node):
        if node in self.all_jobs_indexed_by_node:
            jobs_on_node = self.all_jobs_indexed_by_node[node]
            for job in jobs_on_node:
                job_id = job["job_id"]
                if job_id not in self.jobs_ready_for_migration:
                    self.jobs_ready_for_migration[job_id] = {
                        "user_name": job["user_name"],
                        "vc_name": job["vc_name"],
                        "node_names": [node],
                        "job_link": job["job_link"]
                    }
                else:
                    self.jobs_ready_for_migration[job_id]["node_names"].append(node)

    def migrate_jobs_and_alert_job_owners(self, dry_run):
        alert_action = SendAlertAction(self.alert)
        max_attempts = self.ecc_config.get("attempts_for_pause_resume_jobs", 5)
        wait_time = self.ecc_config.get("time_sleep_after_pausing", 30)

        for job_id in self.jobs_ready_for_migration:
            job = self.jobs_ready_for_migration[job_id]
            job_owner = job['user_name']
            job_owner_email = f"{job_owner}@{self.config['job_owner_email_domain']}"
            node_names = job["node_names"]
            job_link = job['job_link']
            rest_url = self.config["rest_url"]

            # migrate all jobs
            migrate_job = MigrateJobAction(rest_url, max_attempts)
            success = migrate_job.execute(
                job_id=job_id,
                job_owner_email=job_owner_email,
                wait_time=wait_time,
                dry_run=dry_run)

            # alert job owners
            if success:
                message = _create_email_for_pause_resume_job(job_id, node_names, job_link, job_owner_email)
                alert_dry_run = dry_run or not self.ecc_config['enable_alert_job_owners']
                alert_action.execute(
                    message=message,
                    dry_run=alert_dry_run,
                    additional_log={"job_id":job_id,"job_owner":job_owner})
            else:
                logging.warning(f"Could not pause/resume the following job: {job_id}")
                # skip rebooting the node this iteration
                # and try again later
                for node in node_names:
                    self.nodes_ready_for_action.remove(node)

    def reboot_bad_nodes(self, dry_run):
        reboot_action = RebootNodeAction()
        for node in self.nodes_ready_for_action:
            success = reboot_action.execute(node, self.etcd_config, dry_run)
            if success:
                # update reboot status so action is not taken again
                cache_value = self.alert.get_rule_cache(self.rule, node)
                cache_value['reboot_requested'] = datetime.utcnow().strftime(self.config['date_time_format'])
                self.alert.update_rule_cache(self.rule, node, cache_value)

    def check_status(self):
        dry_run = not self.ecc_config["enable_reboot"]

        self.check_for_rebooted_nodes_and_uncordon(dry_run)
        self.check_for_nodes_with_no_jobs()
        self.check_if_nodes_are_due_for_reboot()

        return len(self.nodes_ready_for_action) > 0

    def take_action(self):
        dry_run = not self.ecc_config["enable_reboot"]
        self.migrate_jobs_and_alert_job_owners(dry_run)
        self.reboot_bad_nodes(dry_run)
[ "logging.getLogger", "logging.debug", "actions.reboot_node_action.RebootNodeAction", "actions.send_alert_action.SendAlertAction", "datetime.datetime.strptime", "datetime.datetime.utcnow", "logging.warning", "utils.prometheus_util.format_url_query", "requests.get", "utils.k8s_util.get_job_info_indexed_by_node", "logging.exception", "yaml.safe_load", "email.mime.multipart.MIMEMultipart", "os.path.abspath", "actions.migrate_job_action.MigrateJobAction", "actions.uncordon_action.UncordonAction", "email.mime.text.MIMEText" ]
[((582, 611), 'logging.getLogger', 'logging.getLogger', (['"""activity"""'], {}), "('activity')\n", (599, 611), False, 'import logging\n'), ((1163, 1178), 'email.mime.multipart.MIMEMultipart', 'MIMEMultipart', ([], {}), '()\n', (1176, 1178), False, 'from email.mime.multipart import MIMEMultipart\n'), ((47, 72), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (62, 72), False, 'import os, sys\n'), ((1659, 1681), 'email.mime.text.MIMEText', 'MIMEText', (['body', '"""html"""'], {}), "(body, 'html')\n", (1667, 1681), False, 'from email.mime.text import MIMEText\n'), ((2777, 2821), 'utils.prometheus_util.format_url_query', 'prometheus_util.format_url_query', (['url', 'query'], {}), '(url, query)\n', (2809, 2821), False, 'from utils import prometheus_util, k8s_util\n'), ((2848, 2864), 'actions.uncordon_action.UncordonAction', 'UncordonAction', ([], {}), '()\n', (2862, 2864), False, 'from actions.uncordon_action import UncordonAction\n'), ((4129, 4268), 'utils.k8s_util.get_job_info_indexed_by_node', 'k8s_util.get_job_info_indexed_by_node', ([], {'nodes': 'bad_nodes', 'portal_url': "self.config['portal_url']", 'cluster_name': "self.config['cluster_name']"}), "(nodes=bad_nodes, portal_url=self.\n config['portal_url'], cluster_name=self.config['cluster_name'])\n", (4166, 4268), False, 'from utils import prometheus_util, k8s_util\n'), ((6368, 6395), 'actions.send_alert_action.SendAlertAction', 'SendAlertAction', (['self.alert'], {}), '(self.alert)\n', (6383, 6395), False, 'from actions.send_alert_action import SendAlertAction\n'), ((8021, 8039), 'actions.reboot_node_action.RebootNodeAction', 'RebootNodeAction', ([], {}), '()\n', (8037, 8039), False, 'from actions.reboot_node_action import RebootNodeAction\n'), ((2220, 2240), 'yaml.safe_load', 'yaml.safe_load', (['file'], {}), '(file)\n', (2234, 2240), False, 'import yaml\n'), ((2369, 2389), 'yaml.safe_load', 'yaml.safe_load', (['file'], {}), '(file)\n', (2383, 2389), False, 'import yaml\n'), ((2902, 2944), 'requests.get', 'requests.get', (['reboot_times_url'], {'timeout': '(10)'}), '(reboot_times_url, timeout=10)\n', (2914, 2944), False, 'import requests\n'), ((5030, 5099), 'datetime.datetime.strptime', 'datetime.strptime', (['time_found_string', "self.config['date_time_format']"], {}), "(time_found_string, self.config['date_time_format'])\n", (5047, 5099), False, 'from datetime import datetime, timedelta, timezone\n'), ((5203, 5220), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (5218, 5220), False, 'from datetime import datetime, timedelta, timezone\n'), ((6971, 7011), 'actions.migrate_job_action.MigrateJobAction', 'MigrateJobAction', (['rest_url', 'max_attempts'], {}), '(rest_url, max_attempts)\n', (6987, 7011), False, 'from actions.migrate_job_action import MigrateJobAction\n'), ((3403, 3472), 'datetime.datetime.strptime', 'datetime.strptime', (['time_found_string', "self.config['date_time_format']"], {}), "(time_found_string, self.config['date_time_format'])\n", (3420, 3472), False, 'from datetime import datetime, timedelta, timezone\n'), ((3860, 3919), 'logging.exception', 'logging.exception', (['f"""Error checking if nodes have rebooted"""'], {}), "(f'Error checking if nodes have rebooted')\n", (3877, 3919), False, 'import logging\n'), ((4578, 4627), 'logging.debug', 'logging.debug', (['f"""node {node} has no running jobs"""'], {}), "(f'node {node} has no running jobs')\n", (4591, 4627), False, 'import logging\n'), ((5414, 5474), 'logging.debug', 'logging.debug', (['f"""Configured time has passed for 
node {node}"""'], {}), "(f'Configured time has passed for node {node}')\n", (5427, 5474), False, 'import logging\n'), ((7688, 7758), 'logging.warning', 'logging.warning', (['f"""Could not pause/resume the following job: {job_id}"""'], {}), "(f'Could not pause/resume the following job: {job_id}')\n", (7703, 7758), False, 'import logging\n'), ((8380, 8397), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (8395, 8397), False, 'from datetime import datetime, timedelta, timezone\n')]
from __future__ import print_function, unicode_literals, absolute_import, division from six.moves import range, zip, map, reduce, filter from keras.layers import Input, Conv2D, Conv3D, Activation, Lambda from keras.models import Model from keras.layers.merge import Add, Concatenate import tensorflow as tf from keras import backend as K from .blocks import unet_block, unet_blocks, gaussian_2d import re from ..utils import _raise, backend_channels_last import numpy as np def custom_unet(input_shape, last_activation, n_depth=2, n_filter_base=16, kernel_size=(3,3,3), n_conv_per_depth=2, activation="relu", batch_norm=False, dropout=0.0, pool_size=(2,2,2), n_channel_out=1, residual=False, prob_out=False, long_skip=True, eps_scale=1e-3): """ TODO """ if last_activation is None: raise ValueError("last activation has to be given (e.g. 'sigmoid', 'relu')!") all((s % 2 == 1 for s in kernel_size)) or _raise(ValueError('kernel size should be odd in all dimensions.')) channel_axis = -1 if backend_channels_last() else 1 n_dim = len(kernel_size) # TODO: rewrite with conv_block conv = Conv2D if n_dim == 2 else Conv3D input = Input(input_shape, name="input") unet = unet_block(n_depth, n_filter_base, kernel_size, input_planes=input_shape[-1], activation=activation, dropout=dropout, batch_norm=batch_norm, n_conv_per_depth=n_conv_per_depth, pool=pool_size, long_skip=long_skip)(input) final = conv(n_channel_out, (1,)*n_dim, activation='linear')(unet) if residual: if not (n_channel_out == input_shape[-1] if backend_channels_last() else n_channel_out == input_shape[0]): raise ValueError("number of input and output channels must be the same for a residual net.") final = Add()([final, input]) final = Activation(activation=last_activation)(final) if prob_out: scale = conv(n_channel_out, (1,)*n_dim, activation='softplus')(unet) scale = Lambda(lambda x: x+np.float32(eps_scale))(scale) final = Concatenate(axis=channel_axis)([final, scale]) return Model(inputs=input, outputs=final) def uxnet(input_shape, n_depth=2, n_filter_base=16, kernel_size=(3, 3), n_conv_per_depth=2, activation="relu", last_activation='linear', batch_norm=False, dropout=0.0, pool_size=(2, 2), residual=True, odd_to_even=False, shortcut=None, shared_idx=[], prob_out=False, eps_scale=1e-3): """ Multi-body U-Net which learns identity by leaving one plane out in each branch :param input_shape: :param n_depth: :param n_filter_base: :param kernel_size: :param n_conv_per_depth: :param activation: :param last_activation: :param batch_norm: :param dropout: :param pool_size: :param prob_out: :param eps_scale: :return: Model """ # TODO: fill params # TODO: add odd-to-even mode # Define vars channel_axis = -1 if backend_channels_last() else 1 n_planes = input_shape[channel_axis] if n_planes % 2 != 0 and odd_to_even: raise ValueError('Odd-to-even mode does not support uneven number of planes') n_dim = len(kernel_size) conv = Conv2D if n_dim == 2 else Conv3D # Define functional model input = Input(shape=input_shape, name='input_main') # TODO test new implementation and remove old # Split planes (preserve channel) input_x = [Lambda(lambda x: x[..., i:i+1], output_shape=(None, None, 1))(input) for i in range(n_planes)] # We can train either in odd-to-even mode or in LOO mode if odd_to_even: # In this mode we stack together odd and even planes, train the net to predict even from odd and vice versa # input_x_out = [Concatenate(axis=-1)(input_x[j::2]) for j in range(2)] input_x_out = [Concatenate(axis=-1)(input_x[j::2]) for j in range(1, -1, -1)] else: # Concatenate 
planes back in leave-one-out way input_x_out = [Concatenate(axis=-1)([plane for i, plane in enumerate(input_x) if i != j]) for j in range(n_planes)] # if odd_to_even: # input_x_out = [Lambda(lambda x: x[..., j::2], # output_shape=(None, None, n_planes // 2), # name='{}_planes'.format('even' if j == 0 else 'odd'))(input) # for j in range(1, -1, -1)] # else: # # input_x_out = [Lambda(lambda x: x[..., tf.convert_to_tensor([i for i in range(n_planes) if i != j], dtype=tf.int32)], # # output_shape=(None, None, n_planes-1), # # name='leave_{}_plane_out'.format(j))(input) # # for j in range(n_planes)] # # input_x_out = [Lambda(lambda x: K.concatenate([x[..., :j], x[..., (j+1):]], axis=-1), # output_shape=(None, None, n_planes - 1), # name='leave_{}_plane_out'.format(j))(input) # for j in range(n_planes)] # U-Net parameters depend on mode (odd-to-even or LOO) n_blocks = 2 if odd_to_even else n_planes input_planes = n_planes // 2 if odd_to_even else n_planes-1 output_planes = n_planes // 2 if odd_to_even else 1 # Create U-Net blocks (by number of planes) unet_x = unet_blocks(n_blocks=n_blocks, input_planes=input_planes, output_planes=output_planes, n_depth=n_depth, n_filter_base=n_filter_base, kernel_size=kernel_size, activation=activation, dropout=dropout, batch_norm=batch_norm, n_conv_per_depth=n_conv_per_depth, pool=pool_size, shared_idx=shared_idx) unet_x = [unet(inp_out) for unet, inp_out in zip(unet_x, input_x_out)] # Version without weight sharing: # unet_x = [unet_block(n_depth, n_filter_base, kernel_size, # activation=activation, dropout=dropout, batch_norm=batch_norm, # n_conv_per_depth=n_conv_per_depth, pool=pool_size, # prefix='out_{}_'.format(i))(inp_out) for i, inp_out in enumerate(input_x_out)] # TODO: rewritten for sharing -- remove commented below # Convolve n_filter_base to 1 as each U-Net predicts a single plane # unet_x = [conv(1, (1,) * n_dim, activation=activation)(unet) for unet in unet_x] if residual: if odd_to_even: # For residual U-Net sum up output for odd planes with even planes and vice versa unet_x = [Add()([unet, inp]) for unet, inp in zip(unet_x, input_x[::-1])] else: # For residual U-Net sum up output with its neighbor (next for the first plane, previous for the rest unet_x = [Add()([unet, inp]) for unet, inp in zip(unet_x, [input_x[1]]+input_x[:-1])] # Concatenate outputs of blocks, should receive (None, None, None, n_planes) # TODO assert to check shape? 
if odd_to_even: # Split even and odd, assemble them together in the correct order # TODO tests unet_even = [Lambda(lambda x: x[..., i:i+1], output_shape=(None, None, 1), name='even_{}'.format(i))(unet_x[0]) for i in range(n_planes // 2)] unet_odd = [Lambda(lambda x: x[..., i:i+1], output_shape=(None, None, 1), name='odd_{}'.format(i))(unet_x[1]) for i in range(n_planes // 2)] unet_x = list(np.array(list(zip(unet_even, unet_odd))).flatten()) unet = Concatenate(axis=-1)(unet_x) if shortcut is not None: # We can create a shortcut without long skip connection to prevent noise memorization if shortcut == 'unet': shortcut_block = unet_block(long_skip=False, input_planes=n_planes, n_depth=n_depth, n_filter_base=n_filter_base, kernel_size=kernel_size, activation=activation, dropout=dropout, batch_norm=batch_norm, n_conv_per_depth=n_conv_per_depth, pool=pool_size)(input) shortcut_block = conv(n_planes, (1,) * n_dim, activation='linear', name='shortcut_final_conv')(shortcut_block) # Or a simple gaussian blur block elif shortcut == 'gaussian': shortcut_block = gaussian_2d(n_planes, k=13, s=7)(input) else: raise ValueError('Shortcut should be either unet or gaussian') # TODO add or concatenate? unet = Add()([unet, shortcut_block]) # unet = Concatenate(axis=-1)([unet, shortcut_unet]) # Final activation layer final = Activation(activation=last_activation)(unet) if prob_out: scale = conv(n_planes, (1,)*n_dim, activation='softplus')(unet) scale = Lambda(lambda x: x+np.float32(eps_scale))(scale) final = Concatenate(axis=channel_axis)([final, scale]) return Model(inputs=input, outputs=final) def common_unet(n_dim=2, n_depth=1, kern_size=3, n_first=16, n_channel_out=1, residual=True, prob_out=False, long_skip=True, last_activation='linear'): """ Construct a common CARE neural net based on U-Net [1]_ and residual learning [2]_ to be used for image restoration/enhancement. Parameters ---------- n_dim : int number of image dimensions (2 or 3) n_depth : int number of resolution levels of U-Net architecture kern_size : int size of convolution filter in all image dimensions n_first : int number of convolution filters for first U-Net resolution level (value is doubled after each downsampling operation) n_channel_out : int number of channels of the predicted output image residual : bool if True, model will internally predict the residual w.r.t. the input (typically better) requires number of input and output image channels to be equal prob_out : bool standard regression (False) or probabilistic prediction (True) if True, model will predict two values for each input pixel (mean and positive scale value) last_activation : str name of activation function for the final output layer Returns ------- function Function to construct the network, which takes as argument the shape of the input image Example ------- >>> model = common_unet(2, 1,3,16, 1, True, False)(input_shape) References ---------- .. [1] <NAME>, <NAME>, <NAME>x, *U-Net: Convolutional Networks for Biomedical Image Segmentation*, MICCAI 2015 .. [2] <NAME>, <NAME>, <NAME>, <NAME>. 
*Deep Residual Learning for Image Recognition*, CVPR 2016 """ def _build_this(input_shape): return custom_unet(input_shape, last_activation, n_depth, n_first, (kern_size,)*n_dim, pool_size=(2,)*n_dim, n_channel_out=n_channel_out, residual=residual, prob_out=prob_out, long_skip=long_skip) return _build_this def common_uxnet(n_dim=2, n_depth=1, kern_size=3, n_first=16, residual=True, prob_out=False, last_activation='linear', shared_idx=[], odd_to_even=False, shortcut=None): def _build_this(input_shape): return uxnet(input_shape=input_shape, last_activation=last_activation, n_depth=n_depth, n_filter_base=n_first, kernel_size=(kern_size,)*n_dim, pool_size=(2,)*n_dim, residual=residual, prob_out=prob_out, shared_idx=shared_idx, odd_to_even=odd_to_even, shortcut=shortcut) return _build_this modelname = re.compile("^(?P<model>resunet|unet)(?P<n_dim>\d)(?P<prob_out>p)?_(?P<n_depth>\d+)_(?P<kern_size>\d+)_(?P<n_first>\d+)(_(?P<n_channel_out>\d+)out)?(_(?P<last_activation>.+)-last)?$") def common_unet_by_name(model): r"""Shorthand notation for equivalent use of :func:`common_unet`. Parameters ---------- model : str define model to be created via string, which is parsed as a regular expression: `^(?P<model>resunet|unet)(?P<n_dim>\d)(?P<prob_out>p)?_(?P<n_depth>\d+)_(?P<kern_size>\d+)_(?P<n_first>\d+)(_(?P<n_channel_out>\d+)out)?(_(?P<last_activation>.+)-last)?$` Returns ------- function Calls :func:`common_unet` with the respective parameters. Raises ------ ValueError If argument `model` is not a valid string according to the regular expression. Example ------- >>> model = common_unet_by_name('resunet2_1_3_16_1out')(input_shape) >>> # equivalent to: model = common_unet(2, 1,3,16, 1, True, False)(input_shape) Todo ---- Backslashes in docstring for regexp not rendered correctly. """ m = modelname.fullmatch(model) if m is None: raise ValueError("model name '%s' unknown, must follow pattern '%s'" % (model, modelname.pattern)) # from pprint import pprint # pprint(m.groupdict()) options = {k:int(m.group(k)) for k in ['n_depth','n_first','kern_size']} options['prob_out'] = m.group('prob_out') is not None options['residual'] = {'unet': False, 'resunet': True}[m.group('model')] options['n_dim'] = int(m.group('n_dim')) options['n_channel_out'] = 1 if m.group('n_channel_out') is None else int(m.group('n_channel_out')) if m.group('last_activation') is not None: options['last_activation'] = m.group('last_activation') return common_unet(**options) def receptive_field_unet(n_depth, kern_size, pool_size=2, n_dim=2, img_size=1024): """Receptive field for U-Net model (pre/post for each dimension).""" x = np.zeros((1,)+(img_size,)*n_dim+(1,)) mid = tuple([s//2 for s in x.shape[1:-1]]) x[(slice(None),) + mid + (slice(None),)] = 1 model = custom_unet ( x.shape[1:], n_depth=n_depth, kernel_size=[kern_size]*n_dim, pool_size=[pool_size]*n_dim, n_filter_base=8, activation='linear', last_activation='linear', ) y = model.predict(x)[0,...,0] y0 = model.predict(0*x)[0,...,0] ind = np.where(np.abs(y-y0)>0) return [(m-np.min(i), np.max(i)-m) for (m, i) in zip(mid, ind)]
[ "keras.layers.merge.Concatenate", "numpy.abs", "six.moves.range", "numpy.float32", "re.compile", "keras.layers.merge.Add", "keras.layers.Lambda", "numpy.max", "keras.layers.Input", "numpy.zeros", "keras.layers.Activation", "keras.models.Model", "numpy.min", "six.moves.zip" ]
[((11943, 12140), 're.compile', 're.compile', (['"""^(?P<model>resunet|unet)(?P<n_dim>\\\\d)(?P<prob_out>p)?_(?P<n_depth>\\\\d+)_(?P<kern_size>\\\\d+)_(?P<n_first>\\\\d+)(_(?P<n_channel_out>\\\\d+)out)?(_(?P<last_activation>.+)-last)?$"""'], {}), "(\n '^(?P<model>resunet|unet)(?P<n_dim>\\\\d)(?P<prob_out>p)?_(?P<n_depth>\\\\d+)_(?P<kern_size>\\\\d+)_(?P<n_first>\\\\d+)(_(?P<n_channel_out>\\\\d+)out)?(_(?P<last_activation>.+)-last)?$'\n )\n", (11953, 12140), False, 'import re\n'), ((1399, 1431), 'keras.layers.Input', 'Input', (['input_shape'], {'name': '"""input"""'}), "(input_shape, name='input')\n", (1404, 1431), False, 'from keras.layers import Input, Conv2D, Conv3D, Activation, Lambda\n'), ((2347, 2381), 'keras.models.Model', 'Model', ([], {'inputs': 'input', 'outputs': 'final'}), '(inputs=input, outputs=final)\n', (2352, 2381), False, 'from keras.models import Model\n'), ((3636, 3679), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape', 'name': '"""input_main"""'}), "(shape=input_shape, name='input_main')\n", (3641, 3679), False, 'from keras.layers import Input, Conv2D, Conv3D, Activation, Lambda\n'), ((9270, 9304), 'keras.models.Model', 'Model', ([], {'inputs': 'input', 'outputs': 'final'}), '(inputs=input, outputs=final)\n', (9275, 9304), False, 'from keras.models import Model\n'), ((13941, 13984), 'numpy.zeros', 'np.zeros', (['((1,) + (img_size,) * n_dim + (1,))'], {}), '((1,) + (img_size,) * n_dim + (1,))\n', (13949, 13984), True, 'import numpy as np\n'), ((2066, 2104), 'keras.layers.Activation', 'Activation', ([], {'activation': 'last_activation'}), '(activation=last_activation)\n', (2076, 2104), False, 'from keras.layers import Input, Conv2D, Conv3D, Activation, Lambda\n'), ((7874, 7894), 'keras.layers.merge.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (7885, 7894), False, 'from keras.layers.merge import Add, Concatenate\n'), ((8995, 9033), 'keras.layers.Activation', 'Activation', ([], {'activation': 'last_activation'}), '(activation=last_activation)\n', (9005, 9033), False, 'from keras.layers import Input, Conv2D, Conv3D, Activation, Lambda\n'), ((2032, 2037), 'keras.layers.merge.Add', 'Add', ([], {}), '()\n', (2035, 2037), False, 'from keras.layers.merge import Add, Concatenate\n'), ((2288, 2318), 'keras.layers.merge.Concatenate', 'Concatenate', ([], {'axis': 'channel_axis'}), '(axis=channel_axis)\n', (2299, 2318), False, 'from keras.layers.merge import Add, Concatenate\n'), ((3784, 3847), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x[..., i:i + 1])'], {'output_shape': '(None, None, 1)'}), '(lambda x: x[..., i:i + 1], output_shape=(None, None, 1))\n', (3790, 3847), False, 'from keras.layers import Input, Conv2D, Conv3D, Activation, Lambda\n'), ((3862, 3877), 'six.moves.range', 'range', (['n_planes'], {}), '(n_planes)\n', (3867, 3877), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((6074, 6098), 'six.moves.zip', 'zip', (['unet_x', 'input_x_out'], {}), '(unet_x, input_x_out)\n', (6077, 6098), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((8860, 8865), 'keras.layers.merge.Add', 'Add', ([], {}), '()\n', (8863, 8865), False, 'from keras.layers.merge import Add, Concatenate\n'), ((9211, 9241), 'keras.layers.merge.Concatenate', 'Concatenate', ([], {'axis': 'channel_axis'}), '(axis=channel_axis)\n', (9222, 9241), False, 'from keras.layers.merge import Add, Concatenate\n'), ((14376, 14390), 'numpy.abs', 'np.abs', (['(y - y0)'], {}), '(y - y0)\n', (14382, 14390), True, 'import numpy as np\n'), ((14445, 14458), 
'six.moves.zip', 'zip', (['mid', 'ind'], {}), '(mid, ind)\n', (14448, 14458), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((4180, 4200), 'keras.layers.merge.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (4191, 4200), False, 'from keras.layers.merge import Add, Concatenate\n'), ((4225, 4241), 'six.moves.range', 'range', (['(1)', '(-1)', '(-1)'], {}), '(1, -1, -1)\n', (4230, 4241), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((4331, 4351), 'keras.layers.merge.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (4342, 4351), False, 'from keras.layers.merge import Add, Concatenate\n'), ((4415, 4430), 'six.moves.range', 'range', (['n_planes'], {}), '(n_planes)\n', (4420, 4430), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((7562, 7582), 'six.moves.range', 'range', (['(n_planes // 2)'], {}), '(n_planes // 2)\n', (7567, 7582), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((7765, 7785), 'six.moves.range', 'range', (['(n_planes // 2)'], {}), '(n_planes // 2)\n', (7770, 7785), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((14407, 14416), 'numpy.min', 'np.min', (['i'], {}), '(i)\n', (14413, 14416), True, 'import numpy as np\n'), ((14418, 14427), 'numpy.max', 'np.max', (['i'], {}), '(i)\n', (14424, 14427), True, 'import numpy as np\n'), ((6855, 6860), 'keras.layers.merge.Add', 'Add', ([], {}), '()\n', (6858, 6860), False, 'from keras.layers.merge import Add, Concatenate\n'), ((6891, 6917), 'six.moves.zip', 'zip', (['unet_x', 'input_x[::-1]'], {}), '(unet_x, input_x[::-1])\n', (6894, 6917), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((7069, 7074), 'keras.layers.merge.Add', 'Add', ([], {}), '()\n', (7072, 7074), False, 'from keras.layers.merge import Add, Concatenate\n'), ((7105, 7145), 'six.moves.zip', 'zip', (['unet_x', '([input_x[1]] + input_x[:-1])'], {}), '(unet_x, [input_x[1]] + input_x[:-1])\n', (7108, 7145), False, 'from six.moves import range, zip, map, reduce, filter\n'), ((2242, 2263), 'numpy.float32', 'np.float32', (['eps_scale'], {}), '(eps_scale)\n', (2252, 2263), True, 'import numpy as np\n'), ((9165, 9186), 'numpy.float32', 'np.float32', (['eps_scale'], {}), '(eps_scale)\n', (9175, 9186), True, 'import numpy as np\n'), ((7824, 7848), 'six.moves.zip', 'zip', (['unet_even', 'unet_odd'], {}), '(unet_even, unet_odd)\n', (7827, 7848), False, 'from six.moves import range, zip, map, reduce, filter\n')]
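A minimal usage sketch for the U-Net factory functions defined above, assuming the module is importable; the input shape and hyperparameters are illustrative choices, not values taken from the original project.

build_fn = common_unet(n_dim=2, n_depth=2, kern_size=3, n_first=16,
                       n_channel_out=1, residual=True, prob_out=False)
model = build_fn((128, 128, 1))    # Keras Model with a channels-last input
rf = receptive_field_unet(n_depth=2, kern_size=3, pool_size=2, n_dim=2)
print(model.output_shape, rf)      # rf lists the (pre, post) extent per spatial axis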
from maxdb import DB def runtime_on_any_exception(func): def decorate(*args, **kwargs): try: func(*args, **kwargs) except: raise RuntimeError return decorate class CLIUtils(object): DEFAULT_PATH = 'storage.json' def __init__(self): self._db = None self._path = self.DEFAULT_PATH def run(self, rawcmd): cmd, *args = rawcmd.split(' ') if cmd: try: self._cmds_cache[cmd](args) except KeyError: print('Lab1 does not have command <{0}>'.format(cmd)) except RuntimeError: print('Incorrect arguments for DB.{0}: <{1}>'.format(cmd, args)) @property def _cmds_cache(self): return { 'tables': self._tables, 'all': self._all, 'insert': self._insert, 'get': self._get, 'update': self._update, 'delete': self._delete, 'help': lambda _: print(self._help_msg), 'path': lambda _: print(self._path), 'exit': self._close, } @property def _help_msg(self): return """LAB1 HELP: | tables | print list of tables from current storage. | all <table> (<table> ...) | display _all values from specific table. | all labcondition | display _all products with price more than 100UAH. | insert <table> <cnt> | insert N items to the table. | is followed by >>>column_name <value> | get <table> <id> | get single row specified by id from table. | update <table> <id> | udpate table with a new single value. | is followed by | >>>with <column> <value> (<column> <value> (...)) | delete <table> <id> | delete row specified by id from table. | save <filepath> | save database using current storage type to specified filepath. | load <filepath> | load specific database from file using current storage type. | help | display current message. | path | display storage file path. | exit | exit the program. """ def _tables(self, _): print(self._db.tables()) @runtime_on_any_exception def _all(self, args): if 'labcondition' == args[0]: found_rows = self._db.get( 'Products', column='price', cond=lambda p: int(p.value) > 100 ) print('Rows from DB.Products with price>100:') print('\n'.join(map(str, found_rows))) else: for table_name in args: table_rows = self._db.table(table_name).all_ids() table_pretty_rows = '\n'.join(map(lambda i: 'ID {0} {1}'.format(*i), table_rows)) print('DB.{0}:\n{1}'.format(table_name, table_pretty_rows)) @runtime_on_any_exception def _insert(self, args): table_name, cnt = args table_to_insert = self._db.table(table_name) for cur_cnt in range(int(cnt)): print('Please, enter values for DB.{0} row:'.format(table_name)) row_to_insert = {} for column_name, column_type in table_to_insert.columns.items(): if column_type == 'fk': print('Enter Table for FK: fktable=', end='') fktable = input() print('Enter Id for FK: fkid=', end='') fkid = input() row_to_insert[column_name] = ( {'table': fktable, 'fkid': fkid}, column_type ) else: print('Enter {0}, type={1}: {0}='.format(column_name, column_type), end='') column_value = input() row_to_insert[column_name] = (column_value, column_type) table_to_insert.insert(row_to_insert) @runtime_on_any_exception def _get(self, args): table_name, row_idx = args print('DB.{0} id={1}:'.format(*args)) print(self._db.get(table_name, doc_id=int(row_idx)) or 'Not Found DB.{0}.{1}'.format(*args)) @runtime_on_any_exception def _update(self, args): table_name, row_idx = args table_to_update = self._db.table(table_name) row_to_update = table_to_update.get(row_id=int(row_idx)) colval_to_update = {} print('Updating DB.{0}.{1}: {2}'.format(table_name, row_idx, row_to_update)) for column_name, column_type in table_to_update.columns.items(): if column_type == 'fk': current_fktable = row_to_update[column_name].table 
print('Change FKTable from <{0}> to value='.format(current_fktable), end='') after_fktable = input() current_fkid = row_to_update[column_name].fk_id print('Change FKId from <{0}> to value='.format(current_fkid), end='') after_fkid = input() colval_to_update[column_name] = { 'table': after_fktable, 'fkid': after_fkid } else: print('Enter value for column {0}, type={1}: {0}='.format(column_name, column_type), end='') column_value = input() colval_to_update[column_name] = column_value table_to_update.update(colval_to_update, [int(row_idx)]) @runtime_on_any_exception def _delete(self, args): table_name, row_id = args print('Deleted item DB.{0}.{1}'.format(*args)) print(self._db.delete(table_name, row_ids=[int(row_id)]) or 'Not Found DB.{0}.{1}'.format(*args)) def _open(self): """Create DB instance and preload default models.""" self._db = DB(self._path) products = self._db.table( 'Products', columns={'name': 'str', 'price': 'int'} ) orders = self._db.table( 'Orders', columns={'product': 'fk', 'client': 'str', 'destination': 'addr'} ) try: products.insert_multiple([ {"name": ("product1", "str"), "price": ("50", "int")}, {"name": ("product2", "str"), "price": ("100", "int")}, {"name": ("product3", "str"), "price": ("200", "int")}, ]) except: pass try: orders.insert_multiple([ { "product": ({'table': 'Products', 'fkid': '1'}, 'fk'), "client": ("honchar", "str"), "destination": ("Kyiv", "addr") }, { "product": ({'table': 'Products', 'fkid': '2'}, 'fk'), "client": ("honchar2", "str"), "destination": ("Kyiv2", "addr") }, { "product": ({'table': 'Products', 'fkid': '3'}, 'fk'), "client": ("honchar3", "str"), "destination": ("Kyiv3", "addr") }, ]) except: pass self.run('help', *()) def _close(self, _): """Close DB instance routine.""" self._db.close() def __enter__(self): self._open() return self def __exit__(self, exc_type, exc_val, exc_tb): self._close(None)
[ "maxdb.DB" ]
[((5976, 5990), 'maxdb.DB', 'DB', (['self._path'], {}), '(self._path)\n', (5978, 5990), False, 'from maxdb import DB\n')]
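A short usage sketch for the CLI wrapper above, assuming the maxdb backend is installed; the commands shown are ones handled by _cmds_cache.

with CLIUtils() as cli:            # opens storage.json and preloads the demo rows
    cli.run('tables')              # list table names
    cli.run('all Products')        # dump every row of the Products table
    cli.run('all labcondition')    # products with price above 100
    cli.run('get Orders 2')        # fetch a single row by id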
from cognibench.models import CNBModel
from cognibench.capabilities import ContinuousAction, ContinuousObservation
from cognibench.continuous import ContinuousSpace
from cognibench.models.wrappers import MatlabWrapperMixin


class PsPMModel(MatlabWrapperMixin, CNBModel, ContinuousAction, ContinuousObservation):
    name = "PsPM model"

    def __init__(
        self, *args, lib_paths, import_base_path, predict_fn, model_spec, **kwargs
    ):
        self.set_action_space(ContinuousSpace())
        self.set_observation_space(ContinuousSpace())

        def pred(matlab_sess, stimuli):
            stimuli_copy = dict(stimuli)
            stimuli_copy.update(model_spec)
            return matlab_sess.feval(predict_fn, stimuli_copy)

        MatlabWrapperMixin.__init__(
            self,
            lib_paths=lib_paths,
            import_base_path=import_base_path,
            predict_fn=pred,
        )
        CNBModel.__init__(self, *args, **kwargs)
[ "cognibench.models.CNBModel.__init__", "cognibench.continuous.ContinuousSpace", "cognibench.models.wrappers.MatlabWrapperMixin.__init__" ]
[((747, 858), 'cognibench.models.wrappers.MatlabWrapperMixin.__init__', 'MatlabWrapperMixin.__init__', (['self'], {'lib_paths': 'lib_paths', 'import_base_path': 'import_base_path', 'predict_fn': 'pred'}), '(self, lib_paths=lib_paths, import_base_path=\n import_base_path, predict_fn=pred)\n', (774, 858), False, 'from cognibench.models.wrappers import MatlabWrapperMixin\n'), ((921, 961), 'cognibench.models.CNBModel.__init__', 'CNBModel.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (938, 961), False, 'from cognibench.models import CNBModel\n'), ((476, 493), 'cognibench.continuous.ContinuousSpace', 'ContinuousSpace', ([], {}), '()\n', (491, 493), False, 'from cognibench.continuous import ContinuousSpace\n'), ((530, 547), 'cognibench.continuous.ContinuousSpace', 'ContinuousSpace', ([], {}), '()\n', (545, 547), False, 'from cognibench.continuous import ContinuousSpace\n')]
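A construction sketch for the wrapper above; every path, the MATLAB entry point, and the model options are placeholder assumptions, while the four keyword arguments are the ones __init__ requires.

model = PsPMModel(
    lib_paths=['/opt/pspm/src'],        # assumed location of the PsPM sources
    import_base_path='/opt/pspm',       # assumed MATLAB import base path
    predict_fn='pspm_fit_and_predict',  # assumed name of the MATLAB entry point
    model_spec={'modality': 'scr'},     # assumed model-specific options
)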
import unittest from programy.config.file.yaml_file import YamlConfigurationFile from programy.clients.restful.config import RestConfiguration from programy.clients.events.console.config import ConsoleConfiguration class RestConfigurationTests(unittest.TestCase): def test_init(self): yaml = YamlConfigurationFile() self.assertIsNotNone(yaml) yaml.load_from_text(""" rest: host: 127.0.0.1 port: 5000 debug: false workers: 4 use_api_keys: false api_key_file: apikeys.txt """, ConsoleConfiguration(), ".") rest_config = RestConfiguration("rest") rest_config.load_configuration(yaml, ".") self.assertEqual("127.0.0.1", rest_config.host) self.assertEqual(5000, rest_config.port) self.assertEqual(False, rest_config.debug) self.assertEqual(False, rest_config.use_api_keys) self.assertEqual("apikeys.txt", rest_config.api_key_file) def test_init_no_values(self): yaml = YamlConfigurationFile() self.assertIsNotNone(yaml) yaml.load_from_text(""" rest: """, ConsoleConfiguration(), ".") rest_config = RestConfiguration("rest") rest_config.load_configuration(yaml, ".") self.assertEqual("0.0.0.0", rest_config.host) self.assertEqual(80, rest_config.port) self.assertEqual(False, rest_config.debug) self.assertEqual(False, rest_config.use_api_keys) def test_to_yaml_with_defaults(self): config = RestConfiguration("rest") data = {} config.to_yaml(data, True) self.assertEquals(data['host'], "0.0.0.0") self.assertEquals(data['port'], 80) self.assertEquals(data['debug'], False) self.assertEquals(data['use_api_keys'], False) self.assertEquals(data['api_key_file'], './api.keys') self.assertEquals(data['ssl_cert_file'], './rsa.cert') self.assertEquals(data['ssl_key_file'], './rsa.keys') self.assertEquals(data['bot'], 'bot') self.assertEquals(data['license_keys'], "./config/license.keys") self.assertEquals(data['bot_selector'], "programy.clients.client.DefaultBotSelector") self.assertEquals(data['renderer'], "programy.clients.render.text.TextRenderer")
[ "programy.config.file.yaml_file.YamlConfigurationFile", "programy.clients.restful.config.RestConfiguration", "programy.clients.events.console.config.ConsoleConfiguration" ]
[((307, 330), 'programy.config.file.yaml_file.YamlConfigurationFile', 'YamlConfigurationFile', ([], {}), '()\n', (328, 330), False, 'from programy.config.file.yaml_file import YamlConfigurationFile\n'), ((634, 659), 'programy.clients.restful.config.RestConfiguration', 'RestConfiguration', (['"""rest"""'], {}), "('rest')\n", (651, 659), False, 'from programy.clients.restful.config import RestConfiguration\n'), ((1042, 1065), 'programy.config.file.yaml_file.YamlConfigurationFile', 'YamlConfigurationFile', ([], {}), '()\n', (1063, 1065), False, 'from programy.config.file.yaml_file import YamlConfigurationFile\n'), ((1212, 1237), 'programy.clients.restful.config.RestConfiguration', 'RestConfiguration', (['"""rest"""'], {}), "('rest')\n", (1229, 1237), False, 'from programy.clients.restful.config import RestConfiguration\n'), ((1559, 1584), 'programy.clients.restful.config.RestConfiguration', 'RestConfiguration', (['"""rest"""'], {}), "('rest')\n", (1576, 1584), False, 'from programy.clients.restful.config import RestConfiguration\n'), ((582, 604), 'programy.clients.events.console.config.ConsoleConfiguration', 'ConsoleConfiguration', ([], {}), '()\n', (602, 604), False, 'from programy.clients.events.console.config import ConsoleConfiguration\n'), ((1160, 1182), 'programy.clients.events.console.config.ConsoleConfiguration', 'ConsoleConfiguration', ([], {}), '()\n', (1180, 1182), False, 'from programy.clients.events.console.config import ConsoleConfiguration\n')]
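For reference, the defaults asserted in test_to_yaml_with_defaults above, gathered into the dictionary that RestConfiguration.to_yaml(data, True) is expected to fill.

rest_defaults = {
    'host': '0.0.0.0',
    'port': 80,
    'debug': False,
    'use_api_keys': False,
    'api_key_file': './api.keys',
    'ssl_cert_file': './rsa.cert',
    'ssl_key_file': './rsa.keys',
    'bot': 'bot',
    'license_keys': './config/license.keys',
    'bot_selector': 'programy.clients.client.DefaultBotSelector',
    'renderer': 'programy.clients.render.text.TextRenderer',
}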
r"""Train a neural network to predict feedback for a program string.""" from __future__ import division from __future__ import print_function from __future__ import absolute_import import os import sys import random import numpy as np from tqdm import tqdm import torch import torch.optim as optim import torch.utils.data as data import torch.nn.functional as F from .models import ProgramRNN from .utils import AverageMeter, save_checkpoint, merge_args_with_dict from .datasets import load_dataset from .config import default_hyperparams from .rubric_utils.load_params import get_label_params, get_max_seq_len if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument('dataset', type=str, help='annotated|synthetic') parser.add_argument('problem_id', type=int, help='1|2|3|4|5|6|7|8') parser.add_argument('out_dir', type=str, help='where to save outputs') parser.add_argument('--cuda', action='store_true', default=False, help='enables CUDA training [default: False]') args = parser.parse_args() args.cuda = args.cuda and torch.cuda.is_available() merge_args_with_dict(args, default_hyperparams) device = torch.device('cuda' if args.cuda else 'cpu') args.max_seq_len = get_max_seq_len(args.problem_id) label_dim, _, _, _, _ = get_label_params(args.problem_id) # reproducibility torch.manual_seed(args.seed) np.random.seed(args.seed) if not os.path.isdir(args.out_dir): os.makedirs(args.out_dir) train_dataset = load_dataset( args.dataset, args.problem_id, 'train', vocab=None, max_seq_len=args.max_seq_len, min_occ=args.min_occ) val_dataset = load_dataset( args.dataset, args.problem_id, 'val', vocab=train_dataset.vocab, max_seq_len=args.max_seq_len, min_occ=args.min_occ) test_dataset = load_dataset(args.dataset, args.problem_id, 'test', vocab=train_dataset.vocab, max_seq_len=args.max_seq_len, min_occ=args.min_occ) train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True) val_loader = data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False) test_loader = data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False) model = ProgramRNN( args.z_dim, label_dim, train_dataset.vocab_size, embedding_dim=args.embedding_dim, hidden_dim=args.hidden_dim, num_layers=args.num_layers) model = model.to(device) optimizer = optim.Adam(model.parameters(), lr=args.lr) def train(epoch): model.train() loss_meter = AverageMeter() acc_meter = AverageMeter() for batch_idx, (seq, length, label, _) in enumerate(train_loader): assert label is not None batch_size = len(seq) seq = seq.to(device) length = length.to(device) label = label.to(device) optimizer.zero_grad() label_out = model(seq, length) loss = F.binary_cross_entropy(label_out, label) loss.backward() loss_meter.update(loss.item(), batch_size) optimizer.step() acc = np.mean(torch.round(label_out).detach().numpy() == label.detach().numpy()) acc_meter.update(acc, batch_size) if batch_idx % args.log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAccuracy: {:.4f}'.format( epoch, batch_idx * batch_size, len(train_loader.dataset), 100. 
* batch_idx / len(train_loader), loss_meter.avg, acc_meter.avg)) print('====> Epoch: {}\tLoss: {:.4f}\tAccuracy: {:.4f}'.format( epoch, loss_meter.avg, acc_meter.avg)) return loss_meter.avg, acc_meter.avg def test(epoch, loader, name='Test'): model.eval() loss_meter = AverageMeter() acc_meter = AverageMeter() with torch.no_grad(): with tqdm(total=len(loader)) as pbar: for (seq, length, label, _) in loader: assert label is not None batch_size = len(seq) seq = seq.to(device) length = length.to(device) label = label.to(device) label_out = model(seq, length) loss = F.binary_cross_entropy(label_out, label) loss_meter.update(loss.item(), batch_size) acc = np.mean(torch.round(label_out.cpu()).numpy() == label.cpu().numpy()) acc_meter.update(acc, batch_size) pbar.update() print('====> {} Epoch: {}\tLoss: {:.4f}\tAccuracy: {:.4f}'.format( name, epoch, loss_meter.avg, acc_meter.avg)) return loss_meter.avg, acc_meter.avg best_loss = sys.maxint track_train_loss = np.zeros(args.epochs) track_val_loss = np.zeros(args.epochs) track_test_loss = np.zeros(args.epochs) track_train_acc = np.zeros(args.epochs) track_val_acc = np.zeros(args.epochs) track_test_acc = np.zeros(args.epochs) for epoch in xrange(1, args.epochs + 1): train_loss, train_acc = train(epoch) val_loss, val_acc = test(epoch, val_loader, name='Val') test_loss, test_acc = test(epoch, test_loader, name='Test') track_train_loss[epoch - 1] = train_loss track_val_loss[epoch - 1] = val_loss track_test_loss[epoch - 1] = test_loss track_train_acc[epoch - 1] = train_acc track_val_acc[epoch - 1] = val_acc track_test_acc[epoch - 1] = test_acc is_best = val_loss < best_loss best_loss = min(val_loss, best_loss) save_checkpoint({ 'state_dict': model.state_dict(), 'cmd_line_args': args, 'vocab': train_dataset.vocab, }, is_best, folder=args.out_dir) np.save(os.path.join(args.out_dir, 'train_loss.npy'), track_train_loss) np.save(os.path.join(args.out_dir, 'val_loss.npy'), track_val_loss) np.save(os.path.join(args.out_dir, 'test_loss.npy'), track_test_loss) np.save(os.path.join(args.out_dir, 'train_acc.npy'), track_train_acc) np.save(os.path.join(args.out_dir, 'val_acc.npy'), track_val_acc) np.save(os.path.join(args.out_dir, 'test_acc.npy'), track_test_acc)
[ "torch.manual_seed", "argparse.ArgumentParser", "os.makedirs", "torch.nn.functional.binary_cross_entropy", "os.path.join", "numpy.zeros", "torch.cuda.is_available", "os.path.isdir", "numpy.random.seed", "torch.round", "torch.utils.data.DataLoader", "torch.no_grad", "torch.device" ]
[((677, 702), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (700, 702), False, 'import argparse\n'), ((1216, 1260), 'torch.device', 'torch.device', (["('cuda' if args.cuda else 'cpu')"], {}), "('cuda' if args.cuda else 'cpu')\n", (1228, 1260), False, 'import torch\n'), ((1407, 1435), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1424, 1435), False, 'import torch\n'), ((1440, 1465), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (1454, 1465), True, 'import numpy as np\n'), ((2104, 2176), 'torch.utils.data.DataLoader', 'data.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(train_dataset, batch_size=args.batch_size, shuffle=True)\n', (2119, 2176), True, 'import torch.utils.data as data\n'), ((2194, 2265), 'torch.utils.data.DataLoader', 'data.DataLoader', (['val_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(val_dataset, batch_size=args.batch_size, shuffle=False)\n', (2209, 2265), True, 'import torch.utils.data as data\n'), ((2284, 2356), 'torch.utils.data.DataLoader', 'data.DataLoader', (['test_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(test_dataset, batch_size=args.batch_size, shuffle=False)\n', (2299, 2356), True, 'import torch.utils.data as data\n'), ((5012, 5033), 'numpy.zeros', 'np.zeros', (['args.epochs'], {}), '(args.epochs)\n', (5020, 5033), True, 'import numpy as np\n'), ((5055, 5076), 'numpy.zeros', 'np.zeros', (['args.epochs'], {}), '(args.epochs)\n', (5063, 5076), True, 'import numpy as np\n'), ((5099, 5120), 'numpy.zeros', 'np.zeros', (['args.epochs'], {}), '(args.epochs)\n', (5107, 5120), True, 'import numpy as np\n'), ((5143, 5164), 'numpy.zeros', 'np.zeros', (['args.epochs'], {}), '(args.epochs)\n', (5151, 5164), True, 'import numpy as np\n'), ((5185, 5206), 'numpy.zeros', 'np.zeros', (['args.epochs'], {}), '(args.epochs)\n', (5193, 5206), True, 'import numpy as np\n'), ((5228, 5249), 'numpy.zeros', 'np.zeros', (['args.epochs'], {}), '(args.epochs)\n', (5236, 5249), True, 'import numpy as np\n'), ((1125, 1150), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1148, 1150), False, 'import torch\n'), ((1478, 1505), 'os.path.isdir', 'os.path.isdir', (['args.out_dir'], {}), '(args.out_dir)\n', (1491, 1505), False, 'import os\n'), ((1515, 1540), 'os.makedirs', 'os.makedirs', (['args.out_dir'], {}), '(args.out_dir)\n', (1526, 1540), False, 'import os\n'), ((3104, 3144), 'torch.nn.functional.binary_cross_entropy', 'F.binary_cross_entropy', (['label_out', 'label'], {}), '(label_out, label)\n', (3126, 3144), True, 'import torch.nn.functional as F\n'), ((4064, 4079), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4077, 4079), False, 'import torch\n'), ((6059, 6103), 'os.path.join', 'os.path.join', (['args.out_dir', '"""train_loss.npy"""'], {}), "(args.out_dir, 'train_loss.npy')\n", (6071, 6103), False, 'import os\n'), ((6139, 6181), 'os.path.join', 'os.path.join', (['args.out_dir', '"""val_loss.npy"""'], {}), "(args.out_dir, 'val_loss.npy')\n", (6151, 6181), False, 'import os\n'), ((6215, 6258), 'os.path.join', 'os.path.join', (['args.out_dir', '"""test_loss.npy"""'], {}), "(args.out_dir, 'test_loss.npy')\n", (6227, 6258), False, 'import os\n'), ((6293, 6336), 'os.path.join', 'os.path.join', (['args.out_dir', '"""train_acc.npy"""'], {}), "(args.out_dir, 'train_acc.npy')\n", (6305, 6336), False, 'import os\n'), ((6371, 6412), 'os.path.join', 'os.path.join', (['args.out_dir', 
'"""val_acc.npy"""'], {}), "(args.out_dir, 'val_acc.npy')\n", (6383, 6412), False, 'import os\n'), ((6445, 6487), 'os.path.join', 'os.path.join', (['args.out_dir', '"""test_acc.npy"""'], {}), "(args.out_dir, 'test_acc.npy')\n", (6457, 6487), False, 'import os\n'), ((4485, 4525), 'torch.nn.functional.binary_cross_entropy', 'F.binary_cross_entropy', (['label_out', 'label'], {}), '(label_out, label)\n', (4507, 4525), True, 'import torch.nn.functional as F\n'), ((3285, 3307), 'torch.round', 'torch.round', (['label_out'], {}), '(label_out)\n', (3296, 3307), False, 'import torch\n')]
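An invocation sketch for the training script above; the script file name, dataset choice, problem id, and output directory are placeholders.

# Assumed command line, with placeholder names:
#   python train_feedback.py annotated 3 ./out_annotated_p3 --cuda
# On completion the output directory holds the best checkpoint plus the
# train/val/test loss and accuracy curves saved as .npy files.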
""" Sparse Poisson Recovery (SPoRe) module for solving Multiple Measurement Vector problem with Poisson signals (MMVP) by batch stochastic gradient ascent and Monte Carlo integration Authors: <NAME>, <NAME> Reference: [1] <NAME>, <NAME>, <NAME>, and <NAME>, "Extreme Compressed Sensing of Poisson Rates from Multiple Measurements," Mar. 2021. arXiv ID: """ from abc import ABC, abstractmethod import numpy as np import time import pdb from .mmv_models import FwdModelGroup, SPoReFwdModelGroup class SPoRe(object): def __init__(self, N, fwdmodel, sampler, batch_size=100, step_size=1e-1, min_lambda=1e-3, pyx_min=0, grad_scale=5e-2, conv_rel=1e-2, conv_window=500, patience = 3000, step_cut = 0.1, max_cut = 5, max_iter=int(1e4)): """ Parameters ---------- N: int Dimension of signals fwdmodel : object instance of a mmv_models.FwdModel class. Object should contain any necessary model-specific parameters as attributes sampler : object instance of a spore.Sampler class that has a .sample method returning S samples of signals X from a probability distribution (N, S, :) batch_size: int Number of columns of Y to randomly draw and evaluate for each iteration step_size: float initial learning rate for stochastic gradient ascent min_lambda: float Lower bound on individual entries of lambda. \epsilon in [1] pyx_min: float (default 0, i.e. no effect) A batch element y_b is only included in analysis if max(p(y_b|x_s)) among sampled x's (x_s) is greater than this value. Prevents steps in the direction of junk measurements (e.g. a corrupted siganl) OR if samples are not good for the y_b [1] used 0 for all experiments grad_scale: float Maximum l2-norm of gradient step that can be taken. Any step larger is rescaled to have this l2-norm conv_rel: float (0,1) Fractional change in the average of lambda estimate in two conv_windows, below which iteration stops conv_window: int Number of iterations over which to evaluate moving averages. Nonoverlapping windows are compared. E.g. if conv_window = 500, then 999-500 iterations ago is averaged and compared to 499-current average. patience: int Number of iterations to wait for improvement in log likelihood before cutting step size step_cut: float (0, 1) Fraction to cut step size by if patience exceeded max_cut: int Maximum number of times step size can be cut by step_cut before quitting max_iter: int Maximum iteration budget. SPoRe terminates regardless of convergence status """ self.N = N if isinstance(fwdmodel, FwdModelGroup): self.fwdmodel_group = fwdmodel else: self.fwdmodel_group = FwdModelGroup([fwdmodel]) self.sampler = sampler self.batch_size = batch_size self.step_size = step_size self.min_lambda = min_lambda self.pyx_min = pyx_min self.grad_scale = grad_scale self.conv_rel = conv_rel self.conv_window = conv_window self.patience = patience self.step_cut = step_cut self.max_cut = max_cut self.max_iter = max_iter def recover(self, Y, S, lam0=None, randinit_offset=1e-1, seed=None, verbose=True): """Recover poisson rate parameters given Parameters ---------- Y : array_like Observations. Shape ``(M, D)``. S : int Number of samples to draw for each Y. lam0: array_like Initial value for estimated lambda. If None, lam0 = randinit_offset Shape: ``(N,) randinit_offset: float Random initializations (if lam0 not provided) are drawn. 
Offset sets a minimum value for any particular entry of lambda0 seed: int or None Initial seed for before iterations begin verbose: boolean If True, prints some information every <self.conv_window> iterations Returns ------- lam_S : numpy array Recovered estimate of lambda Shape ``(N,)`` includeCheck: numpy array Indices of observations that never influenced a gradient step. These observations can be considered 'unexplained' by the recovered lambda. Can be indicative of a corrupted measurement. Not used in [1] lamHistory: numpy array History of lambda estimates at each iteration Shape ``(N, iters)`` (for iters evaluated until convergence) llHistory: numpy array History of median log-likelihood estimates at each iteration Shape ``(iters,)`` """ if isinstance(self.fwdmodel_group, SPoReFwdModelGroup): fwdmodel = self.fwdmodel_group else: _, D = Y.shape group_indices = None fwdmodel = SPoReFwdModelGroup(self.fwdmodel_group, group_indices) M, D = np.shape(Y) np.random.seed(seed) lamHistory = np.zeros((self.N, self.max_iter)) llHistory = np.zeros((self.max_iter)) if lam0 is None: lam0 = np.ones(self.N)*randinit_offset lamHat = lam0 # Remaining false elements at convergence => unexplained measurements. Not used in [1] includeCheck = np.zeros(D) > np.ones(D) refIter = 0 bestIter = 0 stepTemp = self.step_size numCut = 0 t0 = time.time() stepIter = [] # Batch gradient ascent for i in range(self.max_iter): # Get batch elements and sample for each batchInds = np.random.choice(D, self.batch_size) Y_batch = Y[:,batchInds] self.sampler._lam = lamHat X_sample = self.sampler.sample(Y_batch, S) pyx = fwdmodel.py_x_batch(Y_batch[:, None, :], X_sample, batchInds) # (S, B) array # Don't eval batch elements whose p(y|x) is too low for all samples. In [1] (self.pyx_min=0) batchInclude = np.max(pyx, axis=0) > self.pyx_min includeCheck[batchInds[batchInclude]] = True pyx = pyx[:, batchInclude] if np.shape(X_sample)[2] > 1: X_sample = X_sample[:,:,batchInclude] pqRatio = self.sampler.pq_ratio(X_sample) probsAgg = pyx * pqRatio # (S, B) array, aggregate value of pdf computations # Evaluate loss and gradient llHistory[i] = self.log_likelihood(probsAgg) grad = self.gradient(X_sample, lamHat, probsAgg) step = stepTemp * grad # Necessary to make more robust against numerical issue described in [1] if not np.all(grad==np.zeros(self.N)): # at least some sampled X informs a gradient step stepIter.append(i) # track when steps are taken if np.any( (lamHat+step) >self.min_lambda): #if at least one index is stepped meaningfully # Rescale according to the indices still in question normCheck = np.linalg.norm(step[ (lamHat+step) >self.min_lambda]) if normCheck > self.grad_scale : step = (self.grad_scale / normCheck) * step else: # step is likely too big, period. 
if np.linalg.norm(step) > self.grad_scale : # Rescale based on whole step vector step = (self.grad_scale / np.linalg.norm(step)) * step #if steps have been taken at least 1/2 the time, recent conv_window worth of iterations likely to have been taken # hypothesize that steps may not be taken occasionally at first as lamHat is a bad estimate, but will be taken with increasing regularity enoughSteps = np.sum(np.array(stepIter) > (i - self.conv_window*2)) > self.conv_window lamHat += step lamHat[lamHat < self.min_lambda] = self.min_lambda lamHistory[:, i] = lamHat # Check convergence if (i+1) >= (self.conv_window*2): lam1 = np.mean(lamHistory[:, (i-2*self.conv_window+1):(i-self.conv_window+1)], axis=1) # e.g [:, 0:500] if conv_window is 500 lam2 = np.mean(lamHistory[:, (i-self.conv_window+1):(i+1)], axis=1) # e.g. [:, 500:] if i is 999, conv_window is 500 pctChange = np.linalg.norm(lam2 - lam1, ord=1) / np.linalg.norm(lam1, ord=1) if pctChange < self.conv_rel and enoughSteps: break # Cut learning rate (if necessary) if llHistory[i] >= llHistory[bestIter] or np.isnan(llHistory[bestIter]): bestIter = i refIter = i if i - refIter >= self.patience and enoughSteps: stepTemp = self.step_cut * stepTemp refIter = i numCut += 1 if verbose is True: print('Step size cut ' + str(numCut) + ' times') if numCut >= self.max_cut: break # Report: if verbose is True and (i+1)>=(self.conv_window*2) and (i+1) % self.conv_window == 0: print('Iteration #: ' + str(i+1) + '; l1-norm change: ' + str(pctChange) + \ '; recovery time: ' + str(round(time.time()-t0, 2)) + ' seconds') # average over last conv_window iterations' values lamHat = np.mean(lamHistory[:, (i-self.conv_window+1):(i+1)], axis=1) return lamHat, includeCheck, lamHistory, llHistory def log_likelihood(self, p_agg): r"""Compute log-likelihood and return the ~average (median/B). Median used because of high variability of individual batch draws. Outlier resistance important if using log-likelihood to inform convergence Parameters ---------- p_agg: array_like element-wise product of p(y|x) (an (S,B,) array) and pqRatio (an (S,B) array or an (S,) array if sample_same=True) Explicitly: p_agg for any element is p(y_b|x_s) * p(x_s|\lamHat) / Q(x_s) where Q is the sampling function Shape: (S, B,) Returns ------- ll: average log likelihood of p(y_b|\lambda) """ S, B = np.shape(p_agg) likelihood = (1/S) * np.sum(p_agg, axis=0) # of all batch elements ll = np.median(np.log(likelihood)) / B return ll def gradient(self, X_s, lamHat, p_agg): """ Compute MC gradients based on pre-computed measurement/sampling likelihoods p(y|x), Q(x_s) (p_agg) and Poisson likelihoods (samples X_s, current estimate lamHat) Parameters ---------- X_s : array_like Sampled X's Shape (N, S, B) or (N, S, 1) lamHat : array_like current estimate of lambda. 
Shape (N,) p_agg : see log_likelihood() Returns ------- grad: array_like batch gradient Shape: (N,) """ _, _, sameSamples = np.shape(X_s) #same samples over each iteration S, B = np.shape(p_agg) grad = np.zeros((self.N,)) #Note - it's ok if grad = 0 if all sumChecks fail - equates to waiting #until next iter sums = np.sum(p_agg, axis=0) sumCheck = sums !=0 if np.size(sumCheck) != 0: #else just return zero vector if sameSamples == 1: xOverL = X_s[:,:,0] / lamHat[:, None] #(N, S) grad = np.sum((xOverL @ p_agg[:, sumCheck]) / sums[sumCheck] - 1 , axis=1) else: xOverL = X_s / lamHat[:, None, None] #(N, S, B) numer = np.einsum('ij...,j...->i...', xOverL[:,:,sumCheck], p_agg[:,sumCheck]) grad = np.sum((numer / sums) - 1, axis=1) grad = grad/B return grad class Sampler(ABC): @abstractmethod def sample(self, Y, S, seed=None): """Generate samples of X for each column of Y Parameters ---------- Y : array_like Observations to sample according to. This array must have shape ``(M, B)``. S : int Number of samples to draw for each Y. seed: Random seed for drawing Returns ------- X : (N, S, B) or (N, S, 1) ndarray S Samples of X for each of B columns of Y. Last dimension is 1 if same samples apply to all batch elements """ pass @abstractmethod def pq_ratio(self, X): """ Get the ratio of probability densities of input X P(X|self._lam)/Q(X) element-wise Where P(X|self._lam) is the Poisson probability of each entry in X Q(X) is the sampler's probability of drawing that X Parameters ---------- X : array_like N-dimensional Vectors within range of Sampler.sample(), stacked in columns of array Shape: ``(N, S, B)`` or ``(N, S, 1)`` Returns ------- ratio : array_like Probability densities Q(x) for all X Shape: ``(S, B)`` """ pass class PoissonSampler(Sampler): def __init__(self, lam, sample_same=True, seed=None): """ As used in [1]: Q(x) = P(x|lamHat) Parameters ---------- lam : array_like (float) Poisson rates from which to draw Shape: ``(N,)`` sample_same : bool Whether to use the same X samples for each column of Y. """ self._lam = lam self._sample_same = sample_same self._generator = np.random.default_rng(seed) def sample(self, Y, S): N, = self._lam.shape _, B = Y.shape if self._sample_same: X = self._generator.poisson(self._lam[:, None, None], (N, S, 1)) else: X = self._generator.poisson(self._lam[:, None, None], (N, S, B)) return X def pq_ratio(self, X): _, S, B = np.shape(X) #With Poisson sampler - always sampling according to the current lambda value in the sampler ratio = np.ones((S,B)) return ratio
[ "numpy.mean", "numpy.random.default_rng", "numpy.ones", "numpy.random.choice", "numpy.size", "numpy.log", "numpy.any", "numpy.max", "numpy.sum", "numpy.zeros", "numpy.array", "numpy.isnan", "numpy.random.seed", "numpy.einsum", "numpy.linalg.norm", "numpy.shape", "time.time" ]
[((5468, 5479), 'numpy.shape', 'np.shape', (['Y'], {}), '(Y)\n', (5476, 5479), True, 'import numpy as np\n'), ((5488, 5508), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (5502, 5508), True, 'import numpy as np\n'), ((5530, 5563), 'numpy.zeros', 'np.zeros', (['(self.N, self.max_iter)'], {}), '((self.N, self.max_iter))\n', (5538, 5563), True, 'import numpy as np\n'), ((5584, 5607), 'numpy.zeros', 'np.zeros', (['self.max_iter'], {}), '(self.max_iter)\n', (5592, 5607), True, 'import numpy as np\n'), ((5996, 6007), 'time.time', 'time.time', ([], {}), '()\n', (6005, 6007), False, 'import time\n'), ((10405, 10467), 'numpy.mean', 'np.mean', (['lamHistory[:, i - self.conv_window + 1:i + 1]'], {'axis': '(1)'}), '(lamHistory[:, i - self.conv_window + 1:i + 1], axis=1)\n', (10412, 10467), True, 'import numpy as np\n'), ((11291, 11306), 'numpy.shape', 'np.shape', (['p_agg'], {}), '(p_agg)\n', (11299, 11306), True, 'import numpy as np\n'), ((12112, 12125), 'numpy.shape', 'np.shape', (['X_s'], {}), '(X_s)\n', (12120, 12125), True, 'import numpy as np\n'), ((12175, 12190), 'numpy.shape', 'np.shape', (['p_agg'], {}), '(p_agg)\n', (12183, 12190), True, 'import numpy as np\n'), ((12206, 12225), 'numpy.zeros', 'np.zeros', (['(self.N,)'], {}), '((self.N,))\n', (12214, 12225), True, 'import numpy as np\n'), ((12355, 12376), 'numpy.sum', 'np.sum', (['p_agg'], {'axis': '(0)'}), '(p_agg, axis=0)\n', (12361, 12376), True, 'import numpy as np\n'), ((14891, 14918), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (14912, 14918), True, 'import numpy as np\n'), ((15285, 15296), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (15293, 15296), True, 'import numpy as np\n'), ((15427, 15442), 'numpy.ones', 'np.ones', (['(S, B)'], {}), '((S, B))\n', (15434, 15442), True, 'import numpy as np\n'), ((5844, 5855), 'numpy.zeros', 'np.zeros', (['D'], {}), '(D)\n', (5852, 5855), True, 'import numpy as np\n'), ((5859, 5869), 'numpy.ones', 'np.ones', (['D'], {}), '(D)\n', (5866, 5869), True, 'import numpy as np\n'), ((6215, 6251), 'numpy.random.choice', 'np.random.choice', (['D', 'self.batch_size'], {}), '(D, self.batch_size)\n', (6231, 6251), True, 'import numpy as np\n'), ((11336, 11357), 'numpy.sum', 'np.sum', (['p_agg'], {'axis': '(0)'}), '(p_agg, axis=0)\n', (11342, 11357), True, 'import numpy as np\n'), ((12433, 12450), 'numpy.size', 'np.size', (['sumCheck'], {}), '(sumCheck)\n', (12440, 12450), True, 'import numpy as np\n'), ((5671, 5686), 'numpy.ones', 'np.ones', (['self.N'], {}), '(self.N)\n', (5678, 5686), True, 'import numpy as np\n'), ((6633, 6652), 'numpy.max', 'np.max', (['pyx'], {'axis': '(0)'}), '(pyx, axis=0)\n', (6639, 6652), True, 'import numpy as np\n'), ((7510, 7549), 'numpy.any', 'np.any', (['(lamHat + step > self.min_lambda)'], {}), '(lamHat + step > self.min_lambda)\n', (7516, 7549), True, 'import numpy as np\n'), ((8899, 8989), 'numpy.mean', 'np.mean', (['lamHistory[:, i - 2 * self.conv_window + 1:i - self.conv_window + 1]'], {'axis': '(1)'}), '(lamHistory[:, i - 2 * self.conv_window + 1:i - self.conv_window + 1\n ], axis=1)\n', (8906, 8989), True, 'import numpy as np\n'), ((9041, 9103), 'numpy.mean', 'np.mean', (['lamHistory[:, i - self.conv_window + 1:i + 1]'], {'axis': '(1)'}), '(lamHistory[:, i - self.conv_window + 1:i + 1], axis=1)\n', (9048, 9103), True, 'import numpy as np\n'), ((9536, 9565), 'numpy.isnan', 'np.isnan', (['llHistory[bestIter]'], {}), '(llHistory[bestIter])\n', (9544, 9565), True, 'import numpy as np\n'), ((11413, 11431), 
'numpy.log', 'np.log', (['likelihood'], {}), '(likelihood)\n', (11419, 11431), True, 'import numpy as np\n'), ((12632, 12696), 'numpy.sum', 'np.sum', (['(xOverL @ p_agg[:, sumCheck] / sums[sumCheck] - 1)'], {'axis': '(1)'}), '(xOverL @ p_agg[:, sumCheck] / sums[sumCheck] - 1, axis=1)\n', (12638, 12696), True, 'import numpy as np\n'), ((12823, 12896), 'numpy.einsum', 'np.einsum', (['"""ij...,j...->i..."""', 'xOverL[:, :, sumCheck]', 'p_agg[:, sumCheck]'], {}), "('ij...,j...->i...', xOverL[:, :, sumCheck], p_agg[:, sumCheck])\n", (12832, 12896), True, 'import numpy as np\n'), ((12917, 12949), 'numpy.sum', 'np.sum', (['(numer / sums - 1)'], {'axis': '(1)'}), '(numer / sums - 1, axis=1)\n', (12923, 12949), True, 'import numpy as np\n'), ((6797, 6815), 'numpy.shape', 'np.shape', (['X_sample'], {}), '(X_sample)\n', (6805, 6815), True, 'import numpy as np\n'), ((7766, 7819), 'numpy.linalg.norm', 'np.linalg.norm', (['step[lamHat + step > self.min_lambda]'], {}), '(step[lamHat + step > self.min_lambda])\n', (7780, 7819), True, 'import numpy as np\n'), ((9179, 9213), 'numpy.linalg.norm', 'np.linalg.norm', (['(lam2 - lam1)'], {'ord': '(1)'}), '(lam2 - lam1, ord=1)\n', (9193, 9213), True, 'import numpy as np\n'), ((9216, 9243), 'numpy.linalg.norm', 'np.linalg.norm', (['lam1'], {'ord': '(1)'}), '(lam1, ord=1)\n', (9230, 9243), True, 'import numpy as np\n'), ((7353, 7369), 'numpy.zeros', 'np.zeros', (['self.N'], {}), '(self.N)\n', (7361, 7369), True, 'import numpy as np\n'), ((8022, 8042), 'numpy.linalg.norm', 'np.linalg.norm', (['step'], {}), '(step)\n', (8036, 8042), True, 'import numpy as np\n'), ((8564, 8582), 'numpy.array', 'np.array', (['stepIter'], {}), '(stepIter)\n', (8572, 8582), True, 'import numpy as np\n'), ((8150, 8170), 'numpy.linalg.norm', 'np.linalg.norm', (['step'], {}), '(step)\n', (8164, 8170), True, 'import numpy as np\n'), ((10249, 10260), 'time.time', 'time.time', ([], {}), '()\n', (10258, 10260), False, 'import time\n')]
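A wiring sketch for the classes above, assuming a forward-model object fwd_model (an mmv_models.FwdModel or FwdModelGroup instance) and an (M, D) observation matrix Y already exist; the sizes are illustrative.

N = 20                                             # signal dimension (illustrative)
sampler = PoissonSampler(0.1 * np.ones(N), sample_same=True, seed=0)
spore = SPoRe(N, fwd_model, sampler, batch_size=50, step_size=1e-1)
lam_hat, unexplained, lam_history, ll_history = spore.recover(Y, S=1000, seed=0)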
"""Use translation table to translate coding sequence to protein.""" from Bio.Data import CodonTable # type: ignore from Bio.Seq import Seq # type: ignore def translate_cds(cds: str, translation_table: str) -> str: """Translate coding sequence to protein. :param cds: str: DNA coding sequence (CDS) :param translation_table: str: translation table as defined in Bio.Seq.Seq.CodonTable.ambiguous_generic_by_name :return: str: Protein sequence """ table = CodonTable.ambiguous_dna_by_name[translation_table] cds = "".join(cds.split()) # clean out whitespace coding_dna = Seq(cds) protein = coding_dna.translate(table, cds=True, to_stop=True) return str(protein)
[ "Bio.Seq.Seq" ]
[((632, 640), 'Bio.Seq.Seq', 'Seq', (['cds'], {}), '(cds)\n', (635, 640), False, 'from Bio.Seq import Seq\n')]
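A minimal usage sketch for the translate_cds record above; the CDS string and the "Standard" table name are illustrative assumptions, not values taken from the source, and the function from the record is assumed to be in scope.

# --- hedged usage sketch, not part of the record above ---
# The sequence is made up so that it satisfies cds=True: it starts with ATG,
# its length is a multiple of three, it ends with a stop codon (TAA) and it
# contains no internal stop codons.
example_cds = "ATGGCCATTGTAATGGGCCGCTAA"
print(translate_cds(example_cds, "Standard"))  # expected output: MAIVMGR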
""" preprocess-twitter.py python preprocess-twitter.py "Some random text with #hashtags, @mentions and http://t.co/kdjfkdjf (links). :)" Script for preprocessing tweets by <NAME> with small modifications by <NAME> with translation to Python by <NAME> Translation of Ruby script to create features for GloVe vectors for Twitter data. http://nlp.stanford.edu/projects/glove/preprocess-twitter.rb """ import sys import regex as re FLAGS = re.MULTILINE | re.DOTALL def hashtag(text): text = text.group() hashtag_body = text[1:] if hashtag_body.isupper(): result = " {} ".format(hashtag_body.lower()) else: result = " ".join(["<hashtag>"] + re.split(r"(?=[A-Z])", hashtag_body, flags=FLAGS)) return result def allcaps(text): text = text.group() return text.lower() + " <allcaps>" def tokenize(text): # Different regex parts for smiley faces eyes = r"[8:=;]" nose = r"['`\-]?" # function so code less repetitive def re_sub(pattern, repl): return re.sub(pattern, repl, text, flags=FLAGS) text = re_sub(r"https?:\/\/\S+\b|www\.(\w+\.)+\S*", "<url>") text = re_sub(r"@\w+", "<user>") text = re_sub(r"{}{}[)dD]+|[)dD]+{}{}".format(eyes, nose, nose, eyes), "<smile>") text = re_sub(r"{}{}p+".format(eyes, nose), "<lolface>") text = re_sub(r"{}{}\(+|\)+{}{}".format(eyes, nose, nose, eyes), "<sadface>") text = re_sub(r"{}{}[\/|l*]".format(eyes, nose), "<neutralface>") text = re_sub(r"/"," / ") text = re_sub(r"<3","<heart>") text = re_sub(r"[-+]?[.\d]*[\d]+[:,.\d]*", "<number>") text = re_sub(r"#\S+", hashtag) text = re_sub(r"([!?.]){2,}", r"\1 <repeat>") text = re_sub(r"\b(\S*?)(.)\2{2,}\b", r"\1\2 <elong>") ## -- I just don't understand why the Ruby script adds <allcaps> to everything so I limited the selection. # text = re_sub(r"([^a-z0-9()<>'`\-]){2,}", allcaps) text = re_sub(r"([A-Z]){2,}", allcaps) return text.lower() if __name__ == '__main__': _, text = sys.argv if text == "test": text = "I TEST alllll kinds of #hashtags and #HASHTAGS, @mentions and 3000 (http://t.co/dkfjkdf). w/ <3 :) haha!!!!!" tokens = tokenize(text) print(tokens)
[ "regex.sub", "regex.split" ]
[((1019, 1059), 'regex.sub', 're.sub', (['pattern', 'repl', 'text'], {'flags': 'FLAGS'}), '(pattern, repl, text, flags=FLAGS)\n', (1025, 1059), True, 'import regex as re\n'), ((671, 719), 'regex.split', 're.split', (['"""(?=[A-Z])"""', 'hashtag_body'], {'flags': 'FLAGS'}), "('(?=[A-Z])', hashtag_body, flags=FLAGS)\n", (679, 719), True, 'import regex as re\n')]
#! /usr/bin/env python # -*- coding:utf8 -*- # # pw_classes.py # # This file is part of pyplanes, a software distributed under the MIT license. # For any question, please contact one of the authors cited below. # # Copyright (c) 2020 # <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # import numpy as np import numpy.linalg as LA import matplotlib.pyplot as plt from mediapack import from_yaml from mediapack import Air, PEM, EqFluidJCA from pyPLANES.utils.io import initialisation_out_files_plain from pyPLANES.core.calculus import PwCalculus from pyPLANES.core.multilayer import MultiLayer from pyPLANES.pw.pw_layers import FluidLayer from pyPLANES.pw.pw_interfaces import FluidFluidInterface, RigidBacking Air = Air() # def initialise_PW_solver(L, b): # nb_PW = 0 # dofs = [] # for _layer in L: # if _layer.medium.MODEL == "fluid": # dofs.append(nb_PW+np.arange(2)) # nb_PW += 2 # elif _layer.medium.MODEL == "pem": # dofs.append(nb_PW+np.arange(6)) # nb_PW += 6 # elif _layer.medium.MODEL == "elastic": # dofs.append(nb_PW+np.arange(4)) # nb_PW += 4 # interface = [] # for i_l, _layer in enumerate(L[:-1]): # interface.append((L[i_l].medium.MODEL, L[i_l+1].medium.MODEL)) # return nb_PW, interface, dofs class PwProblem(PwCalculus, MultiLayer): """ Plane Wave Problem """ def __init__(self, **kwargs): PwCalculus.__init__(self, **kwargs) termination = kwargs.get("termination","rigid") self.method = kwargs.get("termination","global") MultiLayer.__init__(self, **kwargs) self.kx, self.ky, self.k = None, None, None self.shift_plot = kwargs.get("shift_pw", 0.) 
self.plot = kwargs.get("plot_results", [False]*6) self.result = {} self.outfiles_directory = False if self.method == "global": self.layers.insert(0,FluidLayer(Air,1.e-2)) if self.layers[1].medium.MEDIUM_TYPE == "fluid": self.interfaces.append(FluidFluidInterface(self.layers[0],self.layers[1])) self.nb_PW = 0 for _layer in self.layers: if _layer.medium.MODEL == "fluid": _layer.dofs = self.nb_PW+np.arange(2) self.nb_PW += 2 elif _layer.medium.MODEL == "pem": _layer.dofs = self.nb_PW+np.arange(6) self.nb_PW += 6 elif _layer.medium.MODEL == "elastic": _layer.dofs = self.nb_PW+np.arange(4) self.nb_PW += 4 def update_frequency(self, f): PwCalculus.update_frequency(self, f) MultiLayer.update_frequency(self, f, self.k, self.kx) def create_linear_system(self, f): self.A = np.zeros((self.nb_PW-1, self.nb_PW), dtype=complex) i_eq = 0 # Loop on the interfaces for _int in self.interfaces: if self.method == "global": i_eq = _int.update_M_global(self.A, i_eq) # for i_inter, _inter in enumerate(self.interfaces): # if _inter[0] == "fluid": # if _inter[1] == "fluid": # i_eq = self.interface_fluid_fluid(i_eq, i_inter, Layers, dofs, M) # if _inter[1] == "pem": # i_eq = self.interface_fluid_pem(i_eq, i_inter, Layers, dofs, M) # if _inter[1] == "elastic": # i_eq = self.interface_fluid_elastic(i_eq, i_inter, Layers, dofs, M) # elif _inter[0] == "pem": # if _inter[1] == "fluid": # i_eq = self.interface_pem_fluid(i_eq, i_inter, Layers, dofs, M) # if _inter[1] == "pem": # i_eq = self.interface_pem_pem(i_eq, i_inter, Layers, dofs, M) # if _inter[1] == "elastic": # i_eq = self.interface_pem_elastic(i_eq, i_inter, Layers, dofs, M) # elif _inter[0] == "elastic": # if _inter[1] == "fluid": # i_eq = self.interface_elastic_fluid(i_eq, i_inter, Layers, dofs, M) # if _inter[1] == "pem": # i_eq = self.interface_elastic_pem(i_eq, i_inter, Layers, dofs, M) # if _inter[1] == "elastic": # i_eq = self.interface_elastic_elastic(i_eq, i_inter, Layers, dofs, M) # if self.backing == backing.rigid: # if Layers[-1].medium.MODEL == "fluid": # i_eq = self.interface_fluid_rigid(M, i_eq, Layers[-1], dofs[-1] ) # elif Layers[-1].medium.MODEL == "pem": # i_eq = self.interface_pem_rigid(M, i_eq, Layers[-1], dofs[-1]) # elif Layers[-1].medium.MODEL == "elastic": # i_eq = self.interface_elastic_rigid(M, i_eq, Layers[-1], dofs[-1]) # elif self.backing == "transmission": # i_eq = self.semi_infinite_medium(M, i_eq, Layers[-1], dofs[-1] ) self.F = -self.A[:, 0]*np.exp(1j*self.ky*self.layers[0].d) # - is for transposition, exponential term is for the phase shift self.A = np.delete(self.A, 0, axis=1) # print(self.A) X = LA.solve(self.A, self.F) # print(X) # R_pyPLANES_PW = X[0] # if self.backing == "transmission": # T_pyPLANES_PW = X[-2] # else: # T_pyPLANES_PW = 0. # X = np.delete(X, 0) # del(dofs[0]) # for i, _ld in enumerate(dofs): # dofs[i] -= 2 # if self.plot: # self.plot_sol_PW(X, dofs) # out["R"] = R_pyPLANES_PW # out["T"] = T_pyPLANES_PW # return out # class Solver_PW(PwCalculus): # def __init__(self, **kwargs): # PwCalculus.__init__(self, **kwargs) # ml = kwargs.get("ml") # termination = kwargs.get("termination") # self.layers = [] # for _l in ml: # if _l[0] == "Air": # mat = Air # else: # mat = from_yaml(_l[0]+".yaml") # d = _l[1] # self.layers.append(Layer(mat,d)) # if termination in ["trans", "transmission","Transmission"]: # self.backing = "Transmission" # else: # self.backing = backing.rigid # self.kx, self.ky, self.k = None, None, None # self.shift_plot = kwargs.get("shift_pw", 0.) 
# self.plot = kwargs.get("plot_results", [False]*6) # self.result = {} # self.outfiles_directory = False # initialisation_out_files_plain(self) # def write_out_files(self, out): # self.out_file.write("{:.12e}\t".format(self.current_frequency)) # abs = 1-np.abs(out["R"])**2 # self.out_file.write("{:.12e}\t".format(abs)) # self.out_file.write("\n") # def interface_fluid_fluid(self, ieq, iinter, L, d, M): # SV_1, k_y_1 = fluid_SV(self.kx, self.k, L[iinter].medium.K) # SV_2, k_y_2 = fluid_SV(self.kx, self.k, L[iinter+1].medium.K) # M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1*L[iinter].thickness) # M[ieq, d[iinter+0][1]] = SV_1[0, 1] # M[ieq, d[iinter+1][0]] = -SV_2[0, 0] # M[ieq, d[iinter+1][1]] = -SV_2[0, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness) # ieq += 1 # M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1*L[iinter].thickness) # M[ieq, d[iinter+0][1]] = SV_1[1, 1] # M[ieq, d[iinter+1][0]] = -SV_2[1, 0] # M[ieq, d[iinter+1][1]] = -SV_2[1, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness) # ieq += 1 # return ieq # def interface_fluid_rigid(self, M, ieq, L, d): # SV, k_y = fluid_SV(self.kx, self.k, L.medium.K) # M[ieq, d[0]] = SV[0, 0]*np.exp(-1j*k_y*L.thickness) # M[ieq, d[1]] = SV[0, 1] # ieq += 1 # return ieq # def semi_infinite_medium(self, M, ieq, L, d): # M[ieq, d[1]] = 1. # ieq += 1 # return ieq # def interface_pem_pem(self, ieq, iinter, L, d, M): # SV_1, k_y_1 = PEM_SV(L[iinter].medium, self.kx) # SV_2, k_y_2 = PEM_SV(L[iinter+1].medium, self.kx) # for _i in range(6): # M[ieq, d[iinter+0][0]] = SV_1[_i, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness) # M[ieq, d[iinter+0][1]] = SV_1[_i, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness) # M[ieq, d[iinter+0][2]] = SV_1[_i, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness) # M[ieq, d[iinter+0][3]] = SV_1[_i, 3] # M[ieq, d[iinter+0][4]] = SV_1[_i, 4] # M[ieq, d[iinter+0][5]] = SV_1[_i, 5] # M[ieq, d[iinter+1][0]] = -SV_2[_i, 0] # M[ieq, d[iinter+1][1]] = -SV_2[_i, 1] # M[ieq, d[iinter+1][2]] = -SV_2[_i, 2] # M[ieq, d[iinter+1][3]] = -SV_2[_i, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness) # M[ieq, d[iinter+1][4]] = -SV_2[_i, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness) # M[ieq, d[iinter+1][5]] = -SV_2[_i, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness) # ieq += 1 # return ieq # def interface_fluid_pem(self, ieq, iinter, L, d, M): # SV_1, k_y_1 = fluid_SV(self.kx, self.k, L[iinter].medium.K) # SV_2, k_y_2 = PEM_SV(L[iinter+1].medium,self.kx) # # print(k_y_2) # M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1*L[iinter].thickness) # M[ieq, d[iinter+0][1]] = SV_1[0, 1] # M[ieq, d[iinter+1][0]] = -SV_2[2, 0] # M[ieq, d[iinter+1][1]] = -SV_2[2, 1] # M[ieq, d[iinter+1][2]] = -SV_2[2, 2] # M[ieq, d[iinter+1][3]] = -SV_2[2, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness) # M[ieq, d[iinter+1][4]] = -SV_2[2, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness) # M[ieq, d[iinter+1][5]] = -SV_2[2, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness) # ieq += 1 # M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1*L[iinter].thickness) # M[ieq, d[iinter+0][1]] = SV_1[1, 1] # M[ieq, d[iinter+1][0]] = -SV_2[4, 0] # M[ieq, d[iinter+1][1]] = -SV_2[4, 1] # M[ieq, d[iinter+1][2]] = -SV_2[4, 2] # M[ieq, d[iinter+1][3]] = -SV_2[4, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness) # M[ieq, d[iinter+1][4]] = -SV_2[4, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness) # M[ieq, d[iinter+1][5]] = -SV_2[4, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness) # ieq += 1 # M[ieq, d[iinter+1][0]] = SV_2[0, 0] # M[ieq, d[iinter+1][1]] = SV_2[0, 1] # M[ieq, d[iinter+1][2]] = SV_2[0, 2] # M[ieq, d[iinter+1][3]] = 
SV_2[0, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness) # M[ieq, d[iinter+1][4]] = SV_2[0, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness) # M[ieq, d[iinter+1][5]] = SV_2[0, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness) # ieq += 1 # M[ieq, d[iinter+1][0]] = SV_2[3, 0] # M[ieq, d[iinter+1][1]] = SV_2[3, 1] # M[ieq, d[iinter+1][2]] = SV_2[3, 2] # M[ieq, d[iinter+1][3]] = SV_2[3, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness) # M[ieq, d[iinter+1][4]] = SV_2[3, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness) # M[ieq, d[iinter+1][5]] = SV_2[3, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness) # ieq += 1 # return ieq # def interface_elastic_pem(self, ieq, iinter, L, d, M): # SV_1, k_y_1 = elastic_SV(L[iinter].medium,self.kx, self.omega) # SV_2, k_y_2 = PEM_SV(L[iinter+1].medium,self.kx) # # print(k_y_2) # M[ieq, d[iinter+0][0]] = -SV_1[0, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness) # M[ieq, d[iinter+0][1]] = -SV_1[0, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness) # M[ieq, d[iinter+0][2]] = -SV_1[0, 2] # M[ieq, d[iinter+0][3]] = -SV_1[0, 3] # M[ieq, d[iinter+1][0]] = SV_2[0, 0] # M[ieq, d[iinter+1][1]] = SV_2[0, 1] # M[ieq, d[iinter+1][2]] = SV_2[0, 2] # M[ieq, d[iinter+1][3]] = SV_2[0, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness) # M[ieq, d[iinter+1][4]] = SV_2[0, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness) # M[ieq, d[iinter+1][5]] = SV_2[0, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness) # ieq += 1 # M[ieq, d[iinter+0][0]] = -SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness) # M[ieq, d[iinter+0][1]] = -SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness) # M[ieq, d[iinter+0][2]] = -SV_1[1, 2] # M[ieq, d[iinter+0][3]] = -SV_1[1, 3] # M[ieq, d[iinter+1][0]] = SV_2[1, 0] # M[ieq, d[iinter+1][1]] = SV_2[1, 1] # M[ieq, d[iinter+1][2]] = SV_2[1, 2] # M[ieq, d[iinter+1][3]] = SV_2[1, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness) # M[ieq, d[iinter+1][4]] = SV_2[1, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness) # M[ieq, d[iinter+1][5]] = SV_2[1, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness) # ieq += 1 # M[ieq, d[iinter+0][0]] = -SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness) # M[ieq, d[iinter+0][1]] = -SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness) # M[ieq, d[iinter+0][2]] = -SV_1[1, 2] # M[ieq, d[iinter+0][3]] = -SV_1[1, 3] # M[ieq, d[iinter+1][0]] = SV_2[2, 0] # M[ieq, d[iinter+1][1]] = SV_2[2, 1] # M[ieq, d[iinter+1][2]] = SV_2[2, 2] # M[ieq, d[iinter+1][3]] = SV_2[2, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness) # M[ieq, d[iinter+1][4]] = SV_2[2, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness) # M[ieq, d[iinter+1][5]] = SV_2[2, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness) # ieq += 1 # M[ieq, d[iinter+0][0]] = -SV_1[2, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness) # M[ieq, d[iinter+0][1]] = -SV_1[2, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness) # M[ieq, d[iinter+0][2]] = -SV_1[2, 2] # M[ieq, d[iinter+0][3]] = -SV_1[2, 3] # M[ieq, d[iinter+1][0]] = (SV_2[3, 0]-SV_2[4, 0]) # M[ieq, d[iinter+1][1]] = (SV_2[3, 1]-SV_2[4, 1]) # M[ieq, d[iinter+1][2]] = (SV_2[3, 2]-SV_2[4, 2]) # M[ieq, d[iinter+1][3]] = (SV_2[3, 3]-SV_2[4, 3])*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness) # M[ieq, d[iinter+1][4]] = (SV_2[3, 4]-SV_2[4, 4])*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness) # M[ieq, d[iinter+1][5]] = (SV_2[3, 5]-SV_2[4, 5])*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness) # ieq += 1 # M[ieq, d[iinter+0][0]] = -SV_1[3, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness) # M[ieq, d[iinter+0][1]] = -SV_1[3, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness) # M[ieq, d[iinter+0][2]] = -SV_1[3, 2] # M[ieq, d[iinter+0][3]] = -SV_1[3, 3] # M[ieq, 
d[iinter+1][0]] = SV_2[5, 0] # M[ieq, d[iinter+1][1]] = SV_2[5, 1] # M[ieq, d[iinter+1][2]] = SV_2[5, 2] # M[ieq, d[iinter+1][3]] = SV_2[5, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness) # M[ieq, d[iinter+1][4]] = SV_2[5, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness) # M[ieq, d[iinter+1][5]] = SV_2[5, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness) # ieq += 1 # return ieq # def interface_pem_elastic(self, ieq, iinter, L, d, M): # SV_1, k_y_1 = PEM_SV(L[iinter].medium,self.kx) # SV_2, k_y_2 = elastic_SV(L[iinter+1].medium,self.kx, self.omega) # # print(k_y_2) # M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness) # M[ieq, d[iinter+0][1]] = SV_1[0, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness) # M[ieq, d[iinter+0][2]] = SV_1[0, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness) # M[ieq, d[iinter+0][3]] = SV_1[0, 3] # M[ieq, d[iinter+0][4]] = SV_1[0, 4] # M[ieq, d[iinter+0][5]] = SV_1[0, 5] # M[ieq, d[iinter+1][0]] = -SV_2[0, 0] # M[ieq, d[iinter+1][1]] = -SV_2[0, 1] # M[ieq, d[iinter+1][2]] = -SV_2[0, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness) # M[ieq, d[iinter+1][3]] = -SV_2[0, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness) # ieq += 1 # M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness) # M[ieq, d[iinter+0][1]] = SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness) # M[ieq, d[iinter+0][2]] = SV_1[1, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness) # M[ieq, d[iinter+0][3]] = SV_1[1, 3] # M[ieq, d[iinter+0][4]] = SV_1[1, 4] # M[ieq, d[iinter+0][5]] = SV_1[1, 5] # M[ieq, d[iinter+1][0]] = -SV_2[1, 0] # M[ieq, d[iinter+1][1]] = -SV_2[1, 1] # M[ieq, d[iinter+1][2]] = -SV_2[1, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness) # M[ieq, d[iinter+1][3]] = -SV_2[1, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness) # ieq += 1 # M[ieq, d[iinter+0][0]] = SV_1[2, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness) # M[ieq, d[iinter+0][1]] = SV_1[2, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness) # M[ieq, d[iinter+0][2]] = SV_1[2, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness) # M[ieq, d[iinter+0][3]] = SV_1[2, 3] # M[ieq, d[iinter+0][4]] = SV_1[2, 4] # M[ieq, d[iinter+0][5]] = SV_1[2, 5] # M[ieq, d[iinter+1][0]] = -SV_2[1, 0] # M[ieq, d[iinter+1][1]] = -SV_2[1, 1] # M[ieq, d[iinter+1][2]] = -SV_2[1, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness) # M[ieq, d[iinter+1][3]] = -SV_2[1, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness) # ieq += 1 # M[ieq, d[iinter+0][0]] = (SV_1[3, 0]-SV_1[4, 0])*np.exp(-1j*k_y_1[0]*L[iinter].thickness) # M[ieq, d[iinter+0][1]] = (SV_1[3, 1]-SV_1[4, 1])*np.exp(-1j*k_y_1[1]*L[iinter].thickness) # M[ieq, d[iinter+0][2]] = (SV_1[3, 2]-SV_1[4, 2])*np.exp(-1j*k_y_1[2]*L[iinter].thickness) # M[ieq, d[iinter+0][3]] = (SV_1[3, 3]-SV_1[4, 3]) # M[ieq, d[iinter+0][4]] = (SV_1[3, 4]-SV_1[4, 4]) # M[ieq, d[iinter+0][5]] = (SV_1[3, 5]-SV_1[4, 5]) # M[ieq, d[iinter+1][0]] = -SV_2[2, 0] # M[ieq, d[iinter+1][1]] = -SV_2[2, 1] # M[ieq, d[iinter+1][2]] = -SV_2[2, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness) # M[ieq, d[iinter+1][3]] = -SV_2[2, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness) # ieq += 1 # M[ieq, d[iinter+0][0]] = SV_1[5, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness) # M[ieq, d[iinter+0][1]] = SV_1[5, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness) # M[ieq, d[iinter+0][2]] = SV_1[5, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness) # M[ieq, d[iinter+0][3]] = SV_1[5, 3] # M[ieq, d[iinter+0][4]] = SV_1[5, 4] # M[ieq, d[iinter+0][5]] = SV_1[5, 5] # M[ieq, d[iinter+1][0]] = -SV_2[3, 0] # M[ieq, d[iinter+1][1]] = -SV_2[3, 1] # M[ieq, d[iinter+1][2]] = -SV_2[3, 
2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness) # M[ieq, d[iinter+1][3]] = -SV_2[3, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness) # ieq += 1 # return ieq # def interface_elastic_elastic(self, ieq, iinter, L, d, M): # SV_1, k_y_1 = elastic_SV(L[iinter].medium,self.kx, self.omega) # SV_2, k_y_2 = elastic_SV(L[iinter+1].medium,self.kx, self.omega) # for _i in range(4): # M[ieq, d[iinter+0][0]] = SV_1[_i, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness) # M[ieq, d[iinter+0][1]] = SV_1[_i, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness) # M[ieq, d[iinter+0][2]] = SV_1[_i, 2] # M[ieq, d[iinter+0][3]] = SV_1[_i, 3] # M[ieq, d[iinter+1][0]] = -SV_2[_i, 0] # M[ieq, d[iinter+1][1]] = -SV_2[_i, 1] # M[ieq, d[iinter+1][2]] = -SV_2[_i, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness) # M[ieq, d[iinter+1][3]] = -SV_2[_i, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness) # ieq += 1 # return ieq # def interface_fluid_elastic(self, ieq, iinter, L, d, M): # SV_1, k_y_1 = fluid_SV(self.kx, self.k, L[iinter].medium.K) # SV_2, k_y_2 = elastic_SV(L[iinter+1].medium, self.kx, self.omega) # # Continuity of u_y # M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1*L[iinter].thickness) # M[ieq, d[iinter+0][1]] = SV_1[0, 1] # M[ieq, d[iinter+1][0]] = -SV_2[1, 0] # M[ieq, d[iinter+1][1]] = -SV_2[1, 1] # M[ieq, d[iinter+1][2]] = -SV_2[1, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness) # M[ieq, d[iinter+1][3]] = -SV_2[1, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness) # ieq += 1 # # sigma_yy = -p # M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1*L[iinter].thickness) # M[ieq, d[iinter+0][1]] = SV_1[1, 1] # M[ieq, d[iinter+1][0]] = SV_2[2, 0] # M[ieq, d[iinter+1][1]] = SV_2[2, 1] # M[ieq, d[iinter+1][2]] = SV_2[2, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness) # M[ieq, d[iinter+1][3]] = SV_2[2, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness) # ieq += 1 # # sigma_xy = 0 # M[ieq, d[iinter+1][0]] = SV_2[0, 0] # M[ieq, d[iinter+1][1]] = SV_2[0, 1] # M[ieq, d[iinter+1][2]] = SV_2[0, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness) # M[ieq, d[iinter+1][3]] = SV_2[0, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness) # ieq += 1 # return ieq # def interface_pem_fluid(self, ieq, iinter, L, d, M): # SV_1, k_y_1 = PEM_SV(L[iinter].medium, self.kx) # SV_2, k_y_2 = fluid_SV(self.kx, self.k, L[iinter+1].medium.K) # # print(k_y_2) # M[ieq, d[iinter+0][0]] = -SV_1[2, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness) # M[ieq, d[iinter+0][1]] = -SV_1[2, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness) # M[ieq, d[iinter+0][2]] = -SV_1[2, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness) # M[ieq, d[iinter+0][3]] = -SV_1[2, 3] # M[ieq, d[iinter+0][4]] = -SV_1[2, 4] # M[ieq, d[iinter+0][5]] = -SV_1[2, 5] # M[ieq, d[iinter+1][0]] = SV_2[0, 0] # M[ieq, d[iinter+1][1]] = SV_2[0, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness) # ieq += 1 # M[ieq, d[iinter+0][0]] = -SV_1[4, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness) # M[ieq, d[iinter+0][1]] = -SV_1[4, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness) # M[ieq, d[iinter+0][2]] = -SV_1[4, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness) # M[ieq, d[iinter+0][3]] = -SV_1[4, 3] # M[ieq, d[iinter+0][4]] = -SV_1[4, 4] # M[ieq, d[iinter+0][5]] = -SV_1[4, 5] # M[ieq, d[iinter+1][0]] = SV_2[1, 0] # M[ieq, d[iinter+1][1]] = SV_2[1, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness) # ieq += 1 # M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness) # M[ieq, d[iinter+0][1]] = SV_1[0, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness) # M[ieq, d[iinter+0][2]] = SV_1[0, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness) # M[ieq, d[iinter+0][3]] = SV_1[0, 3] # M[ieq, 
d[iinter+0][4]] = SV_1[0, 4] # M[ieq, d[iinter+0][5]] = SV_1[0, 5] # ieq += 1 # M[ieq, d[iinter+0][0]] = SV_1[3, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness) # M[ieq, d[iinter+0][1]] = SV_1[3, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness) # M[ieq, d[iinter+0][2]] = SV_1[3, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness) # M[ieq, d[iinter+0][3]] = SV_1[3, 3] # M[ieq, d[iinter+0][4]] = SV_1[3, 4] # M[ieq, d[iinter+0][5]] = SV_1[3, 5] # ieq += 1 # return ieq # def interface_elastic_fluid(self, ieq, iinter, L, d, M): # SV_1, k_y_1 = elastic_SV(L[iinter].medium, self.kx, self.omega) # SV_2, k_y_2 = fluid_SV(self.kx, self.k, L[iinter+1].medium.K) # # Continuity of u_y # M[ieq, d[iinter+0][0]] = -SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness) # M[ieq, d[iinter+0][1]] = -SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness) # M[ieq, d[iinter+0][2]] = -SV_1[1, 2] # M[ieq, d[iinter+0][3]] = -SV_1[1, 3] # M[ieq, d[iinter+1][0]] = SV_2[0, 0] # M[ieq, d[iinter+1][1]] = SV_2[0, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness) # ieq += 1 # # sigma_yy = -p # M[ieq, d[iinter+0][0]] = SV_1[2, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness) # M[ieq, d[iinter+0][1]] = SV_1[2, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness) # M[ieq, d[iinter+0][2]] = SV_1[2, 2] # M[ieq, d[iinter+0][3]] = SV_1[2, 3] # M[ieq, d[iinter+1][0]] = SV_2[1, 0] # M[ieq, d[iinter+1][1]] = SV_2[1, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness) # ieq += 1 # # sigma_xy = 0 # M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness) # M[ieq, d[iinter+0][1]] = SV_1[0, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness) # M[ieq, d[iinter+0][2]] = SV_1[0, 2] # M[ieq, d[iinter+0][3]] = SV_1[0, 3] # ieq += 1 # return ieq # def interface_elastic_rigid(self, M, ieq, L, d): # SV, k_y = elastic_SV(L.medium,self.kx, self.omega) # M[ieq, d[0]] = SV[1, 0]*np.exp(-1j*k_y[0]*L.thickness) # M[ieq, d[1]] = SV[1, 1]*np.exp(-1j*k_y[1]*L.thickness) # M[ieq, d[2]] = SV[1, 2] # M[ieq, d[3]] = SV[1, 3] # ieq += 1 # M[ieq, d[0]] = SV[3, 0]*np.exp(-1j*k_y[0]*L.thickness) # M[ieq, d[1]] = SV[3, 1]*np.exp(-1j*k_y[1]*L.thickness) # M[ieq, d[2]] = SV[3, 2] # M[ieq, d[3]] = SV[3, 3] # ieq += 1 # return ieq # def interface_pem_rigid(self, M, ieq, L, d): # SV, k_y = PEM_SV(L.medium, self.kx) # M[ieq, d[0]] = SV[1, 0]*np.exp(-1j*k_y[0]*L.thickness) # M[ieq, d[1]] = SV[1, 1]*np.exp(-1j*k_y[1]*L.thickness) # M[ieq, d[2]] = SV[1, 2]*np.exp(-1j*k_y[2]*L.thickness) # M[ieq, d[3]] = SV[1, 3] # M[ieq, d[4]] = SV[1, 4] # M[ieq, d[5]] = SV[1, 5] # ieq += 1 # M[ieq, d[0]] = SV[2, 0]*np.exp(-1j*k_y[0]*L.thickness) # M[ieq, d[1]] = SV[2, 1]*np.exp(-1j*k_y[1]*L.thickness) # M[ieq, d[2]] = SV[2, 2]*np.exp(-1j*k_y[2]*L.thickness) # M[ieq, d[3]] = SV[2, 3] # M[ieq, d[4]] = SV[2, 4] # M[ieq, d[5]] = SV[2, 5] # ieq += 1 # M[ieq, d[0]] = SV[5, 0]*np.exp(-1j*k_y[0]*L.thickness) # M[ieq, d[1]] = SV[5, 1]*np.exp(-1j*k_y[1]*L.thickness) # M[ieq, d[2]] = SV[5, 2]*np.exp(-1j*k_y[2]*L.thickness) # M[ieq, d[3]] = SV[5, 3] # M[ieq, d[4]] = SV[5, 4] # M[ieq, d[5]] = SV[5, 5] # ieq += 1 # return ieq # def plot_sol_PW(self, X, dofs): # x_start = self.shift_plot # for _l, _layer in enumerate(self.layers): # x_f = np.linspace(0, _layer.thickness,200) # x_b = x_f-_layer.thickness # if _layer.medium.MODEL == "fluid": # SV, k_y = fluid_SV(self.kx, self.k, _layer.medium.K) # pr = SV[1, 0]*np.exp(-1j*k_y*x_f)*X[dofs[_l][0]] # pr += SV[1, 1]*np.exp( 1j*k_y*x_b)*X[dofs[_l][1]] # ut = SV[0, 0]*np.exp(-1j*k_y*x_f)*X[dofs[_l][0]] # ut += SV[0, 1]*np.exp( 1j*k_y*x_b)*X[dofs[_l][1]] # if self.plot[2]: # plt.figure(2) # 
plt.plot(x_start+x_f, np.abs(pr), 'r') # plt.plot(x_start+x_f, np.imag(pr), 'm') # plt.title("Pressure") # # plt.figure(5) # # plt.plot(x_start+x_f,np.abs(ut),'b') # # plt.plot(x_start+x_f,np.imag(ut),'k') # if _layer.medium.MODEL == "pem": # SV, k_y = PEM_SV(_layer.medium, self.kx) # ux, uy, pr, ut = 0*1j*x_f, 0*1j*x_f, 0*1j*x_f, 0*1j*x_f # for i_dim in range(3): # ux += SV[1, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]] # ux += SV[1, i_dim+3]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+3]] # uy += SV[5, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]] # uy += SV[5, i_dim+3]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+3]] # pr += SV[4, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]] # pr += SV[4, i_dim+3]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+3]] # ut += SV[2, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]] # ut += SV[2, i_dim+3]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+3]] # if self.plot[0]: # plt.figure(0) # plt.plot(x_start+x_f, np.abs(uy), 'r') # plt.plot(x_start+x_f, np.imag(uy), 'm') # plt.title("Solid displacement along x") # if self.plot[1]: # plt.figure(1) # plt.plot(x_start+x_f, np.abs(ux), 'r') # plt.plot(x_start+x_f, np.imag(ux), 'm') # plt.title("Solid displacement along y") # if self.plot[2]: # plt.figure(2) # plt.plot(x_start+x_f, np.abs(pr), 'r') # plt.plot(x_start+x_f, np.imag(pr), 'm') # plt.title("Pressure") # if _layer.medium.MODEL == "elastic": # SV, k_y = elastic_SV(_layer.medium, self.kx, self.omega) # ux, uy, pr, sig = 0*1j*x_f, 0*1j*x_f, 0*1j*x_f, 0*1j*x_f # for i_dim in range(2): # ux += SV[1, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]] # ux += SV[1, i_dim+2]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+2]] # uy += SV[3, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]] # uy += SV[3, i_dim+2]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+2]] # pr -= SV[2, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]] # pr -= SV[2, i_dim+2]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+2]] # sig -= SV[0, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]] # sig -= SV[0, i_dim+2]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+2]] # if self.plot[0]: # plt.figure(0) # plt.plot(x_start+x_f, np.abs(uy), 'r') # plt.plot(x_start+x_f, np.imag(uy), 'm') # plt.title("Solid displacement along x") # if self.plot[1]: # plt.figure(1) # plt.plot(x_start+x_f, np.abs(ux), 'r') # plt.plot(x_start+x_f, np.imag(ux), 'm') # plt.title("Solid displacement along y") # # if self.plot[2]: # # plt.figure(2) # # plt.plot(x_start+x_f, np.abs(pr), 'r') # # plt.plot(x_start+x_f, np.imag(pr), 'm') # # plt.title("Sigma_yy") # # if self.plot[2]: # # plt.figure(3) # # plt.plot(x_start+x_f, np.abs(sig), 'r') # # plt.plot(x_start+x_f, np.imag(sig), 'm') # # plt.title("Sigma_xy") # x_start += _layer.thickness # def PEM_SV(mat,ky): # ''' S={0:\hat{\sigma}_{xy}, 1:u_y^s, 2:u_y^t, 3:\hat{\sigma}_{yy}, 4:p, 5:u_x^s}''' # kx_1 = np.sqrt(mat.delta_1**2-ky**2) # kx_2 = np.sqrt(mat.delta_2**2-ky**2) # kx_3 = np.sqrt(mat.delta_3**2-ky**2) # kx = np.array([kx_1, kx_2, kx_3]) # delta = np.array([mat.delta_1, mat.delta_2, mat.delta_3]) # alpha_1 = -1j*mat.A_hat*mat.delta_1**2-1j*2*mat.N*kx[0]**2 # alpha_2 = -1j*mat.A_hat*mat.delta_2**2-1j*2*mat.N*kx[1]**2 # alpha_3 = -2*1j*mat.N*kx[2]*ky # SV = np.zeros((6,6), dtype=complex) # SV[0:6, 0] = np.array([-2*1j*mat.N*kx[0]*ky, kx[0], mat.mu_1*kx[0], alpha_1, 1j*delta[0]**2*mat.K_eq_til*mat.mu_1, ky]) # SV[0:6, 3] = np.array([ 2*1j*mat.N*kx[0]*ky,-kx[0],-mat.mu_1*kx[0], alpha_1, 1j*delta[0]**2*mat.K_eq_til*mat.mu_1, ky]) # SV[0:6, 1] = 
np.array([-2*1j*mat.N*kx[1]*ky, kx[1], mat.mu_2*kx[1],alpha_2, 1j*delta[1]**2*mat.K_eq_til*mat.mu_2, ky]) # SV[0:6, 4] = np.array([ 2*1j*mat.N*kx[1]*ky,-kx[1],-mat.mu_2*kx[1],alpha_2, 1j*delta[1]**2*mat.K_eq_til*mat.mu_2, ky]) # SV[0:6, 2] = np.array([1j*mat.N*(kx[2]**2-ky**2), ky, mat.mu_3*ky, alpha_3, 0., -kx[2]]) # SV[0:6, 5] = np.array([1j*mat.N*(kx[2]**2-ky**2), ky, mat.mu_3*ky, -alpha_3, 0., kx[2]]) # return SV, kx # def elastic_SV(mat,ky, omega): # ''' S={0:\sigma_{xy}, 1: u_y, 2 \sigma_{yy}, 3 u_x}''' # P_mat = mat.lambda_ + 2.*mat.mu # delta_p = omega*np.sqrt(mat.rho/P_mat) # delta_s = omega*np.sqrt(mat.rho/mat.mu) # kx_p = np.sqrt(delta_p**2-ky**2) # kx_s = np.sqrt(delta_s**2-ky**2) # kx = np.array([kx_p, kx_s]) # alpha_p = -1j*mat.lambda_*delta_p**2 - 2j*mat.mu*kx[0]**2 # alpha_s = 2j*mat.mu*kx[1]*ky # SV = np.zeros((4, 4), dtype=np.complex) # SV[0:4, 0] = np.array([-2.*1j*mat.mu*kx[0]*ky, kx[0], alpha_p, ky]) # SV[0:4, 2] = np.array([ 2.*1j*mat.mu*kx[0]*ky, -kx[0], alpha_p, ky]) # SV[0:4, 1] = np.array([1j*mat.mu*(kx[1]**2-ky**2), ky,-alpha_s, -kx[1]]) # SV[0:4, 3] = np.array([1j*mat.mu*(kx[1]**2-ky**2), ky, alpha_s, kx[1]]) # return SV, kx # def fluid_SV(kx, k, K): # ''' S={0:u_y , 1:p}''' # ky = np.sqrt(k**2-kx**2) # SV = np.zeros((2, 2), dtype=complex) # SV[0, 0:2] = np.array([ky/(1j*K*k**2), -ky/(1j*K*k**2)]) # SV[1, 0:2] = np.array([1, 1]) # return SV, ky # def resolution_PW_imposed_displacement(S, p): # # print("k={}".format(p.k)) # Layers = S.layers.copy() # n, interfaces, dofs = initialise_PW_solver(Layers, S.backing) # M = np.zeros((n, n), dtype=complex) # i_eq = 0 # # Loop on the layers # for i_inter, _inter in enumerate(interfaces): # if _inter[0] == "fluid": # if _inter[1] == "fluid": # i_eq = interface_fluid_fluid(i_eq, i_inter, Layers, dofs, M, p) # if _inter[1] == "pem": # i_eq = interface_fluid_pem(i_eq, i_inter, Layers, dofs, M, p) # elif _inter[0] == "pem": # if _inter[1] == "fluid": # i_eq = interface_pem_fluid(i_eq, i_inter, Layers, dofs, M, p) # if _inter[1] == "pem": # i_eq = interface_pem_pem(i_eq, i_inter, Layers, dofs, M, p) # if S.backing == backing.rigid: # if Layers[-1].medium.MODEL == "fluid": # i_eq = interface_fluid_rigid(M, i_eq, Layers[-1], dofs[-1], p) # elif Layers[-1].medium.MODEL == "pem": # i_eq = interface_pem_rigid(M, i_eq, Layers[-1], dofs[-1], p) # if Layers[0].medium.MODEL == "fluid": # F = np.zeros(n, dtype=complex) # SV, k_y = fluid_SV(p.kx, p.k, Layers[0].medium.K) # M[i_eq, dofs[0][0]] = SV[0, 0] # M[i_eq, dofs[0][1]] = SV[0, 1]*np.exp(-1j*k_y*Layers[0].thickness) # F[i_eq] = 1. # elif Layers[0].medium.MODEL == "pem": # SV, k_y = PEM_SV(Layers[0].medium, p.kx) # M[i_eq, dofs[0][0]] = SV[2, 0] # M[i_eq, dofs[0][1]] = SV[2, 1] # M[i_eq, dofs[0][2]] = SV[2, 2] # M[i_eq, dofs[0][3]] = SV[2, 3]*np.exp(-1j*k_y[0]*Layers[0].thickness) # M[i_eq, dofs[0][4]] = SV[2, 4]*np.exp(-1j*k_y[1]*Layers[0].thickness) # M[i_eq, dofs[0][5]] = SV[2, 5]*np.exp(-1j*k_y[2]*Layers[0].thickness) # F = np.zeros(n, dtype=complex) # F[i_eq] = 1. 
# i_eq +=1 # M[i_eq, dofs[0][0]] = SV[0, 0] # M[i_eq, dofs[0][1]] = SV[0, 1] # M[i_eq, dofs[0][2]] = SV[0, 2] # M[i_eq, dofs[0][3]] = SV[0, 3]*np.exp(-1j*k_y[0]*Layers[0].thickness) # M[i_eq, dofs[0][4]] = SV[0, 4]*np.exp(-1j*k_y[1]*Layers[0].thickness) # M[i_eq, dofs[0][5]] = SV[0, 5]*np.exp(-1j*k_y[2]*Layers[0].thickness) # i_eq += 1 # M[i_eq, dofs[0][0]] = SV[3, 0] # M[i_eq, dofs[0][1]] = SV[3, 1] # M[i_eq, dofs[0][2]] = SV[3, 2] # M[i_eq, dofs[0][3]] = SV[3, 3]*np.exp(-1j*k_y[0]*Layers[0].thickness) # M[i_eq, dofs[0][4]] = SV[3, 4]*np.exp(-1j*k_y[1]*Layers[0].thickness) # M[i_eq, dofs[0][5]] = SV[3, 5]*np.exp(-1j*k_y[2]*Layers[0].thickness) # X = LA.solve(M, F) # # print("|R pyPLANES_PW| = {}".format(np.abs(X[0]))) # print("R pyPLANES_PW = {}".format(X[0])) # plot_sol_PW(S, X, dofs, p)
[ "pyPLANES.core.multilayer.MultiLayer.__init__", "numpy.linalg.solve", "pyPLANES.core.multilayer.MultiLayer.update_frequency", "numpy.delete", "pyPLANES.core.calculus.PwCalculus.update_frequency", "mediapack.Air", "numpy.exp", "numpy.zeros", "pyPLANES.core.calculus.PwCalculus.__init__", "pyPLANES.pw.pw_layers.FluidLayer", "pyPLANES.pw.pw_interfaces.FluidFluidInterface", "numpy.arange" ]
[((1311, 1316), 'mediapack.Air', 'Air', ([], {}), '()\n', (1314, 1316), False, 'from mediapack import Air, PEM, EqFluidJCA\n'), ((2063, 2098), 'pyPLANES.core.calculus.PwCalculus.__init__', 'PwCalculus.__init__', (['self'], {}), '(self, **kwargs)\n', (2082, 2098), False, 'from pyPLANES.core.calculus import PwCalculus\n'), ((2221, 2256), 'pyPLANES.core.multilayer.MultiLayer.__init__', 'MultiLayer.__init__', (['self'], {}), '(self, **kwargs)\n', (2240, 2256), False, 'from pyPLANES.core.multilayer import MultiLayer\n'), ((3281, 3317), 'pyPLANES.core.calculus.PwCalculus.update_frequency', 'PwCalculus.update_frequency', (['self', 'f'], {}), '(self, f)\n', (3308, 3317), False, 'from pyPLANES.core.calculus import PwCalculus\n'), ((3326, 3379), 'pyPLANES.core.multilayer.MultiLayer.update_frequency', 'MultiLayer.update_frequency', (['self', 'f', 'self.k', 'self.kx'], {}), '(self, f, self.k, self.kx)\n', (3353, 3379), False, 'from pyPLANES.core.multilayer import MultiLayer\n'), ((3438, 3491), 'numpy.zeros', 'np.zeros', (['(self.nb_PW - 1, self.nb_PW)'], {'dtype': 'complex'}), '((self.nb_PW - 1, self.nb_PW), dtype=complex)\n', (3446, 3491), True, 'import numpy as np\n'), ((5782, 5810), 'numpy.delete', 'np.delete', (['self.A', '(0)'], {'axis': '(1)'}), '(self.A, 0, axis=1)\n', (5791, 5810), True, 'import numpy as np\n'), ((5847, 5871), 'numpy.linalg.solve', 'LA.solve', (['self.A', 'self.F'], {}), '(self.A, self.F)\n', (5855, 5871), True, 'import numpy.linalg as LA\n'), ((5663, 5704), 'numpy.exp', 'np.exp', (['(1.0j * self.ky * self.layers[0].d)'], {}), '(1.0j * self.ky * self.layers[0].d)\n', (5669, 5704), True, 'import numpy as np\n'), ((2556, 2577), 'pyPLANES.pw.pw_layers.FluidLayer', 'FluidLayer', (['Air', '(0.01)'], {}), '(Air, 0.01)\n', (2566, 2577), False, 'from pyPLANES.pw.pw_layers import FluidLayer\n'), ((2679, 2730), 'pyPLANES.pw.pw_interfaces.FluidFluidInterface', 'FluidFluidInterface', (['self.layers[0]', 'self.layers[1]'], {}), '(self.layers[0], self.layers[1])\n', (2698, 2730), False, 'from pyPLANES.pw.pw_interfaces import FluidFluidInterface, RigidBacking\n'), ((2893, 2905), 'numpy.arange', 'np.arange', (['(2)'], {}), '(2)\n', (2902, 2905), True, 'import numpy as np\n'), ((3038, 3050), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (3047, 3050), True, 'import numpy as np\n'), ((3187, 3199), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (3196, 3199), True, 'import numpy as np\n')]
import os

import requests
from bs4 import BeautifulSoup

from ekorpkit import eKonf
from ekorpkit.io.download.web import web_download, web_download_unzip


class EDGAR:
    def __init__(self, **args):
        self.args = eKonf.to_config(args)
        self.base_url = self.args.base_url
        self.url = self.args.url
        self.output_dir = self.args.output_dir
        os.makedirs(self.output_dir, exist_ok=True)
        self.force_download = self.args.force_download
        self.name = self.args.name

        self.build()

    def build(self):
        if self.force_download or not os.listdir(self.output_dir):
            self.download_edgar()
        else:
            print(f"{self.name} is already downloaded")

    def download_edgar(self):
        user_agent = "Mozilla/5.0"
        headers = {"User-Agent": user_agent}
        page = requests.get(self.url, headers=headers)
        soup = BeautifulSoup(page.content, "html.parser")
        filelist = soup.find_all("a", class_="filename")
        for file in filelist:
            link = self.base_url + file.get("href")
            file_path = self.output_dir + "/" + file.get_text().strip()
            web_download(link, file_path, self.name, self.force_download)
[ "os.listdir", "os.makedirs", "ekorpkit.eKonf.to_config", "requests.get", "bs4.BeautifulSoup", "ekorpkit.io.download.web.web_download" ]
[((220, 241), 'ekorpkit.eKonf.to_config', 'eKonf.to_config', (['args'], {}), '(args)\n', (235, 241), False, 'from ekorpkit import eKonf\n'), ((373, 416), 'os.makedirs', 'os.makedirs', (['self.output_dir'], {'exist_ok': '(True)'}), '(self.output_dir, exist_ok=True)\n', (384, 416), False, 'import os\n'), ((849, 888), 'requests.get', 'requests.get', (['self.url'], {'headers': 'headers'}), '(self.url, headers=headers)\n', (861, 888), False, 'import requests\n'), ((905, 947), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.content', '"""html.parser"""'], {}), "(page.content, 'html.parser')\n", (918, 947), False, 'from bs4 import BeautifulSoup\n'), ((1172, 1233), 'ekorpkit.io.download.web.web_download', 'web_download', (['link', 'file_path', 'self.name', 'self.force_download'], {}), '(link, file_path, self.name, self.force_download)\n', (1184, 1233), False, 'from ekorpkit.io.download.web import web_download, web_download_unzip\n'), ((589, 616), 'os.listdir', 'os.listdir', (['self.output_dir'], {}), '(self.output_dir)\n', (599, 616), False, 'import os\n')]
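A hedged usage sketch for the EDGAR downloader above; the keyword arguments simply mirror the attributes read in __init__ (name, base_url, url, output_dir, force_download), and every value is a placeholder rather than a real endpoint.

# --- hedged usage sketch, not part of the record above ---
downloader = EDGAR(
    name="edgar",
    base_url="https://example.com",           # placeholder host
    url="https://example.com/filings.html",   # placeholder listing page
    output_dir="./data/edgar",
    force_download=False,
)
# __init__ calls self.build(), so construction already triggers the download
# when output_dir is empty or force_download is set.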
from HARK.ConsumptionSaving.ConsIndShockModel import PerfForesightConsumerType
import numpy as np
import unittest


class testPerfForesightConsumerType(unittest.TestCase):

    def setUp(self):
        self.agent = PerfForesightConsumerType()
        self.agent_infinite = PerfForesightConsumerType(cycles=0)

        PF_dictionary = {
            'CRRA': 2.5,
            'DiscFac': 0.96,
            'Rfree': 1.03,
            'LivPrb': [0.98],
            'PermGroFac': [1.01],
            'T_cycle': 1,
            'cycles': 0,
            'AgentCount': 10000
        }
        self.agent_alt = PerfForesightConsumerType(**PF_dictionary)

    def test_default_solution(self):
        self.agent.solve()
        c = self.agent.solution[0].cFunc

        self.assertEqual(c.x_list[0], -0.9805825242718447)
        self.assertEqual(c.x_list[1], 0.01941747572815533)
        self.assertEqual(c.y_list[0], 0)
        self.assertEqual(c.y_list[1], 0.511321002804608)
        self.assertEqual(c.decay_extrap, False)

    def test_another_solution(self):
        self.agent_alt.DiscFac = 0.90
        self.agent_alt.solve()
        self.assertAlmostEqual(
            self.agent_alt.solution[0].cFunc(10).tolist(),
            3.9750093524820787)

    def test_checkConditions(self):
        self.agent_infinite.checkConditions()
        self.assertTrue(self.agent_infinite.AIC)
        self.assertTrue(self.agent_infinite.GICPF)
        self.assertTrue(self.agent_infinite.RIC)
        self.assertTrue(self.agent_infinite.FHWC)

    def test_simulation(self):
        self.agent_infinite.solve()

        # Create parameter values necessary for simulation
        SimulationParams = {
            "AgentCount": 10000,   # Number of agents of this type
            "T_sim": 120,          # Number of periods to simulate
            "aNrmInitMean": -6.0,  # Mean of log initial assets
            "aNrmInitStd": 1.0,    # Standard deviation of log initial assets
            "pLvlInitMean": 0.0,   # Mean of log initial permanent income
            "pLvlInitStd": 0.0,    # Standard deviation of log initial permanent income
            "PermGroFacAgg": 1.0,  # Aggregate permanent income growth factor
            "T_age": None,         # Age after which simulated agents are automatically killed
        }

        self.agent_infinite(**SimulationParams)  # This implicitly uses the assignParameters method of AgentType

        # Create PFexample object
        self.agent_infinite.track_vars = ['mNrmNow']
        self.agent_infinite.initializeSim()
        self.agent_infinite.simulate()

        self.assertAlmostEqual(
            np.mean(self.agent_infinite.mNrmNow_hist, axis=1)[40],
            -23.008063500363942
        )

        self.assertAlmostEqual(
            np.mean(self.agent_infinite.mNrmNow_hist, axis=1)[100],
            -27.164608851546927
        )

        ## Try now with the manipulation at time step 80

        self.agent_infinite.initializeSim()
        self.agent_infinite.simulate(80)
        self.agent_infinite.aNrmNow += -5.  # Adjust all simulated consumers' assets downward by 5
        self.agent_infinite.simulate(40)

        self.assertAlmostEqual(
            np.mean(self.agent_infinite.mNrmNow_hist, axis=1)[40],
            -23.008063500363942
        )

        self.assertAlmostEqual(
            np.mean(self.agent_infinite.mNrmNow_hist, axis=1)[100],
            -29.140261331951606
        )
[ "numpy.mean", "HARK.ConsumptionSaving.ConsIndShockModel.PerfForesightConsumerType" ]
[((214, 241), 'HARK.ConsumptionSaving.ConsIndShockModel.PerfForesightConsumerType', 'PerfForesightConsumerType', ([], {}), '()\n', (239, 241), False, 'from HARK.ConsumptionSaving.ConsIndShockModel import PerfForesightConsumerType\n'), ((272, 307), 'HARK.ConsumptionSaving.ConsIndShockModel.PerfForesightConsumerType', 'PerfForesightConsumerType', ([], {'cycles': '(0)'}), '(cycles=0)\n', (297, 307), False, 'from HARK.ConsumptionSaving.ConsIndShockModel import PerfForesightConsumerType\n'), ((606, 648), 'HARK.ConsumptionSaving.ConsIndShockModel.PerfForesightConsumerType', 'PerfForesightConsumerType', ([], {}), '(**PF_dictionary)\n', (631, 648), False, 'from HARK.ConsumptionSaving.ConsIndShockModel import PerfForesightConsumerType\n'), ((2666, 2715), 'numpy.mean', 'np.mean', (['self.agent_infinite.mNrmNow_hist'], {'axis': '(1)'}), '(self.agent_infinite.mNrmNow_hist, axis=1)\n', (2673, 2715), True, 'import numpy as np\n'), ((2807, 2856), 'numpy.mean', 'np.mean', (['self.agent_infinite.mNrmNow_hist'], {'axis': '(1)'}), '(self.agent_infinite.mNrmNow_hist, axis=1)\n', (2814, 2856), True, 'import numpy as np\n'), ((3232, 3281), 'numpy.mean', 'np.mean', (['self.agent_infinite.mNrmNow_hist'], {'axis': '(1)'}), '(self.agent_infinite.mNrmNow_hist, axis=1)\n', (3239, 3281), True, 'import numpy as np\n'), ((3373, 3422), 'numpy.mean', 'np.mean', (['self.agent_infinite.mNrmNow_hist'], {'axis': '(1)'}), '(self.agent_infinite.mNrmNow_hist, axis=1)\n', (3380, 3422), True, 'import numpy as np\n')]
import os
from os import getcwd

#---------------------------------------------#
#   Before training, make sure `classes` is updated.
#   The class order must match the txt under model_data.
#---------------------------------------------#
classes = ["cat", "dog"]
sets = ["train", "test"]

wd = getcwd()
for se in sets:
    list_file = open('cls_' + se + '.txt', 'w')

    datasets_path = "datasets/" + se
    types_name = os.listdir(datasets_path)
    for type_name in types_name:
        if type_name not in classes:
            continue
        cls_id = classes.index(type_name)

        photos_path = os.path.join(datasets_path, type_name)
        photos_name = os.listdir(photos_path)
        for photo_name in photos_name:
            _, postfix = os.path.splitext(photo_name)
            if postfix not in ['.jpg', '.png', '.jpeg']:
                continue
            list_file.write(str(cls_id) + ";" + '%s/%s' % (wd, os.path.join(photos_path, photo_name)))
            list_file.write('\n')
    list_file.close()
[ "os.listdir", "os.path.join", "os.path.splitext", "os.getcwd" ]
[((246, 254), 'os.getcwd', 'getcwd', ([], {}), '()\n', (252, 254), False, 'from os import getcwd\n'), ((379, 404), 'os.listdir', 'os.listdir', (['datasets_path'], {}), '(datasets_path)\n', (389, 404), False, 'import os\n'), ((575, 613), 'os.path.join', 'os.path.join', (['datasets_path', 'type_name'], {}), '(datasets_path, type_name)\n', (587, 613), False, 'import os\n'), ((637, 660), 'os.listdir', 'os.listdir', (['photos_path'], {}), '(photos_path)\n', (647, 660), False, 'import os\n'), ((727, 755), 'os.path.splitext', 'os.path.splitext', (['photo_name'], {}), '(photo_name)\n', (743, 755), False, 'import os\n'), ((902, 939), 'os.path.join', 'os.path.join', (['photos_path', 'photo_name'], {}), '(photos_path, photo_name)\n', (914, 939), False, 'import os\n')]
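For clarity, this is the line format the annotation script above writes to cls_train.txt / cls_test.txt; the working directory and image file names are illustrative, not taken from the source.

# --- illustrative output lines, not part of the record above ---
# <class id>;<absolute path to image>
# 0;/home/user/project/datasets/train/cat/cat_0001.jpg
# 1;/home/user/project/datasets/train/dog/dog_0042.jpg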
from PIL import Image from math import sqrt import numpy as np import time import matplotlib.backends.backend_tkagg import matplotlib.pyplot as plt class Point: x: float y: float f: float h: float g: float def __init__(self, x, y, f): self.x = x self.y = y self.f = f self.g = 0 self.h = 0 self.parent = None def equal(self, other): if self.x == other.x and self.y == other.y: return True class Output: result_image: Image total_time: float n_elements: int max_elements: int def __init__(self, result_image, total_time, n_elements, max_elements): self.result_image = result_image self.total_time = total_time self.n_elements = n_elements self.max_elements = max_elements self.name = None def plot_times(self, other1, other2, other3): fig, ax = plt.subplots() ax.bar([self.name, other1.name, other2.name, other3.name], [self.total_time, other1.total_time, other2.total_time, other3.total_time]) fig.suptitle("Toplam Zamanlar") fname = image_name.split('.') plt.savefig(fname[0] + "times.png") plt.show() def plot_n_elements(self, other1, other2, other3): fig, ax = plt.subplots() ax.bar([self.name, other1.name, other2.name, other3.name], [self.n_elements, other1.n_elements, other2.n_elements, other3.n_elements]) fig.suptitle("Stack'ten Çekilen Toplam Eleman Sayısı") fname = image_name.split('.') plt.savefig(fname[0] + "n_elements.png") plt.show() def plot_max_elements(self, other1, other2, other3): fig, ax = plt.subplots() ax.bar([self.name, other1.name, other2.name, other3.name], [self.max_elements, other1.max_elements, other2.max_elements, other3.max_elements]) fig.suptitle("Stack'te Bulunan Maksimum Eleman Sayısı") fname = image_name.split('.') plt.savefig(fname[0] + "max_elements.png") plt.show() def distance(point, x, y): return sqrt((point.x - x)**2 + (point.y - y)**2) def insert_in_heap(heap, top, point): heap.append(point) i = top parent = (i - 1)/2 while i >= 1 and heap[int(i)].f < heap[int(parent)].f: heap[int(i)], heap[int(parent)] = heap[int(parent)], heap[int(i)] # swap i = parent parent = (i - 1) / 2 return def calculate_weight(x, y, liste, top, point, visited, index1, index2): if visited[int(x)][int(y)] == 0: r, g, b = image.getpixel((x, y)) if x == end.x and y == end.y: print("Path found.") if r is 0: r = 1 new_point = Point(x, y, 0) new_point.parent = point new_point.h = distance(end, x, y) * (256 - r) new_point.g = 0 if index1 == 1: # a_star new_point.g = new_point.parent.g + 256 - r new_point.f = new_point.h + new_point.g # bfs'de g = 0 if index2 == 0: # stack liste.append(new_point) else: # heap insert_in_heap(liste, top, new_point) top += 1 visited[int(x)][int(y)] = 1 return top def add_neighbours(point, liste, top, visited, index1, index2): # print(point.x, point.y) if (point.x == width - 1 and point.y == height - 1) or (point.x == 0 and point.y == 0) or \ (point.x == 0 and point.y == height - 1) or (point.x == width - 1 and point.y == 0): # print("first if") if point.x == width - 1 and point.y == height - 1: constx = -1 consty = -1 elif point.x == 0 and point.y == 0: constx = 1 consty = 1 elif point.x == width - 1 and point.y == 0: constx = 1 consty = -1 else: constx = -1 consty = 1 top = calculate_weight(point.x + constx, point.y, liste, top, point, visited, index1, index2) top = calculate_weight(point.x, point.y + consty, liste, top, point, visited, index1, index2) top = calculate_weight(point.x + constx, point.y + consty, liste, top, point, visited, index1, index2) elif point.x == 0 or point.x == width - 1: # print("nd if") top = calculate_weight(point.x, point.y - 1, liste, top, 
point, visited, index1, index2) top = calculate_weight(point.x, point.y + 1, liste, top, point, visited, index1, index2) if point.x == 0: const = 1 else: const = -1 top = calculate_weight(point.x + const, point.y - 1, liste, top, point, visited, index1, index2) top = calculate_weight(point.x + const, point.y + 1, liste, top, point, visited, index1, index2) top = calculate_weight(point.x + const, point.y, liste, top, point, visited, index1, index2) elif point.y == 0 or point.y == height - 1: # print("3rd if") top = calculate_weight(point.x - 1, point.y, liste, top, point, visited, index1, index2) top = calculate_weight(point.x + 1, point.y, liste, top, point, visited, index1, index2) if point.y == 0: const = 1 else: const = -1 top = calculate_weight(point.x - 1, point.y + const, liste, top, point, visited, index1, index2) top = calculate_weight(point.x + 1, point.y + const, liste, top, point, visited, index1, index2) top = calculate_weight(point.x, point.y + const, liste, top, point, visited, index1, index2) else: # print("4th if") top = calculate_weight(point.x - 1, point.y, liste, top, point, visited, index1, index2) top = calculate_weight(point.x - 1, point.y - 1, liste, top, point, visited, index1, index2) top = calculate_weight(point.x - 1, point.y + 1, liste, top, point, visited, index1, index2) top = calculate_weight(point.x + 1, point.y - 1, liste, top, point, visited, index1, index2) top = calculate_weight(point.x + 1, point.y, liste, top, point, visited, index1, index2) top = calculate_weight(point.x + 1, point.y + 1, liste, top, point, visited, index1, index2) top = calculate_weight(point.x, point.y + 1, liste, top, point, visited, index1, index2) top = calculate_weight(point.x, point.y - 1, liste, top, point, visited, index1, index2) return top def paint(point): yol = [] while not point.equal(start): yol.append(point) image.putpixel((int(point.x), int(point.y)), (60, 255, 0)) point = point.parent end_time = time.time() # image.show() '''print("--------------YOL------------------") for i in range(len(yol)): print("x: {}, y:{}, distance:{}".format(yol[i].x, yol[i].y, yol[i].f)) print("------------------------------------")''' return image, (end_time - start_time) def bfs_and_a_star_with_stack(index): stack = [] top = 0 found = False point = None stack.append(start) visited = np.zeros((width, height)) visited[int(start.x)][int(start.y)] = 1 j = 0 max_element = 0 while stack and not found: point = stack.pop(top) # print("x: {}, y:{}, f:{}".format(point.x, point.y, point.f)) top -= 1 if point.equal(end): found = True else: top = add_neighbours(point, stack, top, visited, index, 0) stack.sort(key=lambda point: point.f, reverse=True) if len(stack) > max_element: max_element = len(stack) j += 1 if found: result_image, total_time = paint(point) # print("Stackten çekilen eleman sayısı: ", j) # print("Stackteki maksimum eleman sayısı: ", max_element) return result_image, total_time, j, max_element def find_smallest_child(heap, i, top): if 2 * i + 2 < top: # has two child if heap[2*i + 1].f < heap[2*i + 2].f: return 2*i + 1 else: return 2*i + 2 elif 2*i + 1 < top: # has one child return 2*i + 1 else: # has no child return 0 def remove_min(heap, top): if top == 0: return None min_point = heap[0] top -= 1 heap[0] = heap[top] del heap[top] i = 0 index = find_smallest_child(heap, i, top) while index != 0 and heap[i].f > heap[index].f: heap[i], heap[index] = heap[index], heap[i] i = index index = find_smallest_child(heap, i, top) return min_point, top def bfs_and_a_star_with_heap(index): heap = 
[] found = False yol = [] point = None heap.append(start) visited = np.zeros((width, height)) visited[int(start.x)][int(start.y)] = 1 j = 0 top = 1 max_element = 0 while heap and not found: point, top = remove_min(heap, top) # print("x: {}, y:{}, f:{}".format(point.x, point.y, point.f)) if point.equal(end): found = True else: top = add_neighbours(point, heap, top, visited, index, 1) if len(heap) > max_element: max_element = len(heap) j += 1 if found: result_image, total_time = paint(point) else: return return result_image, total_time, j, max_element if __name__ == "__main__": print("UYARI: Seçilecek görüntü exe dosyası ile aynı klasörde olmalıdır.") image_name = input("Algoritmanın üzerinde çalışacağı görüntünün ismini giriniz (Örnek input: image.png): ") print(image_name) print("-------------------Algoritmalar------------------") print("1- Best First Search with Stack") print("2- Best First Search with Heap") print("3- A* with Stack") print("4- A* with Heap") print("5- Analiz (tüm algoritmaların çalışmalarını ve kıyaslamalarını gör)") alg = input("Algoritmayı ve veri yapısının numarasını seçiniz (Örnek input: 1): ") image = Image.open(image_name) width, height = image.size image = image.convert('RGB') print("Görüntünün genişliği: {}, yüksekliği: {}".format(width, height)) print("NOT: Başlangıç ve bitiş noktasının koordinatları genişlik ve uzunluktan küçük olmalıdır.") sx, sy = input("Başlangıç noktasının x ve y piksel koordinatlarını sırasıyla giriniz (Örnek input: 350 100): ").split() ex, ey = input("Bitiş noktasının x ve y piksel koordinatlarını sırasıyla giriniz (Örnek input: 200 700): ").split() start = Point(int(sx), int(sy), -1) start.parent = -1 end = Point(int(ex), int(ey), -1) start_time = time.time() if int(alg) == 1: result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_stack(0) elif int(alg) == 2: result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_heap(0) elif int(alg) == 3: result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_stack(1) elif int(alg) == 4: result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_heap(1) elif int(alg) == 5: result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_stack(0) output1 = Output(result_image, total_time, n_elements, max_elements) print(n_elements, total_time, max_elements) output1.name = "BFS with Stack" print("1/4") image = Image.open(image_name) width, height = image.size image = image.convert('RGB') start_time = time.time() result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_heap(0) output2 = Output(result_image, total_time, n_elements, max_elements) print(n_elements, total_time, max_elements) output2.name = "BFS with Heap" print("2/4") image = Image.open(image_name) width, height = image.size image = image.convert('RGB') start_time = time.time() result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_stack(1) output3 = Output(result_image, total_time, n_elements, max_elements) output3.name = "A* with Stack" print(n_elements, total_time, max_elements) print("3/4") image = Image.open(image_name) width, height = image.size image = image.convert('RGB') start_time = time.time() result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_heap(1) output4 = Output(result_image, total_time, n_elements, max_elements) output4.name = "A* with Heap" print("4/4") output1.plot_times(output2, output3, output4) output1.plot_max_elements(output2, output3, output4) output1.plot_n_elements(output2, output3, 
output4) print("Bastırılan görüntüler sırasıyla BFS stack, BFS heap, A* stack ve A* heap şeklindedir.") fname = image_name.split('.') output1.result_image.show() output1.result_image.save(fname[0] + "BFS_stack.png") output2.result_image.show() output2.result_image.save(fname[0] + "BFS_heap.png") output3.result_image.show() output3.result_image.save(fname[0] + "A_star_stack.png") output4.result_image.show() output4.result_image.save(fname[0] + "A_star_heap.png") exit(0) else: print("Algoritma numarası hatalı girildi, tekrar deneyin.") exit(0) print("Stackten çekilen eleman sayısı: ", n_elements) print("Stackteki maksimum eleman sayısı: ", max_elements) print("Toplam süre: ", total_time) result_image.show()
[ "PIL.Image.open", "matplotlib.pyplot.savefig", "math.sqrt", "numpy.zeros", "time.time", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show" ]
[((2189, 2234), 'math.sqrt', 'sqrt', (['((point.x - x) ** 2 + (point.y - y) ** 2)'], {}), '((point.x - x) ** 2 + (point.y - y) ** 2)\n', (2193, 2234), False, 'from math import sqrt\n'), ((6777, 6788), 'time.time', 'time.time', ([], {}), '()\n', (6786, 6788), False, 'import time\n'), ((7223, 7248), 'numpy.zeros', 'np.zeros', (['(width, height)'], {}), '((width, height))\n', (7231, 7248), True, 'import numpy as np\n'), ((8920, 8945), 'numpy.zeros', 'np.zeros', (['(width, height)'], {}), '((width, height))\n', (8928, 8945), True, 'import numpy as np\n'), ((10232, 10254), 'PIL.Image.open', 'Image.open', (['image_name'], {}), '(image_name)\n', (10242, 10254), False, 'from PIL import Image\n'), ((10876, 10887), 'time.time', 'time.time', ([], {}), '()\n', (10885, 10887), False, 'import time\n'), ((962, 976), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (974, 976), True, 'import matplotlib.pyplot as plt\n'), ((1226, 1261), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fname[0] + 'times.png')"], {}), "(fname[0] + 'times.png')\n", (1237, 1261), True, 'import matplotlib.pyplot as plt\n'), ((1271, 1281), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1279, 1281), True, 'import matplotlib.pyplot as plt\n'), ((1359, 1373), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1371, 1373), True, 'import matplotlib.pyplot as plt\n'), ((1646, 1686), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fname[0] + 'n_elements.png')"], {}), "(fname[0] + 'n_elements.png')\n", (1657, 1686), True, 'import matplotlib.pyplot as plt\n'), ((1696, 1706), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1704, 1706), True, 'import matplotlib.pyplot as plt\n'), ((1786, 1800), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1798, 1800), True, 'import matplotlib.pyplot as plt\n'), ((2082, 2124), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fname[0] + 'max_elements.png')"], {}), "(fname[0] + 'max_elements.png')\n", (2093, 2124), True, 'import matplotlib.pyplot as plt\n'), ((2134, 2144), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2142, 2144), True, 'import matplotlib.pyplot as plt\n'), ((11681, 11703), 'PIL.Image.open', 'Image.open', (['image_name'], {}), '(image_name)\n', (11691, 11703), False, 'from PIL import Image\n'), ((11802, 11813), 'time.time', 'time.time', ([], {}), '()\n', (11811, 11813), False, 'import time\n'), ((12116, 12138), 'PIL.Image.open', 'Image.open', (['image_name'], {}), '(image_name)\n', (12126, 12138), False, 'from PIL import Image\n'), ((12237, 12248), 'time.time', 'time.time', ([], {}), '()\n', (12246, 12248), False, 'import time\n'), ((12552, 12574), 'PIL.Image.open', 'Image.open', (['image_name'], {}), '(image_name)\n', (12562, 12574), False, 'from PIL import Image\n'), ((12673, 12684), 'time.time', 'time.time', ([], {}), '()\n', (12682, 12684), False, 'import time\n')]
#Author <NAME> import time import rnnoise import numpy as np def time_rnnoise(rounds=1000): a = rnnoise.RNNoise() timer = 0.0 st = time.time() for i in range(rounds): inp = np.random.bytes(960) timer = (time.time() - st) print(timer) st = time.time() for i in range(rounds): inp = np.random.bytes(960) va,out = a.process_frame(inp) time_taken_per_frame = ((time.time()-st)-timer) /rounds print("time taken for one frame - " + str(time_taken_per_frame )) print("time in a frame - " +str(480.0/48000.0)) print(str((480.0/48000.0)/time_taken_per_frame )+"X faster than real") a.destroy() time_rnnoise()
[ "rnnoise.RNNoise", "time.time", "numpy.random.bytes" ]
[((97, 114), 'rnnoise.RNNoise', 'rnnoise.RNNoise', ([], {}), '()\n', (112, 114), False, 'import rnnoise\n'), ((134, 145), 'time.time', 'time.time', ([], {}), '()\n', (143, 145), False, 'import time\n'), ((248, 259), 'time.time', 'time.time', ([], {}), '()\n', (257, 259), False, 'import time\n'), ((179, 199), 'numpy.random.bytes', 'np.random.bytes', (['(960)'], {}), '(960)\n', (194, 199), True, 'import numpy as np\n'), ((210, 221), 'time.time', 'time.time', ([], {}), '()\n', (219, 221), False, 'import time\n'), ((293, 313), 'numpy.random.bytes', 'np.random.bytes', (['(960)'], {}), '(960)\n', (308, 313), True, 'import numpy as np\n'), ((372, 383), 'time.time', 'time.time', ([], {}), '()\n', (381, 383), False, 'import time\n')]
""" shell sort tests module """ import unittest import random from sort import shell from tests import helper class ShellSortTests(unittest.TestCase): """ shell sort unit tests class """ max = 100 arr = [] def setUp(self): """ setting up for the test """ self.arr = random.sample(range(self.max), self.max) def test_null_input(self): """ should raise when input array is None """ # arrange inp = None # act with self.assertRaises(TypeError) as ex: shell.sort(inp) # assert self.assertEqual("'NoneType' object is not iterable", str(ex.exception)) def test_empty_input(self): """ should return [] when input array is empty """ # arrange inp = [] # act res = shell.sort(inp) # assert self.assertEqual(len(inp), len(res)) def test_sort_a_given_array(self): """ should sort a given array """ # act res = shell.sort(self.arr[:]) # assert self.assertTrue(helper.is_sorted(res))
[ "tests.helper.is_sorted", "sort.shell.sort" ]
[((814, 829), 'sort.shell.sort', 'shell.sort', (['inp'], {}), '(inp)\n', (824, 829), False, 'from sort import shell\n'), ((1003, 1026), 'sort.shell.sort', 'shell.sort', (['self.arr[:]'], {}), '(self.arr[:])\n', (1013, 1026), False, 'from sort import shell\n'), ((543, 558), 'sort.shell.sort', 'shell.sort', (['inp'], {}), '(inp)\n', (553, 558), False, 'from sort import shell\n'), ((1069, 1090), 'tests.helper.is_sorted', 'helper.is_sorted', (['res'], {}), '(res)\n', (1085, 1090), False, 'from tests import helper\n')]
from django_cron import CronJobBase, Schedule class VerifyLicenceSpeciesJob(CronJobBase): """ Verifies LicenceSpecies against TSC server. """ RUN_AT_TIMES = ['00:00'] schedule = Schedule(run_at_times=RUN_AT_TIMES) code = 'applications.verify_licence_species' def do(self): pass
[ "django_cron.Schedule" ]
[((201, 236), 'django_cron.Schedule', 'Schedule', ([], {'run_at_times': 'RUN_AT_TIMES'}), '(run_at_times=RUN_AT_TIMES)\n', (209, 236), False, 'from django_cron import CronJobBase, Schedule\n')]
# Copyright (c) 2009-2020, quasardb SAS. All rights reserved. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of quasardb nor the names of its contributors may # be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY QUASARDB AND CONTRIBUTORS ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # from __future__ import print_function from builtins import range as xrange, int import os from socket import gethostname import sys import inspect import traceback import random import time import datetime import locale import numpy as np import quasardb STOCK_COLUMN = "stock_id" OPEN_COLUMN = "open" CLOSE_COLUMN = "close" HIGH_COLUMN = "high" LOW_COLUMN = "low" VOLUME_COLUMN = "volume" def time_execution(str, f, *args): print(" - ", str, end='') start_time = time.time() res = f(*args) end_time = time.time() print(" [duration: {}s]".format(end_time - start_time)) return res def gen_ts_name(): return "test.{}.{}.{}".format(gethostname(), os.getpid(), random.randint(0, 100000)) def create_ts(q, name): ts = q.ts(name) ts.create([quasardb.ColumnInfo(quasardb.ColumnType.Int64, STOCK_COLUMN), quasardb.ColumnInfo(quasardb.ColumnType.Double, OPEN_COLUMN), quasardb.ColumnInfo(quasardb.ColumnType.Double, CLOSE_COLUMN), quasardb.ColumnInfo(quasardb.ColumnType.Double, HIGH_COLUMN), quasardb.ColumnInfo(quasardb.ColumnType.Double, LOW_COLUMN), quasardb.ColumnInfo(quasardb.ColumnType.Int64, VOLUME_COLUMN)]) return ts def create_many_ts(q, names): return [create_ts(q, x) for x in names] def generate_prices(price_count): return np.random.uniform(-100.0, 100.0, price_count) def generate_points(points_count): start_time = np.datetime64('2017-01-01', 'ns') dates = np.array([(start_time + np.timedelta64(i, 'm')) for i in range(points_count)]).astype('datetime64[ns]') stock_ids = np.random.randint(1, 25, size=points_count) prices = np.array([generate_prices(60) for i in range(points_count)]).astype('double') volumes = np.random.randint(0, 10000, points_count) return (dates, stock_ids, prices, volumes) def batch_ts_columns(ts_name, prealloc_size): return (quasardb.BatchColumnInfo(ts_name, STOCK_COLUMN, prealloc_size), quasardb.BatchColumnInfo(ts_name, OPEN_COLUMN, prealloc_size), quasardb.BatchColumnInfo(ts_name, CLOSE_COLUMN, prealloc_size), quasardb.BatchColumnInfo(ts_name, HIGH_COLUMN, prealloc_size), 
quasardb.BatchColumnInfo(ts_name, LOW_COLUMN, prealloc_size), quasardb.BatchColumnInfo(ts_name, VOLUME_COLUMN, prealloc_size)) def calculate_minute_bar(prices): # Takes all prices for a single minute, and calculate OHLC return (prices[0], prices[-1], np.amax(prices), np.amin(prices)) def bulk_insert(q, ts_names, dates, stock_ids, prices, volumes): # We generate a flattened list of columns for each timeseries; for example, # for 2 columns for 4 timeseries each, we have 8 columns. columns = [column for nested in (batch_ts_columns(ts_name, len(dates)) for ts_name in ts_names) for column in nested] batch_inserter = q.ts_batch(columns) for i in range(len(stock_ids)): # We use the known layout of column (2 for each timeseries, alternating with # STOCK_COLUMN and PRICE_COLUMN) to set the values. for j in range(0, len(ts_names) * 6, 6): (o, c, h, l) = calculate_minute_bar(prices[i]) batch_inserter.start_row(dates[i]) batch_inserter.set_int64(j, stock_ids[i]) # set stock_id batch_inserter.set_double(j + 1, o) # open batch_inserter.set_double(j + 2, c) # close batch_inserter.set_double(j + 3, h) # high batch_inserter.set_double(j + 4, l) # low batch_inserter.set_int64(j + 5, volumes[i]) # low batch_inserter.push() def make_it_so(q, points_count): ts_names = [gen_ts_name(), gen_ts_name()] ts = time_execution("Creating a time series with names {}".format(ts_names), create_many_ts, q, ts_names) (dates, stock_ids, prices, volumes) = time_execution("Generating {:,} points".format(points_count), generate_points, points_count) time_execution("Inserting {:,} points into timeseries with names {}".format(points_count, ts_names), bulk_insert, q, ts_names, dates, stock_ids, prices, volumes) return (ts_names, dates, np.unique(stock_ids)) def main(quasardb_uri, points_count): print("Connecting to: ", quasardb_uri) q = quasardb.Cluster(uri=quasardb_uri) print(" *** Inserting {:,} into {}".format(points_count, quasardb_uri)) make_it_so(q, points_count) if __name__ == "__main__": try: if len(sys.argv) != 3: print("usage: ", sys.argv[0], " quasardb_uri points_count") sys.exit(1) main(sys.argv[1], int(sys.argv[2])) except Exception as ex: # pylint: disable=W0703 print("An error ocurred:", str(ex)) traceback.print_exc()
[ "numpy.amax", "numpy.amin", "numpy.unique", "sys.exit", "quasardb.ColumnInfo", "quasardb.Cluster", "numpy.random.randint", "numpy.random.uniform", "quasardb.BatchColumnInfo", "os.getpid", "numpy.datetime64", "numpy.timedelta64", "builtins.int", "socket.gethostname", "traceback.print_exc", "time.time", "random.randint" ]
[((2039, 2050), 'time.time', 'time.time', ([], {}), '()\n', (2048, 2050), False, 'import time\n'), ((2085, 2096), 'time.time', 'time.time', ([], {}), '()\n', (2094, 2096), False, 'import time\n'), ((2928, 2973), 'numpy.random.uniform', 'np.random.uniform', (['(-100.0)', '(100.0)', 'price_count'], {}), '(-100.0, 100.0, price_count)\n', (2945, 2973), True, 'import numpy as np\n'), ((3027, 3060), 'numpy.datetime64', 'np.datetime64', (['"""2017-01-01"""', '"""ns"""'], {}), "('2017-01-01', 'ns')\n", (3040, 3060), True, 'import numpy as np\n'), ((3194, 3237), 'numpy.random.randint', 'np.random.randint', (['(1)', '(25)'], {'size': 'points_count'}), '(1, 25, size=points_count)\n', (3211, 3237), True, 'import numpy as np\n'), ((3343, 3384), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10000)', 'points_count'], {}), '(0, 10000, points_count)\n', (3360, 3384), True, 'import numpy as np\n'), ((5873, 5907), 'quasardb.Cluster', 'quasardb.Cluster', ([], {'uri': 'quasardb_uri'}), '(uri=quasardb_uri)\n', (5889, 5907), False, 'import quasardb\n'), ((2228, 2241), 'socket.gethostname', 'gethostname', ([], {}), '()\n', (2239, 2241), False, 'from socket import gethostname\n'), ((2243, 2254), 'os.getpid', 'os.getpid', ([], {}), '()\n', (2252, 2254), False, 'import os\n'), ((2256, 2281), 'random.randint', 'random.randint', (['(0)', '(100000)'], {}), '(0, 100000)\n', (2270, 2281), False, 'import random\n'), ((3492, 3554), 'quasardb.BatchColumnInfo', 'quasardb.BatchColumnInfo', (['ts_name', 'STOCK_COLUMN', 'prealloc_size'], {}), '(ts_name, STOCK_COLUMN, prealloc_size)\n', (3516, 3554), False, 'import quasardb\n'), ((3568, 3629), 'quasardb.BatchColumnInfo', 'quasardb.BatchColumnInfo', (['ts_name', 'OPEN_COLUMN', 'prealloc_size'], {}), '(ts_name, OPEN_COLUMN, prealloc_size)\n', (3592, 3629), False, 'import quasardb\n'), ((3643, 3705), 'quasardb.BatchColumnInfo', 'quasardb.BatchColumnInfo', (['ts_name', 'CLOSE_COLUMN', 'prealloc_size'], {}), '(ts_name, CLOSE_COLUMN, prealloc_size)\n', (3667, 3705), False, 'import quasardb\n'), ((3719, 3780), 'quasardb.BatchColumnInfo', 'quasardb.BatchColumnInfo', (['ts_name', 'HIGH_COLUMN', 'prealloc_size'], {}), '(ts_name, HIGH_COLUMN, prealloc_size)\n', (3743, 3780), False, 'import quasardb\n'), ((3794, 3854), 'quasardb.BatchColumnInfo', 'quasardb.BatchColumnInfo', (['ts_name', 'LOW_COLUMN', 'prealloc_size'], {}), '(ts_name, LOW_COLUMN, prealloc_size)\n', (3818, 3854), False, 'import quasardb\n'), ((3868, 3931), 'quasardb.BatchColumnInfo', 'quasardb.BatchColumnInfo', (['ts_name', 'VOLUME_COLUMN', 'prealloc_size'], {}), '(ts_name, VOLUME_COLUMN, prealloc_size)\n', (3892, 3931), False, 'import quasardb\n'), ((4066, 4081), 'numpy.amax', 'np.amax', (['prices'], {}), '(prices)\n', (4073, 4081), True, 'import numpy as np\n'), ((4083, 4098), 'numpy.amin', 'np.amin', (['prices'], {}), '(prices)\n', (4090, 4098), True, 'import numpy as np\n'), ((5761, 5781), 'numpy.unique', 'np.unique', (['stock_ids'], {}), '(stock_ids)\n', (5770, 5781), True, 'import numpy as np\n'), ((2343, 2403), 'quasardb.ColumnInfo', 'quasardb.ColumnInfo', (['quasardb.ColumnType.Int64', 'STOCK_COLUMN'], {}), '(quasardb.ColumnType.Int64, STOCK_COLUMN)\n', (2362, 2403), False, 'import quasardb\n'), ((2420, 2480), 'quasardb.ColumnInfo', 'quasardb.ColumnInfo', (['quasardb.ColumnType.Double', 'OPEN_COLUMN'], {}), '(quasardb.ColumnType.Double, OPEN_COLUMN)\n', (2439, 2480), False, 'import quasardb\n'), ((2497, 2558), 'quasardb.ColumnInfo', 'quasardb.ColumnInfo', (['quasardb.ColumnType.Double', 'CLOSE_COLUMN'], {}), 
'(quasardb.ColumnType.Double, CLOSE_COLUMN)\n', (2516, 2558), False, 'import quasardb\n'), ((2575, 2635), 'quasardb.ColumnInfo', 'quasardb.ColumnInfo', (['quasardb.ColumnType.Double', 'HIGH_COLUMN'], {}), '(quasardb.ColumnType.Double, HIGH_COLUMN)\n', (2594, 2635), False, 'import quasardb\n'), ((2652, 2711), 'quasardb.ColumnInfo', 'quasardb.ColumnInfo', (['quasardb.ColumnType.Double', 'LOW_COLUMN'], {}), '(quasardb.ColumnType.Double, LOW_COLUMN)\n', (2671, 2711), False, 'import quasardb\n'), ((2728, 2789), 'quasardb.ColumnInfo', 'quasardb.ColumnInfo', (['quasardb.ColumnType.Int64', 'VOLUME_COLUMN'], {}), '(quasardb.ColumnType.Int64, VOLUME_COLUMN)\n', (2747, 2789), False, 'import quasardb\n'), ((6170, 6181), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6178, 6181), False, 'import sys\n'), ((6209, 6225), 'builtins.int', 'int', (['sys.argv[2]'], {}), '(sys.argv[2])\n', (6212, 6225), False, 'from builtins import range as xrange, int\n'), ((6333, 6354), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (6352, 6354), False, 'import traceback\n'), ((3098, 3120), 'numpy.timedelta64', 'np.timedelta64', (['i', '"""m"""'], {}), "(i, 'm')\n", (3112, 3120), True, 'import numpy as np\n')]
from nerwhal.backends.flashtext_backend import FlashtextBackend from nerwhal.recognizer_bases import FlashtextRecognizer def test_single_recognizer(embed): class TestRecognizer(FlashtextRecognizer): TAG = "XX" SCORE = 1.0 @property def keywords(self): return ["abc", "cde"] backend = FlashtextBackend() backend.register_recognizer(TestRecognizer) text = "Das ist abc und cde." ents = backend.run(text) assert embed(text, ents) == "Das ist XX und XX." assert ents[0].start_char == 8 assert ents[0].end_char == 11 assert ents[0].tag == "XX" assert ents[0].text == "abc" assert ents[0].score == 1.0 assert ents[0].recognizer == "TestRecognizer" def test_multiple_recognizers(embed): class TestRecognizerA(FlashtextRecognizer): TAG = "A" SCORE = 1.0 @property def keywords(self): return ["abc"] class TestRecognizerB(FlashtextRecognizer): TAG = "B" SCORE = 0.5 @property def keywords(self): return ["cde"] backend = FlashtextBackend() backend.register_recognizer(TestRecognizerA) backend.register_recognizer(TestRecognizerB) text = "Das ist abc und cde." ents = backend.run(text) assert embed(text, ents) == "Das ist A und B." assert ents[0].tag == "A" assert ents[0].score == 1.0 assert ents[1].tag == "B" assert ents[1].score == 0.5 def test_overlapping_recognizers(embed): class TestRecognizerA(FlashtextRecognizer): TAG = "A" SCORE = 1.0 @property def keywords(self): return ["abc", "cde"] class TestRecognizerB(FlashtextRecognizer): TAG = "B" SCORE = 0.5 @property def keywords(self): return ["cde", "fgh"] backend = FlashtextBackend() backend.register_recognizer(TestRecognizerA) backend.register_recognizer(TestRecognizerB) text = "Das ist cde." ents = backend.run(text) # Recognizer B overwrites the keyword "cde" assert embed(text, ents) == "Das ist B."
[ "nerwhal.backends.flashtext_backend.FlashtextBackend" ]
[((340, 358), 'nerwhal.backends.flashtext_backend.FlashtextBackend', 'FlashtextBackend', ([], {}), '()\n', (356, 358), False, 'from nerwhal.backends.flashtext_backend import FlashtextBackend\n'), ((1114, 1132), 'nerwhal.backends.flashtext_backend.FlashtextBackend', 'FlashtextBackend', ([], {}), '()\n', (1130, 1132), False, 'from nerwhal.backends.flashtext_backend import FlashtextBackend\n'), ((1862, 1880), 'nerwhal.backends.flashtext_backend.FlashtextBackend', 'FlashtextBackend', ([], {}), '()\n', (1878, 1880), False, 'from nerwhal.backends.flashtext_backend import FlashtextBackend\n')]
import pytest import cudf import mock from cuxfilter.charts.core.non_aggregate.core_non_aggregate import ( BaseNonAggregate, ) from cuxfilter.dashboard import DashBoard from cuxfilter import DataFrame from cuxfilter.layouts import chart_view class TestCoreNonAggregateChart: def test_variables(self): bnac = BaseNonAggregate() # BaseChart variables assert bnac.chart_type is None assert bnac.x is None assert bnac.y is None assert bnac.aggregate_fn == "count" assert bnac.color is None assert bnac.height == 0 assert bnac.width == 0 assert bnac.add_interaction is True assert bnac.chart is None assert bnac.source is None assert bnac.source_backup is None assert bnac.data_points == 0 assert bnac._library_specific_params == {} assert bnac.stride is None assert bnac.stride_type == int assert bnac.min_value == 0.0 assert bnac.max_value == 0.0 assert bnac.x_label_map == {} assert bnac.y_label_map == {} assert bnac.title == "" # test chart name setter bnac.x = "x" bnac.y = "y" bnac.chart_type = "test_chart_type" assert bnac.name == "x_y_count_test_chart_type_" # BaseNonAggregateChart variables assert bnac.use_data_tiles is False assert bnac.reset_event is None assert bnac.x_range is None assert bnac.y_range is None assert bnac.aggregate_col is None def test_label_mappers(self): bnac = BaseNonAggregate() library_specific_params = { "x_label_map": {"a": 1, "b": 2}, "y_label_map": {"a": 1, "b": 2}, } bnac.library_specific_params = library_specific_params assert bnac.x_label_map == {"a": 1, "b": 2} assert bnac.y_label_map == {"a": 1, "b": 2} @pytest.mark.parametrize("chart, _chart", [(None, None), (1, 1)]) def test_view(self, chart, _chart): bnac = BaseNonAggregate() bnac.chart = chart bnac.width = 400 bnac.title = "test_title" assert str(bnac.view()) == str( chart_view(_chart, width=bnac.width, title=bnac.title) ) def test_get_selection_geometry_callback(self): bnac = BaseNonAggregate() df = cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]}) dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df)) assert ( bnac.get_selection_geometry_callback(dashboard).__name__ == "selection_callback" ) assert callable(type(bnac.get_selection_geometry_callback(dashboard))) def test_box_selection_callback(self): bnac = BaseNonAggregate() bnac.x = "a" bnac.y = "b" bnac.chart_type = "temp" self.result = None def t_function(data, patch_update=False): self.result = data bnac.reload_chart = t_function df = cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]}) dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df)) dashboard._active_view = bnac class evt: geometry = dict(x0=1, x1=2, y0=3, y1=4, type="rect") t = bnac.get_selection_geometry_callback(dashboard) t(evt) assert self.result.equals(df.query("1<=a<=2 and 3<=b<=4")) def test_lasso_election_callback(self): bnac = BaseNonAggregate() bnac.x = "a" bnac.y = "b" bnac.chart_type = "temp" def t_function(data, patch_update=False): self.result = data bnac.reload_chart = t_function df = cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]}) dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df)) class evt: geometry = dict(x=[1, 1, 2], y=[1, 2, 1], type="poly") final = True t = bnac.get_selection_geometry_callback(dashboard) with mock.patch("cuspatial.point_in_polygon") as pip: pip.return_value = cudf.DataFrame( {"selection": [True, False, True]} ) t(evt) assert pip.called @pytest.mark.parametrize( "data, _data", [ (cudf.DataFrame(), cudf.DataFrame()), ( cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]}), cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]}), ), ], ) def 
test_calculate_source(self, data, _data): """ Calculate source just calls to the format_source_data function which is implemented by chart types inheriting this class. """ bnac = BaseNonAggregate() self.result = None def t_function(data, patch_update=False): self.result = data bnac.format_source_data = t_function bnac.calculate_source(data) assert self.result.equals(_data) @pytest.mark.parametrize( "x_range, y_range, query, local_dict", [ ( (1, 2), (3, 4), "@x_min<=x<=@x_max and @y_min<=y<=@y_max", {"x_min": 1, "x_max": 2, "y_min": 3, "y_max": 4}, ), ( (0, 2), (3, 5), "@x_min<=x<=@x_max and @y_min<=y<=@y_max", {"x_min": 0, "x_max": 2, "y_min": 3, "y_max": 5}, ), ], ) def test_compute_query_dict(self, x_range, y_range, query, local_dict): bnac = BaseNonAggregate() bnac.chart_type = "test" bnac.x = "x" bnac.y = "y" bnac.x_range = x_range bnac.y_range = y_range df = cudf.DataFrame({"x": [1, 2, 2], "y": [3, 4, 5]}) dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df)) bnac.compute_query_dict( dashboard._query_str_dict, dashboard._query_local_variables_dict ) bnac_key = ( f"{bnac.x}_{bnac.y}" f"{'_' + bnac.aggregate_col if bnac.aggregate_col else ''}" f"_{bnac.aggregate_fn}_{bnac.chart_type}_{bnac.title}" ) assert dashboard._query_str_dict[bnac_key] == query for key in local_dict: assert ( dashboard._query_local_variables_dict[key] == local_dict[key] ) @pytest.mark.parametrize( "add_interaction, reset_event, event_1, event_2", [ (True, None, "selection_callback", None), (True, "test_event", "selection_callback", "reset_callback"), (False, "test_event", None, "reset_callback"), ], ) def test_add_events(self, add_interaction, reset_event, event_1, event_2): bnac = BaseNonAggregate() bnac.add_interaction = add_interaction bnac.reset_event = reset_event df = cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]}) dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df)) self.event_1 = None self.event_2 = None def t_func(fn): self.event_1 = fn.__name__ def t_func1(event, fn): self.event_2 = fn.__name__ bnac.add_selection_geometry_event = t_func bnac.add_event = t_func1 bnac.add_events(dashboard) assert self.event_1 == event_1 assert self.event_2 == event_2 def test_add_reset_event(self): bnac = BaseNonAggregate() bnac.chart_type = "test" bnac.x = "a" bnac.x_range = (0, 2) bnac.y_range = (3, 5) df = cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]}) dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df)) dashboard._active_view = bnac def t_func1(event, fn): fn("event") bnac.add_event = t_func1 bnac.add_reset_event(dashboard) assert bnac.x_range is None assert bnac.y_range is None def test_query_chart_by_range(self): bnac = BaseNonAggregate() bnac.chart_type = "test" bnac.x = "a" bnac_1 = BaseNonAggregate() bnac_1.chart_type = "test" bnac_1.x = "b" query_tuple = (4, 5) df = cudf.DataFrame({"a": [1, 2, 3, 4], "b": [3, 4, 5, 6]}) bnac.source = df self.result = None self.patch_update = None def t_func(data, patch_update): self.result = data self.patch_update = patch_update # creating a dummy reload chart fn as its not implemented in core # non aggregate chart class bnac.reload_chart = t_func bnac.query_chart_by_range( active_chart=bnac_1, query_tuple=query_tuple, datatile=None ) assert self.result.to_string() == " a b\n1 2 4\n2 3 5" assert self.patch_update is False @pytest.mark.parametrize( "new_indices, result", [ ([4, 5], " a b\n1 2 4\n2 3 5"), ([], " a b\n0 1 3\n1 2 4\n2 3 5\n3 4 6"), ([3], " a b\n0 1 3"), ], ) def test_query_chart_by_indices(self, new_indices, result): bnac = 
BaseNonAggregate() bnac.chart_type = "test" bnac.x = "a" bnac_1 = BaseNonAggregate() bnac_1.chart_type = "test" bnac_1.x = "b" new_indices = new_indices df = cudf.DataFrame({"a": [1, 2, 3, 4], "b": [3, 4, 5, 6]}) bnac.source = df self.result = None self.patch_update = None def t_func(data, patch_update): self.result = data self.patch_update = patch_update # creating a dummy reload chart fn as its not implemented in core # non aggregate chart class bnac.reload_chart = t_func bnac.query_chart_by_indices( active_chart=bnac_1, old_indices=[], new_indices=new_indices, datatile=None, ) assert self.result.to_string() == result assert self.patch_update is False
[ "cuxfilter.charts.core.non_aggregate.core_non_aggregate.BaseNonAggregate", "mock.patch", "cuxfilter.DataFrame.from_dataframe", "pytest.mark.parametrize", "cuxfilter.layouts.chart_view", "cudf.DataFrame" ]
[((1913, 1977), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""chart, _chart"""', '[(None, None), (1, 1)]'], {}), "('chart, _chart', [(None, None), (1, 1)])\n", (1936, 1977), False, 'import pytest\n'), ((4971, 5273), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x_range, y_range, query, local_dict"""', "[((1, 2), (3, 4), '@x_min<=x<=@x_max and @y_min<=y<=@y_max', {'x_min': 1,\n 'x_max': 2, 'y_min': 3, 'y_max': 4}), ((0, 2), (3, 5),\n '@x_min<=x<=@x_max and @y_min<=y<=@y_max', {'x_min': 0, 'x_max': 2,\n 'y_min': 3, 'y_max': 5})]"], {}), "('x_range, y_range, query, local_dict', [((1, 2), (3,\n 4), '@x_min<=x<=@x_max and @y_min<=y<=@y_max', {'x_min': 1, 'x_max': 2,\n 'y_min': 3, 'y_max': 4}), ((0, 2), (3, 5),\n '@x_min<=x<=@x_max and @y_min<=y<=@y_max', {'x_min': 0, 'x_max': 2,\n 'y_min': 3, 'y_max': 5})])\n", (4994, 5273), False, 'import pytest\n'), ((6390, 6629), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""add_interaction, reset_event, event_1, event_2"""', "[(True, None, 'selection_callback', None), (True, 'test_event',\n 'selection_callback', 'reset_callback'), (False, 'test_event', None,\n 'reset_callback')]"], {}), "('add_interaction, reset_event, event_1, event_2', [\n (True, None, 'selection_callback', None), (True, 'test_event',\n 'selection_callback', 'reset_callback'), (False, 'test_event', None,\n 'reset_callback')])\n", (6413, 6629), False, 'import pytest\n'), ((8883, 9060), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""new_indices, result"""', '[([4, 5], """ a b\n1 2 4\n2 3 5"""), ([],\n """ a b\n0 1 3\n1 2 4\n2 3 5\n3 4 6"""), ([3], \' a b\\n0 1 3\')]'], {}), '(\'new_indices, result\', [([4, 5],\n """ a b\n1 2 4\n2 3 5"""), ([],\n """ a b\n0 1 3\n1 2 4\n2 3 5\n3 4 6"""), ([3], \' a b\\n0 1 3\')])\n', (8906, 9060), False, 'import pytest\n'), ((327, 345), 'cuxfilter.charts.core.non_aggregate.core_non_aggregate.BaseNonAggregate', 'BaseNonAggregate', ([], {}), '()\n', (343, 345), False, 'from cuxfilter.charts.core.non_aggregate.core_non_aggregate import BaseNonAggregate\n'), ((1584, 1602), 'cuxfilter.charts.core.non_aggregate.core_non_aggregate.BaseNonAggregate', 'BaseNonAggregate', ([], {}), '()\n', (1600, 1602), False, 'from cuxfilter.charts.core.non_aggregate.core_non_aggregate import BaseNonAggregate\n'), ((2033, 2051), 'cuxfilter.charts.core.non_aggregate.core_non_aggregate.BaseNonAggregate', 'BaseNonAggregate', ([], {}), '()\n', (2049, 2051), False, 'from cuxfilter.charts.core.non_aggregate.core_non_aggregate import BaseNonAggregate\n'), ((2324, 2342), 'cuxfilter.charts.core.non_aggregate.core_non_aggregate.BaseNonAggregate', 'BaseNonAggregate', ([], {}), '()\n', (2340, 2342), False, 'from cuxfilter.charts.core.non_aggregate.core_non_aggregate import BaseNonAggregate\n'), ((2357, 2405), 'cudf.DataFrame', 'cudf.DataFrame', (["{'a': [1, 2, 2], 'b': [3, 4, 5]}"], {}), "({'a': [1, 2, 2], 'b': [3, 4, 5]})\n", (2371, 2405), False, 'import cudf\n'), ((2747, 2765), 'cuxfilter.charts.core.non_aggregate.core_non_aggregate.BaseNonAggregate', 'BaseNonAggregate', ([], {}), '()\n', (2763, 2765), False, 'from cuxfilter.charts.core.non_aggregate.core_non_aggregate import BaseNonAggregate\n'), ((3003, 3051), 'cudf.DataFrame', 'cudf.DataFrame', (["{'a': [1, 2, 2], 'b': [3, 4, 5]}"], {}), "({'a': [1, 2, 2], 'b': [3, 4, 5]})\n", (3017, 3051), False, 'import cudf\n'), ((3449, 3467), 'cuxfilter.charts.core.non_aggregate.core_non_aggregate.BaseNonAggregate', 'BaseNonAggregate', ([], {}), '()\n', (3465, 3467), False, 'from 
cuxfilter.charts.core.non_aggregate.core_non_aggregate import BaseNonAggregate\n'), ((3678, 3726), 'cudf.DataFrame', 'cudf.DataFrame', (["{'a': [1, 2, 2], 'b': [3, 4, 5]}"], {}), "({'a': [1, 2, 2], 'b': [3, 4, 5]})\n", (3692, 3726), False, 'import cudf\n'), ((4713, 4731), 'cuxfilter.charts.core.non_aggregate.core_non_aggregate.BaseNonAggregate', 'BaseNonAggregate', ([], {}), '()\n', (4729, 4731), False, 'from cuxfilter.charts.core.non_aggregate.core_non_aggregate import BaseNonAggregate\n'), ((5565, 5583), 'cuxfilter.charts.core.non_aggregate.core_non_aggregate.BaseNonAggregate', 'BaseNonAggregate', ([], {}), '()\n', (5581, 5583), False, 'from cuxfilter.charts.core.non_aggregate.core_non_aggregate import BaseNonAggregate\n'), ((5735, 5783), 'cudf.DataFrame', 'cudf.DataFrame', (["{'x': [1, 2, 2], 'y': [3, 4, 5]}"], {}), "({'x': [1, 2, 2], 'y': [3, 4, 5]})\n", (5749, 5783), False, 'import cudf\n'), ((6781, 6799), 'cuxfilter.charts.core.non_aggregate.core_non_aggregate.BaseNonAggregate', 'BaseNonAggregate', ([], {}), '()\n', (6797, 6799), False, 'from cuxfilter.charts.core.non_aggregate.core_non_aggregate import BaseNonAggregate\n'), ((6900, 6948), 'cudf.DataFrame', 'cudf.DataFrame', (["{'a': [1, 2, 2], 'b': [3, 4, 5]}"], {}), "({'a': [1, 2, 2], 'b': [3, 4, 5]})\n", (6914, 6948), False, 'import cudf\n'), ((7464, 7482), 'cuxfilter.charts.core.non_aggregate.core_non_aggregate.BaseNonAggregate', 'BaseNonAggregate', ([], {}), '()\n', (7480, 7482), False, 'from cuxfilter.charts.core.non_aggregate.core_non_aggregate import BaseNonAggregate\n'), ((7611, 7659), 'cudf.DataFrame', 'cudf.DataFrame', (["{'a': [1, 2, 2], 'b': [3, 4, 5]}"], {}), "({'a': [1, 2, 2], 'b': [3, 4, 5]})\n", (7625, 7659), False, 'import cudf\n'), ((8030, 8048), 'cuxfilter.charts.core.non_aggregate.core_non_aggregate.BaseNonAggregate', 'BaseNonAggregate', ([], {}), '()\n', (8046, 8048), False, 'from cuxfilter.charts.core.non_aggregate.core_non_aggregate import BaseNonAggregate\n'), ((8121, 8139), 'cuxfilter.charts.core.non_aggregate.core_non_aggregate.BaseNonAggregate', 'BaseNonAggregate', ([], {}), '()\n', (8137, 8139), False, 'from cuxfilter.charts.core.non_aggregate.core_non_aggregate import BaseNonAggregate\n'), ((8242, 8296), 'cudf.DataFrame', 'cudf.DataFrame', (["{'a': [1, 2, 3, 4], 'b': [3, 4, 5, 6]}"], {}), "({'a': [1, 2, 3, 4], 'b': [3, 4, 5, 6]})\n", (8256, 8296), False, 'import cudf\n'), ((9200, 9218), 'cuxfilter.charts.core.non_aggregate.core_non_aggregate.BaseNonAggregate', 'BaseNonAggregate', ([], {}), '()\n', (9216, 9218), False, 'from cuxfilter.charts.core.non_aggregate.core_non_aggregate import BaseNonAggregate\n'), ((9291, 9309), 'cuxfilter.charts.core.non_aggregate.core_non_aggregate.BaseNonAggregate', 'BaseNonAggregate', ([], {}), '()\n', (9307, 9309), False, 'from cuxfilter.charts.core.non_aggregate.core_non_aggregate import BaseNonAggregate\n'), ((9417, 9471), 'cudf.DataFrame', 'cudf.DataFrame', (["{'a': [1, 2, 3, 4], 'b': [3, 4, 5, 6]}"], {}), "({'a': [1, 2, 3, 4], 'b': [3, 4, 5, 6]})\n", (9431, 9471), False, 'import cudf\n'), ((3983, 4023), 'mock.patch', 'mock.patch', (['"""cuspatial.point_in_polygon"""'], {}), "('cuspatial.point_in_polygon')\n", (3993, 4023), False, 'import mock\n'), ((4064, 4114), 'cudf.DataFrame', 'cudf.DataFrame', (["{'selection': [True, False, True]}"], {}), "({'selection': [True, False, True]})\n", (4078, 4114), False, 'import cudf\n'), ((2191, 2245), 'cuxfilter.layouts.chart_view', 'chart_view', (['_chart'], {'width': 'bnac.width', 'title': 'bnac.title'}), '(_chart, width=bnac.width, 
title=bnac.title)\n', (2201, 2245), False, 'from cuxfilter.layouts import chart_view\n'), ((2446, 2474), 'cuxfilter.DataFrame.from_dataframe', 'DataFrame.from_dataframe', (['df'], {}), '(df)\n', (2470, 2474), False, 'from cuxfilter import DataFrame\n'), ((3092, 3120), 'cuxfilter.DataFrame.from_dataframe', 'DataFrame.from_dataframe', (['df'], {}), '(df)\n', (3116, 3120), False, 'from cuxfilter import DataFrame\n'), ((3767, 3795), 'cuxfilter.DataFrame.from_dataframe', 'DataFrame.from_dataframe', (['df'], {}), '(df)\n', (3791, 3795), False, 'from cuxfilter import DataFrame\n'), ((4271, 4287), 'cudf.DataFrame', 'cudf.DataFrame', ([], {}), '()\n', (4285, 4287), False, 'import cudf\n'), ((4289, 4305), 'cudf.DataFrame', 'cudf.DataFrame', ([], {}), '()\n', (4303, 4305), False, 'import cudf\n'), ((4338, 4386), 'cudf.DataFrame', 'cudf.DataFrame', (["{'a': [1, 2, 2], 'b': [3, 4, 5]}"], {}), "({'a': [1, 2, 2], 'b': [3, 4, 5]})\n", (4352, 4386), False, 'import cudf\n'), ((4404, 4452), 'cudf.DataFrame', 'cudf.DataFrame', (["{'a': [1, 2, 2], 'b': [3, 4, 5]}"], {}), "({'a': [1, 2, 2], 'b': [3, 4, 5]})\n", (4418, 4452), False, 'import cudf\n'), ((5824, 5852), 'cuxfilter.DataFrame.from_dataframe', 'DataFrame.from_dataframe', (['df'], {}), '(df)\n', (5848, 5852), False, 'from cuxfilter import DataFrame\n'), ((6989, 7017), 'cuxfilter.DataFrame.from_dataframe', 'DataFrame.from_dataframe', (['df'], {}), '(df)\n', (7013, 7017), False, 'from cuxfilter import DataFrame\n'), ((7700, 7728), 'cuxfilter.DataFrame.from_dataframe', 'DataFrame.from_dataframe', (['df'], {}), '(df)\n', (7724, 7728), False, 'from cuxfilter import DataFrame\n')]
# -*- coding: utf-8 -*- # # Copyright © Spyder Project Contributors # Licensed under the terms of the MIT License # (see spyder/__init__.py for details) """Outline explorer API. You need to declare a OutlineExplorerProxy, and a function for handle the edit_goto Signal. class OutlineExplorerProxyCustom(OutlineExplorerProxy): ... def handle_go_to(name, line, text): ... outlineexplorer = OutlineExplorerWidget(None) oe_proxy = OutlineExplorerProxyCustom(name) outlineexplorer.set_current_editor(oe_proxy, update=True, clear=False) outlineexplorer.edit_goto.connect(handle_go_to) """ import re from qtpy.QtCore import Signal, QObject from qtpy.QtGui import QTextBlock from spyder.config.base import _ from spyder.config.base import running_under_pytest def document_cells(block, forward=True): """ Get cells oedata before or after block in the document. Parameters ---------- forward : bool, optional Whether to iterate forward or backward from the current block. """ if not block.isValid(): # Not a valid block return if forward: block = block.next() else: block = block.previous() while block.isValid(): data = block.userData() if (data and data.oedata and data.oedata.def_type == OutlineExplorerData.CELL): yield data.oedata if forward: block = block.next() else: block = block.previous() def is_cell_header(block): """Check if the given block is a cell header.""" if not block.isValid(): return False data = block.userData() return (data and data.oedata and data.oedata.def_type == OutlineExplorerData.CELL) def cell_index(block): """Get the cell index of the given block.""" index = len(list(document_cells(block, forward=False))) if is_cell_header(block): return index + 1 return index def cell_name(block): """ Get the cell name the block is in. If the cell is unnamed, return the cell index instead. """ if is_cell_header(block): header = block.userData().oedata else: try: header = next(document_cells(block, forward=False)) except StopIteration: # This cell has no header, so it is the first cell. return 0 if header.has_name(): return header.def_name else: # No name, return the index return cell_index(block) class OutlineExplorerProxy(QObject): """ Proxy class between editors and OutlineExplorerWidget. 
""" sig_cursor_position_changed = Signal(int, int) sig_outline_explorer_data_changed = Signal() def __init__(self): super(OutlineExplorerProxy, self).__init__() self.fname = None def is_python(self): """Return whether the editor is a python file or not.""" raise NotImplementedError def get_id(self): """Return an unique id, used for identify objects in a dict""" raise NotImplementedError def give_focus(self): """Give focus to the editor, called when toogling visibility of OutlineExplorerWidget.""" raise NotImplementedError def get_line_count(self): """Return the number of lines of the editor (int).""" raise NotImplementedError def parent(self): """This is used for diferenciate editors in multi-window mode.""" return None def get_cursor_line_number(self): """Return the cursor line number.""" raise NotImplementedError def outlineexplorer_data_list(self): """Returns a list of outline explorer data.""" raise NotImplementedError class OutlineExplorerData(QObject): CLASS, FUNCTION, STATEMENT, COMMENT, CELL = list(range(5)) FUNCTION_TOKEN = 'def' CLASS_TOKEN = 'class' # Emitted if the OutlineExplorerData was changed sig_update = Signal() def __init__(self, block, text=None, fold_level=None, def_type=None, def_name=None, color=None): """ Args: text (str) fold_level (int) def_type (int): [CLASS, FUNCTION, STATEMENT, COMMENT, CELL] def_name (str) color (PyQt.QtGui.QTextCharFormat) """ super(OutlineExplorerData, self).__init__() self.text = text self.fold_level = fold_level self.def_type = def_type self.def_name = def_name self.color = color if running_under_pytest(): # block might be a dummy self.block = block else: # Copy the text block to make sure it is not deleted self.block = QTextBlock(block) def is_not_class_nor_function(self): return self.def_type not in (self.CLASS, self.FUNCTION) def is_class_or_function(self): return self.def_type in (self.CLASS, self.FUNCTION) def is_comment(self): return self.def_type in (self.COMMENT, self.CELL) def get_class_name(self): if self.def_type == self.CLASS: return self.def_name def get_function_name(self): if self.def_type == self.FUNCTION: return self.def_name def get_token(self): if self.def_type == self.FUNCTION: token = self.FUNCTION_TOKEN elif self.def_type == self.CLASS: token = self.CLASS_TOKEN return token @property def def_name(self): """Get the cell name.""" # Non cell don't need unique names. 
if self.def_type != self.CELL: return self._def_name def get_name(oedata): name = oedata._def_name if not name: name = _('Unnamed Cell') return name self_name = get_name(self) existing_numbers = [] def check_match(oedata): # Look for "string" other_name = get_name(oedata) pattern = '^' + re.escape(self_name) + r'(?:, #(\d+))?$' match = re.match(pattern, other_name) if match: # Check if already has a number number = match.groups()[0] if number: existing_numbers.append(int(number)) return True return False # Count cells N_prev = 0 for oedata in document_cells(self.block, forward=False): if check_match(oedata): N_prev += 1 N_fix_previous = len(existing_numbers) N_next = 0 for oedata in document_cells(self.block, forward=True): if check_match(oedata): N_next += 1 # Get the remaining indexeswe can use free_indexes = [idx for idx in range(N_prev + N_next + 1) if idx + 1 not in existing_numbers] idx = free_indexes[N_prev - N_fix_previous] if N_prev + N_next > 0: return self_name + ', #{}'.format(idx + 1) return self_name @def_name.setter def def_name(self, value): """Set name.""" self._def_name = value def update(self, other): """Try to update to avoid reloading everything.""" if (self.def_type == other.def_type and self.fold_level == other.fold_level): self.text = other.text old_def_name = self._def_name self._def_name = other._def_name self.color = other.color self.sig_update.emit() if self.def_type == self.CELL: if self.cell_level != other.cell_level: return False # Must update all other cells whose name has changed. for oedata in document_cells(self.block, forward=True): if oedata._def_name in [self._def_name, old_def_name]: oedata.sig_update.emit() return True return False def is_valid(self): """Check if the oedata has a valid block attached.""" block = self.block return (block and block.isValid() and block.userData() and hasattr(block.userData(), 'oedata') and block.userData().oedata == self ) def has_name(self): """Check if cell has a name.""" if self._def_name: return True else: return False def get_block_number(self): """Get the block number.""" if not self.is_valid(): # Avoid calling blockNumber if not a valid block return None return self.block.blockNumber()
[ "spyder.config.base.running_under_pytest", "re.escape", "qtpy.QtCore.Signal", "re.match", "qtpy.QtGui.QTextBlock", "spyder.config.base._" ]
[((2665, 2681), 'qtpy.QtCore.Signal', 'Signal', (['int', 'int'], {}), '(int, int)\n', (2671, 2681), False, 'from qtpy.QtCore import Signal, QObject\n'), ((2722, 2730), 'qtpy.QtCore.Signal', 'Signal', ([], {}), '()\n', (2728, 2730), False, 'from qtpy.QtCore import Signal, QObject\n'), ((3973, 3981), 'qtpy.QtCore.Signal', 'Signal', ([], {}), '()\n', (3979, 3981), False, 'from qtpy.QtCore import Signal, QObject\n'), ((4555, 4577), 'spyder.config.base.running_under_pytest', 'running_under_pytest', ([], {}), '()\n', (4575, 4577), False, 'from spyder.config.base import running_under_pytest\n'), ((4751, 4768), 'qtpy.QtGui.QTextBlock', 'QTextBlock', (['block'], {}), '(block)\n', (4761, 4768), False, 'from qtpy.QtGui import QTextBlock\n'), ((6091, 6120), 're.match', 're.match', (['pattern', 'other_name'], {}), '(pattern, other_name)\n', (6099, 6120), False, 'import re\n'), ((5785, 5802), 'spyder.config.base._', '_', (['"""Unnamed Cell"""'], {}), "('Unnamed Cell')\n", (5786, 5802), False, 'from spyder.config.base import _\n'), ((6030, 6050), 're.escape', 're.escape', (['self_name'], {}), '(self_name)\n', (6039, 6050), False, 'import re\n')]
import os import re import sys from oguilem.configuration.fitness import OGUILEMFitnessFunctionConfiguration from oguilem.configuration.ga import OGUILEMGlobOptConfig from oguilem.configuration.geometry import OGUILEMGeometryConfig from oguilem.configuration.utils import ConnectedValue, ConfigFileManager from oguilem.resources import options class OGUILEMConfig: def __init__(self): self.ui = OGUILEMUIConfig() self.globopt = OGUILEMGlobOptConfig() self.options = OGUILEMGeneralConfig() self.geometry = OGUILEMGeometryConfig() self.fitness = OGUILEMFitnessFunctionConfiguration() self.file_manager = ConfigFileManager() def save_to_file(self, path: str): content = "###OGOLEM###\n" content += self.globopt.get_finished_config() content += self.geometry.get_finished_config(path) content += self.fitness.get_finished_config() content += self.options.get_finished_config() with open(path, "w") as conf_file: conf_file.write(content) self.file_manager.signal_saved(path) def load_from_file(self, path: str, preset=False): self.options.set_to_default() with open(path, "r") as conf_file: content = conf_file.readlines() # Find geometry block and split off iter_content = iter(content) geo_block = list() backend_defs = list() charge_block = list() spin_block = list() offset = 0 # Separate off blocks for n, line in enumerate(iter_content): # Charge and Spin Blocks if line.strip().startswith("<CHARGES>"): start = n + offset try: charge_line = next(iter_content).strip() except StopIteration: raise RuntimeError("Config ends after <CHARGES> tag!?") while not charge_line.startswith("</CHARGES>"): charge_block.append(charge_line) try: charge_line = next(iter_content).strip() except StopIteration: raise RuntimeError("Dangling <GEOMETRY> tag in configuration!") end = start + len(charge_block) + 2 content = content[:start] + content[end:] offset -= 1 if line.strip().startswith("<SPINS>"): start = n + offset try: spin_line = next(iter_content).strip() except StopIteration: raise RuntimeError("Config ends after <SPINS> tag!?") while not spin_line.startswith("</SPINS>"): spin_block.append(spin_line) try: spin_line = next(iter_content).strip() except StopIteration: raise RuntimeError("Dangling <SPINS> tag in configuration!") end = start + len(spin_block) + 2 content = content[:start] + content[end:] offset -= 1 # Geometry Block if line.strip().startswith("<GEOMETRY>"): start = n + offset try: geo_line = next(iter_content).strip() except StopIteration: raise RuntimeError("Config ends after <GEOMETRY> tag!?") while not geo_line.startswith("</GEOMETRY>"): geo_block.append(geo_line) try: geo_line = next(iter_content).strip() except StopIteration: raise RuntimeError("Dangling <GEOMETRY> tag in configuration!") end = start + len(geo_block) + 2 content = content[:start] + content[end:] offset -= 1 # Any Backend Definitions if line.strip().startswith("<CLUSTERBACKEND>"): back_block = list() start = n + offset try: back_line = next(iter_content).strip() except StopIteration: raise RuntimeError("Config ends after <CLUSTERBACKEND> tag!?") while not back_line.startswith("</CLUSTERBACKEND>"): back_block.append(back_line) try: back_line = next(iter_content).strip() except StopIteration: raise RuntimeError("Dangling <CLUSTERBACKEND> tag in configuration!") end = start + len(back_block) + 2 backend_defs.append(back_block) content = content[:start] + content[end:] offset -= 1 # Parse them self.geometry.parse_from_block(geo_block) self.geometry.parse_charge_block(charge_block) self.geometry.parse_spin_block(spin_block) 
self.fitness.parse_backend_tags(backend_defs) # Deal with the rest for line in content: if line.strip().startswith("LocOptAlgo="): self.fitness.parse_locopt_algo(line.strip()[11:]) elif line.strip().startswith("GlobOptAlgo="): self.globopt.parse_globopt_string(line.strip()[12:]) else: for key in self.options.values: type = self.options.values[key].type if re.match(key + "=", line.strip()): value, index = parse_value(line.strip()[len(key) + 1:], type) if value is not None: print("Option {:>30} set to: {:>30}".format(key, str(value))) self.options.values[key].set(value, index) else: print("ERROR: Could not set Option %s. Set to default instead!" % key) self.options.values[key].set(self.options.defaults[key]) if not preset: self.file_manager.signal_saved(path) else: self.file_manager.signal_modification() def parse_value(line, type): value = None index = -1 work = line.strip() if type is str: value = work elif type is int: value = int(work) elif type is float: value = float(work) elif type is bool: value = work.lower() == "true" elif type is list: tmp = work.split(";") value = [float(tmp[0]), float(tmp[1]), float(tmp[2])] return value, index class OGUILEMGeneralConfig: def __init__(self): self.defaults = dict() self.values = dict() for key in options: type, default = options[key] if type == "str": self.defaults[key] = default elif type == "int": self.defaults[key] = int(default) elif type == "float": self.defaults[key] = float(default) elif type == "bool": self.defaults[key] = (default.lower() == "true") elif type == "3;float": default = default.strip().split(";") self.defaults[key] = [float(default[0]), float(default[1]), float(default[2])] else: raise IOError("Could not parse xml key %s in general configs!" % key) self.values[key] = ConnectedValue(self.defaults[key]) def set_to_default(self): for key in options: self.values[key].set(self.defaults[key]) def get_finished_config(self) -> str: content = "" for key in self.values: self.values[key].request_update() value = self.values[key].value if value != self.defaults[key]: content += "\n" + key + "=" + str(self.values[key]) return content def find_config_folder(): if sys.platform == 'Windows': path = os.path.join(os.environ['APPDATA'], 'oguilem') else: path = os.path.join(os.environ['HOME'], '.config', 'oguilem') if not os.path.isdir(path): os.mkdir(path) return path class OGUILEMUIConfig: def __init__(self): self.window_size = None self.window_position = None self.java_path = None self.java_vm_variables = None self.ogo_path = None self.ogo_args = None self.environmental_variables = None self.recover_from_file() def get_run_command(self, custom_run_command=""): run_cmd = custom_run_command if custom_run_command else self.ogo_args if not all([self.java_path, self.ogo_path, self.ogo_args]): raise RuntimeError("Cannot run ogolem without knowing java and ogolem paths as well as ogolem arguments!") if self.java_vm_variables: return "%s %s -jar %s %s" % (self.java_path, self.java_vm_variables, self.ogo_path, run_cmd) return "%s -jar %s %s" % (self.java_path, self.ogo_path, run_cmd) def recover_from_file(self): path = os.path.join(find_config_folder(), "oguilem.cfg") try: with open(path, "r") as config: lines = config.readlines() for line in lines: work = line.strip() if work.startswith("WINDOWSIZE"): self.window_size = (int(work.split()[1]), int(work.split()[2])) elif work.startswith("WINDOWPOS"): self.window_position = (int(work.split()[1]), int(work.split()[2])) elif work.startswith("JAVAPATH"): self.java_path = work[8:].strip() elif 
work.startswith("JAVAVM"): self.java_vm_variables = work[6:].strip() elif work.startswith("OGOPATH"): self.ogo_path = work[7:].strip() elif work.startswith("OGOARGS"): self.ogo_args = work[7:].strip() elif work.startswith("ENV"): self.environmental_variables = work[3:].strip() except ValueError: print("There are format errors in the UI config file in '%s'. Using defaults." % find_config_folder()) except IOError: print("Config file not found. A new one will generate once the program exits.") def save_to_file(self): path = os.path.join(find_config_folder(), "oguilem.cfg") with open(path, "w") as config: if self.window_size: config.write("WINDOWSIZE %d %d\n" % (self.window_size[0], self.window_size[1])) if self.window_position: config.write("WINDOWPOS %d %d\n" % (self.window_position[0], self.window_position[1])) if self.java_path: config.write("JAVAPATH %s\n" % self.java_path) if self.java_vm_variables: config.write("JAVAVM %s\n" % self.java_vm_variables) if self.ogo_path: config.write("OGOPATH %s\n" % self.ogo_path) if self.ogo_args: config.write("OGOARGS %s\n" % self.ogo_args) if self.java_path: config.write("ENV %s\n" % self.environmental_variables)
[ "oguilem.configuration.ga.OGUILEMGlobOptConfig", "oguilem.configuration.fitness.OGUILEMFitnessFunctionConfiguration", "os.path.join", "oguilem.configuration.geometry.OGUILEMGeometryConfig", "os.path.isdir", "oguilem.configuration.utils.ConnectedValue", "os.mkdir", "oguilem.configuration.utils.ConfigFileManager" ]
[((451, 473), 'oguilem.configuration.ga.OGUILEMGlobOptConfig', 'OGUILEMGlobOptConfig', ([], {}), '()\n', (471, 473), False, 'from oguilem.configuration.ga import OGUILEMGlobOptConfig\n'), ((544, 567), 'oguilem.configuration.geometry.OGUILEMGeometryConfig', 'OGUILEMGeometryConfig', ([], {}), '()\n', (565, 567), False, 'from oguilem.configuration.geometry import OGUILEMGeometryConfig\n'), ((591, 628), 'oguilem.configuration.fitness.OGUILEMFitnessFunctionConfiguration', 'OGUILEMFitnessFunctionConfiguration', ([], {}), '()\n', (626, 628), False, 'from oguilem.configuration.fitness import OGUILEMFitnessFunctionConfiguration\n'), ((657, 676), 'oguilem.configuration.utils.ConfigFileManager', 'ConfigFileManager', ([], {}), '()\n', (674, 676), False, 'from oguilem.configuration.utils import ConnectedValue, ConfigFileManager\n'), ((8343, 8389), 'os.path.join', 'os.path.join', (["os.environ['APPDATA']", '"""oguilem"""'], {}), "(os.environ['APPDATA'], 'oguilem')\n", (8355, 8389), False, 'import os\n'), ((8415, 8469), 'os.path.join', 'os.path.join', (["os.environ['HOME']", '""".config"""', '"""oguilem"""'], {}), "(os.environ['HOME'], '.config', 'oguilem')\n", (8427, 8469), False, 'import os\n'), ((8481, 8500), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (8494, 8500), False, 'import os\n'), ((8510, 8524), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (8518, 8524), False, 'import os\n'), ((7799, 7833), 'oguilem.configuration.utils.ConnectedValue', 'ConnectedValue', (['self.defaults[key]'], {}), '(self.defaults[key])\n', (7813, 7833), False, 'from oguilem.configuration.utils import ConnectedValue, ConfigFileManager\n')]
from __future__ import unicode_literals import copy import json from six import string_types from . import default_operators from . import sql_prepare from . import values from .error import WinnowError from .templating import SqlFragment from .templating import WinnowSql class Winnow(object): """ Winnow is a SQL query builder specifically designed for powerful filtering on a table. It is designed to be efficient and low-magic. """ # Take care here -- In order to avoid mucking up the parent's copy of this # static value we have to deep copy it to every subclass. _special_cases = {} sql_class = WinnowSql def __init__(self, table, sources): self.table = table self.sources = sources self.sql = self.sql_class() def prepare_query(self, *args, **kwargs): """ Proxy to self.sql """ return self.sql.prepare_query(*args, **kwargs) def resolve(self, filt): """ Given a filter, resolve (expand) all it's clauses. A resolved clause includes information about the value type of the data source, and how to perform queries against that data source. return the modified filter. """ filt['logical_op'] = filt.get('logical_op', '&') if filt['logical_op'] not in '&|': raise WinnowError("Logical op must be one of &, |. Given: {}".format( filt['logical_op'])) for ix in range(len(filt['filter_clauses'])): filt['filter_clauses'][ix] = self.resolve_clause( filt['filter_clauses'][ix]) return filt def validate(self, filt): """ Make sure a filter is valid (resolves properly), but avoid bulking up the json object (probably because it's about to go into the db, or across the network) """ self.resolve(copy.deepcopy(filt)) return filt def resolve_clause(self, filter_clause): """ Given a filter_clause, check that it's valid. Return a dict-style filter_clause with a vivified value field. 
""" if 'logical_op' in filter_clause: # nested filter return self.resolve(filter_clause) ds, op = self.resolve_components(filter_clause) value = self.vivify(op['value_type'], filter_clause['value']) filter_clause['data_source_resolved'] = ds filter_clause['operator_resolved'] = op filter_clause['value_vivified'] = value filter_clause['summary'] = self.summarize(filter_clause) return filter_clause def summarize(self, filter_clause): ds = filter_clause['data_source_resolved'] op = filter_clause['operator_resolved'] value = filter_clause['value_vivified'] cvt = self.coalesce_value_type(op['value_type']) value_string = value operator_string = op.get('summary_template') or '{{data_source}} {} {{value}}'.format(op['name']) if cvt == 'collection': operator_string, value_string = self.summarize_collection(filter_clause) elif cvt == 'relative_date': value_string = value.replace('_', ' ') elif cvt == 'numeric': value_string = '{:,}'.format(value) return operator_string.format(data_source=ds['display_name'], value=value_string) @classmethod def coalesce_value_type(cls, value_type): for op in cls.operators: if op['value_type'] == value_type: return op.get('coalesced_value_type', value_type) return value_type @classmethod def summarize_collection(cls, filter_clause): value = filter_clause['value'] if isinstance(filter_clause['value'], list) else json.loads(filter_clause['value']) operator_string = '{data_source} any of {value}' if len(value) != 1 else '{data_source} is {value}' if not value: value_string = '(none)' else: value_string = ', '.join(value) return operator_string, value_string @staticmethod def empty_filter(): return dict(logial_op='&', filter_clauses=[]) @classmethod def vivify(cls, value_type, value): """De-stringify <value> into <value_type> Raises WinnowError if <value> is not well formatted for that type.""" cvt = cls.coalesce_value_type(value_type) if cvt == 'string': return values.vivify_string(value) elif cvt == 'collection': return values.vivify_collection(value) elif cvt in ('numeric', 'string_length'): return values.vivify_numeric(value) elif cvt == 'relative_date': return values.vivify_relative_date(value) elif cvt == 'absolute_date': return values.vivify_absolute_date(value) elif cvt in ('bool', 'nullable'): return values.vivify_bool(value) elif cvt == 'single_choice': return values.vivify_single_choice(value) else: raise WinnowError("Unknown value_type, '{}'".format(value_type)) @classmethod def stringify(cls, value_type, value): cvt = cls.coalesce_value_type(value_type) if isinstance(value, string_types): value = cls.vivify(value_type, value) if cvt == 'string': return values.stringify_string(value) elif cvt == 'collection': return values.stringify_collection(value) elif cvt in ('numeric', 'string_length'): return values.stringify_numeric(value) elif cvt == 'relative_date': return values.stringify_relative_date(value) elif cvt == 'absolute_date': return values.stringify_absolute_date(value) elif cvt in ('bool', 'nullable'): return values.stringify_bool(value) elif cvt == 'single_choice': return values.stringify_single_choice(value) raise WinnowError("Unknown value_type, '{}'".format(value_type)) operators = default_operators.OPERATORS def resolve_operator(self, op_name, value_types): '''Given an operator name, return an Op object. Raise an error if the operator is not found''' if not isinstance(op_name, string_types): raise WinnowError("Bad operator type, '{}'. 
expected string".format(type(op_name))) op_name = op_name.lower() matches = [op for op in self.operators if op['name'].lower() == op_name and op['value_type'] in value_types] if len(matches) == 0: raise WinnowError("Unknown operator '{}'".format(op_name)) return matches.pop() def resolve_source(self, source_name): """ Given a source name, return a resolved data source. Raise an error if the source name is not allowable """ matches = [source for source in self.sources if source['display_name'] == source_name] if len(matches) == 0: raise WinnowError("Unknown data source '{}'".format(source_name)) elif len(matches) > 1: raise WinnowError("Ambiguous data source '{}'".format(source_name)) return matches.pop() def resolve_components(self, clause): source = self.resolve_source(clause['data_source']) operator = self.resolve_operator(clause['operator'], source['value_types']) return source, operator def query(self, filt): return self.prepare_query( "SELECT * FROM {{ table | sqlsafe }} WHERE {{ condition }}", table=self.table, condition=self.where_clauses(filt)) def strip(self, filt): """ Perform the opposite of resolving a filter. """ for k in ('data_source_resolved', 'operator_resolved', 'value_vivified'): filt.pop(k, None) if 'filter_clauses' in filt: filt['filter_clauses'] = [self.strip(f) for f in filt['filter_clauses']] return filt def where_clauses(self, filt): ''' Apply a user filter. Returns a paren-wrapped WHERE clause suitable for using in a SELECT statement on the opportunity table. ''' if not filt['filter_clauses']: return True filt = self.resolve(filt) where_clauses = [] for clause in filt['filter_clauses']: if 'logical_op' in clause: # nested filter where_clauses.append(self.where_clauses(clause)) elif 'data_source_resolved' in clause: where_clauses.append(self._dispatch_clause(clause)) else: # I don't expect to ever get here, because we should hit this # issue when we call `filt = self.resolve(filt)` raise WinnowError("Somehow, this is neither a nested filter, nor a resolved clause") if not where_clauses: return True sep = '\nAND \n ' if filt['logical_op'] == '&' else '\nOR \n ' self.strip(filt) sql_frag = SqlFragment.join(sep, where_clauses) sql_frag.query = '(' + sql_frag.query + ')' return sql_frag def _dispatch_clause(self, clause): """ Evaluates whether a clause is standard, special, or custom and calls the appropriate specialization function. Each specialization returns a paren-wrapped WHERE clause, to be AND'd or OR'd together to produce a final clause.""" for k in ('data_source_resolved', 'operator_resolved', 'value_vivified'): if k not in clause: raise WinnowError('failed to resolve component: {}'.format(k)) op = clause['operator_resolved'] special_handler = self.special_case_handler( source_name=clause['data_source'], value_type=op['value_type']) if special_handler is not None: return special_handler(self, clause) return self._default_clause(clause) def where_clause(self, data_source, operator, value): return sql_prepare.where_clause(data_source['column'], operator, value) def _default_clause(self, clause): """ Given a filter_clause, convert it to a WHERE clause """ ds = clause['data_source_resolved'] op = clause['operator_resolved'] value = clause['value_vivified'] return self.where_clause(ds, op, value) @classmethod def special_case(cls, source_name, *value_types): """ Register a special case handler. 
A special case handler is a function s: s(Winnow(), clause) -> WHERE clause string """ if cls._special_cases is getattr(super(cls, cls), '_special_cases', None): raise RuntimeError('Please define your own _special_cases dict, so as to avoid modifying your parent. ' 'Note to self: come up with a more durable way to handle this.') # ideas: # proxy the _special_cases as the union of own and parent's version. def decorator(func): """ Register a function in the handler table. """ for value_type in value_types: if (source_name, value_type) in cls._special_cases: raise WinnowError("Conflicting handlers registered for ({},{}): {} and {}".format( value_type, source_name, cls._special_cases[(source_name, value_type)].__name__, func.__name__)) cls._special_cases[(source_name, value_type)] = func return func return decorator def special_case_handler(self, source_name, value_type): """ Check if a given value_type, source_name pair has a special case handler. :return: A function handler for that case accepting the winnow instance and the clause. """ return self._special_cases.get((source_name, value_type))
[ "json.loads", "copy.deepcopy" ]
[((1905, 1924), 'copy.deepcopy', 'copy.deepcopy', (['filt'], {}), '(filt)\n', (1918, 1924), False, 'import copy\n'), ((3781, 3815), 'json.loads', 'json.loads', (["filter_clause['value']"], {}), "(filter_clause['value'])\n", (3791, 3815), False, 'import json\n')]
import numpy as np import ROOT from dummy_distributions import dummy_pt_eta counts, test_in1, test_in2 = dummy_pt_eta() f = ROOT.TFile.Open("samples/testSF2d.root") sf = f.Get("scalefactors_Tight_Electron") xmin, xmax = sf.GetXaxis().GetXmin(), sf.GetXaxis().GetXmax() ymin, ymax = sf.GetYaxis().GetXmin(), sf.GetYaxis().GetXmax() test_out = np.empty_like(test_in1) for i, (eta, pt) in enumerate(zip(test_in1, test_in2)): if xmax <= eta: eta = xmax - 1.0e-5 elif eta < xmin: eta = xmin if ymax <= pt: pt = ymax - 1.0e-5 elif pt < ymin: pt = ymin ib = sf.FindBin(eta, pt) test_out[i] = sf.GetBinContent(ib) print(repr(test_out))
[ "numpy.empty_like", "dummy_distributions.dummy_pt_eta", "ROOT.TFile.Open" ]
[((107, 121), 'dummy_distributions.dummy_pt_eta', 'dummy_pt_eta', ([], {}), '()\n', (119, 121), False, 'from dummy_distributions import dummy_pt_eta\n'), ((127, 167), 'ROOT.TFile.Open', 'ROOT.TFile.Open', (['"""samples/testSF2d.root"""'], {}), "('samples/testSF2d.root')\n", (142, 167), False, 'import ROOT\n'), ((347, 370), 'numpy.empty_like', 'np.empty_like', (['test_in1'], {}), '(test_in1)\n', (360, 370), True, 'import numpy as np\n')]
#from distutils.core import setup from setuptools import setup, find_packages from distutils.extension import Extension import re import os import codecs here = os.path.abspath(os.path.dirname(__file__)) def read(*parts): # intentionally *not* adding an encoding option to open, See: # https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690 with codecs.open(os.path.join(here, *parts), 'r') as fp: return fp.read() def find_version(*file_paths): version_file = read(*file_paths) version_match = re.search( r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M, ) if version_match: return version_match.group(1) raise RuntimeError("Unable to find version string.") try: from Cython.Distutils import build_ext except ImportError: use_cython = False else: use_cython = True cmdclass = { } ext_modules = [ ] if use_cython: ext_modules += [ Extension("deepgmap.data_preprocessing_tools.seq_to_binary2", [ "deepgmap/data_preprocessing_tools/seq_to_binary2.pyx" ]), #Extension("data_preprocessing_tools.queue", [ "deepgmap/data_preprocessing_tools/queue.pyx" ],libraries=["calg"]), Extension("deepgmap.post_train_tools.cython_util", [ "deepgmap/post_train_tools/cython_util.pyx" ]), ] cmdclass.update({ 'build_ext': build_ext }) else: ext_modules += [ Extension("deepgmap.data_preprocessing_tools.seq_to_binary2", [ "deepgmap/data_preprocessing_tools/seq_to_binary2.c" ]), Extension("deepgmap.post_train_tools.cython_util", [ "deepgmap/post_train_tools/cython_util.c" ]), ] #print(find_version("deepgmap", "__init__.py")) setup( name='DeepGMAP', #version=VERSION, version=find_version("deepgmap", "__init__.py"), description='Learning and predicting gene regulatory sequences in genomes', author='<NAME>', author_email='<EMAIL>', url='', packages=['deepgmap','deepgmap.train','deepgmap.network_constructors','deepgmap.post_train_tools','deepgmap.data_preprocessing_tools','deepgmap.misc'], #packages=find_packages('deepgmap'), #packages=['deepgmap.'], package_dir={'DeepGMAP':'deepgmap'}, #package_data = { # '': ['enhancer_prediction/*', '*.pyx', '*.pxd', '*.c', '*.h'], #}, scripts=['bin/deepgmap', ], #packages=find_packages(), cmdclass = cmdclass, ext_modules=ext_modules, classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Console', 'Intended Audience :: Developers', 'Programming Language :: Python :: 3.6', 'License :: OSI Approved :: Apache Software License ', 'Operating System :: POSIX :: Linux', 'Topic :: Scientific/Engineering :: Bio-Informatics', 'Topic :: Scientific/Engineering :: Artificial Intelligence', ], install_requires=['tensorflow>=1.15', 'numpy', 'matplotlib', 'sklearn', 'tornado', 'natsort', 'psutil', 'pyBigWig'], long_description=open('README.rst').read(), )
[ "os.path.dirname", "os.path.join", "distutils.extension.Extension", "re.search" ]
[((177, 202), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (192, 202), False, 'import os\n'), ((541, 614), 're.search', 're.search', (['"""^__version__ = [\'\\\\"]([^\'\\\\"]*)[\'\\\\"]"""', 'version_file', 're.M'], {}), '(\'^__version__ = [\\\'\\\\"]([^\\\'\\\\"]*)[\\\'\\\\"]\', version_file, re.M)\n', (550, 614), False, 'import re\n'), ((958, 1082), 'distutils.extension.Extension', 'Extension', (['"""deepgmap.data_preprocessing_tools.seq_to_binary2"""', "['deepgmap/data_preprocessing_tools/seq_to_binary2.pyx']"], {}), "('deepgmap.data_preprocessing_tools.seq_to_binary2', [\n 'deepgmap/data_preprocessing_tools/seq_to_binary2.pyx'])\n", (967, 1082), False, 'from distutils.extension import Extension\n'), ((1222, 1324), 'distutils.extension.Extension', 'Extension', (['"""deepgmap.post_train_tools.cython_util"""', "['deepgmap/post_train_tools/cython_util.pyx']"], {}), "('deepgmap.post_train_tools.cython_util', [\n 'deepgmap/post_train_tools/cython_util.pyx'])\n", (1231, 1324), False, 'from distutils.extension import Extension\n'), ((1412, 1534), 'distutils.extension.Extension', 'Extension', (['"""deepgmap.data_preprocessing_tools.seq_to_binary2"""', "['deepgmap/data_preprocessing_tools/seq_to_binary2.c']"], {}), "('deepgmap.data_preprocessing_tools.seq_to_binary2', [\n 'deepgmap/data_preprocessing_tools/seq_to_binary2.c'])\n", (1421, 1534), False, 'from distutils.extension import Extension\n'), ((1541, 1641), 'distutils.extension.Extension', 'Extension', (['"""deepgmap.post_train_tools.cython_util"""', "['deepgmap/post_train_tools/cython_util.c']"], {}), "('deepgmap.post_train_tools.cython_util', [\n 'deepgmap/post_train_tools/cython_util.c'])\n", (1550, 1641), False, 'from distutils.extension import Extension\n'), ((386, 412), 'os.path.join', 'os.path.join', (['here', '*parts'], {}), '(here, *parts)\n', (398, 412), False, 'import os\n')]
import logging from flask import Flask from flask_sqlalchemy import SQLAlchemy as _BaseSQLAlchemy from flask_migrate import Migrate from flask_cors import CORS from flask_talisman import Talisman from flask_ipban import IpBan from config import Config, get_logger_handler # database class SQLAlchemy(_BaseSQLAlchemy): def apply_pool_defaults(self, app, options): super(SQLAlchemy, self).apply_pool_defaults(app, options) options["pool_pre_ping"] = True db = SQLAlchemy() migrate = Migrate() cors = CORS() talisman = Talisman() global_config = Config() ip_ban = IpBan(ban_seconds=200, ban_count=global_config.IP_BAN_LIST_COUNT) # logging logger = logging.getLogger('frontend') def create_app(config_class=None): app = Flask(__name__) if config_class is None: config_class = Config() app.config.from_object(config_class) db.init_app(app) migrate.init_app(app, db) # TODO - Refine and update when build pipeline is stable. Get from global_config cors.init_app(app, origins=["http://localhost:5000", "http://localhost:3000", '*']) if app.config["ENV"] in ("staging", "production"): # Secure the application and implement best practice https redirects and a content security policy talisman.init_app(app, content_security_policy=None) # ip_ban.init_app(app) # ip_ban.load_nuisances(global_config.IP_BAN_REGEX_FILE) from api.routes import bp as api_bp app.register_blueprint(api_bp) if not app.debug and not app.testing: app.logger.addHandler(get_logger_handler()) @app.teardown_appcontext def shutdown_session(exception=None): db.session.remove() return app from api import models
[ "logging.getLogger", "flask_cors.CORS", "flask.Flask", "config.Config", "flask_talisman.Talisman", "flask_migrate.Migrate", "flask_ipban.IpBan", "config.get_logger_handler" ]
[((504, 513), 'flask_migrate.Migrate', 'Migrate', ([], {}), '()\n', (511, 513), False, 'from flask_migrate import Migrate\n'), ((521, 527), 'flask_cors.CORS', 'CORS', ([], {}), '()\n', (525, 527), False, 'from flask_cors import CORS\n'), ((539, 549), 'flask_talisman.Talisman', 'Talisman', ([], {}), '()\n', (547, 549), False, 'from flask_talisman import Talisman\n'), ((566, 574), 'config.Config', 'Config', ([], {}), '()\n', (572, 574), False, 'from config import Config, get_logger_handler\n'), ((584, 649), 'flask_ipban.IpBan', 'IpBan', ([], {'ban_seconds': '(200)', 'ban_count': 'global_config.IP_BAN_LIST_COUNT'}), '(ban_seconds=200, ban_count=global_config.IP_BAN_LIST_COUNT)\n', (589, 649), False, 'from flask_ipban import IpBan\n'), ((669, 698), 'logging.getLogger', 'logging.getLogger', (['"""frontend"""'], {}), "('frontend')\n", (686, 698), False, 'import logging\n'), ((746, 761), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (751, 761), False, 'from flask import Flask\n'), ((814, 822), 'config.Config', 'Config', ([], {}), '()\n', (820, 822), False, 'from config import Config, get_logger_handler\n'), ((1554, 1574), 'config.get_logger_handler', 'get_logger_handler', ([], {}), '()\n', (1572, 1574), False, 'from config import Config, get_logger_handler\n')]
from pathlib import Path from requests.auth import _basic_auth_str import pytest from bravado_core.formatter import SwaggerFormat, NO_OP from gc3_query.lib.gc3_config import GC3Config, IDMCredential TEST_BASE_DIR: Path = Path(__file__).parent.joinpath("GC3Config") config_dir = TEST_BASE_DIR.joinpath("config") def test_setup(): assert TEST_BASE_DIR.exists() assert config_dir.exists() def test_init(): gc3_config = GC3Config() assert 'gc30003' in gc3_config['idm']['domains'] assert gc3_config.user.cloud_username == '<EMAIL>' def test_set_credential(): gc3_config = GC3Config() assert 'gc3test' in gc3_config['idm']['domains'] assert gc3_config.user.cloud_username == '<EMAIL>' credential = gc3_config.set_credential(idm_domain_name='gc3test', password='<PASSWORD>' ) assert credential assert credential.password == '<PASSWORD>' assert credential.idm_domain_name == 'gc3test' def test_set_gc3pilot_credential(): gc3_config = GC3Config() assert 'gc3pilot' in gc3_config['idm']['domains'] assert gc3_config.user.cloud_username == '<EMAIL>' credential = gc3_config.set_credential(idm_domain_name='gc3pilot', password='<PASSWORD>!' ) assert credential assert credential.password == '<PASSWORD>!' assert credential.idm_domain_name == 'gc3pilot' @pytest.fixture() def get_credential_setup() -> IDMCredential: gc3_config = GC3Config() assert 'gc3test' in gc3_config['idm']['domains'] assert gc3_config.user.cloud_username == '<EMAIL>' credential = gc3_config.set_credential(idm_domain_name='gc3test', password='<PASSWORD>' ) yield (credential) def test_load_atoml_files_individually(get_credential_setup): credential = get_credential_setup gc3_config = GC3Config() assert 'gc3test' in gc3_config['idm']['domains'] assert gc3_config.user.cloud_username == '<EMAIL>' check_credential = gc3_config.get_credential(idm_domain_name='gc3test') assert check_credential==credential def test_credential_basic_auth(get_credential_setup): credential = get_credential_setup credential_expected_basic_auth =_basic_auth_str('<EMAIL>', '<PASSWORD>') gc3_config = GC3Config() check_credential = gc3_config.get_credential(idm_domain_name='gc30003') assert gc3_config.user.cloud_username == '<EMAIL>' assert check_credential.idm_domain_name=='gc30003' assert check_credential.basic_auth_str.startswith('Basic') assert check_credential.basic_auth_str != credential.basic_auth_str def test_get_main_credential(): gc3_config = GC3Config() check_credential = gc3_config.get_credential(idm_domain_name='gc30003') assert gc3_config.user.cloud_username == '<EMAIL>' assert check_credential.idm_domain_name=='gc30003' # @pytest.fixture() # def get_bravado_config_setup(): # gc3_config = GC3Config() # assert 'iaas_classic' in gc3_config # yield (gc3_config) # # def test_bravado_client_config(get_bravado_config_setup): # gc3_config = get_bravado_config_setup # assert 'iaas_classic' in gc3_config # bravado_client_config = gc3_config.bravado_client_config # assert bravado_client_config # assert 'formats' not in bravado_client_config # assert not 'include_missing_properties' in bravado_client_config # assert 'also_return_response' in bravado_client_config # bravado_client_config_2 = gc3_config.bravado_client_config # assert bravado_client_config==bravado_client_config_2 # assert bravado_client_config is not bravado_client_config_2 # assert isinstance(bravado_client_config, dict) # # def test_bravado_core_config(get_bravado_config_setup): # gc3_config = get_bravado_config_setup # assert 'iaas_classic' in gc3_config # bravado_core_config = gc3_config.bravado_core_config # 
assert bravado_core_config # assert 'formats' in bravado_core_config # assert 'include_missing_properties' in bravado_core_config # assert not 'also_return_response' in bravado_core_config # bravado_core_config_2 = gc3_config.bravado_core_config # assert bravado_core_config==bravado_core_config_2 # assert bravado_core_config is not bravado_core_config_2 # assert isinstance(bravado_core_config, dict) # assert isinstance(bravado_core_config['formats'], list) # # # # def test_bravado_config(get_bravado_config_setup): # gc3_config = get_bravado_config_setup # assert 'iaas_classic' in gc3_config # bravado_config = gc3_config.bravado_config # assert bravado_config # assert 'formats' in bravado_config # assert 'include_missing_properties' in bravado_config # assert 'also_return_response' in bravado_config # bravado_config_2 = gc3_config.bravado_config # assert bravado_config==bravado_config_2 # assert bravado_config is not bravado_config_2 # assert isinstance(bravado_config, dict) # assert isinstance(bravado_config['formats'], list) # @pytest.fixture() def get_constants_setup(): gc3_config = GC3Config() assert 'iaas_classic' in gc3_config yield (gc3_config) def test_open_api_catalog_dir(get_constants_setup): gc3_config = get_constants_setup open_api_catalog_dir = gc3_config.OPEN_API_CATALOG_DIR assert open_api_catalog_dir # def test_BRAVADO_CONFIG(get_constants_setup): # gc3_config = get_constants_setup # bravado_config = gc3_config.BRAVADO_CONFIG # assert bravado_config # assert 'formats' in bravado_config # assert 'include_missing_properties' in bravado_config # assert 'also_return_response' in bravado_config # assert isinstance(bravado_config, dict) # assert isinstance(bravado_config['formats'], list) # assert bravado_config['formats'] # formats = [f.format for f in bravado_config['formats']] # assert 'json-bool' in formats # assert all([isinstance(i , SwaggerFormat) for i in bravado_config['formats']])
[ "pytest.fixture", "requests.auth._basic_auth_str", "pathlib.Path", "gc3_query.lib.gc3_config.GC3Config" ]
[((1331, 1347), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1345, 1347), False, 'import pytest\n'), ((4936, 4952), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (4950, 4952), False, 'import pytest\n'), ((432, 443), 'gc3_query.lib.gc3_config.GC3Config', 'GC3Config', ([], {}), '()\n', (441, 443), False, 'from gc3_query.lib.gc3_config import GC3Config, IDMCredential\n'), ((598, 609), 'gc3_query.lib.gc3_config.GC3Config', 'GC3Config', ([], {}), '()\n', (607, 609), False, 'from gc3_query.lib.gc3_config import GC3Config, IDMCredential\n'), ((987, 998), 'gc3_query.lib.gc3_config.GC3Config', 'GC3Config', ([], {}), '()\n', (996, 998), False, 'from gc3_query.lib.gc3_config import GC3Config, IDMCredential\n'), ((1410, 1421), 'gc3_query.lib.gc3_config.GC3Config', 'GC3Config', ([], {}), '()\n', (1419, 1421), False, 'from gc3_query.lib.gc3_config import GC3Config, IDMCredential\n'), ((1766, 1777), 'gc3_query.lib.gc3_config.GC3Config', 'GC3Config', ([], {}), '()\n', (1775, 1777), False, 'from gc3_query.lib.gc3_config import GC3Config, IDMCredential\n'), ((2131, 2171), 'requests.auth._basic_auth_str', '_basic_auth_str', (['"""<EMAIL>"""', '"""<PASSWORD>"""'], {}), "('<EMAIL>', '<PASSWORD>')\n", (2146, 2171), False, 'from requests.auth import _basic_auth_str\n'), ((2189, 2200), 'gc3_query.lib.gc3_config.GC3Config', 'GC3Config', ([], {}), '()\n', (2198, 2200), False, 'from gc3_query.lib.gc3_config import GC3Config, IDMCredential\n'), ((2573, 2584), 'gc3_query.lib.gc3_config.GC3Config', 'GC3Config', ([], {}), '()\n', (2582, 2584), False, 'from gc3_query.lib.gc3_config import GC3Config, IDMCredential\n'), ((4997, 5008), 'gc3_query.lib.gc3_config.GC3Config', 'GC3Config', ([], {}), '()\n', (5006, 5008), False, 'from gc3_query.lib.gc3_config import GC3Config, IDMCredential\n'), ((223, 237), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (227, 237), False, 'from pathlib import Path\n')]
import argparse import matplotlib.pyplot as plt import torch from pytorch_warmup import * def get_rates(warmup_cls, beta2, max_step): rates = [] p = torch.nn.Parameter(torch.arange(10, dtype=torch.float32)) optimizer = torch.optim.Adam([{'params': p}], lr=1.0, betas=(0.9, beta2)) lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda step: 1.0) warmup_scheduler = warmup_cls(optimizer) for step in range(1, max_step+1): rates.append(optimizer.param_groups[0]['lr']) optimizer.zero_grad() optimizer.step() lr_scheduler.step() warmup_scheduler.dampen() return rates parser = argparse.ArgumentParser(description='Warmup schedule') parser.add_argument('--output', type=str, default='none', choices=['none', 'png', 'pdf'], help='Output file type (default: none)') args = parser.parse_args() beta2 = 0.999 max_step = 3000 plt.plot(range(1, max_step+1), get_rates(RAdamWarmup, beta2, max_step), label='RAdam') plt.plot(range(1, max_step+1), get_rates(UntunedExponentialWarmup, beta2, max_step), label='Untuned Exponential') plt.plot(range(1, max_step+1), get_rates(UntunedLinearWarmup, beta2, max_step), label='Untuned Linear') plt.legend() plt.title('Warmup Schedule') plt.xlabel('Iteration') plt.ylabel(r'Warmup factor $(\omega_t)$') if args.output == 'none': plt.show() else: plt.savefig(f'warmup_schedule.{args.output}')
[ "torch.optim.Adam", "torch.optim.lr_scheduler.LambdaLR", "matplotlib.pyplot.savefig", "argparse.ArgumentParser", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "torch.arange", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "matplotlib.pyplot.show" ]
[((669, 723), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Warmup schedule"""'}), "(description='Warmup schedule')\n", (692, 723), False, 'import argparse\n'), ((1259, 1271), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1269, 1271), True, 'import matplotlib.pyplot as plt\n'), ((1272, 1300), 'matplotlib.pyplot.title', 'plt.title', (['"""Warmup Schedule"""'], {}), "('Warmup Schedule')\n", (1281, 1300), True, 'import matplotlib.pyplot as plt\n'), ((1301, 1324), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (1311, 1324), True, 'import matplotlib.pyplot as plt\n'), ((1325, 1366), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Warmup factor $(\\\\omega_t)$"""'], {}), "('Warmup factor $(\\\\omega_t)$')\n", (1335, 1366), True, 'import matplotlib.pyplot as plt\n'), ((233, 294), 'torch.optim.Adam', 'torch.optim.Adam', (["[{'params': p}]"], {'lr': '(1.0)', 'betas': '(0.9, beta2)'}), "([{'params': p}], lr=1.0, betas=(0.9, beta2))\n", (249, 294), False, 'import torch\n'), ((314, 386), 'torch.optim.lr_scheduler.LambdaLR', 'torch.optim.lr_scheduler.LambdaLR', (['optimizer'], {'lr_lambda': '(lambda step: 1.0)'}), '(optimizer, lr_lambda=lambda step: 1.0)\n', (347, 386), False, 'import torch\n'), ((1397, 1407), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1405, 1407), True, 'import matplotlib.pyplot as plt\n'), ((1418, 1463), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""warmup_schedule.{args.output}"""'], {}), "(f'warmup_schedule.{args.output}')\n", (1429, 1463), True, 'import matplotlib.pyplot as plt\n'), ((178, 215), 'torch.arange', 'torch.arange', (['(10)'], {'dtype': 'torch.float32'}), '(10, dtype=torch.float32)\n', (190, 215), False, 'import torch\n')]
"""\ PROMORT example. """ import argparse import random import sys import pyecvl.ecvl as ecvl import pyeddl.eddl as eddl from pyeddl.tensor import Tensor import models def VGG16(in_layer, num_classes): x = in_layer x = eddl.ReLu(eddl.Conv(x, 64, [3, 3])) x = eddl.MaxPool(eddl.ReLu(eddl.Conv(x, 64, [3, 3])), [2, 2], [2, 2]) x = eddl.ReLu(eddl.Conv(x, 128, [3, 3])) x = eddl.MaxPool(eddl.ReLu(eddl.Conv(x, 128, [3, 3])), [2, 2], [2, 2]) x = eddl.ReLu(eddl.Conv(x, 256, [3, 3])) x = eddl.ReLu(eddl.Conv(x, 256, [3, 3])) x = eddl.MaxPool(eddl.ReLu(eddl.Conv(x, 256, [3, 3])), [2, 2], [2, 2]) x = eddl.ReLu(eddl.Conv(x, 512, [3, 3])) x = eddl.ReLu(eddl.Conv(x, 512, [3, 3])) x = eddl.MaxPool(eddl.ReLu(eddl.Conv(x, 512, [3, 3])), [2, 2], [2, 2]) x = eddl.ReLu(eddl.Conv(x, 512, [3, 3])) x = eddl.ReLu(eddl.Conv(x, 512, [3, 3])) x = eddl.MaxPool(eddl.ReLu(eddl.Conv(x, 512, [3, 3])), [2, 2], [2, 2]) x = eddl.Reshape(x, [-1]) x = eddl.ReLu(eddl.Dense(x, 256)) x = eddl.Softmax(eddl.Dense(x, num_classes)) return x def main(args): num_classes = 2 size = [256, 256] # size of images in_ = eddl.Input([3, size[0], size[1]]) out = models.VGG16_promort(in_, num_classes) net = eddl.Model([in_], [out]) eddl.build( net, eddl.rmsprop(1e-6), #eddl.sgd(0.001, 0.9), ["soft_cross_entropy"], ["categorical_accuracy"], eddl.CS_GPU([1], mem="low_mem") if args.gpu else eddl.CS_CPU() ) eddl.summary(net) eddl.setlogfile(net, "promort_VGG16_classification") training_augs = ecvl.SequentialAugmentationContainer([ ecvl.AugResizeDim(size) #ecvl.AugMirror(.5), #ecvl.AugFlip(.5), #ecvl.AugRotate([-180, 180]), #ecvl.AugAdditivePoissonNoise([0, 10]), #ecvl.AugGammaContrast([0.5, 1.5]), #ecvl.AugGaussianBlur([0, 0.8]), #ecvl.AugCoarseDropout([0, 0.3], [0.02, 0.05], 0.5) ]) validation_augs = ecvl.SequentialAugmentationContainer([ ecvl.AugResizeDim(size), ]) dataset_augs = ecvl.DatasetAugmentations( [training_augs, validation_augs, None] ) print("Reading dataset") #d = ecvl.DLDataset(args.in_ds, args.batch_size) d = ecvl.DLDataset(args.in_ds, args.batch_size, dataset_augs) x = Tensor([args.batch_size, d.n_channels_, size[0], size[1]]) y = Tensor([args.batch_size, len(d.classes_)]) num_samples_train = len(d.GetSplit()) num_batches_train = num_samples_train // args.batch_size d.SetSplit(ecvl.SplitType.validation) num_samples_val = len(d.GetSplit()) num_batches_val = num_samples_val // args.batch_size indices = list(range(args.batch_size)) metric = eddl.getMetric("categorical_accuracy") print("Starting training") ### Main loop across epochs for e in range(args.epochs): print("Epoch {:d}/{:d} - Training".format(e + 1, args.epochs), flush=True) if args.out_dir: current_path = os.path.join(args.out_dir, "Epoch_%d" % e) for c in d.classes_: c_dir = os.path.join(current_path, c) os.makedirs(c_dir, exist_ok=True) d.SetSplit(ecvl.SplitType.training) eddl.reset_loss(net) total_metric = [] s = d.GetSplit() random.shuffle(s) d.split_.training_ = s d.ResetAllBatches() ### Looping across batches of training data for b in range(num_batches_train): print("Epoch {:d}/{:d} (batch {:d}/{:d}) - ".format( e + 1, args.epochs, b + 1, num_batches_train ), end="", flush=True) d.LoadBatch(x, y) x.div_(255.0) tx, ty = [x], [y] #print (tx[0].info()) eddl.train_batch(net, tx, ty, indices) #eddl.print_loss(net, b) instances = (b+1) * args.batch_size print ("loss = %.3f, acc = %.3f" % (net.fiterr[0]/instances, net.fiterr[1]/instances)) #print() print("Saving weights") eddl.save(net, "promort_checkpoint_%s.bin" % e, "bin") ### Evaluation on validation set print("Epoch %d/%d 
- Evaluation" % (e + 1, args.epochs), flush=True) d.SetSplit(ecvl.SplitType.validation) for b in range(num_batches_val): n = 0 print("Epoch {:d}/{:d} (batch {:d}/{:d}) - ".format( e + 1, args.epochs, b + 1, num_batches_val ), end="", flush=True) d.LoadBatch(x, y) x.div_(255.0) eddl.forward(net, [x]) output = eddl.getOutput(out) sum_ = 0.0 for k in range(args.batch_size): result = output.select([str(k)]) target = y.select([str(k)]) ca = metric.value(target, result) total_metric.append(ca) sum_ += ca if args.out_dir: result_a = np.array(result, copy=False) target_a = np.array(target, copy=False) classe = np.argmax(result_a).item() gt_class = np.argmax(target_a).item() single_image = x.select([str(k)]) img_t = ecvl.TensorToView(single_image) img_t.colortype_ = ecvl.ColorType.BGR single_image.mult_(255.) filename = d.samples_[d.GetSplit()[n]].location_[0] head, tail = os.path.splitext(os.path.basename(filename)) bname = "%s_gt_class_%s.png" % (head, gt_class) cur_path = os.path.join( current_path, d.classes_[classe], bname ) ecvl.ImWrite(cur_path, img_t) n += 1 print("categorical_accuracy:", sum_ / args.batch_size) total_avg = sum(total_metric) / len(total_metric) print("Total categorical accuracy:", total_avg) if __name__ == "__main__": parser = argparse.ArgumentParser(description=__doc__) parser.add_argument("in_ds", metavar="INPUT_DATASET") parser.add_argument("--epochs", type=int, metavar="INT", default=50) parser.add_argument("--batch-size", type=int, metavar="INT", default=32) parser.add_argument("--gpu", action="store_true") parser.add_argument("--out-dir", metavar="DIR", help="if set, save images in this directory") main(parser.parse_args())
[ "pyeddl.eddl.Conv", "pyecvl.ecvl.TensorToView", "pyeddl.tensor.Tensor", "pyecvl.ecvl.DLDataset", "pyeddl.eddl.save", "pyeddl.eddl.setlogfile", "pyecvl.ecvl.DatasetAugmentations", "pyeddl.eddl.getMetric", "pyeddl.eddl.Input", "pyeddl.eddl.train_batch", "argparse.ArgumentParser", "pyeddl.eddl.Model", "models.VGG16_promort", "pyeddl.eddl.Dense", "pyecvl.ecvl.ImWrite", "random.shuffle", "pyeddl.eddl.CS_GPU", "pyeddl.eddl.getOutput", "pyeddl.eddl.reset_loss", "pyeddl.eddl.Reshape", "pyecvl.ecvl.AugResizeDim", "pyeddl.eddl.forward", "pyeddl.eddl.summary", "pyeddl.eddl.rmsprop", "pyeddl.eddl.CS_CPU" ]
[((965, 986), 'pyeddl.eddl.Reshape', 'eddl.Reshape', (['x', '[-1]'], {}), '(x, [-1])\n', (977, 986), True, 'import pyeddl.eddl as eddl\n'), ((1176, 1209), 'pyeddl.eddl.Input', 'eddl.Input', (['[3, size[0], size[1]]'], {}), '([3, size[0], size[1]])\n', (1186, 1209), True, 'import pyeddl.eddl as eddl\n'), ((1220, 1258), 'models.VGG16_promort', 'models.VGG16_promort', (['in_', 'num_classes'], {}), '(in_, num_classes)\n', (1240, 1258), False, 'import models\n'), ((1269, 1293), 'pyeddl.eddl.Model', 'eddl.Model', (['[in_]', '[out]'], {}), '([in_], [out])\n', (1279, 1293), True, 'import pyeddl.eddl as eddl\n'), ((1538, 1555), 'pyeddl.eddl.summary', 'eddl.summary', (['net'], {}), '(net)\n', (1550, 1555), True, 'import pyeddl.eddl as eddl\n'), ((1560, 1612), 'pyeddl.eddl.setlogfile', 'eddl.setlogfile', (['net', '"""promort_VGG16_classification"""'], {}), "(net, 'promort_VGG16_classification')\n", (1575, 1612), True, 'import pyeddl.eddl as eddl\n'), ((2133, 2198), 'pyecvl.ecvl.DatasetAugmentations', 'ecvl.DatasetAugmentations', (['[training_augs, validation_augs, None]'], {}), '([training_augs, validation_augs, None])\n', (2158, 2198), True, 'import pyecvl.ecvl as ecvl\n'), ((2304, 2361), 'pyecvl.ecvl.DLDataset', 'ecvl.DLDataset', (['args.in_ds', 'args.batch_size', 'dataset_augs'], {}), '(args.in_ds, args.batch_size, dataset_augs)\n', (2318, 2361), True, 'import pyecvl.ecvl as ecvl\n'), ((2370, 2428), 'pyeddl.tensor.Tensor', 'Tensor', (['[args.batch_size, d.n_channels_, size[0], size[1]]'], {}), '([args.batch_size, d.n_channels_, size[0], size[1]])\n', (2376, 2428), False, 'from pyeddl.tensor import Tensor\n'), ((2784, 2822), 'pyeddl.eddl.getMetric', 'eddl.getMetric', (['"""categorical_accuracy"""'], {}), "('categorical_accuracy')\n", (2798, 2822), True, 'import pyeddl.eddl as eddl\n'), ((6124, 6168), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (6147, 6168), False, 'import argparse\n'), ((242, 266), 'pyeddl.eddl.Conv', 'eddl.Conv', (['x', '(64)', '[3, 3]'], {}), '(x, 64, [3, 3])\n', (251, 266), True, 'import pyeddl.eddl as eddl\n'), ((360, 385), 'pyeddl.eddl.Conv', 'eddl.Conv', (['x', '(128)', '[3, 3]'], {}), '(x, 128, [3, 3])\n', (369, 385), True, 'import pyeddl.eddl as eddl\n'), ((480, 505), 'pyeddl.eddl.Conv', 'eddl.Conv', (['x', '(256)', '[3, 3]'], {}), '(x, 256, [3, 3])\n', (489, 505), True, 'import pyeddl.eddl as eddl\n'), ((525, 550), 'pyeddl.eddl.Conv', 'eddl.Conv', (['x', '(256)', '[3, 3]'], {}), '(x, 256, [3, 3])\n', (534, 550), True, 'import pyeddl.eddl as eddl\n'), ((645, 670), 'pyeddl.eddl.Conv', 'eddl.Conv', (['x', '(512)', '[3, 3]'], {}), '(x, 512, [3, 3])\n', (654, 670), True, 'import pyeddl.eddl as eddl\n'), ((690, 715), 'pyeddl.eddl.Conv', 'eddl.Conv', (['x', '(512)', '[3, 3]'], {}), '(x, 512, [3, 3])\n', (699, 715), True, 'import pyeddl.eddl as eddl\n'), ((810, 835), 'pyeddl.eddl.Conv', 'eddl.Conv', (['x', '(512)', '[3, 3]'], {}), '(x, 512, [3, 3])\n', (819, 835), True, 'import pyeddl.eddl as eddl\n'), ((855, 880), 'pyeddl.eddl.Conv', 'eddl.Conv', (['x', '(512)', '[3, 3]'], {}), '(x, 512, [3, 3])\n', (864, 880), True, 'import pyeddl.eddl as eddl\n'), ((1005, 1023), 'pyeddl.eddl.Dense', 'eddl.Dense', (['x', '(256)'], {}), '(x, 256)\n', (1015, 1023), True, 'import pyeddl.eddl as eddl\n'), ((1046, 1072), 'pyeddl.eddl.Dense', 'eddl.Dense', (['x', 'num_classes'], {}), '(x, num_classes)\n', (1056, 1072), True, 'import pyeddl.eddl as eddl\n'), ((1331, 1350), 'pyeddl.eddl.rmsprop', 'eddl.rmsprop', (['(1e-06)'], {}), 
'(1e-06)\n', (1343, 1350), True, 'import pyeddl.eddl as eddl\n'), ((3307, 3327), 'pyeddl.eddl.reset_loss', 'eddl.reset_loss', (['net'], {}), '(net)\n', (3322, 3327), True, 'import pyeddl.eddl as eddl\n'), ((3387, 3404), 'random.shuffle', 'random.shuffle', (['s'], {}), '(s)\n', (3401, 3404), False, 'import random\n'), ((4147, 4201), 'pyeddl.eddl.save', 'eddl.save', (['net', "('promort_checkpoint_%s.bin' % e)", '"""bin"""'], {}), "(net, 'promort_checkpoint_%s.bin' % e, 'bin')\n", (4156, 4201), True, 'import pyeddl.eddl as eddl\n'), ((299, 323), 'pyeddl.eddl.Conv', 'eddl.Conv', (['x', '(64)', '[3, 3]'], {}), '(x, 64, [3, 3])\n', (308, 323), True, 'import pyeddl.eddl as eddl\n'), ((418, 443), 'pyeddl.eddl.Conv', 'eddl.Conv', (['x', '(128)', '[3, 3]'], {}), '(x, 128, [3, 3])\n', (427, 443), True, 'import pyeddl.eddl as eddl\n'), ((583, 608), 'pyeddl.eddl.Conv', 'eddl.Conv', (['x', '(256)', '[3, 3]'], {}), '(x, 256, [3, 3])\n', (592, 608), True, 'import pyeddl.eddl as eddl\n'), ((748, 773), 'pyeddl.eddl.Conv', 'eddl.Conv', (['x', '(512)', '[3, 3]'], {}), '(x, 512, [3, 3])\n', (757, 773), True, 'import pyeddl.eddl as eddl\n'), ((913, 938), 'pyeddl.eddl.Conv', 'eddl.Conv', (['x', '(512)', '[3, 3]'], {}), '(x, 512, [3, 3])\n', (922, 938), True, 'import pyeddl.eddl as eddl\n'), ((1456, 1487), 'pyeddl.eddl.CS_GPU', 'eddl.CS_GPU', (['[1]'], {'mem': '"""low_mem"""'}), "([1], mem='low_mem')\n", (1467, 1487), True, 'import pyeddl.eddl as eddl\n'), ((1505, 1518), 'pyeddl.eddl.CS_CPU', 'eddl.CS_CPU', ([], {}), '()\n', (1516, 1518), True, 'import pyeddl.eddl as eddl\n'), ((1685, 1708), 'pyecvl.ecvl.AugResizeDim', 'ecvl.AugResizeDim', (['size'], {}), '(size)\n', (1702, 1708), True, 'import pyecvl.ecvl as ecvl\n'), ((2077, 2100), 'pyecvl.ecvl.AugResizeDim', 'ecvl.AugResizeDim', (['size'], {}), '(size)\n', (2094, 2100), True, 'import pyecvl.ecvl as ecvl\n'), ((3861, 3899), 'pyeddl.eddl.train_batch', 'eddl.train_batch', (['net', 'tx', 'ty', 'indices'], {}), '(net, tx, ty, indices)\n', (3877, 3899), True, 'import pyeddl.eddl as eddl\n'), ((4653, 4675), 'pyeddl.eddl.forward', 'eddl.forward', (['net', '[x]'], {}), '(net, [x])\n', (4665, 4675), True, 'import pyeddl.eddl as eddl\n'), ((4697, 4716), 'pyeddl.eddl.getOutput', 'eddl.getOutput', (['out'], {}), '(out)\n', (4711, 4716), True, 'import pyeddl.eddl as eddl\n'), ((5344, 5375), 'pyecvl.ecvl.TensorToView', 'ecvl.TensorToView', (['single_image'], {}), '(single_image)\n', (5361, 5375), True, 'import pyecvl.ecvl as ecvl\n'), ((5848, 5877), 'pyecvl.ecvl.ImWrite', 'ecvl.ImWrite', (['cur_path', 'img_t'], {}), '(cur_path, img_t)\n', (5860, 5877), True, 'import pyecvl.ecvl as ecvl\n')]
from hawc_hal.maptree.map_tree import map_tree_factory from hawc_hal.response import hawc_response_factory import os from conftest import check_map_trees, check_responses def test_root_to_hdf_response(response): r = hawc_response_factory(response) test_filename = "response.hd5" # Make sure it doesn't exist yet, if it does,remove it if os.path.exists(test_filename): os.remove(test_filename) r.write(test_filename) # Try to open and use it r2 = hawc_response_factory(test_filename) check_responses(r, r2) os.remove(test_filename) def do_one_test_maptree(geminga_roi, geminga_maptree, fullsky=False): # Test both with a defined ROI and full sky (ROI is None) if fullsky: roi_ = None else: roi_ = geminga_roi m = map_tree_factory(geminga_maptree, roi_) test_filename = "maptree.hd5" # Make sure it doesn't exist yet, if it does,remove it if os.path.exists(test_filename): os.remove(test_filename) m.write(test_filename) # Try to open and use it m2 = map_tree_factory(test_filename, roi_) check_map_trees(m, m2) os.remove(test_filename) def test_root_to_hdf_maptree_roi(geminga_roi, geminga_maptree): do_one_test_maptree(geminga_roi, geminga_maptree, fullsky=False) def test_root_to_hdf_maptree_full_sky(geminga_roi, geminga_maptree): do_one_test_maptree(geminga_roi, geminga_maptree, fullsky=True)
[ "conftest.check_responses", "os.path.exists", "conftest.check_map_trees", "hawc_hal.response.hawc_response_factory", "hawc_hal.maptree.map_tree.map_tree_factory", "os.remove" ]
[((223, 254), 'hawc_hal.response.hawc_response_factory', 'hawc_response_factory', (['response'], {}), '(response)\n', (244, 254), False, 'from hawc_hal.response import hawc_response_factory\n'), ((358, 387), 'os.path.exists', 'os.path.exists', (['test_filename'], {}), '(test_filename)\n', (372, 387), False, 'import os\n'), ((489, 525), 'hawc_hal.response.hawc_response_factory', 'hawc_response_factory', (['test_filename'], {}), '(test_filename)\n', (510, 525), False, 'from hawc_hal.response import hawc_response_factory\n'), ((531, 553), 'conftest.check_responses', 'check_responses', (['r', 'r2'], {}), '(r, r2)\n', (546, 553), False, 'from conftest import check_map_trees, check_responses\n'), ((559, 583), 'os.remove', 'os.remove', (['test_filename'], {}), '(test_filename)\n', (568, 583), False, 'import os\n'), ((851, 890), 'hawc_hal.maptree.map_tree.map_tree_factory', 'map_tree_factory', (['geminga_maptree', 'roi_'], {}), '(geminga_maptree, roi_)\n', (867, 890), False, 'from hawc_hal.maptree.map_tree import map_tree_factory\n'), ((993, 1022), 'os.path.exists', 'os.path.exists', (['test_filename'], {}), '(test_filename)\n', (1007, 1022), False, 'import os\n'), ((1124, 1161), 'hawc_hal.maptree.map_tree.map_tree_factory', 'map_tree_factory', (['test_filename', 'roi_'], {}), '(test_filename, roi_)\n', (1140, 1161), False, 'from hawc_hal.maptree.map_tree import map_tree_factory\n'), ((1167, 1189), 'conftest.check_map_trees', 'check_map_trees', (['m', 'm2'], {}), '(m, m2)\n', (1182, 1189), False, 'from conftest import check_map_trees, check_responses\n'), ((1195, 1219), 'os.remove', 'os.remove', (['test_filename'], {}), '(test_filename)\n', (1204, 1219), False, 'import os\n'), ((397, 421), 'os.remove', 'os.remove', (['test_filename'], {}), '(test_filename)\n', (406, 421), False, 'import os\n'), ((1032, 1056), 'os.remove', 'os.remove', (['test_filename'], {}), '(test_filename)\n', (1041, 1056), False, 'import os\n')]
from __future__ import absolute_import from io import BytesIO import zstd from .base import BaseCompressor, BaseDecompressor from ..protocol import CompressionMethod, CompressionMethodByte from ..reader import read_binary_uint32 from ..writer import write_binary_uint32, write_binary_uint8 class Compressor(BaseCompressor): method = CompressionMethod.ZSTD method_byte = CompressionMethodByte.ZSTD def get_compressed_data(self, extra_header_size): rv = BytesIO() data = self.get_value() compressed = zstd.compress(data) header_size = extra_header_size + 4 + 4 # sizes write_binary_uint32(header_size + len(compressed), rv) write_binary_uint32(len(data), rv) rv.write(compressed) return rv.getvalue() class Decompressor(BaseDecompressor): method = CompressionMethod.ZSTD method_byte = CompressionMethodByte.ZSTD def get_decompressed_data(self, method_byte, compressed_hash, extra_header_size): size_with_header = read_binary_uint32(self.stream) compressed_size = size_with_header - extra_header_size - 4 compressed = BytesIO(self.stream.read(compressed_size)) block_check = BytesIO() write_binary_uint8(method_byte, block_check) write_binary_uint32(size_with_header, block_check) block_check.write(compressed.getvalue()) self.check_hash(block_check.getvalue(), compressed_hash) compressed = compressed.read(compressed_size - 4) return zstd.decompress(compressed)
[ "zstd.compress", "io.BytesIO", "zstd.decompress" ]
[((477, 486), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (484, 486), False, 'from io import BytesIO\n'), ((541, 560), 'zstd.compress', 'zstd.compress', (['data'], {}), '(data)\n', (554, 560), False, 'import zstd\n'), ((1237, 1246), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (1244, 1246), False, 'from io import BytesIO\n'), ((1549, 1576), 'zstd.decompress', 'zstd.decompress', (['compressed'], {}), '(compressed)\n', (1564, 1576), False, 'import zstd\n')]
import math def main(): print(""" \tComsole by MumuNiMochii version beta 1.6.23 \t\"Originally made with C\" \tMAIN MENU \tWhat do you want to execute and evaluate? \t1.) Add two addends \t2.) Subtract a minuend from its subtrahend \t3.) Multiply a multiplicand to its multiplier \t4.) Divide a dividend to its divisor \t5.) Raise to power a base number \t6.) Get the square root of a number \t7.) Compare two numbers \t8.) Compare three numbers \t9.) Auto-summation up to inputted value \t10.) Auto-factorial up to inputted value \t0.) Exit """) opt = int(input("\t\tEnter the number of your choice: ")) if opt == 1: def add(): print("\n\tADD VALUES") x = float(input("\t1.) Enter a first value: ")) y = float(input("\t2.) Enter an second value: ")) print("\t3.) The number " + str(x) + " is added by " + str(y) + ", and is equals to " + str(float(x + y))) add() elif opt == 2: def sub(): print("\n\tSUBTRACT VALUES") x = float(input("\t1.) Enter a first value: ")) y = float(input("\t2.) Enter an second value: ")) print("\t3.) The number " + str(x) + " is subtracted by " + str(y) + ", and is equals to " + str(float(x-y))) sub() elif opt == 3: def mul(): print("\n\tMULTIPLY VALUES") x = float(input("\t1.) Enter a first value: ")) y = float(input("\t2.) Enter an second value: ")) print("\t3.) The number "+str(x)+" is multiplied by "+str(y)+", and is equals to "+str(float(x*y))) mul() elif opt == 4: def div(): print("\n\tDIVIDE VALUES") x = float(input("\t1.) Enter a first value: ")) y = float(input("\t2.) Enter an second value: ")) print("\t3.) The number "+str(x)+" is divided by "+str(y)+", and is equals to "+str(float(x/y))) div() elif opt == 5: def pow(): print("\n\tPOWERED VALUE") x = float(input("\t1.) Enter a base value: ")) y = int(input("\t2.) Enter an exponent value: ")) print("\t3.) The number "+str(x)+" is raised to "+str(y)+", and is equals to "+str(math.pow(x, y))+".") pow() elif opt == 6: def sqrt(): print("\n\tRADICAL VALUE") x = float(input("\t1.) Enter a value: ")) y = math.sqrt(x) print("\t2.) The number is "+str(int(x))+" and its square root is: "+str(y)+".") sqrt() elif opt == 7: def comp2(): print("\n\tCOMPARE TWO VALUES") x = int(input("\t1.) Enter a first value: ")) y = int(input("\t2.) Enter a second value: ")) msg = "\t3.) Your numbers are "+str(x)+", and "+str(y)+", where " if x > y: print(msg + str(x) + " is greater than " + str(y)+".") else: print(msg + str(y) + " is greater than " + str(x)+".") comp2() elif opt == 8: def comp3(): print("\n\tCOMPARE THREE VALUES") x = int(input("\t1.) Enter a first value: ")) y = int(input("\t2.) Enter a second value: ")) z = int(input("\t3.) Enter a third value: ")) msg = "\t4.) Your numbers are "+str(x)+", "+str(y)+", and "+str(z)+", where " if x > y and x > z: print(msg+str(x)+" is greater than the values "+str(y)+" and "+str(z)+".") elif y > x and y > z: print(msg+str(y)+" is greater than the values "+str(x)+" and "+str(z)+".") else: print(msg+str(z)+" is greater than the values "+str(x)+" and "+str(y)+".") comp3() elif opt == 9: def summ(): print("\n\tSUMMATION UP TO INPUT VALUE") x = int(input("\t1.) Count up to inputted number: ")) a = list(range(0, x)) a.append(x) print("\t2.) Summation of numbers: " + str(a)) b = [] b.extend(a) total = 0 for i in b: total += i print("\t3.) Sum: " + str(total)) summ() elif opt == 10: def fact(): print("\n\tFACTORIAL INPUT VALUE") x = int(input("\t1.) Factorial the inputted number: ")) a = list(range(1, x)) a.append(x) print("\t2.) 
List of factorials: "+str(a)) b = [] b.extend(a) total = 1 for i in b: total *= i print("\t3.) Product: "+str(total)) fact() else: print("Invalid input.") main()
[ "math.pow", "math.sqrt" ]
[((2408, 2420), 'math.sqrt', 'math.sqrt', (['x'], {}), '(x)\n', (2417, 2420), False, 'import math\n'), ((2225, 2239), 'math.pow', 'math.pow', (['x', 'y'], {}), '(x, y)\n', (2233, 2239), False, 'import math\n')]
#!/usr/bin/env python # -*- coding: utf-8 -*- import io import os import re from setuptools import setup, find_packages # classifiers = """\ # Development Status :: 4 - Beta # Programming Language :: Python # Programming Language :: Python :: 3 # Programming Language :: Python :: 3.4 # Programming Language :: Python :: 3.5 # Programming Language :: Python :: 3.6 # Programming Language :: Python :: 3.7 # Programming Language :: Python :: 3.8 # """ def _read(*parts, **kwargs): filepath = os.path.join(os.path.dirname(__file__), *parts) encoding = kwargs.pop('encoding', 'utf-8') with io.open(filepath, encoding=encoding) as fh: text = fh.read() return text def get_version(): version = re.search( r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', _read('bwtools', '__init__.py'), re.MULTILINE).group(1) return version def get_long_description(): return _read('README.md') def get_requirements(path): content = _read(path) return [ req for req in content.split("\n") if req != '' and not req.startswith('#') ] install_requires = get_requirements('requirements.txt') packages = find_packages() setup( name='bwtools', author='<NAME>', author_email='<EMAIL>', version=get_version(), license='MIT', description='tools for bigwigs', long_description=get_long_description(), long_description_content_type='text/markdown', keywords=['genomics', 'bioinformatics', 'Hi-C', 'analysis', 'cooler'], url='https://github.com/gspracklin/bwtools', zip_safe=False, # classifiers=[s.strip() for s in classifiers.split('\n') if s], packages=packages, install_requires=install_requires, entry_points={ 'console_scripts': [ 'bwtools = bwtools.cli:cli', ] } )
[ "os.path.dirname", "setuptools.find_packages", "io.open" ]
[((1213, 1228), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1226, 1228), False, 'from setuptools import setup, find_packages\n'), ((544, 569), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (559, 569), False, 'import os\n'), ((635, 671), 'io.open', 'io.open', (['filepath'], {'encoding': 'encoding'}), '(filepath, encoding=encoding)\n', (642, 671), False, 'import io\n')]
from .trainer.models import MultiTaskTagger from .trainer.utils import load_dictionaries,Config from .trainer.tasks.multitask_tagging import MultiTaskTaggingModule from fairseq.data.data_utils import collate_tokens from attacut import tokenize class HoogBERTaEncoder(object): def __init__(self,layer=12,cuda=False,base_path="."): args = Config(base_path=base_path) self.base_path = base_path self.pos_dict, self.ne_dict, self.sent_dict = load_dictionaries(self.base_path) self.model = MultiTaskTagger(args,[len(self.pos_dict), len(self.ne_dict), len(self.sent_dict)]) if cuda == True: self.model = self.model.cuda() def extract_features(self,sentence): all_sent = [] sentences = sentence.split(" ") for sent in sentences: all_sent.append(" ".join(tokenize(sent)).replace("_","[!und:]")) sentence = " _ ".join(all_sent) tokens = self.model.bert.encode(sentence).unsqueeze(0) all_layers = self.model.bert.extract_features(tokens, return_all_hiddens=True) return tokens[0], all_layers[-1][0] def extract_features_batch(self,sentenceL): inputList = [] for sentX in sentenceL: sentences = sentX.split(" ") all_sent = [] for sent in sentences: all_sent.append(" ".join(tokenize(sent)).replace("_","[!und:]")) sentence = " _ ".join(all_sent) inputList.append(sentence) batch = collate_tokens([self.model.bert.encode(sent) for sent in inputList], pad_idx=1) #tokens = self.model.bert.encode(inputList) return self.extract_features_from_tensor(batch) def extract_features_from_tensor(self,batch): all_layers = self.model.bert.extract_features(batch, return_all_hiddens=True) return batch, all_layers[-1] def extract_features2(self,sentence): # all_sent = [] # sentences = sentence.split(" ") # for sent in sentences: # all_sent.append(" ".join(tokenize(sent)).replace("_","[!und:]")) # sentence = " _ ".join(all_sent) tokens = self.model.bert.encode(sentence).unsqueeze(0) all_layers = self.model.bert.extract_features(tokens, return_all_hiddens=True) return tokens[0], all_layers[-1][0] def extract_features_batch2(self,sentenceL): # inputList = [] # for sentX in sentenceL: # sentences = sentX.split(" ") # all_sent = [] # for sent in sentences: # all_sent.append(" ".join(tokenize(sent)).replace("_","[!und:]")) # sentence = " _ ".join(all_sent) # inputList.append(sentence) batch = collate_tokens([self.model.bert.encode(sent) for sent in sentenceL], pad_idx=1) #tokens = self.model.bert.encode(inputList) return self.extract_features_from_tensor(batch)
[ "attacut.tokenize" ]
[((856, 870), 'attacut.tokenize', 'tokenize', (['sent'], {}), '(sent)\n', (864, 870), False, 'from attacut import tokenize\n'), ((1404, 1418), 'attacut.tokenize', 'tokenize', (['sent'], {}), '(sent)\n', (1412, 1418), False, 'from attacut import tokenize\n')]
from pyconductor import load_test_values, calculate_conductance def conductance_calc(): preloaded_dict = load_test_values() while preloaded_dict: print( "[1] - Show currently available materials in Material Dictionary\n" "[2] - Add a material (will not be saved upon restart)\n" "[3] - Quit\n" "To test the conductive properties of a material, simply type in its name.\n" "Otherwise, type the corresponding number for an option above.\n" ) main_prompt = input(">>> ").lower() if main_prompt == "1": print(f"\nCurrently contains the following materials:\n{preloaded_dict.keys()}\n") elif main_prompt == "2": preloaded_dict.addmat() elif main_prompt == "3": quit() else: try: calculate_conductance(preloaded_dict[main_prompt]) while True: again_prompt = input( "Would you like to try another calculation? [Y]es or [N]o: ").lower() if again_prompt in ("y", "yes"): break elif again_prompt in ("n", "no"): print("\nGoodbye!\n") quit() except KeyError: if main_prompt == "": print("\nNo material specified.\nPlease enter a valid material name " "listed in option [1], or use option [2] to add your own.\n") else: # TODO: add logic handling whether user wants to add missing material print(f"\n{main_prompt} is not a valid material or command!\n") else: pass if __name__ == "__main__": conductance_calc()
[ "pyconductor.load_test_values", "pyconductor.calculate_conductance" ]
[((111, 129), 'pyconductor.load_test_values', 'load_test_values', ([], {}), '()\n', (127, 129), False, 'from pyconductor import load_test_values, calculate_conductance\n'), ((864, 914), 'pyconductor.calculate_conductance', 'calculate_conductance', (['preloaded_dict[main_prompt]'], {}), '(preloaded_dict[main_prompt])\n', (885, 914), False, 'from pyconductor import load_test_values, calculate_conductance\n')]
from fuzzconfig import FuzzConfig import nonrouting import fuzzloops import re cfgs = [ FuzzConfig(job="SYSCONFIG40", device="LIFCL-40", sv="../shared/empty_40.v", tiles=["CIB_R0C75:EFB_0", "CIB_R0C72:BANKREF0", "CIB_R0C77:EFB_1_OSC", "CIB_R0C79:EFB_2", "CIB_R0C81:I2C_EFB_3", "CIB_R0C85:PMU", "CIB_R0C87:MIB_CNR_32_FAFD", "CIB_R1C87:IREF_P33", "CIB_R2C87:POR"]), FuzzConfig(job="SYSCONFIG17", device="LIFCL-17", sv="../shared/empty_17.v", tiles=["CIB_R1C75:IREF_15K", "CIB_R0C75:PPT_QOUT_15K", "CIB_R0C74:PVTCAL33_15K", "CIB_R0C73:POR_15K", "CIB_R0C72:I2C_15K", "CIB_R0C71:OSC_15K", "CIB_R0C70:PMU_15K", "CIB_R0C66:EFB_15K"]) ] def main(): for cfg in cfgs: cfg.setup() empty = cfg.build_design(cfg.sv, {}) cfg.sv = "../shared/empty_presyn_40.v" cfg.struct_mode = False def get_substs(k, v): return dict(sysconfig="{}={}".format(k, v)) nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.MASTER_SPI_PORT", ["DISABLE", "SERIAL", "DUAL", "QUAD"], lambda x: get_substs("MASTER_SPI_PORT", x), False, assume_zero_base=True, desc="status of master SPI port after configuration") nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.SLAVE_SPI_PORT", ["DISABLE", "SERIAL", "DUAL", "QUAD"], lambda x: get_substs("SLAVE_SPI_PORT", x), False, assume_zero_base=True, desc="status of slave SPI port after configuration") nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.SLAVE_I2C_PORT", ["DISABLE", "ENABLE"], lambda x: get_substs("SLAVE_I2C_PORT", x), False, assume_zero_base=True, desc="status of slave I2C port after configuration") nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.SLAVE_I3C_PORT", ["DISABLE", "ENABLE"], lambda x: get_substs("SLAVE_I3C_PORT", x), False, assume_zero_base=True, desc="status of slave I3C port after configuration") nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.JTAG_PORT", ["DISABLE", "ENABLE"], lambda x: get_substs("JTAG_PORT", x), False, assume_zero_base=True, desc="status of JTAG port after configuration") nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.DONE_PORT", ["DISABLE", "ENABLE"], lambda x: get_substs("DONE_PORT", x), False, assume_zero_base=True, desc="use DONE output after configuration") nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.INITN_PORT", ["DISABLE", "ENABLE"], lambda x: get_substs("INITN_PORT", x), False, assume_zero_base=True, desc="use INITN input after configuration") nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.PROGRAMN_PORT", ["DISABLE", "ENABLE"], lambda x: get_substs("PROGRAMN_PORT", x), False, assume_zero_base=True, desc="use PROGRAMN input after configuration") if __name__ == "__main__": main()
[ "fuzzconfig.FuzzConfig" ]
[((93, 379), 'fuzzconfig.FuzzConfig', 'FuzzConfig', ([], {'job': '"""SYSCONFIG40"""', 'device': '"""LIFCL-40"""', 'sv': '"""../shared/empty_40.v"""', 'tiles': "['CIB_R0C75:EFB_0', 'CIB_R0C72:BANKREF0', 'CIB_R0C77:EFB_1_OSC',\n 'CIB_R0C79:EFB_2', 'CIB_R0C81:I2C_EFB_3', 'CIB_R0C85:PMU',\n 'CIB_R0C87:MIB_CNR_32_FAFD', 'CIB_R1C87:IREF_P33', 'CIB_R2C87:POR']"}), "(job='SYSCONFIG40', device='LIFCL-40', sv='../shared/empty_40.v',\n tiles=['CIB_R0C75:EFB_0', 'CIB_R0C72:BANKREF0', 'CIB_R0C77:EFB_1_OSC',\n 'CIB_R0C79:EFB_2', 'CIB_R0C81:I2C_EFB_3', 'CIB_R0C85:PMU',\n 'CIB_R0C87:MIB_CNR_32_FAFD', 'CIB_R1C87:IREF_P33', 'CIB_R2C87:POR'])\n", (103, 379), False, 'from fuzzconfig import FuzzConfig\n'), ((389, 663), 'fuzzconfig.FuzzConfig', 'FuzzConfig', ([], {'job': '"""SYSCONFIG17"""', 'device': '"""LIFCL-17"""', 'sv': '"""../shared/empty_17.v"""', 'tiles': "['CIB_R1C75:IREF_15K', 'CIB_R0C75:PPT_QOUT_15K', 'CIB_R0C74:PVTCAL33_15K',\n 'CIB_R0C73:POR_15K', 'CIB_R0C72:I2C_15K', 'CIB_R0C71:OSC_15K',\n 'CIB_R0C70:PMU_15K', 'CIB_R0C66:EFB_15K']"}), "(job='SYSCONFIG17', device='LIFCL-17', sv='../shared/empty_17.v',\n tiles=['CIB_R1C75:IREF_15K', 'CIB_R0C75:PPT_QOUT_15K',\n 'CIB_R0C74:PVTCAL33_15K', 'CIB_R0C73:POR_15K', 'CIB_R0C72:I2C_15K',\n 'CIB_R0C71:OSC_15K', 'CIB_R0C70:PMU_15K', 'CIB_R0C66:EFB_15K'])\n", (399, 663), False, 'from fuzzconfig import FuzzConfig\n')]
from uuid import UUID import os import pytest from notifications_utils.base64_uuid import base64_to_uuid, uuid_to_base64, base64_to_bytes, bytes_to_base64 def test_bytes_to_base64_to_bytes(): b = os.urandom(32) b64 = bytes_to_base64(b) assert base64_to_bytes(b64) == b @pytest.mark.parametrize( "url_val", [ "AAAAAAAAAAAAAAAAAAAAAQ", "AAAAAAAAAAAAAAAAAAAAAQ=", # even though this has invalid padding we put extra =s on the end so this is okay "AAAAAAAAAAAAAAAAAAAAAQ==", ], ) def test_base64_converter_to_python(url_val): assert base64_to_uuid(url_val) == UUID(int=1) @pytest.mark.parametrize("python_val", [UUID(int=1), "00000000-0000-0000-0000-000000000001"]) def test_base64_converter_to_url(python_val): assert uuid_to_base64(python_val) == "AAAAAAAAAAAAAAAAAAAAAQ" @pytest.mark.parametrize( "url_val", [ "this_is_valid_base64_but_is_too_long_to_be_a_uuid", "this_one_has_emoji_➕➕➕", ], ) def test_base64_converter_to_python_raises_validation_error(url_val): with pytest.raises(Exception): base64_to_uuid(url_val) def test_base64_converter_to_url_raises_validation_error(): with pytest.raises(Exception): uuid_to_base64(object())
[ "notifications_utils.base64_uuid.base64_to_uuid", "uuid.UUID", "os.urandom", "pytest.mark.parametrize", "notifications_utils.base64_uuid.bytes_to_base64", "pytest.raises", "notifications_utils.base64_uuid.base64_to_bytes", "notifications_utils.base64_uuid.uuid_to_base64" ]
[((287, 408), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""url_val"""', "['AAAAAAAAAAAAAAAAAAAAAQ', 'AAAAAAAAAAAAAAAAAAAAAQ=',\n 'AAAAAAAAAAAAAAAAAAAAAQ==']"], {}), "('url_val', ['AAAAAAAAAAAAAAAAAAAAAQ',\n 'AAAAAAAAAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAAAAAAAAQ=='])\n", (310, 408), False, 'import pytest\n'), ((837, 961), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""url_val"""', "['this_is_valid_base64_but_is_too_long_to_be_a_uuid', 'this_one_has_emoji_➕➕➕']"], {}), "('url_val', [\n 'this_is_valid_base64_but_is_too_long_to_be_a_uuid',\n 'this_one_has_emoji_➕➕➕'])\n", (860, 961), False, 'import pytest\n'), ((203, 217), 'os.urandom', 'os.urandom', (['(32)'], {}), '(32)\n', (213, 217), False, 'import os\n'), ((228, 246), 'notifications_utils.base64_uuid.bytes_to_base64', 'bytes_to_base64', (['b'], {}), '(b)\n', (243, 246), False, 'from notifications_utils.base64_uuid import base64_to_uuid, uuid_to_base64, base64_to_bytes, bytes_to_base64\n'), ((258, 278), 'notifications_utils.base64_uuid.base64_to_bytes', 'base64_to_bytes', (['b64'], {}), '(b64)\n', (273, 278), False, 'from notifications_utils.base64_uuid import base64_to_uuid, uuid_to_base64, base64_to_bytes, bytes_to_base64\n'), ((587, 610), 'notifications_utils.base64_uuid.base64_to_uuid', 'base64_to_uuid', (['url_val'], {}), '(url_val)\n', (601, 610), False, 'from notifications_utils.base64_uuid import base64_to_uuid, uuid_to_base64, base64_to_bytes, bytes_to_base64\n'), ((614, 625), 'uuid.UUID', 'UUID', ([], {'int': '(1)'}), '(int=1)\n', (618, 625), False, 'from uuid import UUID\n'), ((779, 805), 'notifications_utils.base64_uuid.uuid_to_base64', 'uuid_to_base64', (['python_val'], {}), '(python_val)\n', (793, 805), False, 'from notifications_utils.base64_uuid import base64_to_uuid, uuid_to_base64, base64_to_bytes, bytes_to_base64\n'), ((668, 679), 'uuid.UUID', 'UUID', ([], {'int': '(1)'}), '(int=1)\n', (672, 679), False, 'from uuid import UUID\n'), ((1066, 1090), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1079, 1090), False, 'import pytest\n'), ((1100, 1123), 'notifications_utils.base64_uuid.base64_to_uuid', 'base64_to_uuid', (['url_val'], {}), '(url_val)\n', (1114, 1123), False, 'from notifications_utils.base64_uuid import base64_to_uuid, uuid_to_base64, base64_to_bytes, bytes_to_base64\n'), ((1195, 1219), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1208, 1219), False, 'import pytest\n')]
"""The devolo_home_control integration.""" from __future__ import annotations import asyncio from functools import partial from types import MappingProxyType from typing import Any from devolo_home_control_api.exceptions.gateway import GatewayOfflineError from devolo_home_control_api.homecontrol import HomeControl from devolo_home_control_api.mydevolo import Mydevolo from homeassistant.components import zeroconf from homeassistant.config_entries import ConfigEntry from homeassistant.const import CONF_PASSWORD, CONF_USERNAME, EVENT_HOMEASSISTANT_STOP from homeassistant.core import Event, HomeAssistant from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady from .const import ( CONF_MYDEVOLO, DEFAULT_MYDEVOLO, DOMAIN, GATEWAY_SERIAL_PATTERN, PLATFORMS, ) async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Set up the devolo account from a config entry.""" hass.data.setdefault(DOMAIN, {}) mydevolo = configure_mydevolo(entry.data) credentials_valid = await hass.async_add_executor_job(mydevolo.credentials_valid) if not credentials_valid: raise ConfigEntryAuthFailed if await hass.async_add_executor_job(mydevolo.maintenance): raise ConfigEntryNotReady gateway_ids = await hass.async_add_executor_job(mydevolo.get_gateway_ids) if entry.unique_id and GATEWAY_SERIAL_PATTERN.match(entry.unique_id): uuid = await hass.async_add_executor_job(mydevolo.uuid) hass.config_entries.async_update_entry(entry, unique_id=uuid) try: zeroconf_instance = await zeroconf.async_get_instance(hass) hass.data[DOMAIN][entry.entry_id] = {"gateways": [], "listener": None} for gateway_id in gateway_ids: hass.data[DOMAIN][entry.entry_id]["gateways"].append( await hass.async_add_executor_job( partial( HomeControl, gateway_id=gateway_id, mydevolo_instance=mydevolo, zeroconf_instance=zeroconf_instance, ) ) ) except GatewayOfflineError as err: raise ConfigEntryNotReady from err hass.config_entries.async_setup_platforms(entry, PLATFORMS) def shutdown(event: Event) -> None: for gateway in hass.data[DOMAIN][entry.entry_id]["gateways"]: gateway.websocket_disconnect( f"websocket disconnect requested by {EVENT_HOMEASSISTANT_STOP}" ) # Listen when EVENT_HOMEASSISTANT_STOP is fired hass.data[DOMAIN][entry.entry_id]["listener"] = hass.bus.async_listen_once( EVENT_HOMEASSISTANT_STOP, shutdown ) return True async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Unload a config entry.""" unload = await hass.config_entries.async_unload_platforms(entry, PLATFORMS) await asyncio.gather( *[ hass.async_add_executor_job(gateway.websocket_disconnect) for gateway in hass.data[DOMAIN][entry.entry_id]["gateways"] ] ) hass.data[DOMAIN][entry.entry_id]["listener"]() hass.data[DOMAIN].pop(entry.entry_id) return unload def configure_mydevolo(conf: dict[str, Any] | MappingProxyType[str, Any]) -> Mydevolo: """Configure mydevolo.""" mydevolo = Mydevolo() mydevolo.user = conf[CONF_USERNAME] mydevolo.password = conf[CONF_PASSWORD] mydevolo.url = conf.get(CONF_MYDEVOLO, DEFAULT_MYDEVOLO) return mydevolo
[ "homeassistant.components.zeroconf.async_get_instance", "functools.partial", "devolo_home_control_api.mydevolo.Mydevolo" ]
[((3395, 3405), 'devolo_home_control_api.mydevolo.Mydevolo', 'Mydevolo', ([], {}), '()\n', (3403, 3405), False, 'from devolo_home_control_api.mydevolo import Mydevolo\n'), ((1617, 1650), 'homeassistant.components.zeroconf.async_get_instance', 'zeroconf.async_get_instance', (['hass'], {}), '(hass)\n', (1644, 1650), False, 'from homeassistant.components import zeroconf\n'), ((1906, 2018), 'functools.partial', 'partial', (['HomeControl'], {'gateway_id': 'gateway_id', 'mydevolo_instance': 'mydevolo', 'zeroconf_instance': 'zeroconf_instance'}), '(HomeControl, gateway_id=gateway_id, mydevolo_instance=mydevolo,\n zeroconf_instance=zeroconf_instance)\n', (1913, 2018), False, 'from functools import partial\n')]
## -------------------------------------------------------- ## # Trab 1 IA 2019-2 # # <NAME> # # hillClimbing.py: implements the hill climbing metaheuristic for the bag problem # # Python version: 3.7.4 ## -------------------------------------------------------- ## import bagProblem as bp from time import time # Returns True and the valid state with the biggest value, or False if no state is valid: def select_Best(si, T, OBJs): sn = -1 # best state position sv = 0 # state value for i in range(len(si)): v = bp.state_Value(si[i], OBJs) # current value if bp.state_Verify(si[i], T, OBJs) and v > sv: sv = v sn = i if sn == -1: return False, [] return True, si[sn] # Hill Climbing: def hill_Climbing(T, OBJs, execTime, *args): sn = [0]*len(OBJs) # initial state c = True # continue flag start = time() while c: if time() - start > execTime: break cs = sn # storing current state c, sn = select_Best(bp.state_Expansion(cs), T, OBJs) return cs # T = 19 # bag size # OBJs = [(1,3), (4,6), (5,7)] # object list (v,t) # print(hill_Climbing(T,OBJs))
[ "bagProblem.state_Verify", "bagProblem.state_Value", "time.time", "bagProblem.state_Expansion" ]
[((888, 894), 'time.time', 'time', ([], {}), '()\n', (892, 894), False, 'from time import time\n'), ((542, 569), 'bagProblem.state_Value', 'bp.state_Value', (['si[i]', 'OBJs'], {}), '(si[i], OBJs)\n', (556, 569), True, 'import bagProblem as bp\n'), ((597, 628), 'bagProblem.state_Verify', 'bp.state_Verify', (['si[i]', 'T', 'OBJs'], {}), '(si[i], T, OBJs)\n', (612, 628), True, 'import bagProblem as bp\n'), ((1032, 1054), 'bagProblem.state_Expansion', 'bp.state_Expansion', (['cs'], {}), '(cs)\n', (1050, 1054), True, 'import bagProblem as bp\n'), ((919, 925), 'time.time', 'time', ([], {}), '()\n', (923, 925), False, 'from time import time\n')]
from bs4 import BeautifulSoup from optimizers.AdvancedJSOptimizer import AdvancedJSOptimizer from optimizers.CSSOptimizer import CSSOptimizer class HTMLParser(object): def __init__(self, html): self.soup = BeautifulSoup(html, 'lxml') def js_parser(self): for script in self.soup.find_all('script'): opt = AdvancedJSOptimizer() script.string = opt.process(script.string) def css_parser(self): for style in self.soup.find_all('style'): opt = CSSOptimizer() style.string = opt.process(style.string)
[ "bs4.BeautifulSoup", "optimizers.AdvancedJSOptimizer.AdvancedJSOptimizer", "optimizers.CSSOptimizer.CSSOptimizer" ]
[((220, 247), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""lxml"""'], {}), "(html, 'lxml')\n", (233, 247), False, 'from bs4 import BeautifulSoup\n'), ((344, 365), 'optimizers.AdvancedJSOptimizer.AdvancedJSOptimizer', 'AdvancedJSOptimizer', ([], {}), '()\n', (363, 365), False, 'from optimizers.AdvancedJSOptimizer import AdvancedJSOptimizer\n'), ((516, 530), 'optimizers.CSSOptimizer.CSSOptimizer', 'CSSOptimizer', ([], {}), '()\n', (528, 530), False, 'from optimizers.CSSOptimizer import CSSOptimizer\n')]
'''Provide interface for game.''' from typing import Any, Dict, List, Optional, Union import flask from flask import Blueprint, url_for from flask_login import current_user, login_required from flask_wtf import FlaskForm from flask_sse import sse from werkzeug.wrappers import Response from wtforms import IntegerField, SubmitField from wtforms.validators import DataRequired, NumberRange # from spades import exceptions from spades.game import GameState from spades.game.models.player import Player main = Blueprint('main', __name__) mock_names: List[str] = ['john'] __game: GameState = GameState() class LobbyForm(FlaskForm): start_game: SubmitField = SubmitField('start game') join_game: SubmitField = SubmitField('join game') class BidForm(FlaskForm): bid: IntegerField = IntegerField( 'bid', validators=[ DataRequired(), NumberRange(min=1, max=13) ] ) submit: SubmitField = SubmitField('bid') def get_player() -> Optional[Player]: player = __game.get_player_by_username(current_user.username) if not player: __game.add_player(Player(current_user.username)) player = __game.get_player_by_username(current_user.username) return player def get_turns(players: List[Player]) -> List[Dict[str, Any]]: player_turns: List[Dict[str, Any]] = [] def is_active(turn: int) -> str: if __game.state != 'playing': # type: ignore print('gamestate', False) return 'false' elif __game.current_turn != turn: print('turn:', __game.current_turn, turn) return 'false' else: print('active:', True) return 'true' for n, player in enumerate(players): inst = { 'username': player.username, 'active': is_active(n) } if player.username == current_user.username: inst['hand'] = player.hand.to_json # type: ignore else: inst['card_count'] = len(player.hand) # type: ignore player_turns.append(inst) print('player turns', player_turns) return player_turns @main.route('/') def index() -> str: '''Provide start page.''' return flask.render_template('index.html') @main.route('/lobby', methods=['GET', 'POST']) @login_required def lobby() -> Union[Response, str]: '''Provide lobby to coordinate new games.''' form = LobbyForm() if form.validate_on_submit(): if form.join_game.data: print('join game') if ( hasattr(__game, 'state') and __game.state == 'waiting' # type: ignore ): if not __game.get_player_by_username( current_user.username ): __game.add_player(Player(current_user.username)) if __game.check_player_count(): __game.start_game() # type: ignore return flask.redirect(url_for('main.gameboard')) # if games != []: # return flask.render_template( # 'lobby.html', form=form, games=mock_names # ) return flask.render_template('lobby.html', form=form) @main.route('/play', methods=['POST']) @login_required def play() -> None: '''Publish card play for user.''' username = flask.request.form['username'] rank = flask.request.form['rank'] suit = flask.request.form['suit'] card_played = {'username': username, 'rank': rank, 'suit': suit} # TODO: submit card to game print( 'turn', __game.state, # type: ignore __game.get_player_turn(username), __game.current_turn ) __game.make_play(__game.get_player_turn(username), rank, suit) sse.publish(card_played, type='play-card') @main.route('/bids', methods=['GET', 'POST']) @login_required def bids() -> Union[Response, str]: form = BidForm() if form.validate_on_submit(): player_bid = flask.request.form['bid'] __game.accept_bid( __game.get_player_turn(current_user.username), player_bid ) __game.start_turn() # type: ignore return flask.redirect(url_for('main.gameboard')) player = get_player() return flask.render_template( 
'bid.html', form=form, data=player.hand.to_json # type: ignore ) @main.route('/gameboard') @login_required def gameboard() -> Union[Response, str]: '''Provide gameboard.''' # Setup mock players - less than four fail for player_name in mock_names: if not __game.get_player_by_username(player_name): __game.add_player(Player(player_name)) # mock end players = [] player = get_player() if __game.check_player_count(): if __game.state == 'waiting': # type: ignore __game.start_game() print('starting game', __game.state) if __game.state == 'bidding': # type: ignore print('cards', player.hand.to_json) print('accepting bids') # return flask.redirect(url_for('main.bids')) if __game.state == 'playing': # type: ignore print('playing game') if __game.state == 'cleanup': # type: ignore print('clean up match') players = get_turns(__game.players) if hasattr(player, 'hand'): print('hand') return flask.render_template( 'gameboard.html', state=__game.state, data=players # type: ignore ) else: print('no hand') return flask.render_template('gameboard.html')
[ "flask.render_template", "wtforms.validators.NumberRange", "wtforms.validators.DataRequired", "wtforms.SubmitField", "flask.url_for", "spades.game.GameState", "flask_sse.sse.publish", "flask.Blueprint", "spades.game.models.player.Player" ]
[((511, 538), 'flask.Blueprint', 'Blueprint', (['"""main"""', '__name__'], {}), "('main', __name__)\n", (520, 538), False, 'from flask import Blueprint, url_for\n'), ((593, 604), 'spades.game.GameState', 'GameState', ([], {}), '()\n', (602, 604), False, 'from spades.game import GameState\n'), ((665, 690), 'wtforms.SubmitField', 'SubmitField', (['"""start game"""'], {}), "('start game')\n", (676, 690), False, 'from wtforms import IntegerField, SubmitField\n'), ((720, 744), 'wtforms.SubmitField', 'SubmitField', (['"""join game"""'], {}), "('join game')\n", (731, 744), False, 'from wtforms import IntegerField, SubmitField\n'), ((956, 974), 'wtforms.SubmitField', 'SubmitField', (['"""bid"""'], {}), "('bid')\n", (967, 974), False, 'from wtforms import IntegerField, SubmitField\n'), ((2227, 2262), 'flask.render_template', 'flask.render_template', (['"""index.html"""'], {}), "('index.html')\n", (2248, 2262), False, 'import flask\n'), ((3167, 3213), 'flask.render_template', 'flask.render_template', (['"""lobby.html"""'], {'form': 'form'}), "('lobby.html', form=form)\n", (3188, 3213), False, 'import flask\n'), ((3764, 3806), 'flask_sse.sse.publish', 'sse.publish', (['card_played'], {'type': '"""play-card"""'}), "(card_played, type='play-card')\n", (3775, 3806), False, 'from flask_sse import sse\n'), ((4266, 4336), 'flask.render_template', 'flask.render_template', (['"""bid.html"""'], {'form': 'form', 'data': 'player.hand.to_json'}), "('bid.html', form=form, data=player.hand.to_json)\n", (4287, 4336), False, 'import flask\n'), ((5388, 5461), 'flask.render_template', 'flask.render_template', (['"""gameboard.html"""'], {'state': '__game.state', 'data': 'players'}), "('gameboard.html', state=__game.state, data=players)\n", (5409, 5461), False, 'import flask\n'), ((5550, 5589), 'flask.render_template', 'flask.render_template', (['"""gameboard.html"""'], {}), "('gameboard.html')\n", (5571, 5589), False, 'import flask\n'), ((1126, 1155), 'spades.game.models.player.Player', 'Player', (['current_user.username'], {}), '(current_user.username)\n', (1132, 1155), False, 'from spades.game.models.player import Player\n'), ((4202, 4227), 'flask.url_for', 'url_for', (['"""main.gameboard"""'], {}), "('main.gameboard')\n", (4209, 4227), False, 'from flask import Blueprint, url_for\n'), ((859, 873), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (871, 873), False, 'from wtforms.validators import DataRequired, NumberRange\n'), ((887, 913), 'wtforms.validators.NumberRange', 'NumberRange', ([], {'min': '(1)', 'max': '(13)'}), '(min=1, max=13)\n', (898, 913), False, 'from wtforms.validators import DataRequired, NumberRange\n'), ((2999, 3024), 'flask.url_for', 'url_for', (['"""main.gameboard"""'], {}), "('main.gameboard')\n", (3006, 3024), False, 'from flask import Blueprint, url_for\n'), ((4652, 4671), 'spades.game.models.player.Player', 'Player', (['player_name'], {}), '(player_name)\n', (4658, 4671), False, 'from spades.game.models.player import Player\n'), ((2822, 2851), 'spades.game.models.player.Player', 'Player', (['current_user.username'], {}), '(current_user.username)\n', (2828, 2851), False, 'from spades.game.models.player import Player\n')]
# Copyright (c) 2019 - The Procedural Generation for Gazebo authors # For information on the respective copyright owner see the NOTICE file # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .actuator import Actuator from .axis import Axis from .box import Box from .child import Child from .collision import Collision from .color import Color from .cylinder import Cylinder from .dynamics import Dynamics from .gazebo import Gazebo from .geometry import Geometry from .hardware_interface import HardwareInterface from .inertia import Inertia from .inertial import Inertial from .joint import Joint from .limit import Limit from .link import Link from .mass import Mass from .material import Material from .mechanical_reduction import MechanicalReduction from .mesh import Mesh from .mimic import Mimic from .origin import Origin from .parent import Parent from .robot import Robot from .safety_controller import SafetyController from .sphere import Sphere from .texture import Texture from .transmission import Transmission from .type import Type from .visual import Visual def get_all_urdf_element_classes(): """Get list of all URDF element classes.""" import sys import inspect from ..types import XMLBase output = list() current_module = sys.modules[__name__] for name, obj in inspect.getmembers(current_module): if inspect.isclass(obj): if issubclass(obj, XMLBase) and obj._TYPE == 'urdf': output.append(obj) return output def create_urdf_element(tag, *args): """URDF element factory. > *Input arguments* * `tag` (*type:* `str`): Name of the URDF element. * `args`: Extra arguments for URDF element constructor. > *Returns* URDF element if `tag` refers to a valid URDF element. `None`, otherwise. """ import sys import inspect from ..types import XMLBase current_module = sys.modules[__name__] for name, obj in inspect.getmembers(current_module): if inspect.isclass(obj): if issubclass(obj, XMLBase): if tag == obj._NAME and obj._TYPE == 'urdf': return obj(*args) return None def create_urdf_type(tag): """Return handle of the URDF element type. > *Input arguments* * `tag` (*type:* `str`): Name of the URDF element. > *Returns* URDF element type if `tag` is valid, `None` otherwise`. 
""" import sys import inspect from ..types import XMLBase current_module = sys.modules[__name__] for name, obj in inspect.getmembers(current_module): if inspect.isclass(obj): if issubclass(obj, XMLBase): if tag == obj._NAME and obj._TYPE == 'urdf': return obj return None def is_urdf_element(obj): """Test if XML element is an URDF element.""" from ..types import XMLBase return obj.__class__ in XMLBase.__subclasses__() and \ obj._TYPE == 'urdf' __all__ = [ 'get_all_urdf_element_classes', 'create_urdf_element', 'create_urdf_type', 'is_urdf_element', 'Actuator', 'Axis', 'Box', 'Child', 'Collision', 'Color', 'Cylinder', 'Dynamics', 'Gazebo', 'Geometry', 'HardwareInterface', 'Inertia', 'Inertial', 'Joint', 'Limit', 'Link', 'Mass', 'Material', 'MechanicalReduction', 'Mesh', 'Mimic', 'Origin', 'Parent', 'Robot', 'SafetyController', 'Sphere', 'Texture', 'Transmission', 'Type', 'Visual' ]
[ "inspect.isclass", "inspect.getmembers" ]
[((1812, 1846), 'inspect.getmembers', 'inspect.getmembers', (['current_module'], {}), '(current_module)\n', (1830, 1846), False, 'import inspect\n'), ((2445, 2479), 'inspect.getmembers', 'inspect.getmembers', (['current_module'], {}), '(current_module)\n', (2463, 2479), False, 'import inspect\n'), ((3043, 3077), 'inspect.getmembers', 'inspect.getmembers', (['current_module'], {}), '(current_module)\n', (3061, 3077), False, 'import inspect\n'), ((1859, 1879), 'inspect.isclass', 'inspect.isclass', (['obj'], {}), '(obj)\n', (1874, 1879), False, 'import inspect\n'), ((2492, 2512), 'inspect.isclass', 'inspect.isclass', (['obj'], {}), '(obj)\n', (2507, 2512), False, 'import inspect\n'), ((3090, 3110), 'inspect.isclass', 'inspect.isclass', (['obj'], {}), '(obj)\n', (3105, 3110), False, 'import inspect\n')]
# Licensed to Modin Development Team under one or more contributor license agreements. # See the NOTICE file distributed with this work for additional information regarding # copyright ownership. The Modin Development Team licenses this file to you under the # Apache License, Version 2.0 (the "License"); you may not use this file except in # compliance with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific language # governing permissions and limitations under the License. """Module houses `FeatherDispatcher` class, that is used for reading `.feather` files.""" from modin.engines.base.io.column_stores.column_store_dispatcher import ( ColumnStoreDispatcher, ) class FeatherDispatcher(ColumnStoreDispatcher): """ Class handles utils for reading `.feather` files. Inherits some common for columnar store files util functions from `ColumnStoreDispatcher` class. """ @classmethod def _read(cls, path, columns=None, **kwargs): """ Read data from the file path, returning a query compiler. Parameters ---------- path : str or file-like object The filepath of the feather file. columns : array-like, optional Columns to read from file. If not provided, all columns are read. **kwargs : dict `read_feather` function kwargs. Returns ------- BaseQueryCompiler Query compiler with imported data for further processing. Notes ----- `PyArrow` engine and local files only are supported for now, multi threading is set to False by default. PyArrow feather is used. Please refer to the documentation here https://arrow.apache.org/docs/python/api.html#feather-format """ if columns is None: from pyarrow.feather import read_feather df = read_feather(path) # pyarrow.feather.read_feather doesn't support columns as pandas.Index columns = list(df.columns) return cls.build_query_compiler(path, columns, use_threads=False)
[ "pyarrow.feather.read_feather" ]
[((2189, 2207), 'pyarrow.feather.read_feather', 'read_feather', (['path'], {}), '(path)\n', (2201, 2207), False, 'from pyarrow.feather import read_feather\n')]
"""A script is a series of operations.""" import json import os from .ops import create class Script(object): """A script is a series of operations.""" def __init__(self, s=None): """Parse a script from a JSON string.""" if s is not None: self.parsed_script = json.loads(s) self.operations = [create(params) for params in self.parsed_script] def __len__(self): """Return the number of operations.""" return len(self.operations) def execute(self, data): """Execute all operations on the provided dataset. Args: data (:class:`pandas.DataFrame`): The data to transform. Not guaranteed immutable. Returns: :class:`pandas.DataFrame`: The transformed data. """ for op in self.operations: data = op(data) return data def load_script(f): """Load and parse the script given. Args: f (:class:`file` or :class:`str`): Open file object or filename. Returns: :class:`Script`: The parsed script object. """ if isinstance(f, (str, os.PathLike)): f = open(f) with f: return parse(f.read()) parse = Script
[ "json.loads" ]
[((301, 314), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (311, 314), False, 'import json\n')]
""" Totally untested file. Will be removed in subsequent commits """ import tensorflow as tf import matplotlib.image as mpimg import numpy as np from math import ceil, floor import os IMAGE_SIZE = 720 def central_scale_images(X_imgs, scales): # Various settings needed for Tensorflow operation boxes = np.zeros((len(scales), 4), dtype = np.float32) for index, scale in enumerate(scales): x1 = y1 = 0.5 - 0.5 * scale # To scale centrally x2 = y2 = 0.5 + 0.5 * scale boxes[index] = np.array([y1, x1, y2, x2], dtype = np.float32) box_ind = np.zeros((len(scales)), dtype = np.int32) crop_size = np.array([IMAGE_SIZE, IMAGE_SIZE], dtype = np.int32) X_scale_data = [] tf.reset_default_graph() X = tf.placeholder(tf.float32, shape = (1, IMAGE_SIZE, IMAGE_SIZE, 3)) # Define Tensorflow operation for all scales but only one base image at a time tf_img = tf.image.crop_and_resize(X, boxes, box_ind, crop_size) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for img_data in X_imgs: batch_img = np.expand_dims(img_data, axis = 0) scaled_imgs = sess.run(tf_img, feed_dict = {X: batch_img}) X_scale_data.extend(scaled_imgs) X_scale_data = np.array(X_scale_data, dtype = np.float32) return X_scale_data from math import ceil, floor def get_translate_parameters(index): if index == 0: # Translate left 20 percent offset = np.array([0.0, 0.2], dtype = np.float32) size = np.array([IMAGE_SIZE, ceil(0.8 * IMAGE_SIZE)], dtype = np.int32) w_start = 0 w_end = int(ceil(0.8 * IMAGE_SIZE)) h_start = 0 h_end = IMAGE_SIZE elif index == 1: # Translate right 20 percent offset = np.array([0.0, -0.2], dtype = np.float32) size = np.array([IMAGE_SIZE, ceil(0.8 * IMAGE_SIZE)], dtype = np.int32) w_start = int(floor((1 - 0.8) * IMAGE_SIZE)) w_end = IMAGE_SIZE h_start = 0 h_end = IMAGE_SIZE elif index == 2: # Translate top 20 percent offset = np.array([0.2, 0.0], dtype = np.float32) size = np.array([ceil(0.8 * IMAGE_SIZE), IMAGE_SIZE], dtype = np.int32) w_start = 0 w_end = IMAGE_SIZE h_start = 0 h_end = int(ceil(0.8 * IMAGE_SIZE)) else: # Translate bottom 20 percent offset = np.array([-0.2, 0.0], dtype = np.float32) size = np.array([ceil(0.8 * IMAGE_SIZE), IMAGE_SIZE], dtype = np.int32) w_start = 0 w_end = IMAGE_SIZE h_start = int(floor((1 - 0.8) * IMAGE_SIZE)) h_end = IMAGE_SIZE return offset, size, w_start, w_end, h_start, h_end def translate_images(X_imgs): offsets = np.zeros((len(X_imgs), 2), dtype = np.float32) n_translations = 4 X_translated_arr = [] tf.reset_default_graph() with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for i in range(n_translations): X_translated = np.zeros((len(X_imgs), IMAGE_SIZE, IMAGE_SIZE, 3), dtype = np.float32) X_translated.fill(0.0) # Filling background color base_offset, size, w_start, w_end, h_start, h_end = get_translate_parameters(i) offsets[:, :] = base_offset glimpses = tf.image.extract_glimpse(X_imgs, size, offsets) glimpses = sess.run(glimpses) X_translated[:, h_start: h_start + size[0], \ w_start: w_start + size[1], :] = glimpses X_translated_arr.extend(X_translated) X_translated_arr = np.array(X_translated_arr, dtype = np.float32) return X_translated_arr def rotate_images(X_imgs): X_rotate = [] tf.reset_default_graph() X = tf.placeholder(tf.float32, shape = (IMAGE_SIZE, IMAGE_SIZE, 3)) k = tf.placeholder(tf.int32) tf_img = tf.image.rot90(X, k = k) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for img in X_imgs: for i in range(3): # Rotation at 90, 180 and 270 degrees rotated_img = sess.run(tf_img, feed_dict = {X: img, k: i + 1}) 
X_rotate.append(rotated_img) X_rotate = np.array(X_rotate, dtype = np.float32) return X_rotate def flip_images(X_imgs): X_flip = [] tf.reset_default_graph() X = tf.placeholder(tf.float32, shape = (IMAGE_SIZE, IMAGE_SIZE, 3)) tf_img1 = tf.image.flip_left_right(X) tf_img2 = tf.image.flip_up_down(X) tf_img3 = tf.image.transpose_image(X) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for img in X_imgs: flipped_imgs = sess.run([tf_img1, tf_img2, tf_img3], feed_dict = {X: img}) X_flip.extend(flipped_imgs) X_flip = np.array(X_flip, dtype = np.float32) return X_flip # Produce each image at scaling of 90%, 75% and 60% of original image. X_imgs = os.listdir("/home/pallab/gestures-cnn/images/resized/") scaled_imgs = central_scale_images(X_imgs, [0.90, 0.75, 0.60]) translated_imgs = translate_images(X_imgs) rotated_imgs = rotate_images(X_imgs) flipped_images = flip_images(X_imgs)
[ "tensorflow.image.transpose_image", "os.listdir", "tensorflow.reset_default_graph", "tensorflow.image.rot90", "math.ceil", "math.floor", "tensorflow.placeholder", "tensorflow.Session", "tensorflow.image.flip_up_down", "tensorflow.global_variables_initializer", "numpy.array", "tensorflow.image.crop_and_resize", "numpy.expand_dims", "tensorflow.image.extract_glimpse", "tensorflow.image.flip_left_right" ]
[((4951, 5006), 'os.listdir', 'os.listdir', (['"""/home/pallab/gestures-cnn/images/resized/"""'], {}), "('/home/pallab/gestures-cnn/images/resized/')\n", (4961, 5006), False, 'import os\n'), ((637, 687), 'numpy.array', 'np.array', (['[IMAGE_SIZE, IMAGE_SIZE]'], {'dtype': 'np.int32'}), '([IMAGE_SIZE, IMAGE_SIZE], dtype=np.int32)\n', (645, 687), True, 'import numpy as np\n'), ((721, 745), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (743, 745), True, 'import tensorflow as tf\n'), ((754, 818), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(1, IMAGE_SIZE, IMAGE_SIZE, 3)'}), '(tf.float32, shape=(1, IMAGE_SIZE, IMAGE_SIZE, 3))\n', (768, 818), True, 'import tensorflow as tf\n'), ((917, 971), 'tensorflow.image.crop_and_resize', 'tf.image.crop_and_resize', (['X', 'boxes', 'box_ind', 'crop_size'], {}), '(X, boxes, box_ind, crop_size)\n', (941, 971), True, 'import tensorflow as tf\n'), ((1295, 1335), 'numpy.array', 'np.array', (['X_scale_data'], {'dtype': 'np.float32'}), '(X_scale_data, dtype=np.float32)\n', (1303, 1335), True, 'import numpy as np\n'), ((2863, 2887), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (2885, 2887), True, 'import tensorflow as tf\n'), ((3616, 3660), 'numpy.array', 'np.array', (['X_translated_arr'], {'dtype': 'np.float32'}), '(X_translated_arr, dtype=np.float32)\n', (3624, 3660), True, 'import numpy as np\n'), ((3741, 3765), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (3763, 3765), True, 'import tensorflow as tf\n'), ((3774, 3835), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(IMAGE_SIZE, IMAGE_SIZE, 3)'}), '(tf.float32, shape=(IMAGE_SIZE, IMAGE_SIZE, 3))\n', (3788, 3835), True, 'import tensorflow as tf\n'), ((3846, 3870), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {}), '(tf.int32)\n', (3860, 3870), True, 'import tensorflow as tf\n'), ((3884, 3906), 'tensorflow.image.rot90', 'tf.image.rot90', (['X'], {'k': 'k'}), '(X, k=k)\n', (3898, 3906), True, 'import tensorflow as tf\n'), ((4237, 4273), 'numpy.array', 'np.array', (['X_rotate'], {'dtype': 'np.float32'}), '(X_rotate, dtype=np.float32)\n', (4245, 4273), True, 'import numpy as np\n'), ((4343, 4367), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (4365, 4367), True, 'import tensorflow as tf\n'), ((4376, 4437), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(IMAGE_SIZE, IMAGE_SIZE, 3)'}), '(tf.float32, shape=(IMAGE_SIZE, IMAGE_SIZE, 3))\n', (4390, 4437), True, 'import tensorflow as tf\n'), ((4454, 4481), 'tensorflow.image.flip_left_right', 'tf.image.flip_left_right', (['X'], {}), '(X)\n', (4478, 4481), True, 'import tensorflow as tf\n'), ((4496, 4520), 'tensorflow.image.flip_up_down', 'tf.image.flip_up_down', (['X'], {}), '(X)\n', (4517, 4520), True, 'import tensorflow as tf\n'), ((4535, 4562), 'tensorflow.image.transpose_image', 'tf.image.transpose_image', (['X'], {}), '(X)\n', (4559, 4562), True, 'import tensorflow as tf\n'), ((4813, 4847), 'numpy.array', 'np.array', (['X_flip'], {'dtype': 'np.float32'}), '(X_flip, dtype=np.float32)\n', (4821, 4847), True, 'import numpy as np\n'), ((518, 562), 'numpy.array', 'np.array', (['[y1, x1, y2, x2]'], {'dtype': 'np.float32'}), '([y1, x1, y2, x2], dtype=np.float32)\n', (526, 562), True, 'import numpy as np\n'), ((981, 993), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (991, 993), True, 'import tensorflow as tf\n'), ((1495, 1533), 'numpy.array', 'np.array', 
(['[0.0, 0.2]'], {'dtype': 'np.float32'}), '([0.0, 0.2], dtype=np.float32)\n', (1503, 1533), True, 'import numpy as np\n'), ((2897, 2909), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2907, 2909), True, 'import tensorflow as tf\n'), ((3918, 3930), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3928, 3930), True, 'import tensorflow as tf\n'), ((4572, 4584), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4582, 4584), True, 'import tensorflow as tf\n'), ((1020, 1053), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1051, 1053), True, 'import tensorflow as tf\n'), ((1120, 1152), 'numpy.expand_dims', 'np.expand_dims', (['img_data'], {'axis': '(0)'}), '(img_data, axis=0)\n', (1134, 1152), True, 'import numpy as np\n'), ((1656, 1678), 'math.ceil', 'ceil', (['(0.8 * IMAGE_SIZE)'], {}), '(0.8 * IMAGE_SIZE)\n', (1660, 1678), False, 'from math import ceil, floor\n'), ((1794, 1833), 'numpy.array', 'np.array', (['[0.0, -0.2]'], {'dtype': 'np.float32'}), '([0.0, -0.2], dtype=np.float32)\n', (1802, 1833), True, 'import numpy as np\n'), ((2936, 2969), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2967, 2969), True, 'import tensorflow as tf\n'), ((3336, 3383), 'tensorflow.image.extract_glimpse', 'tf.image.extract_glimpse', (['X_imgs', 'size', 'offsets'], {}), '(X_imgs, size, offsets)\n', (3360, 3383), True, 'import tensorflow as tf\n'), ((3957, 3990), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3988, 3990), True, 'import tensorflow as tf\n'), ((4611, 4644), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4642, 4644), True, 'import tensorflow as tf\n'), ((1573, 1595), 'math.ceil', 'ceil', (['(0.8 * IMAGE_SIZE)'], {}), '(0.8 * IMAGE_SIZE)\n', (1577, 1595), False, 'from math import ceil, floor\n'), ((1938, 1967), 'math.floor', 'floor', (['((1 - 0.8) * IMAGE_SIZE)'], {}), '((1 - 0.8) * IMAGE_SIZE)\n', (1943, 1967), False, 'from math import ceil, floor\n'), ((2108, 2146), 'numpy.array', 'np.array', (['[0.2, 0.0]'], {'dtype': 'np.float32'}), '([0.2, 0.0], dtype=np.float32)\n', (2116, 2146), True, 'import numpy as np\n'), ((2398, 2437), 'numpy.array', 'np.array', (['[-0.2, 0.0]'], {'dtype': 'np.float32'}), '([-0.2, 0.0], dtype=np.float32)\n', (2406, 2437), True, 'import numpy as np\n'), ((1873, 1895), 'math.ceil', 'ceil', (['(0.8 * IMAGE_SIZE)'], {}), '(0.8 * IMAGE_SIZE)\n', (1877, 1895), False, 'from math import ceil, floor\n'), ((2316, 2338), 'math.ceil', 'ceil', (['(0.8 * IMAGE_SIZE)'], {}), '(0.8 * IMAGE_SIZE)\n', (2320, 2338), False, 'from math import ceil, floor\n'), ((2589, 2618), 'math.floor', 'floor', (['((1 - 0.8) * IMAGE_SIZE)'], {}), '((1 - 0.8) * IMAGE_SIZE)\n', (2594, 2618), False, 'from math import ceil, floor\n'), ((2174, 2196), 'math.ceil', 'ceil', (['(0.8 * IMAGE_SIZE)'], {}), '(0.8 * IMAGE_SIZE)\n', (2178, 2196), False, 'from math import ceil, floor\n'), ((2465, 2487), 'math.ceil', 'ceil', (['(0.8 * IMAGE_SIZE)'], {}), '(0.8 * IMAGE_SIZE)\n', (2469, 2487), False, 'from math import ceil, floor\n')]
#!/usr/bin/env python3 # encoding: utf-8 # # (C) 2012-2016 <NAME> <<EMAIL>> # # SPDX-License-Identifier: BSD-3-Clause """\ Link To The Past - a backup tool Hash functions and commands. """ import hashlib import zlib class CRC32(object): """\ CRC32 API compatible to the hashlib functions (subset used by this program). >>> h = CRC32() >>> h.update(b'Hello World') >>> h.hexdigest() '4a17b156' """ def __init__(self): self.value = 0 def update(self, data): self.value = zlib.crc32(data, self.value) & 0xffffffff def hexdigest(self): return '{:08x}'.format(self.value) class NoHash(object): """\ API compatible to the hashlib functions (subset used by this program). >>> h = NoHash() >>> h.update(b'Hello World') >>> h.hexdigest() '-' """ def __init__(self): pass def update(self, data): pass def hexdigest(self): return '-' # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - SUPPORTED_HASHES = { 'NONE': NoHash, 'CRC32': CRC32, 'MD5': hashlib.md5, 'SHA-256': hashlib.sha256, 'SHA-512': hashlib.sha512, } def get_factory(name): """\ Get an object for calculating a hash. >>> f = get_factory('SHA-256') >>> h = f() >>> h.update(b'Hello World') >>> h.hexdigest() 'a591a6d40bf420404a011733cfb7b190d62c65bf0bcda32b57b277d9ad9f146e' """ if name is None: name = 'NONE' return SUPPORTED_HASHES[name.upper()] # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if __name__ == '__main__': import doctest doctest.testmod()
[ "doctest.testmod", "zlib.crc32" ]
[((1687, 1704), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (1702, 1704), False, 'import doctest\n'), ((531, 559), 'zlib.crc32', 'zlib.crc32', (['data', 'self.value'], {}), '(data, self.value)\n', (541, 559), False, 'import zlib\n')]
import unittest import numpy as np from astroNN.lamost import wavelength_solution, pseudo_continuum class LamostToolsTestCase(unittest.TestCase): def test_wavelength_solution(self): wavelength_solution() wavelength_solution(dr=5) self.assertRaises(ValueError, wavelength_solution, dr=1) def test_norm(self): pseudo_continuum(np.ones(3909), np.ones(3909)) if __name__ == '__main__': unittest.main()
[ "unittest.main", "astroNN.lamost.wavelength_solution", "numpy.ones" ]
[((432, 447), 'unittest.main', 'unittest.main', ([], {}), '()\n', (445, 447), False, 'import unittest\n'), ((197, 218), 'astroNN.lamost.wavelength_solution', 'wavelength_solution', ([], {}), '()\n', (216, 218), False, 'from astroNN.lamost import wavelength_solution, pseudo_continuum\n'), ((227, 252), 'astroNN.lamost.wavelength_solution', 'wavelength_solution', ([], {'dr': '(5)'}), '(dr=5)\n', (246, 252), False, 'from astroNN.lamost import wavelength_solution, pseudo_continuum\n'), ((369, 382), 'numpy.ones', 'np.ones', (['(3909)'], {}), '(3909)\n', (376, 382), True, 'import numpy as np\n'), ((384, 397), 'numpy.ones', 'np.ones', (['(3909)'], {}), '(3909)\n', (391, 397), True, 'import numpy as np\n')]
from django.http import HttpResponse from rest_framework.decorators import api_view from rest_framework.decorators import parser_classes from rest_framework.parsers import JSONParser import numpy as np import json import os from .utils.spectrogram_utils import SpectrogramUtils from .utils.feature_extraction_utils import FeatureExtractionUtils from .utils.classification_utils import ClassificationUtils from .utils.file_utils import FileUtils from .utils.dir_utils import DirUtils from .constants.headers import headers_data, headers_clusters, headers_clusters_no_display file_utils = FileUtils() dir_utils = DirUtils() @api_view(['GET']) @parser_classes((JSONParser,)) def get_species(request): species = os.listdir('clusters/model/') species_data = [] for specie in species: with open('clusters/model/' + specie, 'r') as infile: data = json.load(infile) species_data.append(data) return HttpResponse(json.dumps(species_data, separators=(',', ':'))) @api_view(['GET', 'POST']) @parser_classes((JSONParser,)) def get_clusters(request): if request.method == 'POST': data = request.data directory = data['dir'] files = data['files'] features, segs, metadata = file_utils.process_files( directory, files) classification_utils = ClassificationUtils() ex_level = 1 it_num = 5 data = np.hstack((features, metadata[:, 6].astype(float)[:, None])) mad = 'binomial' gad = '3pi' datanorm, mininums, maximums = classification_utils.norm(data) recon, mean_class, std_class = classification_utils.lamda( ex_level, it_num, datanorm, mad, gad) representive_calls = file_utils.get_representative_calls( recon, datanorm, metadata) keys_results = [header['label'] for header in headers_data] keys_clusters = [header['label'] for header in headers_clusters] keys_clusters_no_display = [header['label'] for header in headers_clusters_no_display] data_results = [] for i, value in enumerate(metadata): values = [value[0], str(recon[i]), * (value[1:].tolist()), datanorm[i]] zipbObj = zip(keys_results, values) data_results.append(dict(zipbObj)) data_clusters = [] for i, value in enumerate(representive_calls): zipbObj = zip(keys_clusters + keys_clusters_no_display, value) data_clusters.append(dict(zipbObj)) response = { 'results': { 'headers': headers_data, 'data': data_results, 'model': { 'features': datanorm.tolist(), 'min_values': mininums.tolist(), 'max_values': maximums.tolist(), 'metadata': metadata.tolist() } }, 'clusters': { 'headers': headers_clusters, 'data': data_clusters } } return HttpResponse(json.dumps(response, separators=(',', ':'))) @api_view(['GET', 'POST']) @parser_classes((JSONParser,)) def get_segment_in_image(request): if request.method == 'POST': data = request.data spectrogram_utils = SpectrogramUtils() filename = spectrogram_utils.get_segment_in_image(data['dir'], data['filename'], 1, float(data['start']) - 0.5, float(data['end']) + 0.5, float(data['min_freq']) - 200, float(data['max_freq']) + 200) response = { 'url': filename } return HttpResponse(json.dumps(response, separators=(',', ':'))) @api_view(['GET', 'POST']) @parser_classes((JSONParser,)) def save_cluster(request): if request.method == 'POST': data = request.data features = np.array(data['model']['features']) min_values = data['model']['min_values'] max_values = data['model']['max_values'] metadata = np.array(data['model']['metadata']) indices = np.array(data['selected']) audio_path, image_path, metadata_representative = file_utils.save_representative_call( data['name'], features[indices], metadata[indices]) model = { 'name': data['name'], 'metadata': 
metadata_representative.tolist(), 'mean_values': np.mean(features[indices], axis=0).tolist(), 'std_values': np.std(features[indices], axis=0).tolist(), 'min_values': min_values, 'max_values': max_values, 'image_path': image_path, 'audio_path': audio_path } dir_utils.create_dir('clusters/model/') with open('clusters/model/' + data['name'], 'w') as outfile: json.dump(model, outfile) return HttpResponse(json.dumps(model, separators=(',', ':'))) @api_view(['GET', 'POST']) @parser_classes((JSONParser,)) def search_clusters(request): if request.method == 'POST': data = request.data directory = data['dir'] files = data['files'] species = data['species'] features, segs, metadata = file_utils.process_files( directory, files) classification_utils = ClassificationUtils() ex_level = 1 it_num = 5 data = np.hstack((features, metadata[:, 6].astype(float)[:, None])) mad = 'binomial' gad = '3pi' num_datos, num_feat = data.shape mean_class = 0.5 * np.ones((1, num_feat)) std_class = 0.25 * np.ones((1, num_feat)) min_values = np.empty((0, num_feat)) max_values = np.empty((0, num_feat)) for specie in species: with open('clusters/model/' + specie, 'r') as infile: model = json.load(infile) mean_class = np.vstack( (mean_class, np.array(model['mean_values']))) std_class = np.vstack( (std_class, np.array(model['std_values']))) min_values = np.vstack( (min_values, np.array(model['min_values']))) max_values = np.vstack( (max_values, np.array(model['max_values']))) general_min_values = np.min(min_values, axis=0) general_max_values = np.max(max_values, axis=0) datanorm, mininums, maximums = classification_utils.norm( data, general_min_values, general_max_values) recon = classification_utils.predict_lamda( ex_level, datanorm, mad, gad, mean_class, std_class) representive_calls = file_utils.get_representative_calls( recon, datanorm, metadata) keys_results = [header['label'] for header in headers_data] keys_clusters = [header['label'] for header in headers_clusters] keys_clusters_no_display = [header['label'] for header in headers_clusters_no_display] data_results = [] for i, value in enumerate(metadata): species_name = species[recon[i] - 1] if recon[i] > 0 else 'NIC' values = [value[0], species_name, * (value[1:].tolist()), datanorm[i]] zipbObj = zip(keys_results, values) data_results.append(dict(zipbObj)) data_clusters = [] for i, value in enumerate(representive_calls): value[0] = species[i - 1] if i > 0 else 'NIC' zipbObj = zip(keys_clusters + keys_clusters_no_display, value) data_clusters.append(dict(zipbObj)) response = { 'results': { 'headers': headers_data, 'data': data_results, 'model': { 'features': datanorm.tolist(), 'min_values': mininums.tolist(), 'max_values': maximums.tolist(), 'metadata': metadata.tolist() } }, 'clusters': { 'headers': headers_clusters, 'data': data_clusters } } return HttpResponse(json.dumps(response, separators=(',', ':')))
[ "numpy.mean", "os.listdir", "numpy.ones", "numpy.std", "json.dumps", "numpy.max", "numpy.array", "numpy.empty", "rest_framework.decorators.parser_classes", "numpy.min", "json.load", "rest_framework.decorators.api_view", "json.dump" ]
[((626, 643), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (634, 643), False, 'from rest_framework.decorators import api_view\n'), ((645, 674), 'rest_framework.decorators.parser_classes', 'parser_classes', (['(JSONParser,)'], {}), '((JSONParser,))\n', (659, 674), False, 'from rest_framework.decorators import parser_classes\n'), ((1007, 1032), 'rest_framework.decorators.api_view', 'api_view', (["['GET', 'POST']"], {}), "(['GET', 'POST'])\n", (1015, 1032), False, 'from rest_framework.decorators import api_view\n'), ((1034, 1063), 'rest_framework.decorators.parser_classes', 'parser_classes', (['(JSONParser,)'], {}), '((JSONParser,))\n', (1048, 1063), False, 'from rest_framework.decorators import parser_classes\n'), ((3171, 3196), 'rest_framework.decorators.api_view', 'api_view', (["['GET', 'POST']"], {}), "(['GET', 'POST'])\n", (3179, 3196), False, 'from rest_framework.decorators import api_view\n'), ((3198, 3227), 'rest_framework.decorators.parser_classes', 'parser_classes', (['(JSONParser,)'], {}), '((JSONParser,))\n', (3212, 3227), False, 'from rest_framework.decorators import parser_classes\n'), ((3774, 3799), 'rest_framework.decorators.api_view', 'api_view', (["['GET', 'POST']"], {}), "(['GET', 'POST'])\n", (3782, 3799), False, 'from rest_framework.decorators import api_view\n'), ((3801, 3830), 'rest_framework.decorators.parser_classes', 'parser_classes', (['(JSONParser,)'], {}), '((JSONParser,))\n', (3815, 3830), False, 'from rest_framework.decorators import parser_classes\n'), ((4978, 5003), 'rest_framework.decorators.api_view', 'api_view', (["['GET', 'POST']"], {}), "(['GET', 'POST'])\n", (4986, 5003), False, 'from rest_framework.decorators import api_view\n'), ((5005, 5034), 'rest_framework.decorators.parser_classes', 'parser_classes', (['(JSONParser,)'], {}), '((JSONParser,))\n', (5019, 5034), False, 'from rest_framework.decorators import parser_classes\n'), ((715, 744), 'os.listdir', 'os.listdir', (['"""clusters/model/"""'], {}), "('clusters/model/')\n", (725, 744), False, 'import os\n'), ((955, 1002), 'json.dumps', 'json.dumps', (['species_data'], {'separators': "(',', ':')"}), "(species_data, separators=(',', ':'))\n", (965, 1002), False, 'import json\n'), ((3939, 3974), 'numpy.array', 'np.array', (["data['model']['features']"], {}), "(data['model']['features'])\n", (3947, 3974), True, 'import numpy as np\n'), ((4092, 4127), 'numpy.array', 'np.array', (["data['model']['metadata']"], {}), "(data['model']['metadata'])\n", (4100, 4127), True, 'import numpy as np\n'), ((4147, 4173), 'numpy.array', 'np.array', (["data['selected']"], {}), "(data['selected'])\n", (4155, 4173), True, 'import numpy as np\n'), ((5692, 5715), 'numpy.empty', 'np.empty', (['(0, num_feat)'], {}), '((0, num_feat))\n', (5700, 5715), True, 'import numpy as np\n'), ((5737, 5760), 'numpy.empty', 'np.empty', (['(0, num_feat)'], {}), '((0, num_feat))\n', (5745, 5760), True, 'import numpy as np\n'), ((6349, 6375), 'numpy.min', 'np.min', (['min_values'], {'axis': '(0)'}), '(min_values, axis=0)\n', (6355, 6375), True, 'import numpy as np\n'), ((6405, 6431), 'numpy.max', 'np.max', (['max_values'], {'axis': '(0)'}), '(max_values, axis=0)\n', (6411, 6431), True, 'import numpy as np\n'), ((875, 892), 'json.load', 'json.load', (['infile'], {}), '(infile)\n', (884, 892), False, 'import json\n'), ((3123, 3166), 'json.dumps', 'json.dumps', (['response'], {'separators': "(',', ':')"}), "(response, separators=(',', ':'))\n", (3133, 3166), False, 'import json\n'), ((3726, 3769), 'json.dumps', 
'json.dumps', (['response'], {'separators': "(',', ':')"}), "(response, separators=(',', ':'))\n", (3736, 3769), False, 'import json\n'), ((4878, 4903), 'json.dump', 'json.dump', (['model', 'outfile'], {}), '(model, outfile)\n', (4887, 4903), False, 'import json\n'), ((4933, 4973), 'json.dumps', 'json.dumps', (['model'], {'separators': "(',', ':')"}), "(model, separators=(',', ':'))\n", (4943, 4973), False, 'import json\n'), ((5598, 5620), 'numpy.ones', 'np.ones', (['(1, num_feat)'], {}), '((1, num_feat))\n', (5605, 5620), True, 'import numpy as np\n'), ((5648, 5670), 'numpy.ones', 'np.ones', (['(1, num_feat)'], {}), '((1, num_feat))\n', (5655, 5670), True, 'import numpy as np\n'), ((8220, 8263), 'json.dumps', 'json.dumps', (['response'], {'separators': "(',', ':')"}), "(response, separators=(',', ':'))\n", (8230, 8263), False, 'import json\n'), ((5882, 5899), 'json.load', 'json.load', (['infile'], {}), '(infile)\n', (5891, 5899), False, 'import json\n'), ((4472, 4506), 'numpy.mean', 'np.mean', (['features[indices]'], {'axis': '(0)'}), '(features[indices], axis=0)\n', (4479, 4506), True, 'import numpy as np\n'), ((4543, 4576), 'numpy.std', 'np.std', (['features[indices]'], {'axis': '(0)'}), '(features[indices], axis=0)\n', (4549, 4576), True, 'import numpy as np\n'), ((5973, 6003), 'numpy.array', 'np.array', (["model['mean_values']"], {}), "(model['mean_values'])\n", (5981, 6003), True, 'import numpy as np\n'), ((6077, 6106), 'numpy.array', 'np.array', (["model['std_values']"], {}), "(model['std_values'])\n", (6085, 6106), True, 'import numpy as np\n'), ((6182, 6211), 'numpy.array', 'np.array', (["model['min_values']"], {}), "(model['min_values'])\n", (6190, 6211), True, 'import numpy as np\n'), ((6287, 6316), 'numpy.array', 'np.array', (["model['max_values']"], {}), "(model['max_values'])\n", (6295, 6316), True, 'import numpy as np\n')]
# coding=utf-8 # Filename: h5tree.py """ Print the ROOT file structure. Usage: rtree FILE rtree (-h | --help) rtree --version Options: FILE Input file. -h --help Show this screen. """ from __future__ import division, absolute_import, print_function from km3pipe.io.root import open_rfile __author__ = "<NAME>" __copyright__ = "Copyright 2016, <NAME> and the KM3NeT collaboration." __credits__ = [] __license__ = "MIT" __maintainer__ = "<NAME>" __email__ = "<EMAIL>" __status__ = "Development" def rtree(rfile): rfile = open_rfile(rfile) for k in rfile.walk(): print(k) rfile.close() def main(): from docopt import docopt arguments = docopt(__doc__) rtree(arguments['FILE'])
[ "docopt.docopt", "km3pipe.io.root.open_rfile" ]
[((556, 573), 'km3pipe.io.root.open_rfile', 'open_rfile', (['rfile'], {}), '(rfile)\n', (566, 573), False, 'from km3pipe.io.root import open_rfile\n'), ((696, 711), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (702, 711), False, 'from docopt import docopt\n')]
#!/usr/bin/env python2 # -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function, unicode_literals from psychopy.visual import Window, TextStim from psychopy.core import wait, Clock, quit from psychopy.event import clearEvents, waitKeys, Mouse from psychopy.gui import Dlg from time import gmtime, strftime from codecs import open from random import shuffle, choice, randint from copy import deepcopy from psychopy.iohub import launchHubServer from numpy import mean, std from datetime import datetime from itertools import permutations import random ## for testing testing = False # True for testing, False for real recording ### main_ddline = 1 # sec isi_set = (500, 800, 1100) instruction_color = '#111111' #formerly = #9999FF ############ MAIN ITEMS - paste from JS probe_crime_list_1 = ' Ausgeben als : <NAME>\n\n Nachricht an Deckname : <NAME>\n\n Aktion : Operation Kuh\n\n Objekt : Regen Akte\n\n Inhalt des Objektes : Helikopter Pläne\n\n Adresse : Hai Straße' probe_crime_list_2 = ' Ausgeben als : <NAME>\n\n Nachricht an Deckname : Weißes Shirt\n\n Aktion : Operation Fichte\n\n Objekt : Eulen Akte\n\n Inhalt des Objektes : Messing Pläne\n\n Adresse : Löwen Straße' crime_list_1 = ["<NAME>", "<NAME>", "Operation Kuh", "Regen Akte", "Helikopter Pläne", "Hai Straße"] crime_list_2 = ["<NAME>", "Weißes Shirt","Operation Fichte","Eulen Akte","Messing Pläne","Löwen Straße"] dummy_list_numbers = [0, 1, 2, 3, 4, 5] training_recall_item = {0 : 'Ausgeben als', 1 : 'Nachricht an Deckname', 2 : 'Aktion', 3 : 'Objekt', 4 : 'Inhalt des Objektes', 5 : 'Adresse'} rounds = 1 if testing: escape_key = 'escape' instr_wait = 0.1 else: escape_key = 'notallowed' instr_wait = 0.5 # EXECUTE all main functions here def execute(): start_input() # prompt to input stuff # now initiate stuff set_screen() # creates psychopy screen and stim objects # window opens create_file() # created output file consent_instructions() training_instruction() which_round_indicator() training_software() which_round_indicator() training_list() training_software() which_round_indicator() training_list() training_software() final_slide() win.mouseVisible = False # hide mouse print("************** END OF LEARNING TASK **************") ending() # saves demographic & final infos, gives feedback waitKeys(keyList = ['b']) # press B to end the exp (prevents subject from closing window) quit() def consent_instructions(): show_instruction("Bitte füllen Sie die Einverständniserklärung zur Teilnahme am Experiment aus. \nSie sollten diese vor sich auf dem Tisch finden. Bei Unklarheiten oder weiteren Fragen heben Sie leise Ihre Hand.\nWenn Sie damit fertig sind, drücken Sie die Leertaste, um mit dem Experiment zu starten.") show_instruction("Sie werden nun eine Reihe von Aufgaben am Computer durchführen. Bitte lesen und befolgen Sie die Anweisungen sorgfältig. Sollten Sie während des Experiments Fragen haben, melden Sie sich bei der Versuchsleitung, bevor Sie fortfahren.\nDrücken Sie die Leertaste, um die Anweisungen zu sehen.") def which_round_indicator(): global condition if rounds == 1: show_instruction("Es folgt nun die erste Runde, in der die soeben gezeigten Wortpaare abgefragt werden. Geben Sie diese exakt so, wie sie Ihnen eben gezeigt wurden, ein. \nLeertaste drücken, um fortzufahren.") elif rounds == 2: show_instruction("Es folgen erneut alle Informationen, die Sie benötigen, wenn Sie sich als Komplize ausgeben. Damit diese Täuschung funktioniert, ist es sehr wichtig, dass jedes Detail der Nachricht korrekt ist. 
Bitte prägen Sie sich deshalb erneut alle Informationen ein. \nLeertaste drücken, um fortzufahren.") elif rounds == 3: show_instruction("Es folgt nun eine dritte und letzte Runde. Die Wortpaare werden noch einmal gezeigt, bevor diese ein letztes Mal abgefragt werden.\nLeertaste drücken, um fortzufahren.") def training_instruction(): global condition if condition % 2 != 0: probe_crime_list = probe_crime_list_1 else: probe_crime_list = probe_crime_list_2 show_instruction('Sie sollen eine Person kontaktieren, die unter Verdacht steht, kriminelle Aktivitäten begangen zu haben. Schreiben Sie dieser Person eine E-Mail, in der Sie um die Übergabe illegal erlangter Dokumente bitten. Dazu geben Sie sich als einer der Komplizen der Person aus und loggen sich in den Mail-Account dieses Komplizen ein. In der Nachricht bitten Sie den Verdächtigen, dass er Sie an einem bestimmten Ort trifft und die entsprechenden Dokumente bei sich hat. Die Informationen, die Sie für diese Aufgabe benötigen werden, werden Ihnen gleich präsentiert.\n\nDrücken Sie die Leertaste um fortzufahren.') show_instruction('Für das Verfassen der E-Mail werden Sie die folgenden Informationen brauchen. Sie loggen sich in den Uni Wien Webmail Account des Komplizen ein und senden dann eine Nachricht an den Decknamen der anderen verdächtigen Person. Sie erklären dieser Person, dass es um eine bestimmte Aktion geht und bitten die Person, Sie an einer bestimmten Adresse zu treffen und zu diesem Treffen das genannte Objekt mit dem sich darin befindenden Inhalt mitzubringen. Drücken Sie daher erst die Leertaste, wenn Sie die unten stehenden Wortpaare, die für das Verfassen der Nachricht benötigt werden, gründlich auswendig gelernt haben. Im Folgenden werden diese in drei Runden abgefragt.\n\n' + probe_crime_list) def training_list(): global condition if condition % 2 != 0: probe_crime_list = probe_crime_list_1 else: probe_crime_list = probe_crime_list_2 show_instruction('Drücken Sie die Leertaste, wenn Sie die unten stehenden Items gründlich auswendig gelernt haben.\nSie loggen sich in den Uni Wien Webmail Account des Komplizen ein und senden dann eine Nachricht an den Decknamen der anderen verdächtigen Person. 
Sie erklären dieser Person, dass es um eine bestimmte Aktion geht und bitten die Person, Sie an einer bestimmten Adresse zu treffen und zu diesem Treffen das genannte Objekt mit dem sich darin befindenden Inhalt mitzubringen.\n\n' + probe_crime_list) def training_software(): global condition, required, typedin, rounds required_items = [] if condition % 2 != 0: required_items = crime_list_1 else: required_items = crime_list_2 combine_shuffle = list(zip(required_items, dummy_list_numbers)) shuffle(combine_shuffle) required_items[:], dummy_list_numbers[:] = zip(*combine_shuffle) counter = 0 while counter <= 5: required = required_items[counter] cue = training_recall_item[dummy_list_numbers[counter]] counter += 1 instr_display = TextStim(win, color=instruction_color, font='Helvetica', text = u'Bitte geben Sie im Folgenden das korrekte, zuvor auswendig gelernte Wortpaar ein, drücken Sie dann ENTER.', pos=(0, 150), height=30, wrapWidth=1100, colorSpace='rgb') input_prompt = TextStim(win, color=instruction_color, font='Helvetica', text = cue + ':', pos=(-100, 0), alignHoriz = 'right', height=35) input_display = TextStim(win, color='black', pos=(-100, -4), alignHoriz = 'left', height=35, bold = True, colorSpace='rgb') typedin = '' while True: input_display.setText(typedin) instr_display.draw() input_prompt.draw() input_display.draw() win.flip() char = waitKeys()[0] if char == 'backspace' and len(typedin) > 0: typedin = typedin[:-1] elif char == escape_key: break elif char == 'return': if len( trm(typedin) ) > 0: break elif len(char) == 1 and char.isalpha(): typedin += char.upper() elif char == 'space': typedin += ' ' elif char == 'comma': typedin += ',' typedin_words = trm(typedin) add_resp() if counter <= 5: wait(0.5) else: break rounds += 1 def final_slide(): show_instruction("Sie haben nun alle relevanten Informationen gelernt. Bitte führen Sie die Aufgabe nun aus, indem Sie im Google Chrome Browser auf webmail.univie.ac.at gehen und sich dort mit dem eingespeicherten user:account einloggen und die Nachricht mit den gelernten Informationen verfassen und senden. Wenden Sie sich bitte an die Versuchsleitung, um zum Desktop zu gelangen und führen Sie die Aufgabe dann eigenständig aus. 
Sollten Sie weitere Fragen haben, wenden Sie sich bitte ebenfalls an die Versuchsleitung.") waitKeys(keyList = ['b']) def set_screen(): # screen properties global win, start_text, left_label, right_label, center_disp, instruction_page win = Window([1280, 1000], color='#dddddd', fullscr = 1, units = 'pix', allowGUI = True) # 1280 1024 start_text = TextStim(win, color=instruction_color, font='Helvetica', text = u'Um anzufangen, bitte die Leertaste drücken.', pos = [0,-300], height=35, bold = True, wrapWidth= 1100) left_label = TextStim(win, color='#111111', font='Verdana', text = 'unvertraut', pos = [-350,-160], height=35, alignHoriz='center') right_label = TextStim(win, color='#111111', font='Verdana', text = 'vertraut', pos = [350,-160], height=35, alignHoriz='center') center_disp = TextStim(win, color='#111111', font='Arial', text = '', height = 60) instruction_page = TextStim(win, wrapWidth = 1200, height = 28, font='Helvetica', color = instruction_color) def start_input(): global subj_id, dems, condition, gender input_box = Dlg(title=u'Grunddaten', labelButtonOK=u'OK', labelButtonCancel=u'Abbrechen') input_box.addText(text=u'') input_box.addField(label=u'c.', tip = '1-8') input_box.addField(label=u'VP', tip = 'Ziffern') input_box.addText(text=u'') input_box.addText(text=u'Bitte ausfüllen:') input_box.addField(label=u'Geschlecht', initial = '', choices=[u'männlich',u'weiblich', u'divers'] ) input_box.addField(label=u'Alter', tip = 'Ziffern') input_box.addText(text=u'') input_box.show() if input_box.OK: stop = False try: condition = int(input_box.data[0]) except ValueError: condition = 99 print("Condition must be a number!") ## CONDITIONS: # use condition nos. for control vs. experimental group # plus for guilty vs innocent block first # 1 probes 1 + exp + crime first # 2 probes 2 + exp + nocrime first # 3 probes 1 + exp + nocrime first # 4 probes 2 + exp + crime first # 5 probes 1 + control + crime first # 6 probes 2 + control + no crime first # 7 probes 1 + control + no crime first # 8 probes 2 + control + crime first first # check if variables correctly given if condition not in range(1,9): if testing: condition = 1 # set value for testing to skip Dlg input box print("condition was not set, now set to " + str(condition) + " for testing.") else: print("condition was not set correctly (should be 1/2/3/4/5/6/7/8)") stop = True try: subj_num = int(input_box.data[1]) except ValueError: if testing: subj_num = 99 # set value for testing to skip Dlg input box print("subj_num was not set, now set to " + str(subj_num) + " for testing.") else: print("vp (subject number) was not set correctly (should be simple number)") stop = True try: age = int(input_box.data[3]) except ValueError: if testing: age = 11 # set value for testing to skip Dlg input box print("age was not set, now set to " + str(age) + " for testing.") else: print("age was not set correctly (should be simple number)") stop = True if stop: print("\nTry again with correct inputs.\n") quit() subj_id = str(subj_num).zfill(3) + "_" + str(strftime("%Y%m%d%H%M%S", gmtime())) if input_box.data[2] == 'weiblich': gender = 2 elif input_box.data[2] == 'männlich': gender = 1 else: gender = 3 dems = 'dems\tgender/age\t' + str(gender) + '/' + str(age) start_date = datetime.now() else: quit() def create_file(): global data_out f_name = 'lcp1_learning_' + str(condition) + "_" + subj_id + '.txt' data_out=open(f_name, 'a', encoding='utf-8') data_out.write( '\t'.join( [ "subject_id", "condition", "probe_item", "typed_in", "similarityscore", "rounds" ] ) 
+ "\n" ) print("File created:", f_name) def show_instruction(instruction_text): instruction_page.setText(instruction_text) instruction_page.draw() win.flip() wait(instr_wait) inst_resp = waitKeys(keyList = ['space', escape_key]) end_on_esc(inst_resp[0]) def end_on_esc(escap): if escap == escape_key : # escape print("Trying to escape?") instruction_page.setText('Sure you want to discontinue and quit the experiment?\n\nPress "y" to quit, or press "n" to continue.') instruction_page.draw() win.flip() wait(1) quit_resp = waitKeys(keyList = ['y', 'n']) if quit_resp[0] == 'y': print("************ ESCAPED ************") data_out.close() win.close() quit() else: clearEvents() print("Continuing...") # from https://github.com/luosch/similar_text def similar_str(str1, str2): """ return the len of longest string both in str1 and str2 and the positions in str1 and str2 """ max_len = tmp = pos1 = pos2 = 0 len1, len2 = len(str1), len(str2) for p in range(len1): for q in range(len2): tmp = 0 while p + tmp < len1 and q + tmp < len2 \ and str1[p + tmp] == str2[q + tmp]: tmp += 1 if tmp > max_len: max_len, pos1, pos2 = tmp, p, q return max_len, pos1, pos2 def similar_char(str1, str2): """ return the total length of longest string both in str1 and str2 """ max_len, pos1, pos2 = similar_str(str1, str2) total = max_len if max_len != 0: if pos1 and pos2: total += similar_char(str1[:pos1], str2[:pos2]) if pos1 + max_len < len(str1) and pos2 + max_len < len(str2): total += similar_char(str1[pos1 + max_len:], str2[pos2 + max_len:]); return total def similar_text(str1, str2): """ return a int value in [0, 100], which stands for match level """ if not (isinstance(str1, str) or isinstance(str1, unicode)): raise TypeError("must be str or unicode") elif not (isinstance(str2, str) or isinstance(str2, unicode)): raise TypeError("must be str or unicode") elif len(str1) == 0 and len(str2) == 0: return 0.0 else: return int(similar_char(str1, str2) * 200.0 / (len(str1) + len(str2))) def trm(raw_inp): return [w for w in raw_inp.replace(',', ' ').split(' ') if w != ''][:2] def add_resp(): global condition, required data_out.write( '\t'.join( [ str(subj_id), str(condition), str(required), str(typedin), str(similar_text(str(required.upper()), str(typedin)))]) + '\t' + str(rounds) + '\n' ) print(required, str(typedin), similar_text(str(required.upper()), str(typedin))) def ending (): data_out.write(dems + "\n") data_out.close() show_instruction( "ENDE" ) # EXECUTE execute()
[ "psychopy.event.waitKeys", "psychopy.core.quit", "random.shuffle", "psychopy.visual.TextStim", "psychopy.gui.Dlg", "datetime.datetime.now", "psychopy.event.clearEvents", "codecs.open", "time.gmtime", "psychopy.visual.Window", "psychopy.core.wait" ]
[((2414, 2437), 'psychopy.event.waitKeys', 'waitKeys', ([], {'keyList': "['b']"}), "(keyList=['b'])\n", (2422, 2437), False, 'from psychopy.event import clearEvents, waitKeys, Mouse\n'), ((2508, 2514), 'psychopy.core.quit', 'quit', ([], {}), '()\n', (2512, 2514), False, 'from psychopy.core import wait, Clock, quit\n'), ((6525, 6549), 'random.shuffle', 'shuffle', (['combine_shuffle'], {}), '(combine_shuffle)\n', (6532, 6549), False, 'from random import shuffle, choice, randint\n'), ((8752, 8775), 'psychopy.event.waitKeys', 'waitKeys', ([], {'keyList': "['b']"}), "(keyList=['b'])\n", (8760, 8775), False, 'from psychopy.event import clearEvents, waitKeys, Mouse\n'), ((8910, 8986), 'psychopy.visual.Window', 'Window', (['[1280, 1000]'], {'color': '"""#dddddd"""', 'fullscr': '(1)', 'units': '"""pix"""', 'allowGUI': '(True)'}), "([1280, 1000], color='#dddddd', fullscr=1, units='pix', allowGUI=True)\n", (8916, 8986), False, 'from psychopy.visual import Window, TextStim\n'), ((9022, 9194), 'psychopy.visual.TextStim', 'TextStim', (['win'], {'color': 'instruction_color', 'font': '"""Helvetica"""', 'text': 'u"""Um anzufangen, bitte die Leertaste drücken."""', 'pos': '[0, -300]', 'height': '(35)', 'bold': '(True)', 'wrapWidth': '(1100)'}), "(win, color=instruction_color, font='Helvetica', text=\n u'Um anzufangen, bitte die Leertaste drücken.', pos=[0, -300], height=\n 35, bold=True, wrapWidth=1100)\n", (9030, 9194), False, 'from psychopy.visual import Window, TextStim\n'), ((9208, 9327), 'psychopy.visual.TextStim', 'TextStim', (['win'], {'color': '"""#111111"""', 'font': '"""Verdana"""', 'text': '"""unvertraut"""', 'pos': '[-350, -160]', 'height': '(35)', 'alignHoriz': '"""center"""'}), "(win, color='#111111', font='Verdana', text='unvertraut', pos=[-350,\n -160], height=35, alignHoriz='center')\n", (9216, 9327), False, 'from psychopy.visual import Window, TextStim\n'), ((9345, 9462), 'psychopy.visual.TextStim', 'TextStim', (['win'], {'color': '"""#111111"""', 'font': '"""Verdana"""', 'text': '"""vertraut"""', 'pos': '[350, -160]', 'height': '(35)', 'alignHoriz': '"""center"""'}), "(win, color='#111111', font='Verdana', text='vertraut', pos=[350, -\n 160], height=35, alignHoriz='center')\n", (9353, 9462), False, 'from psychopy.visual import Window, TextStim\n'), ((9479, 9543), 'psychopy.visual.TextStim', 'TextStim', (['win'], {'color': '"""#111111"""', 'font': '"""Arial"""', 'text': '""""""', 'height': '(60)'}), "(win, color='#111111', font='Arial', text='', height=60)\n", (9487, 9543), False, 'from psychopy.visual import Window, TextStim\n'), ((9571, 9659), 'psychopy.visual.TextStim', 'TextStim', (['win'], {'wrapWidth': '(1200)', 'height': '(28)', 'font': '"""Helvetica"""', 'color': 'instruction_color'}), "(win, wrapWidth=1200, height=28, font='Helvetica', color=\n instruction_color)\n", (9579, 9659), False, 'from psychopy.visual import Window, TextStim\n'), ((9742, 9819), 'psychopy.gui.Dlg', 'Dlg', ([], {'title': 'u"""Grunddaten"""', 'labelButtonOK': 'u"""OK"""', 'labelButtonCancel': 'u"""Abbrechen"""'}), "(title=u'Grunddaten', labelButtonOK=u'OK', labelButtonCancel=u'Abbrechen')\n", (9745, 9819), False, 'from psychopy.gui import Dlg\n'), ((12818, 12853), 'codecs.open', 'open', (['f_name', '"""a"""'], {'encoding': '"""utf-8"""'}), "(f_name, 'a', encoding='utf-8')\n", (12822, 12853), False, 'from codecs import open\n'), ((13154, 13170), 'psychopy.core.wait', 'wait', (['instr_wait'], {}), '(instr_wait)\n', (13158, 13170), False, 'from psychopy.core import wait, Clock, quit\n'), ((13187, 13226), 
'psychopy.event.waitKeys', 'waitKeys', ([], {'keyList': "['space', escape_key]"}), "(keyList=['space', escape_key])\n", (13195, 13226), False, 'from psychopy.event import clearEvents, waitKeys, Mouse\n'), ((6812, 7052), 'psychopy.visual.TextStim', 'TextStim', (['win'], {'color': 'instruction_color', 'font': '"""Helvetica"""', 'text': 'u"""Bitte geben Sie im Folgenden das korrekte, zuvor auswendig gelernte Wortpaar ein, drücken Sie dann ENTER."""', 'pos': '(0, 150)', 'height': '(30)', 'wrapWidth': '(1100)', 'colorSpace': '"""rgb"""'}), "(win, color=instruction_color, font='Helvetica', text=\n u'Bitte geben Sie im Folgenden das korrekte, zuvor auswendig gelernte Wortpaar ein, drücken Sie dann ENTER.'\n , pos=(0, 150), height=30, wrapWidth=1100, colorSpace='rgb')\n", (6820, 7052), False, 'from psychopy.visual import Window, TextStim\n'), ((7069, 7191), 'psychopy.visual.TextStim', 'TextStim', (['win'], {'color': 'instruction_color', 'font': '"""Helvetica"""', 'text': "(cue + ':')", 'pos': '(-100, 0)', 'alignHoriz': '"""right"""', 'height': '(35)'}), "(win, color=instruction_color, font='Helvetica', text=cue + ':',\n pos=(-100, 0), alignHoriz='right', height=35)\n", (7077, 7191), False, 'from psychopy.visual import Window, TextStim\n'), ((7217, 7324), 'psychopy.visual.TextStim', 'TextStim', (['win'], {'color': '"""black"""', 'pos': '(-100, -4)', 'alignHoriz': '"""left"""', 'height': '(35)', 'bold': '(True)', 'colorSpace': '"""rgb"""'}), "(win, color='black', pos=(-100, -4), alignHoriz='left', height=35,\n bold=True, colorSpace='rgb')\n", (7225, 7324), False, 'from psychopy.visual import Window, TextStim\n'), ((12651, 12665), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (12663, 12665), False, 'from datetime import datetime\n'), ((12684, 12690), 'psychopy.core.quit', 'quit', ([], {}), '()\n', (12688, 12690), False, 'from psychopy.core import wait, Clock, quit\n'), ((13554, 13561), 'psychopy.core.wait', 'wait', (['(1)'], {}), '(1)\n', (13558, 13561), False, 'from psychopy.core import wait, Clock, quit\n'), ((13582, 13610), 'psychopy.event.waitKeys', 'waitKeys', ([], {'keyList': "['y', 'n']"}), "(keyList=['y', 'n'])\n", (13590, 13610), False, 'from psychopy.event import clearEvents, waitKeys, Mouse\n'), ((8139, 8148), 'psychopy.core.wait', 'wait', (['(0.5)'], {}), '(0.5)\n', (8143, 8148), False, 'from psychopy.core import wait, Clock, quit\n'), ((12294, 12300), 'psychopy.core.quit', 'quit', ([], {}), '()\n', (12298, 12300), False, 'from psychopy.core import wait, Clock, quit\n'), ((13765, 13771), 'psychopy.core.quit', 'quit', ([], {}), '()\n', (13769, 13771), False, 'from psychopy.core import wait, Clock, quit\n'), ((13798, 13811), 'psychopy.event.clearEvents', 'clearEvents', ([], {}), '()\n', (13809, 13811), False, 'from psychopy.event import clearEvents, waitKeys, Mouse\n'), ((7549, 7559), 'psychopy.event.waitKeys', 'waitKeys', ([], {}), '()\n', (7557, 7559), False, 'from psychopy.event import clearEvents, waitKeys, Mouse\n'), ((12379, 12387), 'time.gmtime', 'gmtime', ([], {}), '()\n', (12385, 12387), False, 'from time import gmtime, strftime\n')]
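The add_resp logging in the script above scores each typed-in answer with similar_text, which returns an integer match level between 0 and 100. A quick standalone check of that scoring follows; the word pair is an invented example (not one of the real probe items), and similar_str, similar_char and similar_text are assumed to be pasted into or imported from the script above.

# Invented word pair, purely to exercise the scoring used in add_resp above.
required = "GELBER KOFFER"   # hypothetical memorised answer
typed_in = "GELBER KOFER"    # participant response with a typo
score = similar_text(required.upper(), typed_in.upper())
print(score)                 # integer match level in [0, 100]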
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: (c) 2018, F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'certified'} DOCUMENTATION = r''' --- module: bigip_asm_policy_import short_description: Manage BIG-IP ASM policy imports description: - Manage BIG-IP ASM policies policy imports. version_added: 2.8 options: name: description: - The ASM policy to create or override. type: str required: True inline: description: - When specified the ASM policy is created from a provided string. - Content needs to be provided in a valid XML format otherwise the operation will fail. type: str source: description: - Full path to a policy file to be imported into the BIG-IP ASM. - Policy files exported from newer versions of BIG-IP cannot be imported into older versions of BIG-IP. The opposite, however, is true; you can import older into newer. - The file format can be binary of XML. type: path force: description: - When set to C(yes) any existing policy with the same name will be overwritten by the new import. - Works for both inline and file imports, if the policy does not exist this setting is ignored. default: no type: bool partition: description: - Device partition to create policy on. type: str default: Common extends_documentation_fragment: f5 author: - <NAME> (@wojtek0806) ''' EXAMPLES = r''' - name: Import ASM policy bigip_asm_policy_import: name: new_asm_policy file: /root/asm_policy.xml provider: server: lb.mydomain.com user: admin password: <PASSWORD> delegate_to: localhost - name: Import ASM policy inline bigip_asm_policy_import: name: foo-policy4 inline: <xml>content</xml> provider: server: lb.mydomain.com user: admin password: <PASSWORD> delegate_to: localhost - name: Override existing ASM policy bigip_asm_policy: name: new_asm_policy file: /root/asm_policy_new.xml force: yes provider: server: lb.mydomain.com user: admin password: <PASSWORD> delegate_to: localhost ''' RETURN = r''' source: description: Local path to an ASM policy file. 
returned: changed type: str sample: /root/some_policy.xml inline: description: Contents of policy as an inline string returned: changed type: str sample: <xml>foobar contents</xml> name: description: Name of the ASM policy to be created/overwritten returned: changed type: str sample: Asm_APP1_Transparent force: description: Set when overwriting an existing policy returned: changed type: bool sample: yes ''' import os import time from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.basic import env_fallback try: from library.module_utils.network.f5.bigip import F5RestClient from library.module_utils.network.f5.common import F5ModuleError from library.module_utils.network.f5.common import AnsibleF5Parameters from library.module_utils.network.f5.common import fq_name from library.module_utils.network.f5.common import transform_name from library.module_utils.network.f5.common import f5_argument_spec from library.module_utils.network.f5.icontrol import upload_file from library.module_utils.network.f5.icontrol import module_provisioned except ImportError: from ansible.module_utils.network.f5.bigip import F5RestClient from ansible.module_utils.network.f5.common import F5ModuleError from ansible.module_utils.network.f5.common import AnsibleF5Parameters from ansible.module_utils.network.f5.common import fq_name from ansible.module_utils.network.f5.common import transform_name from ansible.module_utils.network.f5.common import f5_argument_spec from ansible.module_utils.network.f5.icontrol import upload_file from ansible.module_utils.network.f5.icontrol import module_provisioned class Parameters(AnsibleF5Parameters): updatables = [] returnables = [ 'name', 'inline', 'source', 'force' ] api_attributes = [ 'file', 'name', ] api_map = { 'file': 'inline', 'filename': 'source', } class ApiParameters(Parameters): pass class ModuleParameters(Parameters): pass class Changes(Parameters): def to_return(self): result = {} try: for returnable in self.returnables: result[returnable] = getattr(self, returnable) result = self._filter_params(result) except Exception: pass return result class UsableChanges(Changes): pass class ReportableChanges(Changes): pass class Difference(object): def __init__(self, want, have=None): self.want = want self.have = have def compare(self, param): try: result = getattr(self, param) return result except AttributeError: return self.__default(param) def __default(self, param): attr1 = getattr(self.want, param) try: attr2 = getattr(self.have, param) if attr1 != attr2: return attr1 except AttributeError: return attr1 class ModuleManager(object): def __init__(self, *args, **kwargs): self.module = kwargs.get('module', None) self.client = F5RestClient(**self.module.params) self.want = ModuleParameters(params=self.module.params) self.changes = UsableChanges() def _set_changed_options(self): changed = {} for key in Parameters.returnables: if getattr(self.want, key) is not None: changed[key] = getattr(self.want, key) if changed: self.changes = UsableChanges(params=changed) def _announce_deprecations(self, result): warnings = result.pop('__warnings', []) for warning in warnings: self.client.module.deprecate( msg=warning['msg'], version=warning['version'] ) def exec_module(self): if not module_provisioned(self.client, 'asm'): raise F5ModuleError( "ASM must be provisioned to use this module." 
) result = dict() changed = self.policy_import() reportable = ReportableChanges(params=self.changes.to_return()) changes = reportable.to_return() result.update(**changes) result.update(dict(changed=changed)) self._announce_deprecations(result) return result def policy_import(self): self._set_changed_options() if self.module.check_mode: return True if self.exists(): if self.want.force is False: return False if self.want.inline: task = self.inline_import() self.wait_for_task(task) return True self.import_file_to_device() self.remove_temp_policy_from_device() return True def exists(self): uri = 'https://{0}:{1}/mgmt/tm/asm/policies/'.format( self.client.provider['server'], self.client.provider['server_port'], ) query = "?$filter=contains(name,'{0}')+and+contains(partition,'{1}')&$select=name,partition".format( self.want.name, self.want.partition ) resp = self.client.api.get(uri + query) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'items' in response and response['items'] != []: return True return False def upload_file_to_device(self, content, name): url = 'https://{0}:{1}/mgmt/shared/file-transfer/uploads'.format( self.client.provider['server'], self.client.provider['server_port'] ) try: upload_file(self.client, url, content, name) except F5ModuleError: raise F5ModuleError( "Failed to upload the file." ) def _get_policy_link(self): uri = 'https://{0}:{1}/mgmt/tm/asm/policies/'.format( self.client.provider['server'], self.client.provider['server_port'], ) query = "?$filter=contains(name,'{0}')+and+contains(partition,'{1}')&$select=name,partition".format( self.want.name, self.want.partition ) resp = self.client.api.get(uri + query) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) policy_link = response['items'][0]['selfLink'] return policy_link def inline_import(self): params = self.changes.api_params() uri = "https://{0}:{1}/mgmt/tm/asm/tasks/import-policy/".format( self.client.provider['server'], self.client.provider['server_port'], ) if self.want.force: params.update(dict(policyReference={'link': self._get_policy_link()})) params.pop('name') resp = self.client.api.post(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) return response['id'] def wait_for_task(self, task_id): uri = "https://{0}:{1}/mgmt/tm/asm/tasks/import-policy/{2}".format( self.client.provider['server'], self.client.provider['server_port'], task_id ) while True: resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) if response['status'] in ['COMPLETED', 'FAILURE']: break time.sleep(1) if response['status'] == 'FAILURE': raise F5ModuleError( 'Failed to import ASM policy.' 
) if response['status'] == 'COMPLETED': return True def import_file_to_device(self): name = os.path.split(self.want.source)[1] self.upload_file_to_device(self.want.source, name) time.sleep(2) full_name = fq_name(self.want.partition, self.want.name) if self.want.force: cmd = 'tmsh load asm policy {0} file /var/config/rest/downloads/{1} overwrite'.format(full_name, name) else: cmd = 'tmsh load asm policy {0} file /var/config/rest/downloads/{1}'.format(full_name, name) uri = "https://{0}:{1}/mgmt/tm/util/bash/".format( self.client.provider['server'], self.client.provider['server_port'], ) args = dict( command='run', utilCmdArgs='-c "{0}"'.format(cmd) ) resp = self.client.api.post(uri, json=args) try: response = resp.json() if 'commandResult' in response: if 'Unexpected Error' in response['commandResult']: raise F5ModuleError(response['commandResult']) except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) return True def remove_temp_policy_from_device(self): name = os.path.split(self.want.source)[1] tpath_name = '/var/config/rest/downloads/{0}'.format(name) uri = "https://{0}:{1}/mgmt/tm/util/unix-rm/".format( self.client.provider['server'], self.client.provider['server_port'], ) args = dict( command='run', utilCmdArgs=tpath_name ) resp = self.client.api.post(uri, json=args) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True argument_spec = dict( name=dict( required=True, ), source=dict(type='path'), inline=dict(), force=dict( type='bool', default='no' ), partition=dict( default='Common', fallback=(env_fallback, ['F5_PARTITION']) ) ) self.argument_spec = {} self.argument_spec.update(f5_argument_spec) self.argument_spec.update(argument_spec) self.mutually_exclusive = [ ['source', 'inline'] ] def main(): spec = ArgumentSpec() module = AnsibleModule( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode, mutually_exclusive=spec.mutually_exclusive ) try: mm = ModuleManager(module=module) results = mm.exec_module() module.exit_json(**results) except F5ModuleError as ex: module.fail_json(msg=str(ex)) if __name__ == '__main__': main()
[ "ansible.module_utils.basic.AnsibleModule", "ansible.module_utils.network.f5.bigip.F5RestClient", "ansible.module_utils.network.f5.icontrol.upload_file", "ansible.module_utils.network.f5.common.F5ModuleError", "time.sleep", "os.path.split", "ansible.module_utils.network.f5.icontrol.module_provisioned", "ansible.module_utils.network.f5.common.fq_name" ]
[((13811, 13953), 'ansible.module_utils.basic.AnsibleModule', 'AnsibleModule', ([], {'argument_spec': 'spec.argument_spec', 'supports_check_mode': 'spec.supports_check_mode', 'mutually_exclusive': 'spec.mutually_exclusive'}), '(argument_spec=spec.argument_spec, supports_check_mode=spec.\n supports_check_mode, mutually_exclusive=spec.mutually_exclusive)\n', (13824, 13953), False, 'from ansible.module_utils.basic import AnsibleModule\n'), ((5701, 5735), 'ansible.module_utils.network.f5.bigip.F5RestClient', 'F5RestClient', ([], {}), '(**self.module.params)\n', (5713, 5735), False, 'from ansible.module_utils.network.f5.bigip import F5RestClient\n'), ((11020, 11033), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (11030, 11033), False, 'import time\n'), ((11055, 11099), 'ansible.module_utils.network.f5.common.fq_name', 'fq_name', (['self.want.partition', 'self.want.name'], {}), '(self.want.partition, self.want.name)\n', (11062, 11099), False, 'from ansible.module_utils.network.f5.common import fq_name\n'), ((6430, 6468), 'ansible.module_utils.network.f5.icontrol.module_provisioned', 'module_provisioned', (['self.client', '"""asm"""'], {}), "(self.client, 'asm')\n", (6448, 6468), False, 'from ansible.module_utils.network.f5.icontrol import module_provisioned\n'), ((6488, 6548), 'ansible.module_utils.network.f5.common.F5ModuleError', 'F5ModuleError', (['"""ASM must be provisioned to use this module."""'], {}), "('ASM must be provisioned to use this module.')\n", (6501, 6548), False, 'from ansible.module_utils.network.f5.common import F5ModuleError\n'), ((8243, 8287), 'ansible.module_utils.network.f5.icontrol.upload_file', 'upload_file', (['self.client', 'url', 'content', 'name'], {}), '(self.client, url, content, name)\n', (8254, 8287), False, 'from ansible.module_utils.network.f5.icontrol import upload_file\n'), ((10642, 10655), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (10652, 10655), False, 'import time\n'), ((10719, 10764), 'ansible.module_utils.network.f5.common.F5ModuleError', 'F5ModuleError', (['"""Failed to import ASM policy."""'], {}), "('Failed to import ASM policy.')\n", (10732, 10764), False, 'from ansible.module_utils.network.f5.common import F5ModuleError\n'), ((10918, 10949), 'os.path.split', 'os.path.split', (['self.want.source'], {}), '(self.want.source)\n', (10931, 10949), False, 'import os\n'), ((12290, 12321), 'os.path.split', 'os.path.split', (['self.want.source'], {}), '(self.want.source)\n', (12303, 12321), False, 'import os\n'), ((8336, 8379), 'ansible.module_utils.network.f5.common.F5ModuleError', 'F5ModuleError', (['"""Failed to upload the file."""'], {}), "('Failed to upload the file.')\n", (8349, 8379), False, 'from ansible.module_utils.network.f5.common import F5ModuleError\n'), ((9727, 9761), 'ansible.module_utils.network.f5.common.F5ModuleError', 'F5ModuleError', (["response['message']"], {}), "(response['message'])\n", (9740, 9761), False, 'from ansible.module_utils.network.f5.common import F5ModuleError\n'), ((9802, 9829), 'ansible.module_utils.network.f5.common.F5ModuleError', 'F5ModuleError', (['resp.content'], {}), '(resp.content)\n', (9815, 9829), False, 'from ansible.module_utils.network.f5.common import F5ModuleError\n'), ((12105, 12139), 'ansible.module_utils.network.f5.common.F5ModuleError', 'F5ModuleError', (["response['message']"], {}), "(response['message'])\n", (12118, 12139), False, 'from ansible.module_utils.network.f5.common import F5ModuleError\n'), ((12180, 12207), 'ansible.module_utils.network.f5.common.F5ModuleError', 
'F5ModuleError', (['resp.content'], {}), '(resp.content)\n', (12193, 12207), False, 'from ansible.module_utils.network.f5.common import F5ModuleError\n'), ((12943, 12977), 'ansible.module_utils.network.f5.common.F5ModuleError', 'F5ModuleError', (["response['message']"], {}), "(response['message'])\n", (12956, 12977), False, 'from ansible.module_utils.network.f5.common import F5ModuleError\n'), ((13018, 13045), 'ansible.module_utils.network.f5.common.F5ModuleError', 'F5ModuleError', (['resp.content'], {}), '(resp.content)\n', (13031, 13045), False, 'from ansible.module_utils.network.f5.common import F5ModuleError\n'), ((10433, 10467), 'ansible.module_utils.network.f5.common.F5ModuleError', 'F5ModuleError', (["response['message']"], {}), "(response['message'])\n", (10446, 10467), False, 'from ansible.module_utils.network.f5.common import F5ModuleError\n'), ((10516, 10543), 'ansible.module_utils.network.f5.common.F5ModuleError', 'F5ModuleError', (['resp.content'], {}), '(resp.content)\n', (10529, 10543), False, 'from ansible.module_utils.network.f5.common import F5ModuleError\n'), ((11870, 11910), 'ansible.module_utils.network.f5.common.F5ModuleError', 'F5ModuleError', (["response['commandResult']"], {}), "(response['commandResult'])\n", (11883, 11910), False, 'from ansible.module_utils.network.f5.common import F5ModuleError\n')]
from tweepy import OAuthHandler, Stream, API
from tweepy.streaming import StreamListener
import json
import logging
import time
import pymongo
import config

client = pymongo.MongoClient(host='mongo_container', port=27018)
db = client.tweets_db

auth = OAuthHandler(config.CONSUMER_API_KEY, config.CONSUMER_API_SECRET)
auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)
api = API(auth, wait_on_rate_limit=True)
user = api.me()
logging.critical("connection established with user: " + user.name)

# # Function for Twitter authentication
# def authenticate():
#     auth = OAuthHandler(config.CONSUMER_API_KEY, config.CONSUMER_API_SECRET)
#     auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)
#     return auth


# Listener for streaming tweets
class TwitterListener(StreamListener):
    # defines what is done with every single tweet as it is intercepted in real-time

    def __init__(self, limit, callback):
        super().__init__()
        self.limit = limit
        self.counter = 0
        self.callback = callback

    # Return an error if twitter is unreachable
    def on_error(self, status):
        if status == 420:
            print(status)
            return False

    def get_tweets_dict(self, t):
        # prefer the full text of extended tweets when present
        if 'extended_tweet' in t:
            text = t['extended_tweet']['full_text']
        else:
            text = t['text']

        tweet = {
            'username': t['user']['screen_name'],
            'text': text,
            'followers_count': t['user']['followers_count'],
            'location': t['user']['location'],
            'description': t['user']['description']
        }
        return tweet

    def on_data(self, data):
        t = json.loads(data)
        tweet = self.get_tweets_dict(t)
        self.callback(tweet)
        self.counter += 1
        if self.counter == self.limit:
            return False


def stream_tweets(limit, callback):
    stream_listener = TwitterListener(limit, callback)
    stream = Stream(auth=api.auth, listener=stream_listener)
    stream.filter(track=['OnThisDay'], follow=['2278940227'], languages=['en'])


def warning_log(tweet):
    #logging.critical(f'\n\nTWEET! {tweet["username"]} just tweeted: "{tweet["text"]}"\n\n\n')
    logging.critical('\n\nTWEET: ' + tweet['username'] + ' just tweeted: ' + tweet['text'])
    db.collections.onthisday.insert_one(tweet)


# Driver function
if __name__ == '__main__':
    while True:
        stream_tweets(5, warning_log)
        time.sleep(30)
[ "json.loads", "tweepy.streaming.StreamListener", "tweepy.API", "logging.critical", "pymongo.MongoClient", "tweepy.OAuthHandler" ]
[((156, 211), 'pymongo.MongoClient', 'pymongo.MongoClient', ([], {'host': '"""mongo_container"""', 'port': '(27018)'}), "(host='mongo_container', port=27018)\n", (175, 211), False, 'import pymongo\n'), ((242, 307), 'tweepy.OAuthHandler', 'OAuthHandler', (['config.CONSUMER_API_KEY', 'config.CONSUMER_API_SECRET'], {}), '(config.CONSUMER_API_KEY, config.CONSUMER_API_SECRET)\n', (254, 307), False, 'from tweepy import OAuthHandler, Stream, API\n'), ((385, 419), 'tweepy.API', 'API', (['auth'], {'wait_on_rate_limit': '(True)'}), '(auth, wait_on_rate_limit=True)\n', (388, 419), False, 'from tweepy import OAuthHandler, Stream, API\n'), ((436, 502), 'logging.critical', 'logging.critical', (["('connection established with user: ' + user.name)"], {}), "('connection established with user: ' + user.name)\n", (452, 502), False, 'import logging\n'), ((1919, 1935), 'tweepy.streaming.StreamListener', 'StreamListener', ([], {}), '()\n', (1933, 1935), False, 'from tweepy.streaming import StreamListener\n'), ((2208, 2298), 'logging.critical', 'logging.critical', (["('\\n\\nTWEET: ' + tweet['username'] + 'just tweeted: ' + tweet['text'])"], {}), "('\\n\\nTWEET: ' + tweet['username'] + 'just tweeted: ' +\n tweet['text'])\n", (2224, 2298), False, 'import logging\n'), ((1684, 1700), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (1694, 1700), False, 'import json\n')]
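Since warning_log stores each tweet through db.collections.onthisday, the documents land in a collection whose full name is 'collections.onthisday'. A small sketch for inspecting what the stream has written so far; the connection settings mirror the script above and the field names come from get_tweets_dict.

import pymongo

# Same connection settings as the streaming script above.
client = pymongo.MongoClient(host='mongo_container', port=27018)
db = client.tweets_db

# db.collections.onthisday in the script addresses a collection literally named
# "collections.onthisday", so spell it out explicitly here.
collection = db['collections.onthisday']

for doc in collection.find().sort('_id', -1).limit(5):
    print(doc['username'], '-', doc['text'][:80])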
""" ASGI config for op_trans project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application from op_trans.websocket import websocket_application from op_trans.redis_cli import RedisCli os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'op_trans.settings') django_application = get_asgi_application() async def application(scope, receive, send): RedisCli.get() if scope['type'] == 'http': await django_application(scope, receive, send) elif scope['type'] == 'websocket': await websocket_application(scope, receive, send) else: raise NotImplementedError(f"Unknown scope type {scope['type']}")
[ "os.environ.setdefault", "django.core.asgi.get_asgi_application", "op_trans.websocket.websocket_application", "op_trans.redis_cli.RedisCli.get" ]
[((379, 447), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""op_trans.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'op_trans.settings')\n", (400, 447), False, 'import os\n'), ((470, 492), 'django.core.asgi.get_asgi_application', 'get_asgi_application', ([], {}), '()\n', (490, 492), False, 'from django.core.asgi import get_asgi_application\n'), ((543, 557), 'op_trans.redis_cli.RedisCli.get', 'RedisCli.get', ([], {}), '()\n', (555, 557), False, 'from op_trans.redis_cli import RedisCli\n'), ((698, 741), 'op_trans.websocket.websocket_application', 'websocket_application', (['scope', 'receive', 'send'], {}), '(scope, receive, send)\n', (719, 741), False, 'from op_trans.websocket import websocket_application\n')]
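The application callable above is what an ASGI server imports and runs. As a sketch, it could be served with uvicorn; uvicorn is not part of this project, the module path assumes the file lives at op_trans/asgi.py (as the settings path suggests), and the host and port are arbitrary.

# Serve the ASGI callable defined above with uvicorn (assumed to be installed separately).
import uvicorn

if __name__ == '__main__':
    uvicorn.run('op_trans.asgi:application', host='0.0.0.0', port=8000)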
# -*- coding: utf-8 -*-
"""API routes config for notifai_recruitment project.

REST framework adds support for automatic URL routing to Django,
and provides a simple, quick and consistent way of wiring view logic
to a set of URLs.

For more information on this file, see
https://www.django-rest-framework.org/api-guide/routers/
"""
from rest_framework import routers

from textify.api.views import NoteViewSet

router = routers.DefaultRouter()
router.register(r'notes', NoteViewSet)
[ "rest_framework.routers.DefaultRouter" ]
[((419, 442), 'rest_framework.routers.DefaultRouter', 'routers.DefaultRouter', ([], {}), '()\n', (440, 442), False, 'from rest_framework import routers\n')]
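The router above only builds the URL patterns; they still have to be mounted in the project's URLconf. The project's urls.py is not shown, so the 'api/' prefix and the import path below are assumptions.

# Hypothetical project urls.py wiring for the router defined above.
from django.urls import include, path

from notifai_recruitment.routers import router  # assumed location of the module above

urlpatterns = [
    path('api/', include(router.urls)),
]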
import torch
import torch.nn as nn


class TorchModel(nn.ModuleList):

    def __init__(self):
        super(TorchModel, self).__init__()

        self.linear_1 = nn.Linear(2, 12)
        self.linear_2 = nn.Linear(12, 1)

    def forward(self, x):
        out = self.linear_1(x)
        out = torch.tanh(out)
        out = self.linear_2(out)
        out = torch.sigmoid(out)

        return out
[ "torch.tanh", "torch.sigmoid", "torch.nn.Linear" ]
[((145, 161), 'torch.nn.Linear', 'nn.Linear', (['(2)', '(12)'], {}), '(2, 12)\n', (154, 161), True, 'import torch.nn as nn\n'), ((180, 196), 'torch.nn.Linear', 'nn.Linear', (['(12)', '(1)'], {}), '(12, 1)\n', (189, 196), True, 'import torch.nn as nn\n'), ((258, 273), 'torch.tanh', 'torch.tanh', (['out'], {}), '(out)\n', (268, 273), False, 'import torch\n'), ((309, 327), 'torch.sigmoid', 'torch.sigmoid', (['out'], {}), '(out)\n', (322, 327), False, 'import torch\n')]
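A minimal smoke test of the model above: two input features in, one sigmoid-squashed value out, so every output lands in (0, 1). The batch size of 4 is arbitrary, and TorchModel is assumed to be defined in (or imported into) the same file.

import torch

model = TorchModel()                      # the two-layer model defined above
x = torch.randn(4, 2)                      # batch of 4 samples, 2 features each
y = model(x)
print(y.shape)                             # torch.Size([4, 1])
print(float(y.min()), float(y.max()))      # both inside (0, 1) because of the sigmoid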
#!/usr/bin/env python3 # # base.py """ Base functionality. """ # # Copyright (c) 2020 <NAME> <<EMAIL>> # # Based on cyberpandas # https://github.com/ContinuumIO/cyberpandas # Copyright (c) 2018, Anaconda, Inc. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # _isstringslice based on awkward-array # https://github.com/scikit-hep/awkward-array # Copyright (c) 2018-2019, <NAME> # Licensed under the BSD 3-Clause License # # stdlib from abc import abstractmethod from numbers import Real from typing import Dict, Iterable, List, Optional, Sequence, SupportsFloat, Tuple, Type, TypeVar, Union, overload # 3rd party import numpy # type: ignore from domdf_python_tools.doctools import prettify_docstrings from pandas.core.arrays import ExtensionArray # type: ignore from pandas.core.dtypes.base import ExtensionDtype # type: ignore from pandas.core.dtypes.generic import ABCExtensionArray # type: ignore from typing_extensions import Literal, Protocol __all__ = ["NumPyBackedExtensionArrayMixin"] class NumPyBackedExtensionArrayMixin(ExtensionArray): """ Mixin for pandas extension backed by a numpy array. """ _dtype: Type[ExtensionDtype] @property def dtype(self): """ The dtype for this extension array, :class:`~.CelsiusType`. """ return self._dtype @classmethod def _from_sequence(cls, scalars: Iterable, dtype=None, copy: bool = False): """ Construct a new ExtensionArray from a sequence of scalars. :param scalars: Each element will be an instance of the scalar type for this array, ``cls.dtype.type``. :param dtype: Construct for this particular dtype. This should be a Dtype compatible with the ExtensionArray. :type dtype: dtype, optional :param copy: If True, copy the underlying data. """ return cls(scalars, dtype=dtype) @classmethod def _from_factorized(cls, values: numpy.ndarray, original: ExtensionArray): """ Reconstruct an ExtensionArray after factorization. :param values: An integer ndarray with the factorized values. :param original: The original ExtensionArray that factorize was called on. .. 
seealso:: :meth:`pandas.pandas.api.extensions.ExtensionArray.factorize` """ return cls(values) @property def shape(self) -> Tuple[int]: """ Return a tuple of the array dimensions. """ return len(self.data), def __len__(self) -> int: """ Returns the length of this array. """ return len(self.data) def setitem(self, indexer, value): """ Set the 'value' inplace. """ # I think having a separate than __setitem__ is good # since we have to return here, but __setitem__ doesn't. self[indexer] = value return self @property def nbytes(self) -> int: """ The number of bytes needed to store this object in memory. """ return self._itemsize * len(self) def _formatting_values(self): return numpy.array(self._format_values(), dtype="object") def copy(self, deep: bool = False) -> ABCExtensionArray: """ Return a copy of the array. :param deep: :return: :rtype: """ return type(self)(self.data.copy()) @classmethod def _concat_same_type(cls, to_concat: Sequence[ABCExtensionArray]) -> ABCExtensionArray: """ Concatenate multiple arrays. :param to_concat: sequence of this type """ return cls(numpy.concatenate([array.data for array in to_concat])) def tolist(self) -> List: """ Convert the array to a Python list. """ return self.data.tolist() def argsort( self, ascending: bool = True, kind: Union[Literal["quicksort"], Literal["mergesort"], Literal["heapsort"]] = "quicksort", *args, **kwargs, ) -> numpy.ndarray: r""" Return the indices that would sort this array. :param ascending: Whether the indices should result in an ascending or descending sort. :param kind: {'quicksort', 'mergesort', 'heapsort'}, optional Sorting algorithm. \*args and \*\*kwargs are passed through to :func:`numpy.argsort`. :return: Array of indices that sort ``self``. If NaN values are contained, NaN values are placed at the end. .. seealso:: :class:`numpy.argsort`: Sorting implementation used internally. """ return self.data.argsort() def unique(self) -> ExtensionArray: # noqa: D102 # https://github.com/pandas-dev/pandas/pull/19869 _, indices = numpy.unique(self.data, return_index=True) data = self.data.take(numpy.sort(indices)) return self._from_ndarray(data) _A = TypeVar("_A") class BaseArray(numpy.lib.mixins.NDArrayOperatorsMixin, NumPyBackedExtensionArrayMixin): ndim: int = 1 data: numpy.ndarray @classmethod def _from_ndarray(cls: _A, data: numpy.ndarray, copy: bool = False) -> _A: """ Zero-copy construction of a BaseArray from an ndarray. :param data: This should have CelsiusType._record_type dtype :param copy: Whether to copy the data. :return: """ if copy: data = data.copy() new = cls([]) # type: ignore new.data = data return new @property def na_value(self): """ The missing value. **Example:** .. code-block:: >>> BaseArray([]).na_value numpy.nan """ return self.dtype.na_value def take(self, indices, allow_fill: bool = False, fill_value=None): # Can't use pandas' take yet # 1. axis # 2. I don't know how to do the reshaping correctly. indices = numpy.asarray(indices, dtype="int") if allow_fill and fill_value is None: fill_value = self.na_value elif allow_fill and not isinstance(fill_value, tuple): if not numpy.isnan(fill_value): fill_value = int(fill_value) if allow_fill: mask = (indices == -1) if not len(self): if not (indices == -1).all(): msg = "Invalid take for empty array. Must be all -1." 
raise IndexError(msg) else: # all NA take from and empty array took = ( numpy.full( (len(indices), 2), fill_value, dtype=">u8", ).reshape(-1).astype(self.dtype._record_type) ) return self._from_ndarray(took) if (indices < -1).any(): msg = "Invalid value in 'indicies'. Must be all >= -1 for 'allow_fill=True'" raise ValueError(msg) took = self.data.take(indices) if allow_fill: took[mask] = fill_value return self._from_ndarray(took) def __repr__(self) -> str: formatted = self._format_values() return f"{self.__class__.__name__}({formatted!r})" def isna(self): """ Indicator for whether each element is missing. """ if numpy.isnan(self.na_value): return numpy.isnan(self.data) else: return self.data == self.na_value # From https://github.com/scikit-hep/awkward-array/blob/2bbdb68d7a4fff2eeaed81eb76195e59232e8c13/awkward/array/base.py#L611 def _isstringslice(self, where): if isinstance(where, str): return True elif isinstance(where, bytes): raise TypeError("column selection must be str, not bytes, in Python 3") elif isinstance(where, tuple): return False elif ( isinstance(where, (numpy.ndarray, self.__class__)) and issubclass(where.dtype.type, (numpy.str, numpy.str_)) ): return True elif isinstance(where, (numpy.ndarray, self.__class__)) and issubclass( where.dtype.type, (numpy.object, numpy.object_) ) and not issubclass(where.dtype.type, (numpy.bool, numpy.bool_)): return len(where) > 0 and all(isinstance(x, str) for x in where) elif isinstance(where, (numpy.ndarray, self.__class__)): return False try: assert len(where) > 0 assert all(isinstance(x, str) for x in where) except (TypeError, AssertionError): return False else: return True def __delitem__(self, where): if isinstance(where, str): del self.data[where] elif self._isstringslice(where): for x in where: del self.data[x] else: raise TypeError(f"invalid index for removing column from Table: {where}") @property @abstractmethod def _parser(self): raise NotImplementedError def append(self, value) -> None: """ Append a value to this BaseArray. :param value: """ self.data = numpy.append(self.data, self._parser(value).data) def __setitem__(self, key, value): value = self._parser(value).data self.data[key] = value class _SupportsIndex(Protocol): def __index__(self) -> int: ... _F = TypeVar("_F", bound="UserFloat") @prettify_docstrings class UserFloat(Real): """ Class that simulates a float. :param value: Values to initialise the :class:`~domdf_python_tools.bases.UserFloat` with. .. 
versionadded:: 1.6.0 """ def __init__(self, value: Union[SupportsFloat, _SupportsIndex, str, bytes, bytearray] = 0.0): self._value = (float(value), ) def as_integer_ratio(self) -> Tuple[int, int]: return float(self).as_integer_ratio() def hex(self) -> str: # noqa: A003 # pylint: disable=redefined-builtin return float(self).hex() def is_integer(self) -> bool: return float(self).is_integer() @classmethod def fromhex(cls: Type[_F], __s: str) -> _F: return cls(float.fromhex(__s)) def __add__(self: _F, other: float) -> _F: return self.__class__(float(self).__add__(other)) def __sub__(self: _F, other: float) -> _F: return self.__class__(float(self).__sub__(other)) def __mul__(self: _F, other: float) -> _F: return self.__class__(float(self).__mul__(other)) def __floordiv__(self: _F, other: float) -> _F: # type: ignore return self.__class__(float(self).__floordiv__(other)) def __truediv__(self: _F, other: float) -> _F: return self.__class__(float(self).__truediv__(other)) def __mod__(self: _F, other: float) -> _F: return self.__class__(float(self).__mod__(other)) def __divmod__(self: _F, other: float) -> Tuple[_F, _F]: return tuple(self.__class__(x) for x in float(self).__divmod__(other)) # type: ignore def __pow__(self: _F, other: float, mod=None) -> _F: return self.__class__(float(self).__pow__(other, mod)) def __radd__(self: _F, other: float) -> _F: return self.__class__(float(self).__radd__(other)) def __rsub__(self: _F, other: float) -> _F: return self.__class__(float(self).__rsub__(other)) def __rmul__(self: _F, other: float) -> _F: return self.__class__(float(self).__rmul__(other)) def __rfloordiv__(self: _F, other: float) -> _F: # type: ignore return self.__class__(float(self).__rfloordiv__(other)) def __rtruediv__(self: _F, other: float) -> _F: return self.__class__(float(self).__rtruediv__(other)) def __rmod__(self: _F, other: float) -> _F: return self.__class__(float(self).__rmod__(other)) def __rdivmod__(self: _F, other: float) -> Tuple[_F, _F]: return tuple(self.__class__(x) for x in float(self).__rdivmod__(other)) # type: ignore def __rpow__(self: _F, other: float, mod=None) -> _F: return self.__class__(float(self).__rpow__(other, mod)) def __getnewargs__(self) -> Tuple[float]: return self._value def __trunc__(self) -> int: return float(self).__trunc__() @overload def __round__(self, ndigits: int) -> float: ... @overload def __round__(self, ndigits: None = ...) -> int: ... 
def __round__(self, ndigits: Optional[int] = None) -> Union[int, float]: return float(self).__round__(ndigits) def __eq__(self, other: object) -> bool: if isinstance(other, UserFloat): return self._value == other._value else: return float(self).__eq__(other) def __ne__(self, other: object) -> bool: if isinstance(other, UserFloat): return self._value != other._value else: return float(self).__ne__(other) def __lt__(self, other: float) -> bool: if isinstance(other, UserFloat): return self._value < other._value else: return float(self).__lt__(other) def __le__(self, other: float) -> bool: if isinstance(other, UserFloat): return self._value <= other._value else: return float(self).__le__(other) def __gt__(self, other: float) -> bool: if isinstance(other, UserFloat): return self._value > other._value else: return float(self).__gt__(other) def __ge__(self, other: float) -> bool: if isinstance(other, UserFloat): return self._value >= other._value else: return float(self).__ge__(other) def __neg__(self: _F) -> _F: return self.__class__(float(self).__neg__()) def __pos__(self: _F) -> _F: return self.__class__(float(self).__pos__()) def __str__(self) -> str: return str(float(self)) def __int__(self) -> int: return int(float(self)) def __float__(self) -> float: return self._value[0] def __abs__(self: _F) -> _F: return self.__class__(float(self).__abs__()) def __hash__(self) -> int: return float(self).__hash__() def __repr__(self) -> str: return str(self) def __ceil__(self): raise NotImplementedError def __floor__(self): raise NotImplementedError
[ "numpy.unique", "numpy.sort", "numpy.asarray", "numpy.isnan", "numpy.concatenate", "typing.TypeVar" ]
[((5872, 5885), 'typing.TypeVar', 'TypeVar', (['"""_A"""'], {}), "('_A')\n", (5879, 5885), False, 'from typing import Dict, Iterable, List, Optional, Sequence, SupportsFloat, Tuple, Type, TypeVar, Union, overload\n'), ((9643, 9675), 'typing.TypeVar', 'TypeVar', (['"""_F"""'], {'bound': '"""UserFloat"""'}), "('_F', bound='UserFloat')\n", (9650, 9675), False, 'from typing import Dict, Iterable, List, Optional, Sequence, SupportsFloat, Tuple, Type, TypeVar, Union, overload\n'), ((5743, 5785), 'numpy.unique', 'numpy.unique', (['self.data'], {'return_index': '(True)'}), '(self.data, return_index=True)\n', (5755, 5785), False, 'import numpy\n'), ((6745, 6780), 'numpy.asarray', 'numpy.asarray', (['indices'], {'dtype': '"""int"""'}), "(indices, dtype='int')\n", (6758, 6780), False, 'import numpy\n'), ((7868, 7894), 'numpy.isnan', 'numpy.isnan', (['self.na_value'], {}), '(self.na_value)\n', (7879, 7894), False, 'import numpy\n'), ((4728, 4782), 'numpy.concatenate', 'numpy.concatenate', (['[array.data for array in to_concat]'], {}), '([array.data for array in to_concat])\n', (4745, 4782), False, 'import numpy\n'), ((5810, 5829), 'numpy.sort', 'numpy.sort', (['indices'], {}), '(indices)\n', (5820, 5829), False, 'import numpy\n'), ((7906, 7928), 'numpy.isnan', 'numpy.isnan', (['self.data'], {}), '(self.data)\n', (7917, 7928), False, 'import numpy\n'), ((6919, 6942), 'numpy.isnan', 'numpy.isnan', (['fill_value'], {}), '(fill_value)\n', (6930, 6942), False, 'import numpy\n')]
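The UserFloat wrapper at the end of the file keeps arithmetic results inside the subclass instead of decaying to plain float, because each dunder re-wraps its result in self.__class__. A short sketch of that behaviour; the import path is illustrative and assumes base.py is importable as-is.

from base import UserFloat  # assumed import path for the module above


class Metres(UserFloat):
    """Toy subclass, only to show that arithmetic preserves the type."""


d = Metres(2.5) + 1.25         # plain-number operands keep the result in the subclass
print(d, type(d).__name__)     # 3.75 Metres
print(d > 3, d.is_integer())   # True False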
""" view predication for point cloud, Run valid_one_point_cloud first """ import torch import numpy as np import sys import os import pptk # ------ Configurations ------ # path to pth file pth_file = "../tmp/scene0015_00_vh_clean_2.pth.Random.100" show_gt = False # show groundtruth or not; groudtruth draw first, i.e., on back # --- end of configurations --- CLASS_LABELS = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture'] # CLASS_COLOR = [ # [138, 43, 226], [0, 128, 128], [0, 255, 0], [0, 0, 255], [255, 255, 0], # [0, 255, 255], [255, 0, 255], [192, 192, 192], [128, 128, 128], [128, 0, 0], # [128, 128, 0], [0, 128, 0], [128, 0, 128], [255, 0, 0], [0, 0, 128], # [34, 139, 34], [64, 224, 208], [0, 0, 0], [75, 0, 130], [205, 133, 63] # ] SCANNET_COLOR_MAP = SCANNET_COLOR_MAP = { 0: (0., 0., 0.), 1: (174., 199., 232.), 2: (152., 223., 138.), 3: (31., 119., 180.), 4: (255., 187., 120.), 5: (188., 189., 34.), 6: (140., 86., 75.), 7: (255., 152., 150.), 8: (214., 39., 40.), 9: (197., 176., 213.), 10: (148., 103., 189.), 11: (196., 156., 148.), 12: (23., 190., 207.), 14: (247., 182., 210.), 15: (66., 188., 102.), 16: (219., 219., 141.), 17: (140., 57., 197.), 18: (202., 185., 52.), 19: (51., 176., 203.), 20: (200., 54., 131.), 21: (92., 193., 61.), 22: (78., 71., 183.), 23: (172., 114., 82.), 24: (255., 127., 14.), 25: (91., 163., 138.), 26: (153., 98., 156.), 27: (140., 153., 101.), 28: (158., 218., 229.), 29: (100., 125., 154.), 30: (178., 127., 135.), 32: (146., 111., 194.), 33: (44., 160., 44.), 34: (112., 128., 144.), 35: (96., 207., 209.), 36: (227., 119., 194.), 37: (213., 92., 176.), 38: (94., 106., 211.), 39: (82., 84., 163.), 40: (100., 85., 144.), } VALID_CLASS_IDS = [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39 ] CLASS_COLOR = [] for valid_id in VALID_CLASS_IDS: CLASS_COLOR.append(SCANNET_COLOR_MAP[valid_id]) CLASS_COLOR = np.array(CLASS_COLOR) / 255.0 def show_predication_result(pth_file, show_gt): data = torch.load(pth_file) coords, colors, labels, pred = data ignore_index = labels == -100 coords = coords[~ignore_index] colors = colors[~ignore_index] labels = labels[~ignore_index] pred = pred[~ignore_index] gt_color = [CLASS_COLOR[x] for x in labels.astype("int32")] pred_color = [CLASS_COLOR[x] for x in pred.astype("int32")] if show_gt: v1 = pptk.viewer(coords, gt_color) v1.set(point_size=0.01, bg_color=[1, 1, 1, 1], floor_color=[1, 1, 1, 1], show_grid=False, show_axis=False, show_info=False) v1.set(theta=1.8, lookat=[0, 0, 0], phi=0.52) v2 = pptk.viewer(coords, pred_color) v2.set(point_size=0.01, bg_color=[1, 1, 1, 1], floor_color=[1, 1, 1, 1], show_grid=False, show_axis=False, show_info=False) v2.set(theta=1.8, lookat=[0, 0, 0], phi=0.52) if __name__ == "__main__": show_predication_result(pth_file, show_gt)
[ "numpy.array", "torch.load", "pptk.viewer" ]
[((2278, 2299), 'numpy.array', 'np.array', (['CLASS_COLOR'], {}), '(CLASS_COLOR)\n', (2286, 2299), True, 'import numpy as np\n'), ((2370, 2390), 'torch.load', 'torch.load', (['pth_file'], {}), '(pth_file)\n', (2380, 2390), False, 'import torch\n'), ((2987, 3018), 'pptk.viewer', 'pptk.viewer', (['coords', 'pred_color'], {}), '(coords, pred_color)\n', (2998, 3018), False, 'import pptk\n'), ((2761, 2790), 'pptk.viewer', 'pptk.viewer', (['coords', 'gt_color'], {}), '(coords, gt_color)\n', (2772, 2790), False, 'import pptk\n')]
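show_predication_result expects the .pth file to unpack into (coords, colors, labels, pred), where labels and pred index the 20-entry colour table and -100 marks ignored points. The sketch below fabricates such a file so the viewer can be smoke-tested without a real ScanNet scene; the sizes and values are arbitrary, and pptk still needs a display to open its windows.

import numpy as np
import torch

# Fabricate a tiny point cloud in the (coords, colors, labels, pred) layout
# that show_predication_result() unpacks; purely a smoke-test fixture.
n = 1000
coords = np.random.rand(n, 3).astype(np.float32)
colors = np.random.rand(n, 3).astype(np.float32)
labels = np.random.randint(0, 20, size=n).astype(np.float32)
pred = np.random.randint(0, 20, size=n).astype(np.float32)
labels[:10] = -100    # a few ignored points, as in the real data

torch.save((coords, colors, labels, pred), 'dummy_scene.pth')
# afterwards: show_predication_result('dummy_scene.pth', show_gt=True)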
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('core', '0008_grow_owner'),
    ]

    operations = [
        migrations.CreateModel(
            name='Measurement',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('air_temperature', models.IntegerField(null=True, blank=True)),
                ('water_temperature', models.IntegerField(null=True, blank=True)),
                ('humidity', models.IntegerField(null=True, blank=True)),
                ('co2', models.IntegerField(null=True, blank=True)),
                ('ppm', models.IntegerField(null=True, blank=True)),
                ('tds', models.IntegerField(null=True, blank=True)),
                ('ec', models.IntegerField(null=True, blank=True)),
                ('ph', models.IntegerField(null=True, blank=True)),
                ('lumen', models.IntegerField(null=True, blank=True)),
                ('plant', models.ForeignKey(to='core.Plant')),
            ],
        ),
    ]
[ "django.db.models.DateTimeField", "django.db.models.ForeignKey", "django.db.models.AutoField", "django.db.models.IntegerField" ]
[((340, 433), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (356, 433), False, 'from django.db import models, migrations\n'), ((462, 501), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (482, 501), False, 'from django.db import models, migrations\n'), ((540, 582), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (559, 582), False, 'from django.db import models, migrations\n'), ((623, 665), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (642, 665), False, 'from django.db import models, migrations\n'), ((697, 739), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (716, 739), False, 'from django.db import models, migrations\n'), ((766, 808), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (785, 808), False, 'from django.db import models, migrations\n'), ((835, 877), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (854, 877), False, 'from django.db import models, migrations\n'), ((904, 946), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (923, 946), False, 'from django.db import models, migrations\n'), ((972, 1014), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (991, 1014), False, 'from django.db import models, migrations\n'), ((1040, 1082), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (1059, 1082), False, 'from django.db import models, migrations\n'), ((1111, 1153), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (1130, 1153), False, 'from django.db import models, migrations\n'), ((1182, 1216), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""core.Plant"""'}), "(to='core.Plant')\n", (1199, 1216), False, 'from django.db import models, migrations\n')]
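The migration above implies a Measurement model roughly like the one below. The project's models.py is not included here, so this is a reconstruction read off the CreateModel operation; the on_delete argument is an addition that newer Django versions require.

# Approximate core/models.py counterpart of the migration above (not shown in the original).
from django.db import models


class Measurement(models.Model):
    timestamp = models.DateTimeField(auto_now_add=True)
    air_temperature = models.IntegerField(null=True, blank=True)
    water_temperature = models.IntegerField(null=True, blank=True)
    humidity = models.IntegerField(null=True, blank=True)
    co2 = models.IntegerField(null=True, blank=True)
    ppm = models.IntegerField(null=True, blank=True)
    tds = models.IntegerField(null=True, blank=True)
    ec = models.IntegerField(null=True, blank=True)
    ph = models.IntegerField(null=True, blank=True)
    lumen = models.IntegerField(null=True, blank=True)
    plant = models.ForeignKey('core.Plant', on_delete=models.CASCADE)  # on_delete assumed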
import datetime from django.conf import settings from django.db import models from django.utils import translation import tower from babel import Locale, numbers from jingo import env from jinja2.filters import do_dictsort from tower import ugettext as _ import amo from amo.fields import DecimalCharField from amo.helpers import absolutify, urlparams from amo.utils import get_locale_from_lang, send_mail, send_mail_jinja class ContributionError(Exception): def __init__(self, value): self.value = value def __str__(self): return repr(self.value) class Contribution(amo.models.ModelBase): addon = models.ForeignKey('webapps.Addon', blank=True, null=True) # For in-app purchases this links to the product. inapp_product = models.ForeignKey('inapp.InAppProduct', blank=True, null=True) amount = DecimalCharField(max_digits=9, decimal_places=2, nullify_invalid=True, null=True) currency = models.CharField(max_length=3, choices=do_dictsort(amo.PAYPAL_CURRENCIES), default=amo.CURRENCY_DEFAULT) source = models.CharField(max_length=255, null=True) source_locale = models.CharField(max_length=10, null=True) # This is the external id that you can communicate to the world. uuid = models.CharField(max_length=255, null=True, db_index=True) comment = models.CharField(max_length=255) # This is the internal transaction id between us and a provider, # for example paypal or solitude. transaction_id = models.CharField(max_length=255, null=True, db_index=True) paykey = models.CharField(max_length=255, null=True) # Marketplace specific. # TODO(andym): figure out what to do when we delete the user. user = models.ForeignKey('users.UserProfile', blank=True, null=True) type = models.PositiveIntegerField(default=amo.CONTRIB_TYPE_DEFAULT, choices=do_dictsort(amo.CONTRIB_TYPES)) price_tier = models.ForeignKey('prices.Price', blank=True, null=True, on_delete=models.PROTECT) # If this is a refund or a chargeback, which charge did it relate to. related = models.ForeignKey('self', blank=True, null=True, on_delete=models.PROTECT) class Meta: db_table = 'stats_contributions' def __unicode__(self): return u'App {app}: in-app: {inapp}: {amount}'.format( app=self.addon, amount=self.amount, inapp=self.inapp_product) @property def date(self): try: return datetime.date(self.created.year, self.created.month, self.created.day) except AttributeError: # created may be None return None def _switch_locale(self): if self.source_locale: lang = self.source_locale else: lang = self.addon.default_locale tower.activate(lang) return Locale(translation.to_locale(lang)) def _mail(self, template, subject, context): template = env.get_template(template) body = template.render(context) send_mail(subject, body, settings.MARKETPLACE_EMAIL, [self.user.email], fail_silently=True) def record_failed_refund(self, e, user): self.enqueue_refund(amo.REFUND_FAILED, user, rejection_reason=str(e)) self._switch_locale() self._mail('users/support/emails/refund-failed.txt', # L10n: the addon name. _(u'%s refund failed' % self.addon.name), {'name': self.addon.name}) send_mail_jinja( 'Refund failed', 'purchase/email/refund-failed.txt', {'name': self.user.email, 'error': str(e)}, settings.MARKETPLACE_EMAIL, [str(self.addon.support_email)], fail_silently=True) def mail_approved(self): """The developer has approved a refund.""" locale = self._switch_locale() amt = numbers.format_currency(abs(self.amount), self.currency, locale=locale) self._mail('users/support/emails/refund-approved.txt', # L10n: the adddon name. 
_(u'%s refund approved' % self.addon.name), {'name': self.addon.name, 'amount': amt}) def mail_declined(self): """The developer has declined a refund.""" self._switch_locale() self._mail('users/support/emails/refund-declined.txt', # L10n: the adddon name. _(u'%s refund declined' % self.addon.name), {'name': self.addon.name}) def enqueue_refund(self, status, user, refund_reason=None, rejection_reason=None): """Keep track of a contribution's refund status.""" from mkt.prices.models import Refund refund, c = Refund.objects.safer_get_or_create(contribution=self, user=user) refund.status = status # Determine which timestamps to update. timestamps = [] if status in (amo.REFUND_PENDING, amo.REFUND_APPROVED_INSTANT, amo.REFUND_FAILED): timestamps.append('requested') if status in (amo.REFUND_APPROVED, amo.REFUND_APPROVED_INSTANT): timestamps.append('approved') elif status == amo.REFUND_DECLINED: timestamps.append('declined') for ts in timestamps: setattr(refund, ts, datetime.datetime.now()) if refund_reason: refund.refund_reason = refund_reason if rejection_reason: refund.rejection_reason = rejection_reason refund.save() return refund def get_amount_locale(self, locale=None): """Localise the amount paid into the current locale.""" if not locale: lang = translation.get_language() locale = get_locale_from_lang(lang) return numbers.format_currency(self.amount or 0, self.currency or 'USD', locale=locale) def get_refund_url(self): return urlparams(self.addon.get_dev_url('issue_refund'), transaction_id=self.transaction_id) def get_absolute_refund_url(self): return absolutify(self.get_refund_url()) def get_refund_contribs(self): """Get related set of refund contributions.""" return Contribution.objects.filter( related=self, type=amo.CONTRIB_REFUND).order_by('-modified') def is_refunded(self): """ If related has been set, then this transaction has been refunded or charged back. This is a bit expensive, so refrain from using on listing pages. """ return (Contribution.objects.filter(related=self, type__in=[amo.CONTRIB_REFUND, amo.CONTRIB_CHARGEBACK]) .exists())
[ "amo.utils.get_locale_from_lang", "django.db.models.ForeignKey", "babel.numbers.format_currency", "django.utils.translation.to_locale", "jingo.env.get_template", "datetime.datetime.now", "jinja2.filters.do_dictsort", "tower.ugettext", "mkt.prices.models.Refund.objects.safer_get_or_create", "datetime.date", "django.utils.translation.get_language", "tower.activate", "amo.fields.DecimalCharField", "amo.utils.send_mail", "django.db.models.CharField" ]
[((635, 692), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""webapps.Addon"""'], {'blank': '(True)', 'null': '(True)'}), "('webapps.Addon', blank=True, null=True)\n", (652, 692), False, 'from django.db import models\n'), ((767, 829), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""inapp.InAppProduct"""'], {'blank': '(True)', 'null': '(True)'}), "('inapp.InAppProduct', blank=True, null=True)\n", (784, 829), False, 'from django.db import models\n'), ((881, 967), 'amo.fields.DecimalCharField', 'DecimalCharField', ([], {'max_digits': '(9)', 'decimal_places': '(2)', 'nullify_invalid': '(True)', 'null': '(True)'}), '(max_digits=9, decimal_places=2, nullify_invalid=True, null\n =True)\n', (897, 967), False, 'from amo.fields import DecimalCharField\n'), ((1190, 1233), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(True)'}), '(max_length=255, null=True)\n', (1206, 1233), False, 'from django.db import models\n'), ((1254, 1296), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'null': '(True)'}), '(max_length=10, null=True)\n', (1270, 1296), False, 'from django.db import models\n'), ((1377, 1435), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(True)', 'db_index': '(True)'}), '(max_length=255, null=True, db_index=True)\n', (1393, 1435), False, 'from django.db import models\n'), ((1450, 1482), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1466, 1482), False, 'from django.db import models\n'), ((1611, 1669), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(True)', 'db_index': '(True)'}), '(max_length=255, null=True, db_index=True)\n', (1627, 1669), False, 'from django.db import models\n'), ((1683, 1726), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(True)'}), '(max_length=255, null=True)\n', (1699, 1726), False, 'from django.db import models\n'), ((1833, 1894), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""users.UserProfile"""'], {'blank': '(True)', 'null': '(True)'}), "('users.UserProfile', blank=True, null=True)\n", (1850, 1894), False, 'from django.db import models\n'), ((2064, 2151), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""prices.Price"""'], {'blank': '(True)', 'null': '(True)', 'on_delete': 'models.PROTECT'}), "('prices.Price', blank=True, null=True, on_delete=models.\n PROTECT)\n", (2081, 2151), False, 'from django.db import models\n'), ((2270, 2344), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""self"""'], {'blank': '(True)', 'null': '(True)', 'on_delete': 'models.PROTECT'}), "('self', blank=True, null=True, on_delete=models.PROTECT)\n", (2287, 2344), False, 'from django.db import models\n'), ((3027, 3047), 'tower.activate', 'tower.activate', (['lang'], {}), '(lang)\n', (3041, 3047), False, 'import tower\n'), ((3168, 3194), 'jingo.env.get_template', 'env.get_template', (['template'], {}), '(template)\n', (3184, 3194), False, 'from jingo import env\n'), ((3243, 3338), 'amo.utils.send_mail', 'send_mail', (['subject', 'body', 'settings.MARKETPLACE_EMAIL', '[self.user.email]'], {'fail_silently': '(True)'}), '(subject, body, settings.MARKETPLACE_EMAIL, [self.user.email],\n fail_silently=True)\n', (3252, 3338), False, 'from amo.utils import get_locale_from_lang, send_mail, send_mail_jinja\n'), ((5048, 5112), 'mkt.prices.models.Refund.objects.safer_get_or_create', 
'Refund.objects.safer_get_or_create', ([], {'contribution': 'self', 'user': 'user'}), '(contribution=self, user=user)\n', (5082, 5112), False, 'from mkt.prices.models import Refund\n'), ((6163, 6248), 'babel.numbers.format_currency', 'numbers.format_currency', (['(self.amount or 0)', "(self.currency or 'USD')"], {'locale': 'locale'}), "(self.amount or 0, self.currency or 'USD', locale=locale\n )\n", (6186, 6248), False, 'from babel import Locale, numbers\n'), ((1079, 1113), 'jinja2.filters.do_dictsort', 'do_dictsort', (['amo.PAYPAL_CURRENCIES'], {}), '(amo.PAYPAL_CURRENCIES)\n', (1090, 1113), False, 'from jinja2.filters import do_dictsort\n'), ((2015, 2045), 'jinja2.filters.do_dictsort', 'do_dictsort', (['amo.CONTRIB_TYPES'], {}), '(amo.CONTRIB_TYPES)\n', (2026, 2045), False, 'from jinja2.filters import do_dictsort\n'), ((2667, 2737), 'datetime.date', 'datetime.date', (['self.created.year', 'self.created.month', 'self.created.day'], {}), '(self.created.year, self.created.month, self.created.day)\n', (2680, 2737), False, 'import datetime\n'), ((3070, 3097), 'django.utils.translation.to_locale', 'translation.to_locale', (['lang'], {}), '(lang)\n', (3091, 3097), False, 'from django.utils import translation\n'), ((3658, 3698), 'tower.ugettext', '_', (["(u'%s refund failed' % self.addon.name)"], {}), "(u'%s refund failed' % self.addon.name)\n", (3659, 3698), True, 'from tower import ugettext as _\n'), ((4380, 4422), 'tower.ugettext', '_', (["(u'%s refund approved' % self.addon.name)"], {}), "(u'%s refund approved' % self.addon.name)\n", (4381, 4422), True, 'from tower import ugettext as _\n'), ((4722, 4764), 'tower.ugettext', '_', (["(u'%s refund declined' % self.addon.name)"], {}), "(u'%s refund declined' % self.addon.name)\n", (4723, 4764), True, 'from tower import ugettext as _\n'), ((6073, 6099), 'django.utils.translation.get_language', 'translation.get_language', ([], {}), '()\n', (6097, 6099), False, 'from django.utils import translation\n'), ((6121, 6147), 'amo.utils.get_locale_from_lang', 'get_locale_from_lang', (['lang'], {}), '(lang)\n', (6141, 6147), False, 'from amo.utils import get_locale_from_lang, send_mail, send_mail_jinja\n'), ((5691, 5714), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5712, 5714), False, 'import datetime\n')]
import datetime from pymongo import MongoClient import pymongo import pprint try: db = MongoClient("mongodb://localhost:27017")["hkust"] f=0.05 try: print("Querying Documents...") listOfCourseWithWaitingListSize = db.course.aggregate([ { "$unwind": "$sections" }, # { "$project": { "newProduct": {"$multiply": [f, "$sections.enrol"]}, "satisfied": satisfied} }, # { "$project": { "compareResult": {"$gte": ["$sections.wait", "$newProduct"]}, "match_ts" : "$sections.recordTime"} }, {"$match": #filter timeslot {"$and":[ # {"compareResult": "true"}, # {"satisfied" : "Yes"}, #{"sections.sectionId": {"$ne": null}}, #{"sections.sectionId": {"$exists": true}}, # {"sections.sectionId": {"$regex": '^L'}}, {"sections.recordTime": {"$gte": datetime.datetime.strptime("2018-01-26T14:00Z", "%Y-%m-%dT%H:%MZ")}}, {"sections.recordTime": {"$lte": datetime.datetime.strptime("2018-02-01T11:30Z", "%Y-%m-%dT%H:%MZ")}} ] } }, { "$project": {"code": 1, "title": 1, "credits": 1, "sections":1, # "description":1, "satisfied":{"$gte":["$sections.wait",{"$multiply":["$sections.enrol",float(f)]}]}, "lecSatisfied":{ "$cond":[{ "$and":[ { "$gte":["$sections.wait",{"$multiply":["$sections.enrol",float(f)]}] }, { "$eq":[{"$substr": ["$sections.sectionId",0,1]},"L"] } ] },1,0] } }, }, { "$sort": {"sections.sectionId": 1 } }, { "$group":{ "_id":{ "code": "$code", "recordTime":"$sections.recordTime"}, "code": {"$last": "$code"}, "title": {"$last": "$title"}, "credits": {"$last": "$credits"}, "recordTime":{"$last": "$sections.recordTime"}, "sections":{ "$push": { "sectionId":"$sections.sectionId", "dateAndTime":"$sections.offerings.dateAndTime", "quota":"$sections.quota", "enrol":"$sections.enrol", "avail": { "$subtract": [ "$sections.quota", "$sections.enrol"] } , "wait":"$sections.wait", "satisfied":"$satisfied", } }, "lecSatisfiedCount":{"$sum":"$lecSatisfied"} } }, { "$match": {"lecSatisfiedCount": {"$gt":0}} }, { "$sort": {"recordTime": 1 } }, { "$group":{ "_id":{ "code": "$code"}, "code": {"$last": "$code"}, "title": {"$last": "$title"}, "credits": {"$last": "$credits"}, "recordTime":{"$last": "$recordTime"}, "sections":{"$last": "$sections"}, "lecSatisfiedCount":{"$last": "$lecSatisfiedCount"} } }, { "$project":{ "_id":0, "code": 1, "title":1, "credits": 1, "recordTime":1, "sections":1 } } ] ) # pprint.pprint(listOfCourseWithWaitingListSize) recordNo = 0 for oneCourse in listOfCourseWithWaitingListSize: recordNo = recordNo + 1 print("Record {:d}:".format(recordNo)) pprint.pprint(oneCourse) # print("code: {:s}\ntitle: {:s}\ncredits: {:0.2f}\nquota: {:d}\nenrol: {:d}\navail: {:d}\nwait: {:d}".format(oneCourse["code"], oneCourse["title"], oneCourse["credits"],oneCourse["sections"][0]["quota"],oneCourse["sections"][0]["enrol"],oneCourse["sections"][0]["avail"],oneCourse["sections"][0]["wait"])) # for oneSection in oneCourse["sections"]: # print("sections: {:s}, Date & Time: {:s}".format(oneSection["sectionId"],' '.join(oneSection["dateAndTime"]))) # print("description: {:s}".format(oneCourse["description"])) #pprint(" Record {:d}: (sid={:s}, sname={:s}, byear={:d})".format(recordNo, oneStudent["sid"], oneStudent["sname"], oneStudent["byear"])) #print("Record {:d}: (course={:s})".format(recordNo, oneCourse)) except pymongo.errors.ConnectionFailure as error: print("Document Querying Failed! Error Message: \"{}\"".format(error)) #return outputCourseDetails(courseCode, lectureSection, satisfied) except pymongo.errors.ConnectionFailure as error: print("Document Insertion Failed! 
Error Message: \"{}\"".format(error)) import numpy import time from keras.models import Sequential from keras.layers import Dense from keras.models import model_from_json import numpy #Model 1 def trainModel(trainingDataFilename): # to set a seed of a random number generator used in the "optimization" tool in the neural network model numpy.random.seed(time.time()) # Step 1: to load the data # Step 1a: to read the dataset with "numpy" function dataset = numpy.loadtxt(trainingDataFilename, delimiter=",") # Step 1b: to split the dataset into two datasets, namely the input attribute dataset (X) and the target attribute dataset (Y) X = dataset[:,0:4] Y = dataset[:,4] # Step 2: to define the model model = Sequential() model.add(Dense(13, input_dim=4, activation='relu')) model.add(Dense(7, activation='relu')) model.add(Dense(1, activation='sigmoid')) # Step 3: to compile the model model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"]) # Step 4: To fit the model model.fit(X, Y, validation_split=0.2, epochs=150, batch_size=10) # Step 5: To evaluate the model scores = model.evaluate(X, Y) print("Evaluation: ") print("{}: {}".format(model.metrics_names[1], scores[1]*100)) return model # model 2: def trainModel2(trainingDataFilename): numpy.random.seed(time.time()) dataset = numpy.loadtxt(trainingDataFilename, delimiter=",") X = dataset[:,0:4] Y = dataset[:,4] # Step 2: to define the model model = Sequential() model.add(Dense(10, input_dim=4, activation='relu')) model.add(Dense(10, activation='relu')) model.add(Dense(10, activation='relu')) model.add(Dense(10, activation='relu')) model.add(Dense(10, activation='relu')) model.add(Dense(10, activation='relu')) model.add(Dense(1, activation='sigmoid')) # Step 3: to compile the model model.compile(loss='mean_squared_error', optimizer='sgd', metrics=["accuracy"]) # Step 4: To fit the model model.fit(X, Y, validation_split=0.2, epochs=150, batch_size=10) scores = model.evaluate(X, Y) print("Evaluation: ") print("{}: {}".format(model.metrics_names[1], scores[1]*100)) return model # model 3: def trainModel3(trainingDataFilename): numpy.random.seed(time.time()) dataset = numpy.loadtxt(trainingDataFilename, delimiter=",") X = dataset[:,0:4] Y = dataset[:,4] # Step 2: to define the model model = Sequential() model.add(Dense(64, input_dim=4, activation='softmax')) model.add(Dense(1, activation='sigmoid')) # Step 3: to compile the model model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=["accuracy"]) # Step 4: To fit the model model.fit(X, Y, validation_split=0.2, epochs=150, batch_size=10) scores = model.evaluate(X, Y) print("Evaluation: ") print("{}: {}".format(model.metrics_names[1], scores[1]*100)) return model # model 4: def trainModel4(trainingDataFilename): numpy.random.seed(time.time()) dataset = numpy.loadtxt(trainingDataFilename, delimiter=",") X = dataset[:,0:4] Y = dataset[:,4] # Step 2: to define the model model = Sequential() model.add(Dense(13, input_dim=4, activation='softmax')) model.add(Dense(7, activation='softmax')) model.add(Dense(1, activation='sigmoid')) # Step 3: to compile the model model.compile(loss='logcosh', optimizer='rmsprop', metrics=["accuracy"]) # Step 4: To fit the model model.fit(X, Y, validation_split=0.3, epochs=300, batch_size=7) scores = model.evaluate(X, Y) print("Evaluation: ") print("{}: {}".format(model.metrics_names[1], scores[1]*100)) return model # model 5: def trainModel5(trainingDataFilename): def trainModel5_beforeAddDrop(trainingDataFile_beforeAddDrop): 
numpy.random.seed(time.time()) dataset = numpy.loadtxt(trainingDataFile_beforeAddDrop, delimiter=",") X = dataset[:,0:4] Y = dataset[:,4] # Step 2: to define the model model = Sequential() model.add(Dense(13, input_dim=4, activation='relu')) model.add(Dense(7, activation='relu')) model.add(Dense(1, activation='sigmoid')) # Step 3: to compile the model model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"]) # Step 4: To fit the model model.fit(X, Y, validation_split=0.2, epochs=150, batch_size=10) scores = model.evaluate(X, Y) print("Evaluation: ") print("{}: {}".format(model.metrics_names[1], scores[1]*100)) return model def trainModel5_afterAddDrop(trainingDataFile_afterAddDrop): numpy.random.seed(time.time()) dataset = numpy.loadtxt(trainingDataFile_afterAddDrop, delimiter=",") X = dataset[:,0:4] Y = dataset[:,4] # Step 2: to define the model model = Sequential() model.add(Dense(13, input_dim=4, activation='relu')) model.add(Dense(7, activation='relu')) model.add(Dense(1, activation='sigmoid')) # Step 3: to compile the model model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"]) # Step 4: To fit the model model.fit(X, Y, validation_split=0.2, epochs=150, batch_size=10) scores = model.evaluate(X, Y) print("Evaluation: ") print("{}: {}".format(model.metrics_names[1], scores[1]*100)) return model
[ "datetime.datetime.strptime", "keras.models.Sequential", "keras.layers.Dense", "pymongo.MongoClient", "numpy.loadtxt", "time.time", "pprint.pprint" ]
[((4443, 4493), 'numpy.loadtxt', 'numpy.loadtxt', (['trainingDataFilename'], {'delimiter': '""","""'}), "(trainingDataFilename, delimiter=',')\n", (4456, 4493), False, 'import numpy\n'), ((4702, 4714), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4712, 4714), False, 'from keras.models import Sequential\n'), ((5321, 5371), 'numpy.loadtxt', 'numpy.loadtxt', (['trainingDataFilename'], {'delimiter': '""","""'}), "(trainingDataFilename, delimiter=',')\n", (5334, 5371), False, 'import numpy\n'), ((5450, 5462), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (5460, 5462), False, 'from keras.models import Sequential\n'), ((6199, 6249), 'numpy.loadtxt', 'numpy.loadtxt', (['trainingDataFilename'], {'delimiter': '""","""'}), "(trainingDataFilename, delimiter=',')\n", (6212, 6249), False, 'import numpy\n'), ((6328, 6340), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (6338, 6340), False, 'from keras.models import Sequential\n'), ((6882, 6932), 'numpy.loadtxt', 'numpy.loadtxt', (['trainingDataFilename'], {'delimiter': '""","""'}), "(trainingDataFilename, delimiter=',')\n", (6895, 6932), False, 'import numpy\n'), ((7011, 7023), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (7021, 7023), False, 'from keras.models import Sequential\n'), ((88, 128), 'pymongo.MongoClient', 'MongoClient', (['"""mongodb://localhost:27017"""'], {}), "('mongodb://localhost:27017')\n", (99, 128), False, 'from pymongo import MongoClient\n'), ((4335, 4346), 'time.time', 'time.time', ([], {}), '()\n', (4344, 4346), False, 'import time\n'), ((4726, 4767), 'keras.layers.Dense', 'Dense', (['(13)'], {'input_dim': '(4)', 'activation': '"""relu"""'}), "(13, input_dim=4, activation='relu')\n", (4731, 4767), False, 'from keras.layers import Dense\n'), ((4780, 4807), 'keras.layers.Dense', 'Dense', (['(7)'], {'activation': '"""relu"""'}), "(7, activation='relu')\n", (4785, 4807), False, 'from keras.layers import Dense\n'), ((4820, 4850), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (4825, 4850), False, 'from keras.layers import Dense\n'), ((5297, 5308), 'time.time', 'time.time', ([], {}), '()\n', (5306, 5308), False, 'import time\n'), ((5474, 5515), 'keras.layers.Dense', 'Dense', (['(10)'], {'input_dim': '(4)', 'activation': '"""relu"""'}), "(10, input_dim=4, activation='relu')\n", (5479, 5515), False, 'from keras.layers import Dense\n'), ((5528, 5556), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (5533, 5556), False, 'from keras.layers import Dense\n'), ((5569, 5597), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (5574, 5597), False, 'from keras.layers import Dense\n'), ((5610, 5638), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (5615, 5638), False, 'from keras.layers import Dense\n'), ((5651, 5679), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (5656, 5679), False, 'from keras.layers import Dense\n'), ((5692, 5720), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (5697, 5720), False, 'from keras.layers import Dense\n'), ((5733, 5763), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (5738, 5763), False, 'from keras.layers import Dense\n'), ((6175, 6186), 'time.time', 'time.time', ([], {}), '()\n', 
(6184, 6186), False, 'import time\n'), ((6352, 6396), 'keras.layers.Dense', 'Dense', (['(64)'], {'input_dim': '(4)', 'activation': '"""softmax"""'}), "(64, input_dim=4, activation='softmax')\n", (6357, 6396), False, 'from keras.layers import Dense\n'), ((6409, 6439), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (6414, 6439), False, 'from keras.layers import Dense\n'), ((6858, 6869), 'time.time', 'time.time', ([], {}), '()\n', (6867, 6869), False, 'import time\n'), ((7035, 7079), 'keras.layers.Dense', 'Dense', (['(13)'], {'input_dim': '(4)', 'activation': '"""softmax"""'}), "(13, input_dim=4, activation='softmax')\n", (7040, 7079), False, 'from keras.layers import Dense\n'), ((7092, 7122), 'keras.layers.Dense', 'Dense', (['(7)'], {'activation': '"""softmax"""'}), "(7, activation='softmax')\n", (7097, 7122), False, 'from keras.layers import Dense\n'), ((7135, 7165), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (7140, 7165), False, 'from keras.layers import Dense\n'), ((7656, 7716), 'numpy.loadtxt', 'numpy.loadtxt', (['trainingDataFile_beforeAddDrop'], {'delimiter': '""","""'}), "(trainingDataFile_beforeAddDrop, delimiter=',')\n", (7669, 7716), False, 'import numpy\n'), ((7799, 7811), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (7809, 7811), False, 'from keras.models import Sequential\n'), ((8407, 8466), 'numpy.loadtxt', 'numpy.loadtxt', (['trainingDataFile_afterAddDrop'], {'delimiter': '""","""'}), "(trainingDataFile_afterAddDrop, delimiter=',')\n", (8420, 8466), False, 'import numpy\n'), ((8549, 8561), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (8559, 8561), False, 'from keras.models import Sequential\n'), ((2914, 2938), 'pprint.pprint', 'pprint.pprint', (['oneCourse'], {}), '(oneCourse)\n', (2927, 2938), False, 'import pprint\n'), ((7631, 7642), 'time.time', 'time.time', ([], {}), '()\n', (7640, 7642), False, 'import time\n'), ((7824, 7865), 'keras.layers.Dense', 'Dense', (['(13)'], {'input_dim': '(4)', 'activation': '"""relu"""'}), "(13, input_dim=4, activation='relu')\n", (7829, 7865), False, 'from keras.layers import Dense\n'), ((7879, 7906), 'keras.layers.Dense', 'Dense', (['(7)'], {'activation': '"""relu"""'}), "(7, activation='relu')\n", (7884, 7906), False, 'from keras.layers import Dense\n'), ((7920, 7950), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (7925, 7950), False, 'from keras.layers import Dense\n'), ((8382, 8393), 'time.time', 'time.time', ([], {}), '()\n', (8391, 8393), False, 'import time\n'), ((8574, 8615), 'keras.layers.Dense', 'Dense', (['(13)'], {'input_dim': '(4)', 'activation': '"""relu"""'}), "(13, input_dim=4, activation='relu')\n", (8579, 8615), False, 'from keras.layers import Dense\n'), ((8629, 8656), 'keras.layers.Dense', 'Dense', (['(7)'], {'activation': '"""relu"""'}), "(7, activation='relu')\n", (8634, 8656), False, 'from keras.layers import Dense\n'), ((8670, 8700), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (8675, 8700), False, 'from keras.layers import Dense\n'), ((792, 858), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2018-01-26T14:00Z"""', '"""%Y-%m-%dT%H:%MZ"""'], {}), "('2018-01-26T14:00Z', '%Y-%m-%dT%H:%MZ')\n", (818, 858), False, 'import datetime\n'), ((900, 966), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['"""2018-02-01T11:30Z"""', 
'"""%Y-%m-%dT%H:%MZ"""'], {}), "('2018-02-01T11:30Z', '%Y-%m-%dT%H:%MZ')\n", (926, 966), False, 'import datetime\n')]
from flask import Flask from flask_sqlalchemy import SQLAlchemy db = SQLAlchemy() def create_all_tables(): db.create_all() def initialize_db(app: Flask): db.init_app(app) db.app = app from investing_algorithm_framework.core.models.order_status import OrderStatus from investing_algorithm_framework.core.models.order_type import OrderType from investing_algorithm_framework.core.models.order_side import OrderSide from investing_algorithm_framework.core.models.time_unit import TimeUnit from investing_algorithm_framework.core.models.order import Order from investing_algorithm_framework.core.models.portfolio import Portfolio from investing_algorithm_framework.core.models.position import Position __all__ = [ "db", "Portfolio", "Position", 'Order', "OrderType", 'OrderSide', "TimeUnit", "create_all_tables", "initialize_db", "OrderStatus" ]
[ "flask_sqlalchemy.SQLAlchemy" ]
[((70, 82), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ([], {}), '()\n', (80, 82), False, 'from flask_sqlalchemy import SQLAlchemy\n')]
from sqlalchemy import Column, Integer from sqlalchemy import ForeignKey from sqlalchemy.orm import declarative_base from .base import Base class RelSaleSizeProject(Base): __tablename__ = 'rel_salesizes_projects' id = Column(Integer, primary_key=True) project_id = Column(Integer, ForeignKey('projects.id')) salesize_id = Column(Integer, ForeignKey('salesizes.id'))
[ "sqlalchemy.ForeignKey", "sqlalchemy.Column" ]
[((228, 261), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (234, 261), False, 'from sqlalchemy import Column, Integer\n'), ((295, 320), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""projects.id"""'], {}), "('projects.id')\n", (305, 320), False, 'from sqlalchemy import ForeignKey\n'), ((356, 382), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""salesizes.id"""'], {}), "('salesizes.id')\n", (366, 382), False, 'from sqlalchemy import ForeignKey\n')]
""" jinja2content.py ---------------- DONT EDIT THIS FILE Pelican plugin that processes Markdown files as jinja templates. """ from jinja2 import Environment, FileSystemLoader, ChoiceLoader import os from pelican import signals from pelican.readers import MarkdownReader, HTMLReader, RstReader from pelican.utils import pelican_open from tempfile import NamedTemporaryFile class JinjaContentMixin: def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # will look first in 'JINJA2CONTENT_TEMPLATES', by default the # content root path, then in the theme's templates local_dirs = self.settings.get('JINJA2CONTENT_TEMPLATES', ['.']) local_dirs = [os.path.join(self.settings['PATH'], folder) for folder in local_dirs] theme_dir = os.path.join(self.settings['THEME'], 'templates') loaders = [FileSystemLoader(_dir) for _dir in local_dirs + [theme_dir]] if 'JINJA_ENVIRONMENT' in self.settings: # pelican 3.7 jinja_environment = self.settings['JINJA_ENVIRONMENT'] else: jinja_environment = { 'trim_blocks': True, 'lstrip_blocks': True, 'extensions': self.settings['JINJA_EXTENSIONS'] } self.env = Environment( loader=ChoiceLoader(loaders), **jinja_environment) def read(self, source_path): with pelican_open(source_path) as text: text = self.env.from_string(text).render() with NamedTemporaryFile(delete=False) as f: f.write(text.encode()) f.close() content, metadata = super().read(f.name) os.unlink(f.name) return content, metadata class JinjaMarkdownReader(JinjaContentMixin, MarkdownReader): pass class JinjaRstReader(JinjaContentMixin, RstReader): pass class JinjaHTMLReader(JinjaContentMixin, HTMLReader): pass def add_reader(readers): for Reader in [JinjaMarkdownReader, JinjaRstReader, JinjaHTMLReader]: for ext in Reader.file_extensions: readers.reader_classes[ext] = Reader def register(): signals.readers_init.connect(add_reader)
[ "pelican.utils.pelican_open", "pelican.signals.readers_init.connect", "os.path.join", "jinja2.ChoiceLoader", "os.unlink", "tempfile.NamedTemporaryFile", "jinja2.FileSystemLoader" ]
[((2193, 2233), 'pelican.signals.readers_init.connect', 'signals.readers_init.connect', (['add_reader'], {}), '(add_reader)\n', (2221, 2233), False, 'from pelican import signals\n'), ((824, 873), 'os.path.join', 'os.path.join', (["self.settings['THEME']", '"""templates"""'], {}), "(self.settings['THEME'], 'templates')\n", (836, 873), False, 'import os\n'), ((712, 755), 'os.path.join', 'os.path.join', (["self.settings['PATH']", 'folder'], {}), "(self.settings['PATH'], folder)\n", (724, 755), False, 'import os\n'), ((894, 916), 'jinja2.FileSystemLoader', 'FileSystemLoader', (['_dir'], {}), '(_dir)\n', (910, 916), False, 'from jinja2 import Environment, FileSystemLoader, ChoiceLoader\n'), ((1461, 1486), 'pelican.utils.pelican_open', 'pelican_open', (['source_path'], {}), '(source_path)\n', (1473, 1486), False, 'from pelican.utils import pelican_open\n'), ((1565, 1597), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (1583, 1597), False, 'from tempfile import NamedTemporaryFile\n'), ((1726, 1743), 'os.unlink', 'os.unlink', (['f.name'], {}), '(f.name)\n', (1735, 1743), False, 'import os\n'), ((1357, 1378), 'jinja2.ChoiceLoader', 'ChoiceLoader', (['loaders'], {}), '(loaders)\n', (1369, 1378), False, 'from jinja2 import Environment, FileSystemLoader, ChoiceLoader\n')]
from blacklist import BLACKLIST from flask import Flask, jsonify from flask_restful import Api from resources.hotel import Hoteis, Hotel from resources.user import User, UserLogin, UserLogout, UserRegister, Users from resources.site import Site, Sites from flask_jwt_extended import JWTManager app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db' app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.config['JWT_SECRET_KEY'] = 'Jbs8aGbbAyt7iMa878Pnsj' app.config['JWT_BLACKLIST_ENABLED'] = True api = Api(app) jwt = JWTManager(app) @app.before_first_request def create_db(): db.create_all() @jwt.token_in_blacklist_loader def verify_block_list(token): return token['jti'] in BLACKLIST @jwt.revoked_token_loader def revoked_access_token(): return jsonify({'message': "You have been logged out."}), 401 # Unautorized # Hotels resource api.add_resource(Hoteis, '/hoteis') api.add_resource(Hotel, '/hoteis/<string:hotel_id>') # Users resource api.add_resource(Users, '/users') api.add_resource(User, '/users/<string:user_id>') # User register resource api.add_resource(UserRegister, '/register') # Login resource api.add_resource(UserLogin, '/login') # Logout resource api.add_resource(UserLogout, '/logout') # Sites resource api.add_resource(Sites, '/sites') api.add_resource(Site, '/sites/<string:site_url>') if __name__ == '__main__': from database.sql_alchemy import db db.init_app(app) app.run(debug=True)
[ "flask_jwt_extended.JWTManager", "flask_restful.Api", "flask.Flask", "database.sql_alchemy.db.init_app", "database.sql_alchemy.db.create_all", "flask.jsonify" ]
[((302, 317), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (307, 317), False, 'from flask import Flask, jsonify\n'), ((542, 550), 'flask_restful.Api', 'Api', (['app'], {}), '(app)\n', (545, 550), False, 'from flask_restful import Api\n'), ((557, 572), 'flask_jwt_extended.JWTManager', 'JWTManager', (['app'], {}), '(app)\n', (567, 572), False, 'from flask_jwt_extended import JWTManager\n'), ((621, 636), 'database.sql_alchemy.db.create_all', 'db.create_all', ([], {}), '()\n', (634, 636), False, 'from database.sql_alchemy import db\n'), ((1442, 1458), 'database.sql_alchemy.db.init_app', 'db.init_app', (['app'], {}), '(app)\n', (1453, 1458), False, 'from database.sql_alchemy import db\n'), ((802, 851), 'flask.jsonify', 'jsonify', (["{'message': 'You have been logged out.'}"], {}), "({'message': 'You have been logged out.'})\n", (809, 851), False, 'from flask import Flask, jsonify\n')]
import struct import pycom import time from network import LoRa def blink(seconds, rgb): pycom.rgbled(rgb) time.sleep(seconds) pycom.rgbled(0x000000) # off def setUSFrequencyPlan(lora): """ Sets the frequency plan that matches the TTN gateway in the USA """ # remove all US915 channels for channel in range(0, 72): lora.remove_channel(channel) # set all channels to the same frequency (must be before sending the OTAA join request) ttn_start_frequency = 903900000 ttn_step_frequency = 200000 ttn_ch8_frequency = 904600000 # Set up first 8 US915 TTN uplink channels for channel in range(0, 9): if (channel == 8): channel_frequency = ttn_ch8_frequency # DR3 = SF8/500kHz channel_dr_min = 4 channel_dr_max = 4 else: channel_frequency = ttn_start_frequency + \ (channel * ttn_step_frequency) # DR0 = SF10/125kHz channel_dr_min = 0 # DR3 = SF7/125kHz channel_dr_max = 3 lora.add_channel(channel, frequency=channel_frequency, dr_min=channel_dr_min, dr_max=channel_dr_max) print("Added channel", channel, channel_frequency, channel_dr_min, channel_dr_max) def join(app_eui, app_key, useADR): """ Join the Lorawan network using OTAA. new lora session is returned """ # Set the power to 20db for US915 # You can also set the default dr value but I found that was problematic # You need to turn on adr (auto data rate) at this point if it is to be used # only use adr for static devices (Not moving) # see https://lora-developers.semtech.com/library/tech-papers-and-guides/understanding-adr/ lora = LoRa(mode=LoRa.LORAWAN, region=LoRa.US915, adr=useADR, tx_power=20) setUSFrequencyPlan(lora) print('Joining', end='') lora.join(activation=LoRa.OTAA, auth=(app_eui, app_key), timeout=0) # wait until the module has joined the network while not lora.has_joined(): time.sleep(2.5) blink(.5, 0xff8f00) # dark orange print('.', end='') print('') print('Joined') blink(2, 0x006400) # dark green return lora def send(lora, socket, port, payload, useADR): """ send data to the lorawan gateway on selected port """ blink(.5, 0x00008b) # dark blue socket.setblocking(True) socket.bind(port) print("Sending data:", payload.pack(), " Size:", payload.calcsize()) socket.send(payload.pack()) # Give send a extra second to be returned before switching # the socket blocking mode (May not need this) time.sleep(1) socket.setblocking(False) lora.nvram_save() class gps_payload: """ Class for managing the GPS payload data that is transmitted to the lorawan service update the class properties and struct definition for the particular use case """ longitude = 0 latitude = 0 pack_format = "ff" def __init__(self, longitude, latitude): self.longitude = longitude # Float self.latitude = latitude # Float # see format options here https://docs.python.org/2/library/struct.html#format-characters # Noter: use single precision float f for GPS Lng/Lat to get locations down to a meter def pack(self): return struct.pack(self.pack_format, self.longitude, self.latitude) def calcsize(self): return struct.calcsize(self.pack_format) class sensor_payload: """ Class for managing the sensor payload data that is transmitted to the lorawan service update the class properties and struct definition for the particular use case """ celsius = 0 humidity = 0 waterlevel = 0 voltage = 0 pack_format = "bBBB" def __init__(self, celsius, humidity, waterlevel, voltage): self.celsius = celsius # In +/- celsius self.humidity = humidity # In percentage self.waterlevel = waterlevel # in centimeters self.voltage = voltage # In tenths of a volt # see format options here 
https://docs.python.org/2/library/struct.html#format-characters def pack(self): return struct.pack(self.pack_format, self.celsius, self.humidity, self.waterlevel, self.voltage) def calcsize(self): return struct.calcsize(self.pack_format)
[ "struct.calcsize", "time.sleep", "struct.pack", "pycom.rgbled", "network.LoRa" ]
[((95, 112), 'pycom.rgbled', 'pycom.rgbled', (['rgb'], {}), '(rgb)\n', (107, 112), False, 'import pycom\n'), ((117, 136), 'time.sleep', 'time.sleep', (['seconds'], {}), '(seconds)\n', (127, 136), False, 'import time\n'), ((141, 156), 'pycom.rgbled', 'pycom.rgbled', (['(0)'], {}), '(0)\n', (153, 156), False, 'import pycom\n'), ((1777, 1844), 'network.LoRa', 'LoRa', ([], {'mode': 'LoRa.LORAWAN', 'region': 'LoRa.US915', 'adr': 'useADR', 'tx_power': '(20)'}), '(mode=LoRa.LORAWAN, region=LoRa.US915, adr=useADR, tx_power=20)\n', (1781, 1844), False, 'from network import LoRa\n'), ((2680, 2693), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2690, 2693), False, 'import time\n'), ((2084, 2099), 'time.sleep', 'time.sleep', (['(2.5)'], {}), '(2.5)\n', (2094, 2099), False, 'import time\n'), ((3355, 3415), 'struct.pack', 'struct.pack', (['self.pack_format', 'self.longitude', 'self.latitude'], {}), '(self.pack_format, self.longitude, self.latitude)\n', (3366, 3415), False, 'import struct\n'), ((3456, 3489), 'struct.calcsize', 'struct.calcsize', (['self.pack_format'], {}), '(self.pack_format)\n', (3471, 3489), False, 'import struct\n'), ((4190, 4283), 'struct.pack', 'struct.pack', (['self.pack_format', 'self.celsius', 'self.humidity', 'self.waterlevel', 'self.voltage'], {}), '(self.pack_format, self.celsius, self.humidity, self.waterlevel,\n self.voltage)\n', (4201, 4283), False, 'import struct\n'), ((4321, 4354), 'struct.calcsize', 'struct.calcsize', (['self.pack_format'], {}), '(self.pack_format)\n', (4336, 4354), False, 'import struct\n')]
import os import json STOPWORDS_JSON_PATH = os.path.join( os.path.dirname(os.path.abspath(__file__)), os.pardir, "corpora/stopwords.json" ) with open(STOPWORDS_JSON_PATH, "r", encoding="utf-8") as f: STOPWORD = json.load(f)["stopwords"]
[ "os.path.abspath", "json.load" ]
[((79, 104), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (94, 104), False, 'import os\n'), ((221, 233), 'json.load', 'json.load', (['f'], {}), '(f)\n', (230, 233), False, 'import json\n')]
import argparse import os import shutil from tqdm import tqdm import logging from src.utils.common import read_yaml, create_directories import random from src.utils.model import log_model_summary import tensorflow as tf STAGE= "Base Model Creation" logging.basicConfig( filename=os.path.join("logs",'running_logs.log'), level=logging.INFO, format="[%(asctime)s: %(levelname)s: %(module)s]: %(message)s", filemode="a") def main(config_path): config=read_yaml(config_path) params=config["params"] logging.info("Layer Defined") LAYERS=[ tf.keras.layers.Input(shape=tuple(params["img_shape"])), tf.keras.layers.Conv2D(filters=32, kernel_size=(3,3), activation="relu"), tf.keras.layers.MaxPool2D(pool_size=(2,2)), tf.keras.layers.Conv2D(32,(3,3), activation="relu"), tf.keras.layers.MaxPool2D(pool_size=(2,2)), tf.keras.layers.Flatten(), tf.keras.layers.Dense(8, activation="relu"), tf.keras.layers.Dense(2, activation="softmax") ] classifier=tf.keras.Sequential(LAYERS) logging.info(f"Base Model Summary:\n{log_model_summary(classifier)}") classifier.compile(optimizer=tf.keras.optimizers.Adam(params["lr"]), loss=params["loss"], metrics=params["metrics"] ) path_to_model_dir=os.path.join(config["data"]["local_dir"], config["data"]["model_dir"] ) create_directories([path_to_model_dir]) path_to_model=os.path.join(path_to_model_dir, config["data"]["init_model_file"]) classifier.save(path_to_model) logging.info(f"model is save at : {path_to_model}") if __name__=="__main__": args=argparse.ArgumentParser() args.add_argument("--config", "-c", default="configs/config.yaml") parsed_args=args.parse_args() try: logging.info("\n*********************") logging.info(f">>>>>>>stage {STAGE} started <<<<<<<") main(config_path=parsed_args.config) logging.info(f">>>>>>>> stage {STAGE} completed! <<<<<<<<\n") except Exception as e: logging.exception(e) raise e
[ "tensorflow.keras.layers.Conv2D", "argparse.ArgumentParser", "tensorflow.keras.Sequential", "os.path.join", "logging.exception", "tensorflow.keras.optimizers.Adam", "src.utils.model.log_model_summary", "tensorflow.keras.layers.Dense", "src.utils.common.create_directories", "tensorflow.keras.layers.Flatten", "src.utils.common.read_yaml", "logging.info", "tensorflow.keras.layers.MaxPool2D" ]
[((471, 493), 'src.utils.common.read_yaml', 'read_yaml', (['config_path'], {}), '(config_path)\n', (480, 493), False, 'from src.utils.common import read_yaml, create_directories\n'), ((526, 555), 'logging.info', 'logging.info', (['"""Layer Defined"""'], {}), "('Layer Defined')\n", (538, 555), False, 'import logging\n'), ((1045, 1072), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', (['LAYERS'], {}), '(LAYERS)\n', (1064, 1072), True, 'import tensorflow as tf\n'), ((1344, 1414), 'os.path.join', 'os.path.join', (["config['data']['local_dir']", "config['data']['model_dir']"], {}), "(config['data']['local_dir'], config['data']['model_dir'])\n", (1356, 1414), False, 'import os\n'), ((1460, 1499), 'src.utils.common.create_directories', 'create_directories', (['[path_to_model_dir]'], {}), '([path_to_model_dir])\n', (1478, 1499), False, 'from src.utils.common import read_yaml, create_directories\n'), ((1519, 1585), 'os.path.join', 'os.path.join', (['path_to_model_dir', "config['data']['init_model_file']"], {}), "(path_to_model_dir, config['data']['init_model_file'])\n", (1531, 1585), False, 'import os\n'), ((1656, 1707), 'logging.info', 'logging.info', (['f"""model is save at : {path_to_model}"""'], {}), "(f'model is save at : {path_to_model}')\n", (1668, 1707), False, 'import logging\n'), ((1744, 1769), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1767, 1769), False, 'import argparse\n'), ((286, 326), 'os.path.join', 'os.path.join', (['"""logs"""', '"""running_logs.log"""'], {}), "('logs', 'running_logs.log')\n", (298, 326), False, 'import os\n'), ((642, 715), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(filters=32, kernel_size=(3, 3), activation='relu')\n", (664, 715), True, 'import tensorflow as tf\n'), ((724, 767), 'tensorflow.keras.layers.MaxPool2D', 'tf.keras.layers.MaxPool2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (749, 767), True, 'import tensorflow as tf\n'), ((776, 829), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (798, 829), True, 'import tensorflow as tf\n'), ((837, 880), 'tensorflow.keras.layers.MaxPool2D', 'tf.keras.layers.MaxPool2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (862, 880), True, 'import tensorflow as tf\n'), ((889, 914), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (912, 914), True, 'import tensorflow as tf\n'), ((924, 967), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(8)'], {'activation': '"""relu"""'}), "(8, activation='relu')\n", (945, 967), True, 'import tensorflow as tf\n'), ((977, 1023), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (998, 1023), True, 'import tensorflow as tf\n'), ((1893, 1935), 'logging.info', 'logging.info', (['"""\n*********************"""'], {}), '("""\n*********************""")\n', (1905, 1935), False, 'import logging\n'), ((1941, 1994), 'logging.info', 'logging.info', (['f""">>>>>>>stage {STAGE} started <<<<<<<"""'], {}), "(f'>>>>>>>stage {STAGE} started <<<<<<<')\n", (1953, 1994), False, 'import logging\n'), ((2048, 2109), 'logging.info', 'logging.info', (['f""">>>>>>>> stage {STAGE} completed! <<<<<<<<\n"""'], {}), "(f'>>>>>>>> stage {STAGE} completed! 
<<<<<<<<\\n')\n", (2060, 2109), False, 'import logging\n'), ((1181, 1219), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (["params['lr']"], {}), "(params['lr'])\n", (1205, 1219), True, 'import tensorflow as tf\n'), ((2145, 2165), 'logging.exception', 'logging.exception', (['e'], {}), '(e)\n', (2162, 2165), False, 'import logging\n'), ((1114, 1143), 'src.utils.model.log_model_summary', 'log_model_summary', (['classifier'], {}), '(classifier)\n', (1131, 1143), False, 'from src.utils.model import log_model_summary\n')]
# coding: utf-8 # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import paddle import paddle.nn as nn import importlib from visualdl import LogWriter import numpy as np import pickle from models import utils from config import parser_args def train_model(args): if args.dataset=='cifar10': root = os.path.join(args.data_dir, args.dataset, 'cifar-10-python.tar.gz') print(args) model = importlib.import_module('models.__init__').__dict__[args.net]( None, drop_path_rate=args.drop_path_rate, use_drop_path=args.use_drop_path, use_official_implement=args.use_official_implement) train_loader, val_loader, test_loader = importlib.import_module( 'dataset.' + args.dataset).__dict__['load_data'](root, args.train_batch_size, args.test_batch_size, has_val_dataset=args.has_val_dataset) writer = LogWriter(logdir=args.save_dir) criterion = nn.CrossEntropyLoss() if args.optimizer == 'sgd': lr_scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=args.learning_rate, milestones=args.milestones, gamma=args.gamma) optimizer = paddle.optimizer.Momentum(parameters=model.parameters(), learning_rate=lr_scheduler, momentum=args.momentum, weight_decay=args.weight_decay, use_nesterov=args.nesterov) elif args.optimizer == 'adam': optimizer = paddle.optimizer.AdamW(parameters=model.parameters(), learning_rate=args.learning_rate, weight_decay=args.weight_decay) else: raise ValueError("optimizer must be sgd or adam.") best_acc = 0 for i in range(args.epochs): utils.train_per_epoch(train_loader, model, criterion, optimizer, i, writer) top1_acc, top5_acc = utils.validate(val_loader, model, criterion) if args.optimizer == 'sgd': lr_scheduler.step() if best_acc < top1_acc: paddle.save(model.state_dict(), args.save_dir + '/model_best.pdparams') best_acc = top1_acc if not args.save_best: if (i + 1) % args.save_interval == 0 and i != 0: paddle.save(model.state_dict(), args.save_dir + '/model.pdparams') writer.add_scalar('val-acc', top1_acc, i) writer.add_scalar('val-top5-acc', top5_acc, i) writer.add_scalar('lr', optimizer.get_lr(), i) print('best acc: {:.2f}'.format(best_acc)) model.set_state_dict(paddle.load(args.save_dir + '/model_best.pdparams')) top1_acc, top5_acc = utils.validate(test_loader, model, criterion) with open(os.path.join(args.save_dir, 'test_acc.txt'), 'w') as f: f.write('test_acc:'+str(top1_acc)) def train_hl_api(args): if args.dataset=='cifar10': root = os.path.join(args.data_dir, args.dataset, 'cifar-10-python.tar.gz') print(args) model = importlib.import_module('models.__init__').__dict__[args.net]( None, drop_path_rate=args.drop_path_rate, use_drop_path=args.use_drop_path, use_official_implement=args.use_official_implement) train_loader, val_loader, test_loader = importlib.import_module( 'dataset.' 
+ args.dataset).__dict__['load_data'](root, args.train_batch_size, args.test_batch_size, has_val_dataset=args.has_val_dataset) criterion = nn.CrossEntropyLoss() if args.optimizer == 'sgd': # 因为高层API是每个iter就执行lr_scheduler.step(),故这里把间隔调成m*len(train_loader)才合适 lr_scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=args.learning_rate, milestones=[m*len(train_loader) for m in args.milestones], gamma=args.gamma) optimizer = paddle.optimizer.Momentum(parameters=model.parameters(), learning_rate=lr_scheduler, momentum=args.momentum, weight_decay=args.weight_decay, use_nesterov=args.nesterov) elif args.optimizer == 'adam': optimizer = paddle.optimizer.AdamW(parameters=model.parameters(), learning_rate=args.learning_rate, weight_decay=args.weight_decay) else: raise ValueError("optimizer must be sgd or adam.") model = paddle.Model(model) model.prepare(optimizer=optimizer, #指定优化器 loss=criterion, #指定损失函数 metrics=paddle.metric.Accuracy()) #指定评估方法 #用于visualdl可视化 visualdl = paddle.callbacks.VisualDL(log_dir=args.save_dir) #早停机制,这里使用只是为了在训练过程中保存验证集上的最佳模型,最后用于测试集验证 early_stop = paddle.callbacks.EarlyStopping('acc', mode='max', patience=args.epochs, verbose=1, min_delta=0, baseline=None, save_best_model=True) model.fit(train_data=train_loader, #训练数据集 eval_data=val_loader, #验证数据集 epochs=args.epochs, #迭代轮次 save_dir=args.save_dir, #把模型参数、优化器参数保存至自定义的文件夹 save_freq=args.save_interval, #设定每隔多少个epoch保存模型参数及优化器参数 verbose=1, log_freq=20, eval_freq=args.eval_freq, callbacks=[visualdl, early_stop]) #用验证集上最好模型在测试集上验证精度 model.load(os.path.join(args.save_dir, 'best_model.pdparams')) result = model.evaluate(eval_data=test_loader, verbose=1) print('test acc:', result['acc'], 'test error:', 1-result['acc']) if __name__ == '__main__': args = parser_args() utils.seed_paddle(args.seed) if not args.high_level_api: train_model(args) else: train_hl_api(args)
[ "paddle.metric.Accuracy", "paddle.Model", "visualdl.LogWriter", "importlib.import_module", "paddle.nn.CrossEntropyLoss", "os.path.join", "config.parser_args", "models.utils.validate", "models.utils.seed_paddle", "paddle.load", "models.utils.train_per_epoch", "paddle.callbacks.VisualDL", "paddle.optimizer.lr.MultiStepDecay", "paddle.callbacks.EarlyStopping" ]
[((1497, 1528), 'visualdl.LogWriter', 'LogWriter', ([], {'logdir': 'args.save_dir'}), '(logdir=args.save_dir)\n', (1506, 1528), False, 'from visualdl import LogWriter\n'), ((1546, 1567), 'paddle.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1565, 1567), True, 'import paddle.nn as nn\n'), ((3438, 3483), 'models.utils.validate', 'utils.validate', (['test_loader', 'model', 'criterion'], {}), '(test_loader, model, criterion)\n', (3452, 3483), False, 'from models import utils\n'), ((4267, 4288), 'paddle.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4286, 4288), True, 'import paddle.nn as nn\n'), ((5292, 5311), 'paddle.Model', 'paddle.Model', (['model'], {}), '(model)\n', (5304, 5311), False, 'import paddle\n'), ((5495, 5543), 'paddle.callbacks.VisualDL', 'paddle.callbacks.VisualDL', ([], {'log_dir': 'args.save_dir'}), '(log_dir=args.save_dir)\n', (5520, 5543), False, 'import paddle\n'), ((5609, 5745), 'paddle.callbacks.EarlyStopping', 'paddle.callbacks.EarlyStopping', (['"""acc"""'], {'mode': '"""max"""', 'patience': 'args.epochs', 'verbose': '(1)', 'min_delta': '(0)', 'baseline': 'None', 'save_best_model': '(True)'}), "('acc', mode='max', patience=args.epochs,\n verbose=1, min_delta=0, baseline=None, save_best_model=True)\n", (5639, 5745), False, 'import paddle\n'), ((6512, 6525), 'config.parser_args', 'parser_args', ([], {}), '()\n', (6523, 6525), False, 'from config import parser_args\n'), ((6531, 6559), 'models.utils.seed_paddle', 'utils.seed_paddle', (['args.seed'], {}), '(args.seed)\n', (6548, 6559), False, 'from models import utils\n'), ((906, 973), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.dataset', '"""cifar-10-python.tar.gz"""'], {}), "(args.data_dir, args.dataset, 'cifar-10-python.tar.gz')\n", (918, 973), False, 'import os\n'), ((1627, 1745), 'paddle.optimizer.lr.MultiStepDecay', 'paddle.optimizer.lr.MultiStepDecay', ([], {'learning_rate': 'args.learning_rate', 'milestones': 'args.milestones', 'gamma': 'args.gamma'}), '(learning_rate=args.learning_rate,\n milestones=args.milestones, gamma=args.gamma)\n', (1661, 1745), False, 'import paddle\n'), ((2505, 2580), 'models.utils.train_per_epoch', 'utils.train_per_epoch', (['train_loader', 'model', 'criterion', 'optimizer', 'i', 'writer'], {}), '(train_loader, model, criterion, optimizer, i, writer)\n', (2526, 2580), False, 'from models import utils\n'), ((2613, 2657), 'models.utils.validate', 'utils.validate', (['val_loader', 'model', 'criterion'], {}), '(val_loader, model, criterion)\n', (2627, 2657), False, 'from models import utils\n'), ((3359, 3410), 'paddle.load', 'paddle.load', (["(args.save_dir + '/model_best.pdparams')"], {}), "(args.save_dir + '/model_best.pdparams')\n", (3370, 3410), False, 'import paddle\n'), ((3675, 3742), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.dataset', '"""cifar-10-python.tar.gz"""'], {}), "(args.data_dir, args.dataset, 'cifar-10-python.tar.gz')\n", (3687, 3742), False, 'import os\n'), ((6286, 6336), 'os.path.join', 'os.path.join', (['args.save_dir', '"""best_model.pdparams"""'], {}), "(args.save_dir, 'best_model.pdparams')\n", (6298, 6336), False, 'import os\n'), ((3499, 3542), 'os.path.join', 'os.path.join', (['args.save_dir', '"""test_acc.txt"""'], {}), "(args.save_dir, 'test_acc.txt')\n", (3511, 3542), False, 'import os\n'), ((5425, 5449), 'paddle.metric.Accuracy', 'paddle.metric.Accuracy', ([], {}), '()\n', (5447, 5449), False, 'import paddle\n'), ((1004, 1046), 'importlib.import_module', 'importlib.import_module', 
(['"""models.__init__"""'], {}), "('models.__init__')\n", (1027, 1046), False, 'import importlib\n'), ((1251, 1301), 'importlib.import_module', 'importlib.import_module', (["('dataset.' + args.dataset)"], {}), "('dataset.' + args.dataset)\n", (1274, 1301), False, 'import importlib\n'), ((3773, 3815), 'importlib.import_module', 'importlib.import_module', (['"""models.__init__"""'], {}), "('models.__init__')\n", (3796, 3815), False, 'import importlib\n'), ((4020, 4070), 'importlib.import_module', 'importlib.import_module', (["('dataset.' + args.dataset)"], {}), "('dataset.' + args.dataset)\n", (4043, 4070), False, 'import importlib\n')]
import os

from . import common

import cv2
import numpy as np
import imageio

import torch
import torch.utils.data as data

class Video(data.Dataset):
    def __init__(self, args, name='Video', train=False, benchmark=False):
        self.args = args
        self.name = name
        self.scale = args.scale
        self.idx_scale = 0
        self.train = False
        self.do_eval = False
        self.benchmark = benchmark

        self.filename, _ = os.path.splitext(os.path.basename(args.dir_demo))
        self.vidcap = cv2.VideoCapture(args.dir_demo)
        self.n_frames = 0
        self.total_frames = int(self.vidcap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __getitem__(self, idx):
        success, lr = self.vidcap.read()
        if success:
            self.n_frames += 1
            lr, = common.set_channel(lr, n_channels=self.args.n_colors)
            lr_t, = common.np2Tensor(lr, rgb_range=self.args.rgb_range)
            return lr_t, -1, '{}_{:0>5}'.format(self.filename, self.n_frames)
        else:
            self.vidcap.release()
            return None

    def __len__(self):
        return self.total_frames

    def set_scale(self, idx_scale):
        self.idx_scale = idx_scale
[ "os.path.basename", "cv2.VideoCapture" ]
[((526, 557), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.dir_demo'], {}), '(args.dir_demo)\n', (542, 557), False, 'import cv2\n'), ((471, 502), 'os.path.basename', 'os.path.basename', (['args.dir_demo'], {}), '(args.dir_demo)\n', (487, 502), False, 'import os\n')]
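A note on the Video dataset defined above: __getitem__ reads frames sequentially from the open capture and ignores the index, so it is meant to be consumed in order. A minimal usage sketch, assuming an args namespace carrying the attributes the class expects (dir_demo, scale, n_colors, rgb_range); the file name and values here are made up for illustration:

from types import SimpleNamespace

# hypothetical arguments; the real ones come from the project's option parser
args = SimpleNamespace(dir_demo='demo.mp4', scale=[4], n_colors=3, rgb_range=255)

dataset = Video(args)
for i in range(len(dataset)):
    item = dataset[i]      # frames come back in capture order; i itself is unused
    if item is None:       # the capture was released before the reported frame count
        break
    lr_t, _, name = item
    print(name, tuple(lr_t.shape))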
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections

import mock
from oslo_versionedobjects import fixture as object_fixture

from mogan.notifications import base as notification_base
from mogan.notifications.objects import base as notification
from mogan.objects import base
from mogan.objects import fields
from mogan.objects import server as server_obj
from mogan.tests import base as test_base
from mogan.tests.unit.db import utils as db_utils


class TestNotificationBase(test_base.TestCase):

    @base.MoganObjectRegistry.register_if(False)
    class TestObject(base.MoganObject):
        VERSION = '1.0'
        fields = {
            'field_1': fields.StringField(),
            'field_2': fields.IntegerField(),
            'not_important_field': fields.IntegerField(),
        }

    @base.MoganObjectRegistry.register_if(False)
    class TestNotificationPayload(notification.NotificationPayloadBase):
        VERSION = '1.0'

        SCHEMA = {
            'field_1': ('source_field', 'field_1'),
            'field_2': ('source_field', 'field_2'),
        }

        fields = {
            'extra_field': fields.StringField(),  # filled by ctor
            'field_1': fields.StringField(),  # filled by the schema
            'field_2': fields.IntegerField(),  # filled by the schema
        }

        def populate_schema(self, source_field):
            super(TestNotificationBase.TestNotificationPayload,
                  self).populate_schema(source_field=source_field)

    @base.MoganObjectRegistry.register_if(False)
    class TestNotificationPayloadEmptySchema(
            notification.NotificationPayloadBase):
        VERSION = '1.0'

        fields = {
            'extra_field': fields.StringField(),  # filled by ctor
        }

    @notification.notification_sample('test-update-1.json')
    @notification.notification_sample('test-update-2.json')
    @base.MoganObjectRegistry.register_if(False)
    class TestNotification(notification.NotificationBase):
        VERSION = '1.0'
        fields = {
            'payload': fields.ObjectField('TestNotificationPayload')
        }

    @base.MoganObjectRegistry.register_if(False)
    class TestNotificationEmptySchema(notification.NotificationBase):
        VERSION = '1.0'
        fields = {
            'payload': fields.ObjectField('TestNotificationPayloadEmptySchema')
        }

    expected_payload = {
        'mogan_object.name': 'TestNotificationPayload',
        'mogan_object.data': {
            'extra_field': 'test string',
            'field_1': 'test1',
            'field_2': 42},
        'mogan_object.version': '1.0',
        'mogan_object.namespace': 'mogan'}

    def setUp(self):
        super(TestNotificationBase, self).setUp()
        self.my_obj = self.TestObject(field_1='test1',
                                      field_2=42,
                                      not_important_field=13)

        self.payload = self.TestNotificationPayload(
            extra_field='test string')
        self.payload.populate_schema(source_field=self.my_obj)

        self.notification = self.TestNotification(
            event_type=notification.EventType(
                object='test_object',
                action=fields.NotificationAction.UPDATE,
                phase=fields.NotificationPhase.START),
            publisher=notification.NotificationPublisher(
                host='fake-host', binary='mogan-fake'),
            priority=fields.NotificationPriority.INFO,
            payload=self.payload)

    def _verify_notification(self, mock_notifier, mock_context,
                             expected_event_type, expected_payload):
        mock_notifier.prepare.assert_called_once_with(
            publisher_id='mogan-fake:fake-host')
        mock_notify = mock_notifier.prepare.return_value.info
        self.assertTrue(mock_notify.called)
        self.assertEqual(mock_notify.call_args[0][0], mock_context)
        self.assertEqual(mock_notify.call_args[1]['event_type'],
                         expected_event_type)
        actual_payload = mock_notify.call_args[1]['payload']
        self.assertJsonEqual(expected_payload, actual_payload)

    @mock.patch('mogan.common.rpc.NOTIFIER')
    def test_emit_notification(self, mock_notifier):
        mock_context = mock.Mock()
        mock_context.to_dict.return_value = {}
        self.notification.emit(mock_context)

        self._verify_notification(
            mock_notifier,
            mock_context,
            expected_event_type='test_object.update.start',
            expected_payload=self.expected_payload)

    @mock.patch('mogan.common.rpc.NOTIFIER')
    def test_emit_with_host_and_binary_as_publisher(self, mock_notifier):
        noti = self.TestNotification(
            event_type=notification.EventType(
                object='test_object',
                action=fields.NotificationAction.UPDATE),
            publisher=notification.NotificationPublisher(
                host='fake-host', binary='mogan-fake'),
            priority=fields.NotificationPriority.INFO,
            payload=self.payload)

        mock_context = mock.Mock()
        mock_context.to_dict.return_value = {}
        noti.emit(mock_context)

        self._verify_notification(
            mock_notifier,
            mock_context,
            expected_event_type='test_object.update',
            expected_payload=self.expected_payload)

    @mock.patch('mogan.common.rpc.NOTIFIER')
    def test_emit_event_type_without_phase(self, mock_notifier):
        noti = self.TestNotification(
            event_type=notification.EventType(
                object='test_object',
                action=fields.NotificationAction.UPDATE),
            publisher=notification.NotificationPublisher(
                host='fake-host', binary='mogan-fake'),
            priority=fields.NotificationPriority.INFO,
            payload=self.payload)

        mock_context = mock.Mock()
        mock_context.to_dict.return_value = {}
        noti.emit(mock_context)

        self._verify_notification(
            mock_notifier,
            mock_context,
            expected_event_type='test_object.update',
            expected_payload=self.expected_payload)

    @mock.patch('mogan.common.rpc.NOTIFIER')
    def test_not_possible_to_emit_if_not_populated(self, mock_notifier):
        non_populated_payload = self.TestNotificationPayload(
            extra_field='test string')
        noti = self.TestNotification(
            event_type=notification.EventType(
                object='test_object',
                action=fields.NotificationAction.UPDATE),
            publisher=notification.NotificationPublisher(
                host='fake-host', binary='mogan-fake'),
            priority=fields.NotificationPriority.INFO,
            payload=non_populated_payload)

        mock_context = mock.Mock()
        self.assertRaises(AssertionError, noti.emit, mock_context)
        mock_notifier.assert_not_called()

    @mock.patch('mogan.common.rpc.NOTIFIER')
    def test_empty_schema(self, mock_notifier):
        non_populated_payload = self.TestNotificationPayloadEmptySchema(
            extra_field='test string')
        noti = self.TestNotificationEmptySchema(
            event_type=notification.EventType(
                object='test_object',
                action=fields.NotificationAction.UPDATE),
            publisher=notification.NotificationPublisher(
                host='fake-host', binary='mogan-fake'),
            priority=fields.NotificationPriority.INFO,
            payload=non_populated_payload)

        mock_context = mock.Mock()
        mock_context.to_dict.return_value = {}
        noti.emit(mock_context)

        self._verify_notification(
            mock_notifier,
            mock_context,
            expected_event_type='test_object.update',
            expected_payload={
                'mogan_object.name': 'TestNotificationPayloadEmptySchema',
                'mogan_object.data': {'extra_field': u'test string'},
                'mogan_object.version': '1.0',
                'mogan_object.namespace': 'mogan'})

    def test_sample_decorator(self):
        self.assertEqual(2, len(self.TestNotification.samples))
        self.assertIn('test-update-1.json', self.TestNotification.samples)
        self.assertIn('test-update-2.json', self.TestNotification.samples)


notification_object_data = {
    'ServerPayload': '1.0-30fefa8478f1b9b35c66868377fb6dfd',
    'ServerAddressesPayload': '1.0-69caf4c36f36756bb1f6970d093ee1f6',
    'ServerActionPayload': '1.0-8dc4429afa34d86ab92c9387e3ccd0c3',
    'ServerActionNotification': '1.0-20087e599436bd9db62ae1fb5e2dfef2',
    'ExceptionPayload': '1.0-7c31986d8d78bed910c324965c431e18',
    'EventType': '1.0-589894aac7c98fb640eca394f67ad621',
    'NotificationPublisher': '1.0-4b0b0d662b21eeed0b23617f3f11794b'
}


class TestNotificationObjectVersions(test_base.TestCase):

    def setUp(self):
        super(test_base.TestCase, self).setUp()
        base.MoganObjectRegistry.register_notification_objects()

    def test_versions(self):
        noti_class = base.MoganObjectRegistry.notification_classes
        classes = {cls.__name__: [cls] for cls in noti_class}
        checker = object_fixture.ObjectVersionChecker(obj_classes=classes)
        # Compute the difference between actual fingerprints and
        # expect fingerprints. expect = actual = {} if there is no change.
        expect, actual = checker.test_hashes(notification_object_data)
        self.assertEqual(expect, actual,
                         "Some objects fields or remotable methods have been "
                         "modified. Please make sure the version of those "
                         "objects have been bumped and then update "
                         "expected_object_fingerprints with the new hashes. ")

    def test_notification_payload_version_depends_on_the_schema(self):
        @base.MoganObjectRegistry.register_if(False)
        class TestNotificationPayload(notification.NotificationPayloadBase):
            VERSION = '1.0'

            SCHEMA = {
                'field_1': ('source_field', 'field_1'),
                'field_2': ('source_field', 'field_2'),
            }

            fields = {
                'extra_field': fields.StringField(),  # filled by ctor
                'field_1': fields.StringField(),  # filled by the schema
                'field_2': fields.IntegerField(),  # filled by the schema
            }

        checker = object_fixture.ObjectVersionChecker(
            {'TestNotificationPayload': (TestNotificationPayload,)})
        old_hash = checker.get_hashes(extra_data_func=get_extra_data)

        TestNotificationPayload.SCHEMA['field_3'] = ('source_field', 'field_3')

        new_hash = checker.get_hashes(extra_data_func=get_extra_data)
        self.assertNotEqual(old_hash, new_hash)


def get_extra_data(obj_class):
    extra_data = tuple()
    # Get the SCHEMA items to add to the fingerprint
    # if we are looking at a notification
    if issubclass(obj_class, notification.NotificationPayloadBase):
        schema_data = collections.OrderedDict(
            sorted(obj_class.SCHEMA.items()))
        extra_data += (schema_data,)
    return extra_data


class TestServerActionNotification(test_base.TestCase):

    @mock.patch('mogan.notifications.objects.server.'
                'ServerActionNotification._emit')
    def test_send_version_server_action(self, mock_emit):
        # Make sure that the notification payload chooses the values in
        # server.flavor.$value instead of server.$value
        fake_server_values = db_utils.get_test_server()
        server = server_obj.Server(**fake_server_values)
        notification_base.notify_about_server_action(
            mock.MagicMock(),
            server,
            'test-host',
            fields.NotificationAction.CREATE,
            fields.NotificationPhase.START,
            'mogan-compute')
        self.assertEqual('server.create.start',
                         mock_emit.call_args_list[0][1]['event_type'])
        self.assertEqual('mogan-compute:test-host',
                         mock_emit.call_args_list[0][1]['publisher_id'])
        payload = mock_emit.call_args_list[0][1]['payload'][
            'mogan_object.data']
        self.assertEqual(fake_server_values['uuid'], payload['uuid'])
        self.assertEqual(fake_server_values['flavor_uuid'],
                         payload['flavor_uuid'])
        self.assertEqual(fake_server_values['status'], payload['status'])
        self.assertEqual(fake_server_values['user_id'], payload['user_id'])
        self.assertEqual(fake_server_values['availability_zone'],
                         payload['availability_zone'])
        self.assertEqual(fake_server_values['name'], payload['name'])
        self.assertEqual(fake_server_values['image_uuid'],
                         payload['image_uuid'])
        self.assertEqual(fake_server_values['project_id'],
                         payload['project_id'])
        self.assertEqual(fake_server_values['description'],
                         payload['description'])
        self.assertEqual(fake_server_values['power_state'],
                         payload['power_state'])
[ "mogan.tests.unit.db.utils.get_test_server", "mock.patch", "mogan.objects.base.MoganObjectRegistry.register_notification_objects", "oslo_versionedobjects.fixture.ObjectVersionChecker", "mock.MagicMock", "mogan.objects.fields.StringField", "mock.Mock", "mogan.objects.fields.IntegerField", "mogan.notifications.objects.base.notification_sample", "mogan.objects.fields.ObjectField", "mogan.notifications.objects.base.NotificationPublisher", "mogan.notifications.objects.base.EventType", "mogan.objects.base.MoganObjectRegistry.register_if", "mogan.objects.server.Server" ]
[((1067, 1110), 'mogan.objects.base.MoganObjectRegistry.register_if', 'base.MoganObjectRegistry.register_if', (['(False)'], {}), '(False)\n', (1103, 1110), False, 'from mogan.objects import base\n'), ((1359, 1402), 'mogan.objects.base.MoganObjectRegistry.register_if', 'base.MoganObjectRegistry.register_if', (['(False)'], {}), '(False)\n', (1395, 1402), False, 'from mogan.objects import base\n'), ((2057, 2100), 'mogan.objects.base.MoganObjectRegistry.register_if', 'base.MoganObjectRegistry.register_if', (['(False)'], {}), '(False)\n', (2093, 2100), False, 'from mogan.objects import base\n'), ((2325, 2379), 'mogan.notifications.objects.base.notification_sample', 'notification.notification_sample', (['"""test-update-1.json"""'], {}), "('test-update-1.json')\n", (2357, 2379), True, 'from mogan.notifications.objects import base as notification\n'), ((2385, 2439), 'mogan.notifications.objects.base.notification_sample', 'notification.notification_sample', (['"""test-update-2.json"""'], {}), "('test-update-2.json')\n", (2417, 2439), True, 'from mogan.notifications.objects import base as notification\n'), ((2445, 2488), 'mogan.objects.base.MoganObjectRegistry.register_if', 'base.MoganObjectRegistry.register_if', (['(False)'], {}), '(False)\n', (2481, 2488), False, 'from mogan.objects import base\n'), ((2676, 2719), 'mogan.objects.base.MoganObjectRegistry.register_if', 'base.MoganObjectRegistry.register_if', (['(False)'], {}), '(False)\n', (2712, 2719), False, 'from mogan.objects import base\n'), ((4749, 4788), 'mock.patch', 'mock.patch', (['"""mogan.common.rpc.NOTIFIER"""'], {}), "('mogan.common.rpc.NOTIFIER')\n", (4759, 4788), False, 'import mock\n'), ((5176, 5215), 'mock.patch', 'mock.patch', (['"""mogan.common.rpc.NOTIFIER"""'], {}), "('mogan.common.rpc.NOTIFIER')\n", (5186, 5215), False, 'import mock\n'), ((5990, 6029), 'mock.patch', 'mock.patch', (['"""mogan.common.rpc.NOTIFIER"""'], {}), "('mogan.common.rpc.NOTIFIER')\n", (6000, 6029), False, 'import mock\n'), ((6795, 6834), 'mock.patch', 'mock.patch', (['"""mogan.common.rpc.NOTIFIER"""'], {}), "('mogan.common.rpc.NOTIFIER')\n", (6805, 6834), False, 'import mock\n'), ((7553, 7592), 'mock.patch', 'mock.patch', (['"""mogan.common.rpc.NOTIFIER"""'], {}), "('mogan.common.rpc.NOTIFIER')\n", (7563, 7592), False, 'import mock\n'), ((11941, 12020), 'mock.patch', 'mock.patch', (['"""mogan.notifications.objects.server.ServerActionNotification._emit"""'], {}), "('mogan.notifications.objects.server.ServerActionNotification._emit')\n", (11951, 12020), False, 'import mock\n'), ((4865, 4876), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (4874, 4876), False, 'import mock\n'), ((5698, 5709), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (5707, 5709), False, 'import mock\n'), ((6503, 6514), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (6512, 6514), False, 'import mock\n'), ((7426, 7437), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (7435, 7437), False, 'import mock\n'), ((8181, 8192), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (8190, 8192), False, 'import mock\n'), ((9571, 9627), 'mogan.objects.base.MoganObjectRegistry.register_notification_objects', 'base.MoganObjectRegistry.register_notification_objects', ([], {}), '()\n', (9625, 9627), False, 'from mogan.objects import base\n'), ((9805, 9861), 'oslo_versionedobjects.fixture.ObjectVersionChecker', 'object_fixture.ObjectVersionChecker', ([], {'obj_classes': 'classes'}), '(obj_classes=classes)\n', (9840, 9861), True, 'from oslo_versionedobjects import fixture as object_fixture\n'), ((10498, 10541), 
'mogan.objects.base.MoganObjectRegistry.register_if', 'base.MoganObjectRegistry.register_if', (['(False)'], {}), '(False)\n', (10534, 10541), False, 'from mogan.objects import base\n'), ((11073, 11170), 'oslo_versionedobjects.fixture.ObjectVersionChecker', 'object_fixture.ObjectVersionChecker', (["{'TestNotificationPayload': (TestNotificationPayload,)}"], {}), "({'TestNotificationPayload': (\n TestNotificationPayload,)})\n", (11108, 11170), True, 'from oslo_versionedobjects import fixture as object_fixture\n'), ((12255, 12281), 'mogan.tests.unit.db.utils.get_test_server', 'db_utils.get_test_server', ([], {}), '()\n', (12279, 12281), True, 'from mogan.tests.unit.db import utils as db_utils\n'), ((12299, 12338), 'mogan.objects.server.Server', 'server_obj.Server', ([], {}), '(**fake_server_values)\n', (12316, 12338), True, 'from mogan.objects import server as server_obj\n'), ((1217, 1237), 'mogan.objects.fields.StringField', 'fields.StringField', ([], {}), '()\n', (1235, 1237), False, 'from mogan.objects import fields\n'), ((1262, 1283), 'mogan.objects.fields.IntegerField', 'fields.IntegerField', ([], {}), '()\n', (1281, 1283), False, 'from mogan.objects import fields\n'), ((1320, 1341), 'mogan.objects.fields.IntegerField', 'fields.IntegerField', ([], {}), '()\n', (1339, 1341), False, 'from mogan.objects import fields\n'), ((1681, 1701), 'mogan.objects.fields.StringField', 'fields.StringField', ([], {}), '()\n', (1699, 1701), False, 'from mogan.objects import fields\n'), ((1744, 1764), 'mogan.objects.fields.StringField', 'fields.StringField', ([], {}), '()\n', (1762, 1764), False, 'from mogan.objects import fields\n'), ((1813, 1834), 'mogan.objects.fields.IntegerField', 'fields.IntegerField', ([], {}), '()\n', (1832, 1834), False, 'from mogan.objects import fields\n'), ((2269, 2289), 'mogan.objects.fields.StringField', 'fields.StringField', ([], {}), '()\n', (2287, 2289), False, 'from mogan.objects import fields\n'), ((2614, 2659), 'mogan.objects.fields.ObjectField', 'fields.ObjectField', (['"""TestNotificationPayload"""'], {}), "('TestNotificationPayload')\n", (2632, 2659), False, 'from mogan.objects import fields\n'), ((2856, 2912), 'mogan.objects.fields.ObjectField', 'fields.ObjectField', (['"""TestNotificationPayloadEmptySchema"""'], {}), "('TestNotificationPayloadEmptySchema')\n", (2874, 2912), False, 'from mogan.objects import fields\n'), ((12405, 12421), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (12419, 12421), False, 'import mock\n'), ((3690, 3818), 'mogan.notifications.objects.base.EventType', 'notification.EventType', ([], {'object': '"""test_object"""', 'action': 'fields.NotificationAction.UPDATE', 'phase': 'fields.NotificationPhase.START'}), "(object='test_object', action=fields.\n NotificationAction.UPDATE, phase=fields.NotificationPhase.START)\n", (3712, 3818), True, 'from mogan.notifications.objects import base as notification\n'), ((3886, 3959), 'mogan.notifications.objects.base.NotificationPublisher', 'notification.NotificationPublisher', ([], {'host': '"""fake-host"""', 'binary': '"""mogan-fake"""'}), "(host='fake-host', binary='mogan-fake')\n", (3920, 3959), True, 'from mogan.notifications.objects import base as notification\n'), ((5351, 5441), 'mogan.notifications.objects.base.EventType', 'notification.EventType', ([], {'object': '"""test_object"""', 'action': 'fields.NotificationAction.UPDATE'}), "(object='test_object', action=fields.\n NotificationAction.UPDATE)\n", (5373, 5441), True, 'from mogan.notifications.objects import base as notification\n'), ((5493, 
5566), 'mogan.notifications.objects.base.NotificationPublisher', 'notification.NotificationPublisher', ([], {'host': '"""fake-host"""', 'binary': '"""mogan-fake"""'}), "(host='fake-host', binary='mogan-fake')\n", (5527, 5566), True, 'from mogan.notifications.objects import base as notification\n'), ((6156, 6246), 'mogan.notifications.objects.base.EventType', 'notification.EventType', ([], {'object': '"""test_object"""', 'action': 'fields.NotificationAction.UPDATE'}), "(object='test_object', action=fields.\n NotificationAction.UPDATE)\n", (6178, 6246), True, 'from mogan.notifications.objects import base as notification\n'), ((6298, 6371), 'mogan.notifications.objects.base.NotificationPublisher', 'notification.NotificationPublisher', ([], {'host': '"""fake-host"""', 'binary': '"""mogan-fake"""'}), "(host='fake-host', binary='mogan-fake')\n", (6332, 6371), True, 'from mogan.notifications.objects import base as notification\n'), ((7070, 7160), 'mogan.notifications.objects.base.EventType', 'notification.EventType', ([], {'object': '"""test_object"""', 'action': 'fields.NotificationAction.UPDATE'}), "(object='test_object', action=fields.\n NotificationAction.UPDATE)\n", (7092, 7160), True, 'from mogan.notifications.objects import base as notification\n'), ((7212, 7285), 'mogan.notifications.objects.base.NotificationPublisher', 'notification.NotificationPublisher', ([], {'host': '"""fake-host"""', 'binary': '"""mogan-fake"""'}), "(host='fake-host', binary='mogan-fake')\n", (7246, 7285), True, 'from mogan.notifications.objects import base as notification\n'), ((7825, 7915), 'mogan.notifications.objects.base.EventType', 'notification.EventType', ([], {'object': '"""test_object"""', 'action': 'fields.NotificationAction.UPDATE'}), "(object='test_object', action=fields.\n NotificationAction.UPDATE)\n", (7847, 7915), True, 'from mogan.notifications.objects import base as notification\n'), ((7967, 8040), 'mogan.notifications.objects.base.NotificationPublisher', 'notification.NotificationPublisher', ([], {'host': '"""fake-host"""', 'binary': '"""mogan-fake"""'}), "(host='fake-host', binary='mogan-fake')\n", (8001, 8040), True, 'from mogan.notifications.objects import base as notification\n'), ((10852, 10872), 'mogan.objects.fields.StringField', 'fields.StringField', ([], {}), '()\n', (10870, 10872), False, 'from mogan.objects import fields\n'), ((10919, 10939), 'mogan.objects.fields.StringField', 'fields.StringField', ([], {}), '()\n', (10937, 10939), False, 'from mogan.objects import fields\n'), ((10992, 11013), 'mogan.objects.fields.IntegerField', 'fields.IntegerField', ([], {}), '()\n', (11011, 11013), False, 'from mogan.objects import fields\n')]
from plash.eval import eval, register_macro, shell_escape_args


@register_macro()
def defpm(name, *lines):
    'define a new package manager'

    @register_macro(name, group='package managers')
    @shell_escape_args
    def package_manager(*packages):
        if not packages:
            return
        sh_packages = ' '.join(pkg for pkg in packages)
        expanded_lines = [line.format(sh_packages) for line in lines]
        return eval([['run'] + expanded_lines])

    package_manager.__doc__ = "install packages with {}".format(name)


eval([[
    'defpm',
    'apt',
    'apt-get update',
    'apt-get install -y {}',
], [
    'defpm',
    'add-apt-repository',
    'apt-get install software-properties-common',
    'run add-apt-repository -y {}',
], [
    'defpm',
    'apk',
    'apk update',
    'apk add {}',
], [
    'defpm',
    'yum',
    'yum install -y {}',
], [
    'defpm',
    'dnf',
    'dnf install -y {}',
], [
    'defpm',
    'pip',
    'pip install {}',
], [
    'defpm',
    'pip3',
    'pip3 install {}',
], [
    'defpm',
    'npm',
    'npm install -g {}',
], [
    'defpm',
    'pacman',
    'pacman -Sy --noconfirm {}',
], [
    'defpm',
    'emerge',
    'emerge {}',
]])
[ "plash.eval.eval", "plash.eval.register_macro" ]
[((66, 82), 'plash.eval.register_macro', 'register_macro', ([], {}), '()\n', (80, 82), False, 'from plash.eval import eval, register_macro, shell_escape_args\n'), ((546, 1074), 'plash.eval.eval', 'eval', (["[['defpm', 'apt', 'apt-get update', 'apt-get install -y {}'], ['defpm',\n 'add-apt-repository', 'apt-get install software-properties-common',\n 'run add-apt-repository -y {}'], ['defpm', 'apk', 'apk update',\n 'apk add {}'], ['defpm', 'yum', 'yum install -y {}'], ['defpm', 'dnf',\n 'dnf install -y {}'], ['defpm', 'pip', 'pip install {}'], ['defpm',\n 'pip3', 'pip3 install {}'], ['defpm', 'npm', 'npm install -g {}'], [\n 'defpm', 'pacman', 'pacman -Sy --noconfirm {}'], ['defpm', 'emerge',\n 'emerge {}']]"], {}), "([['defpm', 'apt', 'apt-get update', 'apt-get install -y {}'], ['defpm',\n 'add-apt-repository', 'apt-get install software-properties-common',\n 'run add-apt-repository -y {}'], ['defpm', 'apk', 'apk update',\n 'apk add {}'], ['defpm', 'yum', 'yum install -y {}'], ['defpm', 'dnf',\n 'dnf install -y {}'], ['defpm', 'pip', 'pip install {}'], ['defpm',\n 'pip3', 'pip3 install {}'], ['defpm', 'npm', 'npm install -g {}'], [\n 'defpm', 'pacman', 'pacman -Sy --noconfirm {}'], ['defpm', 'emerge',\n 'emerge {}']])\n", (550, 1074), False, 'from plash.eval import eval, register_macro, shell_escape_args\n'), ((149, 195), 'plash.eval.register_macro', 'register_macro', (['name'], {'group': '"""package managers"""'}), "(name, group='package managers')\n", (163, 195), False, 'from plash.eval import eval, register_macro, shell_escape_args\n'), ((440, 472), 'plash.eval.eval', 'eval', (["[['run'] + expanded_lines]"], {}), "([['run'] + expanded_lines])\n", (444, 472), False, 'from plash.eval import eval, register_macro, shell_escape_args\n')]
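Each defpm call in the module above registers a package-manager macro whose body is the given shell lines with {} replaced by the shell-escaped, space-joined package list. A rough sketch of the expansion, illustrating the data flow rather than quoting plash itself: invoking the generated apt macro with ['git', 'curl'] reduces to

eval([['run',
       'apt-get update',
       'apt-get install -y git curl']])

so every package manager defined this way ends up as a plain run macro over its formatted lines.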
import os
import re
import shutil
import unittest
from pathlib import Path

from dianna.visualization.text import highlight_text


class Example1:
    original_text = 'Doloremque aliquam totam ut. Aspernatur repellendus autem quia deleniti. Natus accusamus ' \
                    'doloribus et in quam officiis veniam et. '
    explanation = [('ut', 25, -0.06405025896517044),
                   ('in', 102, -0.05127647027074053),
                   ('et', 99, 0.02254588506724936),
                   ('quia', 58, -0.0008216335740370412),
                   ('aliquam', 11, -0.0006268298968242725),
                   ('Natus', 73, -0.0005556223616156406),
                   ('totam', 19, -0.0005126140261410219),
                   ('veniam', 119, -0.0005058379023790869),
                   ('quam', 105, -0.0004573258796550468),
                   ('repellendus', 40, -0.0003253862469633824)]


class Example2:
    expected_html = '<html><body><span style="background:rgba(255, 0, 0, 0.08)">such</span> ' \
                    '<span style="background:rgba(255, 0, 0, 0.01)">a</span> <span style="background:rgba(0, 0, 255, 0.800000)">' \
                    'bad</span> <span style="background:rgba(0, 0, 255, 0.059287)">movie</span>.</body></html>\n'
    original_text = 'Such a bad movie.'
    explanation = [('bad', 7, -0.4922624307995777),
                   ('such', 0, 0.04637815000309109),
                   ('movie', 11, -0.03648111256069627),
                   ('a', 5, 0.008377155657765745)]


class MyTestCase(unittest.TestCase):
    temp_folder = 'temp_text_visualization_test'
    html_file_path = str(Path(temp_folder) / 'output.html')

    def test_text_visualization_no_output(self):
        highlight_text(Example1.explanation, original_text=Example1.original_text)
        assert not Path(self.html_file_path).exists()

    def test_text_visualization_html_output_exists(self):
        highlight_text(Example1.explanation, original_text=Example1.original_text,
                       output_html_filename=self.html_file_path)
        assert Path(self.html_file_path).exists()

    def test_text_visualization_html_output_contains_text(self):
        highlight_text(Example1.explanation, original_text=Example1.original_text,
                       output_html_filename=self.html_file_path)
        assert Path(self.html_file_path).exists()
        with open(self.html_file_path, encoding='utf-8') as result_file:
            result = result_file.read()
            for word in _split_text_into_words(Example1.original_text):
                assert word in result

    def test_text_visualization_html_output_is_correct(self):
        highlight_text(Example2.explanation, original_text=Example2.original_text,
                       output_html_filename=self.html_file_path)
        assert Path(self.html_file_path).exists()
        with open(self.html_file_path, encoding='utf-8') as result_file:
            result = result_file.read()
            assert result == Example2.expected_html

    def test_text_visualization_show_plot(self):
        highlight_text(Example1.explanation, original_text=Example1.original_text,
                       show_plot=True)

    def setUp(self) -> None:
        os.mkdir(self.temp_folder)

    def tearDown(self) -> None:
        shutil.rmtree(self.temp_folder, ignore_errors=True)


def _split_text_into_words(text):
    # regex taken from
    # https://stackoverflow.com/questions/12683201/python-re-split-to-split-by-spaces-commas-and-periods-but-not-in-cases-like
    # explanation: split by \s (whitespace), and only split by commas and
    # periods if they are not followed (?!\d) or preceded (?<!\d) by a digit.
    regex = r'\s|(?<!\d)[,.](?!\d)'
    return re.split(regex, text)
[ "re.split", "dianna.visualization.text.highlight_text", "pathlib.Path", "os.mkdir", "shutil.rmtree" ]
[((3723, 3744), 're.split', 're.split', (['regex', 'text'], {}), '(regex, text)\n', (3731, 3744), False, 'import re\n'), ((1716, 1790), 'dianna.visualization.text.highlight_text', 'highlight_text', (['Example1.explanation'], {'original_text': 'Example1.original_text'}), '(Example1.explanation, original_text=Example1.original_text)\n', (1730, 1790), False, 'from dianna.visualization.text import highlight_text\n'), ((1913, 2033), 'dianna.visualization.text.highlight_text', 'highlight_text', (['Example1.explanation'], {'original_text': 'Example1.original_text', 'output_html_filename': 'self.html_file_path'}), '(Example1.explanation, original_text=Example1.original_text,\n output_html_filename=self.html_file_path)\n', (1927, 2033), False, 'from dianna.visualization.text import highlight_text\n'), ((2178, 2298), 'dianna.visualization.text.highlight_text', 'highlight_text', (['Example1.explanation'], {'original_text': 'Example1.original_text', 'output_html_filename': 'self.html_file_path'}), '(Example1.explanation, original_text=Example1.original_text,\n output_html_filename=self.html_file_path)\n', (2192, 2298), False, 'from dianna.visualization.text import highlight_text\n'), ((2655, 2775), 'dianna.visualization.text.highlight_text', 'highlight_text', (['Example2.explanation'], {'original_text': 'Example2.original_text', 'output_html_filename': 'self.html_file_path'}), '(Example2.explanation, original_text=Example2.original_text,\n output_html_filename=self.html_file_path)\n', (2669, 2775), False, 'from dianna.visualization.text import highlight_text\n'), ((3066, 3160), 'dianna.visualization.text.highlight_text', 'highlight_text', (['Example1.explanation'], {'original_text': 'Example1.original_text', 'show_plot': '(True)'}), '(Example1.explanation, original_text=Example1.original_text,\n show_plot=True)\n', (3080, 3160), False, 'from dianna.visualization.text import highlight_text\n'), ((3218, 3244), 'os.mkdir', 'os.mkdir', (['self.temp_folder'], {}), '(self.temp_folder)\n', (3226, 3244), False, 'import os\n'), ((3286, 3337), 'shutil.rmtree', 'shutil.rmtree', (['self.temp_folder'], {'ignore_errors': '(True)'}), '(self.temp_folder, ignore_errors=True)\n', (3299, 3337), False, 'import shutil\n'), ((1623, 1640), 'pathlib.Path', 'Path', (['temp_folder'], {}), '(temp_folder)\n', (1627, 1640), False, 'from pathlib import Path\n'), ((2069, 2094), 'pathlib.Path', 'Path', (['self.html_file_path'], {}), '(self.html_file_path)\n', (2073, 2094), False, 'from pathlib import Path\n'), ((2334, 2359), 'pathlib.Path', 'Path', (['self.html_file_path'], {}), '(self.html_file_path)\n', (2338, 2359), False, 'from pathlib import Path\n'), ((2811, 2836), 'pathlib.Path', 'Path', (['self.html_file_path'], {}), '(self.html_file_path)\n', (2815, 2836), False, 'from pathlib import Path\n'), ((1811, 1836), 'pathlib.Path', 'Path', (['self.html_file_path'], {}), '(self.html_file_path)\n', (1815, 1836), False, 'from pathlib import Path\n')]
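The _split_text_into_words helper in the test module above relies on a regex that splits on whitespace and on commas or periods only when they are not attached to digits, so decimal numbers stay intact. A quick illustration of that behaviour (the sample strings are invented):

import re

regex = r'\s|(?<!\d)[,.](?!\d)'
re.split(regex, 'Such a bad movie.')   # ['Such', 'a', 'bad', 'movie', '']
re.split(regex, 'rated 3.5 overall')   # ['rated', '3.5', 'overall'] - the decimal point is kept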
import os
import sys
import builtins
import versioneer

if sys.version_info[:2] < (3, 7):
    raise RuntimeError("Python version >= 3.7 required.")

builtins.__RBC_SETUP__ = True

if os.path.exists('MANIFEST'):
    os.remove('MANIFEST')

CONDA_BUILD = int(os.environ.get('CONDA_BUILD', '0'))
CONDA_ENV = os.environ.get('CONDA_PREFIX', '') != ''

from setuptools import setup, find_packages  # noqa: E402

DESCRIPTION = "RBC - Remote Backend Compiler Project"
LONG_DESCRIPTION = """
The aim of the Remote Backend Compiler project is to distribute the
tasks of a program JIT compilation process to separate computer systems
using the client-server model. The frontend of the compiler runs on the
client computer and the backend runs on the server computer. The
compiler frontend will send the program code to compiler backend in IR
form where it will be compiled to machine code.
"""


def setup_package():
    src_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    old_path = os.getcwd()
    os.chdir(src_path)
    sys.path.insert(0, src_path)

    if CONDA_BUILD or CONDA_ENV:
        # conda dependencies are specified in meta.yaml or conda
        # environment should provide the correct requirements - using
        # PyPI is unreliable, see below.
        install_requires = []
        setup_requires = []
        tests_require = []
    else:
        # Get requirements via PyPI. Use at your own risk as more than
        # once the numba and llvmlite have not matched.
        install_requires = open('requirements.txt', 'r').read().splitlines()
        setup_requires = ['pytest-runner', 'cffi']
        tests_require = ['pytest']

    metadata = dict(
        name='rbc-project',
        description=DESCRIPTION,
        long_description=LONG_DESCRIPTION,
        license='BSD',
        version=versioneer.get_version(),
        cmdclass=versioneer.get_cmdclass(),
        author='<NAME>',
        maintainer='<NAME>',
        author_email='<EMAIL>',
        url='https://github.com/xnd-project/rbc',
        platforms='Cross Platform',
        classifiers=[
            "Intended Audience :: Developers",
            "License :: OSI Approved :: BSD License",
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
            'Programming Language :: Python :: 3.9',
            "Operating System :: OS Independent",
            "Topic :: Software Development",
        ],
        packages=find_packages(),
        package_data={'': ['*.thrift']},
        cffi_modules=['rbc/rbclib//_rbclib_build.py:ffibuilder'],
        install_requires=install_requires,
        setup_requires=setup_requires,
        tests_require=tests_require,
    )

    try:
        setup(**metadata)
    finally:
        del sys.path[0]
        os.chdir(old_path)
    return


if __name__ == '__main__':
    setup_package()
    del builtins.__RBC_SETUP__
[ "os.path.exists", "sys.path.insert", "setuptools.find_packages", "os.environ.get", "setuptools.setup", "versioneer.get_version", "os.getcwd", "os.chdir", "os.path.abspath", "versioneer.get_cmdclass", "os.remove" ]
[((183, 209), 'os.path.exists', 'os.path.exists', (['"""MANIFEST"""'], {}), "('MANIFEST')\n", (197, 209), False, 'import os\n'), ((215, 236), 'os.remove', 'os.remove', (['"""MANIFEST"""'], {}), "('MANIFEST')\n", (224, 236), False, 'import os\n'), ((256, 290), 'os.environ.get', 'os.environ.get', (['"""CONDA_BUILD"""', '"""0"""'], {}), "('CONDA_BUILD', '0')\n", (270, 290), False, 'import os\n'), ((304, 338), 'os.environ.get', 'os.environ.get', (['"""CONDA_PREFIX"""', '""""""'], {}), "('CONDA_PREFIX', '')\n", (318, 338), False, 'import os\n'), ((981, 992), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (990, 992), False, 'import os\n'), ((997, 1015), 'os.chdir', 'os.chdir', (['src_path'], {}), '(src_path)\n', (1005, 1015), False, 'import os\n'), ((1020, 1048), 'sys.path.insert', 'sys.path.insert', (['(0)', 'src_path'], {}), '(0, src_path)\n', (1035, 1048), False, 'import sys\n'), ((936, 964), 'os.path.abspath', 'os.path.abspath', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (951, 964), False, 'import os\n'), ((2773, 2790), 'setuptools.setup', 'setup', ([], {}), '(**metadata)\n', (2778, 2790), False, 'from setuptools import setup, find_packages\n'), ((2836, 2854), 'os.chdir', 'os.chdir', (['old_path'], {}), '(old_path)\n', (2844, 2854), False, 'import os\n'), ((1808, 1832), 'versioneer.get_version', 'versioneer.get_version', ([], {}), '()\n', (1830, 1832), False, 'import versioneer\n'), ((1851, 1876), 'versioneer.get_cmdclass', 'versioneer.get_cmdclass', ([], {}), '()\n', (1874, 1876), False, 'import versioneer\n'), ((2506, 2521), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (2519, 2521), False, 'from setuptools import setup, find_packages\n')]
""" Schedule adjustments are functions that accept a `datetime` and modify it in some way. Adjustments have the signature `Callable[[datetime], datetime]`. """ from datetime import datetime, timedelta from typing import Callable import pendulum import prefect.schedules.filters def add(interval: timedelta) -> Callable[[datetime], datetime]: """ Adjustment that adds a specified interval to the date. Args: - interval (timedelta): the amount of time to add Returns: - Callable[[datetime], bool]: the adjustment function """ def _adjustment_fn(dt: datetime) -> datetime: return pendulum.instance(dt) + interval return _adjustment_fn def next_weekday(dt: datetime) -> datetime: """ Adjustment that advances a date to the next weekday. If the date is already a weekday, it is returned unadjusted. Args: - dt (datetime): the datetime to adjust Returns: - datetime: the adjusted datetime """ pdt = pendulum.instance(dt) while not prefect.schedules.filters.is_weekday(pdt): pdt = pdt.add(days=1) return pdt
[ "pendulum.instance" ]
[((1002, 1023), 'pendulum.instance', 'pendulum.instance', (['dt'], {}), '(dt)\n', (1019, 1023), False, 'import pendulum\n'), ((633, 654), 'pendulum.instance', 'pendulum.instance', (['dt'], {}), '(dt)\n', (650, 654), False, 'import pendulum\n')]
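Both adjustments above are plain callables over datetimes, so they can be applied directly or combined with the filters module. A small sketch of their behaviour, assuming pendulum is installed; the dates are arbitrary:

from datetime import datetime, timedelta

shift = add(timedelta(hours=6))
shift(datetime(2021, 1, 1, 12, 0))   # 2021-01-01T18:00:00, returned as a pendulum datetime

next_weekday(datetime(2021, 1, 2))    # a Saturday, advanced to Monday 2021-01-04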
#! /usr/bin/env python3
# -*- coding: utf-8 -*-

import math


def f(x):
    # integrand: e**x / x**3
    return math.exp(x) / x**3


def int(a, b):
    # composite Simpson's rule with 104 subintervals of width h
    h = (b - a) / 104
    x_par = a + h        # first odd-indexed node (weight 4)
    x_impar = a + 2*h    # first even-indexed interior node (weight 2)
    soma_par = 0
    soma_impar = 0
    for i in range(52):
        soma_par += f(x_par)
        x_par += 2*h
    for i in range(51):
        soma_impar += f(x_impar)
        x_impar += 2*h
    return (f(a) + f(b) + 4*soma_par + 2*soma_impar) * h/3


print(int(1.9, 9.7))
[ "math.exp" ]
[((82, 93), 'math.exp', 'math.exp', (['x'], {}), '(x)\n', (90, 93), False, 'import math\n')]
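Written out, the quadrature evaluated by the script above is the composite Simpson rule on n = 104 subintervals of width h = (b - a)/104 (here a = 1.9, b = 9.7):

\int_a^b \frac{e^x}{x^3}\,dx \;\approx\; \frac{h}{3}\Bigl[f(a) + 4\sum_{k=1}^{52} f\bigl(a + (2k-1)h\bigr) + 2\sum_{k=1}^{51} f\bigl(a + 2kh\bigr) + f(b)\Bigr]

The 52-term sum collects the odd-indexed nodes (weight 4) and the 51-term sum the even-indexed interior nodes (weight 2), matching the two loops in the code.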