Dataset schema (column, dtype, observed range):

    max_stars_repo_path    string    length 3 to 269
    max_stars_repo_name    string    length 4 to 119
    max_stars_count        int64     0 to 191k
    id                     string    length 1 to 7
    content                string    length 6 to 1.05M
    score                  float64   0.23 to 5.13
    int_score              int64     0 to 5

Records below list the fields in this order: path, repo name, star count, id, content, score, int_score.
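A minimal loading sketch for a dump with this schema (the dataset identifier below is a placeholder assumption, not taken from this dump; any `datasets`-hosted copy with these columns would work the same way):

    # Stream rows and keep well-scored files from starred repositories.
    from datasets import load_dataset

    ds = load_dataset("org/python-code-dump", split="train", streaming=True)  # hypothetical id
    for row in ds:
        if row["int_score"] >= 3 and row["max_stars_count"] > 0:
            print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["score"])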
python/ABC/167/b.py
KATO-Hiro/atcoder-1
0
12791951
<filename>python/ABC/167/b.py
import sys

input = sys.stdin.readline


def main():
    A, B, C, K = map(int, input().split())
    if K <= A:
        ans = K
    elif K <= A + B:
        ans = A
    else:
        ans = A - (K - (A + B))
    print(ans)


if __name__ == "__main__":
    main()
3.09375
3
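A quick sanity check of the record above (a sketch: the input line is a made-up test case, and the expected output follows from the `K <= A + B` branch, which takes both 1-cards and the single 0-card):

    # echo "2 1 1 3" | python3 python/ABC/167/b.py
    # prints: 2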
tuttle/process.py
lexman/tuttle
26
12791952
<reponame>lexman/tuttle
# -*- coding: utf8 -*-
from time import time


class Process:
    """ Class wrapping a process. A process has some input resources, some output resources,
        some code that produces outputs from inputs, a processor that handles the language
        specificities
    """
    def __init__(self, processor, filename, line_num):
        self._start = None
        self._end = None
        self._processor = processor
        self._filename = filename
        self._line_num = line_num
        self._inputs = []
        self._outputs = []
        self._code = ""
        self.log_stdout = None
        self.log_stderr = None
        self._reserved_path = None
        self._success = None
        self._error_message = None
        self._id = "{}_{}".format(self._filename, self._line_num)

    @property
    def start(self):
        return self._start

    @property
    def end(self):
        return self._end

    @property
    def id(self):
        return self._id

    @property
    def code(self):
        return self._code

    # TODO Use a setter ?
    def set_code(self, code):
        self._code = code

    @property
    def success(self):
        return self._success

    @property
    def error_message(self):
        return self._error_message

    @property
    def processor(self):
        return self._processor

    def add_input(self, input_res):
        self._inputs.append(input_res)

    def add_output(self, output):
        self._outputs.append(output)

    def iter_inputs(self):
        for res in self._inputs:
            yield res

    def iter_outputs(self):
        for res in self._outputs:
            yield res

    def has_outputs(self):
        return len(self._outputs) > 0

    def has_input(self, resource):
        return resource in self._inputs

    def input_urls(self):
        return {resource.url for resource in self._inputs}

    def output_urls(self):
        return {resource.url for resource in self._outputs}

    def sorted_inputs_string(self):
        sorted_inputs_urls = sorted([resource.url for resource in self.iter_inputs()])
        return ",".join(sorted_inputs_urls)

    def depends_on_process(self, process):
        """ Returns True if self depends on a resource created by process """
        for output_resource in process.iter_outputs():
            if self.has_input(output_resource):
                return True
        return False

    def pick_an_output(self):
        if not self.has_outputs():
            return None
        return self._outputs[0]

    def retrieve_execution_info(self, process):
        """ Copy the execution info (all the properties set by function run())
        from another process
        :param process:
        :return:
        """
        self._start = process.start
        self._end = process.end
        self._success = process.success
        self.log_stdout = process.log_stdout
        self.log_stderr = process.log_stderr
        self._reserved_path = process._reserved_path

    def reset_execution_info(self):
        """ Reset the execution info (all the properties set by function run())
        because the resources produced by this process have been invalidated
        :return:
        """
        self._start = None
        self._end = None
        self.log_stdout = None
        self.log_stderr = None
        self._success = None

    def static_check(self):
        """
        Runs a verification that the process won't obviously fail. This is used for
        static analysis before any process is run
        """
        self._processor.static_check(self)

    def assign_paths(self, reserved_path, log_stdout, log_stderr):
        assert reserved_path is not None
        self._reserved_path = reserved_path
        self.log_stdout = log_stdout
        self.log_stderr = log_stderr

    def set_start(self):
        self._start = time()

    def set_end(self, success, error_msg):
        self._end = time()
        self._success = success
        self._error_message = error_msg

    def missing_outputs(self):
        """
        :return: the list of output resources for this process that do not exist
                 (empty if all outputs exist)
        """
        result = []
        for resource in self.iter_outputs():
            if not resource.exists():
                result.append(resource)
        return result
2.96875
3
data/scrape/link_extractors/__init__.py
jamesrharwood/journal-guidelines
0
12791953
from .extractors import extract_links, extract_links_by_strategy
1.09375
1
monitor_and_notify.py
System-Exit/IoT-Climate
0
12791954
#!/usr/bin/env python3
import requests
import json
import sqlite3
import sense_hat
import time
from pushbullet_api import PushbulletAPI
from climate_util import ClimateUtil


# Monitor and notification class
class MonitorNotifier:

    def __init__(self, databaseName):
        # Get sense hat access
        self.__sense = sense_hat.SenseHat()
        # Load JSON config variables
        with open("config.json", "r") as jsonFile:
            config = json.load(jsonFile)
            self.__minTemp = float(config["min_temperature"])
            self.__maxTemp = float(config["max_temperature"])
            self.__minHumid = float(config["min_humidity"])
            self.__maxHumid = float(config["max_humidity"])
        # Load Pushbullet API access
        self.__pushbulletAPI = PushbulletAPI()
        # Connect to database for logging climate data
        self.__connectToDatabase(databaseName)

    # Connects to climate database if it exists, otherwise creating one
    def __connectToDatabase(self, databaseName):
        # Connect to database file
        self.__database = sqlite3.connect(databaseName)
        with self.__database:
            # Get cursor for database
            cursor = self.__database.cursor()
            # Create climate data table if it doesn't exist
            cursor.execute("CREATE TABLE IF NOT EXISTS ClimateData \
                (time DATETIME, temperature NUMERIC, humidity NUMERIC)")
            # Create notification table if it doesn't exist
            cursor.execute("CREATE TABLE IF NOT EXISTS Notifications \
                (timesent DATETIME)")
            # Commit creation of tables
            self.__database.commit()

    # Record the current temp data into database
    def recordClimate(self):
        # Get and validate current climate information
        try:
            temperature = float(ClimateUtil.getCalibratedTemp(self.__sense))
            humidity = float(self.__sense.get_humidity())
        except ValueError:
            print("Warning: Invalid climate data recorded, "
                  "stopping climate monitor")
            raise SystemExit()
        # Record climate information in database and send notification
        with self.__database:
            cursor = self.__database.cursor()
            cursor.execute("INSERT INTO ClimateData (time, temperature, humidity) \
                VALUES (DATETIME('now', 'localtime'), ?, ?)",
                           (temperature, humidity))
            self.__database.commit()
        # Check if notification should be sent
        self.__checkAndNotify(temperature, humidity)

    # Sends a pushbullet notification if temperature is out of range
    # and a notification has not already been sent today
    def __checkAndNotify(self, temperature, humidity):
        # If outside of config range, check database if notification
        # has already been sent today
        if temperature < self.__minTemp or temperature > self.__maxTemp or \
                humidity < self.__minHumid or humidity > self.__maxHumid:
            # Check if notification has already been sent today
            with self.__database:
                cursor = self.__database.cursor()
                cursor.execute(
                    "SELECT COUNT(*) \
                    FROM Notifications \
                    WHERE strftime('%d-%m-%Y', timesent) \
                    = strftime('%d-%m-%Y', DATETIME('now', 'localtime'))")
                recordCount = cursor.fetchone()[0]
                # If a notification has already been sent, return immediately
                if recordCount >= 1:
                    return
            # Construct pushbullet message strings
            title = "Raspberry Pi climate alert"
            message = "Warning,"
            if temperature < self.__minTemp:
                message += " temperature is too low,"
            if temperature > self.__maxTemp:
                message += " temperature is too high,"
            if humidity < self.__minHumid:
                message += " humidity is too low,"
            if humidity > self.__maxHumid:
                message += " humidity is too high,"
            message = message.rstrip(',') + "."
            # Wait until program is able to connect to internet
            while not ClimateUtil.checkConnection():
                time.sleep(1)
            # Send pushbullet message
            self.__pushbulletAPI.sendNotification(title, message)
            # Record sending of notification
            with self.__database:
                cursor = self.__database.cursor()
                cursor.execute("INSERT INTO Notifications (timesent) \
                    VALUES (DATETIME('now', 'localtime'))")
                self.__database.commit()


# Main method
if __name__ == "__main__":
    # Database name variable
    databaseName = "climate_data.db"
    # Initialize monitor class
    monitor = MonitorNotifier(databaseName)
    # Check and record climate conditions every minute
    while True:
        monitor.recordClimate()
        time.sleep(60)
2.9375
3
core/models/ipagnn.py
google-research/runtime-error-prediction
10
12791955
<reponame>google-research/runtime-error-prediction
# Copyright (C) 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""IPA-GNN models."""

from typing import Any

from flax import linen as nn
import jax
import jax.numpy as jnp

from core.data import error_kinds
from core.modules.ipagnn import compressive_ipagnn
from core.modules.ipagnn import encoder
from core.modules.ipagnn import ipagnn
from core.modules.ipagnn import logit_math
from core.modules.ipagnn import spans
from core.modules.ipagnn import raise_contributions as raise_contributions_lib
from third_party.flax_examples import transformer_modules


class IPAGNN(nn.Module):
  config: Any
  info: Any
  transformer_config: transformer_modules.TransformerConfig
  docstring_transformer_config: transformer_modules.TransformerConfig

  def setup(self):
    config = self.config
    vocab_size = self.info.vocab_size
    max_tokens = config.max_tokens
    max_num_nodes = config.max_num_nodes
    max_num_edges = config.max_num_edges
    max_steps = config.max_steps

    self.node_span_encoder = spans.NodeSpanEncoder(
        info=self.info,
        config=config,
        transformer_config=self.transformer_config,
        max_tokens=max_tokens,
        max_num_nodes=max_num_nodes,
        use_span_index_encoder=False,
        use_span_start_indicators=False,
    )

    if config.use_film or config.use_cross_attention:
      self.docstring_token_encoder = encoder.TokenEncoder(
          transformer_config=self.docstring_transformer_config,
          num_embeddings=vocab_size,
          features=config.hidden_size,
      )
      self.docstring_encoder = encoder.TransformerEncoder(
          config=self.docstring_transformer_config)

    if config.use_compressive_ipagnn:
      self.ipagnn = compressive_ipagnn.SkipIPAGNN(
          config=config,
          info=self.info,
          max_steps=max_steps,
      )
    else:
      self.ipagnn = ipagnn.IPAGNNModule(
          info=self.info,
          config=config,
          max_steps=max_steps,
      )

  @nn.compact
  def __call__(self, x):
    config = self.config
    info = self.info
    tokens = x['tokens']
    docstring_tokens = x['docstring_tokens']
    # tokens.shape: batch_size, max_tokens
    batch_size = tokens.shape[0]

    encoded_inputs = self.node_span_encoder(
        tokens, x['node_token_span_starts'], x['node_token_span_ends'],
        x['num_nodes'])
    # encoded_inputs.shape: batch_size, max_num_nodes, hidden_size

    if config.use_film or config.use_cross_attention:
      docstring_token_embeddings = self.docstring_token_encoder(
          docstring_tokens)
      docstring_mask = docstring_tokens > 0
      docstring_encoder_mask = nn.make_attention_mask(
          docstring_mask, docstring_mask, dtype=jnp.float32)
      # docstring_token_embeddings.shape: batch_size, max_tokens, hidden_size
      docstring_embeddings = self.docstring_encoder(
          docstring_token_embeddings, encoder_mask=docstring_encoder_mask)
    else:
      docstring_embeddings = None
      docstring_mask = None

    ipagnn_output = self.ipagnn(
        node_embeddings=encoded_inputs,
        docstring_embeddings=docstring_embeddings,
        docstring_mask=docstring_mask,
        edge_sources=x['edge_sources'],
        edge_dests=x['edge_dests'],
        edge_types=x['edge_types'],
        true_indexes=x['true_branch_nodes'],
        false_indexes=x['false_branch_nodes'],
        raise_indexes=x['raise_nodes'],
        start_node_indexes=x['start_index'],
        exit_node_indexes=x['exit_index'],
        post_domination_matrix=x['post_domination_matrix'],
        step_limits=x['step_limit'],
    )
    # ipagnn_output['exit_node_embeddings'].shape: batch_size, hidden_size
    # ipagnn_output['raise_node_embeddings'].shape: batch_size, hidden_size
    # ipagnn_output['exit_node_instruction_pointer'].shape: batch_size
    # ipagnn_output['raise_node_instruction_pointer'].shape: batch_size

    exit_node_embeddings = ipagnn_output['exit_node_embeddings']
    # exit_node_embeddings.shape: batch_size, hidden_size
    exit_node_instruction_pointer = ipagnn_output['exit_node_instruction_pointer']
    # exit_node_instruction_pointer.shape: batch_size

    num_classes = info.num_classes
    if config.raise_in_ipagnn:
      raise_node_embeddings = ipagnn_output['raise_node_embeddings']
      # raise_node_embeddings.shape: batch_size, hidden_size
      raise_node_instruction_pointer = ipagnn_output['raise_node_instruction_pointer']
      # raise_node_instruction_pointer.shape: batch_size

      if len(info.no_error_ids) == 1:
        # Multiple error classes; only one No-Error class.
        no_error_id = info.no_error_ids[0]
        logits = nn.Dense(
            features=num_classes, name='output'
        )(raise_node_embeddings)  # P(e | yes exception)
        # logits.shape: batch_size, num_classes
        logits = logits.at[:, no_error_id].set(-jnp.inf)
        no_error_logits = jax.vmap(logit_math.get_additional_logit)(
            exit_node_instruction_pointer + 1e-9,
            raise_node_instruction_pointer + 1e-9,
            logits)
        # no_error_logits.shape: batch_size
        logits = logits.at[:, no_error_id].set(no_error_logits)
      elif len(info.no_error_ids) > 1:
        # Multiple No-Error classes; only one error class.
        if len(info.error_ids) > 1:
          raise NotImplementedError('Multiple error classes and multiple no-error classes.')
        assert len(info.error_ids) == 1
        error_id = info.error_ids[0]
        logits = nn.Dense(
            features=num_classes, name='output'
        )(exit_node_embeddings)  # P(e | no exception)
        # logits.shape: batch_size, num_classes
        logits = logits.at[:, error_id].set(-jnp.inf)
        error_logits = jax.vmap(logit_math.get_additional_logit)(
            raise_node_instruction_pointer + 1e-9,
            exit_node_instruction_pointer + 1e-9,
            logits)
        # error_logits.shape: batch_size
        logits = logits.at[:, error_id].set(error_logits)
      else:
        raise ValueError('Tried using Exception IPA-GNN on data with no errors.')
    else:
      logits = nn.Dense(
          features=num_classes, name='output'
      )(exit_node_embeddings)
      # logits.shape: batch_size, num_classes
    return logits, ipagnn_output
1.570313
2
rock/text.py
rockstack/rock
1
12791956
from __future__ import unicode_literals


def _(text):
    return text.strip('\n')


USAGE = _("""
Usage: rock [--help] [--env=ENV] [--path=PATH] [--runtime=RUNTIME] command
""")

HELP = _("""
  --help             show help message
  --verbose          show script while running
  --dry-run          show script without running
  --version          show version

project:
  --env=ENV          set env
  --path=PATH        set path
  --runtime=RUNTIME  set runtime

commands:
  build              run build
  test               run tests
  run                run in environment
  clean              clean project files

other commands:
  config             show project configuration
  env                show evaluable environment variables
  init               generates project skeleton
  runtime            show installed runtimes
""")

CONFIG_USAGE = _("""
Usage: rock config [--format=FORMAT]
""")

CONFIG_HELP = _("""
  --help             show help message
  --format           set output format (json, yaml)
""")

ENV_USAGE = _("""
Usage: rock env
""")

ENV_HELP = _("""
  --help             show help message
""")

RUNTIME_USAGE = _("""
Usage: rock runtime
""")

RUNTIME_HELP = _("""
  --help             show help message
""")
2.515625
3
modules/2.79/bpy/types/TextureNodeCurveRGB.py
cmbasnett/fake-bpy-module
0
12791957
<reponame>cmbasnett/fake-bpy-module
TextureNodeCurveRGB.mapping = None
0.949219
1
gui/status_bar.py
myrmarachne/minesweeper
0
12791958
import Tkinter as tk


class StatusBar:
    def __init__(self, root, label):
        self.label = tk.StringVar()
        self.label.set(label)
        self.root = root
        self.initialize()

    def initialize(self):
        frame = tk.Frame(self.root, relief=tk.SUNKEN)
        label = tk.Label(frame, font=('arial', 12, 'normal'),
                         textvariable=self.label, padx=10, pady=10)
        label.pack(fill=tk.X)
        frame.pack(side=tk.BOTTOM)

    def update(self, label):
        percent = int(float(label) * 100)
        self.label.set("No mine probability: " + str(percent) + "%")

    def clear(self):
        self.label.set("")
3.359375
3
variant-calling/gatk/gatk.py
gis-rpd/pipelines
25
12791959
#!/usr/bin/env python3
"""{PIPELINE_NAME} pipeline (version: {PIPELINE_VERSION}): creates
pipeline-specific config files to given output directory and runs the
pipeline (unless otherwise requested).
"""
# generic usage {PIPELINE_NAME} and {PIPELINE_VERSION} replaced while
# printing usage

#--- standard library imports
#
import sys
import os
import logging

#--- third-party imports
#
import yaml

#--- project specific imports
#
# add lib dir for this pipeline installation to PYTHONPATH
LIB_PATH = os.path.abspath(
    os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "lib"))
if LIB_PATH not in sys.path:
    sys.path.insert(0, LIB_PATH)
from readunits import get_samples_and_readunits_from_cfgfile
from readunits import get_readunits_from_args
from pipelines import get_pipeline_version
from pipelines import PipelineHandler
from pipelines import logger as aux_logger
from pipelines import get_cluster_cfgfile
from pipelines import default_argparser
import configargparse

__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "2016 Genome Institute of Singapore"
__license__ = "The MIT License (MIT)"

# only dump() and following do not automatically create aliases
yaml.Dumper.ignore_aliases = lambda *args: True

PIPELINE_BASEDIR = os.path.dirname(sys.argv[0])
CFG_DIR = os.path.join(PIPELINE_BASEDIR, "cfg")

# same as folder name. also used for cluster job names
PIPELINE_NAME = "gatk"

MARK_DUPS = True

# global logger
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
    '[{asctime}] {levelname:8s} {filename} {message}', style='{'))
logger.addHandler(handler)


def main():
    """main function
    """
    default_parser = default_argparser(CFG_DIR, with_readunits=True)
    parser = configargparse.ArgumentParser(
        description=__doc__.format(PIPELINE_NAME=PIPELINE_NAME,
                                   PIPELINE_VERSION=get_pipeline_version()),
        parents=[default_parser])
    parser._optionals.title = "Arguments"

    # pipeline specific args
    parser.add_argument('-t', "--seqtype", required=True,
                        choices=['WGS', 'WES', 'targeted'],
                        help="Sequencing type")
    parser.add_argument('-l', "--bed",
                        help="Bed file listing regions of interest."
                        " Required for WES and targeted sequencing.")
    default = 4
    parser.add_argument("-c", "--hc-nct", default=default, type=int,
                        help="Number of Haplotype Caller threads (per region cluster)."
                        " Values>1 reported to make Haplotype Caller unstable (default={})".format(default))
    default = 100
    parser.add_argument('-i', "--interval-padding", default=default,
                        help="Interval padding (for non-WGS only; default = {})".format(default))
    parser.add_argument('-j', "--joint-calls", action='store_true',
                        help="Perform joint/cohort calling (requires multisample input)")
    parser.add_argument('--raw-bam',
                        help="Advanced: Injects raw (pre-dedup, pre-BQSR etc.) BAM (overwrites fq options)."
                        " WARNING: reference needs to match pipeline requirements")
    parser.add_argument('--proc-bam',
                        help="Advanced: Injects processed (post-dedup, post-BQSR etc.) BAM (overwrites fq options)."
                        " WARNING: reference and pre-processing need to match pipeline requirements")
    # FIXME can be achieved with --until rule as well
    parser.add_argument('--bam-only', action='store_true',
                        help="Only process up until BAM file")
    parser.add_argument('--gvcf-only', action='store_true',
                        help="Only process up until GVCF file")
    args = parser.parse_args()

    # Repeatable -v and -q for setting logging level.
    # See https://www.reddit.com/r/Python/comments/3nctlm/what_python_tools_should_i_be_using_on_every/
    # and https://gist.github.com/andreas-wilm/b6031a84a33e652680d4
    # script -vv -> DEBUG
    # script -v -> INFO
    # script -> WARNING
    # script -q -> ERROR
    # script -qq -> CRITICAL
    # script -qqq -> no logging at all
    logger.setLevel(logging.WARN + 10*args.quiet - 10*args.verbose)
    aux_logger.setLevel(logging.WARN + 10*args.quiet - 10*args.verbose)

    if os.path.exists(args.outdir):
        logger.fatal("Output directory %s already exists", args.outdir)
        sys.exit(1)

    # samples is a dictionary with sample names as key (mostly just
    # one) and readunit keys as value. readunits is a dict with
    # readunits (think: fastq pairs with attributes) as value
    if args.sample_cfg:
        if any([args.fq1, args.fq2, args.sample, args.raw_bam, args.proc_bam]):
            logger.fatal("Config file overrides fastq, sample and BAM arguments."
                         " Use one or the other")
            sys.exit(1)
        if not os.path.exists(args.sample_cfg):
            logger.fatal("Config file %s does not exist", args.sample_cfg)
            sys.exit(1)
        samples, readunits = get_samples_and_readunits_from_cfgfile(args.sample_cfg)
    else:  # no sample config, so input is either fastq or existing bam
        samples = dict()
        if not args.sample:
            logger.fatal("Need sample name if not using config file")
            sys.exit(1)
        if args.raw_bam or args.proc_bam:
            assert not args.fq1, ("BAM injection overwrites fastq arguments")
            if args.raw_bam:
                assert os.path.exists(args.raw_bam)
                assert not args.proc_bam, ("Cannot inject raw and processed BAM")
            if args.proc_bam:
                assert os.path.exists(args.proc_bam)
                assert not args.raw_bam, ("Cannot inject raw and processed BAM")
            readunits = dict()
            samples[args.sample] = []
        elif args.fq1:
            readunits = get_readunits_from_args(args.fq1, args.fq2)
            # all readunits go into this one sample specified on the command-line
            samples[args.sample] = list(readunits.keys())
        else:
            logger.fatal("Need at least one fastq file as argument if not using config file")
            sys.exit(1)

    if args.seqtype in ['WES', 'targeted']:
        if not args.bed:
            logger.fatal("Analysis of exome and targeted sequence runs requires a bed file")
            sys.exit(1)
        else:
            if not os.path.exists(args.bed):
                logger.fatal("Bed file %s does not exist", args.bed)
                sys.exit(1)

    if args.joint_calls:
        if len(samples) < 2:
            logger.fatal("Need at least two samples for joint calling")
            sys.exit(1)

    # turn arguments into cfg_dict (gets merged with other configs late)
    #
    cfg_dict = dict()
    cfg_dict['readunits'] = readunits
    cfg_dict['samples'] = samples
    cfg_dict['seqtype'] = args.seqtype
    cfg_dict['intervals'] = os.path.abspath(args.bed) if args.bed else None  # always safe, might be used for WGS as well
    cfg_dict['mark_dups'] = MARK_DUPS
    cfg_dict['bam_only'] = args.bam_only
    cfg_dict['gvcf_only'] = args.gvcf_only
    cfg_dict['hc_nct'] = args.hc_nct
    cfg_dict['joint_calls'] = args.joint_calls
    cfg_dict['interval_padding'] = args.interval_padding

    pipeline_handler = PipelineHandler(
        PIPELINE_NAME, PIPELINE_BASEDIR,
        args, cfg_dict,
        cluster_cfgfile=get_cluster_cfgfile(CFG_DIR))
    pipeline_handler.setup_env()

    # Inject existing BAM by symlinking (everything upstream is temporary anyway)
    # WARNING: filename has to match definition in Snakefile!
    if args.raw_bam:
        target = os.path.join(args.outdir, "out", args.sample,
                              "{}.bwamem.bam".format(args.sample))
        os.makedirs(os.path.dirname(target))
        os.symlink(os.path.abspath(args.raw_bam), target)

        src_bai = os.path.abspath(args.raw_bam) + ".bai"
        if os.path.exists(src_bai):
            os.symlink(src_bai, target + ".bai")
    elif args.proc_bam:
        target = os.path.join(args.outdir, "out", args.sample,
                              "{}.bwamem".format(args.sample))
        if cfg_dict['mark_dups']:
            target += ".dedup"
        if cfg_dict['seqtype'] != 'targeted':
            target += ".bqsr"
        target += ".bam"
        os.makedirs(os.path.dirname(target))
        os.symlink(os.path.abspath(args.proc_bam), target)
        if os.path.exists(os.path.abspath(args.proc_bam) + ".bai"):
            os.symlink(os.path.abspath(args.proc_bam) + ".bai", target + ".bai")

    pipeline_handler.submit(args.no_run)


if __name__ == "__main__":
    main()
1.953125
2
backend/app/seeds/palettes.py
saadjs/Color.ly
3
12791960
<filename>backend/app/seeds/palettes.py
from app.models import db, User, Palette


def seed_palettes():
    canadian = Palette(title='Metalics', colors=[
        {"name": "Jigglypuff", "color": "#ff9ff3"},
        {"name": "Casandora Yellow", "color": "#feca57"},
        {"name": "Pastel Red", "color": "#ff6b6b"},
        {"name": "Megaman", "color": "#48dbfb"},
        {"name": "Wild Caribbean Green", "color": "#1dd1a1"},
    ], user_id=User.query.filter_by(username='monkey').first().id)

    chinese = Palette(title='Chromium', colors=[
        {"name": "Red", "color": "#F44336"},
        {"name": "Pink", "color": "#E91E63"},
        {"name": "Purple", "color": "#9C27B0"},
        {"name": "Deep Purple", "color": "#673AB7"},
        {"name": "indigo", "color": "#3F51B5"},
    ], user_id=User.query.filter_by(username='Demo').first().id)

    french = Palette(title='Rainbow', colors=[
        {"name": "Flat Flesh", "color": "#fad390"},
        {"name": "<NAME>", "color": "#f8c291"},
        {"name": "Livid", "color": "#6a89cc"},
        {"name": "Spray", "color": "#82ccdd"},
        {"name": "Paradise Green", "color": "#b8e994"},
    ], user_id=User.query.filter_by(username='Demo').first().id)

    indian = Palette(title='Tropical', colors=[
        {"name": "Orchid Orange", "color": "#FEA47F"},
        {"name": "Spiro Disco Ball", "color": "#25CCF7"},
        {"name": "Honey Glow", "color": "#EAB543"},
        {"name": "Sweet Garden", "color": "#55E6C1"},
        {"name": "Falling Star", "color": "#CAD3C8"},
    ], user_id=User.query.filter_by(username='saad').first().id)

    spanish = Palette(title='Floral', colors=[
        {"name": "Jacksons Purple", "color": "#40407a"},
        {"name": "C64 Purple", "color": "#706fd3"},
        {"name": "Swan White", "color": "#f7f1e3"},
        {"name": "Summer Sky", "color": "#34ace0"},
        {"name": "Celestial Green", "color": "#33d9b2"},
    ], user_id=User.query.filter_by(username='bob').first().id)

    british = Palette(title='Pastels', colors=[
        {"name": "<NAME>", "color": "#00a8ff"},
        {"name": "Periwinkle", "color": "#9c88ff"},
        {"name": "Rise-N-Shine", "color": "#fbc531"},
        {"name": "Download Progress", "color": "#4cd137"},
        {"name": "Seabrook", "color": "#487eb0"},
    ], user_id=User.query.filter_by(username='cow').first().id)

    aussie = Palette(title='Jungle', colors=[
        {"name": "Beekeeper", "color": "#f6e58d"},
        {"name": "Spiced Nectarine", "color": "#ffbe76"},
        {"name": "Pink Glamour", "color": "#ff7979"},
        {"name": "June Bud", "color": "#badc58"},
        {"name": "Coastal Breeze", "color": "#dff9fb"},
    ], user_id=User.query.filter_by(username='monkey').first().id)

    american = Palette(title='City Lights', colors=[
        {"name": "Light Greenish Blue", "color": "#55efc4"},
        {"name": "Faded Poster", "color": "#81ecec"},
        {"name": "Green Darner Tail", "color": "#74b9ff"},
        {"name": "Shy Moment", "color": "#a29bfe"},
        {"name": "City Lights", "color": "#dfe6e9"},
    ], user_id=User.query.filter_by(username='Demo').first().id)

    dutch = Palette(title='Primrose', colors=[
        {"name": "Sunflower", "color": "#FFC312"},
        {"name": "Energos", "color": "#C4E538"},
        {"name": "Blue Martina", "color": "#12CBC4"},
        {"name": "Lavender Rose", "color": "#FDA7DF"},
        {"name": "Bara Rose", "color": "#ED4C67"},
    ], user_id=User.query.filter_by(username='saad').first().id)

    russian = Palette(title='Fruit Punch', colors=[
        {"name": "Creamy Peach", "color": "#f3a683"},
        {"name": "Rosy Highlight", "color": "#f7d794"},
        {"name": "Soft Blue", "color": "#778beb"},
        {"name": "Brewed Mustard", "color": "#e77f67"},
        {"name": "Old Geranium", "color": "#cf6a87"},
    ], user_id=User.query.filter_by(username='Demo').first().id)

    german = Palette(title='Bora Bora', colors=[
        {"name": "Fusion Red", "color": "#fc5c65"},
        {"name": "Orange Hibiscus", "color": "#fd9644"},
        {"name": "Flirtatious", "color": "#fed330"},
        {"name": "Reptile Green", "color": "#26de81"},
        {"name": "Maximum Blue Green", "color": "#2bcbba"},
    ], user_id=User.query.filter_by(username='Demo').first().id)

    turkish = Palette(title='Desert', colors=[
        {"name": "Bright Lilac", "color": "#cd84f1"},
        {"name": "Pretty Please", "color": "#ffcccc"},
        {"name": "Light Red", "color": "#ff4d4d"},
        {"name": "<NAME>", "color": "#ffaf40"},
        {"name": "Unmellow Yellow", "color": "#fffa65"},
    ], user_id=User.query.filter_by(username='saad').first().id)

    swedish = Palette(title='Ikea', colors=[
        {"name": "Highlighter Pink", "color": "#ef5777"},
        {"name": "Dark Periwinkle", "color": "#575fcf"},
        {"name": "Megaman", "color": "#4bcffa"},
        {"name": "Fresh Turquoise", "color": "#34e7e4"},
        {"name": "Minty Green", "color": "#0be881"},
    ], user_id=User.query.filter_by(username='bob').first().id)

    db.session.add(canadian)
    db.session.add(chinese)
    db.session.add(french)
    db.session.add(indian)
    db.session.add(spanish)
    db.session.add(british)
    db.session.add(aussie)
    db.session.add(american)
    db.session.add(dutch)
    db.session.add(russian)
    db.session.add(german)
    db.session.add(turkish)
    db.session.add(swedish)
    db.session.commit()


def undo_palettes():
    db.session.execute('TRUNCATE palettes;')
    db.session.commit()
2.3125
2
Station.py
kriete/AutomaticSocibQcRecheck
0
12791961
<filename>Station.py
from utils import *
from netCDF4 import Dataset
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
formatter = logging.Formatter('[%(asctime)s] p%(process)s %(lineno)d - %(name)s - %(levelname)s - %(message)s',
                              '%m-%d %H:%M:%S')
handler = logging.FileHandler('station.log')
handler.setFormatter(formatter)
handler.setLevel(logging.INFO)
logger.addHandler(handler)


class StationManager:
    def __init__(self, qc_definitions, year=None, month=None, station_names=None):
        if (year is None) or (month is None):
            year, month = read_year_month_config()
        if station_names is None:
            single_stations = read_single_stations_config()
        self.station_links = get_mooring_stations(
            'http://thredds.socib.es/thredds/catalog/mooring/weather_station/catalog.html',
            year, month, only_single_stations=single_stations)
        self.year = year
        self.month = month
        self.qc_definitions = qc_definitions
        self.station_container = []
        self.create_stations()
        self.assign_qc_processes()
        self.process_stations()
        self.print_station_information()

    def print_station_information(self):
        for station in self.station_container:
            station.log_station_information()

    def create_stations(self):
        for link in self.station_links:
            if check_link_availability(link):
                name = get_station_name_from_link('weather_station/', '/L1/', link)
                logger.info('Found data for station ' + name)
                self.station_container.append(Station(link, name, self.year, self.month))
            else:
                logger.info(link + ' does not exist. Will not use this station.')

    def assign_qc_processes(self):
        # TODO: also get dat stuff from database goddamn it
        # TODO: outsource dat here, should not be necessarily be here.... !
        # Btw, the philosophy of these definitions also suck (ye ye ye ye I am completely
        # aware that they are hardcoded and I should have really avoided that)
        axys_watchMate_meteo = ['buoy_canaldeibiza-scb_met010', 'buoy_bahiadepalma-scb_met008']
        meteoStation_aanderaa = ['station_salines-ime_met002']
        meteoStation_vaisala = ['mobims_calamillor-scb_met001']
        meteoStation_vaisala_airp_mbar = ['mobims_playadepalma-scb_met012', 'station_parcbit-scb_met004',
                                          'station_galfi-scb_met005', 'station_esporles-scb_met003',
                                          'mobims_sonbou-scb_met011']
        for station in self.station_container:
            cur_name = station.name
            if cur_name in axys_watchMate_meteo:
                cur_process_name = 'Axys_WatchMate_Meteo'
                station.process_name = cur_process_name
                station.process_definitions = self.qc_definitions.processes[cur_process_name]
            elif cur_name in meteoStation_aanderaa:
                cur_process_name = 'MeteoStation_Aanderaa'
                station.process_name = cur_process_name
                station.process_definitions = self.qc_definitions.processes[cur_process_name]
            elif cur_name in meteoStation_vaisala:
                cur_process_name = 'MeteoStation_Vaisala'
                station.process_name = cur_process_name
                station.process_definitions = self.qc_definitions.processes[cur_process_name]
            elif cur_name in meteoStation_vaisala_airp_mbar:
                cur_process_name = 'MeteoStation_Vaisala_Airp_Mbar'
                station.process_name = cur_process_name
                station.process_definitions = self.qc_definitions.processes[cur_process_name]
            else:
                logger.warning('No Process defined for this station: ' + cur_name + '. Will use default now.')
                cur_process_name = 'MeteoStation_Vaisala_Airp_Mbar'
                station.process_name = cur_process_name
                station.process_definitions = self.qc_definitions.processes[cur_process_name]
            station.get_defined_variables_of_interest()

    def process_stations(self):
        for station in self.station_container:
            logger.info('Processing station ' + station.name + '...')
            station.perform_qc()
            logger.info('Plotting and saving station ' + station.name + '...')
            station.run_through_variables_of_interest()
            logger.info('Processing station ' + station.name + ' finished.')


class Station:
    def __init__(self, link, name, year, month):
        # TODO: fix converted_time1 plz
        self.year = year
        self.month = month
        self.link = link
        self.name = name
        self.root = Dataset(link)
        self.time = get_data_array(self.root['time'])
        self.converted_time = get_md_datenum(self.time)
        date_converted = [datetime.fromtimestamp(ts) for ts in self.time]
        self.converted_time1 = get_pandas_timestamp_series(date_converted)
        translate_time = self.converted_time1.apply(lambda x: x.to_pydatetime())
        self.converted_time_backward = map(totimestamp, translate_time)
        self.process_name = ''
        self.process_definitions = None
        self.variables_of_interest = []
        self.qc_variables_of_interest = []
        self.definitions_of_interest = dict()
        self.qc_output = dict()

    def get_defined_variables_of_interest(self):
        for method_name, method_definition in self.process_definitions.method_container.items():
            var_name = method_definition.title
            if not self.check_variable_existence(var_name):
                continue
            self.variables_of_interest.append(var_name)
            self.definitions_of_interest[method_definition.title] = method_definition
            qc_variable_name = self.root[var_name].ancillary_variables
            self.qc_variables_of_interest.append(qc_variable_name)

    def log_station_information(self):
        logger.info('---')
        logger.info('Station ' + self.name)
        logger.info('Provided by source link ' + self.link)
        logger.info('Has been assigned to the process ' + self.process_name)
        logger.info('Has processes defined for the variables ' +
                    str(self.process_definitions.method_container.keys()))
        logger.info('The definitions were connected with the variables ' + str(self.variables_of_interest))

    def check_variable_existence(self, variable_name):
        try:
            self.root[variable_name]
        except IndexError:
            logger.warning('Variable of interest ' + variable_name + ' not found. Will pop it out.')
            return False
        return True

    def run_through_variables_of_interest(self):
        variable_counter = 0
        tab_holder = []
        for variable_name in self.variables_of_interest:
            variable = self.root[variable_name]
            qc_variable = self.root[self.qc_variables_of_interest[variable_counter]]
            variable_data = get_data_array(variable)
            qc_variable_data = get_data_array(qc_variable)
            # new_qc_variable_data = np.asarray(np.ones((1, len(qc_variable_data)))[0])
            new_qc_variable_data = self.qc_output[variable_name]
            difference_highlights_idx = np.where(qc_variable_data != new_qc_variable_data)[0]
            tab_holder.append(get_bokeh_tab(self.converted_time1, variable_data, variable,
                                            self.converted_time_backward, qc_data=qc_variable_data,
                                            new_qc_data=new_qc_variable_data,
                                            diff_idx=difference_highlights_idx))
            variable_counter += 1
        plot_bokeh(tab_holder, self.name, self.year, self.month)

    def perform_qc(self):
        for variable_name in self.variables_of_interest:
            logger.info('Processing ' + variable_name)
            variable = self.root[variable_name]
            variable_data = get_data_array(variable)
            self.qc_output[variable_name] = np.ones((1, len(variable_data)))[0]
            nan_idx = np.isnan(variable_data)
            self.qc_output[variable_name][nan_idx] = 9
            method_definitions = self.definitions_of_interest[variable_name].get_method_arrays()
            cur_qc_methods = method_definitions[0]
            cur_qc_input_parameters = method_definitions[1]
            cur_qc_lookup = method_definitions[2]
            if len(cur_qc_methods) != len(cur_qc_lookup):
                logger.error("Incorrect amount of flags with respect to the QC methods set.")
                return
            qc_counter = 0
            for qc_method in cur_qc_methods:
                input_parameters = cur_qc_input_parameters[qc_counter]
                if qc_method == 'range':
                    if len(input_parameters) != 2:
                        logger.error('Not enough input parameters.')
                        continue
                    self.qc_output[variable_name] = compute_valid_range(
                        variable_data, input_parameters[0], input_parameters[1],
                        cur_qc_lookup[qc_counter], self.qc_output[variable_name])
                elif qc_method == 'spike':
                    if len(input_parameters) != 1:
                        logger.error('Not enough input parameters.')
                        continue
                    self.qc_output[variable_name] = compute_spike(
                        variable_data, input_parameters[0],
                        cur_qc_lookup[qc_counter], self.qc_output[variable_name])
                elif qc_method == 'gradient':
                    if len(input_parameters) != 2:
                        logger.error('Not enough input parameters.')
                        continue
                    # self.qc_output[variable_name] = compute_simple_gradient(variable_data,
                    #     input_parameters[1], cur_qc_lookup[qc_counter], self.qc_output[variable_name])
                    self.qc_output[variable_name] = compute_extended_gradient(
                        variable_data, self.time, input_parameters[1], input_parameters[0],
                        cur_qc_lookup[qc_counter], self.qc_output[variable_name])
                elif qc_method == 'stationary':
                    if len(input_parameters) != 2:
                        logger.error('Not enough input parameters.')
                        continue
                    self.qc_output[variable_name] = compute_stationary(
                        variable_data, self.time, input_parameters[0], input_parameters[1],
                        cur_qc_lookup[qc_counter], self.qc_output[variable_name])
                qc_counter += 1
2.453125
2
packages/functional/extract_vol.py
justi/m2g
12
12791962
#!/usr/bin/env python

# Copyright 2015 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# extract_b0.py
# Created by <NAME> on 2015-02-21.
# Email: <EMAIL>
# Copyright (c) 2015. All rights reserved.

from argparse import ArgumentParser
from nibabel import load, save, Nifti1Image
from numpy import where, loadtxt


def extract_vol(dti_img, bvals, b0_vol):
    print "Loading dti data..."
    d_img = load(dti_img)
    b0_data = d_img.get_data()
    b0_head = d_img.get_header()

    print "Loading bvals file..."
    b = loadtxt(bvals)

    print "Extracting B0 volume..."
    b0_data = b0_data[:, :, :, int(where(b == 0)[0])]

    print "Updating image header..."
    b0_head.set_data_shape(b0_head.get_data_shape()[0:3])

    print "Saving..."
    out = Nifti1Image(data=b0_data, affine=d_img.get_affine(), header=b0_head)
    save(out, b0_vol)
    print "Complete!"


def main():
    parser = ArgumentParser(description="")
    parser.add_argument("dti", action="store",
                        help="The DTI image we want to extract B0 from (.nii, .nii.gz)")
    parser.add_argument("bvals", action="store",
                        help="The b-value file corresponding to the DTI image (.b)")
    parser.add_argument("b0", action="store",
                        help="The output file location of the B0 scan (.nii, .nii.gz)")
    result = parser.parse_args()

    extract_vol(result.dti, result.bvals, result.b0)


if __name__ == "__main__":
    main()
2.4375
2
galileo/WildSnakePy/WildSnake.py
garoa/dojo-shield
8
12791963
<filename>galileo/WildSnakePy/WildSnake.py
"""
WildSnakePy - WildSnakeUNO ported to Galileo (mraa native API)

A two-segment snake slithers aimlessly over the display.
On intersections, the next step is chosen at random.

Most of the logic is encoded in the `moves` array which lists
possible next steps for each current position and direction.

Snake head position and direction is coded like pictured below,
i.e. when the snake head is at the middle segment going right,
the code is 6, going left in the same place is code 13.

     >:0 <:7
      -----
 ^:5 |     | v:1
 v:12|  >:6| ^:8
      -----
 ^:4 | <:13| v:2
 v:11|     | ^:9
      -----
     <:3 >:10

To understand this diagram, read:
    > as a right arrow
    < as a left arrow
    v as a down arrow
    ^ as an up arrow
"""

# TODO: works, but needs cleanup

from mraa import *
import time, random

# {first_choice, second_choice*}
# * -1 means there is no second_choice
moves = [
    [1, -1],   # 0
    [13, 2],   # 1
    [3, -1],   # 2
    [4, -1],   # 3
    [6, 5],    # 4
    [0, -1],   # 5
    [2, 8],    # 6
    [12, -1],  # 7
    [7, -1],   # 8
    [13, 8],   # 9
    [9, -1],   # 10
    [10, -1],  # 11
    [6, 11],   # 12
    [5, 11]    # 13
]

print moves

"""
    A
  -----
F |   | B
  | G |
  -----
E |   | C
  | D |
  -----
"""
#  A   B  C  D  E   F   G
#int display[] = {11, 10, 8, 7, 6, 12, 13};

# DojoXXG
#  A  B  G  E   D   C   G  F
#int display[] = {8, 9, 7, 12, 11, 10, 7, 13};

#  A  B  C   D   E   F   G
SEGMENTS = 7
pins = [8, 9, 10, 11, 12, 13, 7]
display = [Gpio(i) for i in pins]
print repr(display)

head = 8  # segment B
tail = 9  # segment A

pot = Aio(0)


def setup():
    for i in range(SEGMENTS):
        display[i].dir(DIR_OUT)  # MRAA
        print repr(display[i])
        display[i].write(1)
        print 'pin %d on' % pins[i]
        time.sleep(2)
        display[i].write(0)
        print 'tested i %d' % pins[i]
    display[tail % 7].write(1)  # MRAA


def loop():
    global head
    global tail
    print ' head ' + str(head % 7)
    print ' tail ' + str(tail % 7)
    display[head % 7].write(1)  # MRAA
    delay = pot.read()
    print 'delay %s' % str(delay)
    time.sleep(delay / 1000.0)
    display[tail % 7].write(0)  # MRAA
    tail = head
    choices = moves[head]
    if (choices[1] == -1):
        head = choices[0]  # no second choice
    else:
        choice = random.choice([0, 1])
        print ' choice ' + str(choice)
        head = choices[choice]


print 'setup()'
setup()
print 'loop()'
while True:
    loop()
3.4375
3
test/testGraphGrid.py
KodeWorker/CircuitRouter
0
12791964
# -*- coding: utf-8 -*-
"""
Unit Test On Grid

description: This is the unit test for basic grid.
content:
    - TestGrid
    - TestGrid8D
author: Shin-Fu (<NAME>
latest update:
    - 2019/05/10
    - 2019/05/14 add TestGridDB
    - 2019/05/15 add test_case for DynamicBoundGridWithShortcuts
"""
import os
import sys
import unittest

root = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(root)
from graph.grid import GridWithWeights
from graph.grid8d import EightDirectionGrid
from graph.gridDB import DynamicBoundGrid, DynamicBoundGridWithShortcuts
from graph.duality_graph import DualityGraph


class TestGrid(unittest.TestCase):
    def __init__(self, methodName='runTest'):
        super(TestGrid, self).__init__(methodName)
        self.g = GridWithWeights(4, 4)

    def test_case1(self):
        self.assertSetEqual(set(self.g.neighbors((1, 1))), set([(0, 1), (2, 1), (1, 0), (1, 2)]))
        self.assertSetEqual(set(self.g.neighbors((1, 0))), set([(0, 0), (1, 1), (2, 0)]))
        self.assertSetEqual(set(self.g.neighbors((3, 3))), set([(3, 2), (2, 3)]))


class TestGrid8D(unittest.TestCase):
    def __init__(self, methodName='runTest'):
        super(TestGrid8D, self).__init__(methodName)
        self.g = EightDirectionGrid(4, 4)

    def test_case1(self):
        self.assertSetEqual(set(self.g.neighbors((1, 1))),
                            set([(2, 0), (1, 0), (0, 0), (2, 1), (0, 1), (2, 2), (1, 2), (0, 2)]))


class TestGridDB(unittest.TestCase):
    def __init__(self, methodName='runTest'):
        super(TestGridDB, self).__init__(methodName)
        self.g1 = DynamicBoundGrid(4, 4)
        self.g1.set_search((0, 0), (3, 3))
        self.g2 = DynamicBoundGridWithShortcuts(4, 4)
        self.g2.set_search((0, 0), (3, 3))

    def test_case1(self):
        self.assertSetEqual(set(self.g1.neighbors((0, 0))), set([(1, 0), (0, 1), (1, 1)]))

    def test_case2(self):
        self.assertSetEqual(set(self.g2.neighbors((0, 0))), set([(1, 0), (0, 1), (1, 1)]))


class TestDualityGraph(unittest.TestCase):
    def __init__(self, methodName='runTest'):
        super(TestDualityGraph, self).__init__(methodName)
        self.g1 = DualityGraph(4, 4)
        self.g1.set_search((0, 0), (3, 3))

    def test_case1(self):
        self.assertSetEqual(set(self.g1.neighbors((0, 0))), set([(3, 0), (0, 3), (3, 3)]))


if __name__ == '__main__':
    unittest.main(verbosity=1)
3.109375
3
check_hbase_regionservers_requests_balance.py
adolci/nagios-plugins
0
12791965
#!/usr/bin/env python
#  vim:ts=4:sts=4:sw=4:et
#
#  Author: <NAME>
#  Date: 2018-07-13 22:46:34 +0100 (Fri, 13 Jul 2018)
#
#  https://github.com/harisekhon/nagios-plugins
#
#  License: see accompanying Hari Sekhon LICENSE file
#
#  If you're using my code you're welcome to connect with me on LinkedIn
#  and optionally send me feedback to help steer this or other code I publish
#
#  https://www.linkedin.com/in/harisekhon
#

"""

Nagios Plugin to check HBase RegionServer requests imbalance via the HMaster UI

Tested on Apache HBase 0.95, 0.96, 0.98, 1.0, 1.1, 1.2, 1.3, 1.4, 2.0, 2.1

"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

#import logging
#import json
import os
import sys
import traceback
try:
    from bs4 import BeautifulSoup
except ImportError:
    print(traceback.format_exc(), end='')
    sys.exit(4)
srcdir = os.path.abspath(os.path.dirname(__file__))
libdir = os.path.join(srcdir, 'pylib')
sys.path.append(libdir)
try:
    # pylint: disable=wrong-import-position
    from harisekhon.utils import isInt, support_msg, UnknownError, plural
    from harisekhon import RestNagiosPlugin
except ImportError as _:
    print(traceback.format_exc(), end='')
    sys.exit(4)

__author__ = '<NAME>'
__version__ = '0.3.0'


class CheckHBaseRegionServerBalance(RestNagiosPlugin):

    def __init__(self):
        # Python 2.x
        super(CheckHBaseRegionServerBalance, self).__init__()
        # Python 3.x
        # super().__init__()
        self.name = ['HBase Master', 'HBase']
        self.default_port = 16010
        self.path = '/master-status'
        self.auth = False
        self.json = False
        self.msg = 'HBase msg not defined'

    def add_options(self):
        super(CheckHBaseRegionServerBalance, self).add_options()
        self.add_thresholds(default_warning=50)

    def process_options(self):
        super(CheckHBaseRegionServerBalance, self).process_options()
        self.validate_thresholds(percent=True, optional=True)

    def parse(self, req):
        soup = BeautifulSoup(req.content, 'html.parser')
        #if log.isEnabledFor(logging.DEBUG):
        #    log.debug("BeautifulSoup prettified:\n%s\n%s", soup.prettify(), '='*80)
        # this masks underlying exception
        #try:
        tab = soup.find('div', {'id': 'tab_baseStats'})
        table = tab.find_next('table')
        rows = table.findChildren('tr')
        if len(rows) < 2:
            raise UnknownError('no regionserver rows found in base stats table! {}'.format(support_msg()))
        # HBase 1.1 in HDP 2.3: ServerName | Start time | Requests Per Second | Num. Regions
        # HBase 1.2 (Apache):   ServerName | Start time | Version | Requests per Second | Num. Regions
        # HBase 1.4 (Apache):   ServerName | Start time | Last Contact | Version | Requests Per Second | Num. Regions
        th_list = rows[0].findChildren('th')
        if len(th_list) < 4:
            raise UnknownError('no table header for base stats table!')
        expected_header = 'Requests Per Second'
        col_index = len(th_list) - 2
        found_header = th_list[col_index].text
        if found_header != expected_header:
            raise UnknownError("wrong table header found for column 4! Expected '{}' but got '{}'. {}"\
                               .format(expected_header, found_header, support_msg()))
        stats = {}
        for row in rows[1:]:
            cols = row.findChildren('td')
            if len(cols) < 4:
                raise UnknownError('4th column in table not found! {}'.format(support_msg()))
            regionserver = cols[0].text.strip().split(',')[0]
            if 'Total:' in regionserver:
                break
            reqs_per_sec = cols[col_index].text.strip()
            if not isInt(reqs_per_sec):
                raise UnknownError("non-integer found in Requests Per Second column for regionserver '{}'. {}"\
                                   .format(regionserver, support_msg()))
            # fix for this is to cast string '1.0' to float and then cast to int
            # ValueError: invalid literal for int() with base 10: '1.0'
            stats[regionserver] = int(float(reqs_per_sec))
        self.process_stats(stats)
        #except (AttributeError, TypeError):
        #    raise UnknownError('failed to parse HBase Master UI status page. {}'.format(support_msg()))

    def process_stats(self, stats):
        lowest_requests = None
        highest_requests = None
        lowest_regionserver = None
        highest_regionserver = None
        for regionserver in stats:
            if lowest_requests is None:
                lowest_requests = stats[regionserver]
                lowest_regionserver = regionserver
            if highest_requests is None:
                highest_requests = stats[regionserver]
                highest_regionserver = regionserver
            if stats[regionserver] > highest_requests:
                highest_requests = stats[regionserver]
                highest_regionserver = regionserver
            if stats[regionserver] < lowest_requests:
                lowest_requests = stats[regionserver]
                lowest_regionserver = regionserver
        # simple algo - let me know if you think can be a better calculation
        imbalance = (highest_requests - lowest_requests) / max(highest_requests, 1) * 100
        num_regionservers = len(stats)
        self.msg = 'HBase RegionServers reqs/sec imbalance = {:.0f}% across {} RegionServer{}'\
                   .format(imbalance, num_regionservers, plural(num_regionservers))
        self.check_thresholds(imbalance)
        if self.verbose or not self.is_ok():
            self.msg += ' [min reqs/sec={} on {} / max reqs/sec={} on {}]'\
                        .format(lowest_requests, lowest_regionserver, highest_requests, highest_regionserver)
        self.msg += ' | reqs_per_sec_balance={:.2f}%{} lowest_requests_per_sec={} highest_requests_per_sec={}'\
                    .format(imbalance, self.get_perf_thresholds(), lowest_requests, highest_requests)


if __name__ == '__main__':
    CheckHBaseRegionServerBalance().main()
1.640625
2
main.py
standbit/Comics-publisher
0
12791966
import os
import random
from os.path import splitext
from urllib.parse import urlparse

import requests
from dotenv import load_dotenv


def get_file_extension(link):
    link_path = urlparse(link).path
    extension = splitext(link_path)[-1]
    return extension


def get_last_comic_num():
    url = "https://xkcd.com/info.0.json"
    response = requests.get(url)
    response.raise_for_status()
    last_comic_num = response.json()["num"]
    return last_comic_num


def download_comic(url, filename):
    response = requests.get(url)
    response.raise_for_status()
    with open(filename, "wb") as file:
        file.write(response.content)


def fetch_random_comic():
    first_comic_num = 1
    last_comic_num = int(get_last_comic_num())
    comic_num = random.randint(first_comic_num, last_comic_num)
    url = f"https://xkcd.com/{comic_num}/info.0.json"
    response = requests.get(url)
    response.raise_for_status()
    converted_response = response.json()
    comments = converted_response["alt"]
    comic_link = converted_response["img"]
    extension = get_file_extension(comic_link)
    comic_name = converted_response["safe_title"]
    filename = f"{comic_name}{extension}"
    download_comic(comic_link, filename)
    return filename, comments


def check_api_response(api_response):
    if "error" in api_response:
        raise requests.HTTPError(
            "VK API error",
            api_response["error"]["error_msg"]
        )


def get_server_link(token):
    url = "https://api.vk.com/method/photos.getWallUploadServer"
    payload = {
        "access_token": token,
        "group_id": 212094963,
        "v": 5.131,
    }
    response = requests.get(url, params=payload)
    response.raise_for_status()
    converted_response = response.json()
    check_api_response(converted_response)
    server_link = converted_response["response"]["upload_url"]
    return server_link


def upload_img_to_server(filename, upload_url):
    with open(filename, "rb") as file:
        files = {
            "photo": file,
        }
        response = requests.post(upload_url, files=files)
    response.raise_for_status()
    server_response = response.json()
    check_api_response(server_response)
    return server_response


def upload_img_to_group(token, photo, server, hash_parameter):
    url = "https://api.vk.com/method/photos.saveWallPhoto"
    payload = {
        "access_token": token,
        "group_id": 212094963,
        "v": 5.131,
        "photo": photo,
        "server": server,
        "hash": hash_parameter,
    }
    response = requests.post(url, params=payload)
    response.raise_for_status()
    vk_response = response.json()
    check_api_response(vk_response)
    return vk_response


def publish_comic(token, comments, owner_id, media_id):
    url = "https://api.vk.com/method/wall.post"
    payload = {
        "owner_id": -212094963,
        "from_group": 1,
        "message": comments,
        "access_token": token,
        "v": 5.131,
        "attachments": f"photo{owner_id}_{media_id}"
    }
    response = requests.post(url, params=payload)
    response.raise_for_status()
    check_api_response(response.json())


def main():
    load_dotenv()
    vk_token = os.getenv("VK_ACCESS_TOKEN")
    try:
        filename, comments = fetch_random_comic()
        server_link = get_server_link(vk_token)
        server_response = upload_img_to_server(filename, server_link)
        uploaded_img = server_response["photo"]
        server_num = server_response["server"]
        server_hash = server_response["hash"]
        vk_response = upload_img_to_group(
            vk_token, uploaded_img, server_num, server_hash)
        group_owner_id = vk_response["response"][0]["owner_id"]
        media_id = vk_response["response"][0]["id"]
        publish_comic(
            vk_token, comments, group_owner_id, media_id)
    except requests.HTTPError as err:
        print(err)
    except requests.ConnectionError as err:
        print("Connection Error. Check Internet connection.\n", str(err))
    except OSError as err:
        print("Error: %s - %s." % (err.filename, err.strerror))
    finally:
        os.remove(f"./{filename}")


if __name__ == "__main__":
    main()
3.09375
3
fitbox/consultas/admin.py
ravellys/fitbox
0
12791967
# Register your models here.
from django.contrib.admin import register, ModelAdmin

from fitbox.consultas.models import Consulta


@register(Consulta)
class ConsultaAdmin(ModelAdmin):
    list_filter = ('paciente',)
    prepopulated_fields = {'slug': ('descricao',)}
1.765625
2
naughty_string_validator/__init__.py
sachinpali146/naughty_string_validator_python
0
12791968
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '0.1.0'

from .naughty_string_validator import NaughtyStringValidator, SRC_DIRECTORY
1.210938
1
src/model/config.py
aphostrophy/simplexity-AI
0
12791969
<filename>src/model/config.py<gh_stars>0
from src.constant import ShapeConstant


class Config:
    """
    Class representation for configuration needed in game

    [ATTRIBUTES]
        row: int -> boards configuration row shape
        col: int -> boards configuration column shape
        game_type: int (Look at GameConstant for available type) ->
            game configuration for game type, could be bot vs bot,
            player vs bot, and player vs player
        player_choice: int -> Could be 0 or 1 (only needed for player vs bot)
        thinking_time: float -> Maximal time for bot to think
            (only needed for player vs bot or bot vs bot)
        is_dump: bool -> is model loaded from bin file
    """

    def __init__(
        self,
        row: int,
        col: int,
        game_type: int,
        player_choice: int,
        is_dump: bool,
        thinking_time: float,
    ):
        self.row = row
        self.col = col
        self.is_dump = is_dump

        n_quota = row * col / 2
        self.quota = [
            {
                ShapeConstant.CROSS: n_quota // 2,
                ShapeConstant.CIRCLE: n_quota - (n_quota // 2),
            },
            {
                ShapeConstant.CROSS: n_quota - (n_quota // 2),
                ShapeConstant.CIRCLE: n_quota // 2,
            },
        ]

        self.game_type = game_type
        self.player_choice = player_choice  # 0 or 1
        if self.player_choice == None:
            self.player_choice = -1
        self.thinking_time = thinking_time

    def __str__(self):
        ret = '[Configuration]\n'
        ret += f'row: {self.row}\n'
        ret += f'col: {self.col}\n'
        ret += f'is_dump: {self.is_dump}\n'
        ret += f'game_type: {self.game_type}\n'
        ret += f'player_choice: {self.player_choice}\n'
        ret += f'thinking_time: {self.thinking_time}\n'
        return ret
2.921875
3
api/listings.py
derKiez/kiezBackend
0
12791970
<filename>api/listings.py
import datetime

from bson import ObjectId
from flask import Response, request
from utils import json_encode
from flask.views import MethodView

from models.listings import Listing, ListingComment
from auth import authorize


class ListingsView(MethodView):

    @authorize
    def get(self):
        zipcode = request.user.zipcode
        limit = request.args.get("limit", 20)
        offset = request.args.get("offset", 0)
        if not zipcode:
            return Response(status=400)
        listings = Listing.q.filter({"zipcode": zipcode})\
                            .skip(offset)\
                            .limit(limit)\
                            .sort("created_at", -1).all()
        listings = list(listings)
        has_more = len(listings) >= 20
        offset = None
        if listings:
            offset = listings[-1]._id
        meta = {"next_offset": offset, "has_more": has_more}
        response_body = {"listings": [i.serialize() for i in listings],
                         "meta": meta}
        return Response(json_encode(response_body))

    @authorize
    def post(self):
        user = request.user
        data = request.json
        text = data.get("text")
        owner = request.user.serialize()
        zipcode = user.zipcode
        listing = Listing(text=text, owner=owner, zipcode=zipcode)
        listing.save()
        return Response(status=201)


class CommentListView(MethodView):

    @authorize
    def get(self, listing_id):
        listing = Listing.q.get(_id=ObjectId(listing_id))
        limit = request.args.get("limit", 20)
        offset = request.args.get("offset", 0)
        if not listing:
            return Response(status=404)
        listing_comments = ListingComment.q.filter(
            {"listing.id": ObjectId(listing_id)})\
            .skip(offset)\
            .sort("-created_at")\
            .limit(limit)\
            .all()
        comments = list(listing_comments)
        has_more = len(comments) >= 20
        offset = None
        if comments:
            offset = comments[-1]._id
        meta = {"next_offset": offset, "has_more": has_more}
        response_body = {"comments": [i.serialize() for i in comments],
                         "meta": meta}
        return Response(json_encode(response_body))

    @authorize
    def post(self, listing_id):
        listing = Listing.q.get(_id=ObjectId(listing_id))
        if not listing:
            return Response(status=404)
        data = request.json
        text = data.get("text")
        is_private = data.get("is_private")
        parent = data.get("parent")
        owner = request.user.serialize()
        comment = ListingComment(text=text, parent=parent,
                                 listing=listing.serialize(), owner=owner)
        comment.save()
        return Response(status=201)
2.53125
3
leetcode/e_374.py
mmore21/competitive
0
12791971
<reponame>mmore21/competitive<filename>leetcode/e_374.py
"""
Problem: 374 - Guess Number Higher or Lower
Difficulty: Easy
URL: https://leetcode.com/problems/guess-number-higher-or-lower/
"""

class Solution:
    def guessNumber(self, n: int) -> int:
        lo = 0
        hi = n

        while lo <= hi:
            mid = (hi + lo) // 2
            res = guess(mid)
            if res == 0:
                return mid
            elif res > 0:
                lo = mid + 1
            else:
                hi = mid - 1

        return -1
3.28125
3
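On LeetCode the guess API in the record above is predefined by the judge; to exercise the binary search locally it has to be stubbed. A minimal harness under that assumption (the stub is not part of the original file):

pick = 6  # the judge's hidden number

def guess(num: int) -> int:
    # mirrors the LeetCode API: 0 = correct, 1 = pick is higher, -1 = pick is lower
    if num == pick:
        return 0
    return 1 if num < pick else -1

assert Solution().guessNumber(10) == 6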
lexer.py
billyeatcookies/Avocado
1
12791972
from tokens import Token, TokenType whitespace = " \n\t" digits = "0123456789" class Lexer: def __init__(self, code): self.source = code + '\n' self.cur_char = None self.cur_pos = -1 self.advance() def advance(self, pos=1): self.cur_pos += pos try: self.cur_char = self.source[self.cur_pos] except IndexError: self.cur_char = None def peek(self): try: return self.source[self.cur_pos + 1] except IndexError: return None def generate_tokens(self): while self.cur_char is not None: if self.cur_char in whitespace: self.advance() elif self.cur_char == "." or self.cur_char.isdigit(): yield self.generate_number() elif self.cur_char == "+": self.advance() yield Token(TokenType.plus_token) elif self.cur_char == "-": self.advance() yield Token(TokenType.minus_token) elif self.cur_char == "%": self.advance() yield Token(TokenType.percent_token) elif self.cur_char == "*": if self.peek() == "*": self.advance(2) yield Token(TokenType.star_star_token) else: self.advance() yield Token(TokenType.star_token) elif self.cur_char == "/": if self.peek() == "/": self.advance(2) yield Token(TokenType.slash_slash_token) else: self.advance() yield Token(TokenType.slash_token) elif self.cur_char == "(": self.advance() yield Token(TokenType.left_parentheses_token) elif self.cur_char == ")": self.advance() yield Token(TokenType.right_parentheses_token) else: raise Exception(f"Illegal character '{self.cur_char}'") def generate_number(self): decimal_point_count = 0 number_str = self.cur_char self.advance() while self.cur_char is not None and (self.cur_char == "." or self.cur_char.isdigit()): if self.cur_char == ".": decimal_point_count += 1 if decimal_point_count > 1: break number_str += self.cur_char self.advance() if number_str.startswith("."): number_str = '0' + number_str if number_str.endswith("."): number_str += '0' return Token(TokenType.number_token, float(number_str)) # 1 + 2 * 3 # # + # / \ # a * # / \ # b c # (1 + 2) * 3 # # * # / \ # + c # / \ # a b
3.5
4
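A quick driver for the lexer record above, assuming the file is saved as lexer.py next to the tokens module it imports (which is not shown in this dump):

from lexer import Lexer

# exercises the two-character operators too: ** and // are matched via peek()
for token in Lexer("1 + 2 * 3 // (4 - .5)").generate_tokens():
    print(token)  # number_token 1.0, plus_token, number_token 2.0, star_token, ...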
clear.py
tanaka-yoshi10/desk-calendar
0
12791973
#!/usr/bin/python3 import sys import os libdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'epd') if os.path.exists(libdir): sys.path.append(libdir) import epd7in5_V2 epd = epd7in5_V2.EPD() epd.init() epd.Clear() epd.sleep() print("e-Paper clear & sleep done.")
1.953125
2
modules/SIGA/formats/compressed/fm_gzip.py
naaya17/carpe
56
12791974
<reponame>naaya17/carpe # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild from pkg_resources import parse_version from modules.SIGA.kaitaistruct import __version__ as ks_version, KaitaiStruct, KaitaiStream, BytesIO from enum import Enum if parse_version(ks_version) < parse_version('0.7'): raise Exception("Incompatible Kaitai Struct Python API: 0.7 or later is required, but you have %s" % (ks_version)) class fm_GZip(KaitaiStruct): class EnumCompression(Enum): stored = 0 compressed = 1 packed = 2 lzhed = 3 reserved_4 = 4 reserved_5 = 5 reserved_6 = 6 reserved_7 = 7 deflate = 8 class EnumOs(Enum): fat_filesystem = 0 amiga = 1 vms = 2 unix = 3 vm_cms = 4 atari_tos = 5 hpfs_filesystem = 6 macintosh = 7 z_system = 8 cpm = 9 tops_20 = 10 ntfs_filesystem = 11 qdos = 12 acorn_riscos = 13 unknown = 255 def __init__(self, _io, _parent=None, _root=None): self._io = _io self._parent = _parent self._root = _root if _root else self self._read() def _read(self): self.header = self._root.GzipHeader(self._io, self, self._root) self.compressed = self._io.read_bytes(((self._io.size() - self._io.pos()) - 8)) self.crc32 = self._io.read_u4le() self.uncompressed_sized = self._io.read_u4le() class GzipHeader(KaitaiStruct): def __init__(self, _io, _parent=None, _root=None): self._io = _io self._parent = _parent self._root = _root if _root else self self._read() def _read(self): self.magic = self._io.read_bytes(2) self.compression_method = self._root.EnumCompression(self._io.read_u1()) self.flag_reserved_2bits = self._io.read_bits_int(2) self.flag_encrypted = self._io.read_bits_int(1) != 0 self.flag_comment = self._io.read_bits_int(1) != 0 self.flag_name = self._io.read_bits_int(1) != 0 self.flag_extra = self._io.read_bits_int(1) != 0 self.flag_continuation = self._io.read_bits_int(1) != 0 self.flag_ascii_text = self._io.read_bits_int(1) != 0 self._io.align_to_byte() self.modification_time = self._io.read_u4le() self.extra_flags = self._io.read_u1() self.operating_system = self._root.EnumOs(self._io.read_u1()) if self.flag_continuation == True: self.part_number = self._io.read_u2le() if self.flag_extra == True: self.extra_len = self._io.read_u2le() if self.flag_extra == True: self.extra_bytes = self._io.read_bytes(self.extra_len) if self.flag_name == True: self.original_file_name = (self._io.read_bytes_term(0, False, True, True)).decode(u"EUC-KR") if self.flag_encrypted == True: self.encryption_header = self._io.read_bytes(12)
1.992188
2
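Kaitai-generated parsers like fm_GZip are normally driven through the runtime's from_file/from_bytes constructors. A hedged usage sketch, assuming the bundled kaitaistruct module keeps the standard KaitaiStruct API (the path is a placeholder):

g = fm_GZip.from_file("sample.gz")
print(g.header.compression_method)  # EnumCompression.deflate for ordinary gzip
print(g.header.operating_system)    # EnumOs member taken from the OS byte
print(g.uncompressed_sized)         # trailing ISIZE field: inflated length mod 2**32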
src/CIL.py
Tigre-Buti/cool-compiler-2020
0
12791975
class CILObject: pass class CILInstructionNode: def __init__(self, destination=None, params=None): self.destination = destination self.params = params if params is not None else [] def instructionPrint(self): print(str(type(self)) + " " + str(self.destination) + "<-" + str(self.params)) class CILTypeCheck(CILInstructionNode): # Returns True if the type is correct; it is passed a variable and a variable holding a string pass class CILBinaryOperator(CILInstructionNode): pass class CILAritmetic(CILBinaryOperator): pass class CILComparison(CILBinaryOperator): pass class CILPlus(CILAritmetic): pass class CILMinus(CILAritmetic): pass class CILMult(CILAritmetic): pass class CILDiv(CILAritmetic): pass class CILLesser(CILComparison): pass class CILLesserEqual(CILComparison): pass class CILEqual(CILComparison): pass class CILDifferent(CILComparison): pass class CILLabel(CILInstructionNode): pass class CILConditionalJump(CILInstructionNode): pass class CILJump(CILInstructionNode): pass class CILUnaryOperator(CILInstructionNode): pass class CILNot(CILUnaryOperator): pass class CILIntComplement(CILUnaryOperator): pass class CILComplement(CILUnaryOperator): pass class CILIsVoid(CILUnaryOperator): pass class CILAssign(CILUnaryOperator): pass class CILMemory(CILInstructionNode): pass class CILAllocate(CILMemory): pass class CILMethodInstruction(CILInstructionNode): pass class CILCall(CILMethodInstruction): pass class CILVirtualCall(CILMethodInstruction): pass class CILArgument(CILMethodInstruction): pass class CILReturn(CILMethodInstruction): pass class CILStringInstruction(CILInstructionNode): pass class CILStringLoad(CILStringInstruction): pass class CILStringLenght(CILStringInstruction): pass class CILStringConcat(CILStringInstruction): pass class CILStringSubstring(CILStringInstruction): pass class CILStringEqual(CILStringInstruction): pass class CILIOInstruction(CILInstructionNode): pass class CILOutString(CILIOInstruction): pass class CILOutInt(CILIOInstruction): pass class CILInString(CILIOInstruction): pass class CILInInt(CILIOInstruction): pass class CILAbort(CILInstructionNode): pass class CILCopy(CILInstructionNode): pass class CILClassMethod: def __init__(self, localname, globalname): self.localname = localname self.globalname = globalname class CILAttribute: def __init__(self, name, attributeType="Object"): self.name = name self.attributeType = attributeType class CILClass: def __init__(self, name, listaAtributos=None, listaMetodos=None, parent=None): self.name = name self.parent = parent self.listaAtributos = listaAtributos if listaAtributos is not None else [] self.listaMetodos = listaMetodos if listaMetodos is not None else [] class CILDataDeclaration: def __init__(self, nombre: str, valorString: str): self.nombre = nombre self.valorString = valorString class CILGlobalMethod: def __init__(self, nombre, params=None, locals=None, intrucciones=None, originclass="Object", comments="None"): self.nombre = nombre self.params = params if params is not None else [] self.locals = locals if locals is not None else [] self.intrucciones = intrucciones if intrucciones is not None else [] self.originclass = originclass self.comments = comments class CILProgram: def __init__(self, Types=None, Data=None, Methods=None): self.Types = Types if Types is not None else [] self.Data = Data if Data is not None else [] self.Methods = Methods if Methods is not None else []
2.6875
3
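The CIL node classes above are plain data carriers for a cool-to-CIL compiler; a tiny sketch of constructing and printing one instruction (the operand names are illustrative):

inst = CILPlus(destination="x", params=["a", "b"])  # x <- a + b
inst.instructionPrint()  # prints the node's type, destination and params

prog = CILProgram(Types=[CILClass("Main")], Data=[], Methods=[])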
input.py
aidan-clyens/Simple_Digit_Recognition
0
12791976
from trainer import Trainer import os import sys if __name__ == '__main__': # Create a Trainer object contour_area_threshhold = 60 crop_width = 50 crop_height = 50 crop_margin = 5 trainer = Trainer(contour_area_threshhold, crop_width, crop_height, crop_margin) # User must enter at least one argument, if not then exit if len(sys.argv) < 2: print("Enter file name: python", sys.argv[0], "filename") print("Enter python input.py --help for more commands") exit() # User enters the 'help' argument if sys.argv[1] == "--help": print("Usage: python input.py --command OR python input.py filename") print("Commands:") print("--g : generate new training data files") exit() # User enters the 'generate' argument if sys.argv[1] == "--g": print("Creating new training data files.") trainer.create_training_data() exit() # User enters a file name as an argument input_file = sys.argv[1] if not os.path.exists(input_file): print(input_file, "is not a valid file name.") exit() # Create training data files if they have not been already created if not os.path.exists(trainer.train_data_file) or not os.path.exists(trainer.train_labels_file): print("Creating new training data files.") trainer.create_training_data() # Load training data files into Numpy arrays train_data, train_labels = trainer.load_training_data() # Load a test image and convert it into a Numpy array test_data = trainer.create_test_image(input_file) # Use the training data and test image to train the algorithm and predict the drawn digits results = trainer.knn_train(train_data, train_labels, test_data) # Show the resulting test image with the results printed on top trainer.show_result_image(results)
3.40625
3
ioc_scanner_test.py
BlackSquirrelz/ioc_detektor
0
12791977
<reponame>BlackSquirrelz/ioc_detektor #!/usr/bin/env python3 # -*- coding: utf-8 -*- # ioc_scanner_test.py # Author: BlackSquirrelz # Date: 2021-03-10 # Description: Script to get IP addresses from files and compare them to known IOCs. # Unit Testing for IOC Scanner. # Import Statements from unittest import TestCase, main import ioc_scanner as iocs from os import path class IOCSTest(TestCase): """ IOC Processor Tests """ def test_get_ip_function(self): test_file = 'test_directory/test_2/ABC.log' result = iocs.get_ip(test_file) self.assertIsInstance(result, dict, "Dictionary requirement not met.") def test_default_ioc_directory(self): self.assertTrue(path.exists('iocs/ioc_hashes.txt'), "Default Hash IOC is empty.") self.assertTrue(path.exists('iocs/ioc_ip.txt'), "Default IP IOC is empty.") self.assertTrue(path.exists('iocs/ioc_regex.txt'), "Default REGEX IOC is empty.") self.assertTrue(path.exists('iocs/ioc_shells.txt'), "Default SHELLS IOC is empty.") def test_generic_regex_function(self): test_file = 'test_directory/test_2/ABC.log' default_regex_file = 'iocs/ioc_regex.txt' result = iocs.generic_regex(default_regex_file, test_file) self.assertIsInstance(result, dict, "Dictionary requirement not met.") if __name__ == '__main__': main()
2.796875
3
loginpass/azure.py
kairichard/loginpass
0
12791978
<filename>loginpass/azure.py<gh_stars>0 """ loginpass.azure ~~~~~~~~~~~~~~~ Loginpass Backend of Azure AD. :copyright: (c) 2018 by <NAME> :license: BSD, see LICENSE for more details. """ from ._core import UserInfo, OAuthBackend, parse_id_token _BASE_URL = 'https://login.microsoftonline.com/' def create_azure_backend(name, tenant, version=1, claims_options=None): if version == 1: authorize_url = '{}{}/oauth2/authorize'.format(_BASE_URL, tenant) token_url = '{}{}/oauth2/token'.format(_BASE_URL, tenant) issuer_url = 'https://sts.windows.net/{}/'.format(tenant) if claims_options is None: claims_options = { 'iss': { 'values': [issuer_url] } } elif version == 2: authorize_url = '{}{}/oauth2/v2.0/authorize'.format(_BASE_URL, tenant) token_url = '{}{}/oauth2/v2.0/token'.format(_BASE_URL, tenant) issuer_url = '{}{}/v2.0'.format(_BASE_URL, tenant) if claims_options is None: def validate_iss(claims, value): iss = 'https://login.microsoftonline.com/{}/v2.0'.format(claims['tid']) return iss == value claims_options = { 'iss': { 'essential': True, 'validate': validate_iss, } } else: raise ValueError('Invalid version') class AzureAD(OAuthBackend): OAUTH_TYPE = '2.0,oidc' OAUTH_NAME = name OAUTH_CONFIG = { 'api_base_url': 'graph.microsoft.com', 'access_token_url': token_url, 'authorize_url': authorize_url, 'client_kwargs': {'scope': 'openid email profile'}, } JWK_SET_URL = '{}{}/discovery/keys'.format(_BASE_URL, tenant) def profile(self, **kwargs): url = '{}{}/openid/userinfo'.format(_BASE_URL, tenant) resp = self.get(url, **kwargs) resp.raise_for_status() return UserInfo(**resp.json()) def parse_openid(self, token, nonce=None): return parse_id_token( self, token['id_token'], claims_options, token.get('access_token'), nonce ) class AzureADv2(AzureAD): JWK_SET_URL = '{}{}/discovery/v2.0/keys'.format(_BASE_URL, tenant) def profile(self, **kwargs): return self.parse_openid(**kwargs) if version == 2: return AzureADv2 else: return AzureAD Azure = create_azure_backend('azure', 'common')
2.015625
2
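Because create_azure_backend in the loginpass record is a parameterised factory, tenant-specific and v2 variants come from the same call. The sketch below uses only arguments the shown code accepts; the tenant ID is a placeholder:

AzureV2 = create_azure_backend('azure-v2', 'common', version=2)  # v2 endpoints with per-tenant iss validation
MyTenantAzure = create_azure_backend('azure-mytenant', 'my-tenant-id')  # single-tenant v1 backend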
pythonfile/theoretic_graphs.py
penguinoneshaw/MPhysProject
0
12791979
<gh_stars>0 #!/usr/bin/env python3 from ctypes import cdll, c_double, CFUNCTYPE import numpy as np import seaborn from pathlib import Path import matplotlib matplotlib.use("pgf") pgf_with_pdflatex = { "pgf.texsystem": "pdflatex", "font.family": "serif", # use serif/main font for text elements "text.usetex": True, "errorbar.capsize": 0.5, "pgf.preamble": [ r"\usepackage[utf8]{inputenc}", r"\usepackage[T1]{fontenc}", r"\usepackage{mathpazo}", r"\usepackage[version-1-compatibility]{siunitx}" ] } matplotlib.rcParams.update(pgf_with_pdflatex) from matplotlib import pyplot as plt # import netCDF4 try: lib = cdll.LoadLibrary("pythonfile/libProjectPython.dylib") except OSError as e: lib = cdll.LoadLibrary("pythonfile/libProjectPython.so") lib.unesco_depth.argtypes = [c_double,c_double,c_double] lib.unesco_depth.restype = c_double lib.unesco_pressure.argtypes = [c_double,c_double,c_double] lib.unesco_pressure.restype = c_double lib.leroy_et_al.argtypes = [c_double,c_double,c_double] lib.leroy_et_al.restype = c_double lib.ideal_sound_channel.argtypes = [c_double,c_double,c_double,c_double,c_double] lib.ideal_sound_channel.restype = c_double depths = np.linspace(0,2000,5000,dtype=np.double) pressures = np.linspace(0,1000,5000,dtype=np.double) temps = np.linspace(0, 40, 100, dtype=np.double) salinities = np.linspace(0, 40, 100, dtype=np.double) ufunc_unesco = np.frompyfunc(lib.unesco_depth, 3, 1) ufunc_leroy = np.frompyfunc(lib.leroy_et_al, 3, 1) ufunc_ideal = np.frompyfunc(lib.ideal_sound_channel, 5, 1) def plot_contours(ufunc, title, filename): fig, plots = plt.subplots(2, 2, 'col', 'row', True, gridspec_kw={ 'hspace': 0.3, 'bottom': 0.08, 'top': 0.92}, figsize=(5,5)) t, d = np.meshgrid(temps, depths) cp = plots[0][0].contour(t, d, ufunc(d,t, 35)) plt.clabel(cp, fmt="%d", rightside_up=False) plots[0][0].set_ylim(2000, 0) plots[0][0].set_ylabel("Depth (m)") plots[0][0].set_xlabel(r"Temperature (\si{\degreeCelsius})") s, d = np.meshgrid(salinities, depths) cp = plots[0][1].contour(s, d, ufunc(d, 10, s)) plt.clabel(cp, fmt="%d", rightside_up=False) plots[0][1].set_ylim(2000, 0) plots[0][1].set_ylabel("Depth (m)") plots[0][1].set_xlabel("Salinity (ppt)") t,s = np.meshgrid(temps, salinities) cp = plots[1][0].contour(t,s, ufunc(1000, t, s)) plt.clabel(cp, fmt="%d") plots[1][0].set_xlabel(r"Temperature (\si{\degreeCelsius})") plots[1][0].set_ylabel("Salinity (ppt)") fig.suptitle(title) plots[0][0].grid() plots[0][1].grid() plots[1][0].grid() fig.delaxes(plots[1][1]) fig.savefig(filename) plot_contours(ufunc_unesco, "UNESCO Equation (Chen and Millero 1995)", Path("final_output/figures/unesco.pdf")) plot_contours(ufunc_leroy, "Leroy et al. 2008", Path("final_output/figures/leroy.pdf")) plt.figure() plt.plot(1000*ufunc_ideal(depths, 1160, 1.3, 1.45, 1.14e-3), depths) plt.ylim(2000, 0) plt.xlabel(r"Speed of Sound (\si{\meter\per\second})") plt.ylabel(r"Depth (\si{\meter})") plt.savefig(Path("final_output/figures/ideal.pdf"))
1.992188
2
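The np.frompyfunc calls in the plotting record are what let scalar ctypes functions broadcast over meshgrids. A self-contained sketch of the same pattern with a plain Python stand-in (the formula below is a toy, not the UNESCO equation):

import numpy as np

def sound_speed(depth, temp, salinity):
    # toy scalar function standing in for lib.unesco_depth
    return 1449.2 + 4.6 * temp + 1.34 * (salinity - 35) + 0.016 * depth

ufunc = np.frompyfunc(sound_speed, 3, 1)  # 3 scalar inputs, 1 output
t, d = np.meshgrid(np.linspace(0, 30, 4), np.linspace(0, 2000, 5))
print(ufunc(d, t, 35).astype(float))  # applied elementwise across the grid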
helpers/alphanumeric.py
danielhenrymantilla/shellcode-factory
34
12791980
<filename>helpers/alphanumeric.py<gh_stars>10-100 from assembly import Assembly # String of forbidden chars forbidden_chars = "" class Utils: verbose = False # Is debug printing activated? @staticmethod def none(*args): return None @staticmethod def debug(*ss): if Utils.verbose: for s in ss: print s @staticmethod def offset_split_as_valids(offset): """ Returns x, y, z verifying that y and z are valid bytes and that 2 * (x * 0x100 + y) + z = offset + x + (x ? 4 : 0) """ hundreds = 0 # 4 = Assembly()("inc %esp; pop %ecx; push %ecx; dec %esp").len incpoppushdec = 4 for hundreds in range(0x100): if hundreds == 1: offset += incpoppushdec for once in Xor.valid_bytes: for twice in Xor.valid_bytes: if offset == \ 2*(hundreds * 0x100 + twice) + once: return once, twice, hundreds offset += 1 assert(False) @staticmethod def to_hex_wordff(bytes, arch = 32): """ Converts a series of bytes into its integer representation, but replacing the byte 0xff by a valid one """ assert(len(bytes) <= arch / 8) rev_chars = \ ((chr(i) if i != 0xff else Xor.zero) for i in bytes[::-1]) return "$0x" + "".join(c.encode("hex") for c in rev_chars) class Xor: @staticmethod def of(*args): """ xor operator with arbitrary arity """ acc = 0 for arg in args: acc ^= arg return acc # from functools import reduce; from operator import xor; \ # of = lambda *args: reduce(xor, args, 0) # String of valid chars import string;\ valid_chars = \ (string.digits + string.ascii_letters).translate(None, forbidden_chars) # One valid char will represent 0xff zero = valid_chars[0] # List of valid bytes valid_bytes = [ord(c) for c in valid_chars] # List of validff bytes (byte 0xff added for xor completude) valid_bytesff = [ord(c) for c in valid_chars.replace(zero, "\xff")] # Dictionary of: valid bytes split as tuples of valid bytes dups = {} for x in valid_bytes: for y in valid_bytes: if (x ^ y in valid_bytes): dups[x ^ y] = [x, y] # Dictionary of: bytes split as tuples of a validff byte and valid bytes splits = {} splits[0xff] = [0xff, valid_bytesff[1], valid_bytesff[1]] for z in valid_bytes: splits[z] = [z] splits[ord(zero)] = [valid_bytesff[1], valid_bytesff[1], ord(zero)] for x in valid_bytesff: for z in valid_bytes: if not (x ^ z in splits): splits[x ^ z] = [x, z] for x in valid_bytesff: for y in valid_bytes: for z in valid_bytes: if not (x ^ y ^ z in splits): splits[x ^ y ^ z] = [x, y, z] @staticmethod def display(): for i in range(len(Xor.splits)): print hex(i) + " = " + \ " ^ ".join(hex(x) for x in Xor.splits[i]) @staticmethod def split(word): """ Input = word : byte array Output = words : (byte array) array, verifying that for j in range(len(word)): word[j] == Xor.of(words[i][j] for i in range(words_nb)) """ Utils.debug("Debug(split)", "got " + \ Utils.to_hex_wordff(word), word) words_nb = max(len(Xor.splits[x]) for x in word) words = [[] for i in range(words_nb)] for byte in word: i = 0 for x in Xor.splits[byte]: words[i].append(x) i += 1 while (i < words_nb): x1, x2 = Xor.dups[words[i - 1][-1]] words[i - 1][-1] = x1 words[i].append(x2) i += 1 Utils.debug("Debug(split)", "returning", words) return words class Alphanumeric(Assembly): eax, ecx, edx, ebx, esp, ebp, esi, edi = "", "", "", "", "", "", "", "" word_size = 2 dword_size = 4 qword_size = 8 int_size = 0 def prologue(self): self.macro \ ("pmov")(lambda src, dst: \ Utils.none(self.push(src), \ self.popl(dst) )) @self.macro() def zero_eax(): word = "$0x" + Xor.zero.encode("hex") * 4 self.pmov(word, "%eax") self.xorl(word, "%eax") @self.macro() def set_regs(eax="%eax", ecx="%ecx", 
edx="%edx", ebx="%ebx",\ ebp="%ebp", esi="%esi", edi="%edi", buried="%eax"): self.push(eax) self.push(ecx) self.push(edx) self.push(ebx) self.push(buried) self.push(ebp) self.push(esi) self.push(edi) self.popa() @self.macro() def rep_add(n, reg): for i in range(abs(n)): self("inc" if n >= 0 else "dec", reg) @self.macro() def pop_eip(reg): self.rep_add(-4, "%esp") self.popl(reg) @self.macro() def init_regs(): self.pop_eip("%ecx") self.zero_eax() self.decl("%eax") self.pmov("%eax", "%edx") self.xorb("$0x33", "%al") self.set_regs( eax = "%edx", ebx = "%edx", \ ecx = "%edx", edx = "%eax", \ esi = "%eax", edi = "%eax", \ ebp = "%ecx" ) self.incl("%ebx") self.rep_add(3, "%edx") @self.macro() def popl_esp (aux_reg = "%eax"): self.pmov("%esp", aux_reg) self.xorl("0x34(%esp, %esi)", aux_reg) self.pmov(aux_reg, aux_reg) self.xorl("0x30(%esp, %esi)", "%esp") self.init_regs() self.pushl("%esp") self.incl("%esp") self.popl("%ecx") self.rep_add(2, "%ecx") self.pushl("%ecx") self.decl("%esp") self.popl_esp(aux_reg = "%ecx") self.pmov("%eax", "%ecx") def push_sc(self, sc): @self.macro() def pushff(word): """ Pushes the 4-bytes word using alphanumeric opcodes only (Requires %esi = -0x34, %dl = 0x30 ^ 0xff = 0xcf and %ecx = 0xffffffff) """ assert(len(word) == self.dword_size) is_validff = True for i in word: is_validff = is_validff and \ (i in Xor.valid_bytesff) assert(is_validff) ff_nb = sum(int(i == 0xff) for i in word) if ff_nb == 4: self.pushl("%ecx") return if word[2:4] == [0xff, 0xff]: self.pushw("%cx") self.pushw("$0x" + Utils.to_hex_wordff(word)[7:]) for i in range(2): if word[i] == 0xff: self.xorb("%dl", \ "0x" + str(34 + i) + \ "(%esp, %esi)") return self.pushl(Utils.to_hex_wordff(word)) for i in range(4): if word[i] == 0xff: self.xorb("%dl", \ "0x" + str(34 + i) + "(%esp, %esi)") return sc += (-len(sc) % 4) * "G" # "G" -> "inc %edi" Utils.debug(sc + " : " + str(len(sc)) + " bytes.") all_words = [] for k in range(len(sc) / 4): all_words.append( \ Xor.split([ord(c) for c in sc[4 * k:4 * (k + 1)]])) Utils.debug(all_words) for words in all_words[::-1]: Utils.debug([Utils.to_hex_wordff(word) \ for word in words]) self.pushff(words[0]) self.popl("%eax") for i in range(1, len(words)): self.xorl(Utils.to_hex_wordff(words[i]), "%eax") self.pushl("%eax") def epilogue (self, offset=ord(Xor.zero), edx=ord(Xor.zero), hundreds=0): @self.macro() def set_edx (edx, hundreds): self.push("$" + hex(edx)) if hundreds: self.incl("%esp") self.popl("%edx") self.rep_add(hundreds, "%edx") self.pushl("%edx") self.decl("%esp") self.popl("%edx") self.push("%esp") self.set_edx(edx, hundreds) self.pmov("%ecx", "%eax") self.xorb("$0x64", "%al") self.xorb("%al", hex(offset) + "(%ebp, %edx, 2)") self(".byte", "0x58") def __init__(self, sc, arch): self.autoassemble = False if arch == 64: self.eax, self.ecx, self.edx, self.ebx, \ self.esp, self.ebp, self.esi, self.edi = \ "%rax", "%rbx", "%rcx", "%rdx",\ "%rsp", "%rbp", "%rsi", "%rdi" self.int_size = 8 print "'" + argv[0] + \ "' error: 64 bits support not implemented yet" exit(1) else: self.eax, self.ecx, self.edx, self.ebx, \ self.esp, self.ebp, self.esi, self.edi = \ "%eax", "%ebx", "%ecx", "%edx",\ "%esp", "%ebp", "%esi", "%edi" self.int_size = 4 self.prologue() self.push_sc(sc) base_code = self.code self.epilogue() self._assemble() code_offset = self.len - 1 if Utils.verbose: print self self.code = base_code self.epilogue(*Utils.offset_split_as_valids(code_offset)) self._assemble() if __name__ == "__main__": if Utils.verbose: Xor.display() from sys import argv if len(argv) < 2 
or len(argv) > 3: print "Usage:\n\tpython", argv[0], "shellcode", "[arch]" exit(1) sc = "".join(c if c != "\\" and c != "x" else "" \ for c in argv[1]).decode("hex") arch = int(argv[2]) if len(argv) == 3 else 32 code = Alphanumeric(sc, arch) if Utils.verbose: print code print "alphanumeric_shellcode =" print code.ascii print "Total: " + str(code.len) + " bytes."
2.8125
3
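Xor.split in the shellcode-factory record promises that xoring its returned byte arrays column-wise restores the input word while keeping every emitted byte alphanumeric (or the 0xff stand-in). A small verification sketch; the module itself is Python 2, so this assumes a Python 2 interpreter, and the module name is an assumption:

from alphanumeric import Xor  # assuming the file imports under this name

word = [ord(c) for c in "/sh\x00"]  # arbitrary 4 bytes, including a NUL
words = Xor.split(word)
for j in range(len(word)):
    acc = 0
    for w in words:
        acc ^= w[j]
    assert acc == word[j]  # column-wise xor reproduces the original byte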
home/migrations/0012_auto_20220329_0825.py
SeanCodeMedia/codeMedia-django
0
12791981
<filename>home/migrations/0012_auto_20220329_0825.py # Generated by Django 3.1.2 on 2022-03-29 12:25 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('home', '0011_home_degree_3_description'), ] operations = [ migrations.AddField( model_name='home', name='role1', field=models.CharField(default='Web Developer', max_length=100), ), migrations.AddField( model_name='home', name='role2', field=models.CharField(default='Application Engineer', max_length=100), ), ]
1.515625
2
app/article/views.py
mapan1984/blog
0
12791982
<gh_stars>0 from flask import flash, render_template, redirect, url_for, request, abort from flask_login import current_user, login_required from app import db, redis from app.models import Article, Comment, Permission, Rating from app.decorators import permission_required, author_required from app.article import article as article_blueprint from app.article.forms import EditArticleForm, ModifyArticleForm from app.tasks import build_index, rebuild_index @article_blueprint.route('/<title>', methods=['GET']) def article(title): """ 显示单篇文章 """ article = Article.query.filter_by(title=title).first_or_404() # 相似文章 sim_articles = redis.zrevrange(article.title, 0, 4, withscores=True) # 获取评分情况 ratings = article.ratings.all() num_rating = len(ratings) try: avg_rating = sum(map(lambda rating: rating.value, ratings)) / num_rating except ZeroDivisionError: avg_rating = None try: current_user_rating = article.ratings.filter_by(user=current_user).first().value except AttributeError: current_user_rating = None try: current_user_id = current_user.id except AttributeError: current_user_id = None return render_template('article/article.html', article=article, sim_articles=sim_articles, num_rating=num_rating, avg_rating=avg_rating, current_user_rating=current_user_rating, article_id=article.id, user_id=current_user_id) @article_blueprint.route('/edit', methods=['GET', 'POST']) @author_required def edit(): form = EditArticleForm() if form.validate_on_submit(): article = Article( title=form.title.data, # TODO: delete name=form.title.data, body=form.body.data, author=current_user._get_current_object() ) article.set_category(form.category.data) article.add_tags(form.tags.data.strip().split(' ')) db.session.add(article) db.session.commit() build_index.delay(article.id) return redirect(url_for('article.article', title=article.title)) return render_template('article/edit.html', form=form) @article_blueprint.route('/delete/<title>') @author_required def delete(title): article = Article.query.filter_by(title=title).first_or_404() if current_user != article.author \ and not current_user.can(Permission.ADMINISTER): abort(403) else: flash(article.delete()) return (redirect(request.args.get('next') or url_for('user.user', username=current_user.username))) @article_blueprint.route('/modify/<title>', methods=['GET', 'POST']) @author_required def modify(title): article = Article.query.filter_by(title=title).first_or_404() form = ModifyArticleForm(article=article) if current_user != article.author \ and not current_user.can(Permission.ADMINISTER): abort(403) if form.validate_on_submit(): article.title = form.title.data # TODO: delete if article.name is None: article.name = form.title.data article.body = form.body.data article.set_category(form.category.data) article.delete_tags() article.add_tags(form.tags.data.split(' ')) db.session.add(article) db.session.commit() rebuild_index.delay(article.id) flash('您的文章已经成功修改') return redirect(url_for('article.article', title=article.title)) form.title.data = article.title form.category.data = article.category.id form.tags.data = " ".join(tag.name for tag in article.tags) form.body.data = article.body return render_template('article/edit.html', form=form) @article_blueprint.route('/rating', methods=['POST']) @login_required def rating(): user_id = request.json['user_id'] rating_value = request.json['rating_value'] article_id = request.json['article_id'] rating = Rating.query.filter_by(user_id=user_id, article_id=article_id).first() if rating is None: # 新建 rating = 
Rating(value=rating_value, user_id=user_id, article_id=article_id) else: # 更改 rating.value = rating_value db.session.add(rating) return "Rating done." @article_blueprint.route('/comment', methods=['POST']) @login_required def comment(): user_id = request.form['user_id'] article_id = request.form['article_id'] comment_body = request.form['comment_body'] comment = Comment(body=comment_body, article_id=article_id, author_id=user_id) db.session.add(comment) flash("你的评论已提交") return redirect(request.args.get('next') or url_for('main.index')) @article_blueprint.route('/moderate/<comment_id>') @login_required @permission_required(Permission.MODERATE_COMMENTS) def moderate(comment_id): comment = Comment.query.get_or_404(comment_id) if comment.article.author == current_user\ or comment.author == current_user\ or current_user.is_administrator: db.session.delete(comment) flash('评论已经删除') return redirect(url_for('article.article', title=comment.article.title))
2.203125
2
catstuff/core_plugins/tasks/filelist/main.py
modora/catstuff
0
12791983
from catstuff import core, tools from .config import mod_name, build class Filelist(core.plugins.CSTask): def __init__(self): super().__init__(mod_name, build) def main(self, path, max_depth=0, followlinks=False, include=None, exclude=None, mode='whitelist', safe_walk=True, **kwargs): return tools.path.import_file_list( path, max_depth=max_depth, followlinks=followlinks, include=include, exclude=exclude, mode=mode, safe_walk=safe_walk)
1.835938
2
yesno.py
wdymm/pymysql
0
12791984
<filename>yesno.py print('yesno')
1.03125
1
notebooks/_solutions/visualization_02_seaborn3.py
jonasvdd/DS-python-data-analysis
65
12791985
# Axes-based solution; sns and the titanic DataFrame are set up earlier in the notebook sns.violinplot(data=titanic, x="Pclass", y="Age", hue="Sex", split=True, palette="Set2") sns.despine(left=True)
2.234375
2
anthill/exec/model/api.py
anthill-services/anthill-exec
0
12791986
import tornado.gen from tornado.gen import sleep, Future from tornado.httpclient import HTTPRequest, HTTPError from tornado.simple_httpclient import SimpleAsyncHTTPClient from .. import options as _opts from anthill.common.internal import Internal, InternalError from anthill.common.validate import validate_value from anthill.common.server import Server from . util import promise, PromiseContext, APIError API_TIMEOUT = 5 # noinspection PyUnusedLocal @promise async def sleep(delay, handler=None): await tornado.gen.sleep(delay) # noinspection PyUnusedLocal @promise async def moment(handler=None): await tornado.gen.moment def log(message): handler = PromiseContext.current if handler: handler.log(message) class AdminAPI(object): @promise async def delete_accounts(self, accounts, gamespace_only=True, handler=None, *args, **kwargs): application = Server.instance() publisher = await application.acquire_publisher() await publisher.publish("DEL", { "gamespace": handler.env["gamespace"], "accounts": accounts, "gamespace_only": gamespace_only }) # noinspection PyUnusedLocal class WebAPI(object): def __init__(self): self.http_client = SimpleAsyncHTTPClient() self.rc_cache = {} @promise async def get(self, url, headers=None, *args, **kwargs): request = HTTPRequest(url=url, use_gzip=True, headers=headers) existing_futures = self.rc_cache.get(url, None) if existing_futures is not None: future = Future() existing_futures.append(future) result = await future return result new_futures = [] self.rc_cache[url] = new_futures try: response = await self.http_client.fetch(request) except HTTPError as e: e = APIError(e.code, e.message) for future in new_futures: future.set_exception(e) del self.rc_cache[url] raise e else: body = response.body for future in new_futures: future.set_result(body) del self.rc_cache[url] return body # noinspection PyUnusedLocal class ConfigAPI(object): @promise async def get(self, handler=None, *ignored): app_name = handler.env["application_name"] app_version = handler.env["application_version"] key = "config:" + str(app_name) + ":" + str(app_version) cached = handler.get_cache(key) if cached: return cached internal = Internal() try: info = await internal.request( "config", "get_configuration", timeout=API_TIMEOUT, app_name=app_name, app_version=app_version, gamespace=handler.env["gamespace"]) except InternalError as e: raise APIError(e.code, e.body) handler.set_cache(key, info) return info # noinspection PyUnusedLocal class StoreAPI(object): @promise async def get(self, name, handler=None, *ignored): if not isinstance(name, str): raise APIError(400, "name should be a string") key = "store:" + str(name) cached = handler.get_cache(key) if cached: return cached internal = Internal() try: config = await internal.request( "store", "get_store", timeout=API_TIMEOUT, gamespace=handler.env["gamespace"], name=name) except InternalError as e: raise APIError(e.code, e.body) handler.set_cache(key, config) return config @promise async def new_order(self, store, item, currency, amount, component, env=None, handler=None, *ignored): internal = Internal() try: result = await internal.request( "store", "new_order", timeout=API_TIMEOUT, gamespace=handler.env["gamespace"], account=handler.env["account"], store=store, item=item, currency=currency, amount=amount, component=component, env=env) except InternalError as e: raise APIError(e.code, e.body) return result @promise async def update_order(self, order_id, handler=None, *ignored): internal = Internal() try: result = await internal.request( "store", 
"update_order", timeout=API_TIMEOUT, gamespace=handler.env["gamespace"], account=handler.env["account"], order_id=order_id) except InternalError as e: raise APIError(e.code, e.body) return result @promise async def update_orders(self, handler=None, *ignored): internal = Internal() try: result = await internal.request( "store", "update_orders", timeout=API_TIMEOUT, gamespace=handler.env["gamespace"], account=handler.env["account"]) except InternalError as e: raise APIError(e.code, e.body) return result # noinspection PyUnusedLocal class ProfileAPI(object): @promise async def get(self, path="", handler=None, *ignored): if not isinstance(path, str): raise APIError(400, "Path should be a string") internal = Internal() try: profile = await internal.request( "profile", "get_my_profile", timeout=API_TIMEOUT, gamespace_id=handler.env["gamespace"], account_id=handler.env["account"], path=path) except InternalError as e: raise APIError(e.code, e.body) return profile @promise async def update(self, profile=None, path="", merge=True, handler=None, *ignored): if not isinstance(path, str): raise APIError(400, "Path should be a string") key = "profile:" + str(path) if not profile: profile = {} internal = Internal() try: profile = await internal.request( "profile", "update_profile", timeout=API_TIMEOUT, gamespace_id=handler.env["gamespace"], account_id=handler.env["account"], fields=profile, path=path, merge=merge) except InternalError as e: raise APIError(e.code, e.body) handler.set_cache(key, profile) return profile @promise async def query(self, query, limit=1000, handler=None, *ignored): if not validate_value(query, "json_dict"): raise APIError(400, "Query should be a JSON object") internal = Internal() try: results = await internal.request( "profile", "query_profiles", timeout=API_TIMEOUT, gamespace_id=handler.env["gamespace"], query=query, limit=limit) except InternalError as e: raise APIError(e.code, e.body) return results # noinspection PyUnusedLocal class SocialAPI(object): @promise async def acquire_name(self, kind, name, handler=None, *ignored): internal = Internal() try: profile = await internal.request( "social", "acquire_name", gamespace=handler.env["gamespace"], account=handler.env["account"], kind=kind, name=name) except InternalError as e: raise APIError(e.code, e.body) return profile @promise async def check_name(self, kind, name, handler=None, *ignored): internal = Internal() try: account_id = await internal.request( "social", "check_name", gamespace=handler.env["gamespace"], kind=kind, name=name) except InternalError as e: raise APIError(e.code, e.body) return account_id @promise async def release_name(self, kind, handler=None, *ignored): internal = Internal() try: released = await internal.request( "social", "release_name", gamespace=handler.env["gamespace"], account=handler.env["account"], kind=kind) except InternalError as e: raise APIError(e.code, e.body) return released @promise async def update_profile(self, group_id, profile=None, path=None, merge=True, handler=None, *ignored): if path and not isinstance(path, (list, tuple)): raise APIError(400, "Path should be a list/tuple") internal = Internal() try: profile = await internal.request( "social", "update_group_profile", timeout=API_TIMEOUT, gamespace=handler.env["gamespace"], group_id=group_id, profile=profile, path=path, merge=merge) except InternalError as e: raise APIError(e.code, e.body) return profile @promise async def update_group_profiles(self, group_profiles, path=None, merge=True, synced=False, handler=None, *ignored): 
if not isinstance(group_profiles, dict): raise APIError(400, "Group profiles should be a dict") if path and not isinstance(path, (list, tuple)): raise APIError(400, "Path should be a list/tuple") internal = Internal() try: profile = await internal.request( "social", "update_group_profiles", timeout=API_TIMEOUT, gamespace=handler.env["gamespace"], group_profiles=group_profiles, path=path or [], merge=merge, synced=synced) except InternalError as e: raise APIError(e.code, e.body) return profile # noinspection PyUnusedLocal class MessageAPI(object): @promise async def send_batch(self, sender, messages, authoritative=True, handler=None, *ignored): internal = Internal() try: await internal.request( "message", "send_batch", timeout=API_TIMEOUT, gamespace=handler.env["gamespace"], sender=sender, messages=messages, authoritative=authoritative) except InternalError as e: raise APIError(e.code, e.body) return "OK" # noinspection PyUnusedLocal class PromoAPI(object): @promise async def use_code(self, key, handler=None, *ignored): internal = Internal() try: result = await internal.request( "promo", "use_code", timeout=API_TIMEOUT, gamespace=handler.env["gamespace"], account=handler.env["account"], key=key) except InternalError as e: raise APIError(e.code, e.body) try: result = result["result"] except KeyError: raise APIError(500, "Response had no 'result' field.") return result class EventAPI(object): @promise async def update_event_profile(self, event_id, profile, path=None, merge=True, handler=None): internal = Internal() try: events = await internal.request( "event", "update_event_profile", event_id=event_id, profile=profile, path=path, merge=merge, timeout=API_TIMEOUT, gamespace=handler.env["gamespace"], account=handler.env["account"]) except InternalError as e: raise APIError(e.code, e.body) return events @promise async def list(self, extra_start_time=0, extra_end_time=0, handler=None): internal = Internal() try: events = await internal.request( "event", "get_list", timeout=API_TIMEOUT, gamespace=handler.env["gamespace"], account=handler.env["account"], extra_start_time=extra_start_time, extra_end_time=extra_end_time) except InternalError as e: raise APIError(e.code, e.body) return events class APIS(object): config = ConfigAPI() store = StoreAPI() profile = ProfileAPI() social = SocialAPI() message = MessageAPI() promo = PromoAPI() web = WebAPI() event = EventAPI() admin = AdminAPI() def expose(context, is_server=False): expose_objects = { "log": log, "sleep": sleep, "moment": moment, "web": APIS.web, "config": APIS.config, "store": APIS.store, "profile": APIS.profile, "social": APIS.social, "message": APIS.message, "promo": APIS.promo, "event": APIS.event } if is_server: expose_objects.update({ "admin": APIS.admin }) # define them as readonly for name, callback in expose_objects.items(): context.Object.defineProperty( context.glob, name, {'value': callback, 'writable': False})
2.203125
2
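WebAPI.get in the anthill record coalesces concurrent fetches of one URL: the first caller performs the HTTP round trip while later callers wait on Futures that get resolved, or failed, in bulk. A self-contained asyncio sketch of the same idea, independent of Tornado:

import asyncio

_pending = {}  # url -> futures parked on an in-flight fetch

async def fetch_once(url, do_fetch):
    if url in _pending:                       # a fetch is already running
        fut = asyncio.get_running_loop().create_future()
        _pending[url].append(fut)
        return await fut                      # reuse its result
    _pending[url] = waiters = []
    try:
        body = await do_fetch(url)            # the single real round trip
    except Exception as exc:
        for fut in waiters:
            fut.set_exception(exc)
        raise
    else:
        for fut in waiters:
            fut.set_result(body)
        return body
    finally:
        del _pending[url]                     # later callers fetch afresh

With asyncio.gather(fetch_once(u, fetcher), fetch_once(u, fetcher)) only one do_fetch(u) actually runs; the second caller receives the first one's result through its Future.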
extract.py
lpmi-13/python-code-corpus
0
12791987
<gh_stars>0 from pymongo import MongoClient import json import sys try: client = MongoClient(host= ['localhost:27017'], serverSelectionTimeoutMS = 2000) client.server_info() db = client.ast except: print('mongo isn\'t currently running...please start it first') sys.exit() result_dict = { "fors": [], "ifs": [], "lists": [], "listcomps": [], "dicts": [], "dictcomps": [], "functions": [], } def output_json(handle): results = handle.find( {}, {'_id': False } ).limit(25) for result in results: result_dict[result['type']].append(json.dumps(result)) output_json(db.fors) output_json(db.ifs) output_json(db.lists) output_json(db.listcomps) output_json(db.dicts) output_json(db.dictcomps) output_json(db.functions) with open('results.json', 'a') as output_file: output_file.write(json.dumps(result_dict))
2.5
2
SwitchTracer/cores/contrib/couriermiddlewares/status.py
IzayoiRin/VirtualVeyonST
0
12791988
<filename>SwitchTracer/cores/contrib/couriermiddlewares/status.py<gh_stars>0 # pack is on the passing way ON_WAY = -1 # pack is passed successfully SUCCEEDED = 0 # pack is going to start PREPARED = 1 # pack passing timeout TIMEOUT = 2 # pack passing refused REFUSED = 3 # pack is destroyed DESTROYED = 4 # unknown error with pack UNKNOWN = 9 # info-level INFO = 100
1.226563
1
move_xmls_p33.py
mcjczapiewski/work
0
12791989
<filename>move_xmls_p33.py<gh_stars>0 # -*- coding: utf-8 -*- # library imports import os import datetime import shutil from natsort import natsorted from natsort import natsort_keygen nkey = natsort_keygen() byly = set() stare_xml = set() nowe_xml = set() count = 1 # current date and time czasstart = datetime.datetime.now() print("~~~~~~START~~~~~~\t" + str(czasstart).split(".")[0]) print( """\n\nNOTE! Up to 4 text files may appear in the XML folder: - bledy - podmienione - brak_odpowiednika_dla_xmla - brak_nowego_xmla_dla_operatu\n\n""" ) tusaxml = input("Enter the path to the folder with XML files: ") tusaoperaty = input("Enter the path to the folder with survey files (operaty): ") for subdir, dirs, _ in os.walk(tusaoperaty): dirs.sort(key=nkey) operat = subdir stare_xml.add(operat) for subdir, dirs, files in os.walk(tusaxml): dirs.sort(key=nkey) for file in natsorted(files): if file.endswith((".xml", ".XML")): if os.path.splitext(file)[0] == os.path.basename(operat): print(count) count += 1 if not os.path.exists(os.path.join(operat, file)): try: shutil.copy( os.path.join(subdir, file), os.path.join(operat, file), ) except OSError: with open( os.path.join(tusaxml, "bledy.txt"), "a" ) as bledy: bledy.write( os.path.join(subdir, file) + "\tCopy failed.\n" ) else: try: shutil.copy( os.path.join(subdir, file), os.path.join(operat, file), ) with open( os.path.join(tusaxml, "podmienione.txt"), "a" ) as bledy: bledy.write(os.path.join(operat, file) + "\n") except OSError: with open( os.path.join(tusaxml, "bledy.txt"), "a" ) as bledy: bledy.write( os.path.join(subdir, file) + "\tCopy failed.\n" ) byly.add(os.path.join(subdir, file)) nowe_xml.add(operat) for subdir, dirs, files in os.walk(tusaxml): dirs.sort(key=nkey) for file in natsorted(files): if file.upper().endswith(".XML"): if os.path.join(subdir, file) not in byly: with open( os.path.join(tusaxml, "brak_odpowiednika_dla_xmla.txt"), "a", ) as bledy: bledy.write(os.path.join(subdir, file) + "\n") for i in natsorted(stare_xml): if i not in nowe_xml: with open( os.path.join(tusaxml, "brak_nowego_xmla_dla_operatu.txt"), "a" ) as bledy: bledy.write(i + "\n") # total script runtime czaskoniec = datetime.datetime.now() roznicaczas = czaskoniec - czasstart czastrwania = roznicaczas.total_seconds() / 60 print("\nTotal time (minutes):") print("%.2f" % czastrwania) print("\n~~~~~~END~~~~~~\t" + str(czaskoniec).split(".")[0]) input("Press ENTER to exit...")
2.546875
3
src/simcore_service_deployment_agent/app_state.py
mrnicegyu11/osparc-deployment-agent
1
12791990
from enum import IntEnum class State(IntEnum): STARTING = 0 RUNNING = 1 FAILED = 2 STOPPED = 3 PAUSED = 4
2.59375
3
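Since State in the deployment-agent record derives from IntEnum, its members compare and serialise as plain integers, which is the usual reason to model lifecycle states this way:

assert State.RUNNING == 1             # IntEnum members equal their int value
assert State.FAILED > State.RUNNING   # and order like ints
print(State(3))                       # State.STOPPED: round-trips from an int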
users/urls.py
LeonardoCruzx/Projeto_rede_social
0
12791991
<filename>users/urls.py<gh_stars>0 from django.urls import path from .views import pagina_inicial_usuario app_name = 'users' urlpatterns = [ path('pagina-inicial', pagina_inicial_usuario, name='pagina-inicial-usuario'), ]
1.671875
2
CodingBat/Warmup-1/makes10.py
arthxvr/coding--python
0
12791992
<reponame>arthxvr/coding--python def makes10(a, b): return (a == 10 or b == 10 or a + b == 10)
3.78125
4
sl_cutscenes/scenarios/scenario.py
AIS-Bonn/sl-cutscenes
2
12791993
""" Abstract class for defining scenarios """ import random from typing import Tuple import numpy as np from copy import deepcopy import torch import stillleben as sl import nimblephysics as nimble from sl_cutscenes.room_models import RoomAssembler from sl_cutscenes.objects.mesh_loader import MeshLoader from sl_cutscenes.objects.object_loader import ObjectLoader from sl_cutscenes.objects.decorator_loader import DecoratorLoader from sl_cutscenes.lighting import get_lightmap from sl_cutscenes.camera import Camera import sl_cutscenes.utils.utils as utils import sl_cutscenes.constants as CONSTANTS from sl_cutscenes import object_info class Scenario(object): """ Abstract class for defining scenarios """ config = dict() name = 'scenario' def __init__(self, cfg, scene: sl.Scene, randomize=True): self.device = cfg.device self.viewer_mode = cfg.viewer self.scene = scene if randomize: utils.randomize() self.mesh_loader = MeshLoader() self.object_loader = ObjectLoader(scenario_reset=True) self.room_assembler = RoomAssembler(scene=self.scene) self.decorator_loader = DecoratorLoader(scene=self.scene) self.meshes_loaded, self.objects_loaded = False, False self.z_offset = 0. self.lights = cfg.lights self.lightmap = cfg.lightmap if self.lights == 0 else None if getattr(self, "allow_multiple_cameras", True): self.n_cameras = cfg.cameras else: print(f"scenario '{self.name}' supports only 1 camera -> ignoring n_cameras...") self.n_cameras = 1 self.coplanar_stereo = cfg.coplanar_stereo self.coplanar_stereo_dist = cfg.coplanar_stereo_dist self.cam_movement_complexity = cfg.cam_movement_complexity self.sim_dt = cfg.sim_dt self.cam_dt = cfg.cam_dt self.physics_engine = cfg.physics_engine self.nimble_debug = cfg.nimble_debug self.reset_sim() return def reset_sim(self): self.meshes_loaded, self.objects_loaded, self.cameras_loaded = False, False, False if self.physics_engine == "nimble": self.nimble_loaded = False self.sim_t = 0 self.setup_scene() self.setup_lighting() self.setup_objects() self.setup_cameras() self.decorate_scene() self.finalize_scene() @property def all_objects(self): return self.object_loader.all_objects @property def static_objects(self): return self.object_loader.static_objects @property def dynamic_objects(self): return self.object_loader.dynamic_objects def set_camera_look_at(self, pos, lookat): self.scene.set_camera_look_at(position=pos, look_at=lookat) def can_render(self): raise NotImplementedError def decorate_scene(self): self.room_assembler.add_wall_furniture() self.decorator_loader.decorate_scene(object_loader=self.object_loader) return def finalize_scene(self): """ Scene setup stuff that has to be done after everything else """ for obj in self.static_objects: obj.casts_shadows = False def setup_scene(self): """ Default setup_scene. Can be overriden from specific scenes """ _ = self.room_assembler.make_room() def setup_lighting(self): """ Default setup lighting. """ self.scene.ambient_light = torch.tensor([0.2, 0.2, 0.2]) if self.lightmap is not None: self.scene.light_map = get_lightmap(self.lightmap) self.scene.light_directions *= 0. # disable point lights self.scene.manual_exposure = 5.0 else: for i in range(self.lights): # self.scene.choose_random_light_direction() ori_angle = np.random.uniform(0, 360) elev_angle = np.random.uniform(30, 90) light_x = np.cos(ori_angle * np.pi / 180.) * np.cos(elev_angle * np.pi / 180.) light_y = np.sin(ori_angle * np.pi / 180.) * np.cos(elev_angle * np.pi / 180.) light_z = np.sin(elev_angle * np.pi / 180.) 
light_direction = torch.tensor([-light_x, -light_y, -light_z]) self.scene.light_directions[i] = light_direction light_color = torch.tensor([4.0, 4.0, 4.0]) + torch.rand(3) light_color_normalized = 5. * light_color / torch.linalg.norm(light_color) self.scene.light_colors[i] = light_color_normalized self.scene.manual_exposure = 3.0 def get_separations(self): # assert len(self.dynamic_objects) > 0, "Objects must be added to dynamic_objects before computing collisions" self.scene.check_collisions() separations = [obj.separation for obj in self.dynamic_objects if hasattr(obj, "separation")] return separations def is_there_collision(self): separations = self.get_separations() collision = True if np.sum(separations) < 0 else False return collision def load_meshes(self): """ """ if self.meshes_loaded: return print("mesh setup...") self.load_meshes_() self.meshes_loaded = True def load_meshes_(self): """ Scenario-specific logic """ raise NotImplementedError def setup_objects(self): """ """ if self.objects_loaded: return print("object setup...") if not self.meshes_loaded: self.load_meshes() # if objects have not been loaded yet, load them self.setup_objects_() self.objects_loaded = True return def setup_objects_(self): """ Scenario-specific logic """ raise NotImplementedError def setup_cameras(self): if self.cameras_loaded: return print("camera setup...") self.cameras = [] self.camera_objs = [] cam_config = self.config["camera"] base_lookat = cam_config["base_lookat"] # pick default ori. angle and (n_cameras-1) other angles from a linspace of angles that are 5 degrees apart default_ori_angle = cam_config["orientation_angle_default"] cam_ori_angles = [0] + random.sample(np.linspace(0, 360, 72+1).tolist()[1:-1], k=self.n_cameras-1) cam_ori_angles = [(angle + default_ori_angle) % 360 for angle in cam_ori_angles] # TODO parameters 'orientation_angle_min/max' are not yet used! for i, cam_ori_angle in enumerate(cam_ori_angles): cam_elev_angle = random.uniform(cam_config["elevation_angle_min"], cam_config["elevation_angle_max"]) cam_dist = random.uniform(cam_config["distance_min"], cam_config["distance_max"]) cam_lookat = deepcopy(base_lookat) cam_name = f"cam_{str(i).zfill(2)}" cam_stereo_positions = ["left", "right"] if self.coplanar_stereo else ["mono"] self.cameras.append(Camera(cam_name, self.cam_dt, cam_elev_angle, cam_ori_angle, cam_dist, cam_lookat, self.coplanar_stereo_dist, cam_stereo_positions, self.cam_movement_complexity)) self.setup_cameras_() # e.g. scenario-specific height adjustment self.setup_camera_objs() self.cameras_loaded = True def setup_camera_objs(self): """ Setting an object for each of the cameras. - Viewer mode: A full mesh is displayed at the position and with the pose of the camera - Normal mode: A tiny-dummy obj is place on the location of the camera to fill the occ-matrix cell """ camera_mesh = CONSTANTS.CAMERA_OBJ if self.viewer_mode else CONSTANTS.DUMMY_CAMERA_OBJ for camera_id, camera in enumerate(self.cameras): self.mesh_loader.load_meshes(camera_mesh) camera_pos = camera.get_pos() camera_info_mesh = self.mesh_loader.get_meshes()[-1] self.camera_objs.append(self.add_object_to_scene(camera_info_mesh, is_static=True)) pose = torch.eye(4) pose[:2, -1] = camera_pos[:2] pose[2, -1] = camera_pos[-1] + self.camera_objs[-1].mesh.bbox.min[-1] pose[:3, :3] = utils.get_rot_matrix( yaw=torch.tensor(camera.ori_angle * np.pi / 180), pitch=torch.tensor(-1 * camera.elev_angle * np.pi / 180), roll=torch.tensor(0.) 
) self.camera_objs[-1].set_pose(pose) self.scene.add_object(self.camera_objs[-1]) return def setup_cameras_(self): """ Scenario-specific logic, e.g. height adjustment """ raise NotImplementedError def simulate(self): ''' Can be overwritten by scenario-specific logic ''' self.sim_t += self.sim_dt self.sim_step_() def sim_step_(self): ''' Just calls the appropriate simulator; assumes that all other things have been taken care of. ''' if self.physics_engine == "physx": self.scene.simulate(self.sim_dt) elif self.physics_engine == "physx_manipulation_sim": raise NotImplementedError # TODO implement for gripper sim elif self.physics_engine == "nimble": if not self.nimble_loaded: self.setup_nimble_() self.simulate_nimble_() else: raise ValueError(f"invalid physics_engine parameter: {self.physics_engine}") def setup_nimble_(self): ''' Creates a clone of the current stillleben scene for nimblephysics, enabling physics simulation there. ''' print("initializing nimble scene from sl...") # utils.dump_sl_scene_to_urdf(self.scene, "scene.urdf") self.nimble_world = nimble.simulation.World() self.nimble_world.setTimeStep(self.sim_dt) positions, velocities = [], [] for obj in self.scene.objects: obj_info = object_info.get_object_by_class_id(obj.mesh.class_index) skel, pos, vel = utils.sl_object_to_nimble(obj, obj_info, debug_mode=self.nimble_debug) self.nimble_world.addSkeleton(skel) positions.extend(pos) velocities.extend(vel) self.nimble_states = [torch.cat(positions + velocities)] self.nimble_loaded = True def simulate_nimble_(self, action=None): ''' Simulates a timestep in nimblephysics. ''' # simulate timestep in nimble if action is None: action = torch.zeros(self.nimble_world.getNumDofs()) new_state = nimble.timestep(self.nimble_world, self.nimble_states[-1], action) self.nimble_states.append(new_state) self.nimble_world.setState(new_state) # transfer object state back into the stillleben context obj_pos, obj_vel = torch.chunk(new_state.clone(), 2) obj_pos = torch.chunk(obj_pos, obj_pos.shape[0] // 6) obj_vel = torch.chunk(obj_vel, obj_vel.shape[0] // 6) for obj, pos, vel in zip(self.scene.objects, obj_pos, obj_vel): obj_pose = obj.pose() obj_rpy, obj_t = pos.split([3, 3]) obj_pose[:3, :3] = utils.get_mat_from_rpy(obj_rpy) obj_pose[:3, 3] = obj_t obj.set_pose(obj_pose) angular_velocity, obj.linear_velocity = vel.split([3, 3]) obj.angular_velocity = angular_velocity.flip(0) # flip back from ZYX convention def add_object_to_scene(self, obj_info_mesh: Tuple[object_info.ObjectInfo, sl.Mesh], is_static: bool, **obj_mod): obj_info, obj_mesh = obj_info_mesh obj = self.object_loader.create_object(obj_info, obj_mesh, is_static, **obj_mod) self.scene.add_object(obj) return obj def remove_obj_from_scene(self, obj: sl.Object, decrement_ins_idx: bool=True): self.scene.remove_object(obj) self.object_loader.remove_object(obj.instance_index, decrement_ins_idx=decrement_ins_idx) def update_object_height(self, cur_obj, objs=None, scales=None): """ Updating an object z-position given a list of supporting objects""" if objs is None: objs = [] scales = [1.0] * len(objs) if scales is None else scales assert len(objs) == len(scales), "provided non-matching scales for update_camera_height" cur_obj_pose = cur_obj.pose() z_pose = self.get_obj_z_offset(cur_obj) for obj, scale in zip(objs, scales): z_pose += self.get_obj_z_offset(obj) * scale cur_obj_pose[2, -1] = z_pose cur_obj.set_pose(cur_obj_pose) return cur_obj def update_camera_height(self, camera, objs=None, scales=None): """ Updating the camera position, camera-object 
position and the look-at parameter""" if objs is None: objs = [] scales = [1.0] * len(objs) if scales is None else scales assert len(objs) == len(scales), "provided non-matching scales for update_camera_height" z_lookat = deepcopy(camera.start_base_lookat[-1]) for obj, scale in zip(objs, scales): z_lookat += self.get_obj_z_offset(obj) * scale camera.start_base_lookat[-1] = z_lookat return camera def get_obj_z_offset(self, obj): """ Obtaining the z_offset (z-pos + height) for a given object""" obj_pose = obj.pose() z_offset = obj_pose[2, -1] + (obj.mesh.bbox.max[-1] - obj.mesh.bbox.min[-1]) / 2 return z_offset def get_obj_offset(self, obj): """ Obtaining the bbox boundaries (pos + size for x,y,z) for a given object""" obj_pose = obj.pose() offset_x, offset_y, offset_z = obj_pose[:3, -1] + obj.mesh.bbox.max offset = torch.Tensor([-offset_x, -offset_y, offset_z]) return offset
2.265625
2
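Concrete scenes in the sl-cutscenes record fill in the abstract hooks load_meshes_, setup_objects_, setup_cameras_ and can_render. A skeletal subclass sketch under stated assumptions: CONSTANTS.TABLE is a placeholder mesh entry, and the camera config keys mirror the ones read in setup_cameras:

class TableScenario(Scenario):
    name = "table"
    config = {"camera": {"base_lookat": [0.0, 0.0, 0.7],
                         "orientation_angle_default": 0,
                         "elevation_angle_min": 10, "elevation_angle_max": 40,
                         "distance_min": 1.5, "distance_max": 2.5}}

    def load_meshes_(self):
        self.mesh_loader.load_meshes(CONSTANTS.TABLE)  # placeholder constant

    def setup_objects_(self):
        table_info_mesh = self.mesh_loader.get_meshes()[-1]
        self.table = self.add_object_to_scene(table_info_mesh, is_static=True)

    def setup_cameras_(self):
        for cam in self.cameras:  # lift each camera's look-at onto the table top
            self.update_camera_height(cam, objs=[self.table])

    def can_render(self):
        return self.sim_t > 0.0  # render once the simulation has advanced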
asgard/users/test/userstest.py
nicolas-van/asgard
0
12791994
<gh_stars>0
# -*- coding: utf-8 -*-

# Copyright (c) 2014, <NAME>
#
# Released under the MIT license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

from __future__ import unicode_literals, print_function, absolute_import

import unittest

import asgard.application as application
import asgard.users as users
import sqlalchemy as sa

app = application.Asgard(__name__)
app_users = app.register_plugin(users.UsersPlugin)


class DbTest(unittest.TestCase):
    """Class to extend to easily test code using the database."""

    def setUp(self):
        app_users.config["preferred_encryption"] = "werkzeug"
        self.tmp_engine = app.engine
        app.engine = sa.create_engine('sqlite:///:memory:')
        app.metadata.create_all(app.engine)
        app.__enter__()
        self.trans = app.transaction()
        self.trans.__enter__()

    def tearDown(self):
        self.trans.__exit__(None, None, None)
        app.engine.dispose()
        app.engine = self.tmp_engine
        app.__exit__(None, None, None)


class UsersTest(DbTest):

    def test_base(self):
        app_users.UsersManager.i.create_user("<EMAIL>", "abc")
        self.assertTrue(app_users.UsersManager.i.test_user("<EMAIL>", "abc"))
        self.assertFalse(app_users.UsersManager.i.test_user("<EMAIL>", "xyz"))

    def test_change_password(self):
        id = app_users.UsersManager.i.create_user("<EMAIL>", "abc")
        self.assertTrue(app_users.UsersManager.i.test_user("<EMAIL>", "abc"))
        app_users.UsersManager.i.set_password(id, "xyz")
        self.assertTrue(app_users.UsersManager.i.test_user("<EMAIL>", "xyz"))
        self.assertFalse(app_users.UsersManager.i.test_user("<EMAIL>", "abc"))

    def test_bcrypt_compatibility(self):
        app_users.config["preferred_encryption"] = "bcrypt"
        id = app_users.UsersManager.i.create_user("<EMAIL>", "abc")
        hash_ = app_users.UsersManager.i.read_by_id(id, ["password_hash"])["password_hash"]
        self.assertTrue(hash_.startswith("bcrypt"))
        app_users.config["preferred_encryption"] = "werkzeug"
        self.assertTrue(app_users.UsersManager.i.test_user("<EMAIL>", "abc"))
        self.assertFalse(app_users.UsersManager.i.test_user("<EMAIL>", "xyz"))
        app_users.UsersManager.i.set_password(id, "abc")
        hash_ = app_users.UsersManager.i.read_by_id(id, ["password_hash"])["password_hash"]
        self.assertTrue(hash_.startswith("werkzeug"))
        self.assertTrue(app_users.UsersManager.i.test_user("<EMAIL>", "abc"))
        self.assertFalse(app_users.UsersManager.i.test_user("<EMAIL>", "xyz"))

    def test_unicode(self):
        app_users.UsersManager.i.create_user("<EMAIL>", "abcéèçÔ")
        self.assertTrue(app_users.UsersManager.i.test_user("<EMAIL>", "abcéèçÔ"))
        self.assertFalse(app_users.UsersManager.i.test_user("<EMAIL>", "abcéèçÔx"))

    def test_unicode_bcrypt(self):
        app_users.config["preferred_encryption"] = "bcrypt"
        app_users.UsersManager.i.create_user("<EMAIL>", "abcéèçÔ")
        self.assertTrue(app_users.UsersManager.i.test_user("<EMAIL>", "abcéèçÔ"))
        self.assertFalse(app_users.UsersManager.i.test_user("<EMAIL>", "abcéèçÔx"))

    """
    # not run consistently, just used to tune the turns for pbkdf2
    def test_time(self):
        hash_ = app_users.UsersManager.i._encode_password("<PASSWORD>")
        import time
        start = time.time()
        app_users.UsersManager.i._check_password("test", hash_)
        end = time.time()
        print(end - start)
    """
1.867188
2
examples/job-examples/TwitterSampledStreamerToKafka/TwitterSampledStreamerToKafka.py
prernaagarwal/cs6235Project
3
12791995
from edna.ingest.streaming import TwitterStreamingIngest
from edna.process import BaseProcess
from edna.emit import KafkaEmit
from edna.serializers.EmptySerializer import EmptyStringSerializer
from edna.core.execution.context import SimpleStreamingContext


def main():
    context = SimpleStreamingContext()

    ingest_serializer = EmptyStringSerializer()
    emit_serializer = EmptyStringSerializer()  # twitter already provides byte encoded message

    ingest = TwitterStreamingIngest(
        serializer=ingest_serializer,
        bearer_token=context.getVariable("bearer_token"),
        tweet_fields=context.getVariable("tweet_fields"),
        user_fields=context.getVariable("user_fields"),
        place_fields=context.getVariable("place_fields"),
        media_fields=context.getVariable("media_fields"))
    process = BaseProcess()
    emit = KafkaEmit(
        serializer=emit_serializer,
        kafka_topic=context.getVariable("kafka_topic"),
        bootstrap_server=context.getVariable("bootstrap_server"),
        bootstrap_port=context.getVariable("bootstrap_port"))

    context.addIngest(ingest)
    context.addProcess(process)
    context.addEmit(emit)
    context.execute()


if __name__ == "__main__":
    main()
2.40625
2
WebMirror/management/rss_parser_funcs/feed_parse_extractKendalblackBlogspotCom.py
fake-name/ReadableWebProxy
193
12791996
def extractKendalblackBlogspotCom(item):
    '''
    DISABLED
    Parser for 'kendalblack.blogspot.com'
    '''
    return None
1.46875
1
src/pyobo/sources/mirbase_constants.py
ddomingof/pyobo
0
12791997
<filename>src/pyobo/sources/mirbase_constants.py
# -*- coding: utf-8 -*-

"""Constants for miRBase."""

import pandas as pd

from ..path_utils import ensure_df

PREFIX = 'mirbase'
VERSION = '22.1'
PREFAM_URL = f'ftp://mirbase.org/pub/mirbase/{VERSION}/database_files/mirna_prefam.txt.gz'
PREMATURE_TO_PREFAMILY_URL = f'ftp://mirbase.org/pub/mirbase/{VERSION}/database_files/mirna_2_prefam.txt.gz'
PREMATURE_URL = f'ftp://mirbase.org/pub/mirbase/{VERSION}/database_files/mirna.txt.gz'
MATURE_URL = f'ftp://mirbase.org/pub/mirbase/{VERSION}/database_files/mirna_mature.txt.gz'
PREMATURE_TO_MATURE = f'ftp://mirbase.org/pub/mirbase/{VERSION}/database_files/mirna_pre_mature.txt.gz'


def get_premature_family_df() -> pd.DataFrame:
    """Get premature family dataframe."""
    return ensure_df(
        PREFIX,
        PREFAM_URL,
        version=VERSION,
        names=['prefamily_key', 'family_id', 'family_name'],
        usecols=[0, 1, 2],
        index_col=0,
        dtype=str,
    )


def get_premature_to_prefamily_df() -> pd.DataFrame:
    """Get premature miRNA to premature family dataframe."""
    return ensure_df(
        PREFIX,
        PREMATURE_TO_PREFAMILY_URL,
        version=VERSION,
        names=['premature_key', 'prefamily_key'],
        dtype=str,
    )


def get_premature_df() -> pd.DataFrame:
    """Get premature miRNA dataframe."""
    return ensure_df(
        PREFIX,
        PREMATURE_URL,
        version=VERSION,
        names=['premature_key', 'mirbase_id', 'mirna_name'],
        usecols=[0, 1, 2],
        index_col=0,
        dtype=str,
    )


def get_mature_df() -> pd.DataFrame:
    """Get mature miRNA dataframe."""
    return ensure_df(
        PREFIX,
        MATURE_URL,
        version=VERSION,
        names=[
            'mature_key',
            'name',
            'previous',
            'mirbase.mature_id',
        ],
        usecols=[0, 1, 2, 3],
        index_col=0,
        dtype=str,
    )
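# Usage sketch (not part of the original module). It assumes network access to
# the miRBase FTP mirror and that pyobo's ensure_df caches the download locally.
# Run as `python -m pyobo.sources.mirbase_constants`.
if __name__ == '__main__':
    df = get_premature_family_df()
    # columns family_id and family_name, indexed by prefamily_key
    print(df.head())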
2.46875
2
cursoemvideo/python3_mundo2/aula_15/ex069.py
Tiago-Baptista/CursoEmVideo_Python3
0
12791998
sexo = ''
maior = macho = menor = 0
while True:
    idade = int(input('Qual a sua idade? '))
    sexo = str(input('Qual o seu sexo [M/F]? ')).strip().lower()[0]
    while sexo not in 'mf':
        sexo = str(input('Qual o seu sexo [M/F]? ')).strip().lower()[0]
    if idade >= 18:
        maior += 1
    if sexo == 'm':
        macho += 1
    if sexo == 'f' and idade < 20:
        menor += 1
    print('\033[31mRegisto inserido com sucesso!\033[m')
    cont = str(input('Quer continuar a registar [S/N]? ')).strip().lower()[0]
    while cont not in 'sn':
        cont = str(input('Quer continuar a registar [S/N]? ')).strip().lower()[0]
    if cont == 'n':
        break
print('=' * 50)
print(f'Foram registadas {maior} pessoas maiores de 18 anos.')
print(f'Foram registados {macho} homens.')
print(f'Foram registadas {menor} mulheres com menos de 20 anos.')
3.765625
4
time/baseProgram.py
hy57in/git-auto-commit
0
12791999
import schedule
import threading
import datetime as dt
import subprocess
import time
import os


# Run auto commit
def auto_commit():
    print("Running auto commit")
    subprocess.call(['sh', './continue.sh'])
    subprocess.call(['sh', './TimeAutoCommitProcess.sh'])


# Run auto_commit every n minutes
def time_based_autocommit(num):
    schedule.every(num).minutes.do(auto_commit)
    while 1:
        schedule.run_pending()


# Compute the file creation time
def createtime(file):
    if os.path.isfile(file):
        ctime = os.path.getctime(file)  # get the creation time
        ymd_ctime = dt.datetime.fromtimestamp(ctime)  # convert to a y-m-d datetime
        return ymd_ctime


# Convert the file creation time to a float timestamp, then to an int
def start(filename):
    start_time = createtime(filename)
    start_time_timestamp = int(start_time.timestamp())
    return start_time_timestamp


# Convert the current time to a float timestamp, then to an int
def stop():
    stop_time = dt.datetime.now()
    stop_time_timestamp = int(stop_time.timestamp())
    return stop_time_timestamp


# Compute the remainder (current time - file creation time) % 60n
def remainder(filename, start, stop, n):
    time_remainder = (stop - start(filename)) % (60 * n)
    return time_remainder


# Run autocommit when the remainder reaches 0
def ctime_based_autocommit(filename, start, stop, n):
    print("Trying")  # printed every time the function runs
    print(remainder(filename, start, stop, n))  # print the remainder
    if remainder(filename, start, stop, n) == 0:
        # run auto commit - was commented out at some point because subprocess kept erroring
        subprocess.call(['sh', './addFile.sh', filename])
        subprocess.call(['sh', './TimeAutoCommitProcess.sh'])
        print("Backed up.")


choice = 0
while choice != 8:
    print("Menu")
    print("1. New")
    print("2. Continue")
    print("3. Time backup mode")
    print("4. Create time backup mode")
    print("5. Error Backup mode")
    print("6. Git add file")
    print("7. Git push to branch")
    print("8. Exit")
    choice = int(input(">> "))

    if choice == 1:
        subprocess.call(['sh', './setting.sh'])
        subprocess.call(['sh', './autoCommitProcess.sh'])
    elif choice == 2:
        subprocess.call(['sh', './continue.sh'])
        subprocess.call(['sh', './autoCommitProcess.sh'])
    elif choice == 3:
        subprocess.call(['bash', './killProcess.sh'])
        subprocess.call(['sh', './setting.sh'])
        filename = str(input("Enter your file name : "))
        num = int(input('Enter the minutes you want to set up : '))  # assume the user set the minutes in the GUI
        try:
            print("Trying")
            time_based_autocommit(num)
        except Exception as ex:  # assume the check button was released in the GUI
            print(ex)
    elif choice == 4:
        subprocess.call(['bash', './killProcess.sh'])
        subprocess.call(['sh', './setting.sh'])
        filename = str(input('Enter your file name : '))  # assume the user picked a file in the GUI
        n = int(input('Enter the minutes you want to set up : '))  # assume the user set the minutes to n in the GUI
        while True:
            try:
                print("Trying")
                # auto commit every n minutes from the file creation time, in the background
                ctime_based_autocommit(filename, start, stop(), n)
            except Exception as ex:  # assume the check button was released in the GUI
                print(ex)
            # if :  # assume the check button was released in the GUI
            #     print("Button released 2")
            #     break
    elif choice == 5:
        path = "./code/"
        file_list = os.listdir(path)
        py_list = [file for file in file_list if file.endswith(".py")]
        # c_list = [file for file in file_list if file.endswith(".c")]
        # java_list = [file for file in file_list if file.endswith(".java")]
        for i in range(len(py_list)):
            try:
                subprocess.check_output(['python', path + py_list[0]], universal_newlines=True)
            except Exception as ex:
                branch = str("error")
                msg = str(ex)
                subprocess.call(['sh', './continue.sh'])
                subprocess.call(['sh', './autoCommitProcess.sh'])
    elif choice == 6:
        subprocess.call(['bash', './killProcess.sh'])
        filename = str(input("What file to add?(file_name) "))
        subprocess.call(['sh', './addFile.sh', filename])
        subprocess.call(['sh', './continue.sh'])
        subprocess.call(['sh', './autoCommitProcess.sh'])
    elif choice == 7:
        subprocess.call(['bash', './killProcess.sh'])
        branch = str(input("Where to push?(branch_name) "))
        msg = str(input("Write commit message: "))
        subprocess.call(['sh', './userCommit.sh', branch, msg])
        subprocess.call(['sh', './continue.sh'])
        subprocess.call(['sh', './autoCommitProcess.sh'])
    elif choice == 8:
        subprocess.call(['bash', './killProcess.sh'])
    else:
        print("Wrong Input! Please input again")
2.703125
3
ScrapedData/CosineScore.py
Robo-Sapien/Search-Engine-for-arXiv.org
0
12792000
<reponame>Robo-Sapien/Search-Engine-for-arXiv.org
from scipy import spatial
import numpy


class CosineScore:
    """Calculate cosine similarity score for each document and rank them

    :param query: query vector
    :param matrix: tf-idf numpy matrix
    """
    rank = None
    docIndex = None
    score = None

    def __init__(self, query, matrix):
        """Constructor which calculates the cosine similarity score for each document"""
        self.rank = []
        self.docIndex = []
        self.score = []
        for j in range(matrix.shape[1]):
            column = matrix[:, j]
            self.docIndex.append(j)
            self.score.append(1 - spatial.distance.cosine(column, query))
        self.rank = list(reversed([x for _, x in sorted(zip(self.score, self.docIndex))]))

    def getPages(self, number):
        """To get the indices of the documents between the given ranks

        :param number: maximum number of ranked documents to return.
        :return: list of document indices for the top ranks, skipping zero scores.
        """
        rankList = []
        if number > len(self.rank):
            min = len(self.rank)
        else:
            min = number
        for index in range(min):
            if self.score[self.rank[index]] > 0:
                rankList.append(self.rank[index])
        return rankList


# def main():
#     query = [0, 1, 0]
#     matrix = numpy.array([[2, 1, 3], [4, 3, 5], [6, 5, 7]])
#     obj = CosineScore(query, matrix)
#     print(obj.getPages(10))
#
# if __name__ == '__main__':
#     main()
2.671875
3
script/get_weather.py
alaurie/Python-for-PowerShell-Users-Talk
0
12792001
#! /usr/bin/env python

import argparse

import requests

arg_parser = argparse.ArgumentParser(
    prog="get-weather", description="Get weather for entered city."
)
arg_parser.add_argument(
    "city", metavar="my_city", type=str, help="City for which you want to get weather."
)


def get_city_weather(search_city):
    api_key = "2fe992c00735713d86f2d6577ff41a3d"
    url = f"http://api.openweathermap.org/data/2.5/weather?appid={api_key}&q="
    response = requests.get(url + search_city)
    return response.json()


if __name__ == "__main__":
    args = arg_parser.parse_args()
    try:
        weather = get_city_weather(args.city)
        print(f"The weather in {args.city}: {weather['weather'][0]['description']}")
    except KeyError:
        print("City not found.")
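# Usage sketch (not part of the original script): run it from a shell.
#
#     $ python get_weather.py London
#     The weather in London: overcast clouds
#
# The exact description depends on live OpenWeatherMap data for the city.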
3.8125
4
main/migrations/0004_auto_20200614_1547.py
pcp11/link-lizard
0
12792002
# Generated by Django 3.0.7 on 2020-06-14 15:47

import django.core.validators
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0003_auto_20200614_1506'),
    ]

    operations = [
        migrations.AlterField(
            model_name='urlmapping',
            name='original_url',
            field=models.TextField(validators=[django.core.validators.URLValidator]),
        ),
    ]
1.648438
2
dqn/environment.py
Alexanderli0816/DQN
1
12792003
<filename>dqn/environment.py<gh_stars>1-10
import gym

from .utils import rgb2gray, imresize


class Environment(object):
    def __init__(self, config):
        self.env = gym.make(config.env_name)

        screen_width, screen_height = config.screen_width, config.screen_height
        self.display = config.display
        self.dims = (screen_width, screen_height)

        self._screen = self.env.reset()
        self.reward = 0
        self.terminal = True

    def new_game(self):
        self._screen = self.env.reset()
        self.render()
        return self.screen, 0, 0, self.terminal

    def step(self, action):
        self._screen, self.reward, self.terminal, _ = self.env.step(action)

    def random_step(self):
        action = self.env.action_space.sample()
        self.step(action)

    @property
    def screen(self):
        return imresize(rgb2gray(self._screen) / 255., self.dims)
        # return cv2.resize(cv2.cvtColor(self._screen, cv2.COLOR_BGR2YCR_CB)/255., self.dims)[:,:,0]

    @property
    def action_size(self):
        return self.env.action_space.n

    @property
    def lives(self):
        return self.env.ale.lives()

    @property
    def state(self):
        return self.screen, self.reward, self.terminal

    def render(self):
        if self.display:
            self.env.render()


class GymEnvironment(Environment):
    def __init__(self, config):
        super(GymEnvironment, self).__init__(config)
        self.config = config

    def act(self, action):
        accumulated_reward = 0
        start_lives = self.lives

        self.step(action)

        if start_lives > self.lives and self.terminal:
            accumulated_reward -= self.config.max_punishment

        self.render()
        return self.state
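# Usage sketch (not part of the original file). The config class below is
# hypothetical; any object exposing these attributes works. Because of the
# relative import, run as `python -m dqn.environment`.
if __name__ == '__main__':
    class _Config:
        env_name = 'Breakout-v0'
        screen_width, screen_height = 84, 84
        display = False
        max_punishment = 1

    env = GymEnvironment(_Config())
    env.new_game()
    screen, reward, terminal = env.act(env.env.action_space.sample())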
2.765625
3
generate_secretkey.py
Saknowman/django_setting_sample_project
0
12792004
from django.core.management.utils import get_random_secret_key

secret_key = get_random_secret_key()
text = 'SECRET_KEY = \'{0}\''.format(secret_key)
print(text)
2
2
strongr/core/exception/__init__.py
bigr-erasmusmc/StrongR
0
12792005
<reponame>bigr-erasmusmc/StrongR
from .isnotcallableexception import IsNotCallableException
from .invalidparameterexception import InvalidParameterException
1.078125
1
nlp/nlp.py
stevencdang/data_proc_lib
0
12792006
<reponame>stevencdang/data_proc_lib<filename>nlp/nlp.py
#!/usr/bin/env python
# Author: <NAME> <EMAIL>

import nltk
from nltk.corpus import wordnet as wn
import numpy as np

from file_manager import read_data


def cosine(doc1, doc2, doc_topic_weights):
    weights1 = doc_topic_weights[doc1]
    weights2 = doc_topic_weights[doc2]
    dotProduct = np.dot(weights1, weights2)
    mag1 = np.sqrt(sum([np.square(weight) for weight in weights1]))
    mag2 = np.sqrt(sum([np.square(weight) for weight in weights2]))
    if mag1 and mag2:
        return dotProduct / (mag1 * mag2)
    else:
        return 0.0


def get_wordnet_pos(treebank_tag):
    """
    helper method to convert treebank tags
    into wordnet pos tags for query expansion
    """
    if treebank_tag.startswith('J'):
        return wn.ADJ
    elif treebank_tag.startswith('V'):
        return wn.VERB
    elif treebank_tag.startswith('N'):
        return wn.NOUN
    elif treebank_tag.startswith('R'):
        return wn.ADV
    else:
        return ''


def expand_text(pos_tokens):
    """
    interface with wordnet to recursively add all synonyms and hypernyms
    for each token in input list of token-posTag tuples
    return expanded list of tokens that includes synonyms and hypernyms
    """
    # first expand with synonyms
    synonyms = set()
    for item in pos_tokens:
        synsets = wn.synsets(item[0], get_wordnet_pos(item[1]))
        for synset in synsets:
            synonyms.add(synset)

    # start making the list of tokens to be output
    # initialize with lemmas of the synonyms
    bowTokens = set([t[0] for t in pos_tokens])
    for synonym in synonyms:
        for lemma in synonym.lemmas():
            bowTokens.add(lemma.name())

    # now recursively add hypernyms
    nextStack = set(synonyms)  # initialize stack
    while(len(nextStack)):
        currentStack = set(nextStack)
        nextStack.clear()
        # get all hypernyms, put in nextStack
        for s in currentStack:
            for hypernym in s.hypernyms():
                nextStack.add(hypernym)
        # add lemmas from the current level of hypernyms to the master bag of tokens
        for hypernym in nextStack:
            for lemma in hypernym.lemmas():
                bowTokens.add(lemma.name())
    return sorted(list(bowTokens))


def get_stopwords():
    """
    Returns a list of stop words.
    Currently uses a list of words in a text file
    """
    return read_data("englishstopwords-jc.txt")


def bag_of_words(ideas, stopwords):
    """
    Initial processing of ideas for Mike Terri's Ideaforest algorithm
    """
    expandedText = []
    data = []
    for idea in ideas:
        # read the text
        text = idea['content'].encode('utf-8', 'ignore')
        # split into sentences (PunktSentenceTokenizer)
        sentences = nltk.sent_tokenize(text)
        # tokenize and pos tag words (TreeBank)
        pos_tokens = []
        for sentence in sentences:
            tokens = [token.lower() for token in nltk.word_tokenize(sentence)]  # tokenize
            pos_tokens += nltk.pos_tag(tokens)  # pos tag
        # remove stopwords
        pos_tokens = [t for t in pos_tokens if t[0] not in stopwords]
        # remove "words" with no letters in them!
        pos_tokens = [t for t in pos_tokens if any(c.isalpha() for c in t[0])]
        # query expansion
        expandedTokens = expand_text(pos_tokens)
        # add the enriched bag of words as value to current d
        expandedText.append(expandedTokens)
        data.append(idea)
    return data, expandedText
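# Usage sketch (not part of the original file). Requires the NLTK 'wordnet',
# 'punkt', and 'averaged_perceptron_tagger' data packages to be downloaded.
if __name__ == '__main__':
    # expands each (token, treebank-tag) pair with WordNet synonyms and hypernyms
    print(expand_text([('dog', 'NN'), ('run', 'VB')]))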
2.765625
3
vnpy/app/algo_trading/__init__.py
Billy-Meng/vnpy_origin
1
12792007
<filename>vnpy/app/algo_trading/__init__.py
# -*- coding:utf-8 -*-
import sys

import vnpy_algotrading

sys.modules[__name__] = vnpy_algotrading
1.351563
1
language/python/python/inner_functions/print_demo.py
bigfoolliu/liu_aistuff
1
12792008
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu

"""
Demonstrations of the print function.
"""

import time


def end_demo():
    """Control print's line ending; flush forces an immediate write."""
    for _ in range(100):
        print("#", end="", flush=True)
        time.sleep(0.01)
    print()


def progress_demo():
    """Use \r to show progress on a single line."""
    days = 365
    for i in range(days):
        print("\r", "progress:{}%".format(round((i + 1) * 100 / days)), end="", flush=True)
        time.sleep(0.01)
    print()


def sep_demo():
    """Use the sep parameter to join the arguments with a given separator."""
    print("name", "age", "score", sep=" | ")


def sysout_demo():
    """Redirect print's output to a file instead of the default screen."""
    f = open("print_demo.log", "w")
    print("hello, this is print demo.", file=f)
    print("hello, this is print demo again.", file=f)
    f.close()


if __name__ == "__main__":
    # end_demo()
    # progress_demo()
    sep_demo()
    sysout_demo()
3.234375
3
dj_prosftpd/views.py
devopsmakers/dj-prosftpd
0
12792009
<gh_stars>0
# -*- coding: utf-8 -*-
from django.views.generic import (
    CreateView,
    DeleteView,
    DetailView,
    UpdateView,
    ListView
)

from .models import (
    SFTPUser,
    SFTPUserKey,
    FileHistory,
    FileValidator,
)


class SFTPUserCreateView(CreateView):
    model = SFTPUser


class SFTPUserDeleteView(DeleteView):
    model = SFTPUser


class SFTPUserDetailView(DetailView):
    model = SFTPUser


class SFTPUserUpdateView(UpdateView):
    model = SFTPUser


class SFTPUserListView(ListView):
    model = SFTPUser


class SFTPUserKeyCreateView(CreateView):
    model = SFTPUserKey


class SFTPUserKeyDeleteView(DeleteView):
    model = SFTPUserKey


class SFTPUserKeyDetailView(DetailView):
    model = SFTPUserKey


class SFTPUserKeyUpdateView(UpdateView):
    model = SFTPUserKey


class SFTPUserKeyListView(ListView):
    model = SFTPUserKey


class FileHistoryCreateView(CreateView):
    model = FileHistory


class FileHistoryDeleteView(DeleteView):
    model = FileHistory


class FileHistoryDetailView(DetailView):
    model = FileHistory


class FileHistoryUpdateView(UpdateView):
    model = FileHistory


class FileHistoryListView(ListView):
    model = FileHistory


class FileValidatorCreateView(CreateView):
    model = FileValidator


class FileValidatorDeleteView(DeleteView):
    model = FileValidator


class FileValidatorDetailView(DetailView):
    model = FileValidator


class FileValidatorUpdateView(UpdateView):
    model = FileValidator


class FileValidatorListView(ListView):
    model = FileValidator
2.0625
2
sympycore/heads/term_coeff_dict.py
radovankavicky/pymaclab
96
12792010
<filename>sympycore/heads/term_coeff_dict.py<gh_stars>10-100

__all__ = ['TERM_COEFF_DICT']

from .base import heads, heads_precedence, ArithmeticHead, Pair

from ..core import init_module, Expr
init_module.import_heads()
init_module.import_numbers()
init_module.import_lowlevel_operations()

@init_module
def _init(module):
    from ..arithmetic.number_theory import multinomial_coefficients
    module.multinomial_coefficients = multinomial_coefficients

class TermCoeffDictHead(ArithmeticHead):

    def is_data_ok(self, cls, data):
        if type(data) is dict:
            n = len(data)
            #if n<=1:
            #    return 'data dictonary should have more than 1 item'
            for item in data.iteritems():
                msg = TERM_COEFF.is_data_ok(cls, item, allow_number_term=True)
                if msg:
                    return 'TERM_COEFF data=%s: %s' % (item, msg) #pragma: no cover
        else:
            return 'data must be dict instance but got %s' % (type(data)) #pragma: no cover
        return

    def __repr__(self):
        return 'TERM_COEFF_DICT'

    def data_to_str_and_precedence(self, cls, term_coeff_dict):
        r = [cls(TERM_COEFF, tc) for tc in term_coeff_dict.items()]
        return ADD.data_to_str_and_precedence(cls, r)

    def new(self, cls, data, evaluate=True):
        return term_coeff_dict_new(cls, data)

    def reevaluate(self, cls, data):
        r = cls(NUMBER, 0)
        for term, coeff in data.iteritems():
            r += term * coeff
        return r

    def to_ADD(self, Algebra, data, expr):
        return add_new(Algebra, [term_coeff_new(Algebra, term_coeff) for term_coeff in data.iteritems()])

    def to_EXP_COEFF_DICT(self, cls, data, expr, variables=None):
        if variables is None:
            r = cls(EXP_COEFF_DICT, Pair((), {}))
        else:
            r = cls(EXP_COEFF_DICT, Pair(variables, {}))
        for term, coeff in data.iteritems():
            r += term * coeff
        return r

    def term_coeff(self, cls, expr):
        term_coeff_dict = expr.data
        if len(term_coeff_dict)==1:
            return dict_get_item(term_coeff_dict)
        return expr, 1

    def inplace_add(self, cls, lhs, rhs):
        if lhs.is_writable:
            data = lhs.data
        else:
            data = lhs.data.copy()
        self.add(cls, data, rhs, inplace=True)
        return term_coeff_dict_new(cls, data)

    def add(self, cls, lhs, rhs, inplace=False):
        if inplace:
            data = lhs
        else:
            data = lhs.data.copy()
        h2, d2 = rhs.pair
        if h2 is NUMBER:
            if d2 != 0:
                dict_add_item(cls, data, cls(NUMBER, 1), d2)
        elif h2 is SYMBOL:
            dict_add_item(cls, data, rhs, 1)
        elif h2 is TERM_COEFF:
            term, coeff = d2
            dict_add_item(cls, data, term, coeff)
        elif h2 is TERM_COEFF_DICT:
            dict_add_dict(cls, data, d2)
        elif h2 is ADD:
            for op in d2:
                term, coeff = op.term_coeff()
                dict_add_item(cls, data, term, coeff)
        elif h2 is SUB or h2 is NEG or h2 is POS:
            raise NotImplementedError(`self, rhs.pair`)
        elif h2 is BASE_EXP_DICT:
            c = base_exp_dict_get_coefficient(cls, d2)
            if c is not None:
                d = d2.copy()
                del d[c]
                t = BASE_EXP_DICT.new(cls, d)
                if t.head is BASE_EXP_DICT:
                    dict_add_item(cls, data, t, c)
                else:
                    self.add(cls, data, t * c, inplace=True)
            else:
                dict_add_item(cls, data, rhs, 1)
        else:
            dict_add_item(cls, data, rhs, 1)
        if inplace:
            return lhs
        return term_coeff_dict_new(cls, data)

    def add_number(self, cls, lhs, rhs):
        if rhs==0:
            return lhs
        data = lhs.data.copy()
        term_coeff_dict_add_item(cls, data, cls(NUMBER, 1), rhs)
        return term_coeff_dict_new(cls, data)

    def sub_number(self, cls, lhs, rhs):
        if rhs==0:
            return lhs
        data = lhs.data.copy()
        term_coeff_dict_add_item(cls, data, cls(NUMBER, 1), -rhs)
        return term_coeff_dict_new(cls, data)

    def sub(self, cls, lhs, rhs):
        d = lhs.data.copy()
        self.add(cls, d, -rhs, inplace=True)
        return term_coeff_dict_new(cls, d)

    def commutative_mul(self, cls, lhs, rhs):
        rhead, rdata = rhs.pair
        if rhead is NUMBER:
            if rdata==0:
                return rhs
            data = lhs.data.copy()
            dict_mul_value(cls, data, rdata)
            return term_coeff_dict_new(cls, data)
        if rhead is TERM_COEFF:
            term, coeff = rdata
            return (lhs * term) * coeff
        if rhead is POW:
            base, exp = rdata
            if lhs==base:
                return POW.new(cls, (lhs, exp + 1))
            return cls(BASE_EXP_DICT, {lhs:1, base:exp})
        if rhead is SYMBOL or rhead is APPLY:
            return cls(BASE_EXP_DICT, {lhs:1, rhs:1})
        if rhead is TERM_COEFF_DICT:
            if rdata==lhs.data:
                return cls(POW, (lhs, 2))
            return cls(BASE_EXP_DICT, {lhs:1, rhs:1})
        if rhead is BASE_EXP_DICT:
            data = rdata.copy()
            dict_add_item(cls, data, lhs, 1)
            return BASE_EXP_DICT.new(cls, data)
        if rhead is ADD:
            return cls(BASE_EXP_DICT, {lhs:1, rhs:1})
        return ArithmeticHead.commutative_mul(self, cls, lhs, rhs)

    def commutative_mul_number(self, cls, lhs, rhs):
        if rhs==0:
            return cls(NUMBER, 0)
        data = lhs.data.copy()
        dict_mul_value(cls, data, rhs)
        return cls(self, data)

    non_commutative_mul_number = commutative_mul_number

    def commutative_rdiv_number(self, cls, lhs, rhs):
        return term_coeff_new(cls, (cls(POW, (lhs, -1)), rhs))

    def commutative_div(self, cls, lhs, rhs):
        rhead, rdata = rhs.pair
        if rhead is NUMBER:
            return self.commutative_div_number(cls, lhs, rdata)
        if rhead is TERM_COEFF_DICT:
            if lhs.data == rdata:
                return cls(NUMBER, 1)
            return cls(BASE_EXP_DICT, {lhs:1, rhs:-1})
        if rhead is SYMBOL or rhead is APPLY:
            return cls(BASE_EXP_DICT, {lhs:1, rhs:-1})
        if rhead is TERM_COEFF:
            term, coeff = rdata
            return number_div(cls, 1, coeff) * (lhs / term)
        if rhead is POW:
            base, exp = rdata
            if lhs==base:
                return pow_new(cls, (lhs, 1-exp))
            return cls(BASE_EXP_DICT, {lhs:1, base:-exp})
        if rhead is BASE_EXP_DICT:
            data = {lhs:1}
            for base, exp in rdata.iteritems():
                base_exp_dict_add_item(cls, data, base, -exp)
            return base_exp_dict_new(cls, data)
        return ArithmeticHead.commutative_div(self, cls, lhs, rhs)

    def pow(self, cls, base, exp):
        if exp==0:
            return cls(NUMBER, 1)
        if exp==1:
            return base
        d = base.data
        if len(d)==1:
            t, c = dict_get_item(d)
            t, c = t**exp, c**exp
            if t==1:
                return cls(NUMBER, c)
            if c==1:
                return t
            return cls(TERM_COEFF, (t, c))
        return POW.new(cls, (base, exp))

    pow_number = pow

    def neg(self, cls, expr):
        d = expr.data.copy()
        for key in d:
            d[key] = -d[key]
        return cls(TERM_COEFF_DICT, d)

    def expand(self, cls, expr):
        d = {}
        for t, c in expr.data.items():
            self.add(cls, d, t.expand() * c, inplace=True)
        return term_coeff_dict_new(cls, d)

    def expand_intpow(self, cls, expr, intexp):
        if intexp<0:
            return cls(POW, (expr, intexp))
        if intexp==0:
            return cls(NUMBER, 1)
        if intexp==1:
            return expr
        term_coeff_list = [(term.base_exp(), coeff) for term, coeff in expr.data.items()]
        mdata = multinomial_coefficients(len(term_coeff_list), intexp)
        d = {}
        for e, c in mdata.iteritems():
            new_coeff = c
            df = {}
            for e_i, ((base, exp), coeff) in zip(e, term_coeff_list):
                if e_i:
                    if e_i==1:
                        base_exp_dict_add_item(cls, df, base, exp)
                        if coeff is not 1:
                            new_coeff *= coeff
                    else:
                        base_exp_dict_add_item(cls, df, base, exp*e_i)
                        if coeff is not 1:
                            new_coeff *= coeff ** e_i
            new_term = base_exp_dict_new(cls, df)
            term_coeff_dict_add_item(cls, d, new_term, new_coeff)
        return term_coeff_dict_new(cls, d)

    def walk(self, func, cls, data, target):
        d = {}
        flag = False
        add = self.add
        for t, c in data.iteritems():
            t1 = t.head.walk(func, cls, t.data, t)
            if isinstance(c, Expr):
                c1 = c.head.walk(func, cls, c.data, c)
            else:
                c1 = NUMBER.walk(func, cls, c, c)
            if t1 is not t or c1 is not c:
                flag = True
            add(cls, d, t1 * c1, inplace=True)
        if flag:
            r = term_coeff_dict_new(cls, d)
            return func(cls, r.head, r.data, r)
        return func(cls, self, data, target)

    def scan(self, proc, cls, data, target):
        for t, c in data.iteritems():
            t.head.scan(proc, cls, t.data, target)
            if isinstance(c, Expr):
                c.head.scan(proc, cls, c.data, target)
            else:
                NUMBER.scan(proc, cls, c, target)
        proc(cls, self, data, target)

    def diff(self, cls, data, expr, symbol, order, cache={}):
        key = (expr, symbol, order)
        result = cache.get(key)
        if result is not None:
            return result
        if result is None:
            d = {}
            result = cls(NUMBER, 0)
            for term, coeff in data.iteritems():
                result += term.head.diff(cls, term.data, term, symbol, order, cache=cache) * coeff
            key1 = (expr, symbol, 1)
            cache[key] = result
        return result

    def apply(self, cls, data, func, args):
        result = cls(NUMBER, 0)
        for term, coeff in data.iteritems():
            result += term.head.apply(cls, term.data, term, args) * coeff
        return result

    def integrate_indefinite(self, cls, data, expr, x):
        result = cls(NUMBER, 0)
        for term, coeff in data.iteritems():
            result += term.head.integrate_indefinite(cls, term.data, term, x) * coeff
        return result

    def integrate_definite(self, cls, data, expr, x, a, b):
        result = cls(NUMBER, 0)
        for term, coeff in data.iteritems():
            result += term.head.integrate_definite(cls, term.data, term, x, a, b) * coeff
        return result

    def algebra_pos(self, Algebra, expr):
        return expr

    def algebra_neg(self, Algebra, expr):
        if Algebra.algebra_options.get('evaluate_addition'):
            d = expr.data.copy()
            for key in d:
                d[key] = -d[key]
            return Algebra(TERM_COEFF_DICT, d)
        return Algebra(NEG, expr)

    def algebra_add_number(self, Algebra, lhs, rhs, inplace):
        if not rhs:
            return lhs
        if inplace:
            term_coeff_dict_add_item(Algebra, lhs.data, Algebra(NUMBER, 1), rhs)
            return term_coeff_dict(Algebra, lhs)
        d = lhs.data.copy()
        term_coeff_dict_add_item(Algebra, d, Algebra(NUMBER, 1), rhs)
        return term_coeff_dict_new(Algebra, d)

    def algebra_add(self, Algebra, lhs, rhs, inplace):
        rhead, rdata = rhs.pair
        if Algebra.algebra_options.get('evaluate_addition'):
            ldata = lhs.data
            if rhead is ADD or rhead is EXP_COEFF_DICT or rhead is MUL or rhead is NEG:
                rhs = rhead.to_TERM_COEFF_DICT(Algebra, rdata, rhs)
                rhead, rdata = rhs.pair
            if rhead is NUMBER:
                if not rdata:
                    return lhs
                rterm, rcoeff = Algebra(NUMBER, 1), rdata
            elif rhead is SYMBOL:
                rterm, rcoeff = rhs, 1
            elif rhead is TERM_COEFF:
                rterm, rcoeff = rdata
            elif rhead is TERM_COEFF_DICT:
                if inplace:
                    term_coeff_dict_add_dict(Algebra, ldata, rdata)
                    return term_coeff_dict(Algebra, lhs)
                d = ldata.copy()
                term_coeff_dict_add_dict(Algebra, d, rdata)
                return term_coeff_dict_new(Algebra, d)
            else:
                return super(type(self), self).algebra_add(Algebra, lhs, rhs, inplace)
            if inplace:
                term_coeff_dict_add_item(Algebra, ldata, rterm, rcoeff)
                return term_coeff_dict(Algebra, lhs)
            d = ldata.copy()
            term_coeff_dict_add_item(Algebra, d, rterm, rcoeff)
            return term_coeff_dict_new(Algebra, d)
        else:
            return TERM_COEFF_DICT.to_ADD(Algebra, lhs.data, lhs) + rhs
        return super(type(self), self).algebra_add(Algebra, lhs, rhs, inplace)

    def algebra_mul_number(self, Algebra, lhs, rhs, inplace):
        if Algebra.algebra_options.get('evaluate_addition'):
            if not rhs:
                return Algebra(NUMBER, 0)
            if rhs==1:
                return lhs
            if inplace:
                term_coeff_dict_mul_value(Algebra, lhs.data, rhs)
                return lhs
            d = lhs.data.copy()
            term_coeff_dict_mul_value(Algebra, d, rhs)
            return Algebra(TERM_COEFF_DICT, d)
        return Algebra(MUL, [lhs, Algebra(NUMBER, rhs)])

    def algebra_mul(self, Algebra, lhs, rhs, inplace):
        rhead, rdata = rhs.pair
        if rhead is NUMBER:
            return self.algebra_mul_number(Algebra, lhs, rdata, inplace)
        return super(type(self), self).algebra_mul(Algebra, lhs, rhs, inplace)

    def algebra_div_number(self, Algebra, lhs, rhs, inplace):
        if Algebra.algebra_options.get('evaluate_addition'):
            if rhs==1:
                return lhs
            d1 = gcd(*lhs.data.values())
            d2 = gcd(d1, rhs)
            d3 = rhs / d2
            d = {}
            rd = 0
            for t, c in lhs.data.items():
                c /= d2
                q, c = divmod(c, d3)
                if c:
                    d[t] = c
                rd += q
            s = term_coeff_dict_new(Algebra, d)
            if rhs==d2:
                assert rd==0,`lhs, rhs, s, rd`
                return s
            return Algebra(DIV, [s, Algebra(NUMBER, d3)]) + rd
        return Algebra(DIV, [lhs, Algebra(NUMBER, rhs)])

    def algebra_div(self, Algebra, lhs, rhs, inplace):
        rhead, rdata = rhs.pair
        if rhead is NUMBER:
            return self.algebra_div_number(Algebra, lhs, rdata, inplace)
        return super(type(self), self).algebra_div(Algebra, lhs, rhs, inplace)

TERM_COEFF_DICT = TermCoeffDictHead()
2.328125
2
API/utils/more_utils/fortnite_utils.py
OreoDivision/Stat-Tracker
1
12792011
<gh_stars>1-10
import requests
from cache import AsyncTTL

'''
----- Get fortnite players stats -----
name: the name of the user
platform: the platform they are on (epic / psn / xbl)
'''


@AsyncTTL(time_to_live=120, maxsize=1024)
async def get_player_stats(name, platform):
    # Sends a request for the data
    data = requests.get(f"https://fortnite-api.com/v1/stats/br/v2?name={name}&accountType={platform}&image=all").json()
    # Returns it
    return data
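# Usage sketch (not part of the original file). Assumes network access to
# fortnite-api.com and that the AsyncTTL-wrapped coroutine can be awaited
# directly; the player name is made up.
if __name__ == '__main__':
    import asyncio

    stats = asyncio.run(get_player_stats("SomePlayer", "epic"))
    print(stats.get("status"))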
2.46875
2
sgh_PiGlow.py
davidramirezm30/scratch-orangepi
0
12792012
<reponame>davidramirezm30/scratch-orangepi
#!/usr/bin/env python
# sgh_PiGlow - control PiGlow LEDS via ScratchGPIO.

#Copyright (C) 2013 by <NAME> based on code from Pimoroni

#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.

#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#GNU General Public License for more details.

#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.

Version = '0.0.3' # 1Jan13

#try and import smbus but don't worry if not installed
import subprocess
try:
    import smbus
except:
    pass

CMD_ENABLE_OUTPUT = 0x00
CMD_ENABLE_LEDS = 0x13
CMD_SET_PWM_VALUES = 0x01
CMD_UPDATE = 0x16


class PiGlow:
    i2c_addr = 0x54  # fixed i2c address of SN3218 ic
    bus = None

    def __init__(self, i2c_bus=1):
        print "PiGlow init"
        self.i2c_bus = i2c_bus
        print "i2cbus:", self.i2c_bus
        #self.bus = smbus.SMBus(i2c_bus)
        self.bus = smbus.SMBus(i2c_bus)
        print "self.bus:", self.bus
        self.enable_output()
        print "enabled output"
        self.enable_leds()
        print "enabled leds "
        print "complete"

    def enable_output(self):
        self.write_i2c(CMD_ENABLE_OUTPUT, 0x01)

    def enable_leds(self):
        self.write_i2c(CMD_ENABLE_LEDS, [0xFF, 0xFF, 0xFF])

    def update_pwm_values(self, values=[0] * 18):
        self.write_i2c(CMD_SET_PWM_VALUES, values)
        self.write_i2c(CMD_UPDATE, 0xFF)
        #print "update piglow pwm done"

    def write_i2c(self, reg_addr, value):
        if not isinstance(value, list):
            value = [value]
        try:
            self.bus.write_i2c_block_data(self.i2c_addr, reg_addr, value)
        except IOError:
            subprocess.call(['i2cdetect', '-y', '0'])
            self.bus.write_i2c_block_data(self.i2c_addr, reg_addr, value)

#### end PiGlow ###############################################################
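# Usage sketch (not part of the original file). Assumes python-smbus is
# installed and an SN3218 is actually present on the chosen i2c bus:
#
#     piglow = PiGlow(i2c_bus=1)
#     piglow.update_pwm_values([32] * 18)   # all 18 LEDs at low brightness
#     piglow.update_pwm_values()            # all LEDs off again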
2.8125
3
web/src/auth.py
computer-geek64/guardian
0
12792013
<filename>web/src/auth.py
# auth.py

import os
import json
import hashlib

authentication_credentials = json.loads(os.environ['AUTHENTICATION_CREDENTIALS'])


def authenticate(username, password):
    if username is None or password is None:
        return False
    password_hash = hashlib.sha512(password.encode()).hexdigest()
    return username in authentication_credentials and password_hash == authentication_credentials[username]
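# Usage sketch (not part of the original file). AUTHENTICATION_CREDENTIALS is
# expected to be a JSON object mapping usernames to SHA-512 hex digests, e.g.:
#
#     import hashlib, json, os
#     os.environ['AUTHENTICATION_CREDENTIALS'] = json.dumps(
#         {'alice': hashlib.sha512(b's3cret').hexdigest()})
#     import auth
#     assert auth.authenticate('alice', 's3cret')
#     assert not auth.authenticate('alice', 'wrong')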
2.84375
3
src/Node.py
stevenwalton/Retro-Learner
0
12792014
import numpy as np


class Node:
    def __init__(self, value=-np.inf, children=None):
        self.value = value
        self.visits = 0
        self.children = {} if children is None else children

    def __repr__(self):
        return "<Node value=%f visits=%d len(children)=%d>" % (
            self.value,
            self.visits,
            len(self.children),
        )
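# Usage sketch (not part of the original file): a tiny two-node tree.
if __name__ == '__main__':
    root = Node(value=1.0)
    root.children['left'] = Node(value=0.5)
    root.visits += 1
    print(root)  # <Node value=1.000000 visits=1 len(children)=1>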
2.90625
3
tests/test_meme.py
Sung-Huan/ANNOgesic
26
12792015
<filename>tests/test_meme.py
import sys
import os
import unittest
import shutil
from io import StringIO
sys.path.append(".")
import annogesiclib.meme as me
from mock_helper import gen_file
from mock_args_container import MockClass
from annogesiclib.meme import MEME


class Mock_func(object):

    def mock_del_repeat_fasta(self, tmp_fasta, all_no_orph):
        with open("tmp/all_type.fa", "w") as fh:
            fh.write("all")
        with open("tmp/without_orphan.fa", "w") as fh:
            fh.write("without_orphan")


class TestMEME(unittest.TestCase):

    def setUp(self):
        self.mock_args = MockClass()
        self.test_folder = "test_folder"
        self.out_folder = "test_folder/output"
        if (not os.path.exists(self.test_folder)):
            os.mkdir(self.test_folder)
            os.mkdir(self.out_folder)
            os.mkdir(os.path.join(self.out_folder, "fasta_output"))
        self.tss_folder = os.path.join(self.test_folder, "tss_folder")
        if (not os.path.exists(self.tss_folder)):
            os.mkdir(self.tss_folder)
        self.gff_folder = os.path.join(self.test_folder, "gff_folder")
        if (not os.path.exists(self.gff_folder)):
            os.mkdir(self.gff_folder)
        self.fa_folder = os.path.join(self.test_folder, "fa_folder")
        if (not os.path.exists(self.fa_folder)):
            os.mkdir(self.fa_folder)
        args = self.mock_args.mock()
        args.tsss = self.tss_folder
        args.fastas = self.fa_folder
        args.gffs = self.gff_folder
        args.output_folder = self.out_folder
        self.meme = MEME(args)

    def tearDown(self):
        if os.path.exists(self.test_folder):
            shutil.rmtree(self.test_folder)

    def test_move_and_merge_fasta(self):
        me.del_repeat_fasta = Mock_func().mock_del_repeat_fasta
        if (not os.path.exists("tmp")):
            os.mkdir("tmp")
        gen_file("tmp/primary.fa", "primary")
        gen_file("tmp/secondary.fa", "secondary")
        gen_file("tmp/internal.fa", "internal")
        gen_file("tmp/antisense.fa", "antisense")
        gen_file("tmp/orphan.fa", "orphan")
        self.meme._move_and_merge_fasta(self.test_folder, "test")
        self.assertTrue(os.path.exists(os.path.join(
            self.test_folder, "test_allgenome_all_types.fa")))
        self.assertTrue(os.path.exists(os.path.join(
            self.test_folder, "test_allgenome_primary.fa")))
        self.assertTrue(os.path.exists(os.path.join(
            self.test_folder, "test_allgenome_secondary.fa")))
        self.assertTrue(os.path.exists(os.path.join(
            self.test_folder, "test_allgenome_internal.fa")))
        self.assertTrue(os.path.exists(os.path.join(
            self.test_folder, "test_allgenome_antisense.fa")))
        self.assertTrue(os.path.exists(os.path.join(
            self.test_folder, "test_allgenome_orphan.fa")))
        self.assertTrue(os.path.exists(os.path.join(
            self.test_folder, "test_allgenome_without_orphan.fa")))

    def test_split_fasta_by_strain(self):
        with open(os.path.join(self.fa_folder, "allgenome.fa"), "w") as fh:
            fh.write(""">aaa_aaa_aaa
ATTATATATA
>bbb_bbb_bbb
AATTAATTAA""")
        self.meme._split_fasta_by_strain(self.fa_folder)
        self.assertTrue(os.path.join(self.fa_folder, "aaa.fa"))
        self.assertTrue(os.path.join(self.fa_folder, "bbb.fa"))


if __name__ == "__main__":
    unittest.main()
2.515625
3
dispatcher/models.py
stakewise/bls-horcrux
16
12792016
<reponame>stakewise/bls-horcrux<filename>dispatcher/models.py
from sqlalchemy import Column, Integer, String

from .database import Base


class Share(Base):
    __tablename__ = "shares"

    id = Column(Integer, primary_key=True, index=True)
    recipient_rsa_public_key = Column(String)
    sender_rsa_public_key = Column(String)
    enc_session_key = Column(String)
    ciphertext = Column(String)
    tag = Column(String)
    nonce = Column(String)
    signature = Column(String)
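# Usage sketch (not part of the original module). Assumes `Base` from
# dispatcher.database is a standard SQLAlchemy declarative base; all values
# below are dummies.
#
#     from sqlalchemy import create_engine
#     from sqlalchemy.orm import Session
#     from dispatcher.database import Base
#     from dispatcher.models import Share
#
#     engine = create_engine('sqlite:///:memory:')
#     Base.metadata.create_all(engine)
#     with Session(engine) as session:
#         session.add(Share(recipient_rsa_public_key='...', ciphertext='...'))
#         session.commit()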
2.125
2
src/state/board_state.py
PKU-GeekGame/gs-backend
7
12792017
from __future__ import annotations

import time
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Optional, List, Tuple, Dict, Any

if TYPE_CHECKING:
    from . import *
    ScoreBoardItemType = Tuple[User, int]

from . import WithGameLifecycle


def minmax(x: int, a: int, b: int) -> int:
    if x < a:
        return a
    elif x > b:
        return b
    return x


class Board(WithGameLifecycle, ABC):
    def __init__(self, board_type: str, name: str):
        self.board_type = board_type
        self.name = name

    @property
    @abstractmethod
    def summarized(self) -> Dict[str, Any]:
        raise NotImplementedError()


class ScoreBoard(Board):
    MAX_DISPLAY_USERS = 100
    MAX_TOPSTAR_USERS = 10

    def __init__(self, name: str, game: Game, group: Optional[List[str]], show_group: bool):
        super().__init__('score', name)
        self._game = game
        self.show_group: bool = show_group
        self.group: Optional[List[str]] = group
        self.board: List[ScoreBoardItemType] = []
        self.uid_to_rank: Dict[int, int] = {}
        self._summarized: Dict[str, Any] = self._summarize()

    @property
    def summarized(self) -> Dict[str, Any]:
        return self._summarized

    def _update_board(self) -> None:
        def is_valid(x: ScoreBoardItemType) -> bool:
            user, score = x
            return (
                ((user._store.group in self.group) if self.group is not None else True)
                and score > 0
            )

        def sorter(x: ScoreBoardItemType) -> Tuple[Any, ...]:
            user, score = x
            return (
                -score,
                -1 if user.last_succ_submission is None else user.last_succ_submission._store.id,
            )

        b = [(u, u.tot_score) for u in self._game.users.list]
        self.board = sorted([x for x in b if is_valid(x)], key=sorter)
        self.uid_to_rank = {user._store.id: idx + 1 for idx, (user, _score) in enumerate(self.board)}

    def _summarize(self) -> Dict[str, Any]:
        return {
            'challenges': [{
                'id': ch._store.id,
                'title': ch._store.title,
                'category': ch._store.category,
                'flags': [f.name for f in ch.flags],
            } for ch in self._game.challenges.list],
            'list': [{
                'rank': idx + 1,
                'nickname': u._store.profile.nickname_or_null or '--',
                'group_disp': u._store.group_disp() if self.show_group else None,
                'score': score,
                'last_succ_submission_ts': int(u.last_succ_submission._store.timestamp_ms / 1000) if u.last_succ_submission else None,
                'challenge_status': {
                    ch._store.id: ch.user_status(u) for ch in self._game.challenges.list
                },
                'flag_pass_ts': {
                    f'{f.challenge._store.id}_{f.idx0}': int(sub._store.timestamp_ms / 1000)
                    for f, sub in u.passed_flags.items()
                },
            } for idx, (u, score) in enumerate(self.board[:self.MAX_DISPLAY_USERS])],
            'topstars': [{
                'nickname': u._store.profile.nickname_or_null or '--',
                'submissions': [{
                    'timestamp_ms': sub._store.timestamp_ms,
                    'gained_score': sub.gained_score(),
                } for sub in u.succ_submissions]
            } for u, _score in self.board[:self.MAX_TOPSTAR_USERS]],
            'time_range': [
                self._game.trigger.board_begin_ts,
                minmax(
                    int(time.time()) + 1,
                    self._game.trigger.board_begin_ts + 1,
                    self._game.trigger.board_end_ts,
                ),
            ],
        }

    def on_scoreboard_reset(self) -> None:
        self.board = []
        self._summarized = self._summarize()

    def on_scoreboard_update(self, submission: Submission, in_batch: bool) -> None:
        if not in_batch and submission.matched_flag is not None:
            if self.group is None or submission.user._store.group in self.group:
                self._update_board()
                self._summarized = self._summarize()

    def on_scoreboard_batch_update_done(self) -> None:
        self._update_board()
        self._summarized = self._summarize()


class FirstBloodBoard(Board):
    def __init__(self, name: str, game: Game, group: Optional[List[str]], show_group: bool):
        super().__init__('firstblood', name)
        self._game = game
        self.show_group: bool = show_group
        self.group: Optional[List[str]] = group
        self.chall_board: Dict[Challenge, Submission] = {}
        self.flag_board: Dict[Flag, Submission] = {}
        self._summarized: Dict[str, Any] = self._summarize()

    @property
    def summarized(self) -> Dict[str, Any]:
        return self._summarized

    def _summarize(self) -> Dict[str, Any]:
        return {
            'list': [{
                'title': ch._store.title,
                'id': ch._store.id,
                'flags': [{
                    'flag_name': None,
                    'nickname': ch_sub.user._store.profile.nickname_or_null if ch_sub is not None else None,
                    'group_disp': ch_sub.user._store.group_disp() if (ch_sub is not None and self.show_group) else None,
                    'timestamp': int(ch_sub._store.timestamp_ms / 1000) if ch_sub is not None else None,
                }] + ([] if len(ch.flags) <= 1 else [{
                    'flag_name': f.name,
                    'nickname': f_sub.user._store.profile.nickname_or_null if f_sub is not None else None,
                    'group_disp': f_sub.user._store.group_disp() if (f_sub is not None and self.show_group) else None,
                    'timestamp': int(f_sub._store.timestamp_ms / 1000) if f_sub is not None else None,
                } for f in ch.flags for f_sub in [self.flag_board.get(f, None)]]),
            } for ch in self._game.challenges.list for ch_sub in [self.chall_board.get(ch, None)]],
        }

    def on_scoreboard_reset(self) -> None:
        self.chall_board = {}
        self.flag_board = {}
        self._summarized = self._summarize()

    def on_scoreboard_update(self, submission: Submission, in_batch: bool) -> None:
        if submission.matched_flag is not None:
            assert submission.challenge is not None, 'submission matched flag to no challenge'

            if self.group is None or submission.user._store.group in self.group:
                passed_all_flags = submission.challenge in submission.user.passed_challs

                if submission.matched_flag not in self.flag_board:
                    self.flag_board[submission.matched_flag] = submission

                    if not in_batch and not passed_all_flags:
                        self._game.worker.emit_local_message({
                            'type': 'flag_first_blood',
                            'board_name': self.name,
                            'nickname': submission.user._store.profile.nickname_or_null,
                            'challenge': submission.challenge._store.title,
                            'flag': submission.matched_flag.name,
                        }, self.group)

                if submission.challenge not in self.chall_board and passed_all_flags:
                    self.chall_board[submission.challenge] = submission

                    if not in_batch:
                        self._game.worker.emit_local_message({
                            'type': 'challenge_first_blood',
                            'board_name': self.name,
                            'nickname': submission.user._store.profile.nickname_or_null,
                            'challenge': submission.challenge._store.title,
                        }, self.group)

                self._summarized = self._summarize()
3.015625
3
python/dblock.py
cpausmit/FiBS
0
12792018
<gh_stars>0
# dblock.py

import os
import socket
import MySQLdb

MY_CNF = "%s/.mysql/my.cnf" % (os.getenv('HOME'))
if not os.path.exists(MY_CNF):
    MY_CNF = "/etc/my.cnf"

print(" OPEN DB.")
Db = MySQLdb.connect(read_default_file="%s" % (MY_CNF), read_default_group="mysql", db="Fibs")
Cursor = Db.cursor()


class dblock(object):
    '''Class to handle creating and removing database based locks'''

    # custom exceptions
    class DatabaseLockAcquisitionError(Exception):
        pass

    class DatabaseLockReleaseError(Exception):
        pass

    # convenience callables for formatting
    addr = lambda self: '%d@%s' % (self.pid, self.host)
    fddr = lambda self: '<%s %s>' % (self.path, self.addr())
    pddr = lambda self, lock: '<%s %s@%s>' % \
        (self.path, lock['pid'], lock['host'])

    def __init__(self, path, debug=None):
        self.pid = os.getpid()
        self.host = socket.gethostname()
        self.path = path
        self.debug = debug  # set this to get status messages
        self.acquireSql = "insert into Locks(LockPath,LockHost,LockPid) " \
            + " values('%s','%s',%d);" % (self.path, self.host, self.pid)
        self.releaseSql = "delete from Locks where LockPath='%s'" % (self.path)
        self.listSql = "select * from Locks where LockPath='%s';" % (self.path)

    def acquire(self):
        '''Acquire a lock, returning self if successful, False otherwise'''
        try:
            # Execute the SQL command
            Cursor.execute(self.acquireSql)
            if self.debug:
                print('Acquired lock: %s' % self.fddr())
        except:
            if self.debug:
                lock = self._readlock()
                print('Existing lock detected: %s' % self.pddr(lock))
            return False
        return self

    def release(self):
        '''Release lock, returning self'''
        if self.ownlock():
            try:
                # Execute the SQL command
                Cursor.execute(self.releaseSql)
                if self.debug:
                    print('Released lock: %s' % self.fddr())
            except:
                print(" Error (%s): unable to release lock." % (self.releaseSql))
                raise self.DatabaseLockReleaseError(
                    'Error releasing lock: %s' % self.fddr())
        return self

    def _readlock(self):
        '''Internal method to read lock info'''
        lock = {}
        try:
            Cursor.execute(self.listSql)
            results = Cursor.fetchall()
            if len(results) == 0:
                lock['path'], lock['host'], lock['pid'] = [self.path, '', 0]
            elif len(results) > 1:
                print(' WARNING -- did not find unique result! (n=%d)' % (len(results)))
                lock['path'], lock['host'], lock['pid'] = [self.path, '', 0]
            else:
                for row in results:
                    lock['path'], lock['host'], lock['pid'] = row
        except:
            print(" Error (%s): unable to list locks." % (self.listSql))
            lock['path'], lock['host'], lock['pid'] = [self.path, '', 0]
        return lock

    def ownlock(self):
        '''Check if we own the lock'''
        lock = self._readlock()
        return (self.fddr() == self.pddr(lock))

    def __del__(self):
        '''Magic method to clean up lock when program exits'''
        self.release()
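# Usage sketch (not part of the original file). Assumes a reachable MySQL
# server with the Fibs database and a Locks(LockPath, LockHost, LockPid) table:
#
#     lock = dblock('/tmp/my-job', debug=True)
#     if lock.acquire():
#         try:
#             pass  # ... critical section ...
#         finally:
#             lock.release()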
2.34375
2
__init__.py
rowanc1/Seismogram
3
12792019
import Layers
import Wavelets
0.964844
1
plugins/filter/version_filters.py
nrser/qb
1
12792020
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import subprocess
import os
import json
import sys

from ansible.errors import AnsibleError

import qb
from qb.ipc.rpc import client as rpc_client


def get_semver_path():
    bin_path = os.path.join(qb.ROOT, 'node_modules', 'semver', 'bin', 'semver')
    if not os.path.isfile(bin_path):
        raise Exception("can't find semver at %s" % bin_path)
    return bin_path
# get_semver_path()


def semver_inc(version, level=None, preid=None):
    '''increment the version at level, with optional preid for pre- levels.

    runs

        semver --increment <level> [--preid <preid>] <version>

    This does **not** do what you probably want... `preid` is ignored:

    >>> semver_inc('1.0.0', 'patch', preid = 'dev')
    '1.0.1'

    >>> semver_inc('1.0.0', 'minor', preid = 'dev')
    '1.1.0'

    The only way to get the `preid` appended is to increment the prerelease:

    >>> semver_inc('1.0.0', 'prerelease', preid = 'dev')
    '1.0.1-dev.0'
    '''
    cmd = [
        get_semver_path(),
        '--increment',
    ]

    if not (level is None):
        cmd.append(level)

    if not (preid is None):
        cmd.append('--preid')
        cmd.append(preid)

    cmd.append(version)

    out = subprocess.check_output(cmd)

    return out.rstrip()
# semver_inc()


def semver_parse(version):
    '''parse semver.
    '''
    stmt = (
        '''console.log(JSON.stringify(require('semver')(%s), null, 2))'''
        % json.dumps(version)
    )

    cmd = ['node', '--eval', stmt]

    out = subprocess.check_output(cmd, cwd=qb.ROOT)

    version = json.loads(out)
    version['is_release'] = len(version['prerelease']) == 0
    version['is_dev'] = (
        len(version['prerelease']) > 0 and version['prerelease'][0] == 'dev'
    )
    version['is_rc'] = (
        len(version['prerelease']) > 0 and version['prerelease'][0] == 'rc'
    )

    if version['is_release']:
        version['level'] = 'release'
    else:
        version['level'] = version['prerelease'][0]

    # deprecated name for level
    version['type'] = version['level']

    version['release'] = "%(major)s.%(minor)s.%(patch)s" % version

    return version
# semver_parse()


def qb_version_parse(version_string):
    '''Parse version into QB::Package::Version
    '''
    return rpc_client.send('QB::Package::Version', 'from', version_string)


def qb_read_version(file_path):
    '''Read a QB::Package::Version from a file.
    '''
    with open(file_path, 'r') as file:
        return qb_version_parse(file.read())


class FilterModule(object):
    ''' version manipulation filters '''

    def filters(self):
        return {
            'semver_inc': semver_inc,
            'semver_parse': semver_parse,
            'qb_version_parse': qb_version_parse,
            'qb_read_version': qb_read_version,
        }
    # filters()
# FilterModule


# testing - run the doctests when executed directly
if __name__ == '__main__':
    import doctest
    doctest.testmod()
1.96875
2
test/files/crawl_settings.py
mawentao007/reading_grab
0
12792021
GRAB_SPIDER_CONFIG = {
    'global': {
        'spider_modules': ['test.script_crawl'],
    },
}
1.179688
1
ContractorApp/migrations/0011_remove_profile_is_a_company.py
Paul-W-0/FindContractors
0
12792022
<reponame>Paul-W-0/FindContractors
# Generated by Django 3.2.9 on 2021-12-01 22:00

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('ContractorApp', '0010_rename_user_type_profile_is_a_company'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='profile',
            name='is_a_company',
        ),
    ]
1.578125
2
sim/__main__.py
Tjorriemorrie/ufc
1
12792023
from datetime import datetime from collections import Counter, defaultdict, OrderedDict from itertools import chain from random import random import numpy as np from cma import CMAEvolutionStrategy, CMAOptions from loguru import logger from math import sqrt from sklearn.preprocessing import MinMaxScaler from sortedcontainers import SortedDict from trueskill import BETA, global_env, rate_1vs1, Rating from xgboost import XGBRegressor from .data import DATA from .data_2016 import DATA_2016 from .data_2017 import DATA_2017 from .data_2018 import DATA_2018 def win_probability(team1, team2): delta_mu = sum(r.mu for r in team1) - sum(r.mu for r in team2) sum_sigma = sum(r.sigma ** 2 for r in chain(team1, team2)) size = len(team1) + len(team2) denom = sqrt(size * (BETA * BETA) + sum_sigma) ts = global_env() return ts.cdf(delta_mu / denom) def to_decimal_odds(us_odds): if us_odds > 0: return us_odds / 100 + 1 else: return 100 / us_odds + 1 def to_implied_odds(us_odds: float) -> float: decimal_odds = to_decimal_odds(us_odds) try: return 1 / decimal_odds except ZeroDivisionError: return 1 def get_regressor(X_train, y_train, X_test=None, y_test=None, **reg_params): """get regressor""" logger.info('') logger.info('Training model...') eval_set = [(np.array(X_train), y_train)] if X_test and y_test: eval_set.append((np.array(X_test), y_test)) reg = XGBRegressor(objective='reg:squarederror', n_jobs=4, **reg_params) reg = reg.fit(X_train, y_train, eval_set=eval_set, eval_metric='auc', verbose=0) return reg def main(hyper_params, train=0): logger.info('Starting main training') all_data = DATA_2016 + DATA_2017 + DATA_2018 + DATA # estimators, learning_rate = hyper_params # gamma, max_depth, min_child_weight = hyper_params # max_delta_step, subsample, scale_pos_weight = hyper_params reg_params = { 'n_estimators': 100 if train else 1000, # 'learning_rate': 0.09426181829690375, # 0.24678854038938264 # 'gamma': 0.1860088097748791, # 0.0012826703538762253, # 'max_depth': int(round(2.1956102758009424)), # 2.5506573766936533)), # 'min_child_weight': 3.5802932556001426, # 'max_delta_step': 0.10779250505931337, # 'subsample': 0.9859889452465481, # 'scale_pos_weight': 1.2283288967549404, } # bet_pred_a, bet_pred_b, bet_odds_a, bet_odds_b, bet_wnl_a, bet_wnl_b = hyper_params bet_pred_a = 1.713980438805089 # -3.55 bet_pred_b = -4.065137791049565 # -17.93 bet_odds_a = 3.122323263774503 # -12.44 bet_odds_b = 0.0837110561236318 # -16.17 bet_wnl_a = 15.100288654913749 # -3.52 # -8.01 bet_wnl_b = -10.111913271763338 # -4.96 # 2.50 # bet_ts_a, bet_ts_b, bet_tmi_a, bet_tmi_b, bet_tma_a, bet_tma_b = hyper_params bet_ts_a = -50.59979897765422 # -26.88 # -3.52 # -8.01 bet_ts_b = -69.5794588139756 # -72.60 # -3.52 # -8.01 bet_tmi_a = -45.94904856923797 bet_tmi_b = -1.128236337281963 bet_tma_a = -28.62283185173976 bet_tma_b = -26.933801584409544 # init reg = None scaler = MinMaxScaler() cutoff = int(len(all_data) * 0.6) start_date = None ratings = defaultdict(lambda: Rating()) wins_losses = defaultdict(lambda: []) early_fights = defaultdict(lambda: 0.5) last_fights = defaultdict(lambda: 0.5) X_train = [] y_train = [] X_test = [] y_test = [] payouts = [] bet_amts = [] accuracy = (0, 0) tab = [] tab_amts = [] actual = (0, 0) actual_debug = [] bet_multis = [] bet_multis_cat = [] preds_flipped = [] odds_outcomes = [] # loop through scenes for i, scene in enumerate(all_data): is_training = i < cutoff if not is_training: if not reg: start_date = datetime.strptime(scene['date'], '%Y-%m-%d') # scale scaler.partial_fit(X_train) X_train = 
        scaler.transform(X_train)
        reg = get_regressor(X_train, y_train, **reg_params)

        logger.info('')
        logger.info(f'{scene["date"]} {scene["name"]}')
        for fight in scene['fights']:
            bet_size = 1

            # skip if no odds:
            if 'odds' not in fight:
                continue

            f1 = fight['fighters'][0]['name']
            f2 = fight['fighters'][1]['name']

            # trueskill data
            f1_ts = ratings[f1].mu
            f1_sigma = ratings[f1].sigma
            f2_ts = ratings[f2].mu
            f2_sigma = ratings[f2].sigma
            f1_ts_min = f1_ts - f1_sigma * 2
            f2_ts_min = f2_ts - f2_sigma * 2
            f1_ts_max = f1_ts + f1_sigma * 2
            f2_ts_max = f2_ts + f2_sigma * 2

            # odds data
            f1_odds = fight['odds'][f1]
            f2_odds = fight['odds'][f2]
            if not -50 < f1_odds < 50 or not -50 < f2_odds < 50:
                raise ValueError(f'surely these odds are wrong? {f1_odds} {f2_odds}')

            win1_prob = win_probability([ratings[f1]], [ratings[f2]])
            win2_prob = win_probability([ratings[f2]], [ratings[f1]])

            # wins losses data
            f1_wins_losses = Counter(wins_losses[f1])
            f1_wnl_winrate = f1_wins_losses[1] / max(1, len(wins_losses[f1]))
            f2_wins_losses = Counter(wins_losses[f2])
            f2_wnl_winrate = f2_wins_losses[1] / max(1, len(wins_losses[f2]))

            fight_data = [
                [
                    win1_prob,
                    f1_odds,
                    f2_odds,
                    f1_ts,
                    f2_ts,
                    f1_sigma,
                    f2_sigma,
                    f1_ts_min - f2_ts_min,
                    f1_ts - f2_ts,
                    f1_ts_max - f2_ts_max,
                    last_fights[f1],
                    last_fights[f2],
                    early_fights[f1],
                    early_fights[f2],
                    f1_wins_losses[1],
                    f1_wins_losses[-1],
                    f1_wnl_winrate,
                    f2_wins_losses[1],
                    f2_wins_losses[-1],
                    f2_wnl_winrate,
                ],
                [
                    win2_prob,
                    f2_odds,
                    f1_odds,
                    f2_ts,
                    f1_ts,
                    f2_sigma,
                    f1_sigma,
                    f2_ts_min - f1_ts_min,
                    f2_ts - f1_ts,
                    f2_ts_max - f1_ts_max,
                    last_fights[f2],
                    last_fights[f1],
                    early_fights[f2],
                    early_fights[f1],
                    f2_wins_losses[1],
                    f2_wins_losses[-1],
                    f2_wnl_winrate,
                    f1_wins_losses[1],
                    f1_wins_losses[-1],
                    f1_wnl_winrate,
                ]
            ]

            ##########################################
            # update data

            if 'winner' in fight:
                # get winner
                fw = fight['winner']['fighter']
                is_win_1 = fw == f1
                fl = f2 if is_win_1 else f1
                if not is_win_1 and fw != f2 and fw is not None:
                    raise ValueError(f'unknown winner {fw}')
                drawn = fw is None

                # update wins losses (winner gets a 1, loser a -1)
                wins_losses[fw] += [1]
                wins_losses[fl] += [-1]

                # update fights
                early_fights[fw] = last_fights[fw]
                early_fights[fl] = last_fights[fl]
                last_fights[fw] = 1
                last_fights[fl] = 0

                # update ratings
                ratings[fw], ratings[fl] = rate_1vs1(ratings[fw], ratings[fl], drawn=drawn)

            ###################################
            # train

            if is_training:
                if 'winner' in fight:
                    X_train.extend(fight_data)
                    y_train.extend([is_win_1, not is_win_1])

            ###################################
            # test
            else:
                scaled_fight_data = scaler.transform(fight_data)
                f1_pred, f2_pred = reg.predict(scaled_fight_data)

                #############################
                # bet scaling
                bet_multi = 1

                # pred max
                if f1_pred > f2_pred:
                    f_pred = f1_pred - f2_pred
                else:
                    f_pred = f2_pred - f1_pred
                bet_pred_multi = np.polyval([bet_pred_a, bet_pred_b], [f_pred])[0]
                bet_pred_multi = round(min(1, max(0, bet_pred_multi)))
                bet_multi += bet_pred_multi
                bet_multis_cat.append(f'pred:{bet_pred_multi:.0f}')

                # odds diff
                if f1_pred > f2_pred:
                    f_odds = 1 / f1_odds - 1 / f2_odds
                else:
                    f_odds = 1 / f2_odds - 1 / f1_odds
                bet_odds_multi = np.polyval([bet_odds_a, bet_odds_b], [f_odds])[0]
                bet_odds_multi = round(min(1, max(0, bet_odds_multi)))
                bet_multi += bet_odds_multi
                bet_multis_cat.append(f'odds:{bet_odds_multi:.0f}')

                # wins and losses
                if f1_pred > f2_pred:
                    f_wnl = f1_wnl_winrate - f2_wnl_winrate
                else:
                    f_wnl = f2_wnl_winrate - f1_wnl_winrate
                bet_wnl_multi = np.polyval([bet_wnl_a, bet_wnl_b], [f_wnl])[0]
                bet_wnl_multi = round(min(1, max(0, bet_wnl_multi)))
                bet_multi += bet_wnl_multi
                bet_multis_cat.append(f'wnl:{bet_wnl_multi:.0f}')

                # trueskill mu
                if f1_pred > f2_pred:
                    f_ts = f1_ts - f2_ts
                else:
                    f_ts = f2_ts - f1_ts
                bet_ts_multi = np.polyval([bet_ts_a, bet_ts_b], [f_ts])[0]
                bet_ts_multi = round(min(1, max(0, bet_ts_multi)))
                bet_multi += bet_ts_multi
                bet_multis_cat.append(f'ts:{bet_ts_multi:.0f}')

                # trueskill min
                if f1_pred > f2_pred:
                    f_ts_min = f1_ts_min - f2_ts_min
                else:
                    f_ts_min = f2_ts_min - f1_ts_min
                bet_tmi_multi = np.polyval([bet_tmi_a, bet_tmi_b], [f_ts_min])[0]
                bet_tmi_multi = round(min(1, max(0, bet_tmi_multi)))
                bet_multi += bet_tmi_multi
                bet_multis_cat.append(f'tmi:{bet_tmi_multi:.0f}')

                # trueskill max
                if f1_pred > f2_pred:
                    f_ts_max = f1_ts_max - f2_ts_max
                else:
                    f_ts_max = f2_ts_max - f1_ts_max
                bet_tma_multi = np.polyval([bet_tma_a, bet_tma_b], [f_ts_max])[0]
                bet_tma_multi = round(min(1, max(0, bet_tma_multi)))
                bet_multi += bet_tma_multi
                bet_multis_cat.append(f'tma:{bet_tma_multi:.0f}')

                bet_size *= round(bet_multi)
                bet_amt = round(bet_size * bet_multi)
                assert bet_amt >= 1, f'bet multi is invalid: {bet_multi}'
                bet_amts.append(bet_size)
                bet_multis.append(int(round(bet_multi)))

                #############################
                # prediction made

                if 'prediction' in fight and fight['prediction'] is None:
                    if f1_pred > f2_pred:
                        exp_winner = f1
                        pred_exp_winner = f1_pred
                        exp_loser = f2
                        pred_exp_loser = f2_pred
                    else:
                        exp_winner = f2
                        pred_exp_winner = f2_pred
                        exp_loser = f1
                        pred_exp_loser = f1_pred
                    logger.warning(f'[{pred_exp_winner * 100:.0f}% vs {pred_exp_loser * 100:.0f}%] '
                                   f'Bet x{bet_multi} on {exp_winner} to beat {exp_loser} '
                                   f'[{ratings[exp_winner].mu:.0f} vs {ratings[exp_loser].mu:.0f}]')
                    continue

                # good luck with your bets
                elif 'winner' not in fight:
                    logger.warning(f'Pending {f1} vs {f2}')
                    continue

                if is_win_1:
                    fw_pred = f1_pred
                    fl_pred = f2_pred
                else:
                    fw_pred = f2_pred
                    fl_pred = f1_pred

                # add test data
                X_test.extend(scaled_fight_data)
                y_test.extend([is_win_1, not is_win_1])

                # testing outcome
                correct = 0
                payout = -bet_size
                if is_win_1 and f1_pred > f2_pred:
                    correct = 1
                    payout += f1_odds * bet_size
                elif not is_win_1 and f2_pred > f1_pred:
                    correct = 1
                    payout += f2_odds * bet_size
                # did the bookmakers' favourite (lower decimal odds) win?
                odds_outcomes.append(int((f1_odds < f2_odds and is_win_1) or (f2_odds < f1_odds and not is_win_1)))
                payouts.append(round(payout, 2))
                accuracy = (accuracy[0] + correct, accuracy[1] + 1)

                # actual outcome
                pred_flipped = False
                if 'bet' in fight:
                    is_actual_correct = fight['prediction'] == fw
                    actual = (actual[0] + is_actual_correct, actual[1] + 1)
                    cash = -fight['bet']
                    if is_actual_correct:
                        fw_odds = f1_odds if is_win_1 else f2_odds
                        cash += fw_odds * fight['bet']
                    else:
                        fw_odds = f2_odds if is_win_1 else f1_odds
                    tab.append(round(cash, 2))
                    tab_amts.append(fight['bet'])

                    # pred flipped?
                    pred_flipped = (f1_pred > f2_pred and fight['prediction'] != f1) or (
                        f2_pred > f1_pred and fight['prediction'] != f2)

                    actual_debug.append(f'${fight["bet"]} {fw_odds:.2f}: {cash:.2f} {fight["prediction"]} {fight["date"]}')

                preds_flipped.append(int(pred_flipped))
                log_balance = f'{"!!" if pred_flipped else "  "}[{sum(payouts):.0f}|{payout:.0f}]'
                log_pred = f'[{fw_pred * 100:.0f}% vs {fl_pred * 100:.0f}%]'
                log_fight = f'x{bet_multi} {fw} {fight["winner"]["by"]} {fl}'
                log_ratings = f'[{ratings[fw].mu:.0f} vs {ratings[fl].mu:.0f}]'
                logger.info(f'{log_balance} {log_pred} {log_fight} {log_ratings}')

    if train:
        total_payouts = sum(payouts)
        roi = total_payouts / sum(bet_amts)
        res = -roi - (total_payouts / 5000)
        print(f'Score: {-res*100:.2f}  ROI {roi * 100:.1f}%  Profit ${total_payouts:.0f}')
        return res
    else:
        summary(reg, accuracy, payouts, start_date, bet_amts, bet_multis, bet_multis_cat,
                actual, tab, tab_amts, odds_outcomes)


def summary(reg, accuracy, payouts, start_date, bet_amts, bet_multis, bet_multis_cat,
            actual, tab, tab_amts, odds_outcomes):
    logger.info('')
    logger.info('Tree info:')
    # reg = get_regressor(X_train, y_train, X_test, y_test, estimators=estimators, max_depth=max_depth)
    reg_score = reg.evals_result()
    params = reg.get_params()
    logger.info(f'Num estimators: {params["n_estimators"]}')
    logger.info(f'Learning rate: {params["learning_rate"]:.2f}')
    logger.info(f'Max depth: {params["max_depth"]}')
    logger.info(f'Accuracy: training={reg_score["validation_0"]["auc"][-1]*100:.0f}%')

    feature_names = [
        'win%',
        'odds', '~odds',
        'ts', '~ts',
        'sigma', '~sigma',
        'ts_min_diff', 'ts_diff', 'ts_max_diff',
        'last', '~last',
        'early', '~early',
        'wins', '~wins',
        'losses', '~losses',
        'winrate', '~winrate',
    ]
    assert len(feature_names) == len(reg.feature_importances_), \
        f'{len(feature_names)} features vs {len(reg.feature_importances_)} reg values'
    logger.info('')
    logger.info(f'Features:')
    features = SortedDict({v: k for k, v in zip(feature_names, reg.feature_importances_)})
    for k in features.keys():
        logger.info(f'{features[k]}: {k*1000:.0f}')

    if accuracy[1]:
        payouts = np.array(payouts)
        logger.info('')
        logger.info('Testing:')
        odds_acc = sum([t for t in odds_outcomes if t > 0]) / len(odds_outcomes)
        logger.info(f'Accuracy {accuracy[0]}/{accuracy[1]} = {accuracy[0]/accuracy[1]*100:.1f}%  Odds: {odds_acc*100:.1f}%')
        logger.info(f'ROI {sum(payouts) / sum(bet_amts) * 100:.1f}%  Profit ${sum(payouts):.0f}')
        days = (datetime.now() - start_date).days
        logger.info(f'Profit: per day: ${sum(payouts) / days:.2f}  per bet ${payouts.mean():.2f}')
        logger.info(f'Common multis: {Counter(bet_multis).most_common(4)}')
        logger.info(f'cat multis: {Counter(bet_multis_cat).most_common()}')

    if actual[1]:
        tab = np.array(tab)
        logger.info('')
        logger.info('Actual:')
        logger.info(f'Accuracy {actual[0]}/{actual[1]} = {actual[0]/actual[1] * 100:.1f}%')
        logger.info(f'ROI {sum(tab) / sum(tab_amts) * 100:.2f}%  Profit ${sum(tab):.0f}')
        days = (datetime.now() - datetime(2019, 7, 13)).days
        logger.info(f'Profit: per day: ${sum(tab) / days:.2f}  per bet ${tab.mean():.2f}')

        sheet = -62.62
        if abs(sum(tab) - sheet) > 0.01:
            for l in actual_debug:
                logger.warning(l)
            logger.error(f'debug! {sheet:.2f} != {sum(tab):.2f} diff {sum(tab) - sheet:.2f}')


def run():
    train = 0

    names = [
        # 'bet_pred_a', 'bet_pred_b',
        'bet_odds_a', 'bet_odds_b',
        'bet_wnl_a', 'bet_wnl_b',
        'bet_ts_a', 'bet_ts_b',
        'bet_tmi_a', 'bet_tmi_b',
        'bet_tma_a', 'bet_tma_b',
    ]
    params = [0] * len(names)  # one initial value per name in `names`
    bounds = [[-np.inf], [np.inf]]
    assert len(params) == len(names)
    # assert len(params) == len(bounds[0])

    if train:
        sigma = 1
        opts = CMAOptions()
        # opts['tolx'] = 1E-2
        opts['bounds'] = bounds
        es = CMAEvolutionStrategy(params, sigma, inopts=opts)
        while not es.stop():
            solutions = es.ask()
            fitness = [main(x, train=1) for x in solutions]
            es.tell(solutions, fitness)
            es.disp()
            print(list(es.result[0]))
            print(list(es.result[5]))
        es.result_pretty()
        print('')
        print('best')
        print(list(es.result[0]))
        print('')
        print('xfavorite: distribution mean in "phenotype" space, to be considered as current best estimate of the optimum')
        print(list(es.result[5]))
    else:
        main(params)


if __name__ == '__main__':
    run()
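# Illustrative note (not from the original script): each bet multiplier above is
# a clamped linear map of a feature gap, np.polyval([a, b], [gap])[0], i.e.
# a * gap + b rounded into {0, 1}. With hypothetical fitted values a=4, b=0.1
# and a prediction gap of 0.3:
#
#   >>> import numpy as np
#   >>> round(min(1, max(0, np.polyval([4, 0.1], [0.3])[0])))
#   1
#
# so a confident prediction adds one extra betting unit to bet_multi.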
2.09375
2
kron.py
seangilleran/kron
0
12792024
from flask_script import Manager

from kron import Kron, db, restore_from_file
from kron import Tag, Post, Archive, Box, Document, Person, Topic

app = Kron(__name__)
manager = Manager(app)


@manager.command
def restore(file):
    restore_from_file(file)


@manager.shell
def _make_shell_context():
    return dict(
        app=app, db=db, Tag=Tag, Post=Post, Archive=Archive, Box=Box,
        Document=Document, Person=Person, Topic=Topic
    )


if __name__ == "__main__":
    manager.run()
2.140625
2
Corpus_Olusturma/gezi_web_siteleri.py
Halil-ibrahim-GUNBULAK/IMAGEPROCESSORS
4
12792025
import requests
from bs4 import BeautifulSoup

linklertoplam = []

# First of all, it matters that the news site is paginated.
# Here we collect the links and build a list of them.
# Anyone who knows HTML knows our links live in the "a href" attributes, but not every link on the site is useful to us.
# So you first need to observe the page; from my own observation, the links we need sit between the
# https://play.google.com/store/apps/details?id=hurriyet.mobil.android&hl=tr link and the /seyahat/konu/gezgin/?p= links,
# so I extracted them with code that matches that pattern.
# To tidy up the resulting corpus I am leaving a corpus-cleaning .py; it removes the punctuation from your text.
# For example, it turns the sentence "Fenerbahçe'nin takım otobüsü." into "Fenerbahçe takım otobüsü",
# which gains you the words "Fenerbahçe" and "otobüsü".
# Otherwise, for your model to recognise these words, forms like "otobüsü." and "Fenerbahçe'nin"
# would have to occur many times; this way we get rid of proper-noun suffixes and punctuation marks.
for p in range(2, 49):
    r = requests.get("https://www.hurriyet.com.tr/seyahat/konu/gezgin/?p={}".format(p))
    soup = BeautifulSoup(r.content, "html.parser")
    linkler = soup.find_all("a")
    linklist = []
    list_min = 0
    list_max = 0
    counter = 0
    counter2 = 1
    for link in linkler:
        s = link.get("href")
        linklist.append(s)
        counter = counter + 1
        str_s = str(s)
        if str_s == "https://play.google.com/store/apps/details?id=hurriyet.mobil.android&hl=tr":
            print(counter)
            list_min = counter
        if str_s[0:24] == '/seyahat/konu/gezgin/?p=' and counter2 == 1:
            counter2 = counter2 + 1
            print(counter)
            list_max = counter

    for i in range(list_min, list_max - 1):
        linklertoplam.append(linklist[i])

print(len(linklertoplam))
dosya = open('turklink.txt', 'a', encoding='utf8')
for d in range(len(linklertoplam)):
    dosya.write('https://www.hurriyet.com.tr' + str(linklertoplam[d]) + '\n')
dosya.close()
2.203125
2
src/library/utils/transforms/param_copy.py
inessus/ai-skills
5
12792026
import copy


def transfer_weights(model_from, model_to):
    """Copy the network parameters shared by both models from model_from to model_to.

    Deep-copy the source state dict, overwrite every entry the source lacks
    (or whose shape must stay the target's, such as the fc layer) with the
    target's own value, then load the merged dict into the target.
    """
    wf = copy.deepcopy(model_from.state_dict())
    wt = model_to.state_dict()
    for k in wt.keys():
        # e.g. both models have an fc layer, but with mismatched parameter
        # shapes -- keep the target's fc parameters in that case.
        if k not in wf or k == 'fc.weight' or k == 'fc.bias':
            wf[k] = wt[k]
    model_to.load_state_dict(wf)
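# A minimal usage sketch (an assumption, not part of the original module): the
# function operates on PyTorch state dicts, so any two modules sharing
# parameter names work. The torchvision models below are illustrative
# stand-ins only:
#
#   import torchvision.models as models
#   pretrained = models.resnet18(pretrained=True)   # weight source
#   custom = models.resnet18(num_classes=10)        # different fc shape
#   transfer_weights(pretrained, custom)            # fc keeps custom's init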
2.78125
3
code/crawler/src/get_raw_arxiv.py
vipulraheja/IteraTeR
11
12792027
import os
import time
import json
import arxiv


def get_paper_list(query="cat:cs.CL", domain='cscl', latest_year=2021, end_year=2010, max_results=1000):
    outs = {}
    year = latest_year
    i = 0
    while year > end_year:
        print(f"Results {i} - {i+max_results}:")
        result = arxiv.query(query=query,
                             start=i,
                             sort_by='submittedDate',
                             sort_order='descending',
                             max_results=max_results)
        new_papers = 0
        for paper in result:
            arxiv_id = paper.id.split('/')[-1]
            N = int(arxiv_id[-1])
            if '.' in arxiv_id and N > 1:
                arxiv_id = arxiv_id.replace(f'v{N}', '')
                print(arxiv_id)
                new_papers += 1
                year = int(paper.updated[:4])
                if arxiv_id not in outs.keys():
                    outs[arxiv_id] = [N]
                else:
                    outs[arxiv_id].append(N)
        i += max_results
        time.sleep(3)
        print(year)
        if new_papers == 0:
            break

    with open(f'../data/arxiv/list/{domain}_list_{len(outs)}.json', 'w') as json_file:
        json.dump(outs, json_file, indent=2)
    return outs


def generate_json_file(preprint_list, tmp_file_path, domain):
    with open(f'{tmp_file_path}/raw_revisions_{domain}.json', 'a') as json_file:
        for ID in preprint_list.keys():
            max_ver = max(preprint_list[ID])
            for i in range(1, max_ver):
                print(ID)
                preprint_v1 = ID + f'v{i}'
                preprint_v2 = ID + f'v{i+1}'
                papers = arxiv.query(query="", id_list=[preprint_v1, preprint_v2], max_results=2)
                try:
                    source_abs = papers[0].summary
                    target_abs = papers[1].summary
                except:
                    print(f'Fail to get paper {ID}!!!')
                    continue
                tmp = {
                    "arxiv_id": ID,
                    "before_version": i,
                    "after_version": i + 1,
                    "before_raw_txt": source_abs,
                    "after_raw_txt": target_abs,
                }
                time.sleep(3)
                json_file.write(json.dumps(tmp) + '\n')


if __name__ == '__main__':
    tmp_path = '../data/arxiv'
    tmp_list_path = '../data/arxiv/list'
    tmp_file_path = '../data/arxiv/raw'
    if not os.path.isdir(tmp_path):
        os.mkdir(tmp_path)
    if not os.path.isdir(tmp_list_path):
        os.mkdir(tmp_list_path)
    if not os.path.isdir(tmp_file_path):
        os.mkdir(tmp_file_path)

    # get raw paper id list (paper version >= 2)
    cates = ['econ.EM', 'econ.GN', 'econ.TH']
    cates += ['q-fin.CP', 'q-fin.EC', 'q-fin.GN', 'q-fin.MF', 'q-fin.PM', 'q-fin.PR', 'q-fin.RM', 'q-fin.ST', 'q-fin.TR']
    cates += ['q-bio.BM', 'q-bio.CB', 'q-bio.GN', 'q-bio.MN', 'q-bio.NC', 'q-bio.OT', 'q-bio.PE', 'q-bio.QM', 'q-bio.SC', 'q-bio.TO']
    cates += ['cs.AI', 'cs.CC', 'cs.CE', 'cs.CG', 'cs.GT', 'cs.CV', 'cs.CY', 'cs.CR', 'cs.DS', 'cs.DB', 'cs.DL', 'cs.DM', 'cs.DC',
              'cs.ET', 'cs.FL', 'cs.GL', 'cs.GR', 'cs.AR', 'cs.HC', 'cs.IR', 'cs.IT', 'cs.LO', 'cs.LG', 'cs.MS', 'cs.MA', 'cs.MM',
              'cs.NI', 'cs.NE', 'cs.NA', 'cs.OS', 'cs.OH', 'cs.PF', 'cs.PL', 'cs.RO', 'cs.SI', 'cs.SE', 'cs.SD', 'cs.SC', 'cs.SY']
    for cate in cates:
        preprint_list = get_paper_list(query=f"cat:{cate}", domain=f'{cate}', latest_year=2021, end_year=1900, max_results=1000)

    # extract paper abstract by paper id
    files = os.listdir(tmp_list_path)
    for fname in files:
        if fname == '.DS_Store':
            continue
        domain = fname.split('_')[0]
        print(domain)
        with open(f'{tmp_list_path}/{fname}', 'r') as f:
            preprint_list = json.load(f)
        outs = generate_json_file(preprint_list, tmp_file_path, domain)
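# Hypothetical helper (not part of the original script): the version suffix is
# parsed above with int(arxiv_id[-1]), which breaks for versions >= 10 such as
# "2101.00001v12". A regex split is one way to make that robust:

import re

def split_arxiv_version(arxiv_id):
    """Return (base_id, version) for ids like '2101.00001v12'."""
    m = re.match(r'^(.*?)(?:v(\d+))?$', arxiv_id)
    return m.group(1), int(m.group(2)) if m.group(2) else 1

# e.g. split_arxiv_version('2101.00001v12') == ('2101.00001', 12)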
2.6875
3
examples/casvlm1_avl_plane_SAND.py
KikeM/AeroSandbox
0
12792028
import copy

from aerosandbox import *

opti = cas.Opti()  # Initialize an optimization environment


def variable(init_val, lb=None, ub=None):
    """
    Initialize a scalar design variable.
    :param init_val: Initial guess
    :param lb: Optional lower bound
    :param ub: Optional upper bound
    :return: The created variable
    """
    var = opti.variable()
    opti.set_initial(var, init_val)
    if lb is not None:
        opti.subject_to(var >= lb)
    if ub is not None:
        opti.subject_to(var <= ub)
    return var


def quasi_variable(val):
    """
    Initialize a scalar design variable that is constrained to a fixed value.
    :param val: Value to pin the variable to
    :return: The created variable
    """
    var = opti.variable()
    opti.set_initial(var, val)
    opti.subject_to(var == val)
    return var


airplane = Airplane(
    name="AVL's plane.avl",
    x_ref=0.02463,  # CG location
    y_ref=0,  # CG location
    z_ref=0.2239,  # CG location
    s_ref=12,
    c_ref=1,
    b_ref=15,
    wings=[
        Wing(
            name="Main Wing",
            x_le=0,  # Coordinates of the wing's leading edge
            y_le=0,  # Coordinates of the wing's leading edge
            z_le=0,  # Coordinates of the wing's leading edge
            symmetric=True,
            chordwise_panels=1,
            xsecs=[  # The wing's cross ("X") sections
                WingXSec(  # Root
                    x_le=-0.25,  # Coordinates of the XSec's leading edge, relative to the wing's leading edge.
                    y_le=0,  # Coordinates of the XSec's leading edge, relative to the wing's leading edge.
                    z_le=0,  # Coordinates of the XSec's leading edge, relative to the wing's leading edge.
                    chord=1,  # 0.18,
                    twist=4,  # variable(0,-10,10),  # degrees
                    airfoil=Airfoil(name="naca0012"),
                    control_surface_type='symmetric_problem',  # Flap
                    # Control surfaces are applied between a given XSec and the next one.
                    control_surface_deflection=0,  # degrees
                    control_surface_hinge_point=0.75,  # as chord fraction
                    spanwise_panels=16,
                ),
                WingXSec(  # Mid
                    x_le=-0.175,
                    y_le=7.5,
                    z_le=0.5,
                    chord=0.7,  # 0.16,
                    twist=4,  # variable(0,-10,10),
                    airfoil=Airfoil(name="naca0012"),
                    control_surface_type='asymmetric',  # Aileron
                    control_surface_deflection=0,
                    control_surface_hinge_point=0.75
                ),
                # WingXSec(  # Tip
                #     x_c=0.08,  # variable(0.08, 0, 0.16),
                #     y_c=1,  # variable(1, 0.5, 1.25),
                #     z_c=0.1,  # variable(0.1, 0, 0.2),
                #     chord=variable(0.08, 0, 1),  # 0.08,  # variable(0.08, 0.01, 1),
                #     twist=0,  # variable(0,-10,10),
                #     airfoil=Airfoil(name="naca4412"),
                # )
            ]
        ),
        Wing(
            name="Horizontal Stabilizer",
            x_le=6,
            y_le=0,
            z_le=0.5,
            symmetric=True,
            chordwise_panels=1,
            xsecs=[
                WingXSec(  # root
                    x_le=-0.1,
                    y_le=0,
                    z_le=0,
                    chord=0.4,
                    twist=variable(0, -60, 60),
                    airfoil=Airfoil(name="naca0012"),
                    control_surface_type='symmetric_problem',  # Elevator
                    control_surface_deflection=0,
                    control_surface_hinge_point=0.75,
                    spanwise_panels=10
                ),
                WingXSec(  # tip
                    x_le=-0.075,
                    y_le=2,
                    z_le=0,
                    chord=0.3,
                    twist=variable(0, -60, 60),
                    airfoil=Airfoil(name="naca0012")
                )
            ]
        ),
        Wing(
            name="Vertical Stabilizer",
            x_le=6,
            y_le=0,
            z_le=0.5,
            symmetric=False,
            chordwise_panels=1,
            xsecs=[
                WingXSec(
                    x_le=-0.1,
                    y_le=0,
                    z_le=0,
                    chord=0.4,
                    twist=0,
                    airfoil=Airfoil(name="naca0012"),
                    control_surface_type='symmetric_problem',  # Rudder
                    control_surface_deflection=0,
                    control_surface_hinge_point=0.75,
                    spanwise_panels=10
                ),
                WingXSec(
                    x_le=-0.075,
                    y_le=0,
                    z_le=1,
                    chord=0.3,
                    twist=0,
                    airfoil=Airfoil(name="naca0012")
                )
            ]
        )
    ]
)
# airplane.set_paneling_everywhere(6, 10)

ap = Casvlm1(  # Set up the AeroProblem
    airplane=airplane,
    op_point=OperatingPoint(
        velocity=65,
        density=0.002377,
        alpha=variable(0),
        beta=quasi_variable(0),
        p=quasi_variable(0),
        q=quasi_variable(0),
        r=quasi_variable(0),
    ),
    opti=opti
)
# Set up the VLM optimization submatrix
ap.setup()

# Extra constraints
# Trim constraint
opti.subject_to([
    ap.CL == 0.390510,
    ap.airplane.wings[1].xsecs[0].twist == ap.airplane.wings[1].xsecs[1].twist,
    ap.Cm == 0,
    # -ap.force_total_inviscid_wind[2] == 9.81 * 0.5,
    # # ap.CY == 0,
    # # ap.Cl == 0,
    # ap.Cm == 0,
    # # ap.Cn == 0,
])
# Cmalpha constraint
# opti.subject_to(cas.gradient(ap.Cm, ap.op_point.alpha) * 180/np.pi == -1)

# Objective
# opti.minimize(-ap.force_total_inviscid_wind[0])

# Solver options
p_opts = {}
s_opts = {}
s_opts["max_iter"] = 1e6  # If you need to interrupt, just use ctrl+c
# s_opts["mu_strategy"] = "adaptive"
# s_opts["start_with_resto"] = "yes"
# s_opts["required_infeasibility_reduction"] = 0.1
opti.solver('ipopt', p_opts, s_opts)

# Solve
try:
    sol = opti.solve()
except RuntimeError:
    sol = opti.debug

# Create solved object
ap_sol = copy.deepcopy(ap)
ap_sol.substitute_solution(sol)

# Postprocess
ap_sol.draw()

# Answer you should get: (XFLR5)
# CL = 0.797
# CDi = 0.017
# CL/CDi = 47.211
2.34375
2
exercicios/aula08-ex-a.py
anildoferreira/CursoPython-PyCharm
0
12792029
import math

num = int(input('Enter a number to find its square root: '))
raiz = math.sqrt(num)
print('The square root of {} is {}.'.format(num, raiz))
4.0625
4
apps/decision/templatetags/calculation.py
bahattincinic/cheers
3
12792030
from __future__ import division

import json

from django.template import Library

register = Library()


def global_weight(criterion, report):
    """
    Formula:
        Global Weight = Criterion W value / Criterion Count

    For example:
        W Value = 1
        Criterion Count = 5
        Global Weight = 1 / 5 = 0.2
    """
    criterion_count = criterion['parent']['count']
    data = report.criterion_compare[str(criterion['parent']['id'])]
    criterion_index = 0
    columns = list(filter(lambda x: x != 'criterion_0', data['main_table'][0]))
    # get column index from matrix
    for index, column in enumerate(columns):
        if 'criterion_%s' % criterion['id'] == column:
            criterion_index = index
            break
    w_value = data['w'][criterion_index]
    return json.dumps(round(w_value / criterion_count, 4))


@register.simple_tag
def criterion_w(criterion, report, index):
    """
    Get W value for given index.
    """
    data = report.supplier_compare[str(criterion['id'])]
    return data['w'][index - 1]


@register.simple_tag
def calculate_supplier_score(report, index):
    """
    Calculate supplier score for given report and index.
    """
    total = 0
    for cr_id, data in report.supplier_compare.items():
        criterion = list(filter(lambda x: str(x['id']) == str(cr_id),
                                report.get_child_criterions()))[0]
        w = float(data['w'][index - 1])
        weight = w * float(global_weight(criterion, report))
        total += weight
    return '%.3f' % total


@register.simple_tag
def get_supplier_criterion_score(report, supplier, criterion):
    """
    Vikor Step 1 Calculation.
    """
    result = list(filter(
        lambda x: x['criterion_id'] == str(criterion['id']) and
                  x['supplier_id'] == str(supplier['id']),
        report.criterion_supplier_score))
    if len(result) > 0:
        return result[0]['score']
    return 0


@register.simple_tag
def get_supplier_normalized_criterion_score(report, supplier, criterion):
    """
    Vikor Step 1 Calculation.
    """
    result = list(filter(
        lambda x: x['criterion_id'] == str(criterion['id']) and
                  x['supplier_id'] == str(supplier['id']),
        report.criterion_supplier_score))
    if len(result) > 0:
        score = int(result[0]['score'])
        best = best_criterion_score(report, criterion)
        worst = worst_criterion_score(report, criterion)
        result = float((best - score) / (best - worst))
        return '%.3f' % result
    return 0


@register.simple_tag
def get_supplier_weighted_criterion_score(report, supplier, criterion):
    """
    Vikor Step 1 Calculation.
    """
    normalized = float(get_supplier_normalized_criterion_score(
        report, supplier, criterion))
    w = float(global_weight(criterion, report))
    result = normalized * w
    return '%.3f' % result


@register.simple_tag
def best_criterion_score(report, criterion):
    """
    Vikor Step 1 Calculation.
    """
    max_score = 0
    for item in report.criterion_supplier_score:
        if item['criterion_id'] == str(criterion['id']) and \
                int(item['score']) > max_score:
            max_score = int(item['score'])
    return max_score


@register.simple_tag
def get_si_value(report, supplier):
    """
    Vikor Step 1 Calculation.
    """
    total = 0
    for criterion in report.get_child_criterions():
        total += float(get_supplier_weighted_criterion_score(
            report, supplier, criterion))
    return '%.4f' % total


@register.simple_tag
def get_min_si_value(report):
    """
    Vikor Step 1 Calculation.
    """
    min_value = 0
    for supplier in report.suppliers:
        value = float(get_si_value(report, supplier))
        if min_value == 0 or value < min_value:
            min_value = value
    return '%.4f' % min_value


@register.simple_tag
def get_max_si_value(report):
    """
    Vikor Step 1 Calculation.
    """
    max_value = 0
    for supplier in report.suppliers:
        value = float(get_si_value(report, supplier))
        if value > max_value:
            max_value = value
    return '%.4f' % max_value


@register.simple_tag
def get_min_ri_value(report):
    """
    Vikor Step 1 Calculation.
    """
    min_value = 0
    for supplier in report.suppliers:
        value = float(get_ri_value(report, supplier))
        if min_value == 0 or value < min_value:
            min_value = value
    return '%.4f' % min_value


@register.simple_tag
def get_max_ri_value(report):
    """
    Vikor Step 1 Calculation.
    """
    max_value = 0
    for supplier in report.suppliers:
        value = float(get_ri_value(report, supplier))
        if value > max_value:
            max_value = value
    return '%.4f' % max_value


@register.simple_tag
def get_ri_value(report, supplier):
    """
    Vikor Step 1 Calculation.
    """
    max_value = 0
    for criterion in report.get_child_criterions():
        score = float(get_supplier_weighted_criterion_score(
            report, supplier, criterion))
        if score > max_value:
            max_value = score
    return '%.4f' % max_value


@register.simple_tag
def get_qi_value(report, supplier, weight, min_si, max_si,
                 min_ri, max_ri, si, ri):
    """
    Vikor Step 1 Calculation.
    """
    si = float(si)
    ri = float(ri)
    min_si = float(min_si)
    max_si = float(max_si)
    min_ri = float(min_ri)
    max_ri = float(max_ri)
    total = ((weight * (si - min_si)) / (max_si - min_si)) + \
            (((1 - weight) * (ri - min_ri)) / (max_ri - min_ri))
    return '%.4f' % total


@register.simple_tag
def worst_criterion_score(report, criterion):
    """
    Vikor Step 1 Calculation.
    """
    min_score = 0
    for item in report.criterion_supplier_score:
        if item['criterion_id'] == str(criterion['id']) and \
                (min_score == 0 or int(item['score']) < min_score):
            min_score = int(item['score'])
    return min_score


register.filter('global_weight', global_weight)
2.125
2
src/triggers/recommendation_trigger.py
jherrerotardon/spies
0
12792031
<filename>src/triggers/recommendation_trigger.py
from pyframework.triggers.abstract_trigger import AbstractTrigger

from src.commands.fire.base_fire import Event


class RecommendationTrigger(AbstractTrigger):
    ACTION_KEY_PREFIX = AbstractTrigger.ACTION_KEY_PREFIX + ':' + 'download'

    EVENT_TASK = Event.RECOMMENDATION_DOWNLOAD_TASK.value

    EVENT_ACTION = Event.RECOMMENDATION_DOWNLOAD_ACTION.value
1.695313
2
tests/test_show.py
SickChill/libtvdb
1
12792032
"""Test searching for shows.""" import datetime from tests.context import BaseTVDBTest from libtvdb.model.enums import AirDay, ShowStatus class ShowTestSuite(BaseTVDBTest): """Show test cases.""" def test_show_parse(self): """Test that a show is parsed as we'd expect.""" show = self.client().show_info(73739) self.assertEqual(show.added, None, f"'{show.added}' was not equal to expected added 'None'") self.assertEqual(show.added_by, None, f"'{show.added_by}' was not equal to expected added by 'None'") self.assertEqual(show.air_day, AirDay.tuesday, f"'{show.air_day}' was not equal to expected air_day '{AirDay.tuesday}'") self.assertEqual(show.air_time, '9:00 PM', f"'{show.air_time}' was not equal to expected air time '9:00 PM'") self.assertEqual(show.aliases, ['Lost: Missing Pieces'], f"'{show.aliases}' was not equal to expected aliases '{['Lost: Missing Pieces']}'") self.assertEqual(show.banner, 'graphical/73739-g4.jpg', f"'{show.banner}' was not equal to expected banner 'graphical/73739-g4.jpg") self.assertEqual(show.first_aired, datetime.date(2004, 9, 22), f"'{show.first_aired}' was not equal to expected first_aired '{datetime.date(2004, 9, 22)}'") self.assertEqual(show.genres, ['Action', 'Adventure', 'Drama', 'Science-Fiction'], f"'{show.genres}' was not equal to expected genres '{['Action', 'Adventure', 'Drama', 'Science-Fiction']}'") self.assertEqual(show.identifier, 73739, f"'{show.identifier}' was not equal to expected identifier '73739'") self.assertEqual(show.imdb_id, 'tt0411008', f"'{show.imdb_id}' was not equal to expected imdb_id 'tt0411008'") self.assertEqual(show.name, 'Lost', f"'{show.name}' was not equal to expected name Lost'") self.assertEqual(show.network, 'ABC (US)', f"'{show.network}' was not equal to expected network 'ABC (US)'") self.assertEqual(show.network_identifier, '', f"'{show.network_identifier}' was not equal to expected network_identifier ''") self.assertEqual(show.rating, 'TV-14', f"'{show.rating}' was not equal to expected rating 'TV-14'") self.assertEqual(show.runtime, '45', f"'{show.runtime}' was not equal to expected runtime '45'") self.assertEqual(show.series_identifier, '24313', f"'{show.series_identifier}' was not equal to expected series_identifier '24313'") self.assertEqual(show.site_rating, 9.1, f"'{show.site_rating}' was not equal to expected site_rating '9.1'") self.assertEqual(show.site_rating_count, 768, f"'{show.site_rating_count}' was not equal to expected site_rating_count '768'") self.assertEqual(show.slug, 'lost', f"'{show.slug}' was not equal to expected slug 'lost") self.assertEqual(show.status, ShowStatus.ended, f"'{show.status}' was not equal to expected status '{ShowStatus.ended}'") self.assertEqual(show.zap2it_id, 'SH672362', f"'{show.zap2it_id}' was not equal to expected zap2it_id 'SH672362'") self.assertGreaterEqual( show.last_updated, datetime.datetime(2018, 11, 23, 0, 28, 59), f"'{show.last_updated}' was not greater or equal to expected last_updated '{datetime.datetime(2018, 11, 23, 0, 28, 59)}'" ) #pylint: disable=line-too-long self.assertEqual(show.overview, 'After their plane, Oceanic Air flight 815, tore apart whilst thousands of miles off course, the survivors find themselves on a mysterious deserted island where they soon find out they are not alone.', f"'{show.overview}' was not equal to expected overview 'After their plane, Oceanic Air flight 815, tore apart whilst thousands of miles off course, the survivors find themselves on a mysterious deserted island where they soon find out they are not alone.'") #pylint: 
enable=line-too-long
2.703125
3
dffml/source/memory.py
Patil2099/dffml
0
12792033
<filename>dffml/source/memory.py<gh_stars>0
# SPDX-License-Identifier: MIT
# Copyright (c) 2019 Intel Corporation
"""
Fake data sources used for testing
"""
import asyncio
from typing import Any, Dict, List, NamedTuple, AsyncIterator

from ..base import BaseConfig
from ..repo import Repo
from .source import BaseSourceContext, BaseSource
from ..util.cli.arg import Arg
from ..util.entrypoint import entry_point


class MemorySourceContext(BaseSourceContext):
    async def update(self, repo):
        self.parent.mem[repo.src_url] = repo

    async def repos(self) -> AsyncIterator[Repo]:
        for repo in self.parent.mem.values():
            yield repo

    async def repo(self, src_url: str) -> Repo:
        return self.parent.mem.get(src_url, Repo(src_url))


class MemorySourceConfig(BaseConfig, NamedTuple):
    repos: List[Repo]


@entry_point("memory")
class MemorySource(BaseSource):
    """
    Stores repos in a dict in memory
    """

    CONTEXT = MemorySourceContext

    def __init__(self, config: BaseConfig) -> None:
        super().__init__(config)
        self.mem: Dict[str, Repo] = {}
        if isinstance(self.config, MemorySourceConfig):
            self.mem = {repo.src_url: repo for repo in self.config.repos}

    @classmethod
    def args(cls, args, *above) -> Dict[str, Arg]:
        cls.config_set(
            args, above, "keys", Arg(type=str, nargs="+", default=[])
        )
        return args

    @classmethod
    def config(cls, config, *above):
        return MemorySourceConfig(
            repos=list(map(Repo, cls.config_get(config, above, "keys")))
        )
2.078125
2
gp_dev/core.py
matjesg/gp_dev
0
12792034
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/00_core.ipynb (unless otherwise specified).

__all__ = ['ExactGPModel', 'MultitaskGPModel', 'nv_cost', 'week_of_month']

# Cell
import gpytorch
from math import ceil
import datetime

# Cell
class ExactGPModel(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood):
        super(ExactGPModel, self).__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel())

    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)

# Cell
class MultitaskGPModel(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood, num_tasks):
        super(MultitaskGPModel, self).__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.MultitaskMean(
            gpytorch.means.ConstantMean(), num_tasks=num_tasks
        )
        self.covar_module = gpytorch.kernels.MultitaskKernel(
            gpytorch.kernels.RBFKernel(), num_tasks=num_tasks, rank=1
        )

    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultitaskMultivariateNormal(mean_x, covar_x)

# Cell
def nv_cost(q, y, cu, co):
    if q > y:
        cost = (q - y) * co
    else:
        cost = (y - q) * cu
    return cost

# Cell
def week_of_month(dt_str):
    """
    Returns the week of the month for the specified date.
    """
    dt = datetime.datetime.strptime(dt_str, '%Y-%m-%d')
    first_day = dt.replace(day=1)
    dom = dt.day
    adjusted_dom = dom + first_day.weekday()
    return int(ceil(adjusted_dom / 7.0))
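# Worked example for week_of_month (a sanity check added here, not part of the
# notebook export): 2021-01-01 fell on a Friday, so Monday-based weekday() is 4
# and the adjusted day-of-month for 2021-01-15 is 15 + 4 = 19, giving
# ceil(19 / 7) == 3 -- the third week of the month.
#
#   >>> week_of_month('2021-01-15')
#   3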
2.09375
2
dephell/__main__.py
OliverHofkens/dephell
1,880
12792035
# app
from .cli import entrypoint

entrypoint()
1.0625
1
s06_classes_intro/exercises/book.py
silverfield/pythonsessions
0
12792036
<gh_stars>0
__author__ = 'ferrard'

# ---------------------------------------------------------------
# Imports
# ---------------------------------------------------------------

import pickle
import math


# ---------------------------------------------------------------
# Class - Book
# ---------------------------------------------------------------


class Book:
    """Represents an "electronic book" - one can add pages, save/load to file and search for phrases"""

    # ---------------------------------------------------------------
    # Initialisation
    # ---------------------------------------------------------------

    def __init__(self):
        self._pages = []
        self.can_do_tfidf = False
        self._word2number_of_pages_having_it = None
        self._page2word_frequency = None
        self._page2max_word_frequency = None

    # ---------------------------------------------------------------
    # Interface
    # ---------------------------------------------------------------

    def add_page(self, page):
        """Adds a new page to the book"""
        self._pages.append(page)
        self.can_do_tfidf = False

    def make_pages_from_textfile(self, fname, lines_per_page=20):
        """Opens given text file, and makes pages out of it"""
        with open(fname, 'r') as f:
            lines = f.readlines()
            buffer = []
            for line in lines:
                buffer.append(line)
                if len(buffer) == lines_per_page:
                    self.add_page("\n".join(buffer))
                    buffer.clear()
            if len(buffer) != 0:
                self.add_page("\n".join(buffer))
        self.can_do_tfidf = False

    def search_for(self, text):
        """Searches for the text in all the pages, then sorts the results by TF.IDF importance of the
        search phrase in the page
        """
        # find pages containing the text
        found_pages = [i for i in range(len(self._pages)) if text in self._pages[i]]

        # split the search string to words
        words = [word for word in self.__split_to_words(text)]

        # get the importance-score for each found page
        pages_and_scores = []
        for i in found_pages:
            score = 0
            for word in words:
                score += self.__tf_idf(word, i)
            pages_and_scores.append((i, score))

        # sort by importance score
        pages_and_scores.sort(key=lambda x: x[1], reverse=True)
        print("Search phrase \"" + text + "\" located in following pages (sorted by importance): ")
        for pas in pages_and_scores:
            print("\tPage " + str(pas[0]) + " (importance = " + str(pas[1]) + ")")

    def save_to_file(self, fname):
        """Saves the book to a specified file"""
        with open(fname, 'wb') as f:
            pickle.dump(self._pages, f)

    def load_from_file(self, fname):
        """Loads the book from specified file"""
        with open(fname, 'rb') as f:
            self._pages = pickle.load(f)
        self.can_do_tfidf = False

    def print_page(self, page_number, width=120):
        """Prints the specified page of the book, to the console

        One may specify the maximum width of the page
        """
        print('-'*width)
        print("| Page " + str(page_number + 1))
        print('-'*width)
        line = ""
        for c in self._pages[page_number]:
            line += c
            if len(line) == width:
                print(line)
                line = ""
        print(line)
        print('-'*width)
        print()

    def print_all(self, width=120):
        """Prints all the pages of the book, one by one, to the console"""
        for i in range(len(self._pages)):
            self.print_page(i, width)

    def print_important_words(self, page_number, k=10):
        """Gets the top k important words for given page"""
        words_and_importances = []
        for word in self.__get_words_for_page(page_number):
            words_and_importances.append((word, self.__tf_idf(word, page_number)))
        words_and_importances.sort(key=lambda x: x[1], reverse=True)
        print("Most important words on page " + str(page_number + 1))
        for wai in words_and_importances[:k]:
            print("\t" + wai[0] + " (importance = " + str(wai[1]) + ")")

    # ---------------------------------------------------------------
    # Implementation
    # ---------------------------------------------------------------

    def __tf_idf(self, word, page_index):
        """Computes the TF.IDF for given word on given page"""
        if not self.can_do_tfidf:
            self.__compute_stats_for_tfidf()
        return self.__tf(word, page_index)*self.__idf(word)

    def __tf(self, word, page_index):
        """Computes the TF for given word"""
        if not self.can_do_tfidf:
            self.__compute_stats_for_tfidf()
        f = 0 if word not in self._page2word_frequency[page_index] else self._page2word_frequency[page_index][word]
        return f/self._page2max_word_frequency[page_index]

    def __idf(self, word):
        """Computes the IDF for given word"""
        if not self.can_do_tfidf:
            self.__compute_stats_for_tfidf()
        n_word = 0 if word not in self._word2number_of_pages_having_it else self._word2number_of_pages_having_it[word]
        return math.log(len(self._pages)/(1 + n_word))

    def __get_words_for_page(self, page_index):
        """Gets the set of words found in the given page"""
        if not self.can_do_tfidf:
            self.__compute_stats_for_tfidf()
        return self._page2word_frequency[page_index].keys()

    @staticmethod
    def __filter_to_letters(word):
        """Returns the word filtered to contain letters only"""
        filtered = ""
        for c in word:
            if not c.isalpha():
                continue
            filtered += c
        return filtered

    def __split_to_words(self, s):
        """Splits the string s to words (in a simplistic manner used in this class) and iterates through them"""
        for word in s.split(' '):
            # skip empty words
            word = self.__filter_to_letters(word)
            word = word.lower()
            if len(word.strip()) == 0:
                continue
            yield word

    def __compute_stats_for_tfidf(self):
        """Computes stats necessary for computation of TF.IDF"""
        # get the necessary stats on the words
        word2number_of_pages_having_it = {}  # e.g. 'word' -> '15'
        page2word_frequency = {}  # e.g. 47 -> ('word' -> 2)
        for i in range(len(self._pages)):
            page = self._pages[i]
            page2word_frequency[i] = {}
            for word in self.__split_to_words(page):
                # mark the occurrence of the word for this page
                first_occurrence = False
                if word not in page2word_frequency[i]:
                    page2word_frequency[i][word] = 0
                    first_occurrence = True
                page2word_frequency[i][word] += 1

                # if this is the first time we see this word for this page, we also increase the # of pages having it
                if first_occurrence:
                    if word not in word2number_of_pages_having_it:
                        word2number_of_pages_having_it[word] = 0
                    word2number_of_pages_having_it[word] += 1

        # get max-word frequency for each page
        page2max_word_frequency = {}  # e.g. 47 -> 17
        for i in page2word_frequency:
            page2max_word_frequency[i] = max(page2word_frequency[i].values())

        self._word2number_of_pages_having_it = word2number_of_pages_having_it
        self._page2word_frequency = page2word_frequency
        self._page2max_word_frequency = page2max_word_frequency
        self.can_do_tfidf = True


# ---------------------------------------------------------------
# Main
# ---------------------------------------------------------------


def main():
    b = Book()
    b.make_pages_from_textfile('animal_farm.txt')
    b.print_page(0)
    b.print_important_words(0)
    b.search_for("pig")
    b.print_important_words(38)
    b.search_for("point of view")
    b.print_page(10)


if __name__ == '__main__':
    main()
2.921875
3
ocs_sample_library_preview/Security/AccessControlEntry.py
osisoft/sample-ocs-sample_libraries-python
4
12792037
<gh_stars>1-10
from enum import IntEnum
import json

from .AccessType import AccessType
from .CommonAccessRightsEnum import CommonAccessRightsEnum
from .Trustee import Trustee


class AccessControlEntry(object):
    """OCS access control entry definition"""

    def __init__(self, trustee: 'Trustee' = None, access_type: 'AccessType' = None,
                 access_rights: CommonAccessRightsEnum = None):
        self.Trustee = trustee
        self.AccessType = access_type
        self.AccessRights = access_rights

    @property
    def Trustee(self) -> 'Trustee':
        return self.__trustee

    @Trustee.setter
    def Trustee(self, value: 'Trustee'):
        self.__trustee = value

    @property
    def AccessType(self) -> 'AccessType':
        return self.__access_type

    @AccessType.setter
    def AccessType(self, value: 'AccessType'):
        self.__access_type = value

    @property
    def AccessRights(self) -> CommonAccessRightsEnum:
        return self.__access_rights

    @AccessRights.setter
    def AccessRights(self, value: CommonAccessRightsEnum):
        self.__access_rights = value

    def toJson(self):
        return json.dumps(self.toDictionary())

    def toDictionary(self):
        return {'Trustee': self.Trustee.toDictionary(),
                'AccessType': self.AccessType.value,
                'AccessRights': self.AccessRights.value}

    @staticmethod
    def fromJson(content: dict[str, str]):
        result = AccessControlEntry()

        if not content:
            return result

        if 'Trustee' in content:
            result.Trustee = Trustee.fromJson(content['Trustee'])

        if 'AccessType' in content and type(content['AccessType']) == int:
            result.AccessType = AccessType(content['AccessType'])
        elif 'AccessType' in content and type(content['AccessType']) == str:
            result.AccessType = AccessType[content['AccessType']]
        else:
            result.AccessType = AccessType.Allowed

        if 'AccessRights' in content:
            result.AccessRights = CommonAccessRightsEnum(
                content['AccessRights'])
        else:
            result.AccessRights = CommonAccessRightsEnum.none

        return result
2.578125
3
tests/odometry.py
oknuutti/visnav-py
4
12792038
import unittest
import pickle
import tempfile
import os
import math
from datetime import datetime

import numpy as np
import quaternion
import cv2

from visnav.algo.model import Camera
from visnav.algo.odometry import VisualOdometry, Pose
from visnav.algo import tools


class TestOdometry(unittest.TestCase):
    def setUp(self, verbose=False):
        self.cam = get_cam()
        params = {
            'min_keypoint_dist': 10,
            'min_inliers': 12,
            'min_2d2d_inliers': 24,
        }
        self.odo = VisualOdometry(self.cam, self.cam.width/4, verbose=verbose, pause=False,
                                  use_scale_correction=False, est_cam_pose=False, **params)

    def tearDown(self):
        pass

    def assertQuatAlmostEqual(self, quat0, quat1, delta=1e-4, msg=None):
        if quat0 is None and quat1 is None:
            return
        diff = math.degrees(tools.angle_between_q(quat0, quat1))
        self.assertAlmostEqual(0, diff, delta=delta,
                               msg=None if msg is None else (msg + ': angle[deg] %f > %f' % (diff, delta)))

    def assertArrayAlmostEqual(self, arr0, arr1, delta=1e-7, ord=np.inf, msg=None):
        if arr0 is None and arr1 is None:
            return
        norm = np.linalg.norm(np.array(arr0)-np.array(arr1), ord=ord)
        self.assertAlmostEqual(0, norm, delta=delta,
                               msg=None if msg is None else (msg + ': norm(%s) %f > %f' % (ord, norm, delta)))

    def assertPoseAlmostEqual(self, pose0: Pose, pose1: Pose, delta_v=1e-7, delta_q=1e-4, msg=None):
        if pose0 is None and pose1 is None:
            return
        self.assertArrayAlmostEqual(pose0.loc, pose1.loc, delta=delta_v, ord=2,
                                    msg=None if msg is None else (msg + ': loc %s vs %s' % (pose0.loc, pose1.loc)))
        self.assertQuatAlmostEqual(pose0.quat, pose1.quat, delta=delta_q,
                                   msg=None if msg is None else (msg + ': quat %s vs %s' % (pose0.quat, pose1.quat)))

    def assertOdomResultAlmostEqual(self, result0, result1):
        pose0, bias_sds0, scale_sd0 = result0
        pose1, bias_sds1, scale_sd1 = result1
        msg = '%s deviate(s) too much from the expected value(s)'
        self.assertPoseAlmostEqual(pose0, pose1, delta_v=0.02, delta_q=1, msg=msg % 'estimated poses')
        self.assertArrayAlmostEqual(bias_sds0, bias_sds1, delta=0.1, ord=np.inf, msg=msg % 'error estimates')
        self.assertAlmostEqual(scale_sd0, scale_sd1, delta=0.01, msg=msg % 'scale error estimate')

    def test_rotating_object(self, inputs=None, results=None):
        pickle_file = os.path.join(os.path.dirname(__file__), 'data', 'test_rotating_object.pickle')
        record = inputs is not None and results is None
        if not record and results is None:
            inputs, results = self._load_recording(pickle_file)
        else:
            results = []

        cam_q = quaternion.one
        orig_time = datetime.strptime('2020-07-01 15:42:00', '%Y-%m-%d %H:%M:%S').timestamp()

        for i, (img, cam_obj_v, cam_obj_q) in enumerate(inputs):
            time = datetime.fromtimestamp(orig_time + i*60)
            prior = Pose(cam_obj_v, cam_obj_q, np.ones((3,)) * 0.1, np.ones((3,)) * 0.01)
            res = self.odo.process(img, time, prior, cam_q)

            if record:
                results.append(res)
            elif 0:
                self.assertOdomResultAlmostEqual(results[i], res)

            if i > 1 and 0:
                self.assertIsNotNone(res[0], msg='failed to get pose estimate')
                self.assertPoseAlmostEqual(prior, res[0], delta_v=0.1, delta_q=10,
                                           msg='estimated pose deviates too much from the real one')

        if record:
            self._save_recording(pickle_file, inputs, results)

    def _save_recording(self, fname, inputs, results):
        tf = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
        tf.close()
        for i in range(len(inputs)):
            cv2.imwrite(tf.name, inputs[i][0], (cv2.IMWRITE_PNG_COMPRESSION, 9))
            with open(tf.name, 'br') as fh:
                inputs[i][0] = fh.read()
        os.unlink(tf.name)
        with open(fname, 'wb') as fh:
            pickle.dump((inputs, results), fh)

    def _load_recording(self, fname):
        with open(fname, 'rb') as fh:
            inputs, results = pickle.load(fh)
        tf = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
        tf.close()
        for i in range(len(inputs)):
            with open(tf.name, 'wb') as fh:
                fh.write(inputs[i][0])
            inputs[i][0] = cv2.imread(tf.name, cv2.IMREAD_GRAYSCALE)
        os.unlink(tf.name)
        return inputs, results


def get_rot_imgs():
    pass


def get_cam():
    common_kwargs_worst = {
        'sensor_size': (2048 * 0.0022, 1944 * 0.0022),
        'quantum_eff': 0.30,
        'px_saturation_e': 2200,  # snr_max = 20*log10(sqrt(sat_e)) dB
        'lambda_min': 350e-9,
        'lambda_eff': 580e-9,
        'lambda_max': 800e-9,
        'dark_noise_mu': 40,
        'dark_noise_sd': 6.32,
        'readout_noise_sd': 15,  # dark_noise_sd should be sqrt(dark_noise_mu)
        'emp_coef': 1,  # dynamic range = 20*log10(sat_e/readout_noise))
        'exclusion_angle_x': 55,
        'exclusion_angle_y': 90,
    }
    common_kwargs_best = dict(common_kwargs_worst)
    common_kwargs_best.update({
        'quantum_eff': 0.4,
        'px_saturation_e': 3500,
        'dark_noise_mu': 25,
        'dark_noise_sd': 5,
        'readout_noise_sd': 5,
    })
    common_kwargs = common_kwargs_best

    return Camera(
        2048,  # width in pixels
        1944,  # height in pixels
        7.7,  # x fov in degrees (could be 6 & 5.695, 5.15 & 4.89, 7.7 & 7.309)
        7.309,  # y fov in degrees
        f_stop=5,  # TODO: put better value here
        point_spread_fn=0.50,  # ratio of brightness in center pixel
        scattering_coef=2e-10,  # affects strength of haze/veil when sun shines on the lens
        **common_kwargs
    )


if __name__ == '__main__':
    import sys
    if len(sys.argv) > 1 and sys.argv[1] == 'record':
        from visnav.algo.model import SystemModel
        from visnav.missions.didymos import DidymosSystemModel
        from visnav.render.render import RenderEngine
        from visnav.settings import *

        sm = DidymosSystemModel(use_narrow_cam=False, target_primary=False, hi_res_shape_model=True)
        re = RenderEngine(sm.cam.width, sm.cam.height, antialias_samples=0)
        re.set_frustum(sm.cam.x_fov, sm.cam.y_fov, 0.05, 2)
        obj = sm.asteroid.real_shape_model
        obj_idx = re.load_object(obj)

        light = np.array([1, 0, -0.5])
        light /= np.linalg.norm(light)
        cam_ast_v0 = np.array([0, 0, -sm.min_med_distance * 0.7])
        cam_ast_q0 = quaternion.one
        dq = tools.angleaxis_to_q((math.radians(1), 0, 1, 0))

        inputs = []
        for i in range(60):
            cam_ast_v = cam_ast_v0
            cam_ast_q = dq**i * cam_ast_q0
            image = re.render(obj_idx, cam_ast_v, cam_ast_q, light, gamma=1.8, get_depth=False)
            cam_ast_cv_v = tools.q_times_v(SystemModel.cv2gl_q, cam_ast_v)
            cam_ast_cv_q = SystemModel.cv2gl_q * cam_ast_q * SystemModel.cv2gl_q.conj()
            inputs.append([image, cam_ast_cv_v, cam_ast_cv_q])

        if 0:
            for image, _, _ in inputs:
                cv2.imshow('t', cv2.resize(image, None, fx=0.5, fy=0.5))
                cv2.waitKey()
        else:
            t = TestOdometry()
            t.setUp(verbose=True)
            t.test_rotating_object(inputs=inputs)
    else:
        unittest.main()
2.5625
3
bot/main.py
curtonius/HitchhikerBot
0
12792039
<filename>bot/main.py
import discord
import os
#import pynacl
#import dnspython
import server

from discord.ext import commands
from discord.utils import get
import re

bot = commands.Bot(command_prefix="!")
TOKEN = os.getenv("DISCORD_TOKEN")

# regex patterns used to normalise leetspeak / lookalike characters
replace = {
    "\\|3": "b",
    "/\\\\": "a",
    "ph": "f",
    "ck": "k",
    "zz": "s",
    "qw": "qu",
    "kw": "qu",
    "wh": "h",
    "uh": "a",
    "pe": "p",
    "p3": "p",
    "nn": "n",
    "🇦 ": "a",
    "🇧 ": "b",
    "🇨 ": "c",
    "🇩 ": "d",
    "🇪 ": "e",
    "🇫 ": "f",
    "🇬 ": "g",
    "🇭 ": "h",
    "🇮 ": "i",
    "🇯 ": "j",
    "🇰 ": "k",
    "🇱 ": "l",
    "🇲 ": "m",
    "🇳 ": "n",
    "🇴 ": "o",
    "🇵 ": "p",
    "🇶 ": "q",
    "🇷 ": "r",
    "🇸 ": "s",
    "🇹 ": "t",
    "🇺 ": "u",
    "🇻 ": "v",
    "🇼 ": "w",
    "🇽 ": "x",
    "🇾 ": "y",
    "🇿 ": "z",
    "¶": "p",
    "[@4^âäàåæáã]": "a",
    "[68]": "b",
    "[¢ç©]": "c",
    "[3êëèæ€]": "e",
    "[l1!|ïîìí¡¦]": "i",
    "[0ôöòó•Ø]": "o",
    "[z5$§]": "s",
    "[%+7]": "t",
    "[üûùúµ]": "u",
    "[ÿ¥µŸý]": "y"
}

bad_word = {
    "re+ta+rd",         #r slur
    "ni+g+[eau]r?",     #n slur
    "[qck]o+o+n",       #c slur
    "we+tba+[qck]",     #wetback
    "be+[ae]n[euar]r",  #beaner
    "chi+n[qck]",       #chinese c slur
    "g[yi]psy",         #g slur
    "tra+n+y",          #t slur
    "she+ma+le",        #shemale
    "d[yi]ke",          #d slur
    "fa+g",             #f slur
}

exceptions = {
    "retardant", "rac+oon", "cocoon", "tycoon", "cooncan", "puccoon", "coontie",
    "coonskin", "coonhound", "beanery", "beaneries", "pachinko", "chinkapin",
    "chinquapin", "chinch", "chincapin", "vandyke", "klondike", "fagin", "fage"
}

role_type_dictionary = {
    'React to this message with the notification roles you would like.\n🔴 Youtube Notifications\n🟣 Stream Notifications\n🟡 Announcement Notifications': {
        "🔴": 'Youtube Notifications',
        "🟣": 'Stream Notifications',
        "🟡": 'Announcement Notifications'
    },
    'React to this message with the gender roles you identify as.\n❤️ He/Him\n🧡 She/Her\n💛 They/Them\n💚 He/They\n💙 She/They\n💜 Name Only\n🤍 Ask for Pronouns': {
        "❤️": 'He/Him',
        "🧡": 'She/Her',
        "💛": 'They/Them',
        "💚": 'He/They',
        "💙": 'She/They',
        "💜": 'Name Only',
        "🤍": 'Ask for Pronouns'
    },
    'React to this message with a ✅ to accept being pinged by anybody at any time (Non-Notification based pings)': {
        "✅": 'Accept Pings'
    }
}

channels = {
    "🔔-assign-roles": 951692177086488626,
    "bot-dev": 958468714846634004
}


@bot.event
async def on_ready():
    print('We have logged in as {0.user}'.format(bot))


@bot.event
async def on_message(message):
    if message.author == bot.user:
        return

    text = message.content
    for pattern in replace.keys():
        text = re.sub(pattern, replace[pattern], text)

    matched = False
    match = 0
    for pattern in bad_word:
        result = re.search(pattern, text)
        if result:
            matched = True
            match += 1
            print("Matched with: " + pattern)

    for pattern in exceptions:
        result = re.search(pattern, text)
        if result and matched == True:
            match -= 1
            print("Unmatched with: " + pattern)

    if matched == True and match != 0:
        channel = bot.get_channel(channels['bot-dev'])
        await channel.send(message.author.display_name + " posted in " + "Channel **" + message.channel.name + "**:\n" + message.content)
        await message.delete()

    await bot.process_commands(message)


@bot.event
async def on_raw_reaction_add(payload):
    if payload.channel_id != channels["🔔-assign-roles"]:
        return

    channel = bot.get_channel(payload.channel_id)
    message = await channel.fetch_message(payload.message_id)
    user = payload.member
    emoji_check = str(payload.emoji)

    if message.content in role_type_dictionary.keys():
        role = None
        role_dictionary = role_type_dictionary[message.content]
        if emoji_check in role_dictionary.keys():
            role = get(user.guild.roles, name=role_dictionary[emoji_check])
        else:
            await message.remove_reaction(emoji_check, user)
        if role != None:
            await user.add_roles(role)


@bot.event
async def on_raw_reaction_remove(payload):
    if payload.channel_id != channels["🔔-assign-roles"]:
        return

    channel = bot.get_channel(payload.channel_id)
    message = await channel.fetch_message(payload.message_id)
    guild = await bot.fetch_guild(payload.guild_id)
    user = await guild.fetch_member(payload.user_id)
    emoji_check = str(payload.emoji)

    if message.content in role_type_dictionary.keys():
        role = None
        role_dictionary = role_type_dictionary[message.content]
        if emoji_check in role_dictionary.keys():
            role = get(user.guild.roles, name=role_dictionary[emoji_check])
        if role != None:
            await user.remove_roles(role)


@bot.command()
async def get_channel_id(ctx):
    channel = bot.get_channel(channels['bot-dev'])
    await channel.send('Channel **' + ctx.channel.name + '** ID: ' + str(ctx.channel.id))
    await ctx.message.delete()


@bot.command()
async def prime_reactions(ctx):
    if ctx.message.reference == None:
        return

    ref_message = await ctx.channel.fetch_message(ctx.message.reference.message_id)
    await ctx.message.delete()

    if ref_message.content in role_type_dictionary.keys():
        for emoji_check in role_type_dictionary[ref_message.content].keys():
            await ref_message.add_reaction(emoji_check)


server.server()
bot.run(TOKEN)
2.46875
2
shepard.py
randbrown/PyWaveTools
1
12792040
<reponame>randbrown/PyWaveTools<gh_stars>1-10
"""
Generate shepard tone
"""
import wavelib
#import plotlib

FREQ_A4 = 440.0
STEPS = 12.0
DURATION_PER_STEP = 1.0  # seconds
TOTAL_DURATION = DURATION_PER_STEP * STEPS


def shepard_glissando(times, freq_start, freq_end):
    # the exponential glissando sounds good, but when stitching together with play_n,
    # there is an audible click. perhaps due to rounding?
    # so for now I'm leaving this as the linear scaling to avoid the click... TODO fix it
    #freq = wavelib.glissando(times, freq_start, freq_end)
    freq = wavelib.glissando_lin(times, freq_start, freq_end)
    print('gliss: ', freq)
    vals = wavelib.shepardtone(times, freq)
    return vals


def shepard_discrete(times, freq_start, freq_end):
    freq = wavelib.discrete(times, freq_start, freq_end, STEPS)
    print('discrete: ', freq)
    vals = wavelib.shepardtone(times, freq)
    return vals


def main():
    """main function"""
    # times is array of values at each time slot of the whole wav file
    times = wavelib.createtimes(TOTAL_DURATION)

    vals = wavelib.normalize(shepard_glissando(times, FREQ_A4*2, FREQ_A4))
    #plotlib.plot_wave_and_fft(times, vals)
    vals = wavelib.play_n(vals, 2)
    wavelib.write_wave_file('output/shepard_glissando_down_2x.wav', vals)

    vals = wavelib.normalize(shepard_discrete(times, FREQ_A4*2, FREQ_A4))
    #plotlib.plot_wave_and_fft(times, vals)
    vals = wavelib.play_n(vals, 2)
    wavelib.write_wave_file('output/shepard_discrete_down_2x.wav', vals)

    vals = wavelib.normalize(shepard_glissando(times, FREQ_A4, FREQ_A4*2))
    #plotlib.plot_wave_and_fft(times, vals)
    vals = wavelib.play_n(vals, 2)
    wavelib.write_wave_file('output/shepard_glissando_up_2x.wav', vals)

    vals = wavelib.normalize(shepard_discrete(times, FREQ_A4, FREQ_A4*2))
    #plotlib.plot_wave_and_fft(times, vals)
    vals = wavelib.play_n(vals, 2)
    wavelib.write_wave_file('output/shepard_discrete_up_2x.wav', vals)


main()
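# Sketch of another variant using only the same wavelib calls as above
# (parameters are illustrative, not from the original file): a downward octave
# sweep starting one octave lower, repeated four times instead of two.
#
#   vals = wavelib.normalize(shepard_glissando(times, FREQ_A4, FREQ_A4 / 2))
#   vals = wavelib.play_n(vals, 4)
#   wavelib.write_wave_file('output/shepard_glissando_down_low_4x.wav', vals)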
2.703125
3
retinal-fundus/src/bsmu/retinal_fundus/app/main.py
IvanKosik/vision
2
12792041
<filename>retinal-fundus/src/bsmu/retinal_fundus/app/main.py<gh_stars>1-10
from bsmu.vision.app.base import App


class RetinalFundusApp(App):
    pass


def run_app():
    print('Run, Retinal Fundus! Run!')

    app = RetinalFundusApp()
    app.run()


if __name__ == '__main__':
    run_app()
1.382813
1
plugins/content/content_birthday.py
almazboot/sketal
43
12792042
<filename>plugins/content/content_birthday.py
import datetime

from handler.base_plugin import CommandPlugin
from utils import plural_form, age


class BirthdayPlugin(CommandPlugin):
    __slots__ = ("max_users_in_group", )

    def __init__(self, *commands, prefixes=None, strict=False, max_users_in_group=1000):
        """Answers with birthday for users in group (but no more than `max_users_in_group`), for users in conference."""
        if not commands:
            commands = ("дни рождения",)

        super().__init__(*commands, prefixes=prefixes, strict=strict)

        self.max_users_in_group = max_users_in_group

        self.set_description()

    def set_description(self):
        example = self.command_example()
        self.description = [f"Birthdays",
                            f"Shows the birthdays of people in a group or in a conference.",
                            f"{example} - show birthdays in this conference.",
                            f"{example} <group id> - show birthdays of the group's members."]

    async def process_message(self, msg):
        command, argument = self.parse_message(msg)

        if argument:
            members = []
            offset = 0

            while True:
                result = await msg.api.groups.getMembers(group_id=argument, offset=offset, fields="bdate")

                if not result or "items" not in result or not result["items"]:
                    if offset == 0:
                        return await msg.answer("Could not fetch the community, or it is empty!")
                    break

                members += result["items"]
                offset += 1000

                if result["count"] > self.max_users_in_group:
                    await msg.answer(f"You are trying to look up the birthdays of too many people!\n"
                                     f"Only {self.max_users_in_group} of the users will be shown")
                    break

            message = f"Birthdays of the users in group \"{argument}\" ✨:\n"

        else:
            if not msg.is_multichat:
                members = await msg.api.users.get(user_ids=msg.user_id, fields="bdate")
                message = f"Your birthday ✨:\n"
            else:
                members = await msg.api.messages.getChatUsers(chat_id=msg.chat_id, fields="bdate")
                message = f"Birthdays of the users in this conference ✨:\n"

        data = []
        now = datetime.datetime.today().date()

        for m in members:
            if "bdate" not in m or "deactivated" in m:
                continue

            try:
                if m['bdate'].count(".") > 1:
                    year = True
                    user_date = datetime.datetime.strptime(m['bdate'], '%d.%m.%Y').date()
                else:
                    year = False
                    user_date = datetime.datetime.strptime(m['bdate'], '%d.%m').date()
            except ValueError:
                continue

            try:
                check_date = user_date.replace(year=now.year)
            except ValueError:
                check_date = user_date + (datetime.date(now.year, 1, 1) - datetime.date(user_date.year, 1, 1))

            difference = check_date - now

            if difference.days < 0:
                check_date = check_date.replace(year=now.year + 1)
                difference = check_date - now

            bdate_in = " (coming in " + plural_form(difference.days, ("day", "days", "days")) + ")"

            if year:
                bdate_in = bdate_in[:-1] + ", turning " + plural_form(age(user_date) + 1, ("year", "years", "years")) + ")"

            data.append((" 🌍 " + m["first_name"] + " " + m["last_name"] + ": " + user_date.strftime("%d.%m") + bdate_in,
                         difference.days))

        message += "\n".join(d[0] for d in sorted(data, key=lambda x: x[1]))

        return await msg.answer(message)
2.65625
3
src/powerbidedicated/azext_powerbidedicated/vendored_sdks/powerbidedicated/models/_power_bi_dedicated_management_client_enums.py
Mannan2812/azure-cli-extensions
8
12792043
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from enum import Enum


class SkuTier(str, Enum):

    pbie_azure = "PBIE_Azure"


class State(str, Enum):

    deleting = "Deleting"
    succeeded = "Succeeded"
    failed = "Failed"
    paused = "Paused"
    suspended = "Suspended"
    provisioning = "Provisioning"
    updating = "Updating"
    suspending = "Suspending"
    pausing = "Pausing"
    resuming = "Resuming"
    preparing = "Preparing"
    scaling = "Scaling"


class ProvisioningState(str, Enum):

    deleting = "Deleting"
    succeeded = "Succeeded"
    failed = "Failed"
    paused = "Paused"
    suspended = "Suspended"
    provisioning = "Provisioning"
    updating = "Updating"
    suspending = "Suspending"
    pausing = "Pausing"
    resuming = "Resuming"
    preparing = "Preparing"
    scaling = "Scaling"
2.0625
2
scripts/opengitdiff.py
jmettraux/dotvim
2
12792044
<gh_stars>1-10
# -*- coding: UTF-8 -*-

# opengitdiff.py

import sys, re, subprocess

lines = subprocess\
    .Popen(
        'git diff -U9999999 --no-color ' + sys.argv[1],
        shell=True, stdout=subprocess.PIPE, universal_newlines=True)\
    .stdout\
    .readlines()

digits = str(len(str(len(lines))))

lnum = -1

for line in lines:

    line = line.rstrip('\r\n')

    if lnum > -1:
        if re.match(r'^[^-]', line):
            lnum = lnum + 1
            print(('%' + digits + 'i %s') % (lnum, line))
        else:
            print(line)

    m = re.match(r'^@@ [-+0-9, ]+ @@$', line)
    if m:
        lnum = 0
        print()

print()
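# Format-string sanity check (illustrative, not part of the original script):
# with 250 diff lines, len(str(250)) is 3, so the template becomes '%3i %s' and
# line numbers are right-aligned in a 3-character column:
#
#   >>> ('%' + '3' + 'i %s') % (7, 'foo')
#   '  7 foo'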
2.59375
3
web_scanner/whois.py
ntnshrm87/4hathacker
0
12792045
<reponame>ntnshrm87/4hathacker<filename>web_scanner/whois.py
#!/usr/bin/python

import os

def get_whois(url):
    command = "whois" + " " + url
    process = os.popen(command)
    results = str(process.read())
    #print results
    return results
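# A shell-injection-safe variant (a sketch added here, not part of the original
# module): passing the URL as a list argument avoids the shell entirely, unlike
# the string concatenation used by get_whois above.
import subprocess

def get_whois_safe(url):
    result = subprocess.run(["whois", url], capture_output=True, text=True)
    return result.stdout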
2.640625
3
setup.py
bsodhi/books_scraper
0
12792046
import setuptools

with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="books_info-bsodhi",
    version="0.0.2",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Books data scraper.",
    long_description="Scrapes books and articles information from Goodreads and Google Scholar",
    long_description_content_type="text/markdown",
    url="https://github.com/bsodhi/books_scraper",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
    install_requires=["requests", "cryptography", "pyOpenSSL",
                      "lxml", "argparse", "beautifulsoup4",
                      "fake_useragent", "scholarly", "selenium", ],
    entry_points={
        'console_scripts': [
            'bscrape=books_scraper.scraper:main',
        ],
    },
)
1.695313
2
triangulos.py
userddssilva/Exemplos-python-introducao-a-programacao
0
12792047
<filename>triangulos.py
def lado_maior(a, b, c):
    if a > b and a > c:
        return a
    elif b > a and b > c:
        return b
    else:
        return c


def lados_menores(a, b, c):
    if a > b and a > c:
        return b, c
    elif b > a and b > c:
        return a, c
    else:
        return a, b


def eh_triangulo(a, b, c):
    _a = lado_maior(a, b, c)
    _b, _c = lados_menores(a, b, c)
    if _a >= _b + _c:
        return False
    else:
        return True


def eh_equilatero(a, b, c):
    if a == b and b == c:
        return True
    return False


def eh_isosceles(a, b, c):
    if (a == b) or (a == c) or (b == c):
        return True
    return False


def eh_escaleno(a, b, c):
    if (a != b) and (a != c) and (b != c):
        return True
    return False


def tipo_triangulo(a, b, c):
    _a = lado_maior(a, b, c)
    _b, _c = lados_menores(a, b, c)
    a, b, c = _a, _b, _c
    if eh_equilatero(a, b, c):
        return 1
    elif eh_isosceles(a, b, c):
        return 2
    elif eh_escaleno(a, b, c):
        return 3


ler = True
while ler == True:
    print("Enter three values!")
    lado_a = int(input())
    lado_b = int(input())
    lado_c = int(input())

    if (lado_a < 1) or (lado_b < 1) or (lado_c < 1):
        print("Invalid value!")
        print("Enter values of at least 1!")
    else:
        if eh_triangulo(lado_a, lado_b, lado_c):
            print("It is a triangle")
            tipo = tipo_triangulo(lado_a, lado_b, lado_c)
            if tipo == 1:
                print("Equilateral type")
            elif tipo == 2:
                print("Isosceles type")
            elif tipo == 3:
                print("Scalene type")
        else:
            print("It is not a triangle")

    novo = input("Do you want to enter new values? [Y/N]: ")
    if (novo == "N"):
        print("********** end ************")
        ler = False
4
4
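A quick worked check of the helpers above (sketch; values chosen for illustration):

assert eh_triangulo(3, 4, 5)           # 5 < 3 + 4: a valid triangle
assert not eh_triangulo(1, 2, 10)      # 10 >= 1 + 2: fails the triangle inequality
assert tipo_triangulo(2, 2, 2) == 1    # equilateral
assert tipo_triangulo(2, 2, 3) == 2    # isosceles
assert tipo_triangulo(3, 4, 5) == 3    # scalene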
ignore_signals.py
alex-bormotov/AXE-Bot-open
18
12792048
<gh_stars>10-100
import time

from notification import notificator
from config import get_config

# Counters persist across calls so runs of consecutive signals can be skipped.
ignore_buy_signal_counter = 0
ignore_sell_signal_counter = 0


def ignore_buy_signal_times(signal, times):
    """Skip the first `times` consecutive BUY signals, sleeping between them."""
    global ignore_buy_signal_counter
    if signal["signal"] == "BUY" and ignore_buy_signal_counter < times:
        notificator(
            "Ignore {} {} ... cooldown {} seconds".format(
                signal["signal"],
                str(ignore_buy_signal_counter + 1),
                get_config()["ignore_buy_cooldown_sec"],
            )
        )
        ignore_buy_signal_counter = ignore_buy_signal_counter + 1
        time.sleep(int(get_config()["ignore_buy_cooldown_sec"]))
        return "PASS"
    else:
        notificator("Execute {}".format(signal["signal"]))
        ignore_buy_signal_counter = 0
        return "OK"


def ignore_sell_signal_times(signal, times):
    """Skip the first `times` consecutive SELL signals, sleeping between them."""
    global ignore_sell_signal_counter
    if signal["signal"] == "SELL" and ignore_sell_signal_counter < times:
        notificator(
            "Ignore {} {} ... cooldown {} seconds".format(
                signal["signal"],
                str(ignore_sell_signal_counter + 1),
                get_config()["ignore_sell_cooldown_sec"],
            )
        )
        ignore_sell_signal_counter = ignore_sell_signal_counter + 1
        time.sleep(int(get_config()["ignore_sell_cooldown_sec"]))
        return "PASS"
    else:
        notificator("Execute {}".format(signal["signal"]))
        ignore_sell_signal_counter = 0
        return "OK"


def ignore_signal_time(signal, time_sec):
    """Delay execution of any signal by `time_sec` seconds."""
    notificator(
        "{} signal received, sleep {} sec ...".format(signal["signal"], time_sec)
    )
    time.sleep(time_sec)
    notificator("Execute {}, after sleep {} sec ".format(signal["signal"], time_sec))
    return signal
2.5625
3
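A rough usage sketch of the buy-side gate (the `notification` and `config` modules are project-specific, so real runs also trigger notifications and cooldown sleeps):

signal = {"signal": "BUY"}

# With times=2, the first two consecutive BUY signals are swallowed
# and the third is executed.
for _ in range(3):
    print(ignore_buy_signal_times(signal, times=2))   # PASS, PASS, OK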
perftool/__main__.py
YajanaRao/Perftool
3
12792049
<reponame>YajanaRao/Perftool
import os
import sys
import random
from os import path

# Make the package importable when running this module from the source tree.
sys.path.insert(0, path.dirname(path.abspath(path.dirname(__file__))))

from perftool import interactive, main


class color:
    HEADER = '\033[95m'
    IMPORTANT = '\33[35m'
    NOTICE = '\033[33m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    RED = '\033[91m'
    END = '\033[0m'
    UNDERLINE = '\033[4m'
    LOGGING = '\33[34m'


class welcome:
    # Pick a random color for the banner on every start-up.
    color_random = [color.HEADER, color.IMPORTANT, color.NOTICE, color.OKBLUE,
                    color.OKGREEN, color.WARNING, color.RED, color.END,
                    color.UNDERLINE, color.LOGGING]
    random.shuffle(color_random)
    note = color_random[0] + '''
  ____    _____   ____    _____   _____    ___     ___    _
 |  _ \  | ____| |  _ \  |  ___| |_   _|  / _ \   / _ \  | |
 | |_) | |  _|   | |_) | | |_      | |   | | | | | | | | | |
 |  __/  | |___  |  _ <  |  _|     | |   | |_| | | |_| | | |___
 |_|     |_____| |_| \_\ |_|       |_|    \___/   \___/  |_____|
''' + color.END
    inputPrompt = ">>"


if __name__ == '__main__':
    if len(sys.argv) < 2:
        # No arguments: start the interactive console.
        print(welcome.note)
        print("\t \t \033[0m :- Yajana.N.Rao")
        print("Enter 'break' to exit the console")
        while True:
            if sys.version_info[0] > 2:
                command = input(welcome.inputPrompt)
            else:
                command = raw_input(welcome.inputPrompt)
            if "break" in command or "exit" in command:
                print("Exiting the application")
                break
            elif command == "clear":
                os.system('cls' if os.name == 'nt' else 'clear')
            else:
                interactive(command)
    else:
        main()
2.875
3
geodex/utils.py
developmentseed/geodex
37
12792050
""" utils.py @author: developmentseed Functions used to generate a list of tiles via recursion """ from os import path as op import json from shapely.geometry import Polygon from pygeotile.tile import Tile def _get_quadrant_tiles(tile): """Return indicies of tiles at one higher zoom (in google tiling scheme)""" ul = (tile.google[0] * 2, tile.google[1] * 2) return [Tile.from_google(ul[0], ul[1], tile.zoom + 1), # UL Tile.from_google(ul[0], ul[1] + 1, tile.zoom + 1), # LL Tile.from_google(ul[0] + 1, ul[1], tile.zoom + 1), # UR Tile.from_google(ul[0] + 1, ul[1] + 1, tile.zoom + 1)] # LR def _calc_overlap(geom1, geom2): """Return area overlap""" return geom1.intersection(geom2).area def load_geojson(geojson_fpath): """Load geojson and return all contained polygons. Parameters: ---------- geojson_fpath: str Filepath of to geojson containing boundaries. Returns: ------- bounds: list List of geometries read from geojson file.""" if not op.exists(geojson_fpath): raise FileNotFoundError('{} does not exist'.format(geojson_fpath)) if not op.splitext(geojson_fpath) not in ['.geojson', '.json']: raise ValueError('{} should be a .geojson or .json file'.format(geojson_fpath)) bounds = None with open(geojson_fpath, 'r') as geojson_f: raw_json = json.loads(geojson_f.read()) features = raw_json['features'] bounds = [feat['geometry'] for feat in features if feat['geometry']['type'] in ['Polygon', 'MultiPolygon']] return bounds def format_tile(tile, tile_format, format_str='{x} {y} {z}'): """Convert tile to necessary format. Parameters ---------- tile: pygeotile.tile.Tile Tile object to be formatted. tile_format: str Desired tile format. `google`, `tms`, or `quad_tree` format_str: str String to guide formatting. Only used for `google` or `tms` (as quad_tree is one value). Default: "{x} {y} {z}". Example: "{z}-{x}-{y}" """ if tile_format == 'google': td = {key: val for key, val in zip(['x', 'y', 'z'], list(tile.google) + [tile.zoom])} return format_str.format(**td) elif tile_format == 'tms': td = {key: val for key, val in zip(['x', 'y', 'z'], list(tile.tms) + [tile.zoom])} return format_str.format(**td) elif tile_format == 'quad_tree': return tile.quad_tree else: raise ValueError('`tile_format`: {} not recognized'.format(tile_format)) def get_overlap_child_tiles(tile, roi_geom, completely_contained=False): """Find all children tiles that overlap a boundary Parameters ---------- tile: pygeotile.tile.Tile Tile that is checked for overlap with `roi_geom`. roi_geom: shapely.geometry.shape Boundary of region-of-interest. completely_contained: bool Whether or not a tile is completely contained in the boundary. If a tile is found to have 100% overlap with boundary, set to `True` and algorithm can avoid calculating overlap for all future child tiles. Default False. 
Returns: ------- return_tiles: list of pygeotile.tile.Tile, bool Tiles that are children of `tile` and overlap the boundary """ return_tiles = [] quad_tiles = _get_quadrant_tiles(tile) # Compute four contained tiles # If sub-tiles are completely contained within boundary, no need to compute overlap if completely_contained: return [[qt, True] for qt in quad_tiles] # For each tile, compute overlap with ROI boundary for qt in quad_tiles: ll, ur = qt.bounds # Get lower-left and upper-right points tile_pts = ((ll[1], ll[0]), (ur[1], ll[0]), (ur[1], ur[0]), (ll[1], ur[0])) tile_polygon = Polygon(tile_pts) # Calculate overlap of tile with ROI overlap_area = _calc_overlap(roi_geom, tile_polygon) # If 100% overlap, indicate this to avoid checking overlap in future if overlap_area == tile_polygon.area: return_tiles.append([qt, True]) elif overlap_area > 0: return_tiles.append([qt, False]) return return_tiles
3.015625
3
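A short driver sketch for the tiling helpers above (assumes shapely and pygeotile are installed; the ROI rectangle is invented for illustration):

from shapely.geometry import shape

roi = shape({"type": "Polygon",
             "coordinates": [[[0.0, 0.0], [20.0, 0.0], [20.0, 20.0],
                              [0.0, 20.0], [0.0, 0.0]]]})

root = Tile.from_google(0, 0, 0)                 # the single zoom-0 world tile
for child, fully_inside in get_overlap_child_tiles(root, roi):
    print(format_tile(child, 'google', '{z}/{x}/{y}'), fully_inside)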