column                                      dtype            range / classes
hexsha                                      stringlengths    40 .. 40
size                                        int64            5 .. 2.06M
ext                                         stringclasses    11 values
lang                                        stringclasses    1 value
max_stars_repo_path                         stringlengths    3 .. 251
max_stars_repo_name                         stringlengths    4 .. 130
max_stars_repo_head_hexsha                  stringlengths    40 .. 78
max_stars_repo_licenses                     sequencelengths  1 .. 10
max_stars_count                             int64            1 .. 191k
max_stars_repo_stars_event_min_datetime     stringlengths    24 .. 24
max_stars_repo_stars_event_max_datetime     stringlengths    24 .. 24
max_issues_repo_path                        stringlengths    3 .. 251
max_issues_repo_name                        stringlengths    4 .. 130
max_issues_repo_head_hexsha                 stringlengths    40 .. 78
max_issues_repo_licenses                    sequencelengths  1 .. 10
max_issues_count                            int64            1 .. 116k
max_issues_repo_issues_event_min_datetime   stringlengths    24 .. 24
max_issues_repo_issues_event_max_datetime   stringlengths    24 .. 24
max_forks_repo_path                         stringlengths    3 .. 251
max_forks_repo_name                         stringlengths    4 .. 130
max_forks_repo_head_hexsha                  stringlengths    40 .. 78
max_forks_repo_licenses                     sequencelengths  1 .. 10
max_forks_count                             int64            1 .. 105k
max_forks_repo_forks_event_min_datetime     stringlengths    24 .. 24
max_forks_repo_forks_event_max_datetime     stringlengths    24 .. 24
content                                     stringlengths    1 .. 1.05M
avg_line_length                             float64          1 .. 1.02M
max_line_length                             int64            3 .. 1.04M
alphanum_fraction                           float64          0 .. 1
07e2537b3e43653ce0616ed6421ef634050042c8
3,085
py
Python
pysrc/classifier.py
CrackerCat/xed
428712c28e831573579b7f749db63d3a58dcdbd9
[ "Apache-2.0" ]
1,261
2016-12-16T14:29:30.000Z
2022-03-30T20:21:25.000Z
pysrc/classifier.py
CrackerCat/xed
428712c28e831573579b7f749db63d3a58dcdbd9
[ "Apache-2.0" ]
190
2016-12-17T13:44:09.000Z
2022-03-27T09:28:13.000Z
pysrc/classifier.py
CrackerCat/xed
428712c28e831573579b7f749db63d3a58dcdbd9
[ "Apache-2.0" ]
155
2016-12-16T22:17:20.000Z
2022-02-16T20:53:59.000Z
#!/usr/bin/env python
# -*- python -*-
#BEGIN_LEGAL
#
#Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
from __future__ import print_function
import re

import genutil
import codegen
39.551282
96
0.647326
07e2d1b8a7c46e378298b64b296fe93ed48acbf5
1,828
py
Python
tests/integration/api/test_target_groups.py
lanz/Tenable.io-SDK-for-Python
e81a61c369ac103d1524b0898153a569536a131e
[ "MIT" ]
90
2017-02-02T18:36:17.000Z
2022-02-05T17:58:50.000Z
tests/integration/api/test_target_groups.py
lanz/Tenable.io-SDK-for-Python
e81a61c369ac103d1524b0898153a569536a131e
[ "MIT" ]
64
2017-02-03T00:54:00.000Z
2020-08-06T14:06:50.000Z
tests/integration/api/test_target_groups.py
lanz/Tenable.io-SDK-for-Python
e81a61c369ac103d1524b0898153a569536a131e
[ "MIT" ]
49
2017-02-03T01:01:00.000Z
2022-02-25T13:25:28.000Z
import pytest

from tenable_io.api.target_groups import TargetListEditRequest
from tenable_io.api.models import TargetGroup, TargetGroupList
44.585366
119
0.784464
07e312b03c9acc9dee5930a523090053e38045ca
85,560
py
Python
Installation/nnAudio/Spectrogram.py
tasercake/nnAudio
5edc37b7b73674598d533261314429b875ba285d
[ "MIT" ]
null
null
null
Installation/nnAudio/Spectrogram.py
tasercake/nnAudio
5edc37b7b73674598d533261314429b875ba285d
[ "MIT" ]
null
null
null
Installation/nnAudio/Spectrogram.py
tasercake/nnAudio
5edc37b7b73674598d533261314429b875ba285d
[ "MIT" ]
null
null
null
""" Module containing all the spectrogram classes """ # 0.2.0 import torch import torch.nn as nn from torch.nn.functional import conv1d, conv2d, fold import numpy as np from time import time from nnAudio.librosa_functions import * from nnAudio.utils import * sz_float = 4 # size of a float epsilon = 10e-8 # fudge factor for normalization ### --------------------------- Spectrogram Classes ---------------------------### # The section below is for developing purpose # Please don't use the following classes #
41.695906
172
0.593233
07e4a4e5e49ff1a01f2886f954c1382ba8822f86
9,352
py
Python
train.py
hui-won/KoBART_Project
105608997473abc669d777c588d56382efb524c6
[ "MIT" ]
13
2020-12-30T15:09:08.000Z
2022-01-02T08:11:18.000Z
train.py
hui-won/KoBART_Project
105608997473abc669d777c588d56382efb524c6
[ "MIT" ]
2
2021-11-21T11:49:31.000Z
2022-03-18T05:09:13.000Z
train.py
hui-won/KoBART_Project
105608997473abc669d777c588d56382efb524c6
[ "MIT" ]
1
2021-06-15T01:24:18.000Z
2021-06-15T01:24:18.000Z
import argparse
import logging
import os

import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
from pytorch_lightning import loggers as pl_loggers
from torch.utils.data import DataLoader, Dataset

from dataset import KoBARTSummaryDataset
from transformers import BartForConditionalGeneration, PreTrainedTokenizerFast
from transformers.optimization import AdamW, get_cosine_schedule_with_warmup
from kobart import get_pytorch_kobart_model, get_kobart_tokenizer

parser = argparse.ArgumentParser(description='KoBART translation')

parser.add_argument('--checkpoint_path', type=str, help='checkpoint path')

logger = logging.getLogger()
logger.setLevel(logging.INFO)


def train_dataloader(self):
    train = DataLoader(self.train, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True)
    return train


def val_dataloader(self):
    val = DataLoader(self.test, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False)
    return val


def test_dataloader(self):
    test = DataLoader(self.test, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False)
    return test


class Base(pl.LightningModule):


class KoBARTConditionalGeneration(Base):


if __name__ == '__main__':
    parser = Base.add_model_specific_args(parser)
    parser = ArgsBase.add_model_specific_args(parser)
    parser = KobartSummaryModule.add_model_specific_args(parser)
    parser = pl.Trainer.add_argparse_args(parser)
    args = parser.parse_args()
    logging.info(args)

    model = KoBARTConditionalGeneration(args)

    dm = KobartSummaryModule(args.train_file,
                             args.test_file,
                             None,
                             max_len=args.max_len,
                             batch_size=args.batch_size,
                             num_workers=args.num_workers)
    checkpoint_callback = pl.callbacks.ModelCheckpoint(monitor='val_loss',
                                                       dirpath=args.default_root_dir,
                                                       filename='model_chp/{epoch:02d}-{val_loss:.3f}',
                                                       verbose=True,
                                                       save_last=True,
                                                       mode='min',
                                                       save_top_k=-1,
                                                       prefix='kobart_translation')
    tb_logger = pl_loggers.TensorBoardLogger(os.path.join(args.default_root_dir, 'tb_logs'))
    lr_logger = pl.callbacks.LearningRateMonitor()
    trainer = pl.Trainer.from_argparse_args(args, logger=tb_logger,
                                            callbacks=[checkpoint_callback, lr_logger])
    trainer.fit(model, dm)
39.627119
153
0.559773
07e4f4a4fe370f9d6aeaae97b9bd2ee2d9364898
11,945
py
Python
homeassistant/components/shelly/sensor.py
RavensburgOP/core
0ea76e848b182ca0ebb0fdb54558f7f733898ad7
[ "Apache-2.0" ]
1
2019-08-28T00:54:28.000Z
2019-08-28T00:54:28.000Z
homeassistant/components/shelly/sensor.py
RavensburgOP/core
0ea76e848b182ca0ebb0fdb54558f7f733898ad7
[ "Apache-2.0" ]
71
2020-07-14T09:08:56.000Z
2022-03-31T06:01:47.000Z
homeassistant/components/shelly/sensor.py
Vaarlion/core
f3de8b9f28de01abf72c0f5bb0b457eb1841f201
[ "Apache-2.0" ]
null
null
null
"""Sensor for Shelly.""" from __future__ import annotations from datetime import timedelta import logging from typing import Final, cast import aioshelly from homeassistant.components import sensor from homeassistant.components.sensor import SensorEntity from homeassistant.config_entries import ConfigEntry from homeassistant.const import ( CONCENTRATION_PARTS_PER_MILLION, DEGREE, ELECTRIC_CURRENT_AMPERE, ELECTRIC_POTENTIAL_VOLT, ENERGY_KILO_WATT_HOUR, LIGHT_LUX, PERCENTAGE, POWER_WATT, SIGNAL_STRENGTH_DECIBELS_MILLIWATT, ) from homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.typing import StateType from homeassistant.util import dt from . import ShellyDeviceWrapper from .const import LAST_RESET_NEVER, LAST_RESET_UPTIME, SHAIR_MAX_WORK_HOURS from .entity import ( BlockAttributeDescription, RestAttributeDescription, ShellyBlockAttributeEntity, ShellyRestAttributeEntity, ShellySleepingBlockAttributeEntity, async_setup_entry_attribute_entities, async_setup_entry_rest, ) from .utils import get_device_uptime, temperature_unit _LOGGER: Final = logging.getLogger(__name__) SENSORS: Final = { ("device", "battery"): BlockAttributeDescription( name="Battery", unit=PERCENTAGE, device_class=sensor.DEVICE_CLASS_BATTERY, state_class=sensor.STATE_CLASS_MEASUREMENT, removal_condition=lambda settings, _: settings.get("external_power") == 1, ), ("device", "deviceTemp"): BlockAttributeDescription( name="Device Temperature", unit=temperature_unit, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_TEMPERATURE, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), ("emeter", "current"): BlockAttributeDescription( name="Current", unit=ELECTRIC_CURRENT_AMPERE, value=lambda value: value, device_class=sensor.DEVICE_CLASS_CURRENT, state_class=sensor.STATE_CLASS_MEASUREMENT, ), ("light", "power"): BlockAttributeDescription( name="Power", unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), ("device", "power"): BlockAttributeDescription( name="Power", unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), ("emeter", "power"): BlockAttributeDescription( name="Power", unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), ("emeter", "voltage"): BlockAttributeDescription( name="Voltage", unit=ELECTRIC_POTENTIAL_VOLT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_VOLTAGE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), ("emeter", "powerFactor"): BlockAttributeDescription( name="Power Factor", unit=PERCENTAGE, value=lambda value: round(value * 100, 1), device_class=sensor.DEVICE_CLASS_POWER_FACTOR, state_class=sensor.STATE_CLASS_MEASUREMENT, ), ("relay", "power"): BlockAttributeDescription( name="Power", unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), ("roller", "rollerPower"): BlockAttributeDescription( name="Power", unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), ("device", "energy"): BlockAttributeDescription( name="Energy", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 60 / 1000, 2), 
device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), ("emeter", "energy"): BlockAttributeDescription( name="Energy", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_NEVER, ), ("emeter", "energyReturned"): BlockAttributeDescription( name="Energy Returned", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_NEVER, ), ("light", "energy"): BlockAttributeDescription( name="Energy", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 60 / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, last_reset=LAST_RESET_UPTIME, ), ("relay", "energy"): BlockAttributeDescription( name="Energy", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 60 / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), ("roller", "rollerEnergy"): BlockAttributeDescription( name="Energy", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 60 / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), ("sensor", "concentration"): BlockAttributeDescription( name="Gas Concentration", unit=CONCENTRATION_PARTS_PER_MILLION, icon="mdi:gauge", state_class=sensor.STATE_CLASS_MEASUREMENT, ), ("sensor", "extTemp"): BlockAttributeDescription( name="Temperature", unit=temperature_unit, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_TEMPERATURE, state_class=sensor.STATE_CLASS_MEASUREMENT, available=lambda block: cast(bool, block.extTemp != 999), ), ("sensor", "humidity"): BlockAttributeDescription( name="Humidity", unit=PERCENTAGE, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_HUMIDITY, state_class=sensor.STATE_CLASS_MEASUREMENT, available=lambda block: cast(bool, block.extTemp != 999), ), ("sensor", "luminosity"): BlockAttributeDescription( name="Luminosity", unit=LIGHT_LUX, device_class=sensor.DEVICE_CLASS_ILLUMINANCE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), ("sensor", "tilt"): BlockAttributeDescription( name="Tilt", unit=DEGREE, icon="mdi:angle-acute", state_class=sensor.STATE_CLASS_MEASUREMENT, ), ("relay", "totalWorkTime"): BlockAttributeDescription( name="Lamp Life", unit=PERCENTAGE, icon="mdi:progress-wrench", value=lambda value: round(100 - (value / 3600 / SHAIR_MAX_WORK_HOURS), 1), extra_state_attributes=lambda block: { "Operational hours": round(block.totalWorkTime / 3600, 1) }, ), ("adc", "adc"): BlockAttributeDescription( name="ADC", unit=ELECTRIC_POTENTIAL_VOLT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_VOLTAGE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), ("sensor", "sensorOp"): BlockAttributeDescription( name="Operation", icon="mdi:cog-transfer", value=lambda value: value, extra_state_attributes=lambda block: {"self_test": block.selfTest}, ), } REST_SENSORS: Final = { "rssi": RestAttributeDescription( name="RSSI", unit=SIGNAL_STRENGTH_DECIBELS_MILLIWATT, value=lambda status, _: status["wifi_sta"]["rssi"], device_class=sensor.DEVICE_CLASS_SIGNAL_STRENGTH, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), "uptime": RestAttributeDescription( name="Uptime", 
value=get_device_uptime, device_class=sensor.DEVICE_CLASS_TIMESTAMP, default_enabled=False, ), }
34.226361
84
0.669904
07e5b14fe954fccf9ada38a8fb44f9dd227c6830
1,301
py
Python
tests/web/config.py
zcqian/biothings.api
61c0300317cf2ac7db8310b5b5741ad9b08c4163
[ "Apache-2.0" ]
null
null
null
tests/web/config.py
zcqian/biothings.api
61c0300317cf2ac7db8310b5b5741ad9b08c4163
[ "Apache-2.0" ]
null
null
null
tests/web/config.py
zcqian/biothings.api
61c0300317cf2ac7db8310b5b5741ad9b08c4163
[ "Apache-2.0" ]
null
null
null
""" Web settings to override for testing. """ import os from biothings.web.settings.default import QUERY_KWARGS # ***************************************************************************** # Elasticsearch Variables # ***************************************************************************** ES_INDEX = 'bts_test' ES_DOC_TYPE = 'gene' ES_SCROLL_SIZE = 60 # ***************************************************************************** # User Input Control # ***************************************************************************** # use a smaller size for testing QUERY_KWARGS['GET']['facet_size']['default'] = 3 QUERY_KWARGS['GET']['facet_size']['max'] = 5 QUERY_KWARGS['POST']['q']['jsoninput'] = True # ***************************************************************************** # Elasticsearch Query Builder # ***************************************************************************** ALLOW_RANDOM_QUERY = True ALLOW_NESTED_AGGS = True USERQUERY_DIR = os.path.join(os.path.dirname(__file__), 'userquery') # ***************************************************************************** # Endpoints Specifics # ***************************************************************************** STATUS_CHECK = { 'id': '1017', 'index': 'bts_test', 'doc_type': '_all' }
34.236842
79
0.362798
07e700a1cf3d3463190722de4956e44165a923c7
1,969
py
Python
InvenTree/InvenTree/management/commands/rebuild_thumbnails.py
rocheparadox/InvenTree
76c1e936db78424e0d6953c4062eb32863e302c6
[ "MIT" ]
656
2017-03-29T22:06:14.000Z
2022-03-30T11:23:52.000Z
InvenTree/InvenTree/management/commands/rebuild_thumbnails.py
rocheparadox/InvenTree
76c1e936db78424e0d6953c4062eb32863e302c6
[ "MIT" ]
1,545
2017-04-10T23:26:04.000Z
2022-03-31T18:32:10.000Z
InvenTree/InvenTree/management/commands/rebuild_thumbnails.py
fablabbcn/InvenTree
1d7ea7716cc96c6ffd151c822b01cd1fb5dcfecd
[ "MIT" ]
196
2017-03-28T03:06:21.000Z
2022-03-28T11:53:29.000Z
""" Custom management command to rebuild thumbnail images - May be required after importing a new dataset, for example """ import os import logging from PIL import UnidentifiedImageError from django.core.management.base import BaseCommand from django.conf import settings from django.db.utils import OperationalError, ProgrammingError from company.models import Company from part.models import Part logger = logging.getLogger("inventree-thumbnails")
27.732394
82
0.630269
07e88f36bd18f9a9dc8241de858cfab239c3ca4a
1,758
py
Python
cogs/carbon.py
Baracchino-Della-Scuola/Bot
65c1ef37ca9eae5d104de7d7de5cc58cc138402d
[ "MIT" ]
6
2021-12-18T10:15:01.000Z
2022-03-25T18:11:04.000Z
cogs/carbon.py
Baracchino-Della-Scuola/Bot
65c1ef37ca9eae5d104de7d7de5cc58cc138402d
[ "MIT" ]
3
2022-01-13T12:44:46.000Z
2022-02-21T17:40:52.000Z
cogs/carbon.py
Baracchino-Della-Scuola/Bot
65c1ef37ca9eae5d104de7d7de5cc58cc138402d
[ "MIT" ]
1
2022-02-14T21:54:07.000Z
2022-02-14T21:54:07.000Z
import discord
from discord.ext import commands
import urllib.parse
from .constants import themes, controls, languages, fonts, escales
import os
from pathlib import Path
from typing import Any

# from pyppeteer import launch
from io import *
import requests


def hex_to_rgb(hex: str) -> tuple:
    """
    Args:
        hex (str):
    """
    return tuple(int(hex.lstrip("#")[i : i + 2], 16) for i in (0, 2, 4))


_carbon_url = "https://carbonnowsh.herokuapp.com/"
21.975
86
0.633675
07e994b02286199ddba77a78c4751e4388520310
267
py
Python
examples/show_artist.py
jimcortez/spotipy_twisted
49ff2a4a5a5a9b3184b22adbe068eb91a38f3102
[ "MIT" ]
null
null
null
examples/show_artist.py
jimcortez/spotipy_twisted
49ff2a4a5a5a9b3184b22adbe068eb91a38f3102
[ "MIT" ]
null
null
null
examples/show_artist.py
jimcortez/spotipy_twisted
49ff2a4a5a5a9b3184b22adbe068eb91a38f3102
[ "MIT" ]
null
null
null
# shows artist info for a URN or URL

import spotipy_twisted
import sys
import pprint

if len(sys.argv) > 1:
    urn = sys.argv[1]
else:
    urn = 'spotify:artist:3jOstUTkEu2JkjvRdBA5Gu'

sp = spotipy_twisted.Spotify()

artist = sp.artist(urn)
pprint.pprint(artist)
15.705882
49
0.726592
07ea3ff52f1fa71b79053f13390d47944be9bd66
499
py
Python
examples/mcp3xxx_mcp3002_single_ended_simpletest.py
sommersoft/Adafruit_CircuitPython_MCP3xxx
94088a7e2b30f1b34e8a5fd7076075d88aad460b
[ "MIT" ]
null
null
null
examples/mcp3xxx_mcp3002_single_ended_simpletest.py
sommersoft/Adafruit_CircuitPython_MCP3xxx
94088a7e2b30f1b34e8a5fd7076075d88aad460b
[ "MIT" ]
null
null
null
examples/mcp3xxx_mcp3002_single_ended_simpletest.py
sommersoft/Adafruit_CircuitPython_MCP3xxx
94088a7e2b30f1b34e8a5fd7076075d88aad460b
[ "MIT" ]
null
null
null
import busio
import digitalio
import board
import adafruit_mcp3xxx.mcp3002 as MCP
from adafruit_mcp3xxx.analog_in import AnalogIn

# create the spi bus
spi = busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI)

# create the cs (chip select)
cs = digitalio.DigitalInOut(board.D5)

# create the mcp object
mcp = MCP.MCP3002(spi, cs)

# create an analog input channel on pin 0
chan = AnalogIn(mcp, MCP.P0)

print("Raw ADC Value: ", chan.value)
print("ADC Voltage: " + str(chan.voltage) + "V")
23.761905
66
0.747495
07eb8c54a1c0d882798ebdd645e52dda754bb70e
759
py
Python
glue/core/data_factories/tables.py
rosteen/glue
ed71979f8e0e41f993a2363b3b5a8f8c3167a130
[ "BSD-3-Clause" ]
550
2015-01-08T13:51:06.000Z
2022-03-31T11:54:47.000Z
glue/core/data_factories/tables.py
mmorys/glue
b58ced518ba6f56c59a4e03ffe84afa47235e193
[ "BSD-3-Clause" ]
1,362
2015-01-03T19:15:52.000Z
2022-03-30T13:23:11.000Z
glue/core/data_factories/tables.py
mmorys/glue
b58ced518ba6f56c59a4e03ffe84afa47235e193
[ "BSD-3-Clause" ]
142
2015-01-08T13:08:00.000Z
2022-03-18T13:25:57.000Z
from glue.core.data_factories.helpers import has_extension
from glue.config import data_factory

__all__ = ['tabular_data']
33
75
0.613966
07ebcae81863c1e60bd65e743d7f7961451a23cf
2,895
py
Python
code_doc/views/author_views.py
coordt/code_doc
c2fac64ac3ad61952a2d9f036727166741f9aff9
[ "BSD-3-Clause" ]
null
null
null
code_doc/views/author_views.py
coordt/code_doc
c2fac64ac3ad61952a2d9f036727166741f9aff9
[ "BSD-3-Clause" ]
null
null
null
code_doc/views/author_views.py
coordt/code_doc
c2fac64ac3ad61952a2d9f036727166741f9aff9
[ "BSD-3-Clause" ]
null
null
null
from django.shortcuts import render
from django.http import Http404
from django.views.generic.edit import UpdateView
from django.views.generic import ListView, View
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.utils.decorators import method_decorator

import logging

from ..models.projects import Project
from ..models.authors import Author
from ..forms import AuthorForm

from .permission_helpers import PermissionOnObjectViewMixin

# logger for this file
logger = logging.getLogger(__name__)
28.382353
88
0.680484
07ee95bf0289bb4f328ba250a0e725c6cb917270
2,073
py
Python
d00dfeed/analyses/print_sloc_per_soc.py
rehosting/rehosting_sok
499b625c8aa60020f311df97a6253820982f20d4
[ "MIT" ]
4
2021-09-17T02:37:08.000Z
2022-02-15T01:44:41.000Z
d00dfeed/analyses/print_sloc_per_soc.py
rehosting/rehosting_sok
499b625c8aa60020f311df97a6253820982f20d4
[ "MIT" ]
null
null
null
d00dfeed/analyses/print_sloc_per_soc.py
rehosting/rehosting_sok
499b625c8aa60020f311df97a6253820982f20d4
[ "MIT" ]
null
null
null
# External deps
import os, sys, json
from pathlib import Path
from typing import Dict, List

# Internal deps
os.chdir(sys.path[0])
sys.path.append("..")
import df_common as dfc
import analyses_common as ac

# Generated files directory
GEN_FILE_DIR = str(Path(__file__).resolve().parent.parent) + os.sep + "generated_files"  # TODO: ugly parent.parent pathing
if os.path.exists(GEN_FILE_DIR):
    sys.path.append(GEN_FILE_DIR)

if os.path.exists(os.path.join(GEN_FILE_DIR, "sloc_cnt.py")):
    from sloc_cnt import DRIVER_NAME_TO_SLOC
else:
    print("Error: no SLOC file! Run \'df_analyze.py\' with \'--linux-src-dir\'")
    sys.exit(1)

if __name__ == "__main__":
    json_files = ac.argparse_and_get_files("Graph SLOC/SoC data")
    soc_sloc_by_arch: Dict[str, List[int]] = {}

    print("Gathering SLOC average by arch...")
    from graph_dd_sloc_by_arch import get_sloc_avg_and_list_by_arch
    cmp_by_arch = ac.build_dict_two_lvl_cnt(json_files, dfc.JSON_ARC, dfc.JSON_CMP_STR)
    avg_sloc_by_arch, sloc_list_by_arch = get_sloc_avg_and_list_by_arch(cmp_by_arch, verbose = False)

    # Collection
    print("Iterating DTBs/SoCs...")
    for dtb_json in json_files:
        with open(dtb_json) as json_file:
            data = json.load(json_file)
            soc_sloc = 0
            arch = data[dfc.JSON_ARC]
            cmp_strs = data[dfc.JSON_CMP_STR]

            # Total SLOC for this SoC
            for cmp_str in cmp_strs:
                driver_sloc = dfc.cmp_str_to_sloc(cmp_str)
                if not driver_sloc:  # Closed-source driver
                    driver_sloc = avg_sloc_by_arch[arch]
                soc_sloc += driver_sloc
                #print("{}: {}".format(cmp_str, driver_sloc))

            if arch not in soc_sloc_by_arch:
                soc_sloc_by_arch[arch] = []
            else:
                soc_sloc_by_arch[arch].append(soc_sloc)

            print("{} ({}): {}".format(dtb_json.split(os.sep)[-1], arch, soc_sloc))

    # Final stats
    ac.print_mean_median_std_dev_for_dict_of_lists(
        soc_sloc_by_arch,
        "\nSloc Per Soc, format: [arch : (mean, median, std_dev)]\n")
32.904762
122
0.673903
07eea84b8f7990a608b685c1a60f3250095ce8a2
1,271
py
Python
mingpt/lr_decay.py
asigalov61/minGPT
b4f8d57aaf1bb5c64d480f8005b73d39b075ae4b
[ "MIT" ]
18
2020-09-10T02:29:38.000Z
2022-03-16T03:17:35.000Z
mingpt/lr_decay.py
asigalov61/minGPT
b4f8d57aaf1bb5c64d480f8005b73d39b075ae4b
[ "MIT" ]
null
null
null
mingpt/lr_decay.py
asigalov61/minGPT
b4f8d57aaf1bb5c64d480f8005b73d39b075ae4b
[ "MIT" ]
7
2020-08-20T16:35:38.000Z
2022-01-10T21:57:49.000Z
import math

import pytorch_lightning as pl
41
106
0.609756
07ef68929a367d76f0cb572e51ac36b254e815b0
40,518
py
Python
apprise/config/ConfigBase.py
calvinbui/apprise
a5510790baf5aa1d74afabab25ff57d6b2304d56
[ "MIT" ]
null
null
null
apprise/config/ConfigBase.py
calvinbui/apprise
a5510790baf5aa1d74afabab25ff57d6b2304d56
[ "MIT" ]
null
null
null
apprise/config/ConfigBase.py
calvinbui/apprise
a5510790baf5aa1d74afabab25ff57d6b2304d56
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Chris Caron <[email protected]>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

import os
import re
import six
import yaml
import time

from .. import plugins
from ..AppriseAsset import AppriseAsset
from ..URLBase import URLBase
from ..common import ConfigFormat
from ..common import CONFIG_FORMATS
from ..common import ContentIncludeMode
from ..utils import GET_SCHEMA_RE
from ..utils import parse_list
from ..utils import parse_bool
from ..utils import parse_urls
from . import SCHEMA_MAP

# Test whether token is valid or not
VALID_TOKEN = re.compile(
    r'(?P<token>[a-z0-9][a-z0-9_]+)', re.I)


def __getitem__(self, index):
    """
    Returns the indexed server entry associated with the loaded
    notification servers
    """
    if not isinstance(self._cached_servers, list):
        # Generate ourselves a list of content we can pull from
        self.servers()

    return self._cached_servers[index]


def __iter__(self):
    """
    Returns an iterator to our server list
    """
    if not isinstance(self._cached_servers, list):
        # Generate ourselves a list of content we can pull from
        self.servers()

    return iter(self._cached_servers)


def __len__(self):
    """
    Returns the total number of servers loaded
    """
    if not isinstance(self._cached_servers, list):
        # Generate ourselves a list of content we can pull from
        self.servers()

    return len(self._cached_servers)


def __bool__(self):
    """
    Allows the Apprise object to be wrapped in an Python 3.x based 'if
    statement'. True is returned if our content was downloaded correctly.
    """
    if not isinstance(self._cached_servers, list):
        # Generate ourselves a list of content we can pull from
        self.servers()

    return True if self._cached_servers else False


def __nonzero__(self):
    """
    Allows the Apprise object to be wrapped in an Python 2.x based 'if
    statement'. True is returned if our content was downloaded correctly.
    """
    if not isinstance(self._cached_servers, list):
        # Generate ourselves a list of content we can pull from
        self.servers()

    return True if self._cached_servers else False
38.116651
79
0.550768
07f0b2b68417d129704d340d100e569555824ebc
977
py
Python
ffmpeg_util.py
manuel-fischer/ScrollRec
ec5662d3f61630f939613481290a166133d23a20
[ "MIT" ]
null
null
null
ffmpeg_util.py
manuel-fischer/ScrollRec
ec5662d3f61630f939613481290a166133d23a20
[ "MIT" ]
null
null
null
ffmpeg_util.py
manuel-fischer/ScrollRec
ec5662d3f61630f939613481290a166133d23a20
[ "MIT" ]
null
null
null
import sys
import subprocess
from subprocess import Popen, PIPE

AV_LOG_QUIET = "quiet"
AV_LOG_PANIC = "panic"
AV_LOG_FATAL = "fatal"
AV_LOG_ERROR = "error"
AV_LOG_WARNING = "warning"
AV_LOG_INFO = "info"
AV_LOG_VERBOSE = "verbose"
AV_LOG_DEBUG = "debug"

ffmpeg_loglevel = AV_LOG_ERROR

IS_WIN32 = 'win32' in str(sys.platform).lower()
SUBPROCESS_ARGS = {}
if IS_WIN32:
    startupinfo = subprocess.STARTUPINFO()
    startupinfo.dwFlags = subprocess.CREATE_NEW_CONSOLE | subprocess.STARTF_USESHOWWINDOW
    startupinfo.wShowWindow = subprocess.SW_HIDE
    SUBPROCESS_ARGS['startupinfo'] = startupinfo
24.425
89
0.698055
07f0cc1a2096d19875fd8be0522fd82d52ff8b5c
128
py
Python
setup.py
rizar/CLOSURE
57f80d4e89fa281830bb9c8b6a7a2498747e727a
[ "MIT" ]
14
2019-12-08T04:38:57.000Z
2021-07-13T15:46:57.000Z
setup.py
rizar/CLOSURE
57f80d4e89fa281830bb9c8b6a7a2498747e727a
[ "MIT" ]
null
null
null
setup.py
rizar/CLOSURE
57f80d4e89fa281830bb9c8b6a7a2498747e727a
[ "MIT" ]
2
2020-09-01T10:21:27.000Z
2021-01-15T02:38:25.000Z
from setuptools import setup

setup(
    name="nmn-iwp",
    version="0.1",
    keywords="",
    packages=["vr", "vr.models"]
)
14.222222
32
0.585938
07f1195aa55500ccfbdb1eb16ce8a5e553bfeb5d
11,381
py
Python
analysis_tools/PYTHON_RICARDO/output_ingress_egress/scripts/uniform_grid.py
lefevre-fraser/openmeta-mms
08f3115e76498df1f8d70641d71f5c52cab4ce5f
[ "MIT" ]
null
null
null
analysis_tools/PYTHON_RICARDO/output_ingress_egress/scripts/uniform_grid.py
lefevre-fraser/openmeta-mms
08f3115e76498df1f8d70641d71f5c52cab4ce5f
[ "MIT" ]
null
null
null
analysis_tools/PYTHON_RICARDO/output_ingress_egress/scripts/uniform_grid.py
lefevre-fraser/openmeta-mms
08f3115e76498df1f8d70641d71f5c52cab4ce5f
[ "MIT" ]
null
null
null
""" Represent a triangulated surface using a 3D boolean grid""" import logging import numpy as np from rpl.tools.ray_tracing.bsp_tree_poly import BSP_Element from rpl.tools.geometry import geom_utils import data_io def make_grid(veh_surfs, settings): """ Make coordinates of voxelated grid based on overall list of vehicle surfaces """ ## Find overall bounding box x_min, x_max = 1e30, -1e30 y_min, y_max = 1e30, -1e30 z_min, z_max = 1e30, -1e30 for key, veh_surf in veh_surfs.items(): x_min, x_max = min(x_min, np.min(veh_surf["x"])), max(x_max, np.max(veh_surf["x"])) y_min, y_max = min(y_min, np.min(veh_surf["y"])), max(y_max, np.max(veh_surf["y"])) z_min, z_max = min(z_min, np.min(veh_surf["z"])), max(z_max, np.max(veh_surf["z"])) x_min, x_max = x_min - settings["voxel_size"], x_max + settings["voxel_size"] y_min, y_max = y_min - settings["voxel_size"], y_max + settings["voxel_size"] z_min, z_max = z_min - settings["voxel_size"], z_max + settings["voxel_size"] ########################################### # Create the uniformly spaced grid points x_grid = np.arange(x_min, x_max + settings["voxel_size"], settings["voxel_size"]) y_grid = np.arange(y_min, y_max + settings["voxel_size"], settings["voxel_size"]) z_grid = np.arange(z_min, z_max + settings["voxel_size"], settings["voxel_size"]) return x_grid, y_grid, z_grid def convert_geom(veh_surf, tr_mat): """ Rotate nodes using provided transformation matrix; convert xyz node dict to nodes array """ veh_surf["nodes"] = np.vstack((veh_surf["x"], veh_surf["y"], veh_surf["z"])).T veh_surf['nodes'] = np.dot(veh_surf['nodes'], tr_mat[:3, :3]) veh_surf["x"] = veh_surf['nodes'][:, 0] veh_surf["y"] = veh_surf['nodes'][:, 1] veh_surf["z"] = veh_surf['nodes'][:, 2] return veh_surf def find_occupied_voxels(surf, surf_mask, voxel_data): """ Voxels with any triangle from ``surf`` are considered occupied and or'ed with ``group_mask``. If the supplied ``occupied_voxels`` is None a voxel array is created and returned. 
""" nodes = surf["nodes"] tris = surf["tris"] x_pts, y_pts, z_pts = [voxel_data[k] for k in ("x_grid", "y_grid", "z_grid")] vox_size = voxel_data["vox_size"] ## Find the local extents of this part min_x, max_x = np.min(surf["x"]) - vox_size, np.max(surf["x"]) + vox_size min_y, max_y = np.min(surf["y"]) - vox_size, np.max(surf["y"]) + vox_size min_z, max_z = np.min(surf["z"]) - vox_size, np.max(surf["z"]) + vox_size b_tree = BSP_Grid(nodes, tris) # Create BSP tree elements- we're not using a tree, but we are using some of the functions b_x_root = BSP_Element(b_tree.tris, b_tree) size_i, size_j, size_k = len(x_pts), len(y_pts), len(z_pts) ## Create the occupied voxels if none were supplied if voxel_data["value"] is None: voxel_data["value"] = np.zeros((size_i - 1, size_j - 1, size_k - 1), dtype=np.uint32) occupied_voxels = voxel_data["value"] ## The [1:] is because to make n voxels in a given direction we need n-1 splits for i, x_pos in enumerate(x_pts[1:]): if x_pos < min_x: continue if x_pos > max_x: break b_above_x, b_below_x = b_x_root.split_at(0, x_pos) b_y_root = b_below_x for j, y_pos in enumerate(y_pts[1:]): if b_y_root is None: break if y_pos < min_y: continue if y_pos > max_y: break b_above_y, b_below_y = b_y_root.split_at(1, y_pos) b_z_root = b_below_y for k, z_pos in enumerate(z_pts[1:]): if b_z_root is None: break if z_pos < min_z: continue if z_pos > max_z: break b_above_z, b_below_z = b_z_root.split_at(2, z_pos) if not (b_below_z and (len(b_below_z.tris) == 0)): ## There is at least part of triangle here so mark as occupied occupied_voxels[i, j, k] |= surf_mask b_z_root = b_above_z b_y_root = b_above_y b_x_root = b_above_x return voxel_data ############# # Main code def main(vehicle_comp_coords, tr_mat, voxel_masks, settings): """ Perform voxelization for all vehicle geometries in a list of parts. Combine on a uniform grid. 
""" for key, veh_surf in vehicle_comp_coords.items(): # Convert coordinates and find overall best bounding box veh_surf = convert_geom(veh_surf, tr_mat) x_grid, y_grid, z_grid = make_grid(vehicle_comp_coords, settings) voxel_data = {"x_grid": x_grid, "y_grid": y_grid, "z_grid": z_grid, "vox_size": settings["voxel_size"], "csys_trans": tr_mat, "value": None} for key, veh_surf in vehicle_comp_coords.items(): # Build up the voxel_data logging.debug("Sampling component: {}".format(key)) ## Default mask is 1 for anything not in an identified set surf_mask = 1 for mask, geo_set in voxel_masks.items(): if veh_surf['part_class'] in geo_set: surf_mask |= mask voxel_data = find_occupied_voxels(veh_surf, surf_mask, voxel_data) return voxel_data if __name__ == "__main__": from rpl.tools.api import test_bench_api as tb_api SETTINGS = tb_api.load_settings("settings.js") DOORS = {'Hatch_Assembly_Rear_Ramp', 'Hatch_Assembly_Personnel_Door'} HATCHES = {'Hatch_Assembly_Driver_Commander', 'Hatch_Assembly_Cargo'} HULLS = {"Hull_Assembly_Parametric", 'Hull_Assembly_Example_With_Connector'} MANIKINS = {"Manikin"} # Special labels applied to specific types of voxels VOXEL_LABELS = {2: HULLS, 4: DOORS, 8: HATCHES, 16: MANIKINS} vehicle_surfs = tb_api.load_geometry(tb_api.get_all_geom_set() - MANIKINS, single_file=False) # Modify node coords so object aligns with cartesian axes of occ voxel grid, +z=up # Vector to rotate around is cross product of current z axis and sfc normal veh_up = np.array([0., 1., 0.]) rot_around = np.cross(veh_up, np.array([0, 0, 1])) rot_ang = -np.arccos(veh_up[2]) tr_mat = geom_utils.rotation_about_vector(rot_around, rot_ang) # voxel_data = main(vehicle_surfs, tr_mat, VOXEL_LABELS, SETTINGS) vox_veh_folder = r"voxelated_models/vehicles/{}/{}".format(SETTINGS["run_id"], SETTINGS["voxel_size"]) vox_veh_file = "voxels_{}_vox{}_hacked".format(SETTINGS["run_id"], SETTINGS["voxel_size"]) try: voxel_data = data_io.load_array(vox_veh_folder, vox_veh_file, True) except: voxel_data = main(vehicle_surfs, tr_mat, VOXEL_LABELS, SETTINGS) from mayavi import mlab xo, yo, zo = np.where(voxel_data["value"] == 1) plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo], voxel_data["y_grid"][yo], voxel_data["z_grid"][zo], color=(0.9, 0.9, 0.9), scale_mode="none", scale_factor=voxel_data["vox_size"], mode='cube', opacity=1) xo, yo, zo = np.where(voxel_data["value"] & 2) plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo], voxel_data["y_grid"][yo], voxel_data["z_grid"][zo], color=(1, 1, 1), scale_mode="none", scale_factor=voxel_data["vox_size"], mode='cube', opacity=0.05) xo, yo, zo = np.where(voxel_data["value"] & 4) plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo], voxel_data["y_grid"][yo], voxel_data["z_grid"][zo], color=(1.0, 0.5, 0.5), scale_mode="none", scale_factor=voxel_data["vox_size"], mode='cube', opacity=1) xo, yo, zo = np.where(voxel_data["value"] & 8) plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo], voxel_data["y_grid"][yo], voxel_data["z_grid"][zo], color=(0.6, 0.6, 1.0), scale_mode="none", scale_factor=voxel_data["vox_size"], mode='cube', opacity=1) # No manikins included, no need to plot them # xo, yo, zo = np.where(voxel_data["value"] & 16) # plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo], # voxel_data["y_grid"][yo], # voxel_data["z_grid"][zo], # color=(0.5, 1.0, 0.8), # scale_mode="none", scale_factor=voxel_data["vox_size"], # mode='cube', opacity=1.0) mlab.show() # Save the voxelated model of the vehicle (sans door and other excluded parts) 
data_io.save_multi_array(vox_veh_folder, vox_veh_file, voxel_data)
42.30855
99
0.567613
07f12eb8f08aef21196193b3111071cb20b8013a
1,884
py
Python
silver_bullet/crypto.py
Hojung-Jeong/Silver-Bullet-Encryption-Tool
5ea29b3cd78cf7488e0cbdcf4ea60d7c9151c2a7
[ "Apache-2.0" ]
null
null
null
silver_bullet/crypto.py
Hojung-Jeong/Silver-Bullet-Encryption-Tool
5ea29b3cd78cf7488e0cbdcf4ea60d7c9151c2a7
[ "Apache-2.0" ]
null
null
null
silver_bullet/crypto.py
Hojung-Jeong/Silver-Bullet-Encryption-Tool
5ea29b3cd78cf7488e0cbdcf4ea60d7c9151c2a7
[ "Apache-2.0" ]
null
null
null
'''
>List of functions

1. encrypt(user_input,passphrase)
- Encrypt the given string with the given passphrase. Returns cipher text and locked pad.

2. decrypt(cipher_text,locked_pad,passphrase)
- Decrypt the cipher text encrypted with SBET. It requires cipher text, locked pad, and passphrase.
'''

# CODE ========================================================================

import zlib
import random
from hashlib import sha1

from silver_bullet.TRNG import trlist
from silver_bullet.contain_value import contain

ascii_value=256
24.789474
146
0.735669
07f1ea6de606abc50abb899228cdc43831fa522e
876
py
Python
pyfire/errors.py
RavidLevi98/pyfire
404ae2082fd5be3ef652b3e15a66ad0d79b7a1b5
[ "BSD-3-Clause" ]
null
null
null
pyfire/errors.py
RavidLevi98/pyfire
404ae2082fd5be3ef652b3e15a66ad0d79b7a1b5
[ "BSD-3-Clause" ]
1
2021-05-22T21:34:44.000Z
2021-05-22T21:34:44.000Z
pyfire/errors.py
RavidLevi98/pyfire
404ae2082fd5be3ef652b3e15a66ad0d79b7a1b5
[ "BSD-3-Clause" ]
1
2021-05-22T21:21:11.000Z
2021-05-22T21:21:11.000Z
# -*- coding: utf-8 -*-
"""
    pyfire.errors
    ~~~~~~~~~~~~~~~~~~~~~~

    Holds the global used base errors

    :copyright: 2011 by the pyfire Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""

import xml.etree.ElementTree as ET
28.258065
72
0.651826
07f21adedf8ef7aa0ba52361a9cf4372ad43ac9a
4,967
py
Python
app/nextMoveLogic.py
thekitbag/starter-snake-python
48d12d2fa61ecfc976cd5750316b1db49a641f7f
[ "MIT" ]
null
null
null
app/nextMoveLogic.py
thekitbag/starter-snake-python
48d12d2fa61ecfc976cd5750316b1db49a641f7f
[ "MIT" ]
null
null
null
app/nextMoveLogic.py
thekitbag/starter-snake-python
48d12d2fa61ecfc976cd5750316b1db49a641f7f
[ "MIT" ]
null
null
null
import random
29.217647
122
0.655124
07f27b728b22aae57de29b0aad696e2f245d31dd
2,921
py
Python
generator/util.py
gbtami/lichess-puzzler
e7338b35f592481141acefe39c7aaa444b26aa9e
[ "MIT" ]
1
2021-02-20T11:21:53.000Z
2021-02-20T11:21:53.000Z
generator/util.py
gbtami/lichess-puzzler
e7338b35f592481141acefe39c7aaa444b26aa9e
[ "MIT" ]
null
null
null
generator/util.py
gbtami/lichess-puzzler
e7338b35f592481141acefe39c7aaa444b26aa9e
[ "MIT" ]
null
null
null
from dataclasses import dataclass
import math
import chess
import chess.engine
from model import EngineMove, NextMovePair
from chess import Color, Board
from chess.pgn import GameNode
from chess.engine import SimpleEngine, Score

nps = []


def win_chances(score: Score) -> float:
    """
    winning chances from -1 to 1
    https://graphsketch.com/?eqn1_color=1&eqn1_eqn=100+*+%282+%2F+%281+%2B+exp%28-0.004+*+x%29%29+-+1%29&eqn2_color=2&eqn2_eqn=&eqn3_color=3&eqn3_eqn=&eqn4_color=4&eqn4_eqn=&eqn5_color=5&eqn5_eqn=&eqn6_color=6&eqn6_eqn=&x_min=-1000&x_max=1000&y_min=-100&y_max=100&x_tick=100&y_tick=10&x_label_freq=2&y_label_freq=2&do_grid=0&do_grid=1&bold_labeled_lines=0&bold_labeled_lines=1&line_width=4&image_w=850&image_h=525
    """
    mate = score.mate()
    if mate is not None:
        return 1 if mate > 0 else -1

    cp = score.score()
    return 2 / (1 + math.exp(-0.004 * cp)) - 1 if cp is not None else 0


CORRESP_TIME = 999999
38.946667
442
0.66176
07f699666466b24dd921c96e35a918f2fe5e627b
293
py
Python
sleep.py
SkylerHoward/O
989246a5cdc297ab9f76cb6b26daebd799a03741
[ "MIT" ]
null
null
null
sleep.py
SkylerHoward/O
989246a5cdc297ab9f76cb6b26daebd799a03741
[ "MIT" ]
null
null
null
sleep.py
SkylerHoward/O
989246a5cdc297ab9f76cb6b26daebd799a03741
[ "MIT" ]
null
null
null
import time, morning
from datetime import datetime
29.3
63
0.648464
07f6a33d952a989c19f3efa056df22e95ace1f20
4,526
py
Python
tests/unit/commands/local/start_lambda/test_cli.py
ourobouros/aws-sam-cli
3fba861f5106d604fde6d023923a9b83377a35d9
[ "Apache-2.0" ]
2
2018-11-09T04:43:41.000Z
2018-11-20T06:39:45.000Z
tests/unit/commands/local/start_lambda/test_cli.py
ourobouros/aws-sam-cli
3fba861f5106d604fde6d023923a9b83377a35d9
[ "Apache-2.0" ]
null
null
null
tests/unit/commands/local/start_lambda/test_cli.py
ourobouros/aws-sam-cli
3fba861f5106d604fde6d023923a9b83377a35d9
[ "Apache-2.0" ]
null
null
null
from unittest import TestCase
from mock import patch, Mock

from samcli.commands.local.start_lambda.cli import do_cli as start_lambda_cli
from samcli.commands.local.cli_common.user_exceptions import UserException
from samcli.commands.validate.lib.exceptions import InvalidSamDocumentException
from samcli.commands.local.lib.exceptions import OverridesNotWellDefinedError
45.26
96
0.579319
07f8b674abd2c96105f1e6a593d1a4ae299e109a
5,389
py
Python
restapi/services/Utils/test_getters.py
Varun487/CapstoneProject_TradingSystem
b21e3f2c6c5e75596927666bf65294a2014babcf
[ "MIT" ]
3
2022-01-10T01:39:00.000Z
2022-01-11T13:17:36.000Z
restapi/services/Utils/test_getters.py
Varun487/CapstoneProject_TradingSystem
b21e3f2c6c5e75596927666bf65294a2014babcf
[ "MIT" ]
null
null
null
restapi/services/Utils/test_getters.py
Varun487/CapstoneProject_TradingSystem
b21e3f2c6c5e75596927666bf65294a2014babcf
[ "MIT" ]
1
2022-01-09T07:30:36.000Z
2022-01-09T07:30:36.000Z
from django.test import TestCase

import pandas as pd

from .getters import Getter
from .converter import Converter
from strategies.models import Company
from strategies.models import IndicatorType
52.833333
119
0.660419
07f928cd0ad75195469f95ed414958ac002210c7
3,376
py
Python
pytorch_toolkit/ote/ote/modules/trainers/mmdetection.py
abhatikar/training_extensions
1c96e0f5f39688f8b79735e8dfa90646afc3d5e6
[ "Apache-2.0" ]
2
2021-01-07T05:09:17.000Z
2021-10-15T05:13:46.000Z
pytorch_toolkit/ote/ote/modules/trainers/mmdetection.py
abhatikar/training_extensions
1c96e0f5f39688f8b79735e8dfa90646afc3d5e6
[ "Apache-2.0" ]
9
2021-09-08T03:12:59.000Z
2022-03-12T00:57:19.000Z
pytorch_toolkit/ote/ote/modules/trainers/mmdetection.py
abhatikar/training_extensions
1c96e0f5f39688f8b79735e8dfa90646afc3d5e6
[ "Apache-2.0" ]
null
null
null
""" Copyright (c) 2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import json import logging import subprocess import tempfile from ote import MMDETECTION_TOOLS from .base import BaseTrainer from ..registry import TRAINERS
34.44898
101
0.627073
07fabd24f913f0cde7669692291156d001f2e833
1,979
py
Python
svn-go-stats/transform.py
BT-OpenSource/bt-betalab
af5a1b0d778c1746312149f62da0c4159f387293
[ "MIT" ]
1
2021-03-02T10:44:07.000Z
2021-03-02T10:44:07.000Z
svn-go-stats/transform.py
BT-OpenSource/bt-betalab
af5a1b0d778c1746312149f62da0c4159f387293
[ "MIT" ]
null
null
null
svn-go-stats/transform.py
BT-OpenSource/bt-betalab
af5a1b0d778c1746312149f62da0c4159f387293
[ "MIT" ]
null
null
null
import sys
import json
import subprocess
import re
import statistics

# Main service body
if __name__ == "__main__":
    complexity = get_complexity()
    duplicate_const_strings = get_duplicate_const_strings()

    files = set()
    files.update(complexity.keys())
    files.update(duplicate_const_strings.keys())

    result = []
    for f in files:
        result.append({
            'filename': f,
            'cyclomaticComplexity': complexity[f] if f in complexity else 0,
            'duplicateConstStrings': duplicate_const_strings[f] if f in duplicate_const_strings else 0
        })

    print(json.dumps(result))
29.102941
106
0.623042
07fb390e2fe8908e8e3a429d629ca30f1d77df66
11,225
py
Python
test/test_python_errors.py
yangyangxcf/parso
e496b07b6342f6182225a60aad6031d7ad08f24d
[ "PSF-2.0" ]
null
null
null
test/test_python_errors.py
yangyangxcf/parso
e496b07b6342f6182225a60aad6031d7ad08f24d
[ "PSF-2.0" ]
null
null
null
test/test_python_errors.py
yangyangxcf/parso
e496b07b6342f6182225a60aad6031d7ad08f24d
[ "PSF-2.0" ]
null
null
null
""" Testing if parso finds syntax errors and indentation errors. """ import sys import warnings import pytest import parso from parso._compatibility import is_pypy from .failing_examples import FAILING_EXAMPLES, indent, build_nested if is_pypy: # The errors in PyPy might be different. Just skip the module for now. pytestmark = pytest.mark.skip() def test_non_async_in_async(): """ This example doesn't work with FAILING_EXAMPLES, because the line numbers are not always the same / incorrect in Python 3.8. """ if sys.version_info[:2] < (3, 5): pytest.skip() # Raises multiple errors in previous versions. code = 'async def foo():\n def nofoo():[x async for x in []]' wanted, line_nr = _get_actual_exception(code) errors = _get_error_list(code) if errors: error, = errors actual = error.message assert actual in wanted if sys.version_info[:2] < (3, 8): assert line_nr == error.start_pos[0] else: assert line_nr == 0 # For whatever reason this is zero in Python 3.8+ def _get_actual_exception(code): with warnings.catch_warnings(): # We don't care about warnings where locals/globals misbehave here. # It's as simple as either an error or not. warnings.filterwarnings('ignore', category=SyntaxWarning) try: compile(code, '<unknown>', 'exec') except (SyntaxError, IndentationError) as e: wanted = e.__class__.__name__ + ': ' + e.msg line_nr = e.lineno except ValueError as e: # The ValueError comes from byte literals in Python 2 like '\x' # that are oddly enough not SyntaxErrors. wanted = 'SyntaxError: (value error) ' + str(e) line_nr = None else: assert False, "The piece of code should raise an exception." # SyntaxError # Python 2.6 has a bit different error messages here, so skip it. if sys.version_info[:2] == (2, 6) and wanted == 'SyntaxError: unexpected EOF while parsing': wanted = 'SyntaxError: invalid syntax' if wanted == 'SyntaxError: non-keyword arg after keyword arg': # The python 3.5+ way, a bit nicer. wanted = 'SyntaxError: positional argument follows keyword argument' elif wanted == 'SyntaxError: assignment to keyword': return [wanted, "SyntaxError: can't assign to keyword", 'SyntaxError: cannot assign to __debug__'], line_nr elif wanted == 'SyntaxError: assignment to None': # Python 2.6 does has a slightly different error. wanted = 'SyntaxError: cannot assign to None' elif wanted == 'SyntaxError: can not assign to __debug__': # Python 2.6 does has a slightly different error. wanted = 'SyntaxError: cannot assign to __debug__' elif wanted == 'SyntaxError: can use starred expression only as assignment target': # Python 3.4/3.4 have a bit of a different warning than 3.5/3.6 in # certain places. But in others this error makes sense. 
return [wanted, "SyntaxError: can't use starred expression here"], line_nr elif wanted == 'SyntaxError: f-string: unterminated string': wanted = 'SyntaxError: EOL while scanning string literal' elif wanted == 'SyntaxError: f-string expression part cannot include a backslash': return [ wanted, "SyntaxError: EOL while scanning string literal", "SyntaxError: unexpected character after line continuation character", ], line_nr elif wanted == "SyntaxError: f-string: expecting '}'": wanted = 'SyntaxError: EOL while scanning string literal' elif wanted == 'SyntaxError: f-string: empty expression not allowed': wanted = 'SyntaxError: invalid syntax' elif wanted == "SyntaxError: f-string expression part cannot include '#'": wanted = 'SyntaxError: invalid syntax' elif wanted == "SyntaxError: f-string: single '}' is not allowed": wanted = 'SyntaxError: invalid syntax' return [wanted], line_nr def test_default_except_error_postition(): # For this error the position seemed to be one line off, but that doesn't # really matter. code = 'try: pass\nexcept: pass\nexcept X: pass' wanted, line_nr = _get_actual_exception(code) error, = _get_error_list(code) assert error.message in wanted assert line_nr != error.start_pos[0] # I think this is the better position. assert error.start_pos[0] == 2 def test_statically_nested_blocks(): assert get_error(19) is None assert get_error(19, add_func=True) is None assert get_error(20) assert get_error(20, add_func=True) def test_future_import_first(): i1 = 'from __future__ import division' i2 = 'from __future__ import absolute_import' assert not is_issue(i1) assert not is_issue(i1 + ';' + i2) assert not is_issue(i1 + '\n' + i2) assert not is_issue('"";' + i1) assert not is_issue('"";' + i1) assert not is_issue('""\n' + i1) assert not is_issue('""\n%s\n%s', i1, i2) assert not is_issue('""\n%s;%s', i1, i2) assert not is_issue('"";%s;%s ', i1, i2) assert not is_issue('"";%s\n%s ', i1, i2) assert is_issue('1;' + i1) assert is_issue('1\n' + i1) assert is_issue('"";1\n' + i1) assert is_issue('""\n%s\nfrom x import a\n%s', i1, i2) assert is_issue('%s\n""\n%s', i1, i2) def test_named_argument_issues(works_not_in_py): message = works_not_in_py.get_error_message('def foo(*, **dict): pass') message = works_not_in_py.get_error_message('def foo(*): pass') if works_not_in_py.version.startswith('2'): assert message == 'SyntaxError: invalid syntax' else: assert message == 'SyntaxError: named arguments must follow bare *' works_not_in_py.assert_no_error_in_passing('def foo(*, name): pass') works_not_in_py.assert_no_error_in_passing('def foo(bar, *, name=1): pass') works_not_in_py.assert_no_error_in_passing('def foo(bar, *, name=1, **dct): pass') def test_escape_decode_literals(each_version): """ We are using internal functions to assure that unicode/bytes escaping is without syntax errors. Here we make a bit of quality assurance that this works through versions, because the internal function might change over time. 
""" error, = _get_error_list(r'u"\x"', version=each_version) assert error.message in get_msgs(r'\xXX') error, = _get_error_list(r'u"\u"', version=each_version) assert error.message in get_msgs(r'\uXXXX') error, = _get_error_list(r'u"\U"', version=each_version) assert error.message in get_msgs(r'\UXXXXXXXX') error, = _get_error_list(r'u"\N{}"', version=each_version) assert error.message == get_msg(r'malformed \N character escape', to=2) error, = _get_error_list(r'u"\N{foo}"', version=each_version) assert error.message == get_msg(r'unknown Unicode character name', to=6) # Finally bytes. error, = _get_error_list(r'b"\x"', version=each_version) wanted = r'SyntaxError: (value error) invalid \x escape' if sys.version_info >= (3, 0): # The positioning information is only available in Python 3. wanted += ' at position 0' assert error.message == wanted def test_too_many_levels_of_indentation(): assert not _get_error_list(build_nested('pass', 99)) assert _get_error_list(build_nested('pass', 100)) base = 'def x():\n if x:\n' assert not _get_error_list(build_nested('pass', 49, base=base)) assert _get_error_list(build_nested('pass', 50, base=base))
34.860248
96
0.625568
07fb5058c7a297096cbf1ff7f21aedcf66b7d3ad
985
py
Python
shogitk/usikif.py
koji-hirono/pytk-shogi-replayer
a10819a797faecbee5c7b0654beb3694eb522840
[ "MIT" ]
null
null
null
shogitk/usikif.py
koji-hirono/pytk-shogi-replayer
a10819a797faecbee5c7b0654beb3694eb522840
[ "MIT" ]
null
null
null
shogitk/usikif.py
koji-hirono/pytk-shogi-replayer
a10819a797faecbee5c7b0654beb3694eb522840
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-

from __future__ import unicode_literals
from shogitk.shogi import Coords, Move, BLACK, WHITE, DROP, PROMOTE

RANKNUM = {
    'a': 1,
    'b': 2,
    'c': 3,
    'd': 4,
    'e': 5,
    'f': 6,
    'g': 7,
    'h': 8,
    'i': 9
}
25.921053
74
0.450761
07fca3ec2f3896a49c6703b50dabc9ec79e258a9
2,160
py
Python
etherbank_cli/oracles.py
ideal-money/etherbank-cli
d957daa13aa951331cadc35c246c1ce8459ca8df
[ "BSD-2-Clause" ]
1
2018-12-29T02:12:25.000Z
2018-12-29T02:12:25.000Z
etherbank_cli/oracles.py
ideal-money/etherbank-cli
d957daa13aa951331cadc35c246c1ce8459ca8df
[ "BSD-2-Clause" ]
5
2018-12-20T12:45:39.000Z
2019-01-08T06:16:01.000Z
etherbank_cli/oracles.py
ideal-money/etherbank-cli
d957daa13aa951331cadc35c246c1ce8459ca8df
[ "BSD-2-Clause" ]
null
null
null
import click

from . import utils


if __name__ == '__main__':
    main()
30
82
0.697222
07fd108f6337b8e7a88da0155cf318b6098e4ae4
2,585
py
Python
src/grader/machine.py
MrKaStep/csc230-grader
559846f4d921c5c4be6b6e9ba8629fb24b448e41
[ "MIT" ]
null
null
null
src/grader/machine.py
MrKaStep/csc230-grader
559846f4d921c5c4be6b6e9ba8629fb24b448e41
[ "MIT" ]
null
null
null
src/grader/machine.py
MrKaStep/csc230-grader
559846f4d921c5c4be6b6e9ba8629fb24b448e41
[ "MIT" ]
null
null
null
import getpass

from plumbum import local
from plumbum.machines.paramiko_machine import ParamikoMachine
from plumbum.path.utils import copy


def get_local_machine():
    return local


def with_machine_rule(cls):
    old_init = cls.__init__
    cls.__init__ = new_init

    old_apply = cls.apply
    cls.apply = new_apply

    return cls
32.3125
133
0.600387
07fdb99131cb0d8251646bea304d79ad58fc7ab6
157
py
Python
Mundo 1/Ex33.py
legna7/Python
52e0b642d1b7acc592ec82dd360c5697fb0765db
[ "MIT" ]
null
null
null
Mundo 1/Ex33.py
legna7/Python
52e0b642d1b7acc592ec82dd360c5697fb0765db
[ "MIT" ]
null
null
null
Mundo 1/Ex33.py
legna7/Python
52e0b642d1b7acc592ec82dd360c5697fb0765db
[ "MIT" ]
null
null
null
salario = float(input('digite o seu salario: '))
aumento = (salario + (salario * 15)/100 if salario <= 1250 else salario + (salario * 10)/100)
print(aumento)
52.333333
93
0.687898
07ff0da6e717ab9585c2e512803b8604ff985d37
2,793
py
Python
tests/test_tree.py
andreax79/airflow-code-editor
031170387496bbc6d540179c6c2f1765e1e70694
[ "Apache-2.0" ]
194
2019-08-06T13:03:11.000Z
2022-03-25T15:29:29.000Z
tests/test_tree.py
andreax79/airflow-code-editor
031170387496bbc6d540179c6c2f1765e1e70694
[ "Apache-2.0" ]
29
2019-08-23T16:07:17.000Z
2022-03-31T03:43:47.000Z
tests/test_tree.py
andreax79/airflow-code-editor
031170387496bbc6d540179c6c2f1765e1e70694
[ "Apache-2.0" ]
32
2019-08-15T12:13:37.000Z
2022-03-31T17:27:24.000Z
#!/usr/bin/env python

import os
import os.path
import airflow
import airflow.plugins_manager
from airflow import configuration
from flask import Flask
from unittest import TestCase, main

from airflow_code_editor.commons import PLUGIN_NAME
from airflow_code_editor.tree import (
    get_tree,
)

assert airflow.plugins_manager

app = Flask(__name__)


if __name__ == '__main__':
    main()
31.382022
85
0.583602
07ff31219d3e42ddfa090b695c0d4b6ede8d31e9
2,826
py
Python
examples/token_freshness.py
greenape/flask-jwt-extended
11ac3bf0937ee199aea7d6dc47c748bef9bf1d2f
[ "MIT" ]
2
2021-03-20T01:55:08.000Z
2021-11-14T12:20:23.000Z
examples/token_freshness.py
greenape/flask-jwt-extended
11ac3bf0937ee199aea7d6dc47c748bef9bf1d2f
[ "MIT" ]
1
2020-08-06T23:02:45.000Z
2020-09-26T01:36:21.000Z
examples/token_freshness.py
greenape/flask-jwt-extended
11ac3bf0937ee199aea7d6dc47c748bef9bf1d2f
[ "MIT" ]
1
2020-10-28T20:09:00.000Z
2020-10-28T20:09:00.000Z
from quart import Quart, jsonify, request
from quart_jwt_extended import (
    JWTManager,
    jwt_required,
    create_access_token,
    jwt_refresh_token_required,
    create_refresh_token,
    get_jwt_identity,
    fresh_jwt_required,
)

app = Quart(__name__)

app.config["JWT_SECRET_KEY"] = "super-secret"  # Change this!

jwt = JWTManager(app)


# Standard login endpoint. Will return a fresh access token and
# a refresh token


# Refresh token endpoint. This will generate a new access token from
# the refresh token, but will mark that access token as non-fresh,
# as we do not actually verify a password in this endpoint.


# Fresh login endpoint. This is designed to be used if we need to
# make a fresh token for a user (by verifying they have the
# correct username and password). Unlike the standard login endpoint,
# this will only return a new access token, so that we don't keep
# generating new refresh tokens, which entirely defeats their point.


# Any valid JWT can access this endpoint


# Only fresh JWTs can access this endpoint


if __name__ == "__main__":
    app.run()
33.247059
75
0.714084
07ff9a1f5ea3709124ad1fb347eb63a9284a7ad2
195
py
Python
env/lib/python3.7/site-packages/tinvest/typedefs.py
umchemurziev/Practics
82b49f9d58e67f1ecff9e6303e7d914bc1905730
[ "MIT" ]
1
2021-03-29T18:47:32.000Z
2021-03-29T18:47:32.000Z
env/lib/python3.7/site-packages/tinvest/typedefs.py
umchemurziev/Practics
82b49f9d58e67f1ecff9e6303e7d914bc1905730
[ "MIT" ]
null
null
null
env/lib/python3.7/site-packages/tinvest/typedefs.py
umchemurziev/Practics
82b49f9d58e67f1ecff9e6303e7d914bc1905730
[ "MIT" ]
null
null
null
from datetime import datetime
from typing import Any, Dict, Union

__all__ = 'AnyDict'

AnyDict = Dict[str, Any]  # pragma: no mutate
datetime_or_str = Union[datetime, str]  # pragma: no mutate
21.666667
59
0.738462
07ffdb3c18cae37c2fe662c5c84ed5398af39b35
1,345
py
Python
keras/linear/model/pipeline_train.py
PipelineAI/models
d8df07877aa8b10ce9b84983bb440af75e84dca7
[ "Apache-2.0" ]
44
2017-11-17T06:19:05.000Z
2021-11-03T06:00:56.000Z
keras/linear/model/pipeline_train.py
PipelineAI/models
d8df07877aa8b10ce9b84983bb440af75e84dca7
[ "Apache-2.0" ]
3
2018-08-09T14:28:17.000Z
2018-09-10T03:32:42.000Z
keras/linear/model/pipeline_train.py
PipelineAI/models
d8df07877aa8b10ce9b84983bb440af75e84dca7
[ "Apache-2.0" ]
21
2017-11-18T15:12:12.000Z
2020-08-15T07:08:33.000Z
import os

os.environ['KERAS_BACKEND'] = 'theano'
os.environ['THEANO_FLAGS'] = 'floatX=float32,device=cpu'

import cloudpickle as pickle
import pipeline_invoke

import pandas as pd
import numpy as np

import keras
from keras.layers import Input, Dense
from keras.models import Model
from keras.models import save_model, load_model

from sklearn.preprocessing import StandardScaler, MinMaxScaler, Normalizer

if __name__ == '__main__':
    df = pd.read_csv("../input/training/training.csv")
    df["People per Television"] = pd.to_numeric(df["People per Television"], errors='coerce')
    df = df.dropna()

    x = df["People per Television"].values.reshape(-1, 1).astype(np.float64)
    y = df["People per Physician"].values.reshape(-1, 1).astype(np.float64)

    # min-max -1,1
    sc = MinMaxScaler(feature_range=(-1, 1))
    x_ = sc.fit_transform(x)
    y_ = sc.fit_transform(y)

    inputs = Input(shape=(1,))
    preds = Dense(1, activation='linear')(inputs)

    model = Model(inputs=inputs, outputs=preds)

    sgd = keras.optimizers.SGD()
    model.compile(optimizer=sgd, loss='mse')

    model.fit(x_, y_, batch_size=1, verbose=1, epochs=10, shuffle=False)

    save_model(model, 'state/keras_theano_linear_model_state.h5')

    # model_pkl_path = 'model.pkl'
    # with open(model_pkl_path, 'wb') as fh:
    #     pickle.dump(pipeline_invoke, fh)
30.568182
92
0.709294
580077f8f713a612aa61ab64e08f6fd83f19a081
1,454
py
Python
tests/effects/test_cheerlights.py
RatJuggler/led-shim-effects
3c63f5f2ce3f35f52e784489deb9212757c18cd2
[ "MIT" ]
1
2021-04-17T16:18:14.000Z
2021-04-17T16:18:14.000Z
tests/effects/test_cheerlights.py
RatJuggler/led-shim-effects
3c63f5f2ce3f35f52e784489deb9212757c18cd2
[ "MIT" ]
12
2019-07-26T18:01:56.000Z
2019-08-31T15:35:17.000Z
tests/effects/test_cheerlights.py
RatJuggler/led-shim-demo
3c63f5f2ce3f35f52e784489deb9212757c18cd2
[ "MIT" ]
null
null
null
from unittest import TestCase
from unittest.mock import Mock, patch

import sys
sys.modules['smbus'] = Mock()  # Mock the hardware layer to avoid errors.

from ledshimdemo.canvas import Canvas
from ledshimdemo.effects.cheerlights import CheerLightsEffect
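# NOTE (editorial): the TestCase subclass is missing from this extract. A
# minimal hypothetical test; the Canvas size and the effect's method names
# (get_colour_from_channel, compose) are assumptions based on the imports:
class CheerLightsEffectTest(TestCase):
    @patch('ledshimdemo.effects.cheerlights.CheerLightsEffect.get_colour_from_channel',
           return_value=None)
    def test_no_colour_leaves_canvas_unchanged(self, get_colour_mock):
        canvas = Canvas(3)
        effect = CheerLightsEffect(canvas)
        effect.compose()
        get_colour_mock.assert_called_once()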
39.297297
106
0.72696
5800997c4a49cdfc01a368bf3ebf423b84d98d2c
7,074
py
Python
figures/Figure_7/02_generate_images.py
Jhsmit/ColiCoords-Paper
7b92e67600930f64859d14867113b6de3edf1379
[ "MIT" ]
2
2019-05-12T12:06:50.000Z
2020-11-11T16:44:49.000Z
figures/Figure_7/02_generate_images.py
Jhsmit/ColiCoords-Paper
7b92e67600930f64859d14867113b6de3edf1379
[ "MIT" ]
null
null
null
figures/Figure_7/02_generate_images.py
Jhsmit/ColiCoords-Paper
7b92e67600930f64859d14867113b6de3edf1379
[ "MIT" ]
2
2019-06-17T16:00:56.000Z
2020-02-07T22:17:47.000Z
from colicoords.synthetic_data import add_readout_noise, draw_poisson
from colicoords import load
import numpy as np
import mahotas as mh
from tqdm import tqdm
import os
import tifffile


def gen_im(data_dir):
    """Generate microscopy images from a list of cell objects by placing them randomly oriented in the image."""
    # NOTE: generate_images() is not defined or imported in this extract;
    # in the original it is presumably provided alongside the other helpers.
    cell_list = load(os.path.join(data_dir, 'cell_obj', 'cells_final_selected.hdf5'))
    out_dict = generate_images(cell_list, 1000, 10, 3, (512, 512))

    if not os.path.exists(os.path.join(data_dir, 'images')):
        os.mkdir(os.path.join(data_dir, 'images'))

    np.save(os.path.join(data_dir, 'images', 'binary.npy'), out_dict['binary'])
    np.save(os.path.join(data_dir, 'images', 'brightfield.npy'), out_dict['brightfield'])
    np.save(os.path.join(data_dir, 'images', 'foci_inner.npy'), out_dict['foci_inner'])
    np.save(os.path.join(data_dir, 'images', 'foci_outer.npy'), out_dict['foci_outer'])
    np.save(os.path.join(data_dir, 'images', 'storm_inner.npy'), out_dict['storm_inner'])
    np.save(os.path.join(data_dir, 'images', 'storm_outer.npy'), out_dict['storm_outer'])

    tifffile.imsave(os.path.join(data_dir, 'images', 'binary.tif'), out_dict['binary'])
    tifffile.imsave(os.path.join(data_dir, 'images', 'brightfield.tif'), out_dict['brightfield'])
    tifffile.imsave(os.path.join(data_dir, 'images', 'foci_inner.tif'), out_dict['foci_inner'])
    tifffile.imsave(os.path.join(data_dir, 'images', 'foci_outer.tif'), out_dict['foci_outer'])

    np.savetxt(os.path.join(data_dir, 'images', 'storm_inner.txt'), out_dict['storm_inner'])
    np.savetxt(os.path.join(data_dir, 'images', 'storm_outer.txt'), out_dict['storm_outer'])


def noise_bf(data_dir):
    """add poissonian and readout noise to brightfield images"""
    noise = 20
    img_stack = np.load(os.path.join(data_dir, 'images', 'brightfield.npy'))
    for photons in [10000, 1000, 500]:
        ratio = 1.0453  # ratio between 'background' (no cells) and cell wall
        img = (photons*(ratio-1))*img_stack + photons
        img = draw_poisson(img)
        img = add_readout_noise(img, noise)
        tifffile.imsave(os.path.join(data_dir, 'images', 'bf_noise_{}_photons.tif'.format(photons)), img)
        np.save(os.path.join(data_dir, 'images', 'bf_noise_{}_photons.npy'.format(photons)), img)


if __name__ == '__main__':
    np.random.seed(42)
    data_dir = r'.'
    if not os.path.exists(os.path.join(data_dir, 'images')):
        os.mkdir(os.path.join(data_dir, 'images'))
    gen_im(data_dir)
    noise_bf(data_dir)
40.890173
126
0.602771
580134063c60e1903557dccde046d7a394258b01
319
py
Python
dictionary.py
SchmitzAndrew/OSS-101-example
1efecd4c5bfef4495904568d11e3f8d0a5ed9bd0
[ "MIT" ]
null
null
null
dictionary.py
SchmitzAndrew/OSS-101-example
1efecd4c5bfef4495904568d11e3f8d0a5ed9bd0
[ "MIT" ]
null
null
null
dictionary.py
SchmitzAndrew/OSS-101-example
1efecd4c5bfef4495904568d11e3f8d0a5ed9bd0
[ "MIT" ]
null
null
null
word = input("Enter a word: ")

if word == "a":
    print("one; any")
elif word == "apple":
    print("familiar, round fleshy fruit")
elif word == "rhinoceros":
    print("large thick-skinned animal with one or two horns on its nose")
else:
    print("That word must not exist. This dictionary is very comprehensive.")
29
77
0.667712
58023a0843c7993ed1535e882c1755e33a7a8544
116
py
Python
solved_bronze/num11720.py
ilmntr/white_study
51d69d122b07e9a0922dddb134bff4ec79077eb9
[ "MIT" ]
null
null
null
solved_bronze/num11720.py
ilmntr/white_study
51d69d122b07e9a0922dddb134bff4ec79077eb9
[ "MIT" ]
null
null
null
solved_bronze/num11720.py
ilmntr/white_study
51d69d122b07e9a0922dddb134bff4ec79077eb9
[ "MIT" ]
null
null
null
cnt = int(input())
num = list(map(int, input()))
sum = 0
for i in range(len(num)):
    sum = sum + num[i]
print(sum)
19.333333
29
0.586207
58035ad02fa85d7c60de0ef4d5c14279175bc2ac
566
py
Python
setup.py
sdnhub/kube-navi
d16a9289ba7261011e6c8d19c48cdc9bd533e629
[ "Apache-2.0" ]
null
null
null
setup.py
sdnhub/kube-navi
d16a9289ba7261011e6c8d19c48cdc9bd533e629
[ "Apache-2.0" ]
null
null
null
setup.py
sdnhub/kube-navi
d16a9289ba7261011e6c8d19c48cdc9bd533e629
[ "Apache-2.0" ]
null
null
null
from distutils.core import setup

setup(
    name = 'kube_navi',
    packages = ['kube_navi'],  # this must be the same as the name above
    version = '0.1',
    description = 'Kubernetes resource discovery toolkit',
    author = 'Srini Seetharaman',
    author_email = '[email protected]',
    url = 'https://github.com/sdnhub/kube-navi',  # use the URL to the github repo
    download_url = 'https://github.com/sdnhub/kube-navi/archive/0.1.tar.gz',  # I'll explain this in a second
    keywords = ['testing', 'logging', 'example'],  # arbitrary keywords
    classifiers = [],
)
40.428571
106
0.69788
5804951a8f92330526763d3f11395d318d54d180
10,444
py
Python
flink-ai-flow/ai_flow/metric/utils.py
MarvinMiao/flink-ai-extended
e45eecf2deea6976ba3d7ba821ffb8d9ce0a17f4
[ "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause" ]
1
2020-12-12T15:21:05.000Z
2020-12-12T15:21:05.000Z
flink-ai-flow/ai_flow/metric/utils.py
MarvinMiao/flink-ai-extended
e45eecf2deea6976ba3d7ba821ffb8d9ce0a17f4
[ "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause" ]
1
2021-01-30T11:28:37.000Z
2021-01-30T11:28:37.000Z
flink-ai-flow/ai_flow/metric/utils.py
MarvinMiao/flink-ai-extended
e45eecf2deea6976ba3d7ba821ffb8d9ce0a17f4
[ "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause" ]
null
null
null
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
import ast
from typing import Text, Optional, Union, List

from ai_flow.rest_endpoint.protobuf.metric_service_pb2 import MetricMetaResponse, ListMetricMetaResponse, \
    MetricSummaryResponse, ListMetricSummaryResponse
from ai_flow.rest_endpoint.service import int64Value, stringValue
from ai_flow.common.properties import Properties
from ai_flow.meta.metric_meta import MetricMeta, MetricType, MetricSummary
from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, \
    SUCCESS, RESOURCE_DOES_NOT_EXIST
from ai_flow.store.db.db_model import SqlMetricMeta, SqlMetricSummary
from ai_flow.store.db.db_model import MongoMetricSummary, MongoMetricMeta
49.032864
122
0.627633
58055aabc65a23b166d03e3a5c7b5b2ffaa06173
3,154
py
Python
src/moduels/gui/Tab_Help.py
HaujetZhao/Caps_Writer
f2b2038a2c0984a1d356f024cbac421fe594601a
[ "MIT" ]
234
2020-07-10T11:23:09.000Z
2022-03-31T09:41:40.000Z
src/moduels/gui/Tab_Help.py
HaujetZhao/Caps_Writer
f2b2038a2c0984a1d356f024cbac421fe594601a
[ "MIT" ]
9
2020-07-11T08:31:11.000Z
2022-03-01T04:30:08.000Z
src/moduels/gui/Tab_Help.py
HaujetZhao/Caps_Writer
f2b2038a2c0984a1d356f024cbac421fe594601a
[ "MIT" ]
23
2020-07-14T08:58:44.000Z
2022-03-17T06:38:10.000Z
# -*- coding: UTF-8 -*-

from PySide2.QtWidgets import QWidget, QPushButton, QVBoxLayout
from PySide2.QtCore import Signal

# NOTE (editorial): every non-ASCII (Chinese) identifier and string literal
# in this file was stripped during extraction, leaving holes such as
# "self. = QPushButton(self.tr(''))". The English names used below
# (constants, openHelpDocButton, the button captions, constants.version and
# constants.platform) are placeholder reconstructions inferred from context,
# not the original author's identifiers.
from moduels.component.NormalValue import constants
from moduels.component.SponsorDialog import SponsorDialog
import os, webbrowser


def initElement(self):
    self.openHelpDocButton = QPushButton(self.tr('Open the help document'))
    self.ffmpegMannualNoteButton = QPushButton(self.tr('Open the FFmpeg notes'))
    self.openVideoHelpButtone = QPushButton(self.tr('Watch the video tutorial'))
    self.openGiteePage = QPushButton(self.tr(f'Open the v{constants.version} Gitee page'))
    self.openGithubPage = QPushButton(self.tr(f'Open the v{constants.version} Github page'))
    self.linkToDiscussPage = QPushButton(self.tr('Join the QQ discussion group'))
    self.tipButton = QPushButton(self.tr('Sponsor'))
    self.masterLayout = QVBoxLayout()


def initSlots(self):
    self.openHelpDocButton.clicked.connect(self.openHelpDocument)
    self.ffmpegMannualNoteButton.clicked.connect(lambda: webbrowser.open(self.tr(r'https://hacpai.com/article/1595480295489')))
    self.openVideoHelpButtone.clicked.connect(lambda: webbrowser.open(self.tr(r'https://www.bilibili.com/video/BV12A411p73r/')))
    self.openGiteePage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://gitee.com/haujet/CapsWriter/releases')))
    self.openGithubPage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://github.com/HaujetZhao/CapsWriter/releases')))
    self.linkToDiscussPage.clicked.connect(lambda: webbrowser.open(
        self.tr(r'https://qm.qq.com/cgi-bin/qm/qr?k=DgiFh5cclAElnELH4mOxqWUBxReyEVpm&jump_from=webapi')))
    self.tipButton.clicked.connect(lambda: SponsorDialog(self))


def initLayout(self):
    self.setLayout(self.masterLayout)
    # self.masterLayout.addWidget(self.openHelpDocButton)
    # self.masterLayout.addWidget(self.ffmpegMannualNoteButton)
    self.masterLayout.addWidget(self.openVideoHelpButtone)
    self.masterLayout.addWidget(self.openGiteePage)
    self.masterLayout.addWidget(self.openGithubPage)
    self.masterLayout.addWidget(self.linkToDiscussPage)
    self.masterLayout.addWidget(self.tipButton)


def initValue(self):
    self.openHelpDocButton.setMaximumHeight(100)
    self.ffmpegMannualNoteButton.setMaximumHeight(100)
    self.openVideoHelpButtone.setMaximumHeight(100)
    self.openGiteePage.setMaximumHeight(100)
    self.openGithubPage.setMaximumHeight(100)
    self.linkToDiscussPage.setMaximumHeight(100)
    self.tipButton.setMaximumHeight(100)


def openHelpDocument(self):
    try:
        if constants.platform == 'Darwin':
            import shlex
            os.system("open " + shlex.quote(self.tr("./misc/Docs/README_zh.html")))
        elif constants.platform == 'Windows':
            os.startfile(os.path.realpath(self.tr('./misc/Docs/README_zh.html')))
    except:
        print('Failed to open the help document')
45.057143
132
0.69499
5805a2c8d616906daf19682b40baa91f10a88715
1,845
py
Python
app/routes/register.py
AuFeld/COAG
3874a9c1c6ceb908a6bbabfb49e2c701d8e54f20
[ "MIT" ]
1
2021-06-03T10:29:12.000Z
2021-06-03T10:29:12.000Z
app/routes/register.py
AuFeld/COAG
3874a9c1c6ceb908a6bbabfb49e2c701d8e54f20
[ "MIT" ]
45
2021-06-05T14:47:09.000Z
2022-03-30T06:16:44.000Z
app/routes/register.py
AuFeld/COAG
3874a9c1c6ceb908a6bbabfb49e2c701d8e54f20
[ "MIT" ]
null
null
null
from typing import Callable, Optional, Type, cast

from fastapi import APIRouter, HTTPException, Request, status

from app.models import users
from app.common.user import ErrorCode, run_handler
from app.users.user import (
    CreateUserProtocol,
    InvalidPasswordException,
    UserAlreadyExists,
    ValidatePasswordProtocol,
)


def get_register_router(
    create_user: CreateUserProtocol,
    user_model: Type[users.BaseUser],
    user_create_model: Type[users.BaseUserCreate],
    after_register: Optional[Callable[[users.UD, Request], None]] = None,
    validate_password: Optional[ValidatePasswordProtocol] = None,
) -> APIRouter:
    """Generate a router with the register route."""
    router = APIRouter()

    return router
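# NOTE (editorial): the @router.post("/register") handler that belongs inside
# get_register_router() is missing from this extract. A condensed sketch in
# the style of fastapi-users (the error code and the create_user signature
# are assumptions):
#
#     @router.post("/register", response_model=user_model,
#                  status_code=status.HTTP_201_CREATED)
#     async def register(user: user_create_model, request: Request):  # type: ignore
#         try:
#             created_user = await create_user(user, safe=True)
#         except UserAlreadyExists:
#             raise HTTPException(
#                 status_code=status.HTTP_400_BAD_REQUEST,
#                 detail=ErrorCode.REGISTER_USER_ALREADY_EXISTS,
#             )
#         if after_register:
#             await run_handler(after_register, created_user, request)
#         return created_user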
32.946429
83
0.648238
5805ce50d417618b337a1e60276ff06de0f997f8
1,425
py
Python
utils/visual.py
xizaoqu/Panoptic-PolarNet
8ce05f437f54e030eac7de150f43caab2810cfbb
[ "BSD-3-Clause" ]
90
2021-03-30T08:02:15.000Z
2022-03-30T03:29:56.000Z
utils/visual.py
xizaoqu/Panoptic-PolarNet
8ce05f437f54e030eac7de150f43caab2810cfbb
[ "BSD-3-Clause" ]
11
2021-04-01T02:29:08.000Z
2022-03-04T07:30:50.000Z
utils/visual.py
xizaoqu/Panoptic-PolarNet
8ce05f437f54e030eac7de150f43caab2810cfbb
[ "BSD-3-Clause" ]
21
2021-04-01T09:29:38.000Z
2022-03-28T01:36:02.000Z
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import numpy as np


def flow_to_img(flow, normalize=True):
    """Convert flow to viewable image, using color hue to encode flow vector
    orientation, and color saturation to encode vector length. This is similar
    to the OpenCV tutorial on dense optical flow, except that they map vector
    length to the value plane of the HSV color model, instead of the
    saturation plane, as we do here.
    Args:
        flow: optical flow
        normalize: Normalize flow to 0..255
    Returns:
        img: viewable representation of the dense optical flow in RGB format
    Ref:
        https://github.com/philferriere/tfoptflow/blob/33e8a701e34c8ce061f17297d40619afbd459ade/tfoptflow/optflow.py
    """
    hsv = np.zeros((flow.shape[0], flow.shape[1], 3), dtype=np.uint8)
    flow_magnitude, flow_angle = cv2.cartToPolar(flow[..., 0].astype(np.float32), flow[..., 1].astype(np.float32))

    # A couple times, we've gotten NaNs out of the above...
    nans = np.isnan(flow_magnitude)
    if np.any(nans):
        nans = np.where(nans)
        flow_magnitude[nans] = 0.

    # Normalize
    hsv[..., 0] = flow_angle * 180 / np.pi / 2
    if normalize is True:
        hsv[..., 1] = cv2.normalize(flow_magnitude, None, 0, 255, cv2.NORM_MINMAX)
    else:
        hsv[..., 1] = flow_magnitude
    hsv[..., 2] = 255
    img = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)

    return img
39.583333
116
0.665263
5806fd8ba37feb4c4d823dfb9c4c105ed07bdd0c
624
py
Python
DistributedRL/Gateway/build/Code/sim/Parser/LAI/GreenIndex.py
zhkmxx9302013/SoftwarePilot
826098465b800085774946c20a7a283f369f1d21
[ "MIT" ]
4
2019-03-20T17:46:01.000Z
2019-03-31T17:32:44.000Z
DistributedRL/Gateway/build/Code/sim/Parser/LAI/GreenIndex.py
zhkmxx9302013/SoftwarePilot
826098465b800085774946c20a7a283f369f1d21
[ "MIT" ]
null
null
null
DistributedRL/Gateway/build/Code/sim/Parser/LAI/GreenIndex.py
zhkmxx9302013/SoftwarePilot
826098465b800085774946c20a7a283f369f1d21
[ "MIT" ]
null
null
null
import argparse
from PIL import Image, ImageStat
import math

parser = argparse.ArgumentParser()
parser.add_argument('fname')
parser.add_argument('pref', default="", nargs="?")
args = parser.parse_args()

im = Image.open(args.fname)
RGB = im.convert('RGB')
imWidth, imHeight = im.size

ratg = 1.2
ratgb = 1.66
ming = 10
ratr = 2
speed = 8

leafcount = 0
total = 0
for i in range(0, int(imWidth/speed)):
    for j in range(0, int(imHeight/speed)):
        R, G, B = RGB.getpixel((i*speed, j*speed))
        if R*ratg < G and B*ratgb < G and B*ratr < R:
            leafcount = leafcount + 1
        total = total+1

print("LAI="+str(float(leafcount)/total))
20.8
50
0.684295
58077bea9c4435d13d9ff119348291eadd3323f7
4,561
py
Python
reports/heliosV1/python/heliosStorageStats/heliosStorageStats.py
ped998/scripts
0dcaaf47f9676210e1c972a5d59d8d0de82a1d93
[ "Apache-2.0" ]
null
null
null
reports/heliosV1/python/heliosStorageStats/heliosStorageStats.py
ped998/scripts
0dcaaf47f9676210e1c972a5d59d8d0de82a1d93
[ "Apache-2.0" ]
null
null
null
reports/heliosV1/python/heliosStorageStats/heliosStorageStats.py
ped998/scripts
0dcaaf47f9676210e1c972a5d59d8d0de82a1d93
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python
"""cluster storage stats for python"""

# import pyhesity wrapper module
from pyhesity import *
from datetime import datetime
import codecs

# command line arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--vip', type=str, default='helios.cohesity.com')  # cluster to connect to
parser.add_argument('-u', '--username', type=str, required=True)  # username
parser.add_argument('-d', '--domain', type=str, default='local')  # (optional) domain - defaults to local
parser.add_argument('-pwd', '--password', type=str, default=None)  # optional password
parser.add_argument('-n', '--unit', type=str, choices=['GiB', 'TiB', 'gib', 'tib'], default='TiB')
args = parser.parse_args()

vip = args.vip
username = args.username
domain = args.domain
password = args.password
unit = args.unit

if unit.lower() == 'tib':
    multiplier = 1024 * 1024 * 1024 * 1024
    unit = 'TiB'
else:
    multiplier = 1024 * 1024 * 1024
    unit = 'GiB'

# authenticate
apiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=True, noretry=True)

# outfile
now = datetime.now()
# cluster = api('get', 'cluster')
dateString = now.strftime("%Y-%m-%d")
outfile = 'heliosStorageStats-%s.csv' % dateString
f = codecs.open(outfile, 'w')

# headings
f.write('Date,Capacity (%s),Consumed (%s),Free (%s),Used %%,Data In (%s),Data Written (%s),Storage Reduction,Data Reduction\n' % (unit, unit, unit, unit, unit))

stats = {}
endMsecs = dateToUsecs(now.strftime("%Y-%m-%d %H:%M:%S")) / 1000
startMsecs = (timeAgo(2, 'days')) / 1000

print('\nGathering cluster stats:\n')

for cluster in heliosClusters():
    heliosCluster(cluster)
    print('    %s' % cluster['name'])
    capacityStats = api('get', 'statistics/timeSeriesStats?endTimeMsecs=%s&entityId=%s&metricName=kCapacityBytes&metricUnitType=0&range=day&rollupFunction=average&rollupIntervalSecs=86400&schemaName=kBridgeClusterStats&startTimeMsecs=%s' % (endMsecs, cluster['clusterId'], startMsecs))
    consumedStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterTierPhysicalStats&metricName=kMorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s:Local&endTimeMsecs=%s' % (startMsecs, cluster['clusterId'], endMsecs))
    dataInStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=BrickBytesLogical&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID %s)&endTimeMsecs=%s' % (startMsecs, cluster['name'], cluster['clusterId'], endMsecs))
    dataWrittenStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=ChunkBytesMorphed&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID %s)&endTimeMsecs=%s' % (startMsecs, cluster['name'], cluster['clusterId'], endMsecs))
    logicalSizeStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterLogicalStats&metricName=kUnmorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s&endTimeMsecs=%s' % (startMsecs, cluster['clusterId'], endMsecs))
    parseStats(cluster['name'], capacityStats['dataPointVec'][0], 'capacity')
    parseStats(cluster['name'], consumedStats['dataPointVec'][0], 'consumed')
    parseStats(cluster['name'], dataInStats['dataPointVec'][0], 'dataIn')
    parseStats(cluster['name'], dataWrittenStats['dataPointVec'][0], 'dataWritten')
    parseStats(cluster['name'], logicalSizeStats['dataPointVec'][0], 'logicalSize')

for clusterName in sorted(stats.keys()):
    capacity = stats[clusterName]['capacity']
    consumed = stats[clusterName]['consumed']
    dataIn = stats[clusterName]['dataIn']
    dataWritten = stats[clusterName]['dataWritten']
    logicalSize = stats[clusterName]['logicalSize']
    free = capacity - consumed
    pctUsed = round(100 * consumed / capacity, 0)
    storageReduction = round(float(logicalSize) / consumed, 1)
    dataReduction = round(float(dataIn) / dataWritten, 1)
    f.write('"%s","%s","%s","%s","%s","%s","%s","%s","%s"\n' % (clusterName, toUnits(capacity), toUnits(consumed), toUnits(free), pctUsed, toUnits(dataIn), toUnits(dataWritten), storageReduction, dataReduction))

f.close()
print('\nOutput saved to %s\n' % outfile)
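# NOTE (editorial): parseStats() and toUnits() are called above but their
# definitions are missing from this extract; in the original they would be
# defined before the gathering loop. Minimal sketches consistent with the
# call sites (the dataPoint field names are assumptions):
def parseStats(clusterName, dataPoint, statName):
    if clusterName not in stats:
        stats[clusterName] = {}
    stats[clusterName][statName] = dataPoint['data']['int64Value']


def toUnits(value):
    # Convert raw bytes to the unit selected on the command line.
    return round(float(value) / multiplier, 1)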
49.043011
293
0.726814
580792f2d4c1bf5c14b84d5f807f69b1126aead4
5,422
py
Python
src/advanceoperate/malimgthread.py
zengrx/S.M.A.R.T
47a9abe89008e9b34f9b9d057656dbf3fb286456
[ "MIT" ]
10
2017-07-11T01:08:28.000Z
2021-05-07T01:49:00.000Z
src/advanceoperate/malimgthread.py
YanqiangHuang/S.M.A.R.T
47a9abe89008e9b34f9b9d057656dbf3fb286456
[ "MIT" ]
null
null
null
src/advanceoperate/malimgthread.py
YanqiangHuang/S.M.A.R.T
47a9abe89008e9b34f9b9d057656dbf3fb286456
[ "MIT" ]
6
2017-05-02T14:27:15.000Z
2017-05-15T05:56:40.000Z
#coding=utf-8

from PyQt4 import QtCore
import os, glob, numpy, sys
from PIL import Image
from sklearn.cross_validation import StratifiedKFold
from sklearn.metrics import confusion_matrix
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import BallTree
from sklearn import cross_validation
from sklearn.utils import shuffle
import sklearn
import leargist
import cPickle
import random

import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
31.34104
87
0.568241
6af1bee4f8dfc29969377047eaf5953641fb77f7
4,823
py
Python
tests/test_find_forks/test_find_forks.py
ivan2kh/find_forks
409251282a85da48445afc03c5a1797df393ca95
[ "MIT" ]
41
2015-05-15T14:37:42.000Z
2022-02-05T01:52:00.000Z
tests/test_find_forks/test_find_forks.py
ivan2kh/find_forks
409251282a85da48445afc03c5a1797df393ca95
[ "MIT" ]
12
2015-05-15T22:10:36.000Z
2021-12-05T14:21:58.000Z
tests/test_find_forks/test_find_forks.py
ivan2kh/find_forks
409251282a85da48445afc03c5a1797df393ca95
[ "MIT" ]
16
2015-05-15T14:44:33.000Z
2020-11-18T00:54:18.000Z
# coding: utf-8
"""test_find_fork."""
# pylint: disable=no-self-use
from __future__ import absolute_import, division, print_function, unicode_literals

from os import path
import unittest

from six import PY3

from find_forks.__init__ import CONFIG
from find_forks.find_forks import add_forks, determine_names, find_forks, main

from .__init__ import BASEPATH

if PY3:
    from unittest.mock import patch, MagicMock, Mock  # pylint: disable=no-name-in-module
else:
    from mock import patch, MagicMock, Mock
41.577586
146
0.644412
6af1e67adc2134fb57f91c04b0e1763048fc52e2
15,853
py
Python
neutron/agent/l3/dvr_router.py
insequent/neutron
2b1c4f121e3e8ba1c5eb2ba6661bf6326e1507c5
[ "Apache-2.0" ]
null
null
null
neutron/agent/l3/dvr_router.py
insequent/neutron
2b1c4f121e3e8ba1c5eb2ba6661bf6326e1507c5
[ "Apache-2.0" ]
null
null
null
neutron/agent/l3/dvr_router.py
insequent/neutron
2b1c4f121e3e8ba1c5eb2ba6661bf6326e1507c5
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2015 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import binascii
import netaddr

from oslo_log import log as logging
from oslo_utils import excutils

from neutron.agent.l3 import dvr_fip_ns
from neutron.agent.l3 import dvr_snat_ns
from neutron.agent.l3 import router_info as router
from neutron.agent.linux import ip_lib
from neutron.common import constants as l3_constants
from neutron.common import utils as common_utils
from neutron.i18n import _LE

LOG = logging.getLogger(__name__)

# xor-folding mask used for IPv6 rule index
MASK_30 = 0x3fffffff
43.196185
79
0.6272
6af266c300822127b8933d07e4d514dfddafdca3
575
py
Python
sider/warnings.py
PCManticore/sider
cd11b38b2a1bf1ea3600eb287abfe3c2b40c67c1
[ "MIT" ]
19
2015-01-17T18:24:36.000Z
2022-02-05T06:33:41.000Z
sider/warnings.py
PCManticore/sider
cd11b38b2a1bf1ea3600eb287abfe3c2b40c67c1
[ "MIT" ]
5
2016-01-13T14:19:44.000Z
2016-06-01T18:50:36.000Z
sider/warnings.py
PCManticore/sider
cd11b38b2a1bf1ea3600eb287abfe3c2b40c67c1
[ "MIT" ]
6
2015-08-16T10:32:09.000Z
2019-11-29T09:57:06.000Z
""":mod:`sider.warnings` --- Warning categories ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module defines several custom warning category classes. """
25
71
0.683478
6af312eaf5ecd0a0c44737d1362c73e2f2338489
2,512
py
Python
kadal/query.py
Bucolo/Kadal
a0085f15df4f8ebbf5ec4cd4344e207773c6b498
[ "MIT" ]
1
2022-03-12T15:04:01.000Z
2022-03-12T15:04:01.000Z
kadal/query.py
Bucolo/Kadal
a0085f15df4f8ebbf5ec4cd4344e207773c6b498
[ "MIT" ]
null
null
null
kadal/query.py
Bucolo/Kadal
a0085f15df4f8ebbf5ec4cd4344e207773c6b498
[ "MIT" ]
null
null
null
MEDIA_SEARCH = """
query ($search: String, $type: MediaType, $exclude: MediaFormat, $isAdult: Boolean) {
    Media(search: $search, type: $type, format_not: $exclude, isAdult: $isAdult) {
        id
        type
        format
        title { english romaji native }
        synonyms
        status
        description
        startDate { year month day }
        endDate { year month day }
        episodes
        chapters
        volumes
        coverImage { large color }
        bannerImage
        genres
        averageScore
        siteUrl
        isAdult
        nextAiringEpisode { timeUntilAiring episode }
    }
}
"""

MEDIA_BY_ID = """
query ($id: Int, $type: MediaType) {
    Media(id: $id, type: $type) {
        id
        type
        format
        title { english romaji native }
        synonyms
        status
        description
        startDate { year month day }
        endDate { year month day }
        episodes
        chapters
        coverImage { large color }
        bannerImage
        genres
        averageScore
        siteUrl
        isAdult
        nextAiringEpisode { timeUntilAiring episode }
    }
}
"""

MEDIA_PAGED = """
query (
    $id: Int, $page: Int, $perPage: Int, $search: String, $type: MediaType,
    $sort: [MediaSort] = [SEARCH_MATCH], $exclude: MediaFormat, $isAdult: Boolean
) {
    Page(page: $page, perPage: $perPage) {
        media(id: $id, search: $search, type: $type, sort: $sort, format_not: $exclude, isAdult: $isAdult) {
            id
            type
            format
            title { english romaji native }
            synonyms
            status
            description
            startDate { year month day }
            endDate { year month day }
            episodes
            chapters
            volumes
            coverImage { large color }
            bannerImage
            genres
            averageScore
            siteUrl
            isAdult
            popularity
        }
    }
}
"""

USER_SEARCH = """
query ($search: String) {
    User(search: $search) {
        id
        name
        html_about: about(asHtml: true)
        about
        avatar { large }
        bannerImage
        siteUrl
        stats { watchedTime chaptersRead }
    }
}
"""

USER_BY_ID = """
query ($id: Int) {
    User(id: $id) {
        id
        name
        html_about: about(asHtml: true)
        about
        avatar { large }
        bannerImage
        siteUrl
        stats { watchedTime chaptersRead }
    }
}
"""
14.03352
104
0.515525
6af335ed4a4087ef091d5830d5a795b074596342
1,032
py
Python
sandbox/test/testChainop.py
turkeydonkey/nzmath3
a48ae9efcf0d9ad1485c2e9863c948a7f1b20311
[ "BSD-3-Clause" ]
1
2021-05-26T19:22:17.000Z
2021-05-26T19:22:17.000Z
sandbox/test/testChainop.py
turkeydonkey/nzmath3
a48ae9efcf0d9ad1485c2e9863c948a7f1b20311
[ "BSD-3-Clause" ]
null
null
null
sandbox/test/testChainop.py
turkeydonkey/nzmath3
a48ae9efcf0d9ad1485c2e9863c948a7f1b20311
[ "BSD-3-Clause" ]
null
null
null
import unittest
import operator

import sandbox.chainop as chainop


def suite(suffix="Test"):
    suite = unittest.TestSuite()
    all_names = globals()
    for name in all_names:
        if name.endswith(suffix):
            suite.addTest(unittest.makeSuite(all_names[name], "test"))
    return suite


if __name__ == '__main__':
    runner = unittest.TextTestRunner()
    runner.run(suite())
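# NOTE (editorial): the *Test case classes that suite() collects are missing
# from this extract. A minimal hypothetical example in the expected naming
# scheme; chainop.basic_chain and its semantics are assumptions here:
class BasicChainTest(unittest.TestCase):
    def test_add_chain(self):
        # basic_chain(operations, base, index) is assumed to compute the
        # repeated operation, i.e. 3 added to itself 4 times is 12.
        self.assertEqual(chainop.basic_chain((operator.add, operator.add), 3, 4), 12)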
31.272727
98
0.650194
6af3beecef5460df43fd5570a5ba8ce1f6a0d13d
1,131
py
Python
labs_final/lab5/experiments/run_trpo_pendulum.py
mrmotallebi/berkeley-deeprl-bootcamp
9257c693724c38edfa4571e3510667ca168b7ca1
[ "MIT" ]
3
2018-03-26T14:13:11.000Z
2020-07-23T22:26:28.000Z
labs_final/lab5/experiments/run_trpo_pendulum.py
mrmotallebi/berkeley-deeprl-bootcamp
9257c693724c38edfa4571e3510667ca168b7ca1
[ "MIT" ]
null
null
null
labs_final/lab5/experiments/run_trpo_pendulum.py
mrmotallebi/berkeley-deeprl-bootcamp
9257c693724c38edfa4571e3510667ca168b7ca1
[ "MIT" ]
null
null
null
#!/usr/bin/env python

import chainer
from algs import trpo
from env_makers import EnvMaker
from models import GaussianMLPPolicy, MLPBaseline
from utils import SnapshotSaver
import numpy as np
import os
import logger

log_dir = "data/local/trpo-pendulum"

np.random.seed(42)

# Clean up existing logs
os.system("rm -rf {}".format(log_dir))

with logger.session(log_dir):
    env_maker = EnvMaker('Pendulum-v0')
    env = env_maker.make()
    policy = GaussianMLPPolicy(
        observation_space=env.observation_space,
        action_space=env.action_space,
        env_spec=env.spec,
        hidden_sizes=(64, 64),
        hidden_nonlinearity=chainer.functions.tanh,
    )
    baseline = MLPBaseline(
        observation_space=env.observation_space,
        action_space=env.action_space,
        env_spec=env.spec,
        hidden_sizes=(64, 64),
        hidden_nonlinearity=chainer.functions.tanh,
    )
    trpo(
        env=env,
        env_maker=env_maker,
        n_envs=16,
        policy=policy,
        baseline=baseline,
        batch_size=10000,
        n_iters=100,
        snapshot_saver=SnapshotSaver(log_dir),
    )
24.586957
51
0.678161
6af3f2a17f291a65e5f9b17cbe9f19d00752f642
2,098
py
Python
jtyoui/regular/regexengine.py
yy1244/Jtyoui
d3c212ed9d6ffa6b37a8ca49098ab59c89216f09
[ "MIT" ]
1
2019-12-05T09:46:51.000Z
2019-12-05T09:46:51.000Z
jtyoui/regular/regexengine.py
yy1244/Jtyoui
d3c212ed9d6ffa6b37a8ca49098ab59c89216f09
[ "MIT" ]
null
null
null
jtyoui/regular/regexengine.py
yy1244/Jtyoui
d3c212ed9d6ffa6b37a8ca49098ab59c89216f09
[ "MIT" ]
null
null
null
#!/usr/bin/python3.7
# -*- coding: utf-8 -*-
# @Time  : 2019/12/2 10:17
# @Author: [email protected]
""" """
try:
    import xml.etree.cElementTree as et
except ModuleNotFoundError:
    import xml.etree.ElementTree as et
import re
27.246753
87
0.479981
6af4d5fe8b77a49f0cbdce7b5f8e3248894cc3b5
5,117
py
Python
proglearn/transformers.py
rflperry/ProgLearn
9f799b4a8cf2157ba40b04842dc88eaf646e6420
[ "MIT" ]
null
null
null
proglearn/transformers.py
rflperry/ProgLearn
9f799b4a8cf2157ba40b04842dc88eaf646e6420
[ "MIT" ]
1
2020-11-25T19:21:54.000Z
2020-11-25T19:21:54.000Z
proglearn/transformers.py
rflperry/ProgLearn
9f799b4a8cf2157ba40b04842dc88eaf646e6420
[ "MIT" ]
null
null
null
""" Main Author: Will LeVine Corresponding Email: [email protected] """ import keras import numpy as np from sklearn.tree import DecisionTreeClassifier from sklearn.utils.validation import check_array, check_is_fitted, check_X_y from .base import BaseTransformer
26.931579
94
0.587063
6af5766ae43b84c8b76547fb51e5b56cfdb7f3af
9,900
py
Python
morphelia/external/saphire.py
marx-alex/Morphelia
809278b07f1a535789455d54df3cbddc850d609c
[ "MIT" ]
null
null
null
morphelia/external/saphire.py
marx-alex/Morphelia
809278b07f1a535789455d54df3cbddc850d609c
[ "MIT" ]
null
null
null
morphelia/external/saphire.py
marx-alex/Morphelia
809278b07f1a535789455d54df3cbddc850d609c
[ "MIT" ]
null
null
null
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.collections as mcoll
from matplotlib.ticker import MaxNLocator

plt.style.use('seaborn-darkgrid')


def colorline(x, y, z=None, cmap=plt.get_cmap('copper'),
              norm=plt.Normalize(0.0, 1.0), linewidth=3, alpha=1.0, zorder=1):
    """
    Plot a colored line with coordinates x and y
    Optionally specify colors in the array z
    Optionally specify a colormap, a norm function and a line width
    """

    # Default colors equally spaced on [0,1]:
    if z is None:
        z = np.linspace(0.0, 1.0, len(x))

    # Special case if a single number:
    if not hasattr(z, "__iter__"):  # to check for numerical input -- this is a hack
        z = np.array([z])

    z = np.asarray(z)

    segments = make_segments(x, y)
    lc = mcoll.LineCollection(segments, array=z, cmap=cmap, norm=norm,
                              linewidth=linewidth, alpha=alpha, zorder=zorder)

    ax = plt.gca()
    ax.add_collection(lc)

    return lc


def make_segments(x, y):
    """
    Create list of line segments from x and y coordinates, in the correct
    format for LineCollection: an array of the form
    numlines x (points per line) x 2 (x and y) array
    """

    points = np.array([x, y]).T.reshape(-1, 1, 2)
    segments = np.concatenate([points[:-1], points[1:]], axis=1)
    return segments


def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
    '''
    https://stackoverflow.com/a/18926541
    '''
    if isinstance(cmap, str):
        cmap = plt.get_cmap(cmap)
    new_cmap = mpl.colors.LinearSegmentedColormap.from_list(
        'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
        cmap(np.linspace(minval, maxval, n)))
    return new_cmap
34.256055
114
0.58101
6af5ea523e6e4b25159d80c12448780bfd106c8c
4,824
py
Python
account/views.py
Stfuncode/food-beverage-investigator
0fea4943a5c2634068dc04118f83742327937c25
[ "MIT" ]
null
null
null
account/views.py
Stfuncode/food-beverage-investigator
0fea4943a5c2634068dc04118f83742327937c25
[ "MIT" ]
null
null
null
account/views.py
Stfuncode/food-beverage-investigator
0fea4943a5c2634068dc04118f83742327937c25
[ "MIT" ]
null
null
null
import imp
from venv import create
from django.shortcuts import render, redirect
from django.views import View
from django.views.generic import (
    ListView,
)

from account.models import *
from account.forms import *
from data.models import *

from django.contrib.auth import login as auth_login
from django.contrib.auth.models import auth
from django.contrib import messages
from django.contrib.auth.mixins import PermissionRequiredMixin, LoginRequiredMixin

# Create your views here.


def deleteUser(request, event_id):
    event = Account.objects.get(pk=event_id)
    event.delete()
    return redirect('userlist')
33.5
87
0.645522
6af67173be2103fc04ef7a7c51b006d1f866e003
2,697
py
Python
fpds/client.py
mgradowski/aiproject
855332bd982bef2530ad935a209ae8be35963165
[ "MIT" ]
null
null
null
fpds/client.py
mgradowski/aiproject
855332bd982bef2530ad935a209ae8be35963165
[ "MIT" ]
null
null
null
fpds/client.py
mgradowski/aiproject
855332bd982bef2530ad935a209ae8be35963165
[ "MIT" ]
null
null
null
import cv2
import aiohttp
import asyncio
import concurrent.futures
import argparse
import numpy as np


def main():
    parser = argparse.ArgumentParser('fpds.client')
    parser.add_argument('url', type=str, help='WebSocket endpoint of fpds.server e.g. http://localhost:8181/fpds')
    args = parser.parse_args()
    loop = asyncio.get_event_loop()
    task = loop.create_task(amain(args.url))
    try:
        loop.run_until_complete(task)
    except KeyboardInterrupt:
        task.cancel()
        loop.run_until_complete(asyncio.wait_for(task, timeout=None))
    finally:
        loop.close()


if __name__ == '__main__':
    main()
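# NOTE (editorial): amain() is awaited above but its definition is missing
# from this extract; it would need to sit above the __main__ guard. A
# minimal sketch of a WebSocket receive loop with aiohttp (the binary JPEG
# message format is an assumption):
async def amain(url):
    async with aiohttp.ClientSession() as session:
        async with session.ws_connect(url) as ws:
            async for msg in ws:
                if msg.type != aiohttp.WSMsgType.BINARY:
                    continue
                # Decode a JPEG frame pushed by the server and display it.
                frame = cv2.imdecode(np.frombuffer(msg.data, np.uint8), cv2.IMREAD_COLOR)
                cv2.imshow('fpds', frame)
                cv2.waitKey(1)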
35.025974
127
0.668891
6af759aad1d331394cb7f013c9559f17569541f2
3,619
py
Python
Giveme5W1H/extractor/tools/key_value_cache.py
bkrrr/Giveme5W
657738781fe387d76e6e0da35ed009ccf81f4290
[ "Apache-2.0" ]
410
2018-05-02T12:53:02.000Z
2022-03-28T16:11:34.000Z
Giveme5W1H/extractor/tools/key_value_cache.py
bkrrr/Giveme5W
657738781fe387d76e6e0da35ed009ccf81f4290
[ "Apache-2.0" ]
51
2018-05-02T13:53:19.000Z
2022-03-22T00:16:39.000Z
Giveme5W1H/extractor/tools/key_value_cache.py
TU-Berlin/Giveme5W1H
b1586328393a50acde86015d22f78a4c15bf2f34
[ "Apache-2.0" ]
81
2018-05-29T14:03:27.000Z
2022-02-08T08:59:38.000Z
import logging
import os
import pickle
import sys
import threading
import time
from typing import List

from Giveme5W1H.extractor.root import path
from Giveme5W1H.extractor.tools.util import bytes_2_human_readable
30.931624
115
0.585797
6af7f07129b756fc33dfdd705556d009ef89fe63
3,121
py
Python
nsst_translate_corpus.py
AlexanderJenke/nsst
75f6afa39568c72c9c513ac0313db33b80bb67d5
[ "Apache-2.0" ]
null
null
null
nsst_translate_corpus.py
AlexanderJenke/nsst
75f6afa39568c72c9c513ac0313db33b80bb67d5
[ "Apache-2.0" ]
null
null
null
nsst_translate_corpus.py
AlexanderJenke/nsst
75f6afa39568c72c9c513ac0313db33b80bb67d5
[ "Apache-2.0" ]
null
null
null
from argparse import ArgumentParser

from tqdm import tqdm

import NSST
from nsst_translate import best_transition_sequence

if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument("--nsst_file", default="output/nsst_tss20_th4_nSt100_Q0.pkl", help="nsst file")
    parser.add_argument("--src_lang", default="output/europarl-v7.de-en.de.clean")
    parser.add_argument("--tgt_lang", default="output/europarl-v7.de-en.en.clean")
    parser.add_argument("--enforce_n_reg", default=True)
    parser.add_argument("--output", default=f"output/nsst_stat_nreg_100Q0.csv")
    args = parser.parse_args()
    args.enforce_n_final_reg = False

    # load NSST
    nsst = NSST.NSST()
    nsst.load(args.nsst_file)
    args.nsst = nsst

    # open files
    src_file = open(args.src_lang, 'r')
    tgt_file = open(args.tgt_lang, 'r')
    output_file = open(args.output, 'w')

    # iterate over sentences, first 4096 -> test sentences
    for src, tgt, _ in tqdm(list(zip(src_file, tgt_file, range(4096))), desc="Processing sentences"):
        # remove line breaks
        src = src[:-1]
        tgt = tgt[:-1]

        # try to translate
        try:
            # prepare tokenisations
            token_src = [nsst.tokenization_src[word] if word in nsst.tokenization_src else 0
                         for word in src.split(" ") if len(word)]
            token_tgt = [nsst.tokenization_tgt[word] if word in nsst.tokenization_tgt else 0
                         for word in tgt.split(" ") if len(word)]

            # run nsst
            args.input = src
            args.token_src = token_src
            result = best_transition_sequence(args)

            # get best result
            pred = sorted((k for k in result
                           if ('Qf' in args.nsst_file or not args.enforce_n_final_reg or len(k[1]) == 1)
                           and ('Q0' in args.nsst_file or k[0] == -1)),
                          key=lambda x: x[2], reverse=True)[0]
            n_res = len(result)
            q, reg, prob = pred

            # write to csv
            if not len(reg):  # catch empty registers
                continue

            token_pred = [w for w in reg[0].split(' ') if len(w)]
            pred_str = ""
            for t in token_pred:
                pred_str += f"{nsst.tokenization_tgt_lut[int(t)]} "

            token_src_str = ""
            for t in token_src:
                token_src_str += f"{t} "

            token_tgt_str = ""
            for t in token_tgt:
                token_tgt_str += f"{t} "

            token_pred_str = ""
            for t in token_pred:
                token_pred_str += f"{t} "

            print(f"{src};{token_src_str[:-1]};"
                  f"{tgt};{token_tgt_str[:-1]};"
                  f"{pred_str};{token_pred_str[:-1]};"
                  f"{prob};{len(reg)};{n_res}", file=output_file)
            output_file.flush()

        except RuntimeError:
            pass

    # close files
    src_file.close()
    tgt_file.close()
    output_file.close()
32.852632
104
0.544056
6afa2508436ad02c7fe767127789a72b5fa053d8
382
py
Python
10 Days of Statistics/Day 1/Standard Deviation.py
dhyanpatel110/HACKERRANK
949b1ff468ff3487663bf063a8fe6cdfb9dea26b
[ "Apache-2.0" ]
null
null
null
10 Days of Statistics/Day 1/Standard Deviation.py
dhyanpatel110/HACKERRANK
949b1ff468ff3487663bf063a8fe6cdfb9dea26b
[ "Apache-2.0" ]
null
null
null
10 Days of Statistics/Day 1/Standard Deviation.py
dhyanpatel110/HACKERRANK
949b1ff468ff3487663bf063a8fe6cdfb9dea26b
[ "Apache-2.0" ]
null
null
null
# Import library
import math

# Define functions

# Set data
size = int(input())
numbers = list(map(int, input().split()))

# Get standard deviation
print(round(stddev(numbers, size), 1))
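# NOTE (editorial): stddev() is called above but its definition is missing
# from this extract; it belongs under the "Define functions" comment. A
# population standard deviation matching the call signature:
def stddev(values, n):
    mean = sum(values) / n
    variance = sum((x - mean) ** 2 for x in values) / n
    return math.sqrt(variance)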
19.1
47
0.63089
6afb588f82055ac18339fc17c00162ed0a0496d8
314
py
Python
Homework/Hw4/Solution/problem5a.py
jmsevillam/Herramientas-Computacionales-UniAndes
957338873bd6a17201dfd4629c7edd5760e2271d
[ "MIT" ]
null
null
null
Homework/Hw4/Solution/problem5a.py
jmsevillam/Herramientas-Computacionales-UniAndes
957338873bd6a17201dfd4629c7edd5760e2271d
[ "MIT" ]
null
null
null
Homework/Hw4/Solution/problem5a.py
jmsevillam/Herramientas-Computacionales-UniAndes
957338873bd6a17201dfd4629c7edd5760e2271d
[ "MIT" ]
5
2019-05-27T13:35:51.000Z
2020-09-30T15:19:39.000Z
Alice = 'Ti rga eoe esg o h ore"ermetsCmuainls'
Bob = 'hspormdcdsamsaefrtecus Hraina optcoae"'
print(decode(Alice, Bob, ''))
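# NOTE (editorial): decode() is missing from this extract. Interleaving the
# two strings character by character recovers 'This program decodes a
# message for the course "Herramientas Computacionales"', so a recursive
# reconstruction consistent with the three-argument call is:
def decode(a, b, out):
    # Take one character from a, then swap the roles of a and b.
    if not a:
        return out + b
    return decode(b, a[1:], out + a[0])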
24.153846
47
0.630573
6afbbfc0a8b4d96b676b80363b2e541af846b662
7,415
py
Python
pychron/lasers/power/composite_calibration_manager.py
ASUPychron/pychron
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
[ "Apache-2.0" ]
31
2016-03-07T02:38:17.000Z
2022-02-14T18:23:43.000Z
pychron/lasers/power/composite_calibration_manager.py
ASUPychron/pychron
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
[ "Apache-2.0" ]
1,626
2015-01-07T04:52:35.000Z
2022-03-25T19:15:59.000Z
pychron/lasers/power/composite_calibration_manager.py
UIllinoisHALPychron/pychron
f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc
[ "Apache-2.0" ]
26
2015-05-23T00:10:06.000Z
2022-03-07T16:51:57.000Z
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================

# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import HasTraits, Instance, DelegatesTo, Button, List, Any, Float
from traitsui.api import View, Item, VGroup, HGroup, Group, spring, TabularEditor

# ============= standard library imports ========================
import pickle
import os
from numpy import polyval

# ============= local library imports  ==========================
from pychron.managers.manager import Manager
from pychron.database.selectors.power_calibration_selector import (
    PowerCalibrationSelector,
)
from pychron.database.adapters.power_calibration_adapter import PowerCalibrationAdapter
from pychron.paths import paths
from pychron.graph.graph import Graph
from pychron.hardware.meter_calibration import MeterCalibration

"""
    use a dbselector to select data
"""

if __name__ == "__main__":
    ccm = CompositeCalibrationManager()
    ccm.configure_traits()

# ============= EOF =============================================
30.142276
88
0.552124
6afbd0d610e5a63b6a074ba49e684ae0359ba35a
3,957
py
Python
ttt_package/libs/best_move.py
Ipgnosis/tic_tac_toe
e1519b702531965cc647ff37c1c46d72f4b3b24e
[ "BSD-3-Clause" ]
null
null
null
ttt_package/libs/best_move.py
Ipgnosis/tic_tac_toe
e1519b702531965cc647ff37c1c46d72f4b3b24e
[ "BSD-3-Clause" ]
4
2021-03-25T19:52:40.000Z
2021-12-12T17:57:11.000Z
ttt_package/libs/best_move.py
Ipgnosis/tic_tac_toe
e1519b702531965cc647ff37c1c46d72f4b3b24e
[ "BSD-3-Clause" ]
null
null
null
# refactored from make_play to simplify
# by Russell on 3/5/21

#from ttt_package.libs.move_utils import get_open_cells
from ttt_package.libs.compare import get_transposed_games, reorient_games
from ttt_package.libs.calc_game_bound import calc_game_bound
from ttt_package.libs.maxi_min import maximin

# find the best move for this agent, based on prior games in the game_history
42.548387
100
0.703058
6afc188b33bb84dbd980d2429af99225dafac393
805
py
Python
yard/skills/66-python/cookbook/yvhai/demo/mt/raw_thread.py
paser4se/bbxyard
d09bc6efb75618b2cef047bad9c8b835043446cb
[ "Apache-2.0" ]
1
2016-03-29T02:01:58.000Z
2016-03-29T02:01:58.000Z
yard/skills/66-python/cookbook/yvhai/demo/mt/raw_thread.py
paser4se/bbxyard
d09bc6efb75618b2cef047bad9c8b835043446cb
[ "Apache-2.0" ]
18
2019-02-13T09:15:25.000Z
2021-12-09T21:32:13.000Z
yard/skills/66-python/cookbook/yvhai/demo/mt/raw_thread.py
paser4se/bbxyard
d09bc6efb75618b2cef047bad9c8b835043446cb
[ "Apache-2.0" ]
2
2020-07-05T01:01:30.000Z
2020-07-08T22:33:06.000Z
#!/usr/bin/env python3
# python

import _thread
import time

from yvhai.demo.base import YHDemo


if __name__ == '__main__':
    RawThreadDemo.demo()
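# NOTE (editorial): the RawThreadDemo class is missing from this extract and
# would need to sit above the __main__ guard. A minimal sketch consistent
# with the call above (the YHDemo interface is an assumption):
class RawThreadDemo(YHDemo):
    @staticmethod
    def worker(name, delay):
        # Print a heartbeat a few times from the raw _thread worker.
        for _ in range(3):
            time.sleep(delay)
            print('%s: %s' % (name, time.ctime()))

    @staticmethod
    def demo():
        _thread.start_new_thread(RawThreadDemo.worker, ('t1', 0.1))
        _thread.start_new_thread(RawThreadDemo.worker, ('t2', 0.2))
        time.sleep(1)  # give the detached threads time to finish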
20.125
70
0.601242
6afc7c91ed45303d7c201609e1cc6104aa29ad90
3,108
py
Python
rasa/utils/tensorflow/constants.py
praneethgb/rasa
5bf227f165d0b041a367d2c0bbf712ebb6a54792
[ "Apache-2.0" ]
8
2020-09-16T17:22:13.000Z
2022-02-01T00:11:30.000Z
rasa/utils/tensorflow/constants.py
praneethgb/rasa
5bf227f165d0b041a367d2c0bbf712ebb6a54792
[ "Apache-2.0" ]
216
2020-09-20T13:05:58.000Z
2022-03-28T12:10:24.000Z
rasa/utils/tensorflow/constants.py
praneethgb/rasa
5bf227f165d0b041a367d2c0bbf712ebb6a54792
[ "Apache-2.0" ]
1
2022-02-01T18:23:23.000Z
2022-02-01T18:23:23.000Z
# constants for configuration parameters of our tensorflow models

LABEL = "label"
IDS = "ids"
# LABEL_PAD_ID is used to pad multi-label training examples.
# It should be < 0 to avoid index out of bounds errors by tf.one_hot.
LABEL_PAD_ID = -1
HIDDEN_LAYERS_SIZES = "hidden_layers_sizes"
SHARE_HIDDEN_LAYERS = "share_hidden_layers"

TRANSFORMER_SIZE = "transformer_size"
NUM_TRANSFORMER_LAYERS = "number_of_transformer_layers"
NUM_HEADS = "number_of_attention_heads"
UNIDIRECTIONAL_ENCODER = "unidirectional_encoder"
KEY_RELATIVE_ATTENTION = "use_key_relative_attention"
VALUE_RELATIVE_ATTENTION = "use_value_relative_attention"
MAX_RELATIVE_POSITION = "max_relative_position"

BATCH_SIZES = "batch_size"
BATCH_STRATEGY = "batch_strategy"
EPOCHS = "epochs"
RANDOM_SEED = "random_seed"
LEARNING_RATE = "learning_rate"

DENSE_DIMENSION = "dense_dimension"
CONCAT_DIMENSION = "concat_dimension"
EMBEDDING_DIMENSION = "embedding_dimension"
ENCODING_DIMENSION = "encoding_dimension"

SIMILARITY_TYPE = "similarity_type"
LOSS_TYPE = "loss_type"
NUM_NEG = "number_of_negative_examples"
MAX_POS_SIM = "maximum_positive_similarity"
MAX_NEG_SIM = "maximum_negative_similarity"
USE_MAX_NEG_SIM = "use_maximum_negative_similarity"

SCALE_LOSS = "scale_loss"
REGULARIZATION_CONSTANT = "regularization_constant"
NEGATIVE_MARGIN_SCALE = "negative_margin_scale"
DROP_RATE = "drop_rate"
DROP_RATE_ATTENTION = "drop_rate_attention"
DROP_RATE_DIALOGUE = "drop_rate_dialogue"
DROP_RATE_LABEL = "drop_rate_label"
CONSTRAIN_SIMILARITIES = "constrain_similarities"

WEIGHT_SPARSITY = "weight_sparsity"  # Deprecated and superseded by CONNECTION_DENSITY
CONNECTION_DENSITY = "connection_density"

EVAL_NUM_EPOCHS = "evaluate_every_number_of_epochs"
EVAL_NUM_EXAMPLES = "evaluate_on_number_of_examples"

INTENT_CLASSIFICATION = "intent_classification"
ENTITY_RECOGNITION = "entity_recognition"
MASKED_LM = "use_masked_language_model"

SPARSE_INPUT_DROPOUT = "use_sparse_input_dropout"
DENSE_INPUT_DROPOUT = "use_dense_input_dropout"

RANKING_LENGTH = "ranking_length"
MODEL_CONFIDENCE = "model_confidence"

BILOU_FLAG = "BILOU_flag"

RETRIEVAL_INTENT = "retrieval_intent"

USE_TEXT_AS_LABEL = "use_text_as_label"

SOFTMAX = "softmax"
MARGIN = "margin"
AUTO = "auto"
INNER = "inner"
LINEAR_NORM = "linear_norm"
COSINE = "cosine"
CROSS_ENTROPY = "cross_entropy"

BALANCED = "balanced"

SEQUENCE = "sequence"
SEQUENCE_LENGTH = f"{SEQUENCE}_lengths"
SENTENCE = "sentence"

POOLING = "pooling"
MAX_POOLING = "max"
MEAN_POOLING = "mean"

TENSORBOARD_LOG_DIR = "tensorboard_log_directory"
TENSORBOARD_LOG_LEVEL = "tensorboard_log_level"

SEQUENCE_FEATURES = "sequence_features"
SENTENCE_FEATURES = "sentence_features"

FEATURIZERS = "featurizers"
CHECKPOINT_MODEL = "checkpoint_model"

MASK = "mask"

IGNORE_INTENTS_LIST = "ignore_intents_list"
TOLERANCE = "tolerance"
POSITIVE_SCORES_KEY = "positive_scores"
NEGATIVE_SCORES_KEY = "negative_scores"

RANKING_KEY = "label_ranking"
QUERY_INTENT_KEY = "query_intent"
SCORE_KEY = "score"
THRESHOLD_KEY = "threshold"
SEVERITY_KEY = "severity"
NAME = "name"

EPOCH_OVERRIDE = "epoch_override"
27.75
87
0.818855
6afcd2c6547b58f11a5de71fbf337c71913e7438
32,025
py
Python
client/canyons-of-mars/maze.py
GamesCreatorsClub/GCC-Rover
25a69f62a1bb01fc421924ec39f180f50d6a640b
[ "MIT" ]
3
2018-02-13T21:39:55.000Z
2018-04-26T18:17:39.000Z
client/canyons-of-mars/maze.py
GamesCreatorsClub/GCC-Rover
25a69f62a1bb01fc421924ec39f180f50d6a640b
[ "MIT" ]
null
null
null
client/canyons-of-mars/maze.py
GamesCreatorsClub/GCC-Rover
25a69f62a1bb01fc421924ec39f180f50d6a640b
[ "MIT" ]
null
null
null
#
# Copyright 2016-2019 Games Creators Club
#
# MIT License
#

import math

import pyroslib
import pyroslib.logging
import time

from pyroslib.logging import log, LOG_LEVEL_ALWAYS, LOG_LEVEL_INFO, LOG_LEVEL_DEBUG
from rover import WheelOdos, WHEEL_NAMES
from rover import normaiseAngle, angleDiference
from challenge_utils import Action, PID

SQRT2 = math.sqrt(2)
PIhalf = math.pi / 2


if __name__ == "__main__":
    from rover import Radar, RoverState

    radar_values = {0: 10, 45: SQRT2 * 10, 90: 10, 135: SQRT2 * 10, 180: 10, 225: SQRT2 * 10, 270: 10, 315: SQRT2 * 10}
    radar_last_values = {0: 10, 45: SQRT2 * 10, 90: 10, 135: SQRT2 * 10, 180: 10, 225: SQRT2 * 10, 270: 10, 315: SQRT2 * 10}
    radar_status = {0: 0, 45: 0, 90: 0, 135: 0, 180: 0, 225: 0, 270: 0, 315: 0}

    attitude = MazeAttitude()

    radar = Radar(0, radar_values, radar_status, Radar(0, radar_last_values, radar_status))
    state = RoverState(None, None, None, radar, None, None)

    # attitude.calculate(state)
    # printWalls()
    #
    # state.radar.radar[0] = 5
    # state.radar.radar[45] = SQRT2 * 5 * 0.9
    # state.radar.radar[315] = SQRT2 * 17
    # state.radar.radar[270] = SQRT2 * 13
    # state.radar.radar[225] = SQRT2 * 12

    # attitude.calculate(state)
    # printWalls()

    state.radar.radar[180] = 50
    state.radar.radar[315] = 30

    attitude.calculate(state)
    printWalls()
43.75
238
0.600031
6afda5b387926673c59318881a0eddf14e127e55
732
py
Python
src/spaceone/monitoring/conf/proto_conf.py
jean1042/monitoring
0585a1ea52ec13285eaca81cc5b19fa3f7a1fba4
[ "Apache-2.0" ]
5
2020-06-04T23:01:30.000Z
2020-09-09T08:58:51.000Z
src/spaceone/monitoring/conf/proto_conf.py
jean1042/monitoring
0585a1ea52ec13285eaca81cc5b19fa3f7a1fba4
[ "Apache-2.0" ]
8
2021-11-12T08:13:00.000Z
2022-03-28T11:13:12.000Z
src/spaceone/monitoring/conf/proto_conf.py
jean1042/monitoring
0585a1ea52ec13285eaca81cc5b19fa3f7a1fba4
[ "Apache-2.0" ]
7
2020-06-10T01:56:35.000Z
2021-12-02T05:36:21.000Z
PROTO = {
    'spaceone.monitoring.interface.grpc.v1.data_source': ['DataSource'],
    'spaceone.monitoring.interface.grpc.v1.metric': ['Metric'],
    'spaceone.monitoring.interface.grpc.v1.project_alert_config': ['ProjectAlertConfig'],
    'spaceone.monitoring.interface.grpc.v1.escalation_policy': ['EscalationPolicy'],
    'spaceone.monitoring.interface.grpc.v1.event_rule': ['EventRule'],
    'spaceone.monitoring.interface.grpc.v1.webhook': ['Webhook'],
    'spaceone.monitoring.interface.grpc.v1.maintenance_window': ['MaintenanceWindow'],
    'spaceone.monitoring.interface.grpc.v1.alert': ['Alert'],
    'spaceone.monitoring.interface.grpc.v1.note': ['Note'],
    'spaceone.monitoring.interface.grpc.v1.event': ['Event'],
}
56.307692
89
0.730874
6afe84146c4619406b9150aea7be577bdc37e585
2,929
py
Python
tests/delete_regress/models.py
PirosB3/django
9b729ddd8f2040722971ccfb3b12f7d8162633d1
[ "BSD-3-Clause" ]
2
2015-01-21T15:45:07.000Z
2015-02-21T02:38:13.000Z
tests/delete_regress/models.py
PirosB3/django
9b729ddd8f2040722971ccfb3b12f7d8162633d1
[ "BSD-3-Clause" ]
null
null
null
tests/delete_regress/models.py
PirosB3/django
9b729ddd8f2040722971ccfb3b12f7d8162633d1
[ "BSD-3-Clause" ]
1
2020-05-25T08:55:19.000Z
2020-05-25T08:55:19.000Z
from django.contrib.contenttypes.fields import (
    GenericForeignKey, GenericRelation
)
from django.contrib.contenttypes.models import ContentType
from django.db import models

# Models for #15776

# Models for #16128
20.626761
80
0.725162
6afe91d71e827ccc78b53873ca9a15887ff25298
5,550
py
Python
All_Program.py
TheoSaify/Yolo-Detector
f1ac387370982de323a4fc09109c57736b8ce8d6
[ "Apache-2.0" ]
null
null
null
All_Program.py
TheoSaify/Yolo-Detector
f1ac387370982de323a4fc09109c57736b8ce8d6
[ "Apache-2.0" ]
null
null
null
All_Program.py
TheoSaify/Yolo-Detector
f1ac387370982de323a4fc09109c57736b8ce8d6
[ "Apache-2.0" ]
null
null
null
import cv2
from cv2 import *
import numpy as np
from matplotlib import pyplot as plt

###############################SIFT MATCH Function#################################

###################################################################################################

#################################Function#########################

############################################################

###########################MAIN#############################
MIN_MATCH_COUNT = 10

e1 = cv2.getTickCount()

# # initialize the camera
# cam = VideoCapture(0)   # 0 -> index of camera
# s, img1 = cam.read()
# ret = cam.set(3,1920);
# ret = cam.set(4,1080);

# if s:    # frame captured without any errors
#     cv2.namedWindow("output", cv2.WINDOW_NORMAL)
#     cv2.imshow("cam-test",img1)
#     waitKey(0)
#     destroyWindow("cam-test")
#     imwrite("Scene.jpg",img1)  # save image

# del(cam)

# Scene image in Grayscale
# imgray = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)
imgray = cv2.imread('Scene.jpg', 0)  # queryImage

# Reference Piece Image
img1 = cv2.imread('img3.jpg', 0)  # queryImage

# SIFT Algorithm for Object Detection
SIFTMATCH(img1, imgray)

# image de reference
cX, cY = CercleDetection('img3.jpg')
print('cX = %.3f , cY =%.3f' % (cX, cY))

# Image Webcam
cX2, cY2 = CercleDetection('img3.jpg')
print('cX2 = %.3f , cY2 =%.3f' % (cX2, cY2))

deltaX = (cX2 - cX)
deltaY = -(cY2 - cY)

# Write X and Y values to File
file = open("values.txt", "w")
file.write("%.3f \n" % deltaX)
file.write("%.3f \n" % deltaY)
file.close()

# Calculate time of execution
e2 = cv2.getTickCount()
time = (e2 - e1) / cv2.getTickFrequency()
print('time needed to execute')
print(time)
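# NOTE (editorial): SIFTMATCH() and CercleDetection() are called above but
# their definitions are missing from this extract; they belong in the
# banner-marked function sections. A compact Hough-transform sketch of
# CercleDetection returning one circle centre, for illustration only
# (all parameter values are assumptions):
def CercleDetection(fname):
    img = cv2.medianBlur(cv2.imread(fname, 0), 5)
    circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 50,
                               param1=100, param2=30, minRadius=5, maxRadius=0)
    # Use the strongest detection; each entry is (x, y, radius).
    x, y, r = circles[0][0]
    return float(x), float(y)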
29.83871
127
0.571171
6afebab1780e5e05d2dbd1b300b2e8c2a43c36a7
17,003
py
Python
apps/UI_phone_mcdm.py
industrial-optimization-group/researchers-night
68f2fcb8530032e157badda772a795e1f3bb2c4b
[ "MIT" ]
null
null
null
apps/UI_phone_mcdm.py
industrial-optimization-group/researchers-night
68f2fcb8530032e157badda772a795e1f3bb2c4b
[ "MIT" ]
null
null
null
apps/UI_phone_mcdm.py
industrial-optimization-group/researchers-night
68f2fcb8530032e157badda772a795e1f3bb2c4b
[ "MIT" ]
null
null
null
import dash from dash.exceptions import PreventUpdate import dash_core_components as dcc import dash_html_components as html from dash.dependencies import Input, Output, State import dash_bootstrap_components as dbc import dash_table import plotly.express as ex import plotly.graph_objects as go import pandas as pd import numpy as np data = pd.read_csv("./data/Phone_dataset_new.csv", header=0) details = pd.read_csv("./data/Phone_details.csv", header=0) names = details.loc[0] data = data.rename(columns=names) details = details.rename(columns=names) maxi = details.loc[1].astype(int) details_on_card = details.loc[2].astype(int) details_on_card = details.columns[details_on_card == 1] fitness_columns = { "Memory": -1, "RAM": -1, "Camera (MP)": -1, "Price (Euros)": 1, } fitness_data = data[fitness_columns] * maxi[fitness_columns].values external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"] app = dash.Dash( __name__, external_stylesheets=[dbc.themes.LITERA], eager_loading=True, suppress_callback_exceptions=True, ) app.layout = html.Div( children=[ # .container class is fixed, .container.scalable is scalable dbc.Row( [ dbc.Col( html.H1( children="What is your optimal phone?", className="text-center mt-4", ) ) ] ), dbc.Row( [ dbc.Col( children=[ # Top card with details(?) dbc.Card( children=[ dbc.CardBody( [ html.H4( "Researcher's Night Event", className="card-title text-center", ), html.P( ( "This app uses decision support tools to " "quickly and easily find phones which reflect " "the user's desires. Input your preferences " "below. The box on top right shows the phone " "which matches the preferences the best. " "The box on bottom right provides some " "close alternatives." ), className="card-text", ), ] ) ], className="mr-3 ml-3 mb-2 mt-2", ), dbc.Form( [ dbc.FormGroup( children=[ dbc.Label( "Choose desired operating system", html_for="os-choice", ), dbc.RadioItems( options=[ { "label": "Android", "value": "Android", }, {"label": "iOS", "value": "IOS"}, { "label": "No preference", "value": "both", }, ], id="os-choice", value="both", inline=True, # className="text-center mt-4", ), ], className="mr-3 ml-3 mb-2 mt-2", ), dbc.FormGroup( children=[ dbc.Label( "Choose desired Memory capacity (GB)", html_for="memory-choice", ), dcc.Slider( id="memory-choice", min=16, max=256, step=None, included=False, value=256, marks={ 16: "16", 32: "32", 64: "64", 128: "128", 256: "256", }, # className="text-center mt-5", ), ], className="mr-3 ml-3 mb-2 mt-2", ), dbc.FormGroup( children=[ dbc.Label( "Choose desired RAM capacity (GB)", html_for="ram-choice", ), dcc.Slider( id="ram-choice", min=2, max=12, step=1, value=12, included=False, marks={ 2: "2", 3: "3", 4: "4", 5: "5", 6: "6", 7: "7", 8: "8", 9: "9", 10: "10", 11: "11", 12: "12", }, className="text-center mt-5", ), ], className="mr-3 ml-3 mb-2 mt-2", ), dbc.FormGroup( children=[ dbc.Label( "Choose desired camera resolution (MP)", html_for="cam-choice", ), dcc.Slider( id="cam-choice", min=0, max=130, step=1, included=False, value=70, marks={ 0: "0", 10: "10", 30: "30", 50: "50", 70: "70", 90: "90", 110: "110", 130: "130", }, className="text-center mt-5", ), ], className="mr-3 ml-3 mb-2 mt-2", ), dbc.FormGroup( children=[ dbc.Label( "Choose desired budget (Euros)", html_for="cost-choice", ), dcc.Slider( id="cost-choice", min=0, max=1400, step=1, included=False, value=100, marks={ 0: "0", 200: "200", 400: "400", 600: "600", 800: "800", 1000: "1000", 1200: "1200", 1400: "1400", }, className="text-center mt-5", ), ], className="mr-3 ml-3 mb-2 
mt-2", ), ], style={"maxHeight": "560px", "overflow": "auto"}, ), ], width={"size": 5, "offset": 1}, ), dbc.Col( children=[ dbc.Card( children=[ dbc.CardHeader("The best phone for you is:"), dbc.CardBody(id="results"), ], className="mb-4", ), dbc.Card( children=[ dbc.CardHeader("Other great phones:"), dbc.CardBody( id="other-results", children=( [ html.P( html.Span( f"{i}. ", id=f"other-results-list-{i}", ) ) for i in range(2, 6) ] + [ dbc.Tooltip( id=f"other-results-tooltip-{i}", target=f"other-results-list-{i}", placement="right", style={ "maxWidth": 700, "background-color": "white", "color": "white", "border-style": "solid", "border-color": "black", }, ) for i in range(2, 6) ] ), ), ], className="mt-4", ), html.Div(id="tooltips"), ], width={"size": 5, "offset": 0}, className="mb-2 mt-2", ), ] ), dbc.Row([html.Div(id="callback-dump")]), ], ) """@app.callback(Output("tooltips", "children"), [Input("callback-dump", "children")]) def tooltips(tooldict): num = len(tooldict["ids"]) content = [] for i in range(num): content.append(dbc.Tooltip(tooldict["tables"][i], target=tooldict["ids"][i])) return content""" if __name__ == "__main__": app.run_server(debug=False)
43.485934
95
0.283597
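The app record above declares output targets ("results", "other-results-list-*") but ends with its only callback commented out, so nothing wires the sliders to the result cards. Below is a minimal sketch of what such a callback could look like, assuming the "data" frame defined in the record; the column names "OS" and "Model", the helper name "update_results", and the min-max-scaled L1 distance scoring are illustrative assumptions, not the app's actual method.

# Hypothetical sketch: rank phones by closeness to the user's preferences.
# The scoring rule and the "OS"/"Model" column names are assumptions; the
# original callback body is not present in the record.
@app.callback(
    Output("results", "children"),
    [
        Input("os-choice", "value"),
        Input("memory-choice", "value"),
        Input("ram-choice", "value"),
        Input("cam-choice", "value"),
        Input("cost-choice", "value"),
    ],
)
def update_results(os_choice, memory, ram, cam, cost):
    candidates = data if os_choice == "both" else data[data["OS"] == os_choice]
    if candidates.empty:
        raise PreventUpdate
    cols = ["Memory", "RAM", "Camera (MP)", "Price (Euros)"]
    wish = np.array([memory, ram, cam, cost], dtype=float)
    lo, hi = data[cols].min(), data[cols].max()
    # Scale both the wish and the table to [0, 1] before measuring distance.
    scaled = (candidates[cols] - lo) / (hi - lo)
    wish_scaled = (wish - lo.values) / (hi - lo).values
    best = (scaled - wish_scaled).abs().sum(axis=1).idxmin()
    return html.P(str(candidates.loc[best, "Model"]))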
6aff4d7639431aa38a4d3a68b963afee4300b218
3,479
py
Python
pyxon/utils.py
k-j-m/Pyxon
a7f9b3ce524f2441e952c47acd199dd4024d2322
[ "MIT" ]
null
null
null
pyxon/utils.py
k-j-m/Pyxon
a7f9b3ce524f2441e952c47acd199dd4024d2322
[ "MIT" ]
null
null
null
pyxon/utils.py
k-j-m/Pyxon
a7f9b3ce524f2441e952c47acd199dd4024d2322
[ "MIT" ]
null
null
null
import pyxon.decode as pd def unobjectify(obj): """ Turns a python object (must be a class instance) into the corresponding JSON data. Example: >>> @sprop.a # sprop annotations are needed to tell the >>> @sprop.b # unobjectify function what parameter need >>> @sprop.c # to be written out. >>> class Baz(object): pass >>> def __init__(self, a, b, c): >>> self.a = a >>> self.b = b >>> self.c = c >>> >>> baz = Baz(a=1, b=2, c='three') >>> unobjectify(baz) { 'a':1, 'b':2, 'c':'three' } """ cls = obj.__class__ # Create empty data data = {} sprops,cprops = _get_registered_props(cls) # Add simple properties for p in sprops: data[p]=getattr(obj,p) # Add calculated data for p in cprops: f2 = cprops[p][1] data[p]=f2(getattr(obj,p)) data = pd.add_type_property(data, cls) return data def _get_registered_props(cls): """ Returns all of the registered properties for a given class. Recursively calls up to parent classes that are inherited from. """ sprops = pd.class_sprops.get(cls,{}) # [name] cprops = pd.class_cprops.get(cls,{}) # {name:(fn, inv_fn)} if cls in pd.conc_to_abstract: # {ConcreteClass: (AbstractClass, _)} parent_cls = pd.conc_to_abstract[cls][0] parent_sprops, parent_cprops = _get_registered_props(parent_cls) sprops = list(set(sprops).union(set(parent_sprops))) cprops2 = parent_cprops.copy() cprops2.update(cprops) cprops = cprops2 return sprops,cprops def obj(cls): """ Helper function returns a closure turning objectify into a single argument function. This cuts down the amount of code needed in class annotations by removing the need to write lambda functions. """ return lambda d: objectify(d, cls) def objectify(data, cls): """ Function takes JSON data and a target class as arguments and returns an instance of the class created using the JSON data. I'm not sure whether it is a great idea to keep (un)objectify separate from the decode module, since they need to access some of the module-level parameters. """ # Create empty class concrete_cls = pd.conc2(data, cls) obj = concrete_cls() sprops,cprops = _get_registered_props(cls) # Add simple properties from data for p in sprops: setattr(obj, p, data[p]) # Add calculated properties from data for p in cprops: f1 = cprops[p][0] setattr(obj, p, f1(data[p])) return obj def transform_map(kfun=lambda x: x, vfun=lambda x: x): """ Function that takes two functions as arguments and returns a function that applies those functions over all of the keys and values in a map and returns the transformed version of the map. kfun: function applied to all keys (default identity) vfun: function applied to all values (default identity) (k -> k') -> (v -> v') -> ((k, v) -> (k', v')) """ return lambda dct: dict([(kfun(k),vfun(v)) for k,v in dct.items()]) def identity(x): """ Identity function is needed when performing transformations on maps where some operation is needed on either the keys or values, but not both. """ return x
28.284553
72
0.627479
6affc41b95b69a262ac3e3eb689401cbbc182548
19,112
py
Python
AxonDeepSeg/segment.py
sophie685/newfileplzworklord
fbbb03c44dc9e4b0409364b49265f453ac80d3c0
[ "MIT" ]
null
null
null
AxonDeepSeg/segment.py
sophie685/newfileplzworklord
fbbb03c44dc9e4b0409364b49265f453ac80d3c0
[ "MIT" ]
8
2020-09-26T00:42:19.000Z
2022-02-10T00:41:55.000Z
AxonDeepSeg/segment.py
sophie685/newfileplzworklord
fbbb03c44dc9e4b0409364b49265f453ac80d3c0
[ "MIT" ]
null
null
null
# Segmentation script # ------------------- # This script lets the user segment automatically one or many images based on the default segmentation models: SEM or # TEM. # # Maxime Wabartha - 2017-08-30 # Imports import sys from pathlib import Path import json import argparse from argparse import RawTextHelpFormatter from tqdm import tqdm import pkg_resources import AxonDeepSeg import AxonDeepSeg.ads_utils as ads from AxonDeepSeg.apply_model import axon_segmentation from AxonDeepSeg.ads_utils import convert_path # Global variables SEM_DEFAULT_MODEL_NAME = "default_SEM_model_v1" TEM_DEFAULT_MODEL_NAME = "default_TEM_model_v1" MODELS_PATH = pkg_resources.resource_filename('AxonDeepSeg', 'models') MODELS_PATH = Path(MODELS_PATH) default_SEM_path = MODELS_PATH / SEM_DEFAULT_MODEL_NAME default_TEM_path = MODELS_PATH / TEM_DEFAULT_MODEL_NAME default_overlap = 25 # Definition of the functions def segment_image(path_testing_image, path_model, overlap_value, config, resolution_model, acquired_resolution = None, verbosity_level=0): ''' Segment the image located at the path_testing_image location. :param path_testing_image: the path of the image to segment. :param path_model: where to access the model :param overlap_value: the number of pixels to be used for overlap when doing prediction. Higher value means less border effects but more time to perform the segmentation. :param config: dict containing the configuration of the network :param resolution_model: the resolution the model was trained on. :param verbosity_level: Level of verbosity. The higher, the more information is given about the segmentation process. :return: Nothing. ''' # If string, convert to Path objects path_testing_image = convert_path(path_testing_image) path_model = convert_path(path_model) if path_testing_image.exists(): # Extracting the image name and its folder path from the total path. path_parts = path_testing_image.parts acquisition_name = Path(path_parts[-1]) path_acquisition = Path(*path_parts[:-1]) # Get type of model we are using selected_model = path_model.name # Read image img = ads.imread(str(path_testing_image)) # Generate tmp file fp = open(path_acquisition / '__tmp_segment__.png', 'wb+') img_name_original = acquisition_name.stem if selected_model == "default_TEM_model_v1": ads.imwrite(fp,255-img, format='png') else: ads.imwrite(fp, img, format='png') acquisition_name = Path(fp.name).name segmented_image_name = img_name_original + '_seg-axonmyelin' + '.png' # Performing the segmentation axon_segmentation(path_acquisitions_folders=path_acquisition, acquisitions_filenames=[acquisition_name], path_model_folder=path_model, config_dict=config, ckpt_name='model', inference_batch_size=1, overlap_value=overlap_value, segmentations_filenames=segmented_image_name, resampled_resolutions=resolution_model, verbosity_level=verbosity_level, acquired_resolution=acquired_resolution, prediction_proba_activate=False, write_mode=True) if verbosity_level >= 1: print(("Image {0} segmented.".format(path_testing_image))) # Remove temporary file used for the segmentation fp.close() (path_acquisition / '__tmp_segment__.png').unlink() else: print(("The path {0} does not exist.".format(path_testing_image))) return None def segment_folders(path_testing_images_folder, path_model, overlap_value, config, resolution_model, acquired_resolution = None, verbosity_level=0): ''' Segments the images contained in the image folders located in the path_testing_images_folder. 
:param path_testing_images_folder: the folder where all image folders are located (the images to segment are located in those image folders) :param path_model: where to access the model. :param overlap_value: the number of pixels to be used for overlap when doing prediction. Higher value means less border effects but more time to perform the segmentation. :param config: dict containing the configuration of the network :param resolution_model: the resolution the model was trained on. :param verbosity_level: Level of verbosity. The higher, the more information is given about the segmentation process. :return: Nothing. ''' # If string, convert to Path objects path_testing_images_folder = convert_path(path_testing_images_folder) path_model = convert_path(path_model) # Update list of images to segment by selecting only image files (not already segmented or not masks) img_files = [file for file in path_testing_images_folder.iterdir() if (file.suffix.lower() in ('.png','.jpg','.jpeg','.tif','.tiff')) and (not str(file).endswith(('_seg-axonmyelin.png','_seg-axon.png','_seg-myelin.png','mask.png')))] # Pre-processing: convert to png if not already done and adapt to model contrast for file_ in tqdm(img_files, desc="Segmentation..."): print(path_testing_images_folder / file_) try: height, width, _ = ads.imread(str(path_testing_images_folder / file_)).shape except: try: height, width = ads.imread(str(path_testing_images_folder / file_)).shape except Exception as e: raise e image_size = [height, width] minimum_resolution = config["trainingset_patchsize"] * resolution_model / min(image_size) if acquired_resolution < minimum_resolution: print("EXCEPTION: The size of one of the images ({0}x{1}) is too small for the provided pixel size ({2}).\n".format(height, width, acquired_resolution), "The image size must be at least {0}x{0} after resampling to a resolution of {1} to create standard sized patches.\n".format(config["trainingset_patchsize"], resolution_model), "One of the dimensions of the image has a size of {0} after resampling to that resolution.\n".format(round(acquired_resolution * min(image_size) / resolution_model)), "Image file location: {0}".format(str(path_testing_images_folder / file_)) ) sys.exit(2) selected_model = path_model.name # Read image for conversion img = ads.imread(str(path_testing_images_folder / file_)) # Generate tmpfile for segmentation pipeline fp = open(path_testing_images_folder / '__tmp_segment__.png', 'wb+') img_name_original = file_.stem if selected_model == "default_TEM_model_v1": ads.imwrite(fp,255-img, format='png') else: ads.imwrite(fp,img, format='png') acquisition_name = Path(fp.name).name segmented_image_name = img_name_original + '_seg-axonmyelin' + '.png' axon_segmentation(path_acquisitions_folders=path_testing_images_folder, acquisitions_filenames=[acquisition_name], path_model_folder=path_model, config_dict=config, ckpt_name='model', inference_batch_size=1, overlap_value=overlap_value, segmentations_filenames=[segmented_image_name], acquired_resolution=acquired_resolution, verbosity_level=verbosity_level, resampled_resolutions=resolution_model, prediction_proba_activate=False, write_mode=True) if verbosity_level >= 1: tqdm.write("Image {0} segmented.".format(str(path_testing_images_folder / file_))) # Remove temporary file used for the segmentation fp.close() (path_testing_images_folder / '__tmp_segment__.png').unlink() return None def generate_default_parameters(type_acquisition, new_path): ''' Generates the parameters used for segmentation for the default 
model corresponding to the type_model acquisition. :param type_model: String, the type of model to get the parameters from. :param new_path: Path to the model to use. :return: the config dictionary. ''' # If string, convert to Path objects new_path = convert_path(new_path) # Building the path of the requested model if it exists and was supplied, else we load the default model. if type_acquisition == 'SEM': if (new_path is not None) and new_path.exists(): path_model = new_path else: path_model = MODELS_PATH / SEM_DEFAULT_MODEL_NAME elif type_acquisition == 'TEM': if (new_path is not None) and new_path.exists(): path_model = new_path else: path_model = MODELS_PATH / TEM_DEFAULT_MODEL_NAME path_config_file = path_model / 'config_network.json' config = generate_config_dict(path_config_file) return path_model, config def generate_config_dict(path_to_config_file): ''' Generates the dictionary version of the configuration file from the path where it is located. :param path_to_config: relative path where the file config_network.json is located. :return: dict containing the configuration of the network, or None if no configuration file was found at the mentioned path. ''' # If string, convert to Path objects path_to_config_file = convert_path(path_to_config_file) try: with open(path_to_config_file, 'r') as fd: config_network = json.loads(fd.read()) except: raise ValueError("No configuration file available at this path.") return config_network def generate_resolution(type_acquisition, model_input_size): ''' Generates the resolution to use related to the trained modeL. :param type_acquisition: String, "SEM" or "TEM" :param model_input_size: String or Int, the size of the input. :return: Float, the resolution of the model. ''' dict_size = { "SEM":{ "512":0.1, "256":0.2 }, "TEM":{ "512":0.01 } } return dict_size[str(type_acquisition)][str(model_input_size)] # Main loop def main(argv=None): ''' Main loop. :return: Exit code. 0: Success 2: Invalid argument value 3: Missing value or file ''' print(('AxonDeepSeg v.{}'.format(AxonDeepSeg.__version__))) ap = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter) requiredName = ap.add_argument_group('required arguments') # Setting the arguments of the segmentation requiredName.add_argument('-t', '--type', required=True, choices=['SEM','TEM'], help='Type of acquisition to segment. \n'+ 'SEM: scanning electron microscopy samples. \n'+ 'TEM: transmission electron microscopy samples. ') requiredName.add_argument('-i', '--imgpath', required=True, nargs='+', help='Path to the image to segment or path to the folder \n'+ 'where the image(s) to segment is/are located.') ap.add_argument("-m", "--model", required=False, help='Folder where the model is located. \n'+ 'The default SEM model path is: \n'+str(default_SEM_path)+'\n'+ 'The default TEM model path is: \n'+str(default_TEM_path)+'\n') ap.add_argument('-s', '--sizepixel', required=False, help='Pixel size of the image(s) to segment, in micrometers. \n'+ 'If no pixel size is specified, a pixel_size_in_micrometer.txt \n'+ 'file needs to be added to the image folder path. The pixel size \n'+ 'in that file will be used for the segmentation.', default=None) ap.add_argument('-v', '--verbose', required=False, type=int, choices=list(range(0,4)), help='Verbosity level. \n'+ '0 (default) : Displays the progress bar for the segmentation. \n'+ '1: Also displays the path of the image(s) being segmented. \n'+ '2: Also displays the information about the prediction step \n'+ ' for the segmentation of current sample. 
\n'+ '3: Also displays the patch number being processed in the current sample.', default=0) ap.add_argument('-o', '--overlap', required=False, type=int, help='Overlap value (in pixels) of the patches when doing the segmentation. \n'+ 'Higher values of overlap can improve the segmentation at patch borders, \n'+ 'but also increase the segmentation time. \n'+ 'Default value: '+str(default_overlap)+'\n'+ 'Recommended range of values: [10-100]. \n', default=25) ap._action_groups.reverse() # Processing the arguments args = vars(ap.parse_args(argv)) type_ = str(args["type"]) verbosity_level = int(args["verbose"]) overlap_value = int(args["overlap"]) if args["sizepixel"] is not None: psm = float(args["sizepixel"]) else: psm = None path_target_list = [Path(p) for p in args["imgpath"]] new_path = Path(args["model"]) if args["model"] else None # Preparing the arguments to axon_segmentation function path_model, config = generate_default_parameters(type_, new_path) resolution_model = generate_resolution(type_, config["trainingset_patchsize"]) # Tuple of valid file extensions validExtensions = ( ".jpeg", ".jpg", ".tif", ".tiff", ".png" ) # Going through all paths passed into arguments for current_path_target in path_target_list: if not current_path_target.is_dir(): if current_path_target.suffix.lower() in validExtensions: # Handle cases if no resolution is provided on the CLI if psm == None: # Check if a pixel size file exists, if so read it. if (current_path_target.parent / 'pixel_size_in_micrometer.txt').exists(): resolution_file = open(current_path_target.parent / 'pixel_size_in_micrometer.txt', 'r') psm = float(resolution_file.read()) else: print("ERROR: No pixel size is provided, and there is no pixel_size_in_micrometer.txt file in image folder. ", "Please provide a pixel size (using argument -s), or add a pixel_size_in_micrometer.txt file ", "containing the pixel size value." ) sys.exit(3) # Check that image size is large enough for given resolution to reach minimum patch size after resizing. try: height, width, _ = ads.imread(str(current_path_target)).shape except: try: height, width = ads.imread(str(current_path_target)).shape except Exception as e: raise e image_size = [height, width] minimum_resolution = config["trainingset_patchsize"] * resolution_model / min(image_size) if psm < minimum_resolution: print("EXCEPTION: The size of one of the images ({0}x{1}) is too small for the provided pixel size ({2}).\n".format(height, width, psm), "The image size must be at least {0}x{0} after resampling to a resolution of {1} to create standard sized patches.\n".format(config["trainingset_patchsize"], resolution_model), "One of the dimensions of the image has a size of {0} after resampling to that resolution.\n".format(round(psm * min(image_size) / resolution_model)), "Image file location: {0}".format(current_path_target) ) sys.exit(2) # Performing the segmentation over the image segment_image(current_path_target, path_model, overlap_value, config, resolution_model, acquired_resolution=psm, verbosity_level=verbosity_level) print("Segmentation finished.") else: print("The path(s) specified is/are not image(s). Please update the input path(s) and try again.") break else: # Handle cases if no resolution is provided on the CLI if psm == None: # Check if a pixel size file exists, if so read it. 
if (current_path_target / 'pixel_size_in_micrometer.txt').exists(): resolution_file = open(current_path_target / 'pixel_size_in_micrometer.txt', 'r') psm = float(resolution_file.read()) else: print("ERROR: No pixel size is provided, and there is no pixel_size_in_micrometer.txt file in image folder. ", "Please provide a pixel size (using argument -s), or add a pixel_size_in_micrometer.txt file ", "containing the pixel size value." ) sys.exit(3) # Performing the segmentation over all folders in the specified folder containing acquisitions to segment. segment_folders(current_path_target, path_model, overlap_value, config, resolution_model, acquired_resolution=psm, verbosity_level=verbosity_level) print("Segmentation finished.") sys.exit(0) # Calling the script if __name__ == '__main__': main()
45.075472
202
0.604071
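Because main() in the record above accepts an argv list and passes it to ap.parse_args(argv), the segmentation CLI can also be driven programmatically. A small usage sketch follows; the image path and the 0.13 um pixel size are placeholder values, not values from the record.

# Programmatic invocation of the segmentation CLI defined above.
# "image.png" and the 0.13 um pixel size are placeholder values.
from AxonDeepSeg.segment import main

main(["-t", "SEM", "-i", "image.png", "-s", "0.13", "-v", "1"])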
ed0142db547eada6fd1f50b0e7939a47e99944a3
1,746
py
Python
tests/test_hedges.py
aplested/DC_Pyps
da33fc7d0e7365044e368488d1c7cbbae7473cc7
[ "MIT" ]
1
2021-03-25T18:09:25.000Z
2021-03-25T18:09:25.000Z
tests/test_hedges.py
aplested/DC_Pyps
da33fc7d0e7365044e368488d1c7cbbae7473cc7
[ "MIT" ]
null
null
null
tests/test_hedges.py
aplested/DC_Pyps
da33fc7d0e7365044e368488d1c7cbbae7473cc7
[ "MIT" ]
null
null
null
from dcstats.hedges import Hedges_d from dcstats.statistics_EJ import simple_stats as mean_SD import random import math ###tests
29.59322
102
0.689003
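The test bodies after the "###tests" marker are not included in this record. Below is a sketch of what one test for Hedges_d could look like; only the imports come from the record, while the constructor arguments, the hedges_d_unbiased() method, and the .d attribute are assumptions about the library's API.

# Hypothetical test sketch; Hedges_d's exact interface is an assumption.
def test_hedges_d_on_shifted_gaussians():
    random.seed(42)
    sample1 = [random.gauss(0.0, 1.0) for _ in range(200)]
    sample2 = [random.gauss(0.5, 1.0) for _ in range(200)]
    h = Hedges_d(sample1, sample2)
    h.hedges_d_unbiased()  # assumed method name
    # Two unit-variance Gaussians 0.5 apart should give an effect size near 0.5.
    assert abs(h.d - 0.5) < 0.2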
ed0157759bef39b622e00be4f990c696bc0f3dd8
110
py
Python
src/FYP/fifaRecords/urls.py
MustafaAbbas110/FinalProject
30d371f06a8a1875285cfd4a8940ca3610ec1274
[ "BSD-3-Clause" ]
null
null
null
src/FYP/fifaRecords/urls.py
MustafaAbbas110/FinalProject
30d371f06a8a1875285cfd4a8940ca3610ec1274
[ "BSD-3-Clause" ]
null
null
null
src/FYP/fifaRecords/urls.py
MustafaAbbas110/FinalProject
30d371f06a8a1875285cfd4a8940ca3610ec1274
[ "BSD-3-Clause" ]
null
null
null
from django.urls import path from . import views urlpatterns = [ path('', views.Records, name ="fRec"), ]
18.333333
42
0.672727
ed0176bb36b001f6300ef33bd058b934c1c2ff34
2,022
py
Python
spacy_transformers/tests/regression/test_spacy_issue6401.py
KennethEnevoldsen/spacy-transformers
fa39a94ba276ae3681d14a4b376ea50fadd574b3
[ "MIT" ]
null
null
null
spacy_transformers/tests/regression/test_spacy_issue6401.py
KennethEnevoldsen/spacy-transformers
fa39a94ba276ae3681d14a4b376ea50fadd574b3
[ "MIT" ]
null
null
null
spacy_transformers/tests/regression/test_spacy_issue6401.py
KennethEnevoldsen/spacy-transformers
fa39a94ba276ae3681d14a4b376ea50fadd574b3
[ "MIT" ]
null
null
null
import pytest from spacy.training.example import Example from spacy.util import make_tempdir from spacy import util from thinc.api import Config TRAIN_DATA = [ ("I'm so happy.", {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}}), ("I'm so angry", {"cats": {"POSITIVE": 0.0, "NEGATIVE": 1.0}}), ] cfg_string = """ [nlp] lang = "en" pipeline = ["transformer","textcat"] [components] [components.textcat] factory = "textcat" [components.textcat.model] @architectures = "spacy.TextCatEnsemble.v2" [components.textcat.model.tok2vec] @architectures = "spacy-transformers.TransformerListener.v1" grad_factor = 1.0 [components.textcat.model.tok2vec.pooling] @layers = "reduce_mean.v1" [components.transformer] factory = "transformer" """ # Xfail this until the new spaCy rc is up.
29.304348
85
0.678536
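The test that consumes cfg_string and TRAIN_DATA is absent from the record; the trailing comment says it should be xfailed until the new spaCy release candidate. A sketch of such a regression test, under stated assumptions: building the pipeline from the config and running a few update steps uses spaCy v3's public training API, but the original test's exact assertions are unknown.

# Sketch of the missing regression test; marked xfail per the comment above.
@pytest.mark.xfail(reason="waiting for the new spaCy release candidate")
def test_transformer_textcat_roundtrip():
    config = Config().from_str(cfg_string)
    nlp = util.load_model_from_config(config, auto_fill=True, validate=True)
    examples = [
        Example.from_dict(nlp.make_doc(text), annots) for text, annots in TRAIN_DATA
    ]
    nlp.initialize(lambda: examples)
    for _ in range(2):
        nlp.update(examples)
    # Saving the trained pipeline should succeed without errors.
    with make_tempdir() as tmp_dir:
        nlp.to_disk(tmp_dir)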
ed0376f91f0c41a8fa993fc5f6223d8bbb5eb7cb
712
py
Python
hydra/client/repl.py
rpacholek/hydra
60e3c2eec5ab1fd1dde8e510baa5175173c66a6a
[ "MIT" ]
null
null
null
hydra/client/repl.py
rpacholek/hydra
60e3c2eec5ab1fd1dde8e510baa5175173c66a6a
[ "MIT" ]
null
null
null
hydra/client/repl.py
rpacholek/hydra
60e3c2eec5ab1fd1dde8e510baa5175173c66a6a
[ "MIT" ]
null
null
null
import asyncio from ..core.common.io import input from .action_creator import ActionCreator
28.48
62
0.58427
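Only the imports of this module survive in the record. Below is a minimal sketch of the async read-eval loop such a REPL module could contain; the coroutine name, the assumption that the imported input() is awaitable, the ActionCreator.create(line) factory method, and the connection parameter are all hypothetical.

# Hypothetical sketch of the REPL body; ActionCreator's interface is assumed.
async def repl(connection):
    creator = ActionCreator()
    while True:
        line = await input("hydra> ")  # assumes the imported input is async
        if line.strip() in ("quit", "exit"):
            break
        action = creator.create(line)  # assumed factory method
        if action is not None:
            await connection.send(action)  # "connection" is hypothetical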
ed037d47b7c87bc348767b05b7307204b77059ed
35,606
py
Python
train_dv3.py
drat/Neural-Voice-Cloning-With-Few-Samples
4febde43ccc143fc88d74d5fa0c5a117636778b4
[ "MIT" ]
361
2018-08-17T14:37:29.000Z
2022-03-15T13:04:16.000Z
train_dv3.py
drat/Neural-Voice-Cloning-With-Few-Samples
4febde43ccc143fc88d74d5fa0c5a117636778b4
[ "MIT" ]
22
2018-11-25T13:42:26.000Z
2020-04-29T05:16:25.000Z
train_dv3.py
drat/Neural-Voice-Cloning-With-Few-Samples
4febde43ccc143fc88d74d5fa0c5a117636778b4
[ "MIT" ]
121
2018-08-30T03:53:09.000Z
2022-03-25T09:03:17.000Z
"""Trainining script for seq2seq text-to-speech synthesis model. usage: train.py [options] options: --data-root=<dir> Directory contains preprocessed features. --checkpoint-dir=<dir> Directory where to save model checkpoints [default: checkpoints]. --hparams=<parmas> Hyper parameters [default: ]. --checkpoint=<path> Restore model from checkpoint path if given. --checkpoint-seq2seq=<path> Restore seq2seq model from checkpoint path. --checkpoint-postnet=<path> Restore postnet model from checkpoint path. --train-seq2seq-only Train only seq2seq model. --train-postnet-only Train only postnet model. --restore-parts=<path> Restore part of the model. --log-event-path=<name> Log event path. --reset-optimizer Reset optimizer. --load-embedding=<path> Load embedding from checkpoint. --speaker-id=<N> Use specific speaker of data in case for multi-speaker datasets. -h, --help Show this help message and exit """ from docopt import docopt import sys from os.path import dirname, join from tqdm import tqdm, trange from datetime import datetime # The deepvoice3 model from dv3.deepvoice3_pytorch import frontend, builder import dv3.audio import dv3.lrschedule import torch from torch.utils import data as data_utils from torch.autograd import Variable from torch import nn from torch import optim import torch.backends.cudnn as cudnn from torch.utils import data as data_utils from torch.utils.data.sampler import Sampler import numpy as np from numba import jit from nnmnkwii.datasets import FileSourceDataset, FileDataSource from os.path import join, expanduser import random import librosa.display from matplotlib import pyplot as plt import sys import os from tensorboardX import SummaryWriter from matplotlib import cm from warnings import warn from dv3.hparams import hparams, hparams_debug_string fs = hparams.sample_rate global_step = 0 global_epoch = 0 use_cuda = torch.cuda.is_available() if use_cuda: cudnn.benchmark = False _frontend = None # to be set later def sequence_mask(sequence_length, max_len=None): if max_len is None: max_len = sequence_length.data.max() batch_size = sequence_length.size(0) seq_range = torch.arange(0, max_len).long() seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len) seq_range_expand = Variable(seq_range_expand) if sequence_length.is_cuda: seq_range_expand = seq_range_expand.cuda() seq_length_expand = sequence_length.unsqueeze(1) \ .expand_as(seq_range_expand) return (seq_range_expand < seq_length_expand).float() def collate_fn(batch): """Create batch""" r = hparams.outputs_per_step downsample_step = hparams.downsample_step multi_speaker = len(batch[0]) == 4 # Lengths input_lengths = [len(x[0]) for x in batch] max_input_len = max(input_lengths) target_lengths = [len(x[1]) for x in batch] max_target_len = max(target_lengths) if max_target_len % r != 0: max_target_len += r - max_target_len % r assert max_target_len % r == 0 if max_target_len % downsample_step != 0: max_target_len += downsample_step - max_target_len % downsample_step assert max_target_len % downsample_step == 0 # Set 0 for zero beginning padding # imitates initial decoder states b_pad = r max_target_len += b_pad * downsample_step a = np.array([_pad(x[0], max_input_len) for x in batch], dtype=np.int) x_batch = torch.LongTensor(a) input_lengths = torch.LongTensor(input_lengths) target_lengths = torch.LongTensor(target_lengths) b = np.array([_pad_2d(x[1], max_target_len, b_pad=b_pad) for x in batch], dtype=np.float32) mel_batch = torch.FloatTensor(b) c = np.array([_pad_2d(x[2], max_target_len, b_pad=b_pad) 
for x in batch], dtype=np.float32) y_batch = torch.FloatTensor(c) # text positions text_positions = np.array([_pad(np.arange(1, len(x[0]) + 1), max_input_len) for x in batch], dtype=np.int) text_positions = torch.LongTensor(text_positions) max_decoder_target_len = max_target_len // r // downsample_step # frame positions s, e = 1, max_decoder_target_len + 1 # if b_pad > 0: # s, e = s - 1, e - 1 frame_positions = torch.arange(s, e).long().unsqueeze(0).expand( len(batch), max_decoder_target_len) # done flags done = np.array([_pad(np.zeros(len(x[1]) // r // downsample_step - 1), max_decoder_target_len, constant_values=1) for x in batch]) done = torch.FloatTensor(done).unsqueeze(-1) if multi_speaker: speaker_ids = torch.LongTensor([x[3] for x in batch]) else: speaker_ids = None return x_batch, input_lengths, mel_batch, y_batch, \ (text_positions, frame_positions), done, target_lengths, speaker_ids # https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/3 if __name__ == "__main__": args = docopt(__doc__) print("Command line args:\n", args) checkpoint_dir = args["--checkpoint-dir"] checkpoint_path = args["--checkpoint"] checkpoint_seq2seq_path = args["--checkpoint-seq2seq"] checkpoint_postnet_path = args["--checkpoint-postnet"] load_embedding = args["--load-embedding"] checkpoint_restore_parts = args["--restore-parts"] speaker_id = args["--speaker-id"] speaker_id = int(speaker_id) if speaker_id is not None else None data_root = args["--data-root"] if data_root is None: data_root = join(dirname(__file__), "data", "ljspeech") log_event_path = args["--log-event-path"] reset_optimizer = args["--reset-optimizer"] # Which model to be trained train_seq2seq = args["--train-seq2seq-only"] train_postnet = args["--train-postnet-only"] # train both if not specified if not train_seq2seq and not train_postnet: print("Training whole model") train_seq2seq, train_postnet = True, True if train_seq2seq: print("Training seq2seq model") elif train_postnet: print("Training postnet model") else: assert False, "must be specified wrong args" # Override hyper parameters hparams.parse(args["--hparams"]) print(hparams_debug_string()) assert hparams.name == "deepvoice3" # Presets if hparams.preset is not None and hparams.preset != "": preset = hparams.presets[hparams.preset] import json hparams.parse_json(json.dumps(preset)) print("Override hyper parameters with preset \"{}\": {}".format( hparams.preset, json.dumps(preset, indent=4))) _frontend = getattr(frontend, hparams.frontend) os.makedirs(checkpoint_dir, exist_ok=True) # Input dataset definitions X = FileSourceDataset(TextDataSource(data_root, speaker_id)) Mel = FileSourceDataset(MelSpecDataSource(data_root, speaker_id)) Y = FileSourceDataset(LinearSpecDataSource(data_root, speaker_id)) # Prepare sampler frame_lengths = Mel.file_data_source.frame_lengths sampler = PartialyRandomizedSimilarTimeLengthSampler( frame_lengths, batch_size=hparams.batch_size) # Dataset and Dataloader setup dataset = PyTorchDataset(X, Mel, Y) data_loader = data_utils.DataLoader( dataset, batch_size=hparams.batch_size, num_workers=hparams.num_workers, sampler=sampler, collate_fn=collate_fn, pin_memory=hparams.pin_memory) print("dataloader_prepared") # Model model = build_model() if use_cuda: model = model.cuda() optimizer = optim.Adam(model.get_trainable_parameters(), lr=hparams.initial_learning_rate, betas=( hparams.adam_beta1, hparams.adam_beta2), eps=hparams.adam_eps, weight_decay=hparams.weight_decay) if checkpoint_restore_parts is not None: 
restore_parts(checkpoint_restore_parts, model) # Load checkpoints if checkpoint_postnet_path is not None: load_checkpoint(checkpoint_postnet_path, model.postnet, optimizer, reset_optimizer) if checkpoint_seq2seq_path is not None: load_checkpoint(checkpoint_seq2seq_path, model.seq2seq, optimizer, reset_optimizer) if checkpoint_path is not None: load_checkpoint(checkpoint_path, model, optimizer, reset_optimizer) # Load embedding if load_embedding is not None: print("Loading embedding from {}".format(load_embedding)) _load_embedding(load_embedding, model) # Setup summary writer for tensorboard if log_event_path is None: log_event_path = "log/run-test" + str(datetime.now()).replace(" ", "_") print("Los event path: {}".format(log_event_path)) writer = SummaryWriter(log_dir=log_event_path) # Train! try: train(model, data_loader, optimizer, writer, init_lr=hparams.initial_learning_rate, checkpoint_dir=checkpoint_dir, checkpoint_interval=hparams.checkpoint_interval, nepochs=hparams.nepochs, clip_thresh=hparams.clip_thresh, train_seq2seq=train_seq2seq, train_postnet=train_postnet) except KeyboardInterrupt: save_checkpoint( model, optimizer, global_step, checkpoint_dir, global_epoch, train_seq2seq, train_postnet) print("Finished") sys.exit(0)
37.559072
102
0.63068
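collate_fn in the record above calls _pad and _pad_2d, whose definitions are not included. The sketches below are consistent with the call sites (right-padding 1-D token sequences, and padding 2-D spectrograms with an optional beginning pad), assuming NumPy semantics; the original implementations may differ.

# Sketches of the padding helpers used by collate_fn; signatures inferred
# from the call sites above, implementations are assumptions.
def _pad(seq, max_len, constant_values=0):
    return np.pad(seq, (0, max_len - len(seq)),
                  mode="constant", constant_values=constant_values)


def _pad_2d(x, max_len, b_pad=0):
    return np.pad(x, [(b_pad, max_len - len(x) - b_pad), (0, 0)],
                  mode="constant", constant_values=0)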
ed03eb092480421cebe7ff1098718fc83eac9aac
3,324
py
Python
magic_mirror.py
alcinnz/Historical-Twin
54a9ab5dc130aaeb2e00058bbaeace7377e2ff3d
[ "MIT" ]
1
2018-08-16T10:06:21.000Z
2018-08-16T10:06:21.000Z
magic_mirror.py
alcinnz/Historical-Twin
54a9ab5dc130aaeb2e00058bbaeace7377e2ff3d
[ "MIT" ]
null
null
null
magic_mirror.py
alcinnz/Historical-Twin
54a9ab5dc130aaeb2e00058bbaeace7377e2ff3d
[ "MIT" ]
null
null
null
#! /usr/bin/python2 import time start = time.time() import pygame, numpy import pygame.camera # Init display screen = pygame.display.set_mode((0,0), pygame.FULLSCREEN) pygame.display.set_caption("Magic Mirror") #pygame.mouse.set_visible(False) # Init font pygame.font.init() font_colour = 16, 117, 186 fonts = {40: pygame.font.Font("Futura.ttc", 40)} # Init AI import recognition import sys, os index = recognition.MultiBinaryTree() imgdir = sys.argv[1] if len(sys.argv) > 1 else "images" photo_samples = [] screen.blit(write("Loading index... %fs" % (time.time() - start)), (0,0)) pygame.display.flip() with open(os.path.join(imgdir, "index.tsv")) as f: for line in f: line = line.strip().split("\t") img = os.path.join(imgdir, line[0]) description = numpy.array([float(n) for n in line[1:]]) index.insert(description, img) screen.blit(write("Loading images... %fs" % (time.time() - start)), (0,50)) pygame.display.flip() for img in os.listdir(os.path.join(imgdir, "thumbnails")): photo_samples.append(pygame.image.load(os.path.join(imgdir, "thumbnails", img))) # Init clock clock = pygame.time.Clock() # Init camera pygame.camera.init() cameras = pygame.camera.list_cameras() if not cameras: pygame.quit() print "No cameras found, exiting!" sys.exit(1) camera = pygame.camera.Camera(cameras[0]) camera.start() # Mainloop if __name__ == "__main__": main()
28.655172
87
0.653129
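The script blits write(...) during loading, but the helper's definition is not in the record. A sketch consistent with the fonts cache and font_colour defined above; the lazy caching by font size is an assumption.

# Sketch of the missing text-rendering helper; caching-by-size is an assumption.
def write(text, size=40):
    if size not in fonts:
        fonts[size] = pygame.font.Font("Futura.ttc", size)
    return fonts[size].render(text, True, font_colour)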
ed0441122c358fac6781aee918c51f234c854920
288
py
Python
resolwe/__init__.py
plojyon/resolwe
1bee6f0860fdd087534adf1680e9350d79ab97cf
[ "Apache-2.0" ]
27
2015-12-07T18:29:12.000Z
2022-03-16T08:01:47.000Z
resolwe/__init__.py
plojyon/resolwe
1bee6f0860fdd087534adf1680e9350d79ab97cf
[ "Apache-2.0" ]
681
2015-12-01T11:52:24.000Z
2022-03-21T07:43:37.000Z
resolwe/__init__.py
plojyon/resolwe
1bee6f0860fdd087534adf1680e9350d79ab97cf
[ "Apache-2.0" ]
28
2015-12-01T08:32:57.000Z
2021-12-14T00:04:16.000Z
""".. Ignore pydocstyle D400. ======= Resolwe ======= Open source enterprise dataflow engine in Django. """ from resolwe.__about__ import ( # noqa: F401 __author__, __copyright__, __email__, __license__, __summary__, __title__, __url__, __version__, )
14.4
49
0.638889
ed04e3a19994480a40ef35eabbb8a7e09343ee2c
8,898
py
Python
audio_som64_u_grupo1.py
andremsouza/swine_sound_analysis
5583bf91b18e8ad2dcaccb30a94c134e2eab34a5
[ "MIT" ]
null
null
null
audio_som64_u_grupo1.py
andremsouza/swine_sound_analysis
5583bf91b18e8ad2dcaccb30a94c134e2eab34a5
[ "MIT" ]
1
2021-01-20T01:56:42.000Z
2021-01-20T01:56:42.000Z
audio_som64_u_grupo1.py
andremsouza/swine_sound_analysis
5583bf91b18e8ad2dcaccb30a94c134e2eab34a5
[ "MIT" ]
null
null
null
# %% [markdown] # # Testing python-som with audio dataset # %% [markdown] # # Imports # %% import matplotlib.pyplot as plt # import librosa as lr # import librosa.display as lrdisp import numpy as np import pandas as pd import pickle import seaborn as sns import sklearn.preprocessing from python_som import SOM FILE_PREFIX = 'som64_u_grupo1' # %% [markdown] # # Loading dataset # %% df = pd.read_csv('features_means.csv', index_col=0, verbose=True) df.index = pd.to_datetime(df.index) df['rac'] = False df.loc['2020-09-22':, 'rac'] = True # type: ignore df.sort_index(inplace=True) # %% [markdown] # ## Checking for and dropping duplicates # %% # Resetting index for duplicate analysis df.reset_index(inplace=True) print("Duplicates by filename:", df.duplicated(subset=['file_name']).value_counts(), sep='\n') df.drop_duplicates(subset=['file_name'], inplace=True) print("Duplicates by (datetime, ala, grupo):", df.duplicated(subset=['datetime', 'ala', 'grupo']).value_counts(), sep='\n') df.drop_duplicates(subset=['datetime', 'ala', 'grupo'], inplace=True) # Rebuilding dataframe index df.set_index('datetime', inplace=True) # %% # Filtering dataset by 'group' df = df[df['grupo'] == 1] # %% # Dropping tail of dataset for class balancing # tail_size = abs( # len(df[df['rac'].astype(int) == 1]) - len(df[df['rac'].astype(int) == 0])) # df.drop(df.tail(tail_size).index, inplace=True) # %% [markdown] # ## Visualizing distribution of sample dates # %% df_tmp = pd.DataFrame(df['file_name'].resample('1D').count()) df_tmp['count'] = df_tmp['file_name'] del df_tmp['file_name'] df_tmp['rac'] = False df_tmp.loc['2020-09-22':, 'rac'] = True # type: ignore plt.figure(figsize=(10, 10)) sns.set(style="whitegrid", palette=sns.color_palette("muted", n_colors=6, desat=1.0)) sns.barplot(y=df_tmp.index, x=df_tmp['count'], hue=df_tmp['rac']) plt.draw() df_tmp = pd.DataFrame(df['file_name'].resample('1H').count()) df_tmp['count'] = df_tmp['file_name'] del df_tmp['file_name'] df_tmp['rac'] = False df_tmp.loc['2020-09-22':, 'rac'] = True # type: ignore df_tmp = df_tmp.reset_index() df_tmp['hour'] = df_tmp['datetime'].dt.hour plt.figure(figsize=(10, 10)) sns.set(style="whitegrid", palette=sns.color_palette("muted", n_colors=6, desat=1.0)) sns.barplot(y=df_tmp['hour'], x=df_tmp['count'], hue=df_tmp['rac'], orient='h') plt.draw() # %% df_melt = pd.melt(df, value_vars=['rac'], value_name='ractopamine') plt.figure(figsize=(10, 10)) sns.set(style="whitegrid", palette=sns.color_palette("muted", n_colors=6, desat=1.0)) ax = sns.countplot(data=df_melt, x='ractopamine', hue='ractopamine') for p in ax.patches: ax.annotate(f'\n{p.get_height()}', (p.get_x() + 0.2, p.get_height()), ha='center', va='top', color='white', size=18) plt.draw() # %% # using sklearn's MinMaxScaler scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(0, 1)) df_train = df.iloc[:, 3:-1].copy() df_train = scaler.fit_transform(df_train) # %% # Defining first element of SOM shape # Second element will be assigned based on the ratio between the # first two principal components of the train dataset som_x: int = 64 try: with open(f'./{FILE_PREFIX}.obj', 'rb') as f: som = pickle.load(f) except FileNotFoundError: som = SOM(x=som_x, y=None, input_len=df_train.shape[1], learning_rate=0.5, neighborhood_radius=1.0, neighborhood_function='gaussian', cyclic_x=True, cyclic_y=True, data=df_train) # Training SOM som.weight_initialization(mode='linear', data=df_train) som.train(data=df_train, mode='random', verbose=True) with open(f'./{FILE_PREFIX}.obj', 'wb') as f: 
pickle.dump(som, f) # %% som_x, som_y = som.get_shape() print('SOM shape:', (som_x, som_y)) # %% # Visualizing distance matrix and activation matrix umatrix = som.distance_matrix() fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 9)) sns.heatmap(umatrix.T, cmap='bone_r', ax=ax1, robust=True) sns.heatmap(som.activation_matrix(data=df_train).T, cmap='mako', ax=ax2, robust=True) ax1.invert_yaxis() ax2.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix_activation.png', bbox_inches='tight', transparent=True) plt.draw() # %% # Visualizing distance matrix anc activation matrix separately fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(umatrix.T, cmap='bone_r', robust=True) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix.png', bbox_inches='tight', transparent=True) fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(som.activation_matrix(data=df_train).T, cmap='mako', robust=True) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_activation_matrix.png', bbox_inches='tight', transparent=True) # %% [markdown] # ## Visualizing distribution of features # %% for column in df.iloc[:, 3:-1].columns: hmap = som.get_weights()[:, :, df.iloc[:, 3:-1].columns.get_loc(column)].T fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, robust=True, cmap='BrBG') ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.close(fig=fig) # %% [markdown] # ## Visualizing distribution of audios by metadata (day, hour, ...) # Each node is colorized according to its most frequent label # %% df['days'] = df.index.date df['days'] = (df['days'] - df['days'][0]) df['days'] = df['days'].apply(lambda x: x.days) df['hour'] = df.index.hour # %% # Visualizing 'rac' distribution class_assignments = som.label_map(np.array(df_train), np.array(df['rac'])) hmap = np.zeros((som_x, som_y)) for i, j in sorted(class_assignments.keys()): try: hmap[i][j] = class_assignments[(i, j)].most_common()[0][0] + 1 except Exception: continue hmap = hmap.T fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, cmap=sns.color_palette(palette=["#000000", "blue", "orange"], n_colors=3), cbar_kws={'ticks': [0, 1, 2]}) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_rac.png', bbox_inches='tight', transparent=True) plt.show() # %% # Visualizing by 'grupo' print(df.groupby('grupo')['rac'].count()) column = 'grupo' class_assignments = som.label_map(np.array(df_train), np.array(df[column])) hmap = np.zeros((som_x, som_y)) for i, j in sorted(class_assignments.keys()): try: hmap[i][j] = class_assignments[(i, j)].most_common()[0][0] except Exception: hmap[i][j] = 0 hmap = hmap.T fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, cmap=sns.color_palette(palette=["#000000", "blue", "orange"], n_colors=3), cbar_kws={'ticks': [0, 1, 2]}) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.show() # %% # Visualizing by 'days' print(df.groupby('days')['rac'].count()) column = 'days' class_assignments = som.label_map(np.array(df_train), np.array(df[column])) hmap = np.zeros((som_x, som_y)) for i, j in sorted(class_assignments.keys()): try: hmap[i][j] = class_assignments[(i, j)].most_common()[0][0] except Exception: hmap[i][j] = -1 hmap = hmap.T fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, cmap='viridis') ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', 
transparent=True) plt.show() # %% # Visualizing by 'hour' print(df.groupby('hour')['rac'].count()) column = 'hour' class_assignments = som.label_map(np.array(df_train), np.array(df[column])) hmap = np.zeros((som_x, som_y)) for i, j in sorted(class_assignments.keys()): try: hmap[i][j] = class_assignments[(i, j)].most_common()[0][0] except Exception: hmap[i][j] = -1 hmap = hmap.T fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, cmap=sns.diverging_palette(150, 250, s=100, l=20, sep=1, n=26, center='light'), center=12) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.show() # %%
30.682759
80
0.615532
ed0683361fef86b73c36e55f72fcd846b4e5dd8c
1,019
py
Python
footy/engine/UpdateEngine.py
dallinb/footy
d6879481a85b4a84023805bf29bd7dff32afa67f
[ "BSD-3-Clause" ]
2
2020-08-27T17:59:13.000Z
2021-10-10T02:26:20.000Z
footy/engine/UpdateEngine.py
FootyStats/footy
d6879481a85b4a84023805bf29bd7dff32afa67f
[ "BSD-3-Clause" ]
32
2020-08-24T15:01:57.000Z
2022-03-12T00:47:02.000Z
footy/engine/UpdateEngine.py
dallinb/footy
d6879481a85b4a84023805bf29bd7dff32afa67f
[ "BSD-3-Clause" ]
null
null
null
"""Prediction Engine - Update the data model with the most resent fixtures and results.""" from footy.domain import Competition
29.970588
109
0.649657
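The record keeps this module's docstring and import but not the class body. A skeletal sketch of what the engine could expose follows; the class name (taken from the file name) and the update(competition) contract are assumptions inferred from the docstring and the Competition import.

# Hypothetical skeleton; class and method names are assumptions.
class UpdateEngine:
    """Refresh a competition with the most recent fixtures and results."""

    def update(self, competition):
        # Fetch the latest fixtures/results and merge them into the
        # competition's data model before returning it.
        raise NotImplementedError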
ed06ae9dc5fa12b66e8b0650821700fcf43fb094
2,286
py
Python
bindings/pydeck/docs/scripts/embed_examples.py
marsupialmarcos/deck.gl
c9867c1db87e492253865353f68c985019c7c613
[ "MIT" ]
2
2021-08-11T08:05:51.000Z
2021-08-11T08:05:54.000Z
bindings/pydeck/docs/scripts/embed_examples.py
marsupialmarcos/deck.gl
c9867c1db87e492253865353f68c985019c7c613
[ "MIT" ]
null
null
null
bindings/pydeck/docs/scripts/embed_examples.py
marsupialmarcos/deck.gl
c9867c1db87e492253865353f68c985019c7c613
[ "MIT" ]
null
null
null
"""Script to embed pydeck examples into .rst pages with code These populate the files you see once you click into a grid cell on the pydeck gallery page """ from multiprocessing import Pool import os import subprocess import sys from const import DECKGL_URL_BASE, EXAMPLE_GLOB, GALLERY_DIR, HTML_DIR, HOSTED_STATIC_PATH from utils import to_presentation_name, to_snake_case_string from templates import DOC_TEMPLATE if not os.environ.get("MAPBOX_API_KEY"): # If running for rtfd.io, set this variable from the Admin panel raise Exception("MAPBOX_API_KEY not set") if __name__ == "__main__": main()
35.169231
106
0.71916
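main() is referenced in the __main__ guard of the record above but its body is absent. Below is a sketch of what it plausibly does, assuming EXAMPLE_GLOB names a glob of example scripts, that each script writes its HTML itself, and that DOC_TEMPLATE is a jinja2-style template taking the fields shown; every usage beyond the record's imports is an assumption.

# Hypothetical reconstruction of the missing driver; the template fields and
# the per-example subprocess call are assumptions built only on the imports.
from glob import glob

def render_example(example_path):
    snake_name = to_snake_case_string(os.path.splitext(os.path.basename(example_path))[0])
    # Run the example script so it writes its HTML output.
    subprocess.check_call([sys.executable, example_path])
    rst = DOC_TEMPLATE.render(  # assumed jinja2-style template
        page_title=to_presentation_name(snake_name),
        snake_name=snake_name,
        deckgl_doc_url=DECKGL_URL_BASE,
        hosted_html_path=HOSTED_STATIC_PATH + snake_name + ".html",
    )
    with open(os.path.join(GALLERY_DIR, snake_name + ".rst"), "w") as f:
        f.write(rst)

def main():
    with Pool() as pool:
        pool.map(render_example, glob(EXAMPLE_GLOB))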
ed0721912431ef604f57495e6aa85dbb0102f18a
45,482
py
Python
symbolicR/python/forward_kin.py
mharding01/augmented-neuromuscular-RT-running
7e1ef00d3fdf9cfa9d59fc4f3a6a0e6dd792a834
[ "MIT" ]
null
null
null
symbolicR/python/forward_kin.py
mharding01/augmented-neuromuscular-RT-running
7e1ef00d3fdf9cfa9d59fc4f3a6a0e6dd792a834
[ "MIT" ]
null
null
null
symbolicR/python/forward_kin.py
mharding01/augmented-neuromuscular-RT-running
7e1ef00d3fdf9cfa9d59fc4f3a6a0e6dd792a834
[ "MIT" ]
null
null
null
import numpy as np import sympy as sp import re import os ###################### # # # 17 16 21 # # 18 15 22 # # 19 14 23 # # 20 01 24 # # 02 08 # # 03 09 # # 04 10 # # 05 11 # # 06 12 # # 07 13 # # # ###################### # # origin: in the waist, middle point between the two pitch hip rotations # inertial frame: located at the origin (waist), but aligned with the ground (info from IMU) # # Di : position vector from the anchor point of the previous body to the current body i # (previous body is not always body i-1), expressed in the relative # frame of the previous body # DGi : position vector from the anchor point of body i to its COM (center of mass) G_i, # expressed in the relative frame of the current body i # Omi : rotational vector from the previous body to the current body i # (previous body is not always body i-1), expressed in the relative # frame of the previous body # Rdi : rotational matrix between body i and its predecessor # si : sine of the relative angle before body i # ci : cosine of the relative angle before body i # # xi : absolute position vector (from origin, expressed in the inertial frame) # of the anchor point of body i # xgi : absolute position vector of the COM G_i of body i # xpi : derivative of xi # xgpi : derivative of xgi # omi : absolute rotational vector of body i # Ri : absolute rotational matrix # Rti : transpose matrix of Ri # xji : jacobian of 'xi' # xgji : jacobian of 'xgi' # Rji : jacobian of 'Ri' # return true if it is a float # return true if it has a shape 'R%a_%b%c' (indexes %a, %b, %c also returned) # return true if it has a shape 'x%a_%b' (indexes %a, %b also returned) # count the number of 'elem' in the file # print the declaration of an element # print all declarations # get tilde matrix # get rotation matrix # get vector axis # compute the derivative of an element (for jacobian) # compute the derivative of an expression (for jacobian) # write the beginning of the file # compute the center of mass position and velocity # from an orientation matrix, compute the roll, pitch, yaw angles (and derivative) # compute the time derivatives of 'yaw_pitch_roll_angles' # angles (position and derivative) of the waist and the torso # compute the feet position, velocity and orientation # compute the wrists position, velocity and orientation # get a string for the enumeration of joints # write the end of the file # print matrix components declaration # print variables declaration # variables initialization # write symbolic vector and replace symbolic variable by its name # write symbolic matrix and replace symbolic variable by its name # save the symbolic vector for print # save the symbolic matrix for print # write symbolic jacobian of a rotation matrix # write symbolic jacobian of an anchor point # write symbolic jacobian of a com point # symbolic computation # generate the symbolic output file # main script # rotation axis for each joint before body i (1:x, 2:y, 3:z) rot_axis = np.array([0, # waist 2, 1, 3, 2, 1, 2, # right leg 2, 1, 3, 2, 1, 2, # left leg 1, 2, 3, # trunk 2, 1, 3, 2, # right arm 2, 1, 3, 2 # left arm ]) # parent index parent_body_index = np.array([ -1, # waist 0, 1, 2, 3, 4, 5, # right leg 0, 7, 8, 9, 10, 11, # left leg 0, 13, 14, # trunk 15, 16, 17, 18, # right arm 15, 20, 21, 22 # left arm ]) nb_bodies = len(parent_body_index) ## anchor point positions Dpt = nb_bodies*[None] # waist Dpt[0] = sp.Matrix([0.0, 0.0, 0.0]) # right leg Dpt[1] = sp.Matrix([0.0, sp.Symbol('DPT_2_2'), 0.0]) Dpt[2] = sp.Matrix([0.0, sp.Symbol('DPT_2_6'), 0.0]) 
Dpt[3] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_8')]) Dpt[4] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_10')]) Dpt[5] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_12')]) Dpt[6] = sp.Matrix([0.0, 0.0, 0.0]) # left leg Dpt[7] = sp.Matrix([0.0, sp.Symbol('DPT_2_3'), 0.0]) Dpt[8] = sp.Matrix([0.0, sp.Symbol('DPT_2_18'), 0.0]) Dpt[9] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_20')]) Dpt[10] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_22')]) Dpt[11] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_24')]) Dpt[12] = sp.Matrix([0.0, 0.0, 0.0]) # trunk Dpt[13] = sp.Matrix([sp.Symbol('DPT_1_4'), 0.0, sp.Symbol('DPT_3_4')]) Dpt[14] = sp.Matrix([0.0, 0.0, 0.0]) Dpt[15] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_32')]) # right arm Dpt[16] = sp.Matrix([sp.Symbol('DPT_1_36'), sp.Symbol('DPT_2_36'), sp.Symbol('DPT_3_36')]) Dpt[17] = sp.Matrix([0.0, sp.Symbol('DPT_2_39'), 0.0]) Dpt[18] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_41')]) Dpt[19] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_43')]) # left arm Dpt[20] = sp.Matrix([sp.Symbol('DPT_1_37'), sp.Symbol('DPT_2_37'), sp.Symbol('DPT_3_37')]) Dpt[21] = sp.Matrix([0.0, sp.Symbol('DPT_2_46'), 0.0]) Dpt[22] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_48')]) Dpt[23] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_50')]) ## COM positions Dg = nb_bodies*[None] # waist Dg[0] = sp.Matrix([sp.Symbol('L_1_6'), sp.Symbol('L_2_6'), sp.Symbol('L_3_6')]) # right leg Dg[1] = sp.Matrix([sp.Symbol('L_1_7') , sp.Symbol('L_2_7') , sp.Symbol('L_3_7')]) Dg[2] = sp.Matrix([sp.Symbol('L_1_8') , sp.Symbol('L_2_8') , sp.Symbol('L_3_8')]) Dg[3] = sp.Matrix([sp.Symbol('L_1_9') , sp.Symbol('L_2_9') , sp.Symbol('L_3_9')]) Dg[4] = sp.Matrix([sp.Symbol('L_1_10'), sp.Symbol('L_2_10'), sp.Symbol('L_3_10')]) Dg[5] = sp.Matrix([sp.Symbol('L_1_11'), sp.Symbol('L_2_11'), sp.Symbol('L_3_11')]) Dg[6] = sp.Matrix([sp.Symbol('L_1_12'), 0.0 , sp.Symbol('L_3_12')]) # left leg Dg[7] = sp.Matrix([sp.Symbol('L_1_13'), sp.Symbol('L_2_13'), sp.Symbol('L_3_13')]) Dg[8] = sp.Matrix([sp.Symbol('L_1_14'), sp.Symbol('L_2_14'), sp.Symbol('L_3_14')]) Dg[9] = sp.Matrix([sp.Symbol('L_1_15'), sp.Symbol('L_2_15'), sp.Symbol('L_3_15')]) Dg[10] = sp.Matrix([sp.Symbol('L_1_16'), sp.Symbol('L_2_16'), sp.Symbol('L_3_16')]) Dg[11] = sp.Matrix([sp.Symbol('L_1_17'), sp.Symbol('L_2_17'), sp.Symbol('L_3_17')]) Dg[12] = sp.Matrix([sp.Symbol('L_1_18'), 0.0 , sp.Symbol('L_3_18')]) # trunk Dg[13] = sp.Matrix([sp.Symbol('L_1_19'), sp.Symbol('L_2_19'), sp.Symbol('L_3_19')]) Dg[14] = sp.Matrix([sp.Symbol('L_1_20'), sp.Symbol('L_2_20'), sp.Symbol('L_3_20')]) Dg[15] = sp.Matrix([sp.Symbol('L_1_21'), sp.Symbol('L_2_21'), sp.Symbol('L_3_21')]) # right arm Dg[16] = sp.Matrix([sp.Symbol('L_1_22'), sp.Symbol('L_2_22'), sp.Symbol('L_3_22')]) Dg[17] = sp.Matrix([sp.Symbol('L_1_23'), sp.Symbol('L_2_23'), sp.Symbol('L_3_23')]) Dg[18] = sp.Matrix([sp.Symbol('L_1_24'), sp.Symbol('L_2_24'), sp.Symbol('L_3_24')]) Dg[19] = sp.Matrix([sp.Symbol('L_1_25'), sp.Symbol('L_2_25'), sp.Symbol('L_3_25')]) # left arm Dg[20] = sp.Matrix([sp.Symbol('L_1_26'), sp.Symbol('L_2_26'), sp.Symbol('L_3_26')]) Dg[21] = sp.Matrix([sp.Symbol('L_1_27'), sp.Symbol('L_2_27'), sp.Symbol('L_3_27')]) Dg[22] = sp.Matrix([sp.Symbol('L_1_28'), sp.Symbol('L_2_28'), sp.Symbol('L_3_28')]) Dg[23] = sp.Matrix([sp.Symbol('L_1_29'), sp.Symbol('L_2_29'), sp.Symbol('L_3_29')]) # masses M = np.array([ 'M_6', # waist 'M_7' , 'M_8' , 'M_9' , 'M_10', 'M_11', 'M_12', # right leg 'M_13', 'M_14', 'M_15', 'M_16', 'M_17', 'M_18', # left leg 'M_19', 'M_20', 'M_21', # trunk 'M_22', 'M_23', 'M_24', 'M_25', # right arm 'M_26', 'M_27', 'M_28', 
'M_29' # left arm ]) # joint names joint_id_names = np.array(['0', # waist 'RightHipPitch_id', 'RightHipRoll_id', 'RightHipYaw_id', 'RightKneePitch_id', 'RightFootRoll_id', 'RightFootPitch_id', # right leg 'LeftHipPitch_id' , 'LeftHipRoll_id' , 'LeftHipYaw_id' , 'LeftKneePitch_id' , 'LeftFootRoll_id' , 'LeftFootPitch_id' , # left leg 'TorsoRoll_id' , 'TorsoPitch_id' , 'TorsoYaw_id' , # trunk 'RightShPitch_id' , 'RightShRoll_id' , 'RightShYaw_id' , 'RightElbPitch_id', # right arm 'LeftShPitch_id' , 'LeftShRoll_id' , 'LeftShYaw_id' , 'LeftElbPitch_id' # left arm ]) out_file_name = 'forward_kinematics' gen_symbolic_out(out_file_name, nb_bodies, rot_axis, parent_body_index, joint_id_names, Dpt, Dg, M)
32.348506
189
0.633767
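The comment headers in the record above name a series of helpers whose bodies were stripped. Two of them are standard enough to sketch with confidence in the math, though the function names are assumptions: the "tilde" (skew-symmetric cross-product) matrix, and the elementary rotation matrix for the axis codes used by rot_axis (1: x, 2: y, 3: z).

# Sketches of two stripped helpers; names are assumptions, the math is standard.
def get_tilde(v):
    # Skew-symmetric matrix such that get_tilde(a) * b == a x b.
    return sp.Matrix([
        [0, -v[2], v[1]],
        [v[2], 0, -v[0]],
        [-v[1], v[0], 0],
    ])

def get_rotation_matrix(axis, c, s):
    # Elementary rotation about x (1), y (2) or z (3), given cos and sin.
    if axis == 1:
        return sp.Matrix([[1, 0, 0], [0, c, -s], [0, s, c]])
    if axis == 2:
        return sp.Matrix([[c, 0, s], [0, 1, 0], [-s, 0, c]])
    return sp.Matrix([[c, -s, 0], [s, c, 0], [0, 0, 1]])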
ed07be5c12830ff2ea484a69a77d31923d6aa5cb
1,223
py
Python
examples/last.py
0xiso/PyMISP
20a340414422714dcf31389957343c663550ed1a
[ "BSD-2-Clause" ]
5
2019-08-12T15:21:00.000Z
2021-10-01T01:50:52.000Z
examples/last.py
DragonDev1906/PyMISP
5c72dc9c33b4ae850d40ff06dfb05c27f3e80e5d
[ "BSD-2-Clause" ]
null
null
null
examples/last.py
DragonDev1906/PyMISP
5c72dc9c33b4ae850d40ff06dfb05c27f3e80e5d
[ "BSD-2-Clause" ]
3
2018-11-22T15:33:16.000Z
2019-09-02T14:23:35.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from pymisp import PyMISP
from keys import misp_url, misp_key, misp_verifycert
import argparse
import os
import json


# Usage for pipe masters: ./last.py -l 5h | jq .


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Download latest events from a MISP instance.')
    parser.add_argument("-l", "--last", required=True, help="can be defined in days, hours, minutes (for example 5d or 12h or 30m).")
    parser.add_argument("-o", "--output", help="Output file")
    args = parser.parse_args()

    if args.output is not None and os.path.exists(args.output):
        print('Output file already exists, abort.')
        exit(0)

    misp = init(misp_url, misp_key)

    download_last(misp, args.last, args.output)
27.795455
133
0.644317
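init and download_last are called in the record above but not defined in it. Sketches under stated assumptions follow: init wrapping the PyMISP constructor matches the record's imports, while the search-and-write body of download_last, in particular the misp.search(last=...) call, is an assumption about the installed PyMISP version's API.

# Sketches of the two missing helpers; misp.search(last=...) is an assumption.
def init(url, key):
    return PyMISP(url, key, misp_verifycert)

def download_last(misp, last, output=None):
    events = misp.search(last=last)
    if output is None:
        print(json.dumps(events))
    else:
        with open(output, 'w') as f:
            json.dump(events, f)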
ed0ba1aa04531d363f3421c385e69a867b69168b
2,949
py
Python
saleor/dashboard/urls.py
Chaoslecion123/Diver
8c5c493701422eada49cbf95b0b0add08f1ea561
[ "BSD-3-Clause" ]
null
null
null
saleor/dashboard/urls.py
Chaoslecion123/Diver
8c5c493701422eada49cbf95b0b0add08f1ea561
[ "BSD-3-Clause" ]
null
null
null
saleor/dashboard/urls.py
Chaoslecion123/Diver
8c5c493701422eada49cbf95b0b0add08f1ea561
[ "BSD-3-Clause" ]
null
null
null
from django.conf.urls import include, url from django.views.generic.base import TemplateView from . import views as core_views from .category.urls import urlpatterns as category_urls from .collection.urls import urlpatterns as collection_urls from .customer.urls import urlpatterns as customer_urls from .discount.urls import urlpatterns as discount_urls from .menu.urls import urlpatterns as menu_urls from .order.urls import urlpatterns as order_urls from .page.urls import urlpatterns as page_urls from .product.urls import urlpatterns as product_urls from .search.urls import urlpatterns as search_urls from .shipping.urls import urlpatterns as shipping_urls from .sites.urls import urlpatterns as site_urls from .staff.urls import urlpatterns as staff_urls from .taxes.urls import urlpatterns as taxes_urls # BEGIN :: SoftButterfly Extensions -------------------------------------------- from .brand.urls import urlpatterns as brand_urls from .widget.slider.urls import urlpatterns as slider_urls from .widget.banner.urls import urlpatterns as banner_urls from .widget.scene.urls import urlpatterns as scene_urls from .widget.benefit.urls import urlpatterns as benefit_urls from .store.physical_store.urls import urlpatterns as store_urls from .store.social_network.urls import urlpatterns as social_network_urls from .store.special_page.urls import urlpatterns as special_page_urls from .store.bank_account.urls import urlpatterns as bank_account_urls from .store.footer_item.urls import urlpatterns as footer_item_urls # END :: SoftButterfly Extensions ---------------------------------------------- urlpatterns = [ url(r'^$', core_views.index, name='index'), url(r'^categories/', include(category_urls)), url(r'^collections/', include(collection_urls)), url(r'^orders/', include(order_urls)), url(r'^page/', include(page_urls)), url(r'^products/', include(product_urls)), url(r'^customers/', include(customer_urls)), url(r'^staff/', include(staff_urls)), url(r'^discounts/', include(discount_urls)), url(r'^settings/', include( site_urls + social_network_urls + special_page_urls + bank_account_urls + footer_item_urls)), # Extensions url(r'^menu/', include(menu_urls)), url(r'^shipping/', include(shipping_urls)), url(r'^style-guide/', core_views.styleguide, name='styleguide'), url(r'^search/', include(search_urls)), url(r'^taxes/', include(taxes_urls)), url(r'^next/', TemplateView.as_view(template_name='dashboard/next.html')), # BEGIN :: SoftButterfly Extensions ---------------------------------------- url(r'^brand/', include(brand_urls)), url(r'^slider/', include(slider_urls)), url(r'^banner/', include(banner_urls)), url(r'^scene/', include(scene_urls)), url(r'^store/', include(store_urls)), url(r'^benefit/', include(benefit_urls)), # END :: SoftButterfly Extensions ------------------------------------------ ]
49.15
83
0.714819
ed0cbfaf8410cb124a4ef21f7ca9796ba91008fc
1,146
py
Python
experiments/CUB_fewshot_raw/FRN/ResNet-12/train.py
Jf-Chen/FRN-main
5b57b9e0d7368058a8e3ba41a53c460b54ab9b91
[ "MIT" ]
43
2021-04-27T23:42:35.000Z
2022-03-30T02:41:19.000Z
experiments/CUB_fewshot_raw/FRN/ResNet-12/train.py
Jf-Chen/FRN-main
5b57b9e0d7368058a8e3ba41a53c460b54ab9b91
[ "MIT" ]
7
2021-05-31T10:38:17.000Z
2022-01-06T05:20:08.000Z
experiments/CUB_fewshot_raw/FRN/ResNet-12/train.py
Jf-Chen/FRN-main
5b57b9e0d7368058a8e3ba41a53c460b54ab9b91
[ "MIT" ]
7
2021-05-18T00:37:46.000Z
2022-01-23T07:09:51.000Z
import os import sys import torch import yaml from functools import partial sys.path.append('../../../../') from trainers import trainer, frn_train from datasets import dataloaders from models.FRN import FRN args = trainer.train_parser() with open('../../../../config.yml', 'r') as f: temp = yaml.safe_load(f) data_path = os.path.abspath(temp['data_path']) fewshot_path = os.path.join(data_path,'CUB_fewshot_raw') pm = trainer.Path_Manager(fewshot_path=fewshot_path,args=args) train_way = args.train_way shots = [args.train_shot, args.train_query_shot] train_loader = dataloaders.meta_train_dataloader(data_path=pm.train, way=train_way, shots=shots, transform_type=args.train_transform_type) model = FRN(way=train_way, shots=[args.train_shot, args.train_query_shot], resnet=args.resnet) train_func = partial(frn_train.default_train,train_loader=train_loader) tm = trainer.Train_Manager(args,path_manager=pm,train_func=train_func) tm.train(model) tm.evaluate(model)
30.157895
89
0.666667
ed0d0fb11e355951942a4b93a958119ead61c53e
6,037
py
Python
exp/DFRdatasets/simulate.py
zzzace2000/dropout-feature-rankin
7769ce822f3c0a6d23167d11f1569f59e56b1266
[ "CC-BY-4.0" ]
6
2019-02-24T07:31:38.000Z
2021-12-27T08:57:38.000Z
exp/DFRdatasets/simulate.py
zzzace2000/dropout-feature-rankin
7769ce822f3c0a6d23167d11f1569f59e56b1266
[ "CC-BY-4.0" ]
2
2019-01-13T11:49:35.000Z
2020-05-18T01:59:15.000Z
exp/DFRdatasets/simulate.py
zzzace2000/dropout-feature-rankin
7769ce822f3c0a6d23167d11f1569f59e56b1266
[ "CC-BY-4.0" ]
6
2018-11-06T14:17:07.000Z
2021-11-06T14:30:10.000Z
import argparse
import os

import numpy as np
import torch

from dataloaders.LoaderBase import LoaderBase
import exp.feature.feature_utils as feature_utils


if __name__ == '__main__':
    args = parse_args()

    if 'other_ranks' in args.rank_func:
        args.rank_func.remove('other_ranks')
        args.rank_func += ['marginal_rank', 'rf_rank', 'zero_rank', 'shuffle_rank',
                           'random_rank', 'enet_rank', 'lasso_rank']

    for mode in args.modes:
        run(mode, args)
41.068027
98
0.629617
ed0d8ae3aab7a47853ec71583d13c54d255d0f51
2,430
py
Python
gym_pcgrl/envs/reps/wide_3D_rep.py
JiangZehua/gym-pcgrl
80ddbde173803e81060578c2c4167d8d1f5cacba
[ "MIT" ]
null
null
null
gym_pcgrl/envs/reps/wide_3D_rep.py
JiangZehua/gym-pcgrl
80ddbde173803e81060578c2c4167d8d1f5cacba
[ "MIT" ]
null
null
null
gym_pcgrl/envs/reps/wide_3D_rep.py
JiangZehua/gym-pcgrl
80ddbde173803e81060578c2c4167d8d1f5cacba
[ "MIT" ]
null
null
null
from gym_pcgrl.envs.reps.representation3D import Representation3D from PIL import Image from gym import spaces import numpy as np from gym_pcgrl.envs.probs.minecraft.mc_render import reps_3D_render """ The wide representation where the agent can pick the tile position and tile value at each update. """
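# The representation class itself was elided from this extract. A minimal
# sketch of what the docstring describes -- each action selects a voxel
# position plus the tile value to place there. The method and attribute
# names below are assumptions for illustration, not the repo's actual
# implementation:
class WideRepresentationSketch(Representation3D):
    def get_action_space(self, length, width, height, num_tiles):
        # one flat action = (x, y, z, tile value)
        return spaces.MultiDiscrete([length, width, height, num_tiles])

    def update(self, action):
        # unpack the picked position and tile, write it into the level map
        x, y, z, tile = action
        change = int(self._map[z][y][x] != tile)  # assumes a [z][y][x] map layout
        self._map[z][y][x] = tile
        return change, x, y, z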
32.837838
101
0.671605
ed0ede6f5172ebc43a6bba82ff98dc80379f3c8f
10,696
py
Python
ucsmsdk/mometa/adaptor/AdaptorMenloQStats.py
thinkitdata/ucsmsdk
da6599e1dbc1207a30eabe548a7e5791af5f476b
[ "Apache-2.0" ]
null
null
null
ucsmsdk/mometa/adaptor/AdaptorMenloQStats.py
thinkitdata/ucsmsdk
da6599e1dbc1207a30eabe548a7e5791af5f476b
[ "Apache-2.0" ]
null
null
null
ucsmsdk/mometa/adaptor/AdaptorMenloQStats.py
thinkitdata/ucsmsdk
da6599e1dbc1207a30eabe548a7e5791af5f476b
[ "Apache-2.0" ]
null
null
null
"""This module contains the general information for AdaptorMenloQStats ManagedObject.""" from ...ucsmo import ManagedObject from ...ucscoremeta import MoPropertyMeta, MoMeta from ...ucsmeta import VersionMeta
76.94964
277
0.708022
ed0fc8cf4f946e650eb4b14f0a5d7690952a62a3
980
py
Python
python/old_password_test.py
XelaRellum/old_password
b461941069bc7f1187776a992f86c89317ab215e
[ "MIT" ]
null
null
null
python/old_password_test.py
XelaRellum/old_password
b461941069bc7f1187776a992f86c89317ab215e
[ "MIT" ]
null
null
null
python/old_password_test.py
XelaRellum/old_password
b461941069bc7f1187776a992f86c89317ab215e
[ "MIT" ]
null
null
null
import unittest import pytest from old_password import old_password import csv import re def test_password_with_space(): """ spaces in password are skipped """ assert old_password("pass word") == old_password("password") def test_password_with_tab(): """ tabs in password are skipped """ assert old_password("pass\tword") == old_password("password")
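# For context on why the two whitespace tests above hold: MySQL's pre-4.1
# OLD_PASSWORD() hash skips spaces and tabs while folding every other
# character into two 31-bit accumulators. A sketch of that algorithm
# (reference implementation; assumed to match the module under test):
def old_password_sketch(password):
    nr, nr2, add = 1345345333, 0x12345671, 7
    for ch in password:
        if ch in (' ', '\t'):
            continue  # this skip is exactly what the tests above assert
        tmp = ord(ch)
        nr ^= (((nr & 63) + add) * tmp) + ((nr << 8) & 0xFFFFFFFF)
        nr &= 0xFFFFFFFF
        nr2 = (nr2 + (((nr2 << 8) & 0xFFFFFFFF) ^ nr)) & 0xFFFFFFFF
        add += tmp
    return '%08x%08x' % (nr & 0x7FFFFFFF, nr2 & 0x7FFFFFFF)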
22.272727
67
0.643878
ed10c3db0c256d5bebae34542a471bf7c8fc94ae
6,829
py
Python
src/RS_model/train_mlp.py
CindyChen1995/MKR
f9ae37903dcf43b6d101dfc08644ce4a29ecbf9d
[ "MIT" ]
null
null
null
src/RS_model/train_mlp.py
CindyChen1995/MKR
f9ae37903dcf43b6d101dfc08644ce4a29ecbf9d
[ "MIT" ]
null
null
null
src/RS_model/train_mlp.py
CindyChen1995/MKR
f9ae37903dcf43b6d101dfc08644ce4a29ecbf9d
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
   Description :
   Author :       cmy
   Date :         2020/1/2
-------------------------------------------------
"""
import datetime
import heapq
import numpy as np
import tensorflow as tf
import time

from metrics import ndcg_at_k
from train import get_user_record
from DMF import DMF
import os

os.environ["CUDA_VISIBLE_DEVICES"] = "1"
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5  # allocate at most 50% of GPU memory
config.gpu_options.allow_growth = True  # grow the GPU allocation on demand
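# Illustrative use of the ConfigProto set up above (a sketch -- the actual
# training loop was elided from this extract): a TF1 session built with this
# config caps the process at half of the GPU memory and grows the allocation
# on demand instead of reserving it all up front.
def make_session():
    return tf.Session(config=config)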
41.387879
130
0.516767
ed12384bdaa43735e81fa807c26ed334db11e7a1
84,211
py
Python
pylipid.py
glass-w/PyLipID
ee29f92ba6187cd22b9554a599177152ebed9c4c
[ "MIT" ]
null
null
null
pylipid.py
glass-w/PyLipID
ee29f92ba6187cd22b9554a599177152ebed9c4c
[ "MIT" ]
null
null
null
pylipid.py
glass-w/PyLipID
ee29f92ba6187cd22b9554a599177152ebed9c4c
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 28 19:28:17 2019

@author: Wanling Song

"""
import mdtraj as md
import numpy as np
import pandas as pd
import argparse
import sys
from collections import defaultdict
import pickle
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import networkx as nx
import seaborn as sns
from matplotlib.ticker import MultipleLocator
from scipy.optimize import curve_fit
from scipy.sparse import coo_matrix
from scipy import sparse
from statsmodels.nonparametric.kernel_density import KDEMultivariate
import community
import warnings
from shutil import copyfile
import datetime
from itertools import product
import logomaker
import re
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings('ignore')
np.seterr(all='ignore')

###################################
######  Parameter settings  #######
###################################

parser = argparse.ArgumentParser()
parser.add_argument("-f", nargs="+", metavar="./run/md.xtc", help="List of trajectories, separated by space. \
                    Supports xtc, gro format. Used by mdtraj.load()")
parser.add_argument("-c", nargs="+", metavar="./run/system.gro", \
                    help="List of coordinates of trajectory, in the same order as -f, required when inputs of -f are xtc trajectories. \
                    Supported format: gro, pdb, etc. Used by mdtraj.load()")
parser.add_argument("-stride", default=1, metavar=1, help="Stride through trajectories. Only every stride-th frame will be analyzed.")
parser.add_argument("-dt", default=None, metavar="None", help="The time interval between two adjacent frames in the trajectories. \
                    If not specified, mdtraj will deduce it from the trajectories. This works for trajectories in formats such as xtc which \
                    include timestep information. For trajectories in dcd format, users have to provide the time interval manually, \
                    in a time unit consistent with -tu")
parser.add_argument("-tu", default="us", choices=["ns", "us"], metavar="us", \
                    help="Time unit for interaction duration calculation. Available options: ns, us. This will affect the unit of koff as well.")
parser.add_argument("-save_dir", default=None, metavar="None", help="The directory where all the generated results will be put in. \
                    The directory will be created if it does not exist. The current working directory is used if not specified.")
parser.add_argument("-cutoffs", nargs=2, default=(0.55, 1.0), metavar=(0.55, 1.0), \
                    help="Double cutoff separated by space, in units of nm. Default is 0.55 1.0. The double cutoffs are used to define lipid \
                    interactions. A continuous lipid contact with a given residue starts when the lipid moves to the given residue \
                    closer than the smaller cutoff, and ends when the lipid moves farther than the larger cutoff. The standard single \
                    cutoff can be achieved by setting the same value for both cutoffs.")
parser.add_argument("-lipids", nargs="+", metavar="POPC", default="POPC CHOL POP2", \
                    help="Lipid species to check, separated by space. Should be consistent with residue names in your trajectories.")
parser.add_argument("-lipid_atoms", nargs="+", metavar="PO4", default=None, \
                    help="Lipid atoms to check, separated by space. Should be consistent with the atom names in your trajectories.")
parser.add_argument("-radii", nargs="+", default=None, metavar="BB:0.26 SC1:0.23", help="Change/Define the radius of atoms/beads \
                    that is used for the calculation of binding site surface area. Values need to be in units of nm. Supported syntax is \
                    BB:0.26, which defines the radius of bead BB as 0.26 nm, or CA:0.12, which defines the radius of atom CA as 0.12 nm. For \
                    atomistic simulations, the default radii are taken from \
                    mdtraj https://github.com/mdtraj/mdtraj/blob/master/mdtraj/geometry/sasa.py#L56. For coarse-grained \
                    simulations, this script defines the radius of the MARTINI 2 bead BB as 0.26 nm and SC1/2/3 as 0.23 nm.")
parser.add_argument("-nprot", default=1, metavar="1", \
                    help="Number of proteins (or chains) in the simulation system. The calculated results will be averaged among these proteins \
                    (or chains). The proteins (or chains) need to be identical, otherwise the averaging will fail.")
parser.add_argument("-resi_offset", default=0, metavar="0", help="Shift the residue index. It is useful if you need to change the residue \
                    index in your trajectories. For example, to change the residue indices from 5,6,7,..., to 10,11,12,..., use -resi_offset 4. \
                    All the outputs, including plotted figures and saved coordinates, will be changed by this.")
parser.add_argument("-resi_list", nargs="+", default=[], metavar="1-10 20-30", help="The indices of residues on which the calculations are done. \
                    This option is useful for proteins with large regions that don't require calculation. Skipping those calculations could \
                    save time and memory. Accepted syntax includes 1/ a range, like 1-10 (both ends included); 2/ a single residue index, \
                    like 25 26 17. All the selections are separated by space. For example, -resi_list 1-10 20-30 40 45 46 selects \
                    residues 1-10, 20-30, 40, 45 and 46 for calculation. The residue indices are not affected by -resi_offset, i.e. they \
                    should be consistent with the indices in your trajectories.")
parser.add_argument("-chain_breaks", nargs="+", default=[], metavar="100 281 420", help="Start a new chain at the X-th residue (starting at 1) in \
                    the trajectory topology. This identifier is independent of the residue index but checks the residue order in the topology. \
                    Multiple chain breaks are supported. This option is useful when the simulation system contains \
                    multiple different chains, or when users want to see the difference between chains even if these chains are identical. Using this flag \
                    will generate separate figures for each of the chains. But the binding site detection will still treat the proteins in the \
                    system collectively, i.e. binding sites that cover multiple chains will be identified.")
parser.add_argument("-nbootstrap", default=10, metavar=10, help="The number of samples for bootstrapping the calculation of koff. \
                    The default is 10. The larger the number, the more time-consuming the calculation will be. The closer the bootstrapped \
                    residence times/koffs are to the original values, the more reliable those original values are. The bootstrapped results \
                    are plotted in each of the koff plots and shown alongside the original values in the figure showing residence time.")
parser.add_argument("-save_dataset", nargs="?", default=True, const=True, metavar="True", help="Save dataset in Pickle. Default is True")
parser.add_argument("-gen_binding_poses", default=5, metavar=5, help="The number of top-scored lipid binding poses to be generated for each binding \
                    site. The default is 5. A scoring function is generated for each binding site based on the sum of the probability density function of each atom/bead \
                    of the lipid molecule: Score = sum(PDF(atom_i) * Weight(atom_i)) for atom_i in the lipid molecule. The weight function Weight(atom_i) \
                    is specified by the flag -score_weights.")
parser.add_argument("-save_pose_format", default="gro", metavar="gro", help="The format the generated lipid binding poses are written in. This function \
                    is carried out by mdtraj.save(), hence it supports the formats that are included in mdtraj.")
parser.add_argument("-score_weights", nargs="+", default=None, metavar="PO4:1 C1:1", help="The weight each lipid atom/bead contributes to the scoring function. \
                    Top-rated lipid binding poses can be generated based on users' specification. The bound poses of each binding site are scored based \
                    on the scoring function Score = sum(PDF(atom_i) * Weight(atom_i)) for atom_i in the lipid molecule.")
parser.add_argument("-letter_map", nargs="+", default=None, metavar="ARG:K GLY:G", help="Map the three-letter amino acids to one letter. This map is \
                    used in making logomaker figures (https://logomaker.readthedocs.io/en/latest/). The common 20 amino acids are defined \
                    by this script. Users need to use this flag to define maps for uncommon amino acids in their systems.")
parser.add_argument("-pdb", default=None, metavar="None", help="Provide a PDB structure onto which the binding site information will be mapped. \
                    Using this flag will generate a 'show_binding_site_info.py' file in the -save_dir directory, which allows users to check the \
                    mapped binding site information in PyMol. Users can run the generated script by 'python show_binding_site_info.py' \
                    to open such a PyMol session.")
parser.add_argument("-pymol_gui", nargs="?", default=True, const=True, metavar="True", help="Show the PyMol session of binding site information \
                    at the end of the calculation. Needs to be used in conjunction with -pdb.")

args = parser.parse_args(sys.argv[1:])

##########################################
########## assisting functions ###########
##########################################

def cal_interaction_intensity(contact_residues_high):
    """
    The probability of finding the lipids around the selected residue, plus the
    average number of lipids found in contact with the selected residue
    """
    contact_counts = [len(item) for item in contact_residues_high]
    mask = np.array(contact_counts) > 0
    contact_counts_nonzero = np.array(contact_counts)[mask]
    return 100 * len(contact_counts_nonzero)/len(contact_residues_high), np.nan_to_num(contact_counts_nonzero.mean())


def cal_restime_koff(sigma, initial_guess):
    """
    fit the exponential curve y=A*e^(-k1*x)+B*e^(-k2*x)
    """
    delta_t_range = list(sigma.keys())
    delta_t_range.sort() # x
    hist_values = np.nan_to_num([sigma[delta_t] for delta_t in delta_t_range]) # y
    try:
        popt, pcov = curve_fit(bi_expo, np.array(delta_t_range, dtype=np.float128), np.array(hist_values, dtype=np.float128),
                               p0=initial_guess, maxfev=100000)
        n_fitted = bi_expo(np.array(delta_t_range, dtype=np.float128), *popt)
        r_squared = 1 - np.sum((np.nan_to_num(n_fitted) - np.nan_to_num(hist_values))**2)/np.sum((hist_values - np.mean(hist_values))**2)
        ks = [abs(k) for k in popt[:2]]
        koff = np.min(ks)
        restime = 1/koff
    except RuntimeError:
        koff = 0
        restime = 0
        r_squared = 0
        popt = [0, 0, 0, 0]
    return restime, koff, r_squared, popt


def bi_expo(x, k1, k2, A, B):
    """The bi-exponential curve y = A*e^(-k1*x) + B*e^(-k2*x) fitted by cal_restime_koff().
    NOTE: reconstructed from its usage above; the original definition was elided from this extract."""
    return A * np.exp(-k1 * x) + B * np.exp(-k2 * x)


def check_dir(save_dir=None, suffix=None):
    """Return a directory to write results into, creating it if needed.
    NOTE: reconstructed from its call sites; the original definition was elided from this extract."""
    if save_dir is None:
        save_dir = os.getcwd()
    if suffix is not None:
        save_dir = os.path.join(save_dir, suffix)
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    return save_dir

#####################################
####### Main Class object ###########
#####################################


######################################################
########### Load params and do calculation ###########
######################################################

if __name__ == '__main__':
    trajfile_list = args.f
    grofile_list = args.c
lipid_set = args.lipids cutoff = [float(data) for data in args.cutoffs] save_dir = check_dir(args.save_dir) ####################################################################### ######## write a backup file of params for reproducibility ############ fn = os.path.join(save_dir, "pylipid_backup_{}.txt".format(datetime.datetime.now().strftime("%Y_%m_%d_%H%M"))) with open(fn, "w") as f: f.write("##### Record params for reproducibility #####\n") f.write("python {}\n".format(" ".join(sys.argv))) ###################################################################### ######################### process resi_list ########################## resi_list = [] if len(args.resi_list) > 0: for item in args.resi_list: if "-" in item: item_list = item.split("-") resi_list.append(np.arange(int(item_list[0]), int(item_list[-1])+1)) else: resi_list.append(int(item)) resi_list = np.hstack(resi_list) ####################################################################### ############################ change of radii ########################## ##### mdtraj default radii: ##### https://github.com/mdtraj/mdtraj/blob/b28df2cd6e5c35fa006fe3c24728857880793abb/mdtraj/geometry/sasa.py#L56 if args.radii == None: radii_book = None else: radii_book = {} for item in args.radii: radius = item.split(":") radii_book[radius[0]] = float(radius[1]) ####################################################################### ################# score weight for kde calculation #################### if args.score_weights == None: score_weights = None else: score_weights = {} for item in args.score_weights: weight = item.split(":") score_weights[weight[0]] = float(weight[1]) ####################################################################### ################# map three letter to single letter ################### letter_map = None if args.letter_map != None: letter_map = {} for item in args.letter_map: letter_map[item.split(":")[0]] = item.split(":")[1] ####################################################################### ################# process chain breaks ################################ chain_breaks = [] if len(args.chain_breaks) == 0 else [int(num)-1 for num in args.chain_breaks] ####################################################################### for lipid in lipid_set: li = LipidInteraction(trajfile_list, grofile_list, stride=int(args.stride), dt=args.dt, cutoff=cutoff, lipid=lipid, \ lipid_atoms=args.lipid_atoms, nprot=args.nprot, timeunit=args.tu, resi_offset=int(args.resi_offset), \ resi_list=resi_list, save_dir=args.save_dir) li.cal_interactions(save_dataset=args.save_dataset, nbootstrap=int(args.nbootstrap)) li.plot_interactions(item="Duration", letter_map=letter_map, chain_breaks=chain_breaks) li.plot_interactions(item="Residence Time", letter_map=letter_map, chain_breaks=chain_breaks) li.plot_interactions(item="Occupancy", letter_map=letter_map, chain_breaks=chain_breaks) li.plot_interactions(item="LipidCount", letter_map=letter_map, chain_breaks=chain_breaks) li.write_to_pdb(item="Duration") li.write_to_pdb(item="Residence Time") li.write_to_pdb(item="Occupancy") li.write_to_pdb(item="LipidCount") li.cal_interaction_network(pdb=args.pdb, save_dataset=args.save_dataset, \ pymol_gui=args.pymol_gui, radii=radii_book, gen_binding_poses=int(args.gen_binding_poses), \ score_weights=score_weights, save_pose_format=args.save_pose_format)
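# The LipidInteraction class body was elided from this extract. A minimal
# sketch of the double-cutoff bookkeeping described in the -cutoffs help text
# above: a contact opens when the lipid comes within the lower cutoff and only
# closes once it leaves the upper cutoff. The function and variable names here
# are illustrative, not the original implementation:
def contact_durations_sketch(distances, cutoff_low, cutoff_high, dt):
    """distances: per-frame lipid-residue distance in nm; returns contact durations."""
    durations, start = [], None
    for frame, d in enumerate(distances):
        if start is None and d <= cutoff_low:
            start = frame                               # contact begins inside the lower cutoff
        elif start is not None and d > cutoff_high:
            durations.append((frame - start) * dt)      # contact ends beyond the upper cutoff
            start = None
    if start is not None:
        durations.append((len(distances) - start) * dt) # contact still open at the last frame
    return durations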
62.332346
227
0.574723
ed123b848cc69e55c673d2f62ec3999397f7c2b8
547
py
Python
main.py
yaakiyu/rt-bot
f68bca95c516e08c31ecc846524dcea4c8ba1503
[ "BSD-4-Clause" ]
null
null
null
main.py
yaakiyu/rt-bot
f68bca95c516e08c31ecc846524dcea4c8ba1503
[ "BSD-4-Clause" ]
null
null
null
main.py
yaakiyu/rt-bot
f68bca95c516e08c31ecc846524dcea4c8ba1503
[ "BSD-4-Clause" ]
null
null
null
# RT by Rext from asyncio import run from discord import Intents, Status, Game, AllowedMentions from core.bot import RT from data import SECRET try: from uvloop import install except ModuleNotFoundError: ... else: install() intents = Intents.default() intents.message_content = True intents.members = True bot = RT( allowed_mentions=AllowedMentions(everyone=False), intents=intents, status=Status.dnd, activity=Game("") ) bot.print("Now loading...") try: run(bot.start(SECRET["token"])) except KeyboardInterrupt: bot.print("Bye")
21.038462
70
0.753199
ed1329b2789d579e2ef82e7b330a86a58150d0b6
13,014
py
Python
hiplot/fetchers_demo.py
dmitryvinn/hiplot
52fe8b195a4e254240eb1a0847953fa3c1957a43
[ "MIT" ]
1
2022-03-21T15:46:17.000Z
2022-03-21T15:46:17.000Z
hiplot/fetchers_demo.py
ai2ys/hiplot
148f7c4eba11c6393957a819169f3cf07c469bec
[ "MIT" ]
null
null
null
hiplot/fetchers_demo.py
ai2ys/hiplot
148f7c4eba11c6393957a819169f3cf07c469bec
[ "MIT" ]
null
null
null
# Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import uuid import random import math import time import typing as t from . import experiment as hip # Demos from the README. If one of those is modified, please modify the readme as well README_DEMOS: t.Dict[str, t.Callable[[], hip.Experiment]] = { "demo": demo, "demo_3xcols": demo_3xcols, "demo_big": lambda: demo(1000), "demo_change_column_properties": demo_change_column_properties, "demo_basic_usage": demo_basic_usage, "demo_line_xy": demo_line_xy, "demo_bug_uid": demo_bug_uid, "demo_force_scale": demo_force_scale, "demo_distribution_cat": lambda: demo_distribution(axis="cat"), "demo_distribution_num": lambda: demo_distribution(axis="numeric"), "demo_distribution_num_100bins": lambda: demo_distribution(axis="numeric", nbins=100), "demo_bool": demo_bool, "demo_color_interpolate": demo_color_interpolate, "demo_color_scheme_ylrd": demo_color_scheme_ylrd, "demo_color_scheme_accent": demo_color_scheme_accent, "demo_axis_style": demo_axis_style, "demo_categorical": demo_categorical, "demo_customize": demo_customize, "demo_long_names": demo_long_names, "demo_force_constant_pplot": demo_force_constant_pplot, "demo_color_interpolate_inverse": demo_color_interpolate_inverse, "demo_first_value_nan": demo_first_value_nan, "demo_weighted_rows": demo_weighted_rows, "demo_col_html": demo_col_html, "demo_disable_table": demo_disable_table, "demo_big_floats": demo_big_floats, }
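# The demo bodies referenced by README_DEMOS were elided from this extract.
# A sketch of the shape each registry entry has -- a zero-argument callable
# returning a hip.Experiment -- built here from random rows (this stand-in is
# not one of the original demos):
def _demo_sketch(n: int = 100) -> hip.Experiment:
    rows = [
        {
            "lr": 10 ** random.uniform(-5, -1),
            "dropout": random.uniform(0.0, 0.5),
            "loss": random.random(),
        }
        for _ in range(n)
    ]
    return hip.Experiment.from_iterable(rows)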
34.247368
140
0.593592
ed148a5bf47d0fb8ac1d5504a3f607cd11c82fbf
313
py
Python
app/endpoints/common/dtos/ingredient.py
brauls/ingredients-service
67c1408f96f4b407d7e7b3e5e62406a6931de1c1
[ "MIT" ]
null
null
null
app/endpoints/common/dtos/ingredient.py
brauls/ingredients-service
67c1408f96f4b407d7e7b3e5e62406a6931de1c1
[ "MIT" ]
1
2021-06-01T23:34:40.000Z
2021-06-01T23:34:40.000Z
app/endpoints/common/dtos/ingredient.py
brauls/ingredients-service
67c1408f96f4b407d7e7b3e5e62406a6931de1c1
[ "MIT" ]
null
null
null
"""Ingredient dto. """
24.076923
60
0.642173
ed14a6749afbe24501971f360abe8e3e8754902d
423
py
Python
barcode.py
kallangerard/grocery-barcode-scanner
0a866c5b20c43355b642c0b78ba09d5cf4b0383c
[ "MIT" ]
null
null
null
barcode.py
kallangerard/grocery-barcode-scanner
0a866c5b20c43355b642c0b78ba09d5cf4b0383c
[ "MIT" ]
null
null
null
barcode.py
kallangerard/grocery-barcode-scanner
0a866c5b20c43355b642c0b78ba09d5cf4b0383c
[ "MIT" ]
null
null
null
import logging

import groceries.api as groceries
import barcodescanner.scan as barcode


def main():
    # NOTE: the original main() body was elided from this extract; this
    # reconstruction is only a sketch, and the attribute names used on the two
    # local modules below are assumptions, not the repo's actual API.
    client = groceries.GroceryAPI()
    scanner = barcode.Scanner()
    while True:
        code = scanner.read()                    # hypothetical blocking read
        logging.info('Scanned barcode: %s', code)
        client.add_item(code)                    # hypothetical API call


if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    main()
22.263158
50
0.65721
ed152979dba20d65fa46d571939edbfd7eb69a09
790
py
Python
setup.py
mr-sk/easy-icm-runner
01cf9d7d8e4ef13afc18dbdda2862035121f3624
[ "MIT" ]
null
null
null
setup.py
mr-sk/easy-icm-runner
01cf9d7d8e4ef13afc18dbdda2862035121f3624
[ "MIT" ]
null
null
null
setup.py
mr-sk/easy-icm-runner
01cf9d7d8e4ef13afc18dbdda2862035121f3624
[ "MIT" ]
null
null
null
import setuptools with open("README.md", "r") as fh: long_description = fh.read() setuptools.setup( name="easy-icm-runner", version="1.0.6", author="Bachir El Koussa", author_email="[email protected]", description="A wrapper for IBM ICMs Scheduler API Calls", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/equinoxfitness/easy-icm-runner/", #packages=setuptools.find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]), py_modules = ['icm_runner'], install_requires=[ 'requests', ], classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], )
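# Typical consumption once this package is published (a sketch; the API inside
# the icm_runner module is not shown in this extract):
#
#   pip install easy-icm-runner
#   python -c "import icm_runner"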
30.384615
93
0.641772
ed15a4196531124a25ec9256f08c4c288fc464c6
1,784
py
Python
test_molecule.py
zee93/molecule_parser
42f5a3722d733ef9f7243bfa2b0b9a08c7bc5d23
[ "MIT" ]
null
null
null
test_molecule.py
zee93/molecule_parser
42f5a3722d733ef9f7243bfa2b0b9a08c7bc5d23
[ "MIT" ]
null
null
null
test_molecule.py
zee93/molecule_parser
42f5a3722d733ef9f7243bfa2b0b9a08c7bc5d23
[ "MIT" ]
null
null
null
import unittest

from molecule import onize_formula, update_equation_with_multiplier, flaten_formula, parse_molecule


class ParseMoleculeTestCase(unittest.TestCase):
    # NOTE: the original test cases were elided from this extract; this single
    # illustrative case assumes the usual molecule-parser semantics of mapping
    # each atom to its count (e.g. water -> {'H': 2, 'O': 1}).
    def test_parse_simple_molecule(self):
        self.assertEqual(parse_molecule('H2O'), {'H': 2, 'O': 1})


if __name__ == '__main__':
    unittest.main()
39.644444
100
0.669283