Dataset schema (one row per source file; ⌀ marks a nullable column):

| column | dtype | min | max |
|---|---|---|---|
| hexsha | stringlengths | 40 | 40 |
| size | int64 | 5 | 2.06M |
| ext | stringclasses | 11 values | |
| lang | stringclasses | 1 value | |
| max_stars_repo_path | stringlengths | 3 | 251 |
| max_stars_repo_name | stringlengths | 4 | 130 |
| max_stars_repo_head_hexsha | stringlengths | 40 | 78 |
| max_stars_repo_licenses | sequencelengths | 1 | 10 |
| max_stars_count ⌀ | int64 | 1 | 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | stringlengths | 24 | 24 |
| max_stars_repo_stars_event_max_datetime ⌀ | stringlengths | 24 | 24 |
| max_issues_repo_path | stringlengths | 3 | 251 |
| max_issues_repo_name | stringlengths | 4 | 130 |
| max_issues_repo_head_hexsha | stringlengths | 40 | 78 |
| max_issues_repo_licenses | sequencelengths | 1 | 10 |
| max_issues_count ⌀ | int64 | 1 | 116k |
| max_issues_repo_issues_event_min_datetime ⌀ | stringlengths | 24 | 24 |
| max_issues_repo_issues_event_max_datetime ⌀ | stringlengths | 24 | 24 |
| max_forks_repo_path | stringlengths | 3 | 251 |
| max_forks_repo_name | stringlengths | 4 | 130 |
| max_forks_repo_head_hexsha | stringlengths | 40 | 78 |
| max_forks_repo_licenses | sequencelengths | 1 | 10 |
| max_forks_count ⌀ | int64 | 1 | 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | stringlengths | 24 | 24 |
| max_forks_repo_forks_event_max_datetime ⌀ | stringlengths | 24 | 24 |
| content | stringlengths | 1 | 1.05M |
| avg_line_length | float64 | 1 | 1.02M |
| max_line_length | int64 | 3 | 1.04M |
| alphanum_fraction | float64 | 0 | 1 |
7c6cbf4764a2e4e9b78da1978c82aa4f5d7862ce | 3,637 | py | Python | tests/conftest.py | priyatharsan/beyond | 1061b870407d316d43e4d1351a7ec026629685ae | ["MIT"] | null | null | null | tests/conftest.py | priyatharsan/beyond | 1061b870407d316d43e4d1351a7ec026629685ae | ["MIT"] | null | null | null | tests/conftest.py | priyatharsan/beyond | 1061b870407d316d43e4d1351a7ec026629685ae | ["MIT"] | null | null | null
import numpy as np
from pytest import fixture, mark, skip
from unittest.mock import patch
from pathlib import Path
from beyond.config import config
from beyond.dates.eop import Eop
from beyond.frames.stations import create_station
from beyond.io.tle import Tle
from beyond.propagators.keplernum import KeplerNum
from beyond.dates import Date, timedelta
from beyond.env.solarsystem import get_body
np.set_printoptions(linewidth=200)
def _skip_if_no_mpl():
"""Specific for dynamically skipping the test if matplotlib is not present
as it is not a dependency of the library, but merely a convenience
"""
try:
import matplotlib.pyplot as plt
except ImportError:
return True
else:
return False
def pytest_configure(config):
"""Declare the skip_if_no_mpl marker in pytest's '--markers' helper option
This has no actual effect on the tests
"""
config.addinivalue_line(
"markers", "skip_if_no_mpl: skip if matplotlib is not installed"
)
def pytest_runtest_setup(item):
"""This function is called for each test case.
It looks if the test case has the skip_if_no_mpl decorator. If so, skip the test case
"""
if _skip_if_no_mpl() and list(item.iter_markers(name="skip_if_no_mpl")):
skip("matplotlib not installed")
| 27.141791 | 89 | 0.653011 |
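The conftest above wires the `skip_if_no_mpl` marker to a runtime matplotlib check. A minimal sketch of a test consuming that marker (the test body is illustrative, not from the repository):

```python
import pytest

# Skipped automatically by pytest_runtest_setup above whenever
# matplotlib is not importable.
@pytest.mark.skip_if_no_mpl
def test_plotting():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    assert ax is not None
```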
7c6cc14ec8ce3c7dc9875cccdf742d57d079973d | 10,181 | py | Python | diofant/tests/integrals/test_heurisch.py | Electric-tric/diofant | 92c4bf0ef301e5d6f0cfab545b036e1cb7de3c0a | ["BSD-3-Clause"] | 1 | 2021-08-22T09:34:15.000Z | 2021-08-22T09:34:15.000Z | diofant/tests/integrals/test_heurisch.py | Electric-tric/diofant | 92c4bf0ef301e5d6f0cfab545b036e1cb7de3c0a | ["BSD-3-Clause"] | null | null | null | diofant/tests/integrals/test_heurisch.py | Electric-tric/diofant | 92c4bf0ef301e5d6f0cfab545b036e1cb7de3c0a | ["BSD-3-Clause"] | null | null | null
import pytest
from diofant import (Add, Derivative, Ei, Eq, Function, I, Integral, LambertW,
Piecewise, Rational, Sum, Symbol, acos, asin, asinh,
besselj, cos, cosh, diff, erf, exp, li, log, pi, ratsimp,
root, simplify, sin, sinh, sqrt, symbols, tan)
from diofant.integrals.heurisch import components, heurisch, heurisch_wrapper
__all__ = ()
x, y, z, nu = symbols('x,y,z,nu')
f = Function('f')
# These are examples from the Poor Man's Integrator
# http://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/examples/
def test_pmint_LambertW():
f = LambertW(x)
g = x*LambertW(x) - x + x/LambertW(x)
assert heurisch(f, x) == g
def test_RR():
# Make sure the algorithm does the right thing if the ring is RR. See
# issue sympy/sympy#8685.
assert heurisch(sqrt(1 + 0.25*x**2), x, hints=[]) == \
0.5*x*sqrt(0.25*x**2 + 1) + 1.0*asinh(0.5*x)
# TODO: convert the rest of PMINT tests:
# Airy functions
# f = (x - AiryAi(x)*AiryAi(1, x)) / (x**2 - AiryAi(x)**2)
# g = Rational(1,2)*ln(x + AiryAi(x)) + Rational(1,2)*ln(x - AiryAi(x))
# f = x**2 * AiryAi(x)
# g = -AiryAi(x) + AiryAi(1, x)*x
# Whittaker functions
# f = WhittakerW(mu + 1, nu, x) / (WhittakerW(mu, nu, x) * x)
# g = x/2 - mu*ln(x) - ln(WhittakerW(mu, nu, x))
| 34.511864 | 112 | 0.534722 |
7c6d185f736a9be6f5e0a171cd9fc68f8a4ce031 | 12,105 | py | Python | kornia/color/adjust.py | carlosb1/kornia | a2b34d497314e7ed65f114401efdd3cc9ba2077c | ["ECL-2.0", "Apache-2.0"] | null | null | null | kornia/color/adjust.py | carlosb1/kornia | a2b34d497314e7ed65f114401efdd3cc9ba2077c | ["ECL-2.0", "Apache-2.0"] | null | null | null | kornia/color/adjust.py | carlosb1/kornia | a2b34d497314e7ed65f114401efdd3cc9ba2077c | ["ECL-2.0", "Apache-2.0"] | null | null | null
from typing import Union
import torch
import torch.nn as nn
from kornia.color.hsv import rgb_to_hsv, hsv_to_rgb
from kornia.constants import pi
def adjust_saturation_raw(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor]) -> torch.Tensor:
r"""Adjust color saturation of an image. Expecting input to be in hsv format already.
See :class:`~kornia.color.AdjustSaturation` for details.
"""
if not torch.is_tensor(input):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(saturation_factor, (float, torch.Tensor,)):
raise TypeError(f"The saturation_factor should be a float number or torch.Tensor."
f"Got {type(saturation_factor)}")
if isinstance(saturation_factor, float):
saturation_factor = torch.tensor([saturation_factor])
saturation_factor = saturation_factor.to(input.device).to(input.dtype)
if (saturation_factor < 0).any():
raise ValueError(f"Saturation factor must be non-negative. Got {saturation_factor}")
for _ in input.shape[1:]:
saturation_factor = torch.unsqueeze(saturation_factor, dim=-1)
# unpack the hsv values
h, s, v = torch.chunk(input, chunks=3, dim=-3)
    # transform the saturation value and apply the factor
s_out: torch.Tensor = torch.clamp(s * saturation_factor, min=0, max=1)
    # pack back the corrected saturation
out: torch.Tensor = torch.cat([h, s_out, v], dim=-3)
return out
def adjust_saturation(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor]) -> torch.Tensor:
r"""Adjust color saturation of an image.
See :class:`~kornia.color.AdjustSaturation` for details.
"""
# convert the rgb image to hsv
x_hsv: torch.Tensor = rgb_to_hsv(input)
# perform the conversion
x_adjusted: torch.Tensor = adjust_saturation_raw(x_hsv, saturation_factor)
# convert back to rgb
out: torch.Tensor = hsv_to_rgb(x_adjusted)
return out
def adjust_hue_raw(input: torch.Tensor, hue_factor: Union[float, torch.Tensor]) -> torch.Tensor:
r"""Adjust hue of an image. Expecting input to be in hsv format already.
See :class:`~kornia.color.AdjustHue` for details.
"""
if not torch.is_tensor(input):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(hue_factor, (float, torch.Tensor)):
raise TypeError(f"The hue_factor should be a float number or torch.Tensor in the range between"
f" [-PI, PI]. Got {type(hue_factor)}")
if isinstance(hue_factor, float):
hue_factor = torch.tensor([hue_factor])
hue_factor = hue_factor.to(input.device).to(input.dtype)
if ((hue_factor < -pi) | (hue_factor > pi)).any():
raise ValueError(f"Hue-factor must be in the range [-PI, PI]. Got {hue_factor}")
for _ in input.shape[1:]:
hue_factor = torch.unsqueeze(hue_factor, dim=-1)
# unpack the hsv values
h, s, v = torch.chunk(input, chunks=3, dim=-3)
    # transform the hue value and apply the factor
divisor: float = 2 * pi.item()
h_out: torch.Tensor = torch.fmod(h + hue_factor, divisor)
    # pack back the corrected hue
out: torch.Tensor = torch.cat([h_out, s, v], dim=-3)
return out
def adjust_hue(input: torch.Tensor, hue_factor: Union[float, torch.Tensor]) -> torch.Tensor:
r"""Adjust hue of an image.
See :class:`~kornia.color.AdjustHue` for details.
"""
# convert the rgb image to hsv
x_hsv: torch.Tensor = rgb_to_hsv(input)
# perform the conversion
x_adjusted: torch.Tensor = adjust_hue_raw(x_hsv, hue_factor)
# convert back to rgb
out: torch.Tensor = hsv_to_rgb(x_adjusted)
return out
def adjust_gamma(input: torch.Tensor, gamma: Union[float, torch.Tensor],
gain: Union[float, torch.Tensor] = 1.) -> torch.Tensor:
r"""Perform gamma correction on an image.
See :class:`~kornia.color.AdjustGamma` for details.
"""
if not torch.is_tensor(input):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(gamma, (float, torch.Tensor)):
raise TypeError(f"The gamma should be a positive float or torch.Tensor. Got {type(gamma)}")
if not isinstance(gain, (float, torch.Tensor)):
raise TypeError(f"The gain should be a positive float or torch.Tensor. Got {type(gain)}")
if isinstance(gamma, float):
gamma = torch.tensor([gamma])
if isinstance(gain, float):
gain = torch.tensor([gain])
gamma = gamma.to(input.device).to(input.dtype)
gain = gain.to(input.device).to(input.dtype)
if (gamma < 0.0).any():
raise ValueError(f"Gamma must be non-negative. Got {gamma}")
if (gain < 0.0).any():
raise ValueError(f"Gain must be non-negative. Got {gain}")
for _ in input.shape[1:]:
gamma = torch.unsqueeze(gamma, dim=-1)
gain = torch.unsqueeze(gain, dim=-1)
# Apply the gamma correction
x_adjust: torch.Tensor = gain * torch.pow(input, gamma)
# Truncate between pixel values
out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0)
return out
def adjust_contrast(input: torch.Tensor,
contrast_factor: Union[float, torch.Tensor]) -> torch.Tensor:
r"""Adjust Contrast of an image.
See :class:`~kornia.color.AdjustContrast` for details.
"""
if not torch.is_tensor(input):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(contrast_factor, (float, torch.Tensor,)):
raise TypeError(f"The factor should be either a float or torch.Tensor. "
f"Got {type(contrast_factor)}")
if isinstance(contrast_factor, float):
contrast_factor = torch.tensor([contrast_factor])
contrast_factor = contrast_factor.to(input.device).to(input.dtype)
if (contrast_factor < 0).any():
raise ValueError(f"Contrast factor must be non-negative. Got {contrast_factor}")
for _ in input.shape[1:]:
contrast_factor = torch.unsqueeze(contrast_factor, dim=-1)
# Apply contrast factor to each channel
x_adjust: torch.Tensor = input * contrast_factor
# Truncate between pixel values
out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0)
return out
def adjust_brightness(input: torch.Tensor,
brightness_factor: Union[float, torch.Tensor]) -> torch.Tensor:
r"""Adjust Brightness of an image.
See :class:`~kornia.color.AdjustBrightness` for details.
"""
if not torch.is_tensor(input):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(brightness_factor, (float, torch.Tensor,)):
raise TypeError(f"The factor should be either a float or torch.Tensor. "
f"Got {type(brightness_factor)}")
if isinstance(brightness_factor, float):
brightness_factor = torch.tensor([brightness_factor])
brightness_factor = brightness_factor.to(input.device).to(input.dtype)
for _ in input.shape[1:]:
brightness_factor = torch.unsqueeze(brightness_factor, dim=-1)
# Apply brightness factor to each channel
x_adjust: torch.Tensor = input + brightness_factor
# Truncate between pixel values
out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0)
return out
| 34.884726 | 110 | 0.671871 |
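Each adjustment above accepts either a plain float or a tensor of per-sample factors (unsqueezed to broadcast over the trailing image dimensions). A short usage sketch, assuming the functions above are in scope and inputs follow the (B, C, H, W), [0, 1] convention enforced by the checks:

```python
import torch

img = torch.rand(2, 3, 16, 16)                             # batch of 2 RGB images
brighter = adjust_brightness(img, 0.25)                    # one shift for the whole batch
contrast = adjust_contrast(img, torch.tensor([0.5, 1.5]))  # per-image factors
gamma = adjust_gamma(img, gamma=2.2, gain=1.0)
```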
7c6ea33d579371cc05a40f107c83af6d179fcd7a | 1,418 | py | Python | pommerman/__init__.py | rmccann01/playground | 354041cd1d9b70ffe82c18fb5b4035fab721eb92 | ["Apache-2.0"] | 725 | 2018-02-14T09:48:18.000Z | 2022-03-29T03:04:28.000Z | pommerman/__init__.py | rmccann01/playground | 354041cd1d9b70ffe82c18fb5b4035fab721eb92 | ["Apache-2.0"] | 214 | 2018-02-16T22:00:41.000Z | 2022-03-11T23:26:20.000Z | pommerman/__init__.py | rmccann01/playground | 354041cd1d9b70ffe82c18fb5b4035fab721eb92 | ["Apache-2.0"] | 265 | 2018-02-15T05:33:46.000Z | 2022-03-11T03:04:17.000Z
'''Entry point into the pommerman module'''
import gym
import inspect
from . import agents
from . import configs
from . import constants
from . import forward_model
from . import helpers
from . import utility
from . import network
gym.logger.set_level(40)
REGISTRY = None
# Register environments with gym
_register()
def make(config_id, agent_list, game_state_file=None, render_mode='human'):
'''Makes the pommerman env and registers it with gym'''
assert config_id in REGISTRY, "Unknown configuration '{}'. " \
"Possible values: {}".format(config_id, REGISTRY)
env = gym.make(config_id)
for id_, agent in enumerate(agent_list):
assert isinstance(agent, agents.BaseAgent)
# NOTE: This is IMPORTANT so that the agent character is initialized
agent.init_agent(id_, env.spec._kwargs['game_type'])
env.set_agents(agent_list)
env.set_init_game_state(game_state_file)
env.set_render_mode(render_mode)
return env
from . import cli
| 26.754717 | 76 | 0.682652 |
7c7069a54d49756f83e36923521eba70ab74f6c7 | 139 | py | Python | demo/demo/accounts/urls.py | caravancoop/rest-auth-toolkit | 425bf293987f7128d9538f27a5eca7e47ba84217 | ["MIT"] | 1 | 2019-12-23T21:51:06.000Z | 2019-12-23T21:51:06.000Z | demo/demo/accounts/urls.py | caravancoop/rest-framework-auth-toolkit | 425bf293987f7128d9538f27a5eca7e47ba84217 | ["MIT"] | 127 | 2017-10-27T15:20:01.000Z | 2022-03-07T04:09:15.000Z | demo/demo/accounts/urls.py | caravancoop/rest-auth-toolkit | 425bf293987f7128d9538f27a5eca7e47ba84217 | ["MIT"] | 2 | 2018-01-03T16:22:51.000Z | 2019-12-23T21:51:54.000Z
from django.urls import path
from .views import ProfileView
urlpatterns = [
path('', ProfileView.as_view(), name='user-profile'),
]
| 15.444444 | 57 | 0.705036 |
7c70c6e774d6a8ca53417d3cc9999e257be28aad | 1,093 | py | Python | test/test_pipeline/components/classification/test_passive_aggressive.py | vardaan-raj/auto-sklearn | 4597152e3a60cd6f6e32719a3bef26e13951b102 | ["BSD-3-Clause"] | 1 | 2021-02-21T16:44:44.000Z | 2021-02-21T16:44:44.000Z | test/test_pipeline/components/classification/test_passive_aggressive.py | vardaan-raj/auto-sklearn | 4597152e3a60cd6f6e32719a3bef26e13951b102 | ["BSD-3-Clause"] | 9 | 2021-02-12T17:52:34.000Z | 2021-06-26T11:37:41.000Z | test/test_pipeline/components/classification/test_passive_aggressive.py | vardaan-raj/auto-sklearn | 4597152e3a60cd6f6e32719a3bef26e13951b102 | ["BSD-3-Clause"] | 1 | 2021-07-06T23:02:42.000Z | 2021-07-06T23:02:42.000Z
import sklearn.linear_model
from autosklearn.pipeline.components.classification.passive_aggressive import \
PassiveAggressive
from .test_base import BaseClassificationComponentTest
| 30.361111 | 79 | 0.725526 |
7c71eb8f52ad23f62b8d9e0d27dc37cf322f70c3 | 3,148 | py | Python | tensorflow_datasets/structured/dart/dart_test.py | harsh020/datasets | b4ad3617b279ec65356e696c4c860458621976f6 | ["Apache-2.0"] | 1 | 2020-12-10T06:37:27.000Z | 2020-12-10T06:37:27.000Z | tensorflow_datasets/structured/dart/dart_test.py | Jinwook-shim/datasets | 815037e87150e3c8a557d91a68b07e8ffb6a2a86 | ["Apache-2.0"] | null | null | null | tensorflow_datasets/structured/dart/dart_test.py | Jinwook-shim/datasets | 815037e87150e3c8a557d91a68b07e8ffb6a2a86 | ["Apache-2.0"] | null | null | null
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dart dataset tests."""
import json
import mock
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.structured.dart import dart
if __name__ == '__main__':
tfds.testing.test_main()
| 28.880734 | 80 | 0.493011 |
7c72f8f31e7cf39a7edd3dbce8585cf8da069b38 | 9,085 | py | Python | exp/exp_informer_dad.py | AdamLohSg/GTA | bf6a745a6e28e365466e76360a15ca10ce61e009 | ["Apache-2.0"] | 8 | 2022-01-19T20:47:36.000Z | 2022-03-20T05:11:04.000Z | exp/exp_informer_dad.py | AdamLohSg/GTA | bf6a745a6e28e365466e76360a15ca10ce61e009 | ["Apache-2.0"] | 2 | 2022-02-17T06:14:25.000Z | 2022-02-17T08:43:57.000Z | exp/exp_informer_dad.py | AdamLohSg/GTA | bf6a745a6e28e365466e76360a15ca10ce61e009 | ["Apache-2.0"] | 5 | 2022-02-15T04:16:27.000Z | 2022-03-29T01:21:41.000Z
from data.data_loader_dad import (
NASA_Anomaly,
WADI
)
from exp.exp_basic import Exp_Basic
from models.model import Informer
from utils.tools import EarlyStopping, adjust_learning_rate
from utils.metrics import metric
from sklearn.metrics import classification_report
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from torch.utils.data import DataLoader
import os
import time
import warnings
warnings.filterwarnings('ignore')
| 36.051587 | 113 | 0.557292 |
7c73ce1a389f347a8681ff6c30c8fe84612d252e | 9,270 | py | Python | tests/components/mysensors/conftest.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | ["Apache-2.0"] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | tests/components/mysensors/conftest.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | ["Apache-2.0"] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | tests/components/mysensors/conftest.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | ["Apache-2.0"] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z
"""Provide common mysensors fixtures."""
from __future__ import annotations
from collections.abc import AsyncGenerator, Callable, Generator
import json
from typing import Any
from unittest.mock import AsyncMock, MagicMock, patch
from mysensors import BaseSyncGateway
from mysensors.persistence import MySensorsJSONDecoder
from mysensors.sensor import Sensor
import pytest
from homeassistant.components.device_tracker.legacy import Device
from homeassistant.components.mqtt import DOMAIN as MQTT_DOMAIN
from homeassistant.components.mysensors.config_flow import DEFAULT_BAUD_RATE
from homeassistant.components.mysensors.const import (
CONF_BAUD_RATE,
CONF_DEVICE,
CONF_GATEWAY_TYPE,
CONF_GATEWAY_TYPE_SERIAL,
CONF_VERSION,
DOMAIN,
)
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry, load_fixture
def mock_gateway_features(
persistence: MagicMock, transport_class: MagicMock, nodes: dict[int, Sensor]
) -> None:
"""Mock the gateway features."""
persistence.schedule_save_sensors = AsyncMock(
side_effect=mock_schedule_save_sensors
)
# For some reason autospeccing does not recognize these methods.
persistence.safe_load_sensors = MagicMock()
persistence.save_sensors = MagicMock()
transport = transport_class.return_value
transport.connect_task = None
transport.connect.side_effect = mock_connect
def load_nodes_state(fixture_path: str) -> dict:
"""Load mysensors nodes fixture."""
return json.loads(load_fixture(fixture_path), cls=MySensorsJSONDecoder)
def update_gateway_nodes(
gateway_nodes: dict[int, Sensor], nodes: dict[int, Sensor]
) -> dict:
"""Update the gateway nodes."""
gateway_nodes.update(nodes)
return nodes
| 31.530612 | 87 | 0.73247 |
7c7633cae0980db6c9c40b9c34972bdb7f5c0282 | 7,139 | py | Python | Detect.py | SymenYang/Vanish-Point-Detect | 0e83e2b2a86e9523ed4a86f592f3a8dee594d691 | ["MIT"] | 2 | 2017-10-17T10:08:25.000Z | 2017-10-17T11:17:39.000Z | Detect.py | SymenYang/Vanish-Point-Detect | 0e83e2b2a86e9523ed4a86f592f3a8dee594d691 | ["MIT"] | null | null | null | Detect.py | SymenYang/Vanish-Point-Detect | 0e83e2b2a86e9523ed4a86f592f3a8dee594d691 | ["MIT"] | null | null | null
import cv2 as cv
import numpy as np
import copy
import math
import Edges
import INTPoint
eps = 1e-7
votes = {}
Groups = []
VPoints = []
Centers = []
Cluster = []
voters = {}
deal("data/1.jpg",'1') | 26.838346 | 157 | 0.559462 |
7c76c121d957b364e4b6f2fa9125b58b9c909aee | 4,086 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/course_groups/migrations/0001_initial.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | ["MIT"] | 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/course_groups/migrations/0001_initial.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | ["MIT"] | null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/course_groups/migrations/0001_initial.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | ["MIT"] | 1 | 2019-01-02T14:38:50.000Z | 2019-01-02T14:38:50.000Z
from django.db import migrations, models
from django.conf import settings
from opaque_keys.edx.django.models import CourseKeyField
| 49.829268 | 159 | 0.618698 |
7c76d6a2f8e354238a96f859815250852db8cda1 | 738 | py | Python | kafka-rockset-integration/generate_customers_data.py | farkaskid/recipes | 8eef799cda899ea266f2849d485917f9b0d83190 | ["Apache-2.0"] | 21 | 2019-02-27T22:30:28.000Z | 2021-07-18T17:26:56.000Z | kafka-rockset-integration/generate_customers_data.py | farkaskid/recipes | 8eef799cda899ea266f2849d485917f9b0d83190 | ["Apache-2.0"] | 16 | 2019-07-03T22:04:21.000Z | 2022-02-26T18:34:05.000Z | kafka-rockset-integration/generate_customers_data.py | farkaskid/recipes | 8eef799cda899ea266f2849d485917f9b0d83190 | ["Apache-2.0"] | 11 | 2019-03-13T08:55:31.000Z | 2022-02-07T08:35:16.000Z
"""Generate Customer Data"""
import csv
import random
from config import MIN_CUSTOMER_ID, MAX_CUSTOMER_ID
ACQUISITION_SOURCES = [
'OrganicSearch',
'PaidSearch',
'Email',
'SocialMedia',
'Display',
'Affiliate'
'Referral'
]
if __name__ == '__main__':
main()
| 22.363636 | 85 | 0.617886 |
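The `main()` body is elided in this dump. A hypothetical generator consistent with the imports and constants above (the CSV filename and column names are invented):

```python
def main():
    with open('customers.csv', 'w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(['CustomerID', 'AcquisitionSource'])
        for customer_id in range(MIN_CUSTOMER_ID, MAX_CUSTOMER_ID + 1):
            writer.writerow([customer_id, random.choice(ACQUISITION_SOURCES)])
```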
7c77100c5bc822f15ee0cc031b607fff7a7b2f70 | 899 | py | Python | parsl/tests/test_error_handling/test_resource_spec.py | MatthewBM/parsl | f11417a0255ed290fd0d78ffa1bc52cfe7a06301 | ["Apache-2.0"] | null | null | null | parsl/tests/test_error_handling/test_resource_spec.py | MatthewBM/parsl | f11417a0255ed290fd0d78ffa1bc52cfe7a06301 | ["Apache-2.0"] | null | null | null | parsl/tests/test_error_handling/test_resource_spec.py | MatthewBM/parsl | f11417a0255ed290fd0d78ffa1bc52cfe7a06301 | ["Apache-2.0"] | null | null | null
import parsl
from parsl.app.app import python_app
from parsl.tests.configs.local_threads import config
from parsl.executors.errors import UnsupportedFeatureError
from parsl.executors import WorkQueueExecutor
if __name__ == '__main__':
local_config = config
parsl.load(local_config)
x = test_resource(2)
| 26.441176 | 58 | 0.676307 |
7c78f1b09da753afd4fbe81d818781bc202c7f29 | 9,565 | py | Python | cincan/file_tool.py | cincanproject/cincan-command | b8cde81931b1c8583ac7daa1327520fb9f06856e | ["MIT"] | 1 | 2022-03-11T02:37:42.000Z | 2022-03-11T02:37:42.000Z | cincan/file_tool.py | cincanproject/cincan-command | b8cde81931b1c8583ac7daa1327520fb9f06856e | ["MIT"] | null | null | null | cincan/file_tool.py | cincanproject/cincan-command | b8cde81931b1c8583ac7daa1327520fb9f06856e | ["MIT"] | null | null | null
import pathlib
import re
from typing import List, Optional, Dict, Set, Tuple, Iterable
import shlex
| 41.228448 | 119 | 0.576477 |
7c7958cdc1aac4d3672c25246775beb5da7fc72d | 997 | py | Python | aws_interface/cloud/auth/set_me.py | hubaimaster/aws-interface | 162dd056546d58b6eb29afcae1c3c2d78e4309b2 | ["Apache-2.0"] | 53 | 2018-10-02T05:58:54.000Z | 2020-09-15T08:58:26.000Z | aws_interface/cloud/auth/set_me.py | hubaimaster/aws-interface | 162dd056546d58b6eb29afcae1c3c2d78e4309b2 | ["Apache-2.0"] | 52 | 2018-09-26T05:16:09.000Z | 2022-03-11T23:51:14.000Z | aws_interface/cloud/auth/set_me.py | hubaimaster/aws-interface | 162dd056546d58b6eb29afcae1c3c2d78e4309b2 | ["Apache-2.0"] | 10 | 2019-03-11T16:35:14.000Z | 2019-10-23T08:03:54.000Z
from cloud.permission import Permission, NeedPermission
from cloud.message import error
# Define the input output format of the function.
# This information is used when creating the *SDK*.
info = {
'input_format': {
'session_id': 'str',
'field': 'str',
'value?': 'str',
},
'output_format': {
'user_id?': 'str',
},
'description': 'Set my information'
}
| 24.317073 | 83 | 0.608826 |
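For illustration, a request payload matching the `input_format` contract above; the trailing `?` marks optional keys in this codebase, and all values here are hypothetical:

```python
params = {
    'session_id': 'example-session-id',  # required
    'field': 'nickname',                 # required
    'value': 'alice',                    # optional, per 'value?'
}
```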
7c79d2fe84aae88ef213fa559ea2499797887d57 | 959 | py | Python | doc/gallery-src/analysis/run_blockMcnpMaterialCard.py | celikten/armi | 4e100dd514a59caa9c502bd5a0967fd77fdaf00e | ["Apache-2.0"] | 1 | 2021-05-29T16:02:31.000Z | 2021-05-29T16:02:31.000Z | doc/gallery-src/analysis/run_blockMcnpMaterialCard.py | celikten/armi | 4e100dd514a59caa9c502bd5a0967fd77fdaf00e | ["Apache-2.0"] | null | null | null | doc/gallery-src/analysis/run_blockMcnpMaterialCard.py | celikten/armi | 4e100dd514a59caa9c502bd5a0967fd77fdaf00e | ["Apache-2.0"] | null | null | null
"""
Write MCNP Material Cards
=========================
Here we load a test reactor and write each component of one fuel block out as
MCNP material cards.
Normally, code-specific utility code would belong in a code-specific ARMI
plugin. But in this case, the need for MCNP materials cards is so pervasive
that it made it into the framework.
"""
from armi.reactor.tests import test_reactors
from armi.reactor.flags import Flags
from armi.utils.densityTools import formatMaterialCard
from armi.nucDirectory import nuclideBases as nb
from armi import configure
configure(permissive=True)
_o, r = test_reactors.loadTestReactor()
bFuel = r.core.getBlocks(Flags.FUEL)[0]
for ci, component in enumerate(bFuel, start=1):
ndens = component.getNumberDensities()
# convert nucName (str) keys to nuclideBase keys
ndensByBase = {nb.byName[nucName]: dens for nucName, dens in ndens.items()}
print("".join(formatMaterialCard(ndensByBase, matNum=ci)))
| 31.966667 | 79 | 0.755996 |
7c79e12b0a22b9ba1c999ecbf405c389b15998f7 | 6,612 | py | Python | life_line_chart/_autogenerate_data.py | mustaqimM/life_line_chart | a9bbbbdeb5568aa0cc3b3b585337a3d655f4b2d6 | ["MIT"] | null | null | null | life_line_chart/_autogenerate_data.py | mustaqimM/life_line_chart | a9bbbbdeb5568aa0cc3b3b585337a3d655f4b2d6 | ["MIT"] | null | null | null | life_line_chart/_autogenerate_data.py | mustaqimM/life_line_chart | a9bbbbdeb5568aa0cc3b3b585337a3d655f4b2d6 | ["MIT"] | null | null | null
import names
import os
import datetime
from random import random
def generate_gedcom_file():
"""generate some gedcom file"""
db = {}
db['n_individuals'] = 0
db['max_individuals'] = 8000
db['n_families'] = 0
db['yougest'] = None
gedcom_content = """
0 HEAD
1 SOUR Gramps
2 VERS 3.3.0
2 NAME Gramps
1 DATE {}
2 TIME 15:35:24
1 SUBM @SUBM@
1 COPR Copyright (c) 2020 Christian Schulze,,,.
1 GEDC
2 VERS 5.5
1 CHAR UTF-8
1 LANG German
""".format(datetime.date.today())
generate_recursive_family(db, generations=8, max_children=4)
for k, v in db.items():
if k.startswith('@I'):
gedcom_content += v['string']
for k, v in db.items():
if k.startswith('@F'):
gedcom_content += v['string']
gedcom_content += '0 TRLR\n'
open(os.path.join(os.path.dirname(__file__), '..', 'tests',
'autogenerated.ged'), 'w').write(gedcom_content)
# generate_gedcom_file()
generate_individual_images()
| 35.548387 | 130 | 0.545977 |
7c79e8c0feadf546c1f7ffb56f2c6aded823808d | 4,647 | py | Python | arcade/examples/sprite_bullets_enemy_aims.py | LiorAvrahami/arcade | fce254a9eb89629de1f99d57a63759a2953184e9 | ["MIT"] | 1 | 2020-01-18T04:48:38.000Z | 2020-01-18T04:48:38.000Z | arcade/examples/sprite_bullets_enemy_aims.py | LiorAvrahami/arcade | fce254a9eb89629de1f99d57a63759a2953184e9 | ["MIT"] | 1 | 2019-08-11T18:47:27.000Z | 2019-08-12T03:02:11.000Z | arcade/examples/sprite_bullets_enemy_aims.py | LiorAvrahami/arcade | fce254a9eb89629de1f99d57a63759a2953184e9 | ["MIT"] | null | null | null
"""
Show how to have enemies shoot bullets aimed at the player.
If Python and Arcade are installed, this example can be run from the command line with:
python -m arcade.examples.sprite_bullets_enemy_aims
"""
import arcade
import math
import os
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
SCREEN_TITLE = "Sprites and Bullets Enemy Aims Example"
BULLET_SPEED = 4
def main():
""" Main method """
window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window.setup()
arcade.run()
if __name__ == "__main__":
main()
| 32.957447 | 98 | 0.624919 |
7c7a936052804b42678eb433f6f64454107e4317 | 450 | py | Python | app1.py | FreakX23/EBook_Training | de445b0a9e56a1f1ffc51ae3c5e10ebe8297e9b6 | ["MIT"] | null | null | null | app1.py | FreakX23/EBook_Training | de445b0a9e56a1f1ffc51ae3c5e10ebe8297e9b6 | ["MIT"] | null | null | null | app1.py | FreakX23/EBook_Training | de445b0a9e56a1f1ffc51ae3c5e10ebe8297e9b6 | ["MIT"] | null | null | null
# This Part will gather Infos and demonstrate the use of Variables.
usrName = input("What is your Name?")
usrAge = int(input("What is your Age?"))
usrGPA = float(input("What is your GPA?"))
print () #cheap way to get a new line
print ("Hello, %s" % (usrName))
print ("Did you know that in two years you will be %d years old? " % (usrAge +2))
print ("Also you need to improve your GPA by %f points to have a perfect score." % (4.0 - usrGPA))
print ()
| 45 | 98 | 0.682222 |
7c7af573be1400de8cf6ff87c171a26f3cda1e1f | 96 | py | Python | borze.py | AmitHasanShuvo/Programming | f47ecc626e518a0bf5f9f749afd15ce67bbe737b | ["MIT"] | 8 | 2019-05-26T19:24:13.000Z | 2021-03-24T17:36:14.000Z | borze.py | AmitHasanShuvo/Programming | f47ecc626e518a0bf5f9f749afd15ce67bbe737b | ["MIT"] | null | null | null | borze.py | AmitHasanShuvo/Programming | f47ecc626e518a0bf5f9f749afd15ce67bbe737b | ["MIT"] | 1 | 2020-04-19T04:59:54.000Z | 2020-04-19T04:59:54.000Z
a = input()
a = a.replace('--', '2')
a = a.replace('-.', '1')
a = a.replace('.', '0')
print(a)
| 16 | 24 | 0.4375 |
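Each replacement maps one Borze digraph to a ternary digit ('.' is 0, '-.' is 1, '--' is 2); replacing '--' before '-.' keeps the left-to-right decoding unambiguous. A worked example:

```python
# '-.-.--' decodes as '-.', '-.', '--'  ->  1, 1, 2
code = '-.-.--'
code = code.replace('--', '2').replace('-.', '1').replace('.', '0')
print(code)  # prints 112
```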
7c7ce13176c091aaa43308e8a58ace22a4dd604d | 684 | py | Python | distalg/message.py | charlesemurray/DistributedProgramming | f7b5001a6acb0583cd6b7bb611f27893b830c296 | ["MIT"] | null | null | null | distalg/message.py | charlesemurray/DistributedProgramming | f7b5001a6acb0583cd6b7bb611f27893b830c296 | ["MIT"] | null | null | null | distalg/message.py | charlesemurray/DistributedProgramming | f7b5001a6acb0583cd6b7bb611f27893b830c296 | ["MIT"] | null | null | null
if __name__ == "__main__":
msg = Message(sender="A", receiver="B")
    assert msg.sender == "A"
    assert msg.receiver == "B"
| 23.586207 | 64 | 0.630117 |
7c7d98835e8aa5d863003dad874d15530ea2ef72 | 7,799 | py | Python | myenv/lib/python3.5/site-packages/tests/handlers/logging/logging_tests.py | rupeshparab/techscan | ce2558602ddad31873d7129f25b1cc61895b9939 | ["MIT"] | 1 | 2019-11-01T11:45:22.000Z | 2019-11-01T11:45:22.000Z | myenv/lib/python3.5/site-packages/tests/handlers/logging/logging_tests.py | rupeshparab/techscan | ce2558602ddad31873d7129f25b1cc61895b9939 | ["MIT"] | 3 | 2020-02-11T23:03:45.000Z | 2021-06-10T18:05:11.000Z | myenv/lib/python3.5/site-packages/tests/handlers/logging/logging_tests.py | rupeshparab/techscan | ce2558602ddad31873d7129f25b1cc61895b9939 | ["MIT"] | 1 | 2019-11-01T11:38:54.000Z | 2019-11-01T11:38:54.000Z
import logging
from opbeat.handlers.logging import OpbeatHandler
from opbeat.utils.stacks import iter_stack_frames
from tests.helpers import get_tempstoreclient
from tests.utils.compat import TestCase
| 43.569832 | 103 | 0.647391 |
7c7e4ec9d240f0bbb6bcb11b797135aad6a43254 | 1,342 | py | Python | amnesia/modules/mime/model.py | silenius/amnesia | ba5e3ac79a89da599c22206ad1fd17541855f74c | ["BSD-2-Clause"] | 4 | 2015-05-08T10:57:56.000Z | 2021-05-17T04:32:11.000Z | amnesia/modules/mime/model.py | silenius/amnesia | ba5e3ac79a89da599c22206ad1fd17541855f74c | ["BSD-2-Clause"] | 6 | 2019-12-26T16:43:41.000Z | 2022-02-28T11:07:54.000Z | amnesia/modules/mime/model.py | silenius/amnesia | ba5e3ac79a89da599c22206ad1fd17541855f74c | ["BSD-2-Clause"] | 1 | 2019-09-23T14:08:11.000Z | 2019-09-23T14:08:11.000Z
# -*- coding: utf-8 -*-
# pylint: disable=E1101
from sqlalchemy import sql
from sqlalchemy import orm
from sqlalchemy.orm.exc import NoResultFound
from .. import Base
# http://www.iana.org/assignments/media-types/media-types.xhtml
| 21.645161 | 63 | 0.568554 |
7c7e5ef5e8a7277261b9729c9f251391fd2d29dc | 1,415 | py | Python | apps/goods/views_base.py | sunwei19910119/DjangoShop | 188102dc8ef9f4751f4eeeb7574e95c8cc270484 | ["MIT"] | 3 | 2018-08-22T02:41:55.000Z | 2022-03-03T08:49:38.000Z | apps/goods/views_base.py | sunwei19910119/DjangoShop | 188102dc8ef9f4751f4eeeb7574e95c8cc270484 | ["MIT"] | null | null | null | apps/goods/views_base.py | sunwei19910119/DjangoShop | 188102dc8ef9f4751f4eeeb7574e95c8cc270484 | ["MIT"] | 1 | 2019-10-23T12:24:08.000Z | 2019-10-23T12:24:08.000Z
# encoding: utf-8
from goods.models import Goods
from django.views.generic.base import View
| 32.159091 | 85 | 0.633922 |
7c7ea1a87be56599bff87dd5b87938ba5b672c0b | 14,385 | py | Python | launcher/src/main/scripts/bin/launcher.py | iyersathya/airlift | 27e981a50cee655ff4e1e13801ba5a55991f93ce | ["Apache-2.0"] | null | null | null | launcher/src/main/scripts/bin/launcher.py | iyersathya/airlift | 27e981a50cee655ff4e1e13801ba5a55991f93ce | ["Apache-2.0"] | 35 | 2019-09-27T23:27:54.000Z | 2021-10-06T14:57:28.000Z | launcher/src/main/scripts/bin/launcher.py | iyersathya/airlift | 27e981a50cee655ff4e1e13801ba5a55991f93ce | ["Apache-2.0"] | 21 | 2019-09-21T06:13:58.000Z | 2021-08-10T20:05:09.000Z
#!/usr/bin/env python
import errno
import os
import platform
import sys
import traceback
from fcntl import flock, LOCK_EX, LOCK_NB
from optparse import OptionParser
from os import O_RDWR, O_CREAT, O_WRONLY, O_APPEND
from os.path import basename, dirname, exists, realpath
from os.path import join as pathjoin
from signal import SIGTERM, SIGKILL
from stat import S_ISLNK
from time import sleep
COMMANDS = ['run', 'start', 'stop', 'restart', 'kill', 'status']
LSB_NOT_RUNNING = 3
LSB_STATUS_UNKNOWN = 4
def find_install_path(f):
"""Find canonical parent of bin/launcher.py"""
if basename(f) != 'launcher.py':
raise Exception("Expected file '%s' to be 'launcher.py' not '%s'" % (f, basename(f)))
p = realpath(dirname(f))
if basename(p) != 'bin':
raise Exception("Expected file '%s' directory to be 'bin' not '%s" % (f, basename(p)))
return dirname(p)
def makedirs(p):
"""Create directory and all intermediate ones"""
try:
os.makedirs(p)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def load_properties(f):
"""Load key/value pairs from a file"""
properties = {}
for line in load_lines(f):
k, v = line.split('=', 1)
properties[k.strip()] = v.strip()
return properties
def load_lines(f):
"""Load lines from a file, ignoring blank or comment lines"""
lines = []
for line in open(f, 'r').readlines():
line = line.strip()
if len(line) > 0 and not line.startswith('#'):
lines.append(line)
return lines
def try_lock(f):
"""Try to open an exclusive lock (inheritable) on a file"""
try:
flock(f, LOCK_EX | LOCK_NB)
return True
except (IOError, OSError): # IOError in Python 2, OSError in Python 3.
return False
def open_read_write(f, mode):
"""Open file in read/write mode (without truncating it)"""
return os.fdopen(os.open(f, O_RDWR | O_CREAT, mode), 'r+')
def redirect_stdin_to_devnull():
"""Redirect stdin to /dev/null"""
fd = os.open(os.devnull, O_RDWR)
os.dup2(fd, sys.stdin.fileno())
os.close(fd)
def open_append(f):
"""Open a raw file descriptor in append mode"""
# noinspection PyTypeChecker
return os.open(f, O_WRONLY | O_APPEND | O_CREAT, 0o644)
def redirect_output(fd):
"""Redirect stdout and stderr to a file descriptor"""
os.dup2(fd, sys.stdout.fileno())
os.dup2(fd, sys.stderr.fileno())
def symlink_exists(p):
"""Check if symlink exists and raise if another type of file exists"""
try:
st = os.lstat(p)
if not S_ISLNK(st.st_mode):
raise Exception('Path exists and is not a symlink: %s' % p)
return True
except OSError as e:
if e.errno != errno.ENOENT:
raise
return False
def create_symlink(source, target):
"""Create a symlink, removing the target first if it is a symlink"""
if symlink_exists(target):
os.remove(target)
if exists(source):
os.symlink(source, target)
def create_app_symlinks(options):
"""
Symlink the 'etc' and 'plugin' directory into the data directory.
This is needed to support programs that reference 'etc/xyz' from within
their config files: log.levels-file=etc/log.properties
"""
if options.etc_dir != pathjoin(options.data_dir, 'etc'):
create_symlink(
options.etc_dir,
pathjoin(options.data_dir, 'etc'))
if options.install_path != options.data_dir:
create_symlink(
pathjoin(options.install_path, 'plugin'),
pathjoin(options.data_dir, 'plugin'))
if __name__ == '__main__':
main()
| 31.136364 | 135 | 0.639694 |
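A sketch of how the locking helpers above combine into a single-instance guard; the pid-file path is illustrative and `main()` itself is elided from this excerpt:

```python
pid_path = '/tmp/launcher.pid'
pid_file = open_read_write(pid_path, 0o644)
if not try_lock(pid_file):
    sys.exit('already running (could not lock %s)' % pid_path)
pid_file.truncate(0)
pid_file.write(str(os.getpid()))
pid_file.flush()
```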
7c7f557e50cc992f1ad5414b88efb2c8bf4f59f5 | 1,213 | py | Python | code/sim/test.py | vectorcrumb/Ballbot_IEE2913 | 5ab54825b2bfadae251e2c6bfaaa7f8fcdae77a0 | ["MIT"] | null | null | null | code/sim/test.py | vectorcrumb/Ballbot_IEE2913 | 5ab54825b2bfadae251e2c6bfaaa7f8fcdae77a0 | ["MIT"] | null | null | null | code/sim/test.py | vectorcrumb/Ballbot_IEE2913 | 5ab54825b2bfadae251e2c6bfaaa7f8fcdae77a0 | ["MIT"] | null | null | null
from direct.showbase.ShowBase import ShowBase
from direct.task import Task
from direct.actor.Actor import Actor
import numpy as np
app = MyApp()
app.run()
| 32.783784 | 85 | 0.649629 |
7c80c3cc37ddb266e34cc1676cdc4a68cdabc9ff | 32 | py | Python | run_locally.py | nationalarchives/tdr-service-unavailable | fcb5930f57459b1e4e6d2d14244ebeecee2f6907 | ["MIT"] | null | null | null | run_locally.py | nationalarchives/tdr-service-unavailable | fcb5930f57459b1e4e6d2d14244ebeecee2f6907 | ["MIT"] | null | null | null | run_locally.py | nationalarchives/tdr-service-unavailable | fcb5930f57459b1e4e6d2d14244ebeecee2f6907 | ["MIT"] | null | null | null
from app import app
app.run()
| 8 | 20 | 0.6875 |
7c80d22f73704982f5f02b4193bf4d13e0699eda | 5,914 | py | Python | src/pandas_profiling/model/describe.py | briangrahamww/pandas-profiling | 62f8e3fd81720d444041069191c4aacd03d79ad5 | ["MIT"] | null | null | null | src/pandas_profiling/model/describe.py | briangrahamww/pandas-profiling | 62f8e3fd81720d444041069191c4aacd03d79ad5 | ["MIT"] | 4 | 2021-11-01T15:17:07.000Z | 2022-01-26T15:22:15.000Z | src/pandas_profiling/model/describe.py | briangrahamww/pandas-profiling | 62f8e3fd81720d444041069191c4aacd03d79ad5 | ["MIT"] | null | null | null
"""Organize the calculation of statistics for each series in this DataFrame."""
import warnings
from datetime import datetime
from typing import Optional
import pandas as pd
from tqdm.auto import tqdm
from visions import VisionsTypeset
from pandas_profiling.config import Settings
from pandas_profiling.model.correlations import calculate_correlation
from pandas_profiling.model.duplicates import get_duplicates
from pandas_profiling.model.sample import Sample, get_sample
from pandas_profiling.model.summarizer import BaseSummarizer
from pandas_profiling.model.summary import (
get_messages,
get_missing_diagrams,
get_scatter_matrix,
get_series_descriptions,
get_table_stats,
)
from pandas_profiling.version import __version__
def describe(
config: Settings,
df: pd.DataFrame,
summarizer: BaseSummarizer,
typeset: VisionsTypeset,
sample: Optional[dict] = None,
) -> dict:
"""Calculate the statistics for each series in this DataFrame.
Args:
config: report Settings object
df: DataFrame.
sample: optional, dict with custom sample
Returns:
This function returns a dictionary containing:
- table: overall statistics.
- variables: descriptions per series.
- correlations: correlation matrices.
- missing: missing value diagrams.
- messages: direct special attention to these patterns in your data.
- package: package details.
"""
if df is None:
raise ValueError("Can not describe a `lazy` ProfileReport without a DataFrame.")
if not isinstance(df, pd.DataFrame):
warnings.warn("df is not of type pandas.DataFrame")
disable_progress_bar = not config.progress_bar
date_start = datetime.utcnow()
correlation_names = [
correlation_name
for correlation_name in [
"pearson",
"spearman",
"kendall",
"phi_k",
"cramers",
]
if config.correlations[correlation_name].calculate
]
number_of_tasks = 8 + len(df.columns) + len(correlation_names)
with tqdm(
total=number_of_tasks, desc="Summarize dataset", disable=disable_progress_bar
) as pbar:
series_description = get_series_descriptions(
config, df, summarizer, typeset, pbar
)
pbar.set_postfix_str("Get variable types")
variables = {
column: description["type"]
for column, description in series_description.items()
}
supported_columns = [
column
for column, type_name in variables.items()
if type_name != "Unsupported"
]
interval_columns = [
column for column, type_name in variables.items() if type_name == "Numeric"
]
pbar.update()
# Get correlations
correlations = {}
for correlation_name in correlation_names:
pbar.set_postfix_str(f"Calculate {correlation_name} correlation")
correlations[correlation_name] = calculate_correlation(
config, df, correlation_name, series_description
)
pbar.update()
# make sure correlations is not None
correlations = {
key: value for key, value in correlations.items() if value is not None
}
# Scatter matrix
pbar.set_postfix_str("Get scatter matrix")
scatter_matrix = get_scatter_matrix(config, df, interval_columns)
pbar.update()
# Table statistics
pbar.set_postfix_str("Get table statistics")
table_stats = get_table_stats(config, df, series_description)
pbar.update()
# missing diagrams
pbar.set_postfix_str("Get missing diagrams")
missing = get_missing_diagrams(config, df, table_stats)
pbar.update()
# Sample
pbar.set_postfix_str("Take sample")
if sample is None:
samples = get_sample(config, df)
else:
if "name" not in sample:
sample["name"] = None
if "caption" not in sample:
sample["caption"] = None
samples = [
Sample(
id="custom",
data=sample["data"],
name=sample["name"],
caption=sample["caption"],
)
]
pbar.update()
# Duplicates
pbar.set_postfix_str("Locating duplicates")
metrics, duplicates = get_duplicates(config, df, supported_columns)
table_stats.update(metrics)
pbar.update()
# Messages
pbar.set_postfix_str("Get messages/warnings")
messages = get_messages(config, table_stats, series_description, correlations)
pbar.update()
pbar.set_postfix_str("Get reproduction details")
package = {
"pandas_profiling_version": __version__,
"pandas_profiling_config": config.json(),
}
pbar.update()
pbar.set_postfix_str("Completed")
date_end = datetime.utcnow()
analysis = {
"title": config.title,
"date_start": date_start,
"date_end": date_end,
"duration": date_end - date_start,
}
return {
# Analysis metadata
"analysis": analysis,
# Overall dataset description
"table": table_stats,
# Per variable descriptions
"variables": series_description,
# Bivariate relations
"scatter": scatter_matrix,
# Correlation matrices
"correlations": correlations,
# Missing values
"missing": missing,
# Warnings
"messages": messages,
# Package
"package": package,
# Sample
"sample": samples,
# Duplicates
"duplicates": duplicates,
}
| 30.328205 | 88 | 0.615996 |
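A sketch of consuming the dictionary returned by `describe()`; the keys follow the docstring above, while the `config`, `summarizer`, and `typeset` objects are assumed to be constructed elsewhere in pandas-profiling:

```python
report = describe(config, df, summarizer, typeset)
print(report["analysis"]["duration"])      # wall-clock profiling time
for column, summary in report["variables"].items():
    print(column, summary["type"])         # inferred variable type
```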
7c813b2cc84c9caa5444e2c87441c4626db990da | 1,114 | py | Python | maxOfferNum.py | Ruanxingzhi/King-of-Pigeon | 38d6191c93c2d485b2e5cf163f06b9f2a5dacbec | ["MIT"] | null | null | null | maxOfferNum.py | Ruanxingzhi/King-of-Pigeon | 38d6191c93c2d485b2e5cf163f06b9f2a5dacbec | ["MIT"] | null | null | null | maxOfferNum.py | Ruanxingzhi/King-of-Pigeon | 38d6191c93c2d485b2e5cf163f06b9f2a5dacbec | ["MIT"] | null | null | null
import operator
stds = []
stdsDict = {}
index = 0
if __name__ == "__main__":
campers = ['PKUxk','THUsz_ai','THUsz_cs','THUsz_data','USTC_cs']
for camper in campers:
filename = camper + '.txt'
with open('data/%s'%(filename), "r") as f:
data = f.readlines()
for std in data:
readStd(std,camper)
cmpfun = operator.attrgetter('offerNum','name')
stds.sort(key = cmpfun,reverse = True)
for std in stds:
if std.name[-1] == '\n':
std.name = std.name[:-1]
        print(f'{std.name} {std.offerNum} offer: {std.offers}')
| 26.52381 | 68 | 0.56553 |
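The `Std` record and `readStd()` are elided in this dump; a hypothetical shape consistent with the attributes used above:

```python
class Std:
    """Hypothetical student record; only the fields sorted and printed above."""
    def __init__(self, name, offers):
        self.name = name
        self.offers = offers         # list of offers received
        self.offerNum = len(offers)  # sort key used by attrgetter above
```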
7c81a099c1328ddb836ac7f6bc808bcec8ce85e6 | 5,525 | py | Python | tabnine-vim/third_party/ycmd/third_party/python-future/setup.py | MrMonk3y/vimrc | 950230fb3fd7991d1234c2ab516ec03245945677 | ["MIT"] | 2 | 2018-04-16T03:08:42.000Z | 2021-01-06T10:21:49.000Z | tabnine-vim/third_party/ycmd/third_party/python-future/setup.py | MrMonk3y/vimrc | 950230fb3fd7991d1234c2ab516ec03245945677 | ["MIT"] | null | null | null | tabnine-vim/third_party/ycmd/third_party/python-future/setup.py | MrMonk3y/vimrc | 950230fb3fd7991d1234c2ab516ec03245945677 | ["MIT"] | null | null | null
#!/usr/bin/env python
from __future__ import absolute_import, print_function
import os
import os.path
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
NAME = "future"
PACKAGES = ["future",
"future.builtins",
"future.types",
"future.standard_library",
"future.backports",
"future.backports.email",
"future.backports.email.mime",
"future.backports.html",
"future.backports.http",
"future.backports.test",
"future.backports.urllib",
"future.backports.xmlrpc",
"future.moves",
"future.moves.dbm",
"future.moves.html",
"future.moves.http",
"future.moves.test",
"future.moves.tkinter",
"future.moves.urllib",
"future.moves.xmlrpc",
"future.tests", # for future.tests.base
# "future.tests.test_email",
"future.utils",
"past",
"past.builtins",
"past.types",
"past.utils",
# "past.tests",
"past.translation",
"libfuturize",
"libfuturize.fixes",
"libpasteurize",
"libpasteurize.fixes",
]
# PEP 3108 stdlib moves:
if sys.version_info[:2] < (3, 0):
PACKAGES += [
"builtins",
"configparser",
"copyreg",
"html",
"http",
"queue",
"reprlib",
"socketserver",
"tkinter",
"winreg",
"xmlrpc",
"_dummy_thread",
"_markupbase",
"_thread",
]
PACKAGE_DATA = {'': [
'README.rst',
'LICENSE.txt',
'futurize.py',
'pasteurize.py',
'discover_tests.py',
'check_rst.sh',
'TESTING.txt',
],
'tests': ['*.py'],
}
REQUIRES = []
TEST_REQUIRES = []
if sys.version_info[:2] == (2, 6):
REQUIRES += ['importlib', 'argparse']
TEST_REQUIRES += ['unittest2']
import src.future
VERSION = src.future.__version__
DESCRIPTION = "Clean single-source support for Python 3 and 2"
LONG_DESC = src.future.__doc__
AUTHOR = "Ed Schofield"
AUTHOR_EMAIL = "[email protected]"
URL="https://python-future.org"
LICENSE = "MIT"
KEYWORDS = "future past python3 migration futurize backport six 2to3 modernize pasteurize 3to2"
CLASSIFIERS = [
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"License :: OSI Approved",
"License :: OSI Approved :: MIT License",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
]
setup_kwds = {}
# * Important *
# We forcibly remove the build folder to avoid breaking the
# user's Py3 installation if they run "python2 setup.py
# build" and then "python3 setup.py install".
try:
# If the user happens to run:
# python2 setup.py build
# python3 setup.py install
# then folders like "configparser" will be in build/lib.
# If so, we CANNOT let the user install this, because
# this may break his/her Python 3 install, depending on the folder order in
# sys.path. (Running "import configparser" etc. may pick up our Py2
# substitute packages, instead of the intended system stdlib modules.)
SYSTEM_MODULES = set([
'_dummy_thread',
'_markupbase',
'_thread',
'builtins',
'configparser',
'copyreg',
'html',
'http',
'queue',
'reprlib',
'socketserver',
'tkinter',
'winreg',
'xmlrpc'
])
if sys.version_info[0] >= 3:
# Do any of the above folders exist in build/lib?
files = os.listdir(os.path.join('build', 'lib'))
if len(set(files) & set(SYSTEM_MODULES)) > 0:
print('ERROR: Your build folder is in an inconsistent state for '
'a Python 3.x install. Please remove it manually and run '
'setup.py again.', file=sys.stderr)
sys.exit(1)
except OSError:
pass
setup(name=NAME,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
description=DESCRIPTION,
long_description=LONG_DESC,
license=LICENSE,
keywords=KEYWORDS,
entry_points={
'console_scripts': [
'futurize = libfuturize.main:main',
'pasteurize = libpasteurize.main:main'
]
},
package_dir={'': 'src'},
packages=PACKAGES,
package_data=PACKAGE_DATA,
include_package_data=True,
install_requires=REQUIRES,
classifiers=CLASSIFIERS,
test_suite = "discover_tests",
tests_require=TEST_REQUIRES,
**setup_kwds
)
| 29.864865 | 95 | 0.523439 |
7c81cc51df1ab53c03a469cdc7c5c3c8cd7e2980 | 508 | py | Python | url_shortener/src/__init__.py | Andrelpoj/hire.me | 79428e2094a6b56e762a7f958e1b75f395f59cef | ["Apache-2.0"] | null | null | null | url_shortener/src/__init__.py | Andrelpoj/hire.me | 79428e2094a6b56e762a7f958e1b75f395f59cef | ["Apache-2.0"] | null | null | null | url_shortener/src/__init__.py | Andrelpoj/hire.me | 79428e2094a6b56e762a7f958e1b75f395f59cef | ["Apache-2.0"] | null | null | null
from flask import Flask
from .extensions import db
from .routes import short
from . import config
def create_app():
""" Creates Flask App, connect to Database and register Blueprint of routes"""
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = config.DATABASE_CONNECTION_URI
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.app_context().push()
db.init_app(app)
db.create_all()
app.register_blueprint(short)
    return app
| 28.222222 | 63 | 0.690945 |
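A minimal sketch of running the factory locally (host and port are illustrative):

```python
if __name__ == '__main__':
    app = create_app()
    app.run(host='127.0.0.1', port=5000, debug=True)
```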
7c82276d6def1d1d6f137aa1788b787b2da8110f | 3,009 | py | Python | python-百度翻译调用/Baidu_translate/com/translate/baidu/stackoverflow_question_handler.py | wangchuanli001/Project-experience | b563c5c3afc07c913c2e1fd25dff41c70533f8de | ["Apache-2.0"] | 12 | 2019-12-07T01:44:55.000Z | 2022-01-27T14:13:30.000Z | python-百度翻译调用/Baidu_translate/com/translate/baidu/stackoverflow_question_handler.py | hujiese/Project-experience | b563c5c3afc07c913c2e1fd25dff41c70533f8de | ["Apache-2.0"] | 23 | 2020-05-23T03:56:33.000Z | 2022-02-28T07:54:45.000Z | python-百度翻译调用/Baidu_translate/com/translate/baidu/stackoverflow_question_handler.py | hujiese/Project-experience | b563c5c3afc07c913c2e1fd25dff41c70533f8de | ["Apache-2.0"] | 7 | 2019-12-20T04:48:56.000Z | 2021-11-19T02:23:45.000Z
import requests
from bs4 import BeautifulSoup
import urllib.request
import os
import random
import time
if __name__ == '__main__':
html("https://stackoverflow.com/questions/50119673/nginx-fast-cgi-cache-on-error-page-404")
| 42.985714 | 164 | 0.623463 |
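The `html()` function called at the bottom is elided in this dump; a hypothetical definition consistent with the imports above (headers and parser choice are assumptions):

```python
def html(url):
    response = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})
    soup = BeautifulSoup(response.text, 'html.parser')
    return soup
```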
7c82c0f597ec23a15334ec51934c9484615b1b1f | 2,541 | py | Python | Research/data_loader.py | ALEXKIRNAS/Kaggle-C-CORE-Iceberg-Classifier-Challenge | d8b06969c9393cfce6d9ac96b58c9d365ff4369d | ["MIT"] | null | null | null | Research/data_loader.py | ALEXKIRNAS/Kaggle-C-CORE-Iceberg-Classifier-Challenge | d8b06969c9393cfce6d9ac96b58c9d365ff4369d | ["MIT"] | null | null | null | Research/data_loader.py | ALEXKIRNAS/Kaggle-C-CORE-Iceberg-Classifier-Challenge | d8b06969c9393cfce6d9ac96b58c9d365ff4369d | ["MIT"] | null | null | null
import os
import numpy as np
import pandas as pd
from keras.utils import to_categorical
from sklearn.model_selection import KFold, train_test_split
| 34.337838 | 92 | 0.562377 |
7c82fafc5019f5e066e5d9af9ec1a1742645a993 | 27,180 | py | Python | polyaxon_cli/cli/experiment.py | tiagopms/polyaxon-cli | eb13e3b8389ccf069a421a4dabc87aaa506ab61c | ["MIT"] | null | null | null | polyaxon_cli/cli/experiment.py | tiagopms/polyaxon-cli | eb13e3b8389ccf069a421a4dabc87aaa506ab61c | ["MIT"] | null | null | null | polyaxon_cli/cli/experiment.py | tiagopms/polyaxon-cli | eb13e3b8389ccf069a421a4dabc87aaa506ab61c | ["MIT"] | null | null | null
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import sys
import click
import rhea
from polyaxon_cli.cli.getters.experiment import (
get_experiment_job_or_local,
get_project_experiment_or_local
)
from polyaxon_cli.cli.upload import upload
from polyaxon_cli.client import PolyaxonClient
from polyaxon_cli.client.exceptions import PolyaxonHTTPError, PolyaxonShouldExitError
from polyaxon_cli.logger import clean_outputs
from polyaxon_cli.managers.experiment import ExperimentManager
from polyaxon_cli.managers.experiment_job import ExperimentJobManager
from polyaxon_cli.utils import cache
from polyaxon_cli.utils.formatting import (
Printer,
dict_tabulate,
get_meta_response,
get_resources,
list_dicts_to_tabulate
)
from polyaxon_cli.utils.log_handler import get_logs_handler
from polyaxon_cli.utils.validation import validate_tags
from polyaxon_client.exceptions import PolyaxonClientException
| 33.84807 | 99 | 0.606659 |
7c839f4dc74ac86e89c284ecfbdaf987fd07d858 | 554 | py | Python | Problem_09.py | Habbo3/Project-Euler | 1a01d67f72b9cfb606d13df91af89159b588216e | ["MIT"] | null | null | null | Problem_09.py | Habbo3/Project-Euler | 1a01d67f72b9cfb606d13df91af89159b588216e | ["MIT"] | null | null | null | Problem_09.py | Habbo3/Project-Euler | 1a01d67f72b9cfb606d13df91af89159b588216e | ["MIT"] | null | null | null
"""
A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
a^2 + b^2 = c^2
For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
There exists exactly one Pythagorean triplet for which a + b + c = 1000.
Find the product abc.
"""
solved = False
for a in range(1, 1000):
for b in range(1, 1000):
for c in range(1, 1000):
if a < b < c:
if a + b + c == 1000:
if a**2 + b**2 == c**2:
solved = True
break
if solved:
break
if solved:
break
product = a*b*c
print("The product of only triplet who exists is : ", product) | 24.086957 | 78 | 0.601083 |
7c83aa67c0a65ae58c0709d1dc148cd1d75e4a56 | 1,228 | py | Python | python_scrape/test_functions.py | jose-marquez89/tech-job-landscape | 0b509536e7ba22885f50c82da8cf990b65373090 | ["MIT"] | null | null | null | python_scrape/test_functions.py | jose-marquez89/tech-job-landscape | 0b509536e7ba22885f50c82da8cf990b65373090 | ["MIT"] | null | null | null | python_scrape/test_functions.py | jose-marquez89/tech-job-landscape | 0b509536e7ba22885f50c82da8cf990b65373090 | ["MIT"] | null | null | null
import unittest
import os
from django.test import TestCase
from unipath import Path
from ....utils import refresh
from ...media import tests
from ..models import Transcript, TranscriptMedia
MEDIA_TESTDATA_PATH = Path(tests.__file__).parent.child('testdata')
RAW_MEDIA_PATH = MEDIA_TESTDATA_PATH.child('raw').child(
'NA-472-2012-12-23-Final-excerpt.mp3').absolute()
if os.environ.get('FAST_TEST') != '1':
from django.core.files import File
| 33.27907 | 90 | 0.628931 |
7c83fd89c702ba9d9dcb725c78535f9419ea8d70 | 2,771 | py | Python | buildAncestryFeats.py | BurcinSayin/pf2 | bcd362dc0a750b8ee59cd19ecff9cf5be4f34b19 | [
"MIT"
] | null | null | null | buildAncestryFeats.py | BurcinSayin/pf2 | bcd362dc0a750b8ee59cd19ecff9cf5be4f34b19 | [
"MIT"
] | null | null | null | buildAncestryFeats.py | BurcinSayin/pf2 | bcd362dc0a750b8ee59cd19ecff9cf5be4f34b19 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import requests
import json
import datetime
import codecs
import re
featHolder = {}
featHolder['name'] = 'Pathfinder 2.0 Ancestry feat list'
featHolder['date'] = datetime.date.today().strftime("%B %d, %Y")
listOfPages = codecs.open("ancestryFeats.csv", encoding='utf-8')
for line in listOfPages:
featMD = line.split(",")
print("Getting feats for :", featMD[0],"This url:", featMD[2])
featHolder[featMD[1]] = get_feats(featMD[2].strip('\n'))
json_data = json.dumps(featHolder, indent=4)
#print(json_data)
filename = "ancestry-feats-pf2.json"
f = open(filename, "w")
f.write(json_data)
f.close
| 34.209877 | 155 | 0.572717 |
7c84005ad03ff1fb7961f46195db1060fc63cb16 | 861 | py | Python | Random_item_selector_module.py | Jahronimo/public_question_book_framework | 812bd11b104de013e930536713b8134d046642d5 | [
"MIT"
] | null | null | null | Random_item_selector_module.py | Jahronimo/public_question_book_framework | 812bd11b104de013e930536713b8134d046642d5 | [
"MIT"
] | null | null | null | Random_item_selector_module.py | Jahronimo/public_question_book_framework | 812bd11b104de013e930536713b8134d046642d5 | [
"MIT"
] | 1 | 2020-03-07T10:53:30.000Z | 2020-03-07T10:53:30.000Z | import random
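# The module body was lost in extraction. The function below is a minimal
# hypothetical sketch consistent with the module name (random item
# selection for a question-book framework); the original may have differed.
def select_random_item(items):
    """Return one randomly chosen item from a non-empty sequence."""
    return random.choice(items)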
| 47.833333 | 86 | 0.682927 |
7c8421979f69cbc7cf5cd9ec5a87a153ab3efc74 | 1,228 | py | Python | python_scrape/test_functions.py | jose-marquez89/tech-job-landscape | 0b509536e7ba22885f50c82da8cf990b65373090 | [
"MIT"
] | null | null | null | python_scrape/test_functions.py | jose-marquez89/tech-job-landscape | 0b509536e7ba22885f50c82da8cf990b65373090 | [
"MIT"
] | null | null | null | python_scrape/test_functions.py | jose-marquez89/tech-job-landscape | 0b509536e7ba22885f50c82da8cf990b65373090 | [
"MIT"
] | null | null | null | import unittest
import scrape
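# The original TestCase classes were lost in extraction. The placeholder
# below only shows the shape such a suite takes; the real tests exercised
# functions from the scrape module, whose names are not recoverable here.
class ScrapeTestCase(unittest.TestCase):
    def test_scrape_module_imports(self):
        self.assertTrue(hasattr(scrape, '__name__'))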
if __name__ == '__main__':
unittest.main()
| 34.111111 | 78 | 0.556189 |
7c84e9b3f92ddbf93482eff72a312c6afff49d17 | 173 | py | Python | Level1_Input_Output/10172.py | jaeheeLee17/BOJ_Algorithms | c14641693d7ef0f5bba0a6637166c7ceadb2a0be | [
"MIT"
] | null | null | null | Level1_Input_Output/10172.py | jaeheeLee17/BOJ_Algorithms | c14641693d7ef0f5bba0a6637166c7ceadb2a0be | [
"MIT"
] | null | null | null | Level1_Input_Output/10172.py | jaeheeLee17/BOJ_Algorithms | c14641693d7ef0f5bba0a6637166c7ceadb2a0be | [
"MIT"
] | null | null | null |
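# The body of main() was lost in extraction. BOJ problem 10172 asks for a
# fixed ASCII-art dog, so the original presumably resembled this sketch:
def main():
    print('|\\_/|')
    print('|q p|   /}')
    print('( 0 )"""\\')
    print('|"^"`    |')
    print('||_/=\\\\__|')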
if __name__ == "__main__":
main()
| 17.3 | 26 | 0.352601 |
7c85f2097ce6518402e3aa24b38cc365cc5ffeaa | 4,981 | py | Python | Whats Cooking/KaggleCookingComparison.py | rupakc/Kaggle-Compendium | 61634ba742f9a0239f2d1e45973c4bb477ac6306 | [
"MIT"
] | 17 | 2018-01-11T05:49:06.000Z | 2021-08-22T16:50:10.000Z | Whats Cooking/KaggleCookingComparison.py | Tuanlase02874/Machine-Learning-Kaggle | c31651acd8f2407d8b60774e843a2527ce19b013 | [
"MIT"
] | null | null | null | Whats Cooking/KaggleCookingComparison.py | Tuanlase02874/Machine-Learning-Kaggle | c31651acd8f2407d8b60774e843a2527ce19b013 | [
"MIT"
] | 8 | 2017-11-27T06:58:50.000Z | 2021-08-22T16:50:13.000Z | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 26 13:20:45 2015
Code for Kaggle What's Cooking Competition
It uses the following classifiers with tf-idf,hashvectors and bag_of_words approach
1. Adaboost
2. Extratrees
3. Bagging
4. Random Forests
@author: Rupak Chakraborty
"""
import numpy as np
import time
import json
import ClassificationUtils
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn import metrics
# Create the feature extractors
bag_of_words = CountVectorizer(stop_words='english')
tfidf = TfidfVectorizer(stop_words='english')
hashvec = HashingVectorizer(stop_words='english')
# Create the Classifier objects
adaboost = AdaBoostClassifier()
randomforest = RandomForestClassifier()
extratrees = ExtraTreesClassifier()
bagging = BaggingClassifier()
filepath = "train.json"
f = open(filepath,"r")
content = f.read()
jsonData = json.loads(content)
cuisine_set = set([])
ingredient_set = set([])
cuisine_map = {}
cuisine_numerical_map = {}
ingredient_numerical_map = {}
ingredient_map = {}
ingredient_list = list([])
c = 0
print "Size of the data set : ", len(jsonData)
print "Starting Loading of Data Set...."
start = time.time()
for recipe in jsonData:
if "cuisine" in recipe:
s = ""
if recipe["cuisine"] in cuisine_set:
cuisine_map[recipe["cuisine"]] = cuisine_map[recipe["cuisine"]] + 1
else:
cuisine_map[recipe["cuisine"]] = 1
cuisine_set.add(recipe["cuisine"])
for ingredient in recipe["ingredients"]:
if ingredient in ingredient_set:
ingredient_map[ingredient] = ingredient_map[ingredient] + 1
else:
ingredient_map[ingredient] = 1
ingredient_set.add(ingredient)
s = s + " " + ingredient
ingredient_list.append(s)
end = time.time()
print "Time Taken to Load the Dataset : ",end-start
for cuisine in cuisine_set:
cuisine_numerical_map[cuisine] = c
c = c+1
c = 0
for ingredient in ingredient_set:
ingredient_numerical_map[ingredient] = c
c = c+1
print "Starting Feature Extracting ......"
start = time.time()
train_labels = np.zeros(len(ingredient_list))
train_data_tfidf = tfidf.fit_transform(ingredient_list)
train_data_hash = hashvec.fit_transform(ingredient_list)
train_data_bag = bag_of_words.fit_transform(ingredient_list)
c = 0
for recipe in jsonData:
if "cuisine" in recipe:
train_labels[c] = cuisine_numerical_map[recipe["cuisine"]]
c = c+1
end = time.time()
print "Time Taken to Train Extract Different Features : ", end-start
test_labels = train_labels[1:30000]
test_data_tfidf = tfidf.transform(ingredient_list[1:30000])
test_data_hash = hashvec.transform(ingredient_list[1:30000])
test_data_bag = bag_of_words.transform(ingredient_list[1:30000])
print "Starting Training of Models for Hash Vectorizer Feature....."
start = time.time()
adaboost.fit(train_data_bag,train_labels)
randomforest.fit(train_data_bag,train_labels)
extratrees.fit(train_data_bag,train_labels)
bagging.fit(train_data_bag,train_labels)
end=time.time()
print "Time Taken to train all Ensemble Models : ", end-start
print "Starting Prediction of Test Labels ...."
start = time.time()
ada_predict = adaboost.predict(test_data_bag)
rf_predict = randomforest.predict(test_data_bag)
extree_predict = extratrees.predict(test_data_bag)
bagging_predict = bagging.predict(test_data_bag)
end = time.time()
print "Time Taken to Test the models : ", end-start
print "Accuracy of AdaBoost Algorithm : ", metrics.accuracy_score(test_labels,ada_predict)
print "Accuracy of Random Forests : ", metrics.accuracy_score(test_labels,rf_predict)
print "Accuracy of Extra Trees : ", metrics.accuracy_score(test_labels,extree_predict)
print "Accuracy of Bagging : ", metrics.accuracy_score(test_labels,bagging_predict)
# Saving the tf-idf model and classifiers
ClassificationUtils.save_classifier("ada_bag_cook.pickle",adaboost)
ClassificationUtils.save_classifier("rf_bag_cook.pickle",randomforest)
ClassificationUtils.save_classifier("extree_bag_cook.pickle",extratrees)
ClassificationUtils.save_classifier("bagging_bag_cook.pickle",bagging)
ClassificationUtils.save_classifier("bag_of_words.pickle",tfidf)
| 32.344156 | 90 | 0.739611 |
7c85f5102089b2dbe1aa3c33bc6b5354992888f4 | 466 | py | Python | pybook/ch10/DeckOfCards.py | YanhaoXu/python-learning | 856687a71635a2ca67dab49d396c238f128e5ec0 | [
"MIT"
] | 2 | 2021-12-06T13:29:48.000Z | 2022-01-20T11:39:45.000Z | pybook/ch10/DeckOfCards.py | YanhaoXu/python-learning | 856687a71635a2ca67dab49d396c238f128e5ec0 | [
"MIT"
] | null | null | null | pybook/ch10/DeckOfCards.py | YanhaoXu/python-learning | 856687a71635a2ca67dab49d396c238f128e5ec0 | [
"MIT"
] | null | null | null | import random
# Create a deck of cards
deck = [x for x in range(52)]
# Create suits and ranks lists
suits = ["Spades", "Hearts", "Diamonds", "Clubs"]
ranks = ["Ace", "2", "3", "4", "5", "6", "7", "8", "9",
"10", "Jack", "Queen", "King"]
# Shuffle the cards
random.shuffle(deck)
# Display the first four cards
for i in range(4):
suit = suits[deck[i] // 13]
rank = ranks[deck[i] % 13]
print("Card number", deck[i], "is the", rank, "of", suit)
| 24.526316 | 61 | 0.575107 |
7c8673116b02c8c1dd21b123ad5da8653dbefe4c | 3,410 | py | Python | nlpgnn/gnn/RGCNConv.py | ojipadeson/NLPGNN | 7c43d2f0cb2b16c046c930037fd505c5c4f36db4 | [
"MIT"
] | 263 | 2020-05-19T10:40:26.000Z | 2022-03-25T05:22:49.000Z | nlpgnn/gnn/RGCNConv.py | Kuan-Louis/NLPGNN | b9ecec2c6df1b3e40a54511366dcb6085cf90c34 | [
"MIT"
] | 7 | 2020-05-18T23:02:55.000Z | 2021-04-29T18:27:43.000Z | nlpgnn/gnn/RGCNConv.py | Kuan-Louis/NLPGNN | b9ecec2c6df1b3e40a54511366dcb6085cf90c34 | [
"MIT"
] | 56 | 2020-05-19T05:59:36.000Z | 2022-03-14T06:21:33.000Z | #! usr/bin/env python3
# -*- coding:utf-8 -*-
"""
@Author:Kaiyin Zhou
Usage:
node_embeddings = tf.random.normal(shape=(5, 3))
adjacency_lists = [
tf.constant([[0, 1], [2, 4], [2, 4]], dtype=tf.int32),
tf.constant([[0, 1], [2, 4], [2, 4]], dtype=tf.int32)
]
layer = RGraphConvolution(out_features=12)
x = layer(GNNInput(node_embeddings, adjacency_lists))
"""
import tensorflow as tf
from nlpgnn.gnn.messagepassing import MessagePassing
| 35.894737 | 97 | 0.575367 |
7c872854a67dcbee173ef18681a5116e43865d52 | 53,677 | py | Python | automl/google/cloud/automl_v1beta1/gapic/auto_ml_client.py | erikwebb/google-cloud-python | 288a878e9a07239015c78a193eca1cc15e926127 | [
"Apache-2.0"
] | 1 | 2019-04-16T08:13:06.000Z | 2019-04-16T08:13:06.000Z | automl/google/cloud/automl_v1beta1/gapic/auto_ml_client.py | erikwebb/google-cloud-python | 288a878e9a07239015c78a193eca1cc15e926127 | [
"Apache-2.0"
] | null | null | null | automl/google/cloud/automl_v1beta1/gapic/auto_ml_client.py | erikwebb/google-cloud-python | 288a878e9a07239015c78a193eca1cc15e926127 | [
"Apache-2.0"
] | 1 | 2020-11-15T11:44:36.000Z | 2020-11-15T11:44:36.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.automl.v1beta1 AutoMl API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.operation
import google.api_core.operations_v1
import google.api_core.page_iterator
import google.api_core.path_template
import grpc
from google.cloud.automl_v1beta1.gapic import auto_ml_client_config
from google.cloud.automl_v1beta1.gapic import enums
from google.cloud.automl_v1beta1.gapic.transports import auto_ml_grpc_transport
from google.cloud.automl_v1beta1.proto import data_items_pb2
from google.cloud.automl_v1beta1.proto import dataset_pb2
from google.cloud.automl_v1beta1.proto import io_pb2
from google.cloud.automl_v1beta1.proto import model_evaluation_pb2
from google.cloud.automl_v1beta1.proto import model_pb2
from google.cloud.automl_v1beta1.proto import operations_pb2 as proto_operations_pb2
from google.cloud.automl_v1beta1.proto import prediction_service_pb2
from google.cloud.automl_v1beta1.proto import prediction_service_pb2_grpc
from google.cloud.automl_v1beta1.proto import service_pb2
from google.cloud.automl_v1beta1.proto import service_pb2_grpc
from google.longrunning import operations_pb2 as longrunning_operations_pb2
from google.protobuf import empty_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-automl").version
# Service calls
def create_dataset(
self,
parent,
dataset,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a dataset.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> # TODO: Initialize `dataset`:
>>> dataset = {}
>>>
>>> response = client.create_dataset(parent, dataset)
Args:
parent (str): The resource name of the project to create the dataset for.
dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): The dataset to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.automl_v1beta1.types.Dataset`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_dataset" not in self._inner_api_calls:
self._inner_api_calls[
"create_dataset"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_dataset,
default_retry=self._method_configs["CreateDataset"].retry,
default_timeout=self._method_configs["CreateDataset"].timeout,
client_info=self._client_info,
)
request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset)
return self._inner_api_calls["create_dataset"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def get_dataset(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets a dataset.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]')
>>>
>>> response = client.get_dataset(name)
Args:
name (str): The resource name of the dataset to retrieve.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_dataset" not in self._inner_api_calls:
self._inner_api_calls[
"get_dataset"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_dataset,
default_retry=self._method_configs["GetDataset"].retry,
default_timeout=self._method_configs["GetDataset"].timeout,
client_info=self._client_info,
)
request = service_pb2.GetDatasetRequest(name=name)
return self._inner_api_calls["get_dataset"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_datasets(
self,
parent,
filter_=None,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists datasets in a project.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> # Iterate over all results
>>> for element in client.list_datasets(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_datasets(parent).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): The resource name of the project from which to list datasets.
filter_ (str): An expression for filtering the results of the request.
- ``dataset_metadata`` - for existence of the case.
An example of using the filter is:
- ``translation_dataset_metadata:*`` --> The dataset has
translation\_dataset\_metadata.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.automl_v1beta1.types.Dataset` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_datasets" not in self._inner_api_calls:
self._inner_api_calls[
"list_datasets"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_datasets,
default_retry=self._method_configs["ListDatasets"].retry,
default_timeout=self._method_configs["ListDatasets"].timeout,
client_info=self._client_info,
)
request = service_pb2.ListDatasetsRequest(
parent=parent, filter=filter_, page_size=page_size
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_datasets"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="datasets",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
def delete_dataset(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes a dataset and all of its contents. Returns empty response in the
``response`` field when it completes, and ``delete_details`` in the
``metadata`` field.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]')
>>>
>>> response = client.delete_dataset(name)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
name (str): The resource name of the dataset to delete.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "delete_dataset" not in self._inner_api_calls:
self._inner_api_calls[
"delete_dataset"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_dataset,
default_retry=self._method_configs["DeleteDataset"].retry,
default_timeout=self._method_configs["DeleteDataset"].timeout,
client_info=self._client_info,
)
request = service_pb2.DeleteDatasetRequest(name=name)
operation = self._inner_api_calls["delete_dataset"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=proto_operations_pb2.OperationMetadata,
)
def import_data(
self,
name,
input_config,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Imports data into a dataset. Returns an empty response in the
``response`` field when it completes.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]')
>>>
>>> # TODO: Initialize `input_config`:
>>> input_config = {}
>>>
>>> response = client.import_data(name, input_config)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
name (str): Required. Dataset name. Dataset must already exist. All imported
annotations and examples will be added.
input_config (Union[dict, ~google.cloud.automl_v1beta1.types.InputConfig]): Required. The desired input location.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.automl_v1beta1.types.InputConfig`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "import_data" not in self._inner_api_calls:
self._inner_api_calls[
"import_data"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.import_data,
default_retry=self._method_configs["ImportData"].retry,
default_timeout=self._method_configs["ImportData"].timeout,
client_info=self._client_info,
)
request = service_pb2.ImportDataRequest(name=name, input_config=input_config)
operation = self._inner_api_calls["import_data"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=proto_operations_pb2.OperationMetadata,
)
def export_data(
self,
name,
output_config,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Exports dataset's data to a Google Cloud Storage bucket. Returns an
empty response in the ``response`` field when it completes.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]')
>>>
>>> # TODO: Initialize `output_config`:
>>> output_config = {}
>>>
>>> response = client.export_data(name, output_config)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
name (str): Required. The resource name of the dataset.
output_config (Union[dict, ~google.cloud.automl_v1beta1.types.OutputConfig]): Required. The desired output location.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.automl_v1beta1.types.OutputConfig`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "export_data" not in self._inner_api_calls:
self._inner_api_calls[
"export_data"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.export_data,
default_retry=self._method_configs["ExportData"].retry,
default_timeout=self._method_configs["ExportData"].timeout,
client_info=self._client_info,
)
request = service_pb2.ExportDataRequest(name=name, output_config=output_config)
operation = self._inner_api_calls["export_data"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=proto_operations_pb2.OperationMetadata,
)
def create_model(
self,
parent,
model,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a model. Returns a Model in the ``response`` field when it
completes. When you create a model, several model evaluations are
created for it: a global evaluation, and one evaluation for each
annotation spec.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> # TODO: Initialize `model`:
>>> model = {}
>>>
>>> response = client.create_model(parent, model)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Resource name of the parent project where the model is being created.
model (Union[dict, ~google.cloud.automl_v1beta1.types.Model]): The model to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.automl_v1beta1.types.Model`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_model" not in self._inner_api_calls:
self._inner_api_calls[
"create_model"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_model,
default_retry=self._method_configs["CreateModel"].retry,
default_timeout=self._method_configs["CreateModel"].timeout,
client_info=self._client_info,
)
request = service_pb2.CreateModelRequest(parent=parent, model=model)
operation = self._inner_api_calls["create_model"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
model_pb2.Model,
metadata_type=proto_operations_pb2.OperationMetadata,
)
def get_model(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets a model.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')
>>>
>>> response = client.get_model(name)
Args:
name (str): Resource name of the model.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.Model` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_model" not in self._inner_api_calls:
self._inner_api_calls[
"get_model"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_model,
default_retry=self._method_configs["GetModel"].retry,
default_timeout=self._method_configs["GetModel"].timeout,
client_info=self._client_info,
)
request = service_pb2.GetModelRequest(name=name)
return self._inner_api_calls["get_model"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_models(
self,
parent,
filter_=None,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists models.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> # Iterate over all results
>>> for element in client.list_models(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_models(parent).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): Resource name of the project, from which to list the models.
filter_ (str): An expression for filtering the results of the request.
- ``model_metadata`` - for existence of the case.
- ``dataset_id`` - for = or !=.
Some examples of using the filter are:
- ``image_classification_model_metadata:*`` --> The model has
image\_classification\_model\_metadata.
- ``dataset_id=5`` --> The model was created from a sibling dataset
with ID 5.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.automl_v1beta1.types.Model` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_models" not in self._inner_api_calls:
self._inner_api_calls[
"list_models"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_models,
default_retry=self._method_configs["ListModels"].retry,
default_timeout=self._method_configs["ListModels"].timeout,
client_info=self._client_info,
)
request = service_pb2.ListModelsRequest(
parent=parent, filter=filter_, page_size=page_size
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_models"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="model",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
def delete_model(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes a model. If a model is already deployed, this only deletes the
model in AutoML BE, and does not change the status of the deployed model
in the production environment. Returns ``google.protobuf.Empty`` in the
``response`` field when it completes, and ``delete_details`` in the
``metadata`` field.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')
>>>
>>> response = client.delete_model(name)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
name (str): Resource name of the model being deleted.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "delete_model" not in self._inner_api_calls:
self._inner_api_calls[
"delete_model"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_model,
default_retry=self._method_configs["DeleteModel"].retry,
default_timeout=self._method_configs["DeleteModel"].timeout,
client_info=self._client_info,
)
request = service_pb2.DeleteModelRequest(name=name)
operation = self._inner_api_calls["delete_model"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=proto_operations_pb2.OperationMetadata,
)
def deploy_model(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deploys model. Returns a ``DeployModelResponse`` in the ``response``
field when it completes.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')
>>>
>>> response = client.deploy_model(name)
Args:
name (str): Resource name of the model to deploy.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "deploy_model" not in self._inner_api_calls:
self._inner_api_calls[
"deploy_model"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.deploy_model,
default_retry=self._method_configs["DeployModel"].retry,
default_timeout=self._method_configs["DeployModel"].timeout,
client_info=self._client_info,
)
request = service_pb2.DeployModelRequest(name=name)
return self._inner_api_calls["deploy_model"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def undeploy_model(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Undeploys model. Returns an ``UndeployModelResponse`` in the
``response`` field when it completes.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')
>>>
>>> response = client.undeploy_model(name)
Args:
name (str): Resource name of the model to undeploy.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "undeploy_model" not in self._inner_api_calls:
self._inner_api_calls[
"undeploy_model"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.undeploy_model,
default_retry=self._method_configs["UndeployModel"].retry,
default_timeout=self._method_configs["UndeployModel"].timeout,
client_info=self._client_info,
)
request = service_pb2.UndeployModelRequest(name=name)
return self._inner_api_calls["undeploy_model"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def get_model_evaluation(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets a model evaluation.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.model_evaluation_path('[PROJECT]', '[LOCATION]', '[MODEL]', '[MODEL_EVALUATION]')
>>>
>>> response = client.get_model_evaluation(name)
Args:
name (str): Resource name for the model evaluation.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_model_evaluation" not in self._inner_api_calls:
self._inner_api_calls[
"get_model_evaluation"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_model_evaluation,
default_retry=self._method_configs["GetModelEvaluation"].retry,
default_timeout=self._method_configs["GetModelEvaluation"].timeout,
client_info=self._client_info,
)
request = service_pb2.GetModelEvaluationRequest(name=name)
return self._inner_api_calls["get_model_evaluation"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_model_evaluations(
self,
parent,
filter_=None,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists model evaluations.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> parent = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')
>>>
>>> # Iterate over all results
>>> for element in client.list_model_evaluations(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_model_evaluations(parent).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): Resource name of the model to list the model evaluations for.
If modelId is set as "-", this will list model evaluations from across all
models of the parent location.
filter_ (str): An expression for filtering the results of the request.
- ``annotation_spec_id`` - for =, != or existence. See example below
for the last.
Some examples of using the filter are:
- ``annotation_spec_id!=4`` --> The model evaluation was done for
annotation spec with ID different than 4.
- ``NOT annotation_spec_id:*`` --> The model evaluation was done for
aggregate of all annotation specs.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_model_evaluations" not in self._inner_api_calls:
self._inner_api_calls[
"list_model_evaluations"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_model_evaluations,
default_retry=self._method_configs["ListModelEvaluations"].retry,
default_timeout=self._method_configs["ListModelEvaluations"].timeout,
client_info=self._client_info,
)
request = service_pb2.ListModelEvaluationsRequest(
parent=parent, filter=filter_, page_size=page_size
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_model_evaluations"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="model_evaluation",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
| 42.198899 | 128 | 0.596512 |
7c87af0c38dbd1633d14f5192f2da57d1ebe0d89 | 73,923 | py | Python | addons/project/models/project.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/project/models/project.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/project/models/project.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import ast
from datetime import timedelta, datetime
from random import randint
from odoo import api, fields, models, tools, SUPERUSER_ID, _
from odoo.exceptions import UserError, AccessError, ValidationError, RedirectWarning
from odoo.tools.misc import format_date, get_lang
from odoo.osv.expression import OR
from .project_task_recurrence import DAYS, WEEKS
def write(self, vals):
allowed_users_changed = 'allowed_portal_user_ids' in vals or 'allowed_internal_user_ids' in vals
if allowed_users_changed:
allowed_users = {project: project.allowed_user_ids for project in self}
# directly compute is_favorite to dodge allow write access right
if 'is_favorite' in vals:
vals.pop('is_favorite')
self._fields['is_favorite'].determine_inverse(self)
res = super(Project, self).write(vals) if vals else True
if allowed_users_changed:
for project in self:
permission_removed = allowed_users.get(project) - project.allowed_user_ids
allowed_portal_users_removed = permission_removed.filtered('share')
project.message_unsubscribe(allowed_portal_users_removed.partner_id.commercial_partner_id.ids)
for task in project.task_ids:
task.allowed_user_ids -= permission_removed
if 'allow_recurring_tasks' in vals and not vals.get('allow_recurring_tasks'):
self.env['project.task'].search([('project_id', 'in', self.ids), ('recurring_task', '=', True)]).write({'recurring_task': False})
if 'active' in vals:
# archiving/unarchiving a project does it on its tasks, too
self.with_context(active_test=False).mapped('tasks').write({'active': vals['active']})
if vals.get('partner_id') or vals.get('privacy_visibility'):
for project in self.filtered(lambda project: project.privacy_visibility == 'portal'):
project.allowed_user_ids |= project.partner_id.user_ids
return res
def action_unlink(self):
wizard = self.env['project.delete.wizard'].create({
'project_ids': self.ids
})
return {
'name': _('Confirmation'),
'view_mode': 'form',
'res_model': 'project.delete.wizard',
'views': [(self.env.ref('project.project_delete_wizard_form').id, 'form')],
'type': 'ir.actions.act_window',
'res_id': wizard.id,
'target': 'new',
'context': self.env.context,
}
def unlink(self):
# Check project is empty
for project in self.with_context(active_test=False):
if project.tasks:
raise UserError(_('You cannot delete a project containing tasks. You can either archive it or first delete all of its tasks.'))
# Delete the empty related analytic account
analytic_accounts_to_delete = self.env['account.analytic.account']
for project in self:
if project.analytic_account_id and not project.analytic_account_id.line_ids:
analytic_accounts_to_delete |= project.analytic_account_id
result = super(Project, self).unlink()
analytic_accounts_to_delete.unlink()
return result
def message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None):
"""
Subscribe to all existing active tasks when subscribing to a project
And add the portal user subscribed to allowed portal users
"""
res = super(Project, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids)
project_subtypes = self.env['mail.message.subtype'].browse(subtype_ids) if subtype_ids else None
task_subtypes = (project_subtypes.mapped('parent_id') | project_subtypes.filtered(lambda sub: sub.internal or sub.default)).ids if project_subtypes else None
if not subtype_ids or task_subtypes:
self.mapped('tasks').message_subscribe(
partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=task_subtypes)
if partner_ids:
all_users = self.env['res.partner'].browse(partner_ids).user_ids
portal_users = all_users.filtered('share')
internal_users = all_users - portal_users
self.allowed_portal_user_ids |= portal_users
self.allowed_internal_user_ids |= internal_users
return res
def message_unsubscribe(self, partner_ids=None, channel_ids=None):
""" Unsubscribe from all tasks when unsubscribing from a project """
self.mapped('tasks').message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids)
return super(Project, self).message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids)
def _alias_get_creation_values(self):
values = super(Project, self)._alias_get_creation_values()
values['alias_model_id'] = self.env['ir.model']._get('project.task').id
if self.id:
values['alias_defaults'] = defaults = ast.literal_eval(self.alias_defaults or "{}")
defaults['project_id'] = self.id
return values
# ---------------------------------------------------
# Actions
# ---------------------------------------------------
def action_view_account_analytic_line(self):
""" return the action to see all the analytic lines of the project's analytic account """
action = self.env["ir.actions.actions"]._for_xml_id("analytic.account_analytic_line_action")
action['context'] = {'default_account_id': self.analytic_account_id.id}
action['domain'] = [('account_id', '=', self.analytic_account_id.id)]
return action
def action_view_all_rating(self):
""" return the action to see all the rating of the project and activate default filters"""
action = self.env['ir.actions.act_window']._for_xml_id('project.rating_rating_action_view_project_rating')
action['name'] = _('Ratings of %s') % (self.name,)
action_context = ast.literal_eval(action['context']) if action['context'] else {}
action_context.update(self._context)
action_context['search_default_parent_res_name'] = self.name
action_context.pop('group_by', None)
return dict(action, context=action_context)
# ---------------------------------------------------
# Business Methods
# ---------------------------------------------------
# ---------------------------------------------------
# Rating business
# ---------------------------------------------------
# This method should be called once a day by the scheduler
def _is_recurrence_valid(self):
self.ensure_one()
return self.repeat_interval > 0 and\
(not self.repeat_show_dow or self._get_weekdays()) and\
(self.repeat_type != 'after' or self.repeat_number) and\
(self.repeat_type != 'until' or self.repeat_until and self.repeat_until > fields.Date.today())
def _inverse_partner_email(self):
for task in self:
if task.partner_id and task.partner_email != task.partner_id.email:
task.partner_id.email = task.partner_email
def _compute_attachment_ids(self):
for task in self:
attachment_ids = self.env['ir.attachment'].search([('res_id', '=', task.id), ('res_model', '=', 'project.task')]).ids
message_attachment_ids = task.mapped('message_ids.attachment_ids').ids # from mail_thread
task.attachment_ids = [(6, 0, list(set(attachment_ids) - set(message_attachment_ids)))]
def _compute_access_url(self):
super(Task, self)._compute_access_url()
for task in self:
task.access_url = '/my/task/%s' % task.id
def message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None):
"""
Add the users subscribed to allowed portal users
"""
res = super(Task, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids)
if partner_ids:
new_allowed_users = self.env['res.partner'].browse(partner_ids).user_ids.filtered('share')
tasks = self.filtered(lambda task: task.project_id.privacy_visibility == 'portal')
tasks.sudo().write({'allowed_user_ids': [(4, user.id) for user in new_allowed_users]})
return res
# ----------------------------------------
# Case management
# ----------------------------------------
def stage_find(self, section_id, domain=[], order='sequence'):
""" Override of the base.stage method
Parameter of the stage search taken from the lead:
- section_id: if set, stages must belong to this section or
be a default stage; if not set, stages must be default
stages
"""
# collect all section_ids
section_ids = []
if section_id:
section_ids.append(section_id)
section_ids.extend(self.mapped('project_id').ids)
search_domain = []
if section_ids:
search_domain = [('|')] * (len(section_ids) - 1)
for section_id in section_ids:
search_domain.append(('project_ids', '=', section_id))
search_domain += list(domain)
# perform search, return the first found
return self.env['project.task.type'].search(search_domain, order=order, limit=1).id
# ------------------------------------------------
# CRUD overrides
# ------------------------------------------------
def write(self, vals):
now = fields.Datetime.now()
if 'parent_id' in vals and vals['parent_id'] in self.ids:
raise UserError(_("Sorry. You can't set a task as its parent task."))
if 'active' in vals and not vals.get('active') and any(self.mapped('recurrence_id')):
# TODO: show a dialog to stop the recurrence
raise UserError(_('You cannot archive recurring tasks. Please, disable the recurrence first.'))
# stage change: update date_last_stage_update
if 'stage_id' in vals:
vals.update(self.update_date_end(vals['stage_id']))
vals['date_last_stage_update'] = now
# reset kanban state when changing stage
if 'kanban_state' not in vals:
vals['kanban_state'] = 'normal'
# user_id change: update date_assign
if vals.get('user_id') and 'date_assign' not in vals:
vals['date_assign'] = now
# recurrence fields
rec_fields = vals.keys() & self._get_recurrence_fields()
if rec_fields:
rec_values = {rec_field: vals[rec_field] for rec_field in rec_fields}
for task in self:
if task.recurrence_id:
task.recurrence_id.write(rec_values)
elif vals.get('recurring_task'):
rec_values['next_recurrence_date'] = fields.Datetime.today()
recurrence = self.env['project.task.recurrence'].create(rec_values)
task.recurrence_id = recurrence.id
if 'recurring_task' in vals and not vals.get('recurring_task'):
self.recurrence_id.unlink()
tasks = self
recurrence_update = vals.pop('recurrence_update', 'this')
if recurrence_update != 'this':
recurrence_domain = []
if recurrence_update == 'subsequent':
for task in self:
recurrence_domain = OR([recurrence_domain, ['&', ('recurrence_id', '=', task.recurrence_id.id), ('create_date', '>=', task.create_date)]])
else:
recurrence_domain = [('recurrence_id', 'in', self.recurrence_id.ids)]
tasks |= self.env['project.task'].search(recurrence_domain)
result = super(Task, tasks).write(vals)
# rating on stage
if 'stage_id' in vals and vals.get('stage_id'):
self.filtered(lambda x: x.project_id.rating_active and x.project_id.rating_status == 'stage')._send_task_rating_mail(force_send=True)
return result
def update_date_end(self, stage_id):
project_task_type = self.env['project.task.type'].browse(stage_id)
if project_task_type.fold or project_task_type.is_closed:
return {'date_end': fields.Datetime.now()}
return {'date_end': False}
def unlink(self):
if any(self.mapped('recurrence_id')):
# TODO: show a dialog to stop the recurrence
raise UserError(_('You cannot delete recurring tasks. Please, disable the recurrence first.'))
return super().unlink()
# ---------------------------------------------------
# Subtasks
# ---------------------------------------------------
# ---------------------------------------------------
# Mail gateway
# ---------------------------------------------------
def _track_template(self, changes):
res = super(Task, self)._track_template(changes)
test_task = self[0]
if 'stage_id' in changes and test_task.stage_id.mail_template_id:
res['stage_id'] = (test_task.stage_id.mail_template_id, {
'auto_delete_message': True,
'subtype_id': self.env['ir.model.data'].xmlid_to_res_id('mail.mt_note'),
'email_layout_xmlid': 'mail.mail_notification_light'
})
return res
def _creation_subtype(self):
return self.env.ref('project.mt_task_new')
def _track_subtype(self, init_values):
self.ensure_one()
if 'kanban_state_label' in init_values and self.kanban_state == 'blocked':
return self.env.ref('project.mt_task_blocked')
elif 'kanban_state_label' in init_values and self.kanban_state == 'done':
return self.env.ref('project.mt_task_ready')
elif 'stage_id' in init_values:
return self.env.ref('project.mt_task_stage')
return super(Task, self)._track_subtype(init_values)
def _notify_get_groups(self, msg_vals=None):
""" Handle project users and managers recipients that can assign
tasks and create new one directly from notification emails. Also give
access button to portal users and portal customers. If they are notified
they should probably have access to the document. """
groups = super(Task, self)._notify_get_groups(msg_vals=msg_vals)
local_msg_vals = dict(msg_vals or {})
self.ensure_one()
project_user_group_id = self.env.ref('project.group_project_user').id
group_func = lambda pdata: pdata['type'] == 'user' and project_user_group_id in pdata['groups']
if self.project_id.privacy_visibility == 'followers':
allowed_user_ids = self.project_id.allowed_internal_user_ids.partner_id.ids
group_func = lambda pdata: pdata['type'] == 'user' and project_user_group_id in pdata['groups'] and pdata['id'] in allowed_user_ids
new_group = ('group_project_user', group_func, {})
if not self.user_id and not self.stage_id.fold:
take_action = self._notify_get_action_link('assign', **local_msg_vals)
project_actions = [{'url': take_action, 'title': _('I take it')}]
new_group[2]['actions'] = project_actions
groups = [new_group] + groups
if self.project_id.privacy_visibility == 'portal':
allowed_user_ids = self.project_id.allowed_portal_user_ids.partner_id.ids
groups.insert(0, (
'allowed_portal_users',
lambda pdata: pdata['type'] == 'portal' and pdata['id'] in allowed_user_ids,
{}
))
portal_privacy = self.project_id.privacy_visibility == 'portal'
for group_name, group_method, group_data in groups:
if group_name in ('customer', 'user') or group_name == 'portal_customer' and not portal_privacy:
group_data['has_button_access'] = False
elif group_name == 'portal_customer' and portal_privacy:
group_data['has_button_access'] = True
return groups
def _notify_get_reply_to(self, default=None, records=None, company=None, doc_names=None):
""" Override to set alias of tasks to their project if any. """
aliases = self.sudo().mapped('project_id')._notify_get_reply_to(default=default, records=None, company=company, doc_names=None)
res = {task.id: aliases.get(task.project_id.id) for task in self}
leftover = self.filtered(lambda rec: not rec.project_id)
if leftover:
res.update(super(Task, leftover)._notify_get_reply_to(default=default, records=None, company=company, doc_names=doc_names))
return res
def email_split(self, msg):
email_list = tools.email_split((msg.get('to') or '') + ',' + (msg.get('cc') or ''))
# check left-part is not already an alias
aliases = self.mapped('project_id.alias_name')
return [x for x in email_list if x.split('@')[0] not in aliases]
def message_update(self, msg, update_vals=None):
""" Override to update the task according to the email. """
email_list = self.email_split(msg)
partner_ids = [p.id for p in self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=self, force_create=False) if p]
self.message_subscribe(partner_ids)
return super(Task, self).message_update(msg, update_vals=update_vals)
# If depth == 1, return only direct children
# If depth == 3, return children to third generation
# If depth <= 0, return all children without depth limit
# ---------------------------------------------------
# Rating business
# ---------------------------------------------------
class ProjectTags(models.Model):
""" Tags of project's tasks """
_name = "project.tags"
_description = "Project Tags"
name = fields.Char('Name', required=True)
color = fields.Integer(string='Color', default=_get_default_color)
_sql_constraints = [
('name_uniq', 'unique (name)', "Tag name already exists!"),
]
| 51.550209 | 249 | 0.645577 |
7c8849369fcbb1dad3eb48e7b50645532c6e90e9 | 1,670 | py | Python | app/config.py | Maethorin/pivocram | f1709f5ee76d0280601efa87f3af8e89c2968f43 | ["MIT"] | 5 | 2016-04-02T15:07:03.000Z | 2021-06-25T14:48:55.000Z | app/config.py | Maethorin/pivocram | f1709f5ee76d0280601efa87f3af8e89c2968f43 | ["MIT"] | 2 | 2016-04-28T20:14:04.000Z | 2016-05-01T18:37:05.000Z | app/config.py | Maethorin/pivocram | f1709f5ee76d0280601efa87f3af8e89c2968f43 | ["MIT"] | 1 | 2018-07-27T10:52:04.000Z | 2018-07-27T10:52:04.000Z |
# -*- coding: utf-8 -*-
"""
Config file for environment variables
"""
import os
from importlib import import_module
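# 'ConfigClassNotFound' is raised below but not defined in this record; a
# minimal assumed stand-in so the module is self-contained:
class ConfigClassNotFound(Exception):
    """Raised when APP_SETTINGS does not name an importable config class."""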
def get_config():
"""
Get the Config Class instance defined in APP_SETTINGS environment variable
:return The config class instance
:rtype: Config
"""
config_imports = os.environ['APP_SETTINGS'].split('.')
config_class_name = config_imports[-1]
config_module = import_module('.'.join(config_imports[:-1]))
config_class = getattr(config_module, config_class_name, None)
if not config_class:
raise ConfigClassNotFound('Unable to find a config class in {}'.format(os.environ['APP_SETTINGS']))
    return config_class()
| 23.521127 | 115 | 0.671856 |
7c88b8dca0946deb62b53070c85ee8a8bd47974e | 845 | py | Python | initial_load.py | hongyuanChrisLi/RealEstateDBConvert | 0fd04f5213ff3fd3548db3f322828bd80cf41791 | ["Apache-2.0"] | null | null | null | initial_load.py | hongyuanChrisLi/RealEstateDBConvert | 0fd04f5213ff3fd3548db3f322828bd80cf41791 | ["Apache-2.0"] | null | null | null | initial_load.py | hongyuanChrisLi/RealEstateDBConvert | 0fd04f5213ff3fd3548db3f322828bd80cf41791 | ["Apache-2.0"] | null | null | null |
from mysql_dao.select_dao import SelectDao as MysqlSelectDao
from postgres_dao.ddl_dao import DdlDao
from postgres_dao.dml_dao import DmlDao as PsqlDmlDao
psql_ddl_dao = DdlDao()
mysql_select_dao = MysqlSelectDao()
psql_dml_dao = PsqlDmlDao()
psql_ddl_dao.create_tables()
county_data = mysql_select_dao.select_all_counties()
psql_dml_dao.insert_county(county_data)
city_data = mysql_select_dao.select_all_cities()
psql_dml_dao.insert_city(city_data)
zipcode_data = mysql_select_dao.select_all_zipcodes()
psql_dml_dao.insert_zipcode(zipcode_data)
data = mysql_select_dao.select_full_addr_month_rpt()
psql_dml_dao.trunc_addr_month_rpt()
psql_dml_dao.insert_addr_month_rpt(data)
data = mysql_select_dao.select_full_mls_daily_rpt()
psql_dml_dao.trunc_mls_rpt()
psql_dml_dao.insert_mls_rpt(data)
mysql_select_dao.close()
psql_dml_dao.close()
| 28.166667 | 60 | 0.857988 |
7c88cdba00ccf459ff19909681f6bd97e0741c61 | 6,306 | py | Python | pytests/docs/docs.py | ramalingam-cb/testrunner | 81cea7a5a493cf0c67fca7f97c667cd3c6ad2142 | ["Apache-2.0"] | null | null | null | pytests/docs/docs.py | ramalingam-cb/testrunner | 81cea7a5a493cf0c67fca7f97c667cd3c6ad2142 | ["Apache-2.0"] | null | null | null | pytests/docs/docs.py | ramalingam-cb/testrunner | 81cea7a5a493cf0c67fca7f97c667cd3c6ad2142 | ["Apache-2.0"] | null | null | null |
import time
import logger
from basetestcase import BaseTestCase
from couchbase_helper.documentgenerator import DocumentGenerator
from membase.api.rest_client import RestConnection
from couchbase_helper.documentgenerator import BlobGenerator
| 55.80531 | 121 | 0.579607 |
7c898d721c85859465a77ce43f10791adda1d063 | 1,890 | py | Python | lichthi.py | truongaxin123/lichthidtu | 77ba75974769ab1fdd1281b6088a1734dc0a3a83 | ["MIT"] | null | null | null | lichthi.py | truongaxin123/lichthidtu | 77ba75974769ab1fdd1281b6088a1734dc0a3a83 | ["MIT"] | null | null | null | lichthi.py | truongaxin123/lichthidtu | 77ba75974769ab1fdd1281b6088a1734dc0a3a83 | ["MIT"] | null | null | null |
from bs4 import BeautifulSoup
import requests
from urllib.request import urlretrieve
ROOT = 'http://pdaotao.duytan.edu.vn'
# a = get_excel_url('http://pdaotao.duytan.edu.vn/EXAM_LIST_Detail/?ID=52289&lang=VN')
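# The helper definitions are not included in this record. A minimal sketch of
# what they might look like, inferred from the imports and the commented call
# above (page layout, selectors and link handling are assumptions):
def get_excel_url(detail_url):
    # Fetch a detail page and return the absolute URL of the first linked Excel file.
    soup = BeautifulSoup(requests.get(detail_url).content, 'html.parser')
    link = soup.find('a', href=lambda h: h and h.lower().endswith(('.xls', '.xlsx')))
    return ROOT + link['href'] if link else None
def main():
    # Hypothetical entry point: download the exam-schedule spreadsheet.
    url = get_excel_url(ROOT + '/EXAM_LIST_Detail/?ID=52289&lang=VN')
    if url:
        urlretrieve(url, url.split('/')[-1])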
main()
| 35 | 93 | 0.595767 |
7c8a2cc8e8cd0ae17cdb81c0889eb3b2e10339c2 | 10,998 | py | Python | appengine/uploader/main.py | isabella232/feedloader | c0417480804d406a83d1aedcb7e7d719058fdbfd | ["Apache-2.0"] | 5 | 2021-02-15T12:49:12.000Z | 2022-01-12T06:28:41.000Z | appengine/uploader/main.py | google/feedloader | f6a25569bc3d7d4ee326961fd3b01e45fc3858e4 | ["Apache-2.0"] | 1 | 2021-06-18T15:30:16.000Z | 2021-06-18T15:30:16.000Z | appengine/uploader/main.py | isabella232/feedloader | c0417480804d406a83d1aedcb7e7d719058fdbfd | ["Apache-2.0"] | 4 | 2021-02-16T17:28:00.000Z | 2021-06-18T15:27:52.000Z |
# coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Uploader module that handles batch jobs sent from Task Queue.
This module receives batch jobs from TaskQueue. For each job, the module loads
data from BigQuery and sends it to Merchant Center.
"""
import http
import json
import logging
import socket
from typing import List, Tuple
import flask
from google.cloud import bigquery
from google.cloud import logging as cloud_logging
from googleapiclient import errors
import batch_creator
import bigquery_client
import constants
import content_api_client
import result_recorder
import shoptimizer_client
from models import failure
from models import process_result
from models import upload_task
app = flask.Flask(__name__)
_logging_client = cloud_logging.Client()
_logging_client.setup_logging(log_level=logging.DEBUG)
_SHOPTIMIZER_CONFIG_FILE_PATH = 'config/shoptimizer_config.json'
OPERATION_TO_METHOD = {
constants.Operation.UPSERT: constants.Method.INSERT,
constants.Operation.DELETE: constants.Method.DELETE,
constants.Operation.PREVENT_EXPIRING: constants.Method.INSERT
}
# Used to check if this is the last retry for alerting purposes.
# Should match task_retry_limit in appengine/initiator/queue.yaml.
TASK_RETRY_LIMIT = 5
def _run_process(operation: constants.Operation) -> Tuple[str, http.HTTPStatus]:
"""Handles tasks pushed from Task Queue.
When tasks are enqueued to Task Queue by initiator, this method will be
called. It extracts necessary information from a Task Queue message. The
following processes are executed in this function:
- Loading items to process from BigQuery.
- Converts items into a batch that can be sent to Content API for Shopping.
- Sending items to Content API for Shopping (Merchant Center).
- Records the results of the Content API for Shopping call.
Args:
operation: Type of operation to perform on the items.
Returns:
The result of HTTP request.
"""
request_body = json.loads(flask.request.data.decode('utf-8'))
task = upload_task.UploadTask.from_json(request_body)
if task.batch_size == 0:
return 'OK', http.HTTPStatus.OK
batch_number = int(task.start_index / task.batch_size) + 1
logging.info(
'%s started. Batch #%d info: start_index: %d, batch_size: %d,'
'initiation timestamp: %s', operation.value, batch_number,
task.start_index, task.batch_size, task.timestamp)
try:
items = _load_items_from_bigquery(operation, task)
except errors.HttpError:
return 'Error loading items from BigQuery', http.HTTPStatus.INTERNAL_SERVER_ERROR
result = process_result.ProcessResult([], [], [])
try:
if not items:
logging.error(
'Batch #%d, operation %s: 0 items loaded from BigQuery so batch not sent to Content API. Start_index: %d, batch_size: %d,'
'initiation timestamp: %s', batch_number, operation.value,
task.start_index, task.batch_size, task.timestamp)
return 'No items to process', http.HTTPStatus.OK
method = OPERATION_TO_METHOD.get(operation)
# Creates batch from items loaded from BigQuery
original_batch, skipped_item_ids, batch_id_to_item_id = batch_creator.create_batch(
batch_number, items, method)
# Optimizes batch via Shoptimizer for upsert/prevent_expiring operations
if operation != constants.Operation.DELETE and constants.SHOPTIMIZER_API_INTEGRATION_ON:
batch_to_send_to_content_api = _create_optimized_batch(
original_batch, batch_number, operation)
else:
batch_to_send_to_content_api = original_batch
# Sends batch of items to Content API for Shopping
api_client = content_api_client.ContentApiClient()
successful_item_ids, item_failures = api_client.process_items(
batch_to_send_to_content_api, batch_number, batch_id_to_item_id, method)
result = process_result.ProcessResult(
successfully_processed_item_ids=successful_item_ids,
content_api_failures=item_failures,
skipped_item_ids=skipped_item_ids)
except errors.HttpError as http_error:
error_status_code = http_error.resp.status
error_reason = http_error.resp.reason
result = _handle_content_api_error(error_status_code, error_reason,
batch_number, http_error, items,
operation, task)
return error_reason, error_status_code
except socket.timeout as timeout_error:
error_status_code = http.HTTPStatus.REQUEST_TIMEOUT
error_reason = 'Socket timeout'
result = _handle_content_api_error(error_status_code, error_reason,
batch_number, timeout_error, items,
operation, task)
return error_reason, error_status_code
else:
logging.info(
'Batch #%d with operation %s and initiation timestamp %s successfully processed %s items, failed to process %s items and skipped %s items.',
batch_number, operation.value, task.timestamp,
result.get_success_count(), result.get_failure_count(),
result.get_skipped_count())
finally:
recorder = result_recorder.ResultRecorder.from_service_account_json(
constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_MONITORING,
constants.TABLE_ID_FOR_RESULT_COUNTS_MONITORING,
constants.TABLE_ID_FOR_ITEM_RESULTS_MONITORING)
recorder.insert_result(operation.value, result, task.timestamp,
batch_number)
return 'OK', http.HTTPStatus.OK
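# The Flask route bindings are not part of this record; hypothetical wrappers
# (endpoint paths and function names assumed) showing how _run_process would
# be exposed to Cloud Tasks:
@app.route('/insert_items', methods=['POST'])
def run_insert_process() -> Tuple[str, http.HTTPStatus]:
  """Assumed endpoint: upserts items pushed from Task Queue."""
  return _run_process(constants.Operation.UPSERT)
@app.route('/delete_items', methods=['POST'])
def run_delete_process() -> Tuple[str, http.HTTPStatus]:
  """Assumed endpoint: deletes items pushed from Task Queue."""
  return _run_process(constants.Operation.DELETE)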
def _load_items_from_bigquery(
operation: constants.Operation,
task: upload_task.UploadTask) -> List[bigquery.Row]:
"""Loads items from BigQuery.
Args:
operation: The operation to be performed on this batch of items.
task: The Cloud Task object that initiated this request.
Returns:
The list of items loaded from BigQuery.
"""
table_id = f'process_items_to_{operation.value}_{task.timestamp}'
bq_client = bigquery_client.BigQueryClient.from_service_account_json(
constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_PROCESSING,
table_id)
try:
items_iterator = bq_client.load_items(task.start_index, task.batch_size)
except errors.HttpError as http_error:
logging.exception(
'Error loading items from %s.%s. HTTP status: %s. Error: %s',
constants.DATASET_ID_FOR_PROCESSING, table_id, http_error.resp.status,
http_error.resp.reason)
raise
return list(items_iterator)
def _create_optimized_batch(batch: constants.Batch, batch_number: int,
operation: constants.Operation) -> constants.Batch:
"""Creates an optimized batch by calling the Shoptimizer API.
Args:
batch: The batch of product data to be optimized.
batch_number: The number that identifies this batch.
operation: The operation to be performed on this batch (upsert, delete,
prevent_expiring).
Returns:
The batch returned from the Shoptimizer API Client.
"""
try:
optimization_client = shoptimizer_client.ShoptimizerClient(
batch_number, operation)
except (OSError, ValueError):
return batch
return optimization_client.shoptimize(batch)
def _handle_content_api_error(
error_status_code: int, error_reason: str, batch_num: int, error: Exception,
item_rows: List[bigquery.Row], operation: constants.Operation,
task: upload_task.UploadTask) -> process_result.ProcessResult:
"""Logs network related errors returned from Content API and returns a list of item failures.
Args:
error_status_code: HTTP status code from Content API.
error_reason: The reason for the error.
batch_num: The batch number.
error: The error thrown by Content API.
item_rows: The items being processed in this batch.
operation: The operation to be performed on this batch of items.
task: The Cloud Task object that initiated this request.
Returns:
The list of items that failed due to the error, wrapped in a
process_result.
"""
logging.warning(
'Batch #%d with operation %s and initiation timestamp %s failed. HTTP status: %s. Error: %s',
batch_num, operation.value, task.timestamp, error_status_code,
error_reason)
# If the batch API call received an HttpError, mark every id as failed.
item_failures = [
failure.Failure(str(item_row.get('item_id', 'Missing ID')), error_reason)
for item_row in item_rows
]
api_result = process_result.ProcessResult([], item_failures, [])
if content_api_client.suggest_retry(
error_status_code) and _get_execution_attempt() < TASK_RETRY_LIMIT:
logging.warning(
'Batch #%d with operation %s and initiation timestamp %s will be requeued for retry',
batch_num, operation.value, task.timestamp)
else:
logging.error(
'Batch #%d with operation %s and initiation timestamp %s failed and will not be retried. Error: %s',
batch_num, operation.value, task.timestamp, error)
return api_result
def _get_execution_attempt() -> int:
"""Returns the number of times this task has previously been executed.
If the execution count header does not exist, it means the request did not
come from Cloud Tasks.
In this case, there will be no retry, so set execution attempt to the retry
limit.
Returns:
int, the number of times this task has previously been executed.
"""
execution_attempt = flask.request.headers.get(
'X-AppEngine-TaskExecutionCount', '')
if execution_attempt:
return int(execution_attempt)
else:
return TASK_RETRY_LIMIT
if __name__ == '__main__':
# This is used when running locally. Gunicorn is used to run the
# application on Google App Engine. See entrypoint in app.yaml.
app.run(host='127.0.0.1', port=8080, debug=True)
| 38.055363 | 148 | 0.738498 |
7c8a6aee7b7a77f1d1c85df07a12dedc044587d5 | 17,730 | py | Python | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/transforms.py | BadDevCode/lumberyard | 3d688932f919dbf5821f0cb8a210ce24abe39e9e | [
"AML"
] | 1,738 | 2017-09-21T10:59:12.000Z | 2022-03-31T21:05:46.000Z | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/transforms.py | olivier-be/lumberyard | 3d688932f919dbf5821f0cb8a210ce24abe39e9e | [
"AML"
] | 427 | 2017-09-29T22:54:36.000Z | 2022-02-15T19:26:50.000Z | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/transforms.py | olivier-be/lumberyard | 3d688932f919dbf5821f0cb8a210ce24abe39e9e | [
"AML"
] | 671 | 2017-09-21T08:04:01.000Z | 2022-03-29T14:30:07.000Z | """
Implement transformation on Numba IR
"""
from __future__ import absolute_import, print_function
from collections import namedtuple, defaultdict
import logging
from numba.analysis import compute_cfg_from_blocks, find_top_level_loops
from numba import ir, errors, ir_utils
from numba.analysis import compute_use_defs
_logger = logging.getLogger(__name__)
def _extract_loop_lifting_candidates(cfg, blocks):
"""
Returns a list of loops that are candidate for loop lifting
"""
# check well-formed-ness of the loop
def same_exit_point(loop):
"all exits must point to the same location"
outedges = set()
for k in loop.exits:
succs = set(x for x, _ in cfg.successors(k))
if not succs:
# If the exit point has no successor, it contains an return
# statement, which is not handled by the looplifting code.
# Thus, this loop is not a candidate.
_logger.debug("return-statement in loop.")
return False
outedges |= succs
ok = len(outedges) == 1
_logger.debug("same_exit_point=%s (%s)", ok, outedges)
return ok
def one_entry(loop):
"there is one entry"
ok = len(loop.entries) == 1
_logger.debug("one_entry=%s", ok)
return ok
def cannot_yield(loop):
"cannot have yield inside the loop"
insiders = set(loop.body) | set(loop.entries) | set(loop.exits)
for blk in map(blocks.__getitem__, insiders):
for inst in blk.body:
if isinstance(inst, ir.Assign):
if isinstance(inst.value, ir.Yield):
_logger.debug("has yield")
return False
_logger.debug("no yield")
return True
_logger.info('finding looplift candidates')
# the check for cfg.entry_point in the loop.entries is to prevent a bad
# rewrite where a prelude for a lifted loop would get written into block -1
# if a loop entry were in block 0
candidates = []
for loop in find_top_level_loops(cfg):
_logger.debug("top-level loop: %s", loop)
if (same_exit_point(loop) and one_entry(loop) and cannot_yield(loop) and
cfg.entry_point() not in loop.entries):
candidates.append(loop)
_logger.debug("add candidate: %s", loop)
return candidates
def find_region_inout_vars(blocks, livemap, callfrom, returnto, body_block_ids):
"""Find input and output variables to a block region.
"""
inputs = livemap[callfrom]
outputs = livemap[returnto]
# ensure live variables are actually used in the blocks, else remove,
# saves having to create something valid to run through postproc
# to achieve similar
loopblocks = {}
for k in body_block_ids:
loopblocks[k] = blocks[k]
used_vars = set()
def_vars = set()
defs = compute_use_defs(loopblocks)
for vs in defs.usemap.values():
used_vars |= vs
for vs in defs.defmap.values():
def_vars |= vs
used_or_defined = used_vars | def_vars
# note: sorted for stable ordering
inputs = sorted(set(inputs) & used_or_defined)
outputs = sorted(set(outputs) & used_or_defined & def_vars)
return inputs, outputs
_loop_lift_info = namedtuple('loop_lift_info',
'loop,inputs,outputs,callfrom,returnto')
def _loop_lift_get_candidate_infos(cfg, blocks, livemap):
"""
Returns information on looplifting candidates.
"""
loops = _extract_loop_lifting_candidates(cfg, blocks)
loopinfos = []
for loop in loops:
[callfrom] = loop.entries # requirement checked earlier
an_exit = next(iter(loop.exits)) # anyone of the exit block
if len(loop.exits) > 1:
# Pre-Py3.8 may have multiple exits
[(returnto, _)] = cfg.successors(an_exit) # requirement checked earlier
else:
# Post-Py3.8 DO NOT have multiple exits
returnto = an_exit
local_block_ids = set(loop.body) | set(loop.entries)
inputs, outputs = find_region_inout_vars(
blocks=blocks,
livemap=livemap,
callfrom=callfrom,
returnto=returnto,
body_block_ids=local_block_ids,
)
lli = _loop_lift_info(loop=loop, inputs=inputs, outputs=outputs,
callfrom=callfrom, returnto=returnto)
loopinfos.append(lli)
return loopinfos
def _loop_lift_modify_call_block(liftedloop, block, inputs, outputs, returnto):
"""
Transform calling block from top-level function to call the lifted loop.
"""
scope = block.scope
loc = block.loc
blk = ir.Block(scope=scope, loc=loc)
ir_utils.fill_block_with_call(
newblock=blk,
callee=liftedloop,
label_next=returnto,
inputs=inputs,
outputs=outputs,
)
return blk
def _loop_lift_prepare_loop_func(loopinfo, blocks):
"""
Inplace transform loop blocks for use as lifted loop.
"""
entry_block = blocks[loopinfo.callfrom]
scope = entry_block.scope
loc = entry_block.loc
# Lowering assumes the first block to be the one with the smallest offset
firstblk = min(blocks) - 1
blocks[firstblk] = ir_utils.fill_callee_prologue(
block=ir.Block(scope=scope, loc=loc),
inputs=loopinfo.inputs,
label_next=loopinfo.callfrom,
)
blocks[loopinfo.returnto] = ir_utils.fill_callee_epilogue(
block=ir.Block(scope=scope, loc=loc),
outputs=loopinfo.outputs,
)
def _loop_lift_modify_blocks(func_ir, loopinfo, blocks,
typingctx, targetctx, flags, locals):
"""
Modify the block inplace to call to the lifted-loop.
Returns a dictionary of blocks of the lifted-loop.
"""
from numba.dispatcher import LiftedLoop
# Copy loop blocks
loop = loopinfo.loop
loopblockkeys = set(loop.body) | set(loop.entries)
if len(loop.exits) > 1:
# Pre-Py3.8 may have multiple exits
loopblockkeys |= loop.exits
loopblocks = dict((k, blocks[k].copy()) for k in loopblockkeys)
# Modify the loop blocks
_loop_lift_prepare_loop_func(loopinfo, loopblocks)
# Create a new IR for the lifted loop
lifted_ir = func_ir.derive(blocks=loopblocks,
arg_names=tuple(loopinfo.inputs),
arg_count=len(loopinfo.inputs),
force_non_generator=True)
liftedloop = LiftedLoop(lifted_ir,
typingctx, targetctx, flags, locals)
# modify for calling into liftedloop
callblock = _loop_lift_modify_call_block(liftedloop, blocks[loopinfo.callfrom],
loopinfo.inputs, loopinfo.outputs,
loopinfo.returnto)
# remove blocks
for k in loopblockkeys:
del blocks[k]
# update main interpreter callsite into the liftedloop
blocks[loopinfo.callfrom] = callblock
return liftedloop
def loop_lifting(func_ir, typingctx, targetctx, flags, locals):
"""
Loop lifting transformation.
Given a interpreter `func_ir` returns a 2 tuple of
`(toplevel_interp, [loop0_interp, loop1_interp, ....])`
"""
blocks = func_ir.blocks.copy()
cfg = compute_cfg_from_blocks(blocks)
loopinfos = _loop_lift_get_candidate_infos(cfg, blocks,
func_ir.variable_lifetime.livemap)
loops = []
if loopinfos:
_logger.debug('loop lifting this IR with %d candidates:\n%s',
len(loopinfos), func_ir.dump_to_string())
for loopinfo in loopinfos:
lifted = _loop_lift_modify_blocks(func_ir, loopinfo, blocks,
typingctx, targetctx, flags, locals)
loops.append(lifted)
# Make main IR
main = func_ir.derive(blocks=blocks)
return main, loops
def canonicalize_cfg(blocks):
"""
Rewrite the given blocks to canonicalize the CFG.
Returns a new dictionary of blocks.
"""
return canonicalize_cfg_single_backedge(blocks)
def with_lifting(func_ir, typingctx, targetctx, flags, locals):
"""With-lifting transformation
Rewrite the IR to extract all withs.
Only the top-level withs are extracted.
    Returns a tuple ``(new_ir, lifted_with_irs)``.
"""
from numba import postproc
postproc.PostProcessor(func_ir).run() # ensure we have variable lifetime
assert func_ir.variable_lifetime
vlt = func_ir.variable_lifetime
blocks = func_ir.blocks.copy()
# find where with-contexts regions are
withs = find_setupwiths(blocks)
cfg = vlt.cfg
_legalize_withs_cfg(withs, cfg, blocks)
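    # 'dispatcher_factory' is referenced below but its definition is not part
    # of this record; a minimal assumed stand-in, mirroring how LiftedLoop is
    # constructed in loop_lifting above (LiftedWith signature assumed):
    def dispatcher_factory(lifted_ir):
        from numba.dispatcher import LiftedWith
        return LiftedWith(lifted_ir, typingctx, targetctx, flags, locals)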
# For each with-regions, mutate them according to
# the kind of contextmanager
sub_irs = []
for (blk_start, blk_end) in withs:
body_blocks = []
for node in _cfg_nodes_in_region(cfg, blk_start, blk_end):
body_blocks.append(node)
_legalize_with_head(blocks[blk_start])
# Find the contextmanager
cmkind, extra = _get_with_contextmanager(func_ir, blocks, blk_start)
# Mutate the body and get new IR
sub = cmkind.mutate_with_body(func_ir, blocks, blk_start, blk_end,
body_blocks, dispatcher_factory,
extra)
sub_irs.append(sub)
if not sub_irs:
# Unchanged
new_ir = func_ir
else:
new_ir = func_ir.derive(blocks)
return new_ir, sub_irs
def _get_with_contextmanager(func_ir, blocks, blk_start):
"""Get the global object used for the context manager
"""
_illegal_cm_msg = "Illegal use of context-manager."
def get_var_dfn(var):
"""Get the definition given a variable"""
return func_ir.get_definition(var)
def get_ctxmgr_obj(var_ref):
"""Return the context-manager object and extra info.
The extra contains the arguments if the context-manager is used
as a call.
"""
# If the contextmanager used as a Call
dfn = func_ir.get_definition(var_ref)
if isinstance(dfn, ir.Expr) and dfn.op == 'call':
args = [get_var_dfn(x) for x in dfn.args]
kws = {k: get_var_dfn(v) for k, v in dfn.kws}
extra = {'args': args, 'kwargs': kws}
var_ref = dfn.func
else:
extra = None
ctxobj = ir_utils.guard(ir_utils.find_global_value, func_ir, var_ref)
# check the contextmanager object
if ctxobj is ir.UNDEFINED:
raise errors.CompilerError(
"Undefined variable used as context manager",
loc=blocks[blk_start].loc,
)
if ctxobj is None:
raise errors.CompilerError(_illegal_cm_msg, loc=dfn.loc)
return ctxobj, extra
# Scan the start of the with-region for the contextmanager
for stmt in blocks[blk_start].body:
if isinstance(stmt, ir.EnterWith):
var_ref = stmt.contextmanager
ctxobj, extra = get_ctxmgr_obj(var_ref)
if not hasattr(ctxobj, 'mutate_with_body'):
raise errors.CompilerError(
"Unsupported context manager in use",
loc=blocks[blk_start].loc,
)
return ctxobj, extra
# No contextmanager found?
raise errors.CompilerError(
"malformed with-context usage",
loc=blocks[blk_start].loc,
)
def _legalize_with_head(blk):
"""Given *blk*, the head block of the with-context, check that it doesn't
do anything else.
"""
counters = defaultdict(int)
for stmt in blk.body:
counters[type(stmt)] += 1
if counters.pop(ir.EnterWith) != 1:
raise errors.CompilerError(
"with's head-block must have exactly 1 ENTER_WITH",
loc=blk.loc,
)
if counters.pop(ir.Jump) != 1:
raise errors.CompilerError(
"with's head-block must have exactly 1 JUMP",
loc=blk.loc,
)
# Can have any number of del
counters.pop(ir.Del, None)
# There MUST NOT be any other statements
if counters:
raise errors.CompilerError(
"illegal statements in with's head-block",
loc=blk.loc,
)
def _cfg_nodes_in_region(cfg, region_begin, region_end):
"""Find the set of CFG nodes that are in the given region
"""
region_nodes = set()
stack = [region_begin]
while stack:
tos = stack.pop()
succs, _ = zip(*cfg.successors(tos))
nodes = set([node for node in succs
if node not in region_nodes and
node != region_end])
stack.extend(nodes)
region_nodes |= nodes
return region_nodes
def _legalize_withs_cfg(withs, cfg, blocks):
"""Verify the CFG of the with-context(s).
"""
doms = cfg.dominators()
postdoms = cfg.post_dominators()
# Verify that the with-context has no side-exits
for s, e in withs:
loc = blocks[s].loc
if s not in doms[e]:
# Not sure what condition can trigger this error.
msg = "Entry of with-context not dominating the exit."
raise errors.CompilerError(msg, loc=loc)
if e not in postdoms[s]:
msg = (
"Does not support with-context that contain branches "
"(i.e. break/return/raise) that can leave the with-context. "
"Details: exit of with-context not post-dominating the entry. "
)
raise errors.CompilerError(msg, loc=loc)
| 33.579545 | 84 | 0.60846 |
7c8a815c2ee01b343fc690c138951a4c479fece7 | 6,453 | py | Python | tests/test_masked_inference_wsi_dataset.py | HabibMrad/MONAI | 1314701c15623422574b0153d746666dc6004454 | [
"Apache-2.0"
] | 1 | 2022-01-04T21:38:23.000Z | 2022-01-04T21:38:23.000Z | tests/test_masked_inference_wsi_dataset.py | HabibMrad/MONAI | 1314701c15623422574b0153d746666dc6004454 | [
"Apache-2.0"
] | null | null | null | tests/test_masked_inference_wsi_dataset.py | HabibMrad/MONAI | 1314701c15623422574b0153d746666dc6004454 | [
"Apache-2.0"
] | null | null | null | import os
import unittest
from unittest import skipUnless
import numpy as np
from numpy.testing import assert_array_equal
from parameterized import parameterized
from monai.apps.pathology.datasets import MaskedInferenceWSIDataset
from monai.apps.utils import download_url
from monai.utils import optional_import
from tests.utils import skip_if_quick
_, has_cim = optional_import("cucim")
_, has_osl = optional_import("openslide")
FILE_URL = "http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff"
FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", os.path.basename(FILE_URL))
MASK1 = os.path.join(os.path.dirname(__file__), "testing_data", "tissue_mask1.npy")
MASK2 = os.path.join(os.path.dirname(__file__), "testing_data", "tissue_mask2.npy")
MASK4 = os.path.join(os.path.dirname(__file__), "testing_data", "tissue_mask4.npy")
HEIGHT = 32914
WIDTH = 46000
TEST_CASE_0 = [
{
"data": [
{"image": FILE_PATH, "mask": MASK1},
],
"patch_size": 1,
"image_reader_name": "cuCIM",
},
[
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [100, 100],
},
],
]
TEST_CASE_1 = [
{
"data": [{"image": FILE_PATH, "mask": MASK2}],
"patch_size": 1,
"image_reader_name": "cuCIM",
},
[
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [100, 100],
},
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [101, 100],
},
],
]
TEST_CASE_2 = [
{
"data": [{"image": FILE_PATH, "mask": MASK4}],
"patch_size": 1,
"image_reader_name": "cuCIM",
},
[
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [100, 100],
},
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [100, 101],
},
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [101, 100],
},
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [101, 101],
},
],
]
TEST_CASE_3 = [
{
"data": [
{"image": FILE_PATH, "mask": MASK1},
],
"patch_size": 2,
"image_reader_name": "cuCIM",
},
[
{
"image": np.array(
[
[[243, 243], [243, 243]],
[[243, 243], [243, 243]],
[[243, 243], [243, 243]],
],
dtype=np.uint8,
),
"name": "CMU-1",
"mask_location": [100, 100],
},
],
]
TEST_CASE_4 = [
{
"data": [
{"image": FILE_PATH, "mask": MASK1},
{"image": FILE_PATH, "mask": MASK2},
],
"patch_size": 1,
"image_reader_name": "cuCIM",
},
[
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [100, 100],
},
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [100, 100],
},
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [101, 100],
},
],
]
TEST_CASE_OPENSLIDE_0 = [
{
"data": [
{"image": FILE_PATH, "mask": MASK1},
],
"patch_size": 1,
"image_reader_name": "OpenSlide",
},
[
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [100, 100],
},
],
]
TEST_CASE_OPENSLIDE_1 = [
{
"data": [{"image": FILE_PATH, "mask": MASK2}],
"patch_size": 1,
"image_reader_name": "OpenSlide",
},
[
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [100, 100],
},
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [101, 100],
},
],
]
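# The unittest class itself is not included in this record; a hedged sketch
# of how the cases above could be exercised (mask layout, download handling
# and dataset sample indexing are assumptions, not the original code):
def prepare_data():
    # Binary tissue masks whose nonzero indices match the expected mask_locations.
    mask = np.zeros((HEIGHT // 2, WIDTH // 2))
    mask[100, 100] = 1
    np.save(MASK1, mask)
    mask[101, 100] = 1
    np.save(MASK2, mask)
    mask[100:102, 100:102] = 1
    np.save(MASK4, mask)
class TestMaskedInferenceWSIDataset(unittest.TestCase):
    def setUp(self):
        prepare_data()
        download_url(FILE_URL, FILE_PATH)
    @parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4])
    @skipUnless(has_cim, "Requires CuCIM")
    @skip_if_quick
    def test_read_patches_cucim(self, input_parameters, expected):
        dataset = MaskedInferenceWSIDataset(**input_parameters)
        self.compare_samples_expected(dataset, expected)
    @parameterized.expand([TEST_CASE_OPENSLIDE_0, TEST_CASE_OPENSLIDE_1])
    @skipUnless(has_osl, "Requires OpenSlide")
    @skip_if_quick
    def test_read_patches_openslide(self, input_parameters, expected):
        dataset = MaskedInferenceWSIDataset(**input_parameters)
        self.compare_samples_expected(dataset, expected)
    def compare_samples_expected(self, dataset, expected):
        for i in range(len(dataset)):
            assert_array_equal(dataset[i]["image"], expected[i]["image"])
            self.assertEqual(dataset[i]["name"], expected[i]["name"])
            assert_array_equal(dataset[i]["mask_location"], expected[i]["mask_location"])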
if __name__ == "__main__":
unittest.main()
| 27 | 95 | 0.501937 |
7c8d38953001878c9a523157e3f09b0df0983623 | 913 | py | Python | manga_py/providers/doujins_com.py | paulolimac/manga-py | 3d180846750a4e770b5024eb8cd15629362875b1 | ["MIT"] | 1 | 2020-11-19T00:40:49.000Z | 2020-11-19T00:40:49.000Z | manga_py/providers/doujins_com.py | paulolimac/manga-py | 3d180846750a4e770b5024eb8cd15629362875b1 | ["MIT"] | null | null | null | manga_py/providers/doujins_com.py | paulolimac/manga-py | 3d180846750a4e770b5024eb8cd15629362875b1 | ["MIT"] | null | null | null |
from manga_py.provider import Provider
from .helpers.std import Std
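# The provider class body is not included in this record; a skeletal,
# assumed stand-in so the module-level alias below resolves:
class DoujinsCom(Provider, Std):
    # The real provider implements the manga-py hooks
    # (get_main_content, get_manga_name, get_files, ...), elided here.
    pass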
main = DoujinsCom
| 23.410256 | 73 | 0.634173 |
7c8e9965cc893f149c68d0938c7cdd288fb5e3a7 | 980 | py | Python | src/urh/ui/delegates/CheckBoxDelegate.py | awesome-archive/urh | c8c3aabc9d637ca660d8c72c3d8372055e0f3ec7 | ["Apache-2.0"] | 1 | 2017-06-21T02:37:16.000Z | 2017-06-21T02:37:16.000Z | src/urh/ui/delegates/CheckBoxDelegate.py | dspmandavid/urh | 30643c1a68634b1c97eb9989485a4e96a3b038ae | ["Apache-2.0"] | null | null | null | src/urh/ui/delegates/CheckBoxDelegate.py | dspmandavid/urh | 30643c1a68634b1c97eb9989485a4e96a3b038ae | ["Apache-2.0"] | null | null | null |
from PyQt5.QtCore import QModelIndex, QAbstractItemModel, Qt, pyqtSlot
from PyQt5.QtWidgets import QItemDelegate, QWidget, QStyleOptionViewItem, QCheckBox
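# The delegate class is not part of this record; a typical implementation
# consistent with the imports above (details assumed):
class CheckBoxDelegate(QItemDelegate):
    def __init__(self, parent=None):
        super().__init__(parent)
    def createEditor(self, parent: QWidget, option: QStyleOptionViewItem, index: QModelIndex):
        editor = QCheckBox(parent)
        editor.stateChanged.connect(self.stateChanged)
        return editor
    def setEditorData(self, editor: QCheckBox, index: QModelIndex):
        editor.blockSignals(True)
        editor.setChecked(bool(index.model().data(index)))
        editor.blockSignals(False)
    def setModelData(self, editor: QCheckBox, model: QAbstractItemModel, index: QModelIndex):
        model.setData(index, editor.isChecked(), Qt.EditRole)
    @pyqtSlot()
    def stateChanged(self):
        self.commitData.emit(self.sender())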
| 37.692308 | 94 | 0.715306 |
7c8eb61b685c469f781463c9f7be05e90e8308c7 | 1,408 | py | Python | neural_network/backup_casestudy/denbigh/tf_RNN.py | acceleratedmaterials/AMDworkshop_demo | e7c2b931e023fc00ff7494b8acb2181f5c75bc4e | ["MIT"] | 5 | 2019-04-02T03:20:43.000Z | 2021-07-13T18:23:26.000Z | neural_network/backup_casestudy/denbigh/tf_RNN.py | NUS-SSE/AMDworkshop_demo | edbd6c60957dd0d83c3ef43c7e9e28ef1fef3bd9 | ["MIT"] | null | null | null | neural_network/backup_casestudy/denbigh/tf_RNN.py | NUS-SSE/AMDworkshop_demo | edbd6c60957dd0d83c3ef43c7e9e28ef1fef3bd9 | ["MIT"] | 5 | 2019-05-12T17:41:58.000Z | 2021-06-08T04:38:35.000Z |
# -*- coding: utf-8 -*-
'''
Framework: Tensorflow
Training samples: 1600
Validation samples: 400
RNN with 128 units
Optimizer: Adam
Epoch: 100
Loss: Cross Entropy
Activation function: Relu for network and Soft-max for regression
Regularization: Drop-out, keep_prob = 0.5
Accuracy of Validation set: 95%
'''
from __future__ import division, print_function, absolute_import
import tflearn
from tflearn.data_utils import to_categorical, pad_sequences
from data_denbigh import *
X, Y = getDenbighData()
#Hyperparams
neurons_num = 128 # Number of neurons in the RNN layer
keep_prob = 0.5 # Keep probability for the drop-out regularization
learning_rate = 0.001 # Learning rate for mini-batch SGD
batch_size = 32 # Batch size
n_epoch = 100 # Number of epoch
#Data preprocessing / converting the sequences to fixed-length vectors for the network
X = pad_sequences(X, maxlen=5, value=0.)
Y = to_categorical(Y, 2)
#Build the network
net = tflearn.input_data([None, 5])
net = tflearn.embedding(net, input_dim=10000, output_dim=128)
net = tflearn.simple_rnn(net, neurons_num, dropout=keep_prob)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
loss='categorical_crossentropy')
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(X, Y, validation_set=0.2, show_metric=True,
batch_size=batch_size, n_epoch=n_epoch)
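#Quick sanity check after training (illustrative addition; tflearn's
#evaluate returns the mean accuracy on the given set)
print('Accuracy:', model.evaluate(X, Y, batch_size=batch_size))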
model.save('./model.tfl')
| 37.052632 | 76 | 0.769176 |
7c8eba30a07960e7e0f748300f8823eed9acd88c | 5,569 | py | Python | code/tests/test_tile_tf.py | Nocty-chan/cs224n-squad | 0c0b342621e038aba8e20ff411da13dfa173351d | ["Apache-2.0"] | 2 | 2018-04-15T06:13:41.000Z | 2019-07-25T20:22:34.000Z | code/tests/test_tile_tf.py | Nocty-chan/cs224n-squad | 0c0b342621e038aba8e20ff411da13dfa173351d | ["Apache-2.0"] | 1 | 2020-11-10T04:51:36.000Z | 2020-11-10T04:51:36.000Z | code/tests/test_tile_tf.py | Nocty-chan/cs224n-squad | 0c0b342621e038aba8e20ff411da13dfa173351d | ["Apache-2.0"] | 3 | 2018-08-08T08:48:04.000Z | 2020-02-10T09:52:41.000Z |
import numpy as np
import tensorflow as tf
H = 2
N = 2
M = 3
BS = 10
def masked_softmax(logits, mask, dim):
"""
Takes masked softmax over given dimension of logits.
Inputs:
logits: Numpy array. We want to take softmax over dimension dim.
mask: Numpy array of same shape as logits.
Has 1s where there's real data in logits, 0 where there's padding
dim: int. dimension over which to take softmax
Returns:
masked_logits: Numpy array same shape as logits.
This is the same as logits, but with 1e30 subtracted
(i.e. very large negative number) in the padding locations.
prob_dist: Numpy array same shape as logits.
The result of taking softmax over masked_logits in given dimension.
Should be 0 in padding locations.
Should sum to 1 over given dimension.
"""
exp_mask = (1 - tf.cast(mask, 'float64')) * (-1e30) # -large where there's padding, 0 elsewhere
print (exp_mask)
masked_logits = tf.add(logits, exp_mask) # where there's padding, set logits to -large
prob_dist = tf.nn.softmax(masked_logits, dim)
return masked_logits, prob_dist
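# The test_build_* helpers exercised below are not part of this record.
# Hedged reference implementations, reverse-engineered from the expected
# similarity values (S[:, i, j] = w_1.c_i + w_2.q_j + w_3.(c_i * q_j)):
def test_build_similarity(contexts, questions):
    term_c = tf.reduce_sum(contexts * w_1, axis=2)                 # BS x N
    term_q = tf.reduce_sum(questions * w_2, axis=2)                # BS x M
    term_cq = tf.matmul(contexts * w_3,
                        tf.transpose(questions, [0, 2, 1]))        # BS x N x M
    return term_cq + tf.expand_dims(term_c, 2) + tf.expand_dims(term_q, 1)
def test_build_sim_mask():
    # All positions hold real data in this toy example (no padding).
    return tf.constant(np.ones((BS, N, M)))
def test_build_c2q(S, mask, questions):
    _, alpha = masked_softmax(S, mask, 2)                          # BS x N x M
    return tf.matmul(alpha, questions)                             # BS x N x 2H
def test_build_q2c(S, mask, contexts):
    # BiDAF-style question-to-context attention.
    m = tf.reduce_max(S, axis=2)                                   # BS x N
    beta = tf.nn.softmax(m, 1)                                     # BS x N
    q2c = tf.matmul(tf.expand_dims(beta, 1), contexts)             # BS x 1 x 2H
    return m, beta, tf.tile(q2c, [1, N, 1])                        # BS x N x 2H
def test_concatenation(c2q, q2c):
    return tf.concat([c2q, q2c], axis=2)                           # BS x N x 4H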
if __name__ == "__main__":
w_1 = np.array([1., 2., 3., 4.])
w_2 = np.array([5., 6., 7., 8.])
w_3 = np.array([13., 12., 11., 10.])
c = np.array([[[1., 2., 3., 4.], [5., 6., 7., 8.]]]) # BS x N x 2H
q = np.array([[[1., 2., 3., 0.], [5., 6., 7., 4.], [8., 9. , 10., 11.]]]) # BS x M x 2H
c = np.tile(c, [BS, 1, 1])
q = np.tile(q, [BS, 1, 1])
questions = tf.get_variable('questions', initializer=q)
contexts = tf.get_variable('contexts', initializer=c)
S = test_build_similarity(contexts, questions)
mask = test_build_sim_mask()
c2q = test_build_c2q(S, mask, questions)
m, beta, q2c = test_build_q2c(S, mask, contexts)
output = test_concatenation(c2q, q2c)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
S_result, mask_result, c2q_r = sess.run([S, mask, c2q])
actual_result = np.tile(np.array([[228, 772, 1372], [548, 1828, 3140]]), [BS, 1, 1])
assert np.array_equal(actual_result, S_result), 'Arrays are not equal'
print ("Building similarity matrix is successful!")
print ("Context 2 Question attention")
m_r, beta_r, q2c_r = sess.run([m, beta, q2c])
output_r = sess.run(output)
| 41.87218 | 99 | 0.625606 |
7c8edd5a1cedfd0895ce2bb9c6148ce0241c7af7 | 7,174 | py | Python | specutils/tests/test_smoothing.py | hamogu/specutils | b873f2ac9b3c207c9e670246d102f46a9606d6ed | ["BSD-3-Clause"] | null | null | null | specutils/tests/test_smoothing.py | hamogu/specutils | b873f2ac9b3c207c9e670246d102f46a9606d6ed | ["BSD-3-Clause"] | null | null | null | specutils/tests/test_smoothing.py | hamogu/specutils | b873f2ac9b3c207c9e670246d102f46a9606d6ed | ["BSD-3-Clause"] | null | null | null |
import numpy as np
import pytest
from astropy import convolution
from scipy.signal import medfilt
import astropy.units as u
from ..spectra.spectrum1d import Spectrum1D
from ..tests.spectral_examples import simulated_spectra
from ..manipulation.smoothing import (convolution_smooth, box_smooth,
gaussian_smooth, trapezoid_smooth,
median_smooth)
def compare_flux(flux_smooth1, flux_smooth2, flux_original, rtol=0.01):
"""
There are two things to compare for each set of smoothing:
1. Compare the smoothed flux from the astropy machinery vs
the smoothed flux from specutils. This is done by
comparing flux_smooth1 and flux_smooth2.
2. Next we want to compare the smoothed flux to the original
flux. This is a little more difficult as smoothing will
make a difference for median filter, but less so for
convolution based smoothing if the kernel is normalized
(area under the kernel = 1).
In this second case the rtol (relative tolerance) is used
judiciously.
"""
# Compare, element by element, the two smoothed fluxes.
assert np.allclose(flux_smooth1, flux_smooth2)
# Compare the total spectral flux of the smoothed to the original.
assert np.allclose(sum(flux_smooth1), sum(flux_original), rtol=rtol)
def test_smooth_custom_kernel(simulated_spectra):
"""
    Test CustomKernel smoothing with correct parameters.
"""
# Create the original spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
flux_original = spec1.flux
# Create a custom kernel (some weird asymmetric-ness)
numpy_kernel = np.array([0.5, 1, 2, 0.5, 0.2])
numpy_kernel = numpy_kernel / np.sum(numpy_kernel)
custom_kernel = convolution.CustomKernel(numpy_kernel)
flux_smoothed_astropy = convolution.convolve(flux_original, custom_kernel)
# Calculate the custom smoothed
spec1_smoothed = convolution_smooth(spec1, custom_kernel)
compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value)
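def test_smooth_box(simulated_spectra):
    """
    Illustrative companion test (assumed to mirror the original suite):
    box smoothing via specutils should match astropy's Box1DKernel.
    """
    spec1 = simulated_spectra.s1_um_mJy_e1
    flux_original = spec1.flux
    box_kernel = convolution.Box1DKernel(3)
    flux_smoothed_astropy = convolution.convolve(flux_original, box_kernel)
    spec1_smoothed = box_smooth(spec1, 3)
    compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value)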
| 32.609091 | 98 | 0.730694 |
7c909452f19de7c50d60c569038b33d1b55f15c0 | 909 | py | Python | modules/interpolator.py | buulikduong/1d_sgl_solver | 03ce0b362d45acbbd3bb35e7b604ba97982eea92 | ["BSD-2-Clause"] | null | null | null | modules/interpolator.py | buulikduong/1d_sgl_solver | 03ce0b362d45acbbd3bb35e7b604ba97982eea92 | ["BSD-2-Clause"] | null | null | null | modules/interpolator.py | buulikduong/1d_sgl_solver | 03ce0b362d45acbbd3bb35e7b604ba97982eea92 | ["BSD-2-Clause"] | 2 | 2020-09-01T13:02:49.000Z | 2021-08-15T09:10:17.000Z |
"""Module interpolating mathematical functions out of support points"""
from scipy.interpolate import interp1d, lagrange, CubicSpline
def interpolator(x_sup, y_sup, method):
"""Interpolates a mathematical function from a given set of
points using either linear, polynomial or cubic spline for the
interpolation.
Args:
x_sup (list): x-coordinates of the function
y_sup (list): y-coordinates of the function
method (string): name of the interpolation method to be used
Returns:
intfunc: interpolated function
"""
if method == "linear":
intfunc = interp1d(x_sup, y_sup, kind="linear")
return intfunc
elif method == "polynomial":
intfunc = lagrange(x_sup, y_sup)
return intfunc
elif method == "cspline":
intfunc = CubicSpline(x_sup, y_sup, bc_type="natural")
return intfunc
return None
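if __name__ == "__main__":
    # Illustrative usage (not part of the original module): cubic-spline
    # interpolation of y = x**2 from five support points.
    func = interpolator([0, 1, 2, 3, 4], [0, 1, 4, 9, 16], "cspline")
    print(func(1.5))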
| 29.322581 | 71 | 0.672167 |
7c9109fd0312f441ea7db6be13582d7563d361c0 | 196 | py | Python | frappe/patches/v13_0/remove_web_view.py | chentaoz/frappe | ee3c4943bf6177ad3b410cdb0d802af486751a65 | ["MIT"] | 3,755 | 2015-01-06T07:47:43.000Z | 2022-03-31T20:54:23.000Z | frappe/patches/v13_0/remove_web_view.py | chentaoz/frappe | ee3c4943bf6177ad3b410cdb0d802af486751a65 | ["MIT"] | 7,369 | 2015-01-01T19:59:41.000Z | 2022-03-31T23:02:05.000Z | frappe/patches/v13_0/remove_web_view.py | chentaoz/frappe | ee3c4943bf6177ad3b410cdb0d802af486751a65 | ["MIT"] | 2,685 | 2015-01-07T17:51:03.000Z | 2022-03-31T23:16:24.000Z |
import frappe
7c9227a3cbdbdfda32f8e1f7af19e23d5f84fca1 | 946 | py | Python | games.py | cpratim/DSA-Research-Paper | ebb856ef62f8a04aa72380e39afdde958eed529a | [
"MIT"
] | null | null | null | games.py | cpratim/DSA-Research-Paper | ebb856ef62f8a04aa72380e39afdde958eed529a | [
"MIT"
] | null | null | null | games.py | cpratim/DSA-Research-Paper | ebb856ef62f8a04aa72380e39afdde958eed529a | [
"MIT"
] | null | null | null | import json
import matplotlib.pyplot as plt
from pprint import pprint
import numpy as np
from scipy.stats import linregress
from util.stats import *
with open('data/game_stats.json', 'r') as f:
df = json.load(f)
X, y = [], []
for match, stats in df.items():
home, away = stats['home'], stats['away']
if home['mp'] != away['mp'] != '240': continue
try:
ft_dif = float(home['fta']) - float(away['fta'])
pt_dif = float(home['pts']) - float(away['pts'])
if abs(pt_dif) > 10: continue
except:
continue
X.append(ft_dif)
y.append(pt_dif)
c = 0
for f, p in zip(X, y):
if f * p > 0:
c += 1
print(c / len(X))
slope, intercept, r, p, std = linregress(X, y)
f = lambda x: x*slope + intercept
fit_y = [f(min(X)), f(max(X))]
plt.xlabel('Free Throw Attempts')
plt.ylabel('Point Differential')
plt.title('FTA vs Point Differential')
print(correlation(X, y))
plt.plot([min(X), max(X)], fit_y, color = 'red')
plt.scatter(X, y)
plt.show() | 22 | 51 | 0.64482 |
7c924b0af1eb750ce0d3f38bab21b79619b4ba48 | 6,255 | py | Python | src/generate_data.py | gycggd/leaf-classification | b37dd4a6a262562c454038218c1472329e54128b | [
"MIT"
] | null | null | null | src/generate_data.py | gycggd/leaf-classification | b37dd4a6a262562c454038218c1472329e54128b | [
"MIT"
] | null | null | null | src/generate_data.py | gycggd/leaf-classification | b37dd4a6a262562c454038218c1472329e54128b | [
"MIT"
] | null | null | null | import os
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import img_to_array, load_img
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.preprocessing import LabelEncoder, StandardScaler
print('Loading train data ...')
(ID_train, X_num_tr, X_img_tr, y_tr), (ID_val, X_num_val, X_img_val, y_val) = load_train_data()
# Prepare ID-to-label and ID-to-numerical dictionary
ID_y_dic, ID_num_dic = {}, {}
for i in range(len(ID_train)):
ID_y_dic[ID_train[i]] = y_tr[i]
ID_num_dic[ID_train[i]] = X_num_tr[i, :]
print('Loading test data ...')
ID_test, X_num_test, X_img_test = load_test_data()
# Convert label to categorical/one-hot
ID_train, y_tr, y_val = to_categorical(ID_train), to_categorical(y_tr), to_categorical((y_val))
write_val_data()
| 36.794118 | 116 | 0.672422 |
7c9293b09122efb5181f7494471359a909feb339 | 201 | py | Python | 2650-construindo-muralhas.py | ErickSimoes/URI-Online-Judge | 7e6f141db2647b1d0d69951b064bd95b0ce4ba1a | [
"MIT"
] | null | null | null | 2650-construindo-muralhas.py | ErickSimoes/URI-Online-Judge | 7e6f141db2647b1d0d69951b064bd95b0ce4ba1a | [
"MIT"
] | null | null | null | 2650-construindo-muralhas.py | ErickSimoes/URI-Online-Judge | 7e6f141db2647b1d0d69951b064bd95b0ce4ba1a | [
"MIT"
] | 1 | 2019-10-29T16:51:29.000Z | 2019-10-29T16:51:29.000Z | # -*- coding: utf-8 -*-
n, w = map(int, input().split())
for _ in range(n):
entrada = input()
last_space = entrada.rfind(' ')
if int(entrada[last_space:]) > w:
print(entrada[:last_space])
| 20.1 | 36 | 0.59204 |
7c938029fd9d5d4852f7e0ef36d2f9a92b855733 | 2,962 | py | Python | tests/assemblers/test_ensemble.py | yarix/m2cgen | f1aa01e4c70a6d1a8893e27bfbe3c36fcb1e8546 | ["MIT"] | 1 | 2021-05-28T06:59:21.000Z | 2021-05-28T06:59:21.000Z | tests/assemblers/test_ensemble.py | yarix/m2cgen | f1aa01e4c70a6d1a8893e27bfbe3c36fcb1e8546 | ["MIT"] | null | null | null | tests/assemblers/test_ensemble.py | yarix/m2cgen | f1aa01e4c70a6d1a8893e27bfbe3c36fcb1e8546 | ["MIT"] | null | null | null |
from sklearn import ensemble
from m2cgen import assemblers, ast
from tests import utils
| 29.326733 | 79 | 0.502701 |
7c93f115e357ee6abe4ee6a425a0e90b87246382 | 1,834 | py | Python | setup.py | Parquery/pynumenc | f14abab40b7d08c55824bf1da5b2a7026c0a7282 | ["MIT"] | 1 | 2018-11-09T16:16:08.000Z | 2018-11-09T16:16:08.000Z | setup.py | Parquery/numenc-py | f14abab40b7d08c55824bf1da5b2a7026c0a7282 | ["MIT"] | 2 | 2018-11-09T12:51:40.000Z | 2018-11-09T12:53:55.000Z | setup.py | Parquery/pynumenc | f14abab40b7d08c55824bf1da5b2a7026c0a7282 | ["MIT"] | 2 | 2019-02-26T12:40:11.000Z | 2019-06-17T07:42:35.000Z |
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import os
from setuptools import setup, find_packages, Extension
import pynumenc_meta
# pylint: disable=redefined-builtin
here = os.path.abspath(os.path.dirname(__file__)) # pylint: disable=invalid-name
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read() # pylint: disable=invalid-name
setup(
name=pynumenc_meta.__title__,
version=pynumenc_meta.__version__,
description=pynumenc_meta.__description__,
long_description=long_description,
url=pynumenc_meta.__url__,
author=pynumenc_meta.__author__,
author_email=pynumenc_meta.__author_email__,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
],
license='License :: OSI Approved :: MIT License',
keywords='C++ encode decode bytes encoding decoding sorted',
packages=find_packages(exclude=['docs', 'tests']),
install_requires=[],
extras_require={
'dev': [
# yapf: disable,
'docutils>=0.14,<1',
'mypy==0.641',
'hypothesis==3.82.1',
'pygments>=2.2.0,<3',
'pydocstyle>=3.0.0,<4',
'pylint==2.1.1',
'yapf==0.24.0'
# yapf: enable
]
},
ext_modules=[
Extension('numenc', sources=['numenc-cpp/encoder_decoder.cpp'])
],
scripts=['bin/pynumenc'],
py_modules=['pynumenc_meta'],
package_data={'pynumenc': ['py.typed']},
data_files=[('.', ['LICENSE.txt', 'README.rst'])])
| 31.084746 | 81 | 0.630316 |
7c95786ebe742f8164fbbe85994a95220ade7338 | 3,074 | py | Python | Models/License-Plate-Recognition-Nigerian-vehicles-master/License-Plate-Recognition-Nigerian-vehicles-master/ocr.py | nipunjain099/AutoGuard | 8217cd03af7927590ef3a160ecb7d9bc9f50d101 | ["MIT"] | 147 | 2018-12-23T09:44:36.000Z | 2022-03-03T15:38:33.000Z | Models/License-Plate-Recognition-Nigerian-vehicles-master/License-Plate-Recognition-Nigerian-vehicles-master/ocr.py | nipunjain099/AutoGuard | 8217cd03af7927590ef3a160ecb7d9bc9f50d101 | ["MIT"] | 17 | 2018-12-25T16:04:34.000Z | 2022-01-13T00:44:21.000Z | Models/License-Plate-Recognition-Nigerian-vehicles-master/License-Plate-Recognition-Nigerian-vehicles-master/ocr.py | nipunjain099/AutoGuard | 8217cd03af7927590ef3a160ecb7d9bc9f50d101 | ["MIT"] | 77 | 2018-12-19T03:03:14.000Z | 2022-03-13T17:00:38.000Z |
import numpy as np
from skimage.transform import resize
from skimage import measure
from skimage.measure import regionprops
| 43.914286 | 155 | 0.59987 |
7c9666a6d0704c6c5a1d15ed10e9ce79d7670676 | 3,215 | py | Python | project/server/models.py | mvlima/flask-jwt-auth | 6cb210b50888b1e9a41ea9e63a80eafcbe436560 | ["MIT"] | null | null | null | project/server/models.py | mvlima/flask-jwt-auth | 6cb210b50888b1e9a41ea9e63a80eafcbe436560 | ["MIT"] | null | null | null | project/server/models.py | mvlima/flask-jwt-auth | 6cb210b50888b1e9a41ea9e63a80eafcbe436560 | ["MIT"] | null | null | null |
# project/server/models.py
import jwt
import datetime
from project.server import app, db, bcrypt
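# The model definitions are elided from this record; a hedged sketch of the
# usual flask-jwt-auth User model (field names and token claims assumed):
class User(db.Model):
    """ User model for storing user-related details. """
    __tablename__ = "users"
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    email = db.Column(db.String(255), unique=True, nullable=False)
    password = db.Column(db.String(255), nullable=False)
    registered_on = db.Column(db.DateTime, nullable=False)
    def __init__(self, email, password):
        self.email = email
        self.password = bcrypt.generate_password_hash(
            password, app.config.get('BCRYPT_LOG_ROUNDS')
        ).decode()
        self.registered_on = datetime.datetime.now()
    def encode_auth_token(self, user_id):
        """ Generates an auth token with a 1-day expiry (claims assumed). """
        payload = {
            'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1),
            'iat': datetime.datetime.utcnow(),
            'sub': user_id
        }
        return jwt.encode(payload, app.config.get('SECRET_KEY'), algorithm='HS256')
    @staticmethod
    def decode_auth_token(auth_token):
        """ Decodes the auth token; returns the user id or an error string. """
        try:
            payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))
            return payload['sub']
        except jwt.ExpiredSignatureError:
            return 'Signature expired. Please log in again.'
        except jwt.InvalidTokenError:
            return 'Invalid token. Please log in again.'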
| 32.806122 | 90 | 0.612753 |
7c974ea9b476fd86b7ac61a4ae4dbd0512a02f64 | 1,711 | py | Python | letsencrypt/setup.py | ccppuu/certbot | 9fead41aaf93dde0d36d4aef6fded8dd306c1ddc | ["Apache-2.0"] | 1 | 2017-12-20T20:06:11.000Z | 2017-12-20T20:06:11.000Z | letsencrypt/setup.py | cpu/certbot | 9fead41aaf93dde0d36d4aef6fded8dd306c1ddc | ["Apache-2.0"] | null | null | null | letsencrypt/setup.py | cpu/certbot | 9fead41aaf93dde0d36d4aef6fded8dd306c1ddc | ["Apache-2.0"] | null | null | null |
import codecs
import os
import sys
from setuptools import setup
from setuptools import find_packages
def read_file(filename, encoding='utf8'):
"""Read unicode from given file."""
with codecs.open(filename, encoding=encoding) as fd:
return fd.read()
here = os.path.abspath(os.path.dirname(__file__))
readme = read_file(os.path.join(here, 'README.rst'))
# This package is a simple shim around certbot
install_requires = ['certbot']
version = '0.7.0.dev0'
setup(
name='letsencrypt',
version=version,
description="ACME client",
long_description=readme,
url='https://github.com/letsencrypt/letsencrypt',
author="Certbot Project",
author_email='[email protected]',
license='Apache License 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Environment :: Console :: Curses',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Security',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Networking',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
],
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
entry_points={
'console_scripts': [
'letsencrypt = certbot.main:main',
],
},
)
| 27.15873 | 61 | 0.630625 |
7c98495a22a6d3d8755497c989624d8a5c427192 | 60,943 | py | Python | elastalert/alerts.py | dekhrekh/elastalert | 0c1ce30302c575bd0be404582cd452f38c01c774 | ["Apache-2.0"] | null | null | null | elastalert/alerts.py | dekhrekh/elastalert | 0c1ce30302c575bd0be404582cd452f38c01c774 | ["Apache-2.0"] | null | null | null | elastalert/alerts.py | dekhrekh/elastalert | 0c1ce30302c575bd0be404582cd452f38c01c774 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
import copy
import datetime
import json
import logging
import subprocess
import sys
import warnings
from email.mime.text import MIMEText
from email.utils import formatdate
from smtplib import SMTP
from smtplib import SMTP_SSL
from smtplib import SMTPAuthenticationError
from smtplib import SMTPException
from socket import error
import boto3
import requests
import stomp
from exotel import Exotel
from jira.client import JIRA
from jira.exceptions import JIRAError
from requests.exceptions import RequestException
from staticconf.loader import yaml_loader
from texttable import Texttable
from twilio.base.exceptions import TwilioRestException
from twilio.rest import Client as TwilioClient
from util import EAException
from util import elastalert_logger
from util import lookup_es_key
from util import pretty_ts
from util import ts_now
from util import ts_to_dt
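# The alerter implementations (some 60 KB in the original file) are elided
# from this record; a minimal sketch of the base contract they share
# (attribute and method names assumed from the public elastalert docs):
class Alerter(object):
    """ Base class for alert types.
    :param rule: The rule configuration dict for the rule using this alerter.
    """
    required_options = frozenset([])
    def __init__(self, rule):
        self.rule = rule
    def alert(self, matches):
        """ Send an alert for each match; subclasses must override. """
        raise NotImplementedError()
    def get_info(self):
        """ Returns a dict describing the alert for the ES writeback index. """
        return {'type': 'Unknown'}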
| 44.289971 | 137 | 0.607814 |
7c9ab847564a9551bd26274412cd272cd155cf72 | 69,601 | py | Python | tests/unit/python/fledge/services/core/scheduler/test_scheduler.py | DDC-NDRS/fledge-iot_fledge | 27a5e66a55daaab1aca14ce6e66f9f1e6efaef51 | ["Apache-2.0"] | 69 | 2019-12-03T17:54:33.000Z | 2022-03-13T07:05:23.000Z | tests/unit/python/fledge/services/core/scheduler/test_scheduler.py | DDC-NDRS/fledge-iot_fledge | 27a5e66a55daaab1aca14ce6e66f9f1e6efaef51 | ["Apache-2.0"] | 125 | 2020-02-13T15:11:28.000Z | 2022-03-29T14:42:36.000Z | tests/unit/python/fledge/services/core/scheduler/test_scheduler.py | DDC-NDRS/fledge-iot_fledge | 27a5e66a55daaab1aca14ce6e66f9f1e6efaef51 | ["Apache-2.0"] | 24 | 2019-12-27T07:48:45.000Z | 2022-03-13T07:05:28.000Z |
# -*- coding: utf-8 -*-
# FLEDGE_BEGIN
# See: http://fledge-iot.readthedocs.io/
# FLEDGE_END
import asyncio
import datetime
import uuid
import time
import json
from unittest.mock import MagicMock, call
import sys
import copy
import pytest
from fledge.services.core.scheduler.scheduler import Scheduler, AuditLogger, ConfigurationManager
from fledge.services.core.scheduler.entities import *
from fledge.services.core.scheduler.exceptions import *
from fledge.common.storage_client.storage_client import StorageClientAsync
__author__ = "Amarendra K Sinha"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
class MockStorage(StorageClientAsync):
    pass  # mocked storage client; method bodies are elided in this record
class MockStorageAsync(StorageClientAsync):
schedules = [
{
"id": "cea17db8-6ccc-11e7-907b-a6006ad3dba0",
"process_name": "purge",
"schedule_name": "purge",
"schedule_type": 4,
"schedule_interval": "01:00:00",
"schedule_time": "",
"schedule_day": 0,
"exclusive": "t",
"enabled": "t"
},
{
"id": "2176eb68-7303-11e7-8cf7-a6006ad3dba0",
"process_name": "stats collector",
"schedule_name": "stats collection",
"schedule_type": 2,
"schedule_interval": "00:00:15",
"schedule_time": "00:00:15",
"schedule_day": 3,
"exclusive": "f",
"enabled": "t"
},
{
"id": "d1631422-9ec6-11e7-abc4-cec278b6b50a",
"process_name": "backup",
"schedule_name": "backup hourly",
"schedule_type": 3,
"schedule_interval": "01:00:00",
"schedule_time": "",
"schedule_day": 0,
"exclusive": "t",
"enabled": "f"
},
{
"id": "ada12840-68d3-11e7-907b-a6006ad3dba0",
"process_name": "COAP",
"schedule_name": "COAP listener south",
"schedule_type": 1,
"schedule_interval": "00:00:00",
"schedule_time": "",
"schedule_day": 0,
"exclusive": "t",
"enabled": "t"
},
{
"id": "2b614d26-760f-11e7-b5a5-be2e44b06b34",
"process_name": "North Readings to PI",
"schedule_name": "OMF to PI north",
"schedule_type": 3,
"schedule_interval": "00:00:30",
"schedule_time": "",
"schedule_day": 0,
"exclusive": "t",
"enabled": "t"
},
{
"id": "5d7fed92-fb9a-11e7-8c3f-9a214cf093ae",
"process_name": "North Readings to OCS",
"schedule_name": "OMF to OCS north",
"schedule_type": 3,
"schedule_interval": "1 day 00:00:40",
"schedule_time": "",
"schedule_day": 0,
"exclusive": "t",
"enabled": "f"
},
]
scheduled_processes = [
{
"name": "purge",
"script": [
"tasks/purge"
]
},
{
"name": "stats collector",
"script": [
"tasks/statistics"
]
},
{
"name": "backup",
"script": [
"tasks/backup_postgres"
]
},
{
"name": "COAP",
"script": [
"services/south"
]
},
{
"name": "North Readings to PI",
"script": [
"tasks/north",
"--stream_id",
"1",
"--debug_level",
"1"
]
},
{
"name": "North Readings to OCS",
"script": [
"tasks/north",
"--stream_id",
"4",
"--debug_level",
"1"
]
},
]
tasks = [
{
"id": "259b8570-65c1-4b92-8c62-e9642631a600",
"process_name": "North Readings to PI",
"state": 1,
"start_time": "2018-02-06 13:28:14.477868",
"end_time": "2018-02-06 13:28:14.856375",
"exit_code": "0",
"reason": ""
}
]
| 42.621555 | 339 | 0.652936 |
7c9af51ba1243be5af3bd0e724c771174bb964d2 | 1,007 | py | Python | problem_solving/python/algorithms/greedy/marcs_cakewalk.py | kcc3/hackerrank-solutions | f862b44b840bd447d99dc148f6bb5e2f5bfb8a86 | ["MIT"] | null | null | null | problem_solving/python/algorithms/greedy/marcs_cakewalk.py | kcc3/hackerrank-solutions | f862b44b840bd447d99dc148f6bb5e2f5bfb8a86 | ["MIT"] | null | null | null | problem_solving/python/algorithms/greedy/marcs_cakewalk.py | kcc3/hackerrank-solutions | f862b44b840bd447d99dc148f6bb5e2f5bfb8a86 | ["MIT"] | 1 | 2020-06-04T09:23:19.000Z | 2020-06-04T09:23:19.000Z |
def marcs_cakewalk(calorie):
"""Hackerrank Problem: https://www.hackerrank.com/challenges/marcs-cakewalk/problem
Marc loves cupcakes, but he also likes to stay fit. Each cupcake has a calorie count, and Marc can walk a distance
to expend those calories. If Marc has eaten j cupcakes so far, after eating a cupcake with c calories he must walk
at least 2**j x c miles to maintain his weight.
Solve:
    To minimize the total miles, eat the cupcakes from highest to lowest calorie count,
    so the largest calorie values receive the smallest 2**j multipliers
Args:
calorie (list): List of integers denoting the calories for each cupcake
Returns:
        int: The minimum number of miles Marc must walk to maintain his weight
"""
calories = 0
for i, c in enumerate(sorted(calorie, reverse=True)):
calories += (2 ** i * c)
return calories
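# Worked example (derived from the assertions below): for calorie = [5, 10, 7],
# the sorted descending order is [10, 7, 5], so the minimum miles are
# 10*2**0 + 7*2**1 + 5*2**2 = 10 + 14 + 20 = 44.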
if __name__ == "__main__":
assert marcs_cakewalk([5, 10, 7]) == 44
assert marcs_cakewalk([1, 3, 2]) == 11
assert marcs_cakewalk([7, 4, 9, 6]) == 79
| 37.296296 | 118 | 0.683217 |
7c9bc57e7e9891072399e9288ee87401c640bfb4 | 1,583 | py | Python | coronaindiatracker/coronatracker/views.py | ankitgoswami23/CoronaIndiaTracker | b2e116a595b3c69ccefa93b60833c09aa07b5eed | ["Unlicense"] | 2 | 2020-07-26T05:57:27.000Z | 2020-07-26T07:12:15.000Z | coronaindiatracker/coronatracker/views.py | ankee23/CoronaIndiaTracker | b2e116a595b3c69ccefa93b60833c09aa07b5eed | ["Unlicense"] | null | null | null | coronaindiatracker/coronatracker/views.py | ankee23/CoronaIndiaTracker | b2e116a595b3c69ccefa93b60833c09aa07b5eed | ["Unlicense"] | 1 | 2020-11-26T08:52:11.000Z | 2020-11-26T08:52:11.000Z |
from django.shortcuts import render
import requests
from bs4 import BeautifulSoup
def corona_data(request):
"Testaaaa"
corona_html = requests.get("https://www.mygov.in/covid-19")
soup = BeautifulSoup(corona_html.content, 'html.parser')
state_wise_data = soup.find_all('div', class_='views-row')
information = soup.find('div', class_='information_row')
info = {
'update_data': information.find('div', class_='info_title').find('span').string,
'active_case': information.find('div', class_='active-case').find('span', class_='icount').string,
'discharge': information.find('div', class_='discharge').find('span', class_='icount').string,
'death': information.find('div', class_='death_case').find('span', class_='icount').string
}
corona_info = [
{
"state_name": state.find_all('span', class_='st_name')[0].string,
"confirm_case": state.find_all('div', class_='tick-confirmed')[0].find_all('small')[0].string,
"active_case": state.find_all('div', class_='tick-active')[0].find_all('small')[0].string,
"discharge": state.find_all('div', class_='tick-discharged')[0].find_all('small')[0].string,
"death": state.find_all('div', class_='tick-death')[0].find_all('small')[0].string
} for state in state_wise_data
]
context = {
'corona_info': info,
'data': sorted(corona_info, key=lambda i: int(''.join(i['confirm_case'].replace(',', ''))), reverse=True)
}
return render(request, 'coronainfo/index.html', context)
| 45.228571 | 113 | 0.642451 |
7c9c1524555fded271e617bca48b5b1e6a1e9ace | 6,082 | py | Python | compare.py | geohackweek/ghw2019_wiggles | 9b636db8d97986e038a301e36b808e820ccc525f | ["BSD-3-Clause"] | 3 | 2019-10-09T19:42:12.000Z | 2021-05-28T00:10:54.000Z | compare.py | geohackweek/ghw2019_wiggles | 9b636db8d97986e038a301e36b808e820ccc525f | ["BSD-3-Clause"] | 1 | 2019-09-11T16:37:59.000Z | 2019-09-11T16:37:59.000Z | compare.py | geohackweek/ghw2019_wiggles | 9b636db8d97986e038a301e36b808e820ccc525f | ["BSD-3-Clause"] | 3 | 2019-09-10T20:41:59.000Z | 2019-09-10T20:42:57.000Z |
# Script tests GPD model using UW truth data
# Test outputs:
# - type of event tested [EQS, EQP, SUS, SUP, THS, THP, SNS, SNP, PXS, PXP]
# - phase [P, S, N] Note: N - not detected
# - model time offset (t_truth - t_model_pick)
import numpy
import math
import string
import datetime
import sys
import os
import csv
from datetime import datetime
from datetime import timedelta
# params
padding_time = 10
fudge_factor = timedelta(seconds=27)
time_diff = timedelta(seconds=10)
# file dirs
parsed_arrivals = []
model_in = []
model_out = []
comp_out = []
for etype in ['EQS','EQP','SUS','SUP','THS','THP','SNS','SNP','PXS','PXP']:
arrival = "parsed_arrivals/" + etype + ".arrivals.txt"
infile = "input_files/GPD." + etype + ".in"
outfile = "output_files/GPD." + etype + ".out"
parsed_arrivals.append(arrival)
model_in.append(infile)
model_out.append(outfile)
comp_out.append("comparison_out/comp." + etype + ".out")
# ------------------
# read in UW arrival times as an array
# read in Caltech model output and create a dictionary
# lookup time in the dictionary
# search for arrivals within the padding time window
def execute_script(arrival_file, infile, outfile, comp_file):
    # NOTE: the comparison routine itself is not included in this record; this
    # stub only preserves the call signature used below.
    raise NotImplementedError("comparison routine not included in this record")
for i in range(len(model_out)):
    execute_script(parsed_arrivals[i], model_in[i], model_out[i], comp_out[i])
| 37.312883 | 132 | 0.561822 |
7c9c7b65355934d322e4085f42e442dbe2ee0d7d | 7,012 | py | Python | ultitrackerapi/ultitrackerapi/extract_and_upload_video.py | atheheath/ultitracker-api | 5d7ea7ae97c53faf02416f17baf11ed09fd55276 | ["MIT"] | null | null | null | ultitrackerapi/ultitrackerapi/extract_and_upload_video.py | atheheath/ultitracker-api | 5d7ea7ae97c53faf02416f17baf11ed09fd55276 | ["MIT"] | 7 | 2020-03-27T03:33:52.000Z | 2020-03-30T02:33:04.000Z | ultitrackerapi/ultitrackerapi/extract_and_upload_video.py | atheheath/ultitracker-api | 5d7ea7ae97c53faf02416f17baf11ed09fd55276 | ["MIT"] | null | null | null |
import argparse
import boto3
import datetime
import json
import os
import posixpath
import re
import shutil
import tempfile
import uuid
from concurrent import futures
from multiprocessing import Pool
from ultitrackerapi import get_backend, get_logger, get_s3Client, video
backend_instance = get_backend()
logger = get_logger(__name__, level="DEBUG")
s3Client = get_s3Client()
def main():
    # NOTE: the extract-and-upload pipeline body is not included in this record.
    raise NotImplementedError("pipeline body not included in this record")
if __name__ == "__main__":
    main()
| 33.711538 | 138 | 0.673987 |
7c9c87d15e24804e84e87528b8a9f5ba5b08422f | 3,265 | py | Python | Chapter03/scikit_soft_voting_2knn.py | PacktPublishing/Hands-On-Ensemble-Learning-with-Python | db9b90189dbebbc6ab5ebba0e2e173ba80197c35 | ["MIT"] | 31 | 2019-07-21T00:36:52.000Z | 2022-02-25T15:38:21.000Z | Chapter03/scikit_soft_voting_2knn.py | tokiran/Hands-On-Ensemble-Learning-with-Python | 739ecda33fb75dc1df1366abf4a79c34cc0c2026 | ["MIT"] | null | null | null | Chapter03/scikit_soft_voting_2knn.py | tokiran/Hands-On-Ensemble-Learning-with-Python | 739ecda33fb75dc1df1366abf4a79c34cc0c2026 | ["MIT"] | 30 | 2019-07-06T00:22:44.000Z | 2022-02-04T02:44:17.000Z |
# --- SECTION 1 ---
# Import the required libraries
from sklearn import datasets, naive_bayes, svm, neighbors
from sklearn.ensemble import VotingClassifier
from sklearn.metrics import accuracy_score
# Load the dataset
breast_cancer = datasets.load_breast_cancer()
x, y = breast_cancer.data, breast_cancer.target
# Split the train and test samples
test_samples = 100
x_train, y_train = x[:-test_samples], y[:-test_samples]
x_test, y_test = x[-test_samples:], y[-test_samples:]
# --- SECTION 2 ---
# Instantiate the learners (classifiers)
learner_1 = neighbors.KNeighborsClassifier(n_neighbors=5)
learner_2 = naive_bayes.GaussianNB()
learner_3 = neighbors.KNeighborsClassifier(n_neighbors=50)
# --- SECTION 3 ---
# Instantiate the voting classifier
voting = VotingClassifier([('5NN', learner_1),
('NB', learner_2),
('50NN', learner_3)],
voting='soft')
# --- SECTION 4 ---
# Fit classifier with the training data
voting.fit(x_train, y_train)
learner_1.fit(x_train, y_train)
learner_2.fit(x_train, y_train)
learner_3.fit(x_train, y_train)
# --- SECTION 5 ---
# Predict the most probable class
hard_predictions = voting.predict(x_test)
# --- SECTION 6 ---
# Get the base learner predictions
predictions_1 = learner_1.predict(x_test)
predictions_2 = learner_2.predict(x_test)
predictions_3 = learner_3.predict(x_test)
# --- SECTION 7 ---
# Accuracies of base learners
print('L1:', accuracy_score(y_test, predictions_1))
print('L2:', accuracy_score(y_test, predictions_2))
print('L3:', accuracy_score(y_test, predictions_3))
# Accuracy of soft voting (this ensemble was built with voting='soft')
print('-'*30)
print('Soft Voting:', accuracy_score(y_test, hard_predictions))
# --- SECTION 1 ---
# Import the required libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.style.use('seaborn-paper')
# --- SECTION 2 ---
# Get the wrongly predicted instances
# and the predicted probabilities for the whole test set
errors = y_test-hard_predictions
probabilities_1 = learner_1.predict_proba(x_test)
probabilities_2 = learner_2.predict_proba(x_test)
probabilities_3 = learner_3.predict_proba(x_test)
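# A minimal sketch (not in the original script) of what the 'soft' vote above
# computes internally with equal weights: average the base learners' class
# probabilities and take the argmax per sample.
import numpy as np
manual_soft = np.argmax((probabilities_1 + probabilities_2 + probabilities_3) / 3, axis=1)
print('Manual soft vote agrees:', np.array_equal(manual_soft, hard_predictions))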
# --- SECTION 2 ---
# Store the predicted probability for
# each wrongly predicted instance, for each base learner
# as well as the average predicted probability
#
x=[]
y_1=[]
y_2=[]
y_3=[]
y_avg=[]
for i in range(len(errors)):
if not errors[i] == 0:
x.append(i)
y_1.append(probabilities_1[i][0])
y_2.append(probabilities_2[i][0])
y_3.append(probabilities_3[i][0])
y_avg.append((probabilities_1[i][0]+probabilities_2[i][0]+probabilities_3[i][0])/3)
# --- SECTION 3 ---
# Plot the predicted probability of each base learner as
# a bar and the average probability as an X
plt.bar(x, y_1, 3, label='5NN')
plt.bar(x, y_2, 2, label='NB')
plt.bar(x, y_3, 1, label='50NN')
plt.scatter(x, y_avg, marker='x', c='k', s=150, label='Average Positive', zorder=10)
y = [0.5 for x in range(len(errors))]
plt.plot(y, c='k', linestyle='--')
plt.title('Positive Probability')
plt.xlabel('Test sample')
plt.ylabel('probability')
plt.legend()
| 28.640351 | 92 | 0.67902 |
7c9e60fb8b9a1847e8db908d6cfa14b5a53e1aaf | 623 | py | Python | API/migrations/0005_alter_news_date_time_alter_news_headline.py | kgarchie/ReSTful-Django-API | 851c76eb75747042ceac0a6c164266409ca935d4 | ["MIT"] | null | null | null | API/migrations/0005_alter_news_date_time_alter_news_headline.py | kgarchie/ReSTful-Django-API | 851c76eb75747042ceac0a6c164266409ca935d4 | ["MIT"] | null | null | null | API/migrations/0005_alter_news_date_time_alter_news_headline.py | kgarchie/ReSTful-Django-API | 851c76eb75747042ceac0a6c164266409ca935d4 | ["MIT"] | null | null | null |
# Generated by Django 4.0.3 on 2022-03-23 14:31
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # NOTE: the dependencies and operations were not included in this record;
    # the filename suggests AlterField operations on News.date_time and
    # News.headline. Empty lists keep the module importable.
    dependencies = []
    operations = []
| 24.92 | 98 | 0.603531 |
7ca170e48f979878209316e327d77080c8c15058 | 2,662 | py | Python | qiskit/ml/datasets/iris.py | stefan-woerner/aqua | 12e1b867e254977d9c5992612a7919d8fe016cb4 | ["Apache-2.0"] | 504 | 2018-12-15T16:34:03.000Z | 2022-03-26T11:24:53.000Z | qiskit/ml/datasets/iris.py | stefan-woerner/aqua | 12e1b867e254977d9c5992612a7919d8fe016cb4 | ["Apache-2.0"] | 746 | 2018-12-16T16:44:42.000Z | 2021-07-10T16:59:43.000Z | qiskit/ml/datasets/iris.py | stefan-woerner/aqua | 12e1b867e254977d9c5992612a7919d8fe016cb4 | ["Apache-2.0"] | 421 | 2018-12-22T14:49:00.000Z | 2022-03-04T09:47:07.000Z |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
iris dataset
"""
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from qiskit.aqua import MissingOptionalLibraryError
def iris(training_size, test_size, n, plot_data=False):
""" returns iris dataset """
class_labels = [r'A', r'B', r'C']
data, target = datasets.load_iris(return_X_y=True)
sample_train, sample_test, label_train, label_test = \
train_test_split(data, target, test_size=1, random_state=42)
# Now we standardize for gaussian around 0 with unit variance
std_scale = StandardScaler().fit(sample_train)
sample_train = std_scale.transform(sample_train)
sample_test = std_scale.transform(sample_test)
# Now reduce number of features to number of qubits
pca = PCA(n_components=n).fit(sample_train)
sample_train = pca.transform(sample_train)
sample_test = pca.transform(sample_test)
# Scale to the range (-1,+1)
samples = np.append(sample_train, sample_test, axis=0)
minmax_scale = MinMaxScaler((-1, 1)).fit(samples)
sample_train = minmax_scale.transform(sample_train)
sample_test = minmax_scale.transform(sample_test)
# Pick training size number of samples from each distro
training_input = {key: (sample_train[label_train == k, :])[:training_size]
for k, key in enumerate(class_labels)}
test_input = {key: (sample_test[label_test == k, :])[:test_size]
for k, key in enumerate(class_labels)}
if plot_data:
try:
import matplotlib.pyplot as plt
except ImportError as ex:
raise MissingOptionalLibraryError(
libname='Matplotlib',
name='iris',
pip_install='pip install matplotlib') from ex
for k in range(0, 3):
plt.scatter(sample_train[label_train == k, 0][:training_size],
sample_train[label_train == k, 1][:training_size])
plt.title("Iris dataset")
plt.show()
return sample_train, training_input, test_input, class_labels
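# Example usage (hypothetical sizes, not part of the original module):
if __name__ == "__main__":
    sample_train, training_input, test_input, class_labels = iris(
        training_size=20, test_size=10, n=2, plot_data=False)
    print({label: data.shape for label, data in training_input.items()})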
| 38.028571 | 78 | 0.696093 |
7ca18b95086348a6dec0e89454f15ffded086574 | 16,864 | py | Python | tests/h/views/api_auth_test.py | discodavey/h | 7bff8478b3a5b936de82ac9fcd89b355f4afd3aa | ["MIT"] | null | null | null | tests/h/views/api_auth_test.py | discodavey/h | 7bff8478b3a5b936de82ac9fcd89b355f4afd3aa | ["MIT"] | 5 | 2017-12-26T14:22:20.000Z | 2018-04-02T02:56:38.000Z | tests/h/views/api_auth_test.py | discodavey/h | 7bff8478b3a5b936de82ac9fcd89b355f4afd3aa | ["MIT"] | 1 | 2021-03-12T09:45:04.000Z | 2021-03-12T09:45:04.000Z |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import json
import mock
import pytest
from oauthlib.oauth2 import InvalidRequestFatalError
from oauthlib.common import Request as OAuthRequest
from pyramid import httpexceptions
from h._compat import urlparse
from h.exceptions import OAuthTokenError
from h.models.auth_client import ResponseType
from h.services.auth_token import auth_token_service_factory
from h.services.oauth_provider import OAuthProviderService
from h.services.oauth_validator import DEFAULT_SCOPES
from h.services.user import user_service_factory
from h.util.datetime import utc_iso8601
from h.views import api_auth as views
class TestDebugToken(object):
    pass  # NOTE: the test methods are not included in this record
| 41.131707 | 147 | 0.694141 |
7ca1d5b32a32a25d088eb63410921b9a5e64742f | 1,306 | py | Python | tools/build/v2/test/conditionals.py | juslee/boost-svn | 6d5a03c1f5ed3e2b23bd0f3ad98d13ff33d4dcbb | ["BSL-1.0"] | 1 | 2018-12-15T19:55:56.000Z | 2018-12-15T19:55:56.000Z | tools/build/v2/test/conditionals.py | smart-make/boost | 46509a094f8a844eefd5bb8a0030b739a04d79e1 | ["BSL-1.0"] | null | null | null | tools/build/v2/test/conditionals.py | smart-make/boost | 46509a094f8a844eefd5bb8a0030b739a04d79e1 | ["BSL-1.0"] | null | null | null |
#!/usr/bin/python
# Copyright 2003 Dave Abrahams
# Copyright 2002, 2003, 2004 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Test conditional properties.
import BoostBuild
t = BoostBuild.Tester()
# Arrange a project which will build only if 'a.cpp' is compiled with "STATIC"
# define.
t.write("a.cpp", """\
#ifdef STATIC
int main() {}
#endif
""")
# Test conditionals in target requirements.
t.write("jamroot.jam", "exe a : a.cpp : <link>static:<define>STATIC ;")
t.run_build_system(["link=static"])
t.expect_addition("bin/$toolset/debug/link-static/a.exe")
t.rm("bin")
# Test conditionals in project requirements.
t.write("jamroot.jam", """
project : requirements <link>static:<define>STATIC ;
exe a : a.cpp ;
""")
t.run_build_system(["link=static"])
t.expect_addition("bin/$toolset/debug/link-static/a.exe")
t.rm("bin")
# Regression test for a bug found by Ali Azarbayejani. Conditionals inside
# usage requirement were not being evaluated.
t.write("jamroot.jam", """
lib l : l.cpp : : : <link>static:<define>STATIC ;
exe a : a.cpp l ;
""")
t.write("l.cpp", "int i;")
t.run_build_system(["link=static"])
t.expect_addition("bin/$toolset/debug/link-static/a.exe")
t.cleanup()
| 26.653061 | 78 | 0.712864 |
7ca33bba047d555eff412922059b6da8837f7980 | 270 | py | Python | examples/setuptools-rust-starter/tests/test_setuptools_rust_starter.py | FriendRat/pyo3 | 5446fe2062cb3bf11bf61bd4a2c58a7ed8b408d2 | ["Apache-2.0"] | 1 | 2021-06-18T16:27:31.000Z | 2021-06-18T16:27:31.000Z | examples/setuptools-rust-starter/tests/test_setuptools_rust_starter.py | FriendRat/pyo3 | 5446fe2062cb3bf11bf61bd4a2c58a7ed8b408d2 | ["Apache-2.0"] | 5 | 2021-11-08T22:05:41.000Z | 2022-03-28T22:07:04.000Z | examples/setuptools-rust-starter/tests/test_setuptools_rust_starter.py | FriendRat/pyo3 | 5446fe2062cb3bf11bf61bd4a2c58a7ed8b408d2 | ["Apache-2.0"] | 1 | 2021-11-05T03:16:32.000Z | 2021-11-05T03:16:32.000Z |
from setuptools_rust_starter import PythonClass, ExampleClass
| 22.5 | 61 | 0.733333 |
7ca44058ba24c0424d8558e54e0f3abd230491fa | 12,813 | py | Python | spiders/juejin_spider.py | sunhailin-Leo/TeamLeoX_BlogsCrawler | 389ff31e02bdff415c8bc470a3a48da1acb14c4c | ["MIT"] | null | null | null | spiders/juejin_spider.py | sunhailin-Leo/TeamLeoX_BlogsCrawler | 389ff31e02bdff415c8bc470a3a48da1acb14c4c | ["MIT"] | null | null | null | spiders/juejin_spider.py | sunhailin-Leo/TeamLeoX_BlogsCrawler | 389ff31e02bdff415c8bc470a3a48da1acb14c4c | ["MIT"] | null | null | null |
import time
from typing import Dict, List, Tuple, Optional
from utils.logger_utils import LogManager
from utils.str_utils import check_is_json
from config import LOG_LEVEL, PROCESS_STATUS_FAIL
from utils.time_utils import datetime_str_change_fmt
from utils.exception_utils import LoginException, ParseDataException
from spiders import BaseSpider, BaseSpiderParseMethodType, CookieUtils
from utils.str_utils import check_is_phone_number, check_is_email_address
logger = LogManager(__name__).get_logger_and_add_handlers(
formatter_template=5, log_level_int=LOG_LEVEL
)
| 42.287129 | 157 | 0.558807 |
7ca486af10b1cca3904ea233b441a3077ec0bb6b | 3,653 | py | Python | NAS/PaddleSlim/train_supernet.py | naviocean/SimpleCVReproduction | 61b43e3583977f42e6f91ef176ec5e1701e98d33 | ["Apache-2.0"] | 923 | 2020-01-11T06:36:53.000Z | 2022-03-31T00:26:57.000Z | NAS/PaddleSlim/train_supernet.py | Twenty3hree/SimpleCVReproduction | 9939f8340c54dbd69b0017cecad875dccf428f26 | ["Apache-2.0"] | 25 | 2020-02-27T08:35:46.000Z | 2022-01-25T08:54:19.000Z | NAS/PaddleSlim/train_supernet.py | Twenty3hree/SimpleCVReproduction | 9939f8340c54dbd69b0017cecad875dccf428f26 | ["Apache-2.0"] | 262 | 2020-01-02T02:19:40.000Z | 2022-03-23T04:56:16.000Z |
from paddle.vision.transforms import (
ToTensor, RandomHorizontalFlip, RandomResizedCrop, SaturationTransform, Compose,
HueTransform, BrightnessTransform, ContrastTransform, RandomCrop, Normalize, RandomRotation
)
from paddle.vision.datasets import Cifar100
from paddle.io import DataLoader
from paddle.optimizer.lr import CosineAnnealingDecay, MultiStepDecay, LinearWarmup
import random
from resnet20 import *
import paddle
# supernet training with paddleslim
# https://github.com/PaddlePaddle/PaddleSlim
from paddleslim.nas.ofa.convert_super import Convert, supernet
from paddleslim.nas.ofa import OFA, RunConfig, DistillConfig
from paddleslim.nas.ofa.utils import utils
channel_list = []
for i in range(1, 21):
if 0 < i <= 7:
# channel_list.append(random.choice([ 4, 8, 12, 16]))
channel_list.append(16)
elif 7 < i <= 13:
# channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32]))
channel_list.append(32)
elif 13 < i <= 19:
# channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56,60, 64]))
channel_list.append(64)
else:
# channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56,60, 64]))
channel_list.append(64)
net = ResNet20(100, channel_list)
net2 = ResNet20(100, channel_list)
net2.set_state_dict(paddle.load('./pretrained_model/resnet20.pdparams'))
channel_optional = []
for i in range(0, 23):
if i <= 7:
channel_optional.append([4, 8, 12, 16])
# channel_optional.append([12, 16])
elif 7 < i <= 14:
channel_optional.append([4, 8, 12, 16, 20, 24, 28, 32])
# channel_optional.append([20, 24, 28, 32])
elif 14 < i <= 21:
channel_optional.append(
[4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64])
# channel_optional.append([36, 40, 44, 48, 52, 56,60, 64])
else:
channel_optional.append(
[4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64])
# channel_optional.append([36, 40, 44, 48, 52, 56,60, 64])
distill_config = DistillConfig(teacher_model=net2)
sp_net_config = supernet(channel=channel_optional)
sp_model = Convert(sp_net_config).convert(net)
ofa_net = OFA(sp_model, distill_config=distill_config)
ofa_net.set_task('channel')
model = paddle.Model(ofa_net)
MAX_EPOCH = 300
LR = 0.1
WEIGHT_DECAY = 5e-4
MOMENTUM = 0.9
BATCH_SIZE = 128
CIFAR_MEAN = [0.5071, 0.4865, 0.4409]
CIFAR_STD = [0.1942, 0.1918, 0.1958]
DATA_FILE = './data/data76994/cifar-100-python.tar.gz'
model.prepare(
paddle.optimizer.Momentum(
learning_rate=LinearWarmup(
CosineAnnealingDecay(LR, MAX_EPOCH), 2000, 0., LR),
momentum=MOMENTUM,
parameters=model.parameters(),
weight_decay=WEIGHT_DECAY),
CrossEntropyLoss(),
paddle.metric.Accuracy(topk=(1, 5)))
transforms = Compose([
RandomCrop(32, padding=4),
RandomApply(BrightnessTransform(0.1)),
RandomApply(ContrastTransform(0.1)),
RandomHorizontalFlip(),
RandomRotation(15),
ToArray(),
Normalize(CIFAR_MEAN, CIFAR_STD),
])
val_transforms = Compose([ToArray(), Normalize(CIFAR_MEAN, CIFAR_STD)])
train_set = Cifar100(DATA_FILE, mode='train', transform=transforms)
test_set = Cifar100(DATA_FILE, mode='test', transform=val_transforms)
callbacks = [LRSchedulerM(), callbacks.VisualDL('vis_logs/ofa_resnet20')]
model.fit(
train_set,
test_set,
epochs=MAX_EPOCH,
batch_size=BATCH_SIZE,
save_dir='checkpoints',
save_freq=100,
shuffle=True,
num_workers=4,
verbose=1,
callbacks=callbacks,
)
| 33.209091 | 108 | 0.680537 |
7ca4b5308f48cb161081920789f0cfaed577f79d | 28,560 | py | Python | slashtags/mixins/commands.py | Myst1c-a/phen-cogs | 672f9022ddbbd9a84b0a05357347e99e64a776fc | ["MIT"] | null | null | null | slashtags/mixins/commands.py | Myst1c-a/phen-cogs | 672f9022ddbbd9a84b0a05357347e99e64a776fc | ["MIT"] | null | null | null | slashtags/mixins/commands.py | Myst1c-a/phen-cogs | 672f9022ddbbd9a84b0a05357347e99e64a776fc | ["MIT"] | null | null | null |
"""
MIT License
Copyright (c) 2020-present phenom4n4n
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import asyncio
import logging
import re
import types
from collections import Counter
from copy import copy
from typing import Dict, List, Union
import discord
from redbot.core import commands
from redbot.core.utils.chat_formatting import box, humanize_list, inline, pagify
from redbot.core.utils.menus import DEFAULT_CONTROLS, menu
from redbot.core.utils.predicates import MessagePredicate
from tabulate import tabulate
from ..abc import MixinMeta
from ..converters import (
GlobalTagConverter,
GuildTagConverter,
PastebinConverter,
TagConverter,
TagName,
TagScriptConverter,
)
from ..http import ApplicationOptionChoice, SlashOptionType
from ..objects import ApplicationCommand, ApplicationCommandType, SlashOption, SlashTag
from ..testing.button_menus import menu as button_menu
from ..utils import ARGUMENT_NAME_DESCRIPTION, chunks, dev_check
TAG_RE = re.compile(r"(?i)(\[p\])?\b(slash\s?)?tag'?s?\b")
CHOICE_RE = re.compile(r".{1,100}:.{1,100}")
CHOICE_LIMIT = 25
log = logging.getLogger("red.phenom4n4n.slashtags.commands")
| 37.777778 | 165 | 0.630882 |
7ca4f5192c61086f4f74975da17d1bf4fd80a81d | 184 | py | Python | prgm6.py | pooja-bs-3003/Project_21 | dc46e66ccf10937be6f2f8369ef02eb52e139eff | ["MIT"] | null | null | null | prgm6.py | pooja-bs-3003/Project_21 | dc46e66ccf10937be6f2f8369ef02eb52e139eff | ["MIT"] | null | null | null | prgm6.py | pooja-bs-3003/Project_21 | dc46e66ccf10937be6f2f8369ef02eb52e139eff | ["MIT"] | 1 | 2021-01-22T15:15:02.000Z | 2021-01-22T15:15:02.000Z |
str1 = input("enter a string :")
l1 =""
for i in str1 [::-1]:
l1 = i+l1
print(l1)
if str1 == l1:
print("string is a palindrome")
else :
print("string is not a palindrome")
| 18.4 | 40 | 0.592391 |
7ca53e88f3dd2e94b942b008b7daf5b989a2b7df | 7,012 | py | Python | product_spider/spiders/jk_spider.py | Pandaaaa906/product_spider | cc7f865f53fd3ed68f4869be3ba917c8373dfcf2 | ["MIT"] | null | null | null | product_spider/spiders/jk_spider.py | Pandaaaa906/product_spider | cc7f865f53fd3ed68f4869be3ba917c8373dfcf2 | ["MIT"] | null | null | null | product_spider/spiders/jk_spider.py | Pandaaaa906/product_spider | cc7f865f53fd3ed68f4869be3ba917c8373dfcf2 | ["MIT"] | null | null | null |
import json
import re
from string import ascii_uppercase
from time import time
from urllib.parse import urljoin
import scrapy
from more_itertools import first
from scrapy import Request
from product_spider.items import JkProduct, JKPackage
from product_spider.utils.functions import strip
| 45.960526 | 142 | 0.566562 |
7ca69cf8d56aa0d6b0aa339f56249d7cdfe3ab0c | 15,717 | py | Python | env/LaneChangeEnv_v2.py | byq-luo/Lane_change_RL | 3409238db939e6722441219b4c2dc66033611069 | ["MIT"] | 4 | 2021-03-11T03:05:31.000Z | 2022-03-22T08:45:20.000Z | env/LaneChangeEnv_v2.py | byq-luo/Lane_change_RL | 3409238db939e6722441219b4c2dc66033611069 | ["MIT"] | null | null | null | env/LaneChangeEnv_v2.py | byq-luo/Lane_change_RL | 3409238db939e6722441219b4c2dc66033611069 | ["MIT"] | 5 | 2021-06-18T11:32:34.000Z | 2021-12-31T08:10:51.000Z |
import os
import sys
import random
import datetime
import gym
from gym import spaces
import numpy as np
from env.IDM import IDM
from env.Road import Road
from env.Vehicle import Vehicle
import math
# add sumo/tools into python environment
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
print('success')
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
import traci
######################################################################
# simulation environments
| 40.507732 | 131 | 0.56283 |
7ca6e02f50310d3c489fa5cfd14dac9866b27eaa | 5,466 | py | Python | cidr/o365/o365.py | jblukach/distillery | 4087debb496d7dfc4c425c2e68246e1b0726168b | ["Apache-2.0"] | 1 | 2021-11-10T12:56:09.000Z | 2021-11-10T12:56:09.000Z | cidr/o365/o365.py | jblukach/distillery | 4087debb496d7dfc4c425c2e68246e1b0726168b | ["Apache-2.0"] | null | null | null | cidr/o365/o365.py | jblukach/distillery | 4087debb496d7dfc4c425c2e68246e1b0726168b | ["Apache-2.0"] | 1 | 2021-11-05T03:16:32.000Z | 2021-11-05T03:16:32.000Z |
import boto3
import ipaddress
import json
import logging
import os
import requests
import uuid
logger = logging.getLogger()
logger.setLevel(logging.INFO)
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(os.environ['DYNAMODB_TABLE'])
client = boto3.client('ssm')
| 47.12069 | 115 | 0.51921 |
7ca783f400a2f552b7d64e3767e01fb3717ef036 | 582 | py | Python | exampleinc.py | zulip/finbot | dcb6bfe54a674f4ff98370677a648b6cc1706e16 | ["Apache-2.0"] | 7 | 2017-02-19T16:35:24.000Z | 2022-03-09T20:05:49.000Z | exampleinc.py | zulip/finbot | dcb6bfe54a674f4ff98370677a648b6cc1706e16 | ["Apache-2.0"] | null | null | null | exampleinc.py | zulip/finbot | dcb6bfe54a674f4ff98370677a648b6cc1706e16 | ["Apache-2.0"] | 3 | 2020-02-13T18:06:46.000Z | 2021-06-10T19:56:30.000Z |
#!/usr/bin/python
from money import *
c = Company("Example Inc")
c.add_flow(FixedCost("Initial Cash", -500000))
c.add_flow(FixedCost("Incorporation", 500))
c.add_flow(ConstantCost("Office", 50000))
c.add_flow(PeriodicCost("Subscription", 4000, "2012-01-05", 14))
c.add_flow(DelayedCost("2012-02-01", ConstantCost("Office", 50000)))
c.add_flow(DelayedCost("2012-02-01", FixedCost("Financing", 50000)))
c.add_flow(SemiMonthlyCost("Payroll", 4000, "2012-01-01"))
c.add_flow(SemiMonthlyWages("Payroll", 6000, "2012-01-01"))
print(c)
c.cash_monthly_summary("2012-01-01", "2013-07-01")
| 36.375 | 68 | 0.730241 |
7ca7926bc8bb9c6d96d0fde91ed69d0cb52091a0 | 847 | py | Python | guardian/validators.py | dawid1stanek/guardian | 89359c93d5f36c8b458428e147000352fa7ad01d | ["Apache-2.0"] | null | null | null | guardian/validators.py | dawid1stanek/guardian | 89359c93d5f36c8b458428e147000352fa7ad01d | ["Apache-2.0"] | null | null | null | guardian/validators.py | dawid1stanek/guardian | 89359c93d5f36c8b458428e147000352fa7ad01d | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
import os
import socket
import subprocess
import argparse
import logging
LOGGER = logging.getLogger(__name__)
def ping(address):
    # NOTE: the validator bodies are not included in this record; these stubs
    # only keep the attribute assignments below valid.
    raise NotImplementedError
def port(address):
    raise NotImplementedError
ping.short_name = 'PING'
port.short_name = 'PORT'
| 24.2 | 112 | 0.663518 |
7ca89ca7169c0f1670fef9182b15d74e96bdbeae | 131 | py | Python | tests/data/udf_noop.py | Open-EO/openeo-geopyspark-driver | afd5902f426d2aa456d70ed6f2d51b6907de1cab | ["Apache-2.0"] | 12 | 2018-03-22T15:02:24.000Z | 2022-03-30T20:13:29.000Z | tests/data/udf_noop.py | Open-EO/openeo-geopyspark-driver | afd5902f426d2aa456d70ed6f2d51b6907de1cab | ["Apache-2.0"] | 116 | 2018-09-27T17:17:14.000Z | 2022-03-30T18:32:29.000Z | tests/data/udf_noop.py | Open-EO/openeo-geopyspark-driver | afd5902f426d2aa456d70ed6f2d51b6907de1cab | ["Apache-2.0"] | 3 | 2019-06-28T15:44:32.000Z | 2021-10-30T07:05:54.000Z |
from openeo.udf import XarrayDataCube
| 21.833333 | 74 | 0.78626 |
7caa2f54344b5d827f792813f87cde352f46a120 | 827 | py | Python | StateGoHome.py | LHGames-2017/superintelligence | bd9ea3d444e571a0f9607bf0f6799807f7e644ca | ["MIT"] | null | null | null | StateGoHome.py | LHGames-2017/superintelligence | bd9ea3d444e571a0f9607bf0f6799807f7e644ca | ["MIT"] | null | null | null | StateGoHome.py | LHGames-2017/superintelligence | bd9ea3d444e571a0f9607bf0f6799807f7e644ca | ["MIT"] | null | null | null |
from PlayerState import *
from pathFinder import PathFinder
from StateLook4Resources import *
| 31.807692 | 71 | 0.689238 |
7caad7d95f67042bb7aad81b10bf684a91160170 | 9,603 | py | Python | hoomd/mpcd/test-py/stream_slit_test.py | schwendp/hoomd-blue | df7970121b19bc4f8674348ab3241055ac87153b | ["BSD-3-Clause"] | 2 | 2020-03-30T14:38:50.000Z | 2020-06-02T05:53:41.000Z | hoomd/mpcd/test-py/stream_slit_test.py | schwendp/hoomd-blue | df7970121b19bc4f8674348ab3241055ac87153b | ["BSD-3-Clause"] | null | null | null | hoomd/mpcd/test-py/stream_slit_test.py | schwendp/hoomd-blue | df7970121b19bc4f8674348ab3241055ac87153b | ["BSD-3-Clause"] | 1 | 2020-05-20T07:00:08.000Z | 2020-05-20T07:00:08.000Z |
# Copyright (c) 2009-2019 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
# Maintainer: mphoward
import unittest
import numpy as np
import hoomd
from hoomd import md
from hoomd import mpcd
# unit tests for mpcd slit streaming geometry
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
| 43.06278 | 101 | 0.64157 |
7caae2b0b77242e98f5f62bea314586497fa86a7 | 7,261 | py | Python | tests/functional/model_models.py | haoyuchen1992/CourseBuilder | ba8f0e05c53cc74bb4e46235a7855fdfbd63dff7 | ["Apache-2.0"] | 1 | 2015-04-15T08:38:08.000Z | 2015-04-15T08:38:08.000Z | tests/functional/model_models.py | haoyuchen1992/CourseBuilder | ba8f0e05c53cc74bb4e46235a7855fdfbd63dff7 | ["Apache-2.0"] | 1 | 2021-06-08T09:49:12.000Z | 2021-06-08T09:49:12.000Z | tests/functional/model_models.py | haoyuchen1992/CourseBuilder | ba8f0e05c53cc74bb4e46235a7855fdfbd63dff7 | ["Apache-2.0"] | 3 | 2015-10-25T12:39:07.000Z | 2021-06-08T09:47:34.000Z |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional tests for models.models."""
__author__ = [
'[email protected] (John Cox)',
]
import datetime
from models import models
from tests.functional import actions
# Disable complaints about docstrings for self-documenting tests.
# pylint: disable-msg=g-missing-docstring
| 39.677596 | 80 | 0.700454 |
7cab92b7f1c079530e07c1c01ef7a728efea9d02 | 43,187 | py | Python | torchaudio/functional/functional.py | iseessel/audio | 64551a69186d28db1f499ba373f1b19c6a7ed894 | ["BSD-2-Clause"] | null | null | null | torchaudio/functional/functional.py | iseessel/audio | 64551a69186d28db1f499ba373f1b19c6a7ed894 | ["BSD-2-Clause"] | null | null | null | torchaudio/functional/functional.py | iseessel/audio | 64551a69186d28db1f499ba373f1b19c6a7ed894 | ["BSD-2-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
import io
import math
import warnings
from typing import Optional, Tuple
import torch
from torch import Tensor
from torchaudio._internal import module_utils as _mod_utils
import torchaudio
__all__ = [
    "spectrogram",
    "griffinlim",
    "amplitude_to_DB",
    "DB_to_amplitude",
    "compute_deltas",
    "compute_kaldi_pitch",
    "create_fb_matrix",
    "create_dct",
    "detect_pitch_frequency",
    "mu_law_encoding",
    "mu_law_decoding",
    "complex_norm",
    "angle",
    "magphase",
    "phase_vocoder",
    "mask_along_axis",
    "mask_along_axis_iid",
    "sliding_window_cmn",
    "spectral_centroid",
    "apply_codec",
]
def spectrogram(
waveform: Tensor,
pad: int,
window: Tensor,
n_fft: int,
hop_length: int,
win_length: int,
power: Optional[float],
normalized: bool,
center: bool = True,
pad_mode: str = "reflect",
onesided: bool = True
) -> Tensor:
r"""Create a spectrogram or a batch of spectrograms from a raw audio signal.
The spectrogram can be either magnitude-only or complex.
Args:
waveform (Tensor): Tensor of audio of dimension (..., time)
pad (int): Two sided padding of signal
window (Tensor): Window tensor that is applied/multiplied to each frame/window
n_fft (int): Size of FFT
hop_length (int): Length of hop between STFT windows
win_length (int): Window size
power (float or None): Exponent for the magnitude spectrogram,
(must be > 0) e.g., 1 for energy, 2 for power, etc.
If None, then the complex spectrum is returned instead.
normalized (bool): Whether to normalize by magnitude after stft
center (bool, optional): whether to pad :attr:`waveform` on both sides so
that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.
Default: ``True``
pad_mode (string, optional): controls the padding method used when
:attr:`center` is ``True``. Default: ``"reflect"``
onesided (bool, optional): controls whether to return half of results to
avoid redundancy. Default: ``True``
Returns:
Tensor: Dimension (..., freq, time), freq is
``n_fft // 2 + 1`` and ``n_fft`` is the number of
Fourier bins, and time is the number of window hops (n_frame).
"""
if pad > 0:
# TODO add "with torch.no_grad():" back when JIT supports it
waveform = torch.nn.functional.pad(waveform, (pad, pad), "constant")
# pack batch
shape = waveform.size()
waveform = waveform.reshape(-1, shape[-1])
# default values are consistent with librosa.core.spectrum._spectrogram
spec_f = torch.stft(
input=waveform,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
center=center,
pad_mode=pad_mode,
normalized=False,
onesided=onesided,
return_complex=True,
)
# unpack batch
spec_f = spec_f.reshape(shape[:-1] + spec_f.shape[-2:])
if normalized:
spec_f /= window.pow(2.).sum().sqrt()
if power is not None:
if power == 1.0:
return spec_f.abs()
return spec_f.abs().pow(power)
return torch.view_as_real(spec_f)
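# A minimal usage sketch (not part of the original module): a power spectrogram
# of a 1-second, 16 kHz signal with n_fft=400 has n_fft // 2 + 1 = 201 frequency
# bins and, with center padding and hop_length=200, 81 frames.
def _spectrogram_example() -> torch.Size:
    waveform = torch.randn(1, 16000)
    window = torch.hann_window(400)
    spec = spectrogram(waveform, pad=0, window=window, n_fft=400,
                       hop_length=200, win_length=400, power=2.0,
                       normalized=False)
    return spec.shape  # torch.Size([1, 201, 81])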
def griffinlim(
specgram: Tensor,
window: Tensor,
n_fft: int,
hop_length: int,
win_length: int,
power: float,
normalized: bool,
n_iter: int,
momentum: float,
length: Optional[int],
rand_init: bool
) -> Tensor:
r"""Compute waveform from a linear scale magnitude spectrogram using the Griffin-Lim transformation.
Implementation ported from `librosa`.
* [1] McFee, Brian, Colin Raffel, Dawen Liang, Daniel PW Ellis, Matt McVicar, Eric Battenberg, and Oriol Nieto.
"librosa: Audio and music signal analysis in python."
In Proceedings of the 14th python in science conference, pp. 18-25. 2015.
    * [2] Perraudin, N., Balazs, P., & Søndergaard, P. L.
"A fast Griffin-Lim algorithm,"
IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (pp. 1-4),
Oct. 2013.
* [3] D. W. Griffin and J. S. Lim,
"Signal estimation from modified short-time Fourier transform,"
      IEEE Trans. ASSP, vol.32, no.2, pp.236-243, Apr. 1984.
Args:
specgram (Tensor): A magnitude-only STFT spectrogram of dimension (..., freq, frames)
where freq is ``n_fft // 2 + 1``.
window (Tensor): Window tensor that is applied/multiplied to each frame/window
n_fft (int): Size of FFT, creates ``n_fft // 2 + 1`` bins
hop_length (int): Length of hop between STFT windows. (
Default: ``win_length // 2``)
win_length (int): Window size. (Default: ``n_fft``)
power (float): Exponent for the magnitude spectrogram,
(must be > 0) e.g., 1 for energy, 2 for power, etc.
normalized (bool): Whether to normalize by magnitude after stft.
n_iter (int): Number of iteration for phase recovery process.
momentum (float): The momentum parameter for fast Griffin-Lim.
Setting this to 0 recovers the original Griffin-Lim method.
Values near 1 can lead to faster convergence, but above 1 may not converge.
length (int or None): Array length of the expected output.
rand_init (bool): Initializes phase randomly if True, to zero otherwise.
Returns:
torch.Tensor: waveform of (..., time), where time equals the ``length`` parameter if given.
"""
assert momentum < 1, 'momentum={} > 1 can be unstable'.format(momentum)
assert momentum >= 0, 'momentum={} < 0'.format(momentum)
if normalized:
warnings.warn(
"The argument normalized is not used in Griffin-Lim, "
"and will be removed in v0.9.0 release. To suppress this warning, "
"please use `normalized=False`.")
# pack batch
shape = specgram.size()
specgram = specgram.reshape([-1] + list(shape[-2:]))
specgram = specgram.pow(1 / power)
# randomly initialize the phase
batch, freq, frames = specgram.size()
if rand_init:
angles = 2 * math.pi * torch.rand(batch, freq, frames)
else:
angles = torch.zeros(batch, freq, frames)
angles = torch.stack([angles.cos(), angles.sin()], dim=-1) \
.to(dtype=specgram.dtype, device=specgram.device)
specgram = specgram.unsqueeze(-1).expand_as(angles)
# And initialize the previous iterate to 0
rebuilt = torch.tensor(0.)
for _ in range(n_iter):
# Store the previous iterate
tprev = rebuilt
# Invert with our current estimate of the phases
inverse = torch.istft(specgram * angles,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
length=length).float()
# Rebuild the spectrogram
rebuilt = torch.view_as_real(
torch.stft(
input=inverse,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
center=True,
pad_mode='reflect',
normalized=False,
onesided=True,
return_complex=True,
)
)
# Update our phase estimates
angles = rebuilt
if momentum:
angles = angles - tprev.mul_(momentum / (1 + momentum))
angles = angles.div(complex_norm(angles).add(1e-16).unsqueeze(-1).expand_as(angles))
# Return the final phase estimates
waveform = torch.istft(specgram * angles,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
length=length)
# unpack batch
waveform = waveform.reshape(shape[:-2] + waveform.shape[-1:])
return waveform
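# A minimal round-trip sketch (not part of the original module): build a
# magnitude spectrogram, then recover a waveform with 32 Griffin-Lim iterations.
def _griffinlim_example() -> Tensor:
    window = torch.hann_window(400)
    specgram = spectrogram(torch.randn(1, 16000), pad=0, window=window,
                           n_fft=400, hop_length=200, win_length=400,
                           power=2.0, normalized=False)
    return griffinlim(specgram, window, n_fft=400, hop_length=200,
                      win_length=400, power=2.0, normalized=False,
                      n_iter=32, momentum=0.99, length=16000, rand_init=True)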
def amplitude_to_DB(
x: Tensor,
multiplier: float,
amin: float,
db_multiplier: float,
top_db: Optional[float] = None
) -> Tensor:
r"""Turn a spectrogram from the power/amplitude scale to the decibel scale.
The output of each tensor in a batch depends on the maximum value of that tensor,
and so may return different values for an audio clip split into snippets vs. a full clip.
Args:
x (Tensor): Input spectrogram(s) before being converted to decibel scale. Input should take
the form `(..., freq, time)`. Batched inputs should include a channel dimension and
have the form `(batch, channel, freq, time)`.
multiplier (float): Use 10. for power and 20. for amplitude
amin (float): Number to clamp ``x``
db_multiplier (float): Log10(max(reference value and amin))
top_db (float or None, optional): Minimum negative cut-off in decibels. A reasonable number
is 80. (Default: ``None``)
Returns:
Tensor: Output tensor in decibel scale
"""
x_db = multiplier * torch.log10(torch.clamp(x, min=amin))
x_db -= multiplier * db_multiplier
if top_db is not None:
# Expand batch
shape = x_db.size()
packed_channels = shape[-3] if x_db.dim() > 2 else 1
x_db = x_db.reshape(-1, packed_channels, shape[-2], shape[-1])
x_db = torch.max(x_db, (x_db.amax(dim=(-3, -2, -1)) - top_db).view(-1, 1, 1, 1))
# Repack batch
x_db = x_db.reshape(shape)
return x_db
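# A small worked sketch (not part of the original module): for a power
# spectrogram, multiplier=10.0 and db_multiplier=0.0 give 10*log10(power) dB,
# so a bin with power 100 maps to exactly 20 dB.
def _amplitude_to_db_example() -> Tensor:
    power_spec = torch.full((1, 4, 4), 100.0)
    return amplitude_to_DB(power_spec, multiplier=10.0, amin=1e-10,
                           db_multiplier=0.0, top_db=80.0)  # every entry is 20.0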
def DB_to_amplitude(
x: Tensor,
ref: float,
power: float
) -> Tensor:
r"""Turn a tensor from the decibel scale to the power/amplitude scale.
Args:
x (Tensor): Input tensor before being converted to power/amplitude scale.
ref (float): Reference which the output will be scaled by.
power (float): If power equals 1, will compute DB to power. If 0.5, will compute DB to amplitude.
Returns:
Tensor: Output tensor in power/amplitude scale.
"""
return ref * torch.pow(torch.pow(10.0, 0.1 * x), power)
def _hz_to_mel(freq: float, mel_scale: str = "htk") -> float:
r"""Convert Hz to Mels.
Args:
freqs (float): Frequencies in Hz
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
Returns:
mels (float): Frequency in Mels
"""
if mel_scale not in ['slaney', 'htk']:
raise ValueError('mel_scale should be one of "htk" or "slaney".')
if mel_scale == "htk":
return 2595.0 * math.log10(1.0 + (freq / 700.0))
# Fill in the linear part
f_min = 0.0
f_sp = 200.0 / 3
mels = (freq - f_min) / f_sp
# Fill in the log-scale part
min_log_hz = 1000.0
min_log_mel = (min_log_hz - f_min) / f_sp
logstep = math.log(6.4) / 27.0
if freq >= min_log_hz:
mels = min_log_mel + math.log(freq / min_log_hz) / logstep
return mels
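# Spot check: on the HTK scale, 1000 Hz maps to approximately 1000 mel,
# since 2595.0 * log10(1 + 1000/700) is roughly 999.99.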
def _mel_to_hz(mels: Tensor, mel_scale: str = "htk") -> Tensor:
"""Convert mel bin numbers to frequencies.
Args:
mels (Tensor): Mel frequencies
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
Returns:
freqs (Tensor): Mels converted in Hz
"""
if mel_scale not in ['slaney', 'htk']:
raise ValueError('mel_scale should be one of "htk" or "slaney".')
if mel_scale == "htk":
return 700.0 * (10.0**(mels / 2595.0) - 1.0)
# Fill in the linear scale
f_min = 0.0
f_sp = 200.0 / 3
freqs = f_min + f_sp * mels
# And now the nonlinear scale
min_log_hz = 1000.0
min_log_mel = (min_log_hz - f_min) / f_sp
logstep = math.log(6.4) / 27.0
log_t = (mels >= min_log_mel)
freqs[log_t] = min_log_hz * torch.exp(logstep * (mels[log_t] - min_log_mel))
return freqs
def create_fb_matrix(
n_freqs: int,
f_min: float,
f_max: float,
n_mels: int,
sample_rate: int,
norm: Optional[str] = None,
mel_scale: str = "htk",
) -> Tensor:
r"""Create a frequency bin conversion matrix.
Args:
n_freqs (int): Number of frequencies to highlight/apply
f_min (float): Minimum frequency (Hz)
f_max (float): Maximum frequency (Hz)
n_mels (int): Number of mel filterbanks
sample_rate (int): Sample rate of the audio waveform
norm (Optional[str]): If 'slaney', divide the triangular mel weights by the width of the mel band
(area normalization). (Default: ``None``)
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
Returns:
Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_mels``)
meaning number of frequencies to highlight/apply to x the number of filterbanks.
Each column is a filterbank so that assuming there is a matrix A of
size (..., ``n_freqs``), the applied result would be
``A * create_fb_matrix(A.size(-1), ...)``.
"""
if norm is not None and norm != "slaney":
raise ValueError("norm must be one of None or 'slaney'")
# freq bins
# Equivalent filterbank construction by Librosa
all_freqs = torch.linspace(0, sample_rate // 2, n_freqs)
# calculate mel freq bins
m_min = _hz_to_mel(f_min, mel_scale=mel_scale)
m_max = _hz_to_mel(f_max, mel_scale=mel_scale)
m_pts = torch.linspace(m_min, m_max, n_mels + 2)
f_pts = _mel_to_hz(m_pts, mel_scale=mel_scale)
# calculate the difference between each mel point and each stft freq point in hertz
f_diff = f_pts[1:] - f_pts[:-1] # (n_mels + 1)
slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_mels + 2)
# create overlapping triangles
zero = torch.zeros(1)
down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_mels)
up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs, n_mels)
fb = torch.max(zero, torch.min(down_slopes, up_slopes))
if norm is not None and norm == "slaney":
# Slaney-style mel is scaled to be approx constant energy per channel
enorm = 2.0 / (f_pts[2:n_mels + 2] - f_pts[:n_mels])
fb *= enorm.unsqueeze(0)
if (fb.max(dim=0).values == 0.).any():
warnings.warn(
"At least one mel filterbank has all zero values. "
f"The value for `n_mels` ({n_mels}) may be set too high. "
f"Or, the value for `n_freqs` ({n_freqs}) may be set too low."
)
return fb
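# A minimal application sketch (not part of the original module), following the
# docstring: right-multiply a (..., time, freq) view of a spectrogram by the bank.
def _fb_matrix_example() -> torch.Size:
    spec = torch.rand(1, 201, 100)  # (channel, freq, time)
    fb = create_fb_matrix(n_freqs=201, f_min=0., f_max=8000., n_mels=40,
                          sample_rate=16000)
    mel_spec = torch.matmul(spec.transpose(-1, -2), fb).transpose(-1, -2)
    return mel_spec.shape  # torch.Size([1, 40, 100])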
def create_dct(
n_mfcc: int,
n_mels: int,
norm: Optional[str]
) -> Tensor:
r"""Create a DCT transformation matrix with shape (``n_mels``, ``n_mfcc``),
normalized depending on norm.
Args:
n_mfcc (int): Number of mfc coefficients to retain
n_mels (int): Number of mel filterbanks
norm (str or None): Norm to use (either 'ortho' or None)
Returns:
Tensor: The transformation matrix, to be right-multiplied to
row-wise data of size (``n_mels``, ``n_mfcc``).
"""
# http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II
n = torch.arange(float(n_mels))
k = torch.arange(float(n_mfcc)).unsqueeze(1)
dct = torch.cos(math.pi / float(n_mels) * (n + 0.5) * k) # size (n_mfcc, n_mels)
if norm is None:
dct *= 2.0
else:
assert norm == "ortho"
dct[0] *= 1.0 / math.sqrt(2.0)
dct *= math.sqrt(2.0 / float(n_mels))
return dct.t()
def mu_law_encoding(
x: Tensor,
quantization_channels: int
) -> Tensor:
r"""Encode signal based on mu-law companding. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This algorithm assumes the signal has been scaled to between -1 and 1 and
returns a signal encoded with values from 0 to quantization_channels - 1.
Args:
x (Tensor): Input tensor
quantization_channels (int): Number of channels
Returns:
Tensor: Input after mu-law encoding
"""
mu = quantization_channels - 1.0
if not x.is_floating_point():
x = x.to(torch.float)
mu = torch.tensor(mu, dtype=x.dtype)
x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu)
x_mu = ((x_mu + 1) / 2 * mu + 0.5).to(torch.int64)
return x_mu
def mu_law_decoding(
x_mu: Tensor,
quantization_channels: int
) -> Tensor:
r"""Decode mu-law encoded signal. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This expects an input with values between 0 and quantization_channels - 1
and returns a signal scaled between -1 and 1.
Args:
x_mu (Tensor): Input tensor
quantization_channels (int): Number of channels
Returns:
Tensor: Input after mu-law decoding
"""
mu = quantization_channels - 1.0
if not x_mu.is_floating_point():
x_mu = x_mu.to(torch.float)
mu = torch.tensor(mu, dtype=x_mu.dtype)
x = ((x_mu) / mu) * 2 - 1.0
x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) / mu
return x
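# A minimal round-trip sketch (not part of the original module): encoding then
# decoding is lossy, but stays within one quantization step of the input.
def _mu_law_example() -> Tensor:
    waveform = torch.linspace(-1.0, 1.0, steps=5)
    encoded = mu_law_encoding(waveform, quantization_channels=256)
    return mu_law_decoding(encoded, quantization_channels=256)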
def complex_norm(
complex_tensor: Tensor,
power: float = 1.0
) -> Tensor:
r"""Compute the norm of complex tensor input.
Args:
complex_tensor (Tensor): Tensor shape of `(..., complex=2)`
power (float): Power of the norm. (Default: `1.0`).
Returns:
Tensor: Power of the normed input tensor. Shape of `(..., )`
"""
# Replace by torch.norm once issue is fixed
# https://github.com/pytorch/pytorch/issues/34279
return complex_tensor.pow(2.).sum(-1).pow(0.5 * power)
def angle(
complex_tensor: Tensor
) -> Tensor:
r"""Compute the angle of complex tensor input.
Args:
complex_tensor (Tensor): Tensor shape of `(..., complex=2)`
Return:
Tensor: Angle of a complex tensor. Shape of `(..., )`
"""
return torch.atan2(complex_tensor[..., 1], complex_tensor[..., 0])
def magphase(
complex_tensor: Tensor,
power: float = 1.0
) -> Tuple[Tensor, Tensor]:
r"""Separate a complex-valued spectrogram with shape `(..., 2)` into its magnitude and phase.
Args:
complex_tensor (Tensor): Tensor shape of `(..., complex=2)`
power (float): Power of the norm. (Default: `1.0`)
Returns:
(Tensor, Tensor): The magnitude and phase of the complex tensor
"""
mag = complex_norm(complex_tensor, power)
phase = angle(complex_tensor)
return mag, phase
def phase_vocoder(
complex_specgrams: Tensor,
rate: float,
phase_advance: Tensor
) -> Tensor:
r"""Given a STFT tensor, speed up in time without modifying pitch by a
factor of ``rate``.
Args:
complex_specgrams (Tensor): Dimension of `(..., freq, time, complex=2)`
rate (float): Speed-up factor
phase_advance (Tensor): Expected phase advance in each bin. Dimension of (freq, 1)
Returns:
Tensor: Complex Specgrams Stretch with dimension of `(..., freq, ceil(time/rate), complex=2)`
Example
>>> freq, hop_length = 1025, 512
>>> # (channel, freq, time, complex=2)
>>> complex_specgrams = torch.randn(2, freq, 300, 2)
>>> rate = 1.3 # Speed up by 30%
>>> phase_advance = torch.linspace(
>>> 0, math.pi * hop_length, freq)[..., None]
>>> x = phase_vocoder(complex_specgrams, rate, phase_advance)
>>> x.shape # with 231 == ceil(300 / 1.3)
torch.Size([2, 1025, 231, 2])
"""
# pack batch
shape = complex_specgrams.size()
complex_specgrams = complex_specgrams.reshape([-1] + list(shape[-3:]))
time_steps = torch.arange(0,
complex_specgrams.size(-2),
rate,
device=complex_specgrams.device,
dtype=complex_specgrams.dtype)
alphas = time_steps % 1.0
phase_0 = angle(complex_specgrams[..., :1, :])
# Time Padding
complex_specgrams = torch.nn.functional.pad(complex_specgrams, [0, 0, 0, 2])
# (new_bins, freq, 2)
complex_specgrams_0 = complex_specgrams.index_select(-2, time_steps.long())
complex_specgrams_1 = complex_specgrams.index_select(-2, (time_steps + 1).long())
angle_0 = angle(complex_specgrams_0)
angle_1 = angle(complex_specgrams_1)
norm_0 = torch.norm(complex_specgrams_0, p=2, dim=-1)
norm_1 = torch.norm(complex_specgrams_1, p=2, dim=-1)
phase = angle_1 - angle_0 - phase_advance
phase = phase - 2 * math.pi * torch.round(phase / (2 * math.pi))
# Compute Phase Accum
phase = phase + phase_advance
phase = torch.cat([phase_0, phase[..., :-1]], dim=-1)
phase_acc = torch.cumsum(phase, -1)
mag = alphas * norm_1 + (1 - alphas) * norm_0
real_stretch = mag * torch.cos(phase_acc)
imag_stretch = mag * torch.sin(phase_acc)
complex_specgrams_stretch = torch.stack([real_stretch, imag_stretch], dim=-1)
# unpack batch
complex_specgrams_stretch = complex_specgrams_stretch.reshape(shape[:-3] + complex_specgrams_stretch.shape[1:])
return complex_specgrams_stretch
def mask_along_axis_iid(
specgrams: Tensor,
mask_param: int,
mask_value: float,
axis: int
) -> Tensor:
r"""
Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where
``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``.
Args:
specgrams (Tensor): Real spectrograms (batch, channel, freq, time)
mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param]
mask_value (float): Value to assign to the masked columns
axis (int): Axis to apply masking on (2 -> frequency, 3 -> time)
Returns:
Tensor: Masked spectrograms of dimensions (batch, channel, freq, time)
"""
if axis != 2 and axis != 3:
raise ValueError('Only Frequency and Time masking are supported')
device = specgrams.device
dtype = specgrams.dtype
value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * mask_param
min_value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * (specgrams.size(axis) - value)
# Create broadcastable mask
mask_start = min_value[..., None, None]
mask_end = (min_value + value)[..., None, None]
mask = torch.arange(0, specgrams.size(axis), device=device, dtype=dtype)
# Per batch example masking
specgrams = specgrams.transpose(axis, -1)
specgrams.masked_fill_((mask >= mask_start) & (mask < mask_end), mask_value)
specgrams = specgrams.transpose(axis, -1)
return specgrams
def mask_along_axis(
specgram: Tensor,
mask_param: int,
mask_value: float,
axis: int
) -> Tensor:
r"""
Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where
``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``.
All examples will have the same mask interval.
Args:
specgram (Tensor): Real spectrogram (channel, freq, time)
mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param]
mask_value (float): Value to assign to the masked columns
axis (int): Axis to apply masking on (1 -> frequency, 2 -> time)
Returns:
Tensor: Masked spectrogram of dimensions (channel, freq, time)
"""
# pack batch
shape = specgram.size()
specgram = specgram.reshape([-1] + list(shape[-2:]))
value = torch.rand(1) * mask_param
min_value = torch.rand(1) * (specgram.size(axis) - value)
mask_start = (min_value.long()).squeeze()
mask_end = (min_value.long() + value.long()).squeeze()
assert mask_end - mask_start < mask_param
if axis == 1:
specgram[:, mask_start:mask_end] = mask_value
elif axis == 2:
specgram[:, :, mask_start:mask_end] = mask_value
else:
raise ValueError('Only Frequency and Time masking are supported')
# unpack batch
specgram = specgram.reshape(shape[:-2] + specgram.shape[-2:])
return specgram
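# A minimal SpecAugment-style sketch (not part of the original module): zero out
# up to 10 consecutive time steps (axis=2), then up to 5 frequency bins (axis=1).
def _masking_example() -> Tensor:
    specgram = torch.rand(1, 128, 400)  # (channel, freq, time)
    masked = mask_along_axis(specgram, mask_param=10, mask_value=0.0, axis=2)
    return mask_along_axis(masked, mask_param=5, mask_value=0.0, axis=1)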
def compute_deltas(
specgram: Tensor,
win_length: int = 5,
mode: str = "replicate"
) -> Tensor:
r"""Compute delta coefficients of a tensor, usually a spectrogram:
.. math::
d_t = \frac{\sum_{n=1}^{\text{N}} n (c_{t+n} - c_{t-n})}{2 \sum_{n=1}^{\text{N}} n^2}
where :math:`d_t` is the deltas at time :math:`t`,
    :math:`c_t` are the spectrogram coefficients at time :math:`t`,
:math:`N` is ``(win_length-1)//2``.
Args:
specgram (Tensor): Tensor of audio of dimension (..., freq, time)
win_length (int, optional): The window length used for computing delta (Default: ``5``)
mode (str, optional): Mode parameter passed to padding (Default: ``"replicate"``)
Returns:
Tensor: Tensor of deltas of dimension (..., freq, time)
Example
>>> specgram = torch.randn(1, 40, 1000)
>>> delta = compute_deltas(specgram)
>>> delta2 = compute_deltas(delta)
"""
device = specgram.device
dtype = specgram.dtype
# pack batch
shape = specgram.size()
specgram = specgram.reshape(1, -1, shape[-1])
assert win_length >= 3
n = (win_length - 1) // 2
    # denominator: 2 * sum(i**2 for i in 1..n) = n * (n + 1) * (2 * n + 1) / 3
denom = n * (n + 1) * (2 * n + 1) / 3
specgram = torch.nn.functional.pad(specgram, (n, n), mode=mode)
kernel = torch.arange(-n, n + 1, 1, device=device, dtype=dtype).repeat(specgram.shape[1], 1, 1)
output = torch.nn.functional.conv1d(specgram, kernel, groups=specgram.shape[1]) / denom
# unpack batch
output = output.reshape(shape)
return output
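# Editor's worked check (hedged): with win_length=3 we have n=1, the kernel is
# [-1, 0, 1] and denom = 2, i.e. an edge-replicated central difference:
#
#   >>> compute_deltas(torch.tensor([[[1., 2., 4.]]]), win_length=3)
#   tensor([[[0.5000, 1.5000, 1.0000]]])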
def _compute_nccf(
waveform: Tensor,
sample_rate: int,
frame_time: float,
freq_low: int
) -> Tensor:
r"""
Compute Normalized Cross-Correlation Function (NCCF).
.. math::
\phi_i(m) = \frac{\sum_{n=b_i}^{b_i + N-1} w(n) w(m+n)}{\sqrt{E(b_i) E(m+b_i)}},
where
:math:`\phi_i(m)` is the NCCF at frame :math:`i` with lag :math:`m`,
:math:`w` is the waveform,
:math:`N` is the length of a frame,
:math:`b_i` is the beginning of frame :math:`i`,
:math:`E(j)` is the energy :math:`\sum_{n=j}^{j+N-1} w^2(n)`.
"""
EPSILON = 10 ** (-9)
# Number of lags to check
lags = int(math.ceil(sample_rate / freq_low))
frame_size = int(math.ceil(sample_rate * frame_time))
waveform_length = waveform.size()[-1]
num_of_frames = int(math.ceil(waveform_length / frame_size))
p = lags + num_of_frames * frame_size - waveform_length
waveform = torch.nn.functional.pad(waveform, (0, p))
# Compute lags
output_lag = []
for lag in range(1, lags + 1):
s1 = waveform[..., :-lag].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :]
s2 = waveform[..., lag:].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :]
output_frames = (
(s1 * s2).sum(-1)
/ (EPSILON + torch.norm(s1, p=2, dim=-1)).pow(2)
/ (EPSILON + torch.norm(s2, p=2, dim=-1)).pow(2)
)
output_lag.append(output_frames.unsqueeze(-1))
nccf = torch.cat(output_lag, -1)
return nccf
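# Editor's shape sketch (hedged, values illustrative): one NCCF score per
# (frame, lag) pair; for 1 s of mono audio at 8 kHz with the inputs below:
#
#   >>> nccf = _compute_nccf(torch.randn(1, 8000), 8000, 0.01, 85)
#   >>> nccf.shape  # frames = ceil(8000 / 80), lags = ceil(8000 / 85)
#   torch.Size([1, 100, 95])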
def _combine_max(
a: Tuple[Tensor, Tensor],
b: Tuple[Tensor, Tensor],
thresh: float = 0.99
) -> Tuple[Tensor, Tensor]:
"""
    Take the value (and its index) from ``a`` wherever it exceeds ``thresh``
    times the corresponding value in ``b``; otherwise take them from ``b``.
"""
mask = (a[0] > thresh * b[0])
values = mask * a[0] + ~mask * b[0]
indices = mask * a[1] + ~mask * b[1]
return values, indices
def _find_max_per_frame(
nccf: Tensor,
sample_rate: int,
freq_high: int
) -> Tensor:
r"""
For each frame, take the highest value of NCCF,
apply centered median smoothing, and convert to frequency.
    Note: if a peak within the first half of the lag range comes close enough
    to the global maximum, the smaller lag is preferred (see ``_combine_max``).
"""
lag_min = int(math.ceil(sample_rate / freq_high))
    # Prefer the smallest lag whose NCCF value is close enough to the global maximum
best = torch.max(nccf[..., lag_min:], -1)
half_size = nccf.shape[-1] // 2
half = torch.max(nccf[..., lag_min:half_size], -1)
best = _combine_max(half, best)
indices = best[1]
# Add back minimal lag
indices += lag_min
# Add 1 empirical calibration offset
indices += 1
return indices
def _median_smoothing(
indices: Tensor,
win_length: int
) -> Tensor:
r"""
Apply median smoothing to the 1D tensor over the given window.
"""
    # Centered window
    pad_length = (win_length - 1) // 2
    # F.pad's "constant" mode works for any number of dimensions; the
    # replicate-style left edge is then filled in manually just below
indices = torch.nn.functional.pad(
indices, (pad_length, 0), mode="constant", value=0.
)
indices[..., :pad_length] = torch.cat(pad_length * [indices[..., pad_length].unsqueeze(-1)], dim=-1)
roll = indices.unfold(-1, win_length, 1)
values, _ = torch.median(roll, -1)
return values
def detect_pitch_frequency(
waveform: Tensor,
sample_rate: int,
frame_time: float = 10 ** (-2),
win_length: int = 30,
freq_low: int = 85,
freq_high: int = 3400,
) -> Tensor:
r"""Detect pitch frequency.
It is implemented using normalized cross-correlation function and median smoothing.
Args:
        waveform (Tensor): Tensor of audio of dimension (..., time)
sample_rate (int): The sample rate of the waveform (Hz)
frame_time (float, optional): Duration of a frame (Default: ``10 ** (-2)``).
win_length (int, optional): The window length for median smoothing (in number of frames) (Default: ``30``).
freq_low (int, optional): Lowest frequency that can be detected (Hz) (Default: ``85``).
freq_high (int, optional): Highest frequency that can be detected (Hz) (Default: ``3400``).
Returns:
Tensor: Tensor of freq of dimension (..., frame)
"""
# pack batch
shape = list(waveform.size())
waveform = waveform.reshape([-1] + shape[-1:])
nccf = _compute_nccf(waveform, sample_rate, frame_time, freq_low)
indices = _find_max_per_frame(nccf, sample_rate, freq_high)
indices = _median_smoothing(indices, win_length)
# Convert indices to frequency
EPSILON = 10 ** (-9)
freq = sample_rate / (EPSILON + indices.to(torch.float))
# unpack batch
freq = freq.reshape(shape[:-1] + list(freq.shape[-1:]))
return freq
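# Editor's usage sketch (hedged, values illustrative): the pitch contour of a
# pure 440 Hz tone should sit near 440 in every frame:
#
#   >>> sample_rate = 44100
#   >>> t = torch.arange(sample_rate) / sample_rate
#   >>> waveform = torch.sin(2 * math.pi * 440 * t).unsqueeze(0)
#   >>> freq = detect_pitch_frequency(waveform, sample_rate)  # shape (1, n_frames)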
def sliding_window_cmn(
waveform: Tensor,
cmn_window: int = 600,
min_cmn_window: int = 100,
center: bool = False,
norm_vars: bool = False,
) -> Tensor:
r"""
Apply sliding-window cepstral mean (and optionally variance) normalization per utterance.
Args:
        waveform (Tensor): Tensor of features of dimension (..., time, freq); frames run along the second-to-last axis
cmn_window (int, optional): Window in frames for running average CMN computation (int, default = 600)
min_cmn_window (int, optional): Minimum CMN window used at start of decoding (adds latency only at start).
Only applicable if center == false, ignored if center==true (int, default = 100)
center (bool, optional): If true, use a window centered on the current frame
(to the extent possible, modulo end effects). If false, window is to the left. (bool, default = false)
norm_vars (bool, optional): If true, normalize variance to one. (bool, default = false)
Returns:
        Tensor: Normalized features of dimension (..., time, freq), same shape as the input
"""
input_shape = waveform.shape
num_frames, num_feats = input_shape[-2:]
waveform = waveform.view(-1, num_frames, num_feats)
num_channels = waveform.shape[0]
dtype = waveform.dtype
device = waveform.device
last_window_start = last_window_end = -1
cur_sum = torch.zeros(num_channels, num_feats, dtype=dtype, device=device)
cur_sumsq = torch.zeros(num_channels, num_feats, dtype=dtype, device=device)
cmn_waveform = torch.zeros(
num_channels, num_frames, num_feats, dtype=dtype, device=device)
for t in range(num_frames):
window_start = 0
window_end = 0
if center:
window_start = t - cmn_window // 2
window_end = window_start + cmn_window
else:
window_start = t - cmn_window
window_end = t + 1
if window_start < 0:
window_end -= window_start
window_start = 0
if not center:
if window_end > t:
window_end = max(t + 1, min_cmn_window)
if window_end > num_frames:
window_start -= (window_end - num_frames)
window_end = num_frames
if window_start < 0:
window_start = 0
if last_window_start == -1:
input_part = waveform[:, window_start: window_end - window_start, :]
cur_sum += torch.sum(input_part, 1)
if norm_vars:
cur_sumsq += torch.cumsum(input_part ** 2, 1)[:, -1, :]
else:
if window_start > last_window_start:
frame_to_remove = waveform[:, last_window_start, :]
cur_sum -= frame_to_remove
if norm_vars:
cur_sumsq -= (frame_to_remove ** 2)
if window_end > last_window_end:
frame_to_add = waveform[:, last_window_end, :]
cur_sum += frame_to_add
if norm_vars:
cur_sumsq += (frame_to_add ** 2)
window_frames = window_end - window_start
last_window_start = window_start
last_window_end = window_end
cmn_waveform[:, t, :] = waveform[:, t, :] - cur_sum / window_frames
if norm_vars:
if window_frames == 1:
cmn_waveform[:, t, :] = torch.zeros(
num_channels, num_feats, dtype=dtype, device=device)
else:
variance = cur_sumsq
variance = variance / window_frames
variance -= ((cur_sum ** 2) / (window_frames ** 2))
variance = torch.pow(variance, -0.5)
cmn_waveform[:, t, :] *= variance
cmn_waveform = cmn_waveform.view(input_shape[:-2] + (num_frames, num_feats))
if len(input_shape) == 2:
cmn_waveform = cmn_waveform.squeeze(0)
return cmn_waveform
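# Editor's usage sketch (hedged, values illustrative): normalize Kaldi-style
# features of shape (time, freq); the output keeps the input shape:
#
#   >>> feats = torch.randn(300, 13)
#   >>> normed = sliding_window_cmn(feats, cmn_window=600, center=True)
#   >>> normed.shape
#   torch.Size([300, 13])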
def spectral_centroid(
waveform: Tensor,
sample_rate: int,
pad: int,
window: Tensor,
n_fft: int,
hop_length: int,
win_length: int,
) -> Tensor:
r"""
Compute the spectral centroid for each channel along the time axis.
The spectral centroid is defined as the weighted average of the
frequency values, weighted by their magnitude.
Args:
waveform (Tensor): Tensor of audio of dimension (..., time)
sample_rate (int): Sample rate of the audio waveform
pad (int): Two sided padding of signal
window (Tensor): Window tensor that is applied/multiplied to each frame/window
n_fft (int): Size of FFT
hop_length (int): Length of hop between STFT windows
win_length (int): Window size
Returns:
Tensor: Dimension (..., time)
"""
specgram = spectrogram(waveform, pad=pad, window=window, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, power=1., normalized=False)
freqs = torch.linspace(0, sample_rate // 2, steps=1 + n_fft // 2,
device=specgram.device).reshape((-1, 1))
freq_dim = -2
return (freqs * specgram).sum(dim=freq_dim) / specgram.sum(dim=freq_dim)
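# Editor's usage sketch (hedged, values illustrative): one centroid in Hz per
# STFT frame, here with a 25 ms Hann window and 50% overlap at 16 kHz:
#
#   >>> waveform = torch.randn(1, 16000)
#   >>> sc = spectral_centroid(waveform, 16000, pad=0, window=torch.hann_window(400),
#   ...                        n_fft=400, hop_length=200, win_length=400)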
| 35.39918 | 116 | 0.611666 |
7cabc4e8d6c4275c91322768679e9a68335e86e0 | 12,964 | py | Python | src/status_node.py | Faust-Wang/vswarm | d18ce643218c18ef1e762f40562104b2a0926ad7 | ["MIT"] | 21 | 2021-03-03T10:51:46.000Z | 2022-03-28T11:00:35.000Z | src/status_node.py | Faust-Wang/vswarm | d18ce643218c18ef1e762f40562104b2a0926ad7 | ["MIT"] | 2 | 2021-07-21T07:57:16.000Z | 2022-03-17T12:41:51.000Z | src/status_node.py | hvourtsis/vswarm | d18ce643218c18ef1e762f40562104b2a0926ad7 | ["MIT"] | 8 | 2021-02-27T14:29:55.000Z | 2022-01-05T19:40:38.000Z |
#!/usr/bin/env python3
from __future__ import absolute_import, division, print_function
import curses
import sys
from collections import deque
from datetime import datetime
import numpy as np
import rospy
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus
from geometry_msgs.msg import PoseStamped
from mavros_msgs.msg import ExtendedState, PositionTarget, State # StatusText
from scipy.spatial.transform import Rotation as R
from sensor_msgs.msg import BatteryState, Image, NavSatFix
GPS_FIX_DICT = {
0: ('No GPS', curses.COLOR_RED),
1: ('No fix', curses.COLOR_RED),
2: ('2D lock', curses.COLOR_BLUE),
3: ('3D lock', curses.COLOR_BLUE),
4: ('DGPS', curses.COLOR_MAGENTA),
5: ('RTK float', curses.COLOR_YELLOW),
6: ('RTK fix', curses.COLOR_GREEN)
}
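# Editor's placeholder (hedged): the dump truncates this file (the metadata
# row reports 12,964 bytes but only the header survives), eliding the node's
# classes and the real main(). A hypothetical minimal stub so that the entry
# point below resolves; this is NOT the vswarm implementation:
def main():
    rospy.init_node('status_node')  # node name assumed, not from the source
    rospy.spin()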
if __name__ == '__main__':
main()
| 35.132791 | 100 | 0.57166 |
7cabe7391066e68e59a6eee1bcca21b689be0897 | 5,010 | py | Python | bin/boxplot_param.py | mo-schmid/MIALab | 8a7e183df7007993e8a28513a73dca20bfd60737 | ["Apache-2.0"] | null | null | null | bin/boxplot_param.py | mo-schmid/MIALab | 8a7e183df7007993e8a28513a73dca20bfd60737 | ["Apache-2.0"] | null | null | null | bin/boxplot_param.py | mo-schmid/MIALab | 8a7e183df7007993e8a28513a73dca20bfd60737 | ["Apache-2.0"] | null | null | null |
import argparse
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
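# Editor's placeholders (hedged): ResultParam and the plotting helpers called
# by main() were truncated out of this dump. Minimal stand-ins inferred from
# the call sites below; column names and styling are assumptions, not the
# original MIALab code.
from collections import namedtuple
ResultParam = namedtuple('ResultParam', ['path', 'param_str'])  # hypothetical
def format_data(df, label, metric):
    """Return the metric values of one label from a results dataframe."""
    return df[df['LABEL'] == label][metric].values  # 'LABEL' column assumed
def metric_to_readable_text(metric):
    return {'DICE': 'Dice coefficient', 'HDRFDST': 'Hausdorff distance (mm)'}.get(metric, metric)
def boxplot(file_path, data, title, x_label, y_label, x_ticks, min_=None, max_=None):
    """Draw one box per method and save the figure to file_path."""
    fig, ax = plt.subplots()
    ax.boxplot(data, widths=0.6)
    ax.set_title(title)
    ax.set_xlabel(x_label)
    ax.set_ylabel(y_label)
    ax.set_xticklabels(x_ticks)
    if min_ is not None and max_ is not None:
        ax.set_ylim(min_, max_)
    fig.savefig(file_path)
    plt.close(fig)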
def main(results: [ResultParam], plot_dir: Path):
    """Generate box plots comparing two or more result sets for all labels.
    Args:
        results ([ResultParam]): a list of result parameters (path and description)
        plot_dir: Path to the result folder in which to store the box plots
    """
metrics = ('DICE', 'HDRFDST') # the metrics we want to plot the results for
metrics_yaxis_limits = ((0.0, 1.0), (0.0, 18)) # tuples of y-axis limits (min, max) for each metric. Use None if unknown
labels = ('WhiteMatter','GreyMatter', 'Hippocampus','Amygdala','Thalamus') # the brain structures/tissues you are interested in
    # Load the CSVs. We usually want to compare different methods (e.g. results
    # produced with different feature sets), so one CSV is loaded per method.
# todo: adapt to your needs to compare different methods (e.g. load different CSVs)
dfs = []
methods = []
for res in results:
dfs.append(pd.read_csv(res.path, sep=';'))
methods.append(res.param_str)
    # todo: read parameter values from a text file and use them to annotate the plots with the parameter information
# some parameters to improve the plot's readability
title = '{}'
for label in labels:
for metric, (min_, max_) in zip(metrics, metrics_yaxis_limits):
boxplot(os.path.join(plot_dir, '{}_{}.png'.format(label, metric)),
[format_data(df, label, metric) for df in dfs],
title.format(label),
'Method', metric_to_readable_text(metric),
methods,
min_, max_
)
if __name__ == '__main__':
results = []
    results.append(ResultParam(Path(Path.cwd() / "mia-result/gridsearch_PKF/2020-12-11-09-51-54/no_PP/results.csv"),
"no pp"))
results.append(ResultParam(Path(Path.cwd() /"mia-result/gridsearch_PKF/2020-12-11-09-51-54/with_PP/PP-V-20_0-BG-True/results.csv"),
"with pp"))
main(results, Path(Path.cwd() / 'mia-result/plot_results'))
| 35.531915 | 135 | 0.641916 |
7cae45b970c1385083dad6bbec98b3cd495bf626 | 3,948 | py | Python | EMeRGE/dssmetrics/constants.py | NREL/EMeRGE | 573e86ca8e62080c664998e8cc79e9231e7ad502 | ["BSD-3-Clause"] | 6 | 2020-04-11T18:09:00.000Z | 2022-01-23T20:38:38.000Z | EMeRGE/dssmetrics/constants.py | NREL/EMeRGE | 573e86ca8e62080c664998e8cc79e9231e7ad502 | ["BSD-3-Clause"] | null | null | null | EMeRGE/dssmetrics/constants.py | NREL/EMeRGE | 573e86ca8e62080c664998e8cc79e9231e7ad502 | ["BSD-3-Clause"] | 3 | 2020-06-11T02:48:49.000Z | 2021-08-10T07:13:57.000Z |
""" Default values : DO NOT CHANGE !!!"""
LOG_FORMAT = "%(asctime)s: %(levelname)s: %(message)s"
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
MAXITERATIONS = 100
LIFE_PARAMETERS = {"theta_i":30,"theta_fl":36,"theta_gfl":28.6,
"R":4.87,"n":1,"tau":3.5,"m":1,"A":-13.391,
"B":6972.15,"num_of_iteration":4,}
DEFAULT_TEMP = 25
MAX_TRANS_LOADING = 1.5
DEFAULT_CONFIGURATION = {
"dss_filepath": "",
"dss_filename":"",
"extra_data_path": ".",
"export_folder":"",
"start_time":"2018-1-1 0:0:0",
"end_time":"2018-2-1 0:0:0",
"simulation_time_step (minute)": 15,
"frequency": 50,
"upper_voltage": 1.1,
"lower_voltage":0.9,
"record_every": 96,
"export_voltages": False,
"export_lineloadings": False,
"export_transloadings":False,
"export_start_date": "",
"export_end_date": "",
"volt_var": {
"enabled": False,
"yarray": [0.44,0.44,0,0,-0.44,-0.44],
"xarray": [0.7,0.90,0.95,1.05,1.10,1.3]
},
"log_settings": {
"save_in_file": False,
"log_folder": ".",
"log_filename":"logs.log",
"clear_old_log_file": True
}
}
DEFAULT_ADVANCED_CONFIGURATION = {
"project_path": "C:\\Users\\KDUWADI\\Desktop\\NREL_Projects\\CIFF-TANGEDCO\\TANGEDCO\\EMERGE\\Projects",
"active_project":"GR_PALAYAM",
"active_scenario": "FullYear",
"dss_filename":"gr_palayam.dss",
"start_time":"2018-1-1 0:0:0",
"end_time":"2018-1-2 0:0:0",
"simulation_time_step (minute)": 60,
"frequency": 50,
"upper_voltage": 1.1,
"lower_voltage":0.9,
"record_every": 4,
"parallel_simulation":True,
"parallel_process": 1,
"export_voltages": False,
"export_lineloadings": False,
"export_transloadings":False,
"export_start_date": "",
"export_end_date": "",
"volt_var": {
"enabled": True,
"yarray": [0.44,0.44,0,0,-0.44,-0.44],
"xarray": [0.7,0.90,0.95,1.05,1.10,1.3]
},
"log_settings": {
"save_in_file": False,
"log_filename":"",
"clear_old_log_file": True
}
}
VALID_SETTINGS = {
"project_path":{'type':str},
"active_project":{'type':str},
"active_scenario":{'type':str},
"dss_filepath": {'type': str},
"dss_filename":{'type':str},
"export_folder":{'type':str},
"start_time":{'type':str},
"end_time":{'type':str},
"simulation_time_step (minute)":{'type':int},
"frequency": {'type':int,'options':[50,60]},
"upper_voltage": {'type':float,'range':[1,1.5]},
"lower_voltage":{'type':float,'range':[0.8,1]},
"record_every": {'type':int},
"extra_data_path":{'type':str},
"parallel_simulation":{'type':bool},
"parallel_process": {'type':int,'range':[1,4]},
"export_voltages": {'type':bool},
"export_lineloadings": {'type':bool},
"export_transloadings":{'type':bool},
"export_start_date": {'type':str},
"export_end_date": {'type':str},
"volt_var": {
"enabled": {'type':bool},
"yarray": {'type':list},
"xarray": {'type':list}
},
"log_settings": {
"save_in_file": {'type':bool},
"log_folder": {'type':str},
"log_filename":{'type':str},
"clear_old_log_file": {'type':bool}
}
}
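# Editor's usage sketch (hedged): each VALID_SETTINGS entry pairs a key with a
# 'type' plus optional 'options'/'range', so a value can be checked like this:
#
#   >>> spec = VALID_SETTINGS["frequency"]
#   >>> value = DEFAULT_CONFIGURATION["frequency"]
#   >>> isinstance(value, spec["type"]) and value in spec["options"]
#   True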
| 35.567568 | 108 | 0.472898 |
7cae7acd2ab857e48bf48cfdcc2ed083e6292337 | 12,669 | py | Python | minesweeper/game.py | MathisFederico/Minesweeper | b66b41066e325813b24497d2caca0a11c048e18b | ["MIT"] | 1 | 2020-12-23T11:52:40.000Z | 2020-12-23T11:52:40.000Z | minesweeper/game.py | MathisFederico/Minesweeper | b66b41066e325813b24497d2caca0a11c048e18b | ["MIT"] | null | null | null | minesweeper/game.py | MathisFederico/Minesweeper | b66b41066e325813b24497d2caca0a11c048e18b | ["MIT"] | null | null | null |
try:
import importlib.resources as pkg_resources
except ImportError:
# Try backported to PY<37 `importlib_resources`.
import importlib_resources as pkg_resources
from . import images
from gym import Env, spaces
from time import time
import numpy as np
from copy import copy
import colorsys
import pygame
from pygame.transform import scale
| 41.950331 | 136 | 0.582209 |
7caf56de8045038d74971a889dbed39c31d7bb50 | 1,306 | py | Python | tests/python/gaia-ui-tests/gaiatest/tests/functional/lockscreen/test_lockscreen_unlock_to_camera_with_passcode.py | BReduardokramer/gaia | c00302cdcd435ab193e8365917cfc6abac9e4f2e | ["Apache-2.0"] | 1 | 2021-11-09T00:27:34.000Z | 2021-11-09T00:27:34.000Z | tests/python/gaia-ui-tests/gaiatest/tests/functional/lockscreen/test_lockscreen_unlock_to_camera_with_passcode.py | AmyYLee/gaia | a5dbae8235163d7f985bdeb7d649268f02749a8b | ["Apache-2.0"] | null | null | null | tests/python/gaia-ui-tests/gaiatest/tests/functional/lockscreen/test_lockscreen_unlock_to_camera_with_passcode.py | AmyYLee/gaia | a5dbae8235163d7f985bdeb7d649268f02749a8b | ["Apache-2.0"] | null | null | null |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from gaiatest import GaiaTestCase
from gaiatest.apps.lockscreen.app import LockScreen
| 31.095238 | 90 | 0.717458 |
7caf8da8a5f682874ecef410bafcd6662e5de11b | 3,440 | py | Python | models/layers/mesh_conv.py | CallumMcMahon/MeshCNN | 343950a8d69807ed4afa13f1843edb37c4cd042c | ["MIT"] | 2 | 2022-01-05T09:21:17.000Z | 2022-03-24T15:20:14.000Z | models/layers/mesh_conv.py | CallumMcMahon/MeshCNN | 343950a8d69807ed4afa13f1843edb37c4cd042c | ["MIT"] | null | null | null | models/layers/mesh_conv.py | CallumMcMahon/MeshCNN | 343950a8d69807ed4afa13f1843edb37c4cd042c | ["MIT"] | 1 | 2022-03-24T15:20:20.000Z | 2022-03-24T15:20:20.000Z |
import torch
import torch.nn as nn
import torch.nn.functional as F
| 41.445783 | 123 | 0.585756 |
7cafa8674f41ef3d6dc02a39822b470f9b548e16 | 334 | py | Python | code/0-input/create_hdf5/check_hdf5.py | AvinWangZH/3D-convolutional-speaker-recognition | 61969eb2dba6004bdecb4f7100047015ca665348 | ["Apache-2.0"] | 1 | 2021-04-04T17:08:05.000Z | 2021-04-04T17:08:05.000Z | code/0-input/create_hdf5/check_hdf5.py | subvin/3D-convolutional-speaker-recognition | 61969eb2dba6004bdecb4f7100047015ca665348 | ["Apache-2.0"] | null | null | null | code/0-input/create_hdf5/check_hdf5.py | subvin/3D-convolutional-speaker-recognition | 61969eb2dba6004bdecb4f7100047015ca665348 | ["Apache-2.0"] | null | null | null |
import tables
import numpy as np
import matplotlib.pyplot as plt
# Reading the file.
fileh = tables.open_file('development.hdf5', mode='r')
# Dimensionality of the data structure.
print(fileh.root.utterance_test.shape)
print(fileh.root.utterance_train.shape)
print(fileh.root.label_train.shape)
print(fileh.root.label_test.shape)
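# Editor's addition: release the HDF5 handle once the shapes have been printed.
fileh.close()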
| 22.266667 | 54 | 0.796407 |