Dataset schema (one row per source file; each column's type and value range or class count):

| Column | Type | Range / classes |
|---|---|---|
| blob_id | stringlengths | 40-40 |
| directory_id | stringlengths | 40-40 |
| path | stringlengths | 5-283 |
| content_id | stringlengths | 40-40 |
| detected_licenses | sequencelengths | 0-41 |
| license_type | stringclasses | 2 values |
| repo_name | stringlengths | 7-96 |
| snapshot_id | stringlengths | 40-40 |
| revision_id | stringlengths | 40-40 |
| branch_name | stringclasses | 58 values |
| visit_date | timestamp[us] | - |
| revision_date | timestamp[us] | - |
| committer_date | timestamp[us] | - |
| github_id | int64 | 12.7k-662M (nullable, ⌀) |
| star_events_count | int64 | 0-35.5k |
| fork_events_count | int64 | 0-20.6k |
| gha_license_id | stringclasses | 11 values |
| gha_event_created_at | timestamp[us] | - |
| gha_created_at | timestamp[us] | - |
| gha_language | stringclasses | 43 values |
| src_encoding | stringclasses | 9 values |
| language | stringclasses | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 7-5.88M |
| extension | stringclasses | 30 values |
| content | stringlengths | 7-5.88M |
| authors | sequencelengths | 1-1 |
| author | stringlengths | 0-73 |
f571d7fd74861a01016aa8d5246617c43b02e16f | 311f21fb14439cc6d09b8dcc1ade0070303092a1 | /slt_Code/change_detection_new/getExcelData.py | 8feacc66bca9b23409259a64f4fc520b57613455 | [] | no_license | janardangit/web_service | a1aeded8694ae78494b4a69d91b8cc16f37aeef3 | 6ca09d8b7b2e746e3c184f32e6bbb1e3be36bb86 | refs/heads/master | 2016-08-06T08:49:26.008001 | 2015-09-01T07:38:26 | 2015-09-01T07:38:26 | 41,724,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,276 | py | #from openpyxl import Workbook, load_workbook
import xlrd
def __readExcelX(fname):
#wb2 = load_workbook(fname)
#sheets = wb2.get_sheet_names()
#print sheets
return
def __readExcel(fname):
wb = xlrd.open_workbook(fname)
sheet = wb.sheet_by_index(0)
sheets = wb.sheets()
bookDict = {}
for i, each_sheet in enumerate(sheets):
#print 'sheet Name : ', each_sheet.name
#print 'no of cols : ', each_sheet.ncols
#print 'no of rows : ', each_sheet.nrows
bookDict[i] = {}
bookDict[i]['sheet_name'] = each_sheet.name
bookDict[i]['ncols'] = each_sheet.ncols
bookDict[i]['nrows'] = each_sheet.nrows
cellDict = {}
for row in range(each_sheet.nrows):
for col in range(each_sheet.ncols):
#print each_sheet.cell_value(row, col).strip(), '\t',
cellDict[(row, col)] = {}
cellDict[(row, col)]['data'] = each_sheet.cell_value(row, col)
#print
bookDict[i]['cell_dict'] = cellDict
#data = [sheet.cell_value(0, col) for col in range(sheet.ncols)]
#print data
return bookDict
def getFileData(fname):
return __readExcel(fname)
fname = '../M_A_URL_2015_05_06.xlsx'
print getFileData(fname)
| [
"[email protected]"
] | |
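The `__readExcelX` stub in the file above only sketches an openpyxl path in comments; a minimal working counterpart might look like the following (an illustrative sketch, assuming openpyxl is installed; `read_excel_x` is my name, not part of the original file):

```python
# Hypothetical openpyxl-based reader corresponding to the __readExcelX stub.
from openpyxl import load_workbook

def read_excel_x(fname):
    wb2 = load_workbook(fname)
    for name in wb2.sheetnames:  # modern replacement for get_sheet_names()
        sheet = wb2[name]
        print(name, sheet.max_row, sheet.max_column)
```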
aafe85502a3639873aba5094ad36db7af8bc0de7 | 68a4c0522b3cec8458ec4d8fba032cb0e9cfcbbb | /data.py | a864b29370063d464c9d22194a97786fdd2aac17 | [] | no_license | satorres1/Tarea4TI | 80a7af4737cc8aa32ada31e206b29e5dc5f2b2bb | ade407c065bc33158371ff00f08b946a0766a3a6 | refs/heads/main | 2023-05-30T14:45:22.240451 | 2021-06-14T08:43:17 | 2021-06-14T08:43:17 | 376,757,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,514 | py | from gspread.models import Worksheet
import requests
import xml.etree.ElementTree as ET
import pandas as pd
import gspread
import df2gspread as d2g
from gspread_dataframe import get_as_dataframe, set_with_dataframe
albania='http://tarea-4.2021-1.tallerdeintegracion.cl/gho_ALB.xml'
nueva_zelanda='http://tarea-4.2021-1.tallerdeintegracion.cl/gho_NZL.xml'
canada='http://tarea-4.2021-1.tallerdeintegracion.cl/gho_CAN.xml'
australia='http://tarea-4.2021-1.tallerdeintegracion.cl/gho_AUS.xml'
japon='http://tarea-4.2021-1.tallerdeintegracion.cl/gho_JPN.xml'
españa='http://tarea-4.2021-1.tallerdeintegracion.cl/gho_ESP.xml'
r_albania = requests.get(albania)
r_nueva_zelanda = requests.get(nueva_zelanda)
r_canada = requests.get(canada)
r_australia = requests.get(australia)
r_japon = requests.get(japon)
r_españa = requests.get(españa)
albania_tree = ET.fromstring(r_albania.content)
nueva_zelanda_tree = ET.fromstring(r_nueva_zelanda.content)
canada_tree = ET.fromstring(r_canada.content)
australia_tree = ET.fromstring(r_australia.content)
japon_tree = ET.fromstring(r_japon.content)
españa_tree = ET.fromstring(r_españa.content)
all_trees = [albania_tree, nueva_zelanda_tree, canada_tree,
australia_tree, japon_tree, españa_tree]
all_data = []
gho_index = ["Number of deaths", "Number of infant deaths",
"Number of under-five deaths", "Mortality rate for 5-14 year-olds (probability of dying per 1000 children aged 5-14 years)",
"Adult mortality rate (probability of dying between 15 and 60 years per 1000 population)",
"Estimates of number of homicides", "Crude suicide rates (per 100 000 population)",
"Mortality rate attributed to unintentional poisoning (per 100 000 population)",
"Number of deaths attributed to non-communicable diseases, by type of disease and sex",
"Estimated road traffic death rate (per 100 000 population)", "Estimated number of road traffic deaths",
"Mean BMI (kg/m²) (crude estimate)", "Mean BMI (kg/m²) (age-standardized estimate)",
"Prevalence of obesity among adults, BMI ≥ 30 (age-standardized estimate) (%)",
"Prevalence of obesity among children and adolescents, BMI > +2 standard deviations above the median (crude estimate) (%)",
"Prevalence of overweight among adults, BMI ≥ 25 (crude estimate) (%)",
"Prevalence of overweight among children and adolescents, BMI > +1 standard deviations above the median (crude estimate) (%)",
"Prevalence of underweight among adults, BMI < 18.5 (age-standardized estimate) (%)",
"Prevalence of thinness among children and adolescents, BMI < -2 standard deviations below the median (crude estimate) (%)",
"Alcohol, recorded per capita (15+) consumption (in litres of pure alcohol)",
"Estimate of daily cigarette smoking prevalence (%)", "Estimate of daily tobacco smoking prevalence (%)",
"Estimate of current cigarette smoking prevalence (%)", "Estimate of current tobacco smoking prevalence (%)",
"Mean systolic blood pressure (crude estimate)", "Mean fasting blood glucose (mmol/l) (crude estimate)",
"Mean Total Cholesterol (crude estimate)"]
for tree in all_trees:
for row in tree.findall('Fact'):
gho = row.find('GHO').text
if gho in gho_index:
try:
country = row.find('COUNTRY').text
sex = row.find('SEX').text
year = row.find('YEAR').text
ghecauses = row.find('GHECAUSES').text
agegroup = row.find('AGEGROUP').text
display = row.find('Display').text
numeric = row.find('Numeric').text
low = row.find('Low').text
high = row.find('High').text
result = {'GHO':gho, 'COUNTRY':country, 'SEX':sex, 'YEAR':year, 'GHECAUSES':ghecauses,
'AGEGROUP':agegroup, 'Display':display, 'Numeric':numeric, 'Low':low, 'High':high}
all_data.append(result)
except:
pass
df = pd.DataFrame(data=all_data)
print(df)
spreadsheet_key = '1Yt0xRNR94tJf0TFak2oq-5KxK9d3JK-UsTMEZB6xRY8'
gc=gspread.service_account(filename='credentials.json')
sh = gc.open_by_key(spreadsheet_key)  # reuse the key defined above instead of repeating the literal
worksheet = sh.get_worksheet(0)
worksheet.clear()
set_with_dataframe(worksheet, df)
| [
"[email protected]"
] | |
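To sanity-check the `Fact`-parsing loop above without hitting the live GHO endpoints, one can feed it a tiny inline XML document (a sketch; the element names are copied from the code above, while the sample values are made up):

```python
import xml.etree.ElementTree as ET

sample = """<GHO>
  <Fact>
    <GHO>Number of deaths</GHO>
    <COUNTRY>Albania</COUNTRY>
    <SEX>Both sexes</SEX>
    <YEAR>2015</YEAR>
    <GHECAUSES>All Causes</GHECAUSES>
    <AGEGROUP>All ages</AGEGROUP>
    <Display>21000</Display>
    <Numeric>21000</Numeric>
    <Low>20000</Low>
    <High>22000</High>
  </Fact>
</GHO>"""

tree = ET.fromstring(sample)
for row in tree.findall('Fact'):
    print(row.find('GHO').text, row.find('COUNTRY').text, row.find('Numeric').text)
```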
731934b122d5bcdc8e8a9343a05eec4b8e5f89b4 | 727f1bc2205c88577b419cf0036c029b8c6f7766 | /out-bin/py/google/fhir/models/model_test.runfiles/com_google_fhir/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/contrib/signal/python/ops/window_ops.py | 1ba1a5a2ec5ddd557b4b98870ad7721d7904c398 | [
"Apache-2.0"
] | permissive | rasalt/fhir | 55cf78feed3596a3101b86f9e9bbf6652c6ed4ad | d49883cc4d4986e11ca66058d5a327691e6e048a | refs/heads/master | 2020-04-13T00:16:54.050913 | 2019-01-15T14:22:15 | 2019-01-15T14:22:15 | 160,260,223 | 0 | 0 | Apache-2.0 | 2018-12-03T22:07:01 | 2018-12-03T22:07:01 | null | UTF-8 | Python | false | false | 191 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/c4bcd65252c8f8250f091ba96375f9a5/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/contrib/signal/python/ops/window_ops.py | [
"[email protected]"
] | |
eff87214ef891dd2b1e40fed52039191e02f5b3d | c8e5d2aa94b21a2102213176ff7cbeeb93dca89b | /multilayer_p_project1/classifier.py | f04438fd5c2eb2e1bfc1b65c31e3639cc1d8c43a | [] | no_license | jurajmaslej/neuronky | c67348e0606bd6e7bac78b19940eadda8ab2d530 | 6bbb55cd8d913a772b5aa3d18b0b63355cd986c8 | refs/heads/master | 2020-03-19T00:40:37.131584 | 2018-07-02T20:54:45 | 2018-07-02T20:54:45 | 135,496,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,890 | py | import numpy as np
from mlp import *
from util import *
import data_handler
class MLPRegressor(MLP):
def __init__(self, dim_in, dim_hid, dim_out, validation_data, validation_label):
super().__init__(dim_in, dim_hid, dim_out, validation_data, validation_label)
## functions
def cost(self, targets, outputs): # new
return np.sum((targets - outputs)**2, axis=0)
def f_hid(self, x): # override
return(1/(1 + np.exp(-x))) # sigmoid
def df_hid(self, x): # override
return self.f_hid(x)*(1 - self.f_hid(x)) # derivation of sigmoid
def f_out(self, x): # override
#return(1/(1 + np.exp(-x))) # sigmoid
"""Compute the softmax of vector x in a numerically stable way."""
shiftx = x - np.max(x)
exps = np.exp(shiftx)
return exps / np.sum(exps) #softmax
def df_out(self, x): # override
#f_h = self.f_hid(x)*(1 - self.f_hid(x))
#print(" fh sh ", f_h.shape)
#return jac
return self.f_out(x)*(1 - self.f_out(x)) # derivation of sigmoid
def stablesoftmax(self, x):
"""Compute the softmax of vector x in a numerically stable way."""
shiftx = x - np.max(x)
exps = np.exp(shiftx)
return exps / np.sum(exps)
def der_softmax(self, x):
x = np.atleast_2d(x)
J = - x[..., None] * x[:, None, :] # off-diagonal Jacobian
iy, ix = np.diag_indices_from(J[0])
J[:, iy, ix] = x * (1. - x) # diagonal
return J.sum(axis=1) # sum across-rows for each sample
## prediction pass
def predict(self, inputs):
outputs, *_ = self.forward(inputs) # if self.forward() can take a whole batch
# outputs = np.stack([self.forward(x)[0] for x in inputs.T]) # otherwise
return outputs
def early_stopping(self, train_errors, val_errors):
quotient = 0
#print('val errors ', val_errors[-1], min(val_errors))
#print('pomer val errors ', (val_errors[-1] / min(val_errors)))
gener_loss = 100 * ((val_errors[-1] / min(val_errors)) - 1)
#print ('gener loss ', gener_loss)
#print('train train_progress ', sum(train_errors) / (len(train_errors) * (min(train_errors))))
#print('train errors ', train_errors)
train_progress = 1000 * (sum(train_errors) / (len(train_errors) * (min(train_errors))) - 1)
try:
quotient = gener_loss / train_progress
except:
#print('was 0 ', gener_loss, '#', train_progress)
quotient = 0
#print('train_progress ', train_progress)
#print('quotient ', quotient)
return quotient
## training
def train(self, inputs, targets, alpha=0.1, eps=100, early_stop_slice_len = 10, quotient_lvl = 1, early_stopping = True):
(_, count) = inputs.shape
errors = []
CEs = []
REs = []
valE = []
temp_REs = []
all_weights = []
#print('inputs sh', targets.shape)
for ep in range(eps):
print('Ep {:3d}/{}: '.format(ep+1, eps), end='')
E = 0
RE = 0
for i in np.random.permutation(count):
x = inputs[:, i] # FIXME
#print (x)
#print ("x sh ", x.shape)
d = targets[:, i] # FIXME
#print(d)
y, dW_hid, dW_mid, dW_out = self.backward(x, d)
#print('dw hid shape ', dW_hid.shape)
#print(d)
#print(self.decode_argmax(y))
#print(RE)
#print(self.cost(d,y))
#print('@@@@')
E += self.cost(d,y)
RE += self.decode_argmax(d) != self.decode_argmax(y)
#print (RE)
#print('####')
self.W_hid += alpha * dW_hid # FIXME
self.W_firstmid += alpha * dW_mid
self.W_out += alpha * dW_out # FIXME
all_weights.append((self.W_hid, self.W_firstmid, self.W_out))
E /= count #get percentage
RE /= count
temp_REs.append(RE) # list of errors for last 'early_stop_slice_len' epochs
if ep > 0 and (ep % early_stop_slice_len == 0) and early_stopping: # we have done 'early_stop_slice_len' epochs, now we want check if we are overfitting
#print('in early early_stopping')
outputs = self.predict(self.validation_data)
outputs = data_handler.Handler().decode_labels(outputs).T #3,1600
validation_error = self.evaluate_model(self.validation_label, outputs)
#print('vali error ', validation_error)
valE.append(validation_error)
quotient = self.early_stopping(temp_REs, valE)
if quotient > quotient_lvl:
                    print('Overfitting detected; stopping training at epoch ', ep, ' with train errors ', temp_REs)
#print('last validation err ', validation_error)
#self.W_hid -= alpha * dW_hid # FIXME
#self.W_out -= alpha * dW_out # FIXME
#print('go "slice" episodes back, ep to goto: ', ep - early_stop_slice_len)
#print('previous slice val err ', valE)
self.W_hid = all_weights[ep - early_stop_slice_len][0]
self.W_firstmid = all_weights[ep - early_stop_slice_len][1]
self.W_out = all_weights[ep - early_stop_slice_len][2]
return (errors, REs)
temp_REs = []
errors.append(E) #append only after we sure early stop did not interrupt
REs.append(RE)
#print('E = {:.3f}'.format(E))
#print(E)
#print(RE)
#print('RE = {:.5f}'.format(RE))
print('CE = {:6.2%}, RE = {:.5f}'.format(E, RE))
return (errors, REs)
def decode_argmax(self, to_decode):
max_index = to_decode.argmax(axis = 0)
if max_index == 0:
return 'A'
if max_index == 1:
return 'B'
if max_index == 2:
return 'C'
def evaluate_model(self, targets, outputs):
targets = targets.T
outputs = outputs.T
#print(targets.shape)
#print(outputs.shape)
data_count,_ = outputs.shape
count_errors = 0
for i in range(0, data_count):
if (outputs[i] == targets[i]).all() == False:
count_errors += 1
#print(self.validation_label.T[i])
#print('#')
#print(outputs.T[i])
#print('@@@@')
#print('num of count_errors in prediction ', count_errors)
#print('count_errors / num of samples ', count_errors/data_count)
return (count_errors / data_count)
| [
"[email protected]"
] | |
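The `early_stopping` method above computes a generalization-loss over training-progress quotient, in the spirit of Prechelt's PQ early-stopping criterion. A standalone sketch of the same arithmetic on made-up error strips:

```python
def pq_quotient(train_errors, val_errors):
    # Generalization loss: percentage by which the latest validation error
    # exceeds the best validation error seen so far.
    gl = 100.0 * (val_errors[-1] / min(val_errors) - 1)
    # Training progress over the last strip of epochs (small when training plateaus).
    pk = 1000.0 * (sum(train_errors) / (len(train_errors) * min(train_errors)) - 1)
    return gl / pk if pk != 0 else 0.0

train_strip = [0.260, 0.259, 0.2585, 0.258]  # training error has plateaued
val_strip = [0.31, 0.30, 0.32, 0.34]         # validation error is rising
print(pq_quotient(train_strip, val_strip))   # roughly 3.9, above the quotient_lvl=1 default
```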
0f78526bcafec34871cd9dace8e966cf5eb716ac | cf5077d06c5145d93b44c0c00bb93f93fbf4d59d | /static_page/middleware.py | b9781cdb1dd5bc55c28f71d93a202db0d403174c | [] | no_license | su-danny/famdates | 16a9ee01d259c9978278415943d918fd47bdfc9e | 301cf997985172c146d917c832390e0db57c03c5 | refs/heads/master | 2016-08-06T18:17:30.345319 | 2014-03-11T10:34:31 | 2014-03-11T10:34:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 706 | py | from static_page.views import static_page
from django.http import Http404
from django.conf import settings
class StaticpageFallbackMiddleware(object):
def process_response(self, request, response):
if response.status_code != 404:
return response # No need to check for a flatpage for non-404 responses.
try:
return static_page(request, request.path_info)
# Return the original response if any errors happened. Because this
# is a middleware, we can't assume the errors will be caught elsewhere.
except Http404:
return response
except:
if settings.DEBUG:
raise
return response
| [
"[email protected]"
] | |
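For the fallback to fire only on genuine 404s, this middleware has to run late in the response phase. A hypothetical settings fragment (old-style `MIDDLEWARE_CLASSES`, matching the pre-1.10 `process_response`-only style of the class above; the dotted path assumes the app layout shown):

```python
MIDDLEWARE_CLASSES = (
    # ... Django's stock middleware ...
    'static_page.middleware.StaticpageFallbackMiddleware',  # last, so it only sees real 404s
)
```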
9e022ee4eb24c50cf3826993896166dd171db0fa | aba0055290156515e6befc47d06183dee2663aec | /tensorflow2/tf2cv/models/pnasnet.py | 42c4d3657a4aec53bc30e3f77e60430c214ba3da | [
"MIT"
] | permissive | piyop/imgclsmob | f320aeb4675be57042cf661190e19d9299d46731 | 780dc56eddc95ce58c34eb6f6e48d2fb4c566571 | refs/heads/master | 2022-11-05T17:34:34.076150 | 2020-06-26T12:06:16 | 2020-06-26T12:06:16 | 275,135,197 | 1 | 0 | MIT | 2020-06-26T10:59:46 | 2020-06-26T10:59:45 | null | UTF-8 | Python | false | false | 23,517 | py | """
PNASNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Progressive Neural Architecture Search,' https://arxiv.org/abs/1712.00559.
"""
__all__ = ['PNASNet', 'pnasnet5large']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import MaxPool2d, conv1x1, flatten, is_channels_first, get_channel_axis
from .nasnet import nasnet_dual_path_sequential, nasnet_batch_norm, NasConv, NasDwsConv, NasPathBlock, NASNetInitBlock
class PnasMaxPoolBlock(nn.Layer):
"""
PNASNet specific Max pooling layer with extra padding.
Parameters:
----------
strides : int or tuple/list of 2 int, default 2
Strides of the convolution.
extra_padding : bool, default False
Whether to use extra padding.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
strides=2,
extra_padding=False,
data_format="channels_last",
**kwargs):
super(PnasMaxPoolBlock, self).__init__(**kwargs)
self.extra_padding = extra_padding
self.data_format = data_format
self.pool = MaxPool2d(
pool_size=3,
strides=strides,
padding=1,
data_format=data_format,
name="pool")
if self.extra_padding:
self.pad = nn.ZeroPadding2D(
padding=((1, 0), (1, 0)),
data_format=data_format)
def call(self, x, training=None):
if self.extra_padding:
x = self.pad(x)
x = self.pool(x)
if self.extra_padding:
if is_channels_first(self.data_format):
x = x[:, :, 1:, 1:]
else:
x = x[:, 1:, 1:, :]
return x
def pnas_conv1x1(in_channels,
out_channels,
strides=1,
data_format="channels_last",
**kwargs):
"""
1x1 version of the PNASNet specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
return NasConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
strides=strides,
padding=0,
groups=1,
data_format=data_format,
**kwargs)
class DwsBranch(nn.Layer):
"""
PNASNet specific block with depthwise separable convolution layers.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
extra_padding : bool, default False
Whether to use extra padding.
stem : bool, default False
Whether to use squeeze reduction if False.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
extra_padding=False,
stem=False,
data_format="channels_last",
**kwargs):
super(DwsBranch, self).__init__(**kwargs)
assert (not stem) or (not extra_padding)
mid_channels = out_channels if stem else in_channels
padding = kernel_size // 2
self.conv1 = NasDwsConv(
in_channels=in_channels,
out_channels=mid_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
extra_padding=extra_padding,
data_format=data_format,
name="conv1")
self.conv2 = NasDwsConv(
in_channels=mid_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=1,
padding=padding,
data_format=data_format,
name="conv2")
def call(self, x, training=None):
x = self.conv1(x, training=training)
x = self.conv2(x, training=training)
return x
def dws_branch_k3(in_channels,
out_channels,
strides=2,
extra_padding=False,
stem=False,
data_format="channels_last",
**kwargs):
"""
3x3 version of the PNASNet specific depthwise separable convolution branch.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 2
Strides of the convolution.
extra_padding : bool, default False
Whether to use extra padding.
stem : bool, default False
Whether to use squeeze reduction if False.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
return DwsBranch(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=strides,
extra_padding=extra_padding,
stem=stem,
data_format=data_format,
**kwargs)
def dws_branch_k5(in_channels,
out_channels,
strides=2,
extra_padding=False,
stem=False,
data_format="channels_last",
**kwargs):
"""
5x5 version of the PNASNet specific depthwise separable convolution branch.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 2
Strides of the convolution.
extra_padding : bool, default False
Whether to use extra padding.
stem : bool, default False
Whether to use squeeze reduction if False.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
return DwsBranch(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=5,
strides=strides,
extra_padding=extra_padding,
stem=stem,
data_format=data_format,
**kwargs)
def dws_branch_k7(in_channels,
out_channels,
strides=2,
extra_padding=False,
data_format="channels_last",
**kwargs):
"""
7x7 version of the PNASNet specific depthwise separable convolution branch.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 2
Strides of the convolution.
extra_padding : bool, default False
Whether to use extra padding.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
return DwsBranch(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=7,
strides=strides,
extra_padding=extra_padding,
stem=False,
data_format=data_format,
**kwargs)
class PnasMaxPathBlock(nn.Layer):
"""
PNASNet specific `max path` auxiliary block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
data_format="channels_last",
**kwargs):
super(PnasMaxPathBlock, self).__init__(**kwargs)
self.maxpool = PnasMaxPoolBlock(
data_format=data_format,
name="maxpool")
self.conv = conv1x1(
in_channels=in_channels,
out_channels=out_channels,
data_format=data_format,
name="conv")
self.bn = nasnet_batch_norm(
channels=out_channels,
data_format=data_format,
name="bn")
def call(self, x, training=None):
x = self.maxpool(x)
x = self.conv(x)
x = self.bn(x, training=training)
return x
class PnasBaseUnit(nn.Layer):
"""
PNASNet base unit.
Parameters:
----------
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
data_format="channels_last",
**kwargs):
super(PnasBaseUnit, self).__init__(**kwargs)
self.data_format = data_format
def cell_forward(self, x, x_prev, training=None):
assert (hasattr(self, 'comb0_left'))
x_left = x_prev
x_right = x
x0 = self.comb0_left(x_left, training=training) + self.comb0_right(x_left, training=training)
x1 = self.comb1_left(x_right, training=training) + self.comb1_right(x_right, training=training)
x2 = self.comb2_left(x_right, training=training) + self.comb2_right(x_right, training=training)
x3 = self.comb3_left(x2, training=training) + self.comb3_right(x_right, training=training)
x4 = self.comb4_left(x_left, training=training) + (self.comb4_right(x_right, training=training) if
self.comb4_right else x_right)
x_out = tf.concat([x0, x1, x2, x3, x4], axis=get_channel_axis(self.data_format))
return x_out
class Stem1Unit(PnasBaseUnit):
"""
PNASNet Stem1 unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
data_format="channels_last",
**kwargs):
super(Stem1Unit, self).__init__(**kwargs)
mid_channels = out_channels // 5
self.conv_1x1 = pnas_conv1x1(
in_channels=in_channels,
out_channels=mid_channels,
data_format=data_format,
name="conv_1x1")
self.comb0_left = dws_branch_k5(
in_channels=in_channels,
out_channels=mid_channels,
stem=True,
data_format=data_format,
name="comb0_left")
self.comb0_right = PnasMaxPathBlock(
in_channels=in_channels,
out_channels=mid_channels,
data_format=data_format,
name="comb0_right")
self.comb1_left = dws_branch_k7(
in_channels=mid_channels,
out_channels=mid_channels,
data_format=data_format,
name="comb1_left")
self.comb1_right = PnasMaxPoolBlock(
data_format=data_format,
name="comb1_right")
self.comb2_left = dws_branch_k5(
in_channels=mid_channels,
out_channels=mid_channels,
data_format=data_format,
name="comb2_left")
self.comb2_right = dws_branch_k3(
in_channels=mid_channels,
out_channels=mid_channels,
data_format=data_format,
name="comb2_right")
self.comb3_left = dws_branch_k3(
in_channels=mid_channels,
out_channels=mid_channels,
strides=1,
data_format=data_format,
name="comb3_left")
self.comb3_right = PnasMaxPoolBlock(
data_format=data_format,
name="comb3_right")
self.comb4_left = dws_branch_k3(
in_channels=in_channels,
out_channels=mid_channels,
stem=True,
data_format=data_format,
name="comb4_left")
self.comb4_right = pnas_conv1x1(
in_channels=mid_channels,
out_channels=mid_channels,
strides=2,
data_format=data_format,
name="comb4_right")
def call(self, x, training=None):
x_prev = x
x = self.conv_1x1(x, training=training)
x_out = self.cell_forward(x, x_prev, training=training)
return x_out
class PnasUnit(PnasBaseUnit):
"""
PNASNet ordinary unit.
Parameters:
----------
in_channels : int
Number of input channels.
prev_in_channels : int
Number of input channels in previous input.
out_channels : int
Number of output channels.
reduction : bool, default False
Whether to use reduction.
extra_padding : bool, default False
Whether to use extra padding.
match_prev_layer_dimensions : bool, default False
Whether to match previous layer dimensions.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
prev_in_channels,
out_channels,
reduction=False,
extra_padding=False,
match_prev_layer_dimensions=False,
data_format="channels_last",
**kwargs):
super(PnasUnit, self).__init__(**kwargs)
mid_channels = out_channels // 5
stride = 2 if reduction else 1
if match_prev_layer_dimensions:
self.conv_prev_1x1 = NasPathBlock(
in_channels=prev_in_channels,
out_channels=mid_channels,
data_format=data_format,
name="conv_prev_1x1")
else:
self.conv_prev_1x1 = pnas_conv1x1(
in_channels=prev_in_channels,
out_channels=mid_channels,
data_format=data_format,
name="conv_prev_1x1")
self.conv_1x1 = pnas_conv1x1(
in_channels=in_channels,
out_channels=mid_channels,
data_format=data_format,
name="conv_1x1")
self.comb0_left = dws_branch_k5(
in_channels=mid_channels,
out_channels=mid_channels,
strides=stride,
extra_padding=extra_padding,
data_format=data_format,
name="comb0_left")
self.comb0_right = PnasMaxPoolBlock(
strides=stride,
extra_padding=extra_padding,
data_format=data_format,
name="comb0_right")
self.comb1_left = dws_branch_k7(
in_channels=mid_channels,
out_channels=mid_channels,
strides=stride,
extra_padding=extra_padding,
data_format=data_format,
name="comb1_left")
self.comb1_right = PnasMaxPoolBlock(
strides=stride,
extra_padding=extra_padding,
data_format=data_format,
name="comb1_right")
self.comb2_left = dws_branch_k5(
in_channels=mid_channels,
out_channels=mid_channels,
strides=stride,
extra_padding=extra_padding,
data_format=data_format,
name="comb2_left")
self.comb2_right = dws_branch_k3(
in_channels=mid_channels,
out_channels=mid_channels,
strides=stride,
extra_padding=extra_padding,
data_format=data_format,
name="comb2_right")
self.comb3_left = dws_branch_k3(
in_channels=mid_channels,
out_channels=mid_channels,
strides=1,
data_format=data_format,
name="comb3_left")
self.comb3_right = PnasMaxPoolBlock(
strides=stride,
extra_padding=extra_padding,
data_format=data_format,
name="comb3_right")
self.comb4_left = dws_branch_k3(
in_channels=mid_channels,
out_channels=mid_channels,
strides=stride,
extra_padding=extra_padding,
data_format=data_format,
name="comb4_left")
if reduction:
self.comb4_right = pnas_conv1x1(
in_channels=mid_channels,
out_channels=mid_channels,
strides=stride,
data_format=data_format,
name="comb4_right")
else:
self.comb4_right = None
def call(self, x, x_prev, training=None):
x_prev = self.conv_prev_1x1(x_prev, training=training)
x = self.conv_1x1(x, training=training)
x_out = self.cell_forward(x, x_prev, training=training)
return x_out
class PNASNet(tf.keras.Model):
"""
PNASNet model from 'Progressive Neural Architecture Search,' https://arxiv.org/abs/1712.00559.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
stem1_blocks_channels : list of 2 int
Number of output channels for the Stem1 unit.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (331, 331)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
channels,
init_block_channels,
stem1_blocks_channels,
in_channels=3,
in_size=(331, 331),
classes=1000,
data_format="channels_last",
**kwargs):
super(PNASNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
self.data_format = data_format
self.features = nasnet_dual_path_sequential(
return_two=False,
first_ordinals=2,
last_ordinals=2,
name="features")
self.features.add(NASNetInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
data_format=data_format,
name="init_block"))
in_channels = init_block_channels
self.features.add(Stem1Unit(
in_channels=in_channels,
out_channels=stem1_blocks_channels,
data_format=data_format,
name="stem1_unit"))
prev_in_channels = in_channels
in_channels = stem1_blocks_channels
for i, channels_per_stage in enumerate(channels):
stage = nasnet_dual_path_sequential(
name="stage{}".format(i + 1))
for j, out_channels in enumerate(channels_per_stage):
reduction = (j == 0)
extra_padding = (j == 0) and (i not in [0, 2])
match_prev_layer_dimensions = (j == 1) or ((j == 0) and (i == 0))
stage.add(PnasUnit(
in_channels=in_channels,
prev_in_channels=prev_in_channels,
out_channels=out_channels,
reduction=reduction,
extra_padding=extra_padding,
match_prev_layer_dimensions=match_prev_layer_dimensions,
data_format=data_format,
name="unit{}".format(j + 1)))
prev_in_channels = in_channels
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.ReLU(name="activ"))
self.features.add(nn.AveragePooling2D(
pool_size=11,
strides=1,
data_format=data_format,
name="final_pool"))
self.output1 = tf.keras.Sequential(name="output1")
self.output1.add(nn.Dropout(
rate=0.5,
name="dropout"))
self.output1.add(nn.Dense(
units=classes,
input_dim=in_channels,
name="fc"))
def call(self, x, training=None):
x = self.features(x, training=training)
x = flatten(x, self.data_format)
x = self.output1(x)
return x
def get_pnasnet(model_name=None,
pretrained=False,
root=os.path.join("~", ".tensorflow", "models"),
**kwargs):
"""
Create PNASNet model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
repeat = 4
init_block_channels = 96
stem_blocks_channels = [270, 540]
norm_channels = [1080, 2160, 4320]
channels = [[ci] * repeat for ci in norm_channels]
stem1_blocks_channels = stem_blocks_channels[0]
channels[0] = [stem_blocks_channels[1]] + channels[0]
net = PNASNet(
channels=channels,
init_block_channels=init_block_channels,
stem1_blocks_channels=stem1_blocks_channels,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
(1,) + net.in_size + (in_channels,)
net.build(input_shape=input_shape)
net.load_weights(
filepath=get_model_file(
model_name=model_name,
local_model_store_dir_path=root))
return net
def pnasnet5large(**kwargs):
"""
PNASNet-5-Large model from 'Progressive Neural Architecture Search,' https://arxiv.org/abs/1712.00559.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_pnasnet(model_name="pnasnet5large", **kwargs)
def _test():
import numpy as np
import tensorflow.keras.backend as K
data_format = "channels_last"
pretrained = False
models = [
pnasnet5large,
]
for model in models:
net = model(pretrained=pretrained, data_format=data_format)
        batch_size = 14
        x = tf.random.normal((batch_size, 3, 331, 331) if is_channels_first(data_format) else (batch_size, 331, 331, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch_size, 1000))
weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
print("m={}, {}".format(model.__name__, weight_count))
assert (model != pnasnet5large or weight_count == 86057668)
if __name__ == "__main__":
_test()
| [
"[email protected]"
] | |
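Beyond the shape check in `_test()`, a minimal inference sketch (random weights and synthetic input; no pretrained weight files are assumed):

```python
import tensorflow as tf

net = pnasnet5large(pretrained=False, data_format="channels_last")
x = tf.random.normal((1, 331, 331, 3))  # one 331x331 RGB image
logits = net(x, training=False)
probs = tf.nn.softmax(logits, axis=-1)  # scores over the 1000 ImageNet classes
top5 = tf.math.top_k(probs, k=5)
print(top5.indices.numpy())             # indices of the five highest-scoring classes
```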
45c3da155377e5eb40278aa304fefbe65f4e8e04 | ac2142d192bde034ae3c6d7e07045c39d9a34aa3 | /面试题/打乱一个排好序的list对象alist.py | 2ce47af69449852469742a5d2902fec7d4baacc3 | [] | no_license | budaLi/-500- | ee33a93a6c7f7d36e30a29dd1e12634034712d12 | 69c42389717f003198f652035bfc922eac8a6fef | refs/heads/master | 2022-11-20T08:42:24.255264 | 2020-07-22T07:41:52 | 2020-07-22T07:41:52 | 281,352,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | # @Time : 2020/7/22 15:12
# @Author : Libuda
# @FileName: 打乱一个排好序的list对象alist.py
# @Software: PyCharm
nums = list(range(1, 100))  # renamed from "list" to avoid shadowing the built-in
import random

def shuffle(values):
    # Note: random.shuffle shuffles the list in place and returns None,
    # so the (now shuffled) list is returned explicitly here.
    random.shuffle(values)
    return values

if __name__ == '__main__':
    res = shuffle(nums)
    print(res)
| [
"[email protected]"
] | |
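If mutating the input is undesirable, `random.sample` returns a new shuffled list and leaves the original untouched (a small sketch):

```python
import random

original = list(range(1, 100))
shuffled = random.sample(original, len(original))  # new list; `original` is unchanged
```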
28c8ccf78fb493260749111e0f7eaf2d01e32a19 | 8767c6db0699133e96447f56624dadb9eb89ef32 | /Leetcode/Factorial Trailing Zeroes.py | eac9d8fd54aa18ad4ab426a8ff54e14fef7191ef | [] | no_license | ran0527/Coding | 8667f34e568fa16e38d5050be0d1f9f892b1051f | e613aea74c43274793653eb160b36b12db1fb605 | refs/heads/master | 2021-01-10T05:37:53.348116 | 2015-12-01T21:54:04 | 2015-12-01T21:54:04 | 45,228,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | class Solution(object):
def trailingZeroes(self, n):
"""
:type n: int
:rtype: int
"""
i = 5
result = 0
while n >= i:
            result += n // i  # floor division: counts multiples of 5, 25, 125, ...
i *= 5
return result
| [
"[email protected]"
] | |
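The loop counts factors of 5 in n! via Legendre's formula, z(n) = floor(n/5) + floor(n/25) + floor(n/125) + ...; a quick sanity check (assuming the `Solution` class above):

```python
s = Solution()
assert s.trailingZeroes(5) == 1     # 5! = 120
assert s.trailingZeroes(25) == 6    # 25 contributes two factors of 5
assert s.trailingZeroes(100) == 24  # floor(100/5) + floor(100/25) = 20 + 4
```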
db786b96b9bd2c680d18def2be8b505743bdebec | 715239e1d30f384ec040fe0cd49053252420ec15 | /exampleSite/local_public.py | 2a365d6d0a38cedee9b0fcc0fabbbd435763b537 | [
"MIT"
] | permissive | brangpd/hugo-xmin | cac3dfec737e3621f4b06b25931876990d651e6c | 08264a4a30d6bf2fc08a9e89309892d3b5ceb604 | refs/heads/master | 2023-08-18T14:13:53.689813 | 2021-10-12T04:51:12 | 2021-10-12T04:51:12 | 284,719,103 | 0 | 0 | MIT | 2020-08-03T14:16:18 | 2020-08-03T14:16:17 | null | UTF-8 | Python | false | false | 205 | py | import os
import tempfile
def get_local_public_dir():
return '{tempdir}{sep}{public}'.format(
tempdir=tempfile.gettempdir(),
sep=os.path.sep,
public='hugo_local_public'
)
| [
"[email protected]"
] | |
7faf1390cd057e273a7e64857234173695677762 | 3aafaa865594aa58d056a79fdae4d0658774d3ab | /setup.py | eede62ba499ebbc973b5357cc960f237d47318d5 | [
"Apache-2.0",
"MIT",
"Intel"
] | permissive | asamarah1/lpot | 56aac0d46692e1864de2f06390ab435cd079e741 | 881bde402db387b04c2f33cc96fb817f47c4d623 | refs/heads/master | 2023-01-20T15:55:39.088923 | 2020-12-01T13:22:59 | 2020-12-01T14:25:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,520 | py | from io import open
from setuptools import find_packages, setup
import re
import sys
try:
filepath = './lpot/version.py'
with open( filepath ) as version_file:
__version__ ,= re.findall( '__version__ = "(.*)"', version_file.read() )
except Exception as error:
assert False, "Error: Could not open '%s' due %s\n" % (filepath, error)
setup(
name="lpot",
version=__version__,
author="Intel MLP/MLPC Team",
author_email="[email protected], [email protected], [email protected], [email protected], [email protected], [email protected], [email protected]",
description="Repository of Intel® Low Precision Optimization Tool",
long_description=open("README.md", "r", encoding='utf-8').read(),
long_description_content_type="text/markdown",
keywords='quantization, auto-tuning, post-training static quantization, post-training dynamic quantization, quantization-aware training, tuning strategy',
license='',
url="https://github.com/intel/lpot",
packages = find_packages(),
package_dir = {'':'.'},
package_data={'': ['*.py', '*.yaml']},
install_requires=['numpy', 'pyyaml', 'scikit-learn', 'schema', 'py-cpuinfo', 'hyperopt', 'pandas'],
entry_points={
'console_scripts': [""]
},
python_requires='>=3.5.0',
classifiers=[
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
| [
"[email protected]"
] | |
200fcefe0011f3bc6221235c3a84e460fd5bf3aa | fee22561b220389e14a5ba3c09d5795b92299e64 | /article/migrations/0003_auto_20160325_1638.py | df39a190f838a857549b956e6a4eb772dd7af8fc | [] | no_license | zionchao/myhome | 88687e369e24456eaab983c415d554bf64e15799 | 38343f9cc6c928f9622565a2c909b96b7e0942e3 | refs/heads/master | 2021-01-10T01:48:44.846660 | 2016-03-31T07:13:00 | 2016-03-31T07:13:00 | 54,965,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('article', '0002_auto_20160325_1621'),
]
operations = [
migrations.AlterField(
model_name='classification',
name='name',
field=models.CharField(max_length=50),
),
]
| [
"[email protected]"
] | |
4c1b490c17b1790112416a464aac3b5646a91ef4 | 5ae8e983ec6159c82d6551ff8333ebe4ead3526d | /Hangman_Game/hangman.py | 0526f1e36af3123952698f8a264bcbd17b97b377 | [] | no_license | karan-47/turtle-python | 0aa7d0ed9d66671a73d196a31f1b8a7fcf526239 | 70319407298b1721a98a5fe2a026dcc483d5a1f9 | refs/heads/main | 2023-04-10T10:32:09.402686 | 2021-04-25T15:01:11 | 2021-04-25T15:01:11 | 361,450,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,849 | py | import turtle
alphabets = []
answer = ""
current = ""
guessedanswer = []
count = 10
clearedlist = []
isGuessed = False
display_turtle = turtle.Turtle()
display_turtle.hideturtle()
display_turtle.speed(0)
display_count = turtle.Turtle()
display_count.hideturtle()
display_count.speed(0)
turtle.tracer(0)
def countDisplay():
global count
display_count.clear()
display_count.hideturtle()
display_count.penup()
display_count.goto(100, 100)
display_count.write(count)
def displayAnswer():
global current
display_turtle.clear()
display_turtle.hideturtle()
display_turtle.penup()
display_turtle.goto(0,100)
cn = ""
for i in range (len(current)):
cn += current[i]+" "
display_turtle.write(cn)
def setAnswer():
global alphabets,answer,guessedanswer,isGuessed,current,clearedlist,count
setAlphabets()
count = 10
answer = 'hello'.upper()
for i in range(len(answer)):
current = current + '_'
guessedanswer = list(current)
clearedlist = []
isGuessed = False
def resetAnswer():
global alphabets,answer,guessedanswer,isGuessed,current,clearedlist,count
current = ''
count = 10
clearedlist = []
alphabets = []
answer = ""
guessedanswer = []
isGuessed = False
def setAlphabets():
global alphabets
for i in range(26):
t = turtle.Turtle()
t.hideturtle()
t.penup()
t.speed(0)
alphabets.append(t)
t.goto(i*25 - 320, -200)
t.write(chr(i+65))
def removeAlphabets():
global alphabets
for i in range(26):
alphabets[i].clear()
def checkAnswer( char ):
global answer,clearedlist,count
global guessedanswer,current,alphabets
if char not in clearedlist:
clearedlist.append(char)
if char in answer and char not in current:
for i in range(len(answer)):
if answer[i] == char:
guessedanswer[i] = char
current = "".join(guessedanswer)
else:
count -= 1
displayAnswer()
countDisplay()
t1 = list(char)
clearAlphabet(ord(t1[0])-65)
def clearAlphabet(i):
global alphabets
alphabets[i].clear()
def make_key_handler(letter):
    # One factory replaces the original 26 near-identical a()..z() wrappers.
    def handler():
        checkAnswer(letter)
    return handler
turtle.listen()
setAnswer()
displayAnswer()
countDisplay()
turtle.onkey(a,"a")
turtle.onkey(b,"b")
turtle.onkey(c,"c")
turtle.onkey(d,"d")
turtle.onkey(e,"e")
turtle.onkey(f,"f")
turtle.onkey(g,"g")
turtle.onkey(h,"h")
turtle.onkey(i,"i")
turtle.onkey(j,"j")
turtle.onkey(k,"k")
turtle.onkey(l,"l")
turtle.onkey(m,"m")
turtle.onkey(n,"n")
turtle.onkey(o,"o")
turtle.onkey(p,"p")
turtle.onkey(q,"q")
turtle.onkey(r,"r")
turtle.onkey(s,"s")
turtle.onkey(t,"t")
turtle.onkey(u,"u")
turtle.onkey(v,"v")
turtle.onkey(w,"w")
turtle.onkey(x,"x")
turtle.onkey(y,"y")
turtle.onkey(z,"z")
# while True:
# turtle.update()
# turtle.tracer(0, 0)
turtle.mainloop() | [
"[email protected]"
] | |
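The game above tracks `count` and `isGuessed` but never actually ends; a hypothetical end-of-game check (the names reuse the globals above, and where it is called from, e.g. at the end of `checkAnswer`, is left open):

```python
def check_game_over():
    # Hypothetical: win when no blanks remain, lose when guesses run out.
    global isGuessed
    if '_' not in current:
        isGuessed = True
        display_turtle.write("You win! The word was " + answer)
    elif count <= 0:
        display_turtle.write("Game over! The word was " + answer)
```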
a0ff3272428bf429d2bdb5f83aef3f90e3f4a975 | 878365a0283fb353bf82b561e9004652b50c84bc | /SPN-DAG Project/Important/src/kNN.py | 971ea003544a53de65c8e7497c8974e785472a0f | [
"BSD-3-Clause"
] | permissive | atcbosselut/spn-discriminative | 7e844b97f38c607d4da9d5b5c8878042a45f98c0 | 9baca470dc17079bbbb1ec8c6426cb1ee7163373 | refs/heads/master | 2021-01-19T07:29:44.702944 | 2017-07-03T18:42:21 | 2017-07-03T18:42:21 | 28,318,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,437 | py | from __future__ import division
import numpy as np
import scipy as sp
from scipy import linalg
import cPickle
import random as rd
import SPN
import Nodes
import Tensors
import Data
import time
import sklearn.neighbors as sk
#Set Hyperparameters
epsilon = .00005
eta = .1
N=10000
N_test=10000
pictureSize = 32
tensorSize = 3
numColors = 3
print "Get Data"
#Unpickle Data
data_batch1 = Data.unpickle("cifar-10-batches-py/data_batch_1")
#data_batch2 = Data.unpickle("cifar-10-batches-py/data_batch_2")
#data_batch3 = Data.unpickle("cifar-10-batches-py/data_batch_3")
#data_batch4 = Data.unpickle("cifar-10-batches-py/data_batch_4")
#data_batch5 = Data.unpickle("cifar-10-batches-py/data_batch_5")
test_batch = Data.unpickle("cifar-10-batches-py/test_batch")
#Get Data
batch1_labels, batch1_data = Data.GetLabelsAndData(data_batch1)
#batch2_labels, batch2_data = Data.GetLabelsAndData(data_batch2)
#batch3_labels, batch3_data = Data.GetLabelsAndData(data_batch3)
#batch4_labels, batch4_data = Data.GetLabelsAndData(data_batch4)
#batch5_labels, batch5_data = Data.GetLabelsAndData(data_batch5)
test_batch_labels, test_batch_data = Data.GetLabelsAndData(test_batch)
MPGaussianTrain = np.loadtxt('Input_Train1.txt')
Y=batch1_labels
start = time.time()
nbrs = sk.KNeighborsClassifier(n_neighbors = 10, weights = 'distance')
nbrs.fit(MPGaussianTrain,Y)
Ypredtrain = nbrs.predict(MPGaussianTrain)
end = time.time()  # timing spans fitting plus the training-set prediction
print "Compute Training Error"
#Calculate Training Error
train_error = np.sum(np.array(Y) != np.array(Ypredtrain))/10000
print train_error
Ytest=test_batch_labels
MPGaussianTest = np.loadtxt("Input_test.txt")
"Compute Test Error"
#Get Test Error
Ypredtest = nbrs.predict(MPGaussianTest)
test_error = np.sum(np.array(Ytest) != np.array(Ypredtest))/10000
np.savetxt('YpredtestkNN.txt', Ypredtest)
print test_error
timing = end-start
f = open('10000-10NNResults.txt', 'w+')
f.write('Training Error: %s\n' %train_error)
f.write('Test Error: %s\n' %test_error)
f.write('Timing: %s\n' %timing)
f.close()
| [
"[email protected]"
] | |
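Instead of hard-coding `n_neighbors=10`, k could be chosen by cross-validation; a sketch assuming a newer scikit-learn (0.18+) and the same `MPGaussianTrain`/`Y` arrays as above:

```python
from sklearn.model_selection import GridSearchCV

grid = GridSearchCV(sk.KNeighborsClassifier(weights='distance'),
                    {'n_neighbors': [1, 5, 10, 20, 50]}, cv=5)
grid.fit(MPGaussianTrain, Y)
print(grid.best_params_, grid.best_score_)
```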
7ab0180b3df108a22aee4895e8a24eeca98b83b3 | 8a43bcad26f266dab04b3430bb83c8ebe822bfd9 | /기초알고리즘/조합,순열_알고리즘/combinations.py | 11631f14ca49c05429dd376fbb4d507d783a0a0e | [] | no_license | Ha-Young/algorithm | e80b3b4dafc4f55bbfc38fb4f356c316d9307618 | 376a4f23d92d52ac30cd3817a37fb137c657a4af | refs/heads/master | 2022-06-24T12:00:04.440801 | 2020-05-07T14:42:27 | 2020-05-07T14:42:27 | 250,194,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | # 조합 알고리즘
# Uses a generator
def combinations(arr, r):
for i in range(len(arr)):
if r == 1:
yield [arr[i]]
else:
for next in combinations(arr[i + 1:], r - 1):
yield [arr[i]] + next
combi = combinations([1,2,3,4,5],3)
print(list(combi)) | [
"[email protected]"
] | |
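Because the generator emits combinations in lexicographic index order, its output can be checked directly against the standard library (a small sketch using the `combinations` generator defined above):

```python
from itertools import combinations as it_combinations

mine = [tuple(c) for c in combinations([1, 2, 3, 4, 5], 3)]
ref = list(it_combinations([1, 2, 3, 4, 5], 3))
assert mine == ref
print(len(ref))  # 10 = C(5, 3)
```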
6667031e2bc9b493017b306fe5752e9c86966c61 | bed0735d6e067d7d1491d463acf2409374abcd79 | /content/migrations/0013_auto_20170529_0350.py | 3d366602d69096a4f9b5cc30ea90f04a8a197eef | [] | no_license | bitapardaz/magia_wifi | 9ba515c3e162989280336f214788bb4577b45bac | 282978a64586af2486580ac56238aed3b35582f7 | refs/heads/master | 2021-01-22T23:05:28.511187 | 2017-05-30T04:13:00 | 2017-05-30T04:13:00 | 92,797,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-29 03:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('content', '0012_category_file_path'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='file_path',
),
migrations.AddField(
model_name='movie',
name='file_path',
field=models.FilePathField(blank=True, null=True, path='/mnt/FlashDrive/wifi_storage/movies/'),
),
]
| [
"[email protected]"
] | |
f655308c6590a8d2eec45dc2d83e1818e2d5ee36 | c839bb0899fb6665f59e8ce8644d094035f3d3bc | /thomas/edsurface.py | b9d7010135378c14a9558ea623888256ac91f27f | [
"Apache-2.0"
] | permissive | ThomasDerZweifler/pyPro | 53467f029c9ca057e546b2d74766927fd1657cf3 | 8bc6531483f08982b19c08a4cdb1a0c9dbd03737 | refs/heads/master | 2023-03-17T13:58:13.813111 | 2021-03-28T09:30:59 | 2021-03-28T09:30:59 | 280,870,639 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection on older matplotlib
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Create the mesh in polar coordinates and compute corresponding Z.
r = np.linspace(0, 1.25, 50)
p = np.linspace(0, 2*np.pi, 50)
R, P = np.meshgrid(r, p)
Z = ((R**2 - 1)**2)
# Express the mesh in the cartesian system.
X, Y = R*np.cos(P), R*np.sin(P)
# Plot the surface.
ax.plot_surface(X, Y, Z, cmap=plt.cm.YlGnBu_r)
# Tweak the limits and add latex math labels.
ax.set_zlim(0, 1)
ax.set_xlabel(r'$\phi_\mathrm{real}$')
ax.set_ylabel(r'$\phi_\mathrm{im}$')
ax.set_zlabel(r'$V(\phi)$')
plt.show() | [
"[email protected]"
] | |
0440e6fe39352fb099147feba38f8f2123481e02 | d1601687464225857b492f4b5009283932f91e2f | /student/urls.py | fc0f3c7e7bec0f53a5dc149d4e925f11c030978d | [] | no_license | Barolina/mp_06_08 | 010a4906153c297f21098ef90811dba1f1aacc0b | 0f3e30ccb8fe178f11918836d958533365a1d99a | refs/heads/master | 2021-06-28T20:09:38.345537 | 2017-09-19T08:50:55 | 2017-09-19T08:50:55 | 103,932,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py |
# -*- coding: utf-8 -*-
from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic import TemplateView
from material.frontend import urls as frontend_urls
from student.views import AddStudentWizard, FORMS, StudentsView
urlpatterns = [
url(r'^addstudent/$', AddStudentWizard.as_view(FORMS), name='addstudent'),
url(r'', StudentsView.as_view(), name='liststudent'),
]
| [
"[email protected]"
] | |
28d8b073824c0f7153f5892108c1e04a83927893 | 086ab7897780542bb26b46fa10cd42e31319e496 | /Marcas/Examen Marcas/Funciones.py | f5257464c8f77b277760462293841e5eb509cda6 | [] | no_license | alexrr12341/1ASIR | 2d75bffbaaabb773cf8f38e6f222889423c64387 | 5d148a30ebafce452957ada842e2511fae40fbbe | refs/heads/master | 2021-07-22T19:30:18.646010 | 2021-07-10T09:16:36 | 2021-07-10T09:16:36 | 189,486,719 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,542 | py | '''Nombre:Alejandro Rodríguez Rojas
1) We want to manage the prices in a supermarket; for that we will create two small functions:
CalcularPrecio: This function receives the name of an item, its price, and the quantity the customer has bought,
and returns the final price. To perform the calculation we have to use the following function, which tells us whether the item is discounted.
If it is discounted we have to use 50% of the price.
EstaRebajado: Receives the name of an item; if the name contains the word "Rebajas" the item is discounted. This function returns whether the item is discounted or not.
Create these two functions in one file, and then create two programs (in two separate files) that do the following:
2) Write a program that keeps asking for items (name and price) and the quantity the customer has bought, and keeps showing the final price
(using the functions above). The program ends when we enter * as the item name.
3) Write a program that reads the following text file (the content is variable and you can change it; this is just an example),
with the following information: item name, price, quantity bought:
Fregona Rebajas, 4.5, 3
Detergente, 2.0, 5
Escoba, 1.5, 7'''
def EstaRebajado(articulo):
    # Per the statement: an item is discounted when its name contains "Rebajas".
    return "Rebajas" in articulo

def CalcularPrecio(articulo, precio, cantidad):
    # Final price for `cantidad` units, halved when the item is discounted.
    preciofinal = precio * cantidad
    if EstaRebajado(articulo):
        preciofinal = preciofinal * 0.5
    return preciofinal
| [
"[email protected]"
] | |
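A sketch of program (3) from the exercise statement, built on the two functions above (the file name `articulos.txt` is hypothetical; it would hold the sample lines from the statement):

```python
def procesar_fichero(ruta):
    with open(ruta) as f:
        for linea in f:
            articulo, precio, cantidad = [campo.strip() for campo in linea.split(',')]
            print(articulo, CalcularPrecio(articulo, float(precio), int(cantidad)))

procesar_fichero('articulos.txt')
```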
b7fc81ac1762dbaa2af2112824262c538c6a3aaa | ea713b507484d485bfce58a5869e069fc679cce2 | /Interpolation/0.2_convolutionMamba.py | 37d55aacde51c4b51afd9e55b40b54aeb65ffdad | [] | no_license | TPhilippon/SATELITIMEscripts | e689bc83f5c71600c69beca82d40bf73b7ed05bc | 5e97e21cccd9b335dc3b3926293798cb6880f8db | refs/heads/master | 2020-12-24T18:23:18.544761 | 2016-06-06T13:44:38 | 2016-06-06T13:44:38 | 57,040,095 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,376 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 26 14:55:40 2016
@author: terencephilippon
"""
import os,sys
#from pyhdf.SD import SD, SDC
from pylab import mpl as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
#from netCDF4 import Dataset
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal
import glob
from PIL import Image
from astropy.convolution import convolve, Gaussian2DKernel
from matplotlib.colors import Colormap
from mamba import *
import mambaDisplay.extra
#==============================================================================
# DEFINITIONS
#==============================================================================
homepath = os.environ['HOMEPATH'] # Windows = os.environ['HOMEPATH'] ;;; Linux = os.environ['HOME']
path = homepath+'//SATELITIME//data//ZR//' # Windows = '//SATELITIME//data//ZR//' ;;; Linux = '/SATELITIME/data/ZR/'
path2 = homepath+'//SATELITIME//data//convolve//'
outpath = homepath+'//SATELITIME//data//convolve//' # Windows = //SATELITIME//data//Output//' ;;; Linux = '/SATELITIME/data/Output/'
# **Colormap Chl de ref**
# COULEUR
norm_chl=mpl.colors.LogNorm(vmin=0.01, vmax=20)
colors = [(0.33,0.33,0.33)] + [(plt.cm.jet(i)) for i in xrange(1,256)]
new_map_chl = mpl.colors.LinearSegmentedColormap.from_list('new_map_chl', colors, N=256)
new_map_chl._init(); new_map_chl._lut[0,:] = new_map_chl._lut[1,:] # Replace lowest value of colormap (which is gray) with the one before (dark blue)
Colormap.set_under(new_map_chl,color=new_map_chl._lut[0,:]) # Set color for values outside colormap to be the lowest of the colormap (dark blue)
##Colormap.set_over(new_map_chl,color=(0.0, 0.0, 0.517825311942959, 1.0))
## to get rgba from colormap for a specific value : new_map_chl(specificvalue for example ex : 0.2)
# BLACK AND WHITE
grays = [(0.33,0.33,0.33)] + [(plt.cm.gray(i)) for i in xrange(1,256)]
new_map_gray_chl = mpl.colors.LinearSegmentedColormap.from_list('new_map_gray_chl', grays, N=256)
# ****
#path = '/Users/terencephilippon/Desktop/Python/Input/'
#outpath = '/Users/terencephilippon/Desktop/Python/Output/'
print 'starting...'
print path
# Data we want to read
data = glob.glob(path+'*.npy')
data.sort()
print data
# Def kernels
gauss = Gaussian2DKernel(stddev=1)
#gauss_fft = Gaussian2DKernel(stddev=1)
#==============================================================================
# LOOP
#==============================================================================
for myfile in data:
print 'reading data...'
print myfile
zr = np.load(myfile)
#==============================================================================
# CONVOLVE
#==============================================================================
zr_conv = convolve(zr,gauss)
# zr_convfft = convolve_fft(zr,gauss_fft)
# fig1 = plt.gcf()
# fig, (ax1, ax2) = plt.subplots(1,2)
fig, (ax1) = plt.subplots(1,1)
# plt.imshow(zr_conv, norm=norm_chl, origin='upper', cmap=new_map_chl,)
# ax1.imshow(zr, norm=norm_chl, origin='upper', cmap=new_map_chl,)
ax1.imshow(zr_conv, norm=norm_chl, origin='upper', cmap=new_map_gray_chl,)
# ax3.imshow(zr_convfft, norm=norm_chl, origin='upper', cmap=new_map_chl,)
plt.show()
    fig.savefig(outpath+myfile[-46:-4]+'_convolve'+'.png', dpi=200, bbox_inches='tight')  # savefig needs an image extension, not .npy
plt.close()
#==============================================================================
# MAMBA
#==============================================================================
data2 = glob.glob(path2+'*Gray.png')
data2.sort()
print data2
for myfile in data2:
im = imageMb(myfile)
imSeg = imageMb(im, 32)
print(mambaDisplay.extra.interactiveSegment(im, imSeg))
# fig, (ax1, ax2) = plt.subplots(1,2)
## plt.imshow(zr_conv, norm=norm_chl, origin='upper', cmap=new_map_chl,)
# ax1.imshow(zr, norm=norm_chl, origin='upper', cmap=new_map_chl,)
# ax2.imshow(zr_conv, norm=norm_chl, origin='upper', cmap=new_map_chl,)
## ax3.imshow(zr_convfft, norm=norm_chl, origin='upper', cmap=new_map_chl,)
# plt.show()
# fig.savefig(outpath+myfile[-46:-4]+'_convolve'+'.png', dpi=200, bbox_inches='tight')
# plt.close()
| [
"[email protected]"
] | |
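One reason to use astropy's `convolve` here rather than `scipy.signal`: by default it treats NaNs as missing data and fills them from the kernel-weighted neighbourhood, which is exactly what a cloud-masked chlorophyll field needs. A self-contained illustration (kernel constructed with the same older-astropy `stddev` signature as above):

```python
import numpy as np
from astropy.convolution import convolve, Gaussian2DKernel

field = np.arange(25, dtype=float).reshape(5, 5)
field[2, 2] = np.nan                    # simulate a cloud-masked pixel
smoothed = convolve(field, Gaussian2DKernel(stddev=1))
print(np.isnan(smoothed[2, 2]))         # False: the gap was interpolated
```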
df1d2d6e5379d791010f6961de52b537702755ee | fa8c6888e3ccd25a605c971c9e2345cd5ce3d4b2 | /FTacv_experiments/dispersion_class.py | 28f5f0896ef19ec59e10fd7921f3f5ffee5509fd | [] | no_license | HOLL95/FTacV_2 | c48b4c1827e2b58e5d4e519dae5df4b6db4077bd | e8e7dab1bbc7b6e2c62c3777b9de169593a5d586 | refs/heads/master | 2022-12-08T20:42:01.955678 | 2020-09-24T15:51:33 | 2020-09-24T15:51:33 | 179,301,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,994 | py | from scipy.stats import norm, lognorm
import numpy as np
import itertools
import copy
import math
class dispersion:
def __init__(self, simulation_options, optim_list):
self.simulation_options=simulation_options
if "dispersion_parameters" not in self.simulation_options:
raise ValueError("Dispersion parameters not defined")
if len(self.simulation_options["dispersion_bins"])!=len(self.simulation_options["dispersion_parameters"]):
print(self.simulation_options["dispersion_bins"],self.simulation_options["dispersion_parameters"])
raise ValueError("Need to define number of bins for each parameter")
if len(self.simulation_options["dispersion_distributions"])!=len(self.simulation_options["dispersion_parameters"]):
print(self.simulation_options["dispersion_distributions"],self.simulation_options["dispersion_parameters"])
raise ValueError("Need to define distributions for each parameter")
for i in range(0, len(self.simulation_options["dispersion_parameters"])):
if self.simulation_options["dispersion_distributions"][i]=="uniform":
if (self.simulation_options["dispersion_parameters"][i]+"_lower" not in optim_list) or (self.simulation_options["dispersion_parameters"][i]+"_upper" not in optim_list):
raise ValueError("Uniform distribution requires "+self.simulation_options["dispersion_parameters"][i]+"_lower and " + self.simulation_options["dispersion_parameters"][i]+"_upper")
elif self.simulation_options["dispersion_distributions"][i]=="normal":
if (self.simulation_options["dispersion_parameters"][i]+"_std" not in optim_list):
raise ValueError("Normal distribution requires "+self.simulation_options["dispersion_parameters"][i]+"_mean and " + self.simulation_options["dispersion_parameters"][i]+"_std")
elif self.simulation_options["dispersion_distributions"][i]=="lognormal":
if (self.simulation_options["dispersion_parameters"][i]+"_shape" not in optim_list) or (self.simulation_options["dispersion_parameters"][i]+"_scale" not in optim_list):
raise ValueError("Lognormal distribution requires "+self.simulation_options["dispersion_parameters"][i]+"_shape and " + self.simulation_options["dispersion_parameters"][i]+"_loc and " + self.simulation_options["dispersion_parameters"][i]+"_scale")
else:
raise KeyError(self.simulation_options["dispersion_distributions"][i]+" distribution not implemented")
def generic_dispersion(self, nd_dict, GH_dict=None):
weight_arrays=[]
value_arrays=[]
for i in range(0, len(self.simulation_options["dispersion_parameters"])):
if self.simulation_options["dispersion_distributions"][i]=="uniform":
                value_arrays.append(np.linspace(nd_dict[self.simulation_options["dispersion_parameters"][i]+"_lower"], nd_dict[self.simulation_options["dispersion_parameters"][i]+"_upper"], self.simulation_options["dispersion_bins"][i]))
weight_arrays.append([1/self.simulation_options["dispersion_bins"][i]]*self.simulation_options["dispersion_bins"][i])
elif self.simulation_options["dispersion_distributions"][i]=="normal":
param_mean=nd_dict[self.simulation_options["dispersion_parameters"][i]+"_mean"]
param_std=nd_dict[self.simulation_options["dispersion_parameters"][i]+"_std"]
if type(GH_dict) is dict:
param_vals=[(param_std*math.sqrt(2)*node)+param_mean for node in GH_dict["nodes"]]
param_weights=GH_dict["normal_weights"]
else:
min_val=norm.ppf(1e-4, loc=param_mean, scale=param_std)
max_val=norm.ppf(1-1e-4, loc=param_mean, scale=param_std)
param_vals=np.linspace(min_val, max_val, self.simulation_options["dispersion_bins"][i])
param_weights=np.zeros(self.simulation_options["dispersion_bins"][i])
param_weights[0]=norm.cdf(param_vals[0],loc=param_mean, scale=param_std)
param_midpoints=np.zeros(self.simulation_options["dispersion_bins"][i])
param_midpoints[0]=norm.ppf((1e-4/2), loc=param_mean, scale=param_std)
for j in range(1, self.simulation_options["dispersion_bins"][i]):
param_weights[j]=norm.cdf(param_vals[j],loc=param_mean, scale=param_std)-norm.cdf(param_vals[j-1],loc=param_mean, scale=param_std)
param_midpoints[j]=(param_vals[j-1]+param_vals[j])/2
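                # note: param_midpoints is overwritten below, so the bin
                # midpoints computed above are discarded and the raw bin
                # edges (param_vals) are used as the node values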
param_midpoints=param_vals
value_arrays.append(param_vals)
weight_arrays.append(param_weights)
elif self.simulation_options["dispersion_distributions"][i]=="lognormal":
param_loc=0
param_shape=nd_dict[self.simulation_options["dispersion_parameters"][i]+"_shape"]
param_scale=nd_dict[self.simulation_options["dispersion_parameters"][i]+"_scale"]
min_val=lognorm.ppf(1e-4, param_shape, loc=param_loc, scale=param_scale)
max_val=lognorm.ppf(1-1e-4, param_shape, loc=param_loc, scale=param_scale)
param_vals=np.linspace(min_val, max_val, self.simulation_options["dispersion_bins"][i])
param_weights=np.zeros(self.simulation_options["dispersion_bins"][i])
param_weights[0]=lognorm.cdf(param_vals[0],param_shape, loc=param_loc, scale=param_scale)
param_midpoints=np.zeros(self.simulation_options["dispersion_bins"][i])
param_midpoints[0]=lognorm.ppf((1e-4/2),param_shape, loc=param_loc, scale=param_scale)
print(param_loc)
for j in range(1, self.simulation_options["dispersion_bins"][i]):
param_weights[j]=lognorm.cdf(param_vals[j],param_shape, loc=param_loc, scale=param_scale)-lognorm.cdf(param_vals[j-1],param_shape, loc=param_loc, scale=param_scale)
param_midpoints[j]=(param_vals[j-1]+param_vals[j])/2
value_arrays.append(param_midpoints)
weight_arrays.append(param_weights)
total_len=np.prod(self.simulation_options["dispersion_bins"])
weight_combinations=list(itertools.product(*weight_arrays))
value_combinations=list(itertools.product(*value_arrays))
sim_params=copy.deepcopy(self.simulation_options["dispersion_parameters"])
print(len(value_combinations))
for i in range(0, len(sim_params)):
if sim_params[i]=="E0":
sim_params[i]="E_0"
if sim_params[i]=="k0":
sim_params[i]="k_0"
return sim_params, value_combinations, weight_combinations
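# --- Hypothetical usage sketch (not part of the original experiments); the
# option names and values below are illustrative assumptions only ---
if __name__ == "__main__":
    opts = {"dispersion_parameters": ["E0"],
            "dispersion_bins": [16],
            "dispersion_distributions": ["normal"]}
    disp = dispersion(opts, ["E0_mean", "E0_std"])
    params, values, weights = disp.generic_dispersion({"E0_mean": 0.25, "E0_std": 0.05})
    print(params, len(values), len(weights))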
| [
"[email protected]"
] | |
38b714655a56fb14337d07fc43bdf153b0262b4c | 63cf47ff7a2bf9b1c73d1874dc7182473e392d95 | /0x08-python-more_classes/0-rectangle.py | 28abf4d9dd6dafd5a69920366300443bcc6db82a | [] | no_license | paurbano/holbertonschool-higher_level_programming | cddbf9fd7145e3ba059df3d155312e0d9845abea | 2c055b5240ddd5298996400d8f2a7bc4d33c0ea4 | refs/heads/master | 2020-09-29T04:14:33.125185 | 2020-05-16T05:18:42 | 2020-05-16T05:18:42 | 226,903,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | #!/usr/bin/python3
class Rectangle():
    '''Empty class that defines a rectangle'''
pass
| [
"[email protected]"
] | |
14853ce04e6da9955110846824a06e6f747aa1ca | f7719066ca2e8abdfc38e69d3747ffabe62d774a | /project/misc/baby_bakrepo/experiment_N_SECONDS_SPLIT/mfcc_0.0.py | f80ce7286dada090af02916c8ee4a8e70934572b | [] | no_license | wubinbai/2020 | 2ca5dfaee85d80621631ed034135429daf7bc025 | bd99f5cf495f8a17532c1939aef7782b7f8e629f | refs/heads/master | 2021-08-15T18:51:19.036470 | 2021-07-24T09:14:29 | 2021-07-24T09:14:29 | 231,207,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,403 | py | import numpy as np
import config as cfg
## data_all.py imports
import os
import wave
import librosa
import numpy as np
from tqdm import tqdm
import pickle as pkl
import librosa
from sklearn.preprocessing import normalize
import config as cfg
### end of data_all.py imports
### functions of data_all.py
def extract_logmel(y, sr, size):
# normalization
y = y.astype(np.float32)
normalization_factor = 1 / np.max(np.abs(y))
y = y * normalization_factor
# random crop
if len(y) <= size * sr:
new_y = np.zeros((int(size * sr)+1, ))
new_y[:len(y)] = y
y = new_y
# extract log mel spectrogram #####
melspectrogram = librosa.feature.melspectrogram(
y=y, sr=sr, n_fft=2048, hop_length=1024, n_mels=cfg.N_MEL)
logmelspec = librosa.power_to_db(melspectrogram)
return logmelspec.T
def extract_mfcc(y,sr,size):
# normalization
y = y.astype(np.float32)
normalization_factor = 1 / np.max(np.abs(y))
y = y * normalization_factor
# random crop
if len(y) <= size * sr:
new_y = np.zeros((int(size * sr)+1, ))
new_y[:len(y)] = y
y = new_y
mfccs = librosa.feature.mfcc(y,sr,n_mfcc=40)
return mfccs.T
def get_wave_norm(file):
y, sr = librosa.load(file, sr=cfg.SR)
####### this +0.3 from 0.51 -> 0.54
# add trim for comparison
#y_trimmed, idx = librosa.effects.trim(y)
y_trimmed = y.copy()
# add hpss for comparison, use harmonic (h)
h,p = librosa.effects.hpss(y_trimmed)
####### great code
    ## More experiments below: this didn't improve much (the score went from 0.535 back to 0.49), and test_210.wav came back empty, which was fixed manually by substituting another file. The idea may still work if extra time-difference information is added; also worth doing: inspect each processed file and try other segment lengths, e.g. 0.2 seconds.
# split using librosa, using harmonic component
'''
yhs = librosa.effects.split(h,top_db=30,hop_length=64)
select = np.diff(yhs/sr)>.15
select_audio = np.array([],dtype=h.dtype)
for i in range(select.shape[0]):
if select[i][0]:
temp_y = h[yhs[i][0]:yhs[i][1]]
new = np.concatenate([select_audio,temp_y])
select_audio = new
data = select_audio
'''
return h, sr
### end of functions of data_all.py
### model.py imports
import keras.backend as K
from keras import regularizers
from keras import layers
from keras.models import Sequential
import keras
import os
import wave
import numpy as np
import pickle as pkl
from keras.layers import GaussianNoise
import config as cfg
import json
### end of model.py
### test.py imports
import keras.backend as K
from keras import regularizers
from keras import layers
from keras.models import Sequential
import keras
import os
import wave
import numpy as np
import pickle as pkl
from tqdm import tqdm
import pandas as pd
from keras.models import load_model
import config as cfg
### end of test.py imports
# for constants
start = 1.4#1.385#0.5#1.39
end = 1.41#1.390#10.5#1.41
increment = 0.1#0.005#0.005
for duration in np.arange(start,end,increment):
cfg.TIME_SEG = duration
### data_all.py
if True:#not os.path.isfile('data.pkl'):
DATA_DIR = './input/train'
file_glob = []
for i, cls_fold in tqdm(enumerate(cfg.LABELS)):
cls_base = os.path.join(DATA_DIR, cls_fold)
files = os.listdir(cls_base)
print('{} train num:'.format(cls_fold), len(files))
for pt in files:
file_pt = os.path.join(cls_base, pt)
file_glob.append((file_pt, cfg.LABELS.index(cls_fold)))
print('done.')
data = []
for file, lbl in tqdm(file_glob):
raw, sr = get_wave_norm(file)
seg = int(sr * cfg.TIME_SEG)
length = raw.shape[0]
for i in range((length//seg)*cfg.STRIDE+1):
start = i * int(seg/cfg.STRIDE)#seg/cfg.STRIDE means "walk length = segment length/cfg.STRIDE"
end = start + seg
if end <= length:
x = raw[start:end]
y = np.zeros(cfg.N_CLASS)
y[lbl] = 1
#x = extract_logmel(x, sr, size=cfg.TIME_SEG)
x = extract_mfcc(x,sr,cfg.TIME_SEG)
data.append((x, y))
print(len(data))
with open('data.pkl', 'wb') as f:
pkl.dump(data, f)
### end of data_all.py
### data_test.py
if True:#not os.path.isfile('data_test.pkl'):
DATA_DIR = './input/test'
file_glob = []
for cls_fold in tqdm(os.listdir(DATA_DIR)):
file_pt = os.path.join(DATA_DIR, cls_fold)
file_glob.append(file_pt)
print(len(file_glob))
print('done.')
data = {}
for file in tqdm(file_glob):
temp = []
raw, sr = get_wave_norm(file)
length = raw.shape[0]
seg = int(sr * cfg.TIME_SEG)
for i in range((length//seg)*cfg.STRIDE+1):
start = i * int(seg/cfg.STRIDE)
end = start + seg
if end <= length:
x = raw[start:end]
#x = extract_logmel(x, sr, size=cfg.TIME_SEG)
x = extract_mfcc(x,sr,size=cfg.TIME_SEG)
temp.append(x)
data[file] = np.array(temp)
with open('data_test.pkl', 'wb') as f:
pkl.dump(data, f)
### end of data_test.py
### data_val.py
if True:#not os.path.isfile('data_val.pkl'):
DATA_DIR = './input/val'
file_glob = []
for i, cls_fold in tqdm(enumerate(cfg.LABELS)):
cls_base = os.path.join(DATA_DIR, cls_fold)
files = os.listdir(cls_base)
print('{} train num:'.format(cls_fold), len(files))
for pt in files:
file_pt = os.path.join(cls_base, pt)
file_glob.append((file_pt, cfg.LABELS.index(cls_fold)))
print('done.')
data = []
for file, lbl in tqdm(file_glob):
raw, sr = get_wave_norm(file)
seg = int(sr * cfg.TIME_SEG)
length = raw.shape[0]
for i in range((length//seg)*cfg.STRIDE+1):
start = i * int(seg/cfg.STRIDE)#seg/cfg.STRIDE means "walk length = segment length/cfg.STRIDE"
end = start + seg
if end <= length:
x = raw[start:end]
y = np.zeros(cfg.N_CLASS)
y[lbl] = 1
#x = extract_logmel(x, sr, size=cfg.TIME_SEG)
x = extract_mfcc(x,sr,size=cfg.TIME_SEG)
data.append((x, y))
print(len(data))
with open('data_val.pkl', 'wb') as f:
pkl.dump(data, f)
### end of data_val.py
### model.py
with open('./data.pkl', 'rb') as f:
raw_data = pkl.load(f)
with open('./data_val.pkl', 'rb') as f:
raw_data_val = pkl.load(f)
raw_x = []
raw_y = []
raw_x_val = []
raw_y_val = []
for x, y in raw_data:
raw_x.append(x)
raw_y.append(y)
for x, y in raw_data_val:
raw_x_val.append(x)
raw_y_val.append(y)
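    # Reseeding with the same value makes the x- and y-shuffles below apply
    # identical permutations, keeping features and labels aligned.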
np.random.seed(5)
np.random.shuffle(raw_x)
np.random.shuffle(raw_x_val)
np.random.seed(5)
np.random.shuffle(raw_y)
np.random.shuffle(raw_y_val)
print(len(raw_x), raw_x[0].shape)
print(len(raw_x_val), raw_x_val[0].shape)
train_x = np.array(raw_x)
val_x = np.array(raw_x_val)
train_y = np.array(raw_y)
val_y = np.array(raw_y_val)
print(train_x.shape)
model = Sequential()
model.add(layers.Conv1D(32*2, 3, input_shape=(train_x.shape[1], train_x.shape[2]),
kernel_regularizer=regularizers.l2(1e-7),
activity_regularizer=regularizers.l1(1e-7)))
model.add(GaussianNoise(0.1))
model.add(layers.Dropout(0.5))
model.add(layers.Conv1D(32*2, 3, activation='elu',
kernel_regularizer=regularizers.l1_l2(1e-7)))
model.add(layers.BatchNormalization())
model.add(layers.MaxPool1D())
model.add(GaussianNoise(0.1))
model.add(layers.Dropout(0.5))
model.add(layers.Bidirectional(layers.LSTM(32*4, dropout=0.5, return_sequences=True,
kernel_regularizer=regularizers.l1_l2(1e-7))))
model.add(GaussianNoise(0.1))
model.add(layers.Bidirectional(layers.LSTM(32*4, dropout=0.5, return_sequences=True,
kernel_regularizer=regularizers.l1_l2(1e-7))))
model.add(layers.LSTM(32*2,
kernel_regularizer=regularizers.l1_l2(1e-7)))
model.add(GaussianNoise(0.1))
model.add(layers.Dense(16*2, activation='elu',
kernel_regularizer=regularizers.l1_l2(1e-7)))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(cfg.N_CLASS, activation="softmax"))
model.summary()
adam = keras.optimizers.adam(2e-5)
model.compile(loss='categorical_crossentropy',
optimizer=adam, metrics=['accuracy'])
# Train model on dataset
batch_size = cfg.BATCH_SIZE
steps = len(train_x) // batch_size
# model.load_weights('./my_model.h5')
history = model.fit(x=train_x, y=train_y, batch_size=batch_size,
epochs=cfg.EPOCHES, validation_data=(val_x,val_y), shuffle=True)
model.save('./my_model.h5')
# may be used with "with open xxx"
json.dump(history.history,open('output/fit_history_duration_{}.json'.format(duration),'w'))
# Read data from file:
# data = json.load( open('fit_history_duration_{}.json'.format(duration)))
### end of model.py
### test.py
with open('./data_test.pkl', 'rb') as f:
raw_data = pkl.load(f)
#model = load_model('my_model.h5')
result = {'id': [], 'label': []}
for key, value in tqdm(raw_data.items()):
x = np.array(value)
y = model.predict(x)
y = np.mean(y, axis=0)
pred = cfg.LABELS[np.argmax(y)]
result['id'].append(os.path.split(key)[-1])
result['label'].append(pred)
result = pd.DataFrame(result)
result.to_csv('./submission.csv', index=False)
### end of test.py
| [
"[email protected]"
] | |
32fcd8b993858d0113216f757fc9230001723120 | 1b59d3bcc76401d86a0ea0d0e9cb60ccf9f6de5b | /test/py/test_parsemd.py | 6a78290a1abbab3a2e3d5aa3a7b2cae63f7346ac | [
"Apache-2.0"
] | permissive | uogbuji/versa | baebe45018abed103eafdc1246d121cf7ae1907e | 9fd34102d6c04eba8890c5bba1bcbfccecda097c | refs/heads/master | 2022-05-09T11:16:51.960946 | 2022-04-19T15:37:41 | 2022-04-19T15:37:41 | 17,493,493 | 8 | 2 | Apache-2.0 | 2022-04-12T16:02:19 | 2014-03-06T21:36:26 | Python | UTF-8 | Python | false | false | 3,366 | py | import os
import logging
from versa import I
from versa.driver import memory
from versa.serial import literate
from versa.driver.memory import newmodel
VERSA_BASEIRI = 'http://bibfra.me/purl/versa/'
VERSA_LITERATE1 = """<!--
Test Versa literate model
-->
# @docheader
* @iri:
* @base: http://bibfra.me/vocab/
* @schema: http://bibfra.me/purl/versa/support
# Resource
* synonyms: http://bibframe.org/vocab/Resource http://schema.org/Thing
* label: Resource
* description: Conceptual Resource
* properties: label description image link
"""
def Xtest_versa_syntax1():
# logging.debug(recs)
m = newmodel()
m.create_space()
# from_markdown(VERSA_LITERATE1, m, encoding='utf-8')
literate.parse(VERSA_LITERATE1, m)
logging.debug('VERSA LITERATE EXAMPLE 1')
for link in m.match():
logging.debug('Result: {0}'.format(repr(link)))
# assert result == ()
# assert results == None, "Boo! "
def test_versa_syntax1(testresourcepath):
config = {
'autotype-h1': 'http://example.org/r1',
'autotype-h2': 'http://example.org/r2',
'interpretations': {
VERSA_BASEIRI + 'refines': VERSA_BASEIRI + 'resourceset',
VERSA_BASEIRI + 'properties': VERSA_BASEIRI + 'resourceset',
VERSA_BASEIRI + 'synonyms': VERSA_BASEIRI + 'resourceset'
}
}
m1 = newmodel(baseiri='http://example.org/')
# from_markdown(VERSA_LITERATE1, m, encoding='utf-8')
doc = open(os.path.join(testresourcepath, 'doc1.md')).read()
literate.parse(doc, m1, config=config)
# Use -s to see this
print('='*10, 'test_versa_syntax1, pt 1', '='*10)
literate.write(m1)
m2 = newmodel(baseiri='http://example.org/')
# from_markdown(VERSA_LITERATE1, m, encoding='utf-8')
doc = open(os.path.join(testresourcepath, 'doc1.abbr.md')).read()
literate.parse(doc, m2, config=config)
# Use -s to see this
print('='*10, 'test_versa_syntax1, pt 2', '='*10)
literate.write(m2)
# logging.debug('VERSA LITERATE EXAMPLE 1')
equiv_results = [list(m1.match()), list(m2.match())]
for results in equiv_results:
import pprint; pprint.pprint(results)
assert len(results) == 6
assert (I('http://uche.ogbuji.net/ndewo/'), I('http://bibfra.me/purl/versa/type'), 'http://www.w3.org/TR/html5/#Document', {}) in results
assert (I('http://uche.ogbuji.net/ndewo/'), I('http://www.w3.org/TR/html5/title'), 'Ndewo, Colorado', {}) in results
# assert (I('http://uche.ogbuji.net/ndewo/'), I('http://www.w3.org/TR/html5/title'), 'Ndewo, Colorado', {'@lang': None}) in results
assert (I('http://uche.ogbuji.net/ndewo/'), I('http://www.w3.org/TR/html5/link-type/author'), I('http://uche.ogbuji.net/'), {I('http://www.w3.org/TR/html5/link/description'): 'Uche Ogbuji'}) in results
assert (I('http://uche.ogbuji.net/ndewo/'), I('http://www.w3.org/TR/html5/link-type/see-also'), I('http://www.goodreads.com/book/show/18714145-ndewo-colorado'), {I('http://www.w3.org/TR/html5/link/label'): 'Goodreads'}) in results
assert (I('http://uche.ogbuji.net/'), I('http://bibfra.me/purl/versa/type'), 'http://www.w3.org/TR/html5/#Document', {}) in results
assert (I('http://uche.ogbuji.net/'), I('http://www.w3.org/TR/html5/link-type/see-also'), I('http://uche.ogbuji.net/ndewo/'), {}) in results
| [
"[email protected]"
] | |
7237aa4a102bc3d588885c99560fa78789531184 | 31226e1b0400dd5435ce91cf8602eb6858c2e1bc | /make_trajectory_graphic.py | 3b8e4583538ba05d4f9cf976c69eb98096477461 | [] | no_license | fmailhot/acq_evo_vh_phd | f14cb337b109e2cc9b244d0ffed9e2c985545213 | c2312c5aac52790ac12a661cc011b652f76064c5 | refs/heads/master | 2021-09-28T09:51:22.905304 | 2018-11-16T14:56:37 | 2018-11-16T14:56:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,556 | py | #!/usr/bin/python
import sys
from LIbPhon import LIbPhon
from matplotlib import rc
rc('font', **{'family': 'serif', 'serif': ['Palatino']})
import pylab as plt
params = {'backend': 'ps',
'legend.fontsize': 14,
'xtick.labelsize': 14,
'ytick.labelsize': 14,
'text.usetex': True}
plt.rcParams.update(params)
if len(sys.argv) != 3:
print '\nUsage: %s <LEX MEANING> <opaq|trans>\n' % sys.argv[0]
sys.exit(2)
teach = LIbPhon(teacher=True,
lex="teacher_lexicon_hTrue_cTrue_pTrue_n%s.pck" % sys.argv[2])
nom = teach.produce("%s NOM" % sys.argv[1])
acc = teach.produce("%s ACC" % sys.argv[1])
pl_nom = teach.produce("%s PL NOM" % sys.argv[1])
pl_acc = teach.produce("%s PL ACC" % sys.argv[1])
plt.subplot(211)
l1 = plt.plot(pl_nom[:10][:, 0], "k-", linewidth=3, label="F1")
l2 = plt.plot(pl_nom[:10][:, 1], "k--", linewidth=3, label="F2")
plt.legend()
plt.text(0.5, 2700, r"\textbf{\textsc{%s nom}}" % sys.argv[1], size="x-large")
plt.text(5.5, 2700, r"\textbf{\texttt{%sgu}}" % sys.argv[1], size="x-large")
plt.xlim(0, 12)
plt.ylim(0, 3250)
if sys.argv[2] == "opaq":
suff = "bo"
else:
suff = "be"
plt.subplot(212)
l1 = plt.plot(pl_acc[:12][:, 0], "k-", linewidth=3, label="F1")
l2 = plt.plot(pl_acc[:12][:, 1], "k--", linewidth=3, label="F2")
plt.legend()
plt.text(0.5, 2700, r"\textbf{\textsc{%s acc}}" % sys.argv[1],
size="x-large")
plt.text(5.5, 2700, r"\textbf{\texttt{%sgu%s}}" % (sys.argv[1], suff),
size="x-large")
plt.xlim(0, 12)
plt.ylim(0, 3250)
plt.show()
| [
"[email protected]"
] | |
2fbe178a8e92eaa3d97e9be259350bba2545582d | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_118/2987.py | 8932bed90c351ccec4bf80077cad58a7b656c257 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py | from math import *
import fileinput
def isPalindrome(n):
t = n
r = 0
while(n > 0):
r = r*10 + n%10
n = n//10
if(t == r):
return(1)
else:
return(0)
fairAndSquare_memo = {}
def isFairAndSquare(n):
if n in fairAndSquare_memo:
return fairAndSquare_memo[n]
if(isPalindrome(n) and modf(sqrt(n))[0] == 0 and isPalindrome(sqrt(n))):
fairAndSquare_memo[n] = 1
return 1
else:
fairAndSquare_memo[n] = 0
return 0
def isFairAndSquareRanged(m, n):
sum = 0
for i in range(m, n+1):
if(isFairAndSquare(i)):
sum = sum + 1
return sum
f = open("C-small-attempt0.in")
t = int(f.readline().rstrip())
for i in range(1, t+1):
line = f.readline().rstrip().split(" ")
print("Case #" + str(i) + ": " + str(isFairAndSquareRanged(int(line[0]), int(line[1]))))
f.close()
| [
"[email protected]"
] | |
18fd2025d50c8970a17a74adb3741a9ec3e2e6e3 | 1fdeb21f10e99e4ce52da5d1a51a12ab78af6261 | /send_mail.py | 17b9c06d093569632160c3fb5dcb4c25ebc60968 | [] | no_license | techgirlariin/scraping-stocks | 8f040e8cf309387deae70fb122139abce567ecea | 85dbc1c5afef5670c319239ee8993af8c723f068 | refs/heads/master | 2022-12-01T13:56:27.240277 | 2020-07-27T14:09:45 | 2020-07-27T14:09:45 | 282,904,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email import encoders
from_add='[email protected]'
to_add='[email protected]'
subject = "Finance BY Aradhana"
def send(filename):
#header
msg= MIMEMultipart()
msg['From']=from_add
msg['To']=to_add
msg['Subject']= subject
#body
body="<i>Stock REport</i>"
msg.attach(MIMEText(body,'html'))
my_file=open("filename","rb")
part= MIMEBase('application','octet-stream')
part.set_payload((my_file).read())
encoders.encode_base64(part)
    part.add_header('Content-Disposition','attachment;filename=' + filename)
msg.attach(part)
message=msg.as_string()
server = smtplib.SMTP('smtp.gmail.com',587)
server.starttls()
server.login('[email protected]','zpvrplezkomwsxkb')
server.sendmail(from_add,to_add,message)
server.quit()
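# Example call (hypothetical attachment name):
# send('stock_report.csv')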
| [
"[email protected]"
] | |
89cbf924f90063217548abf38a8aae1c32aa5837 | b6b08275f058b8c4cad0a2dc37cb061fe1c776ba | /collective/leadingmedia/indexers.py | f5b531f14f5c832e64d14751be388b0f3a70d6b1 | [] | no_license | intk/collective.media | 0d4b07e9390bd896b1d9e5664094f667a72b0aad | 3336946d2aeace8110ac305e81e9f98d62861f70 | refs/heads/master | 2021-01-19T05:19:39.039670 | 2014-11-07T11:25:14 | 2014-11-07T11:25:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,343 | py | # Handling indexes
from plone.indexer.decorator import indexer
from collective.leadingmedia.interfaces import ICanContainMedia
from plone.dexterity.interfaces import IDexterityContainer
from plone.app.contenttypes.interfaces import ICollection, IFolder
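# Each @indexer(<interface>) pair below registers the same hasMedia/leadMedia
# logic for a different content interface (Dexterity containers, collections
# and folders), so the catalog can index any of them.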
@indexer(IDexterityContainer)
def hasMedia(object, **kw):
return ICanContainMedia(object).hasMedia()
@indexer(IDexterityContainer)
def leadMedia(object, **kw):
lead = ICanContainMedia(object).getLeadMedia()
if lead is not None:
if hasattr(lead, 'getURL'):
return lead.getURL()
else:
return lead.absolute_url()
@indexer(ICollection)
def collection_hasMedia(object, **kw):
return ICanContainMedia(object).hasMedia()
@indexer(ICollection)
def collection_leadMedia(object, **kw):
lead = ICanContainMedia(object).getLeadMedia()
if lead is not None:
if hasattr(lead, 'getURL'):
return lead.getURL()
else:
return lead.absolute_url()
@indexer(IFolder)
def folder_hasMedia(object, **kw):
return ICanContainMedia(object).hasMedia()
@indexer(IFolder)
def folder_leadMedia(object, **kw):
lead = ICanContainMedia(object).getLeadMedia()
if lead is not None:
if hasattr(lead, 'getURL'):
return lead.getURL()
else:
return lead.absolute_url()
| [
"[email protected]"
] | |
52756449b9d37312fca86c6bc3c3f3aacfb0fdab | dbf137f3e42c3f963c7e02a7581bc56375900420 | /Analysis/_scp_lpc.py | 3fb039d449412804217a6d75a399ca39bf4549dd | [] | no_license | abdollah110/BoostedHTT | 41dcc31ea864e0595441e981befc103caf9b6d84 | 55536aa0631f8423776080dc3ed7486f73408f47 | refs/heads/master | 2023-08-22T16:59:28.946116 | 2020-08-19T12:37:54 | 2020-08-19T12:37:54 | 181,378,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | import os
import sys
# copy each file given on the command line (sys.argv[0] is the script itself)
for i in sys.argv[1:]:
    print(i)
    os.system('scp %s cmslpc26.fnal.gov:/uscms_data/d3/abdollah/Analysis/Limit/CMSSW_8_1_0/src/auxiliaries/shapes/'%i)
| [
"[email protected]"
] | |
32144e38a4a3e556fc65dad9a3425e538831844e | 19564fe219ff687d5190f58edeb01887ed65091e | /pytrap/pytrap-data-access.py | 8cd4e9e9399c1b6abd1ea0df40c33680a94fdf00 | [
"BSD-3-Clause"
] | permissive | krkos/Nemea-Framework | 24be5f1de43df0083ea6b2ab81329a6e583f2633 | f9c3265724649968cdc260c3c4c4fbd548b304ff | refs/heads/master | 2021-08-28T21:00:02.661192 | 2021-07-30T16:22:34 | 2021-07-30T16:22:34 | 245,998,946 | 1 | 0 | null | 2020-03-09T09:53:50 | 2020-03-09T09:53:50 | null | UTF-8 | Python | false | false | 803 | py | # coding: utf-8
import sys
import pdb
import pytrap
ctx = pytrap.TrapCtx()
ctx.init(sys.argv)
ctx.setRequiredFmt(0)
print("\nReceiving one UniRec message")
try:
a = ctx.recv(0)
except pytrap.FormatChanged as e:
fmt = ctx.getDataFmt(0)
rec = pytrap.UnirecTemplate(fmt[1])
a = e.data
del(e)
print(rec)
rec.setData(a)
print("\nDirect access using index")
for i in range(len(rec)):
print(rec.get(i, a))
print("\nAttribute access")
print(rec.SRC_IP)
for i in ["SRC_IP", "DST_IP", "SRC_PORT", "DST_PORT"]:
v = getattr(rec, i)
print(v)
print("\nIteration over all fields")
for i in rec:
print(i)
print("\nPrint values, ids and names of fields")
print(rec.strRecord())
print("\nDict from all fields")
d = {}
for k, v in rec:
d[k] = str(v)
print(d)
ctx.finalize()
| [
"[email protected]"
] | |
7ec24322e91532daee50c59e27c003388a072525 | eb85c96c3783be407b396956c13448d89f5e5fee | /building_more_python_design_patterns/7-python-design-patterns-building-more-m7-exercise-files/Composite/tree.py | db30708f1df216f30dbfb95537eec7637c234eb6 | [] | no_license | ForeverDreamer/python_learning | 83c2c290271dbf060ee1718140b8dfd128b82b20 | ff905c4811ddb688f8ee44aed8c4d8067db6168b | refs/heads/master | 2022-04-30T03:23:45.162498 | 2019-07-05T07:55:01 | 2019-07-05T07:55:01 | 181,037,513 | 1 | 0 | null | 2022-04-22T21:12:46 | 2019-04-12T15:41:44 | Jupyter Notebook | UTF-8 | Python | false | false | 627 | py | from collections import Iterable
from functools import reduce
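# Note: on Python 3.10+ the Iterable import above must come from collections.abc.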
from datetime import date
from abs_composite import AbsComposite
class Tree(Iterable, AbsComposite):
def __init__(self, members):
self.members = members
def __iter__(self):
return iter(self.members)
def get_oldest(self):
def f(t1, t2):
t1_, t2_ = t1.get_oldest(), t2.get_oldest()
return t1_ if t1_.birthdate < t2_.birthdate else t2_
return reduce(f, self, NullPerson())
class NullPerson(AbsComposite):
name = None
birthdate = date.max
def get_oldest(self):
return self
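# --- Hypothetical usage sketch (Person is an assumed leaf type implementing
# AbsComposite; it is not defined in this module) ---
# family = Tree([Person('Ada', date(1815, 12, 10)),
#                Tree([Person('Alan', date(1912, 6, 23))])])
# family.get_oldest().name  # -> 'Ada': reduce() folds get_oldest over nested trees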
| [
"[email protected]"
] | |
1985108aa2ca37b00d6a95e9161685201a33adbc | 03c42654c3fedca05a4627b8b6a228af9725bb88 | /myproject/geno/admin.py | 3e1862b9a2269245dcc0bb3fd0a82150d4f29cab | [] | no_license | ftconsult/geno | 091c6848fe7316d6357a76e752b1f8baeca30145 | 794753f24b906def87ce85612ee8bb827dcc3430 | refs/heads/master | 2021-01-19T00:43:15.027690 | 2013-03-20T01:27:55 | 2013-03-20T01:27:55 | 8,880,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | import models
from django.contrib import admin
from myproject.geno.models import Nodo, NodoLog
class NodoAdmin(admin.ModelAdmin):
list_display = ('year_born','nombre','a_paterno','a_materno')
search_fields = ('nombre','a_paterno','a_materno')
admin.site.register(Nodo,NodoAdmin)
admin.site.register(NodoLog) | [
"[email protected]"
] | |
3469bc511211215a25b8bff121d757849742b955 | e23236b63dbcd74767ba2da627b2e0490faf45e9 | /naanalmart/seller/migrations/0001_initial.py | 17c9b8f9d23e38f1d1eb2ce21745027eff2b695c | [] | no_license | JanardhanReddyMeeniga/project | d964c738b3da5244c82887a7b272a7408f29864b | 3d2b4ee55e5ea654d92ac27e946273d88a0317a4 | refs/heads/master | 2021-01-11T13:36:12.515735 | 2017-06-21T13:05:26 | 2017-06-21T13:05:26 | 95,003,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,131 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('auth', '0006_require_contenttypes_0002'),
]
operations = [
migrations.CreateModel(
name='Seller',
fields=[
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('seller_id', models.AutoField(serialize=False, primary_key=True)),
('email', models.EmailField(unique=True, max_length=254)),
('seller_firstname', models.CharField(max_length=254)),
('seller_lastname', models.CharField(max_length=254)),
('subscription_grand_date', models.DateTimeField(default=django.utils.timezone.now)),
('subscription_end_date', models.DateTimeField(null=True, blank=True)),
('grant_access', models.BooleanField(default=False)),
('is_staff', models.BooleanField(default=False)),
('is_active', models.BooleanField(default=True)),
('date_joined', models.DateTimeField(default=django.utils.timezone.now)),
('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups')),
('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| [
"[email protected]"
] | |
4cae035dc2efad2ff69323b90c19e2423188273c | 38aa92a75a52d17eb78705aa1e514a98f6ea16e5 | /oldboyedu/memtest.py | 1157611a4dc2ae486cb7383de7b2912e55db9ef5 | [] | no_license | roguewang/python | 89acc3e869a8ec372a298f7de86ed8ceec8fe6b7 | 0f3e9edb65d67d43c827d6d75325fb1dfcf8c124 | refs/heads/master | 2020-07-02T08:55:19.646488 | 2018-12-07T04:31:19 | 2018-12-07T04:31:19 | 66,704,129 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | #!/usr/bin/env python
# Author:rogue
def fun(name1):
print(name1)
    name1 = input("assignment inside the function: ")
print(name1)
name = input("press any word:")
fun(name)
print(name)
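# Rebinding name1 inside fun() does not affect the caller's variable:
# this final print(name) still shows the value entered at the first prompt.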
| [
"[email protected]"
] | |
42e034bc50a68991d35c7521feb74d490e5d4ff4 | f66c0484ba5ad176fb006287a58fbcc824d58500 | /14 - 236A - Boy or Girl.py | d6d56afcd22832322c9bbfc3ff313853aa3a9791 | [] | no_license | love1024/codeforces-journery | a9388f2bb5d2fab6e89638e715120b3c2c219609 | b7c121486f7e4ff132eb3bbf57200d3a95b8a668 | refs/heads/main | 2023-03-12T01:57:46.948788 | 2021-03-06T18:31:40 | 2021-03-06T18:31:40 | 323,282,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | """
Problem: http://codeforces.com/problemset/problem/236/A
"""
def solve():
# Take string input and convert to set and check even or odd
string = input()
if(len(set(string)) % 2 == 0):
print("CHAT WITH HER!")
else:
print("IGNORE HIM!")
if __name__ == '__main__':
solve() | [
"[email protected]"
] | |
c72ec5d5e9f00aa58651292a079e78452986823d | ff1f833b6c8b7ac83213c1a517d2830104e75b64 | /FlattenedFiles.py | f026b1bce28c742d85039dcb7f2b19593fcf328c | [] | no_license | rorymulcahey/MathSystems | 215b5ca607172c118c863186c064e1333120c135 | 68c8580f5f012f402b40c69ac7c947a1c100a1ea | refs/heads/master | 2020-03-17T08:25:41.159814 | 2018-06-04T16:33:14 | 2018-06-04T16:33:14 | 133,437,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | '''
Flattened Files: string[] GetAllFiles(string[] paths, string rootPath)
Given an array of file paths.
Write a function that returns all paths under the given root path.
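Example sketch (one possible interpretation, assuming simple '/'-separated
path-prefix matching):

    def get_all_files(paths, root_path):
        prefix = root_path.rstrip('/') + '/'
        return [p for p in paths if p == root_path or p.startswith(prefix)]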
''' | [
"[email protected]"
] | |
d2cf15d94e7122584dd7596ee2ce7b1ef5ff1eac | fb9f01b6200bd2576acf2d6ffd5782f91af995e6 | /django_project/settings.py | 453b88b44a55c18fb5035aa899912ab2a140c2db | [] | no_license | bigsandip/Django_Blog | 5bc8277a1efa30c0efa8c6faf0038725a11f5b3d | a7181f6cd0554e5608250ec1c34ca69f3bcc8364 | refs/heads/master | 2022-11-24T12:56:52.948405 | 2020-01-02T15:10:47 | 2020-01-02T15:10:47 | 229,528,606 | 3 | 0 | null | 2022-11-22T04:56:27 | 2019-12-22T06:38:14 | Python | UTF-8 | Python | false | false | 4,040 | py | """
Django settings for django_project project.
Generated by 'django-admin startproject' using Django 3.0.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
import django_heroku # for heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = 'vgr7m)k8mzslgvlzzbdw=6qdz1&4t#0y6*z4vj$0rc)lxllh(s'
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = (os.environ.get('DEBUG_VALUE')=='True')
ALLOWED_HOSTS = ['hamro-blog.herokuapp.com']
# Application definition
INSTALLED_APPS = [
'blog.apps.BlogConfig',
'users.apps.UsersConfig',
    'crispy_forms',  # see CRISPY_TEMPLATE_PACK below
'django_cleanup.apps.CleanupConfig', # for auto replacing old profile pic
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles') # added for heroku deployment
STATIC_URL = '/static/'
# The settings below were added manually (not generated by startproject)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
LOGIN_REDIRECT_URL = 'blog-home' # to redirect to homepage after successful login
LOGIN_URL = 'login' # for login required decoretor to show path
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = os.environ.get('EMAIL_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_PASS')
django_heroku.settings(locals())
| [
"[email protected]"
] | |
1162fae836ec236f0544189549a2863490f0fc0c | bff3053fcc40995bbd8c8bf251042ada99d15427 | /re Module - How to Write and Match Regular Expressions (Regex).py | 6960e617c405862d01e742ef55bd6224d3deee8b | [] | no_license | iampaavan/Pure_Python | e67213eb42229614517c249b4f5b0a01c71c8ce9 | e488b05ea526ab104ebc76a8e5e621301bed8827 | refs/heads/master | 2020-04-27T23:10:48.482213 | 2019-06-30T19:32:08 | 2019-06-30T19:32:08 | 174,765,589 | 1 | 0 | null | 2019-04-21T01:39:53 | 2019-03-10T01:39:13 | Python | UTF-8 | Python | false | false | 1,750 | py | import re
text_to_search = '''
abcdefghijklmnopqurtuvwxyz
ABCDEFGHIJKLMNOPQRSTUVWXYZ
1234567890
Ha HaHa
MetaCharacters (Need to be escaped):
. ^ $ * + ? { } [ ] \ | ( )
paavan.com
321-555-4321
123.555.1234
123*555*1234
800-555-1234
900-555-1234
Mr. Schafer
Mr Smith
Ms Davis
Mrs. Robinson
Mr. T
cat
mat
pat
bat
'''
sentence = 'Start a sentence and then bring it to an end'
# pattern = re.compile(r'abc')
# pattern = re.compile(r'\.')
# pattern = re.compile(r'paavan\.com')
# pattern = re.compile(r'\d')
# pattern = re.compile(r'\D')
# pattern = re.compile(r'\BHa')
# pattern = re.compile(r'^Start')
# pattern = re.compile(r'end$')
# pattern = re.compile(r'\d\d\d[.]\d\d\d[.]\d\d\d\d')
# pattern = re.compile(r'[89]00[-]\d\d\d[-]\d\d\d\d')
# pattern = re.compile(r'[1-5]')
# pattern = re.compile(r'[a-zA-Z]')
# pattern = re.compile(r'[^a-zA-Z]')
# pattern = re.compile(r'[^b]at')
# pattern = re.compile(r'\d{3}.\d{3}.\d{4}')
# pattern = re.compile(r'M(r|s|rs)\.?\s[A-Z]\w*')
pattern = re.compile(r'(Mr|Ms|Mrs)\.?\s[A-Z]\w*')
# pattern = re.compile(r'\d{3}.\d{3}.\d{4}')
# pattern = re.compile(r'Start')
# matches = pattern.findall(text_to_search)
matches = pattern.finditer(text_to_search)
# matches = pattern.match(sentence)
for match in matches:
print(match)
print()
print('**************************************************')
with open('data.txt', 'r') as f:
contents = f.read()
# pattern = re.compile(r'\d\d\d[-]\d\d\d[-]\d\d\d\d')
# pattern = re.compile(r'[89]00[-]\d\d\d[-]\d\d\d\d')
pattern = re.compile(r'[89]00[-]\d{3}[-]\d{4}')
matches = pattern.finditer(contents)
# print(matches)
for match in matches:
print(match)
# print(text_to_search[1:4]) | [
"[email protected]"
] | |
25fc4fe9e428fa70179a9f3a6a69991b043f20bc | c8d652dca4ca71e32482eefe17aeb9cf1d0b2b59 | /practice/contagions/Hypergraph SI SIS SIR/SIR/Hypergraph_SIR_CP.py | 2bac3efcb9592e5a31308583501ce7660fc1a869 | [] | no_license | chqlee/Hypergraphs | 6701e7273f35c0fcb32b4dd26138d4145c738669 | 42026f77b758168d59bc1d11ae643a5cadc7ce0d | refs/heads/master | 2023-07-12T16:42:25.552346 | 2021-08-12T11:43:30 | 2021-08-12T11:43:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,422 | py | # @Title : 超图上的传播
# @Author : tony
# @Date : 2021/8/5
# @Dec : CP strategy
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
import random
from tqdm import tqdm
def constructMatrix():
"""
    Build the node-hyperedge incidence matrix of the hypergraph,
    ensuring every node belongs to at least one hyperedge
    :return: the incidence matrix `matrix`
"""
matrix = np.random.randint(0, 2, size=(100, 10))
for i in range(100):
if sum(matrix[i]) == 0:
j = np.random.randint(0, 10)
matrix[i, j] = 1
return matrix
def findAdjNode(inode, df_hyper_matrix):
"""
    Find the neighbour node set of an infected node
    :param inode: the infected node
    :param df_hyper_matrix: node-hyperedge incidence matrix of the hypergraph
    :return: the nodes contained in one randomly chosen incident hyperedge
"""
    # find the hyperedges that contain this node
edges_conclude_nodes = np.where(np.array(df_hyper_matrix.loc[inode]) == 1)[0]
    # pick one incident hyperedge at random and take the nodes it can spread to
edge = random.sample(list(edges_conclude_nodes), 1)[0]
nodes = np.where(np.array(df_hyper_matrix[edge]) == 1)[0]
return nodes
def formatInfectedList(I_list, infected_list, infected_T):
"""
    Keep only nodes that are not already in I_list or infected_T
    :param I_list: set of currently infected nodes
    :param infected_list: nodes infected this step (unfiltered)
    :param infected_T: nodes already infected earlier in this step
    :return: nodes infected this step (filtered), format_list
"""
format_list = []
for i in range(0, len(infected_list)):
if infected_list[i] not in I_list and infected_list[i] not in infected_T:
format_list.append(infected_list[i])
return format_list
def getTrueStateNode(adj_nodes, I_list, R_list):
"""
    From all candidate nodes, keep only those still in the S state
    :param adj_nodes: all candidate nodes that might be infected
    :param I_list: all nodes infected up to the previous step
    :param R_list: all nodes recovered up to the previous step
    :return: array of candidate nodes that are still susceptible
"""
adj_list = list(adj_nodes)
for i in range(0, len(adj_nodes)):
if adj_nodes[i] in I_list or adj_nodes[i] in R_list:
adj_list.remove(adj_nodes[i])
return np.array(adj_list)
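# Note: sampling a single incident hyperedge per infected node (see
# findAdjNode) is the contact-process (CP) strategy named in this file; a
# reactive-process variant would spread through every incident hyperedge.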
if __name__ == '__main__':
start = time.perf_counter()
    # build the hypergraph incidence matrix
hyper_matrix = constructMatrix()
df_hyper_matrix = pd.DataFrame(hyper_matrix)
    # initial state: assign one infected seed node
N = len(df_hyper_matrix.index.values)
total_matrix = []
total_matrix_R = []
for i_node in tqdm(range(N), desc="Loading..."):
I_list = [i_node]
R_list = []
        # start the spreading process
beta = 0.02
gamma = 0.1
iters = 50
I_total_list = [1]
R_total_list = [0]
for t in range(0, iters):
infected_T = []
for inode in I_list:
                # find the neighbour node set
adj_nodes = findAdjNode(inode, df_hyper_matrix)
                # keep only neighbours that are still susceptible
adj_nodes = getTrueStateNode(adj_nodes, I_list, R_list)
                # attempt to infect the neighbours with probability beta
random_list = np.random.random(size=len(adj_nodes))
index_list = np.where(random_list < beta)[0]
infected_list = adj_nodes[index_list]
infected_list_unique = formatInfectedList(I_list, infected_list, infected_T)
infected_T.extend(infected_list_unique)
            # previously infected nodes recover with probability gamma
            # (iterate over a copy, since I_list is mutated inside the loop)
            for each in list(I_list):
                if random.random() < gamma and each not in R_list:
                    I_list.remove(each)
                    R_list.append(each)
            # add the nodes infected this step
I_list.extend(infected_T)
I_total_list.append(len(I_list))
R_total_list.append(len(R_list))
total_matrix.append(I_total_list)
total_matrix_R.append(R_total_list)
    # average over all seed nodes and plot
final_I_list = pd.DataFrame(total_matrix).mean(axis=0) / N
final_R_list = pd.DataFrame(total_matrix_R).mean(axis=0) / N
final_S_list = 1 - final_I_list - final_R_list
T_list = np.arange(len(final_I_list))
plt.title("Hypergraph SIR of CP strategy " + "beta:" + str(beta) + " gamma:" + str(gamma))
plt.plot(T_list, final_I_list, label='i(t)', color='r')
plt.plot(T_list, final_R_list, label='r(t)', color='g')
plt.plot(T_list, final_S_list, label='s(t)')
plt.legend()
plt.show()
end = time.perf_counter()
print(str(end - start))
| [
"[email protected]"
] | |
d2d8c5342f11ff8d50bfa0ea3c1cc6e60990afbf | e0b47c8d885e43fb61a29320c83a85211c0389b3 | /bio1/freq_with_mismatches.py | 4c3f057f2601993b5452f4901cb171ac7a292877 | [] | no_license | mdk2029/rl | f3e2ddad33b4045e0391baea1e3ca4edd91329a8 | 00be11906e63c47280f65c7c0d4ed9124a8946d5 | refs/heads/master | 2020-03-11T07:03:59.735123 | 2018-05-02T02:34:49 | 2018-05-02T02:34:49 | 54,302,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,785 | py | import sys
from freqArray import patternToNumber, numberToPattern
from base import complement,hamming_neighborhood
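# Strategy: every k-mer in the genome (and its reverse complement) credits one
# occurrence to every pattern within Hamming distance d, indexed into a 4^k
# frequency array via patternToNumber.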
def freqarray_with_mismatches_reverse(genome,k,distance):
all_kmers_freq = [0]*(pow(4,k))
max_freq = 0
for idx in xrange(0,len(genome)-k+1):
for current in [genome[idx:idx+k], complement(genome[idx:idx+k])] :
dneighbors = hamming_neighborhood(current,distance)
for neigh in dneighbors :
rank = patternToNumber(neigh)
all_kmers_freq[rank] = all_kmers_freq[rank]+1
max_freq = max(max_freq,all_kmers_freq[rank])
return (all_kmers_freq,max_freq)
def freq_with_mismatches_reverse(genome, k, distance) :
all_kmers_freq,max_freq = freqarray_with_mismatches_reverse(genome,k,distance)
most_freq = []
for rank,freq in enumerate(all_kmers_freq):
if freq == max_freq :
most_freq.append(rank)
ret = [numberToPattern(rank,k) for rank in most_freq]
return ret
#def freq_with_mismatches_reverse(genome,k,distance) :
if __name__ == '__main__' :
# print hamming_neighborhood("AT", 1)
# print hamming_neighborhood("AT", 2)
# genome = 'ACGTTGCATGTCGCATGATGCATGAGAGCT'
# k = 4
# d = 1
# ret = freq_with_mismatches_reverse(genome,k,d)
# map(lambda x : sys.stdout.write("%s " % x), ret)
#
# with open("Downloads/dataset_9_8.txt") as f:
#
# genome = f.readline().strip()
# kd = f.readline().strip().split()
# k = int(kd[0])
# d = int(kd[1])
#
# ret = freq_with_mismatches_reverse(genome,k,d)
# map(lambda x : sys.stdout.write("%s " % x), ret)
a = hamming_neighborhood('TGCAT', 2)
print len(a)
# map(lambda x : sys.stdout.write("%s " % x), a) | [
"[email protected]"
] | |
d30edb238944fcda7e202e56d3cdb666786c310f | 10542df4e26a9553cad3bb973b746eebaf1cffdb | /relationship_extraction/binary_classification_rel/f1_binary_classification_rel.py | cd2c6070ffa32d7265736978e1ec60d5f3bdea38 | [] | no_license | taotao033/information-extraction-baseline2.0 | 87ef4971a0ea364a55194106d8777f588bd4ad46 | a788c8b8d1bd06f33d6bd4da1ae0fbe68087d0b3 | refs/heads/master | 2022-01-16T13:06:40.583319 | 2019-05-16T10:33:37 | 2019-05-16T10:33:40 | 186,995,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | from sklearn.metrics import f1_score, precision_score, recall_score, classification_report
import numpy as np
gold_file = './runs_logs/2019514/dev_gold.txt'
prediction_file = './runs_logs/2019514/logs/predictions.txt'
ture = []
with open(gold_file, 'r') as gold_f:
for line in gold_f.readlines():
ture.append(int(line))
gold_f.close()
pred = []
with open(prediction_file, 'r') as pred_f:
for line in pred_f.readlines():
pred.append(int(line.split('\t')[1]))
pred_f.close()
ture = np.array(ture)
pred = np.array(pred)
binary_classification_report = classification_report(ture, pred)
print(binary_classification_report)
with open('./runs_logs/2019514/binary_classification_report.txt', 'w') as report:
report.write(binary_classification_report)
report.close() | [
"[email protected]"
] | |
94ec9cf8bb7d7c701718257565dd7a03589b7388 | 55e2bec3ad5a937a9914fcddec62827a84e50441 | /frontend/keras.py | fa5964f8764667c40de77bdeba3e72bae489cf33 | [
"MIT"
] | permissive | mp-chet/Neural-Network-Translator | b59ad4d1b71cdafb21dd1d25d8ed0c195666fc9a | 5ce95d9d0a3603800bff2576760984d068ad841d | refs/heads/master | 2021-05-21T17:35:00.334584 | 2020-03-29T14:05:06 | 2020-03-29T14:05:06 | 252,736,594 | 0 | 0 | MIT | 2020-04-03T13:15:25 | 2020-04-03T13:15:24 | null | UTF-8 | Python | false | false | 1,841 | py | from plugin_collection import FrontendPlugin
import json
class Keras(FrontendPlugin):
"""Keras frontend plugin transforms given Keras h5-file to the intermediate format"""
def __init__(self):
super().__init__('keras', 'Keras Frontend Plugin')
def transform_to_intermediate_format(self, input):
"""Returns the intermediate format represenation of the given h5-file"""
from tensorflow import keras
#? Loading the given model and transforming it to a json object
model = keras.models.load_model(input)
model_json = json.loads(model.to_json())
count=0
        #? Adding the kernel (weight) and bias values of each Dense layer to the generated json object
for layer in model_json['config']['layers']:
if (layer['class_name']=='Dense'):
weights = model.layers[count].get_weights()[0]
biases = model.layers[count].get_weights()[1]
layer['kernel_values'] = weights.tolist()
layer['bias_values'] = biases.tolist()
count+=1
#? Deleting unnecessary information from the json object
del model_json['keras_version']
del model_json['backend']
#? Removing unnecessary information from config object
for layer in model_json['config']['layers']:
layer['config'].pop('trainable', None)
layer['config'].pop('kernel_initializer', None)
layer['config'].pop('bias_initializer', None)
layer['config'].pop('kernel_regularizer', None)
layer['config'].pop('bias_regularizer', None)
layer['config'].pop('activity_regularizer', None)
layer['config'].pop('kernel_constraint', None)
layer['config'].pop('bias_constraint', None)
return model_json | [
"[email protected]"
] | |
c5a787c18a915d3a1cf69841583549dd5a9cc2cf | 1f290bd29534a719ed94d30eea6a9bff241908af | /Find Minimum in Rotated Sorted Array.py | 79e83a60738fd04332d195192b91b5e425a31f03 | [] | no_license | nan0445/Leetcode-Python | 3a838a9178cd86220ace85da2d6a8b084975552d | bccd0f6ebb00e9569093f8ec18ebf0e94035dce6 | refs/heads/master | 2020-03-18T18:19:19.010566 | 2018-08-11T02:17:36 | 2018-08-11T02:17:36 | 135,084,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | class Solution:
def findMin(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if nums[0]<=nums[-1]: return nums[0]
l, r = 0, len(nums)-1
while l<r-1:
mid = (l+r)//2
if nums[mid]>nums[r]:
l = mid + 1
else: r = mid
return min(nums[l],nums[r])
| [
"[email protected]"
] | |
12717307e45c601099299047d414b8dd0913ccb3 | 3e4071f68b45ab88c4110aacbf3c33e5f8bb60c9 | /hyde/ast_printer.py | ef86f4d46bf6144557f21078d4f6f0e4080c0f11 | [
"MIT"
] | permissive | ty-porter/hyde | 661eb39802341939843cab16d3ae4de4da8c767d | 69387e072e80adf2b2f72c3660da8093d6e2297c | refs/heads/main | 2023-08-31T00:20:05.396617 | 2021-09-30T14:25:43 | 2021-09-30T14:25:43 | 405,208,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | from hyde.expressions import Visitor
class ASTPrinter(Visitor):
def generate(self, expr):
return self.visit(expr)
def print(self, expr):
print(self.generate(expr))
def visit_binary(self, binary):
return self.parenthesize(binary.operator.lexeme, binary.left, binary.right)
def visit_grouping(self, grouping):
return self.parenthesize('group', grouping.expression)
def visit_literal(self, literal):
return str(literal.value)
def visit_unary(self, unary):
return self.parenthesize(unary.operator.lexeme, unary.right)
def parenthesize(self, name, *exprs):
text = f'({name}'
for expr in exprs:
text += ' '
text += self.visit(expr)
text += ')'
return text | [
"[email protected]"
] | |
90741935a4a1dc373bf6e279fe3a00a4291bba9a | f55353533f1fe1f0cbbf29f97e9222ae8d705937 | /InstaFernando/manage.py | bb3a39c651da5e496ff7bfef99f12260cac89ffb | [] | no_license | fernandosfar/proyecto | 9e75fbc3001d837dea6cc954e351d52460068b33 | a9a28666494189e064dde7a9d1632e13e653e21e | refs/heads/master | 2022-12-04T08:42:52.070826 | 2020-08-20T00:45:50 | 2020-08-20T00:45:50 | 275,403,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'InstaFernando.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
bfd3762733762e2ac2f360dfa7cf87d8b7043810 | 28a3483eb15fe904cf72ba63efd422058776cf61 | /bissextile.py | 12078bda5a1783fb6852b39fc55d0f42e92a7729 | [] | no_license | cyaoyapi/bissextile | c8f77250e874cb0309d8ca9ee67e418671bc5d65 | 47874b26b0027f24e50191462de5682a48894c5c | refs/heads/master | 2021-01-23T06:02:07.295380 | 2017-03-27T12:35:44 | 2017-03-27T12:35:44 | 86,333,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | #!/usr/bin/python
#-*- coding:utf-8 -*-
"""Ce module vous permet de tester si une année est bissexitle ou pas"""
print("Ce programme vous permet de tester si une année est bissexitle ou pas.\n")
quitter = raw_input("Voulez-vous demarrez le programme ?\nTapez 'o' pour 'oui' et 'n' pour 'non'\n")
while quitter.upper() == "O":
try:
annee = int(raw_input("Entrez l'année : \n"))
if annee < 0:
raise ValueError("L'année que vous que vous avez saisie est négative.\n")
except ValueError:
print "L'année que vous avez saisie est soit négative ou est une alphanumérique.\n"
else:
if (annee%400 == 0) or (annee%4 == 0 and annee%100 != 0):
print annee," est une année bissextile.\n"
else:
print annee," n'est pas une année bissextile.\n"
finally:
quitter = raw_input("Voulez-vous continuez le programme ?\nTapez 'o' pour 'oui' et 'n' pour 'non'\n")
print("Fin du programme. Merci et à bientôt!\n") | [
"[email protected]"
] | |
1d1d7b72a08cd500f9882a19ec17f0ac932e83ba | e35a20601622605ccee79e8a15b424e04618a73f | /koku/reporting/models.py | 6ebd2ea2722d8940ffc4892857cae18e0e30e3d9 | [
"Apache-2.0"
] | permissive | erhwenkuo/koku | 1e59796cc1a49e15c5f4c30bbcdf312e4b65db20 | e3924c15240d97ba8852467016f3e10b2505f785 | refs/heads/main | 2023-08-10T21:46:55.484467 | 2021-09-10T19:31:48 | 2021-09-10T19:31:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,452 | py | #
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Models for cost entry tables."""
# flake8: noqa
from reporting.currency.models import CurrencySettings
from reporting.partition.models import PartitionedTable
from reporting.provider.all.openshift.models import OCPAllComputeSummary
from reporting.provider.all.openshift.models import OCPAllCostLineItemDailySummary
from reporting.provider.all.openshift.models import OCPAllCostLineItemProjectDailySummary
from reporting.provider.all.openshift.models import OCPAllCostSummary
from reporting.provider.all.openshift.models import OCPAllCostSummaryByAccount
from reporting.provider.all.openshift.models import OCPAllCostSummaryByRegion
from reporting.provider.all.openshift.models import OCPAllCostSummaryByService
from reporting.provider.all.openshift.models import OCPAllDatabaseSummary
from reporting.provider.all.openshift.models import OCPAllNetworkSummary
from reporting.provider.all.openshift.models import OCPAllStorageSummary
from reporting.provider.aws.models import AWSAccountAlias
from reporting.provider.aws.models import AWSComputeSummary
from reporting.provider.aws.models import AWSComputeSummaryByAccount
from reporting.provider.aws.models import AWSComputeSummaryByRegion
from reporting.provider.aws.models import AWSComputeSummaryByService
from reporting.provider.aws.models import AWSCostEntry
from reporting.provider.aws.models import AWSCostEntryBill
from reporting.provider.aws.models import AWSCostEntryLineItem
from reporting.provider.aws.models import AWSCostEntryLineItemDaily
from reporting.provider.aws.models import AWSCostEntryLineItemDailySummary
from reporting.provider.aws.models import AWSCostEntryPricing
from reporting.provider.aws.models import AWSCostEntryProduct
from reporting.provider.aws.models import AWSCostEntryReservation
from reporting.provider.aws.models import AWSCostSummary
from reporting.provider.aws.models import AWSCostSummaryByAccount
from reporting.provider.aws.models import AWSCostSummaryByRegion
from reporting.provider.aws.models import AWSCostSummaryByService
from reporting.provider.aws.models import AWSDatabaseSummary
from reporting.provider.aws.models import AWSEnabledTagKeys
from reporting.provider.aws.models import AWSNetworkSummary
from reporting.provider.aws.models import AWSOrganizationalUnit
from reporting.provider.aws.models import AWSStorageSummary
from reporting.provider.aws.models import AWSStorageSummaryByAccount
from reporting.provider.aws.models import AWSStorageSummaryByRegion
from reporting.provider.aws.models import AWSStorageSummaryByService
from reporting.provider.aws.models import AWSTagsSummary
from reporting.provider.aws.openshift.models import OCPAWSComputeSummary
from reporting.provider.aws.openshift.models import OCPAWSCostLineItemDailySummary
from reporting.provider.aws.openshift.models import OCPAWSCostLineItemProjectDailySummary
from reporting.provider.aws.openshift.models import OCPAWSCostSummary
from reporting.provider.aws.openshift.models import OCPAWSCostSummaryByAccount
from reporting.provider.aws.openshift.models import OCPAWSCostSummaryByRegion
from reporting.provider.aws.openshift.models import OCPAWSCostSummaryByService
from reporting.provider.aws.openshift.models import OCPAWSDatabaseSummary
from reporting.provider.aws.openshift.models import OCPAWSNetworkSummary
from reporting.provider.aws.openshift.models import OCPAWSStorageSummary
from reporting.provider.aws.openshift.models import OCPAWSTagsSummary
from reporting.provider.azure.models import AzureComputeSummary
from reporting.provider.azure.models import AzureCostEntryBill
from reporting.provider.azure.models import AzureCostEntryLineItemDaily
from reporting.provider.azure.models import AzureCostEntryLineItemDailySummary
from reporting.provider.azure.models import AzureCostEntryProductService
from reporting.provider.azure.models import AzureCostSummary
from reporting.provider.azure.models import AzureCostSummaryByAccount
from reporting.provider.azure.models import AzureCostSummaryByLocation
from reporting.provider.azure.models import AzureCostSummaryByService
from reporting.provider.azure.models import AzureDatabaseSummary
from reporting.provider.azure.models import AzureEnabledTagKeys
from reporting.provider.azure.models import AzureMeter
from reporting.provider.azure.models import AzureNetworkSummary
from reporting.provider.azure.models import AzureStorageSummary
from reporting.provider.azure.models import AzureTagsSummary
from reporting.provider.azure.openshift.models import OCPAzureComputeSummary
from reporting.provider.azure.openshift.models import OCPAzureCostLineItemDailySummary
from reporting.provider.azure.openshift.models import OCPAzureCostLineItemProjectDailySummary
from reporting.provider.azure.openshift.models import OCPAzureCostSummary
from reporting.provider.azure.openshift.models import OCPAzureCostSummaryByAccount
from reporting.provider.azure.openshift.models import OCPAzureCostSummaryByLocation
from reporting.provider.azure.openshift.models import OCPAzureCostSummaryByService
from reporting.provider.azure.openshift.models import OCPAzureDatabaseSummary
from reporting.provider.azure.openshift.models import OCPAzureNetworkSummary
from reporting.provider.azure.openshift.models import OCPAzureStorageSummary
from reporting.provider.azure.openshift.models import OCPAzureTagsSummary
from reporting.provider.gcp.models import GCPComputeSummary
from reporting.provider.gcp.models import GCPComputeSummaryByAccount
from reporting.provider.gcp.models import GCPComputeSummaryByProject
from reporting.provider.gcp.models import GCPComputeSummaryByRegion
from reporting.provider.gcp.models import GCPComputeSummaryByService
from reporting.provider.gcp.models import GCPCostEntryBill
from reporting.provider.gcp.models import GCPCostEntryLineItemDailySummary
from reporting.provider.gcp.models import GCPCostEntryProductService
from reporting.provider.gcp.models import GCPCostSummary
from reporting.provider.gcp.models import GCPCostSummaryByAccount
from reporting.provider.gcp.models import GCPCostSummaryByProject
from reporting.provider.gcp.models import GCPCostSummaryByRegion
from reporting.provider.gcp.models import GCPCostSummaryByService
from reporting.provider.gcp.models import GCPDatabaseSummary
from reporting.provider.gcp.models import GCPEnabledTagKeys
from reporting.provider.gcp.models import GCPNetworkSummary
from reporting.provider.gcp.models import GCPStorageSummary
from reporting.provider.gcp.models import GCPStorageSummaryByAccount
from reporting.provider.gcp.models import GCPStorageSummaryByProject
from reporting.provider.gcp.models import GCPStorageSummaryByRegion
from reporting.provider.gcp.models import GCPStorageSummaryByService
from reporting.provider.gcp.models import GCPTagsSummary
from reporting.provider.ocp.costs.models import CostSummary
from reporting.provider.ocp.models import OCPCostSummary
from reporting.provider.ocp.models import OCPCostSummaryByNode
from reporting.provider.ocp.models import OCPCostSummaryByProject
from reporting.provider.ocp.models import OCPEnabledTagKeys
from reporting.provider.ocp.models import OCPNodeLabelLineItem
from reporting.provider.ocp.models import OCPNodeLabelLineItemDaily
from reporting.provider.ocp.models import OCPPodSummary
from reporting.provider.ocp.models import OCPPodSummaryByProject
from reporting.provider.ocp.models import OCPStorageLineItem
from reporting.provider.ocp.models import OCPStorageLineItemDaily
from reporting.provider.ocp.models import OCPStorageVolumeLabelSummary
from reporting.provider.ocp.models import OCPUsageLineItem
from reporting.provider.ocp.models import OCPUsageLineItemDaily
from reporting.provider.ocp.models import OCPUsageLineItemDailySummary
from reporting.provider.ocp.models import OCPUsagePodLabelSummary
from reporting.provider.ocp.models import OCPUsageReport
from reporting.provider.ocp.models import OCPUsageReportPeriod
from reporting.provider.ocp.models import OCPVolumeSummary
from reporting.provider.ocp.models import OCPVolumeSummaryByProject
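# Materialized-view model classes grouped into per-provider tuples, so a
# caller can walk every summary view for one source type in a single pass
# (e.g. when refreshing the views after a summarization run).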
AWS_MATERIALIZED_VIEWS = (
AWSComputeSummary,
AWSComputeSummaryByAccount,
AWSComputeSummaryByRegion,
AWSComputeSummaryByService,
AWSCostSummary,
AWSCostSummaryByAccount,
AWSCostSummaryByRegion,
AWSCostSummaryByService,
AWSDatabaseSummary,
AWSNetworkSummary,
AWSStorageSummary,
AWSStorageSummaryByAccount,
AWSStorageSummaryByRegion,
AWSStorageSummaryByService,
)
AZURE_MATERIALIZED_VIEWS = (
AzureCostSummary,
AzureCostSummaryByAccount,
AzureCostSummaryByLocation,
AzureCostSummaryByService,
AzureComputeSummary,
AzureStorageSummary,
AzureNetworkSummary,
AzureDatabaseSummary,
)
OCP_MATERIALIZED_VIEWS = (
OCPPodSummary,
OCPPodSummaryByProject,
OCPVolumeSummary,
OCPVolumeSummaryByProject,
OCPCostSummary,
OCPCostSummaryByProject,
OCPCostSummaryByNode,
)
OCP_ON_AWS_MATERIALIZED_VIEWS = (
OCPAWSCostSummary,
OCPAWSCostSummaryByAccount,
OCPAWSCostSummaryByService,
OCPAWSCostSummaryByRegion,
OCPAWSComputeSummary,
OCPAWSStorageSummary,
OCPAWSNetworkSummary,
OCPAWSDatabaseSummary,
)
OCP_ON_AZURE_MATERIALIZED_VIEWS = (
OCPAzureCostSummary,
OCPAzureCostSummaryByAccount,
OCPAzureCostSummaryByService,
OCPAzureCostSummaryByLocation,
OCPAzureComputeSummary,
OCPAzureStorageSummary,
OCPAzureNetworkSummary,
OCPAzureDatabaseSummary,
)
OCP_ON_INFRASTRUCTURE_MATERIALIZED_VIEWS = (
# OCPAllCostLineItemDailySummary,
# OCPAllCostSummary,
# OCPAllCostSummaryByAccount,
# OCPAllCostSummaryByService,
# OCPAllCostSummaryByRegion,
# OCPAllComputeSummary,
# OCPAllDatabaseSummary,
# OCPAllNetworkSummary,
# OCPAllStorageSummary,
# OCPAllCostLineItemProjectDailySummary,
OCPCostSummary,
OCPCostSummaryByProject,
OCPCostSummaryByNode,
)
GCP_MATERIALIZED_VIEWS = (
GCPCostSummary,
GCPCostSummaryByAccount,
GCPCostSummaryByProject,
GCPCostSummaryByRegion,
GCPCostSummaryByService,
GCPComputeSummary,
GCPComputeSummaryByProject,
GCPComputeSummaryByAccount,
GCPComputeSummaryByService,
GCPComputeSummaryByRegion,
GCPStorageSummary,
GCPStorageSummaryByProject,
GCPStorageSummaryByService,
GCPStorageSummaryByAccount,
GCPStorageSummaryByRegion,
GCPNetworkSummary,
GCPDatabaseSummary,
)
e226ec01ee58cfd6403fc00dd962dcf900f3a5c2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03030/s415212285.py | b6d01ec72d51213db9fcd1d248a31bdd56180770 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | N = int(input())
R = [input().split() for i in range(N)]  # each line: restaurant's city name and score
R2 = [{'i': i + 1, 's': R[i][0], 'p': int(R[i][1])} for i in range(N)]  # remember 1-based input order
S = []
for r in R:  # collect the distinct city names
    if r[0] not in S:
        S.append(r[0])
S.sort()  # cities in lexicographic order
for s in S:
    rs = [r for r in R2 if r['s'] == s]
    sorted_rs = sorted(rs, key=lambda x: x['p'], reverse=True)  # higher score first within a city
    for r in sorted_rs:
        print(r['i'])
"[email protected]"
] | |
# --- ladybug-tools/honeybee-anvil: anvilCore/service_pb2.py (no license) ---
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: anvilCore/service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='anvilCore/service.proto',
package='Autodesk.Anvil.Protos',
syntax='proto3',
  serialized_pb=_b('\n\x17\x61nvilCore/service.proto\x12\x15\x41utodesk.Anvil.Protos\"\x14\n\x12\x44\x65scriptionRequest\"\xfd\x01\n\x13\x44\x65scriptionResponse\x12\x14\n\x0c\x63ompany_name\x18\x01 \x01(\t\x12\x14\n\x0cservice_name\x18\x02 \x01(\t\x12\x1b\n\x13service_description\x18\x03 \x01(\t\x12\x61\n\x14\x61pplication_metadata\x18\x04 \x03(\x0b\x32\x43.Autodesk.Anvil.Protos.DescriptionResponse.ApplicationMetadataEntry\x1a:\n\x18\x41pplicationMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x0f\n\rProtosRequest\"$\n\x0eProtosResponse\x12\x12\n\nfile_paths\x18\x01 \x03(\t\"\x11\n\x0f\x45xamplesRequest\"&\n\x10\x45xamplesResponse\x12\x12\n\nfile_paths\x18\x01 \x03(\t\"\x0e\n\x0cTestsRequest\"#\n\rTestsResponse\x12\x12\n\nfile_paths\x18\x01 \x03(\t\"\x0f\n\rGuidesRequest\"$\n\x0eGuidesResponse\x12\x12\n\nfile_paths\x18\x01 \x03(\t\"\x1c\n\x0b\x46ileRequest\x12\r\n\x05paths\x18\x01 \x03(\t\":\n\x0c\x46ileResponse\x12*\n\x05\x66iles\x18\x01 \x03(\x0b\x32\x1b.Autodesk.Anvil.Protos.File\"&\n\x04\x46ile\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x10\n\x08\x63ontents\x18\x02 \x01(\x0c\"\x0f\n\rStatusRequest\"x\n\x0eStatusResponse\x12<\n\x06status\x18\x01 \x01(\x0e\x32,.Autodesk.Anvil.Protos.StatusResponse.Status\"(\n\x06Status\x12\t\n\x05READY\x10\x00\x12\x08\n\x04\x42USY\x10\x01\x12\t\n\x05\x45RROR\x10\x64\"\xf8\x01\n\x17ReservationAddedRequest\x12\x11\n\tuser_data\x18\x01 \x01(\x0c\x12\x61\n\x12session_decorators\x18\x02 \x03(\x0b\x32\x45.Autodesk.Anvil.Protos.ReservationAddedRequest.SessionDecoratorsEntry\x12\x19\n\x11reservation_token\x18\x03 \x01(\t\x12\x12\n\nsession_id\x18\x04 \x01(\t\x1a\x38\n\x16SessionDecoratorsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x95\x01\n\x18ReservationAddedResponse\x12\x46\n\x06status\x18\x01 \x01(\x0e\x32\x36.Autodesk.Anvil.Protos.ReservationAddedResponse.Status\"1\n\x06Status\x12\x06\n\x02OK\x10\x00\x12\x15\n\x11PERMISSION_DENIED\x10\x01\x12\x08\n\x04\x42USY\x10\x02\"7\n\x1aReservationReleasedRequest\x12\x19\n\x11reservation_token\x18\x01 \x01(\t\"\x87\x01\n\x1bReservationReleasedResponse\x12I\n\x06status\x18\x01 \x01(\x0e\x32\x39.Autodesk.Anvil.Protos.ReservationReleasedResponse.Status\"\x1d\n\x06Status\x12\x06\n\x02OK\x10\x00\x12\x0b\n\x07RESTART\x10\x01\x32\xb0\x05\n\nReservable\x12P\n\x05\x46iles\x12\".Autodesk.Anvil.Protos.FileRequest\x1a#.Autodesk.Anvil.Protos.FileResponse\x12U\n\x06Protos\x12$.Autodesk.Anvil.Protos.ProtosRequest\x1a%.Autodesk.Anvil.Protos.ProtosResponse\x12[\n\x08\x45xamples\x12&.Autodesk.Anvil.Protos.ExamplesRequest\x1a\'.Autodesk.Anvil.Protos.ExamplesResponse\x12R\n\x05Tests\x12#.Autodesk.Anvil.Protos.TestsRequest\x1a$.Autodesk.Anvil.Protos.TestsResponse\x12U\n\x06Guides\x12$.Autodesk.Anvil.Protos.GuidesRequest\x1a%.Autodesk.Anvil.Protos.GuidesResponse\x12s\n\x10ReservationAdded\x12..Autodesk.Anvil.Protos.ReservationAddedRequest\x1a/.Autodesk.Anvil.Protos.ReservationAddedResponse\x12|\n\x13ReservationReleased\x12\x31.Autodesk.Anvil.Protos.ReservationReleasedRequest\x1a\x32.Autodesk.Anvil.Protos.ReservationReleasedResponse2\xc8\x01\n\tReadiness\x12\x64\n\x0b\x44\x65scription\x12).Autodesk.Anvil.Protos.DescriptionRequest\x1a*.Autodesk.Anvil.Protos.DescriptionResponse\x12U\n\x06Status\x12$.Autodesk.Anvil.Protos.StatusRequest\x1a%.Autodesk.Anvil.Protos.StatusResponse2\xef\x06\n\x0cMicroservice\x12\x64\n\x0b\x44\x65scription\x12).Autodesk.Anvil.Protos.DescriptionRequest\x1a*.Autodesk.Anvil.Protos.DescriptionResponse\x12U\n\x06Status\x12$.Autodesk.Anvil.Protos.StatusRequest\x1a%.Autodesk.Anvil.Protos.StatusResponse\x12P\n\x05\x46iles\x12\".Autodesk.Anvil.Protos.FileRequest\x1a#.Autodesk.Anvil.Protos.FileResponse\x12U\n\x06Protos\x12$.Autodesk.Anvil.Protos.ProtosRequest\x1a%.Autodesk.Anvil.Protos.ProtosResponse\x12[\n\x08\x45xamples\x12&.Autodesk.Anvil.Protos.ExamplesRequest\x1a\'.Autodesk.Anvil.Protos.ExamplesResponse\x12R\n\x05Tests\x12#.Autodesk.Anvil.Protos.TestsRequest\x1a$.Autodesk.Anvil.Protos.TestsResponse\x12U\n\x06Guides\x12$.Autodesk.Anvil.Protos.GuidesRequest\x1a%.Autodesk.Anvil.Protos.GuidesResponse\x12s\n\x10ReservationAdded\x12..Autodesk.Anvil.Protos.ReservationAddedRequest\x1a/.Autodesk.Anvil.Protos.ReservationAddedResponse\x12|\n\x13ReservationReleased\x12\x31.Autodesk.Anvil.Protos.ReservationReleasedRequest\x1a\x32.Autodesk.Anvil.Protos.ReservationReleasedResponseB\x08Z\x06protosb\x06proto3')
)
_STATUSRESPONSE_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='Autodesk.Anvil.Protos.StatusResponse.Status',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='READY', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BUSY', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR', index=2, number=100,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=777,
serialized_end=817,
)
_sym_db.RegisterEnumDescriptor(_STATUSRESPONSE_STATUS)
_RESERVATIONADDEDRESPONSE_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='Autodesk.Anvil.Protos.ReservationAddedResponse.Status',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='OK', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PERMISSION_DENIED', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BUSY', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1171,
serialized_end=1220,
)
_sym_db.RegisterEnumDescriptor(_RESERVATIONADDEDRESPONSE_STATUS)
_RESERVATIONRELEASEDRESPONSE_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='Autodesk.Anvil.Protos.ReservationReleasedResponse.Status',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='OK', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RESTART', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1386,
serialized_end=1415,
)
_sym_db.RegisterEnumDescriptor(_RESERVATIONRELEASEDRESPONSE_STATUS)
_DESCRIPTIONREQUEST = _descriptor.Descriptor(
name='DescriptionRequest',
full_name='Autodesk.Anvil.Protos.DescriptionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=50,
serialized_end=70,
)
_DESCRIPTIONRESPONSE_APPLICATIONMETADATAENTRY = _descriptor.Descriptor(
name='ApplicationMetadataEntry',
full_name='Autodesk.Anvil.Protos.DescriptionResponse.ApplicationMetadataEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='Autodesk.Anvil.Protos.DescriptionResponse.ApplicationMetadataEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='Autodesk.Anvil.Protos.DescriptionResponse.ApplicationMetadataEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=268,
serialized_end=326,
)
_DESCRIPTIONRESPONSE = _descriptor.Descriptor(
name='DescriptionResponse',
full_name='Autodesk.Anvil.Protos.DescriptionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='company_name', full_name='Autodesk.Anvil.Protos.DescriptionResponse.company_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='service_name', full_name='Autodesk.Anvil.Protos.DescriptionResponse.service_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='service_description', full_name='Autodesk.Anvil.Protos.DescriptionResponse.service_description', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='application_metadata', full_name='Autodesk.Anvil.Protos.DescriptionResponse.application_metadata', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_DESCRIPTIONRESPONSE_APPLICATIONMETADATAENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=73,
serialized_end=326,
)
_PROTOSREQUEST = _descriptor.Descriptor(
name='ProtosRequest',
full_name='Autodesk.Anvil.Protos.ProtosRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=328,
serialized_end=343,
)
_PROTOSRESPONSE = _descriptor.Descriptor(
name='ProtosResponse',
full_name='Autodesk.Anvil.Protos.ProtosResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='file_paths', full_name='Autodesk.Anvil.Protos.ProtosResponse.file_paths', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=345,
serialized_end=381,
)
_EXAMPLESREQUEST = _descriptor.Descriptor(
name='ExamplesRequest',
full_name='Autodesk.Anvil.Protos.ExamplesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=383,
serialized_end=400,
)
_EXAMPLESRESPONSE = _descriptor.Descriptor(
name='ExamplesResponse',
full_name='Autodesk.Anvil.Protos.ExamplesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='file_paths', full_name='Autodesk.Anvil.Protos.ExamplesResponse.file_paths', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=402,
serialized_end=440,
)
_TESTSREQUEST = _descriptor.Descriptor(
name='TestsRequest',
full_name='Autodesk.Anvil.Protos.TestsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=442,
serialized_end=456,
)
_TESTSRESPONSE = _descriptor.Descriptor(
name='TestsResponse',
full_name='Autodesk.Anvil.Protos.TestsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='file_paths', full_name='Autodesk.Anvil.Protos.TestsResponse.file_paths', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=458,
serialized_end=493,
)
_GUIDESREQUEST = _descriptor.Descriptor(
name='GuidesRequest',
full_name='Autodesk.Anvil.Protos.GuidesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=495,
serialized_end=510,
)
_GUIDESRESPONSE = _descriptor.Descriptor(
name='GuidesResponse',
full_name='Autodesk.Anvil.Protos.GuidesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='file_paths', full_name='Autodesk.Anvil.Protos.GuidesResponse.file_paths', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=512,
serialized_end=548,
)
_FILEREQUEST = _descriptor.Descriptor(
name='FileRequest',
full_name='Autodesk.Anvil.Protos.FileRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='paths', full_name='Autodesk.Anvil.Protos.FileRequest.paths', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=550,
serialized_end=578,
)
_FILERESPONSE = _descriptor.Descriptor(
name='FileResponse',
full_name='Autodesk.Anvil.Protos.FileResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='files', full_name='Autodesk.Anvil.Protos.FileResponse.files', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=580,
serialized_end=638,
)
_FILE = _descriptor.Descriptor(
name='File',
full_name='Autodesk.Anvil.Protos.File',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='path', full_name='Autodesk.Anvil.Protos.File.path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='contents', full_name='Autodesk.Anvil.Protos.File.contents', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=640,
serialized_end=678,
)
_STATUSREQUEST = _descriptor.Descriptor(
name='StatusRequest',
full_name='Autodesk.Anvil.Protos.StatusRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=680,
serialized_end=695,
)
_STATUSRESPONSE = _descriptor.Descriptor(
name='StatusResponse',
full_name='Autodesk.Anvil.Protos.StatusResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='Autodesk.Anvil.Protos.StatusResponse.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_STATUSRESPONSE_STATUS,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=697,
serialized_end=817,
)
_RESERVATIONADDEDREQUEST_SESSIONDECORATORSENTRY = _descriptor.Descriptor(
name='SessionDecoratorsEntry',
full_name='Autodesk.Anvil.Protos.ReservationAddedRequest.SessionDecoratorsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='Autodesk.Anvil.Protos.ReservationAddedRequest.SessionDecoratorsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='Autodesk.Anvil.Protos.ReservationAddedRequest.SessionDecoratorsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1012,
serialized_end=1068,
)
_RESERVATIONADDEDREQUEST = _descriptor.Descriptor(
name='ReservationAddedRequest',
full_name='Autodesk.Anvil.Protos.ReservationAddedRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='user_data', full_name='Autodesk.Anvil.Protos.ReservationAddedRequest.user_data', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='session_decorators', full_name='Autodesk.Anvil.Protos.ReservationAddedRequest.session_decorators', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reservation_token', full_name='Autodesk.Anvil.Protos.ReservationAddedRequest.reservation_token', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='session_id', full_name='Autodesk.Anvil.Protos.ReservationAddedRequest.session_id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_RESERVATIONADDEDREQUEST_SESSIONDECORATORSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=820,
serialized_end=1068,
)
_RESERVATIONADDEDRESPONSE = _descriptor.Descriptor(
name='ReservationAddedResponse',
full_name='Autodesk.Anvil.Protos.ReservationAddedResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='Autodesk.Anvil.Protos.ReservationAddedResponse.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_RESERVATIONADDEDRESPONSE_STATUS,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1071,
serialized_end=1220,
)
_RESERVATIONRELEASEDREQUEST = _descriptor.Descriptor(
name='ReservationReleasedRequest',
full_name='Autodesk.Anvil.Protos.ReservationReleasedRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='reservation_token', full_name='Autodesk.Anvil.Protos.ReservationReleasedRequest.reservation_token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1222,
serialized_end=1277,
)
_RESERVATIONRELEASEDRESPONSE = _descriptor.Descriptor(
name='ReservationReleasedResponse',
full_name='Autodesk.Anvil.Protos.ReservationReleasedResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='Autodesk.Anvil.Protos.ReservationReleasedResponse.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_RESERVATIONRELEASEDRESPONSE_STATUS,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1280,
serialized_end=1415,
)
_DESCRIPTIONRESPONSE_APPLICATIONMETADATAENTRY.containing_type = _DESCRIPTIONRESPONSE
_DESCRIPTIONRESPONSE.fields_by_name['application_metadata'].message_type = _DESCRIPTIONRESPONSE_APPLICATIONMETADATAENTRY
_FILERESPONSE.fields_by_name['files'].message_type = _FILE
_STATUSRESPONSE.fields_by_name['status'].enum_type = _STATUSRESPONSE_STATUS
_STATUSRESPONSE_STATUS.containing_type = _STATUSRESPONSE
_RESERVATIONADDEDREQUEST_SESSIONDECORATORSENTRY.containing_type = _RESERVATIONADDEDREQUEST
_RESERVATIONADDEDREQUEST.fields_by_name['session_decorators'].message_type = _RESERVATIONADDEDREQUEST_SESSIONDECORATORSENTRY
_RESERVATIONADDEDRESPONSE.fields_by_name['status'].enum_type = _RESERVATIONADDEDRESPONSE_STATUS
_RESERVATIONADDEDRESPONSE_STATUS.containing_type = _RESERVATIONADDEDRESPONSE
_RESERVATIONRELEASEDRESPONSE.fields_by_name['status'].enum_type = _RESERVATIONRELEASEDRESPONSE_STATUS
_RESERVATIONRELEASEDRESPONSE_STATUS.containing_type = _RESERVATIONRELEASEDRESPONSE
DESCRIPTOR.message_types_by_name['DescriptionRequest'] = _DESCRIPTIONREQUEST
DESCRIPTOR.message_types_by_name['DescriptionResponse'] = _DESCRIPTIONRESPONSE
DESCRIPTOR.message_types_by_name['ProtosRequest'] = _PROTOSREQUEST
DESCRIPTOR.message_types_by_name['ProtosResponse'] = _PROTOSRESPONSE
DESCRIPTOR.message_types_by_name['ExamplesRequest'] = _EXAMPLESREQUEST
DESCRIPTOR.message_types_by_name['ExamplesResponse'] = _EXAMPLESRESPONSE
DESCRIPTOR.message_types_by_name['TestsRequest'] = _TESTSREQUEST
DESCRIPTOR.message_types_by_name['TestsResponse'] = _TESTSRESPONSE
DESCRIPTOR.message_types_by_name['GuidesRequest'] = _GUIDESREQUEST
DESCRIPTOR.message_types_by_name['GuidesResponse'] = _GUIDESRESPONSE
DESCRIPTOR.message_types_by_name['FileRequest'] = _FILEREQUEST
DESCRIPTOR.message_types_by_name['FileResponse'] = _FILERESPONSE
DESCRIPTOR.message_types_by_name['File'] = _FILE
DESCRIPTOR.message_types_by_name['StatusRequest'] = _STATUSREQUEST
DESCRIPTOR.message_types_by_name['StatusResponse'] = _STATUSRESPONSE
DESCRIPTOR.message_types_by_name['ReservationAddedRequest'] = _RESERVATIONADDEDREQUEST
DESCRIPTOR.message_types_by_name['ReservationAddedResponse'] = _RESERVATIONADDEDRESPONSE
DESCRIPTOR.message_types_by_name['ReservationReleasedRequest'] = _RESERVATIONRELEASEDREQUEST
DESCRIPTOR.message_types_by_name['ReservationReleasedResponse'] = _RESERVATIONRELEASEDRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DescriptionRequest = _reflection.GeneratedProtocolMessageType('DescriptionRequest', (_message.Message,), dict(
DESCRIPTOR = _DESCRIPTIONREQUEST,
__module__ = 'anvilCore.service_pb2'
# @@protoc_insertion_point(class_scope:Autodesk.Anvil.Protos.DescriptionRequest)
))
_sym_db.RegisterMessage(DescriptionRequest)
DescriptionResponse = _reflection.GeneratedProtocolMessageType('DescriptionResponse', (_message.Message,), dict(
ApplicationMetadataEntry = _reflection.GeneratedProtocolMessageType('ApplicationMetadataEntry', (_message.Message,), dict(
DESCRIPTOR = _DESCRIPTIONRESPONSE_APPLICATIONMETADATAENTRY,
__module__ = 'anvilCore.service_pb2'
# @@protoc_insertion_point(class_scope:Autodesk.Anvil.Protos.DescriptionResponse.ApplicationMetadataEntry)
))
,
DESCRIPTOR = _DESCRIPTIONRESPONSE,
__module__ = 'anvilCore.service_pb2'
# @@protoc_insertion_point(class_scope:Autodesk.Anvil.Protos.DescriptionResponse)
))
_sym_db.RegisterMessage(DescriptionResponse)
_sym_db.RegisterMessage(DescriptionResponse.ApplicationMetadataEntry)
ProtosRequest = _reflection.GeneratedProtocolMessageType('ProtosRequest', (_message.Message,), dict(
DESCRIPTOR = _PROTOSREQUEST,
__module__ = 'anvilCore.service_pb2'
# @@protoc_insertion_point(class_scope:Autodesk.Anvil.Protos.ProtosRequest)
))
_sym_db.RegisterMessage(ProtosRequest)
ProtosResponse = _reflection.GeneratedProtocolMessageType('ProtosResponse', (_message.Message,), dict(
DESCRIPTOR = _PROTOSRESPONSE,
__module__ = 'anvilCore.service_pb2'
# @@protoc_insertion_point(class_scope:Autodesk.Anvil.Protos.ProtosResponse)
))
_sym_db.RegisterMessage(ProtosResponse)
ExamplesRequest = _reflection.GeneratedProtocolMessageType('ExamplesRequest', (_message.Message,), dict(
DESCRIPTOR = _EXAMPLESREQUEST,
__module__ = 'anvilCore.service_pb2'
# @@protoc_insertion_point(class_scope:Autodesk.Anvil.Protos.ExamplesRequest)
))
_sym_db.RegisterMessage(ExamplesRequest)
ExamplesResponse = _reflection.GeneratedProtocolMessageType('ExamplesResponse', (_message.Message,), dict(
DESCRIPTOR = _EXAMPLESRESPONSE,
__module__ = 'anvilCore.service_pb2'
# @@protoc_insertion_point(class_scope:Autodesk.Anvil.Protos.ExamplesResponse)
))
_sym_db.RegisterMessage(ExamplesResponse)
TestsRequest = _reflection.GeneratedProtocolMessageType('TestsRequest', (_message.Message,), dict(
DESCRIPTOR = _TESTSREQUEST,
__module__ = 'anvilCore.service_pb2'
# @@protoc_insertion_point(class_scope:Autodesk.Anvil.Protos.TestsRequest)
))
_sym_db.RegisterMessage(TestsRequest)
TestsResponse = _reflection.GeneratedProtocolMessageType('TestsResponse', (_message.Message,), dict(
DESCRIPTOR = _TESTSRESPONSE,
__module__ = 'anvilCore.service_pb2'
# @@protoc_insertion_point(class_scope:Autodesk.Anvil.Protos.TestsResponse)
))
_sym_db.RegisterMessage(TestsResponse)
GuidesRequest = _reflection.GeneratedProtocolMessageType('GuidesRequest', (_message.Message,), dict(
DESCRIPTOR = _GUIDESREQUEST,
__module__ = 'anvilCore.service_pb2'
# @@protoc_insertion_point(class_scope:Autodesk.Anvil.Protos.GuidesRequest)
))
_sym_db.RegisterMessage(GuidesRequest)
GuidesResponse = _reflection.GeneratedProtocolMessageType('GuidesResponse', (_message.Message,), dict(
DESCRIPTOR = _GUIDESRESPONSE,
__module__ = 'anvilCore.service_pb2'
# @@protoc_insertion_point(class_scope:Autodesk.Anvil.Protos.GuidesResponse)
))
_sym_db.RegisterMessage(GuidesResponse)
FileRequest = _reflection.GeneratedProtocolMessageType('FileRequest', (_message.Message,), dict(
DESCRIPTOR = _FILEREQUEST,
__module__ = 'anvilCore.service_pb2'
# @@protoc_insertion_point(class_scope:Autodesk.Anvil.Protos.FileRequest)
))
_sym_db.RegisterMessage(FileRequest)
FileResponse = _reflection.GeneratedProtocolMessageType('FileResponse', (_message.Message,), dict(
DESCRIPTOR = _FILERESPONSE,
__module__ = 'anvilCore.service_pb2'
# @@protoc_insertion_point(class_scope:Autodesk.Anvil.Protos.FileResponse)
))
_sym_db.RegisterMessage(FileResponse)
File = _reflection.GeneratedProtocolMessageType('File', (_message.Message,), dict(
DESCRIPTOR = _FILE,
__module__ = 'anvilCore.service_pb2'
# @@protoc_insertion_point(class_scope:Autodesk.Anvil.Protos.File)
))
_sym_db.RegisterMessage(File)
StatusRequest = _reflection.GeneratedProtocolMessageType('StatusRequest', (_message.Message,), dict(
DESCRIPTOR = _STATUSREQUEST,
__module__ = 'anvilCore.service_pb2'
# @@protoc_insertion_point(class_scope:Autodesk.Anvil.Protos.StatusRequest)
))
_sym_db.RegisterMessage(StatusRequest)
StatusResponse = _reflection.GeneratedProtocolMessageType('StatusResponse', (_message.Message,), dict(
DESCRIPTOR = _STATUSRESPONSE,
__module__ = 'anvilCore.service_pb2'
# @@protoc_insertion_point(class_scope:Autodesk.Anvil.Protos.StatusResponse)
))
_sym_db.RegisterMessage(StatusResponse)
ReservationAddedRequest = _reflection.GeneratedProtocolMessageType('ReservationAddedRequest', (_message.Message,), dict(
SessionDecoratorsEntry = _reflection.GeneratedProtocolMessageType('SessionDecoratorsEntry', (_message.Message,), dict(
DESCRIPTOR = _RESERVATIONADDEDREQUEST_SESSIONDECORATORSENTRY,
__module__ = 'anvilCore.service_pb2'
# @@protoc_insertion_point(class_scope:Autodesk.Anvil.Protos.ReservationAddedRequest.SessionDecoratorsEntry)
))
,
DESCRIPTOR = _RESERVATIONADDEDREQUEST,
__module__ = 'anvilCore.service_pb2'
# @@protoc_insertion_point(class_scope:Autodesk.Anvil.Protos.ReservationAddedRequest)
))
_sym_db.RegisterMessage(ReservationAddedRequest)
_sym_db.RegisterMessage(ReservationAddedRequest.SessionDecoratorsEntry)
ReservationAddedResponse = _reflection.GeneratedProtocolMessageType('ReservationAddedResponse', (_message.Message,), dict(
DESCRIPTOR = _RESERVATIONADDEDRESPONSE,
__module__ = 'anvilCore.service_pb2'
# @@protoc_insertion_point(class_scope:Autodesk.Anvil.Protos.ReservationAddedResponse)
))
_sym_db.RegisterMessage(ReservationAddedResponse)
ReservationReleasedRequest = _reflection.GeneratedProtocolMessageType('ReservationReleasedRequest', (_message.Message,), dict(
DESCRIPTOR = _RESERVATIONRELEASEDREQUEST,
__module__ = 'anvilCore.service_pb2'
# @@protoc_insertion_point(class_scope:Autodesk.Anvil.Protos.ReservationReleasedRequest)
))
_sym_db.RegisterMessage(ReservationReleasedRequest)
ReservationReleasedResponse = _reflection.GeneratedProtocolMessageType('ReservationReleasedResponse', (_message.Message,), dict(
DESCRIPTOR = _RESERVATIONRELEASEDRESPONSE,
__module__ = 'anvilCore.service_pb2'
# @@protoc_insertion_point(class_scope:Autodesk.Anvil.Protos.ReservationReleasedResponse)
))
_sym_db.RegisterMessage(ReservationReleasedResponse)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('Z\006protos'))
_DESCRIPTIONRESPONSE_APPLICATIONMETADATAENTRY.has_options = True
_DESCRIPTIONRESPONSE_APPLICATIONMETADATAENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_RESERVATIONADDEDREQUEST_SESSIONDECORATORSENTRY.has_options = True
_RESERVATIONADDEDREQUEST_SESSIONDECORATORSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_RESERVABLE = _descriptor.ServiceDescriptor(
name='Reservable',
full_name='Autodesk.Anvil.Protos.Reservable',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=1418,
serialized_end=2106,
methods=[
_descriptor.MethodDescriptor(
name='Files',
full_name='Autodesk.Anvil.Protos.Reservable.Files',
index=0,
containing_service=None,
input_type=_FILEREQUEST,
output_type=_FILERESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='Protos',
full_name='Autodesk.Anvil.Protos.Reservable.Protos',
index=1,
containing_service=None,
input_type=_PROTOSREQUEST,
output_type=_PROTOSRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='Examples',
full_name='Autodesk.Anvil.Protos.Reservable.Examples',
index=2,
containing_service=None,
input_type=_EXAMPLESREQUEST,
output_type=_EXAMPLESRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='Tests',
full_name='Autodesk.Anvil.Protos.Reservable.Tests',
index=3,
containing_service=None,
input_type=_TESTSREQUEST,
output_type=_TESTSRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='Guides',
full_name='Autodesk.Anvil.Protos.Reservable.Guides',
index=4,
containing_service=None,
input_type=_GUIDESREQUEST,
output_type=_GUIDESRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='ReservationAdded',
full_name='Autodesk.Anvil.Protos.Reservable.ReservationAdded',
index=5,
containing_service=None,
input_type=_RESERVATIONADDEDREQUEST,
output_type=_RESERVATIONADDEDRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='ReservationReleased',
full_name='Autodesk.Anvil.Protos.Reservable.ReservationReleased',
index=6,
containing_service=None,
input_type=_RESERVATIONRELEASEDREQUEST,
output_type=_RESERVATIONRELEASEDRESPONSE,
options=None,
),
])
_sym_db.RegisterServiceDescriptor(_RESERVABLE)
DESCRIPTOR.services_by_name['Reservable'] = _RESERVABLE
_READINESS = _descriptor.ServiceDescriptor(
name='Readiness',
full_name='Autodesk.Anvil.Protos.Readiness',
file=DESCRIPTOR,
index=1,
options=None,
serialized_start=2109,
serialized_end=2309,
methods=[
_descriptor.MethodDescriptor(
name='Description',
full_name='Autodesk.Anvil.Protos.Readiness.Description',
index=0,
containing_service=None,
input_type=_DESCRIPTIONREQUEST,
output_type=_DESCRIPTIONRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='Status',
full_name='Autodesk.Anvil.Protos.Readiness.Status',
index=1,
containing_service=None,
input_type=_STATUSREQUEST,
output_type=_STATUSRESPONSE,
options=None,
),
])
_sym_db.RegisterServiceDescriptor(_READINESS)
DESCRIPTOR.services_by_name['Readiness'] = _READINESS
_MICROSERVICE = _descriptor.ServiceDescriptor(
name='Microservice',
full_name='Autodesk.Anvil.Protos.Microservice',
file=DESCRIPTOR,
index=2,
options=None,
serialized_start=2312,
serialized_end=3191,
methods=[
_descriptor.MethodDescriptor(
name='Description',
full_name='Autodesk.Anvil.Protos.Microservice.Description',
index=0,
containing_service=None,
input_type=_DESCRIPTIONREQUEST,
output_type=_DESCRIPTIONRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='Status',
full_name='Autodesk.Anvil.Protos.Microservice.Status',
index=1,
containing_service=None,
input_type=_STATUSREQUEST,
output_type=_STATUSRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='Files',
full_name='Autodesk.Anvil.Protos.Microservice.Files',
index=2,
containing_service=None,
input_type=_FILEREQUEST,
output_type=_FILERESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='Protos',
full_name='Autodesk.Anvil.Protos.Microservice.Protos',
index=3,
containing_service=None,
input_type=_PROTOSREQUEST,
output_type=_PROTOSRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='Examples',
full_name='Autodesk.Anvil.Protos.Microservice.Examples',
index=4,
containing_service=None,
input_type=_EXAMPLESREQUEST,
output_type=_EXAMPLESRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='Tests',
full_name='Autodesk.Anvil.Protos.Microservice.Tests',
index=5,
containing_service=None,
input_type=_TESTSREQUEST,
output_type=_TESTSRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='Guides',
full_name='Autodesk.Anvil.Protos.Microservice.Guides',
index=6,
containing_service=None,
input_type=_GUIDESREQUEST,
output_type=_GUIDESRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='ReservationAdded',
full_name='Autodesk.Anvil.Protos.Microservice.ReservationAdded',
index=7,
containing_service=None,
input_type=_RESERVATIONADDEDREQUEST,
output_type=_RESERVATIONADDEDRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='ReservationReleased',
full_name='Autodesk.Anvil.Protos.Microservice.ReservationReleased',
index=8,
containing_service=None,
input_type=_RESERVATIONRELEASEDREQUEST,
output_type=_RESERVATIONRELEASEDRESPONSE,
options=None,
),
])
_sym_db.RegisterServiceDescriptor(_MICROSERVICE)
DESCRIPTOR.services_by_name['Microservice'] = _MICROSERVICE
# @@protoc_insertion_point(module_scope)
# --- splicemachine/beakerx: beakerx/beakerx/object/__init__.py (Apache-2.0 and others) ---
# Copyright 2014 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..runtime import BeakerX
beakerx = BeakerX()
# --- nhortensioUAL/ualap2021_grupo1: Projeto/program.py (no license) ---
from views import cli
import sys
if __name__ == "__main__":
    sys.stdout.reconfigure(encoding="UTF-8")
    cli.cli()
# --- dogerish/mc-copy-paste: index.py (MIT) ---
# builtin
import asyncio
# third party
from aioconsole import ainput
# custom
from utils import cfg, MCP
from selection import copysel
from commands import parse
mc = MCP()
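# Console loop: run the configured autoexec commands once (exit on failure),
# then parse one command per input line, treating empty input as "list".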
async def cmdloop() -> None:
try: parse(mc, cfg["autoexec"], fatal=True)
except Exception as e:
print(f"Error in autoexec; quitting.")
exit(1)
mc.log("Ready")
while True: parse(mc, await ainput() or "list")
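# Block-hit loop: the first two hit blocks define the region to copy; in
# normal mode a third hit pastes the stored selection there, while paste
# mode pastes at the first hit and copy mode keeps re-copying each pair.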
async def blockhitloop() -> None:
while True:
for e in mc.events.pollBlockHits():
mc.coords.append(e.pos)
mc.log(f"Selected block at {tuple(e.pos)}")
if (mc.mode == "normal" or mc.mode == "copy") and len(mc.coords) == 2:
mc.log("Copying...")
mc.sel = copysel(*mc.coords, mc)
mc.done()
if mc.mode == "copy": mc.coords.clear()
elif mc.mode == "paste" or (mc.mode == "normal" and len(mc.coords) > 2):
mc.log("Pasting...")
mc.sel.paste(mc.coords[2] if mc.mode == "normal" else mc.coords[0])
mc.coords.clear()
mc.done()
await asyncio.sleep(0.1)
async def main() -> None:
await asyncio.gather(cmdloop(), blockhitloop())
if __name__ == "__main__": asyncio.run(main())
# --- ic-labs/django-icekit: glamkit_collections/migrations/0004_geographiclocation_slug.py (MIT) ---
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.template.defaultfilters import slugify
from forms_builder.forms.utils import unique_slug
def create_slugs(apps, _):
GL = apps.get_model('glamkit_collections', 'GeographicLocation')
for self in GL.objects.all():
if not self.slug:
levels = [x for x in (
self.neighborhood, self.city, self.state_province) if
x]
if self.country:
levels.append(self.country.title)
r = ", ".join(levels)
if self.colloquial_historical:
if r:
r ="{0} ({1})".format(self.colloquial_historical, r)
else:
r = self.colloquial_historical
self.slug = unique_slug(type(self).objects, 'slug', slugify(unicode(r)))
self.save()
class Migration(migrations.Migration):
dependencies = [
('glamkit_collections', '0003_auto_20170412_1742'),
]
operations = [
migrations.AddField(
model_name='geographiclocation',
name='slug',
field=models.SlugField(blank=True),
),
migrations.RunPython(create_slugs, lambda x, y: None)
]
# --- MMOTutkunlari/AltyapiPack: root/localeinfo.py (no license) ---
import app
import constInfo
MAP_TRENT02 = "MAP_TRENT02" # temporary
MAP_WL = "MAP_WL" # temporary
MAP_NUSLUCK = "MAP_NUSLUCK" # temporary
MAP_TREE2 = "MAP_TREE2"
BLEND_POTION_NO_TIME = "BLEND_POTION_NO_TIME"
BLEND_POTION_NO_INFO = "BLEND_POTION_NO_INFO"
APP_TITLE = "METIN2"
GUILD_HEADQUARTER = "Main Building"
GUILD_FACILITY = "Facility"
GUILD_OBJECT = "Object"
GUILD_MEMBER_COUNT_INFINITY = "INFINITY"
LOGIN_FAILURE_WEB_BLOCK = "BLOCK_LOGIN(WEB)"
LOGIN_FAILURE_BLOCK_LOGIN = "BLOCK_LOGIN"
CHANNEL_NOTIFY_FULL = "CHANNEL_NOTIFY_FULL"
GUILD_BUILDING_LIST_TXT = app.GetLocalePath() + "/GuildBuildingList.txt"
GUILD_MARK_MIN_LEVEL = "3"
GUILD_MARK_NOT_ENOUGH_LEVEL = "길드레벨 3이상 부터 가능합니다." # "Available only from guild level 3 and up."
ERROR_MARK_UPLOAD_NEED_RECONNECT = "UploadMark: Reconnect to game"
ERROR_MARK_CHECK_NEED_RECONNECT = "CheckMark: Reconnect to game"
VIRTUAL_KEY_ALPHABET_LOWERS = r"[1234567890]/qwertyuiop\=asdfghjkl;`'zxcvbnm.,"
VIRTUAL_KEY_ALPHABET_UPPERS = r'{1234567890}?QWERTYUIOP|+ASDFGHJKL:~"ZXCVBNM<>'
VIRTUAL_KEY_SYMBOLS = '!@#$%^&*()_+|{}:"<>?~'
VIRTUAL_KEY_NUMBERS = "1234567890-=\[];',./`"
VIRTUAL_KEY_SYMBOLS_BR = '!@#$%^&*()_+|{}:"<>?~蓀呻郵悠壬蛭衝銜'
__IS_ENGLISH = "ENGLISH" == app.GetLocaleServiceName()
__IS_HONGKONG = "HONGKONG" == app.GetLocaleServiceName()
__IS_NEWCIBN = "locale/newcibn" == app.GetLocalePath()
__IS_EUROPE = "EUROPE" == app.GetLocaleServiceName()
__IS_CANADA = "locale/ca" == app.GetLocalePath()
__IS_BRAZIL = "locale/br" == app.GetLocalePath()
__IS_SINGAPORE = "locale/sg" == app.GetLocalePath()
__IS_VIETNAM = "locale/vn" == app.GetLocalePath()
__IS_ARABIC = "locale/ae" == app.GetLocalePath()
__IS_CIBN10 = "locale/cibn10" == app.GetLocalePath()
__IS_WE_KOREA = "locale/we_korea" == app.GetLocalePath()
__IS_TAIWAN = "locale/taiwan" == app.GetLocalePath()
__IS_JAPAN = "locale/japan" == app.GetLocalePath()
LOGIN_FAILURE_WRONG_SOCIALID = "ASDF"
LOGIN_FAILURE_SHUTDOWN_TIME = "ASDF"
if __IS_CANADA:
__IS_EUROPE = True
def IsYMIR():
return "locale/ymir" == app.GetLocalePath()
def IsJAPAN():
return "locale/japan" == app.GetLocalePath()
def IsENGLISH():
global __IS_ENGLISH
return __IS_ENGLISH
def IsHONGKONG():
global __IS_HONGKONG
return __IS_HONGKONG
def IsTAIWAN():
return "locale/taiwan" == app.GetLocalePath()
def IsNEWCIBN():
return "locale/newcibn" == app.GetLocalePath()
def IsCIBN10():
global __IS_CIBN10
return __IS_CIBN10
def IsEUROPE():
global __IS_EUROPE
return __IS_EUROPE
def IsCANADA():
global __IS_CANADA
return __IS_CANADA
def IsBRAZIL():
global __IS_BRAZIL
return __IS_BRAZIL
def IsVIETNAM():
global __IS_VIETNAM
return __IS_VIETNAM
def IsSINGAPORE():
global __IS_SINGAPORE
return __IS_SINGAPORE
def IsARABIC():
global __IS_ARABIC
return __IS_ARABIC
def IsWE_KOREA():
return "locale/we_korea" == app.GetLocalePath()
# SUPPORT_NEW_KOREA_SERVER
def LoadLocaleData():
if IsYMIR():
import net
SERVER = "쾌도 서버"
if SERVER == net.GetServerInfo()[:len(SERVER)]:
app.SetCHEONMA(0)
app.LoadLocaleData("locale/we_korea")
constInfo.ADD_DEF_BONUS_ENABLE = 0
else:
app.SetCHEONMA(1)
app.LoadLocaleData("locale/ymir")
constInfo.ADD_DEF_BONUS_ENABLE = 1
else:
app.LoadLocaleData(app.GetLocalePath())
def IsCHEONMA():
	return IsYMIR() # The YMIR locale now always means the Cheonma server; nothing will change until the Cheonma server closes down.
# END_OF_SUPPORT_NEW_KOREA_SERVER
def mapping(**kwargs): return kwargs
def SNA(text):
def f(x):
return text
return f
def SA(text):
def f(x):
return text % x
return f
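# Parses a tab-separated locale file into localeDict; an optional third column
# ("SA"/"SNA") wraps the value in one of the formatter factories above.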
def LoadLocaleFile(srcFileName, localeDict):
funcDict = {"SA":SA, "SNA":SNA}
lineIndex = 1
try:
lines = pack_open(srcFileName, "r").readlines()
except IOError:
import dbg
dbg.LogBox("LoadLocaleError(%(srcFileName)s)" % locals())
app.Abort()
for line in lines:
try:
tokens = line[:-1].split("\t")
if len(tokens) == 2:
localeDict[tokens[0]] = tokens[1]
elif len(tokens) >= 3:
type = tokens[2].strip()
if type:
localeDict[tokens[0]] = funcDict[type](tokens[1])
else:
localeDict[tokens[0]] = tokens[1]
else:
raise RuntimeError, "Unknown TokenSize"
lineIndex += 1
except:
import dbg
dbg.LogBox("%s: line(%d): %s" % (srcFileName, lineIndex, line), "Error")
raise
all = ["locale","error"]
if IsEUROPE() and IsBRAZIL() :
FN_GM_MARK = "%s/effect/gm.mse" % app.GetLocalePath()
LOCALE_FILE_NAME = "%s/locale_game.txt" % app.GetLocalePath()
constInfo.IN_GAME_SHOP_ENABLE = 0
elif IsSINGAPORE() :
FN_GM_MARK = "%s/effect/gm.mse" % app.GetLocalePath()
LOCALE_FILE_NAME = "%s/locale_game.txt" % app.GetLocalePath()
constInfo.IN_GAME_SHOP_ENABLE = 0
elif IsNEWCIBN() :
	## The game title gets garbled here.
APP_TITLE = "劤祿莖2"
FN_GM_MARK = "%s/effect/gm.mse" % app.GetLocalePath()
LOCALE_FILE_NAME = "%s/locale_game.txt" % app.GetLocalePath()
constInfo.IN_GAME_SHOP_ENABLE = 1
elif IsTAIWAN():
APP_TITLE = "갓III곌"
FN_GM_MARK = "%s/effect/gm.mse" % app.GetLocalePath()
LOCALE_FILE_NAME = "%s/locale_game.txt" % app.GetLocalePath()
constInfo.IN_GAME_SHOP_ENABLE = 1
else:
FN_GM_MARK = "%s/effect/gm.mse" % app.GetLocalePath()
LOCALE_FILE_NAME = "%s/locale_game.txt" % app.GetLocalePath()
constInfo.IN_GAME_SHOP_ENABLE = 1
LoadLocaleFile(LOCALE_FILE_NAME, locals())
########################################################################################################
## NOTE : Code that picks the correct Korean object particle (eul/reul) for the "Do you want to discard X?" string shown when dropping an item
dictSingleWord = {
"m":1, "n":1, "r":1, "M":1, "N":1, "R":1, "l":1, "L":1, "1":1, "3":1, "6":1, "7":1, "8":1, "0":1,
}
dictDoubleWord = {
"가":1, "갸":1, "거":1, "겨":1, "고":1, "교":1, "구":1, "규":1, "그":1, "기":1, "개":1, "걔":1, "게":1, "계":1, "과":1, "괘":1, "궈":1, "궤":1, "괴":1, "귀":1, "긔":1,
"까":1, "꺄":1, "꺼":1, "껴":1, "꼬":1, "꾜":1, "꾸":1, "뀨":1, "끄":1, "끼":1, "깨":1, "꺠":1, "께":1, "꼐":1, "꽈":1, "꽤":1, "꿔":1, "꿰":1, "꾀":1, "뀌":1, "끠":1,
"나":1, "냐":1, "너":1, "녀":1, "노":1, "뇨":1, "누":1, "뉴":1, "느":1, "니":1, "내":1, "냬":1, "네":1, "녜":1, "놔":1, "놰":1, "눠":1, "눼":1, "뇌":1, "뉘":1, "늬":1,
"다":1, "댜":1, "더":1, "뎌":1, "도":1, "됴":1, "두":1, "듀":1, "드":1, "디":1, "대":1, "댸":1, "데":1, "뎨":1, "돠":1, "돼":1, "둬":1, "뒈":1, "되":1, "뒤":1, "듸":1,
"따":1, "땨":1, "떠":1, "뗘":1, "또":1, "뚀":1, "뚜":1, "뜌":1, "뜨":1, "띠":1, "때":1, "떄":1, "떼":1, "뗴":1, "똬":1, "뙈":1, "뚸":1, "뛔":1, "뙤":1, "뛰":1, "띄":1,
"라":1, "랴":1, "러":1, "려":1, "로":1, "료":1, "루":1, "류":1, "르":1, "리":1, "래":1, "럐":1, "레":1, "례":1, "롸":1, "뢔":1, "뤄":1, "뤠":1, "뢰":1, "뤼":1, "릐":1,
"마":1, "먀":1, "머":1, "며":1, "모":1, "묘":1, "무":1, "뮤":1, "므":1, "미":1, "매":1, "먜":1, "메":1, "몌":1, "뫄":1, "뫠":1, "뭐":1, "뭬":1, "뫼":1, "뮈":1, "믜":1,
"바":1, "뱌":1, "버":1, "벼":1, "보":1, "뵤":1, "부":1, "뷰":1, "브":1, "비":1, "배":1, "뱨":1, "베":1, "볘":1, "봐":1, "봬":1, "붜":1, "붸":1, "뵈":1, "뷔":1, "븨":1,
"빠":1, "뺘":1, "뻐":1, "뼈":1, "뽀":1, "뾰":1, "뿌":1, "쀼":1, "쁘":1, "삐":1, "빼":1, "뺴":1, "뻬":1, "뼤":1, "뽜":1, "뽸":1, "뿨":1, "쀄":1, "뾔":1, "쀠":1, "쁴":1,
"사":1, "샤":1, "서":1, "셔":1, "소":1, "쇼":1, "수":1, "슈":1, "스":1, "시":1, "새":1, "섀":1, "세":1, "셰":1, "솨":1, "쇄":1, "숴":1, "쉐":1, "쇠":1, "쉬":1, "싀":1,
"싸":1, "쌰":1, "써":1, "쎠":1, "쏘":1, "쑈":1, "쑤":1, "쓔":1, "쓰":1, "씨":1, "쌔":1, "썌":1, "쎄":1, "쎼":1, "쏴":1, "쐐":1, "쒀":1, "쒜":1, "쐬":1, "쒸":1, "씌":1,
"아":1, "야":1, "어":1, "여":1, "오":1, "요":1, "우":1, "유":1, "으":1, "이":1, "애":1, "얘":1, "에":1, "예":1, "와":1, "왜":1, "워":1, "웨":1, "외":1, "위":1, "의":1,
"자":1, "쟈":1, "저":1, "져":1, "조":1, "죠":1, "주":1, "쥬":1, "즈":1, "지":1, "재":1, "쟤":1, "제":1, "졔":1, "좌":1, "좨":1, "줘":1, "줴":1, "죄":1, "쥐":1, "즤":1,
"짜":1, "쨔":1, "쩌":1, "쪄":1, "쪼":1, "쬬":1, "쭈":1, "쮸":1, "쯔":1, "찌":1, "째":1, "쨰":1, "쩨":1, "쪠":1, "쫘":1, "쫴":1, "쭤":1, "쮀":1, "쬐":1, "쮜":1, "쯰":1,
"차":1, "챠":1, "처":1, "쳐":1, "초":1, "쵸":1, "추":1, "츄":1, "츠":1, "치":1, "채":1, "챼":1, "체":1, "쳬":1, "촤":1, "쵀":1, "춰":1, "췌":1, "최":1, "취":1, "츼":1,
"카":1, "캬":1, "커":1, "켜":1, "코":1, "쿄":1, "쿠":1, "큐":1, "크":1, "키":1, "캐":1, "컈":1, "케":1, "켸":1, "콰":1, "쾌":1, "쿼":1, "퀘":1, "쾨":1, "퀴":1, "킈":1,
"타":1, "탸":1, "터":1, "텨":1, "토":1, "툐":1, "투":1, "튜":1, "트":1, "티":1, "태":1, "턔":1, "테":1, "톄":1, "톼":1, "퇘":1, "퉈":1, "퉤":1, "퇴":1, "튀":1, "틔":1,
"파":1, "퍄":1, "퍼":1, "펴":1, "포":1, "표":1, "푸":1, "퓨":1, "프":1, "피":1, "패":1, "퍠":1, "페":1, "폐":1, "퐈":1, "퐤":1, "풔":1, "풰":1, "푀":1, "퓌":1, "픠":1,
"하":1, "햐":1, "허":1, "혀":1, "호":1, "효":1, "후":1, "휴":1, "흐":1, "히":1, "해":1, "햬":1, "헤":1, "혜":1, "화":1, "홰":1, "훠":1, "훼":1, "회":1, "휘":1, "희":1,
}
locale = mapping()
def GetAuxiliaryWordType(text):
    textLength = len(text)
    if textLength > 1:
        singleWord = text[-1]
        if (singleWord >= '0' and singleWord <= '9') or\
           (singleWord >= 'a' and singleWord <= 'z') or\
           (singleWord >= 'A' and singleWord <= 'Z'):
            if not dictSingleWord.has_key(singleWord):
                return 1
        elif dictDoubleWord.has_key(text[-2:]):
            return 1
    return 0
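
# Illustrative sketch (added, not part of the original file): GetAuxiliaryWordType()
# returns 1 when the final syllable carries no trailing consonant, which is what
# the EUL()/I() helpers further below key off when picking a particle, e.g.:
#
#     GetAuxiliaryWordType("사과")  # -> 1, so EUL() would return "를 "
#     GetAuxiliaryWordType("수박")  # -> 0, so EUL() would return "을 "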
def CutMoneyString(sourceText, startIndex, endIndex, insertingText, backText):
    sourceLength = len(sourceText)
    if sourceLength < startIndex:
        return backText
    text = sourceText[max(0, sourceLength-endIndex):sourceLength-startIndex]
    if not text:
        return backText
    if int(text) <= 0:
        return backText
    text = str(int(text))
    if backText:
        backText = " " + backText
    return text + insertingText + backText
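
# Worked example (added for clarity; values illustrative): CutMoneyString() peels
# 4-digit groups off the right end of a digit string, so NumberToMoneyString()
# further below can build "1억 2345만 6789"-style amounts:
#
#     CutMoneyString("123456789", 0, 4, "", "")               # -> "6789"
#     CutMoneyString("123456789", 4, 8, "만", "6789")          # -> "2345만 6789"
#     CutMoneyString("123456789", 8, 12, "억", "2345만 6789")  # -> "1억 2345만 6789"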
def SecondToDHM(time):
    if time < 60:
        if IsARABIC():
            return "%.2f %s" % (time, SECOND)
        else:
            return "0" + MINUTE
    second = int(time % 60)
    minute = int((time / 60) % 60)
    hour = int((time / 60) / 60) % 24
    day = int(int((time / 60) / 60) / 24)
    text = ""
    if day > 0:
        text += str(day) + DAY
        text += " "
    if hour > 0:
        text += str(hour) + HOUR
        text += " "
    if minute > 0:
        text += str(minute) + MINUTE
    return text
def SecondToHM(time):
    if time < 60:
        if IsARABIC():
            return "%.2f %s" % (time, SECOND)
        else:
            return "0" + MINUTE
    second = int(time % 60)
    minute = int((time / 60) % 60)
    hour = int((time / 60) / 60)
    text = ""
    if hour > 0:
        text += str(hour) + HOUR
    if hour > 0:
        text += " "
    if minute > 0:
        text += str(minute) + MINUTE
    return text
def GetAlignmentTitleName(alignment):
    if alignment >= 12000:
        return TITLE_NAME_LIST[0]
    elif alignment >= 8000:
        return TITLE_NAME_LIST[1]
    elif alignment >= 4000:
        return TITLE_NAME_LIST[2]
    elif alignment >= 1000:
        return TITLE_NAME_LIST[3]
    elif alignment >= 0:
        return TITLE_NAME_LIST[4]
    elif alignment > -4000:
        return TITLE_NAME_LIST[5]
    elif alignment > -8000:
        return TITLE_NAME_LIST[6]
    elif alignment > -12000:
        return TITLE_NAME_LIST[7]
    return TITLE_NAME_LIST[8]
OPTION_PVPMODE_MESSAGE_DICT = {
0 : PVP_MODE_NORMAL,
1 : PVP_MODE_REVENGE,
2 : PVP_MODE_KILL,
3 : PVP_MODE_PROTECT,
4 : PVP_MODE_GUILD,
}
error = mapping(
CREATE_WINDOW = GAME_INIT_ERROR_MAIN_WINDOW,
CREATE_CURSOR = GAME_INIT_ERROR_CURSOR,
CREATE_NETWORK = GAME_INIT_ERROR_NETWORK,
CREATE_ITEM_PROTO = GAME_INIT_ERROR_ITEM_PROTO,
CREATE_MOB_PROTO = GAME_INIT_ERROR_MOB_PROTO,
CREATE_NO_DIRECTX = GAME_INIT_ERROR_DIRECTX,
CREATE_DEVICE = GAME_INIT_ERROR_GRAPHICS_NOT_EXIST,
CREATE_NO_APPROPRIATE_DEVICE = GAME_INIT_ERROR_GRAPHICS_BAD_PERFORMANCE,
CREATE_FORMAT = GAME_INIT_ERROR_GRAPHICS_NOT_SUPPORT_32BIT,
NO_ERROR = ""
)
GUILDWAR_NORMAL_DESCLIST = [GUILD_WAR_USE_NORMAL_MAP, GUILD_WAR_LIMIT_30MIN, GUILD_WAR_WIN_CHECK_SCORE]
GUILDWAR_WARP_DESCLIST = [GUILD_WAR_USE_BATTLE_MAP, GUILD_WAR_WIN_WIPE_OUT_GUILD, GUILD_WAR_REWARD_POTION]
GUILDWAR_CTF_DESCLIST = [GUILD_WAR_USE_BATTLE_MAP, GUILD_WAR_WIN_TAKE_AWAY_FLAG1, GUILD_WAR_WIN_TAKE_AWAY_FLAG2, GUILD_WAR_REWARD_POTION]
MINIMAP_ZONE_NAME_DICT = {
"metin2_map_a1" : MAP_A1,
"map_a2" : MAP_A2,
"metin2_map_a3" : MAP_A3,
"metin2_map_b1" : MAP_B1,
"map_b2" : MAP_B2,
"metin2_map_b3" : MAP_B3,
"metin2_map_c1" : MAP_C1,
"map_c2" : MAP_C2,
"metin2_map_c3" : MAP_C3,
"map_n_snowm_01" : MAP_SNOW,
"metin2_map_n_flame_01" : MAP_FLAME,
"metin2_map_n_desert_01" : MAP_DESERT,
"metin2_map_milgyo" : MAP_TEMPLE,
"metin2_map_spiderdungeon" : MAP_SPIDER,
"metin2_map_deviltower1" : MAP_SKELTOWER,
"metin2_map_guild_01" : MAP_AG,
"metin2_map_guild_02" : MAP_BG,
"metin2_map_guild_03" : MAP_CG,
"metin2_map_trent" : MAP_TREE,
"metin2_map_trent02" : MAP_TREE2,
"season1/metin2_map_WL_01" : MAP_WL,
"season1/metin2_map_nusluck01" : MAP_NUSLUCK,
"Metin2_map_CapeDragonHead" : MAP_CAPE,
"metin2_map_Mt_Thunder" : MAP_THUNDER,
"metin2_map_dawnmistwood" : MAP_DAWN,
"metin2_map_BayBlackSand" : MAP_BAY,
}
JOBINFO_TITLE = [
[JOB_WARRIOR0, JOB_WARRIOR1, JOB_WARRIOR2,],
[JOB_ASSASSIN0, JOB_ASSASSIN1, JOB_ASSASSIN2,],
[JOB_SURA0, JOB_SURA1, JOB_SURA2,],
[JOB_SHAMAN0, JOB_SHAMAN1, JOB_SHAMAN2,],
]
JOBINFO_DATA_LIST = [
[
["타고난 용맹과 굽히지 않는 무사의",
"기개를 사람들은 일컬어 [용자]라고",
"부른다. 어떠한 위기에서도 그들은 ",
"뒤로 물러서지 않으며, 다치고 움직",
"이기 힘든 동료를 위해 단신으로",
"적들과 마주 싸우기도 한다. 이들은",
"잘 단련된 근육과 힘, 강력한 공격력",
"으로 전장 최선두에서 공격진으로",
"활약한다. ",],
["가장 일반적인 공격형 무사로, ",
"적접전에 따른 직접 공격으로 전장",
"에서 활약한다. 군직 특성상 근력을",
"메인으로 스텟 포인트를 투자하되, ",
"적접전에 따른 생명력 / 방어력",
"확보를 위해 체력을 올린다. 또한",
"공격의 정확성을 높이기 위해 민첩",
"에도 포인트를 투자할 필요가 있다.",],
["상당 수준의 정신력을 이용하는",
"중/근거리 접전형 무사로, 각 기술",
"하나하나의 높은 공격력으로 전장에서",
"활약한다. 군직 특성상 근력을 메인",
"으로 스탯 포인트를 투자하되, ",
"중/근거리 공격의 정확성과 명중률을",
"위해 민첩을 올린다. 또한 접전 시 ",
"적 공격에 따른 생명력 / 방어력",
"확보를 위해 체력에도 포인트를",
"투자할 필요가 있다. ",],
],
[
["자객은 어떠한 상황에서도 자신의",
"몸을 숨기고 은밀한 어둠의 임무를",
"수행하면서 전장의 후위를 지원하는",
"자들이다. 이들은 아주 빠르고 신속",
"하며, 비할 데 없이 과감하고 절제된",
"행동으로 적의 급소에 치명타를 날리",
"되, 전장에선 적진을 향해 무수한",
"화살을 내뿜으며 자신의 용맹을",
"선보인다. "],
["두손 단검을 주무기로 다루며, 신속",
"하게 치고 빠지는 자객 특유의 움직임",
"으로 전장에서 활약한다. 군직 특성상",
"민첩을 메인으로 스텟 포인트를 투자",
"하되, 근력을 올려 공격력을 높인다.",
"또한 근접전에 따른 생명력/방어력 ",
"상승을 위해 체력에도 포인트를",
"투자할 필요가 있다. ",],
["활을 주무기로 다루며, 긴 시야와",
"사정거리에 따른 원거리 공격으로",
"전장에서 활약한다. 군직 특성상",
"공격 성공률의 증가를 위해 민첩을",
"메인으로 올려야 하며, 원거리",
"공격의 데미지 증가를 위해 근력을",
"올릴 필요가 있다. 또한 적들에게",
"포위되었을 시, 적 공격에 버티기",
"위한 생명력/방어력 상승을 위해",
"체력에도 포인트를 투자할 필요가",
"있다. ", ],
],
[
["수라는 [독은 독으로]의 속성으로",
"창설된 특수 속성의 군직이다. ",
"그들은 전장에서 적들의 사기를 저하",
"시키고, 악마의 힘을 실은 마탄으로",
"적의 영혼과 육신을 짓뭉갠다. 때로",
"이들은 자신의 검과 갑옷에 어둠의",
"힘을 실어, 전장에서 무사 못지 않은",
"공격력을 발휘하기도 하는데, 적들을",
"죽여대는그 모습이 워낙에 끔찍해",
"사람들은 수라를 일컬어 [마신]이라",
"부르기를 주저 앉는다."],
["환무군의 수라는 악마의 씨에서",
"얻어지는 마력을 무기나 방어구에",
"실어 무사 못지 않은 전투력으로",
"전장에서 활약한다. 군직 특성상",
"지능이 높아질수록 착용 장비에",
"실리는 마력의 위력이 증대되므로,",
"지능과 근력을 메인으로 스탯",
"포인트를 투자하되, 접전에 따른",
"생명력/방어력 확보를 위해 체력을",
"올린다. 또한 공격의 정확성과",
"회피를 위해서 민첩에도 포인트를",
"투자할 필요가 있다. ",],
["흑마군의 수라들은 각종 어둠의",
"주문과 악마의 마법으로 전장에서",
"활약한다. 군직 특성상 마법 공격이",
"주이므로 지능을 메인으로 스텟",
"포인트를 투자하되, 원거리 마법",
"공격의 정확성을 위해 민첩을 올린다.",
"또한 포위 되었을시, 적 공격에 따른",
"생명력 / 방어력 확보를 위해 체력에도",
"포인트를 투자할 필요가 있다. ",],
],
[
["무당은 용신과 자연, 두 고대의",
"힘을 다룰 수 있는 유일한 직종이다.",
"그들은 후방에서 아군을 보조하고",
"다친 동료의 부상을 회복 시키며",
"떨어진 사기를 상승시킨다. 그들은",
"아군의 수면과 휴식을 방해하는 자를 ",
"절대 용서하지 않으며, 그런 자들",
"에게는 한 점 주저 없이 주문을",
"터트려 그 비겁함을 엄히 징계한다.",],
["천룡군의 무당들은 각종 부적술과",
"보조주문에 능하며, 적의 직 / 간접",
"공격으로부터 아군을 지킨다. 군직",
"특성상 마법 능력이 주이므로 지능을",
"메인으로 스텟 포인트를 투자하되,",
"포위되었을 시, 적 공격에 따른",
"생명력 / 방어력 확보를 위해 체력을",
"올린다. 또한 원거리 마법 공격의",
"정확성을 위에 민첩에도 포인트를",
"투자할 필요가 있다. ",],
["광뢰군의 무당들은 자연의 힘을",
"빌려 아군을 회복하고, 뇌신의 ",
"힘으로 밀집한 적들에게 큰 충격을",
"입힐 수 있는 이들이다. 군직의",
"특성상 마법 능력이 주이므로 지능을",
"메인으로 스텟 포인트를 투자하되,",
"포위되었을시, 적 공격에 따른",
"생명력 / 방어력 확보를 위해 체력을",
"올린다. 또한 원거리 마법 공격의",
"정확성을 위에 민첩에도 포인트를",
"투자할 필요가 있다. "],
],
]
WHISPER_ERROR = {
1 : CANNOT_WHISPER_NOT_LOGON,
2 : CANNOT_WHISPER_DEST_REFUSE,
3 : CANNOT_WHISPER_SELF_REFUSE,
}
NOTIFY_MESSAGE = {
"CANNOT_EQUIP_SHOP" : CANNOT_EQUIP_IN_SHOP,
"CANNOT_EQUIP_EXCHANGE" : CANNOT_EQUIP_IN_EXCHANGE,
}
ATTACK_ERROR_TAIL_DICT = {
"IN_SAFE" : CANNOT_ATTACK_SELF_IN_SAFE,
"DEST_IN_SAFE" : CANNOT_ATTACK_DEST_IN_SAFE,
}
SHOT_ERROR_TAIL_DICT = {
"EMPTY_ARROW" : CANNOT_SHOOT_EMPTY_ARROW,
"IN_SAFE" : CANNOT_SHOOT_SELF_IN_SAFE,
"DEST_IN_SAFE" : CANNOT_SHOOT_DEST_IN_SAFE,
}
USE_SKILL_ERROR_TAIL_DICT = {
"IN_SAFE" : CANNOT_SKILL_SELF_IN_SAFE,
"NEED_TARGET" : CANNOT_SKILL_NEED_TARGET,
"NEED_EMPTY_BOTTLE" : CANNOT_SKILL_NEED_EMPTY_BOTTLE,
"NEED_POISON_BOTTLE" : CANNOT_SKILL_NEED_POISON_BOTTLE,
"REMOVE_FISHING_ROD" : CANNOT_SKILL_REMOVE_FISHING_ROD,
"NOT_YET_LEARN" : CANNOT_SKILL_NOT_YET_LEARN,
"NOT_MATCHABLE_WEAPON" : CANNOT_SKILL_NOT_MATCHABLE_WEAPON,
"WAIT_COOLTIME" : CANNOT_SKILL_WAIT_COOLTIME,
"NOT_ENOUGH_HP" : CANNOT_SKILL_NOT_ENOUGH_HP,
"NOT_ENOUGH_SP" : CANNOT_SKILL_NOT_ENOUGH_SP,
"CANNOT_USE_SELF" : CANNOT_SKILL_USE_SELF,
"ONLY_FOR_ALLIANCE" : CANNOT_SKILL_ONLY_FOR_ALLIANCE,
"CANNOT_ATTACK_ENEMY_IN_SAFE_AREA" : CANNOT_SKILL_DEST_IN_SAFE,
"CANNOT_APPROACH" : CANNOT_SKILL_APPROACH,
"CANNOT_ATTACK" : CANNOT_SKILL_ATTACK,
"ONLY_FOR_CORPSE" : CANNOT_SKILL_ONLY_FOR_CORPSE,
"EQUIP_FISHING_ROD" : CANNOT_SKILL_EQUIP_FISHING_ROD,
"NOT_HORSE_SKILL" : CANNOT_SKILL_NOT_HORSE_SKILL,
"HAVE_TO_RIDE" : CANNOT_SKILL_HAVE_TO_RIDE,
}
LEVEL_LIST=["", HORSE_LEVEL1, HORSE_LEVEL2, HORSE_LEVEL3]
HEALTH_LIST=[
HORSE_HEALTH0,
HORSE_HEALTH1,
HORSE_HEALTH2,
HORSE_HEALTH3,
]
USE_SKILL_ERROR_CHAT_DICT = {
"NEED_EMPTY_BOTTLE" : SKILL_NEED_EMPTY_BOTTLE,
"NEED_POISON_BOTTLE" : SKILL_NEED_POISON_BOTTLE,
"ONLY_FOR_GUILD_WAR" : SKILL_ONLY_FOR_GUILD_WAR,
}
SHOP_ERROR_DICT = {
"NOT_ENOUGH_MONEY" : SHOP_NOT_ENOUGH_MONEY,
"SOLDOUT" : SHOP_SOLDOUT,
"INVENTORY_FULL" : SHOP_INVENTORY_FULL,
"INVALID_POS" : SHOP_INVALID_POS,
"NOT_ENOUGH_MONEY_EX" : SHOP_NOT_ENOUGH_MONEY_EX,
}
STAT_MINUS_DESCRIPTION = {
"HTH-" : STAT_MINUS_CON,
"INT-" : STAT_MINUS_INT,
"STR-" : STAT_MINUS_STR,
"DEX-" : STAT_MINUS_DEX,
}
MODE_NAME_LIST = ( PVP_OPTION_NORMAL, PVP_OPTION_REVENGE, PVP_OPTION_KILL, PVP_OPTION_PROTECT, )
TITLE_NAME_LIST = ( PVP_LEVEL0, PVP_LEVEL1, PVP_LEVEL2, PVP_LEVEL3, PVP_LEVEL4, PVP_LEVEL5, PVP_LEVEL6, PVP_LEVEL7, PVP_LEVEL8, )
def GetLetterImageName():
    return "season1/icon/scroll_close.tga"
def GetLetterOpenImageName():
    return "season1/icon/scroll_open.tga"
def GetLetterCloseImageName():
    return "season1/icon/scroll_close.tga"
if 949 == app.GetDefaultCodePage():
    def EUL(name):
        if GetAuxiliaryWordType(name):
            return "를 "
        else:
            return "을 "
    def I(name):
        if GetAuxiliaryWordType(name):
            return "가 "
        else:
            return "이 "
    def DO_YOU_SELL_ITEM(sellItemName, sellItemCount, sellItemPrice):
        name = sellItemName
        if sellItemCount > 1:
            name += " "
            name += str(sellItemCount)
            name += "개"
        return name + EUL(name) + str(sellItemPrice) + "냥에 파시겠습니까?"
    def DO_YOU_BUY_ITEM(sellItemName, sellItemCount, sellItemPrice):
        name = sellItemName
        if sellItemCount > 1:
            name += " "
            name += str(sellItemCount)
            name += "개"
        return name + EUL(name) + str(sellItemPrice) + "에 사시겠습니까?"
    def REFINE_FAILURE_CAN_NOT_ATTACH(attachedItemName):
        return attachedItemName+EUL(attachedItemName)+"부착할 수 없는 아이템입니다"
    def REFINE_FAILURE_NO_SOCKET(attachedItemName):
        return attachedItemName+EUL(attachedItemName)+"부착할 수 있는 소켓이 없습니다"
    def REFINE_FAILURE_NO_GOLD_SOCKET(attachedItemName):
        return attachedItemName+EUL(attachedItemName)+"부착할 수 있는 황금 소켓이 없습니다"
    def HOW_MANY_ITEM_DO_YOU_DROP(dropItemName, dropItemCount):
        name = dropItemName
        if dropItemCount > 1:
            name += " "
            name += str(dropItemCount)
            name += "개"
        return name+EUL(name)+"버리시겠습니까?"
    def NumberToMoneyString(number):
        if number <= 0:
            return "0냥"
        number = str(number)
        result = CutMoneyString(number, 0, 4, "", "")
        result = CutMoneyString(number, 4, 8, "만", result)
        result = CutMoneyString(number, 8, 12, "억", result)
        result = result + "냥"
        return result
    def NumberToSecondaryCoinString(number):
        if number <= 0:
            return "0전"
        number = str(number)
        result = CutMoneyString(number, 0, 4, "", "")
        result = CutMoneyString(number, 4, 8, "만", result)
        result = CutMoneyString(number, 8, 12, "억", result)
        result = result + "전"
        return result
    def FISHING_NOTIFY(isFish, fishName):
        if isFish:
            return fishName + I(fishName) + "문 듯 합니다."
        else:
            return fishName + I(fishName) + "걸린듯 합니다."
    def FISHING_SUCCESS(isFish, fishName):
        if isFish:
            return fishName + EUL(fishName) + "잡았습니다!"
        else:
            return fishName + EUL(fishName) + "얻었습니다!"
elif 932 == app.GetDefaultCodePage():
    def DO_YOU_SELL_ITEM(sellItemName, sellItemCount, sellItemPrice):
        if sellItemCount > 1 :
            return "%s %s 뙿귩 %s궸봽귟귏궥궔갎" % ( sellItemName, sellItemCount, NumberToMoneyString(sellItemPrice) )
        else:
            return "%s 귩 %s궳봽귟귏궥궔갎" % (sellItemName, NumberToMoneyString(sellItemPrice) )
    def DO_YOU_BUY_ITEM(buyItemName, buyItemCount, buyItemPrice) :
        if buyItemCount > 1 :
            return "%s %s뙿귩 %s궳봼궋귏궥궔갎" % ( buyItemName, buyItemCount, buyItemPrice )
        else:
            return "%s귩 %s궳봼궋귏궥궔갎" % ( buyItemName, buyItemPrice )
    def REFINE_FAILURE_CAN_NOT_ATTACH(attachedItemName) :
        return "%s귩몧뭶궳궖궶궋귺귽긡?궳궥갃" % (attachedItemName)
    def REFINE_FAILURE_NO_SOCKET(attachedItemName) :
        return "%s귩몧뭶궥귡?긑긞긣궕궇귟귏궧귪갃" % (attachedItemName)
    def REFINE_FAILURE_NO_GOLD_SOCKET(attachedItemName) :
        return "%s귩몧뭶궳궖귡돥뗠?긑긞긣궕궇귟귏궧귪갃" % (attachedItemName)
    def HOW_MANY_ITEM_DO_YOU_DROP(dropItemName, dropItemCount) :
        if dropItemCount > 1 :
            return "%s %d 뙿귩롆궲귏궥궔갎" % (dropItemName, dropItemCount)
        else :
            return "%s귩롆궲귏궥궔갎" % (dropItemName)
    def FISHING_NOTIFY(isFish, fishName) :
        if isFish :
            return "%s 궕륣궋궰궋궫귝궎궳궥" % ( fishName )
        else :
            return "%s 궕궔궔궯궫귝궎궳궥" % ( fishName )
    def FISHING_SUCCESS(isFish, fishName) :
        if isFish :
            return "%s 귩뺕귏궑귏궢궫갏" % (fishName)
        else :
            return "%s 귩롨궸볺귢귏궢궫갏" % (fishName)
    def NumberToMoneyString(number) :
        if number <= 0 :
            return "0뿼"
        number = str(number)
        result = CutMoneyString(number, 0, 4, "", "")
        result = CutMoneyString(number, 4, 8, "뼔", result)
        result = CutMoneyString(number, 8, 12, "돪", result)
        result = result + "뿼"
        return result
    def NumberToSecondaryCoinString(number) :
        if number <= 0 :
            return "0jun"
        number = str(number)
        result = CutMoneyString(number, 0, 4, "", "")
        result = CutMoneyString(number, 4, 8, "뼔", result)
        result = CutMoneyString(number, 8, 12, "돪", result)
        result = result + "jun"
        return result
elif IsHONGKONG():
    def DO_YOU_SELL_ITEM(sellItemName, sellItemCount, sellItemPrice):
        if sellItemCount > 1 :
            return DO_YOU_SELL_ITEM2 % (sellItemName, sellItemCount, NumberToMoneyString(sellItemPrice) )
        else:
            return DO_YOU_SELL_ITEM1 % (sellItemName, NumberToMoneyString(sellItemPrice) )
    def DO_YOU_BUY_ITEM(buyItemName, buyItemCount, buyItemPrice) :
        if buyItemCount > 1 :
            return DO_YOU_BUY_ITEM2 % ( buyItemName, buyItemCount, buyItemPrice )
        else:
            return DO_YOU_BUY_ITEM1 % ( buyItemName, buyItemPrice )
    def REFINE_FAILURE_CAN_NOT_ATTACH(attachedItemName) :
        return REFINE_FAILURE_CAN_NOT_ATTACH0 % (attachedItemName)
    def REFINE_FAILURE_NO_SOCKET(attachedItemName) :
        return REFINE_FAILURE_NO_SOCKET0 % (attachedItemName)
    def REFINE_FAILURE_NO_GOLD_SOCKET(attachedItemName) :
        return REFINE_FAILURE_NO_GOLD_SOCKET0 % (attachedItemName)
    def HOW_MANY_ITEM_DO_YOU_DROP(dropItemName, dropItemCount) :
        if dropItemCount > 1 :
            return HOW_MANY_ITEM_DO_YOU_DROP2 % (dropItemName, dropItemCount)
        else :
            return HOW_MANY_ITEM_DO_YOU_DROP1 % (dropItemName)
    def FISHING_NOTIFY(isFish, fishName) :
        if isFish :
            return FISHING_NOTIFY1 % ( fishName )
        else :
            return FISHING_NOTIFY2 % ( fishName )
    def FISHING_SUCCESS(isFish, fishName) :
        if isFish :
            return FISHING_SUCCESS1 % (fishName)
        else :
            return FISHING_SUCCESS2 % (fishName)
    def NumberToMoneyString(number) :
        if number <= 0 :
            return "0 %s" % (MONETARY_UNIT0)
        number = str(number)
        result = CutMoneyString(number, 0, 4, "", "")
        result = CutMoneyString(number, 4, 8, MONETARY_UNIT1, result)
        result = CutMoneyString(number, 8, 12, MONETARY_UNIT2, result)
        result = result + MONETARY_UNIT0
        return result
    def NumberToSecondaryCoinString(number) :
        if number <= 0 :
            return "0 %s" % (MONETARY_UNIT_JUN)
        number = str(number)
        result = CutMoneyString(number, 0, 4, "", "")
        result = CutMoneyString(number, 4, 8, MONETARY_UNIT1, result)
        result = CutMoneyString(number, 8, 12, MONETARY_UNIT2, result)
        result = result + MONETARY_UNIT_JUN
        return result
elif IsNEWCIBN() or IsCIBN10():
    def DO_YOU_SELL_ITEM(sellItemName, sellItemCount, sellItemPrice):
        if sellItemCount>1:
            return "횅땍狼겉%s몸%s鹿%s쏜귑찡딜찐?" % (str(sellItemCount), sellItemName, str(sellItemPrice))
        else:
            return "횅땍狼겉%s鹿%s쏜귑찡딜찐?" % (sellItemName, str(sellItemPrice))
    def DO_YOU_BUY_ITEM(sellItemName, sellItemCount, sellItemPrice):
        if sellItemCount>1:
            return "횅땍狼겉%s몸%s鹿%s쏜귑찜쏵찐?" % (str(sellItemCount), sellItemName, str(sellItemPrice))
        else:
            return "횅땍狼겉%s鹿%s쏜귑찜쏵찐?" % (sellItemName, str(sellItemPrice))
    def REFINE_FAILURE_CAN_NOT_ATTACH(attachedItemName):
        return "轟랬穹퓌%s 돨陋구" % (attachedItemName)
    def REFINE_FAILURE_NO_SOCKET(attachedItemName):
        return "청唐옵鹿穹퓌%s 돨왝" % (attachedItemName)
    def REFINE_FAILURE_NO_GOLD_SOCKET(attachedItemName):
        return "청唐옵鹿穹퓌%s 돨뼝쏜왝" % (attachedItemName)
    def HOW_MANY_ITEM_DO_YOU_DROP(dropItemName, dropItemCount):
        if dropItemCount>1:
            return "횅땍狼휀딜%d몸%s찐?" % (dropItemCount, dropItemName)
        else:
            return "횅땍狼휀딜%s찐?" % (dropItemName)
    def FISHING_NOTIFY(isFish, fishName):
        if isFish:
            return fishName  # A suffix originally followed here, but its encoding is broken beyond recovery; it was removed after it caused encoding errors under cython.
        else:
            return "딥淪" + fishName + "죄。"
    def FISHING_SUCCESS(isFish, fishName):
        if isFish:
            return "딥淪" + fishName + "죄。"
        else:
            return "삿돤" + fishName + "죄。"
    def NumberToMoneyString(number):
        if number <= 0:
            return "0좃"
        number = str(number)
        result = CutMoneyString(number, 0, 4, "", "")
        result = CutMoneyString(number, 4, 8, "拱", result)
        result = CutMoneyString(number, 8, 12, "聾", result)
        result = result + "좃"
        return result
    def NumberToSecondaryCoinString(number):
        if number <= 0:
            return "0JUN"
        number = str(number)
        result = CutMoneyString(number, 0, 4, "", "")
        result = CutMoneyString(number, 4, 8, "拱", result)
        result = CutMoneyString(number, 8, 12, "聾", result)
        result = result + "JUN"
        return result
elif IsEUROPE() and not IsWE_KOREA() and not IsYMIR():
    def DO_YOU_SELL_ITEM(sellItemName, sellItemCount, sellItemPrice):
        if sellItemCount > 1 :
            return DO_YOU_SELL_ITEM2 % (sellItemName, sellItemCount, NumberToMoneyString(sellItemPrice) )
        else:
            return DO_YOU_SELL_ITEM1 % (sellItemName, NumberToMoneyString(sellItemPrice) )
    def DO_YOU_BUY_ITEM(buyItemName, buyItemCount, buyItemPrice) :
        if buyItemCount > 1 :
            return DO_YOU_BUY_ITEM2 % ( buyItemName, buyItemCount, buyItemPrice )
        else:
            return DO_YOU_BUY_ITEM1 % ( buyItemName, buyItemPrice )
    def REFINE_FAILURE_CAN_NOT_ATTACH(attachedItemName) :
        return REFINE_FAILURE_CAN_NOT_ATTACH0 % (attachedItemName)
    def REFINE_FAILURE_NO_SOCKET(attachedItemName) :
        return REFINE_FAILURE_NO_SOCKET0 % (attachedItemName)
    def REFINE_FAILURE_NO_GOLD_SOCKET(attachedItemName) :
        return REFINE_FAILURE_NO_GOLD_SOCKET0 % (attachedItemName)
    def HOW_MANY_ITEM_DO_YOU_DROP(dropItemName, dropItemCount) :
        if dropItemCount > 1 :
            return HOW_MANY_ITEM_DO_YOU_DROP2 % (dropItemName, dropItemCount)
        else :
            return HOW_MANY_ITEM_DO_YOU_DROP1 % (dropItemName)
    def FISHING_NOTIFY(isFish, fishName) :
        if isFish :
            return FISHING_NOTIFY1 % ( fishName )
        else :
            return FISHING_NOTIFY2 % ( fishName )
    def FISHING_SUCCESS(isFish, fishName) :
        if isFish :
            return FISHING_SUCCESS1 % (fishName)
        else :
            return FISHING_SUCCESS2 % (fishName)
    def NumberToMoneyString(n) :
        if n <= 0 :
            return "0 %s" % (MONETARY_UNIT0)
        return "%s %s" % ('.'.join([ i-3<0 and str(n)[:i] or str(n)[i-3:i] for i in range(len(str(n))%3, len(str(n))+1, 3) if i ]), MONETARY_UNIT0)
    def NumberToSecondaryCoinString(n) :
        if n <= 0 :
            return "0 %s" % (MONETARY_UNIT_JUN)
        return "%s %s" % ('.'.join([ i-3<0 and str(n)[:i] or str(n)[i-3:i] for i in range(len(str(n))%3, len(str(n))+1, 3) if i ]), MONETARY_UNIT_JUN)
| [
"[email protected]"
] | |
f8510941bd63a0e9d1c3dc21992422e227aa330c | 0aa2627e32c9fc5b81d76c0bd979ec741eb2094f | /Practice/A.Babintsev/Task_4/task4_5.py | 8fa73362cb5dfe81c33780a9184b934a9057c8a1 | [] | no_license | leksiam/PythonCourse | 7548596f57273fe4f90f56f6fe7fc18e03f58b0a | f626c6ec636f712cb65e89afc63774abb86f9725 | refs/heads/master | 2020-08-28T20:42:31.485332 | 2019-10-25T22:25:25 | 2019-10-25T22:25:25 | 206,125,407 | 0 | 0 | null | 2019-09-03T16:44:19 | 2019-09-03T16:44:18 | null | UTF-8 | Python | false | false | 556 | py | """
Interpolate templates in a string: given a string with a particular kind of
placeholder formatting, replace every occurrence of each template key with
its value from the dictionary.
"""
str1 = 'Привет, rod_f nam_f!'
dict1 = {'rod_m': 'дядя',
'rod_f': 'тетя',
'nam_m': 'Ваня',
'nam_f': 'Мотя'}
for k, v in dict1.items():
    str1 = str1.replace(k, v)
print(str1)
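
# Added sketch (not in the original): the same interpolation done in a single
# regex pass, so one replacement can never rewrite part of another:
#
#     import re
#     template = 'Привет, rod_f nam_f!'
#     print(re.sub(r'\w+_[mf]', lambda m: dict1.get(m.group(), m.group()), template))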
| [
"[email protected]"
] | |
cd3200f21c9114aabd3bf8de8f8f25fcd28713bd | f925499f896b012624118cfafd02fef76ff5075a | /src/testcase/GN_Y201H/case/GN_Y201H_NORMAL_TIMER/GN_Y201H_NORMAL_TIMER_001.py | db553092c8decb6ca656271e96b02093737f7c6e | [
"Apache-2.0"
] | permissive | maiyajj/AutoTest_script-Appium_Connect | f7c06db1d2f58682d1a9d6f534f7dd5fb65d766d | f9c2c42c281a9e2f984acb4a72dda0694b053f22 | HEAD | 2019-07-26T01:39:48.413753 | 2018-04-11T02:11:38 | 2018-04-11T02:11:38 | 112,449,369 | 30 | 22 | null | null | null | null | UTF-8 | Python | false | false | 1,252 | py | # coding=utf-8
from src.testcase.GN_Y201H.WidgetOperation import *


class GNY201HNormalTimer1(WidgetOperation):
    @case_run(False)
    def run(self):
        self.case_module = u"普通定时(#246)"  # module this case belongs to
        self.case_title = u'在线状态,临界点1组开与1组关的定时执行状态检查'  # case title
        self.zentao_id = "2079"  # ZenTao issue ID

    # case actions
    def case(self):
        self.choose_home_device(conf["MAC"]["HW"][0])
        self.delete_normal_timer()
        self.delete_delay_timer()
        self.set_power("power_off")
        self.widget_click(self.page["control_device_page"]["normal_timer"],
                          self.page["normal_timer_page"]["title"])
        now = time.strftime("%H:%M")
        time_1, time_2 = ["point", "23:59"], ["point", "00:00"]
        start_time_1, set_time_1, start_time_2, set_time_2, cycle1, cycle2 = \
            self.create_normal_timer(now, time_1, time_2)
        self.widget_click(self.page["normal_timer_page"]["to_return"],
                          self.page["control_device_page"]["title"])
        self.check_timer(start_time_1, set_time_1, u"电源已开启")
        self.check_timer(start_time_2, set_time_2, u"电源已关闭")
| [
"[email protected]"
] | |
a1d5cc1ea0836818f5d5db326760c1213c307d11 | 033cd27a3b2c1e62525dc30c2e57b95bc06a497f | /C1/C1S10JSON/ZhengZe2.py | 53dbae1c258b52bafd207bc78c8d7ff585985f75 | [] | no_license | lpjlsing/LearnwithVS | 4d2e24c30830c4eded5e30c570bdd6ca259a8eef | 3551f3a398647eeb2dc4ad003c4d934c211b6baf | refs/heads/master | 2020-05-31T10:49:11.845784 | 2019-09-30T08:19:43 | 2019-09-30T08:19:43 | 190,248,895 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,995 | py | """
Regular expressions:
re.sub with a function as the replacement
re.search
re.match
Groups -- the group() method takes an optional group number (default 0).
group(0) returns the complete match, regardless of how many groups there are.
"""
import re
s = 'ABC345629867396dGainWHAT'
s1 = 'ABC345629867396dGainWHAT'
def convert(value):
    matched = value.group()
    if int(matched) >= 6:  # the matched digit arrives as a string and must be converted to int first
        return '9'  # re.sub expects the replacement to be a string, not the number 9
    else:
        return '0'
r = re.sub(r'\d', convert, s)
print(r)
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
# re.search, re.match
print('Other regex functions:')
s2 = re.match(r'\d', s1)  # match anchors at the start of the string; it returns None if the first character does not match
print(s2)
s3 = re.search(r'\d', s1)
print(s3)
print(s3.group())
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
# The group() method and capture groups
print('~~~~~~~~~ Matching the middle of a string:')
life = 'life is short, I use python, I love python'
life1 = re.search(r'life.*python', life)  # .* matches everything (except \n) after the preceding token
life2 = re.search(r'life(.*)python', life)
life3 = re.search(r'life(.*)python(.*)python', life)
life4 = re.findall(r'life(.*)python', life)
print('search without () groups still includes the anchor characters:')
print(life1.group())  # the match includes the anchor text "life" and "python" themselves
# group(0) returns the complete match, regardless of how many groups there are
print('search with group() picks out captured spans; multiple groups come back together as a tuple:')
print(life3.group(0, 1, 2))  # group(0) is always the full match
print(life3.groups())  # only the text between the anchors, as a tuple
print('findall returns the captured group text directly, as a list:')
print(life4)  # the text captured by the first group
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
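
# Added sketch (not in the original): named groups make the same idea easier
# to read; (?P<name>...) captures under a key that group('name') can fetch:
m = re.search(r'life(?P<mid>.*)python', life)
if m is not None:
    print(m.group('mid'))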
| [
"[email protected]"
] | |
ea70a6ea9cfe96cf9c8ccaeae282c589bd2ace7a | dbffcac24ad03d4d21505b4161645e410304aa5d | /editor/dbloader.py | eb480b21c49cfb8e26dcb3f61eae1d50ef9b5b19 | [] | no_license | evertqin/RogerBlog | c3e7c376f482b81eb876994e9ff7b74c94d2b65e | 64e60195bbce0800a98af80956b3f16d7dafae97 | refs/heads/master | 2021-01-22T11:07:19.146197 | 2017-03-19T20:49:50 | 2017-03-19T20:49:50 | 36,478,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,438 | py | #!/usr/local/bin/python3.4
# the purpose of this script is to help loading blog entries into the mongodb
# will support following actions
# 1) insert blog entries
# 2) retrieve certain entries
# 3) update certain entry
# 4) delete certain entry
# blog entry structure
from pymongo import MongoClient
from bson.objectid import ObjectId
from logger import MongoLogger
logger = MongoLogger().getLogger()
logger.info("Program started")
DB_CONNECTION_STRING = 'mongodb://evertqin:[email protected]:47632/blog'
class MongoConnector:
    _client = None
    _db = None
    _posts = None

    def __init__(self, dbname):
        try:
            logger.info("Connecting to mongo client")
            self._client = MongoClient(DB_CONNECTION_STRING)
            logger.info("Successfully connected to mongodb")
        except Exception as e:  # the original bare "except e:" is a syntax error
            print(e)
        logger.info("Connecting to db")
        self._db = self._client[dbname]
        logger.info("Successfully connected to " + dbname)
        self._posts = self._db.posts

    def listAllDBCollection(self):
        print(self._db.collection_names(include_system_collections=False))

    def listAllDBEntries(self):
        for post in self._posts.find():
            yield post  # the original bare "yield" produced only None values

    def getCollection(self, name):
        return self._db[name]

if __name__ == "__main__":
    mongodb = MongoConnector("blog")
    mongodb.listAllDBCollection()
    # mongodb.listAllDBEntries()
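
# Added usage sketch (field name assumed): listAllDBEntries() is a generator,
# so iterate it to pull documents one at a time:
#
#     for post in MongoConnector("blog").listAllDBEntries():
#         print(post.get("title"))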
| [
"[email protected]"
] | |
93fbf3872d3bc072632736a89eb7442b9e7c7fc2 | a5616b991606bc76fe2a44b422a570f0acc7f9fa | /deep-learning-face-detection-class1/detect_faces.py | 98f16ea53dc6be3010a868b894589f08aaa3d23e | [] | no_license | Adiiigo/ComputerVision | 0c2b39f166dc38bce8ecdacbbf6a9ba2432ec214 | dbfa9dc1b83ee9ddbadaaf9b8f6390855f369cba | refs/heads/master | 2022-11-13T17:18:45.601706 | 2020-06-20T13:01:27 | 2020-06-20T13:01:27 | 272,775,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,367 | py | # USAGE python detect_faces.py --image rooster.jpg --prototxt deploy.prototxt.txt --model
# res10_300x300_ssd_iter_140000.caffemodel
# import the necessary packages
import numpy as np
import argparse
import cv2
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path to input image")
ap.add_argument("-p", "--prototxt", required=True, help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True, help="path to Caffe pre-trained model")
ap.add_argument("-c", "--confidence", type=float, default=0.5, help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
# load the input image and construct an input blob for the image
# by resizing to a fixed 300x300 pixels and then normalizing it
image = cv2.imread(args["image"])
(h, w) = image.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0,
(300, 300), (104.0, 177.0, 123.0))
# pass the blob through the network and obtain the detections and
# predictions
print("[INFO] computing object detections...")
net.setInput(blob)
detections = net.forward()
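
# Note (added for clarity): for this SSD-style Caffe model the output is a 4-D
# blob, conventionally shaped (1, 1, N, 7), where each of the N rows holds
# [image_id, label, confidence, x1, y1, x2, y2] with box coordinates normalized
# to [0, 1] -- hence the indexing and the w/h rescaling below.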
# loop over the detections
for i in range(0, detections.shape[2]):
    # extract the confidence (i.e., probability) associated with the
    # prediction
    confidence = detections[0, 0, i, 2]
    # filter out weak detections by ensuring the `confidence` is
    # greater than the minimum confidence
    if confidence > args["confidence"]:
        # compute the (x, y)-coordinates of the bounding box for the
        # object
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        (startX, startY, endX, endY) = box.astype("int")
        # draw the bounding box of the face along with the associated
        # probability
        text = "{:.2f}%".format(confidence * 100)
        y = startY - 10 if startY - 10 > 10 else startY + 10
        cv2.rectangle(image, (startX, startY), (endX, endY),
            (0, 0, 255), 2)
        cv2.putText(image, text, (startX, y),
            cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
# show the output image
cv2.imshow("Output", image)
cv2.waitKey(0)
| [
"[email protected]"
] | |
716a4428e5e13bcb8880401933b12900fb520ba6 | 9fdb870bb8011ab7c7d37fb584a4b353f24629ac | /Deployment/pm_install/update_ip.py | 21c1aa99902140f37362f4e3295abaa63af038af | [] | no_license | DeploymentHZ/zonekey | aacfee5d41e817be37a74248e1aa00330b27f0c8 | 5729f530a107b0f45fca449c3677c5a0d14f1b3b | refs/heads/master | 2021-01-01T16:59:39.125116 | 2015-04-27T02:23:03 | 2015-04-27T02:23:03 | 33,844,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,247 | py | #!/usr/bin/python
import os
import re
import ConfigParser
import check_netfile
from socket import socket, SOCK_DGRAM, AF_INET
config=ConfigParser.ConfigParser()
config.readfp(open('./conf/config.ini',"rb"))
NetInterface=config.get("global",'NetInterface')
ip=config.get("global",'ip')
netmask=config.get("global",'netmask')
gateway=config.get("global",'gateway')
dns1=config.get("global",'dns1')
dns2=config.get("global",'dns2')
path='/etc/sysconfig/network-scripts/'
def update_onboot():
    a = open(path+NetInterface, 'r')
    match = re.compile(r'ONBOOT')
    list = []
    while 1:
        c = a.readline()
        if not c:
            break
        elif match.search(c):
            pass
        else:
            list.append(c)
    a.close()
    list.append("ONBOOT=yes\n")
    a = open(path+NetInterface, 'w')
    for i in list:
        a.write(i)
    a.close()
def update_ip():
    a = open(path+NetInterface, 'r')
    match_bootproto = re.compile(r'BOOTPROTO')
    match_ip = re.compile(r'IPADDR')
    match_netmask = re.compile(r'NETMASK')
    match_gateway = re.compile(r'GATEWAY')
    match_dns1 = re.compile(r'DNS1')
    match_dns2 = re.compile(r'DNS2')
    list = []
    while 1:
        c = a.readline()
        if not c:
            break
        elif match_bootproto.search(c):
            pass
        elif match_ip.search(c):
            pass
        elif match_netmask.search(c):
            pass
        elif match_gateway.search(c):
            pass
        elif match_dns1.search(c):
            pass
        elif match_dns2.search(c):
            pass
        else:
            list.append(c)
    a.close()
    list.append("BOOTPROTO=static\n")
    list.append("IPADDR=%s\n" % ip)
    list.append("NETMASK=%s\n" % netmask)
    list.append("GATEWAY=%s\n" % gateway)
    list.append("DNS1=%s\n" % dns1)
    list.append("DNS2=%s\n" % dns2)
    a = open(path+NetInterface, 'w')
    for i in list:
        a.write(i)
    a.close()
def restart_ip():
    if check_netfile.isfile() == True:
        update_onboot()
        update_ip()
        os.system('/etc/init.d/network restart >>/dev/null')
    else:
        print "Network config file not found"
def get_ip():
    s = socket(AF_INET, SOCK_DGRAM)
    s.connect(('baidu.com', 0))
    return s.getsockname()
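
# Added usage sketch (assumes root privileges and a RHEL/CentOS-style
# /etc/sysconfig/network-scripts layout, as configured above):
#
#     if __name__ == '__main__':
#         restart_ip()
#         print get_ip()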
| [
"[email protected]"
] | |
9a8814ec3bdddcd2c38ad5fdae72c9647dea261a | 90fb8ce0e374248214b4aa676dd0e6c2c8c21fc6 | /kanga/cdaudio/cd.py | 86b8cf5494d32998339240305fe13f19c9ac672e | [
"Apache-2.0"
] | permissive | dacut/kanga-cdaudio | 7a4dc4ecd576770652a6dce624f15d682ae6321f | cb93c4d9a8104eac44ac41f2c4a9192dfe57eae6 | refs/heads/master | 2020-06-01T05:05:45.302932 | 2019-06-24T03:32:55 | 2019-06-24T03:32:55 | 190,648,123 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,682 | py | """
Constants in the CD audio world.
"""
from base64 import b64encode
from enum import auto, Enum, IntFlag
from hashlib import sha1
from typing import NamedTuple, Tuple
SECONDS_PER_MINUTE = 60
FRAMES_PER_SECOND = 75
FRAMES_PER_MINUTE = FRAMES_PER_SECOND * SECONDS_PER_MINUTE
BYTES_PER_FRAME = 2048 # Bytes per frame without error correction headers
BYTES_PER_FRAME_RAW = 2352 # Bytes per frame with error correction headers
GAP_FRAMES = 150 # Standard leadin gap size
TRACK_MAX = 99
INDEX_MAX = 99
LEADOUT_TRACK = 0xAA # Leadout track identifier
class TrackType(Enum):
    """
    The type of track on a CD (audio, data, or the leadout track).
    """
    audio = auto()
    data = auto()
    leadout = auto()


class TrackFlags(IntFlag):
    """
    Flags applied to a track.
    """
    # pylint: disable=C0326
    QUAD_CHANNEL = 0b1000    # Audio tracks only
    DATA_TRACK = 0b0100
    COPY_PERMITTED = 0b0010
    PREEMPHASIS = 0b0001     # Audio tracks -- preemphasis applied
    INCREMENTAL = 0b0001     # Data tracks -- data recorded incrementally


class TrackInformation(NamedTuple):
    """
    Information about a track.
    """
    track: int               # LEADOUT_TRACK (0xAA) if this is the leadout
    track_type: TrackType
    flags: TrackFlags
    start_frame: int
class DiscInformation(NamedTuple):
    """
    Information about the tracks on a disc.
    """
    first_track: int
    last_track: int
    track_information: Tuple[TrackInformation, ...]

    @property
    def musicbrainz_id(self) -> str:
        """
        The MusicBrainz disc ID.
        """
        leadout = self.track_information[-1]
        assert leadout.track_type == TrackType.leadout
        hasher = sha1(
            f"{self.first_track:02X}{self.last_track:02X}"
            f"{leadout.start_frame + GAP_FRAMES:08X}"
            .encode("ascii"))
        n_audio_tracks = 0
        for track in self.track_information:
            if track.track_type == TrackType.audio:
                hasher.update(
                    f"{track.start_frame + GAP_FRAMES:08X}".encode("ascii"))
                n_audio_tracks += 1
        # We always encode 99 track offsets; the remainder are 0.
        for _ in range(n_audio_tracks, 99):
            hasher.update(b"00000000")
        return (
            b64encode(hasher.digest(), altchars=b"._").replace(b"=", b"-")
            .decode("ascii"))
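
# Note (added): the digest above follows the MusicBrainz disc-ID recipe --
# SHA-1 over the first/last track numbers and the frame offsets (leadout plus
# the 150-frame lead-in gap first, then up to 99 zero-padded track offsets),
# base64-encoded with "._" and "-" standing in for "+/" and "=" for URL safety.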
class MSF(NamedTuple):
    """
    Position on a disc specified in minutes, seconds, and frames.
    """
    minute: int
    second: int
    frame: int

    @property
    def lba(self) -> int:
        """
        Convert this MSF position to a logical block address (LBA) -- i.e. a
        pure frame count.
        """
        return (self.minute * FRAMES_PER_MINUTE +
                self.second * FRAMES_PER_SECOND +
                self.frame)

    @property
    def is_valid(self):
        """
        Indicates whether this position is valid: all fields are non-negative,
        frame < 75, and second < 60.
        """
        # pylint: disable=C0122
        return (0 <= self.minute and
                0 <= self.second < SECONDS_PER_MINUTE and
                0 <= self.frame < FRAMES_PER_SECOND)

    @staticmethod
    def from_lba(frame: int) -> "MSF":
        """
        Convert a logical block address (in frames) to MSF.
        """
        minute, frame = divmod(frame, FRAMES_PER_MINUTE)
        second, frame = divmod(frame, FRAMES_PER_SECOND)
        return MSF(minute=minute, second=second, frame=frame)
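
# Added example (illustrative values): 437 frames round-trips through MSF at
# 75 frames per second and 60 seconds per minute:
#
#     >>> MSF.from_lba(437)
#     MSF(minute=0, second=5, frame=62)
#     >>> MSF(minute=0, second=5, frame=62).lba
#     437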
class TrackIndex:
    """
    Position on a disc specified in track and index.
    """
    __slots__ = ("_track", "_index")

    def __init__(self, track: int, index: int) -> None:
        super(TrackIndex, self).__init__()
        if not isinstance(track, int):
            raise TypeError("track must be an int")
        if not 0 <= track <= TRACK_MAX:
            raise ValueError(
                f"track must be between 0 and {TRACK_MAX}, inclusive: {track}")
        if not isinstance(index, int):
            raise TypeError("index must be an int")
        if not 0 <= index <= INDEX_MAX:
            raise ValueError(
                f"index must be between 0 and {INDEX_MAX}, inclusive: {index}")
        self._track = track
        self._index = index

    @property
    def track(self) -> int:
        """
        The track on the disc.
        """
        return self._track

    @property
    def index(self) -> int:
        """
        The index within the track.
        """
        return self._index

    def __repr__(self) -> str:
        return f"TrackIndex(track={self.track}, index={self.index})"
| [
"[email protected]"
] | |
13f8adf1c5b6a70c92addcf66ba9d5242f299965 | d8e88d6ee4bd75bbfbd16f9a5ee2cba1f85c524a | /report/migrations/0001_initial.py | 150639e72f7975b45efcee2f37ea50e50b41cac6 | [] | no_license | grahamgilbert/imagr_server | 5c813f662086811d63026afa2d54e84d374fc309 | 7865954167aa0e007f415d8ffc520963b316e659 | refs/heads/master | 2021-06-27T03:28:57.393777 | 2017-02-09T00:56:14 | 2017-02-09T00:56:14 | 37,206,092 | 18 | 3 | null | 2017-02-09T00:56:15 | 2015-06-10T15:38:03 | JavaScript | UTF-8 | Python | false | false | 566 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Computer',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('serial_number', models.CharField(max_length=200)),
                ('date_added', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
| [
"[email protected]"
] | |
3e90bac4074c33e939a8c0524831f3abee050c39 | 2853afc099ad4e56e0b1a3304a74d5401eb6c268 | /diffopt/tests/test_logreg.py | 52fb8daca7d49d7abc66339158057da775ecd9be | [] | no_license | tomMoral/diffopt | 6f0211129da66ced6f0245e45efd3b0a093b217c | e32f4584cecbb88683d0ad4e3a747df0a9c6287c | refs/heads/master | 2022-04-05T11:30:09.275398 | 2020-02-07T01:50:00 | 2020-02-07T01:50:00 | 238,401,092 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,239 | py | import pytest
import autograd.numpy as np
from diffopt.logreg import LogReg
from autograd import grad, jacobian
def logreg_loss(x, D, z, lbda):
    res = - x * np.dot(D, z)
    return np.mean(np.log1p(np.exp(res))) + .5 * lbda * np.sum(z ** 2)


def gradient_descent(x, D, lbda, step, n_iter):
    n, p = D.shape
    z = np.zeros(p)
    for i in range(n_iter):
        grad_z = np.dot(D.T, - x / (1. + np.exp(x * np.dot(D, z)))) / n
        grad_z += lbda * z
        z -= step * grad_z
    return z


def gradient_descent_loss(x, D, lbda, step, n_iter):
    z = gradient_descent(x, D, lbda, step, n_iter)
    return logreg_loss(x, D, z, lbda)


def d2(x, D, z, lbda):
    n, p = D.shape
    u = np.dot(D, z)
    res = x * u
    f_res = np.exp(res) / (1 + np.exp(res)) ** 2
    dzz = np.dot(D.T, (x ** 2 * f_res)[:, None] * D) / n
    dzz += lbda * np.eye(p)
    dxz = D * (u * x * f_res - 1 / (1. + np.exp(res)))[:, None] / n
    return dzz, dxz


def grad_analytic(x, D, lbda, step, n_iter):
    n, p = D.shape
    z = gradient_descent(x, D, lbda, step, n_iter)
    return -np.dot(D, z) / (1. + np.exp(x * np.dot(D, z))) / n


def grad_implicit(x, D, lbda, step, n_iter):
    n, p = D.shape
    z = gradient_descent(x, D, lbda, step, n_iter)
    dzz, dxz = d2(x, D, z, lbda)
    dx = -np.dot(D, z) / (1. + np.exp(x * np.dot(D, z))) / n
    dz = np.dot(D.T, - x / (1. + np.exp(x * np.dot(D, z)))) / n
    dz += lbda * z
    return dx - np.dot(dxz, np.linalg.solve(dzz, dz))
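
# Note (added): grad_implicit applies the implicit function theorem at the
# (approximate) inner minimizer z*(x). Because the inner gradient dz vanishes
# at the true optimum, the total derivative of the outer loss reduces to
# dx - dxz @ solve(dzz, dz), which should agree with grad_autodiff up to the
# optimization error left after n_iter gradient steps.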
grad_autodiff = grad(gradient_descent_loss)
@pytest.mark.parametrize('n_iter', [1, 10, 100, 1000])
def test_logreg_np(n_iter):
    n, p = 10, 30
    reg = 1.3
    rng = np.random.RandomState(0)
    D = rng.randn(n, p)
    x = rng.randn(1, n)
    # Compute true minimizer
    logreg = LogReg(n_layers=n_iter)
    z_star, _ = logreg.transform(x, D, reg)
    step = 1 / (np.linalg.norm(D, ord=2) ** 2 / 4 / n + reg)
    print(np.linalg.norm(D, ord=2))
    z_np = gradient_descent(x.reshape(-1), D, reg, step, n_iter)
    assert np.allclose(z_np[None], z_star)
    loss = logreg.score(x, D, reg)
    loss_np = logreg_loss(x, D, z_np, reg)
    assert np.isclose(loss, loss_np)
def test_gradient_definition():
    n_iter = 1000
    n, p = 10, 30
    reg = 1.3
    rng = np.random.RandomState(0)
    D = rng.randn(n, p)
    x = rng.randn(1, n)
    step = 1 / (np.linalg.norm(D, ord=2) ** 2 / 4 / n + reg)
    g1 = grad_analytic(x.reshape(-1), D, reg, step, n_iter)
    g2 = grad_autodiff(x.reshape(-1), D, reg, step, n_iter)
    g3 = grad_implicit(x.reshape(-1), D, reg, step, n_iter)
    assert np.allclose(g2, g1)
    assert np.allclose(g2, g3)
@pytest.mark.parametrize('n_iter', [1, 10, 100, 1000])
@pytest.mark.parametrize('grad, f_grad', [('analytic', grad_analytic),
                                          ('implicit', grad_implicit),
                                          ('autodiff', grad_autodiff)])
def test_gradient(n_iter, grad, f_grad):
    n, p = 10, 30
    reg = 1.3
    rng = np.random.RandomState(0)
    D = rng.randn(n, p)
    x = rng.randn(1, n)
    step = 1 / (np.linalg.norm(D, ord=2) ** 2 / 4 / n + reg)
    g_np = f_grad(x.reshape(-1), D, reg, step, n_iter)
    # Compute gradient with default parameters
    logreg_ana = LogReg(n_layers=n_iter, gradient_computation=grad)
    g_star = logreg_ana.get_grad_x(x, D, reg)
    assert np.allclose(g_np[None], g_star)
    # Compute gradient changing the parameter
    with pytest.raises(NotImplementedError):
        g_star = logreg_ana.get_grad_x(x, D, reg, computation='fake')
    g_star = logreg_ana.get_grad_x(x, D, reg, computation=grad)
    assert np.allclose(g_np[None], g_star)
@pytest.mark.parametrize('n_iter', [1, 10, 100, 1000])
def test_jacobian(n_iter):
    n, p = 10, 30
    reg = 1.3
    rng = np.random.RandomState(0)
    D = rng.randn(n, p)
    x = rng.randn(1, n)
    # Compute true minimizer
    logreg_ana = LogReg(n_layers=n_iter, gradient_computation='autodiff')
    z_star, J_star, _ = logreg_ana.transform_with_jacobian(x, D, reg)
    step = 1 / (np.linalg.norm(D, ord=2) ** 2 / 4 / n + reg)
    auto_jacobian = jacobian(gradient_descent)
    J_np = auto_jacobian(x.reshape(-1), D, reg, step, n_iter)
    assert np.allclose(J_np[None], J_star)
| [
"[email protected]"
] | |
b91185af076db82111f12b7f041c859ee9ad7af4 | be9820090b95012d95f90deaa9f8007934dedc37 | /scripts/longevity_analysis/dailyScansPlot.py | d530106a9fad1d4f13ca4cdf9629b735e4ad0b69 | [] | no_license | apingault/webdcs-GIF | c97d4c0d6206976dddf0fce200798138b8d889d5 | b4980bd72c9486ffc802a614013ca879edb01327 | refs/heads/master | 2020-04-02T17:36:59.124251 | 2018-09-15T10:11:53 | 2018-09-15T10:11:53 | 154,664,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,789 | py | import sys,os,glob
import ROOT
import MySQLdb
import shutil
from optparse import OptionParser
from subprocess import call, check_output
from array import array
import ROOT
ROOT.gROOT.SetBatch()
ROOT.gStyle.SetOptStat(0)
ROOT.gStyle.SetOptTitle(0)
# load the GIFPP library
execfile("GIFppLib.py")
parser = OptionParser()
parser.add_option("", "--chamber", dest='chamber', type='string', help="Chamber name")
(opts,args) = parser.parse_args()
if opts.chamber is None: parser.error('Please provide chamber name')
# Definition of the CMS RE scan modes
# WP > 8500, STBY < 8500, OFF < 2000
scan_modes = ["DG_WP", "SG_BOT_WP", "SG_TOP_WP", "SG_TN_WP", "SG_TW_WP", "DG_STBY", "SG_BOT_STBY", "SG_TN_STBY", "SG_TW_STBY"]
scan_labels = ["Double gap - working point", "Single gap BOT - working point", "Single gap TN+TW - working point", "Single gap TN - working point", "Single gap TW - working point", "Double gap - standby", "Single gap BOT - standby", "Single gap TN - standby", "Single gap TW - standby"]
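
# Added illustration (thresholds from the comment above): e.g. an HV point with
# HV_BOT=9600, HV_TN=9600, HV_TW=9600 classifies as "DG_WP", while
# HV_BOT=6500, HV_TN=0, HV_TW=0 classifies as "SG_BOT_STBY".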
'''
longevity_daily
'''
# select all runs
HVbound1 = 8500
HVbound2 = 2000
# Dict holding arrays of all the data
curr = {}
rate = {}
xTime = []
xQint = []
for mode in scan_modes:
    curr[mode] = []
    rate[mode] = []
# Select all run IDs for daily scan
db = MySQLdb.connect(host='localhost', user='root', passwd='UserlabGIF++', db='webdcs', cursorclass=MySQLdb.cursors.DictCursor)
cu = db.cursor()
runids = []
cu.execute("SELECT id FROM hvscan WHERE label = 'longevity_daily' AND id > 1935 AND id")
for l in cu.fetchall():
    if 1947 == int(l['id']): continue
    runids.append(int(l['id']))
if __name__ == "__main__":
    for id in runids:
        print "Analyze run %d" % id
        scan = GIFppLib()  # load the scan object
        scan.loadScan(id)  # set the scan ID
        pointFound = False
        #xQint.append()
        # loop over all the HV points in the scan
        for i in scan.getHVPoints():
            print " - HVPoint %s" % i
            HV_BOT = scan.getHV(opts.chamber, "BOT", i)
            HV_TN = scan.getHV(opts.chamber, "TN", i)
            HV_TW = scan.getHV(opts.chamber, "TW", i)
            scan_mode = ""
            if HV_BOT > HVbound2 and HV_TN > HVbound2 and HV_TW > HVbound2: scan_mode = "DG"
            if HV_BOT > HVbound2 and HV_TN < HVbound2 and HV_TW < HVbound2: scan_mode = "SG_BOT"
            if HV_BOT < HVbound2 and HV_TN > HVbound2 and HV_TW > HVbound2: scan_mode = "SG_TOP"
            if HV_BOT < HVbound2 and HV_TN > HVbound2 and HV_TW < HVbound2: scan_mode = "SG_TN"
            if HV_BOT < HVbound2 and HV_TN < HVbound2 and HV_TW > HVbound2: scan_mode = "SG_TW"
            if scan_mode == "": continue
            if max([HV_BOT, HV_TN, HV_TW]) > HVbound1: scan_mode += "_WP"
            else: scan_mode += "_STBY"
            if not (scan_mode == "DG_WP" or scan_mode == "DG_STBY"): continue
            print scan_mode
            # Get currents
            I_BOT = scan.getADC(opts.chamber, "BOT", i)*11694.25
            I_TN = scan.getADC(opts.chamber, "TN", i)*6432.00
            I_TW = scan.getADC(opts.chamber, "TW", i)*4582.82
            I_TOT = I_BOT + I_TN + I_TW
            # Get rates
            R_A = scan.getRate(opts.chamber, "A", i)
            R_B = scan.getRate(opts.chamber, "B", i)
            R_C = scan.getRate(opts.chamber, "C", i)
            R_TOT = scan.getRate(opts.chamber, "TOT", i)
            # Calculate charge deposition
            #area = ch['area'] # chamber area in cm2
            #charge_dep = 1e6 * I_TOT / ( R_TOT)
            #print charge_dep
            #charge_dep_err = 0
            # Fill data
            curr[scan_mode].append(I_TOT)
            rate[scan_mode].append(R_TOT)
            pointFound = True
        if pointFound:
            xTime.append(scan.time_start)
# make the plots
print len(rate["DG_STBY"])
print len(xTime)
#sys.exit()
c = ROOT.TCanvas("c", "c", 600, 600)
c.SetTopMargin(0.06)
c.SetRightMargin(.05)
c.SetBottomMargin(1)
c.SetLeftMargin(0.12)
for mode in ["DG_WP", "DG_STBY"]:
    for param in ["rate", "curr"]:
        for t in ["time", "qint"]:
            if t == "qint":
                continue
                xLabel = "Integrated charge [mC/cm#{2}]"
                x = None
            else:
                xLabel = "Date"
                x = xTime
            if param == "rate":
                yLabel = "Rate [Hz/cm]"
                y = rate[mode]
            else:
                yLabel = "Current [#muA]"
                y = curr[mode]
            g = ROOT.TGraph(len(x), array('d', x), array('d', y))
            g.GetXaxis().SetTitleSize(.04);
            g.GetXaxis().SetTitle(xLabel)
            if t == "time":
                g.GetXaxis().SetTimeDisplay(1);
                g.GetXaxis().SetNdivisions(-505);
                g.GetXaxis().SetTimeFormat("%d/%m %F 1970-01-01 00:00:00");
            g.GetYaxis().SetTitleOffset(1.3)
            g.GetYaxis().SetTitleSize(.04)
            g.GetYaxis().SetTitle(yLabel)
            g.SetMarkerStyle(21)
            g.SetMarkerSize(.8)
            g.SetLineWidth(2)
            g.SetMarkerStyle(21)
            g.SetMarkerSize(.8)
            g.SetLineWidth(2)
            g.SetLineColor(ROOT.kRed)
            g.SetMarkerColor(ROOT.kRed)
            miny = .95*ROOT.TMath.MinElement(g.GetN(), g.GetY())
            maxy = 1.15*ROOT.TMath.MaxElement(g.GetN(), g.GetY())
            g.GetYaxis().SetRangeUser(miny, maxy)
            g.SetMinimum(miny)
            g.SetMaximum(maxy)
            g.Draw("ALP")
            # topText LEFT
            leftText = ROOT.TLatex()
            leftText.SetNDC()
            leftText.SetTextFont(43)
            leftText.SetTextSize(20)
            leftText.SetTextAlign(11)
            leftText.DrawLatex(.12, .95, scan_labels[scan_modes.index(mode)])
            # topText RIGHT
            right = ROOT.TLatex()
            right.SetNDC()
            right.SetTextFont(43)
            right.SetTextSize(20)
            right.SetTextAlign(31)
            right.DrawLatex(.95, .95, "")
            # CMS flag
            text1 = ROOT.TLatex()
            text1.SetTextFont(42);
            text1.SetNDC();
            text1.DrawLatex(c.GetLeftMargin()+ 0.02, 1-c.GetTopMargin()- 0.05, "#bf{CMS},#scale[0.75]{ #it{Work in progress}}");
            c.SaveAs("/var/operation/STABILITY/SUMMARY/%s/Daily_Scan/%s_%s_%s.pdf" % (opts.chamber, param, mode, t))
            c.SaveAs("/var/operation/STABILITY/SUMMARY/%s/Daily_Scan/%s_%s_%s.png" % (opts.chamber, param, mode, t))
            c.Clear()
| [
"[email protected]"
] | |
fcdd4b11c39b95c8a0ee94ecc2ef64ebe2ac8717 | e9eed586eb25a8805411a0c1069f79fb70be957d | /Home/migrations/0021_auto_20190423_2056.py | 10ea2db9049f62e62370fb769a5a163e8687c091 | [
"MIT"
] | permissive | jay1999ke/PureQPA | 61d250f85889867502a46f87385d825b764bab0c | c5ba6d7998d5fb1544b81bc076dbd19c3017fa9e | refs/heads/master | 2020-04-24T18:05:00.321716 | 2019-06-21T17:39:51 | 2019-06-21T17:39:51 | 172,169,063 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | # Generated by Django 2.1.5 on 2019-04-23 15:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Home', '0020_scoreexam_outof'),
]
operations = [
migrations.AlterField(
model_name='scoreexam',
name='outof',
field=models.IntegerField(),
),
]
| [
"[email protected]"
] | |
9e28733749a2689a4eb44106562c0bee9d47cee7 | d721258b53f0f44b1010cb8e8efac8e2a5c96c26 | /player/migrations/0009_move_activity_log.py | a3ede81b0cbaf3374740ff87efa489c26a11c33c | [
"LicenseRef-scancode-proprietary-license",
"MIT"
] | permissive | kdechant/eamon | a6662285f51a6cad5797bb9be92ca709ae36921c | 080a43aa80c3a1605c402e68616545a8e9c7975c | refs/heads/master | 2023-05-24T08:20:18.551604 | 2022-08-14T10:27:01 | 2023-04-08T07:31:45 | 49,559,304 | 28 | 7 | MIT | 2023-03-14T21:09:55 | 2016-01-13T08:07:28 | TypeScript | UTF-8 | Python | false | false | 665 | py | # Generated by Django 3.2.12 on 2022-05-15 08:36
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('player', '0008_activitylog'),
]
operations = [
migrations.RunSQL('SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0'),
migrations.RunSQL(
'REPLACE INTO eamon.player_activitylog '
'(id, `type`, value, created, adventure_id, player_id) '
' SELECT id, `type`, value, created, adventure_id, player_id '
' FROM adventure_activitylog'),
migrations.RunSQL('SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS')
]
| [
"[email protected]"
] | |
5187813ddac759a286058ca54516eb8487319722 | dd07c7d498e7f8193383b9e30b983d792d2f6708 | /src/data/make_dataset_3.py | 6fdd4c9462fa9e59349f2951953de14c0fe8b313 | [] | no_license | saurabh-kataria/9-jhu | 5b5d3ea94287db332dde1e45f44e4a164fb15a5b | fb00c5635a3662fd0f1ababfb31f7bfceeed5b2a | refs/heads/master | 2020-03-18T10:41:37.997529 | 2018-04-11T17:56:16 | 2018-04-11T17:56:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,295 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 25 10:50:15 2018
@author: peter
"""
from torch.utils.data import DataLoader
import os
from src.data.data_functions import Data_synthesis_1, save_audio_
from src.features.features_functions import makedir
from src.datasets.make_dataset_1 import save_preproccesing_parameters
def make_dataset_3(output_dir):
"""
Dataset 1 consists of 6*5 hours of synthetic noisy speech. About the same length
as the noise, however it has been sampled with It has been sampled with
replacemnt. The Clean files are from switchboard and the noise is anotated
noise only segments from the lre17_dev set. Each sample is 5 seconds long.
SNR
10 % is reserved for the validation set
"""
# output_dir a = Path('.') / 'data' / 'processed' / 'dataset_1'
train_len = int(6.6 * .9 * 3600 / 5 * 5) # synthesise 5 times the train noise
test_len = int(6.6 * .1 * 3600 / 5 * 5) # synthesise 5 times the test noise
train_set = Data_synthesis_1(length=train_len, speech_list='lre_train')
training_data_loader = DataLoader(train_set, batch_size=1, num_workers=2)
t_path_str_x = os.path.join(output_dir, 'train', 'x', 'sample_{}.wav')
t_path_str_y = os.path.join(output_dir, 'train', 'y', 'sample_{}.wav')
validation_set = Data_synthesis_1(length=test_len, test=True, speech_list='lre_train')
validation_data_loader = DataLoader(validation_set, batch_size=1, num_workers=2)
v_path_str_x = os.path.join(output_dir, 'val', 'x', 'sample_{}.wav')
v_path_str_y = os.path.join(output_dir, 'val', 'y', 'sample_{}.wav')
list_ = ((t_path_str_x, t_path_str_y, training_data_loader),
(v_path_str_x, v_path_str_y, validation_data_loader)
)
for path_str_x, path_str_y, data_loader in list_:
makedir(os.path.dirname(path_str_x))
makedir(os.path.dirname(path_str_y))
for i, (x, y) in enumerate(data_loader):
x, y = x.numpy()[0], y.numpy()[0]
save_audio_(x, path_str_x.format(i))
save_audio_(y, path_str_y.format(i))
if __name__ == '__main__':
dataset_dir = os.path.join(*['data', 'processed', 'dataset_3'])
make_dataset_3(dataset_dir)
save_preproccesing_parameters(dataset_dir)
| [
"[email protected]"
] | |
070e718d595c6d3fb558b4037c8a23039926c5b7 | 7118408358390c53d13e2993a75cac4b8e745675 | /unittests/heuristic_tests.py | 5b94bc240d0e504d45cc2da5d3d32e6a25deb110 | [] | no_license | wsgan001/repeated_pattern_discovery | aefcab272968c29ac461d767729fb3514d2efdc3 | 53e813743780bfdc9ca80ea7c2512b6df9fdf246 | refs/heads/master | 2020-03-31T04:19:20.364370 | 2018-03-27T08:21:40 | 2018-03-27T09:20:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | import unittest
from tec import TEC
from vector import Vector
from dataset import Dataset
import heuristics
class HeuristicTest(unittest.TestCase):

    def test_bounding_box(self):
        dataset = Dataset('unittest_data/heuristics_test.csv')
        dataset = Dataset.sort_ascending(dataset)
        tec = TEC([Vector([1, 3]), Vector([3, 5]), Vector([4, 1]), Vector([5, 3])], [0, 3, 4, 6], [Vector([0, 0])])
        self.assertEqual(heuristics.bounding_box_compactness(tec, dataset), 4/9)

    def test_pattern_width(self):
        tec = TEC([Vector([1, 3, 4]), Vector([1, 1, 5]), Vector([5, 1, 2])], [0, 1, 2], [Vector([0, 0, 0])])
        self.assertEqual(heuristics.pattern_width(tec), 4)

    def test_pattern_volume(self):
        tec = TEC([Vector([2, -1, 0]), Vector([-1, 2, -1]), Vector([0, 1, 2])], [0, 1, 2], [Vector([0, 0, 0])])
        self.assertEqual(heuristics.pattern_volume(tec), 27)


if __name__ == '__main__':
    unittest.main() | [
"[email protected]"
] | |
afb5a14d99e8e12b35967c08435773a376ea4b94 | bb771867f01da0df9750dd0e06ac12c491326c57 | /server.py | 9a99e8ad64cc25eb3ea991a73a3dfae440aa51cb | [] | no_license | prasannakarki77/portfo | c373623ea6b62eff22e986e94d8e86d1defab1bb | 8c5d2c8de93066f46e4c76f75874e7002b198a38 | refs/heads/master | 2023-01-12T04:56:56.782759 | 2020-11-18T08:11:50 | 2020-11-18T08:11:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,206 | py | from flask import Flask, render_template, url_for, request, redirect
import csv

app = Flask(__name__)


@app.route('/')
def my_home():
    return render_template('index.html')


@app.route('/<string:page_name>')
def html_page(page_name):
    return render_template(page_name)


def write_to_file(data):
    with open('database.txt', mode='a') as database:
        email = data["email"]
        subject = data["subject"]  # the original read data["message"] here, a copy-paste bug
        message = data["message"]
        database.write(f'\n{email},{subject},{message}')


def write_to_csv(data):
    with open('database.csv', newline='', mode='a') as database2:
        email = data["email"]
        subject = data["subject"]  # same copy-paste bug fixed here
        message = data["message"]
        csv_writer = csv.writer(database2, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        csv_writer.writerow([email, subject, message])


@app.route('/submit_form', methods=['POST', 'GET'])
def submit_form():
    if request.method == 'POST':
        try:
            data = request.form.to_dict()
            write_to_csv(data)
            return redirect('/thank_you.html')
        except Exception:
            return 'did not save to database'
    else:
        return 'something went wrong. Try again' | [
"[email protected]"
] | |
160692ada22f13ed67894780e73e61c7aff4f97c | de59ece5d773d8607ba7afe747088ff07062494c | /py-core/None/none.check-variable-is-none.py | b552c37b1721f7e771989f267bdcddc3d8d68aba | [] | no_license | loggar/py | 4094c6919b040dfc0bb5453dc752145b5f3b46ba | 1116969fa6de00bbc30fe8dcf6445aa46190e506 | refs/heads/master | 2023-08-21T16:47:41.721298 | 2023-08-14T16:12:27 | 2023-08-14T16:12:27 | 114,955,782 | 0 | 0 | null | 2023-07-20T15:11:04 | 2017-12-21T03:01:54 | Python | UTF-8 | Python | false | false | 544 | py | null_variable = None
not_null_variable = 'Hello There!'
# The is keyword
if null_variable is None:
    print('null_variable is None')
else:
    print('null_variable is not None')

if not_null_variable is None:
    print('not_null_variable is None')
else:
    print('not_null_variable is not None')

# The == operator
if null_variable == None:
    print('null_variable is None')
else:
    print('null_variable is not None')

if not_null_variable == None:
    print('not_null_variable is None')
else:
    print('not_null_variable is not None')
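
# Added note: PEP 8 recommends "is None" / "is not None" -- an identity check
# against the None singleton -- over "== None", which can be fooled by classes
# that define a custom __eq__.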
| [
"[email protected]"
] | |
21a3f7ff6f84770d3750f3c4df26b2baba6fd5a2 | e8ebcccc0d810d0da1fe6dbec855a13e75f47f60 | /plone/app/toolbar/testing.py | 936139369f23a5de2c4d71fd89a49304b0ca54b1 | [] | no_license | izak/plone.app.toolbar | 3254be14c0ac769d2f0890e00821698b5d1db9db | 1cbe514a9216bbbe6f6f42f7701523a9ee14d1f7 | refs/heads/master | 2021-01-16T21:31:08.976374 | 2012-02-28T12:53:41 | 2012-02-28T12:53:41 | 3,371,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,770 | py | from plone.app.testing import PLONE_FIXTURE
from plone.app.testing import PloneSandboxLayer
from plone.app.testing import TEST_USER_NAME
from plone.app.testing import TEST_USER_PASSWORD
from plone.app.testing import applyProfile
from plone.app.testing.layers import FunctionalTesting
from plone.app.testing.layers import IntegrationTesting
from Products.CMFCore.utils import getToolByName
from zope.configuration import xmlconfig
class Toolbar(PloneSandboxLayer):

    defaultBases = (PLONE_FIXTURE,)

    def setUpZope(self, app, configurationContext):
        # load ZCML
        import plone.app.toolbar
        xmlconfig.file('configure.zcml', plone.app.toolbar, context=configurationContext)

    def setUpPloneSite(self, portal):
        # install into the Plone site
        applyProfile(portal, 'plone.app.toolbar:default')
        workflowTool = getToolByName(portal, 'portal_workflow')
        workflowTool.setDefaultChain('plone_workflow')

TOOLBAR_FIXTURE = Toolbar()
TOOLBAR_INTEGRATION_TESTING = IntegrationTesting(bases=(TOOLBAR_FIXTURE,), name="TOOLBAR:Integration")
TOOLBAR_FUNCTIONAL_TESTING = FunctionalTesting(bases=(TOOLBAR_FIXTURE,), name="TOOLBAR:Functional")

def browser_login(portal, browser, username=None, password=None):
    handleErrors = browser.handleErrors
    try:
        browser.handleErrors = False
        browser.open(portal.absolute_url() + '/login_form')
        if username is None:
            username = TEST_USER_NAME
        if password is None:
            password = TEST_USER_PASSWORD
        browser.getControl(name='__ac_name').value = username
        browser.getControl(name='__ac_password').value = password
        browser.getControl(name='submit').click()
    finally:
        browser.handleErrors = handleErrors
| [
"[email protected]"
] | |
93835ef44bb55e7086e23d364a0db3f238cb6244 | 80ab0c5d43c4d65562c7d7918b477418d49c38de | /tutorial/tutorial_04.py | e421383039911da2c256a06f6be39bbdbf2ad413 | [] | no_license | peterpeter5/shallot | 7cb30c723417031bff8bb2ec4b0ab1dd3d5dd788 | bd6be27ad73e648f148c4aa222306e8be4eef303 | refs/heads/master | 2021-08-17T03:08:56.523537 | 2020-04-18T10:23:54 | 2020-04-18T10:23:54 | 162,873,412 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,241 | py |
import time
import asyncio
from shallot import websocket, standard_not_found, build_server
from shallot.response import ws_send
from shallot.middlewares import wrap_routes, apply_middleware
@websocket
async def fan_in(request, receiver):
    async for message in receiver:
        # do something useful, for example print the data
        print(message)


@websocket
async def fan_out(request, receiver):
    while True:
        yield ws_send(f"current-time-stamp {time.time()}")
        await asyncio.sleep(1)


@websocket
async def one_to_one(request, receiver):
    async for message in receiver:
        if message == "hello":
            yield ws_send("hello beautiful")
        elif message == "exit":
            yield ws_send("byebye")
            break
        elif message == "i like you":
            yield ws_send("That is very nice! I like you too!")
        else:
            yield ws_send("pardon me. I do not have a reply to this")
routes = [
("/fan-in", ["WS"], fan_in),
("/fan-out", ["WS"], fan_out),
("/chatbot", ["WS"], one_to_one),
]
app = build_server(apply_middleware(
wrap_routes(routes)
)(standard_not_found))
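
# Client-side sketch for exercising the endpoints above; assumes the
# third-party 'websockets' package and a hypothetical local address:
#
#   import asyncio, websockets
#
#   async def probe():
#       async with websockets.connect("ws://127.0.0.1:8000/chatbot") as ws:
#           await ws.send("hello")
#           print(await ws.recv())  # -> "hello beautiful"
#
#   asyncio.get_event_loop().run_until_complete(probe())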
if __name__ == "__main__":
import uvicorn
uvicorn.run(app) | [
"dev.peterpeter5@gmailcom"
] | dev.peterpeter5@gmailcom |
1560783a20acc84a7d8df5bb1bed611ed7028089 | e6d6a0a1998cc2cca6ae9ed6643bd6704ecd245e | /apps/orgs/migrations/0005_orginfo_is_famous.py | b7c90e23e23d3f1e35d1292b3383ca76118047a9 | [] | no_license | a371057600/Guliedu-1 | 9341b5393360e9b9df67c0522d1e54c4aa21d474 | 22ebdd7dfb53d1e326ee2d586960bc0309633fb0 | refs/heads/master | 2020-06-11T23:25:55.664582 | 2018-11-25T00:39:25 | 2018-11-25T00:39:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-08-28 20:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orgs', '0004_auto_20180826_1017'),
]
operations = [
migrations.AddField(
model_name='orginfo',
name='is_famous',
            field=models.BooleanField(default=False, verbose_name='is classic'),
),
]
| [
"[email protected]"
] | |
97f12a6f607074ec0cddd991b1b388544bee6cb3 | ca773907880a5a15351c1b9fe7a9fbff9891631a | /netrunner/connections/__init__.py | 9b4efc06a2cfa796622826bdd30039a6fc6c078a | [] | no_license | rbraddev/netrunner | 88c32ade798ec126679b2d16497a816d73be84cb | 964c145b940a7a80487616641c84b23af8d8884e | refs/heads/main | 2023-08-11T08:52:40.164771 | 2021-09-24T09:00:03 | 2021-09-24T09:00:03 | 401,962,224 | 0 | 0 | null | 2021-09-21T17:27:16 | 2021-09-01T06:55:32 | Python | UTF-8 | Python | false | false | 56 | py | from netrunner.connections.ssh import SSH # noqa: F401
| [
"[email protected]"
] | |
8c6b6ae33e61b724026b1fb0892ab0df127fccba | 3784495ba55d26e22302a803861c4ba197fd82c7 | /venv/lib/python3.6/site-packages/torchx/version.py | 2075926507396aafc624de6753f05dc7d0a3619b | [
"MIT"
] | permissive | databill86/HyperFoods | cf7c31f5a6eb5c0d0ddb250fd045ca68eb5e0789 | 9267937c8c70fd84017c0f153c241d2686a356dd | refs/heads/master | 2021-01-06T17:08:48.736498 | 2020-02-11T05:02:18 | 2020-02-11T05:02:18 | 241,407,659 | 3 | 0 | MIT | 2020-02-18T16:15:48 | 2020-02-18T16:15:47 | null | UTF-8 | Python | false | false | 48 | py | __version__ = '0.4.1'
debug = False
cuda = None
| [
"[email protected]"
] | |
93e23d4818b5f7cf902ae367d5601fefa6793f76 | d629301e2c41fc5d1c7f1a6ca4688117bbd93117 | /RoBO/build/lib.linux-x86_64-2.7/robo/task/ml/var_size_data_freeze_convnet_cifar_2para.py | 8aee0a32cc5ffe8e4fc2d1026ee3e5b2f0356565 | [
"BSD-3-Clause"
] | permissive | mrenoon/datafreezethaw | db1dd377e9bf8c6fe22c442128e6d1eda1a3bb13 | c2ce2e78bd98236618c99fe3453fc24389d48ead | refs/heads/master | 2021-01-19T01:13:18.384209 | 2017-04-05T04:32:26 | 2017-04-05T04:32:26 | 87,232,729 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,513 | py | import numpy as np
import time
import theano
import theano.tensor as T
import lasagne
import os, subprocess
from lasagne.regularization import regularize_layer_params, l2, l1
from robo.task.base_task import BaseTask
ROOT_ML_DIR = "/data/ml/"
EXAMPLES_LINK = "/data/ml/RoBo/examples/"
class VarSizeDataConvNetCifar(BaseTask):
# def __init__(self, train, train_targets,
# valid, valid_targets,
# test, test_targets,
# n_classes, num_epochs=500,
# save=False, file_name=None):
def __init__(self, save=False, file_name=None, num_epochs=500, train_range=5):
# the last dimension of input is always the data-size
# currently, its the [7]
self.num_epochs = num_epochs
# self.save = save
self.file_name = file_name
self.base_name = "VarSizeFreezeConvNetCifar"
self.is_old = False
# self.filename_to_epochs = dict()
# self.train_range = train_range
# 1 Dim Learning Rate:
# 2 Dim L2 regularization: 0 to 1
# 3 Dim Batch size: 20 to 2000
# 4 Dim Dropout rate: 0 to 0.75
# 5 Dim L1 regularization: 0.1 to 20
# 6 Dim Epochs Number: 1 to 100
# X_lower = np.array([np.log(1e-6), 0.0, 20, 0, 0.1, 1])
X_lower = np.array([0.00001, 0.00001, 10])
self.params = X_lower
#X_lower = np.array([np.log(1e-6), 0.0, 1000, 0, 0.1, 1])
#X_upper = np.array([np.log(1e-1), 1.0, 2000, 0.75, 20, 100])
#X_upper = np.array([np.log(1e-1), 1.0, 2000, 0.75, 20, 10])
# X_upper = np.array([np.log(1e-1), 1.0, 2000, 0.75, 20, 7])
X_upper = np.array([0.01, 0.02, 500])
#X_upper = np.array([np.log(1e-1), 1.0, 2000, 0.75, 20, 3])
super(VarSizeDataConvNetCifar, self).__init__(X_lower, X_upper)
def set_weights(self, old_file_name): # FREEZE **********************************TODO
#actually dont need to do anything since the config is already in filename.cfg
#while the data is at filename.data
pass
# file_name = old_file_name + '.npz'
# with np.load(file_name) as f:
# param_values = [f['arr_%d' % i] for i in range(len(f.files))]
# lasagne.layers.set_all_param_values(self.network, param_values)
def set_epochs(self, n_epochs): # FREEZE **********************************TODO
self.num_epochs = n_epochs
def set_save_modus(self, is_old=True, file_old=None, file_new=None): # FREEZE **********************************TODO
self.is_old = is_old
self.save = True
if self.is_old:
self.file_name = file_old
else:
self.file_name = file_new
def objective_function(self, x):
print 'in objective_function x: ', x
        # Even if this is an old model, we don't need the past data here;
        # we simply retrain from scratch.
# print("filename = " + self.file_name)
# dir_path = os.path.dirname(os.path.realpath(__file__))
# return [ np.random.rand() ]
dir_path = ROOT_ML_DIR + "RoBO/examples/"
os.chdir(ROOT_ML_DIR + "/cuda-convnet2-modified")
dataPath = ROOT_ML_DIR + "/data/vu-cifar-10"
save_file = os.path.join(dir_path,self.file_name + ".data")
layersCfg = ROOT_ML_DIR + "/Spearmint-EI/examples/convnetcifar/layers-80sec.cfg"
layersParams = os.path.join(dir_path, self.file_name + ".cfg")
# testFreq = self.num_epochs * 5
# testFreq = str(self.train_range)
layersParamsTemplatePath = ROOT_ML_DIR + "RoBO/examples/dataFreeze/layer-params-template_2para.cfg"
#write the layersParams file
# if os.path.exists(save_file):
# subprocess.call("rm -rf " + save_file, shell=True)
# subprocess.call("rm " + layersParams, shell=True)
template = open(layersParamsTemplatePath,"r").read()
epsW = x[0][0]
epsB = x[0][1]
open(layersParams,"w").write(template % ( epsW, epsB, epsW, epsB, epsW, epsB, epsW, epsB, epsW, epsB ))
dataSize = x[0][2]
num_epochs = int( self.num_epochs * 500 / dataSize )
dataSize = int(dataSize+0.00000000001)
dataSize = min(dataSize, 500)
testRange = str( max(500 + dataSize/5,502) )
testRange = "600"
testFreq = str(4 * dataSize) #Lets just make it 3 * dataSize for now
if not self.is_old:
#its a new model, we need to write the layersParams file
if os.path.exists(save_file):
temp = subprocess.check_output("rm -rf " + save_file, shell=True)
# temp = subprocess.check_output("rm " + layersParams, shell=True)
# self.filename_to_epochs[self.file_name] = self.num_epochs
command = "python convnet.py --data-provider cifar --test-range 501-" + testRange + " --train-range 1-" + str(dataSize) + " --data-path " + dataPath + " --inner-size 24 --save-file " + save_file + " --gpu 0 --layer-def " + layersCfg + " --layer-params " + layersParams + " --epochs " + str(num_epochs) + " --test-freq " + testFreq
# command = "python convnet.py --data-provider cifar --test-range 6 --train-range 1-" + str(self.train_range) + " --data-path " + dataPath + " --inner-size 24 --save-file " + save_file + " --gpu 0 --layer-def " + layersCfg + " --layer-params " + layersParams + " --epochs " + str(self.num_epochs) + " --test-freq " + testFreq
else:
# self.filename_to_epochs[self.file_name] += self.num_epochs
# if the model is already run, we need to load file
command = "python convnet.py --data-provider cifar --test-range 501-" + testRange + " --train-range 1-" + str(dataSize) + " --data-path " + dataPath + " --inner-size 24 --save-file " + save_file + " --gpu 0 --layer-def " + layersCfg + " --layer-params " + layersParams + " --epochs " + str(num_epochs) + " --test-freq " + testFreq + " --load-file " + save_file
# command = "python convnet.py --data-provider cifar --test-range 501-" + testRange + " --train-range 1-" + str(dataSize) + " --data-path " + dataPath + " --inner-size 24 --save-file " + save_file + " --gpu 0 --layer-def " + layersCfg + " --layer-params " + layersParams + " --epochs " + str(num_epochs) + " --test-freq " + testFreq
output = subprocess.check_output(command, shell=True)
# print("+++++ Command = _" + command)
# print("+++++ ml.convnet_cifar_freeze, In objective function, output from command: ")
# print(output)
open(layersParams+".log","a").write(command+"\n"+output+"\n\n\n\n")
losses_strings = output.split("STOPPING TRAINING")[1]
# def get_val_loss(s):
# AveragesId = s.find("Averages-")
# output = s[:AveragesId]
# resultString = output.split(" ")[3][:-1]
# return float( resultString )
stoppingStringId = losses_strings.find("logprob")
val_loss = losses_strings[stoppingStringId:].split(", ")[1]
print "::::: ml.convnet_cifar_freeze, In objective function, after training, filtered, got this val_loss: ", val_loss
return [float(val_loss),]
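
    # Shape of x (a sketch matching the bounds in __init__): the optimizer
    # passes a 2-D array, e.g. np.array([[epsW, epsB, dataSize]]) with
    # epsW in [1e-5, 1e-2], epsB in [1e-5, 2e-2] and dataSize in [10, 500];
    # the return value is [validation logprob loss].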
    def objective_function_test(self, x):
        # self.test_error was never set anywhere, which raised AttributeError;
        # return the objective value instead
        return self.objective_function(x) | [
"[email protected]"
] | |
147dcdf93727142f1333c7fa452dc503f0d433c8 | 0f1a60e09c8db4b2914a7b6aca26576b9b991b97 | /Урок 3. Практическое задание/urls.py | 2827c0824c7e4ce28163b5798578c736fe8b1af4 | [] | no_license | daniilro/python_patterns | 798cccf87e49c9fb47ec8fbae2b0e59bc36067b8 | 89c3410be3607309fb1b3007a19ef71c5b9487d5 | refs/heads/master | 2023-04-26T04:26:17.459561 | 2021-05-19T14:10:25 | 2021-05-19T14:10:25 | 360,465,187 | 0 | 0 | null | 2021-04-24T09:55:07 | 2021-04-22T09:33:47 | HTML | UTF-8 | Python | false | false | 597 | py | '''
'''
import time
from datetime import date
# Front controllers
#############################################################
def fc_base(request):
print("fc_base")
request['timestamp'] = time.time()
request['data'] = date.today()
#############################################################
def fc_debug(request):
print("fc_debug")
if True:
request['debug'] = True
request['key'] = 'key'
#############################################################
fc_list = [fc_base,
fc_debug]
#############################################################
| [
"[email protected]"
] | |
139fa8c391640ecabacc71828360a09224f658ba | 3ae0a536d08e871c5b67fafac3c068c15d53a792 | /app/__init__.py | 7ba5faa28dbde8964c239cf31c1aca7cc96d5765 | [] | no_license | bethconna/Library | 2034f359d8f9ec5fe70568b0cd7035c966baa3ae | ebea0f713404d54306a202fc4f1a0cd9a1212d93 | refs/heads/master | 2022-04-11T07:31:13.584455 | 2020-03-19T19:40:52 | 2020-03-19T19:40:52 | 248,577,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | # -*- coding:utf-8 -*-
from flask import Flask
from app.models.base import db
from flask_login import LoginManager
from flask_mail import Mail
login_manager = LoginManager()
mail = Mail()
def create_app():
    # there is only one global Flask app instance
    # Flask static folder: static_folder defaults to 'static'
app = Flask(__name__)
    # load the configuration files
    # from_object only picks up UPPERCASE settings
app.config.from_object('app.secure')
app.config.from_object('app.setting')
    # register blueprints
register_blueprint(app)
    # initialize LoginManager
login_manager.init_app(app)
login_manager.login_view = 'web.login'
    login_manager.login_message = 'Please log in or register'
    # set up Mail
mail.init_app(app)
    # initialize the database
db.init_app(app)
    # manually push the app context so current_app works
with app.app_context():
db.create_all()
return app
def register_blueprint(app):
from app.web.blueprint import web
app.register_blueprint(web)
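
# Usage sketch (hypothetical entry point, e.g. run.py):
#   from app import create_app
#   app = create_app()
#   app.run(debug=True)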
| [
"[email protected]"
] | |
a32fbb9bc02d3dac7f26f8e022e0b96da9fc8686 | 837fb6ca890d109371bc60558327c007af1991f5 | /mainEmtelco.py | 3c6bc1721f608ed600dd279b06e9f31ccc9157ef | [] | no_license | JhomarDaza/Automatizaci-n | b73c6f0b80eda710bcf8d54710662cceb8144ae8 | 235e7ac26cd9891adeb5bb1f4a4a6799004dc42b | refs/heads/master | 2022-12-10T13:13:48.061158 | 2020-09-05T19:49:43 | 2020-09-05T19:49:43 | 279,809,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,376 | py | import time
from selenium import webdriver
from selenium.webdriver.support.ui import Select
driver = webdriver.Chrome('chromedriver.exe')
driver.get('http://automationpractice.com/index.php?controller=authentication')
time.sleep(5)
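# Note: fixed sleeps are fragile; an explicit wait is a more robust sketch
# (same selenium package, extra imports):
#   from selenium.webdriver.support.ui import WebDriverWait
#   from selenium.webdriver.support import expected_conditions as EC
#   from selenium.webdriver.common.by import By
#   WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, 'email')))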
user_box = driver.find_element_by_id('email')
pass_box = driver.find_element_by_id('passwd')
sing_button = driver.find_element_by_id('SubmitLogin')
user_box.send_keys('[email protected]')
pass_box.send_keys('pruebas1234')
sing_button.click()
address_button = driver.find_element_by_xpath('/html/body/div/div[2]/div/div[3]/div/div/div[1]/ul/li[3]/a')
address_button.click()
time.sleep(5)
new_address_button = driver.find_element_by_xpath('//*[@id="center_column"]/div[2]/a')
new_address_button.click()
address_box = driver.find_element_by_id('address1')
city_box = driver.find_element_by_id('city')
state_Dropdawn = Select(driver.find_element_by_name('id_state'))
zipcode_box = driver.find_element_by_id('postcode')
phone_box = driver.find_element_by_id('phone')
title_address_box = driver.find_element_by_id('alias')
save_button =driver.find_element_by_css_selector('#submitAddress')
address_box.send_keys('Cra 123 # 45')
city_box.send_keys('prueba')
state_Dropdawn.select_by_value("3")
zipcode_box.send_keys('00010')
phone_box.send_keys('3333333')
title_address_box.send_keys('dir_prueba1')
save_button.click()
time.sleep(5)
driver.quit() | [
"[email protected]"
] | |
7988181c24bb1148b29d1dd823e47e340401062f | 5ddf2118455c8285bbfc657f3a2cabdf224beaf4 | /modules/pose_estimator/head_position.py | 322d9cc799efb10163e6baa700ca7ba677d323c3 | [] | no_license | Vadbeg/human_pose_estimation | 004b7ac49514561cc1615c94e44f8b8bb6547c41 | 64dbef23ab9e671f9ab039e14d86956a4d6e07a0 | refs/heads/master | 2023-04-27T12:22:26.941687 | 2021-05-13T23:38:23 | 2021-05-13T23:38:23 | 354,088,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,052 | py | """Module for checking if head position is good"""
import math
from typing import Tuple, List
import numpy as np
from modules.models.utils import get_annotated_facial_landmarks
class HeadPositionChecker:
def __init__(self, edge_value: float = 15.0):
self.edge_value = edge_value
self.max_value = 90
@staticmethod
def __get_nose_landmarks(facial_landmarks: np.ndarray) -> List[Tuple[int, int]]:
annotated_facial_landmarks = get_annotated_facial_landmarks(
landmarks=facial_landmarks
)
nose_landmarks = annotated_facial_landmarks['nose']
return nose_landmarks
def is_head_position_good(self, facial_landmarks):
nose_landmarks = self.__get_nose_landmarks(
facial_landmarks=facial_landmarks
)
top_nose_point, bottom_nose_point = self.__get_nose_line_points(
nose_landmarks=nose_landmarks
)
angle = self.__get_angle_between_vertical_and_nose(
top_nose_point=top_nose_point,
bottom_nose_point=bottom_nose_point
)
if np.abs(self.max_value - angle) > self.edge_value:
return False
return True
@staticmethod
def __get_nose_line_points(
nose_landmarks: List[Tuple[int, int]]
) -> Tuple[Tuple[int, int], Tuple[int, int]]:
top_nose_point = nose_landmarks[0]
all_bottom_nose_points = nose_landmarks[3:9]
bottom_nose_point = np.sum(np.array(all_bottom_nose_points), axis=0) / len(all_bottom_nose_points)
bottom_nose_point = np.uint16(bottom_nose_point)
return top_nose_point, bottom_nose_point
@staticmethod
def __get_angle_between_vertical_and_nose(
top_nose_point: Tuple[int, int],
bottom_nose_point: Tuple[int, int]
) -> float:
        dx = np.abs(top_nose_point[0] - bottom_nose_point[0])
        dy = np.abs(top_nose_point[1] - bottom_nose_point[1])
        if dx == 0:
            # vertical nose line: the original ratio would divide by zero
            return 90.0
        angle = (math.atan(dy / dx) / np.pi) * 180
return angle
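
# Usage sketch (hypothetical landmark input, e.g. a 68-point detector's
# (x, y) array):
#   checker = HeadPositionChecker(edge_value=15.0)
#   head_ok = checker.is_head_position_good(facial_landmarks)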
| [
"[email protected]"
] | |
26c2c8eb47aacbf4b90c8f01845dee35cd36f251 | 66b38de752573365d25ff637cdd7f69eb6afbc90 | /text_normalizer/normalizer.py | e6d019a53e22860757b7abf3380c55bd4602ef9a | [] | no_license | mailtonfcarvalho/Thesaurus | 211d3113bea0c245e5ecb93e6c096631bea5c6dc | e5c2d40cbf3fdae3a202cad9e6fe9f7cc07700f6 | refs/heads/master | 2020-06-16T04:57:27.636854 | 2019-07-06T02:45:34 | 2019-07-06T02:45:34 | 195,484,899 | 0 | 0 | null | 2019-10-31T05:15:30 | 2019-07-06T01:33:38 | Python | UTF-8 | Python | false | false | 2,089 | py | import nltk
import re
import string
from unidecode import unidecode
from utils import paths
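
# NLTK note (an assumption about the local environment): word_tokenize and
# pos_tag need corpora that may be missing; a one-time setup sketch:
#   import nltk
#   nltk.download('punkt')
#   nltk.download('averaged_perceptron_tagger')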
class Normalizer(object):
@staticmethod
def remove_html(text):
return unidecode(re.sub(re.compile("<.*?>"), '', text))
@staticmethod
def has_digit(sentence):
return any(char.isdigit() for char in sentence)
def remove_characteres(self,text):
text_no_links = self.remove_html(text)
links = re.findall(r"https?://[\w:/.'\"_%#-]+", text_no_links)
for link in links:
text_no_links = text_no_links.replace(link, '')
chars_to_remove = nltk.word_tokenize(string.punctuation)
regex = '[' + re.escape(' '.join(chars_to_remove)) + ']'
clean_text = re.sub(regex, ' ',text_no_links)
clean_text = ' '.join(
[
word for word in clean_text.split()
if not self.has_digit(word)
]
)
clean_text = clean_text.lower()
return clean_text
@staticmethod
def remove_stopwords(text):
STOP_WORDS = open(paths.STOPWORDS_FILE_PATH, 'r').read().split()
text_tokenize = nltk.word_tokenize(text)
no_stopwords = ' '.join(
[
word.replace('\"', '')
for word in text_tokenize
if word not in STOP_WORDS
]
)
return no_stopwords
@staticmethod
def retrieve_nouns_and_verbs(text):
tokens = nltk.word_tokenize(text)
words = (
[
n for n, t in nltk.pos_tag(tokens)
if t in ('NN', 'VB')
]
)
return words
def normalize(self, text):
delete_chars = self.remove_characteres(text)
delete_stopwords = self.remove_stopwords(delete_chars)
nouns_verbs = self.retrieve_nouns_and_verbs(delete_stopwords)
return nouns_verbs
if __name__ == '__main__':
w = Normalizer()
data = open(paths.DOCUMENTS_FILE_PATH, 'r').read()
words = w.normalize(data)
words_list = list(set(words))
print(words_list) | [
"[email protected]"
] | |
fcfa912fbbde293329c2a0f645dbbb9eb913faff | 33d46ee082d496af1775c1124d0a5b61eeac6e43 | /OldCode/Extra Files/helloworld.py | 9579a7d359c45a1e021336c5ae4b47c225bac008 | [] | no_license | SophiaMVaughn/Treeo | 7355ab5eeb004bb13a4b79000374b9fad3dade3f | f619e2f673d13fb7852fee5874a0b7e3f68aba6f | refs/heads/master | 2023-04-18T06:46:57.339396 | 2021-04-21T05:18:53 | 2021-04-21T05:18:53 | 264,577,585 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,833 | py | from flask import Flask
from flask import Flask, flash, redirect, render_template, request, session, abort
from password_strength import PasswordPolicy
import password_strength
app = Flask(__name__)
@app.route("/")
def index():
option = []
option.append("Provider1")
option.append("Provider2")
option.append("Provider3")
option.append("Provider4")
return render_template("testForm.html", options=option)
@app.route('/approve', methods=['POST','GET'])
def approve():
return "help"
@app.route('/approve/<username>', methods=['POST','GET'])
def approveForm(username):
#mySQL_userDB.verifyProvider(username, cursor, cnx)
return render_template("approveTest.html",
providername = username)
# def usernamecheck():
# #text = request.args.get('jsdata')
# policy = PasswordPolicy.from_names(
# length=8, # min length: 8
# uppercase=2, # need min. 2 uppercase letters
# numbers=2 # need min. 2 digits
# )
# ##PASSWORD STRENGTH
# #isEnough = policy.test("abcAAAaa")
# if len(isEnough):
# #print(type(isEnough[0]))
# if len(isEnough)==1:
# if type(isEnough[0])==password_strength.tests.Length:
# return "<8 characters"
# elif type(isEnough[0])==password_strength.tests.Uppercase:
# return "<2 capital letters"
# elif type(isEnough[0])==password_strength.tests.Numbers:
# return "<2 digits"
# elif len(isEnough)==2: #any 2 combinationsS
# if type(isEnough[0])==password_strength.tests.Length:
# if type(isEnough[1])==password_strength.tests.Uppercase:
# return "<8 characters\n<2 capital letters"
# elif type(isEnough[1])==password_strength.tests.Numbers:
# return "<8 characters\n<2 digits"
# elif type(isEnough[0])==password_strength.tests.Uppercase:
# if type(isEnough[1])==password_strength.tests.Numbers:
# return "<2 capital letters\n<2 digits"
# elif type(isEnough[1])==password_strength.tests.Length:
# return "<2 capital letters\n<8 characters"
# elif type(isEnough[0])==password_strength.tests.Numbers:
# if type(isEnough[1])==password_strength.tests.Uppercase:
# return "<2 digits\n<2 capital letters"
# elif type(isEnough[1])==password_strength.tests.Length:
# return "<2 digits\n<8 characters"
# else: #all 3
# return "<8 characters\n<2 capital letters\n<2 digits"
# #CHANGE THE FORMAT of the message
#[x]/[x]/[x]?
#usernamecheck()
if __name__ == "__main__":
app.run(host='0.0.0.0', port=4000)
| [
"[email protected]"
] | |
5433f326ab81dd4476ccb946c13c972d8677d656 | 930e9a18693fc56efa4f32c634290afed5034c3f | /build-time-graph.py | ce92d0f6407bfad6b9e6bc69494f261884092163 | [] | no_license | doloopwhile/aggregate-jenkins-job | aa3a4b7d42a89182aead12b6a35f9cec2b39f3a5 | 5827862ee096b4f15f291b8a282938fc939c129d | refs/heads/master | 2016-09-09T17:49:06.450128 | 2014-06-05T10:29:20 | 2014-06-05T10:29:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,778 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from argparse import ArgumentParser
from pathlib import Path
from urllib.parse import urljoin
from urllib.request import urlopen
import urllib.request
from operator import itemgetter
import pyjq as jq
import math
import webbrowser
def main():
parser = ArgumentParser()
parser.add_argument('host', action='store')
parser.add_argument('job', action='store')
parser.add_argument('--port', action='store', default=80)
args = parser.parse_args()
def api_url(url):
return urljoin(url, 'api/json')
job_api_url = api_url('http://{0.host}:{0.port}/job/{0.job}/'.format(args))
# builds = []
# for build_url in jq.all('.builds[].url', url=job_api_url):
# build = jq.one('{duration: .duration, number: .number, result: .result}', url=api_url(build_url))
# builds.append(build)
builds = [{'number': 415, 'duration': 1817465, 'result': 'SUCCESS'}
,{'number': 416, 'duration': 1490033, 'result': 'SUCCESS'}
,{'number': 419, 'duration': 1803128, 'result': 'SUCCESS'}
,{'number': 421, 'duration': 1753199, 'result': 'SUCCESS'}
,{'number': 426, 'duration': 1686575, 'result': 'SUCCESS'}
,{'number': 427, 'duration': 2449128, 'result': 'SUCCESS'}
,{'number': 428, 'duration': 1752961, 'result': 'SUCCESS'}
,{'number': 429, 'duration': 1424184, 'result': 'SUCCESS'}
,{'number': 430, 'duration': 1540526, 'result': 'SUCCESS'}
,{'number': 431, 'duration': 1776849, 'result': 'SUCCESS'}
,{'number': 432, 'duration': 1380645, 'result': 'SUCCESS'}
,{'number': 433, 'duration': 2087693, 'result': 'SUCCESS'}
,{'number': 435, 'duration': 1629043, 'result': 'SUCCESS'}]
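    # NOTE: the list above is cached sample data; the commented block before it
    # sketches fetching live results from the Jenkins JSON API via pyjq.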
builds = [build for build in builds if build['result'] == 'SUCCESS']
builds.sort(key=itemgetter('number'))
import string
import json
graph_data = [['build', 'duration']] + [[str(b['number']), (b['duration'] // 1000 / 60)] for b in builds]
from pprint import pprint
pprint(graph_data)
pprint([
['x', 'Blanket 2'],
['A', 0.5],
['B', 1],
['C', 0.5],
['D', 1],
['E', 0.5],
['F', 1],
['G', 0.5],
['H', 1],
['I', 0.5],
['J', 1],
['K', 0.5],
['L', 1],
['M', 0.5],
['N', 1]
])
html = string.Template('''\
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8"/>
<title>
Google Visualization API Sample
</title>
<script type="text/javascript" src="http://www.google.com/jsapi"></script>
<script type="text/javascript">
google.load('visualization', '1', {packages: ['corechart']});
</script>
<script type="text/javascript">
function drawVisualization() {
// Create and populate the data table.
var data = google.visualization.arrayToDataTable($json_graph_data);
// Create and draw the visualization.
new google.visualization.LineChart(document.getElementById('visualization')).
draw(data, {curveType: "none",
width: 800, height: 400,
vAxis: {maxValue: 10}}
);
}
google.setOnLoadCallback(drawVisualization);
</script>
</head>
<body style="font-family: Arial;border: 0 none;">
<div id="visualization" style="width: 500px; height: 400px;"></div>
</body>
</html>''').substitute(json_graph_data=json.dumps(graph_data))
import tempfile
t = tempfile.NamedTemporaryFile(delete=False)
t.write(html.encode('ascii'))
t.close()
    webbrowser.open(Path(t.name).as_uri())
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
388fcbd63eb525dcea423f765b12cc6b3cc9a56f | 4c57cd567f665e6350c8e49996e1ac51a9d059b0 | /caffe_in/apps/cafe/models.py | 6dfd47fd4b0e5551d1b009bcec264c1a78552729 | [] | no_license | dayatz/caffe_in_project | ba568c4c3354f637db1388581ed06baede83d916 | c884e8ba8fd9382b00c54ea698dbc0c2dbcaa2c9 | refs/heads/master | 2021-01-13T16:31:09.775539 | 2016-09-25T22:56:33 | 2016-09-25T22:56:33 | 69,189,378 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,477 | py | from django.db import models
from versatileimagefield.fields import VersatileImageField
class NameDescriptionMixin(models.Model):
name = models.CharField(max_length=50)
description = models.TextField()
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class Cafe(NameDescriptionMixin):
photo = VersatileImageField(upload_to='cafe/')
# Contact
phone = models.CharField(max_length=12)
fb = models.CharField(max_length=30, null=True, blank=True)
tw = models.CharField(max_length=30, null=True, blank=True)
# Location
address = models.TextField()
lng = models.CharField(max_length=20)
lat = models.CharField(max_length=20)
class Meta:
verbose_name = "Cafe"
verbose_name_plural = "Cafes"
def __str__(self):
return self.name
class Menu(NameDescriptionMixin):
cafe = models.ForeignKey(Cafe, related_name='menus')
photo = VersatileImageField(upload_to='menu/')
price = models.FloatField()
class Meta:
verbose_name = "Menu"
verbose_name_plural = "Menus"
def __str__(self):
return "%s: %s" % (self.cafe.name, self.name)
class Gallery(models.Model):
cafe = models.ForeignKey(Cafe)
caption = models.CharField(max_length=50, null=True, blank=True)
photo = VersatileImageField(upload_to='cafe/')
def __str__(self):
return self.cafe.name
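
# ORM usage sketch (hypothetical queries):
#   Cafe.objects.filter(menus__price__lte=5.0).distinct()
#   Gallery.objects.filter(cafe__name="Some Cafe")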
| [
"[email protected]"
] | |
3bf45def4e1931283ba586eda4d1f670f7a94bca | 81ffa8fd63ff894a2f8e65692c974ef5b7c2c259 | /nn/data/Easy_Image_Annotation_Tool--master/Easy_Image_Annotation_Tool--master/Manual_Image_Annotation.py | f138cdc191597ceaccc9fb3dda4e65dadc764840 | [] | no_license | n0lean/enet_deploy | 73ef8ebffd6276eea66bbd3f89eb9f6dc5e52836 | 093f3e3c0471cbc0070b3fb063b4b0135c7a151f | refs/heads/master | 2021-06-23T12:37:57.348175 | 2017-08-16T19:44:38 | 2017-08-16T19:44:38 | 100,507,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,517 | py | # Free Hand Drawing for image Annotation
# Author :- Harish Pullagurla ( [email protected] )
# Last Updated :- 19th March 2017
# How to use :-
# 1. put images in the images folder
# 2. draw pattern on image
# 3. press Esc
# 4. Enter the image label value
# 5. Press 1 to continue annotating the same image
# Handling errors
# 6. Enter 0 as the image label value if you made a mistake while selecting during free-hand drawing, since '0' is the base class
# 7. Try to draw closed loops during free-hand drawing, otherwise the flood filling doesn't happen
import cv2
import numpy as np
import os
'''
drawing=False # true if mouse is pressed
mode=True # if True, draw rectangle. Press 'm' to toggle to curve
pt = []
file_locations = []
# mouse callback function
def freehand_draw(event,former_x,former_y,flags,param):
global current_former_x,current_former_y,drawing, mode
if event==cv2.EVENT_LBUTTONDOWN:
drawing=True
current_former_x,current_former_y=former_x,former_y
pt.append([former_x,former_y])
elif event==cv2.EVENT_MOUSEMOVE:
if drawing==True:
if mode==True:
cv2.line(im,(current_former_x,current_former_y),(former_x,former_y),(255,255,255),2)
cv2.line(im2, (current_former_x, current_former_y), (former_x, former_y), 255, 2)
current_former_x = former_x
current_former_y = former_y
pt.append([former_x, former_y])
#print former_x,former_y
elif event==cv2.EVENT_LBUTTONUP:
drawing=False
if mode==True:
cv2.line(im,(current_former_x,current_former_y),(former_x,former_y),(255,255,255),2)
cv2.line(im2,(current_former_x,current_former_y),(former_x,former_y),255,2)
current_former_x = former_x
current_former_y = former_y
pt.append([former_x, former_y])
return former_x,former_y
# Main Program starts here
directory = os.getcwd()
directory_1 = directory + '\\Images\\'
for filename in os.listdir(directory_1):
if filename.endswith(".jpg") or filename.endswith(".png"):
file_locations.append(os.path.join(directory_1, filename))
continue
else:
continue
for h in range(len(file_locations)):
filename = file_locations[h]
im_base = cv2.imread(filename)
#im = cv2.resize(im_base,(400,300),fx = 1,fy =1)
im = im_base
im_annotated = 100*np.ones((np.size(im,0),np.size(im,1)),dtype='uint8')
response = '1'
while(response == '1'):
#print response
im2 = np.zeros((np.size(im,0),np.size(im,1)),dtype='uint8')
cv2.namedWindow("colour image")
cv2.setMouseCallback('colour image',freehand_draw)
while(1):
cv2.imshow('colour image',im)
k=cv2.waitKey(1)&0xFF
if k==27:
break
kernel = np.ones((5,5),np.uint8)
closing = cv2.morphologyEx(im2, cv2.MORPH_CLOSE, kernel)
# Copy the thresholded image.
im_floodfill = closing.copy()
# Mask used to flood filling.
# Notice the size needs to be 2 pixels than the image.
h, w = im2.shape[:2]
mask = np.zeros((h + 2, w + 2), np.uint8)
# Floodfill from point (0, 0)
cv2.floodFill(im_floodfill, mask, (0, 0), 255)
# Invert floodfilled image
im_floodfill_inv = cv2.bitwise_not(im_floodfill)
# Combine the two images to get the foreground.
im_out = closing | im_floodfill_inv
cv2.imshow("Foreground", im_out)
cv2.waitKey(10)
    category = int(raw_input("enter the label number"))
    if category == 500:
        print 'invalid_category'
        continue
img_rows, img_cols = im_out.shape
for i in range(img_rows):
for j in range(img_cols):
if im_out[i,j] == 255 :
im_annotated[i,j] = category
#cv2.imshow("final label map", im_annotated)
k=cv2.waitKey(1)&0xFF
if k==97:
break
response = raw_input('press 1 to continue')
cv2.destroyAllWindows()
image_name = filename.split(directory_1)
image_name_1 = image_name[1].split('.')
filename_annotated_image = directory +'\\Annotated\\'+image_name_1[0]+'_annotated_image.png'
finished_labeling = directory +'\\Finished_Labeling\\'+ str(image_name[1])
os.rename(filename,finished_labeling)
cv2.imwrite(filename_annotated_image,im_annotated)
cv2.destroyAllWindows()
''' | [
"[email protected]"
] | |
fc65c7f5983670a239b99ce19baf7bf53c1ed218 | 37fc0a4886070ca56375393c05b6368060309273 | /curves.py | b79fc0e11d7afca717bb795f477191331aa91b8c | [
"Apache-2.0"
] | permissive | Green-Resilience/Orchestration_HollyFerguson | 86e8f56f8ae1d75c4d047f6f859146e9e164de48 | 238ab738a1a05e9bf716768490804f6d407715a1 | refs/heads/master | 2021-01-01T18:06:01.084692 | 2017-07-25T01:29:56 | 2017-07-25T01:29:56 | 98,250,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,125 | py | # -------------------------------------------------------------------------------
# Name: curves.py
# Purpose: Use USGS data to create and query hazard curves
#
# Author: Holly Tina Ferguson [email protected]
#
# Created: 07/06/2017
# Copyright: (c) Holly Tina Ferguson 2017
# Licence: The University of Notre Dame
# Acknowledgement: S. Nagrecha 2017
# -------------------------------------------------------------------------------
# #!/usr/bin/python
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import UnivariateSpline
def ImputeZeros(_x, _y):
"""Returns modified in-place versions _x & _y where the value of zero is slightly shifted by DELTA"""
_x = list(_x) #because tuples are special creatures...
_y = list(_y)
# Do not worry about overflow errors: (RuntimeWarning: overflow encountered in power)...
# Numbers will still compute and print...see output, this same example running is something like [-9.62392027] for that one model
DELTA = 2**(-256)
for i in range(len(_x)):
if _x[i]==0:
_x[i] += DELTA
if _y[i]==0:
_y[i] += DELTA
    return tuple(_x), tuple(_y) #re-cast the modified lists as tuples before returning
# import multipolyfit as mpf
def InferSpline(x,y,cityname,modelname,savefigures,degree=3,GRANULARITY=500):
x_lin = np.linspace(min(x),max(x),GRANULARITY)
# make sure you don't have any zeroes around, or else you'll get an -Inf.
# I don't know what that does to splines, all I know is that it can't be good
#print "X = ", x
#print "Y = ", y
#print "log(X) = ", np.log(x)
#print "log(Y) = ", np.log(y)
x_clean,y_clean = ImputeZeros(x,y)
spl = UnivariateSpline(np.log(x_clean),np.log(y_clean),k=degree)
y_lin = np.exp(spl(np.log(x_lin)))
if savefigures:
plt.plot(x, y, 'kx')
plt.plot(x_lin, y_lin, 'b-')
plt.title(cityname + "\n" + modelname)
plt.xscale("log")
plt.yscale("log")
plt.savefig(os.path.join("figures",cityname + modelname + ".png"),dpi=500)
return spl
class Curves():
# Input parameters
def querycurves(self,citydatanesteddict,savefigs):
"""
Builds an interpolated spline for each model for each city
citydatanesteddict: looks like this {city_name: {model: (X,Y) }}
savefigs: Boolean. Saves figures into a common directory for now if 'True'
returns: {city_name: {model: spline}}
In a future version, something more advanced / modular than splines can be swapped out and vars can be renamed
"""
model_splines = {}
for _city in citydatanesteddict:
model_splines[_city] = {}
for _model in citydatanesteddict[_city]:
hazardcurve_coarse = citydatanesteddict[_city][_model]
hazard_x, hazard_y = zip(*hazardcurve_coarse)
hazard_spl = InferSpline(hazard_x,hazard_y,cityname=_city, modelname=_model, savefigures=savefigs)
model_splines[_city][_model] = hazard_spl
return model_splines
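
# Usage sketch (hypothetical coarse hazard-curve points per city and model):
#   data = {"SouthBend": {"modelA": [(0.01, 0.1), (0.1, 0.01), (1.0, 0.001)]}}
#   splines = Curves().querycurves(data, savefigs=False)
#   spl = splines["SouthBend"]["modelA"]  # evaluate on log(x); exponentiate output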
| [
"[email protected]"
] | |
272435e3dc1c043b9bf0dafded30b3c2c4e0a782 | bbb633cf5a156714fd072c116c802b0a45745513 | /BasantBookFestival/BookFest/migrations/0002_book_link.py | 43ed2b004e6e0136ea97a81917dbcc7fe941bdc9 | [] | no_license | darmis007/Basant-Book-Festival-Backend | e41613885abcb385d80eb08057b13272b6658ef9 | f3aca9e79d876f00d2fb312d9535922469fcd5df | refs/heads/main | 2023-03-09T16:52:11.315240 | 2021-02-25T15:46:20 | 2021-02-25T15:46:20 | 329,369,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | # Generated by Django 3.0.7 on 2021-01-27 14:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('BookFest', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='book',
name='link',
field=models.URLField(blank=True, null=True),
),
]
| [
"[email protected]"
] | |
8d26e14f8102dc796688c29472bb87e5230ae97d | a259e74299ddf20bb821a667f9656e8d25389651 | /parser/lc_quad_linked.py | d9c61ba9a8646f52f6021311b60aa80da01c84c6 | [] | no_license | karthi2016/query_generation | 1dad6a5eff462768df46e0d130ee8067fe35617f | 6290bdef582bb425d982b0dab2379484c23db724 | refs/heads/master | 2021-07-07T00:46:21.144462 | 2017-10-04T11:42:26 | 2017-10-04T11:42:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,317 | py | import json
import re
from common.qapair import QApair
from common.uri import Uri
from kb.dbpedia import DBpedia
from answerparser import AnswerParser
class LC_Qaud_Linked:
def __init__(self, path="./data/LC-QUAD/linked.json"):
self.raw_data = []
self.qapairs = []
self.path = path
self.parser = LC_Qaud_LinkedParser()
def load(self):
with open(self.path) as data_file:
self.raw_data = json.load(data_file)
def parse(self):
for raw_row in self.raw_data:
self.qapairs.append(
QApair(raw_row["question"], raw_row.get("answers"), raw_row["sparql_query"], raw_row, raw_row["id"],
self.parser))
def print_pairs(self, n=-1):
for item in self.qapairs[0:n]:
print item
print ""
class LC_Qaud_LinkedParser(AnswerParser):
def __init__(self):
super(LC_Qaud_LinkedParser, self).__init__(DBpedia())
def parse_question(self, raw_question):
return raw_question
def parse_answerset(self, raw_answers):
return self.parse_queryresult(raw_answers)
def parse_sparql(self, raw_query):
uris = [Uri(raw_uri, self.kb.parse_uri) for raw_uri in re.findall('(<[^>]*>|\?[^ ]*)', raw_query)]
return raw_query, True, uris
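
# Usage sketch (assumes ./data/LC-QUAD/linked.json is present):
#   ds = LC_Qaud_Linked()
#   ds.load()
#   ds.parse()
#   ds.print_pairs(5)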
| [
"[email protected]"
] | |
1ede98ce26d5624c114d96d33e1a04c1d3265fe8 | 969b2895158993c593596881e1957463111f95e1 | /Mxnet/CNN/DenseNet/DenseBlock.py | efb41004bbb32263db035fe8ed2e1250a15ed7b1 | [] | no_license | JYLFamily/Python_Study_Note | 4f3fda1a4374df48db3aeeac2c27b8ef28673795 | eb6d5a7f359e24659054b61a382668b3ef3e9234 | refs/heads/master | 2021-01-02T09:46:15.687725 | 2018-04-28T11:21:35 | 2018-04-28T11:21:35 | 99,296,027 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | # coding:utf-8
from mxnet import nd
from mxnet.gluon import nn
def conv_block(channels):
out = nn.Sequential()
out.add(
nn.BatchNorm(),
nn.Activation("relu"),
nn.Conv2D(channels, kernel_size=3, padding=1)
)
return out
class DenseBlock(nn.Block):
    # layers: the number of conv_blocks this DenseBlock contains
    # each conv_block outputs growth_rate channels
def __init__(self, layers, growth_rate, **kwargs):
super(DenseBlock, self).__init__(**kwargs)
self.net = nn.Sequential()
for i in range(layers):
self.net.add(conv_block(growth_rate))
def forward(self, x):
for layer in self.net:
out = layer(x)
x = nd.concat(x, out, dim=1)
return x | [
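
# Quick shape check (a sketch): output channels grow by layers * growth_rate.
#   blk = DenseBlock(layers=2, growth_rate=10)
#   blk.initialize()
#   y = blk(nd.random.uniform(shape=(4, 3, 8, 8)))
#   y.shape  # -> (4, 3 + 2 * 10, 8, 8)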
"[email protected]"
] | |
7e2c40d30cf8b10bd6dd710e7ee833f008999675 | 44132a86288fb076e72b1ee9c1ddd711bb1af272 | /DavinciStripping/MCSelDetachedN.py | 58e564f349c4be492ec26b5091aa26ee3e6ebd67 | [] | no_license | mboubdir/lhcb_analysis | 40f00ac3e734513ec57a9a9f198a25c8b788c0d6 | fbac1dbcf13d5a800327aa6b12847e7687dbe5f4 | refs/heads/master | 2020-04-17T18:41:05.883872 | 2019-01-21T17:36:50 | 2019-01-21T17:36:50 | 166,836,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,922 | py | # LHCb standard definitions
# -*- coding: utf-8 -*-
## from os import environ
## import math
from Gaudi.Configuration import *
from GaudiKernel.SystemOfUnits import MeV, GeV, mm
from GaudiConfUtils import ConfigurableGenerators
from Configurables import FilterDesktop, CombineParticles, TupleToolDecayTreeFitter, TupleToolDecay, OfflineVertexFitter
from PhysSelPython.Wrappers import Selection, SelectionSequence, DataOnDemand, AutomaticData
from Configurables import DecayTreeTuple, BTaggingTool, SubstitutePID, TrackScaleState, CheckPV, CondDB
from Configurables import TupleToolPid, TupleToolTrackInfo, TupleToolKinematic, TupleToolPropertime, TupleToolPrimaries, TupleToolEventInfo, TupleToolGeometry, TupleToolRecoStats, TupleToolTrackPosition, TupleToolMCBackgroundInfo , TupleToolMCTruth, MCTupleToolKinematic, MCTupleToolHierarchy , MCTupleToolEventType , MCTupleToolInteractions , TupleToolGeneration , MCTupleToolReconstructed, TupleToolTrigger, TupleToolTISTOS, TupleToolANNPID
from Configurables import DaVinci, HltSelReportsDecoder, HltVertexReportsDecoder, HltDecReportsDecoder, LoKi__Hybrid__TupleTool, TupleToolJets
from DecayTreeTuple.Configuration import *
from Configurables import LoKi__Hybrid__PlotTool as PlotTool
from Configurables import LoKi__VertexFitter as VertexFitter
from Configurables import AddRelatedInfo, RelInfoConeVariables, RelInfoTrackIsolationBDT, RelInfoVertexIsolationBDT, RelInfoVertexIsolation
from PhysSelPython.Wrappers import AutomaticData, Selection, SelectionSequence
from StrippingConf.Configuration import StrippingConf, StrippingStream
from StrippingSettings.Utils import strippingConfiguration
from StrippingArchive.Utils import buildStreams
from StrippingArchive import strippingArchive
#---------------------------------------------------------------
#Use Stripping21r0p1 on MC for Run I
#---------------------------------------------------------------
from StrippingArchive.Stripping21r0p1.StrippingRD.StrippingB2Lambda0MuLines import B2Lambda0MuLines
from StrippingSettings.Stripping21r0p1.LineConfigDictionaries_RD import B2Lambda0Mu
#from StrippingArchive.Stripping23.StrippingRD.StrippingB2Lambda0MuLines import B2Lambda0MuLines
#from StrippingSettings.Stripping23r1.LineConfigDictionaries_RD import B2Lambda0Mu
B2Lambda0MuConf = B2Lambda0MuLines('B2Lambda0MuLines', B2Lambda0Mu['CONFIG'])
B2Lambda0MuLines = B2Lambda0MuConf.lines()
sc = StrippingConf( HDRLocation = "DecReports" )
sstream = StrippingStream("TestStream")
sstream.appendLines( B2Lambda0MuLines )
sstream.OutputLevel = 2
sc.appendStream( sstream )
#---------------------------------------------------------------
#Use specific Stripping23r1 on MC for Run II
#---------------------------------------------------------------
## from StrippingArchive.Stripping23.StrippingRD.StrippingB2Lambda0MuLines import B2Lambda0MuLines
## from StrippingSettings.Stripping23r1.LineConfigDictionaries_RD import B2Lambda0Mu
## B2Lambda0MuConf = B2Lambda0MuLines('B2Lambda0MuLines', B2Lambda0Mu['CONFIG'])
## B2Lambda0MuLines = B2Lambda0MuConf.lines()
## ## stripline = B2Lambda0MuLines[0]
## sc = StrippingConf( HDRLocation = "DecReports" )
## sstream = StrippingStream("TestStream")
## sstream.appendLines( B2Lambda0MuLines )
## sstream.OutputLevel = 2
## sc.appendStream( sstream )
#---------------------------
# Make Ntuples
#---------------------------
#from Configurables import PrintDecayTree, PrintDecayTreeTool
#printer = PrintDecayTree("Printer")
#printer.addTool( PrintDecayTreeTool, name = "PrintDecay" )
#printer.PrintDecay.Information = "Name M P Px Py Pz Pt chi2"
#printer.Inputs = TupleInputs
#---------------------------
# Configure lines and Decay
#---------------------------
tuple = DecayTreeTuple('DetachedN')
TupleInputs = []
for line in B2Lambda0MuLines :
TupleInputs.append( line.outputLocation() )
tuple.Inputs = TupleInputs
tuple.OutputLevel = INFO
tuple.Decay = "[B- -> ^(Lambda0 -> ^mu- ^pi+) ^mu-]CC"
tuple.addBranches({
"B" : "[B- -> (Lambda0 -> mu- pi+) mu-]CC",
"N" : "[B- -> ^(Lambda0 -> mu- pi+) mu-]CC",
"mu_prim" : "[B- -> (Lambda0 -> mu- pi+) ^mu-]CC",
"mu_sec" : "[B- -> (Lambda0 -> ^mu- pi+) mu-]CC",
"pi" : "[B- -> (Lambda0 -> mu- ^pi+) mu-]CC"
})
#---------------------------
# Define nTuple Variables
#---------------------------
tuple.ToolList = [
"TupleToolKinematic",
"TupleToolPid",
"TupleToolGeometry",
"TupleToolPrimaries",
"TupleToolTrackInfo",
"TupleToolEventInfo",
"TupleToolIsolationTwoBody",
"TupleToolRecoStats",
"TupleToolAngles",
"TupleToolANNPID",
"TupleToolMCBackgroundInfo",
"TupleToolMCTruth",
"TupleToolTrigger",
"TupleToolDira",
"TupleToolEventInfo",
"TupleToolPropertime",
"TupleToolRecoStats",
]
coneIso = tuple.addTupleTool("TupleToolTrackIsolation") # cone isolation
#coneIso.MinConeAngle() # Set the minimal deltaR of the cone (default = 0.5), in radians
#coneIso.MaxConeAngle() # Set the maximum deltaR of the cone (default = 1.0), in radians
#coneIso.StepSize() # Set the step of deltaR between two iterations (default = 0.1), in radians
#coneIso.TrackType() # Set the type of tracks which are considered inside the cone (default = 3)
#coneIso.FillAsymmetry() # Flag to fill the asymmetry variables (default = false)
#coneIso.FillDeltaAngles() # Flag to fill the delta angle variables (default = false)
# gregs isolation
from Configurables import TupleToolApplyIsolation
tuple.B.addTupleTool(TupleToolApplyIsolation, name="TupleToolApplyIsolationHard")
tuple.B.TupleToolApplyIsolationHard.OutputSuffix="_Hard"
tuple.B.TupleToolApplyIsolationHard.WeightsFile="weights_110614_Lc_pX.xml"
tuple.B.ToolList+=["TupleToolApplyIsolation/TupleToolApplyIsolationHard"]
#tuple.B.addTupleTool(TupleToolApplyIsolation, name="TupleToolApplyIsolationSoft")
#tuple.B.TupleToolApplyIsolationSoft.OutputSuffix="_Soft"
#tuple.B.TupleToolApplyIsolationSoft.WeightsFile="weightsSoft.xml"
#tuple.B.ToolList+=["TupleToolApplyIsolation/TupleToolApplyIsolationSoft"]
trigger_list = [
'L0MuonDecision'
,'L0HadronDecision'
,'L0DiMuonDecision'
,'Hlt1TrackAllL0Decision'
,'Hlt1TrackMuonDecision'
,'Hlt2TopoMu2BodyBBDTDecision'
,'Hlt2TopoMu3BodyBBDTDecision'
,'Hlt2TopoMu4BodyBBDTDecision'
,'Hlt2Topo2BodyBBDTDecision'
,'Hlt2Topo3BodyBBDTDecision'
,'Hlt2Topo4BodyBBDTDecision'
,'Hlt2Topo2BodySimpleBBDTDecision'
,'Hlt2Topo3BodySimpleBBDTDecision'
,'Hlt2Topo4BodySimpleBBDTDecision'
]
#trigger config
trigger = tuple.addTupleTool(TupleToolTISTOS)
trigger.TriggerList = trigger_list
trigger.Verbose = True
trigger.VerboseL0 = True
trigger.VerboseHlt1 = True
trigger.VerboseHlt2 = True
stripping_line = 'B2Lambda0MuBu2LambdaSSMuLine'
stream = 'AllStreams'
LoKiTool = tuple.addTupleTool("LoKi::Hybrid::TupleTool/LoKiTool")
LoKiTool.Variables = {
"InAccMuon" : "PPINFO(LHCb.ProtoParticle.InAccMuon, -1)",
"ETA" : "ETA",
"LOKI_DTF_CTAU" : "DTF_CTAU( 0, True )",
"LOKI_DTF_CTAUS" : "DTF_CTAUSIGNIFICANCE( 0, True )",
"LOKI_DTF_CHI2NDOF" : "DTF_CHI2NDOF( True )",
"LOKI_DTF_CTAUERR" : "DTF_CTAUERR( 0, True )",
"LOKI_DTF_MASS" : "DTF_FUN ( M , True )" ,
"LOKI_DTF_VCHI2NDOF" : "DTF_FUN ( VFASPF(VCHI2/VDOF) , True )"}
#MC Information#
MCTruth = TupleToolMCTruth()
MCTruth.addTool(MCTupleToolHierarchy())
MCTruth.addTool(MCTupleToolKinematic())
MCTruth.addTool(MCTupleToolReconstructed())
MCTruth.ToolList += ["MCTupleToolHierarchy", "MCTupleToolKinematic", "MCTupleToolReconstructed" ]
#---------------------------
# Configure DaVinci
#---------------------------
from Configurables import DaVinci
DaVinci().UserAlgorithms = [sc.sequence(), tuple]
DaVinci().InputType = 'DST'
DaVinci().DataType = '2012'
DaVinci().Simulation = True
DaVinci().Lumi = False
DaVinci().PrintFreq = 10000
DaVinci().EvtMax = -1
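
# Run sketch (an assumption about the environment): these options are meant to
# be passed to DaVinci, e.g. through Ganga or
# `gaudirun.py MCSelDetachedN.py data.py`, together with an options file that
# declares the input DST files.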
| [
"[email protected]"
] | |
f6506f8e94f4a7cca99932feaaa520aeec30af64 | 6ce97c840d75672c2b880b5b249dd817f3a0e887 | /utils.py | 09a746b0496594b8d24d6de89d01f48204fc5c4f | [] | no_license | grzegorznowacki/tsp | b4c208ead9518dcf2c9975ec8ceb240155159885 | 6f1399c6a892cbde6b57d2912cd72b6077091946 | refs/heads/master | 2020-05-14T14:39:19.778549 | 2019-06-02T08:47:55 | 2019-06-02T08:47:55 | 181,837,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,099 | py | import csv
import random
from random import randint
from itertools import groupby
import algorithms_utils
import numpy as np
import matplotlib.pyplot as plt
from config import *
def load_file_to_list(input_file_path):
points_list = []
with open(input_file_path) as csvfile:
reader = csv.reader(csvfile)
next(reader)
for row in reader:
points_list.append((int(row[1]), int(row[2])))
return points_list
def load_file_to_dict(input_file_path):
point_index_dict = {}
with open(input_file_path) as csvfile:
reader = csv.reader(csvfile)
next(reader)
for row in reader:
point_index_dict[(int(row[1]), int(row[2]))] = int(row[0])
return point_index_dict
def draw_starting_point(points_list):
return random.choice(points_list)
def draw_starting_point_index(points_list):
return randint(0, len(points_list) - 1)
def save_paths_to_file(found_path1, found_path2, output_file_path):
with open(output_file_path, 'w') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['path1', 'path2'])
for index1, index2 in zip(found_path1, found_path2):
writer.writerow([index1, index2])
def save_edges_len_to_file(edges_len1, edges_len2, output_file_path):
with open(output_file_path, 'w') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['edges_len1', 'edges_len2'])
for index1, index2 in zip(edges_len1, edges_len2):
writer.writerow([index1, index2])
def create_points_list_from_indices_list(indices_list, points_list_from_file):
result_points_list = []
for index in indices_list:
result_points_list.append(points_list_from_file[index])
return result_points_list
def find_outer_square_size(points_list):
max_x = 0
max_y = 0
for point in points_list:
if point[0] > max_x:
max_x = point[0]
if point[1] > max_y:
max_y = point[1]
if max_x >= max_y:
return max_x
else:
return max_y
def divide_range(portion, start=0, end=2**32):
threshold_list_start = []
threshold_list_end = []
for i in range(start, end, portion):
threshold_list_start.append(i+1)
threshold_list_end.append(i)
threshold_list_start[0] = start
threshold_list_end = threshold_list_end[1:]
threshold_list_end.append(end)
ip_range_list = list(zip(threshold_list_start, threshold_list_end))
return ip_range_list
def divide_list(my_list, n):
return [my_list[i * n:(i + 1) * n] for i in range((len(my_list) + n - 1) // n)]
def create_buckets(divided_range_list):
buckets_list = []
for range_x in divided_range_list:
for range_y in divided_range_list:
buckets_list.append((range_x, range_y))
divided_list = divide_list(buckets_list, len(divided_range_list))
even_list = divided_list[0::2]
odd_list = divided_list[1::2]
for sublist in odd_list:
sublist.reverse()
result = [None] * (len(even_list) + len(odd_list))
result[::2] = even_list
result[1::2] = odd_list
final_list = [j for i in result for j in i]
return final_list
def list_bucketing(points_list, buckets_list):
bucket_points_dict = dict()
bucket_points_list = []
for point in points_list:
for bucket in buckets_list:
if bucket[0][0] <= point[0] <= bucket[0][1] and bucket[1][0] <= point[1] <= bucket[1][1]:
if bucket not in bucket_points_dict:
bucket_points_dict[bucket] = [point]
else:
bucket_points_dict[bucket].append(point)
for bucket in buckets_list:
bucket_points_list.append((bucket, bucket_points_dict[bucket]))
bucket_points_list = [list(grp) for k, grp in groupby(bucket_points_list)]
bucket_points_list_final = []
for elem in bucket_points_list:
[unpacked_elem] = elem
bucket_points_list_final.append(unpacked_elem)
return bucket_points_list_final
def find_starting_point_for_square(bucket_points_list):
starting_points_in_squares_list = []
for bucket_pointslist_tuple in bucket_points_list:
edge_coords = (bucket_pointslist_tuple[0][0][0], bucket_pointslist_tuple[0][1][0])
starting_point_index = algorithms_utils.find_nearest_neighbour(edge_coords, bucket_pointslist_tuple[1])[1]
starting_point = bucket_pointslist_tuple[1][starting_point_index]
starting_points_in_squares_list.append(starting_point)
return starting_points_in_squares_list
def visualize_path(indices_path_list, points_list, grid=False):
list_for_numpy = []
for index in indices_path_list:
list_for_numpy.append([points_list[index][0], points_list[index][1]])
data = np.array(list_for_numpy)
if grid == True:
fig = plt.figure()
ax = fig.gca()
ax.set_xticks(np.arange(0, 20001, SQUARE_DIVIDOR))
ax.set_yticks(np.arange(0, 20001, SQUARE_DIVIDOR))
plt.grid()
plt.plot(data[:, 0], data[:, 1], linewidth=LINE_WIDTH)
plt.show()
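
# Pipeline sketch (hypothetical input file, tying the helpers together):
#   points = load_file_to_list('points.csv')
#   size = find_outer_square_size(points)
#   buckets = create_buckets(divide_range(SQUARE_DIVIDOR, 0, size))
#   bucketed = list_bucketing(points, buckets)
#   starts = find_starting_point_for_square(bucketed)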
| [
"[email protected]"
] | |
abfb14599399def4b82f686676637764d4d74c00 | 98f730ec6a43d8be4a34b0f2a44a9d35989d2287 | /pynifi_client/models/prioritizer_types_entity.py | 077f502372d2648325475e08fbe1e818db88e730 | [] | no_license | scottwr98/pynifi-client | 9337a4f322536ee466d419a788b8b5948cdc62d7 | 013ac2ffa591284a0d6cbb9ed552681cc6f91165 | refs/heads/master | 2020-04-18T08:47:03.680749 | 2017-11-04T23:59:58 | 2017-11-04T23:59:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,782 | py | # coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service. # noqa: E501
OpenAPI spec version: 1.4.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from pynifi_client.models.documented_type_dto import DocumentedTypeDTO # noqa: F401,E501
class PrioritizerTypesEntity(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'prioritizer_types': 'list[DocumentedTypeDTO]'
}
attribute_map = {
'prioritizer_types': 'prioritizerTypes'
}
def __init__(self, prioritizer_types=None): # noqa: E501
"""PrioritizerTypesEntity - a model defined in Swagger""" # noqa: E501
self._prioritizer_types = None
self.discriminator = None
if prioritizer_types is not None:
self.prioritizer_types = prioritizer_types
@property
def prioritizer_types(self):
"""Gets the prioritizer_types of this PrioritizerTypesEntity. # noqa: E501
:return: The prioritizer_types of this PrioritizerTypesEntity. # noqa: E501
:rtype: list[DocumentedTypeDTO]
"""
return self._prioritizer_types
@prioritizer_types.setter
def prioritizer_types(self, prioritizer_types):
"""Sets the prioritizer_types of this PrioritizerTypesEntity.
:param prioritizer_types: The prioritizer_types of this PrioritizerTypesEntity. # noqa: E501
:type: list[DocumentedTypeDTO]
"""
self._prioritizer_types = prioritizer_types
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PrioritizerTypesEntity):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
66379b8547c3b4354da6d9e4f77292594852c4e9 | 5d2f12197894a7998e609c40bfb5dfcb6eb93382 | /textbook-work/stacks_and_queues.py | dfc22ed71b3ba7431a9e67120d3d8330ba67d302 | [] | no_license | dawes206/leet-code | cb5167c6e3f17c24dad9c4b69e5c5f98c9bb892c | d4895982ff07db0e44f7c30afb501f4ef9ecb9c2 | refs/heads/master | 2020-05-30T10:47:42.932138 | 2019-06-07T23:43:20 | 2019-06-07T23:43:20 | 189,681,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,595 | py | class Stack(list):
def __init__(self, size):
self.top = 0
for i in [None]*size:
self.append(i)
    def push(self, num):
        if self[self.top] is None:
            self[self.top] = num
        elif self.top + 1 < len(self):
            self.top = self.top + 1
            self[self.top] = num
        else:
            # the fixed-size backing list is full
            print('overflow')
    def pop(self):
        # underflow check: the backing list is pre-filled with None, so its
        # length is never 0; test the top pointer instead
        if self.top < 0 or self[self.top] is None:
            print('underflow')
        else:
            popped = self[self.top]
            self.top -= 1
            return popped
# test = Stack(5)
# test.push(3)
# test.push(4)
# test.push(3)
# print('test after pushing', test)
# r= test.pop()
# print('pointer and val after pop: ',test.top, test[test.top])
# print('value of popped: ', r)
# r2 = test.pop()
# print('pointer and val after pop2: ',test.top, test[test.top])
# print('value of popped2: ', r2)
class Queue(list):
def __init__(self,size):
for i in [None]*size:
self.append(i)
self.head = 0
self.tail = 0
def enqueue(self,num):
# print('old tail: ', self[self.tail])
self[self.tail]=num
self.tail += 1
if self.tail == len(self):
self.tail = 0
# print('new tail -1 : ', self[self.tail -1 ])
def dequeue(self):
x = self[self.head]
self.head += 1
        if self.head == len(self):  # wrap-around; the old check (len + 1) was never reached
self.head = 0
return x
# test= Queue(5)
# print(test)
# test.enqueue(3)
# test.enqueue(5)
# test.enqueue(6)
# print(test)
# d1 = test.dequeue()
# print('d1: ', d1)
# print('head', test[test.head])
# d2 = test.dequeue()
# print('d2: ', d2)
# print(test)
| [
"[email protected]"
] | |
cf1489565952ed71607ef160b9f9c9ee52dbdcbd | 939abe018ff10248ae2236575b0d2f3410c096d4 | /1 exercise-rekognition/FlaskApp/application.py | 243a1aa360fe88e724ab5816137b9a864bb39108 | [] | no_license | ge8/photo-app | f0bd911c4980dc58ff3156ad20a91e3971e648e6 | c18e7f0c25629cd84e115615cbc630f87832e376 | refs/heads/master | 2020-03-22T16:51:32.227428 | 2018-08-22T13:55:03 | 2018-08-22T13:55:03 | 140,356,745 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,393 | py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except
# in compliance with the License. A copy of the License is located at
#
# https://aws.amazon.com/apache-2-0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
"Demo Flask application"
import sys
import requests
import boto3
from flask import Flask, render_template_string
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileRequired
import config
import util
application = Flask(__name__)
application.secret_key = config.FLASK_SECRET
### FlaskForm set up
class PhotoForm(FlaskForm):
"""flask_wtf form class the file upload"""
photo = FileField('image', validators=[
FileRequired()
])
@application.route("/", methods=('GET', 'POST'))
def home():
"""Homepage route"""
all_labels = ["No labels yet"]
#####
# s3 getting a list of photos in the bucket
#####
s3_client = boto3.client('s3')
prefix = "photos/"
response = s3_client.list_objects(
Bucket=config.PHOTOS_BUCKET,
Prefix=prefix
)
photos = []
if 'Contents' in response and response['Contents']:
photos = [s3_client.generate_presigned_url(
'get_object',
Params={'Bucket': config.PHOTOS_BUCKET, 'Key': content['Key']}
) for content in response['Contents']]
form = PhotoForm()
url = None
if form.validate_on_submit():
image_bytes = util.resize_image(form.photo.data, (300, 300))
if image_bytes:
#######
# s3 excercise - save the file to a bucket
#######
key = prefix + util.random_hex_bytes(8) + '.png'
s3_client.put_object(
Bucket=config.PHOTOS_BUCKET,
Key=key,
Body=image_bytes,
ContentType='image/png'
)
# http://boto3.readthedocs.io/en/latest/guide/s3.html#generating-presigned-urls
url = s3_client.generate_presigned_url(
'get_object',
Params={'Bucket': config.PHOTOS_BUCKET, 'Key': key})
#######
# rekcognition exercise
#######
rek = boto3.client('rekognition')
response = rek.detect_labels(
Image={
'S3Object': {
'Bucket': config.PHOTOS_BUCKET,
'Name': key
}
})
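            # response['Labels'] is a list of dicts like
            # {'Name': 'Dog', 'Confidence': 98.1, ...}; keep only the names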
all_labels = [label['Name'] for label in response['Labels']]
return render_template_string("""
{% extends "main.html" %}
{% block content %}
<h4>Upload Photo</h4>
<form method="POST" enctype="multipart/form-data" action="{{ url_for('home') }}">
{{ form.csrf_token }}
<div class="control-group">
<label class="control-label">Photo</label>
{{ form.photo() }}
</div>
<div class="control-group">
<div class="controls">
<input class="btn btn-primary" type="submit" value="Upload">
</div>
</div>
</form>
{% if url %}
<hr/>
<h3>Uploaded!</h3>
<img src="{{url}}" /><br/>
{% for label in all_labels %}
<span class="label label-info">{{label}}</span>
{% endfor %}
{% endif %}
{% if photos %}
<hr/>
<h4>Photos</h4>
{% for photo in photos %}
<img width="150" src="{{photo}}" />
{% endfor %}
{% endif %}
{% endblock %}
""", form=form, url=url, photos=photos, all_labels=all_labels)
@application.route("/info")
def info():
"Webserver info route"
metadata = "http://169.254.169.254"
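    # 169.254.169.254 is the EC2 instance metadata service (IMDS)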
instance_id = requests.get(metadata +
"/latest/meta-data/instance-id").text
availability_zone = requests.get(metadata +
"/latest/meta-data/placement/availability-zone").text
return render_template_string("""
{% extends "main.html" %}
{% block content %}
<b>instance_id</b>: {{instance_id}} <br/>
<b>availability_zone</b>: {{availability_zone}} <br/>
<b>sys.version</b>: {{sys_version}} <br/>
{% endblock %}""",
instance_id=instance_id,
availability_zone=availability_zone,
sys_version=sys.version)
if __name__ == "__main__":
# http://flask.pocoo.org/docs/0.12/errorhandling/#working-with-debuggers
# https://docs.aws.amazon.com/cloud9/latest/user-guide/app-preview.html#app-preview-share
use_c9_debugger = False
application.run(use_debugger=not use_c9_debugger, debug=True,
use_reloader=not use_c9_debugger, host='0.0.0.0')
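# --- Sketch: the `util` helpers used above live in the accompanying util
# module, which is not shown here. A minimal guess at their behaviour,
# assuming Pillow handles the resize; names are suffixed _sketch to make
# clear these are assumptions, not the actual implementation.
import io
import os
from PIL import Image
def random_hex_bytes_sketch(n_bytes):
    # hex string from n random bytes (assumed equivalent of util.random_hex_bytes)
    return os.urandom(n_bytes).hex()
def resize_image_sketch(file_storage, size):
    # thumbnail the uploaded file to `size` and return PNG bytes, or None on
    # failure (assumed equivalent of util.resize_image)
    try:
        image = Image.open(file_storage)
        image.thumbnail(size)
        out = io.BytesIO()
        image.save(out, 'PNG')
        return out.getvalue()
    except IOError:
        return None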
| [
"[email protected]"
] | |
6bb39bf0814bcf4b4e5787dcdfa7a670bd2b8146 | 16fa0fd1cc6fa4f8996a626ab4cc625e5e33207c | /Contact_me/migrations/0001_initial.py | a75e17d6b2553a210609651e27dd2ba25621407f | [] | no_license | Arash3f/zoro_blog | ead1fba404f8140f4f7b13b23515fa280063072f | 9157afe5352481b8befa15bdd2ef093297b52cf0 | refs/heads/master | 2023-07-19T04:31:27.348756 | 2021-08-29T00:30:45 | 2021-08-29T00:30:45 | 400,917,468 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | # Generated by Django 3.2.6 on 2021-08-28 12:44
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=30, null=True, verbose_name='Name')),
('email', models.EmailField(blank=True, max_length=254, null=True, verbose_name='Email')),
('subject', models.CharField(blank=True, max_length=30, null=True, verbose_name='Subject')),
('message', models.TextField(blank=True, max_length=300, null=True, verbose_name='Mesmessage')),
],
options={
'verbose_name': 'Contact',
'verbose_name_plural': 'Contacts',
},
),
]
| [
"[email protected]"
] | |
c9b56f5d05836f4ddbd82f57359b3e2ec025ed94 | 46fdd2668392639d4f21e660ab3523958af17b30 | /avgn/custom_parsing/bird_db.py | 59e96922468691bb4969c4cf717359abc943a175 | [
"MIT"
] | permissive | timsainb/avgn_paper | fe45d241597f9167e87f41123a405e9ca315663c | 63e25ca535a230f96b7fb1017728ead6ee0bf36b | refs/heads/V2 | 2023-04-12T05:10:19.641026 | 2020-11-20T00:04:17 | 2020-11-20T00:04:17 | 192,464,252 | 39 | 17 | MIT | 2022-09-01T17:27:02 | 2019-06-18T04:19:23 | Jupyter Notebook | UTF-8 | Python | false | false | 2,883 | py | import librosa
from avgn.utils.json import NoIndent, NoIndentEncoder
import pandas as pd
from datetime import datetime
from praatio import tgio
from avgn.utils.paths import DATA_DIR, ensure_dir
from avgn.utils.audio import get_samplerate
import json
from datetime import time as dtt
def generate_json(wavfile, DT_ID, song_db):
indv = wavfile.parent.parent.stem
dt = datetime.strptime(wavfile.stem, "%Y-%m-%d_%H-%M-%S-%f")
datestring = dt.strftime("%Y-%m-%d")
row = song_db[
(song_db.SubjectName == indv)
& (song_db.recording_date == datestring)
& (song_db.recording_time == dt.time())
].iloc[0]
# make json dictionary
json_dict = {}
for key in dict(row).keys():
if type(row[key]) == pd._libs.tslibs.timestamps.Timestamp:
json_dict[key] = row[key].strftime("%Y-%m-%d_%H-%M-%S")
elif type(row[key]) == dtt:
json_dict[key] = row[key].strftime("%H:%M:%S")
elif type(row[key]) == pd._libs.tslibs.nattype.NaTType:
continue
else:
json_dict[key] = row[key]
species_dict = {
"CAVI": {"species": "Vireo cassinii", "common_name": "Cassin's vireo"},
"CATH": {
"species": "Toxostoma redivivum",
"common_name": "California thrasher",
},
}
    DATASET_ID = "BIRD_DB_" + species_dict[row.Species_short_name]["species"].replace(" ", "_")
json_dict["species"] = species_dict[row.Species_short_name]["species"]
json_dict["common_name"] = species_dict[row.Species_short_name]["common_name"]
json_dict["datetime"] = datestring
sr = get_samplerate(wavfile.as_posix())
wav_duration = librosa.get_duration(filename=wavfile.as_posix())
json_dict["wav_loc"] = wavfile.as_posix()
# rate and length
json_dict["samplerate_hz"] = sr
json_dict["length_s"] = wav_duration
tg = wavfile.parent.parent / "TextGrids" / (wavfile.stem + ".TextGrid")
if not tg.exists():
print(tg.as_posix(), 'File does not exist')
return
textgrid = tgio.openTextgrid(fnFullPath=tg)
tierlist = textgrid.tierDict[textgrid.tierNameList[0]].entryList
start_times = [i.start for i in tierlist]
end_times = [i.end for i in tierlist]
labels = [i.label for i in tierlist]
json_dict["indvs"] = {
indv: {
"syllables": {
"start_times": NoIndent(start_times),
"end_times": NoIndent(end_times),
"labels": NoIndent(labels),
}
}
}
# generate json
json_txt = json.dumps(json_dict, cls=NoIndentEncoder, indent=2)
json_out = (
DATA_DIR / "processed" / DATASET_ID / DT_ID / "JSON" / (wavfile.stem + ".JSON")
)
# save json
ensure_dir(json_out.as_posix())
print(json_txt, file=open(json_out.as_posix(), "w"))
| [
"[email protected]"
] | |
9eb1a6445fdf8895df59d5415988e23973d31f8d | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/cv/object_tracking/SiamMask_for_Pytorch/experiments/siammask_sharp/custom.py | 455d4557b8a40ebc871dd01fb38c856582b605f1 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 7,820 | py | # Copyright (c) Facebook, Inc. and its affiliates.
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from models.siammask_sharp import SiamMask
from models.features import MultiStageFeature
from models.rpn import RPN, DepthCorr
from models.mask import Mask
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.load_helper import load_pretrain
from resnet import resnet50
class ResDownS(nn.Module):
def __init__(self, inplane, outplane):
super(ResDownS, self).__init__()
self.downsample = nn.Sequential(
nn.Conv2d(inplane, outplane, kernel_size=1, bias=False),
nn.BatchNorm2d(outplane))
def forward(self, x):
x = self.downsample(x)
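        # small (template-branch) maps are cropped 4 px per side, e.g. 15x15 -> 7x7,
        # to drop border effects introduced by padding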
if x.size(3) < 20:
l = 4
r = -4
x = x[:, :, l:r, l:r]
return x
class ResDown(MultiStageFeature):
def __init__(self, pretrain=False):
super(ResDown, self).__init__()
self.features = resnet50(layer3=True, layer4=False)
if pretrain:
load_pretrain(self.features, 'resnet.model')
self.downsample = ResDownS(1024, 256)
self.layers = [self.downsample, self.features.layer2, self.features.layer3]
self.train_nums = [1, 3]
self.change_point = [0, 0.5]
self.unfix(0.0)
def param_groups(self, start_lr, feature_mult=1):
lr = start_lr * feature_mult
def _params(module, mult=1):
params = list(filter(lambda x:x.requires_grad, module.parameters()))
if len(params):
return [{'params': params, 'lr': lr * mult}]
else:
return []
groups = []
groups += _params(self.downsample)
groups += _params(self.features, 0.1)
return groups
def forward(self, x):
output = self.features(x)
p3 = self.downsample(output[-1])
return p3
def forward_all(self, x):
output = self.features(x)
p3 = self.downsample(output[-1])
return output, p3
class UP(RPN):
def __init__(self, anchor_num=5, feature_in=256, feature_out=256):
super(UP, self).__init__()
self.anchor_num = anchor_num
self.feature_in = feature_in
self.feature_out = feature_out
self.cls_output = 2 * self.anchor_num
self.loc_output = 4 * self.anchor_num
self.cls = DepthCorr(feature_in, feature_out, self.cls_output)
self.loc = DepthCorr(feature_in, feature_out, self.loc_output)
def forward(self, z_f, x_f):
cls = self.cls(z_f, x_f)
loc = self.loc(z_f, x_f)
return cls, loc
class MaskCorr(Mask):
def __init__(self, oSz=63):
super(MaskCorr, self).__init__()
self.oSz = oSz
self.mask = DepthCorr(256, 256, self.oSz**2)
def forward(self, z, x):
return self.mask(z, x)
class Refine(nn.Module):
def __init__(self):
super(Refine, self).__init__()
self.v0 = nn.Sequential(nn.Conv2d(64, 16, 3, padding=1), nn.ReLU(),
nn.Conv2d(16, 4, 3, padding=1),nn.ReLU())
self.v1 = nn.Sequential(nn.Conv2d(256, 64, 3, padding=1), nn.ReLU(),
nn.Conv2d(64, 16, 3, padding=1), nn.ReLU())
self.v2 = nn.Sequential(nn.Conv2d(512, 128, 3, padding=1), nn.ReLU(),
nn.Conv2d(128, 32, 3, padding=1), nn.ReLU())
self.h2 = nn.Sequential(nn.Conv2d(32, 32, 3, padding=1), nn.ReLU(),
nn.Conv2d(32, 32, 3, padding=1), nn.ReLU())
self.h1 = nn.Sequential(nn.Conv2d(16, 16, 3, padding=1), nn.ReLU(),
nn.Conv2d(16, 16, 3, padding=1), nn.ReLU())
self.h0 = nn.Sequential(nn.Conv2d(4, 4, 3, padding=1), nn.ReLU(),
nn.Conv2d(4, 4, 3, padding=1), nn.ReLU())
self.deconv = nn.ConvTranspose2d(256, 32, 15, 15)
self.post0 = nn.Conv2d(32, 16, 3, padding=1)
self.post1 = nn.Conv2d(16, 4, 3, padding=1)
self.post2 = nn.Conv2d(4, 1, 3, padding=1)
for modules in [self.v0, self.v1, self.v2, self.h2, self.h1, self.h0, self.deconv, self.post0, self.post1, self.post2,]:
for l in modules.modules():
if isinstance(l, nn.Conv2d):
nn.init.kaiming_uniform_(l.weight, a=1)
def forward(self, f, corr_feature, pos=None, test=False):
if test:
p0 = torch.nn.functional.pad(f[0], [16, 16, 16, 16])[:, :, 4*pos[0]:4*pos[0]+61, 4*pos[1]:4*pos[1]+61]
p1 = torch.nn.functional.pad(f[1], [8, 8, 8, 8])[:, :, 2 * pos[0]:2 * pos[0] + 31, 2 * pos[1]:2 * pos[1] + 31]
p2 = torch.nn.functional.pad(f[2], [4, 4, 4, 4])[:, :, pos[0]:pos[0] + 15, pos[1]:pos[1] + 15]
else:
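            # training path: F.unfold extracts every sliding-window patch at the
            # matching scale; `pos` then selects the patches to refine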
p0 = F.unfold(f[0], (61, 61), padding=0, stride=4).permute(0, 2, 1).contiguous().view(-1, 64, 61, 61)
if not (pos is None): p0 = torch.index_select(p0, 0, pos)
p1 = F.unfold(f[1], (31, 31), padding=0, stride=2).permute(0, 2, 1).contiguous().view(-1, 256, 31, 31)
if not (pos is None): p1 = torch.index_select(p1, 0, pos)
p2 = F.unfold(f[2], (15, 15), padding=0, stride=1).permute(0, 2, 1).contiguous().view(-1, 512, 15, 15)
if not (pos is None): p2 = torch.index_select(p2, 0, pos)
if not(pos is None):
p3 = corr_feature[:, :, pos[0], pos[1]].view(-1, 256, 1, 1)
else:
p3 = corr_feature.permute(0, 2, 3, 1).contiguous().view(-1, 256, 1, 1)
out = self.deconv(p3)
out = self.post0(F.upsample(self.h2(out) + self.v2(p2), size=(31, 31)))
out = self.post1(F.upsample(self.h1(out) + self.v1(p1), size=(61, 61)))
out = self.post2(F.upsample(self.h0(out) + self.v0(p0), size=(127, 127)))
out = out.view(-1, 127*127)
return out
def param_groups(self, start_lr, feature_mult=1):
params = filter(lambda x:x.requires_grad, self.parameters())
params = [{'params': params, 'lr': start_lr * feature_mult}]
return params
class Custom(SiamMask):
def __init__(self, pretrain=False, **kwargs):
super(Custom, self).__init__(**kwargs)
self.features = ResDown(pretrain=pretrain)
self.rpn_model = UP(anchor_num=self.anchor_num, feature_in=256, feature_out=256)
self.mask_model = MaskCorr()
self.refine_model = Refine()
def refine(self, f, pos=None):
return self.refine_model(f, pos)
def template(self, template):
self.zf = self.features(template)
def track(self, search):
search = self.features(search)
rpn_pred_cls, rpn_pred_loc = self.rpn(self.zf, search)
return rpn_pred_cls, rpn_pred_loc
def track_mask(self, search):
self.feature, self.search = self.features.forward_all(search)
rpn_pred_cls, rpn_pred_loc = self.rpn(self.zf, self.search)
self.corr_feature = self.mask_model.mask.forward_corr(self.zf, self.search)
pred_mask = self.mask_model.mask.head(self.corr_feature)
return rpn_pred_cls, rpn_pred_loc, pred_mask
def track_refine(self, pos):
pred_mask = self.refine_model(self.feature, self.corr_feature, pos=pos, test=True)
return pred_mask
| [
"[email protected]"
] | |
5157e077ce5ffc745c29f7e368d3e5a341315683 | 394b9ab2160d7a04c01e1c0fac6bf8eb11cd443c | /morty.py | 52dfbaabc8f0c582c8fb59422207bfb4f2d595e8 | [] | no_license | skbmir/Morty | 426fdb8ac142e7c683cab85d5336d717e6b13383 | 43fed219b2aa62486a1994dd9757a49646e788ae | refs/heads/master | 2020-04-12T11:02:14.096657 | 2018-12-19T18:29:43 | 2018-12-19T18:29:43 | 162,447,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,373 | py | import os
import time
import random
import re
from slackclient import SlackClient
class Morty():
def __init__(self, token):
self.token = token
self.id = -1
self.name = 'Morty'
self._client = SlackClient(self.token)
def connect(self):
if self._client.rtm_connect(with_team_state=False):
print('Morty connected.')
self._query_id()
print('My Slack id is: {}.'.format(self.id))
return True
else:
print('Connection failed.')
return False
def main_loop(self):
while True:
command, channel = self._handle_events(self._client.rtm_read())
if command:
self._handle_commands(command, channel)
time.sleep(0.5)
def _query_id(self):
self.id = self._client.api_call('auth.test')['user_id']
def _handle_events(self, events):
if len(events) > 0:
for event in events:
if event['type'] == 'message' and not 'subtype' in event:
user_id, message = self._get_mention(event['text'])
if user_id == self.id:
return message, event['channel']
return None, None
def _get_mention(self, msg):
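        # e.g. "<@U012AB3CD> /help" -> ("U012AB3CD", "/help"); (None, None) without a mention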
matches = re.search('^<@(.+)>.(.*)', msg)
return (matches.group(1), matches.group(2).strip()) if matches else (None, None)
def _handle_commands(self, cmd, chnl):
        def_msgs = [
            'no clue what that is. ¯\_(ツ)_/¯',
            '╮ (. ❛ ᴗ ❛.) ╭',
            '(・_・ヾ'
        ]
        response = None
        if cmd.startswith('/test'):
            response = 'tested. ヘ( ^o^)ノ\(^_^ )'
        elif cmd.startswith('/help'):
            response = ''' Help:
            /help - this help message
            /test - test the bot
            привет - a greeting ("hello")
            anything else - the bot will not understand
            '''
        elif cmd.startswith('привет') or cmd.startswith('Привет'):  # "hello"
            response = 'Right back at you.'
        else:
            response = random.choice(def_msgs)
self._client.api_call(
'chat.postMessage',
channel=chnl,
text=response
)
| [
"[email protected]"
] | |
2f6d6e8502daa5a977294adc0b6e50a2d2e9437f | 504f47c6421cb691e7b2381d46f2ed079bb1f6b4 | /carlist/admin.py | 2d6f24f99d384765d99f91e127268dc8d1460084 | [] | no_license | kremerNK/cardealership | 3e4dd10b338f9c5fd22351213b347216ac27d99b | e3c409187fc9e2acb6d61c215c38655d06922f5d | refs/heads/master | 2022-05-31T21:24:45.065635 | 2020-04-28T18:37:22 | 2020-04-28T18:37:22 | 248,649,838 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | from django.contrib import admin
from .models import Vehicle
# Register your models here.
admin.site.register(Vehicle) | [
"[email protected]"
] | |
3f93d691c1cf7bd55311c6b0acb04564ea975929 | d8b10ffdd20256520f551ae62779e4f604d60c3c | /Client1.py | 785595993f807e9c8452abf473f96af7c5b2c047 | [] | no_license | Ammar-Abid92/Chat-Application | 4c7d6f8c852e2a3603cce9b1d34de47c740e4b0b | c8c22e3e049d069f3969b486dccbd479fc825df4 | refs/heads/main | 2023-04-03T17:23:59.812955 | 2021-04-07T16:11:33 | 2021-04-07T16:11:33 | 355,607,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,038 | py | # import all the required modules
import socket
import threading
from tkinter import *
from tkinter import font
from tkinter import ttk
# from chat import *
PORT = 5000
SERVER = "192.168.0.153"
ADDRESS = (SERVER, PORT)
FORMAT = "utf-8"
# Creating a new client socket and connect to the server
client = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
client.connect(ADDRESS)
# GUI class for the chat
class CampCo:
# constructor method
def __init__(self):
# chat window
self.Window = Tk()
        self.Window.iconbitmap(r"E:\Ammar work\CAMCom\icon.ico")
self.Window.withdraw()
self.login = Toplevel()
        self.login.iconbitmap(r"E:\Ammar work\CAMCom\icon.ico")
        self.bg = PhotoImage(file=r'E:\Ammar work\CAMCom\Logo.png')
self.l = Label(self.login, image=self.bg)
self.l.place(x=0, y=0, relwidth=1, relheight=1)
# self.l.after(3000, self.l.destroy)
# set the title
self.login.title("Login")
self.login.resizable(width=False,
height=False)
self.login.geometry("587x480+400+100")
def clear_login(event):
self.entryName.delete(0, END)
self.entryName = Entry(self.login,
font="Helvetica 14")
self.entryName.insert(0, "Name")
self.entryName.bind("<Button-1>", clear_login)
self.entryName.place(relwidth=0.4,
relheight=0.08,
relx=0.25,
rely=0.7)
self.entryName.focus()
self.go = Button(self.login,
text="CONTINUE",
font="Helvetica 14 bold", bg="#008080",
command=lambda: self.goAhead(self.entryName.get()))
self.go.place(relx=0.68,
rely=0.7)
self.Window.mainloop()
def goAhead(self, name):
self.login.destroy()
self.layout(name)
# the thread to receive messages
rcv = threading.Thread(target=self.receive)
rcv.start()
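        # receive on a background thread so the Tk mainloop stays responsive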
# The main layout of the chat
def layout(self, name):
self.name = name
# to show chat window
self.Window.deiconify()
self.Window.title("CampCo")
self.Window.geometry("500x600+450+50")
self.Window.resizable(width=False,
height=False)
self.Window.configure(bg="#02D6D9")
self.labelHead = Label(self.Window,
bg="#008080",
fg="#EAECEE",
text=self.name,
font="Helvetica 13 bold",
pady=5)
self.labelHead.place(relwidth=1)
self.line = Label(self.Window,
width=450,
bg="#02D6D9")
self.line.place(relwidth=1,
rely=0.07,
relheight=0.012)
self.textCons = Text(self.Window,
width=20,
height=10,
bg="#008080",
fg="#EAECEE",
font="Helvetica 14",
padx=5,
pady=5)
self.textCons.place(relheight=0.745,
relwidth=1,
rely=0.08)
self.labelBottom = Label(self.Window,
bg="#02D6D9",
height=50)
self.labelBottom.place(relwidth=1,
rely=0.825)
self.entryMsg = Entry(self.labelBottom,
bg="#018788",
fg="#EAECEE",
font="Georgia 18")
self.entryMsg.place(relwidth=0.74,
relheight=0.07,
rely=0.035,
relx=0.011)
self.buttonMsg = Button(self.labelBottom,
text="Send",
font="Helvetica 10 bold",
width=20,
bg="#018788",
command=lambda: self.sendButton(self.entryMsg.get()))
self.buttonMsg.place(relx=0.77,
rely=0.035,
relheight=0.07,
relwidth=0.22)
self.textCons.config(cursor="arrow")
scrollbar = Scrollbar(self.textCons)
scrollbar.place(relheight=1,
relx=0.974)
scrollbar.config(command=self.textCons.yview)
self.textCons.config(state=DISABLED)
def sendButton(self, msg):
self.textCons.config(state=DISABLED)
self.msg = msg
self.entryMsg.delete(0, END)
snd = threading.Thread(target=self.sendMessage)
snd.start()
def receive(self):
while True:
try:
message = client.recv(1024).decode(FORMAT)
if message == 'NAME':
client.send(self.name.encode(FORMAT))
else:
self.textCons.config(state=NORMAL)
self.textCons.insert(END, message + "\n\n")
self.textCons.config(state=DISABLED)
self.textCons.see(END)
except:
print("An error occured!")
client.close()
break
    def sendMessage(self):
        self.textCons.config(state=DISABLED)
        message = f"{self.name}: {self.msg}"
        client.send(message.encode(FORMAT))
p = CampCo()
| [
"[email protected]"
] | |
819377a42faca137e6f3c8552ba40278d718d397 | 61dfa0ac80a6979d135e969b5b7b78a370c16904 | /analysis/sph/sph_to_grid.py | b8e2e61e6f5694b4914d9dc415446eab48893b98 | [] | no_license | bvillasen/cosmo_tools | 574d84f9c18d92d2a9610d1d156113730d80f5a4 | 6bb54534f2242a15a6edcf696f29a3cf22edd342 | refs/heads/master | 2021-07-13T06:43:32.902153 | 2020-10-05T21:17:30 | 2020-10-05T21:17:30 | 207,036,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,248 | py | import sys, os, time
import numpy as np
import h5py as h5
import matplotlib.pyplot as plt
from scipy.spatial import KDTree
cosmo_dir = os.path.dirname(os.path.dirname(os.getcwd())) + '/'
subDirectories = [x[0] for x in os.walk(cosmo_dir)]
sys.path.extend(subDirectories)
from tools import *
from sph_functions import *
from domain_decomposition import get_domain_block
from internal_energy import get_temp
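# hydrogen (X) and helium (Y) mass fractions of the gas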
X = 0.75984603480 + 1.53965115054e-4
Y = 0.23999999997 + 9.59999999903e-15 + 9.59999999903e-18
use_mpi = True
if use_mpi :
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
nprocs = comm.Get_size()
else:
rank = 0
nprocs = 1
print_out = False
if rank == 0: print_out = True
dataDir = '/data/groups/comp-astro/bruno/'
inDir = dataDir + 'cosmo_sims/ewald_512/particles_files/'
output_dir = dataDir + 'cosmo_sims/ewald_512/grid_files/'
if rank == 0: create_directory( output_dir )
if use_mpi: comm.Barrier()
Lbox = 10.
proc_grid = [ 8, 8, 8]
box_size = [ Lbox, Lbox, Lbox ]
grid_size = [ 512, 512, 512 ]
domain = get_domain_block( proc_grid, box_size, grid_size )
domain_x = domain[rank]['box']['x']
domain_y = domain[rank]['box']['y']
domain_z = domain[rank]['box']['z']
grid_x = domain[rank]['grid']['x']
grid_y = domain[rank]['grid']['y']
grid_z = domain[rank]['grid']['z']
dx = Lbox / grid_size[0]
dy = Lbox / grid_size[1]
dz = Lbox / grid_size[2]
nSnap = 11
in_file_name = inDir + '{0}_particles.h5.{1}'.format(nSnap, rank)
if print_out: print("Loading File: ", in_file_name)
inFile = h5.File( in_file_name, 'r' )
current_z = inFile.attrs['current_z']
Lbox = inFile.attrs['Lbox']
Omega_M = inFile.attrs['Omega_M']
Omega_L = inFile.attrs['Omega_L']
h = inFile.attrs['h']
N_local = inFile.attrs['N_local']
hsml_max = inFile.attrs['hsml_max']
if print_out: print("N_local: ", N_local)
data = {}
if print_out: print('Loading Data ')
fields = [ 'mass', 'rho', 'u', 'hsml', 'pos_x', 'pos_y', 'pos_z', 'Nh', 'HeI', 'HeII' , 'vel_x' ]
for field in fields:
if print_out: print(" Loading Field ", field)
data[field] = inFile[field][...]
inFile.close()
if use_mpi: comm.Barrier()
pos_x = data['pos_x']
pos_y = data['pos_y']
pos_z = data['pos_z']
pos = np.array([ pos_x, pos_y, pos_z ]).T
mass = data['mass']
rho = data['rho']
u = data['u']
Nh = data['Nh']
HeI = data['HeI']
HeII = data['HeII']
hsml = data['hsml']
vel_x = data['vel_x']
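# Species mass densities: Nh is the neutral-hydrogen fraction and HeI/HeII the
# helium abundances relative to hydrogen (hence the factor X * rho * 4), so
# Y * rho is split between the three helium states below.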
mass_HI = Nh * X * mass
HI_rho = Nh * X * rho
HII_rho = X * rho - HI_rho
HeI_rho = HeI * X * rho * 4
HeII_rho = HeII * X * rho * 4
HeIII_rho = Y * rho - HeI_rho - HeII_rho
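# Mean molecular weight (in proton masses): each denominator term counts
# nuclei plus free electrons contributed per unit mass by that species.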
mu = rho / ( HI_rho + 2*HII_rho + ( HeI_rho + 2*HeII_rho + 3*HeIII_rho) / 4 )
# print mu.min(), mu.max()
if print_out: print('Building Tree')
tree = KDTree( pos )
offset = np.array([ grid_x[0], grid_y[0], grid_z[0] ])
dims_local = np.array([ grid_x[1] - grid_x[0], grid_y[1] - grid_y[0], grid_z[1] - grid_z[0] ])
data_kernel = {}
data_kernel['smooth'] = {}
data_kernel['scatter'] = {}
data_kernel['smooth']['density'] = np.zeros(dims_local)
data_kernel['smooth']['mu'] = np.zeros(dims_local)
data_kernel['smooth']['u'] = np.zeros(dims_local)
data_kernel['smooth']['vel_x'] = np.zeros(dims_local)
data_kernel['smooth']['HI_density_0'] = np.zeros(dims_local)
data_kernel['smooth']['HI_density'] = np.zeros(dims_local)
data_kernel['scatter']['density'] = np.zeros(dims_local)
data_kernel['scatter']['mu'] = np.zeros(dims_local)
data_kernel['scatter']['u'] = np.zeros(dims_local)
data_kernel['scatter']['vel_x'] = np.zeros(dims_local)
data_kernel['scatter']['HI_density_0'] = np.zeros(dims_local)
data_kernel['scatter']['HI_density'] = np.zeros(dims_local)
if print_out: print('Starting Grid Interpolation')
if use_mpi: comm.Barrier()
N_smooth = 64
n_total = dims_local[0] * dims_local[1] * dims_local[2]
counter = 0
start = time.time()
for indx_x in range( dims_local[0] ):
for indx_y in range( dims_local[1] ):
for indx_z in range( dims_local[2] ):
if ( counter % (n_total/128) == 0 ):
line = " Interpolating to Grid {0:.0f} %".format( 100.0 * float(counter)/ n_total)
print_line_flush( line )
# if counter > n_total/100: break
c_pos_x = ( offset[0] + indx_x + 0.5 ) * dx
c_pos_y = ( offset[1] + indx_y + 0.5 ) * dy
c_pos_z = ( offset[2] + indx_z + 0.5 ) * dz
c_pos = np.array([ c_pos_x, c_pos_y, c_pos_z])
r = hsml_max
neig_indices = tree.query_ball_point( c_pos, r )
N = len(neig_indices)
while N < N_smooth:
r = 2*r
neig_indices = tree.query_ball_point( c_pos, r )
N = len(neig_indices)
neig_indices = np.array( neig_indices )
neig_pos = pos[neig_indices]
delta_pos = neig_pos - c_pos
neig_distances = np.sqrt( (delta_pos**2).sum( axis = 1) )
neig_indices_sort = np.argsort( neig_distances )
neig_distances = neig_distances[neig_indices_sort]
neig_indices = neig_indices[neig_indices_sort]
h_smooth = neig_distances[N_smooth-1]
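            # adaptive smoothing length: distance to the N_smooth-th nearest neighbor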
if h_smooth == 0.0: print("ERROR: h=0 in rank: {0} indx: [ {1} {2} {3} ]".format( rank, indx_x, indx_y, indx_z ))
# Initializa the smooth values
smooth_mass = 0
smooth_rho = 0
smooth_GE = 0
smooth_mu_rho = 0
smooth_px = 0
smooth_HI_rho = 0
smooth_mass_HI = 0
# Initializa the scatter values
scatter_mass = 0
scatter_rho = 0
scatter_GE = 0
scatter_mu_rho = 0
scatter_px = 0
scatter_HI_rho = 0
scatter_mass_HI = 0
# Loop over the neighbors
for i,neig_id in enumerate(neig_indices):
neig_mass = mass[neig_id]
neig_rho = rho[neig_id]
neig_u = u[neig_id]
neig_mu = mu[neig_id]
neig_vx = vel_x[neig_id]
neig_hsml = hsml[neig_id]
neig_dist = neig_distances[i]
neig_HI_rho = HI_rho[neig_id]
neig_mass_HI = mass_HI[neig_id]
# Add to the scatter kernel values
if neig_dist <= neig_hsml:
W_scatter = kernel_gadget( neig_dist, neig_hsml )
scatter_mass += neig_mass * W_scatter
scatter_rho += neig_rho * W_scatter
scatter_GE += neig_rho * neig_u * W_scatter
scatter_mu_rho += neig_rho * neig_mu * W_scatter
scatter_px += neig_rho * neig_vx * W_scatter
scatter_HI_rho += neig_rho * neig_HI_rho * W_scatter
scatter_mass_HI += neig_mass_HI * W_scatter
# Add to the smooth kernel values
if i < N_smooth:
W_smooth = kernel_gadget( neig_dist, h_smooth )
smooth_mass += neig_mass * W_smooth
smooth_rho += neig_rho * W_smooth
smooth_GE += neig_rho * neig_u * W_smooth
smooth_mu_rho += neig_rho * neig_mu * W_smooth
smooth_px += neig_rho * neig_vx * W_smooth
smooth_HI_rho += neig_rho * neig_HI_rho * W_smooth
smooth_mass_HI += neig_mass_HI * W_smooth
# Write the kernel data to the 3D arrays
dens_smooth = smooth_mass * 10
u_smooth = smooth_GE / smooth_rho
mu_smooth = smooth_mu_rho / smooth_rho
vx_smooth = smooth_px / smooth_rho
HI_density_smooth_0 = smooth_HI_rho / smooth_rho * 10
HI_density_smooth = smooth_mass_HI * 10
data_kernel['smooth']['density'][indx_x, indx_y, indx_z] = dens_smooth
data_kernel['smooth']['u'][indx_x, indx_y, indx_z] = u_smooth
data_kernel['smooth']['mu'][indx_x, indx_y, indx_z] = mu_smooth
data_kernel['smooth']['vel_x'][indx_x, indx_y, indx_z] = vx_smooth
data_kernel['smooth']['HI_density_0'][indx_x, indx_y, indx_z] = HI_density_smooth_0
data_kernel['smooth']['HI_density'][indx_x, indx_y, indx_z] = HI_density_smooth
dens_scatter = scatter_mass * 10
u_scatter = scatter_GE / scatter_rho
mu_scatter = scatter_mu_rho / scatter_rho
vx_scatter = scatter_px / scatter_rho
HI_density_scatter_0 = scatter_HI_rho / scatter_rho * 10
HI_density_scatter = scatter_mass_HI * 10
data_kernel['scatter']['density'][indx_x, indx_y, indx_z] = dens_scatter
data_kernel['scatter']['u'][indx_x, indx_y, indx_z] = u_scatter
data_kernel['scatter']['mu'][indx_x, indx_y, indx_z] = mu_scatter
data_kernel['scatter']['vel_x'][indx_x, indx_y, indx_z] = vx_scatter
data_kernel['scatter']['HI_density_0'][indx_x, indx_y, indx_z] = HI_density_scatter_0
data_kernel['scatter']['HI_density'][indx_x, indx_y, indx_z] = HI_density_scatter
# temp = get_temp( u_local * 1e6, mu=mu_local)
# print dens_smooth
# if rank == 0: print dens_smooth / dens_scatter
counter += 1
if use_mpi: comm.Barrier()
if print_out: print("")
end = time.time()
if print_out: print(' Elapsed Time: {0:.2f} min'.format((end - start)/60.))
outputFileName = output_dir + "{0}.h5.{1}".format( nSnap, rank )
if print_out: print("Writing File: ", outputFileName)
outFile = h5.File( outputFileName, 'w' )
outFile.attrs['Current_z'] = np.array([current_z])
outFile.attrs['offset'] = offset
outFile.attrs['dims_local'] = dims_local
group_smooth = outFile.create_group( 'smooth' )
group_smooth.create_dataset('density', data=data_kernel['smooth']['density'] )
group_smooth.create_dataset('u', data=data_kernel['smooth']['u'] )
group_smooth.create_dataset('mu', data=data_kernel['smooth']['mu'] )
group_smooth.create_dataset('vel_x', data=data_kernel['smooth']['vel_x'] )
group_smooth.create_dataset('HI_density_0', data=data_kernel['smooth']['HI_density_0'] )
group_smooth.create_dataset('HI_density', data=data_kernel['smooth']['HI_density'] )
group_scatter = outFile.create_group( 'scatter' )
group_scatter.create_dataset('density', data=data_kernel['scatter']['density'] )
group_scatter.create_dataset('u', data=data_kernel['scatter']['u'] )
group_scatter.create_dataset('mu', data=data_kernel['scatter']['mu'] )
group_scatter.create_dataset('vel_x', data=data_kernel['scatter']['vel_x'] )
group_scatter.create_dataset('HI_density_0', data=data_kernel['scatter']['HI_density_0'] )
group_scatter.create_dataset('HI_density', data=data_kernel['scatter']['HI_density'] )
outFile.close()
if print_out: print("Saved File: ", outputFileName)
| [
"[email protected]"
] |