Dataset schema:

| column | dtype | stats |
|---|---|---|
| blob_id | string | lengths 40-40 |
| directory_id | string | lengths 40-40 |
| path | string | lengths 5-283 |
| content_id | string | lengths 40-40 |
| detected_licenses | sequence | lengths 0-41 |
| license_type | string | 2 classes |
| repo_name | string | lengths 7-96 |
| snapshot_id | string | lengths 40-40 |
| revision_id | string | lengths 40-40 |
| branch_name | string | 58 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 12.7k-662M, with nulls (⌀) |
| star_events_count | int64 | 0-35.5k |
| fork_events_count | int64 | 0-20.6k |
| gha_license_id | string | 11 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 43 classes |
| src_encoding | string | 9 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 7-5.88M |
| extension | string | 30 classes |
| content | string | lengths 7-5.88M |
| authors | sequence | lengths 1-1 |
| author | string | lengths 0-73 |
eb920e9f0de524db868175ec191b9a196ff81020 | 77f82fcab63aaa5972c2c26faf97989dc4a22f33 | /tictactoe-javiluna/Tic_Tac_Toe__repaso_funciones_.py | ec0f44b961769029ac7f055a9fa9c2c70e4a1378 | [] | no_license | albertogcmr/data-analytics-examples-lessons-stuff | 64ea20d395e8b8fc9348729e2c1b53b3ffad7684 | d565ed2a21d43c0420a73387a3976efcf70364cc | refs/heads/master | 2020-06-08T10:53:56.009487 | 2019-10-05T12:38:48 | 2019-10-05T12:38:48 | 193,216,571 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,719 | py | import json
CELL_EMPTY = " "
CELL_PLAYER_1 = "O"
CELL_PLAYER_2 = "X"
def initialize_game_state():
return {'turn': 0, 'board': initialize_board()}
def initialize_board():
"""
Creates a new empty board
:return: A board
"""
return [[CELL_EMPTY for _ in range(3)] for _ in range(3)]
def display_board(board):
"""
Displays a board
:param board: Board to be displayed
"""
print("\t" + "\t".join([str(i + 1) for i, cell in enumerate(board[0])]))
for i, row in enumerate(board):
print(i + 1, end='\t')
for j, cell in enumerate(row):
print(cell, end='\t')
print()
def set_chip_to_board(board, row, column, chip):
"""
Sets a chip to a cell in a board
:param board: Board to be modified
:param row: Row in which we will set the chip
:param column: Column in which we will set the chip
:param chip: Chip to be set in the board
:return: Modified board
"""
board[row][column] = chip
return board
def check_empty_cell(board, row, column):
"""
Checks if a determined cell is empty
:param board: Board
:param row: Row
:param column: Column
:return: Whether the determined cell is empty or not
"""
return board[row][column] == CELL_EMPTY
def check_cell_in_board(board, row, column):
"""
    Checks whether the given coordinates fall inside the board
    :param board: Board
    :param row: Row
    :param column: Column
    :return: Whether the cell (row, column) is within the board bounds
"""
return 0 <= row < len(board) and 0 <= column < len(board[0])
def safe_set_chip(board, row, column, chip):
"""
    Checks if the chip can be set, and sets it if it's valid
:param board: Board to be modified
:param row: Row
:param column: Column
:param chip: Chip to be set to the board
:return: Whether the chip has been set or not
"""
# ~~~~DANGER ZONE~~~~~~
if not check_cell_in_board(board, row, column):
print("Illo you cannot go outside the board!")
return False
elif not check_empty_cell(board, row, column):
print("Illo you have to have a lil bit of respect for other chips in the board!")
return False
# ~~~~END OF DANGER~~~~~
set_chip_to_board(board, row, column, chip)
return True
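# Illustrative sanity check of safe_set_chip (added note, not part of the
# original exercise):
#   demo = initialize_board()
#   safe_set_chip(demo, 0, 0, CELL_PLAYER_1)   # True: empty cell inside the board
#   safe_set_chip(demo, 0, 0, CELL_PLAYER_2)   # False: cell already occupied
#   safe_set_chip(demo, 5, 5, CELL_PLAYER_1)   # False: outside the 3x3 board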
"""
game_state = initialize_game_state()
with open('game.holis', 'w') as output_file:
json.dump(game_state, output_file, indent=4, sort_keys=True)
"""
with open('game.holis', 'r') as input_file:
game_state = json.load(input_file)
def int_input(text=""):
    while True:
        try:
            return int(input(text))
        except ValueError:
            pass  # keep prompting until a valid integer is entered
int_input("> ")
display_board(game_state['board']) | [
"[email protected]"
] | |
ecd28a40a8d91ac0a2c2d3e1803050e423261aae | 9b6b3f4b30e9bd8a821d8df16bd71e62b9c6eb98 | /day3/modules/mod_a.py | 2e50a02f49096cd18895d8fb24323b210b42966b | [] | no_license | shobhit-nigam/snape_mar | b7f2155cfcd83482230c339fe45f9ea851061318 | b7b33a767cc00d35a22e40c940b4331e4898c8d5 | refs/heads/main | 2023-03-25T05:44:21.244078 | 2021-03-26T05:27:28 | 2021-03-26T05:27:28 | 350,555,721 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | # modules
import os
import time
print(os.getcwd())
time.sleep(5)
print("hi")
| [
"[email protected]"
] | |
c6fd29782994efa2e50c0c1e5ac53afafd2fd765 | 67925cfa807f767013eb9df1207d48980bce100c | /model.py | dae151371c9336b4b3910aeb26c750bb0e3dafbe | [] | no_license | Gryton/carnd-behavioral-cloning | 9ea8d48440aea9d37f42e13fd69968dada5f8b6b | 7f480c0c18c737dcc6429c9f8392d9cb8ee0d581 | refs/heads/master | 2021-01-24T08:54:19.481768 | 2017-06-05T10:45:11 | 2017-06-05T10:45:11 | 93,392,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,041 | py | import csv
import cv2
import numpy as np
import random
import os
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Convolution2D, Cropping2D, Dropout, Activation
from keras.layers.pooling import MaxPooling2D
import sklearn
from sklearn.model_selection import train_test_split
main_source = './train_data'
with open(main_source + '/driving_log.csv') as csv_file:
reader = csv.reader(csv_file)
lines = [line for line in reader]
train_samples, validation_samples = train_test_split(lines, test_size=0.2)
def generator(samples, batch_size=32):
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
sklearn.utils.shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
angles = []
for batch_sample in batch_samples:
name = main_source+'/IMG/'+os.path.basename(batch_sample[0])
center_image = cv2.imread(name)
center_angle = float(batch_sample[3])
left_image = cv2.imread(main_source+'/IMG/'+os.path.basename(batch_sample[1]))
right_image = cv2.imread(main_source+'/IMG/'+os.path.basename(batch_sample[2]))
images.append(center_image)
images.append(left_image)
images.append(right_image)
images.append(cv2.flip(center_image, 1))
angles.append(center_angle)
angles.append(center_angle+0.3)
angles.append(center_angle-0.3)
angles.append(-1.0*center_angle)
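                # Note: the +/-0.3 values are an empirically chosen steering
                # correction for the side-mounted cameras, and the horizontal
                # flip with a negated angle balances left and right turns.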
            # stack the accumulated batch into numpy arrays (trimming to the
            # road section happens later, in the model's Cropping2D layer)
X_train = np.array(images)
y_train = np.array(angles)
yield sklearn.utils.shuffle(X_train, y_train)
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=32)
validation_generator = generator(validation_samples, batch_size=32)
ch, row, col = 3, 85, 320 # Trimmed image format
model = Sequential()
# normalize data
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160, 320, 3)))
# crop image to leave road
model.add(Cropping2D(cropping=((55, 20), (0,0))))
model.add(Convolution2D(24, 5, 5, subsample=(2,2)))
model.add(Dropout(0.5))
model.add(Activation('relu'))
model.add(Convolution2D(36, 5, 5, subsample=(2,2), activation = 'relu'))
model.add(Convolution2D(48, 5, 5, subsample=(2,2), activation = 'relu'))
model.add(Convolution2D(64, 3, 3, activation = 'relu'))
model.add(Convolution2D(64, 3, 3))
model.add(Flatten())
model.add(Dense(100))
model.add(Dropout(0.5))
model.add(Activation('relu'))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
model.fit_generator(train_generator, 4*len(train_samples), validation_data=validation_generator,
nb_val_samples=4*len(validation_samples), nb_epoch=3, verbose=1)
model.save('model.h5')
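# Note: fit_generator with samples_per_epoch / nb_val_samples / nb_epoch is
# the Keras 1.x API; Keras 2.x renamed these to steps_per_epoch,
# validation_steps and epochs, counting batches rather than samples.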
| [
"[email protected]"
] | |
37804720a48a867b3c2c30c5e078c92dc6a170fd | 571b507e91d24eeb79d44fff4ed16bb9e806d957 | /test1.py | 0037b5ec6880b91e467cd27ff55839633e8ae01f | [] | no_license | 143vsahani/tests | 9a5104a05774c07574ff47fea038778041599cca | 9d349157e453d790c5655a49a8011ce8c3cf5e36 | refs/heads/master | 2023-06-19T04:48:51.559392 | 2021-07-08T08:42:58 | 2021-07-08T08:42:58 | 384,055,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,603 | py | from kivy.lang import Builder
from kivymd.app import MDApp
from kivy.uix.screenmanager import ScreenManager, Screen
import json
from kivymd.uix.button import MDFlatButton
from kivymd.uix.dialog import MDDialog
import requests
from kivy.core.window import Window
Window.size=(412, 732)
help_str = '''
ScreenManager:
WelcomeScreen:
MainScreen:
LoginScreen:
SignupScreen:
<WelcomeScreen>:
name:'welcomescreen'
Image:
source: '1.jpg'
allow_stretch: True
MDLabel:
text:'Login'
font_style:'H2'
halign:'center'
pos_hint: {'center_y':0.9}
MDLabel:
text:'&'
font_style:'H2'
halign:'center'
pos_hint: {'center_y':0.7}
MDLabel:
text:'Signup'
font_style:'H2'
halign:'center'
pos_hint: {'center_y':0.5}
MDRaisedButton:
text:'Login'
pos_hint : {'center_x':0.4,'center_y':0.3}
size_hint: (0.13,0.1)
on_press:
root.manager.current = 'loginscreen'
root.manager.transition.direction = 'left'
MDRaisedButton:
text:'Signup'
pos_hint : {'center_x':0.6,'center_y':0.3}
size_hint: (0.13,0.1)
on_press:
root.manager.current = 'signupscreen'
root.manager.transition.direction = 'left'
<LoginScreen>:
name:'loginscreen'
Image:
source: '1.jpg'
allow_stretch: True
MDLabel:
text:'Login'
font_style:'H2'
halign:'center'
pos_hint: {'center_y':0.9}
MDTextField:
id:login_email
pos_hint: {'center_y':0.6,'center_x':0.5}
size_hint : (0.7,0.08)
hint_text: 'Email'
helper_text:'Required'
helper_text_mode: 'on_error'
icon_right: 'account'
icon_right_color: app.theme_cls.primary_color
required: True
mode: "rectangle"
MDTextField:
id:login_password
pos_hint: {'center_y':0.4,'center_x':0.5}
size_hint : (0.7,0.08)
hint_text: 'Password'
helper_text:'Required'
helper_text_mode: 'on_error'
icon_right: 'account'
icon_right_color: app.theme_cls.primary_color
required: True
mode: "rectangle"
MDRaisedButton:
text:'Login'
size_hint:(.5,.08)
pos_hint: {'center_x':0.5,'center_y':0.2}
on_press:
app.login()
# app.username_changer()
MDTextButton:
text: 'Create an account'
pos_hint: {'center_x':0.5,'center_y':0.1}
on_press:
root.manager.current = 'signupscreen'
root.manager.transition.direction = 'up'
<SignupScreen>:
name:'signupscreen'
Image:
source: '1.jpg'
allow_stretch: True
MDLabel:
text:'Signup'
font_style:'H2'
halign:'center'
pos_hint: {'center_y':0.9}
MDTextField:
id:signup_email
pos_hint: {'center_y':0.6,'center_x':0.5}
size_hint : (0.7,0.1)
hint_text: 'Email'
helper_text:'Required'
helper_text_mode: 'on_error'
icon_right: 'account'
icon_right_color: app.theme_cls.primary_color
required: True
mode: "rectangle"
MDTextField:
id:signup_username
pos_hint: {'center_y':0.75,'center_x':0.5}
size_hint : (0.7,0.1)
hint_text: 'Username'
helper_text:'Required'
helper_text_mode: 'on_error'
icon_right: 'account'
icon_right_color: app.theme_cls.primary_color
required: True
MDTextField:
id:signup_password
pos_hint: {'center_y':0.4,'center_x':0.5}
size_hint : (0.7,0.1)
hint_text: 'Password'
helper_text:'Required'
helper_text_mode: 'on_error'
icon_right: 'account'
icon_right_color: app.theme_cls.primary_color
required: True
mode: "rectangle"
MDRaisedButton:
text:'Signup'
size_hint: (0.13,0.07)
pos_hint: {'center_x':0.5,'center_y':0.2}
on_press: app.signup()
MDTextButton:
text: 'Already have an account'
pos_hint: {'center_x':0.5,'center_y':0.1}
on_press:
root.manager.current = 'loginscreen'
root.manager.transition.direction = 'down'
<MainScreen>:
name: 'mainscreen'
MDLabel:
id:username_info
text:'Hello Main'
font_style:'H1'
halign:'center'
'''
class WelcomeScreen(Screen):
pass
class MainScreen(Screen):
pass
class LoginScreen(Screen):
pass
class SignupScreen(Screen):
pass
sm = ScreenManager()
sm.add_widget(WelcomeScreen(name = 'welcomescreen'))
sm.add_widget(MainScreen(name = 'mainscreen'))
sm.add_widget(LoginScreen(name = 'loginscreen'))
sm.add_widget(SignupScreen(name = 'signupscreen'))
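# Note: this module-level ScreenManager is scaffolding only; LoginApp.build()
# below returns the manager created by Builder.load_string(help_str), so the
# running app never uses `sm`.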
class LoginApp(MDApp):
def build(self):
self.strng = Builder.load_string(help_str)
return self.strng
def signup(self):
signupEmail = self.strng.get_screen('signupscreen').ids.signup_email.text
signupPassword = self.strng.get_screen('signupscreen').ids.signup_password.text
signupUsername = self.strng.get_screen('signupscreen').ids.signup_username.text
if signupEmail.split() == [] or signupPassword.split() == [] or signupUsername.split() == []:
cancel_btn_username_dialogue = MDFlatButton(text = 'Retry',on_release = self.close_username_dialog)
self.dialog = MDDialog(title = 'Invalid Input',text = 'Please Enter a valid Input',size_hint = (0.7,0.2),buttons = [cancel_btn_username_dialogue])
self.dialog.open()
        elif len(signupUsername.split()) > 1:
cancel_btn_username_dialogue = MDFlatButton(text = 'Retry',on_release = self.close_username_dialog)
self.dialog = MDDialog(title = 'Invalid Username',text = 'Please enter username without space',size_hint = (0.7,0.2),buttons = [cancel_btn_username_dialogue])
self.dialog.open()
else:
print(signupEmail,signupPassword)
def login(self):
loginEmail = self.strng.get_screen('loginscreen').ids.login_email.text
loginPassword = self.strng.get_screen('loginscreen').ids.login_password.text
self.login_check = False
supported_loginEmail = loginEmail.replace('.','-')
supported_loginPassword = loginPassword.replace('.','-')
print(supported_loginEmail,supported_loginPassword)
def close_username_dialog(self,obj):
self.dialog.dismiss()
if __name__ == '__main__':
    LoginApp().run() | [
"[email protected]"
] | |
7ee7d8d8618f8f470f5e832f43ceaa90554dff09 | 08c83dbf4f1faaa8a7ef3e4596d34241f95fac37 | /server/hubco/dataLogging/generate_report_data.py | 72e83a9e3cb489cb0e7bc81e3f075c6c0b052b19 | [] | no_license | mzh1994/DJANGO_REST | d14a361114e7be4ea5cf8210d6a254191b7b2e61 | 1651238c1913f26a65645932b5eb098aaca379ee | refs/heads/master | 2023-06-27T23:19:15.650712 | 2023-06-27T10:08:03 | 2023-06-27T10:08:03 | 392,342,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,945 | py | from json import load
from sqlalchemy import create_engine
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import datetime
from .load_kks_data_table import load_kks_table
def generate_daily_report():
n_days = 10 #threshold for report
# making connection
# conn_str = 'postgresql://gveoaihnqvuuvj:dfe4ed4802ec0afa6cca854eb97b5e5d9a3f1f0bbae6ddba7f7585004f50363c@ec2-34-204-128-77.compute-1.amazonaws.com:5432/dvifbu9viiudm'
# engine = create_engine(conn_str)
conn_str = 'postgresql://postgres:password@localhost:5432/test_db'
engine = create_engine(conn_str)
date_limit = (datetime.datetime.now()-datetime.timedelta(n_days)).strftime('%Y-%m-%d 00:00:00')
# loading data tables
#df_kks_table = pd.read_sql("""SELECT * FROM kks_description;""",con=engine).drop('id',axis=1)
df_kks_table = load_kks_table() #loading from local as this table is fixed
df_data_input_table = pd.read_sql("""SELECT * FROM data_input_table
WHERE date>='{}';""".format(date_limit),con=engine).drop('id',axis=1)
col = ['date', 'shift', 'max_value_breached', 'min_value_breached',
'kks_inactive_count', 'kks_reading_percentage']
if df_data_input_table.empty: #create empty df and put date to record empty values
df = pd.DataFrame(columns=col)
df.date = datetime.datetime.now().strftime('%Y-%m-%d')
return df
else:
latest_date_in_input_table = pd.to_datetime(df_data_input_table.date).max()
#Main report calculation
df_kks_filter = df_kks_table[['kks','min_value','max_value']]
df_join = df_kks_filter.merge(df_data_input_table,on = 'kks',how='left')
df_join.date = pd.to_datetime(df_join.date)
df_join['shift'] = np.where(((df_join.date.dt.hour>=8) & (df_join.date.dt.hour<20)),'Morning Shift','Night Shift')
df_join['max_value_breached'] = np.where(df_join.value>=df_join.max_value,1,0)
df_join['min_value_breached'] = np.where(df_join.value<=df_join.min_value,1,0)
df_join['kks_inactive_count'] = np.where(df_join.inactive.isin(['true','True',1]),1,0)
df_reporting_table = df_join[['date','shift','max_value_breached','min_value_breached','kks_inactive_count','kks']]
df_reporting_table = df_reporting_table.groupby([df_reporting_table.date.dt.date,'shift'])\
.agg({'max_value_breached':sum,'min_value_breached':sum,'kks_inactive_count':sum,'kks':'count'}).reset_index()
df_reporting_table['kks_reading_percentage'] = round(
(df_reporting_table.kks-df_reporting_table.kks_inactive_count)*100/(603-df_reporting_table.kks_inactive_count),2)
df_reporting_table.drop('kks',axis=1,inplace=True)
        if df_reporting_table.empty:
            df_reporting_table = pd.DataFrame(columns=col)
        record = df_reporting_table  # defined for both the empty and populated cases
        return record | [
"[email protected]"
] | |
3503300c1f32e5ec079dd43499facd3a092e0957 | c11b2ef3f8d7a71ec9c8bd7149f148733c7036ef | /swagger_server/models/loan_application.py | fa8f47ee10b93e91f28480bfdee3d97002af2182 | [] | no_license | HebbaleLabs/Python-Assessment-Template | e1e72a8c7dc9db8e779fc471e5d557ee83133fd2 | 0b652638dda3d96cf7c953282ff112e8557b640f | refs/heads/master | 2021-05-02T02:21:35.249188 | 2018-02-13T15:20:54 | 2018-02-13T15:20:54 | 120,881,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,512 | py | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class LoanApplication(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id: str=None, version: int=None, business_pan: str=None, business_name: str=None, org_name: str=None, referred_by: str=None, initiator: str=None, loan_amount_required: str=None, currency: str='INR'): # noqa: E501
"""LoanApplication - a model defined in Swagger
:param id: The id of this LoanApplication. # noqa: E501
:type id: str
:param version: The version of this LoanApplication. # noqa: E501
:type version: int
:param business_pan: The business_pan of this LoanApplication. # noqa: E501
:type business_pan: str
:param business_name: The business_name of this LoanApplication. # noqa: E501
:type business_name: str
:param org_name: The org_name of this LoanApplication. # noqa: E501
:type org_name: str
:param referred_by: The referred_by of this LoanApplication. # noqa: E501
:type referred_by: str
:param initiator: The initiator of this LoanApplication. # noqa: E501
:type initiator: str
:param loan_amount_required: The loan_amount_required of this LoanApplication. # noqa: E501
:type loan_amount_required: str
:param currency: The currency of this LoanApplication. # noqa: E501
:type currency: str
"""
self.swagger_types = {
'id': str,
'version': int,
'business_pan': str,
'business_name': str,
'org_name': str,
'referred_by': str,
'initiator': str,
'loan_amount_required': str,
'currency': str
}
self.attribute_map = {
'id': '_id',
'version': 'version',
'business_pan': 'business_pan',
'business_name': 'business_name',
'org_name': 'org_name',
'referred_by': 'referred_by',
'initiator': 'initiator',
'loan_amount_required': 'loan_amount_required',
'currency': 'currency'
}
self._id = id
self._version = version
self._business_pan = business_pan
self._business_name = business_name
self._org_name = org_name
self._referred_by = referred_by
self._initiator = initiator
self._loan_amount_required = loan_amount_required
self._currency = currency
@classmethod
def from_dict(cls, dikt) -> 'LoanApplication':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The LoanApplication of this LoanApplication. # noqa: E501
:rtype: LoanApplication
"""
return util.deserialize_model(dikt, cls)
@property
def id(self) -> str:
"""Gets the id of this LoanApplication.
:return: The id of this LoanApplication.
:rtype: str
"""
return self._id
@id.setter
def id(self, id: str):
"""Sets the id of this LoanApplication.
:param id: The id of this LoanApplication.
:type id: str
"""
self._id = id
@property
def version(self) -> int:
"""Gets the version of this LoanApplication.
:return: The version of this LoanApplication.
:rtype: int
"""
return self._version
@version.setter
def version(self, version: int):
"""Sets the version of this LoanApplication.
:param version: The version of this LoanApplication.
:type version: int
"""
self._version = version
@property
def business_pan(self) -> str:
"""Gets the business_pan of this LoanApplication.
:return: The business_pan of this LoanApplication.
:rtype: str
"""
return self._business_pan
@business_pan.setter
def business_pan(self, business_pan: str):
"""Sets the business_pan of this LoanApplication.
:param business_pan: The business_pan of this LoanApplication.
:type business_pan: str
"""
if business_pan is None:
raise ValueError("Invalid value for `business_pan`, must not be `None`") # noqa: E501
self._business_pan = business_pan
@property
def business_name(self) -> str:
"""Gets the business_name of this LoanApplication.
:return: The business_name of this LoanApplication.
:rtype: str
"""
return self._business_name
@business_name.setter
def business_name(self, business_name: str):
"""Sets the business_name of this LoanApplication.
:param business_name: The business_name of this LoanApplication.
:type business_name: str
"""
if business_name is None:
raise ValueError("Invalid value for `business_name`, must not be `None`") # noqa: E501
self._business_name = business_name
@property
def org_name(self) -> str:
"""Gets the org_name of this LoanApplication.
:return: The org_name of this LoanApplication.
:rtype: str
"""
return self._org_name
@org_name.setter
def org_name(self, org_name: str):
"""Sets the org_name of this LoanApplication.
:param org_name: The org_name of this LoanApplication.
:type org_name: str
"""
if org_name is None:
raise ValueError("Invalid value for `org_name`, must not be `None`") # noqa: E501
self._org_name = org_name
@property
def referred_by(self) -> str:
"""Gets the referred_by of this LoanApplication.
:return: The referred_by of this LoanApplication.
:rtype: str
"""
return self._referred_by
@referred_by.setter
def referred_by(self, referred_by: str):
"""Sets the referred_by of this LoanApplication.
:param referred_by: The referred_by of this LoanApplication.
:type referred_by: str
"""
self._referred_by = referred_by
@property
def initiator(self) -> str:
"""Gets the initiator of this LoanApplication.
:return: The initiator of this LoanApplication.
:rtype: str
"""
return self._initiator
@initiator.setter
def initiator(self, initiator: str):
"""Sets the initiator of this LoanApplication.
:param initiator: The initiator of this LoanApplication.
:type initiator: str
"""
if initiator is None:
raise ValueError("Invalid value for `initiator`, must not be `None`") # noqa: E501
self._initiator = initiator
@property
def loan_amount_required(self) -> str:
"""Gets the loan_amount_required of this LoanApplication.
:return: The loan_amount_required of this LoanApplication.
:rtype: str
"""
return self._loan_amount_required
@loan_amount_required.setter
def loan_amount_required(self, loan_amount_required: str):
"""Sets the loan_amount_required of this LoanApplication.
:param loan_amount_required: The loan_amount_required of this LoanApplication.
:type loan_amount_required: str
"""
if loan_amount_required is None:
raise ValueError("Invalid value for `loan_amount_required`, must not be `None`") # noqa: E501
self._loan_amount_required = loan_amount_required
@property
def currency(self) -> str:
"""Gets the currency of this LoanApplication.
:return: The currency of this LoanApplication.
:rtype: str
"""
return self._currency
@currency.setter
def currency(self, currency: str):
"""Sets the currency of this LoanApplication.
:param currency: The currency of this LoanApplication.
:type currency: str
"""
allowed_values = ["INR"] # noqa: E501
if currency not in allowed_values:
raise ValueError(
"Invalid value for `currency` ({0}), must be one of {1}"
.format(currency, allowed_values)
)
self._currency = currency
| [
"[email protected]"
] | |
ceebbf785332c097febd92baa4e81421ab4a76b8 | 73ea4a0895a0b79950736cdbd4a386c5a9980714 | /users/tests/test_unit_email_templates.py | 912e06f4191b127d560920f3497c1dec4010b25e | [] | no_license | Code-Poets/sheetstorm | a34881590ba3dde325413b92eaba65444bcc9428 | 338ff87d9617b1f30678d18b1ce8792ab4434319 | refs/heads/develop | 2022-12-01T11:37:52.876469 | 2021-11-29T14:35:10 | 2021-11-29T14:35:10 | 149,592,796 | 1 | 0 | null | 2022-11-22T06:00:42 | 2018-09-20T10:33:23 | Python | UTF-8 | Python | false | false | 1,963 | py | from django.contrib.sites.models import Site
from django.test import TestCase
from common.utils import render_create_user_email
from users.models import CustomUser
class TestRenderCreateUserEmail(TestCase):
def setUp(self) -> None:
self.password = "password"
self.user_type = CustomUser.UserType.EMPLOYEE.name
self.user = CustomUser.objects._create_user(
email="[email protected]",
password=self.password,
is_staff=False,
is_superuser=False,
user_type=self.user_type,
need_change_password=True,
)
self.user.activate_user()
self.domain = self._get_website_domain()
def test_render_create_user_email_pass_proper_parameters_to_email_when_first_name_is_set(self):
self._set_user_first_name_and_last_name()
message = render_create_user_email(user=self.user, domain=self.domain, password=self.password)
self.assertIn(self.user.first_name, message)
self.assertIn(self.domain, message)
self.assertIn(self.password, message)
def test_render_create_user_email_pass_proper_parameters_to_email_when_first_name_is_not_set(self):
self._set_to_empty_string_user_first_name_and_last_name()
message = render_create_user_email(user=self.user, domain=self.domain, password=self.password)
self.assertIn(self.user.email, message)
self.assertIn(self.domain, message)
self.assertIn(self.password, message)
@staticmethod
def _get_website_domain() -> str:
current_site = Site.objects.get_current()
return current_site.domain
def _set_user_first_name_and_last_name(self):
self.user.first_name = "Johnny"
self.user.last_name = "Test"
self.user.save()
def _set_to_empty_string_user_first_name_and_last_name(self):
self.user.first_name = ""
self.user.last_name = ""
self.user.save()
| [
"[email protected]"
] | |
1c36dc839d3a82cad01589ae58fd91abfadc91ac | b505894f001242f2f3a1872c727a0e3232296ed3 | /leetcode/array/454_4sum2.py | d664230cc314b38da59da95c0e6b83c085a98516 | [] | no_license | hanghang2333/zhuoluo | 8eceee81e2886fd5547d218adc26cbc22fcd15af | 32f4d693cb0022b3a2176c881b2c9b09529a46a7 | refs/heads/master | 2021-05-12T06:22:01.070355 | 2018-04-05T13:16:31 | 2018-04-05T13:16:31 | 117,215,718 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | class Solution(object):
def fourSumCount(self, A, B, C, D):
N = len(A)
ab = {}
for i in range(N):
for j in range(N):
if A[i]+B[j] in ab:
ab[A[i]+B[j]] += 1
else:
ab[A[i]+B[j]] = 1
count = 0
for i in range(N):
for j in range(N):
if 0-C[i]-D[j] in ab:
count += ab[0-C[i]-D[j]]
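        # Meet-in-the-middle: all A+B pair sums were counted above in O(N^2),
        # and each C+D pair sum is now matched against its negation.
        # E.g. fourSumCount([1, 2], [-2, -1], [-1, 2], [0, 2]) returns 2.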
return count | [
"[email protected]"
] | |
b573fe2717b46eeb3125dd32c7b4b64642d01cf9 | 267464d448f76a488bd4203ceafe984366fa7fb8 | /백준/소수/11653[소인수분해].py | ea718871914ec038827a8f3737c4239b08303060 | [] | no_license | fkalstj99/problemSolving | af3f6bb1627d532b51171c7c6e7037a4f54037a6 | e1300a94397657592ccd163c54644dc0746e4f9d | refs/heads/master | 2022-12-08T09:13:43.718965 | 2020-08-30T12:05:17 | 2020-08-30T12:05:17 | 281,834,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | N = int(input())
i = 2
while i <= N:
    if N % i == 0:
        print(i)
        N //= i  # floor division keeps N an int in Python 3
    else:
        i += 1
# if the division leaves a remainder, increment i (e.g. input 72 prints 2, 2, 2, 3, 3) | [
"[email protected]"
] | |
4dcb6398085c7ba8bd3018735b4dbc0ad2a0437b | 5e0755091efd2d4ed61bead8aa38b45bab5a8b07 | /python/anyascii/_data/_06b.py | d6363dfb69629742715d285e5af94c2c7511d2f9 | [
"ISC"
] | permissive | casept/anyascii | c27261d87257c17c47fe0e9fc77438437de94c1c | d4f426b91751254b68eaa84c6cd23099edd668e6 | refs/heads/master | 2022-12-05T07:13:53.075144 | 2020-08-07T07:55:50 | 2020-08-07T07:55:50 | 285,904,577 | 0 | 0 | ISC | 2020-08-07T19:20:00 | 2020-08-07T19:19:59 | null | UTF-8 | Python | false | false | 1,011 | py | b='Xiang Nong Bo Chan Lan Ju Shuang She Wei Cong Quan Qu Cang Jiu Yu Luo Li Cuan Luan Dang Jue Yan Lan Lan Zhu Lei Li Ba Nang Yu Ling Guang Qian Ci Huan Xin Yu Yi Qian Ou Xu Chao Chu Qi Kai Yi Jue Xi Xu He Yu Kui Lang Kuan Shuo Xi Ai Yi Qi Chua Chi Qin Kuan Kan Kuan Kan Chuan Sha Gua Yin Xin Xie Yu Qian Xiao Ye Ge Wu Tan Jin Ou Hu Ti Huan Xu Pen Xi Xiao Chua She Shan Han Chu Yi E Yu Chuo Huan Zhi Zheng Ci Bu Wu Qi Bu Bu Wai Ju Qian Chi Se Chi Se Zhong Sui Sui Li Ze Yu Li Gui Dai E Si Jian Zhe Mo Mo Yao Mo Cu Yang Tian Sheng Dai Shang Xu Xun Shu Can Jue Piao Qia Qiu Su Qing Yun Lian Yi Fou Zhi Ye Can Hun Dan Ji Die Zhen Yun Wen Chou Bin Ti Jin Shang Yin Diao Jiu Hui Cuan Yi Dan Du Jiang Lian Bin Du Jian Jian Shu Ou Duan Zhu Yin Qing Yi Sha Qiao Ke Xiao Xun Dian Hui Hui Gu Qiao Ji Yi Ou Hui Duan Yi Xiao Wu Guan Mu Mei Mei Ai Jie Du Yu Bi Bi Bi Pi Pi Bi Chan Mao Hao Cai Pi Lie Jia Zhan Sai Mu Tuo Xun Er Rong Xian Ju Mu Hao Qiu Dou Sha Tan Pei Ju Duo Cui Bi San San Mao Sai Shu Shu Tuo He Jian Ta San' | [
"[email protected]"
] | |
ceb277dd66d56d8f9fe1e89835ffa948b8a7a063 | 377dc973a58d30154cf485de141223d7ca5424dd | /havok_classes/hkpRotationalConstraintDataAtoms.py | 90f06db837bcc7e14a23c22bdb553b1f5dc852ee | [
"MIT"
] | permissive | sawich/havok-reflection | d6a5552f2881bb4070ad824fb7180ad296edf4c4 | 1d5b768fb533b3eb36fc9e42793088abeffbad59 | refs/heads/master | 2021-10-11T12:56:44.506674 | 2019-01-25T22:37:31 | 2019-01-25T22:37:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | from .hkpSetLocalRotationsConstraintAtom import hkpSetLocalRotationsConstraintAtom
from .hkpAngConstraintAtom import hkpAngConstraintAtom
class hkpRotationalConstraintDataAtoms(object):
rotations: hkpSetLocalRotationsConstraintAtom
ang: hkpAngConstraintAtom
def __init__(self, infile):
self.rotations = hkpSetLocalRotationsConstraintAtom(infile) # TYPE_STRUCT:TYPE_VOID
self.ang = hkpAngConstraintAtom(infile) # TYPE_STRUCT:TYPE_VOID
def __repr__(self):
return "<{class_name} rotations={rotations}, ang={ang}>".format(**{
"class_name": self.__class__.__name__,
"rotations": self.rotations,
"ang": self.ang,
})
| [
"[email protected]"
] | |
73882ceaee9a66d915b0a76ecfdf796b2e046fa2 | baf3996414315ffb60470c40c7ad797bf4e6897f | /02_ai/1_ml/3_data_preparation/code/chapter_24/02_evaluate_model_with_transforms.py | 1c8d129c84e7049a72f57a88decf288a22d315e1 | [
"MIT"
] | permissive | thiago-allue/portfolio | 8fbbecca7ce232567aebe97c19944f444508b7f4 | 0acd8253dc7c5150fef9b2d46eead3db83ca42de | refs/heads/main | 2023-03-15T22:10:21.109707 | 2022-09-14T17:04:35 | 2022-09-14T17:04:35 | 207,919,073 | 0 | 0 | null | 2019-11-13T18:18:23 | 2019-09-11T22:40:46 | Python | UTF-8 | Python | false | false | 1,650 | py | # example of using the ColumnTransformer for the Abalone dataset
from numpy import mean
from numpy import std
from numpy import absolute
from pandas import read_csv
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVR
# load dataset
dataframe = read_csv('abalone.csv', header=None)
# split into inputs and outputs
last_ix = len(dataframe.columns) - 1
X, y = dataframe.drop(last_ix, axis=1), dataframe[last_ix]
print(X.shape, y.shape)
# determine categorical and numerical features
numerical_ix = X.select_dtypes(include=['int64', 'float64']).columns
categorical_ix = X.select_dtypes(include=['object', 'bool']).columns
# define the data preparation for the columns
t = [('cat', OneHotEncoder(), categorical_ix), ('num', MinMaxScaler(), numerical_ix)]
col_transform = ColumnTransformer(transformers=t)
# define the model
model = SVR(kernel='rbf',gamma='scale',C=100)
# define the data preparation and modeling pipeline
pipeline = Pipeline(steps=[('prep',col_transform), ('m', model)])
# define the model cross-validation configuration
cv = KFold(n_splits=10, shuffle=True, random_state=1)
# evaluate the pipeline using cross validation and calculate MAE
scores = cross_val_score(pipeline, X, y, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1)
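# Because the ColumnTransformer sits inside the Pipeline, the encoder and
# scaler are re-fit on the training folds only during cross-validation, so no
# information from a held-out fold leaks into the preprocessing.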
# convert MAE scores to positive values
scores = absolute(scores)
# summarize the model performance
print('MAE: %.3f (%.3f)' % (mean(scores), std(scores))) | [
"[email protected]"
] | |
22d4d81071f554dc838703c8b549ebcf2b774c45 | c79c2703102949b1bcf079000e9891de28c5fede | /examples/cryptmachine.py | 9aa236192ac59332ef923d887193b70106ed1e41 | [
"MIT"
] | permissive | goffinet/secretpy | 999222c4161d0bfed11c38a9e26f7f41dc9747ff | 6be2e3ffb7e3ef88b99ab9390537e400f8fe5112 | refs/heads/master | 2023-05-06T00:37:44.586487 | 2021-06-01T09:57:44 | 2021-06-01T10:48:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,344 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
from secretpy import Atbash, Caesar, CryptMachine, alphabets
from secretpy.cmdecorators import SaveAll, RemoveNonAlphabet
def encdec(machine, plaintext):
print(plaintext)
enc = machine.encrypt(plaintext)
print(enc)
dec = machine.decrypt(enc)
print(dec)
print("-----------------------------------")
plaintext = u"thequickbrownfoxjumpsoverthelazydog"
key = 3
cipher = Caesar()
cm = CryptMachine(cipher, key)
encdec(cm, plaintext)
cm.set_alphabet(alphabets.GERMAN)
encdec(cm, plaintext)
cm1 = SaveAll(cm)
cm1.set_key(9)
plaintext = u"the quick brown fox jumps over the lazy dog"
encdec(cm1, plaintext)
cm2 = RemoveNonAlphabet(cm)
cm2.set_cipher(Atbash())
plaintext = u"Achtung Minen"
encdec(cm2, plaintext)
'''
Output:
thequickbrownfoxjumpsoverthelazydog
wkhtxlfneurzqiramxpsvryhuwkhodcbgrj
thequickbrownfoxjumpsoverthelazydog
-----------------------------------
thequickbrownfoxjumpsoverthelazydog
wkhtxlfneurzqirämxpsvryhuwkhodüögrj
thequickbrownfoxjumpsoverthelazydog
-----------------------------------
the quick brown fox jumps over the lazy dog
üqn zßrlt käxbw oxc sßvyö xanä üqn ujed mxp
the quick brown fox jumps over the lazy dog
-----------------------------------
Achtung Minen
ßöwkjqxrvqzq
achtungminen
-----------------------------------
'''
| [
"[email protected]"
] | |
6e30eef591902c83ff936151b14a8117be9b08df | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/FXJSMM/YW_FXJSMM_SZSJ_070_1.py | f7d883d8e4c6382924fb97a77b4bc566b49a2c88 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,025 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_FXJSMM_SZSJ_070_1(xtp_test_case):
# YW_FXJSMM_SZSJ_070_1
def test_YW_FXJSMM_SZSJ_070_1(self):
        title = '交易日即成剩撤卖-先卖整数股再卖零头股'  # trading-day IOC sell: sell the round lot first, then the odd lot
        # Define the expected values for the current test case
        # Expected status: initial, unfilled, partially filled, fully filled, partial-cancel reported, partially cancelled, reported & pending cancel, cancelled, rejected, cancel rejected, internal cancel
        # xtp_ID and cancel_xtpID default to 0 and need not be changed
case_goal = {
'期望状态': '全成',
'errorID': 0,
'errorMSG': '',
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
        # Define the order (entrust) parameters ------------------------------------------
        # Parameters: ticker, market, security type, security status, trading status, side (B = buy, S = sell), expected status, Api
stkparm = QueryStkPriceQty('001006', '2', '0', '0', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters fails, the test case fails
if stkparm['返回结果'] is False:
rs = {
'用例测试结果': stkparm['返回结果'],
'测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
}
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':2,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST_OR_CANCEL'],
'price': stkparm['涨停价'],
'quantity': 99,
'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = serviceTest(Api, case_goal, wt_reqs)
logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
+ str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
self.assertEqual(rs['用例测试结果'], True) # 0
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
9b53ca49a35ca37f780f44bc7225210966a4873d | 26fff323560ed7f825f07753c715604423ca102b | /DAGMM/util.py | bafe3a12a4ca80c6a6704e4e1955657bf18ad32b | [] | no_license | Lijer/baseline_ad | 276f42083a8b213f5b630f7a180b74f6f69c39fc | 9cd09bf18d4ab0c8f5a9468dd0abefc7d617ebcb | refs/heads/master | 2023-02-02T22:23:42.046525 | 2020-12-22T10:35:06 | 2020-12-22T10:35:06 | 323,594,983 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,964 | py | import pandas as pd
import random
from joblib import Memory
from sklearn.datasets import load_svmlight_file
from sklearn.metrics import average_precision_score, roc_auc_score
import numpy as np
import scipy.io as sio
import time
import datetime
mem = Memory("./dataset/svm_data")
@mem.cache
def get_data_from_svmlight_file(path):
data = load_svmlight_file(path)
return data[0], data[1]
def dataLoading(path, logfile=None):
file_type = path.split('.')[-1]
if file_type == 'csv':
# loading data
df = pd.read_csv(path)
labels = df['class']
x_df = df.drop(['class'], axis=1)
x = x_df.values
print("Data shape: (%d, %d)" % x.shape)
if logfile:
logfile.write("Data shape: (%d, %d)\n" % x.shape)
    if file_type == 'mat':
        f = sio.loadmat(path)
        x = f['X']
        labels = f['y'].reshape((f['y'].shape[0],))  # same name as the csv branch so the shared return works
        print("Data shape: (%d, %d)" % x.shape)
    return x, labels
def dataLoading_mat(path, logfile=None):
# loading data
f = sio.loadmat(path)
x = f['X']
y = f['y']
y = y.reshape((y.shape[0],))
print("Data shape: (%d, %d)" % x.shape)
return x, y
# random sampling with replacement
def random_list(start, stop, length):
if length >= 0:
length = int(length)
start, stop = (int(start), int(stop)) if start <= stop else (int(stop), int(start))
random_list = []
for i in range(length):
random_list.append(random.randint(start, stop)) # including start and stop
return random_list
def aucPerformance(scores, labels, logfile=None):
roc_auc = roc_auc_score(labels, scores)
# print(roc_auc)
ap = average_precision_score(labels, scores)
print("AUC-ROC: %.4f, AUC-PR: %.4f" % (roc_auc, ap))
if logfile:
logfile.write("AUC-ROC: %.4f, AUC-PR: %.4f\n" % (roc_auc, ap))
# plt.title('Receiver Operating Characteristic')
# plt.plot(fpr, tpr, label='AUC = %0.4f'% roc_auc)
# plt.legend(loc='lower right')
# plt.plot([0,1],[0,1],'r--')
# plt.xlim([-0.001, 1])
# plt.ylim([0, 1.001])
# plt.ylabel('True Positive Rate')
# plt.xlabel('False Positive Rate')
# plt.show();
return roc_auc, ap
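# Illustrative call with toy values: for a perfectly ranked batch such as
# aucPerformance(np.array([0.9, 0.2, 0.8, 0.1]), np.array([1, 0, 1, 0]))
# both returned metrics are 1.0.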
def tic_time():
print("=====================================================")
tic_datetime = datetime.datetime.now()
print("tic_datetime:", tic_datetime)
print("tic_datetime.strftime:", tic_datetime.strftime('%Y-%m-%d %H:%M:%S.%f'))
tic_walltime = time.time()
print("tic_walltime:", tic_walltime)
    tic_cpu = time.process_time()  # time.clock() was removed in Python 3.8
print("tic_cpu:", tic_cpu)
print("=====================================================\n")
def writeResults_my(name, auc, std_auc = 0.0, ap = 0.0, ap_std = 0.0, func = 'dagmm', path = "./results/auc_performance_.csv"):
csv_file = open(path, 'a')
row = name + "," + func+ "," + str(auc) + "," + str(std_auc) + "," + str(ap) + "," + str(ap_std) + "\n"
csv_file.write(row) | [
"[email protected]"
] | |
0c4eea4a48e2c43e530699dd4caf85253d2c7d0e | 10f091bf946bdd6b50c3fa0637504ab19d9c65c2 | /albums/3/challenge232_easy/code.py | 1a5d010c936ea78f8bbc00386478b83fb1d45ddc | [] | no_license | Rich43/rog | ccebee00b982579c46c30a7dab55b4dbe6396fdc | 029dd57c920aa869750b809d22092c9614e67ba9 | refs/heads/master | 2023-01-23T07:07:16.069821 | 2023-01-19T19:10:43 | 2023-01-19T19:10:43 | 109,163,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,977 | py | ''' A palindrome is a word or sentence that is spelled the same backwards and forwards.
A simple example of this is Swedish pop sensation ABBA, which, when written backwards,
is also ABBA. Their hit song (and winner of the 1974 Eurovision Song Contest!) "Waterloo"
is not a palindrome, because "Waterloo" backwards is "Oolretaw".
Palindromes can be longer than one word as well. "Solo gigolos" (the saddest of all gigolos)
is a palindrome, because if you write it backwards it becomes "Sologig olos", and if you move
the space three places back (which you are allowed to do), that becomes "Solo gigolos".
Today, you are going to write a program that detects whether or not a
particular input is a valid palindrome.
On the first line of the input, you will receive a number specifying how many lines of input to read.
After that, the input consists of some number of lines of text that you will read and determine
whether or not it is a palindrome.
The only important factor in validating palindromes is whether or not a sequence of letters is the
same backwards and forwards. All other types of characters (spaces, punctuation, newlines, etc.)
should be ignored, and whether a character is lower-case or upper-case is irrelevant.
'''
def palindrome(file):
    with open(file) as f:
        count = int(next(f))  # first line gives how many lines to check
        strng = ''
        for acc, line in enumerate(f, start=1):
            strng += line
            if acc == count:
                break
strng2 = ''
for char in strng:
if char.isalnum():
strng2 += char
strng2 = strng2.lower()
strng3 = strng2[::-1]
if strng2 == strng3:
return('Palindrome!')
else:
return ('not palindrome')
if __name__ == '__main__':
ans = palindrome('232input1.txt')
print(ans)
ans = palindrome('232input2.txt')
print(ans) | [
"[email protected]"
] | |
a4214d6be5e7f6fa478185252aa734774a80071d | e3e2961454c30ff060a475b32ed75f19d0e72d0e | /setup_app/installers/admin_ui.py | 1c00a64f1fcbb728f009070d674fbac2bde97506 | [
"MIT"
] | permissive | klinux/jans-setup | 648cf966d373929d1c05d8c86019d4c3e45dbc6f | 8461556e07ecb6cb31b537959f78bad88b60e69e | refs/heads/master | 2023-07-07T02:08:03.393135 | 2021-08-10T17:54:29 | 2021-08-10T17:54:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,403 | py | import os
import time
import glob
import json
import ruamel.yaml
import ldap3
from string import Template
from setup_app import paths
from setup_app.static import AppType, InstallOption, BackendTypes
from setup_app.utils import base
from setup_app.config import Config
from setup_app.utils.setup_utils import SetupUtils
from setup_app.installers.base import BaseInstaller
from setup_app.pylib.ldif4.ldif import LDIFWriter
class AdminUIInstaller(SetupUtils, BaseInstaller):
def __init__(self):
setattr(base.current_app, self.__class__.__name__, self)
self.service_name = 'gluu-admin-ui'
self.needdb = True
self.app_type = AppType.SERVICE
self.install_type = InstallOption.OPTONAL
self.install_var = 'installAdminUI'
self.register_progess()
self.output_folder = os.path.join(Config.outputFolder,'gluu-admin-ui')
self.clients_ldif_fn = os.path.join(self.output_folder, 'clients.ldif')
self.root_dir = os.path.join(Config.jansOptFolder, 'gluu-admin-ui')
self.gluuOxVersion = '5.0.0-SNAPSHOT'
self.source_files = [
(os.path.join(Config.distJansFolder, 'gluu-admin-ui-app.jar'), 'https://ox.gluu.org/maven/org/gluu/gluu-admin-ui-app/{0}/gluu-admin-ui-app-{0}.jar'.format(self.gluuOxVersion))
]
self.load_ldif_files = []
def install(self):
self.download_files(downloads=[self.source_files[0][0]])
self.copyFile(self.source_files[0][0], self.root_dir)
self.generate_configuration()
self.render_import_templates()
def installed(self):
return os.path.exists(self.root_dir)
def create_folders(self):
for d in (self.root_dir,self.output_folder):
if not os.path.exists(d):
self.createDirs(d)
self.run([paths.cmd_chown, '-R', 'jetty:jetty', self.root_dir])
def generate_configuration(self):
self.check_clients([('admin_ui_client_id', '1901.')])
if not Config.get('admin_ui_client_pw'):
Config.admin_ui_client_pw = self.getPW(32)
Config.admin_ui_client_encoded_pw = self.obscure(Config.admin_ui_client_pw)
createClient = True
config_api_dn = 'inum={},ou=clients,o=jans'.format(Config.admin_ui_client_id)
if Config.installed_instance and self.dbUtils.search('ou=clients,o=jans', search_filter='(&(inum={})(objectClass=jansClnt))'.format(Config.admin_ui_client_id)):
createClient = False
if createClient:
clients_ldif_fd = open(self.clients_ldif_fn, 'wb')
ldif_clients_writer = LDIFWriter(clients_ldif_fd, cols=1000)
ldif_clients_writer.unparse(
config_api_dn, {
'objectClass': ['top', 'jansClnt'],
'del': ['false'],
'displayName': ['Jans Admin UI Client'],
'inum': [Config.admin_ui_client_id],
'jansAccessTknAsJwt': ['false'],
'jansAccessTknSigAlg': ['RS256'],
'jansAppTyp': ['web'],
'jansAttrs': ['{"tlsClientAuthSubjectDn":"","runIntrospectionScriptBeforeAccessTokenAsJwtCreationAndIncludeClaims":false,"keepClientAuthorizationAfterExpiration":false,"allowSpontaneousScopes":false,"spontaneousScopes":[],"spontaneousScopeScriptDns":[],"backchannelLogoutUri":[],"backchannelLogoutSessionRequired":false,"additionalAudience":[],"postAuthnScripts":[],"consentGatheringScripts":[],"introspectionScripts":[],"rptClaimsScripts":[]}'],
'jansClntSecret': [Config.admin_ui_client_encoded_pw],
'jansDefAcrValues': ['simple_password_auth'],
'jansDisabled': ['false'],
'jansGrantTyp': ['authorization_code', 'refresh_token', 'client_credentials'],
'jansIdTknSignedRespAlg': ['RS256'],
'jansInclClaimsInIdTkn': ['false'],
'jansLogoutSessRequired': ['false'],
'jansPersistClntAuthzs': ['true'],
'jansRequireAuthTime': ['false'],
'jansRespTyp': ['code'],
'jansRptAsJwt': ['false'],
'jansPostLogoutRedirectURI': ['http://localhost:4100'],
'jansRedirectURI': ['http://localhost:4100'],
'jansLogoutURI': ['http://localhost:4100/logout'],
'jansScope': ['inum=43F1,ou=scopes,o=jans','inum=6D90,ou=scopes,o=jans','inum=FOC4,ou=scopes,o=jans'],
'jansSubjectTyp': ['pairwise'],
'jansTknEndpointAuthMethod': ['client_secret_basic'],
'jansTrustedClnt': ['false'],
})
clients_ldif_fd.close()
self.load_ldif_files.append(self.clients_ldif_fn)
admin_dn = 'inum={},ou=people,o=jans'.format(Config.admin_inum)
backend_location = self.dbUtils.get_backend_location_for_dn(admin_dn)
result = self.dbUtils.dn_exists(admin_dn)
if result and not 'jansAdminUIRole' in result:
if backend_location == BackendTypes.LDAP:
ldap_operation_result = self.dbUtils.ldap_conn.modify(
admin_dn,
{'jansAdminUIRole': [ldap3.MODIFY_ADD, 'api-admin']})
self.dbUtils.log_ldap_result(ldap_operation_result)
def render_import_templates(self):
self.dbUtils.import_ldif(self.load_ldif_files)
| [
"[email protected]"
] | |
7a72d546d3294f73f1162641774d3ed7bf3f2bc9 | cce6e3fbb235e87e14cdc7557acf15884509566b | /Python codes/Simple Robot Movements/backward.py | c9bd03ab570bbac9d6486fa4d97cb0a6d1dfe084 | [] | no_license | ArpithaGanesh/AutonomousIndoorNavigationRobot | c3bb4ec2a94aaa7dd7e66427358b2483b37f01df | 8927f0ce8cb413e4e1f918969a3bdf59306f0499 | refs/heads/master | 2020-06-04T22:23:26.059161 | 2019-06-16T15:54:19 | 2019-06-16T15:54:19 | 192,188,290 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | import RPi.GPIO as GPIO
import time
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
Motor1A = 16
Motor1B = 18
Motor1E = 22
Motor2A = 23
Motor2B = 21
Motor2E = 19
GPIO.setup(Motor1A,GPIO.OUT)
GPIO.setup(Motor1B,GPIO.OUT)
GPIO.setup(Motor1E,GPIO.OUT)
GPIO.setup(Motor2A,GPIO.OUT)
GPIO.setup(Motor2B,GPIO.OUT)
GPIO.setup(Motor2E,GPIO.OUT)
print("GOING BACKWARD")
GPIO.output(Motor1A,GPIO.LOW)
GPIO.output(Motor1B,GPIO.HIGH)
GPIO.output(Motor1E,GPIO.HIGH)
GPIO.output(Motor2A,GPIO.LOW)
GPIO.output(Motor2B,GPIO.HIGH)
GPIO.output(Motor2E,GPIO.HIGH)
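# Wiring assumption (not stated in the original file): each motor sits behind
# an H-bridge driver (e.g. an L293D), where the A/B pins select direction and
# the E pins enable the bridge; swapping the A/B HIGH/LOW levels drives forward.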
time.sleep(10)
GPIO.cleanup() | [
"[email protected]"
] | |
84f4c3299373152eb7d4df6231395ae80d30a844 | bc11e10521fa313d83011e77a2c31a0b6ed581af | /lib/rubyfox/server/data/lib/Lib/test/zxjdbc/jndi.py | d384bf3292bf6216162f2bcb8bc3d8bd348fa1b8 | [
"MIT"
] | permissive | neopoly/rubyfox-server | f6f191c68dcc30b8c56d22c8209e4a69251f4f27 | 26d67687fc642111ef8d02507f2b567828bd1ebd | refs/heads/master | 2023-07-20T15:04:32.028192 | 2023-07-17T09:16:36 | 2023-07-17T09:33:20 | 6,457,322 | 3 | 4 | MIT | 2020-08-11T06:53:50 | 2012-10-30T13:06:32 | Python | UTF-8 | Python | false | false | 828 | py |
# Jython Database Specification API 2.0
#
# $Id: jndi.py 1962 2001-12-14 04:20:03Z bzimmer $
#
# Copyright (c) 2001 brian zimmer <[email protected]>
"""
This script is used to bind a JNDI reference for testing purposes only.
"""
from java.util import Hashtable
from org.gjt.mm.mysql import MysqlDataSource
from javax.naming import Context, InitialContext, NameAlreadyBoundException
env = Hashtable()
env.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.fscontext.RefFSContextFactory")
ds = MysqlDataSource()
ds.setServerName("localhost")
ds.setDatabaseName("ziclix")
ds.setPort(3306)
ctx = InitialContext(env)
try:
try:
ctx.bind("/jdbc/mysqldb", ds)
except NameAlreadyBoundException, e:
ctx.unbind("/jdbc/mysqldb")
ctx.bind("/jdbc/mysqldb", ds)
finally:
ctx.close()
print "bound [%s] at /jdbc/mysqldb" % (ds)
| [
"[email protected]"
] | |
5da039e71cdb6a91ad5608bdb989cf7ea546be34 | aa0988f0c2639e80392d76afc3d853c61dd10af7 | /venv/bin/easy_install-3.7 | 2702ad329d06bafb37a6eb7f38fc0a5c72a9c7b8 | [] | no_license | lgsxiaosen/crawler_comic | 26a52f1925a3a74b09574e32a5a2dd2bcf406762 | 7b94e2cc94254f4e71ce72557d5e4e64d64e0616 | refs/heads/master | 2022-04-27T07:42:57.072178 | 2020-04-30T10:24:29 | 2020-04-30T10:24:29 | 260,149,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | 7 | #!/Users/danlan/Documents/CODE/personal-code/one_person_comic/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"[email protected]"
] | |
70abbc5cecb282dfd49f08753c9179be6f5f255f | 3388e1171fff7e31837aea83eba0baabcc41350c | /storagebox.py | c918226a4c370831c31118254a580bad83aff93d | [] | no_license | mhermans/3dprinting | 5aafdd8299ac0d13a3e3bd508de60b43107ee841 | 4c2a6002c9ad7247e3917092aafe9e7d9de3ba10 | refs/heads/main | 2023-01-24T04:37:29.030608 | 2020-11-25T20:55:38 | 2020-11-25T20:55:38 | 316,052,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,642 | py | # Parametric storage boxes for parts
# ##################################
import cadquery as cq
box_with = 50 # x-axis
box_depth = 100 # y-axis
box_height = 50 # z-axis
wall_with = 1
tab_width = box_depth/2 # default: tab runs along half the wall
tab_depth = 13 # 13mm is Dymo tag height
tab_height = 2
tab_slope_depth = 12
# create box
# ----------
box = cq.Workplane("XY").box(box_with, box_depth, box_height)
box = box.faces(">Z").shell(- wall_with) # create shell with top Z-face open
box = box.edges('|Z').fillet(1) # fillet all edges parallel to Z-axis
box = box.edges('<Z').fillet(1) # fillet edges at bottom of Z-axis
box = box.faces("<Z[-2]").fillet(1) # fillet second to last bottom face of Z-axis (inner edges at bottom)
# create tab
# ----------
tab = cq.Workplane("XY").box(tab_depth, tab_width , tab_height)
# tab slope (sufficiently sloping to print without supports)
tab = tab.faces('<Z').rect(tab_depth, tab_width) # start top of slope on bottom face of tab
# draw end rectangle of slope lower and sloping towards the wall of the box
tab = tab.workplane(offset = tab_slope_depth).moveTo(-(tab_depth/2), 0).rect(0.1, tab_width)
tab = tab.loft(combine=True) # loft between two rectangles, creating slope for tab
# move tab & slope against wall
tab_align_top = (box_height / 2) - (tab_height / 2)
tab_align_wall = (box_with / 2) - (tab_depth / 2) - wall_with
tab = tab.translate( (-tab_align_wall, 0, tab_align_top) )
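# The translate above lines the tab up with the box: tab_align_top lifts it
# flush with the open top rim, and tab_align_wall tucks it against the inner
# face of the wall on the -X side.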
box = box.union(tab)
show_object(box)
fn = ''.join(['storebox_', str(box_with), 'x', str(box_depth), 'x', str(box_height), '.stl'])
cq.exporters.export(box , ''.join(['/home/mhermans/tmp/', fn]) ) | [
"[email protected]"
] | |
cce21217107dcef9365691283c1fec36ae5c01ec | b3349e55b0f2ae3d0a644ffdf1518f802e2b1e57 | /CodigoPython/Pregunta3CreacionImagen.py | ab61133cebaf06bb07663786dea3e4529eadd971 | [] | no_license | psigelo/tarea1_ipd431 | 8379f0d7bad1f325ee0a6d38bf361b61c9b5aa92 | 260a6bd3ceac46c2d096554f9c1941ac548674e5 | refs/heads/master | 2021-01-20T05:08:25.829955 | 2015-05-06T21:53:54 | 2015-05-06T21:53:54 | 34,429,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
u = 1.5
x = np.linspace(0, 2 * np.pi, 100)
z = x + 100
plt.ylim([0,1])
plt.xlim([0,1])
plt.fill_between(x, z, x/(u-1), color='black', facecolor='green', interpolate=True, alpha=0.3)
plt.fill_between(x, x, x, color='black', facecolor='green', interpolate=True, alpha=0.3)
plt.fill_between(x, x*(u-1), x-x , color='black', facecolor='red', interpolate=True, alpha=0.3)
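# With u = 1.5 the three fills shade: region A2 above y = x/(u-1) = 2x, a
# zero-area strip along y = x (a visual separator), and region A1 between
# y = 0 and y = x(u-1) = x/2.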
plt.text( 0.6, 0.1, "Region A1", ha="center", family='sans-serif', size=11)
plt.text( 0.14, 0.65, "Region A2", ha="center", family='sans-serif', size=11)
plt.text( 0.6, 0.5, "y=x", ha="center", family='sans-serif', size=11)
plt.text( 0.55, 0.3, "y=x(u-1)", ha="center", family='sans-serif', size=11)
plt.text( 0.37, 0.6 , "y=x/(u-1)", ha="center", family='sans-serif', size=11)
plt.xlabel('X')
plt.ylabel('Y')
plt.savefig('../latex/img/pregunta3.pdf', bbox_inches=0)
| [
"[email protected]"
] | |
5180882b117ac289e7fccdcb89a8e62bcac4eae7 | c0e0de3bd94de0cc4e198b888b2025d243db61ee | /code2inv/common/constants.py | dfbc588814087d95422f067b5be25a9ddc8139e6 | [] | no_license | alipourm/code2inv | 8f728c55d78811edf6923e7904403f8a92c8f46f | b4d6f7e3e0c32005a07e9ecaf589a032610d18f2 | refs/heads/master | 2020-04-19T20:50:15.564141 | 2018-12-26T15:19:14 | 2018-12-26T15:19:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | AST_EDGE_TYPE = 0
CONTROL_EDGE_TYPE = 2
VAR_LINK_TYPE = 4
NUM_EDGE_TYPES = 6 # 3 edge types x 2 directions
# boogie results
AC_CODE = 0
POST_FAIL_CODE = 1
INDUCTIVE_FAIL_CODE = 2
ENTRY_FAIL_CODE = 3
INVALID_CODE = 4
# z3 pre-check
ALWAYS_TRUE_EXPR_CODE = 3
ALWAYS_FALSE_EXPR_CODE = 4
NORMAL_EXPR_CODE = 5
from cmd_args import cmd_args
LIST_PREDICATES = [w for w in cmd_args.list_pred.split(',')]
LIST_OP = [w for w in cmd_args.list_op.split(',')]
MAX_DEPTH = cmd_args.max_depth
MAX_AND = cmd_args.max_and
MAX_OR = cmd_args.max_or | [
"[email protected]"
] | |
fce4db1949627e363226c20b93f6dbc0bea0f98e | 5fcc3fd608a794d260368318c62547f74d4c1416 | /discard_blank_lines.py | c871672ecd765e8096b9971ff5ff20b2f9f2f1a3 | [] | no_license | ds-gurukandhamoorthi/intro-python-exs | 241fb9158096479a100ef378f291ba83e1a7d5d4 | 68c386e51c13d0f31e273016eefc4e29ddecdc04 | refs/heads/master | 2022-02-25T22:28:41.061722 | 2019-10-22T18:36:46 | 2019-10-22T18:36:46 | 103,829,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | from sys import stdin
for line in stdin:
if line.strip() == '':
continue
print(line, end='')
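# Usage: python discard_blank_lines.py < input.txt
# Copies stdin to stdout, dropping lines that are empty or whitespace-only.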
| [
"[email protected]"
] | |
d7309ed6406863e1f95db2fe44a7e6108c68d5c2 | 8ddeb2e4d536e11dc25ca580a4e8c5bd62830202 | /old_code.py | f8f6db5fa49c4b66e0374e8300800e3a8da05ab3 | [] | no_license | kmb5/fretboard-learner | 6b827e0c6209f522d8e7e106e3929387a5ec1fd4 | 5a0934b8fc36932f368782603c17842ca479c8fe | refs/heads/master | 2023-02-16T15:59:36.598088 | 2021-01-18T17:42:49 | 2021-01-18T17:42:49 | 287,092,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,145 | py | import sys
import time
from random import choice
import pyaudio  # needed below for paFloat32 and PyAudio()
import sounddevice as sd
from scipy.io.wavfile import write
import numpy as np
from aubio import notes, pitch
from helpers import pitch2note, NOTES_PER_STRING
CHUNK = 1024
FORMAT = pyaudio.paFloat32
CHANNELS = 1
RATE = 44100
RECORD_SECONDS = 5000
"""
THIS IS THE OLD VERSION, KEEPING IT JUST COZ IM ATTACHED BUT main.py IS THE WORKING ONE
"""
# Pitch
tolerance = 0.8
downsample = 1
win_s = 4096 // downsample # fft size
hop_s = 1024 // downsample # hop size
#notes_o = notes("default", win_s, hop_s, RATE)
pitch_o = pitch("yinfft", win_s, hop_s, RATE)
pitch_o.set_unit("Hz")
pitch_o.set_silence(-40)
pitch_o.set_tolerance(tolerance)
while True:
inp = input('Which string?\n')
    if inp not in tuple('EADGBe'):  # membership test against single letters, not a substring check
print('Need to select from EADGBe')
continue
else:
break
prev_note_choice = None
print('\n\nPress cmd+c to quit at any time\n\n')
while True:
note_choice = choice(NOTES_PER_STRING[inp])
if prev_note_choice == note_choice:
continue
else:
prev_note_choice = note_choice
note_choice_without_number = note_choice[:-1]
print(f'Play this note: {note_choice_without_number}')
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
#print("* recording")
frames = []
notes_list = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
buffer = stream.read(CHUNK)
frames.append(buffer)
signal = np.frombuffer(buffer, dtype=np.float32)
#new_note = notes_o(signal) # we get note from pitch as it is more accurate
        pitch_hz = pitch_o(signal)[0]  # renamed from `pitch`, which shadowed the aubio import
        #confidence = pitch_o.get_confidence()
        if pitch_hz != 0:
            note_played = pitch2note(pitch_hz)
notes_list.append(note_played[:-1]) # we append only note and not number (eg. E and not E2)
if len(notes_list) == 10:
# We listen for 10 signals and then select the most frequent note in the list
# This is because when you pluck a note the frequency can deviate as you pluck it strongly
most_likely_note = max(notes_list, key=notes_list.count)
sys.stdout.write(f'\rYou played: {most_likely_note} ')
sys.stdout.flush()
if most_likely_note == note_choice_without_number:
# we hit the desired note
print('Good job!')
break
else:
# if we don't hit the desired note, we get another 10 signals
notes_list = []
stream.stop_stream()
stream.close()
p.terminate()
time.sleep(1) # so the user is not bombarded by constant new things
'''
if (new_note[0] != 0):
note_str = ' '.join(["%.2f" % i for i in new_note])
print(f'{new_note[0]} - {pitch2note(int(new_note[0]))}')
#print(pitch_o(signal)
'''
#print("* done recording")
| [
"[email protected]"
] | |
87e92eb32a80400e4da5670fc49fc25d5d68ab3a | 579640d4603ee4239ea2b6dd592aa3f30decaca8 | /week09/UserManger/user/urls.py | 13865317fdd1e9223b9c4901e33361ddbc6f837b | [] | no_license | yangguangchao/Python001-class01 | d5d8a8ee4f74dbd963f1e7a1944149dfcd9124c8 | 0c43223a362086257b182ec2f7ca7d034809e96c | refs/heads/master | 2022-12-10T16:52:15.919865 | 2020-09-06T06:12:04 | 2020-09-06T06:12:04 | 273,133,647 | 0 | 0 | null | 2020-06-18T03:33:48 | 2020-06-18T03:33:48 | null | UTF-8 | Python | false | false | 206 | py | from django.urls import path, re_path, register_converter
from . import views
urlpatterns = [
path('', views.index),
path('login', views.login_user),
path('index', views.index, name='index')
]
| [
"[email protected]"
] | |
636743aee653fe77c053c25d57c6367c379f5153 | 026ea2872634d86e1ad2916ffc9c460fc252edad | /hack.py | 39b783cbdf4fb71f625277b0569a1a3f0d9613f0 | [] | no_license | Ushasreetwinky/project-2 | 1cf2c0784106267e313ad59bcd76db4129a9943e | 3262220d70ea76ee5024c4cd381ee66e6cfbbeb8 | refs/heads/master | 2020-12-03T08:05:14.622015 | 2016-09-08T13:28:57 | 2016-09-08T13:28:57 | 95,657,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | from subprocess import Popen, PIPE
import json
import csv
import sys
def main(argv):
    print "argument received:", argv[0]
    if argv[0] == "output.js":
        commandLine = "node output.js"
    else:
        commandLine = "python output.py"
p = Popen(commandLine, stderr=PIPE, stdout=PIPE, shell=True)
output, errors = p.communicate()
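    # communicate() reads both pipes to EOF and waits for the process to exit,
    # avoiding the deadlock risk of reading PIPE streams manually.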
result=[]
if output=="":
if errors!="":
            if argv[0] == "output.js":
                # split on the delimiter character found at index 41 of the
                # Node error output (a fragile heuristic kept as-is)
                er = errors.split(errors[41])
                result.extend(er)
            elif errors.find("\n") > 0:  # find() returns -1 instead of raising like index()
                er = errors.split("\n")
                result.extend(er)
            else:
                result.extend(errors.split("\n"))
else:
result.append(output)
    with open('output.csv', 'wb') as csvfile:
writer = csv.writer(csvfile, quoting=csv.QUOTE_MINIMAL)
writer.writerow(["output"])
for res in result:
writer.writerow([res])
if __name__ == "__main__":
main(sys.argv[1:]) | [
"[email protected]"
] | |
6cd72b223191312ffb848bfd4bc37e80f8d22f97 | 4624906bfa6d9a49b2601d5b3453dcf135023684 | /seguimiento/settings.py | 778163a2d3146e63eef8f1ccdf749043957244e0 | [] | no_license | lobomacz/seguimiento | a04f40588fa4c64d0e65496acfb6f61e6fb6d8e2 | 32d8ed2998f6ee1bc903a8e4ebab8ca3a5b7aaf4 | refs/heads/master | 2023-07-04T23:39:50.528353 | 2021-07-30T18:09:39 | 2021-07-30T18:09:39 | 391,153,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,912 | py | """
Django settings for seguimiento project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-ft^8agycw2x^lrqj(efy!u6zh1!rjz)-s1*(e6q52i#1ef2hy+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'bootstrap4',
'django_bootstrap_icons',
'sispro',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'seguimiento.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.template.context_processors.media',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'seguimiento.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': BASE_DIR / 'db.sqlite3',
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'seguimiento',
'USER': 'seguimiento',
'PASSWORD': '~{8"5Gy]br}K9c~8',
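        # NOTE: credentials are committed in source here; reading them from the
        # environment (e.g. os.environ) would be safer in a real deployment.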
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Managua'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
# Media (image) file storage
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"[email protected]"
] | |
b891b84c1fffdf89fa571cec71ef4beed64a0c44 | 187a602feb94ead21bcfacbd5b4c92978424aeff | /angr/exp/08_angr_overflow/16_angr_arbitrary_write.py | 1332a3dab03720a4cdbe1cd4c47344e5fbb21403 | [] | no_license | Taardisaa/0x401RevTrain-Tools | c50ef252731547eaba68c97e12997e845ff31727 | e7ea89037b98ae6bdcb2209ae933a270ac8bcb69 | refs/heads/main | 2023-08-22T07:58:10.669461 | 2021-10-11T07:56:39 | 2021-10-11T07:56:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | import angr
import claripy
proj = angr.Project('../dist/17_angr_arbitrary_jump')
payload = claripy.BVS('payload', 64 * 8)
state = proj.factory.entry_state(stdin=payload)
simgr = proj.factory.simgr(
state,
save_unconstrained=True,
stashes={
'active':[state],
'unconstrained': [],
'found': [],
})
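# save_unconstrained keeps states whose instruction pointer became symbolic
# (fully input-controlled) instead of discarding them; the loop below checks
# whether eip can be pointed at the target address. Note that move() with no
# filter transfers every unconstrained state once one satisfiable state is found.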
while (len(simgr.active) or len(simgr.unconstrained)) and not len(simgr.found):
for unconstrained in simgr.unconstrained:
eip = unconstrained.regs.eip
print_good_addr = 0x42585249
if unconstrained.satisfiable(extra_constraints=[eip == print_good_addr]):
unconstrained.add_constraints(eip == print_good_addr)
simgr.move('unconstrained', 'found')
break
simgr.drop(stash="unconstrained")
simgr.step()
print(simgr.found[0].posix.dumps(0)) | [
"[email protected]"
] | |
3ee9a2aef250a7b26732547c29bf32266b68910d | 2885d654eb99ca71bfeccd8d1f706c07064d8ad6 | /polish_notation.py | 4ca1be683294174e18e29a945d72f7b1707dc000 | [] | no_license | confar/python_challenges | e521775a0c4b6da4e6ce045b3e2cdb3b3ab1fa56 | a03eade2df9d6b1ab63fe5ae269556b38458e0f4 | refs/heads/master | 2023-07-10T04:20:57.146413 | 2021-08-15T20:59:06 | 2021-08-15T20:59:06 | 262,156,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | import io
import operator
mapping = {'+': operator.add, '*': operator.mul, '-': operator.sub}
def main(str_buffer):
    stack = []
    for token in next(str_buffer).split(' '):
        if token.isdigit():
            stack.append(int(token))
        elif token in mapping:
            # pop the right operand first, then the left, so that
            # non-commutative operators like '-' apply in the correct order
            right = stack.pop()
            left = stack.pop()
            stack.append(mapping[token](left, right))
    print(stack[0])
    return stack[0]
tst1 = io.StringIO('8 11 +')
tst2 = io.StringIO('2 3 + 4 *')
# tst3 = io.StringIO('6 7 8 + 3 * +')
assert main(tst1) == 19
assert main(tst2) == 20
# assert main(tst3) == 51  # 6 + (7 + 8) * 3
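# added order check (a sketch): '5 3 -' must evaluate as 5 - 3
assert main(io.StringIO('5 3 -')) == 2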
| [
"[email protected]"
] | |
f16cdfe295d547f8dc1f3d68e2f1cf83acf58463 | d82995f33a603b61e06deecd644f5a293823b746 | /forward_inverse_kinematics/quiz_rotation.py | c6f25841bacee9da4d1f21c03c89e4df9b1bc1c0 | [] | no_license | RyanCargan/udacity-robond-term1 | 79d065aab42e4e308a93e36fa869bb48477aebef | a92a6e665e72930f9a33eb3a82f64da206b64fcf | refs/heads/master | 2021-09-07T08:39:07.181342 | 2018-02-20T12:27:29 | 2018-02-20T12:27:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,607 | py | # Two coding quizzes follow. The first requires you to perform an
# intrinsic rotation sequence about the Y and then Z axes.
# The second quiz requires you to perform an extrinsic rotation about
# the Z and then Y axes.
from sympy import symbols, cos, sin, pi, sqrt
from sympy.matrices import Matrix
# Create symbols for joint variables
q1, q2 = symbols('q1:3')
dtr = pi / 180.0 # degree to radians
rtd = 180.0 / pi # radians to degrees
q1 = 45 * dtr
q2 = 60 * dtr
# Create a symbolic matrix representing an intrinsic sequence of rotations
# about the Y and then Z axes.
# Let the rotation about the Y axis be described
# by q1 and the rotation about Z by q2.
####### TO DO ########
# Replace R_y and R_z with the appropriate (symbolic) elementary rotation matrices
# and then compute YZ_intrinsic.
R_y = Matrix([[cos(q1), 0, sin(q1)], \
[0, 1, 0], \
[-sin(q1), 0, cos(q1)]])
R_z = Matrix([[cos(q2), -sin(q2), 0], \
[sin(q2), cos(q2), 0], \
[0, 0, 1]])
YZ_intrinsic_sym = R_y * R_z  # intrinsic (moving-axis) rotations compose by post-multiplication
####### TO DO ########
# Numerically evaluate YZ_intrinsic assuming:
# q1 = 45 degrees and q2 = 60 degrees.
# NOTE: Trigonometric functions in Python assume the input is in radians!
# Intrinsic -> moving axis -> post multiply
# YZ_intrinsic_num = 1 #YZ_intrinsic_sym.evalf(subs={})
YZ_intrinsic_num = YZ_intrinsic_sym.evalf(subs={})
# Alternatively:
# YZ_intrinsic_sym = R_y * R_z
# YZ_intrinsic_num = YZ_intrinsic_sym.evalf(subs={q1: pi/4, q2: pi/3})
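# Note: the extrinsic Z-then-Y sequence from the second quiz composes by
# pre-multiplication about fixed axes and yields the same product, R_y * R_z.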
print("YZ_intrinsic_num: ")
print(YZ_intrinsic_num) | [
"[email protected]"
] | |
795dff346d3aeb2e1858d868150cdee577936cc3 | dd8ece85ddede58b33e745458323dc2c0c899aba | /Lesson6/ask5.py | 5a86e88ab946a4c25b56fa8b2ed315183e32dbb5 | [] | no_license | michnik3/Python | 48f26c497577eaebade84b12a26fa3585bac32d6 | aade74d8192ddfea5513bbbf5a3edc716d6c4356 | refs/heads/main | 2023-08-25T00:18:48.173146 | 2021-10-07T09:11:13 | 2021-10-07T09:11:13 | 362,200,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | my_list = [number for number in range(100) if number % 2 ==0 and number % 3 ==0]
print(my_list)
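# divisible by both 2 and 3 means divisible by 6, i.e. list(range(0, 100, 6))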
| [
"[email protected]"
] | |
5a3a224eb388887427ba5aaea6b9f9fa0d43402b | 237a72855fad3032c7bfe2cd944ae05d018b17f4 | /model.py | 83fe1df555f51fb0340c48adfe802b5984753e4b | [
"MIT"
] | permissive | wdecay/ShapeClassification | 54a8897ab7cbfb109d96fec346ab92878522341e | 0592a837f272c709322a1d7e74948268e8c82cce | refs/heads/master | 2022-11-19T23:20:08.860894 | 2020-07-23T03:25:52 | 2020-07-23T03:25:52 | 281,619,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,927 | py | import tensorflow as tf
import layers
def compose_layers(num_classes, layer_dims, shape_input):
embed_layer = layers.SelfInteractionSimple(layer_dims[0])
input_layer = layers.Input()
model_layers = []
for dim in layer_dims[1:]:
model_layers.append(layers.Convolution())
model_layers.append(layers.Concatenation())
model_layers.append(layers.SelfInteraction(dim))
model_layers.append(layers.Nonlinearity())
output_layer = layers.Output(num_classes)
x, rbf, rij = input_layer(shape_input)
input_tensor_list = {0: [embed_layer(x)]}
for layer in model_layers:
if isinstance(layer, layers.Convolution):
input_tensor_list = layer([input_tensor_list, rbf, rij])
else:
input_tensor_list = layer(input_tensor_list)
output = output_layer(input_tensor_list)
return output
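# compose_layers threads a dict of tensor lists keyed by rotation order
# (key 0 holds the scalar features), the layout used by tensor-field-network
# style equivariant models; the custom `layers` module presumably follows it.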
def build_model(num_classes,
                layer_dims=[1, 4, 4, 4],
                shape_input=None):
    # create the default Input inside the call rather than at import time
    if shape_input is None:
        shape_input = tf.keras.Input(shape=(4, 3), dtype=tf.float32)
    output = compose_layers(num_classes, layer_dims, shape_input)
    model = tf.keras.Model(inputs=shape_input, outputs=output)
optimizer = tf.keras.optimizers.Adam(learning_rate=1.e-3)
@tf.function
def loss_fn(truth, pred):
return tf.nn.sparse_softmax_cross_entropy_with_logits(
labels = tf.cast(tf.squeeze(truth, axis=-1), tf.int32),
logits = pred)
model.compile(optimizer=optimizer, loss = loss_fn, metrics=['sparse_categorical_accuracy'])
return model
# test with eager execution
if __name__ == "__main__":
from dataset import get_dataset
dataset, num_classes = get_dataset()
test_data = dataset.map(lambda x, y: x).batch(1).take(1).as_numpy_iterator().next()
print("Input:")
print(test_data)
result = compose_layers(num_classes, [1, 4, 4, 4], test_data)
print("Output:")
print(result)
| [
"[email protected]"
] | |
0922a6895c74ad69b3aa2cc387e3b2483dd419a7 | c9a809c5ef2a6b5e7e50da548c182510d203f430 | /tests/unit/states/test_boto_sqs.py | 93f56d4305805729802429fd15f700b3a67e9619 | [
"Apache-2.0"
] | permissive | andyyumiao/saltx | 676a44c075ce06d5ac62fc13de6dcd750b3d0d74 | a05c22a60706b5c4389adbd77581b5cf985763b5 | refs/heads/master | 2022-02-24T00:51:42.420453 | 2022-02-09T06:46:40 | 2022-02-09T06:46:40 | 231,860,568 | 1 | 5 | NOASSERTION | 2022-02-09T06:46:40 | 2020-01-05T03:10:15 | Python | UTF-8 | Python | false | false | 3,159 | py | # -*- coding: utf-8 -*-
'''
:codeauthor: Jayesh Kariya <[email protected]>
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import skipIf, TestCase
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
# Import Salt Libs
import salt.states.boto_sqs as boto_sqs
@skipIf(NO_MOCK, NO_MOCK_REASON)
class BotoSqsTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.states.boto_sqs
'''
def setup_loader_modules(self):
return {boto_sqs: {}}
# 'present' function tests: 1
def test_present(self):
'''
Test to ensure the SQS queue exists.
'''
name = 'mysqs'
attributes = {'ReceiveMessageWaitTimeSeconds': 20}
ret = {'name': name,
'result': False,
'changes': {},
'comment': ''}
mock = MagicMock(side_effect=[False, False, True, True])
mock_bool = MagicMock(return_value=False)
mock_attr = MagicMock(return_value={})
with patch.dict(boto_sqs.__salt__,
{'boto_sqs.exists': mock,
'boto_sqs.create': mock_bool,
'boto_sqs.get_attributes': mock_attr}):
with patch.dict(boto_sqs.__opts__, {'test': False}):
comt = ('Failed to create {0} AWS queue'.format(name))
ret.update({'comment': comt})
self.assertDictEqual(boto_sqs.present(name), ret)
with patch.dict(boto_sqs.__opts__, {'test': True}):
comt = ('AWS SQS queue {0} is set to be created.'.format(name))
ret.update({'comment': comt, 'result': None})
self.assertDictEqual(boto_sqs.present(name), ret)
comt = ('Attribute(s) ReceiveMessageWaitTimeSeconds'
' to be set on mysqs.')
ret.update({'comment': comt})
self.assertDictEqual(boto_sqs.present(name, attributes), ret)
comt = ('mysqs present. Attributes set.')
ret.update({'comment': comt, 'result': True})
self.assertDictEqual(boto_sqs.present(name), ret)
# 'absent' function tests: 1
def test_absent(self):
'''
Test to ensure the named sqs queue is deleted.
'''
name = 'test.example.com.'
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
mock = MagicMock(side_effect=[False, True])
with patch.dict(boto_sqs.__salt__,
{'boto_sqs.exists': mock}):
comt = ('{0} does not exist in None.'.format(name))
ret.update({'comment': comt})
self.assertDictEqual(boto_sqs.absent(name), ret)
with patch.dict(boto_sqs.__opts__, {'test': True}):
comt = ('AWS SQS queue {0} is set to be removed.'.format(name))
ret.update({'comment': comt, 'result': None})
self.assertDictEqual(boto_sqs.absent(name), ret)
| [
"[email protected]"
] | |
acb7bd4445608d425729d2bd9a6757c8d6b0dccb | 8535bbc7781c4691880c935bd7025646f0dbb7c3 | /area of farm 5 decimals.py | 6f53c191bc9e407b2a7b5ac4f003a0ca36566bce | [] | no_license | Mahadev0317/Codekata | 3b2149f3116ebe4b48b2059b873544c27b23ff39 | c35fa0ed0c4870faea69152638f461e743a9ff69 | refs/heads/master | 2020-04-15T04:59:17.062947 | 2019-05-29T04:46:35 | 2019-05-29T04:46:35 | 164,404,727 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | l,b=map(float,input().split())
p=l*b
print("{0:.5f}".format(p))
| [
"[email protected]"
] | |
4db3db6a15d4a0183be6148e2d90c7766e1b73c0 | 7c2c9f0c7ab918004d05e340d1f41e1e914650a5 | /ex5.py | 95b4e991f5460097c5fa9049381ac1b280994927 | [] | no_license | blip-lorist/python-the-hard-way | f6ad2aa9cfcde25efdb0829f54780edda37f1aed | 06a5256b915f364ebd68c518a277a42a92383e07 | refs/heads/master | 2022-10-20T01:17:28.676035 | 2016-01-01T21:36:46 | 2016-01-01T21:36:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | name = 'Zed A. Shaw'
age = 35 # not a lie
height_inches = 74 # inches
height_cm = height_inches * 2.54
weight_lbs = 180 #lbs
weight_kg = weight_lbs / 2.2
eyes = 'Blue'
teeth = 'White'
hair = 'Brown'
print "Let's talk about %s." % name
print "He's %d inches tall or %d cm tall." % (height_inches, height_cm)
print "He's %d pounds heavy or %d kg heavy." % (weight_lbs, weight_kg)
print "Actually that's not too heavy."
print "He's got %s eyes and %s hair." % (eyes, hair)
print "His teeth are usually %s depending on the coffee." % teeth
# this line is tricky, try to get it exactly right
print "If I add %d, %d, and %d I get %d" % (
age, height_inches, weight_lbs, age + height_inches + weight_lbs)
| [
"[email protected]"
] | |
608929efefe568898dd796fd04a624caf4af1899 | c2a15c1a4590982250d88c8125d4e6217c43fa17 | /textman.py | 0f003148fc51b826f4facd88708d82dfa01240d7 | [] | no_license | simranbat/pitchfork | 63bd9213db9d0b90acb48f0382d32097abf73a14 | a84961e0d7447cbaa7b8edf16eea3d3be275653e | refs/heads/master | 2020-05-17T22:29:05.165099 | 2019-04-30T13:52:19 | 2019-04-30T13:52:19 | 184,002,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,901 | py | import pandas as pd
import nltk
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import numpy as np
import sqlite3
def import_source(src_file, start_line=None, end_line=None, col_name='line', id_name='line_id', strip=True):
"""Imports a raw text file into a dataframe of lines. Removes extra lines."""
print('src_file', src_file)
df = pd.DataFrame({col_name:open(src_file,'r').readlines()})
if not start_line:
start_line = 0
if not end_line:
end_line = len(df.index)
df = df.loc[start_line:end_line]
df.index.name = id_name
if strip:
df[col_name] = df[col_name].str.strip()
return df
def group_by_milestone(df, div_name, div_pat, src_idx, src_col, tmp_col='div_idx', id_suffix='_id'):
"""Splits text lines by a milestone and groups them into units, such as a chapters from chapter headings."""
df[div_name] = df[src_col].str.match(div_pat)
df2 = df.loc[df[div_name], src_col].copy().reset_index(drop=True).to_frame()\
.rename(columns={src_col:div_name})
df2.index.name = div_name + id_suffix
df[tmp_col] = None
df[tmp_col] = df[df[div_name]].apply(lambda x: x.index)
df[tmp_col] = df[tmp_col].ffill()
df[tmp_col] = df[tmp_col].astype('int')
df2[div_name] = df.groupby(tmp_col)[src_col].apply(lambda x: '\n'.join(x[:]))\
.to_frame().reset_index(drop=True)
df2.index.name = div_name + id_suffix
return df2
def split_by_delimitter(df, div_name=None, div_pat=None, src_col=None, join_pat='\n', id_suffix='_id'):
"""Splits lines in smaller units of text based on a delimitter."""
df2 = df[src_col].str.split(div_pat, expand=True).stack().to_frame()\
.rename(columns={0:div_name}).copy()
df2.index.names = df.index.names + [div_name + id_suffix]
df2[div_name] = df2[div_name].str.replace(join_pat, ' ')
df2 = df2[~df2[div_name].str.match(r'^\s*$')]
return df2
def gather_tokens(df, level=0, col='token', glue=' ', collapse=False):
"""Recombines units into larger units of text."""
idx = df.index.names[:level+1]
df2 = df.groupby(idx)[col].apply(lambda x: glue.join(x))
return df2
def normalize_tokens(df, token_col='token'):
"""Lowercases and strips tokens."""
df['term_str'] = df[token_col].str.lower().str.strip()
return df
def add_pos_to_tokens(tokens, idx=['chap_id','para_id','sent_id'], token_col='token'):
"""Adds POS tages to tokens."""
df = tokens.groupby(idx).token.apply(lambda x: nltk.pos_tag(x.tolist()))\
.apply(pd.Series).stack()\
.to_frame().reset_index()\
.rename(columns={'level_{}'.format(len(idx)):'token_id', 0:'pos'})\
.set_index(idx + ['token_id'])
tokens['pos'] = df.pos.apply(lambda x: x[1])
return tokens
def create_vocab(df, col='term_str'):
"""Creates a vocabulary table from a token table."""
terms = df[col].value_counts()\
.to_frame().reset_index()\
.rename(columns={'index':'term_str',col:'n'})\
.sort_values('term_str').reset_index(drop=True)
terms.index.name = 'term_id'
terms['f'] = terms.n.div(terms.n.sum())
return terms
def add_stems_to_vocab(vocab):
"""Adds stems to vocabulary table."""
ps = PorterStemmer()
vocab['stem'] = vocab['term_str'].apply(lambda x: ps.stem(x))
return vocab
def link_tokens_to_vocab(tokens, vocab, drop=False):
"""Adds term_id column to tokens table."""
tokens['term_id'] = tokens['term_str'].map(vocab.reset_index()\
.set_index('term_str').term_id)
if drop:
del(tokens['term_str'])
return tokens
# Todo: Codify these assumptions in config
def identify_stopwords(vocab):
"""Identifies stopwords in the vocabulary table."""
sw = set(stopwords.words('english'))
vocab['sw'] = vocab.apply(lambda x:
x.term_str in sw
or len(x.term_str) <= 2
or not x.term_str.isalpha()
or x.n < 3,
axis=1)
vocab['go'] = ~vocab.sw
return vocab
def remove_stopwords(df, vocab, term_id_col='term_id'):
"""Removes stopwords from the tokens table."""
df = df[df[term_id_col].isin(vocab[vocab.go].index.values)].copy()
return df
def create_doc_table(tokens, index=['chap_id', 'para_id']):
"""Create the documents table from tokens tables."""
doc = tokens.groupby(index).term_id.count()\
.to_frame().rename(columns={'term_id':'n'})
return doc
def create_bow(tokens, idx, index_name='doc_id'):
"""Create a bag-of-words table from tokens table."""
col = idx[-1]
bow = tokens.groupby(idx)[col].count()\
.to_frame().rename(columns={col:'n'})
if index_name:
bow.index.name = index_name
return bow
def create_dtm(bow, fill_val=0):
"""Create a document-term matrix from a bag-of-words."""
dtm = bow.unstack().fillna(fill_val)
dtm.columns = dtm.columns.droplevel(0)
return dtm
def compute_term_freq(dtm, vocab):
    """Computes the row-normalized term-frequency matrix and adds tf_sum to vocab."""
    dtm_tf = dtm.apply(lambda x: x / x.sum(), 1)
    vocab['tf_sum'] = dtm_tf.sum()
    return dtm_tf, vocab
def compute_inv_doc_freq(dtm, vocab):
N = len(dtm.index)
dtm_idf = dtm.apply(lambda x: N / x[x > 0].count())
vocab['idf'] = dtm_idf
return dtm_idf, vocab
def compute_tfidf(dtm, vocab, doc, bow, sw=False):
N = len(dtm.index)
dtm_tfidf = dtm.apply(lambda row: row / row.sum(), 1)\
.apply(lambda col: col * np.log10(N/col[col > 0].count()))
vocab['tfidf_sum'] = dtm_tfidf.sum()
doc['tfidf_sum'] = dtm_tfidf.sum(1)
bow['tfidf'] = dtm_tfidf.stack().to_frame().rename(columns={0:'tfidf'})
return dtm_tfidf, vocab, doc, bow
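# The TF-IDF above is tfidf(t, d) = (n_td / n_d) * log10(N / df_t), applied
# row-wise for the term-frequency factor and column-wise for the IDF factor.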
def get_term_id(vocab, term):
    term_id = vocab[vocab.term_str == term].index[0]  # the vocab column is 'term_str'
    return term_id
def get_term(vocab, term_id):
    term = vocab.loc[term_id].term_str
    return term
def create_tokens_and_vocab(paras, idx=['chap_id','para_id','sent_id'],
src_col='para', pos=False, drop=False):
"""Composite function to genereate tokens and vocabulary tables from units (e.g. paragraphs)."""
cfg = dict(
sent = dict(
div_name = 'sent',
div_pat = r'(?:[":;.?!\(\)]|--)',
src_col = src_col,
join_pat = ' '
),
token = dict(
div_name = 'token',
div_pat = r'\W+',
src_col = 'sent',
join_pat = ' '
)
)
sents = split_by_delimitter(paras, **cfg['sent'])
tokens = split_by_delimitter(sents, **cfg['token'])
tokens = normalize_tokens(tokens)
if pos:
tokens = add_pos_to_tokens(tokens, idx=idx)
vocab = create_vocab(tokens)
vocab = add_stems_to_vocab(vocab)
vocab = identify_stopwords(vocab)
tokens = link_tokens_to_vocab(tokens, vocab, drop=drop)
tokens = remove_stopwords(tokens, vocab)
return tokens, vocab
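# Minimal pipeline sketch (assumes `paras` is a DataFrame with a 'para' column
# indexed by chap_id/para_id):
#   tokens, vocab = create_tokens_and_vocab(paras)
#   bow = create_bow(tokens, ['chap_id', 'para_id', 'term_id'])
#   dtm = create_dtm(bow)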
def add_doc_len_features(df, str_col, prefix='doc_'):
    len_col = prefix + 'len'  # avoid shadowing the builtin len()
    df[len_col] = df[str_col].str.len()
    df[prefix + 'z'] = (df[len_col] - df[len_col].mean()).div(df[len_col].std())
    df[prefix + 's'] = (df[len_col] / df[len_col].max()).multiply(100).round().astype('int')
    df[prefix + 'p'] = df[len_col] / df[len_col].sum()
    df[prefix + 'h'] = df[prefix + 'p'].multiply(np.log2(df[prefix + 'p'])) * -1
    return df
def put_to_db(db, df, table_name, index=True, if_exists='replace'):
r = df.to_sql(table_name, db, index=index, if_exists=if_exists)
return r
def get_from_db(db, table_name):
df = pd.read_sql("SELECT * FROM {}".format(table_name), db)
return df
def get_table(table, db_file, fields='*', index_col=None):
if type(fields) is list:
fields = ','.join(fields)
with sqlite3.connect(db_file) as db:
return pd.read_sql("select {} from {}".format(fields, table), db, index_col=index_col)
def get_sql(sql, db_file, params=None):
    with sqlite3.connect(db_file) as db:
        return pd.read_sql(sql, db, params=params)  # pass by keyword: the third positional arg is index_col
| [
"[email protected]"
] | |
42c5a2d68c0705069fc43d9678d3cec33f9a871d | 5248e096abb7e7b8dcaf522c54f857f62dc8639c | /src/autoregkd/interface/cli.py | d0d958ce5a0316da235931073c497f3968448aab | [] | no_license | haroldrubio/autoregkd | b1858154595a3d2e8d9ab3fbe7621b88b21a5837 | cac6e3d09aa137e5613879d90fed1c878b7460e2 | refs/heads/master | 2023-08-14T15:04:02.906017 | 2021-03-24T14:21:25 | 2021-03-24T14:21:25 | 342,059,131 | 1 | 0 | null | 2021-04-02T18:12:39 | 2021-02-24T23:00:08 | Python | UTF-8 | Python | false | false | 849 | py | import click
@click.command(
context_settings=dict(show_default=True),
)
@click.option(
"--log_batch_size",
type=int,
default=2,
help="batch size for training will be 2**LOG_BATCH_SIZE",
) # Using batch sizes which are 2**n for some integer n may help optimize GPU efficiency
@click.option(
"--log_eval_batch_size",
type=int,
default=2,
help="batch size for eval will be 2**LOG_EVAL_BATCH_SIZE",
) # Using batch sizes which are 2**n for some integer n may help optimize GPU efficiency
@click.option(
"--epochs",
type=int,
default=1,
help="number of training epochs",
)
@click.option(
"--learning_rate",
type=float,
default=2e-5,
help="learning rate",
)
def experiment(**config):
"""Train a BART model"""
from ..training.train import training
training(config)
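# Example invocation (a sketch; assumes the command is exposed as a console script):
#   experiment --epochs 3 --log_batch_size 4   # trains with batch size 2**4 = 16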
| [
"[email protected]"
] | |
f9fb8c89a3998efb7df0b951cb1a4a44075afd5b | da099343970eaa367991f08b40c83a580fd09e57 | /download-deveres/para-aulas-do-curso-em-video/aula10a.py | 1400ca4701f8dc3c563aff41b3c010bfab29175f | [
"MIT"
] | permissive | Hugo-Oliveira-RD11/aprendendo-PYTHON-iniciante | dbae493b2f2b49d4a7527986836b6978c6b56829 | b5e41015e2cb95946262678e82197e5f47d56271 | refs/heads/master | 2022-12-16T02:53:48.210283 | 2020-09-11T19:43:57 | 2020-09-11T19:43:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | n = str(input('qual e o seu nome?'))
# (the input prompt above asks "what is your name?")
if n == 'hugo':
    print('what a nice name, huh...')
else:
    print('what a bad name!!')
print('have a good day {}'.format(n))
| [
"[email protected]"
] | |
0f1db565b35c7cbfb8f04dab6e0e5c69df2458b9 | 34dc1805144d4971286b0b0989ba7417880e038c | /YoutubeNetwork/record_dataformat_version/train/model.py | 4a6ccb67212d98d9733be1b8406fa26cda930ce8 | [
"MIT"
] | permissive | ishigami33/deep_learning | b5856b6c5031dc2f5f0b22811a2b873dc39e13c0 | e64ad4e10fb901b4e7efe777f5bd2efd0d6e0a39 | refs/heads/master | 2020-11-25T05:24:44.227455 | 2019-12-04T05:14:48 | 2019-12-04T05:14:48 | 228,519,783 | 1 | 0 | MIT | 2019-12-17T02:52:07 | 2019-12-17T02:52:07 | null | UTF-8 | Python | false | false | 6,109 | py | import tensorflow as tf
class Model(object):
def __init__(self, args):
# self.sess=sess
self.is_training = args.is_training
# self.input_size=args.input_size
self.embedding_size = args.embedding_size
# self.basic_size=args.basic_size
self.brand_list = args.brand_list
self.msort_list = args.msort_list
self.item_count = args.item_count
self.brand_count = args.brand_count
self.msort_count = args.msort_count
self.checkpoint_dir = args.checkpoint_dir
self.lr = args.lr
def build_model(self, uij):
self.u = uij[3]
self.sub_sample = uij[1]
self.y = uij[2]
self.hist_i = uij[0]
self.sl = uij[4]
self.last = uij[5]
# self.recall_skn_idx
# emb variable
item_emb_w = tf.get_variable("item_emb_w", [self.item_count, self.embedding_size])
item_b = tf.get_variable("item_b", [self.item_count], initializer=tf.constant_initializer(0.0))
brand_emb_w = tf.get_variable("brand_emb_w", [self.brand_count, self.embedding_size])
msort_emb_w = tf.get_variable("msort_emb_w", [self.msort_count, self.embedding_size])
brand_list = tf.convert_to_tensor(self.brand_list, dtype=tf.int32)
msort_list = tf.convert_to_tensor(self.msort_list, dtype=tf.int32)
        # history seq
hist_b = tf.gather(brand_list, self.hist_i)
hist_m = tf.gather(msort_list, self.hist_i)
h_emb = tf.concat([tf.nn.embedding_lookup(item_emb_w, self.hist_i),
tf.nn.embedding_lookup(brand_emb_w, hist_b),
tf.nn.embedding_lookup(msort_emb_w, hist_m)], axis=2)
        # history mask
mask = tf.sequence_mask(self.sl, tf.shape(h_emb)[1], dtype=tf.float32) # [B,T]
mask = tf.expand_dims(mask, -1) # [B,T,1]
mask = tf.tile(mask, [1, 1, tf.shape(h_emb)[2]]) # [B,T,3*e]
h_emb *= mask # [B,T,3*e]
hist = tf.reduce_sum(h_emb, 1) # [B,3*e]
hist = tf.div(hist,
tf.cast(tf.tile(tf.expand_dims(self.sl, 1), [1, 3 * self.embedding_size]), tf.float32)) # [B,3*e]
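        # hist is the mean-pooled history embedding: the masked sum over time
        # steps divided by the true sequence length sl, giving shape [B, 3*e].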
# last
last_b = tf.gather(brand_list, self.last)
last_m = tf.gather(msort_list, self.last)
l_emb = tf.concat([tf.nn.embedding_lookup(item_emb_w, self.last),
tf.nn.embedding_lookup(brand_emb_w, last_b),
tf.nn.embedding_lookup(msort_emb_w, last_m)], axis=1)
# net input
self.input = tf.concat([hist, l_emb], axis=-1)
# print('',)
# dd net
bn = tf.layers.batch_normalization(inputs=self.input, name='b1')
layer_1 = tf.layers.dense(bn, 1024, activation=tf.nn.relu, name='f1')
layer_2 = tf.layers.dense(layer_1, 512, activation=tf.nn.relu, name='f2')
layer_3 = tf.layers.dense(layer_2, 3 * self.embedding_size, activation=tf.nn.relu, name='f3')
self.layer1 = layer_3
# softmax
if self.is_training:
sa_b = tf.gather(brand_list, self.sub_sample)
sa_m = tf.gather(msort_list, self.sub_sample)
sample_w = tf.concat([tf.nn.embedding_lookup(item_emb_w, self.sub_sample),
tf.nn.embedding_lookup(brand_emb_w, sa_b),
tf.nn.embedding_lookup(msort_emb_w, sa_m)], axis=2) # [B,sample,3*e]
# sample_w=tf.nn.embedding_lookup(item_emb_w,self.sub_sample)
sample_b = tf.nn.embedding_lookup(item_b, self.sub_sample) # [B,sample]
user_v = tf.expand_dims(layer_3, 1) # [B,1,3*e]
sample_w = tf.transpose(sample_w, perm=[0, 2, 1]) # [B,3*e,sample]
self.logits = tf.squeeze(tf.matmul(user_v, sample_w), axis=1) + sample_b
# Step variable
self.global_step = tf.Variable(0, trainable=False, name='global_step')
# self.global_epoch_step = tf.Variable(0, trainable=False, name='global_epoch_step')
# self.global_epoch_step_op = tf.assign(self.global_epoch_step, self.global_epoch_step + 1)
'''
self.loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
logits=self.logits,
labels=self.y)
)
'''
self.yhat = tf.nn.softmax(self.logits)
self.loss = tf.reduce_mean(-tf.cast(self.y, dtype=tf.float32) * tf.log(self.yhat + 1e-24))
trainable_params = tf.trainable_variables()
self.opt = tf.train.GradientDescentOptimizer(learning_rate=self.lr)
gradients = tf.gradients(self.loss, trainable_params)
clip_gradients, _ = tf.clip_by_global_norm(gradients, 5)
self.train_op = self.opt.apply_gradients(
zip(clip_gradients, trainable_params), global_step=self.global_step)
else:
all_emb = tf.concat([item_emb_w,
tf.nn.embedding_lookup(brand_emb_w, brand_list),
tf.nn.embedding_lookup(msort_emb_w, msort_list)], axis=1)
self.logits = tf.matmul(layer_3, all_emb, transpose_b=True) + item_b
self.output = tf.nn.softmax(self.logits)
def save(self, sess, path):
# saver = tf.train.Saver(tf.global_variables(), max_to_keep=10)
saver = tf.train.Saver()
saver.save(sess, save_path=path)
# saver.save(sess, '{}/Ytb-model'.format(self.checkpoint_dir), global_step=self.global_step)
def restore(self, sess, path):
saver = tf.train.Saver()
saver.restore(sess, save_path=path)
'''
ckpt = tf.train.get_checkpoint_state(self.checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
print("Successfully loaded:", ckpt.model_checkpoint_path)
saver.restore(self.sess, ckpt.model_checkpoint_path)
'''
| [
"[email protected]"
] | |
368ce9e63801f449031d431acdc5d7e45ecad9c8 | c9047f676a29d58943865d548ae70e43014a5881 | /tools/99_gdb/gdb_timers.py | 17e393eb6360dd89da1ad58f6a1e191c31090749 | [
"BSD-3-Clause"
] | permissive | danome/prod | 578045cd7a552dced0a5733fcf8f303fe55e15b7 | 8c0bdc2c8e0939bf4d04e46e1171a55e43500346 | refs/heads/tp-master | 2021-01-17T21:10:35.987764 | 2018-10-16T00:29:08 | 2018-10-16T00:29:08 | 44,350,128 | 0 | 0 | null | 2015-10-15T22:55:07 | 2015-10-15T22:55:06 | null | UTF-8 | Python | false | false | 4,385 | py | # Setup required to use this module
#
# copy gdb_timers.py to <app>/.gdb_timers.py
# and add "source .gdb/.gdb_timers.py" to the <app>/.gdbinit file.
#
from __future__ import print_function
import gdb  # provided by GDB's embedded Python; source this file from within gdb
from binascii import hexlify
timer_dict = {
}
def simple_timer_name(timer_id):
t_name = 't' + str(timer_id)
return t_name
def timer_name(timer_id):
t_name = timer_dict.get(timer_id, None)
    if t_name is None:
t_name = 't' + str(timer_id)
else:
t_name = '{}/{}'.format(t_name, timer_id)
return t_name
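# Populate timer_dict with {timer_id: 'symbolic_name'} entries to get
# 'name/id' output from timer_name(); unknown ids fall back to 't<id>'.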
class TimerTrace(gdb.Command):
"""Display TinyOS Virtual Timer Trace buffers."""
def __init__ (self):
super(TimerTrace, self).__init__("timerTrace", gdb.COMMAND_USER)
def invoke (self, args, from_tty):
start_format = '{:4d} start {:8x} {:s}'
stop_format = '{:4d} stop {:8x} {:s}'
usecs_format = '{:4d} {:8x} {:s}'
fired_format = '{:4d} fired {:8x} {:s}'
end_format = '{:4d} end {:8x} {:s}'
delta_format = '{:4d} delta {:8x} {:s}'
oops_format = '{:4d} oops {:8x} {:s}'
START_LT = int(gdb.parse_and_eval('VirtualizeTimerImplP__0__TVT_START_LT'))
START_USECS = int(gdb.parse_and_eval('VirtualizeTimerImplP__0__TVT_START_USECS'))
STOPPED = int(gdb.parse_and_eval('VirtualizeTimerImplP__0__TVT_STOPPED'))
FIRED = int(gdb.parse_and_eval('VirtualizeTimerImplP__0__TVT_FIRED'))
END = int(gdb.parse_and_eval('VirtualizeTimerImplP__0__TVT_END'))
DELTA = int(gdb.parse_and_eval('VirtualizeTimerImplP__0__TVT_DELTA'))
nxt = int(gdb.parse_and_eval('VirtualizeTimerImplP__0__nxt_vt'))
xmax = int(gdb.parse_and_eval('sizeof(VirtualizeTimerImplP__0__vtimer_trace)/'
'sizeof(VirtualizeTimerImplP__0__vtimer_trace[0])'))
last = nxt - 1
if last < 0:
last = xmax - 1
cur = nxt
if cur >= xmax: cur = 0
while True:
vtp = gdb.parse_and_eval('VirtualizeTimerImplP__0__vtimer_trace[0d{}]'.format(cur))
timer_num = int(vtp['num'])
ttype = vtp['ttype']
ttype_name = ttype.__str__().replace('VirtualizeTimerImplP__0__TVT_','')
ttype_num = int(ttype)
val = int(vtp['val'])
if ttype == START_LT:
print(start_format.format(timer_num, val, timer_name(timer_num)))
elif ttype == START_USECS:
print(usecs_format.format(timer_num, val, timer_name(timer_num)))
elif ttype == STOPPED:
print(stop_format.format(timer_num, val, timer_name(timer_num)))
elif ttype == FIRED:
print(fired_format.format(timer_num, val, timer_name(timer_num)))
elif ttype == END:
print(end_format.format(timer_num, val, timer_name(timer_num)))
elif ttype == DELTA:
print(delta_format.format(timer_num, val, timer_name(timer_num)))
else:
print(oops_format.format(timer_num, val, ttype_name,
timer_name(timer_num)))
if cur == last:
break
cur += 1
if cur >= xmax:
cur = 0
class DispTimers(gdb.Command):
"""Display TinyOS Virtual Timers."""
def __init__ (self):
super(DispTimers, self).__init__("dispTimers", gdb.COMMAND_USER)
def invoke (self, args, from_tty):
num = int(gdb.parse_and_eval('VirtualizeTimerImplP__0__NUM_TIMERS'))
print(' t state t0 dt max')
for cur in range(num):
tp = gdb.parse_and_eval('VirtualizeTimerImplP__0__m_timers[0d{}]'.format(cur))
t0 = int(tp['t0'])
dt = int(tp['dt'])
fired_max = int(tp['fired_max_us'])
oneshot = int(tp['isoneshot'])
running = int(tp['isrunning'])
print(' {:2d} {:s}{:s} {:8x} {:8x} {:8x} {:s}'.format(
cur,
'O' if oneshot else 'o',
'R' if running else 'r',
t0, dt, fired_max, timer_name(cur)))
TimerTrace()
DispTimers()
| [
"[email protected]"
] | |
9540c5649ece4f24a73054fe3f97086de5daa416 | 5af39f520889df20411a64ac8da65e8f9f173aa7 | /ex3k.py | 823f2a161cab4a06d12d656580e8746ddafcf71a | [] | no_license | Simranbassi/python_grapees | 25661565f475d0b50acda9925441703ed06b574a | 8cadae5a17a74e98e37137538cc963e268a55513 | refs/heads/master | 2020-06-03T14:33:52.975614 | 2019-06-18T10:53:48 | 2019-06-18T10:53:48 | 191,606,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | num=int(input("enter the number"))
if num > 0:
    print(num)
else:
    num = num * -1
    print(num)
| [
"[email protected]"
] | |
c693c124a93e14d681de6049ad1cd8167f791fa9 | 993ef8924418866f932396a58e3ad0c2a940ddd3 | /Production/python/PrivateSamples/EMJ_UL17_mMed-1800_mDark-20_ctau-60_unflavored-down_cff.py | 2a10520ef0ce0dd92221fa106cd5e687f6923570 | [] | no_license | TreeMaker/TreeMaker | 48d81f6c95a17828dbb599d29c15137cd6ef009a | 15dd7fe9e9e6f97d9e52614c900c27d200a6c45f | refs/heads/Run2_UL | 2023-07-07T15:04:56.672709 | 2023-07-03T16:43:17 | 2023-07-03T16:43:17 | 29,192,343 | 16 | 92 | null | 2023-07-03T16:43:28 | 2015-01-13T13:59:30 | Python | UTF-8 | Python | false | false | 1,971 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL17/step4_MINIAODv2_mMed-1800_mDark-20_ctau-60_unflavored-down_n-500_part-1.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL17/step4_MINIAODv2_mMed-1800_mDark-20_ctau-60_unflavored-down_n-500_part-10.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL17/step4_MINIAODv2_mMed-1800_mDark-20_ctau-60_unflavored-down_n-500_part-2.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL17/step4_MINIAODv2_mMed-1800_mDark-20_ctau-60_unflavored-down_n-500_part-3.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL17/step4_MINIAODv2_mMed-1800_mDark-20_ctau-60_unflavored-down_n-500_part-4.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL17/step4_MINIAODv2_mMed-1800_mDark-20_ctau-60_unflavored-down_n-500_part-5.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL17/step4_MINIAODv2_mMed-1800_mDark-20_ctau-60_unflavored-down_n-500_part-6.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL17/step4_MINIAODv2_mMed-1800_mDark-20_ctau-60_unflavored-down_n-500_part-7.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL17/step4_MINIAODv2_mMed-1800_mDark-20_ctau-60_unflavored-down_n-500_part-8.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL17/step4_MINIAODv2_mMed-1800_mDark-20_ctau-60_unflavored-down_n-500_part-9.root',
] )
| [
"[email protected]"
] | |
0441299322b4b0eead09496baefc06a63c96530a | 96a2e3b24678451cebe8787a7ddaee72e44c29a8 | /leads/migrations/0001_initial.py | 6f72465ecbfb120cb5cf72df8ba14ad28ae5c063 | [] | no_license | maanavshah/leadmanager-react-redux | 09e728cd98e45c5a7400c2ceb24f4f3918ac7569 | f5dd9d626a29d0cbf71787f9bc03c445769a1fb3 | refs/heads/master | 2023-02-22T16:46:55.062335 | 2021-01-26T10:54:38 | 2021-01-26T10:54:38 | 332,445,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | # Generated by Django 3.1.5 on 2021-01-20 13:54
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Lead',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('email', models.EmailField(max_length=100, unique=True)),
('message', models.CharField(blank=True, max_length=200)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
),
]
| [
"[email protected]"
] | |
c5238ef53cf37785c24d9eb98650e18e36970b92 | 9efd272118e522f269b0e001b09bfbafd20bed61 | /lpp/lexer.py | d2d66355cd8555f3c36db60f827f498d15d5d062 | [] | no_license | UnPolinomio/lpp-language | 9cc37317f93d938ffdd5664510824f26a4d8ef85 | 24d92601e7f73aca43d65164ac4ee4ace74d4f64 | refs/heads/main | 2023-07-01T11:48:28.242742 | 2021-08-02T00:22:16 | 2021-08-02T00:22:16 | 391,224,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,801 | py | from lpp.token import Token, TokenType, lookup_token_type
from re import compile, Pattern
from typing import NamedTuple
class TokenRegex(NamedTuple):
n_characters: int
regex: Pattern[str]
token_type: TokenType
TOKEN_REGEX = [
TokenRegex(2, compile(r'^==$'), TokenType.EQ),
TokenRegex(2, compile(r'^!=$'), TokenType.NOT_EQ),
TokenRegex(1, compile(r'^=$'), TokenType.ASSIGN),
TokenRegex(1, compile(r'^\+$'), TokenType.PLUS),
TokenRegex(1, compile(r'^$'), TokenType.EOF),
TokenRegex(1, compile(r'^\($'), TokenType.LPAREN),
TokenRegex(1, compile(r'^\)$'), TokenType.RPAREN),
TokenRegex(1, compile(r'^{$'), TokenType.LBRACE),
TokenRegex(1, compile(r'^}$'), TokenType.RBRACE),
TokenRegex(1, compile(r'^,$'), TokenType.COMMA),
TokenRegex(1, compile(r'^;$'), TokenType.SEMICOLON),
TokenRegex(1, compile(r'^<$'), TokenType.LT),
TokenRegex(1, compile(r'^>$'), TokenType.GT),
TokenRegex(1, compile(r'^-$'), TokenType.MINUS),
TokenRegex(1, compile(r'^/$'), TokenType.DIVISION),
TokenRegex(1, compile(r'^\*$'), TokenType.MULTIPLICATION),
TokenRegex(1, compile(r'^!$'), TokenType.NEGATION),
]
TOKEN_REGEX.sort(key=lambda x: x.n_characters, reverse=True)
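# Sorting longest-first lets two-character operators ('==', '!=') match
# before their one-character prefixes ('=', '!').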
LETTER_REGEX = compile(r'^[a-záéíóúA-ZÁÉÍÓÚñÑ_]$')
NUMBER_REGEX = compile(r'^[0-9]$')
WHITESPACE_REGEX = compile(r'^\s+$')
QUOTATION_REGEX = compile(r'^"$')
class Lexer:
def __init__(self, source: str) -> None:
self._source = source
self._character: str = '' # _character is the current character
self._read_position: int = 0 # _read_position is the index of the next character to read
self._position: int = -1 # _position is the index of the current character
self._read_character()
def next_token(self) -> Token:
self._skip_whitespace()
for (n_characters, regex, token_type) in TOKEN_REGEX:
literal = self._character + self._peek_character(n_characters - 1)
if regex.match(literal):
token = Token(token_type, literal)
self._read_character(n_characters=n_characters)
return token
if self._is_letter(self._character):
literal = self._read_identifier()
token_type = lookup_token_type(literal)
return Token(token_type, literal)
if self._is_number(self._character):
literal = self._read_number()
return Token(TokenType.INT, literal)
if QUOTATION_REGEX.match(self._character):
literal = self._read_string()
return Token(TokenType.STRING, literal)
token = Token(TokenType.ILLEGAL, self._character)
self._read_character()
return token
def _is_letter(self, character: str) -> bool:
return bool(LETTER_REGEX.match(character))
def _is_number(self, character: str) -> bool:
return bool(NUMBER_REGEX.match(character))
def _read_character(self, n_characters: int = 1) -> None:
new_position = self._position + n_characters
if new_position >= len(self._source):
self._character = ''
self._read_position = len(self._source) + 1
self._position = len(self._source)
else:
self._character = self._source[new_position]
self._read_position += n_characters
self._position = self._read_position - 1
def _read_identifier(self) -> str:
initial_position = self._position
if not self._is_letter(self._character):
return ''
self._read_character()
while self._is_letter(self._character) or self._is_number(self._character):
self._read_character()
return self._source[initial_position:self._position]
def _read_number(self) -> str:
initial_position = self._position
while self._is_number(self._character):
self._read_character()
return self._source[initial_position:self._position]
def _read_string(self) -> str:
self._read_character()
initial_position = self._position
while not QUOTATION_REGEX.match(self._character) and self._read_position <= len(self._source):
self._read_character()
string = self._source[initial_position:self._position]
self._read_character()
return string
def _skip_whitespace(self) -> None:
while WHITESPACE_REGEX.match(self._character):
self._read_character()
def _peek_character(self, n_characters: int) -> str:
if self._read_position + n_characters >= len(self._source):
return self._source[self._read_position:]
else:
return self._source[self._read_position:self._read_position + n_characters]
| [
"[email protected]"
] | |
28ea271d752771a20d12b624809f486ecf9a9f48 | 9cc714fc5bcd171014ca661ff57a9350e31480a2 | /lib/python3.6/site-packages/Django-3.0a1-py3.6.egg/django/utils/feedgenerator.py | 8f31827ccff1d67ba51262a85367651c437495c6 | [] | no_license | softdeveloper84/currency_tracker | b137a2a4fb8ae7cd3a636c8089ff0f1ea5048d23 | adfe2062bc7c913afa0b753bac7a17af1a9bc71b | refs/heads/master | 2022-02-19T06:20:26.330358 | 2019-09-19T20:45:06 | 2019-09-19T20:45:06 | 208,907,338 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,117 | py | """
Syndication feed generation library -- used for generating RSS, etc.
Sample usage:
>>> from django.utils import feedgenerator
>>> feed = feedgenerator.Rss201rev2Feed(
... title="Poynter E-Media Tidbits",
... link="http://www.poynter.org/column.asp?id=31",
... description="A group Weblog by the sharpest minds in online media/journalism/publishing.",
... language="en",
... )
>>> feed.add_item(
... title="Hello",
... link="http://www.holovaty.com/test/",
... description="Testing."
... )
>>> with open('test.rss', 'w') as fp:
... feed.write(fp, 'utf-8')
For definitions of the different versions of RSS, see:
https://web.archive.org/web/20110718035220/http://diveintomark.org/archives/2004/02/04/incompatible-rss
"""
import datetime
import email
from io import StringIO
from urllib.parse import urlparse
from django.utils.encoding import iri_to_uri
from django.utils.timezone import utc
from django.utils.xmlutils import SimplerXMLGenerator
def rfc2822_date(date):
if not isinstance(date, datetime.datetime):
date = datetime.datetime.combine(date, datetime.time())
return email.utils.format_datetime(date)
def rfc3339_date(date):
if not isinstance(date, datetime.datetime):
date = datetime.datetime.combine(date, datetime.time())
return date.isoformat() + ('Z' if date.utcoffset() is None else '')
def get_tag_uri(url, date):
"""
Create a TagURI.
    See https://web.archive.org/web/20110514113830/http://diveintomark.org/archives/2004/05/28/howto-atom-id
"""
bits = urlparse(url)
d = ''
if date is not None:
d = ',%s' % date.strftime('%Y-%m-%d')
return 'tag:%s%s:%s/%s' % (bits.hostname, d, bits.path, bits.fragment)
class SyndicationFeed:
"Base class for all syndication feeds. Subclasses should provide write()"
def __init__(self, title, link, description, language=None, author_email=None,
author_name=None, author_link=None, subtitle=None, categories=None,
feed_url=None, feed_copyright=None, feed_guid=None, ttl=None, **kwargs):
def to_str(s):
return str(s) if s is not None else s
categories = categories and [str(c) for c in categories]
self.feed = {
'title': to_str(title),
'link': iri_to_uri(link),
'description': to_str(description),
'language': to_str(language),
'author_email': to_str(author_email),
'author_name': to_str(author_name),
'author_link': iri_to_uri(author_link),
'subtitle': to_str(subtitle),
'categories': categories or (),
'feed_url': iri_to_uri(feed_url),
'feed_copyright': to_str(feed_copyright),
'id': feed_guid or link,
'ttl': to_str(ttl),
**kwargs,
}
self.items = []
def add_item(self, title, link, description, author_email=None,
author_name=None, author_link=None, pubdate=None, comments=None,
unique_id=None, unique_id_is_permalink=None, categories=(),
item_copyright=None, ttl=None, updateddate=None, enclosures=None, **kwargs):
"""
Add an item to the feed. All args are expected to be strings except
pubdate and updateddate, which are datetime.datetime objects, and
enclosures, which is an iterable of instances of the Enclosure class.
"""
def to_str(s):
return str(s) if s is not None else s
categories = categories and [to_str(c) for c in categories]
self.items.append({
'title': to_str(title),
'link': iri_to_uri(link),
'description': to_str(description),
'author_email': to_str(author_email),
'author_name': to_str(author_name),
'author_link': iri_to_uri(author_link),
'pubdate': pubdate,
'updateddate': updateddate,
'comments': to_str(comments),
'unique_id': to_str(unique_id),
'unique_id_is_permalink': unique_id_is_permalink,
'enclosures': enclosures or (),
'categories': categories or (),
'item_copyright': to_str(item_copyright),
'ttl': to_str(ttl),
**kwargs,
})
def num_items(self):
return len(self.items)
def root_attributes(self):
"""
Return extra attributes to place on the root (i.e. feed/channel) element.
Called from write().
"""
return {}
def add_root_elements(self, handler):
"""
Add elements in the root (i.e. feed/channel) element. Called
from write().
"""
pass
def item_attributes(self, item):
"""
Return extra attributes to place on each item (i.e. item/entry) element.
"""
return {}
def add_item_elements(self, handler, item):
"""
Add elements on each item (i.e. item/entry) element.
"""
pass
def write(self, outfile, encoding):
"""
Output the feed in the given encoding to outfile, which is a file-like
object. Subclasses should override this.
"""
raise NotImplementedError('subclasses of SyndicationFeed must provide a write() method')
def writeString(self, encoding):
"""
Return the feed in the given encoding as a string.
"""
s = StringIO()
self.write(s, encoding)
return s.getvalue()
def latest_post_date(self):
"""
Return the latest item's pubdate or updateddate. If no items
have either of these attributes this return the current UTC date/time.
"""
latest_date = None
date_keys = ('updateddate', 'pubdate')
for item in self.items:
for date_key in date_keys:
item_date = item.get(date_key)
if item_date:
if latest_date is None or item_date > latest_date:
latest_date = item_date
# datetime.now(tz=utc) is slower, as documented in django.utils.timezone.now
return latest_date or datetime.datetime.utcnow().replace(tzinfo=utc)
class Enclosure:
"""An RSS enclosure"""
def __init__(self, url, length, mime_type):
"All args are expected to be strings"
self.length, self.mime_type = length, mime_type
self.url = iri_to_uri(url)
class RssFeed(SyndicationFeed):
content_type = 'application/rss+xml; charset=utf-8'
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement("rss", self.rss_attributes())
handler.startElement("channel", self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
self.endChannelElement(handler)
handler.endElement("rss")
def rss_attributes(self):
return {
'version': self._version,
'xmlns:atom': 'http://www.w3.org/2005/Atom',
}
def write_items(self, handler):
for item in self.items:
handler.startElement('item', self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement("item")
def add_root_elements(self, handler):
handler.addQuickElement("title", self.feed['title'])
handler.addQuickElement("link", self.feed['link'])
handler.addQuickElement("description", self.feed['description'])
if self.feed['feed_url'] is not None:
handler.addQuickElement("atom:link", None, {"rel": "self", "href": self.feed['feed_url']})
if self.feed['language'] is not None:
handler.addQuickElement("language", self.feed['language'])
for cat in self.feed['categories']:
handler.addQuickElement("category", cat)
if self.feed['feed_copyright'] is not None:
handler.addQuickElement("copyright", self.feed['feed_copyright'])
handler.addQuickElement("lastBuildDate", rfc2822_date(self.latest_post_date()))
if self.feed['ttl'] is not None:
handler.addQuickElement("ttl", self.feed['ttl'])
def endChannelElement(self, handler):
handler.endElement("channel")
class RssUserland091Feed(RssFeed):
_version = "0.91"
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", item['link'])
if item['description'] is not None:
handler.addQuickElement("description", item['description'])
class Rss201rev2Feed(RssFeed):
# Spec: https://cyber.harvard.edu/rss/rss.html
_version = "2.0"
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", item['link'])
if item['description'] is not None:
handler.addQuickElement("description", item['description'])
# Author information.
if item["author_name"] and item["author_email"]:
handler.addQuickElement("author", "%s (%s)" % (item['author_email'], item['author_name']))
elif item["author_email"]:
handler.addQuickElement("author", item["author_email"])
elif item["author_name"]:
handler.addQuickElement(
"dc:creator", item["author_name"], {"xmlns:dc": "http://purl.org/dc/elements/1.1/"}
)
if item['pubdate'] is not None:
handler.addQuickElement("pubDate", rfc2822_date(item['pubdate']))
if item['comments'] is not None:
handler.addQuickElement("comments", item['comments'])
if item['unique_id'] is not None:
guid_attrs = {}
if isinstance(item.get('unique_id_is_permalink'), bool):
guid_attrs['isPermaLink'] = str(item['unique_id_is_permalink']).lower()
handler.addQuickElement("guid", item['unique_id'], guid_attrs)
if item['ttl'] is not None:
handler.addQuickElement("ttl", item['ttl'])
# Enclosure.
if item['enclosures']:
enclosures = list(item['enclosures'])
if len(enclosures) > 1:
raise ValueError(
"RSS feed items may only have one enclosure, see "
"http://www.rssboard.org/rss-profile#element-channel-item-enclosure"
)
enclosure = enclosures[0]
handler.addQuickElement('enclosure', '', {
'url': enclosure.url,
'length': enclosure.length,
'type': enclosure.mime_type,
})
# Categories.
for cat in item['categories']:
handler.addQuickElement("category", cat)
class Atom1Feed(SyndicationFeed):
# Spec: https://tools.ietf.org/html/rfc4287
content_type = 'application/atom+xml; charset=utf-8'
ns = "http://www.w3.org/2005/Atom"
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement('feed', self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
handler.endElement("feed")
def root_attributes(self):
if self.feed['language'] is not None:
return {"xmlns": self.ns, "xml:lang": self.feed['language']}
else:
return {"xmlns": self.ns}
def add_root_elements(self, handler):
handler.addQuickElement("title", self.feed['title'])
handler.addQuickElement("link", "", {"rel": "alternate", "href": self.feed['link']})
if self.feed['feed_url'] is not None:
handler.addQuickElement("link", "", {"rel": "self", "href": self.feed['feed_url']})
handler.addQuickElement("id", self.feed['id'])
handler.addQuickElement("updated", rfc3339_date(self.latest_post_date()))
if self.feed['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", self.feed['author_name'])
if self.feed['author_email'] is not None:
handler.addQuickElement("email", self.feed['author_email'])
if self.feed['author_link'] is not None:
handler.addQuickElement("uri", self.feed['author_link'])
handler.endElement("author")
if self.feed['subtitle'] is not None:
handler.addQuickElement("subtitle", self.feed['subtitle'])
for cat in self.feed['categories']:
handler.addQuickElement("category", "", {"term": cat})
if self.feed['feed_copyright'] is not None:
handler.addQuickElement("rights", self.feed['feed_copyright'])
def write_items(self, handler):
for item in self.items:
handler.startElement("entry", self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement("entry")
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", "", {"href": item['link'], "rel": "alternate"})
if item['pubdate'] is not None:
handler.addQuickElement('published', rfc3339_date(item['pubdate']))
if item['updateddate'] is not None:
handler.addQuickElement('updated', rfc3339_date(item['updateddate']))
# Author information.
if item['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", item['author_name'])
if item['author_email'] is not None:
handler.addQuickElement("email", item['author_email'])
if item['author_link'] is not None:
handler.addQuickElement("uri", item['author_link'])
handler.endElement("author")
# Unique ID.
if item['unique_id'] is not None:
unique_id = item['unique_id']
else:
unique_id = get_tag_uri(item['link'], item['pubdate'])
handler.addQuickElement("id", unique_id)
# Summary.
if item['description'] is not None:
handler.addQuickElement("summary", item['description'], {"type": "html"})
# Enclosures.
for enclosure in item['enclosures']:
handler.addQuickElement('link', '', {
'rel': 'enclosure',
'href': enclosure.url,
'length': enclosure.length,
'type': enclosure.mime_type,
})
# Categories.
for cat in item['categories']:
handler.addQuickElement("category", "", {"term": cat})
# Rights.
if item['item_copyright'] is not None:
handler.addQuickElement("rights", item['item_copyright'])
# This isolates the decision of what the system default is, so calling code can
# do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed".
DefaultFeed = Rss201rev2Feed
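# Usage sketch (an illustrative addition, not part of the original module):
# build a feed, add an item, and serialize it. The keyword names follow the
# SyndicationFeed interface the classes above implement.
#   feed = DefaultFeed(title="My blog", link="https://example.com/",
#                      description="Latest posts", language="en")
#   feed.add_item(title="Hello", link="https://example.com/hello/",
#                 description="First post")
#   with open("rss.xml", "wb") as f:
#       feed.write(f, "utf-8")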
| [
"[email protected]"
] | |
872e4b1067be2faaeadf9080be75b0cd11135a5c | f374cbc5ce21cad22bb2b8d28d249bcba28b9025 | /generator/group.py | 41991721998b7238bdc788a329eefeecfadc60fb | [
"Apache-2.0"
] | permissive | nechaeva-irina/python_training | 064eb7ec4bff04723bcfcd874f4ffe8e86e369b0 | f4e4c1788525b64ce86eefe261cdb4adfa32f839 | refs/heads/main | 2023-02-08T21:25:12.638509 | 2020-12-31T11:03:04 | 2020-12-31T11:03:04 | 306,890,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | # -*- coding: utf-8 -*-
from model.group import Group
import random
import string
import os.path
import jsonpickle
import getopt
import sys
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of groups", "file"])
except getopt.GetoptError as err:
print(err)
    print("usage: group.py -n <number of groups> -f <output file>")
sys.exit(2)
n = 5
f = "data/groups.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits + " " * 10
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
testdata = [Group(name="", header="", footer="")] + [
Group(name=random_string("name", 10), header=random_string("header", 20), footer=random_string("footer", 20))
for i in range(n)
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(testdata))
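# Example invocation (a sketch; assumes the `model` package is on PYTHONPATH):
#   python group.py -n 10 -f data/groups.json
# This writes one empty group plus 10 random groups, JSON-encoded with
# jsonpickle, to the given path resolved relative to this script's parent
# directory.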
| [
"[email protected]"
] | |
662df77b0315dc3d3f49efb24dc706cd6a9adf76 | 6ff196d013be61815db06580c98133f8f70dd389 | /bike/settings.py | 27f27f50e232dff07ef0c841a2e475d6628d091b | [] | no_license | B0rjitaaa/bikes | 0629ac5418612c1f9bdafe3b97719cf7dfadc840 | fb7298b6558462ac218768c768b9d1989d8c9c93 | refs/heads/master | 2016-09-01T08:37:22.872400 | 2016-01-09T17:46:06 | 2016-01-09T17:46:06 | 49,059,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,910 | py | """
Django settings for bike project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
IS_ON_OPENSHIFT = False
if 'OPENSHIFT_REPO_DIR' in os.environ:
IS_ON_OPENSHIFT = True
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 's7&&h8rw+ev#hp_7t#tl5tmeu-x8s3lkg0vh%6#$+s8xdil7jv'
# SECURITY WARNING: don't run with debug turned on in production!
if IS_ON_OPENSHIFT:
DEBUG = False
TEMPLATE_DEBUG = False
else:
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'shop'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'bike.urls'
WSGI_APPLICATION = 'bike.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'es_ES'
TIME_ZONE = 'Europe/Madrid'
USE_I18N = True
USE_L10N = True
USE_TZ = True
DATE_FORMAT = 'd/m/Y'
DATE_INPUT_FORMATS = ('%d/%m/%Y',)
DATETIME_INPUT_FORMATS = ('%d/%m/%Y %H:%M',)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
if IS_ON_OPENSHIFT:
STATIC_ROOT = os.path.join(os.environ.get('OPENSHIFT_REPO_DIR'), 'wsgi', 'static')
else:
STATIC_ROOT = os.path.join(BASE_DIR, 'wsgi', 'static')
if IS_ON_OPENSHIFT:
STATICFILES_DIRS = (
os.path.join(os.environ.get('OPENSHIFT_REPO_DIR'), 'static'),
)
else:
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.FileSystemFinder',
)
# Media files (Uploaded files)
if IS_ON_OPENSHIFT:
MEDIA_URL = '/static/media/'
MEDIA_ROOT = os.path.join(os.environ.get('OPENSHIFT_DATA_DIR'), 'media')
else:
MEDIA_URL = '/images/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'images')
# Debug Mode in terminal
LOGGING = {
'version': 1,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'console': {
'level': 'WARNING',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'django': {
            'handlers': ['console'],
'level': 'WARNING',
'propagate': True,
},
}
}
for logger in LOGGING['loggers']:
    LOGGING['loggers'][logger]['handlers'] = ['console']
| [
"[email protected]"
] | |
9fe39473b2a83f53bf55e35d99c1defac0fc2043 | a2b60aebc26c35baf8e06300893edb3bd1fa9670 | /extune/tune/utils.py | 6ff16ff6e7fa5923992d5e9902b64809c1a936e6 | [
"MIT"
] | permissive | imsb-uke/podometric_u_net | 81322df451459434e4e36149013d93a5d24bb41c | a33afcc186d618889df73c7ab2941dfbb63574ac | refs/heads/main | 2023-02-05T21:07:46.342749 | 2020-12-23T15:25:51 | 2020-12-23T15:25:51 | 323,162,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | import tensorflow
class TuneKerasCallback(tensorflow.keras.callbacks.Callback):
def __init__(self, reporter, config, logs={}):
self.reporter = reporter
self.config = config
self.iteration = 0
self.logs_copy = {}
super(TuneKerasCallback, self).__init__()
def on_train_end(self, logs={}):
print(logs)
print(self.logs_copy)
self.reporter(
timesteps_total=self.iteration, done=1,
val_metric=(1.0 - float(self.logs_copy["val_" + self.config.METRIC[self.config.MONITORED_METRIC]])))
def on_epoch_end(self, epoch, logs={}):
print(logs)
self.logs_copy = logs
self.iteration += 1
self.reporter(
timesteps_total=self.iteration,
val_metric=(1.0 - float(self.logs_copy["val_" + self.config.METRIC[self.config.MONITORED_METRIC]])))
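# Usage sketch (illustrative; assumes `reporter` and `config` come from a Ray
# Tune trainable and that config.METRIC[config.MONITORED_METRIC] names a metric
# Keras reports with a "val_" prefix):
#   model.fit(x_train, y_train,
#             validation_data=(x_val, y_val),
#             callbacks=[TuneKerasCallback(reporter, config)])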
| [
"[email protected]"
] | |
04df0299ef957d4333f41b335d3239c4a60fb66b | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/d6fd4eb8e6b84d51b7344f8a2c9321faf1dd0ce3-<create_bgp_import_route>-bug.py | 73177bc6902d60f22d29709d05ea09df2e20461c | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | py | def create_bgp_import_route(self, **kwargs):
' create_bgp_import_route '
module = kwargs['module']
vrf_name = module.params['vrf_name']
af_type = module.params['af_type']
import_protocol = module.params['import_protocol']
import_process_id = module.params['import_process_id']
if ((import_protocol == 'direct') or (import_protocol == 'static')):
import_process_id = '0'
conf_str = (CE_CREATE_BGP_IMPORT_ROUTE % (vrf_name, af_type, import_protocol, import_process_id))
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if ('<ok/>' not in recv_xml):
module.fail_json(msg='Error: Create bgp import route failed.')
cmds = []
cmd = ('import-route %s %s' % (import_protocol, import_process_id))
cmds.append(cmd)
    return cmds
| [
"[email protected]"
] | |
30311f47ca61590596c3277770b649a5a3a5f480 | 47109d686222fcb36fbdd95994e4458b9bda867b | /Exercises/list_factors.py | a35ca857ca5b8f134f91f1434e98ad9d0b9eef0f | [] | no_license | gormac23/Python | 641fc800775eaa250a59aad3f8e8164ef5e43452 | fead3432302c689f8b0b3d50b7c4a771b7d39262 | refs/heads/master | 2021-07-11T09:40:53.058073 | 2020-11-30T10:27:22 | 2020-11-30T10:27:22 | 222,804,711 | 0 | 0 | null | 2019-11-20T12:27:16 | 2019-11-19T22:47:46 | Python | UTF-8 | Python | false | false | 594 | py | def list_factors(x):
# We will store all factors in factors
factors = []
i = 1
while i*i <= x:
# Check if i divides x without leaving a remainder
if x % i == 0:
factors.append(i)
            # If it does, find the matching factor, i.e. how many times it divides
if x//i != i:
factors.append(x//i)
i += 1
# Return the list of factors of x
return factors
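# Worked example: for x = 12 the loop only tests i = 1, 2, 3 (since 4*4 > 12)
# and appends each divisor together with its cofactor, so list_factors(12)
# returns [1, 12, 2, 6, 3, 4].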
def main():
factor = int(input("Enter number you want factor list of..."))
print(list_factors(factor))
if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
38483227c506a6ba14ddc65b05ab9de16db49c85 | 496a55a346c08798186c0f4cefbf3aa4c3e598b8 | /test/bot_gateway/bot_gateway_test.py | 649aad50c2ca1e08f3d7dd460f095867331ed288 | [
"MIT"
] | permissive | olivierSaintCyr/BigBot | adb1d8982b60d60b6250371811f953fedaa423ed | 3db5c07fbfc6f66f59cab91318282abfce210314 | refs/heads/main | 2023-03-12T20:27:25.687768 | 2021-03-01T06:31:49 | 2021-03-01T06:31:49 | 343,248,642 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | import requests
BASE = "http://localhost:5000/"
query_good = {"server_id":"799761080720687164", "service":"echo_test"}
query_not_sub = {"server_id":"799761080720687164", "service":"not_a_service"}
query_server_not_setup = {"server_id":"799761080720687160", "service":"echo_test"}
response = requests.get(BASE, query_good)
print(response.json())
response = requests.get(BASE, query_not_sub)
print(response.json())
response = requests.get(BASE, query_server_not_setup)
print(response.json())
| [
"[email protected]"
] | |
8b617444a7d6c9cb578d137482b6b235bd235f9a | d6475976a503e78fb52cdba57124f8cd98a178bc | /PagesApp/pages_project/settings.py | 9a18100a92ea8855bfa57cafd5d8503c3d84bf7c | [] | no_license | alexprodan99/TrainingDJango | 0d61db72d3ae58d90e35e46cbdb18aa535505c23 | 6c2543aea68c00d5b65505caa6eb3b276c374d92 | refs/heads/master | 2021-01-01T02:49:18.093373 | 2020-02-29T15:34:59 | 2020-02-29T15:34:59 | 239,149,078 | 0 | 0 | null | 2020-02-09T13:18:17 | 2020-02-08T14:51:37 | Python | UTF-8 | Python | false | false | 3,174 | py | """
Django settings for pages_project project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vg!x!k5$u+yr4d*_05gz*se10b!5!)&1&v&9o&wn4s)o$de5j6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'pages.apps.PagesConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pages_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pages_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
a650fc0ebfb9eeaaa1ef45b563caca1ab197d0af | 709034a7ad54aefe36c3fd7c26bf13c975a2afcb | /bin/converter_grpah.py | b8f777f49b541e208c2b2800d884fec371f26cd5 | [
"Apache-2.0"
] | permissive | zh794390558/ML-KWS-for-MCU | d57023ae9c275984c711e2e47009091186b7de85 | 38f4dcec9ac2e74154a024a7795459a3049efd0d | refs/heads/master | 2021-08-18T07:29:17.108746 | 2018-11-12T09:32:36 | 2018-11-12T09:32:36 | 137,883,318 | 0 | 0 | null | 2018-06-19T11:38:35 | 2018-06-19T11:38:35 | null | UTF-8 | Python | false | false | 1,492 | py | # This file is useful for reading the contents of the ops generated by ruby.
# You can read any graph defination in pb/pbtxt format generated by ruby
# or by python and then convert it back and forth from human readable to binary format.
from absl import app as absl_app
from absl import flags
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.python.platform import gfile
def pbtxt_to_graphdef(filename):
with open(filename, 'r') as f:
graph_def = tf.GraphDef()
file_content = f.read()
text_format.Merge(file_content, graph_def)
tf.import_graph_def(graph_def, name='')
tf.train.write_graph(graph_def, 'pbtxt/', 'protobuf.pb', as_text=False)
def graphdef_to_pbtxt(filename):
with gfile.FastGFile(filename,'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
tf.train.write_graph(graph_def, 'pbtxt/', 'protobuf.pbtxt', as_text=True)
return
def main(_):
FLAGS = flags.FLAGS
if FLAGS.binary:
graphdef_to_pbtxt(FLAGS.graph_file) # here you can write the name of the file to be converted
else:
pbtxt_to_graphdef(FLAGS.graph_file)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.flags.DEFINE_string('graph_file', default=None, help='graph.pb file name')
tf.flags.DEFINE_bool('binary', default=True, help='binary graph file or not')
absl_app.run(main)
# and then a new file will be made in pbtxt directory.
| [
"[email protected]"
] | |
7f7eccfa7cfae87b951c259963ece51c6d6df66f | 884112003b7bd1e200325bba3e3ac9a2f9d3ea8b | /server/samba/actions.py | 5ec098de997fba2224528230be1acdd5d083749a | [] | no_license | pars-linux/pardus-1.0 | e1a4049c17ac9f2fbc2ae50c61d81c15e03e5823 | 4d2196b7558b3870908e799e02568ee9a6eee419 | refs/heads/master | 2021-01-24T19:12:02.809085 | 2006-06-14T07:07:39 | 2006-06-14T07:07:39 | 82,460,626 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,815 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
#
# Name <email@address>
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
def setup():
shelltools.cd("source/")
autotools.autoconf()
autotools.configure("--with-fhs \
--sysconfdir=/etc/samba \
--localstatedir=/var \
--with-configdir=/etc/samba \
--with-libdir=/usr/lib/samba \
--with-piddir=/var/run/samba \
--with-lockdir=/var/cache/samba \
--with-logfilebase=/var/log/samba \
--with-privatedir=/var/lib/samba/private \
--with-libsmbclient \
--without-spinlocks \
--with-acl-support \
--with-aio-support \
--enable-cups \
--with-pam \
--with-pam_smbpass \
--with-python \
--with-quotas \
--with-sys-quotas \
--with-readline \
--with-smbmount \
--with-syslog \
--with-expsam=xml \
--without-ldapsam \
--with-winbind \
--with-shared-modules=idmap_rid")
def build():
shelltools.cd("source/")
autotools.make("proto")
autotools.make("everything")
# build python modules
shelltools.system("python python/setup.py build")
def install():
shelltools.cd("source/")
autotools.rawInstall("DESTDIR=%s" % get.installDIR(), "install-everything")
# remove invalid symlink
pisitools.remove("/sbin/mount.smbfs")
# Nsswitch extensions. Make link for wins and winbind resolvers
pisitools.dolib_so("nsswitch/libnss_wins.so")
pisitools.dosym("libnss_wins.so", "/usr/lib/libnss_wins.so.2")
pisitools.dolib_so("/nsswitch/libnss_winbind.so")
pisitools.dosym("libnss_winbind.so", "/usr/lib/libnss_winbind.so.2")
# pam extensions
pisitools.doexe("bin/pam_smbpass.so", "/lib/security")
pisitools.doexe("nsswitch/pam_winbind.so", "/lib/security")
pisitools.dodir("/sbin")
pisitools.dosym("/usr/bin/smbmount", "/sbin/mount.smbfs")
pisitools.dosym("/usr/bin/mount.cifs", "/sbin/mount.cifs")
# needed symlink
pisitools.dosym("samba/libsmbclient.so", "/usr/lib/libsmbclient.so.0")
pisitools.dosym("samba/libsmbclient.so", "/usr/lib/libsmbclient.so")
# cups support
pisitools.dodir("/usr/lib/cups/backend")
pisitools.dosym("/bin/smbspool", "/usr/lib/cups/backend/smb")
# directory things
pisitools.dodir("/var/spool/samba")
pisitools.chown("/var/spool/samba", "01777")
pisitools.dodir("/var/log/samba")
pisitools.dodir("/var/run/samba")
pisitools.dodir("/var/cache/samba")
pisitools.dodir("/var/lib/samba/netlogon")
pisitools.dodir("/var/lib/samba/profiles")
pisitools.dodir("/var/lib/samba/printers/W32X86")
pisitools.dodir("/var/lib/samba/printers/WIN40")
pisitools.dodir("/var/lib/samba/printers/W32ALPHA")
pisitools.dodir("/var/lib/samba/printers/W32MIPS")
pisitools.dodir("/var/lib/samba/printers/W32PPC")
pisitools.dodir("/usr/lib/samba/auth")
pisitools.dodir("/usr/lib/samba/idmap")
pisitools.dodir("/usr/lib/samba/auth")
# install python modules
shelltools.system("python python/setup.py install --root=%s" % get.installDIR())
| [
"[email protected]"
] | |
d3d3080077d5aec86def74dd226754e332a1d179 | 24a1da610a57d9558a7261ed9ca92b20d8689634 | /June/48Day/All_Paths_From_Source_to_Target_144ms_15.8mb.py | a3c26ce5b93ff3ce602dec9785daf5399496efdf | [] | no_license | JadeKim042386/LeetCode | b5b70a8100a19d705150f276ee8e0dc11c5038b2 | 77234a14dc97bd0e023842cd57698b37d1460511 | refs/heads/master | 2023-06-09T20:16:36.352246 | 2021-07-06T09:25:15 | 2021-07-06T09:25:15 | 349,680,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 974 | py | from typing import List
from collections import deque
# BFS
class Solution:
def allPathsSourceTarget(self, graph: List[List[int]]) -> List[List[int]]:
answer = []
end = max([max(i) for i in graph if len(i)])
m, n = len(graph), len(graph[0])
q = deque([[0]])
while q:
cur_path = q.popleft()
cur_node = cur_path[-1]
nxt_nodes = graph[cur_node]
if cur_path[-1] == end:
answer.append(cur_path)
else:
for nxt_node in nxt_nodes:
q.append(cur_path + [nxt_node])
return answer
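# Example: for graph = [[1, 2], [3], [3], []] the BFS above returns
# [[0, 1, 3], [0, 2, 3]] --- every path from node 0 to node 3.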
'''
from collections import deque
class Solution:
def allPathsSourceTarget(self, graph):
def dfs(cur, path):
if cur == len(graph) - 1: res.append(path)
else:
for i in graph[cur]: dfs(i, path + [i])
res = []
dfs(0, [0])
return res
'''
| [
"[email protected]"
] | |
1f4d159101bca3e1e85456a9c828e3e82757ec28 | 2521e6427a7668d8cc91eabb368a5cf0eb7310f9 | /Cap13_Date&Time/06_gmtime.py | f3cec30d0e447568a0954814f81fa192e849d239 | [] | no_license | frclasso/turma3_Python1_2018 | 4a7bc0ba0eb538100400c15fc5c5b3ac1eeb7e50 | 47cd3aaa6828458b7f5164a8bce717bb8dd83a7c | refs/heads/master | 2020-04-06T16:18:00.889198 | 2019-06-10T15:11:32 | 2019-06-10T15:11:32 | 157,614,408 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | #!/usr/bin/env python3
import time
print('gmtime', time.gmtime(1547073589.537185))
| [
"[email protected]"
] | |
8bfacc9a6ebf6d271091d9203ff0b250d4c453c8 | e69f4daddf771f154c55bef4f0b8e667f453ebcd | /Assignment_07/Question_04.py | 9ba49fac0c207551270aa0774df6f73d3ece2d80 | [] | no_license | ismailsinansahin/PythonAssignments | b03eb0000838594e6e3d5e230c29b5de0f793906 | 20bbc5598a3b1dd7cfc114f4e121ba5575e6be17 | refs/heads/master | 2022-04-09T14:39:59.868718 | 2020-03-11T12:26:22 | 2020-03-11T12:26:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | row = int(input("Please enter the upper number: "))
for i in range(1,row+1):
for j in range(1,i+1):
if j%2!=0:
print("1",end="")
else:
print("0",end="")
print("")
# Please enter the upper number: 7
# 1
# 10
# 101
# 1010
# 10101
# 101010
# 1010101
| [
"[email protected]"
] | |
cf513806ff9e036486c751330f323fe84b059ffa | 48377e027a031036b9ebc5c9dcfc9a35ceed76d7 | /ykdl/extractors/ifeng/gongkaike.py | 55c7079ea75809bb3f1aeb472bbf3085dc4bc035 | [
"MIT"
] | permissive | MichioY/bilibiliupload | ed83a3836b09365f801ba93d5de818c2de147fbf | 623e0d06e6acb4b5f2c3d6291450f27bbf83667e | refs/heads/master | 2020-06-25T21:22:15.097945 | 2019-07-28T09:54:29 | 2019-07-28T09:54:29 | 199,425,286 | 1 | 0 | MIT | 2019-07-29T09:48:56 | 2019-07-29T09:48:56 | null | UTF-8 | Python | false | false | 1,138 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from ykdl.extractor import VideoExtractor
from ykdl.videoinfo import VideoInfo
from xml.dom.minidom import parseString
from ykdl.util.html import get_content
from ykdl.util.match import match1, matchall
class IfengOpenC(VideoExtractor):
name = u'凤凰公开课 (ifeng open course)'
def prepare(self):
info = VideoInfo(self.name)
if not self.vid:
            self.vid = match1(self.url, '#([a-zA-Z0-9\-]+)', '/([a-zA-Z0-9\-]+).shtml')
if not self.vid:
html = get_content(self.url)
self.vid = match1(html, '"vid": "([^"]+)', 'vid: "([^"]+)')
xml = get_content('http://vxml.ifengimg.com/video_info_new/{}/{}/{}.xml'.format(self.vid[-2], self.vid[-2:], self.vid))
info.title = match1(xml, 'SE_Title="([^"]+)')
urls = matchall(xml, ['playurl="([^"]+)'])
urls = ['http://ips.ifeng.com/' + u[7:] for u in urls ]
info.stream_types.append('current')
info.streams['current'] = {'container': 'mp4', 'video_profile': 'current', 'src' : urls, 'size': 0}
return info
site = IfengOpenC()
| [
"[email protected]"
] | |
b91266a173a0d0de9baadd96961442fa038891e4 | be652b0b1b255df320d244caf115e6fabb60c77d | /bhp2/bh_ssh_scp.py | f74c19cdb7079c41cd63bb4427050c5df67a6d92 | [] | no_license | bpinkert/portfolio | 4b505458bbdca3bada05039ea791c6cba3e58ffd | d8ff0582611f9dabaeee6d6ae94113fd17b98ccb | refs/heads/master | 2022-12-05T08:25:10.105766 | 2019-06-24T22:25:59 | 2019-06-24T22:25:59 | 192,994,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | #!/home/bpubi/Devel/bhp/py2.7.3/bin/python2.7
import threading
import paramiko
import subprocess
def ssh_command(ip, user, passwd, command):
client = paramiko.SSHClient()
#client.load_host_keys('/home/bpubi/.ssh/known_hosts')
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(ip, username=user, password=passwd)
ssh_session = client.get_transport().open_session()
if ssh_session.active:
ssh_session.exec_command(command)
print ssh_session.recv(1024)
return
ssh_command('', '', '', '')
| [
"[email protected]"
] | |
6dcd3e1acaa627be37e9626af756bc8e9d3cf49d | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/8/sT5.py | 06899f2942ca4deac672a79c7a4f6261974d0e27 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'sT5':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
    main(sys.argv[1])
| [
"[email protected]"
] | |
3c98110e0218be3a1f361b9580924a45f5b84b29 | dab8f93ce0cbe8a63a980c40ff97590a32f74472 | /mp_sort/application.py | b08a5a29d7f3f6319cfc4d5f68a0ce4f2bc52d2b | [
"MIT"
] | permissive | AlkaffAhamed/fip_powerx_mini_projects | 549040f981413d452d209a182970674df1943dd0 | 3bc9c8b4bc7ce49603b0824da354c74f4d223117 | refs/heads/master | 2023-07-20T00:49:09.085118 | 2021-08-12T14:55:28 | 2021-08-12T14:55:28 | 388,461,044 | 0 | 1 | MIT | 2021-07-25T16:50:54 | 2021-07-22T12:52:05 | Python | UTF-8 | Python | false | false | 112 | py | from app import application
if __name__ == "__main__":
application.run(host="0.0.0.0", port=8080, debug=True)
| [
"[email protected]"
] | |
955e461abb4b7873b67c0b2439879de2661d65b1 | b81668a2cc43654cf6a3ed952d781310876838f9 | /venv/Lib/site-packages/spacy/lang/en/syntax_iterators.py | 86695cf6f2a3d3fdb3f04bd8679ff5376299d0b6 | [] | no_license | gowthamr1999/docbot-1 | 6a8b873407f15035fb8b30b69ed66ded343bd1e4 | 3119958d68e95673b4c9187d58d8cad5c18a6b2c | refs/heads/master | 2023-04-07T02:16:55.574750 | 2021-04-16T02:52:38 | 2021-04-16T02:52:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,547 | py | from ...symbols import NOUN, PROPN, PRON
def noun_chunks(obj):
"""
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
"""
labels = [
"nsubj",
"dobj",
"nsubjpass",
"pcomp",
"pobj",
"dative",
"appos",
"attr",
"ROOT",
]
doc = obj.doc # Ensure works on both Doc and Span.
np_deps = [doc.vocab.strings.add(label) for label in labels]
conj = doc.vocab.strings.add("conj")
np_label = doc.vocab.strings.add("NP")
seen = set()
for i, word in enumerate(obj):
if word.pos not in (NOUN, PROPN, PRON):
continue
# Prevent nested chunks from being produced
if word.i in seen:
continue
if word.dep in np_deps:
if any(w.i in seen for w in word.subtree):
continue
seen.update(j for j in range(word.left_edge.i, word.i + 1))
yield word.left_edge.i, word.i + 1, np_label
elif word.dep == conj:
head = word.head
while head.dep == conj and head.head.i < head.i:
head = head.head
# If the head is an NP, and we're coordinated to it, we're an NP
if head.dep in np_deps:
if any(w.i in seen for w in word.subtree):
continue
seen.update(j for j in range(word.left_edge.i, word.i + 1))
yield word.left_edge.i, word.i + 1, np_label
SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
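# Usage sketch (illustrative): the iterator registered above is what backs
# `doc.noun_chunks` once an English pipeline is loaded, e.g.:
#   import spacy
#   nlp = spacy.load("en_core_web_sm")
#   doc = nlp("Autonomous cars shift insurance liability toward manufacturers.")
#   print([chunk.text for chunk in doc.noun_chunks])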
| [
"[email protected]"
] | |
d466f2fffdbb03cdaea47b83eeaafc9ac11fd8e4 | 52a930e04ebae70eca592eac7bced74aff8a1702 | /test/two.py | 41cd72fb91d718e7c73bd11eb88757b4c362492f | [] | no_license | lifengze97/LI-FENGZE-work-portfolio | cd05f53222bd6d8906b6bc00d80c195e836a8e36 | 668b4641ae7995645e765eb66970ce410967e84b | refs/heads/master | 2020-05-21T15:36:17.841891 | 2019-05-11T06:35:00 | 2019-05-11T06:35:00 | 186,095,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,217 | py | import numpy as np
import cv2 as cv
import mnist
from operator import itemgetter
def measure(image):
blur = cv.GaussianBlur(image, (5, 5), 0)
cv.imwrite("./aa/1.jpg", blur)
    gray = cv.cvtColor(blur, cv.COLOR_BGR2GRAY)  # cv.imread yields BGR images, so convert from BGR (not RGB)
cv.imwrite("./aa/2.jpg", gray)
_, out = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)
cv.imwrite("./aa/3.jpg", out)
ret, thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)
# ------------------------------------------------------------------------------------
    # The most important tuning parameter --- (71,71) --- must use odd values --- adjust it to the handwritten input image
# ------------------------------------------------------------------------------------
kernel = cv.getStructuringElement(cv.MORPH_RECT, (111,111))
erode = cv.dilate(thresh, kernel)
cv.imwrite("./aa/4.jpg", erode)
clone_image, contours, hierarchy = cv.findContours(erode, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
all = []
for i, contour in enumerate(contours):
x1, y1, w1, h1 = cv.boundingRect(contour)
cv.rectangle(image, (x1, y1), (x1 + w1, y1 + h1), (0, 255, 0), 2)
img = out[y1:(y1 + h1), x1:(x1 + w1)]
clone_image, contours, hierarchy = cv.findContours(img, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
# ------------------------------------------------------------------------------------
#
        # Main idea for recognising two-digit numbers --->
        # Use a morphological transformation --- dilation --- to close the gap between the two digits --- giving a single contour and bounding rectangle around both digits ---
        # --- then keep searching for digit contours inside that rectangle --- recognising the two digits --- the order in which they are found is not fixed: sometimes the leading digit is found first,
        # sometimes the trailing one, so the positions of the two digits are compared --- to recover the correct order --- and the correct number is drawn on the image
        #
        # In short: find the small contours inside the big contour, then process the small contours to obtain the correct number ---
        # x, y, w, h = cv.boundingRect(contour) --- gives the four parameters of a small rectangle inside the big one --- top-left x, top-left y, width, height
        # cv.rectangle --- draws the small rectangle
        # if h > 50: --- a size check on the small rectangle; only when it is tall enough do we assume it really contains a digit
        # result = img[y:(y + h), x:(x + w)] --- crops the image inside the small rectangle
        # constant = cv.copyMakeBorder --- pads the crop with a border --- because the digits in the training-set images sit in the centre
        # cv.imwrite --- saves the processed image
        # predict = mnist.recognition(_dir) --- runs recognition and returns the digit
        # cv.putText --- draws the digit predicted by the neural network
        # res.append([predict, x]) --- stores the predicted digit and the small box's top-left x coordinate in res ---
        # later --- after the loop over one big rectangle --- this decides whether a recognised digit sits in the first or the second position
        # cv.imshow('rect', image) --- display only
#
# ------------------------------------------------------------------------------------
res = []
for j, contour in enumerate(contours):
x, y, w, h = cv.boundingRect(contour)
cv.rectangle(image, (x1+x, y1+y), (x1+x+w, y1+y+h), (0, 0, 255), 2)
if h > 50:
result = img[y:(y + h), x:(x + w)]
black = [0, 0, 0]
constant = cv.copyMakeBorder(result, 40, 40, 40, 40, cv.BORDER_CONSTANT, value=black)
_dir = './data/' + str(j) + '.png'
cv.imwrite('./data/' + str(j) + '.png', constant)
predict = mnist.recognition(_dir)
text = "{}".format(predict[0])
cv.putText(image, text, (int(x1+x), int(y1+y)), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
res.append([predict, x])
cv.imshow('rect', image)
cv.waitKey(0)
print(res)
# ------------------------------------------------------------------------------------
#
        # The code below decides, for the digits recognised inside one big rectangle, which one is in the first position and which one is in the second
        # if len(res) == 1: --- handles a single digit --- for a single digit we simply draw the recognised digit ---
        # all.append([x1, y1, str(res[0][0][0])]) --- all --- holds every number (the one- or two-digit value inside each big rectangle)
        # --- it is used for the final sort, because the big bounding boxes are not detected in order either --- so a comparison is needed ---
        # if len(res) == 2: --- handles two-digit numbers ---
        # shape of res --- [[array([4]), 35], [array([6]), 148]] --- [array([4]), 35] --- first small rectangle --- its digit is 4
        # --- [array([6]), 148] --- second small rectangle --- its digit is 6
        # res[0][1] --- 35, res[1][1] --- 148, so the rectangle containing 4 comes first and the one containing 6 comes after it
        # --- which means the number inside the big rectangle is --- 46
        # Likewise --- if the units-digit rectangle is found first and the tens-digit rectangle second --- they can still be put back in the normal order
        # text = '' --- initialises a variable that accumulates the characters
        # res --- [[array([4]), 35], [array([6]), 148]]
        # res[0] --- [array([4]), 35]
        # res[0][0] --- [4]
        # res[0][0][0] --- 4
        #
        # res[1][0][0] --- gives a number, which must be converted to a string --- str()
        #
# ------------------------------------------------------------------------------------
print(res)
if len(res) == 1:
cv.putText(image, str(res[0][0][0]), (int(x1), int(y1-30)), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
all.append([x1, y1, str(res[0][0][0])])
if len(res) == 2:
if res[0][1] > res[1][1]:
text = ''
text += str(res[1][0][0])
text += str(res[0][0][0])
print(text)
cv.putText(image, text, (int(x1), int(y1 - 30)), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
all.append([x1, y1, text])
else:
text = ''
text += str(res[0][0][0])
text += str(res[1][0][0])
print(text)
cv.putText(image, text, (int(x1), int(y1 - 30)), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
all.append([x1, y1, text])
# ------------------------------------------------------------------------------------
#
    # Saving the two-digit numbers --- one row of numbers or one column can be saved; no good algorithm for two rows has been found yet
    # Numbers in detection order --- [[1300, 173, '46'], [982, 159, '23'], [721, 151, '11'], [393, 111, '7'], [18, 107, '10']]
    # --- taking one entry as an example --- [1300, 173, '46'] --- the first value is the big contour's x coordinate, the second its y coordinate, the third the number recognised inside it
    # Numbers in sorted order --- [[18, 107, '10'], [393, 111, '7'], [721, 151, '11'], [982, 159, '23'], [1300, 173, '46']]
    # sorted(all, key=(lambda x: [x[0], x[1]])) --- sorts --- with the first element as the primary key and the second as the secondary key
    # --- this sorts by X --- smaller x values come first; sorting by Y instead would handle a vertical column of numbers
    # The processed numbers are saved to a txt file; txt stores characters, so the values are converted to strings and a newline is appended, which keeps the output readable
    # with open('./data/data.txt', 'w') as f: --- opens the txt file
    # f.write --- writes the data
#
# ------------------------------------------------------------------------------------
print(all)
all = sorted(all, key=(lambda x: [x[0], x[1]]))
print(all)
with open('./data/data.txt', 'w') as f:
for i in range(0, len(all)):
f.write(str(all[i][2]) + "\n")
src = cv.imread('./data/test10.jpeg')
measure(src)
cv.waitKey(0)
cv.destroyAllWindows()
| [
"[email protected]"
] | |
76a7f7756f97cde7cf7dfa05108ed98396a6ec61 | 6e4ac1a8bb85b7b39c173adfc14d1d4d70a79e03 | /question/permissions.py | 4dbbf6e96f76a960bdbc107041cb7b188622384e | [
"MIT"
] | permissive | salva-imm/backend | 4f860da4efd374daa06349dde96e53820f497596 | 4201eccac5c040caac8330911ed0530385dd1b69 | refs/heads/master | 2023-04-11T16:09:08.182517 | 2021-05-05T12:24:59 | 2021-05-05T12:24:59 | 364,716,745 | 1 | 0 | MIT | 2021-05-05T22:04:11 | 2021-05-05T22:04:10 | null | UTF-8 | Python | false | false | 725 | py | from datetime import datetime, timedelta
from utilities.exceptions import CustomException
from question.models import Question, Answer
def check_create_question_permission(user, company):
last_question = Question.objects.filter(company=company, creator=user).last()
if not last_question:
return
elif last_question.created + timedelta(days=5) < datetime.now():
return
else:
        raise CustomException(detail='You asked a question before', code=403)
def check_create_answer_permission(user, question):
answered = Answer.objects.filter(creator=user, question=question).last()
if not answered:
return
else:
raise CustomException(detail='You answered before', code=403)
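# Usage sketch (illustrative): call these guards from a view before creating
# the object; they return None when the action is allowed and raise a 403
# CustomException otherwise, e.g.:
#   check_create_question_permission(request.user, company)
#   question = Question.objects.create(company=company, creator=request.user)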
| [
"[email protected]"
] | |
29eb637fd7c69ee63ff42753a2493565813f803f | db4e95730de8e333480e575df8f9792494524df3 | /Python/Startup/LongestShotMaker.py | e061ef5807b660a1a5b7de467efe19c9fcf58375 | [] | no_license | horitin/dotStudio | 99f2065b598f1fd6c960cee00603e982a55f0d98 | 5958706af127e91dde8d00559cafe17536201054 | refs/heads/master | 2021-01-23T23:02:37.828574 | 2015-09-15T15:15:33 | 2015-09-15T15:15:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,487 | py | import hiero.core
from PySide.QtGui import QAction
# Longest Shot from Sequence
# Creates a new Sequence which contains Shots with the longest range of frames used across all shots.
class LongestSequenceFromSelectionAction(QAction):
def __init__(self):
QAction.__init__(self, "Create Longest Shot Sequence", None)
self.triggered.connect(self.doit)
hiero.core.events.registerInterest((hiero.core.events.EventType.kShowContextMenu, hiero.core.events.EventType.kBin), self.eventHandler)
def doit(self):
selection = list(hiero.ui.activeView().selection())
sequences = [item.activeItem() for item in selection if isinstance(item.activeItem(),hiero.core.Sequence)]
# For every Sequence, we need to build a list of shots
# This will assume that the first track is the master track, as if it were from the original EDL
all_shots = []
for seq in sequences:
tracks = seq.videoTracks()
for track in tracks:
shots = list(track.items())
all_shots.extend(shots)
# We now must determine shots which have the same Source Clip across the selection of Sequences
clipMatches = {}
for shot in all_shots:
print str(shot)
clipName = shot.source().name()
if clipName in clipMatches.keys():
clipMatches[clipName]+=[{'trackItem':shot,
'clip':shot.source(),
'duration':shot.duration(),
'sourceIn':shot.sourceIn(),
'sourceOut':shot.sourceOut()
}]
else:
clipMatches[clipName]=[{'trackItem':shot,
'clip':shot.source(),
'duration':shot.duration(),
'sourceIn':shot.sourceIn(),
'sourceOut':shot.sourceOut()
}]
longestShots = []
hiero.core.clipMatches = clipMatches
for clipName in clipMatches.keys():
MAX = max([item['duration'] for item in clipMatches[clipName]])
print 'Max duration for Shot: %s is %i' % (str(clipName),MAX)
# Now find the dict inside clipMatches which has this duration
longestShot = [item['trackItem'] for item in clipMatches[clipName] if item['duration']==MAX]
longestShots.extend(longestShot)
longestShots = hiero.core.util.uniquify(longestShots)
print 'LONGEST SHOTS WERE: ' + str(longestShots)
# Create a new Sequence
seq2 = hiero.core.Sequence("Longest Shots")
longestTrack = hiero.core.VideoTrack('Longest')
seq2.addTrack(longestTrack)
t0 = 0
for shot in longestShots:
print 'ADDING SHOT: ' + str(shot)
newShot = shot.copy()
newShot.setTimelineIn(t0)
newShot.setTimelineOut(t0+shot.duration()-1)
longestTrack.addTrackItem(newShot)
t0 = t0+shot.duration()
print "Sequence duration is %i" % t0
proj = seq.project()
root = proj.clipsBin()
root.addItem(hiero.core.BinItem(seq2))
def eventHandler(self, event):
if not hasattr(event.sender, 'selection'):
return
# Disable if nothing is selected
selection = event.sender.selection()
selectedSequences = [item for item in selection if isinstance(item.activeItem(),hiero.core.Sequence)]
self.setEnabled( len(selectedSequences) > 1 )
event.menu.addAction(self)
action = LongestSequenceFromSelectionAction() | [
"[email protected]"
] | |
5658a3a70b04b16670fd202bd763cc6512ff4448 | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/l2/rtresl2instpol.py | 859da4c1e5fe74c7218a2aa5517591d92b42eaae | [] | no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 4,772 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RtResL2InstPol(Mo):
"""
A target relation to the layer 2 instance policy information. This is an internal object.
"""
meta = TargetRelationMeta("cobra.model.l2.RtResL2InstPol", "cobra.model.fabric.Inst")
meta.moClassName = "l2RtResL2InstPol"
meta.rnFormat = "rtresL2InstPol"
meta.category = MoCategory.RELATIONSHIP_FROM_LOCAL
meta.label = "Fabric Instance"
meta.writeAccessMask = 0x4000000001
meta.readAccessMask = 0x4000000001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.l2.InstPol")
meta.superClasses.add("cobra.model.reln.From")
meta.superClasses.add("cobra.model.reln.Inst")
meta.superClasses.add("cobra.model.pol.NFromRef")
meta.rnPrefixes = [
('rtresL2InstPol', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 11738, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1004
prop.defaultValueStr = "fabricInst"
prop._addConstant("fabricInst", None, 1004)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 100, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("tDn", prop)
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("l2InstPolToPortGroups", "Portgroups", "cobra.model.vmm.EpPD"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("l2InstPolToVirtualMachines", "Virtual Machines", "cobra.model.comp.Vm"))
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
5d812f9c38a3ade282d96b5d62081a22e1493992 | 742ef72c0e2455222c9a5b02abb46f4a792940ec | /shop/migrations/0001_initial.py | acb7f8a57feedf8e87809bfa7137e504b41fc3ea | [] | no_license | dongju93/mydogs_website | 7af03ddbde39ce8c4f3907c9b8d7e6434961adec | 86688d62d3b5bfd4da79f1980c0b97b8022b6abb | refs/heads/master | 2021-01-03T10:37:26.559646 | 2020-02-21T10:18:13 | 2020-02-21T10:18:13 | 240,044,192 | 1 | 0 | null | 2020-02-12T15:22:48 | 2020-02-12T15:17:31 | Python | UTF-8 | Python | false | false | 2,228 | py | # Generated by Django 2.2.2 on 2019-06-16 09:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=200)),
('meta_description', models.TextField(blank=True)),
('slug', models.SlugField(allow_unicode=True, max_length=200, unique=True)),
],
options={
'verbose_name': 'category',
'verbose_name_plural': 'categories',
'ordering': ['name'],
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=200)),
('slug', models.SlugField(allow_unicode=True, max_length=200, unique=True)),
('image', models.ImageField(blank=True, upload_to='products/%Y/%m/%d')),
('description', models.TextField(blank=True)),
('meta_description', models.TextField(blank=True)),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
('stock', models.PositiveIntegerField()),
('available_display', models.BooleanField(default=True, verbose_name='Display')),
('available_order', models.BooleanField(default=True, verbose_name='Order')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='products', to='shop.Category')),
],
options={
'ordering': ['-created'],
'index_together': {('id', 'slug')},
},
),
]
| [
"[email protected]"
] | |
291112da9b483ea2150b7e5e28754d866fcc58d9 | 572ce2b8a9c687f302ea4953dd9bd978470d0c4b | /pythonOOPS/apartments/apartments/middlewares.py | cc017802cc872bb34397da9a233787a5f2e6543f | [] | no_license | fainaszar/pythonPrograms | 5f539c8b80deb5d57e6aa984b0325389cf3b6f51 | 03f6c8b540981332e6f940308c7407a5038faac9 | refs/heads/master | 2021-09-07T18:10:43.603405 | 2018-02-27T05:27:37 | 2018-02-27T05:27:37 | 106,532,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,973 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
import scrapy
from selenium import webdriver
class ApartmentsSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| [
"[email protected]"
] | |
836c3f361ef8f0313135b27ba270a86d9d72085b | 80244495df04ff2e5dc8d3af62b8173625a3379f | /app/appbase.py | 23ef2fe4f49fd3922854e1ca3f783d0c9c1ca26d | [
"Apache-2.0"
] | permissive | Jiyvn/pyautotest | a0c24a90ea1626402d1d403a4cef7c9355220011 | a33281d0a6f6edd9c9c60c1e83e2534436818146 | refs/heads/main | 2023-03-26T22:57:09.256738 | 2021-04-02T18:25:03 | 2021-04-02T18:25:03 | 338,061,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,616 | py |
from appium.webdriver.common.mobileby import MobileBy
appPackage = {
'fake_location': 'com.lerist.fakelocation',
'chrome': "com.android.chrome",
'settings': 'com.android.settings',
'gmail': 'com.google.android.gm',
}
appActivity = {
'fake_location': '.ui.activity.MainActivity',
'chrome': "com.google.android.apps.chrome.Main",
'settings': '.Settings',
'gmail': '.ConversationListActivityGmail',
'gmail_wel': '.welcome.WelcomeTourActivity',
}
appBundleId = {
'chrome': 'com.google.chrome.ios',
'gmail': 'com.google.Gmail',
'safari': 'com.apple.mobilesafari',
'settings': 'com.apple.Preferences',
'appstore': 'com.apple.AppStore',
}
android_base = {
'text': lambda value: (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("{0}")'.format(value)),
'text_contains': lambda value: (
MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().textContains("{0}")'.format(value)),
'text_view': lambda value: (MobileBy.XPATH, '//android.widget.TextView[@text="{0}"]'.format(value)),
'button': lambda value: (
MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().className("android.widget.Button").text("{0}")'.format(value)),
'edittext': lambda value: (MobileBy.XPATH, '//android.widget.EditText[@text="{0}"]'.format(value)),
'desc_contains': lambda value: (
MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().descriptionContains("{0}")'.format(value)), # content-desc属性
}
ios_base = {
'value': lambda v: (MobileBy.XPATH, '//*[@value="{}"]'.format(v)),
'value_contains': lambda v: (MobileBy.XPATH, '//*[contains(@value,"{}")]'.format(v)),
'name': lambda v: (MobileBy.XPATH, '//*[@name="{}"]'.format(v)),
'name_contains': lambda v: (MobileBy.XPATH, '//*[contains(@name,"{}")]'.format(v)),
'btn_name': lambda v: (MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeButton" AND name=="{}"'.format(v)),
'btn_name_contains': lambda v: (
MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeButton" AND name CONTAINS "{}"'.format(v)),
'switch_name': lambda v: (MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeSwitch" AND name=="{}"'.format(v)),
'switch_name_contains': lambda v: (
MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeSwitch" AND name CONTAINS "{}"'.format(v)),
'cell_name': lambda v: (MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeCell" AND name=="{}"'.format(v)),
'cell_name_contains': lambda v: (
MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeCell" AND name CONTAINS "{}"'.format(v)),
}
'''
Keys on the iOS keyboard are case-sensitive. The special keys shift, delete, more, space, @, ., Return and Next keyboard (switches the input language, e.g. Chinese/English) are case-insensitive.
'''
ios_keyboard = {
'done': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label MATCHES "Done|完成"`]'),
'key': lambda k: (MobileBy.ACCESSIBILITY_ID, '{0}'.format(k))
}
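# Usage sketch (illustrative; `driver` is assumed to be an Appium iOS driver):
#   driver.find_element(*ios_keyboard['key']('a')).click()   # tap the "a" key
#   driver.find_element(*ios_keyboard['done']).click()       # dismiss the keyboard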
def scrollable(locators: [list, str]):
if isinstance(locators, list):
return (MobileBy.ANDROID_UIAUTOMATOR, 'new UiScrollable(new UiSelector().scrollable(true).instance(0)).scrollIntoView(new UiSelector().%s)' % '.'.join(locators))
elif isinstance(locators, str):
return (MobileBy.ANDROID_UIAUTOMATOR, 'new UiScrollable(new UiSelector().scrollable(true).instance(0)).scrollIntoView(new UiSelector().%s)' % locators)
def selector(locators: [list, str]):
if isinstance(locators, list):
return (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().%s' % '.'.join(locators))
elif isinstance(locators, str):
return (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().%s' % locators)
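# Usage sketch (illustrative; `driver` is assumed to be an Appium Android driver):
#   driver.find_element(*scrollable('text("Fake Location")'))   # scrolls the element into view first
#   driver.find_element(*selector(['className("android.widget.Button")', 'text("OK")']))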
settings = {
'nothing_en': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("%s")' % 'Nothing'),
'nothing_zh': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("%s")' % '无'),
'None': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("%s")' % 'None'),
'fake_location': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("{0}")'.format('Fake Location')),
'select_mock_app_en': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().textContains("%s")' % 'Mock'),
'select_mock_app_zh': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().textContains("%s")' % '模拟位置'),
'select_text_zh': '选择模拟位置信息应用',
'select_text_en': 'Select mock location app',
# ********* iOS ************
'ios_setting_page_title': (MobileBy.XPATH, '//XCUIElementTypeStaticText[@name="Settings"]'),
'ios_setting_search_field': (MobileBy.ACCESSIBILITY_ID, 'Search'),
    # Bluetooth page element: the value attribute is 1 when Bluetooth is on and 0 when it is off
'ios_bluetooth': (MobileBy.IOS_PREDICATE, 'type=="{0}" AND name=="{1}"'.format('XCUIElementTypeSwitch','Bluetooth')),
'ios_bluetooth2': (MobileBy.XPATH, '//XCUIElementTypeSwitch[@name="Bluetooth"]'),
    # Settings home page element: the On/Off text; tapping it enters the Bluetooth page
'ios_bluetooth_item': (MobileBy.XPATH, '//XCUIElementTypeStaticText[@name="Bluetooth"]/following-sibling::XCUIElementTypeStaticText[1]'),
'ios_setting_items': lambda x: (MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeCell" AND name=="{0}"'.format(x)),
'ios_setting_toggles': lambda x: (MobileBy.XPATH, '//XCUIElementTypeSwitch[@name="{0}"]'.format(x)),
'ios_setting_wifi': (MobileBy.XPATH, '//XCUIElementTypeSwitch[@name="Wi-Fi"]'),
    'ios_back_to_current_app': (MobileBy.ACCESSIBILITY_ID, 'breadcrumb'),  # remember to pass approach = 'p' for this one
'ios_setting_items_title': lambda x: (MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeOther" AND name=="{0}"'.format(x)),
# 'ios_general': (MobileBy.IOS_PREDICATE, 'type=="{0}" AND name=="{1}"'.format('XCUIElementTypeCell', 'General')),
'ios_general': (MobileBy.IOS_PREDICATE, 'type=="{0}" AND name=="{1}"'.format('XCUIElementTypeCell', '通用')),
'ios_date&time': (MobileBy.IOS_PREDICATE, 'type=="{0}" AND name=="{1}"'.format('XCUIElementTypeCell', 'Date & Time')),
'ios_profile&devicemanagement': (MobileBy.IOS_PREDICATE, 'type=="{0}" AND name CONTAINS "{1}"'.format('XCUIElementTypeCell', 'Device Management')),
'ios_trust_app_btn': lambda x: (MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeStaticText" AND value=="Trust “{0}”"'.format(x)), # e.g. Trust “Fisher-Price, Inc.”
'ios_trust_app_dialog_title': lambda x: (MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeStaticText" AND value CONTAINS "Trust “iPhone Distribution: {0}”"'.format(x)), # e.g. Trust “iPhone Distribution: Fisher-Price, Inc.” Apps on This iPhone
'ios_trust_btn': (MobileBy.ACCESSIBILITY_ID, 'Trust'),
    # 24-Hour Time toggle: its value is 0 when the device uses the 12-hour clock, 1 otherwise
'ios_24hr': (MobileBy.IOS_PREDICATE, 'type=="{0}" AND name=="{1}"'.format('XCUIElementTypeCell','24-Hour Time')),
'ios_24hr_x': (MobileBy.XPATH, '//XCUIElementTypeCell[@name="24-Hour Time"]'),
}
app_store = {
    'continue': (MobileBy.IOS_PREDICATE, 'label == "继续" AND name == "继续" AND type == "XCUIElementTypeButton"'),  # the "Continue" dialog page
'allow_when_using': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label == "使用App时允许"`]'),
'app_item': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label == "App"`]'),
'account': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label == "我的帐户"`]'),
'search_item': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label == "搜索"`]'),
'search_field': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeNavigationBar[`name == "搜索"`]/XCUIElementTypeSearchField'),
    'keyboard_continue': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label == "继续"`]'),  # appears after tapping the search field
    'keyboard_search_btn': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`name == "Search"`]'),  # appears after tapping the search field
    'progress_circle': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeActivityIndicator[`label MATCHES "正在载入|进行中"`]'),  # spinner shown while search results load
    'retry': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label == "重试"`]'),  # shown when the search fails
    'app': lambda a: (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label CONTAINS "{}"`]'.format(a)),  # Fisher-Price® Smart Connect™
    'navigate_search_btn': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeNavigationBar[`name == "搜索"`]'),  # app detail page
    'reload_btn': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label == "重新下载"`]'),  # app detail page
    'get_btn': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label == "获取"`]'),  # app detail page, first-time download
    'upgrade_btn': (MobileBy.IOS_CLASS_CHAIN, ''),
    'in_process': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label == "正在载入"`]'),  # progress circle button
    'downloading': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label == "正在下载"`]'),  # pause button
    'open_app': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label == "打开"`]'),
}
notification = {
    'ios_notification': lambda msg_title: (MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeScrollView" AND (name CONTAINS "{0}" OR label CONTAINS "{0}")'.format(msg_title)),  # a given app's notifications; several usually stack and need a tap to expand. Pass the name value, normally the app title of the expanded message, e.g. SMART CONNECT
    'ios_nt_msg': lambda c: (MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeButton" AND name=="NotificationCell" AND label CONTAINS "{}"'.format(c)),  # (swipe right to delete) e.g. the message: Animal projection on your My Child's Deluxe Soother is turning off soon.
    'ios_nt_clear': lambda msg_title: (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label BEGINSWITH "{}"`]/XCUIElementTypeButton[`label == "Clear"`][1]'.format(msg_title)),  # the Clear button revealed by swiping the message left
    'ios_nt_clear_all': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label == "Clear All"`]'),  # the Clear All button revealed by swiping the message left
    'ios_clear_all_btn': (MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeButton" AND name=="clear-button" AND value=="Notification Center"'),  # the (x) button that clears notifications for every app
    'ios_clear_btn': lambda app: (MobileBy.XPATH, '//XCUIElementTypeStaticText[@name="{}"]/../following::XCUIElementTypeButton[@name="clear-button"]'.format(app)),  # the (x) button that clears one app's notifications; it has no label value, e.g. Smart Connect
    'ios_confirm_clear': (MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeButton" AND name=="clear-button" AND label=="Confirm Clear"'),  # the Clear confirmation button shown after tapping (x); generic
}
camera = {
    # in-app photo capture screen
'ios_capture': (MobileBy.ACCESSIBILITY_ID, 'PhotoCapture'),
'ios_cancel_capture': (MobileBy.ACCESSIBILITY_ID, 'Cancel'),
'ios_switch_camera': (MobileBy.ACCESSIBILITY_ID, 'FrontBackFacingCameraChooser'),
    # the flash has three values (Automatic, On, Off) and cannot be set with send_keys; tap the Flash icon, then choose from the submenu below
'ios_flash_light': (MobileBy.ACCESSIBILITY_ID, 'Flash'),
'ios_flash_auto': (MobileBy.ACCESSIBILITY_ID, 'Auto'),
'ios_flash_on': (MobileBy.ACCESSIBILITY_ID, 'On'),
'ios_flash_off': (MobileBy.ACCESSIBILITY_ID, 'Off'),
    # preview screen
'ios_retake': (MobileBy.ACCESSIBILITY_ID, 'Retake'),
'ios_use': (MobileBy.ACCESSIBILITY_ID, 'Use Photo'),
    # crop screen
'ios_crop_use': (MobileBy.ACCESSIBILITY_ID, 'useButton'),
'ios_crop_cancel': (MobileBy.ACCESSIBILITY_ID, 'cancelButton'),
}
albums = {
    # in-app album screen (photo picker)
'ios_cancel': (MobileBy.ACCESSIBILITY_ID, 'Cancel'),
    # individual albums, referenced by name; default system albums usually include Camera Roll, Recently Added and Screenshots, plus albums created by individual apps
'ios_albums': lambda x: (MobileBy.ACCESSIBILITY_ID, '{0}'.format(x)),
    'ios_back_btn': (MobileBy.ACCESSIBILITY_ID, 'Photos'),  # the back button in the top-left corner, used to return to the album list
    # photos inside an album: the newest photo usually comes last, so use last() or pass a number to pick the Nth photo, e.g. 200 selects the 200th one
    # note that an album only renders part of its photos; tapping an off-screen photo first scrolls it into view, and a second tap actually selects it
'ios_photos_by_position': lambda x: (MobileBy.XPATH, '//XCUIElementTypeCollectionView[@name="PhotosGridView"]/XCUIElementTypeCell[{0}]'.format(x)),
    # crop screen
'ios_crop_use': (MobileBy.ACCESSIBILITY_ID, 'useButton'),
'ios_crop_cancel': (MobileBy.ACCESSIBILITY_ID, 'cancelButton'),
}
fake_location = {
'menu_btn': (MobileBy.ACCESSIBILITY_ID, 'Open navigation drawer'),
'start_to_fake': (MobileBy.ID, 'com.lerist.fakelocation:id/f_fakeloc_tv_service_switch'),
'add_btn': (MobileBy.ID, 'com.lerist.fakelocation:id/fab'),
'current_coords': (MobileBy.ID, 'com.lerist.fakelocation:id/f_fakeloc_tv_current_latlong'),
'running_mode': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("{0}")'.format('运行模式')),
'no_root_mode': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().textContains("{0}")'.format('NOROOT')),
'root_mode': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().textContains("{0}")'.format('ROOT(推荐)')),
'permission_allow': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("{0}")'.format('允许')),
'permission_allow_id': (MobileBy.ID, 'com.android.packageinstaller:id/dialog_container'),
'title_choose_location': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("{0}")'.format('选择位置')),
'search_btn': (MobileBy.ID, 'com.lerist.fakelocation:id/m_item_search'),
'search_box': (MobileBy.ID, 'com.lerist.fakelocation:id/l_search_panel_et_input'),
'confirm_btn': (MobileBy.ID, 'com.lerist.fakelocation:id/a_map_btn_done'),
'back_btn': (MobileBy.ACCESSIBILITY_ID, '转到上一层级'),
'update_next_time': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("{0}")'.format('下次再说')),
'forward_toset': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("{0}")'.format('前往设置')),
'get_permission': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("{0}")'.format('前往授权')),
    # shared elements: dialogs and their confirm/cancel buttons
'native_dialog': (MobileBy.ID, 'android:id/parentPanel'),
'prompt_dialog': (MobileBy.ID, 'com.lerist.fakelocation:id/parentPanel'),
'dialog_confirm_btn': (MobileBy.ID, 'android:id/button1'),
'dialog_cancel_btn': (MobileBy.ID, 'android:id/button2'),
}
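# Usage sketch (hypothetical driver): each entry unpacks straight into an Appium
# lookup, e.g. driver.find_element(*settings['ios_setting_search_field']); lambda
# entries are called first: driver.find_element(*settings['ios_setting_toggles']('Wi-Fi')).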
# ---- /backend/fitniccs_application_4727/urls.py (crowdbotics-apps/fitniccs-application-4727) ----
"""fitniccs_application_4727 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('', include('home.urls')),
path('accounts/', include('allauth.urls')),
path('api/v1/', include('home.api.v1.urls')),
path('admin/', admin.site.urls),
]
admin.site.site_header = 'Fitniccs Application'
admin.site.site_title = 'Fitniccs Application Admin Portal'
admin.site.index_title = 'Fitniccs Application Admin'
# ---- /src/evalRegularJacobian.py (ebjarkason/randomTSVDLM) ----
# Regularization Jacobian
# Coded by: Elvar K. Bjarkason (2017)
import scipy as sp
from scipy.sparse import csr_matrix, lil_matrix
# Generates the Regularization Jacobian for the 2D slice model with 8000 rock-types and 16,000 parameters.
# Could generalize to adjustable permeabilities in every block
# by using grid connection information.
def regjac(NRadj):
    NRadjHalf = NRadj//2  # integer division: NRadjHalf is the offset of the log10kz parameter block
mpr = sp.load('mprior.npy')
Npr = len(mpr)
Nregsmooth = 2*15820 # 2 times the number of connections between adjustable rock-types
Nreglocalxz = 8000 # Number of adjustable rock-types
Nreg = Nregsmooth + Nreglocalxz + Npr
sp.save('Nreg.npy',Nreg)
rJac = lil_matrix((Nreg,NRadj))
x = 0
# Create horizontal smoothing of log10kx (perm index 1):
for i in range(0,80):
for j in range(0,99):
rJac[x,j + i*100] = 1
rJac[x,j + i*100 + 1] = -1
x += 1
# Create vertical smoothing of log10kx (perm index 1):
for i in range(0,79):
for j in range(0,100):
rJac[x,j + i*100] = 1
rJac[x,j + (i+1)*100] = -1
x += 1
# Create horizontal smoothing of log10kz (perm index 3):
for i in range(0,80):
for j in range(0,99):
rJac[x,j + i*100 + NRadjHalf] = 1
rJac[x,j + i*100 + 1 + NRadjHalf] = -1
x += 1
## Create vertical smoothing of log10kz (perm index 3):
for i in range(0,79):
for j in range(0,100):
rJac[x,j + i*100 + NRadjHalf] = 1
rJac[x,j + (i+1)*100 + NRadjHalf] = -1
x += 1
# Add regularization to make log10kx similar to log10kz:
for i in range(0,Nreglocalxz):
rJac[x,i] = 1
rJac[x,i + NRadjHalf] = -1
x += 1
# Add prior paramater regularisation:
for i in range(0,Npr):
rJac[x,i] = 1*0.001
x += 1
return csr_matrix(rJac)
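# Usage sketch, kept as comments since regjac() needs 'mprior.npy' on disk:
#     rjac = regjac(16000)
#     print(rjac.shape)   # (Nreg, 16000), with Nreg = 2*15820 + 8000 + len(mprior)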
# ---- /federatedml/nn/homo_nn/enter_point.py (jiahuanluo/cv_task) ----
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from arch.api import session
from arch.api.utils.log_utils import LoggerFactory
from fate_flow.entity.metric import MetricType, MetricMeta, Metric
from federatedml.framework.homo.blocks import secure_mean_aggregator, loss_scatter, has_converged
from federatedml.framework.homo.blocks.base import HomoTransferBase
from federatedml.framework.homo.blocks.has_converged import HasConvergedTransVar
from federatedml.framework.homo.blocks.loss_scatter import LossScatterTransVar
from federatedml.framework.homo.blocks.secure_aggregator import SecureAggregatorTransVar
from federatedml.model_base import ModelBase
from federatedml.nn.homo_nn import nn_model
from federatedml.nn.homo_nn.nn_model import restore_nn_model
from federatedml.optim.convergence import converge_func_factory
from federatedml.param.homo_nn_param import HomoNNParam
from federatedml.util import consts
from cv_task import dataloader_detector, net, models
from cv_task.utils.utils import *
# from cv_task.lib.roi_data_layer.roidb import combined_roidb
# from cv_task.lib.roi_data_layer.roibatchLoader import roibatchLoader
# from cv_task.lib.model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
# from cv_task.lib.model.utils.net_utils import weights_normal_init, save_net, load_net, \
# adjust_learning_rate, save_checkpoint, clip_gradient
# from cv_task.lib.model.faster_rcnn.vgg16 import vgg16
from cv_task.utils.config import opt
from cv_task.data.dataset import Dataset, TestDataset, inverse_normalize
from cv_task.model import FasterRCNNVGG16
from torch.utils import data as data_
from cv_task.fasterrcnntrainer import FasterRCNNTrainer
from cv_task.utils import array_tool as at
from cv_task.utils.vis_tool import visdom_bbox
from cv_task.utils.eval_tool import eval_detection_voc
import torch
from torch.autograd import Variable
from federatedml.nn.backend.pytorch.nn_model import PytorchNNModel
from torch.utils.data import DataLoader
from torch.utils.data.sampler import Sampler
import numpy as np
import time
import os
import warnings
warnings.filterwarnings("ignore")
Logger = LoggerFactory.get_logger()
MODEL_META_NAME = "HomoNNModelMeta"
MODEL_PARAM_NAME = "HomoNNModelParam"
def _build_model_dict(meta, param):
return {MODEL_META_NAME: meta, MODEL_PARAM_NAME: param}
def _extract_param(model_dict: dict):
return model_dict.get(MODEL_PARAM_NAME, None)
def _extract_meta(model_dict: dict):
return model_dict.get(MODEL_META_NAME, None)
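# Round trip: _build_model_dict(meta, param) -> {'HomoNNModelMeta': meta,
# 'HomoNNModelParam': param}; _extract_meta/_extract_param read the keys back,
# returning None when one is absent.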
class ObjDict(dict):
"""
    Makes a dictionary behave like an object, with attribute-style access.
    """
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)
def __setattr__(self, name, value):
self[name] = value
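# Usage sketch: the nn_define config blocks below are wrapped in ObjDict so that
# entries read as attributes, e.g. ObjDict({'batch_size': 8}).batch_size == 8.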
class HomoNNBase(ModelBase):
def __init__(self, trans_var):
super().__init__()
self.model_param = HomoNNParam()
self.aggregate_iteration_num = 0
self.transfer_variable = trans_var
def _suffix(self):
return self.aggregate_iteration_num,
def _init_model(self, param: HomoNNParam):
self.param = param
self.enable_secure_aggregate = param.secure_aggregate
self.max_aggregate_iteration_num = param.max_iter
class HomoNNServer(HomoNNBase):
def __init__(self, trans_var):
super().__init__(trans_var=trans_var)
self.model = None
self.aggregator = secure_mean_aggregator.Server(self.transfer_variable.secure_aggregator_trans_var)
self.loss_scatter = loss_scatter.Server(self.transfer_variable.loss_scatter_trans_var)
self.has_converged = has_converged.Server(self.transfer_variable.has_converged_trans_var)
def _init_model(self, param: HomoNNParam):
super()._init_model(param=param)
early_stop = self.model_param.early_stop
self.converge_func = converge_func_factory(early_stop.converge_func, early_stop.eps).is_converge
self.loss_consumed = early_stop.converge_func != "weight_diff"
def callback_loss(self, iter_num, loss):
metric_meta = MetricMeta(name='train',
metric_type="LOSS",
extra_metas={
"unit_name": "iters",
})
self.callback_meta(metric_name='loss', metric_namespace='train', metric_meta=metric_meta)
self.callback_metric(metric_name='loss',
metric_namespace='train',
metric_data=[Metric(iter_num, loss)])
def _is_converged(self):
loss = self.loss_scatter.weighted_loss_mean(suffix=self._suffix())
Logger.info(f"loss at iter {self.aggregate_iteration_num}: {loss}")
self.callback_loss(self.aggregate_iteration_num, loss)
if self.loss_consumed:
is_converged = self.converge_func(loss)
else:
is_converged = self.converge_func(self.model)
self.has_converged.remote_converge_status(is_converge=is_converged, suffix=self._suffix())
return is_converged
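    # Convergence protocol sketch: each client scatters its (loss, degree) pair, the
    # server forms the weighted mean above, applies the early-stop test, and remotes
    # the boolean back so every party stops on the same iteration.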
def fit(self, data_inst):
while self.aggregate_iteration_num < self.max_aggregate_iteration_num:
self.model = self.aggregator.weighted_mean_model(suffix=self._suffix())
self.aggregator.send_aggregated_model(model=self.model, suffix=self._suffix())
if self._is_converged():
Logger.info(f"early stop at iter {self.aggregate_iteration_num}")
break
self.aggregate_iteration_num += 1
else:
Logger.warn(f"reach max iter: {self.aggregate_iteration_num}, not converged")
def save_model(self):
return self.model
class HomoNNClient(HomoNNBase):
def __init__(self, trans_var):
super().__init__(trans_var=trans_var)
self.aggregator = secure_mean_aggregator.Client(self.transfer_variable.secure_aggregator_trans_var)
self.loss_scatter = loss_scatter.Client(self.transfer_variable.loss_scatter_trans_var)
self.has_converged = has_converged.Client(self.transfer_variable.has_converged_trans_var)
self.nn_model = None
def _init_model(self, param: HomoNNParam):
super()._init_model(param=param)
self.batch_size = param.batch_size
self.aggregate_every_n_epoch = param.aggregate_every_n_epoch
self.nn_define = param.nn_define
self.config_type = param.config_type
self.optimizer = param.optimizer
self.loss = param.loss
self.metrics = param.metrics
self.data_converter = nn_model.get_data_converter(self.config_type)
self.model_builder = nn_model.get_nn_builder(config_type=self.config_type)
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# self.device = torch.device("cpu")
def _is_converged(self, data, epoch_degree):
if self.config_type=="cv":
loss = data
elif self.config_type == "yolo":
loss = data
elif self.config_type == "faster":
loss = data
else:
metrics = self.nn_model.evaluate(data)
Logger.info(f"metrics at iter {self.aggregate_iteration_num}: {metrics}")
loss = metrics["loss"]
self.loss_scatter.send_loss(loss=(loss, epoch_degree), suffix=self._suffix())
is_converged = self.has_converged.get_converge_status(suffix=self._suffix())
return is_converged
def __build_nn_model(self, input_shape):
self.nn_model = self.model_builder(input_shape=input_shape,
nn_define=self.nn_define,
optimizer=self.optimizer,
loss=self.loss,
metrics=self.metrics)
def __build_pytorch_model(self, nn_define):
self.nn_model = self.model_builder(nn_define=nn_define,
optimizer=self.optimizer,
loss=self.loss,
metrics=self.metrics)
def fit(self, data_inst, *args):
if self.config_type == "pytorch":
data = self.data_converter.convert(data_inst, batch_size=self.batch_size)
self.__build_pytorch_model(self.nn_define)
epoch_degree = float(len(data)) * self.aggregate_every_n_epoch
elif self.config_type == "cv":
config_default = ObjDict(self.nn_define[0])
config, model, loss, get_pbb = net.get_model()
dataset_train = dataloader_detector.get_trainloader("train", config, config_default)
optimizer = torch.optim.SGD(
model.parameters(),
config_default.lr,
momentum=config_default.momentum,
weight_decay=config_default.weight_decay)
self.nn_model = PytorchNNModel(model=model,
optimizer=optimizer,
loss=loss)
epoch_degree = float(len(dataset_train))*self.aggregate_every_n_epoch
elif self.config_type == "yolo":
config_default = ObjDict(self.nn_define[0])
model = models.get_model()
dataset_train, _ = dataloader_detector.get_dataset('train')
optimizer = torch.optim.Adam(model.parameters())
self.nn_model = PytorchNNModel(model=model,
optimizer=optimizer,
loss=None)
epoch_degree = float(len(dataset_train))*self.aggregate_every_n_epoch
elif self.config_type == "faster":
config_default = ObjDict(self.nn_define[0])
dataset = Dataset(opt)
base_model = FasterRCNNVGG16()
model = FasterRCNNTrainer(base_model)
optimizer = model.optimizer
self.nn_model = PytorchNNModel(model=model,
optimizer=optimizer,
loss=None)
epoch_degree = float(len(dataset))*self.aggregate_every_n_epoch
else:
data = self.data_converter.convert(data_inst, batch_size=self.batch_size)
self.__build_nn_model(data.get_shape()[0])
epoch_degree = float(len(data)) * self.aggregate_every_n_epoch
while self.aggregate_iteration_num < self.max_aggregate_iteration_num:
Logger.info(f"start {self.aggregate_iteration_num}_th aggregation")
#train
if self.config_type == "cv":
trainloader = DataLoader(dataset_train,
batch_size=config_default.batch_size,
shuffle=True,
pin_memory=False)
epoch_degree = float(len(trainloader))
metrics = []
self.nn_model._model.train()
for i, (data, target, coord) in enumerate(trainloader):
# print('shape of data:', data.shape)
# print('shape of coord:', coord.shape)
# print('*******************', i, "/", str(len(trainloader)))
Logger.info(f"{i}:shape of data: {data.shape}, shape of coord:{coord.shape}")
output = self.nn_model._model(data, coord)
loss_output = loss(output, target)
optimizer.zero_grad()
loss_output[0].backward()
optimizer.step()
metrics.append(loss_output)
Logger.info(f"finish{i}th data")
metrics = np.asarray(metrics, np.float32)
#metrics
acc = (np.sum(metrics[:, 6]) + np.sum(metrics[:, 8])) / (np.sum(metrics[:, 7]) + np.sum(metrics[:, 9]))
tpr = np.sum(metrics[:, 6]) / np.sum(metrics[:, 7])
tnr = np.sum(metrics[:, 8]) / np.sum(metrics[:, 9])
tp = np.sum(metrics[:, 6])
p = np.sum(metrics[:, 7])
tn = np.sum(metrics[:, 8])
n = np.sum(metrics[:, 9])
total_loss = np.mean(metrics[:, 0])
                # keep the mean loss; it is what gets sent for aggregation and checked for convergence below
data = total_loss
classification_loss = np.mean(metrics[:, 1])
bbox_regressiong_loss_1 = np.mean(metrics[:, 2])
bbox_regressiong_loss_2 = np.mean(metrics[:, 3])
bbox_regressiong_loss_3 = np.mean(metrics[:, 4])
bbox_regressiong_loss_4 = np.mean(metrics[:, 5])
Logger.info('EPOCH {}, acc: {:.2f}, tpr: {:.2f} ({}/{}), tnr: {:.1f} ({}/{}), total_loss: {:.3f}, classification loss: {:.3f}, bbox regression loss: {:.2f}, {:.2f}, {:.2f}, {:.2f}\
'.format(self.aggregate_iteration_num, acc, tpr, tp, p, tnr, tn, n,
total_loss, classification_loss,
bbox_regressiong_loss_1, bbox_regressiong_loss_2,
bbox_regressiong_loss_3, bbox_regressiong_loss_4))
elif self.config_type == "yolo":
trainloader = DataLoader(dataset_train,
batch_size=config_default.batch_size,
shuffle=True,
num_workers=config_default.workers,
pin_memory=False,
collate_fn=dataset_train.collate_fn)
epoch_degree = float(len(trainloader))
self.nn_model._model.to(self.device)
self.nn_model._model.train()
metrics = []
for batch_i, (_, imgs, targets) in enumerate(trainloader):
imgs = Variable(imgs.to(self.device))
targets = Variable(targets.to(self.device), requires_grad=False)
loss, outputs = self.nn_model._model(imgs, targets)
loss.backward()
optimizer.step()
optimizer.zero_grad()
log_str = "---- [Batch %d/%d] ----" % (batch_i, len(trainloader))
log_str += f"---- Total loss {loss.item()}"
metrics.append(loss.item())
Logger.info(log_str)
# assert False
total_loss = np.mean(metrics)
data = total_loss
elif self.config_type == "faster":
trainloader = data_.DataLoader(dataset,
batch_size=1,
shuffle=True,
num_workers=1)
self.nn_model._model.reset_meters()
self.nn_model._model.to(self.device)
self.nn_model._model.train()
for ii, (img, bbox_, label_, scale) in enumerate(trainloader):
scale = at.scalar(scale)
img, bbox, label = img.to(self.device).float(), bbox_.to(self.device), label_.to(self.device)
self.nn_model._model.train_step(img, bbox, label, scale)
loss = self.nn_model._model.get_meter_data()
total_loss = loss["total_loss"]
log_str = "---- [Batch %d/%d] ----" % (ii, len(trainloader))
log_str += f"---- Total loss {total_loss}"
Logger.info(log_str)
data = total_loss
else:
self.nn_model.train(data, aggregate_every_n_epoch=self.aggregate_every_n_epoch)
# send model for aggregate, then set aggregated model to local
self.nn_model._model.to( torch.device("cpu"))
self.aggregator.send_weighted_model(weighted_model=self.nn_model.get_model_weights(),
weight=epoch_degree * self.aggregate_every_n_epoch,
suffix=self._suffix())
weights = self.aggregator.get_aggregated_model(suffix=self._suffix())
self.nn_model.set_model_weights(weights=weights)
self.nn_model._model.to(self.device)
#calc loss and check convergence
if self._is_converged(data, epoch_degree):
Logger.info(f"early stop at iter {self.aggregate_iteration_num}")
break
Logger.info(f"role {self.role} finish {self.aggregate_iteration_num}_th aggregation")
self.aggregate_iteration_num += 1
else:
Logger.warn(f"reach max iter: {self.aggregate_iteration_num}, not converged")
def export_model(self):
return _build_model_dict(meta=self._get_meta(), param=self._get_param())
def _get_meta(self):
from federatedml.protobuf.generated import nn_model_meta_pb2
meta_pb = nn_model_meta_pb2.NNModelMeta()
meta_pb.params.CopyFrom(self.model_param.generate_pb())
meta_pb.aggregate_iter = self.aggregate_iteration_num
return meta_pb
def _get_param(self):
from federatedml.protobuf.generated import nn_model_param_pb2
param_pb = nn_model_param_pb2.NNModelParam()
param_pb.saved_model_bytes = self.nn_model.export_model()
return param_pb
def predict(self, data_inst):
if self.config_type == "cv":
config_default = ObjDict(self.nn_define[0])
Logger.info(f"{self.nn_define}")
            # the model returned here is not used for prediction; only its config and loss are needed, since self.nn_model holds the trained weights
config, model, loss, get_pbb = net.get_model()
dataset_validation = dataloader_detector.get_trainloader("validation", config, config_default)
validateloader = DataLoader(dataset_validation,
batch_size=config_default.batch_size,
shuffle=False,
pin_memory=False)
Logger.info("validate begin.")
            # the aggregated model under evaluation
self.nn_model._model.eval()
metrics = []
start_time = time.time()
for i, (data, target, coord) in enumerate(validateloader):
print('*******************', i, "/", str(len(validateloader)))
# data = Variable(data.cuda())
data = Variable(data)
# target = Variable(target.cuda())
target = Variable(target)
# coord = Variable(coord.cuda())
coord = Variable(coord)
with torch.no_grad():
output = self.nn_model._model(data, coord)
loss_output = loss(output, target, train=False)
metrics.append(loss_output)
end_time = time.time()
metrics = np.asarray(metrics, np.float32)
epoch = 1
msg = 'EPOCH {} '.format(
epoch) + 'Validation: tpr %3.2f, tnr %3.8f, total pos %d, total neg %d, time %3.2f' % (
100.0 * np.sum(metrics[:, 6]) / np.sum(metrics[:, 7]),
100.0 * np.sum(metrics[:, 8]) / np.sum(metrics[:, 9]),
np.sum(metrics[:, 7]),
np.sum(metrics[:, 9]),
end_time - start_time) + 'loss %2.4f, classify loss %2.4f, regress loss %2.4f, %2.4f, %2.4f, %2.4f' % (
np.mean(metrics[:, 0]),
np.mean(metrics[:, 1]),
np.mean(metrics[:, 2]),
np.mean(metrics[:, 3]),
np.mean(metrics[:, 4]),
np.mean(metrics[:, 5]))
print(msg)
Logger.info(msg)
elif self.config_type == "yolo":
dataset_valid, class_names = dataloader_detector.get_dataset('valid')
validloader = DataLoader(dataset_valid,
batch_size=1,
shuffle=False,
pin_memory=False,
collate_fn=dataset_valid.collate_fn)
if type(self.nn_model._model) == dict:
state_dict = self.nn_model._model["model"]
model = models.get_model()
self.nn_model._model = model
self.nn_model._model.load_state_dict(state_dict)
self.nn_model._model.to(self.device)
self.nn_model._model.eval()
Logger.info("validate begin.")
labels = []
sample_metrics = []
for batch_i, (_, imgs, targets) in enumerate(validloader):
labels += targets[:, 1].tolist()
# Rescale target
targets[:, 2:] = xywh2xyxy(targets[:, 2:])
targets[:, 2:] *= 416
imgs = Variable(imgs.type(torch.FloatTensor).to(self.device), requires_grad=False)
with torch.no_grad():
outputs = self.nn_model._model(imgs)
outputs = non_max_suppression(outputs, conf_thres=0.5, nms_thres=0.5)
sample_metrics += get_batch_statistics(outputs, targets, iou_threshold=0.5)
# Concatenate sample statistics
if len(sample_metrics) == 0:
precision, recall, AP, f1, ap_class = np.array([0]), np.array([0]), np.array([0]), np.array([0]), np.array([0], dtype=np.int)
else:
true_positives, pred_scores, pred_labels = [np.concatenate(x, 0) for x in list(zip(*sample_metrics))]
precision, recall, AP, f1, ap_class = ap_per_class(true_positives, pred_scores, pred_labels, labels)
evaluation_metrics = [
("val_precision", precision.mean()),
("val_recall", recall.mean()),
("val_mAP", AP.mean()),
("val_f1", f1.mean()),
]
Logger.info(evaluation_metrics)
# Print class APs and mAP
# ap_table = [["Index", "Class name", "AP"]]
# for i, c in enumerate(ap_class):
# ap_table += [[c, class_names[c], "%.5f" % AP[i]]]
# Logger.info(AsciiTable(ap_table).table)
Logger.info(f"---- mAP {AP.mean()}")
# assert False
elif self.config_type == "faster":
if type(self.nn_model._model) == dict:
state_dict = self.nn_model._model["model"]
base_model = FasterRCNNVGG16()
model = FasterRCNNTrainer(base_model)
self.nn_model._model = model
self.nn_model._model.load_state_dict(state_dict)
self.nn_model._model.to(self.device)
self.nn_model._model.eval()
testset = TestDataset(opt)
test_dataloader = data_.DataLoader(testset,
batch_size=1,
num_workers=1,
shuffle=False, pin_memory=True)
pred_bboxes, pred_labels, pred_scores = list(), list(), list()
gt_bboxes, gt_labels, gt_difficults = list(), list(), list()
for ii, (imgs, sizes, gt_bboxes_, gt_labels_, gt_difficults_) in enumerate(test_dataloader):
sizes = [sizes[0][0].item(), sizes[1][0].item()]
pred_bboxes_, pred_labels_, pred_scores_ = self.nn_model._model.faster_rcnn.predict(imgs, [sizes])
gt_bboxes += list(gt_bboxes_.numpy())
gt_labels += list(gt_labels_.numpy())
gt_difficults += list(gt_difficults_.numpy())
pred_bboxes += pred_bboxes_
pred_labels += pred_labels_
pred_scores += pred_scores_
result = eval_detection_voc(
pred_bboxes, pred_labels, pred_scores,
gt_bboxes, gt_labels, gt_difficults,
use_07_metric=True)
Logger.info(f"---- test result {result}")
else:
data = self.data_converter.convert(data_inst, batch_size=self.batch_size)
predict = self.nn_model.predict(data)
num_output_units = data.get_shape()[1]
threshold = self.param.predict_param.threshold
if num_output_units[0] == 1:
kv = [(x[0], (0 if x[1][0] <= threshold else 1, x[1][0].item())) for x in zip(data.get_keys(), predict)]
pred_tbl = session.parallelize(kv, include_key=True)
return data_inst.join(pred_tbl, lambda d, pred: [d.label, pred[0], pred[1], {"label": pred[0]}])
else:
kv = [(x[0], (x[1].argmax(), [float(e) for e in x[1]])) for x in zip(data.get_keys(), predict)]
pred_tbl = session.parallelize(kv, include_key=True)
return data_inst.join(pred_tbl,
lambda d, pred: [d.label, pred[0].item(),
pred[1][pred[0]] / (sum(pred[1])),
{"raw_predict": pred[1]}])
def load_model(self, model_dict):
model_dict = list(model_dict["model"].values())[0]
model_obj = _extract_param(model_dict)
meta_obj = _extract_meta(model_dict)
self.model_param.restore_from_pb(meta_obj.params)
self._init_model(self.model_param)
self.aggregate_iteration_num = meta_obj.aggregate_iter
self.nn_model = restore_nn_model(self.config_type, model_obj.saved_model_bytes)
# server: Arbiter, clients: Guest and Hosts
class HomoNNDefaultTransVar(HomoTransferBase):
def __init__(self, server=(consts.ARBITER,), clients=(consts.GUEST, consts.HOST), prefix=None):
super().__init__(server=server, clients=clients, prefix=prefix)
self.secure_aggregator_trans_var = SecureAggregatorTransVar(server=server, clients=clients, prefix=self.prefix)
self.loss_scatter_trans_var = LossScatterTransVar(server=server, clients=clients, prefix=self.prefix)
self.has_converged_trans_var = HasConvergedTransVar(server=server, clients=clients, prefix=self.prefix)
class HomoNNDefaultClient(HomoNNClient):
def __init__(self):
super().__init__(trans_var=HomoNNDefaultTransVar())
class HomoNNDefaultServer(HomoNNServer):
def __init__(self):
super().__init__(trans_var=HomoNNDefaultTransVar())
# server: Arbiter, clients: Guest and Hosts
class HomoNNGuestServerTransVar(HomoNNDefaultTransVar):
def __init__(self, server=(consts.GUEST,), clients=(consts.HOST,), prefix=None):
super().__init__(server=server, clients=clients, prefix=prefix)
class HomoNNGuestServerClient(HomoNNClient):
def __init__(self):
super().__init__(trans_var=HomoNNGuestServerTransVar())
class HomoNNGuestServerServer(HomoNNServer):
def __init__(self):
super().__init__(trans_var=HomoNNGuestServerTransVar())
# server: Arbiter, clients: Hosts
class HomoNNArbiterSubmitTransVar(HomoNNDefaultTransVar):
def __init__(self, server=(consts.ARBITER,), clients=(consts.HOST,), prefix=None):
super().__init__(server=server, clients=clients, prefix=prefix)
class HomoNNArbiterSubmitClient(HomoNNClient):
def __init__(self):
super().__init__(trans_var=HomoNNArbiterSubmitTransVar())
class HomoNNArbiterSubmitServer(HomoNNServer):
def __init__(self):
super().__init__(trans_var=HomoNNArbiterSubmitTransVar())
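# The three TransVar variants above differ only in the role layout: the default uses
# Arbiter as aggregation server with Guest and Hosts as clients, the second makes
# Guest the server for Host clients, and the third keeps Arbiter with Host-only clients.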
# ---- /week4/project/core/models.py (Aiganymus/django2019) ----
from datetime import datetime
from django.db import models
from core.constants import TASK_TYPES, TASK_NEW
from user.models import MainUser
class Project(models.Model):
name = models.CharField(max_length=50)
description = models.TextField(max_length=300, blank=True, default='')
creator = models.ForeignKey(MainUser, on_delete=models.CASCADE, related_name='created_projects')
created_at = models.DateField(auto_now_add=True)
class Meta:
db_table = 'project'
ordering = ('created_at', )
def __str__(self):
return self.name + '\n' + self.description
def get_short_description(self):
return self.description[: 10] + '...'
class ProjectMember(models.Model):
user = models.ForeignKey(MainUser, on_delete=models.CASCADE, related_name='assigned_projects')
project = models.ForeignKey(Project, on_delete=models.CASCADE, related_name='members')
joined_at = models.DateField(auto_now_add=True)
class Meta:
db_table = 'project_member'
ordering = ('joined_at', )
def __str__(self):
return self.project.name + ' member ' + self.user.profile.full_name
class NewTasksBlockManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(type=TASK_NEW)
def new_tasks(self):
return self.filter(type=TASK_NEW)
    def filter_by_type(self, task_type):
        return self.filter(type=task_type)
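# Usage sketch: Block.new_tasks_block.all() yields only TASK_NEW blocks because
# get_queryset() is pre-filtered, while Block.objects stays unfiltered.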
class Block(models.Model):
name = models.CharField(max_length=20)
type = models.PositiveSmallIntegerField(choices=TASK_TYPES, default=TASK_NEW)
project = models.ForeignKey(Project, on_delete=models.CASCADE, related_name='blocks')
objects = models.Manager()
new_tasks_block = NewTasksBlockManager()
class Meta:
db_table = 'block'
ordering = ('type', )
def __str__(self):
return self.name + ' ' + self.project.name
class Task(models.Model):
name = models.CharField(max_length=100)
description = models.CharField(max_length=300, blank=True)
order = models.IntegerField()
creator = models.ForeignKey(MainUser, on_delete=models.CASCADE, related_name='created_tasks')
executor = models.ForeignKey(MainUser, on_delete=models.CASCADE, related_name='assigned_tasks')
block = models.ForeignKey(Block, on_delete=models.CASCADE, related_name='tasks')
created_at = models.DateField(auto_now_add=True)
class Meta:
db_table = 'task'
unique_together = ('block', 'order', )
ordering = ('block', 'order', )
def __str__(self):
return self.name + '\n' + self.description
@classmethod
def get_recent_projects(cls):
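        # note: despite the name, this returns Task objects created today, not projects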
today = datetime.now()
return cls.objects.filter(created_at__day=today.day,
created_at__month=today.month,
created_at__year=today.year)
class TaskDocument(models.Model):
document = models.FileField()
task = models.ForeignKey(Task, on_delete=models.CASCADE, related_name='documents')
creator = models.ForeignKey(MainUser, on_delete=models.CASCADE, related_name='uploaded_documents')
created_at = models.DateField(auto_now_add=True)
class Meta:
db_table = 'task_document'
ordering = ('created_at', )
@staticmethod
def get_doc_len(doc):
return len(doc.document)
class TaskComment(models.Model):
body = models.TextField(max_length=200)
creator = models.ForeignKey(MainUser, on_delete=models.CASCADE, related_name='tasks_comments')
task = models.ForeignKey(Task, on_delete=models.CASCADE, related_name='comments')
created_at = models.DateField(auto_now_add=True)
class Meta:
db_table = 'task_comment'
ordering = ('created_at', )
def __str__(self):
return self.creator.username + ': ' + self.body
# ---- /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4148/codes/1585_1015.py (JosephLevinthal/Research-projects) ----
# Test your code in small steps.
# Don't test everything only at the end; that makes errors harder to pin down.
# Don't be intimidated by error messages. They help you fix your code.
a = int(input("digite um valor a: "))
b = int(input("digite um valor b: "))
c = int(input("digite um valor c: "))
print(min(a, b, c), a+b+c-min(a, b, c)-max(a, b, c), max(a, b, c))
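# The middle value is recovered as (a + b + c) - min - max, so the three inputs are
# printed in ascending order without sorting.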
# ---- /zodiac.py (vidyanathan/tele-butt) ----
from datetime import date
def getSign(day, month):
year = 2020
temp = date(year, month,day)
if date(year, 3, 21) <= temp <= date(year, 4, 20):
return "aries"
elif date(year, 4, 21) <= temp <= date(year, 5, 21):
return "taurus"
elif date(year, 5, 22) <= temp <= date(year, 6, 21):
return "gemini"
elif date(year, 6, 22) <= temp <= date(year, 7, 22):
return "cancer"
elif date(year, 7, 23) <= temp <= date(year, 8, 23):
return "leo"
elif date(year, 8, 24) <= temp <= date(year, 9, 22):
return "virgo"
elif date(year, 9, 23) <= temp <= date(year, 10, 23):
return "libra"
elif date(year, 10, 24) <= temp <= date(year, 11, 22):
return "scorpio"
elif date(year, 11, 23) <= temp <= date(year, 12, 21):
return "sagittarius"
elif date(year, 12, 22) <= temp <= date(year, 12, 31):
return "capricorn"
elif date(year, 1, 1) <= temp <= date(year, 1, 20):
return "capricorn"
elif date(year, 1, 21) <= temp <= date(year, 2, 18):
return "aquarius"
elif date(year, 2, 19) <= temp <= date(year, 3, 20):
return "pisces"
#(getSign(25, 12))
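# Example: getSign(25, 12) -> "capricorn". The fixed year 2020 only anchors the
# (day, month) comparisons; being a leap year, it also accepts February 29.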
# ---- /bin/oldScripts/bokeh_deneme.py (ahmetrifaioglu/PyTorch) ----
from numpy import linspace
import numpy as np
from scipy.stats.kde import gaussian_kde
from bokeh.io import output_file, show
from bokeh.models import ColumnDataSource, FixedTicker, PrintfTickFormatter
from bokeh.plotting import figure
from bokeh.sampledata.perceptions import probly
import colorcet as cc
output_file("ridgeplot.html")
def ridge(category, data, scale=20):
return list(zip([category]*len(data), scale*data))
cats = list(reversed(probly.keys()))
palette = [cc.rainbow[i*15] for i in range(17)]
x = linspace(-20,110, 500)
source = ColumnDataSource(data=dict(x=x))
p = figure(y_range=cats, plot_width=700, x_range=(-5, 105), toolbar_location=None)
for i, cat in enumerate(reversed(cats)):
    # Restores the KDE ridge of the stock example: the random-normal experiment that was
    # here produced 1000 samples against the 500-point 'x' column, so the patch could not render.
    pdf = gaussian_kde(probly[cat])
    y = ridge(cat, pdf(x))
    source.add(y, cat)
    p.patch('x', cat, color=palette[i], alpha=0.6, line_color="black", source=source)
p.outline_line_color = None
p.background_fill_color = "#efefef"
p.xaxis.ticker = FixedTicker(ticks=list(range(0, 101, 10)))
p.xaxis.formatter = PrintfTickFormatter(format="%d%%")
p.ygrid.grid_line_color = None
p.xgrid.grid_line_color = "#dddddd"
p.xgrid.ticker = p.xaxis[0].ticker
p.axis.minor_tick_line_color = None
p.axis.major_tick_line_color = None
p.axis.axis_line_color = None
p.y_range.range_padding = 0.12
show(p)
# ---- /testando.py (emesonfilho/dashboard_stocks) ----
import investpy as inv
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import json
from dashboard import dados_dowload
# ---- /Authors.py (JoshKallagunta/ITEC-2905-Lab-1) ----
class Author:
    def __init__(self, name):
        self.name = name
        self.books = []
    def publish(self, book_title):
        self.books.append(book_title)
    def __str__(self):  # lets print(author) show the name and published titles
        return '{}: {}'.format(self.name, ', '.join(self.books) if self.books else 'no books published')
def main():
josh = Author('Josh Kallagunta')
josh.publish('Book 1')
josh.publish('Book 2')
print(josh)
jake = Author('Jake')
print(jake)
main()
# ---- /inasilentway/last.py (davidmiller/inasilentway) ----
# """
# interact with Last.fm
# """
# import datetime
# import time
# import pylast
# from inasilentway import utils
# LASTFM_CORRECTIONS = {
# # Sometimes Last.fm "corrects" album titles.
# # Where these are different to Discogs album title versions this means we
# # are unable to match the scrobbles to the album.
# #
# # We keep a mapping of known corrections here so we can move between them
# #
# 'album': {
# 'Desafinado: Bossa Nova & Jazz Samba': 'Desafinado Coleman Hawkins Plays Bossa Nova & Jazz Samba', # noqa
# 'Oscar Peterson Plays the Duke Ellington Songbook': 'The Duke Ellington Songbook', # noqa
# 'Standard Time Vol.2 - Intimacy Calling': 'Standard Time Vol. 2 (Intimacy Calling)', # noqa
# 'White Light/White Heat': 'White Light / White Heat',
# },
# 'artist': {
# 'Duke Ellington & His Orchestra': 'Duke Ellington And His Orchestra'
# }
# }
# SPOTIFY_EQUIVALENTS = {
# 'album': {
# 'Way Out West (OJC Remaster)': 'Way Out West',
# "Workin' (RVG Remaster)": "Workin' With The Miles Davis Quintet"
# }
# }
# def get_lastfm():
# utils.setup_django()
# from django.conf import settings
# lastfm = pylast.LastFMNetwork(
# api_key=settings.LASTFM_API_KEY,
# api_secret=settings.LASTFM_SECRET,
# username=settings.LASTFM_USER,
# password_hash=settings.LASTFM_PASS
# )
# return lastfm
# def scrobble_django_record(record, when):
# # TODO: Rename this
# start_time = time.mktime(when.timetuple())
# tracks = []
# for track in record.track_set.all():
# tracks.append(
# {
# 'artist' : record.artist.first().name,
# 'title' : track.title,
# 'album' : record.title,
# 'timestamp': start_time
# }
# )
# if track.duration:
# mins, secs = track.duration.split(':')
# seconds = int(secs) + (int(mins) * 60)
# start_time += seconds
# else:
# start_time += (3 * 60) + 41
# lastfm = get_lastfm()
# lastfm.scrobble_many(tracks)
# def get_scrobbles():
# """
# Get this user's scrobbles. All of them.
# """
# from django.conf import settings
# from inasilentway.models import Scrobble
# lastfm = get_lastfm()
# if Scrobble.objects.count() > 0:
# # oldest = Scrobble.objects.all().order_by('timestamp').first()
# # oldest_scrobble = oldest.timestamp
# newest = Scrobble.objects.all().order_by('timestamp').last()
# print(newest.title)
# newest_scrobble = newest.timestamp
# user = lastfm.get_user(settings.LASTFM_USER)
# tracks = user.get_recent_tracks(limit=1000, time_from=newest_scrobble)
# print(tracks)
# return tracks
# def save_scrobbles(scrobbles):
# """
# Give some scrobbles, save them to the database
# """
# from inasilentway.models import Scrobble
# from django.utils import timezone
# for scrobble in scrobbles:
# scrob, _ = Scrobble.objects.get_or_create(
# timestamp=scrobble.timestamp,
# title=scrobble.track.title
# )
# scrob.artist = scrobble.track.artist
# scrob.album = scrobble.album
# scrob.timestamp = int(scrobble.timestamp)
# format_string = '%d %b %Y, %H:%M'
# scrob.datetime = timezone.make_aware(
# datetime.datetime.strptime(scrobble.playback_date, format_string)
# )
# scrob.save()
# def save_scrobbles_since(when):
# from django.conf import settings
# lastfm = get_lastfm()
# user = lastfm.get_user(settings.LASTFM_USER)
# tracks = user.get_recent_tracks(
# limit=1000, time_from=time.mktime(when.timetuple()))
# save_scrobbles(tracks)
# def load_scrobbles(args):
# """
# Commandline entrypoint to get scrobbles and print them (warning, big)
# """
# utils.setup_django()
# scrobbles = get_scrobbles()
# save_scrobbles(scrobbles)
# from inasilentway.models import Scrobble
# print('We have {} scrobbles'.format(Scrobble.objects.count()))
# if len(scrobbles) > 0:
# print('Going again')
# load_scrobbles(args)
# def match_artist(artist_name):
# """
# Given the name of an artist, return a matching Artist
# or None
# """
# from inasilentway.models import Artist
# artist_matches = Artist.objects.filter(
# name__iexact=artist_name
# )
# if len(artist_matches) == 0:
# if artist_name in LASTFM_CORRECTIONS['artist']:
# return match_artist(LASTFM_CORRECTIONS['artist'][artist_name])
# return None
# if len(artist_matches) == 1:
# return artist_matches[0]
# if len(artist_matches) > 1:
# import pdb; pdb.set_trace() # noqa
# print(artist_matches)
# def match(artist, album, title):
# """
# Return matches if we have them or none
# """
# from inasilentway.models import Record
# match_album = None
# match_track = None
# matching_artist = match_artist(artist)
# if album in LASTFM_CORRECTIONS['album']:
# album = LASTFM_CORRECTIONS['album'][album]
# album_matches = Record.objects.filter(
# title__iexact=album,
# artist=matching_artist
# )
# if len(album_matches) == 0:
# return matching_artist, match_album, match_track
# if len(album_matches) == 1:
# match_album = album_matches[0]
# if len(album_matches) > 1:
# # E.g. Billie holiday with many "all or nothing at all" albums
# return matching_artist, match_album, match_track
# for track in match_album.track_set.all():
# if track.title.lower() == title.lower():
# match_track = track
# return matching_artist, match_album, match_track
# def make_links(scrobbles):
# print('{} unlinked scrobbles'.format(scrobbles.count()))
# matches_added = 0
# for scrobble in scrobbles:
# matches = match(scrobble.artist, scrobble.album, scrobble.title)
# artist, album, track = matches
# if track:
# scrobble.isw_track = track
# if artist:
# scrobble.isw_album = album
# matches_added += 1
# if album:
# scrobble.isw_artist = artist
# if any([track, artist, album]):
# scrobble.save()
# else:
# print('No matches, next')
# continue
# print('This run added {} album matches'.format(matches_added))
# def link_scrobbles(args):
# """
# Commandline entrypoint to get scrobbles and link them with
# records in our collection
# """
# utils.setup_django()
# from inasilentway.models import Scrobble
# unlinked = Scrobble.objects.filter(
# isw_album__isnull=True,
# album__isnull=False
# )
# make_links(unlinked)
# ---- /chunk_traj_proc.py (fischcheng/CMS_postproc) ----
'''
2016/5/24 Auto chunking for large/eddy separation since single sequence runs too slow.
2016/5/31 modified for HRC07m2d
2016/6/13 modified for HRC07m2d
2016/6/23 modified for shuffle experiment
2016/7/11 modified for chunk traj processing.
Yu Cheng
'''
import os
import glob
# User-defined variables, experiment and so on
case='HRC07p2d'
shuf='shift2'
# Directories
parent='/scratch/projects/cpp/ycheng/CMSexpt/Shuffle/'+shuf
#inputchunk='/scratch/projects/cpp/ycheng/CMSexpt/HRC07m2d/input_HRC07m2d_chunk'
flist=sorted(glob.glob('/projects/rsmas/kirtman/ycheng/rawCMS/ACTmatfiles/HRC07p2d/'+shuf+'/vertTrans_ACTvoltrans_HRC07p2d_*.mat'))
#glob.glob('HRIE_10ATM_01.cam2.1.h0.*.nc') #121-163, ensemble member 1-9
last=2
num=2
for i in range(last,last+num):
os.chdir(parent)
cmd='cp -f ./postprocess_tool_ascii/traj_proc_update.m proc_traj_chunk'+str(i+1).zfill(2)+'.m'
os.system(cmd) # copy the set of separation scr
cmd2='cp -f ./postprocess_tool_ascii/proc_sub proc_sub_chunk'+str(i+1).zfill(2)
os.system(cmd2)
# Find number of trajectory files
trajoutput='expt_'+case+'_'+shuf+'_chunk'+str(i+1).zfill(2)+'/output'
num_traj=len(os.listdir(trajoutput))
# Modify the proc_traj_chunk.m file
    # slice the start date (YYYY-MM-DD) and start year out of the fixed-width .mat path
    Sdate=flist[i][97:107]
    YYs=flist[i][97:101]
    Yend=int(YYs)+9
old_exptname="exptname='HRC07p2d_shift2_chunk01'"
new_exptname="exptname='"+case+'_'+shuf+'_chunk'+str(i+1).zfill(2)+"'"
old_numtraj='num_traj=64'
new_numtraj='num_traj='+str(num_traj)
old_shuf="shuf='shift2'"
new_shuf="shuf='"+shuf+"'"
old_stdate="stdate='1951-01-01'"
new_stdate="stdate='"+Sdate+"'"
old_endate="endate='1960-12-31'"
new_endate="endate='"+str(Yend)+"-12-31'"
    # apply all five substitutions in a single read/modify/write pass
    mfile = 'proc_traj_chunk'+str(i+1).zfill(2)+'.m'
    text = open(mfile).read()
    for old, new in [(old_exptname, new_exptname), (old_numtraj, new_numtraj),
                     (old_stdate, new_stdate), (old_endate, new_endate),
                     (old_shuf, new_shuf)]:
        text = text.replace(old, new)
    open(mfile, "w").write(text)
# Modify the submit script
old_chunk='chunk01'
new_chunk='chunk'+str(i+1).zfill(2)
text = open('proc_sub_chunk'+str(i+1).zfill(2)).read()
open('proc_sub_chunk'+str(i+1).zfill(2),"w").write(text.replace(old_chunk,new_chunk))
# ---- /Backgammon/mcts_alphaZero.py (krayc425/MobileLab) ----
# -*- coding: utf-8 -*-
"""
Monte Carlo Tree Search in AlphaGo Zero style, which uses a policy-value
network to guide the tree search and evaluate the leaf nodes
@author: Junxiao Song
"""
import numpy as np
import copy
def softmax(x):
probs = np.exp(x - np.max(x))
probs /= np.sum(probs)
return probs
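# Worked example: softmax(np.array([1.0, 2.0, 3.0])) ~ [0.090, 0.245, 0.665];
# subtracting np.max(x) first avoids overflow in np.exp without changing the result.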
class TreeNode(object):
"""A node in the MCTS tree.
Each node keeps track of its own value Q, prior probability P, and
its visit-count-adjusted prior score u.
"""
def __init__(self, parent, prior_p):
self._parent = parent
self._children = {} # a map from action to TreeNode
self._n_visits = 0
self._Q = 0
self._u = 0
self._P = prior_p
def expand(self, action_priors):
"""Expand tree by creating new children.
action_priors: a list of tuples of actions and their prior probability
according to the policy function.
"""
for action, prob in action_priors:
if action not in self._children:
self._children[action] = TreeNode(self, prob)
def select(self, c_puct):
"""Select action among children that gives maximum action value Q
plus bonus u(P).
Return: A tuple of (action, next_node)
"""
return max(self._children.items(),
key=lambda act_node: act_node[1].get_value(c_puct))
def update(self, leaf_value):
"""Update node values from leaf evaluation.
leaf_value: the value of subtree evaluation from the current player's
perspective.
"""
# Count visit.
self._n_visits += 1
# Update Q, a running average of values for all visits.
self._Q += 1.0*(leaf_value - self._Q) / self._n_visits
def update_recursive(self, leaf_value):
"""Like a call to update(), but applied recursively for all ancestors.
"""
# If it is not root, this node's parent should be updated first.
if self._parent:
self._parent.update_recursive(-leaf_value)
self.update(leaf_value)
def get_value(self, c_puct):
"""Calculate and return the value for this node.
It is a combination of leaf evaluations Q, and this node's prior
adjusted for its visit count, u.
c_puct: a number in (0, inf) controlling the relative impact of
value Q, and prior probability P, on this node's score.
"""
self._u = (c_puct * self._P *
np.sqrt(self._parent._n_visits) / (1 + self._n_visits))
return self._Q + self._u
def is_leaf(self):
"""Check if leaf node (i.e. no nodes below this have been expanded)."""
return self._children == {}
def is_root(self):
return self._parent is None
class MCTS(object):
"""An implementation of Monte Carlo Tree Search."""
def __init__(self, policy_value_fn, c_puct=5, n_playout=10000):
"""
policy_value_fn: a function that takes in a board state and outputs
a list of (action, probability) tuples and also a score in [-1, 1]
(i.e. the expected value of the end game score from the current
player's perspective) for the current player.
c_puct: a number in (0, inf) that controls how quickly exploration
converges to the maximum-value policy. A higher value means
relying on the prior more.
"""
self._root = TreeNode(None, 1.0)
self._policy = policy_value_fn
self._c_puct = c_puct
self._n_playout = n_playout
def _playout(self, state):
"""Run a single playout from the root to the leaf, getting a value at
the leaf and propagating it back through its parents.
State is modified in-place, so a copy must be provided.
"""
node = self._root
        while True:
if node.is_leaf():
break
# Greedily select next move.
action, node = node.select(self._c_puct)
state.do_move(action)
# Evaluate the leaf using a network which outputs a list of
# (action, probability) tuples p and also a score v in [-1, 1]
# for the current player.
action_probs, leaf_value = self._policy(state)
# Check for end of game.
end, winner = state.game_end()
if not end:
node.expand(action_probs)
else:
                # for an end state, return the "true" leaf value
if winner == -1: # tie
leaf_value = 0.0
else:
leaf_value = (
1.0 if winner == state.get_current_player() else -1.0
)
# Update value and visit count of nodes in this traversal.
node.update_recursive(-leaf_value)
def get_move_probs(self, state, temp=1e-3):
"""Run all playouts sequentially and return the available actions and
their corresponding probabilities.
state: the current game state
temp: temperature parameter in (0, 1] controls the level of exploration
"""
for n in range(self._n_playout):
state_copy = copy.deepcopy(state)
self._playout(state_copy)
# calc the move probabilities based on visit counts at the root node
act_visits = [(act, node._n_visits)
for act, node in self._root._children.items()]
acts, visits = zip(*act_visits)
act_probs = softmax(1.0/temp * np.log(np.array(visits) + 1e-10))
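        # softmax(log(visits)/temp) equals visits**(1/temp) normalised: temp -> 0
        # sharpens toward the most-visited move; temp = 1 is proportional to visits.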
return acts, act_probs
def update_with_move(self, last_move):
"""Step forward in the tree, keeping everything we already know
about the subtree.
"""
if last_move in self._root._children:
self._root = self._root._children[last_move]
self._root._parent = None
else:
self._root = TreeNode(None, 1.0)
def __str__(self):
return "MCTS"
class MCTSPlayer(object):
"""AI player based on MCTS"""
def __init__(self, policy_value_function,
c_puct=5, n_playout=2000, is_selfplay=0):
self.mcts = MCTS(policy_value_function, c_puct, n_playout)
self._is_selfplay = is_selfplay
def set_player_ind(self, p):
self.player = p
def reset_player(self):
self.mcts.update_with_move(-1)
def get_action(self, board, temp=1e-3, return_prob=0):
sensible_moves = board.availables
# the pi vector returned by MCTS as in the alphaGo Zero paper
move_probs = np.zeros(board.width*board.height)
if len(sensible_moves) > 0:
acts, probs = self.mcts.get_move_probs(board, temp)
move_probs[list(acts)] = probs
if self._is_selfplay:
# add Dirichlet Noise for exploration (needed for
# self-play training)
move = np.random.choice(
acts,
p=0.75*probs + 0.25*np.random.dirichlet(0.3*np.ones(len(probs)))
)
# update the root node and reuse the search tree
self.mcts.update_with_move(move)
else:
# with the default temp=1e-3, it is almost equivalent
# to choosing the move with the highest prob
move = np.random.choice(acts, p=probs)
# reset the root node
self.mcts.update_with_move(-1)
# location = board.move_to_location(move)
# print("AI move: %d,%d\n" % (location[0], location[1]))
if return_prob:
return move, move_probs
else:
return move
else:
print("WARNING: the board is full")
def __str__(self):
return "MCTS {}".format(self.player)
| [
"[email protected]"
] | |
cd3ad0cf3d1730377cdd51d886b98fbe17ed2a13 | 2bd2ad4931c25491f54d5a8b1808f78e7dda1dbe | /salesman.py | a6482481626516b1f0c7122f210052ebf3a210e1 | [] | no_license | edilsonmassuete/EC016 | 08096c7634810c0de75e24a9f4b8e71ad0aaceff | ea4c75dc5cfa26e434fc5eb1fd142cc3bdf1dedc | refs/heads/master | 2021-01-11T00:07:36.571117 | 2015-12-03T12:12:35 | 2015-12-03T12:12:35 | 47,299,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,174 | py | # encoding:utf-8
'''
Solution for Travelling Salesman Problem using PSO (Particle Swarm Optimization)
Author: Marcos Castro
Adapted by: Edilson Massuete
'''
from operator import attrgetter
import random, sys, time, copy
# class that represents a graph
class Graph:
def __init__(self, amount_vertices):
self.edges = {} # dictionary of edges
self.vertices = set() # set of vertices
self.amount_vertices = amount_vertices # amount of vertices
	# adds an edge linking "src" to "dest" with a "cost"
def addEdge(self, src, dest, cost = 0):
# checks if the edge already exists
if not self.existsEdge(src, dest):
self.edges[(src, dest)] = cost
self.vertices.add(src)
self.vertices.add(dest)
	# checks if an edge exists linking "src" to "dest"
def existsEdge(self, src, dest):
return (True if (src, dest) in self.edges else False)
# returns total cost of the path
def getCostPath(self, path):
total_cost = 0
for i in range(self.amount_vertices - 1):
total_cost += self.edges[(path[i], path[i+1])]
# add cost of the last edge
total_cost += self.edges[(path[self.amount_vertices - 1], path[0])]
return total_cost
# gets random unique paths - returns a list of lists of paths
def getRandomPaths(self, max_size):
random_paths, list_vertices = [], list(self.vertices)
initial_vertice = random.choice(list_vertices)
if initial_vertice not in list_vertices:
print('Error: initial vertice %d not exists!' % initial_vertice)
sys.exit(1)
list_vertices.remove(initial_vertice)
list_vertices.insert(0, initial_vertice)
for i in range(max_size):
list_temp = list_vertices[1:]
random.shuffle(list_temp)
list_temp.insert(0, initial_vertice)
if list_temp not in random_paths:
random_paths.append(list_temp)
return random_paths
# class that represents a complete graph
class CompleteGraph(Graph):
# generates a complete graph
def generates(self):
for i in range(self.amount_vertices):
for j in range(self.amount_vertices):
if i != j:
weight = random.randint(1, 10)
self.addEdge(i, j, weight)
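# Usage sketch (illustrative): build a random complete graph and price one tour.
#
#   g = CompleteGraph(amount_vertices=5)
#   g.generates()
#   print(g.getCostPath(list(g.vertices)))  # cost of an arbitrary closed tour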
# class that represents a particle
class Particle:
def __init__(self, solution, cost):
# current solution
self.solution = solution
# best solution (fitness) it has achieved so far
self.pbest = solution
# set costs
self.cost_current_solution = cost
self.cost_pbest_solution = cost
		# velocity of a particle is a sequence of swap operators
		# each operator (i, j, p) swaps positions i and j of the solution with probability p
self.velocity = []
# set pbest
def setPBest(self, new_pbest):
self.pbest = new_pbest
# returns the pbest
def getPBest(self):
return self.pbest
# set the new velocity (sequence of swap operators)
def setVelocity(self, new_velocity):
self.velocity = new_velocity
# returns the velocity (sequence of swap operators)
def getVelocity(self):
return self.velocity
# set solution
def setCurrentSolution(self, solution):
self.solution = solution
# gets solution
def getCurrentSolution(self):
return self.solution
# set cost pbest solution
def setCostPBest(self, cost):
self.cost_pbest_solution = cost
# gets cost pbest solution
def getCostPBest(self):
return self.cost_pbest_solution
# set cost current solution
def setCostCurrentSolution(self, cost):
self.cost_current_solution = cost
# gets cost current solution
def getCostCurrentSolution(self):
return self.cost_current_solution
# removes all elements of the list velocity
def clearVelocity(self):
del self.velocity[:]
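# Worked example of the swap-operator "velocity" (illustrative):
# applying SO(0, 2) to the tour [3, 1, 2] swaps indices 0 and 2 -> [2, 1, 3];
# a velocity is a list of such operators, each applied with its probability.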
# PSO algorithm
class PSO:
def __init__(self, graph, iterations, size_population, beta=1, alfa=1):
self.graph = graph # the graph
self.iterations = iterations # max of iterations
self.size_population = size_population # size population
self.particles = [] # list of particles
self.beta = beta # the probability that all swap operators in swap sequence (gbest - x(t-1))
self.alfa = alfa # the probability that all swap operators in swap sequence (pbest - x(t-1))
# initialized with a group of random particles (solutions)
solutions = self.graph.getRandomPaths(self.size_population)
# checks if exists any solution
if not solutions:
print('Initial population empty! Try run the algorithm again...')
sys.exit(1)
# creates the particles and initialization of swap sequences in all the particles
for solution in solutions:
# creates a new particle
particle = Particle(solution=solution, cost=graph.getCostPath(solution))
# add the particle
self.particles.append(particle)
# updates "size_population"
self.size_population = len(self.particles)
# set gbest (best particle of the population)
def setGBest(self, new_gbest):
self.gbest = new_gbest
# returns gbest (best particle of the population)
def getGBest(self):
return self.gbest
# shows the info of the particles
def showsParticles(self):
print('Showing particles...\n')
for particle in self.particles:
print('pbest: %s\t|\tcost pbest: %d\t|\tcurrent solution: %s\t|\tcost current solution: %d' \
% (str(particle.getPBest()), particle.getCostPBest(), str(particle.getCurrentSolution()),
particle.getCostCurrentSolution()))
print('')
def run(self):
# for each time step (iteration)
for t in range(self.iterations):
# updates gbest (best particle of the population)
self.gbest = min(self.particles, key=attrgetter('cost_pbest_solution'))
# for each particle in the swarm
for particle in self.particles:
particle.clearVelocity() # cleans the speed of the particle
temp_velocity = []
solution_gbest = copy.copy(self.gbest.getPBest()) # gets solution of the gbest
solution_pbest = particle.getPBest()[:] # copy of the pbest solution
solution_particle = particle.getCurrentSolution()[:] # gets copy of the current solution of the particle
# generates all swap operators to calculate (pbest - x(t-1))
for i in range(self.graph.amount_vertices):
if solution_particle[i] != solution_pbest[i]:
# generates swap operator
swap_operator = (i, solution_pbest.index(solution_particle[i]), self.alfa)
# append swap operator in the list of velocity
temp_velocity.append(swap_operator)
# makes the swap
aux = solution_pbest[swap_operator[0]]
solution_pbest[swap_operator[0]] = solution_pbest[swap_operator[1]]
solution_pbest[swap_operator[1]] = aux
# generates all swap operators to calculate (gbest - x(t-1))
for i in range(self.graph.amount_vertices):
if solution_particle[i] != solution_gbest[i]:
# generates swap operator
swap_operator = (i, solution_gbest.index(solution_particle[i]), self.beta)
# append swap operator in the list of velocity
temp_velocity.append(swap_operator)
# makes the swap
aux = solution_gbest[swap_operator[0]]
solution_gbest[swap_operator[0]] = solution_gbest[swap_operator[1]]
solution_gbest[swap_operator[1]] = aux
# updates velocity
particle.setVelocity(temp_velocity)
# generates new solution for particle
for swap_operator in temp_velocity:
if random.random() <= swap_operator[2]:
# makes the swap
aux = solution_particle[swap_operator[0]]
solution_particle[swap_operator[0]] = solution_particle[swap_operator[1]]
solution_particle[swap_operator[1]] = aux
# updates the current solution
particle.setCurrentSolution(solution_particle)
# gets cost of the current solution
cost_current_solution = self.graph.getCostPath(solution_particle)
# updates the cost of the current solution
particle.setCostCurrentSolution(cost_current_solution)
# checks if current solution is pbest solution
if cost_current_solution < particle.getCostPBest():
particle.setPBest(solution_particle)
particle.setCostPBest(cost_current_solution)
if __name__ == "__main__":
	# Creating a graph according to the first image of the article.
graph = Graph(amount_vertices=6)
graph.addEdge(0, 1, 146)
graph.addEdge(1, 0, 146)
graph.addEdge(0, 2, 365)
graph.addEdge(2, 0, 365)
graph.addEdge(0, 3, 300)
graph.addEdge(3, 0, 300)
graph.addEdge(0, 4, 222)
graph.addEdge(4, 0, 222)
graph.addEdge(1, 2, 213)
graph.addEdge(2, 1, 213)
graph.addEdge(1, 3, 456)
graph.addEdge(3, 1, 456)
graph.addEdge(1, 4, 321)
graph.addEdge(4, 1, 321)
graph.addEdge(2, 3, 99)
graph.addEdge(3, 2, 99)
graph.addEdge(2, 4, 321)
graph.addEdge(4, 2, 321)
graph.addEdge(3, 4, 236)
graph.addEdge(4, 3, 236)
graph.addEdge(5, 0, 421)
graph.addEdge(0, 5, 421)
graph.addEdge(5, 3, 78)
graph.addEdge(3, 5, 78)
graph.addEdge(5, 4, 102)
graph.addEdge(4, 5, 102)
graph.addEdge(5, 1, 563)
graph.addEdge(1, 5, 563)
graph.addEdge(2, 5, 134)
graph.addEdge(5, 2, 134)
# creates a PSO instance
	# Tests were run with different numbers of iterations; results are in the article.
pso = PSO(graph, iterations=100, size_population=10, beta=1, alfa=0.1)
pso.run() # runs the PSO algorithm
pso.showsParticles() # shows the particles
# shows the global best particle
print('gbest: %s | cost: %d\n' % (pso.getGBest().getPBest(), pso.getGBest().getCostPBest()))
| [
"[email protected]"
] | |
42e1389f6954d28a78c7f5431a57049e087ba49b | c3ca05c5569393b10f8ebf735884291646bac1e8 | /random/annograms.py | 3668a2ce14601b12b1c30633ba6c001663cdc395 | [] | no_license | weeksghost/snippets | 318582bd8a9fa7603c4997fe25620170dc93377b | 99e118109779cde3b5c3d057baba34dbf3188cc6 | refs/heads/master | 2021-01-10T01:36:31.848065 | 2016-11-02T21:41:31 | 2016-11-02T21:41:31 | 36,667,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 932 | py | '''
Question:
----
Implement a function that uses a word list
to return the anagrams of a given word.
Answer:
----
'''
def annograms(word):
'''
    For every string in WORD.LST return a copy of the string with trailing whitespace removed
and assign it to the var 'words'
'''
    with open('WORD.LST') as word_file:
        words = [line.rstrip() for line in word_file]
'''
    Assign the sorted list of characters of arg (word) to var sword.
    Ex: yard would now read: ['a', 'd', 'r', 'y']
'''
sword = sorted(word)
'''
    Collect in 'annogram' every word from 'WORD.LST' whose
    sorted letters match the sorted arg word
'''
annogram = [ _ for _ in words if sorted(_) == sword ]
if annogram:
return annogram
else:
        raise ValueError('no anagrams found for %r' % word)
if __name__ == '__main__':
    print(annograms('yard'))
    print('--')
    print(annograms('drive'))
    print('--')
    print(annograms('python'))
| [
"[email protected]"
] | |
dea87f5906e21e06b8f57e83a9310e91e6af6e37 | c302376bf97171d64360316aaf955142d8f95dc4 | /code/config/param_2day_label_small_trainable25.py | f4d491eec5438587ab63b86e5968b4434a773b8a | [] | no_license | THODESAIPRAJWAL/candlestick_model | 98c8d406584f24ffec054dd5fb0e48c9d1612471 | 0f6198556285388756171c16a8ae329b0955e125 | refs/heads/master | 2023-03-15T06:05:15.249485 | 2020-08-20T18:58:32 | 2020-08-20T18:58:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,426 | py | import os
# suppress TensorFlow INFO-level log output
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import tensorflow as tf
from tensorflow import keras
def get_class_fine_tuning_parameter_base() -> dict:
"""
    Get a parameter sample for classification fine-tuning (Keras-style)
Returns:
dict: parameter sample generated by trial object
"""
my_IDG_options = {
"rescale": 1.0 / 255.0,
#'width_shift_range': 0.2,
#'height_shift_range': 0.2,
#'horizontal_flip': True,
#'vertical_flip': True,
#'shear_range': 20,
#'zoom_range': 0.2,
#'rotation_range': 20,
#'channel_shift_range': 50,
#'brightness_range': [0.3, 1.0],
# "random_erasing_prob": 0.5,
# "random_erasing_maxpixel": 255,
#'mix_up_alpha': 0.2,
#'random_crop': [224,224],
#'ricap_beta': 0.3,
#'ricap_use_same_random_value_on_batch': True,
"randaugment_N": 3,
"randaugment_M": 4,
#'is_kuzushiji_gen': True,
"cutmix_alpha": 1.0,
}
    ## options when using Augmentor
# train_augmentor_options = {
# 'input_width': 80,
# 'input_height': 80,
# 'random_dist_prob': 0.3,
# 'zoom_prob': 0.3,
# 'zoom_min': 0.5
# , 'zoom_max': 1.9
# , 'flip_left_right': 0.3
# , 'flip_top_bottom': 0.3
# , 'random_erasing_prob': 0.3
# , 'random_erasing_area': 0.3
# }
return {
"output_dir": r"D:\work\candlestick_model\output\model\ts_dataset_all_2day_label_small\Xception\_25",
"gpu_count": 1,
"img_rows": 80,
"img_cols": 80,
"channels": 3,
"batch_size": 256,
"classes": ["0", "1", "2"],
"num_classes": 3,
# "classes": ["0", "1"],
# "num_classes": 2,
# "train_data_dir": r"D:\work\candlestick_model\output\ts_dataset_all_2day_label_small\train",
# "validation_data_dir": r"D:\work\candlestick_model\output\ts_dataset_all_2day_label_small\test",
# "test_data_dir": r"D:\work\candlestick_model\output\ts_dataset_all_2day_label_small\test",
"train_data_dir": r"D:\work\candlestick_model\output\ts_dataset_all_2day_label_small_class2_reduce\train",
"validation_data_dir": r"D:\work\candlestick_model\output\ts_dataset_all_2day_label_small_class2_reduce\test",
"test_data_dir": r"D:\work\candlestick_model\output\ts_dataset_all_2day_label_small_class2_reduce\test",
"color_mode": "rgb",
"class_mode": "categorical", # generatorのラベルをone-hotベクトルに変換する場合。generatorのラベルを0か1のどちらかに変えるだけなら'binary'
"activation": "softmax",
# "loss": "categorical_crossentropy",
"loss": keras.losses.CategoricalCrossentropy(label_smoothing=0.1),
"metrics": [
"accuracy",
keras.metrics.Precision(name="precision"),
keras.metrics.Recall(name="recall"),
keras.metrics.AUC(name="auc"),
],
"model_path": None,
# "model_path": r"D:\work\candlestick_model\output\model\ts_dataset_all_2day_label\best_val_loss_20200727.h5",
"num_epoch": 3, # 200,
"n_multitask": 1, # マルチタスクのタスク数
"multitask_pred_n_node": 1, # マルチタスクの各クラス数
# model param
"weights": "imagenet",
"choice_model": "Xception",
# "choice_model": "model_paper",
"fcpool": "GlobalAveragePooling2D",
"is_skip_bn": False,
# "trainable": "all", # 249,
"trainable": 25,
"efficientnet_num": 3,
# full layer param
"fcs": [100],
"drop": 0.3,
"is_add_batchnorm": False, # True,
"l2_rate": 1e-4,
# optimizer param
"choice_optim": "sgd",
"lr": 1e-1,
"decay": 1e-5,
"my_IDG_options": my_IDG_options,
#'train_augmentor_options': train_augmentor_options,
"TTA": "", # 'flip',
"TTA_rotate_deg": 0,
"TTA_crop_num": 0,
"TTA_crop_size": [224, 224],
"preprocess": 1.0,
"resize_size": [100, 100],
"is_flow": False,
"is_flow_from_directory": True,
"is_flow_from_dataframe": False,
# "is_lr_finder": False,
"is_lr_finder": True,
"is_class_weight": True,
}
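# Minimal usage sketch (hypothetical):
#
#   params = get_class_fine_tuning_parameter_base()
#   print(params["choice_model"], params["batch_size"])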
| [
"[email protected]"
] | |
5876f0c0d68dd4f85f24210348646af4c796f8c2 | 3e48d5d460645e767bde0d4769ef97a679e94dbc | /manage.py | 65536e79f44b8aeafb0ea1bfca5bb6c7cbd7c3a2 | [] | no_license | Vabs3/Splitwise | aa1e20e26747a06fb7f561dfeee285369c75e2e2 | ab489cb41d8a50929b51ad76097b14cdd57d22a1 | refs/heads/master | 2022-08-03T17:49:28.477881 | 2020-05-25T06:09:51 | 2020-05-25T06:09:51 | 266,598,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Splitwise.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
fbc443913e03e766600e25b0b04391cdb8c0713e | f8846f9459e6ee07829602cfa61ce16022e7b422 | /mypy/sortdir.py | 8c9d947198217fe68610508bec316dd9899274d6 | [] | no_license | evgiz/fish_config | 3250489b70f74b79c3dcbebe1f08440fe23dd5bd | 46219ddea84f98d1aa52e89e107112c19e7f2186 | refs/heads/master | 2021-07-20T04:53:34.317070 | 2020-09-10T13:28:57 | 2020-09-10T13:28:57 | 213,622,075 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,604 | py |
# Sorts directory by category
# Used to sort downloads folder automatically
import os, sys
import shutil
CATEGORIES = {
"images": ["png", "jpg", "jpeg", "heic", "ico"],
"gifs": ["gif"],
"zips": ["zip", "tar", "gz", "bz2", "tbz2"],
"pdfs": ["pdf"],
"audio": ["wav", "mp3", "aiff"],
"dmgs": ["dmg"],
"office": ["docx", "pptx", "xlsx", "xlsm", "doc", "ppt", "xls", "dotx"],
"java": ["jar", "java"],
"text": ["txt", "csv", "log", "tex", "html", "md", "xml"],
"fonts": ["ttf", "otf"],
"apps": ["app"],
"script": ["py", "lua", "sql", "js", "fish"]
}
MAX_RECENT_FILES = 15
recent_files = []
# Paths
path = os.getcwd()
recent_path = os.path.join(path, "recent")
recent_file_path = os.path.join(path, ".recent")
all_path = os.path.join(path, "all")
def load_recent_files():
global recent_files
# Create .recent file
if not os.path.exists(recent_file_path):
with open(recent_file_path, "w") as f:
pass
# Load recent files
if os.path.exists(".recent"):
with open(recent_file_path, "r") as f:
lines = f.readlines()
for line in lines:
# Assert file still exists
if os.path.exists(os.path.join(path, line.strip())):
recent_files.append(line.strip())
def get_category(filename):
_, ext = os.path.splitext(filename)
ext = ext[1:].lower()
for cat in CATEGORIES:
if ext in CATEGORIES[cat]:
return cat
if os.path.isdir(os.path.join(path, filename)):
return "directories"
return "other"
def move_file(filename):
cat = get_category(filename)
# Make category dir if not exists
cat_path = os.path.join(all_path, cat)
if not os.path.exists(cat_path):
os.mkdir(cat_path)
# Filename split
fn, ext = os.path.splitext(filename)
# Dodge duplicates
move_name = filename
move_path = os.path.join(cat_path, filename)
number = 2
while os.path.exists(move_path):
move_name = fn+"_"+str(number)+ext
move_path = os.path.join(cat_path, move_name)
number += 1
if move_name != filename:
os.rename(filename, move_name)
# Move file
shutil.move(move_name, move_path)
def scan_root():
global recent_files
files = os.listdir(path)
new_files = [f for f in files if f not in recent_files]
for filename in new_files:
fn, ext = os.path.splitext(filename)
ext = ext[1:].lower()
# Ignore meta or downloads
if fn in ["all", ".recent", ".DS_Store"] or ext in ["download", "part"]:
continue
# Check if new file
print(f"Detected new file '{filename}', current recent = {len(recent_files)}")
# Remove oldest file in max limit
if len(recent_files) == MAX_RECENT_FILES:
oldest = recent_files.pop(0)
move_file(oldest)
print(f" ... moved oldest file {oldest}, now {len(recent_files)} remain")
# Add new file
recent_files.append(filename)
# Write recents file
print("\n .recent")
with open(recent_file_path, "w") as f:
for r in recent_files:
f.write(r+"\n")
print(f" {r}")
if __name__ == "__main__":
if len(sys.argv) < 2 or sys.argv[1] != "skip":
if input("Are you sure? [y/n]") not in ["Y","y"]:
print("Aborted")
exit(-1)
# Create directories
if not os.path.exists(all_path):
os.mkdir(all_path)
load_recent_files()
scan_root()
print("Done") | [
"[email protected]"
] | |
b45e2833b634df979dfe3276e9d41a87ff6681ce | 8e7d8e72902ad14c9cf40a5685ac363ac10e7f73 | /贪吃蛇.py | 4c55b9fbba5dbd329519ee688798750045d765cf | [] | no_license | zhr088168853/test | accf8bae86cd639125be103d9651c055ca2f5fb4 | 96e9f264b205ebfac2b205cbf8015931cd7071bb | refs/heads/master | 2022-12-11T03:26:42.454744 | 2020-08-15T02:53:03 | 2020-08-15T02:53:03 | 287,535,425 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,657 | py | import pygame, sys, random
from pygame.locals import *
# color settings
snake_color = pygame.Color("#8B7D1C")
food_color = pygame.Color("#8B0000")
background_color = pygame.Color("#BACB03")
text_color = pygame.Color("#EFEFEF")
speed = 5
# size of one grid cell in pixels
pixel = 15
line = 44
row = 36
window_width = pixel * line
window_high = pixel * row
point_left_up = [pixel * 2, pixel * 4]
point_left_down = [pixel * 2, pixel * (row - 2)]
point_right_up = [pixel * (line - 2), pixel * 4]
point_right_down = [pixel * (line - 2), pixel * (row - 2)]
# snake head position
snake_head = [pixel * 8, pixel * 8]
# snake body (5 segments extending left from the head)
snake_body = [[snake_head[0] - x * pixel, snake_head[1]] for x in range(5)]
# movement directions (degrees)
direction_right = 0
direction_up = 90
direction_left = 180
direction_down = 270
move = {direction_right: [pixel, 0], direction_left: [-pixel, 0],
direction_up: [0, -pixel], direction_down: [0, pixel]}
# score settings
score = 5
filename = 'db.txt'
def write_score(content):
with open(filename, 'w+') as f:
f.write(str(content))
def read_score():
    # open read-only; 'w+' would truncate the saved high score
    try:
        with open(filename, 'r') as f:
            result = f.readline()
    except FileNotFoundError:
        return 0
    return 0 if result.strip() == '' else int(result)
def init():
    # initialize pygame
pygame.init()
    # create a new window
my_screen = pygame.display.set_mode((window_width, window_high), 0, 32)
    # set the window title
pygame.display.set_caption("Greedy Snake")
return my_screen
# game over: save the high score and quit
def game_over(max_score, current_score):
if max_score < current_score:
write_score(current_score)
pygame.quit()
sys.exit()
screen = init()
time_clock = pygame.time.Clock()
# draw the border
def draw_box():
for point in [[point_left_up, point_right_up], [point_right_up, point_right_down],
[point_right_down, point_left_down], [point_left_down, point_left_up]]:
pygame.draw.line(screen, snake_color, point[0], point[1], 1)
def is_alive():
    # out of bounds -> game over
if (snake_head[0] < point_left_up[0] or snake_head[0] > (point_right_down[0] - pixel) or
snake_head[1] < point_left_up[1] or snake_head[1] > (point_right_down[1]) - pixel):
return False
    # head touches the body -> game over
if snake_head in snake_body:
return False
return True
# spawn food at a random position
def create_food():
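    # pick a grid-aligned cell inside the border that is not on the snake body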
while True:
        x = random.randint(point_left_up[0] // pixel, point_right_down[0] // pixel - 1) * pixel
        y = random.randint(point_left_up[1] // pixel, point_right_down[1] // pixel - 1) * pixel
if [x, y] not in snake_body:
break
return [x, y]
def draw_snake(food_position):
    # draw the snake
for point in snake_body:
pygame.draw.rect(screen, snake_color, Rect(point[0], point[1], pixel, pixel))
    # draw the food
pygame.draw.rect(screen, food_color, Rect(food_position[0], food_position[1], pixel, pixel))
def display_message(text, color, size, position):
font = pygame.font.Font(None, size)
text = font.render(text, True, color)
    screen.blit(text, position)
pygame.display.update()
# entry point: the main game loop
def run():
food_position = create_food()
max_score = read_score()
current_score = 0
is_dead = False
origin_direction = direction_right
target_direction = origin_direction
while True:
        # listen for key presses: quit OR change direction
for event in pygame.event.get():
if event.type == pygame.QUIT:
game_over(max_score, current_score)
if event.type == KEYDOWN:
                # arrow keys or WASD control the direction
if event.key == K_RIGHT or event.key == K_d:
target_direction = direction_right
if event.key == K_LEFT or event.key == K_a:
target_direction = direction_left
if event.key == K_UP or event.key == K_w:
target_direction = direction_up
if event.key == K_DOWN or event.key == K_s:
target_direction = direction_down
                # ESC quits
if event.key == K_ESCAPE:
game_over(max_score, current_score)
        # a turn is only valid when the angle between directions is 90 or 270 (no reversing)
angle = abs(origin_direction - target_direction)
if angle == 90 or angle == 270:
origin_direction = target_direction
if not is_dead:
snake_head[0] += move[origin_direction][0]
snake_head[1] += move[origin_direction][1]
if not is_dead and is_alive():
            # move one cell in origin_direction
snake_body.insert(0, list(snake_head))
            # respawn the food after it is eaten
if snake_head[0] == food_position[0] and snake_head[1] == food_position[1]:
food_position = create_food()
current_score += score
else:
                # drop the last segment (no food eaten this step)
snake_body.pop()
else:
is_dead = True
        # draw the background
screen.fill(background_color)
        # draw the border
draw_box()
        # draw the snake and the food
draw_snake(food_position)
        # refresh the display
pygame.display.update()
        # show current score / high score
display_message(f"{current_score}/{max_score}", text_color, 30, (pixel * 2, pixel * 2))
if is_dead:
display_message(f"Game Over", text_color, 50, (pixel * 16, pixel * 15))
        # control the game speed
time_clock.tick(speed)
if __name__ == '__main__':
run()
| [
"[email protected]"
] | |
3efab4aed123784165a00f01d13fd0339ec738f3 | 469aa7b1ec468ed7c22a31f4b0edeecfd660124d | /client/old.py | d93af52fa1169baf4db23e4c89c41b75e1e19b6a | [] | no_license | miksuk28/iskaffe-system | f90cb226ed86cbfe6d6dd1cfcff5a6659bc3df94 | 7dab3e3e0d2bccc886e4f8c64ff175c8c26aa55a | refs/heads/main | 2023-08-16T17:33:05.301733 | 2021-09-18T10:32:29 | 2021-09-18T10:32:29 | 404,483,735 | 0 | 0 | null | 2021-09-10T19:11:18 | 2021-09-08T20:18:09 | Python | UTF-8 | Python | false | false | 825 | py | import PySimpleGUI as sg
from PySimpleGUI.PySimpleGUI import Input
sg.theme("Reddit")
def login_window():
layout = [
[sg.Text("Username")],
[sg.Input(key="USERNAME", size=(40,1))],
[sg.Text("Password")],
[sg.Input(key="PASSWORD", size=(40,1), password_char="*")],
[sg.Button("Sign In", key="SIGN-IN", size=(10,1)), sg.Button("Exit", size=(10,1))]
]
window = sg.Window("Sign in", layout)
while True:
event, values = window.read()
if event == "Exit" or event == sg.WIN_CLOSED:
break
if event == "SIGN-IN":
if values["USERNAME"] == "" or values["PASSWORD"] == "":
sg.Popup("Please enter a valid username and password", keep_on_top=True)
window.close()
if __name__ == "__main__":
    login_window()
| [
"[email protected]"
] | |
13eef7b59c507152a25d96815aa093ded46399c1 | 092c36abbef360801c2e79346c7776a1341f30ed | /graficado/graficado_simple.py | eb18161ce5c9ac2d58c34448ddd9271920ff384d | [] | no_license | axvargas/algoritmos-python | 06cd500b111af9c5709920cb3654a11ae82c14f7 | cfa491035432f111bc4c8b2e50b8c7eb6a924049 | refs/heads/master | 2023-07-25T11:53:17.013716 | 2021-09-12T05:33:29 | 2021-09-12T05:33:29 | 405,555,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | '''
Created Date: Saturday September 11th 2021 11:24:18 pm
Author: Andrés X. Vargas
-----
Last Modified: Saturday September 11th 2021 11:33:07 pm
Modified By: the developer known as Andrés X. Vargas at <[email protected]>
-----
Copyright (c) 2021 MattuApps
'''
from bokeh.plotting import figure, output_file, show
if __name__ == "__main__":
output_file("graficado_simple.html")
fig = figure(title="Graficado Simple", x_axis_label='x', y_axis_label='y')
total_values = int(input("Ingrese el número de valores: "))
x_values = list(range(total_values))
y_values=[]
for i in range(total_values):
y_values.append(int(input(f"Ingrese el valor de y para el valor {i}: ")))
fig.line(x_values, y_values, legend_label="Linea 1", line_width=2)
    show(fig)
| [
"[email protected]"
] | |
29445b20446762745a98720d1ca878dc858f650c | 22d0d9d1acaa9a3652c563c31d6920503a489042 | /puck/stdlib.py | 4e31c852abf510085a4fe6a330867990451fc936 | [
"BSD-3-Clause"
] | permissive | pipermerriam/puck | 6b0d8cdcf3867794741075e0d086033101cdaef2 | 6d78be8867f68c9121d7f8eba4140591d3e6a179 | refs/heads/master | 2023-08-28T09:41:59.879612 | 2014-03-31T23:20:27 | 2014-03-31T23:20:27 | 18,305,777 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | import __builtin__
callable = __builtin__.callable
list = __builtin__.list
| [
"[email protected]"
] | |
fc39092389ac6e1d50011d70868744f38c9e1da6 | de97b23e15d3fb1258d9bd1bd980b9c869a2d38a | /canvasapi/__init__.py | 0d5f79f429efa1f4eabc2a3c0a42baf756963a05 | [
"MIT"
] | permissive | a-goetz/canvasapi | 8b0a109a00ad7d87794506d6907544539ae6857c | 20b62e4c9f7f5360a8be8b79579899c6e9804df4 | refs/heads/master | 2021-07-13T12:32:27.237673 | 2017-10-23T20:09:08 | 2017-10-23T20:09:08 | 96,371,191 | 1 | 0 | null | 2017-07-06T00:12:38 | 2017-07-06T00:12:37 | null | UTF-8 | Python | false | false | 190 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from canvasapi.canvas import Canvas
__all__ = ["Canvas"]
__version__ = '0.7.0'
| [
"[email protected]"
] | |
9814103765a376d4375c692490ba9df35f5da243 | b1983c893b2fa9fb71c6a1d398ba4a79cd79fa5b | /src/menu.py | f12df58e7392f4277bf84f2ba6236d01c3dc2d69 | [] | no_license | wmemorgan/Intro-Python-II | c3c02ff0ef3c9a6f5e063eac8648bf51d22bde93 | 9d7b8f3291bc8d5fae44bd91e49bf29a09d90755 | refs/heads/master | 2020-09-13T00:34:51.825597 | 2019-11-27T02:30:35 | 2019-11-27T02:30:35 | 222,607,051 | 0 | 0 | null | 2019-11-19T04:18:35 | 2019-11-19T04:18:34 | null | UTF-8 | Python | false | false | 1,646 | py | from design import Color
class Menu:
"""Game menu system"""
def __init__(self, name, options, instructions):
self.name = name
self.options = options
self.instructions = instructions
def show_menu(self):
size = max(len(k) + len(v) for k, v in self.options.items())
padding = 5
print(f"{Color.GREEN}{'=' * (size + padding)}")
for k, v in self.options.items():
print(f"| {k.upper()}: {v}{' ' * (size-(len(k) + len(v)))}|")
print(f"{'=' * (size + padding)}{Color.END}\n")
def get_selection(self):
selection = input(self.instructions).lower().strip()
return selection
main_menu_options = {
'm': 'Menu',
'i': 'Inventory Menu',
'd': 'Available direction(s) ',
'q': 'Quit the game',
}
main_menu = Menu("main", main_menu_options,
f"\nChoose a selection or press {Color.GREEN}{Color.BOLD}'m'{Color.END} for the menu: ")
inventory_menu = {
'i': 'Inventory Menu',
'p': 'Player inventory',
'r': 'Room inventory',
'm': 'Main menu',
'q': 'Quit the game'
}
inventory_menu = Menu("inventory", inventory_menu,
f"\nChoose from the inventory menu or press {Color.GREEN}{Color.BOLD}'m'{Color.END} to return to the main menu: ")
item_menu = {
'b': 'Previous Menu',
'p': 'Player inventory',
'r': 'Room inventory',
'get': 'Get [ITEM_NAME]',
'drop': 'Drop [ITEM_NAME]',
'q': 'Quit the game',
}
item_menu = Menu("item", item_menu,
f"\nGET or DROP an item or press {Color.GREEN}{Color.BOLD}'b'{Color.END} to return to the previous menu: ")
| [
"[email protected]"
] | |
ba478c6b41b71c5cb27fab0fafa51b0a08945d23 | 52a7271d9ce64ebb1a7ff406122d5738dbd3b4a9 | /python_modules/dagster/dagster/_core/definitions/definitions_class.py | 0f7574839c7e0021bf2dad64f5b7ebdcbd40c0b4 | [
"Apache-2.0"
] | permissive | jmsanders/dagster | 67f188f53ed2cbce7eebdda421e00dd799e42ec5 | 390032c4627730be5a4b57fea456e6060e5f3e69 | refs/heads/master | 2023-08-21T12:58:50.017148 | 2023-08-16T13:58:33 | 2023-08-16T13:58:33 | 313,052,917 | 2 | 1 | Apache-2.0 | 2021-01-12T15:59:57 | 2020-11-15T14:47:10 | null | UTF-8 | Python | false | false | 22,577 | py | from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Mapping,
NamedTuple,
Optional,
Sequence,
Type,
Union,
)
import dagster._check as check
from dagster._annotations import deprecated, experimental, public
from dagster._config.pythonic_config import (
attach_resource_id_to_key_mapping,
)
from dagster._core.definitions.asset_graph import InternalAssetGraph
from dagster._core.definitions.events import AssetKey, CoercibleToAssetKey
from dagster._core.definitions.executor_definition import ExecutorDefinition
from dagster._core.definitions.logger_definition import LoggerDefinition
from dagster._core.execution.build_resources import wrap_resources_for_execution
from dagster._core.execution.with_resources import with_resources
from dagster._core.executor.base import Executor
from dagster._core.instance import DagsterInstance
from dagster._utils.cached_method import cached_method
from .assets import AssetsDefinition, SourceAsset
from .cacheable_assets import CacheableAssetsDefinition
from .decorators import repository
from .job_definition import JobDefinition, default_job_io_manager
from .partitioned_schedule import UnresolvedPartitionedAssetScheduleDefinition
from .repository_definition import (
SINGLETON_REPOSITORY_NAME,
PendingRepositoryDefinition,
RepositoryDefinition,
)
from .schedule_definition import ScheduleDefinition
from .sensor_definition import SensorDefinition
from .unresolved_asset_job_definition import UnresolvedAssetJobDefinition
if TYPE_CHECKING:
from dagster._core.storage.asset_value_loader import AssetValueLoader
@public
@experimental
def create_repository_using_definitions_args(
name: str,
assets: Optional[
Iterable[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]
] = None,
schedules: Optional[
Iterable[Union[ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition]]
] = None,
sensors: Optional[Iterable[SensorDefinition]] = None,
jobs: Optional[Iterable[Union[JobDefinition, UnresolvedAssetJobDefinition]]] = None,
resources: Optional[Mapping[str, Any]] = None,
executor: Optional[Union[ExecutorDefinition, Executor]] = None,
loggers: Optional[Mapping[str, LoggerDefinition]] = None,
) -> Union[RepositoryDefinition, PendingRepositoryDefinition]:
"""Create a named repository using the same arguments as :py:class:`Definitions`. In older
versions of Dagster, repositories were the mechanism for organizing assets, schedules, sensors,
and jobs. There could be many repositories per code location. This was a complicated ontology but
    gave users a way to organize code locations that contained large numbers of heterogeneous definitions.
    As a stopgap for those who both want to 1) use the new :py:class:`Definitions` API and 2) still
want multiple logical groups of assets in the same code location, we have introduced this function.
Example usage:
.. code-block:: python
named_repo = create_repository_using_definitions_args(
name="a_repo",
assets=[asset_one, asset_two],
schedules=[a_schedule],
sensors=[a_sensor],
jobs=[a_job],
resources={
"a_resource": some_resource,
}
)
"""
return _create_repository_using_definitions_args(
name=name,
assets=assets,
schedules=schedules,
sensors=sensors,
jobs=jobs,
resources=resources,
executor=executor,
loggers=loggers,
)
class _AttachedObjects(NamedTuple):
jobs: Iterable[Union[JobDefinition, UnresolvedAssetJobDefinition]]
schedules: Iterable[Union[ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition]]
sensors: Iterable[SensorDefinition]
def _io_manager_needs_replacement(job: JobDefinition, resource_defs: Mapping[str, Any]) -> bool:
"""Explicitly replace the default IO manager in jobs that don't specify one, if a top-level
I/O manager is provided to Definitions.
"""
return (
job.resource_defs.get("io_manager") == default_job_io_manager
and "io_manager" in resource_defs
)
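# For example (illustrative): a job built without an explicit "io_manager"
# resource keeps `default_job_io_manager`, so passing a top-level
# `resources={"io_manager": ...}` to `Definitions` makes it a replacement candidate.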
def _jobs_which_will_have_io_manager_replaced(
jobs: Optional[Iterable[Union[JobDefinition, UnresolvedAssetJobDefinition]]],
resource_defs: Mapping[str, Any],
) -> List[Union[JobDefinition, UnresolvedAssetJobDefinition]]:
"""Returns whether any jobs will have their I/O manager replaced by an `io_manager` override from
the top-level `resource_defs` provided to `Definitions` in 1.3. We will warn users if this is
the case.
"""
jobs = jobs or []
return [
job
for job in jobs
if isinstance(job, JobDefinition) and _io_manager_needs_replacement(job, resource_defs)
]
def _attach_resources_to_jobs_and_instigator_jobs(
jobs: Optional[Iterable[Union[JobDefinition, UnresolvedAssetJobDefinition]]],
schedules: Optional[
Iterable[Union[ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition]]
],
sensors: Optional[Iterable[SensorDefinition]],
resource_defs: Mapping[str, Any],
) -> _AttachedObjects:
"""Given a list of jobs, schedules, and sensors along with top-level resource definitions,
attach the resource definitions to the jobs, schedules, and sensors which require them.
"""
jobs = jobs or []
schedules = schedules or []
sensors = sensors or []
# Add jobs in schedules and sensors as well
jobs = [
*jobs,
*[
schedule.job
for schedule in schedules
if isinstance(schedule, ScheduleDefinition)
and schedule.has_loadable_target()
and isinstance(schedule.job, (JobDefinition, UnresolvedAssetJobDefinition))
],
*[
job
for sensor in sensors
if sensor.has_loadable_targets()
for job in sensor.jobs
if isinstance(job, (JobDefinition, UnresolvedAssetJobDefinition))
],
]
# Dedupe
jobs = list({id(job): job for job in jobs}.values())
# Find unsatisfied jobs
unsatisfied_jobs = [
job
for job in jobs
if isinstance(job, JobDefinition)
and (
job.is_missing_required_resources() or _io_manager_needs_replacement(job, resource_defs)
)
]
# Create a mapping of job id to a version of the job with the resource defs bound
unsatisfied_job_to_resource_bound_job = {
id(job): job.with_top_level_resources(
{
**resource_defs,
**job.resource_defs,
# special case for IO manager - the job-level IO manager does not take precedence
# if it is the default and a top-level IO manager is provided
**(
{"io_manager": resource_defs["io_manager"]}
if _io_manager_needs_replacement(job, resource_defs)
else {}
),
}
)
for job in jobs
if job in unsatisfied_jobs
}
# Update all jobs to use the resource bound version
jobs_with_resources = [
unsatisfied_job_to_resource_bound_job[id(job)] if job in unsatisfied_jobs else job
for job in jobs
]
# Update all schedules and sensors to use the resource bound version
updated_schedules = [
(
schedule.with_updated_job(unsatisfied_job_to_resource_bound_job[id(schedule.job)])
if (
isinstance(schedule, ScheduleDefinition)
and schedule.has_loadable_target()
and schedule.job in unsatisfied_jobs
)
else schedule
)
for schedule in schedules
]
updated_sensors = [
(
sensor.with_updated_jobs(
[
(
unsatisfied_job_to_resource_bound_job[id(job)]
if job in unsatisfied_jobs
else job
)
for job in sensor.jobs
]
)
if sensor.has_loadable_targets() and any(job in unsatisfied_jobs for job in sensor.jobs)
else sensor
)
for sensor in sensors
]
return _AttachedObjects(jobs_with_resources, updated_schedules, updated_sensors)
def _create_repository_using_definitions_args(
name: str,
assets: Optional[
Iterable[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]
] = None,
schedules: Optional[
Iterable[Union[ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition]]
] = None,
sensors: Optional[Iterable[SensorDefinition]] = None,
jobs: Optional[Iterable[Union[JobDefinition, UnresolvedAssetJobDefinition]]] = None,
resources: Optional[Mapping[str, Any]] = None,
executor: Optional[Union[ExecutorDefinition, Executor]] = None,
loggers: Optional[Mapping[str, LoggerDefinition]] = None,
):
check.opt_iterable_param(
assets, "assets", (AssetsDefinition, SourceAsset, CacheableAssetsDefinition)
)
check.opt_iterable_param(
schedules, "schedules", (ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition)
)
check.opt_iterable_param(sensors, "sensors", SensorDefinition)
check.opt_iterable_param(jobs, "jobs", (JobDefinition, UnresolvedAssetJobDefinition))
check.opt_inst_param(executor, "executor", (ExecutorDefinition, Executor))
executor_def = (
executor
if isinstance(executor, ExecutorDefinition) or executor is None
else ExecutorDefinition.hardcoded_executor(executor)
)
# Generate a mapping from each top-level resource instance ID to its resource key
resource_key_mapping = {id(v): k for k, v in resources.items()} if resources else {}
# Provide this mapping to each resource instance so that it can be used to resolve
# nested resources
resources_with_key_mapping = (
{
k: attach_resource_id_to_key_mapping(v, resource_key_mapping)
for k, v in resources.items()
}
if resources
else {}
)
resource_defs = wrap_resources_for_execution(resources_with_key_mapping)
check.opt_mapping_param(loggers, "loggers", key_type=str, value_type=LoggerDefinition)
# Binds top-level resources to jobs and any jobs attached to schedules or sensors
(
jobs_with_resources,
schedules_with_resources,
sensors_with_resources,
) = _attach_resources_to_jobs_and_instigator_jobs(jobs, schedules, sensors, resource_defs)
@repository(
name=name,
default_executor_def=executor_def,
default_logger_defs=loggers,
_top_level_resources=resource_defs,
_resource_key_mapping=resource_key_mapping,
)
def created_repo():
return [
*with_resources(assets or [], resource_defs),
*(schedules_with_resources),
*(sensors_with_resources),
*(jobs_with_resources),
]
return created_repo
@deprecated(
breaking_version="2.0",
additional_warn_text=(
"Instantiations can be removed. Since it's behavior is now the default, this class is now a"
" no-op."
),
)
class BindResourcesToJobs(list):
"""Used to instruct Dagster to bind top-level resources to jobs and any jobs attached to schedules
and sensors. Now deprecated since this behavior is the default.
"""
class Definitions:
"""A set of definitions explicitly available and loadable by Dagster tools.
Parameters:
assets (Optional[Iterable[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]]):
A list of assets. Assets can be created by annotating
a function with :py:func:`@asset <asset>` or
:py:func:`@observable_source_asset <observable_source_asset>`.
Or they can by directly instantiating :py:class:`AssetsDefinition`,
:py:class:`SourceAsset`, or :py:class:`CacheableAssetsDefinition`.
schedules (Optional[Iterable[Union[ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition]]]):
List of schedules.
sensors (Optional[Iterable[SensorDefinition]]):
List of sensors, typically created with :py:func:`@sensor <sensor>`.
jobs (Optional[Iterable[Union[JobDefinition, UnresolvedAssetJobDefinition]]]):
List of jobs. Typically created with :py:func:`define_asset_job <define_asset_job>`
or with :py:func:`@job <job>` for jobs defined in terms of ops directly.
Jobs created with :py:func:`@job <job>` must already have resources bound
at job creation time. They do not respect the `resources` argument here.
resources (Optional[Mapping[str, Any]]): Dictionary of resources to bind to assets.
The resources dictionary takes raw Python objects,
not just instances of :py:class:`ResourceDefinition`. If that raw object inherits from
:py:class:`IOManager`, it gets coerced to an :py:class:`IOManagerDefinition`.
Any other object is coerced to a :py:class:`ResourceDefinition`.
These resources will be automatically bound
to any assets passed to this Definitions instance using
:py:func:`with_resources <with_resources>`. Assets passed to Definitions with
resources already bound using :py:func:`with_resources <with_resources>` will
override this dictionary.
executor (Optional[Union[ExecutorDefinition, Executor]]):
Default executor for jobs. Individual jobs can override this and define their own executors
by setting the executor on :py:func:`@job <job>` or :py:func:`define_asset_job <define_asset_job>`
explicitly. This executor will also be used for materializing assets directly
outside of the context of jobs. If an :py:class:`Executor` is passed, it is coerced into
an :py:class:`ExecutorDefinition`.
        loggers (Optional[Mapping[str, LoggerDefinition]]):
            Default loggers for jobs. Individual jobs
            can define their own loggers by setting them explicitly.
Example usage:
.. code-block:: python
defs = Definitions(
assets=[asset_one, asset_two],
schedules=[a_schedule],
sensors=[a_sensor],
jobs=[a_job],
resources={
"a_resource": some_resource,
}
)
    Dagster separates user-defined code from system tools such as the web server and
the daemon. Rather than loading code directly into process, a tool such as the
webserver interacts with user-defined code over a serialization boundary.
These tools must be able to locate and load this code when they start. Via CLI
arguments or config, they specify a Python module to inspect.
A Python module is loadable by Dagster tools if there is a top-level variable
that is an instance of :py:class:`Definitions`.
Before the introduction of :py:class:`Definitions`,
    :py:func:`@repository <repository>` was the API for organizing definitions.
:py:class:`Definitions` provides a few conveniences for dealing with resources
that do not apply to old-style :py:func:`@repository <repository>` declarations:
* It takes a dictionary of top-level resources which are automatically bound
(via :py:func:`with_resources <with_resources>`) to any asset passed to it.
If you need to apply different resources to different assets, use legacy
:py:func:`@repository <repository>` and use
:py:func:`with_resources <with_resources>` as before.
* The resources dictionary takes raw Python objects, not just instances
of :py:class:`ResourceDefinition`. If that raw object inherits from
:py:class:`IOManager`, it gets coerced to an :py:class:`IOManagerDefinition`.
Any other object is coerced to a :py:class:`ResourceDefinition`.
"""
def __init__(
self,
assets: Optional[
Iterable[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]
] = None,
schedules: Optional[
Iterable[Union[ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition]]
] = None,
sensors: Optional[Iterable[SensorDefinition]] = None,
jobs: Optional[Iterable[Union[JobDefinition, UnresolvedAssetJobDefinition]]] = None,
resources: Optional[Mapping[str, Any]] = None,
executor: Optional[Union[ExecutorDefinition, Executor]] = None,
loggers: Optional[Mapping[str, LoggerDefinition]] = None,
):
self._created_pending_or_normal_repo = _create_repository_using_definitions_args(
name=SINGLETON_REPOSITORY_NAME,
assets=assets,
schedules=schedules,
sensors=sensors,
jobs=jobs,
resources=resources,
executor=executor,
loggers=loggers,
)
@public
def get_job_def(self, name: str) -> JobDefinition:
"""Get a job definition by name. If you passed in a an :py:class:`UnresolvedAssetJobDefinition`
(return value of :py:func:`define_asset_job`) it will be resolved to a :py:class:`JobDefinition` when returned
from this function.
"""
check.str_param(name, "name")
return self.get_repository_def().get_job(name)
@public
def get_sensor_def(self, name: str) -> SensorDefinition:
"""Get a sensor definition by name."""
check.str_param(name, "name")
return self.get_repository_def().get_sensor_def(name)
@public
def get_schedule_def(self, name: str) -> ScheduleDefinition:
"""Get a schedule definition by name."""
check.str_param(name, "name")
return self.get_repository_def().get_schedule_def(name)
@public
def load_asset_value(
self,
asset_key: CoercibleToAssetKey,
*,
python_type: Optional[Type] = None,
instance: Optional[DagsterInstance] = None,
partition_key: Optional[str] = None,
metadata: Optional[Dict[str, Any]] = None,
) -> object:
"""Load the contents of an asset as a Python object.
Invokes `load_input` on the :py:class:`IOManager` associated with the asset.
If you want to load the values of multiple assets, it's more efficient to use
:py:meth:`~dagster.Definitions.get_asset_value_loader`, which avoids spinning up
resources separately for each asset.
Args:
asset_key (Union[AssetKey, Sequence[str], str]): The key of the asset to load.
python_type (Optional[Type]): The python type to load the asset as. This is what will
be returned inside `load_input` by `context.dagster_type.typing_type`.
partition_key (Optional[str]): The partition of the asset to load.
metadata (Optional[Dict[str, Any]]): Input metadata to pass to the :py:class:`IOManager`
(is equivalent to setting the metadata argument in `In` or `AssetIn`).
Returns:
The contents of an asset as a Python object.
"""
return self.get_repository_def().load_asset_value(
asset_key=asset_key,
python_type=python_type,
instance=instance,
partition_key=partition_key,
metadata=metadata,
)
@public
def get_asset_value_loader(
self, instance: Optional[DagsterInstance] = None
) -> "AssetValueLoader":
"""Returns an object that can load the contents of assets as Python objects.
Invokes `load_input` on the :py:class:`IOManager` associated with the assets. Avoids
spinning up resources separately for each asset.
Usage:
.. code-block:: python
with defs.get_asset_value_loader() as loader:
asset1 = loader.load_asset_value("asset1")
asset2 = loader.load_asset_value("asset2")
"""
return self.get_repository_def().get_asset_value_loader(
instance=instance,
)
def get_all_job_defs(self) -> Sequence[JobDefinition]:
"""Get all the Job definitions in the code location."""
return self.get_repository_def().get_all_jobs()
def has_implicit_global_asset_job_def(self) -> bool:
return self.get_repository_def().has_implicit_global_asset_job_def()
def get_implicit_global_asset_job_def(self) -> JobDefinition:
"""A useful conveninence method when there is a single defined global asset job.
This occurs when all assets in the code location use a single partitioning scheme.
If there are multiple partitioning schemes you must use get_implicit_job_def_for_assets
instead to access to the correct implicit asset one.
"""
return self.get_repository_def().get_implicit_global_asset_job_def()
def get_implicit_job_def_for_assets(
self, asset_keys: Iterable[AssetKey]
) -> Optional[JobDefinition]:
return self.get_repository_def().get_implicit_job_def_for_assets(asset_keys)
@cached_method
def get_repository_def(self) -> RepositoryDefinition:
"""Definitions is implemented by wrapping RepositoryDefinition. Get that underlying object
        in order to access any functionality which is not exposed on Definitions. This method
also resolves a PendingRepositoryDefinition to a RepositoryDefinition.
"""
return (
self._created_pending_or_normal_repo.compute_repository_definition()
if isinstance(self._created_pending_or_normal_repo, PendingRepositoryDefinition)
else self._created_pending_or_normal_repo
)
def get_inner_repository_for_loading_process(
self,
) -> Union[RepositoryDefinition, PendingRepositoryDefinition]:
"""This method is used internally to access the inner repository during the loading process
at CLI entry points. We explicitly do not want to resolve the pending repo because the entire
point is to defer that resolution until later.
"""
return self._created_pending_or_normal_repo
def get_asset_graph(self) -> InternalAssetGraph:
"""Get the AssetGraph for this set of definitions."""
return self.get_repository_def().asset_graph
| [
"[email protected]"
] | |
dbb5da8fa5cfcb7e869dd263729b1301b36b35ce | c6ed9aa97166d4778b89321b580af80c543bacc9 | /hackerrank/implementation/cavitymap.py | 941fc9b2ecf066a2f0a6fc08c4eb96a3d61420b7 | [] | no_license | bradyz/sandbox | 381bcaf2f3719dee142a00858f7062aeff98d1ab | ff90335b918886d5b5956c6c6546dbfde5e7f5b3 | refs/heads/master | 2021-01-23T09:03:54.697325 | 2018-02-27T20:47:48 | 2018-02-27T20:47:48 | 21,292,856 | 10 | 0 | null | 2015-09-03T16:53:15 | 2014-06-28T00:29:18 | Python | UTF-8 | Python | false | false | 730 | py | import sys
def marked_map(d_map, side):
    tmp_map = [row[:] for row in d_map]  # copy rows so "X" marks don't corrupt later comparisons
for x in range(1, side - 1):
for y in range(1, side - 1):
cur = d_map[x][y]
if cur > d_map[x - 1][y] and cur > d_map[x + 1][y]:
if cur > d_map[x][y - 1] and cur > d_map[x][y + 1]:
tmp_map[x][y] = "X"
for row in range(side):
print(''.join([str(x) for x in tmp_map[row]]))
return
if __name__ == "__main__":
dep_map = []
for i, line in enumerate(sys.stdin):
if i == 0:
side_len = int(line.strip("\n"))
elif i > 0:
tmp_line = [int(x) for x in line.strip("\n")]
dep_map.append(tmp_line)
marked_map(dep_map, side_len)
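# Sample run (the HackerRank "Cavity Map" example, shown for illustration):
#   stdin:  4 / 1112 / 1912 / 1892 / 1234  (one value per line)
#   stdout: 1112 / 1X12 / 18X2 / 1234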
| [
"[email protected]"
] | |
15b59d08369ec7ed415314c70999e3ac58ae20f2 | 6adad4985eb7446cccb9066257b3b9e03cb044ca | /Homework/Hangman.py | a6c9a4a9a8e513c920cbd78634b32935f1428a45 | [] | no_license | MikaMahaputra/Binus | 298a176c8b3c0669819840a8d0ea1748cd5e98d0 | 1fa3fba60d5882ddc99e8f3e6038b4b9c8643b28 | refs/heads/master | 2020-07-27T01:37:47.984077 | 2020-06-20T18:58:07 | 2020-06-20T18:58:07 | 208,825,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,523 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 25 11:46:58 2019
@author: user
"""
import random
def get_word() :
words= ["Happy", "Sad", "Angry", "Dog", "Cat", "Mouse", "Air", "Land", "Sea", "January", "May", "June", "December"]
return random.choice(words).upper()
def check (word, guesses, guess):
    guess = guess.upper()
status= ""
matches= 0
for letter in word:
if letter in guesses:
status += letter
else:
status += "*"
if letter == guess:
matches += 1
    if matches >= 1:
        print("Yes, this word contains the letter", guess)
else:
print ("Sorry, the word does not contain this letter")
return status
def main ():
word = get_word()
guesses = []
guessed = False
print ("The word contains", len(word), "letters")
while not guessed:
text = ("Please enter one letter: " , len(word))
guess = input (text)
guess = guess.upper()
if guess in guesses:
print ("You've already guessed that letter")
elif len(guess) == 1 :
guesses.append (guess)
result = check(word,guesses,guess)
if result == word:
guessed = True
else:
print(result)
else:
print("Error, please try again")
print("Yes the word is",word )
main ()
#Made By Almada Putra and Mika Mahaputra
| [
"[email protected]"
] | |
cfd2b37c895af427caacb966b2ac30bc49a3feaf | 82c0c89857c1935204c1fb1c03427117275af81c | /upload_form/urls.py | 9329daef402cefa3dc7d4666b65007eabaad5dfc | [] | no_license | KaijiS/Cut_Vowel | 41132c33ff2ca99a7d9d636e85d07869c48d0b62 | 10e1dd8ae8715e6287efdcd8dbffe12319140794 | refs/heads/master | 2020-03-19T18:09:56.133826 | 2018-06-18T10:01:44 | 2018-06-18T10:01:44 | 136,796,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | from django.urls import path
from . import views
app_name = 'upload_form'
urlpatterns = [
path('', views.form, name='form'),
path('download/', views.download, name='download'),
]
| [
"[email protected]"
] | |
18fc3c3fe3e000f701c5ed4f6e84bacfdedf80f3 | d3019be00f73f6d479e17632a1f2ce1ebfbfba6c | /Sentience/sol.py | 76db5134ca011d29007793c9a71bd9489fd6d016 | [
"MIT"
] | permissive | AJ1479/code-golf-challenges | 2c779c6f5cdd175748305014835b41b6ff4ff747 | ea543f260f6f2fb3f7b7845a8efa436c1b8b6f12 | refs/heads/master | 2023-02-10T11:35:27.779814 | 2020-10-03T18:21:47 | 2020-10-03T18:21:47 | 298,393,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | mod=pow(10,9)+7
def fibonacci(n):
a = 0
b = 1
if n == 0:
return a
elif n == 1:
return b
else:
for i in range(2,n+1):
c = (a + b)%mod
a = b
b = c
return b
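# e.g. fibonacci(10) == 55; every value is reduced mod 10**9 + 7 along the way.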
t=int(input())
for _ in range(t):
n=int(input())
    print(fibonacci(n+1))
| [
"[email protected]"
] | |
7f386af6ec040149f7dc365a98c0fe74e4cecb11 | 7f7bbe413eefb90bb783c987faf42edb3db05e26 | /users/urls.py | 0b2ce076728ee8ced51d1b65fe5fadb76a75fed4 | [] | no_license | Ka4mar/Geekshop-server | abbff5a93a4f41f4216c71bb828aff7198459dac | 94cfab1d9f5432efee32b20bf32724ff439101e9 | refs/heads/master | 2023-07-15T03:37:40.619547 | 2021-08-24T13:56:17 | 2021-08-24T13:56:17 | 380,510,493 | 0 | 0 | null | 2021-08-20T21:09:03 | 2021-06-26T13:38:45 | CSS | UTF-8 | Python | false | false | 402 | py | from django.urls import path
from users.views import login, registration, profile, logout, verify
app_name = 'users'
urlpatterns = [
path('login/', login, name='login'),
path('registration/', registration, name='registration'),
path('profile/', profile, name='profile'),
path('logout/', logout, name='logout'),
path('verify/<email>/<activation_key>/', verify, name='verify'),
]
| [
"[email protected]"
] | |
e1ec834f94f09b1099ff834e8bfffae534ef1417 | 99b7c99762dcb0c089db759f82d7f544fa8b1b55 | /backend/tstec2020110601_dev_14711/settings.py | 01a73faf2ee732883ee770447aa5040da44a418c | [] | no_license | crowdbotics-apps/tstec2020110601-dev-14711 | a5425bc05f4b40a3127715583d46d21fc041ebaa | d8f065b3475c9d782a5f9dd61d1430a2f3848e34 | refs/heads/master | 2023-01-11T03:31:33.462079 | 2020-11-13T11:18:30 | 2020-11-13T11:18:30 | 310,740,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,125 | py | """
Django settings for tstec2020110601_dev_14711 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tstec2020110601_dev_14711.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tstec2020110601_dev_14711.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
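# env.db() parses the DATABASE_URL variable in the usual URL form, e.g.
# (hypothetical credentials):
#   DATABASE_URL=postgres://user:password@localhost:5432/dbname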
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"[email protected]"
] |