blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 5-283) | content_id (string, len 40) | detected_licenses (sequence, len 0-41) | license_type (2 classes) | repo_name (string, len 7-96) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (58 classes) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 12.7k-662M, nullable) | star_events_count (int64, 0-35.5k) | fork_events_count (int64, 0-20.6k) | gha_license_id (11 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (43 classes) | src_encoding (9 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 7-5.88M) | extension (30 classes) | content (string, len 7-5.88M) | authors (sequence, len 1-1) | author (string, len 0-73)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
947c58bca87d9b8e0f6f80243c7d490482c710ea | a64bfd5ae344f52b9f72cb9fda26c651ff2219fa | /MainUI.py | 3cf6cecc9719666a52e4fd131389049651c11c4c | [] | no_license | fakebear/995 | 6b60fe5cf8c72decab5df8a74b36579de5de0dd9 | b57507aee0f7dd37de1fbde11e2a9346627640dc | refs/heads/master | 2016-08-11T06:36:48.343347 | 2015-12-05T05:40:02 | 2015-12-05T05:40:02 | 47,443,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,221 | py | # -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Jun 17 2015)
## http://www.wxformbuilder.org/
##
## PLEASE DO "NOT" EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
###########################################################################
## Class MyFrame1
###########################################################################
class MyFrame1 ( wx.Frame ):
def __init__( self, parent ):
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = wx.EmptyString, pos = wx.DefaultPosition, size = wx.Size( 500,300 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize )
self.m_menubar1 = wx.MenuBar( 0 )
self.m_menu1 = wx.Menu()
self.m_menubar1.Append( self.m_menu1, u"MyMenu" )
self.m_menu2 = wx.Menu()
self.m_menuItem1 = wx.MenuItem( self.m_menu2, wx.ID_ANY, u"MyMenuItem", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu2.AppendItem( self.m_menuItem1 )
self.m_menuItem2 = wx.MenuItem( self.m_menu2, wx.ID_ANY, u"MyMenuItem", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu2.AppendItem( self.m_menuItem2 )
self.m_menubar1.Append( self.m_menu2, u"MyMenu" )
self.m_menu3 = wx.Menu()
self.m_menuItem3 = wx.MenuItem( self.m_menu3, wx.ID_ANY, u"MyMenuItem", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu3.AppendItem( self.m_menuItem3 )
self.m_menubar1.Append( self.m_menu3, u"MyMenu" )
self.m_menu4 = wx.Menu()
self.m_menuItem4 = wx.MenuItem( self.m_menu4, wx.ID_ANY, u"MyMenuItem", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu4.AppendItem( self.m_menuItem4 )
self.m_menuItem5 = wx.MenuItem( self.m_menu4, wx.ID_ANY, u"MyMenuItem", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu4.AppendItem( self.m_menuItem5 )
self.m_menuItem6 = wx.MenuItem( self.m_menu4, wx.ID_ANY, u"MyMenuItem", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu4.AppendItem( self.m_menuItem6 )
self.m_menubar1.Append( self.m_menu4, u"MyMenu" )
self.SetMenuBar( self.m_menubar1 )
self.Centre( wx.BOTH )
def __del__( self ):
pass
| [
"[email protected]"
] | |
462b8b65a2ae86358185254430ab2dc42161e2bd | 2883f29bb5caeb5f5db0aac635f1eb98a1211672 | /TeamsWebhook.py | 06e550d8e8588630b50bdfbf5ece026dad67c4e4 | [] | no_license | pennyman/teams | 48cfc9a4fbb0cb022ceec1ea8c5822a5a3840906 | 834ec6ace5da1aeaf90cfa2ac66745547607e2f2 | refs/heads/main | 2023-03-28T21:34:34.242037 | 2021-04-04T14:14:05 | 2021-04-04T14:14:05 | 353,103,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,290 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import boto3
import json
import logging
from base64 import b64decode
from urllib2 import Request, urlopen, URLError, HTTPError
TEAMS_CHANNEL = 'prod-aws' # Enter the Teams channel to send a message to
HOOK_URL = "https://outlook.office.com/webhook/xxxxxx/IncomingWebhook/d5c3166cdfb64b04ae5138a4d908e3c2/20738ef7-fe46-4ce7-87d7-a53176474834"
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
logger.info("Event: " + str(event))
message = json.loads(event['Records'][0]['Sns']['Message'])
logger.info("Message: " + str(message))
alarm_name = message['AlarmName']
new_state = message['NewStateValue']
reason = message['NewStateReason']
notification_message = {
'title': " % s" % (alarm_name),
'text': " % s : % s" % (new_state, reason)
}
req = Request(HOOK_URL, json.dumps(notification_message))
try:
response = urlopen(req)
response.read()
# logger.info("Message posted to %s", notification_message['channel'])
except HTTPError as e:
logger.error("Request failed: %d %s", e.code, e.reason)
except URLError as e:
logger.error("Server connection failed: %s", e.reason)
| [
"[email protected]"
] | |
978396f764d0496521889a86c516fe5f658aeb0c | 4542bb0c9c382e97a5d68a5d076c3bb806a72dae | /Final Project/SP500_Composition/Main Code/PlotData.py | 62041ad1fa74a21ddfb70aa1a45a8d925c416125 | [] | no_license | bspeice/experimental-finance | 3878932bbd86e3a2bc896b332e178a2fc7f148a3 | 039e4e54a2b5a30bfbc4ae9322f837ea0bfc8ac5 | refs/heads/master | 2021-01-20T19:49:49.501277 | 2016-11-29T20:43:26 | 2016-11-29T20:44:16 | 67,712,860 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,040 | py | import pandas as pd
import numpy as np
import seaborn as sns
import sqlalchemy
from sqlalchemy import create_engine
import time
import matplotlib.pyplot as plt
from matplotlib.finance import candlestick_ohlc
from matplotlib.dates import date2num
from matplotlib.dates import DateFormatter,WeekdayLocator,DayLocator,MONDAY
CONNECTION_STRING = 'mssql+pymssql://IVYuser:[email protected]'
# Gets the database connection
def get_connection():
engine = create_engine(CONNECTION_STRING)
return engine.connect()
# Query database and return results in dataframe
def query_dataframe(query, connection=None):
    # Run the query through pandas and return the result as a DataFrame
if connection is None:
connection = get_connection()
res = pd.read_sql(query, connection)
return res
# Query database using external file and return results in dataframe
def query_dataframe_f(filename, connection=None):
if connection is None:
connection = get_connection()
with open(filename, 'r') as handle:
return pd.read_sql(handle.read(), connection)
# Get stock data
def get_stock_data(file_name, date_diff):
# Get data from DB
sql_raw = open(file_name, 'r').read()
sql_format = sql_raw.format(date_diff = date_diff)
data = query_dataframe(sql_format)
# Parse data
data.Date = pd.to_datetime(data.Date)
data.AnnouncementDate = pd.to_datetime(data.AnnouncementDate)
data.ChangeDate = pd.to_datetime(data.ChangeDate)
return data
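# Illustrative call (hypothetical file name): the SQL template file is expected
# to contain a '{date_diff}' placeholder, e.g.
#   data = get_stock_data('sp500_changes.sql', date_diff=30)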
# Plot data
def plot_data(data,version=0):
for data_id, group in data.groupby('DataID'):
announcement_date = group.AnnouncementDate.values[0]
announcement_date_str = pd.to_datetime(announcement_date).strftime('%Y-%m-%d')
change_date = group.ChangeDate.values[0]
change_date_str = pd.to_datetime(change_date).strftime('%Y-%m-%d')
in_name = group.In_Name.values[0]
in_ticker = group.In_Ticker.values[0]
in_sec_id = group.In_SecurityID.values[0]
out_name = group.Out_Name.values[0]
out_ticker = group.Out_Ticker.values[0]
out_sec_id = group.Out_SecurityID.values[0]
is_tradable = group.IsTradable.values[0]
is_pair_tradable = group.IsPairTradable.values[0]
is_takeover = group.IsTakeover.values[0]
print('{} - In:{} - Out:{}\nAnnouncement:{} - Change:{}\nTakeover:{} - Tradable:{} - PairTradable:{}'.format(data_id,in_name,out_name,announcement_date_str,change_date_str,is_takeover,is_tradable,is_pair_tradable))
if(version == 0):
fig, (ax1, ax3) = plt.subplots(2, 1, figsize=(18,10))
ax2 = ax1.twinx()
ax4 = ax3.twinx()
ax1.plot(group.Date, group.In_ClosePrice_Adj,'b-o')
ax2.plot(group.Date, group.Out_ClosePrice_Adj,'r-o')
ax3.plot(group.Date, group.In_Volume, 'b.-')
ax4.plot(group.Date, group.Out_Volume, 'r.-')
ax1.legend([in_ticker],loc=2),ax2.legend([out_ticker],loc=1),
ax3.legend([in_ticker],loc=2),ax4.legend([out_ticker],loc=1)
ax1.axvline(x=announcement_date,color='g',ls='dashed')
ax1.axvline(x=change_date,color='k',ls='dashed')
ax3.axvline(x=announcement_date,color='g',ls='dashed')
ax3.axvline(x=change_date,color='k',ls='dashed')
ax1.grid(True), ax3.grid(True)
ax1.set_title('Stock Prices - In:Blue - Out:Red')
ax3.set_title('Volume - In:Blue - Out:Red')
plt.ylabel('Volume')
plt.show()
elif(version == 1):
fig, (ax1, ax3, ax5, ax7) = plt.subplots(4, 1, figsize=(18,20))
ax2 = ax1.twinx()
ax4 = ax3.twinx()
ax6 = ax5.twinx()
ax8 = ax7.twinx()
ax1.plot(group.Date, group.In_ClosePrice_Adj,'b-o')
ax2.plot(group.Date, group.Out_ClosePrice_Adj,'r-o')
ax3.plot(group.Date, group.In_Volume, 'b.-')
ax4.plot(group.Date, group.Out_Volume, 'r.-')
ax5.plot(group.Date, group.In_Call_OI, 'b.-')
ax5.plot(group.Date, group.In_Put_OI, 'b.--')
ax6.plot(group.Date, group.Out_Call_OI, 'r.-')
ax6.plot(group.Date, group.Out_Put_OI, 'r.--')
ax7.plot(group.Date, group.In_Call_Volume, 'b.-')
ax7.plot(group.Date, group.In_Put_Volume, 'b.--')
ax8.plot(group.Date, group.Out_Call_Volume, 'r.-')
ax8.plot(group.Date, group.Out_Put_Volume, 'r.--')
ax1.legend([in_ticker],loc=2),ax2.legend([out_ticker],loc=1)
ax3.legend([in_ticker],loc=2),ax4.legend([out_ticker],loc=1)
ax5.legend(['C','P'],loc=2),ax6.legend(['C','P'],loc=1)
ax7.legend(['C','P'],loc=2),ax8.legend(['C','P'],loc=1)
ax1.axvline(x=announcement_date,color='g',ls='dashed')
ax1.axvline(x=change_date,color='k',ls='dashed')
ax3.axvline(x=announcement_date,color='g',ls='dashed')
ax3.axvline(x=change_date,color='k',ls='dashed')
ax5.axvline(x=announcement_date,color='g',ls='dashed')
ax5.axvline(x=change_date,color='k',ls='dashed')
ax7.axvline(x=announcement_date,color='g',ls='dashed')
ax7.axvline(x=change_date,color='k',ls='dashed')
ax1.grid(True), ax3.grid(True), ax4.grid(True), ax7.grid(True)
ax1.set_title('Stock Prices - In:Blue - Out:Red')
ax3.set_title('Volume - In:Blue - Out:Red')
ax5.set_title('Option Open Interest - In:Blue - Out:Red')
ax7.set_title('Option Volume - In:Blue - Out:Red')
plt.show() | [
"[email protected]"
] | |
af2b7db466509c3e736b595cfc2613129def6abb | 9bd772823a5b116216b0a709da70072b2c461128 | /Locate_dir.py | 6c619ea0b7d743d0356e58850e227a8c8ee255b5 | [] | no_license | Hausdorff94/agglut-detect | 567f007f739547c6364353a97b451370e26fcec1 | 991acf0c0d37c578ff8dcf3764175b48dc2ba0bd | refs/heads/main | 2023-02-06T00:27:03.255863 | 2020-12-24T19:38:43 | 2020-12-24T19:38:43 | 302,480,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | import os
def location(rel_path):
script_dir = os.path.dirname(__file__)
abs_file_path = os.path.join(script_dir, rel_path)
return abs_file_path | [
"[email protected]"
] | |
66d90542461e3006f9c9db1228402f8278e19773 | 6e187991aab67dee171a5b7af6c77af27d91faa5 | /app/bot/polling.py | 9dcd0909ec3f98c2266ab73d14ea428ba01d2cf6 | [] | no_license | Arwichok/tgbot | c341d2e81d820b354c4f7571a0fe0fd7285ae592 | 6b211b59c02d8b54ca8c5dcee4e0de418894c3d7 | refs/heads/main | 2023-03-31T16:41:34.589056 | 2021-04-04T10:35:48 | 2021-04-04T10:35:48 | 343,109,312 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,289 | py | import asyncio
import logging
from aiogram import Dispatcher
from aiohttp.web import Application
from asyncpg.pool import Pool
from ..models.base import init_pool
from ..utils import config
from .base import init_dp, on_shutdown, on_startup
def run_polling():
dp: Dispatcher = init_dp()
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(_on_startup_polling(dp))
loop.run_forever()
except (KeyboardInterrupt, SystemExit):
pass
finally:
loop.run_until_complete(_on_shutdown_polling(dp))
async def _on_startup_polling(dp: Dispatcher, pool: Pool = init_pool()):
if config.SKIP_UPDATES:
await dp.skip_updates()
await on_startup(dp, pool)
loop = asyncio.get_event_loop()
loop.create_task(dp.start_polling())
async def _on_shutdown_polling(dp: Dispatcher):
dp.stop_polling()
await on_shutdown(dp)
await dp.bot.session.close()
def setup_web_polling(app: Application):
dp: Dispatcher = init_dp()
app["dp"] = dp
logging.warning("\033[1;31mDO NOT USE FOR PRODUCTION\033[0m")
async def _up(_):
await _on_startup_polling(dp, app["pool"])
async def _down(_):
await _on_shutdown_polling(dp)
app.on_startup.append(_up)
app.on_shutdown.append(_down)
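# Usage sketch (illustrative): call run_polling() from the bot's entry point to
# long-poll on its own event loop; setup_web_polling(app) instead ties the same
# start/stop lifecycle to an aiohttp Application (development only, as the
# warning above notes).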
| [
"[email protected]"
] | |
c44a9a3ff8e7b0a66bb7b4bb9c1e87f73f4e95a0 | 57509c5ce9f893c33f80c3748eb6698a772922e3 | /main/models.py | d00e77a05e072fbc74154258bf940bf507f36f84 | [] | no_license | seriouswill/django-todo-tutorial | 37c437ba0c1b732aab19b5adfc119c14ff1a247f | 85bbf6f341d2f8195e6f537ad19f80ced23bc998 | refs/heads/main | 2023-05-04T06:41:09.085374 | 2021-05-23T05:47:36 | 2021-05-23T05:47:36 | 369,843,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class ToDoList(models.Model):
name = models.CharField(max_length=200)
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="todolist", null=True)
def __str__(self):
return self.name
class Item(models.Model):
todolist = models.ForeignKey(ToDoList, on_delete=models.CASCADE)
text = models.CharField(max_length=300)
complete = models.BooleanField(help_text="Check to Continue")
def __str__(self):
return self.text
| [
"[email protected]"
] | |
1ca9c424e68b8e63a7782bfa946b23b61d59bc57 | 23f6dbacd9b98fdfd08a6f358b876d3d371fc8f6 | /rootfs/usr/lib/pymodules/python2.6/google/protobuf/reflection.py | d245c1d0ffaf580606c4ca02bdc02d99c6c9903c | [] | no_license | xinligg/trainmonitor | 07ed0fa99e54e2857b49ad3435546d13cc0eb17a | 938a8d8f56dc267fceeb65ef7b867f1cac343923 | refs/heads/master | 2021-09-24T15:52:43.195053 | 2018-10-11T07:12:25 | 2018-10-11T07:12:25 | 116,164,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | /usr/share/pyshared/google/protobuf/reflection.py | [
"[email protected]"
] | |
2fc42d5172ce8f4ac54f3e9c75baff8dfbc1550d | ca3ad9cb9adea3b5e46c614cc151e7fc1de6d5a8 | /league-chongo.py | 821235ad0726080dec553f91ece8839c54cb43d4 | [
"MIT"
] | permissive | sedlak477/simple-league-helper | d8d7dd58a60edde7221d8bd88b5f9d1b09301e57 | b524c5daa807dc59fcfba95ee5849e0f84f6ebed | refs/heads/master | 2022-12-14T03:01:58.749244 | 2020-09-18T00:49:57 | 2020-09-18T00:49:57 | 296,472,702 | 0 | 0 | null | 2020-09-18T00:49:40 | 2020-09-18T00:28:52 | Python | UTF-8 | Python | false | false | 6,886 | py | import requests as r
import bs4 as bs
import socket
import time
import threading
import curses, curses.panel
SSL_CERT = "./riotgames.pem"
GAME_START_POLL_INTERVAL = 10 # In seconds
SKILL_OFFSET_LEFT = 5
SKILL_OFFSET_BOTTOM = 9
ITEM_OFFSET_MID = -6
ITEM_OFFSET_TOP = 5
NAME_OFFSET_TOP = 2
NAME_OFFSET_LEFT = 5
STATS_OFFSET_LEFT = 1
STATS_OFFSET_TOP = 2
CLIENT_ADDRESS = ("localhost", 2999)
def isGameRunning():
test_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
test_socket.settimeout(GAME_START_POLL_INTERVAL)
try:
test_socket.connect(CLIENT_ADDRESS)
test_socket.close()
return True
except:
return False
def waitForGame():
while not isGameRunning():
time.sleep(GAME_START_POLL_INTERVAL)
def parseItemsTable(table):
item_sets = []
rows = table.find_all("ul", class_="champion-stats__list")
for row in rows:
items = row.find_all("li", class_="champion-stats__list__item")
item_set = []
for item in items:
text = item["title"]
mini_soup = bs.BeautifulSoup(text, "lxml")
item_set.append(mini_soup.b.string)
item_sets.append(item_set)
categories = {
"starters": item_sets[:2],
"boots": item_sets[-3:],
"builds": item_sets[2:-3]
}
return categories
def parseAbilitiesTable(table):
abilities = []
abilities_raw = table.find_all("tbody")[1].ul.find_all("li", class_="champion-stats__list__item")
for ability in abilities_raw:
text = ability["title"]
key = ability.span.string
mini_soup = bs.BeautifulSoup(text, "lxml")
abilities.append(key + ": " + mini_soup.b.string)
return abilities
def getOPGGChampionData(champion):
champion = "".join(filter(lambda char: char.isalnum(), champion.lower()))
req = r.get(f"https://euw.op.gg/champion/{champion.lower()}")
soup = bs.BeautifulSoup(req.text, "lxml")
(raw_abilities_table, raw_items_table, _) = soup.find_all("table", class_="champion-overview__table")
abilities_table = parseAbilitiesTable(raw_abilities_table)
items_table = parseItemsTable(raw_items_table)
return {
"items": items_table,
"abilities": abilities_table
}
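# Note: the name normalisation above maps e.g. "Kai'Sa" -> "kaisa" and
# "Dr. Mundo" -> "drmundo" to match op.gg's URL scheme; the dict returned here
# has the shape {'items': {'starters': [...], 'builds': [...], 'boots': [...]},
# 'abilities': ['Q: ...', 'W: ...', ...]} (ability keys taken from the page).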
def getPlayerlist():
req = r.get(f"https://{CLIENT_ADDRESS[0]}:{CLIENT_ADDRESS[1]}/liveclientdata/playerlist", verify=SSL_CERT)
return req.json()
def getSummonerName():
req = r.get(f"https://{CLIENT_ADDRESS[0]}:{CLIENT_ADDRESS[1]}/liveclientdata/activeplayername", verify=SSL_CERT)
return req.json()
def getSummonerData():
activeSummoner = getSummonerName()
playerlist = getPlayerlist()
summoner_data = filter(lambda data: data["summonerName"] == activeSummoner, playerlist)
return next(summoner_data)
def getCurrentStats():
req = r.get(f"https://{CLIENT_ADDRESS[0]}:{CLIENT_ADDRESS[1]}/liveclientdata/activeplayer", verify=SSL_CERT)
return req.json()["championStats"]
def displayStats(win):
data = getCurrentStats() if isGameRunning() else {}
win.clear()
win.addstr(0, 0, f"AD: {data.get('attackDamage', 0):.0f}"); win.addstr(0, 12, f"AP: {data.get('abilityPower', 0):.0f}")
win.addstr(1, 0, f"Res: {data.get('armor', 0):.0f}"); win.addstr(1, 12, f"MR: {data.get('magicResist', 0):.0f}")
win.addstr(2, 0, f"AS: {data.get('attackSpeed', 0):.2f}"); win.addstr(2, 12, f"CD: {data.get('cooldownReduction', 0) * -100:.0f}%")
win.addstr(3, 0, f"Crt: {data.get('critChance', 0) * 100:.0f}%"); win.addstr(3, 12, f"MV: {data.get('moveSpeed', 0):.0f}")
win.refresh()
def run_app(stdscr, championName=None, summonerName=None):
curses.curs_set(0) # Set cursor invisible
height, width = stdscr.getmaxyx()
mid = width // 2
v_mid = height // 2
if championName is None:
stdscr.clear()
WAIT_MSG = "Waiting for game to start..."
stdscr.addstr(v_mid, mid - len(WAIT_MSG) // 2 - 1, WAIT_MSG)
stdscr.refresh()
waitForGame()
stdscr.clear()
LOADING_MSG = "Loading champion information..."
stdscr.addstr(v_mid, mid - len(LOADING_MSG) // 2 - 1, LOADING_MSG)
stdscr.refresh()
if championName is None:
data = getSummonerData()
summonerName = data["summonerName"]
championName = data["championName"]
champion_data = getOPGGChampionData(championName)
stdscr.clear()
# Print champion name
stdscr.addstr(NAME_OFFSET_TOP, NAME_OFFSET_LEFT, f"{summonerName} playing {championName}" if summonerName is not None else championName)
# Print skill order
stdscr.addstr(height - SKILL_OFFSET_BOTTOM, SKILL_OFFSET_LEFT, "Skill order:")
for i, skill in enumerate(champion_data["abilities"]):
stdscr.addstr(height - SKILL_OFFSET_BOTTOM + 2 + i, SKILL_OFFSET_LEFT + 2, f"{i + 1}. {skill}")
# Print starting items
stdscr.addstr(ITEM_OFFSET_TOP, mid + ITEM_OFFSET_MID, "Starting items:")
for i, items in enumerate(champion_data["items"]["starters"]):
stdscr.addstr(ITEM_OFFSET_TOP + 2 + i, mid + ITEM_OFFSET_MID + 2, f"{i + 1}. {', '.join(items)}")
# Print common builds
stdscr.addstr(ITEM_OFFSET_TOP + len(champion_data["items"]["starters"]) + 4, mid + ITEM_OFFSET_MID, "Core items:")
for i, items in enumerate(champion_data["items"]["builds"]):
stdscr.addstr(ITEM_OFFSET_TOP + len(champion_data["items"]["starters"]) + 6 + i, mid + ITEM_OFFSET_MID + 2, f"{i + 1}. {', '.join(items)}")
# Print boots
stdscr.addstr(ITEM_OFFSET_TOP + len(champion_data["items"]["starters"]) + len(champion_data["items"]["builds"]) + 8, mid + ITEM_OFFSET_MID, "Boots:")
for i, items in enumerate(champion_data["items"]["boots"]):
stdscr.addstr(ITEM_OFFSET_TOP + len(champion_data["items"]["starters"]) + len(champion_data["items"]["builds"]) + 10 + i, mid + ITEM_OFFSET_MID + 2, f"{i + 1}. {', '.join(items)}")
stdscr.addstr(height - 1, 0, "| q = Quit | r = Refresh |")
stdscr.refresh()
panel_height = height - (SKILL_OFFSET_BOTTOM + NAME_OFFSET_TOP + 1 + STATS_OFFSET_TOP)
panel_width = mid - (NAME_OFFSET_LEFT - ITEM_OFFSET_MID + STATS_OFFSET_LEFT)
win = curses.newwin(panel_height, panel_width, NAME_OFFSET_TOP + 1 + STATS_OFFSET_TOP, NAME_OFFSET_LEFT + STATS_OFFSET_LEFT)
panel = curses.panel.new_panel(win)
def statsUpdateLoop():
while True:
displayStats(win)
time.sleep(5)
stdscr.refresh()
statsUpdateThread = threading.Thread(target=statsUpdateLoop)
statsUpdateThread.setDaemon(True)
statsUpdateThread.start()
# Wait for command
quit = False
while not quit:
key = stdscr.getkey()
if key == "q":
quit = True
if __name__ == "__main__":
try:
curses.wrapper(run_app)
except KeyboardInterrupt:
pass
| [
"[email protected]"
] | |
66cc2177835892ea8f53377a2c5bf020db3c444c | 7dd2b42f836684ae1e4b7990502425b661d4bb99 | /tasks/lisp.py | 76b23108f9a1d308fadd7a936911dc20bf0adfb1 | [
"MIT"
] | permissive | mromanelli9/python-programming-challenges | 0766238d996a089ede88e6a5b1682699e1915594 | db90c1f99c755dd8d3c0adc3498c5ca8be08b16d | refs/heads/master | 2020-07-30T08:11:42.184000 | 2019-09-23T09:09:06 | 2019-09-23T09:09:06 | 210,149,285 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,041 | py | #!/usr/bin/env python
"""
Difficulty: 2
You are a developer that really loves Lisp (the programming language). Using only
notepad you have implemented a huge expression to try to find the faults and horizons in
a dataset. Given that you don't have a linter, you are not sure whether your
expression is valid. Therefore you decided to implement an algorithm to check if every
parenthesis has the appropriate closing bracket.
"""
def is_valid_lisp(expression: str) -> bool:
"""
Args:
expression: Your expression as a string. Will only contain lower case letters,
spaces and parenthesis.
Returns:
Whether the expression is valid. You can assume it is valid if every
parenthesis has the appropriate closing parenthesis.
Example:
>>> print(is_balanced("(("))
False
>>> print(is_balanced("(()))("))
False
>>> print(is_balanced("()"))
True
>>> print(is_balanced("(h(e)l(l)o)"))
True
"""
raise NotImplementedError()
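# One possible solution sketch (kept as a comment so the challenge stub above
# stays unimplemented): track nesting depth with a counter, failing fast on a
# premature ')' and requiring the counter to end at zero.
#
#   depth = 0
#   for ch in expression:
#       if ch == '(':
#           depth += 1
#       elif ch == ')':
#           depth -= 1
#           if depth < 0:
#               return False
#   return depth == 0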
| [
"[email protected]"
] | |
cba0cc03fa2e9dde3df2866d4b6ad37c06e676c6 | e47a20e443772c5c27ff3c3f043fae0d5df41443 | /lib/tank/__init__.py | 668af85a644d45f09e8f9972046f9ab881cbd1a2 | [
"MIT"
] | permissive | russcollier/Tank | 00f2e0d19c09c0fd57a63cefed8ccebcc2425b0d | 78e03337dac1ada21344ff30c6dc5a653e40a70e | refs/heads/master | 2020-04-09T02:11:26.464441 | 2015-01-06T18:10:13 | 2015-01-06T18:10:13 | 15,642,509 | 1 | 1 | null | 2015-01-06T18:10:13 | 2014-01-05T00:30:02 | Python | UTF-8 | Python | false | false | 243 | py | __author__ = "Russ Collier"
__copyright__ = "Copyright 2014, Russ Collier"
__credits__ = ["Russ Collier"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Russ Collier"
__email__ = "[email protected]"
__status__ = "Development" | [
"[email protected]"
] | |
0ee6643923af41b05e98008d433ca4e70ebe032b | 6f7be94706d838f5d14d10283a19baec8a73a5ef | /aReclambord_server/UserManagement/views.py | 8fee4956f5e003ed47da40589ff1a4057a0aac96 | [] | no_license | Ryandene/aReclambord_api | 27688d9e54c8cd69031415785898179dc6e4fa07 | 0c9fabb247fb5f7f2aa556e3551d3d15e704efe4 | refs/heads/master | 2023-04-24T18:51:58.297633 | 2021-05-18T20:04:18 | 2021-05-18T20:04:18 | 368,605,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,414 | py | import traceback
from urllib.error import HTTPError
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.generics import ListAPIView, RetrieveAPIView
from rest_framework.parsers import JSONParser
# from UserManagement.serializers import AReclambordUserSerializer
from UserManagement.serializers import AReclambordUserSerializer
from aReclambord_server.pyrebase_settings import db, auth
# Create your views here.
# class BookListView(ListAPIView):
# queryset = Book.objects.all()
# serializer_class = BookSerializer
#
#
# class BookDetailView(RetrieveAPIView):
# queryset = Book.objects.all()
# serializer_class = BookSerializer
@api_view(['POST'])
def signup_user(request):
user_data = JSONParser().parse(request)
user_serializer = AReclambordUserSerializer(data=user_data)
# print("asaa")
print(user_data)
print(user_data.get('email'))
try:
        user = auth.create_user_with_email_and_password(user_data.get('email'), user_data.get('password'))
return JsonResponse({'message': 'User created successfully'}, status=status.HTTP_201_CREATED)
except Exception as e:
exception_message = traceback.format_exc()
        return JsonResponse({'message': 'An exception occurred during signup.'}, status=status.HTTP_400_BAD_REQUEST)
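# Illustrative request body (hypothetical values):
#   POST /signup_user  {"email": "[email protected]", "password": "s3cret"}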
| [
"[email protected]"
] | |
df31bc43482d4455849b1ed736da22028f209df3 | ef31d9a1ccdf236d98d50e4240e07f409ca75f99 | /ENV/root/backend/services/blogservices/oldfiles/blogdata.py | a7dd45f08eae770833afe673f707e50235bff1a4 | [] | no_license | AcePro-Engineer/JBBlogsv2 | 0ab371f91e8430a3b903b6064cf8882ec29bf9be | c08131f1722f2f5b5054cadf6f632d2c18623ecf | refs/heads/master | 2021-08-17T23:09:16.564022 | 2020-03-27T20:58:07 | 2020-03-27T20:58:07 | 239,378,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,179 | py | """
Purpose: This script is used to perform all data related
operations for all blog heading model objects.
Date Created: 1/21/2020
"""
"""
# For rendering the stack trace
import sys
import traceback
from blog.models.blog import Heading
from services.utils.errors.exceptions import UserError
#region Heading data logic
##### Data Retrieval #####
def get_heading(heading_key:int) -> Heading:
"""Function returns a heading model instance.
params: heading_key - heading model id.
"""
if heading_key > 0:
return Heading.blogheadings.single_blog_heading(heading_key)
return None
def get_lastest_headings_by_num_of_days(number_of_days: int):
"""Function returns a queryset of headings delimited by the number_of_days
parameter.
params: number_of_days - Number of days used to create the corresponding heading
date range value.
"""
try:
if number_of_days > 0:
return Heading.blogheadings.get_headings_by_number_of_days(number_of_days)
else:
            raise InvalidNumberOfDaysError("Number of days must be greater than 0", status_code=400)
except UserError as e:
traceback.print_exc()
raise
except Exception as e:
traceback.print_exc()
raise
return None
##### Data manipulation logic #####
def create_heading(new_heading):
"""Method creates a new Heading object in the database."""
try:
new_heading.validate_model()
Heading.blogheadings.create_heading(new_heading)
except UserError as e:
traceback.print_exc()
raise
except Exception as e:
traceback.print_exc()
raise
def update_heading(heading_key, new_heading_data):
"""Method updates an existing Heading record."""
try:
new_heading_data.validate_model()
old_heading_data = get_heading(heading_key)
Heading.blogheadings.edit_heading(old_heading_data, new_heading_data)
except UserError as e:
traceback.print_exc()
raise
except Exception as e:
traceback.print_exc()
raise
#endregion
"""
| [
"[email protected]"
] | |
926b425c438ac0de4f516bf1407dd1051fb1d164 | f1ee253ad14b75c2afcbe01ee09c762f96bed8d3 | /django_mxonline/mxonline/apps/course/migrations/0013_auto_20190602_1801.py | 04c93d0b8639b5c332ff7de97453fe348b10b6cc | [] | no_license | MengGuoJian/mxonline | 326d0059eb8e1c09ffdaf7d32a7ecf012e28f6a3 | 2835e24e41ea350cef9e164e6aa818aee1a78da4 | refs/heads/master | 2022-11-30T19:25:39.220884 | 2019-06-28T16:52:25 | 2019-06-28T16:52:25 | 194,286,291 | 1 | 1 | null | 2022-11-22T01:22:36 | 2019-06-28T14:23:00 | Python | UTF-8 | Python | false | false | 867 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2019-06-02 18:01
from __future__ import unicode_literals
import DjangoUeditor.models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('course', '0012_course_is_banner'),
]
operations = [
migrations.CreateModel(
name='BannerCourse',
fields=[
],
options={
'verbose_name': '\u8f6e\u64ad\u8bfe\u7a0b',
'proxy': True,
'verbose_name_plural': '\u8f6e\u64ad\u8bfe\u7a0b',
},
bases=('course.course',),
),
migrations.AlterField(
model_name='course',
name='detail',
field=DjangoUeditor.models.UEditorField(default='', verbose_name='\u8bfe\u7a0b\u8be6\u60c5'),
),
]
| [
"[email protected]"
] | |
1dde3b27f442210a23ab27ff1d10cc45105d5427 | 1f27c4697ffd787eb709dc6560823e8d42196342 | /motorhome/migrations/0008_auto_20200306_0005.py | 6f741aa1b869a10fd130a09b4ea76b536b8c4393 | [
"MIT"
] | permissive | Forestriver/Motorhome | 965f387b32919efc57ba28bba292e9f444e8bb97 | 14e2998e06b3063897e4ff8d01eef6a129bec017 | refs/heads/master | 2022-11-14T20:12:03.147399 | 2020-07-09T20:30:34 | 2020-07-09T20:30:34 | 83,025,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | # Generated by Django 2.2.5 on 2020-03-05 22:05
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('motorhome', '0007_auto_20200305_2316'),
]
operations = [
migrations.AlterModelOptions(
name='profile',
options={'verbose_name': 'Профіль користувача', 'verbose_name_plural': 'Профілі користувачів'},
),
]
| [
"[email protected]"
] | |
7d3f3defa080793d84614139710f26c5c878c1eb | 6f4749e180a582bb3685bb10e09cde3e0ecbcd48 | /D10/twiitest.py | caa21b5ba110f5485e37c037815030ae4027dbcd | [] | no_license | stanford713/Python0714 | 2315793e7ee4cae3d7da4cdcf18f863600aa447d | d8120ac875ec22fb335fe9cee3028bf397796e1c | refs/heads/master | 2022-12-07T14:46:04.513198 | 2020-08-27T13:52:55 | 2020-08-27T13:52:55 | 279,600,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | import util.twii as t
# pe (price-to-earnings ratio), dy (dividend yield), pb (price-to-book ratio)
rows = t.analysys(10, 7, 1)
print(rows)
product=t.getproductbyname("鴻海")
print(product) | [
"[email protected]"
] | |
a5f17fa61ab1f934ab6d365ca3a0960cec60205d | b289cc5016d394ff3137e85cf924d9c361c4d223 | /test/教育部.py | 6954aad2ac827a7ebc695677b8638c5839c06038 | [] | no_license | thundernova/spider | 0b409fccbe21998bb4179794dc46c887e2203608 | ebf5198e5bffa4c1c6282a88245fdf41ec37b86c | refs/heads/master | 2022-12-28T16:28:48.416976 | 2019-09-26T09:43:08 | 2019-09-26T09:43:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,683 | py | #encoding=utf-8
import requests,re,bs4,time,sys,hashlib,uuid,time,json,base64,rsa,platform,datetime,os,urllib
UserAgent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.92 Safari/537.36'
def get_time(format_date,time_pull = None):
if time_pull:
return int(time.mktime(time.strptime(time_pull, format_date))*1000)
def standard_work_list():
return_data = []
headers = {'User-Agent': UserAgent}
res = requests.get('http://www.moe.gov.cn/jyb_sy/sy_jyyw/', headers=headers)
res.raise_for_status()
reg_content = res.content.decode('utf8')
html_page = bs4.BeautifulSoup(reg_content, 'lxml')
infos = html_page.find(id='list').findAll('a')
for one_info in infos:
_one_info = str(one_info)
if 'mp.weixin.qq' in _one_info or 'fbh/live' in _one_info or 'jyb_xwfb/moe_2082' in _one_info:
continue
content_dir = one_info
if content_dir:
_datetime = 0
_url_tmp = content_dir['href'].replace('../../','http://www.moe.gov.cn/')
print({'url': _url_tmp, 'title': content_dir['title'].strip()})
return_data.append({'url': _url_tmp, 'title': content_dir['title'].strip(), 'datetime': _datetime})
return return_data
def standard_work_article(target_url):
return_data = []
headers = {'User-Agent': UserAgent}
res = requests.get(target_url, headers=headers)
res.raise_for_status()
reg_content = res.content.decode('UTF-8')
html_page = bs4.BeautifulSoup(reg_content, 'lxml')
infos = html_page.find(class_='TRS_Editor').findAll('p')
for one_info in infos:
content_dir = re.search('<p[\\s\\S]+/p>', str(one_info))
if content_dir:
need_content = one_info.text
else:
if isinstance(one_info, bs4.NavigableString):
need_content = one_info
else:
continue
if not need_content.strip():
continue
return_data.append(need_content.strip())
date = re.search(r"(\d{4}-\d{1,2}-\d{1,2})", reg_content)
datetime_dir = re.match('(?P<year>\d{4})-(?P<month>\d+?)-(?P<day>\d+)',
date[0])
tt_tmp = '%s-%s-%s' % (
datetime_dir['year'], datetime_dir['month'], datetime_dir['day'])
_datetime = 0
if datetime_dir:
_datetime = get_time('%Y-%m-%d', tt_tmp)
_title = html_page.find('title').text
return _datetime, '%s<replace title>%s' % (_title, '\n'.join(return_data))
def main():
list = standard_work_list()
for url in list:
print(standard_work_article(url['url']))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
57104cfb0381d460954e9ddf45d70e32730b17d2 | 6eec2948c0907b5377de51e61014a48dff3d5ce7 | /PyGtk/contador.py | a9a40298b8a018d143d8be31b01268c8090d5efd | [] | no_license | clcneogeek325/Script_python | 4da937cb2caee93a2e0eb945e77ccac8e88ec4bc | 87607c97fa738b3e64aefbe0e8c4425724ecff73 | refs/heads/master | 2021-01-17T07:44:07.124077 | 2016-06-04T03:26:44 | 2016-06-04T03:26:44 | 15,943,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | #!/usr/bin/env python
import threading
import gtk
import gobject
import time

# Current counter value shared between the worker thread and the expose handler.
numero_actual = 0

def dibujar(widget, event):
    # Expose handler: draw the current counter value with cairo.
    ctx = widget.window.cairo_create()
    ctx.select_font_face('Arial Black')
    ctx.set_font_size(60) # em-square height is 90 pixels
    ctx.move_to(10, 90) # move to point (x, y) = (10, 90)
    ctx.set_source_rgb(1.00, 0.83, 0.00) # yellow
    ctx.show_text(str(numero_actual))
    ctx.stroke()

gobject.threads_init()

class Contador(threading.Thread):
    def __init__(self, areaDibujo):
        super(Contador, self).__init__()
        self.l = areaDibujo
        self.quit = False

    def run(self):
        global numero_actual
        while not self.quit:
            numero_actual += 1
            # Ask the GTK main loop to redraw the area with the new value.
            gobject.idle_add(self.l.queue_draw)
            time.sleep(1)

w = gtk.Window()
l = gtk.DrawingArea()
l.connect("expose-event", dibujar)
w.add(l)
w.show_all()
w.connect("destroy", lambda _: gtk.main_quit())
t = Contador(l)
t.start()
gtk.main()
t.quit = True
| [
"[email protected]"
] | |
d3e24eb28a87675519bababad999493cc9de3c2b | d73990dd2fa86509b333d5aae551771491530626 | /store/migrations/versions/44df3a462448_.py | 433923981228d2ea6b51964f32501849a95f92d8 | [] | no_license | mistacker/store | 086369cdc9609ee2a5aaeec9e3ed4033884841f0 | 2dcf149ba34bea3a90c6ec87568eb6b7e67ad87f | refs/heads/master | 2021-09-26T17:56:45.844172 | 2018-11-01T05:18:22 | 2018-11-01T05:18:22 | 115,232,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | """empty message
Revision ID: 44df3a462448
Revises: e46011c750b2
Create Date: 2017-12-27 20:49:39.179473
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '44df3a462448'
down_revision = 'e46011c750b2'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('goods', sa.Column('remarks', sa.Text(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('goods', 'remarks')
# ### end Alembic commands ###
| [
"[email protected]"
] | |
c356dfbad640c7b0906d378c3fa3c7e2509aa9ef | efe290586253f0991be142844fae693c60e576ee | /ScraperScrapyApp/settings.py | 230cc4c01073ca0a8e37d68f8f4a5ff30f8b5389 | [] | no_license | resolritter/webscraper-news-portal | 0051eafbc84824df78771af20c471ed60dde9e0b | 26b1ea5a2d53d28420dd94d8855b2a45281abf75 | refs/heads/master | 2020-06-01T18:32:46.140155 | 2020-05-26T17:37:13 | 2020-05-26T17:37:13 | 190,884,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | # -*- coding: utf-8 -*-
BOT_NAME = "ScraperScrapyApp"
LOG_LEVEL = "ERROR"
SPIDER_MODULES = ["ScraperScrapyApp.spiders"]
NEWSPIDER_MODULE = "ScraperScrapyApp.spiders"
ROBOTSTXT_OBEY = True
ITEM_PIPELINES = {"ScraperScrapyApp.pipelines.NewsHeadingPipeline": 1}
| [
"[email protected]"
] | |
647891d38b8543143db3758aea43580058963313 | 5b0fe6a7d20fff6176626fd9967a567efea1ddaa | /src/atcoder/ABC175/B.py | c87a3bf159e520a76a14dc31d7e61085c62c500e | [] | no_license | yk-amarly-20/atcoder_python | 10eb6ad73c0b9596f5ac58cb11dfb9b790dcc221 | 367bee26321a378f43868d44cb5a5faf7d6ed841 | refs/heads/main | 2023-03-05T14:04:27.101750 | 2021-02-22T12:53:48 | 2021-02-22T12:53:48 | 305,570,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | # 全探索
import sys
def main():
n = int(input())
l = list(map(int, input().split()))
ans = 0
if n <= 2:
print(0)
sys.exit()
for i in range(n - 2):
for j in range(i + 1, n - 1):
for k in range(j + 1, n):
if len(set([l[i], l[j], l[k]])) < 3:
continue
if abs(l[i] - l[j]) < l[k] < (l[i] + l[j]):
ans += 1
print(ans)
main()
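# Worked example (illustrative): n=4, L=[3, 4, 5, 6] -> all four 3-subsets have
# pairwise-distinct lengths and satisfy |a - b| < c < a + b, so the answer is 4.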
| [
"[email protected]"
] | |
2ba259cedf0e77c24049fc5d8ff9d80c1862a0d8 | 4e7a7570381a344c7b156da9705e996ef1dccdbd | /CNN/textProcess3.py | 405559bf63ca978334d02c05303581fc059b3564 | [] | no_license | Hwlcoder/CNN_conflict_types | 7dadea6093a55abeae1938c6113c506d84d23568 | b049615de539fb3582f34cdda6d0fbf50722a30e | refs/heads/master | 2020-06-17T05:27:54.540574 | 2019-07-08T13:06:11 | 2019-07-08T13:06:11 | 195,811,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,881 | py | # coding: utf-8
# encoding: utf-8
import sys
import importlib
from cilin import CilinSimilarity
from jaccard import *
importlib.reload(sys)
# import jieba.analyse  # import the jieba word-segmentation modules
import re
import numpy as np
from pylab import *  # together with the mpl.rcParams line below, this fixes Chinese character rendering
# get_ipython().magic('matplotlib inline')
mpl.rcParams['font.sans-serif'] = ['SimHei']  # fix Chinese character rendering
import os
import tensorflow.contrib.keras as kr
wordsCount = 29
base_dir = 'data\\'
train_dir = os.path.join(base_dir, 'train\\')
test_dir = os.path.join(base_dir, 'test\\')
val_dir = os.path.join(base_dir, 'val\\')
# vocab_dir = os.path.join(base_dir, 'sougou.vocab.txt')
# CountResult = "D://Biyesheji/data/ConflictNormCSV/"
def textPreProcess(SourcePath1,SourcePath2):
    # Build one matrix per sentence pair (every two lines form one pair)
    # 1. Count the words in each sentence and their distribution, to size the matrix
    # Count the number of lines in the txt file
    file_raws = len(["" for line in open(SourcePath1, "r", encoding='utf8')])
    print(file_raws)
    text2 = open(SourcePath2, "r", encoding='utf8')
    couple = file_raws//2
    wordsAll = np.zeros((couple,3,3))  # matrices for the whole document
    label_1 = np.zeros(couple)  # labels for the document's sentence pairs (one label per pair)
    labels = label_1.astype(np.str)  # type conversion
    sen_length = []  # word count of each sentence
lines = []
parse= []
for line in text2.readlines():
words3 = re.split(r' ', line)
words4 = list(filter(lambda
x: x != '\n' and x != ',' and x != '。' and x != '、' and x != ':' and x != ';' and x != '(' and x != ')',
words3))
parse.append(words4)
text2.close()
csv_num = 1
for raw_num in range(file_raws-1):
if raw_num % 2 == 1:
continue
else:
            com_num = wordsCount  # number of times the two sentences are compared
            # a = [[]for i in range(wordsAver)]
            a = np.zeros(shape=(3, 3))  # initialize a matrix
high = raw_num
low = raw_num + 1
for i in range(3):
#print(parse[high * 3 + i])
for j in range(3):
# print(parse[low * 3 + j])
a[j][i] = jaccard_similarity(parse[high * 3 + i], parse[low * 3 + j])
wordsAll[csv_num-1] = a
print(a)
csv_num = csv_num + 1
    label = os.path.basename(SourcePath1)[:-8]  # strip the trailing 8 characters ("_cut.txt") to get the category name
for i in range(csv_num-1):
labels[i] = label
return wordsAll, labels
# print(a)
# dataframe = pd.DataFrame(a)
# dataframe.to_csv(CountResult + '%d.csv' % (csv_num), index=False, sep=',', header=False)
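# Worked example (assuming jaccard_similarity computes |A ∩ B| / |A ∪ B| over
# token lists): jaccard_similarity(['a', 'b', 'c'], ['b', 'c', 'd']) -> 0.5,
# so cell a[j][i] above measures the token overlap between clause i of the
# first sentence and clause j of the second.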
def read_category():
"""读取分类目录,固定"""
categories =['Action','Consequence', 'Subject']#'noConflict',
cat_to_id = dict(zip(categories, range(len(categories)))) #{'Action':0, 'Consequence':1, 'Subject':3}
return categories, cat_to_id
def process_file(filepath1,filepath2,cat_to_id):
"""将文件转换为id表示"""
action_content, action_category = textPreProcess(filepath1 + 'Action_cut.txt',filepath2 + 'Action_parse.txt')
Consequence_content, Consequence_category = textPreProcess(filepath1 + 'Consequence_cut.txt',filepath2 + 'Consequence_parse.txt')
#noconflict_content, noconflict_category = textPreProcess(filepath1 + 'noConflict_cut.txt',filepath2 + 'noConflict_parse.txt')
# Object_content, Object_category = textPreProcess(filepath1 + 'Object_cut.txt')
Subject_content, Subject_category = textPreProcess(filepath1 + 'Subject_cut.txt',filepath2 + 'Subject_parse.txt')
# contents = action_content
    contents = np.vstack((action_content, Consequence_content))  # stack the matrices
#contents = np.vstack((contents, noconflict_content))
# contents = np.vstack((contents, Object_content))
contents = np.vstack((contents, Subject_content))
labels = action_category
    labels = np.concatenate((action_category, Consequence_category), axis=0)  # concatenate the arrays
#labels = np.concatenate((labels,noconflict_category), axis=0)
# labels = np.concatenate((labels, Object_category), axis=0)
labels = np.concatenate((labels, Subject_category), axis=0)
#print(contents)
label_id = []
for i in range(len(labels)):
        label_id.append(cat_to_id[labels[i]])  # assign the conflict category id
    #print(label_id)
    # Use keras's pad_sequences to pad the texts to a fixed length (disabled)
    # x_pad = kr.preprocessing.sequence.pad_sequences(contents, max_length)
    # y_pad = kr.utils.to_categorical(label_id)  # convert labels to one-hot
    x_pad = contents
    y_pad = np.array(label_id)  # label ids as a plain array (one-hot conversion disabled above)
return x_pad, y_pad
| [
"[email protected]"
] | |
648f916042c2ff4a62515f640032698820af28cf | d063684dd03293eb0f980568af088d26ab087dbe | /debadmin/migrations/0051_brand_status.py | 656abd90973f38e52e9e8fef2821470ca7c157fe | [] | no_license | abhaysantra/debscientific | ce88e5ef44da8d6771c3652ed0ad02900ccd8ed2 | 88ec65616fd24052bbdbba8b00beba85493f5aea | refs/heads/master | 2020-11-26T22:09:33.820247 | 2019-12-20T07:58:43 | 2019-12-20T07:58:43 | 229,213,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | # Generated by Django 2.2.6 on 2019-10-31 10:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('debadmin', '0050_auto_20191031_1538'),
]
operations = [
migrations.AddField(
model_name='brand',
name='status',
field=models.CharField(default='active', max_length=8),
),
]
| [
"[email protected]"
] | |
5598871d4f926669cda8cfb346f18064cb98f3d7 | f668eb955f64588dc070771014def903f02bf4cd | /tgym/envs/__init__.py | 6b870f2bdc97a855fc17fa210218735a0ff81463 | [
"MIT"
] | permissive | hongxin001/Trading-Gym | 64f105927447c7f1587df4ed8fbb21bc16ce9427 | cb3fe76576728b8a655c21194787a017bb43324b | refs/heads/master | 2022-01-07T22:04:58.726310 | 2019-06-20T12:08:11 | 2019-06-20T12:08:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44 | py | from tgym.envs.trading import SpreadTrading
| [
"[email protected]"
] | |
f1ee98a9ca25e8674d982c255e0bd4fdbaedc941 | 6396345ad7acbbcc371f3cf213b8cc2fc5fe9f28 | /thursday code.py | bbfd73cf5dbbe52b2c2d34d53590523e64221a3b | [] | no_license | EmbeddedSystemTeam/thursday-code | 84ecf0f8d8131db1260cd45b7aea97de9e69bd88 | 86bb4965b27fe1465a7b4b4709f49ec32a3cfaa4 | refs/heads/master | 2022-11-26T21:25:08.020801 | 2020-07-31T02:07:38 | 2020-07-31T02:07:38 | 283,918,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,030 | py | import board
import displayio
import digitalio
import time
from analogio import AnalogIn
import neopixel
from secrets import secrets
import adafruit_esp32spi.adafruit_esp32spi_socket as socket
from adafruit_esp32spi import adafruit_esp32spi
import adafruit_requests as requests
import busio
from digitalio import DigitalInOut
import adafruit_esp32spi.adafruit_esp32spi_socket as socket
esp32_cs = DigitalInOut(board.ESP_CS)
esp32_ready = DigitalInOut(board.ESP_BUSY)
esp32_reset = DigitalInOut(board.ESP_RESET)
spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
esp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset)
x = AnalogIn(board.A0)
y= AnalogIn(board.A1)
z = AnalogIn(board.A2)
requests.set_socket(socket, esp)
dot = neopixel.NeoPixel(board.NEOPIXEL, 1, brightness=0.2)
h = 0
hits = h
url = 'http://608dev.net/sandbox/mostec/helmet'
if esp.status == adafruit_esp32spi.WL_IDLE_STATUS:
print("ESP32 found and in idle mode")
print("Connecting to AP...")
while not esp.is_connected:
try:
esp.connect_AP(secrets["ssid"], secrets["password"])
except RuntimeError as e:
print("could not connect to AP, retrying: ", e)
continue
print("Connected to", str(esp.ssid, "utf-8"), "\tRSSI:", esp.rssi)
while True:
if x.value > 40000:
print ('WHAM')
time.sleep(1)
try:
r = requests.get("http://608dev.net/sandbox/mostec/helmet?player1")
r = eval(r.text)
h = int (r['player1'])
h += 1
n = requests.post(f"http://608dev.net/sandbox/mostec/helmet?player1={h}")
except Exception as e:
print(e)
if x.value < 40000:
dot[0] = [0,255,0]
if h == 2:
dot[0] = [100,255,0]
elif h == 3:
dot[0] = [200,200,0]
elif h == 4:
dot[0] = [255,100,0]
elif h >= 5:
dot[0] = [255,0,0]
time.sleep(2)
print ('pull player')
print((x.value, y.value, z.value))
print (h)
time.sleep(0.2)
| [
"[email protected]"
] | |
7ce6abde527baa843f7ca613eee3933ccd7373d4 | e06489334de897b3e242fb1c60eacf4de045479b | /server.py | 9c687136fd41149fc157a447253effb9b5f20ffc | [] | no_license | joelhaasnoot/otp-facade | f5780dd6146a26a4b4d15d9237102e296b645573 | 63736adccfbc32d43510892e75548b997345b9b8 | refs/heads/master | 2021-01-01T18:11:59.424735 | 2014-04-26T20:04:31 | 2014-04-26T20:04:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,839 | py | __author__ = 'Joel Haasnoot'
import urllib
import requests
import simplejson as json
try:
from config import *
except:
# ENDPOINT_URL = 'http://localhost:8080'
ENDPOINT_URL = 'http://api.navitia.io/v1/journeys'
COMMON_HEADERS = [('Content-Type', 'application/json'), ('Access-Control-Allow-Origin', '*'), ('Access-Control-Allow-Headers', 'Requested-With,Content-Type')]
def notfound(start_response):
start_response('404 File Not Found', COMMON_HEADERS + [('Content-length', '2')])
yield '[]'
def decode_url(input):
return urllib.unquote(input)
def reverse_loc(input):
lat, lon = decode_url(input).split(',')
return lon+';'+lat
def map_navitia_input(params):
return {'from': reverse_loc(params['from-latlng']),
'to': reverse_loc(params['to-latlng']),
'datetime': decode_url(params['date']).replace('-', '').replace(':', '')}
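# Illustrative mapping, using the sample query shown in parse_url below:
#   from-latlng=51.985081,5.900028, to-latlng=51.948341,4.434145,
#   date=2014-04-26T20:02:20 becomes
#   {'from': '5.900028;51.985081', 'to': '4.434145;51.948341',
#    'datetime': '20140426T200220'}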
def map_navitia_output(response):
return response
def parse_url(url):
# https://1313.nl/rrrr?depart=true&from-latlng=51.985081%2C5.900028&to-latlng=51.948341%2C4.434145&date=2014-04-26T20%3A02%3A20&showIntermediateStops=true
return {entry.split('=')[0]: entry.split('=')[1] for entry in url.split('&')}
def application(environ, start_response):
if environ['PATH_INFO'][1:] != 'otp-facade':
return notfound(start_response)
parameters = parse_url(environ['QUERY_STRING'])
intermediary_params = map_navitia_input(parameters)
pieces = '&'.join([k+'='+v for k,v in intermediary_params.items()])
url = ENDPOINT_URL+'?'+pieces
response = requests.get(url)
if response.status_code == 200:
reply = json.dumps(map_navitia_output(response.json()), indent=4 * ' ')
else:
reply = ""
start_response('200 OK', COMMON_HEADERS + [('Content-length', str(len(reply)))])
return reply | [
"[email protected]"
] | |
a5e55b239f65726e46c60fa67da5cdc340e1fcea | 5c13f8bd3c0f3a81af97b9c80cc2b1ec52294ecb | /src/dcar/signature.py | c2f466a8aa49b4bf478190400eacfa2f3797d3a4 | [
"BSD-3-Clause"
] | permissive | andreas19/dcar | 6ea76300278371acd71715ed1f5c2257d069a42c | 31118ac5924b7cb01f8b7da5a84480824c046df2 | refs/heads/master | 2021-07-12T03:50:20.080507 | 2020-08-09T07:28:35 | 2020-08-09T07:28:35 | 189,934,105 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,519 | py | """D-Bus type signature."""
from collections import Counter
from . import marshal
from .const import MAX_SIGNATURE_LEN, MAX_NESTING_DEPTH
from .errors import SignatureError
__all__ = ['Signature']
class Signature:
"""A signature.
The signature string will be parsed into a list of complete types.
Each complete type is a tuple with the fist element being its
signature. The second element is ``None`` for all basic
(i.e. fixed and string-like) types, an empty list for variants,
and a list of complete types for arrays, structs, and dict entries.
A :class:`Signature` object can be used as an iterator which yields
tuples of complete types.
:param str sig: D-Bus type signature
:raises ~dcar.SignatureError: if there is a problem with the signature
"""
def __init__(self, sig):
if not isinstance(sig, str):
raise SignatureError('must be of type str, not %s' %
sig.__class__.__name__)
if len(sig) > MAX_SIGNATURE_LEN:
raise SignatureError('too long: %d > %d' %
(len(sig), MAX_SIGNATURE_LEN))
self._string = sig
counter = Counter()
self._data = _parse_signature(list(sig), counter)
if counter['r'] or counter['e']:
raise SignatureError('unclosed: struct %d, dict entry %d' %
(counter['r'], counter['e']))
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
def __str__(self):
return self._string
def __repr__(self):
return repr(self._data)
def _parse_signature(sig, counter, container=None):
if counter['a'] > MAX_NESTING_DEPTH or counter['r'] > MAX_NESTING_DEPTH:
raise SignatureError('depth: array %d, struct %d' %
(counter['a'], counter['r']))
ct_lst = [] # list of complete types
while sig:
token = sig.pop(0)
if token == '(':
counter['r'] += 1
lst = _parse_signature(sig, counter, 'r')
if not lst:
raise SignatureError('struct must have at least 1 element')
ct_lst.append(('r', lst))
elif token == '{':
if container != 'a':
raise SignatureError('dict entry outside array')
counter['e'] += 1
lst = _parse_signature(sig, counter, 'e')
if len(lst) != 2:
raise SignatureError('dict entry must have 2 elements')
if lst[0][1] is not None:
raise SignatureError('dict entry key must be basic type')
ct_lst.append(('e', lst))
elif token == 'a':
counter['a'] += 1
lst = _parse_signature(sig, counter, 'a')
if not lst:
raise SignatureError('array without element type')
ct_lst.append((token, lst))
elif token == 'v':
ct_lst.append((token, []))
elif token in marshal.type_codes:
ct_lst.append((token, None))
elif container == 'r' and token == ')':
counter['r'] -= 1
break
elif container == 'e' and token == '}':
counter['e'] -= 1
break
else:
raise SignatureError('unexpected token: %r (%r %r)' %
(token, sig, ct_lst))
if container == 'a':
counter['a'] -= 1
break
return ct_lst
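# Usage sketch (illustrative; assumes 's', 'v' and 'i' are registered in
# marshal.type_codes):
#   sig = Signature('a{sv}i')
#   len(sig)   # -> 2 complete types
#   list(sig)  # -> [('a', [('e', [('s', None), ('v', [])])]), ('i', None)]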
| [
"[email protected]"
] | |
9579a177501274abf79b26be888dabac7b431170 | 04f8c7d9eb20745def8568fcbd2401392187ebf0 | /www/configuration/forms.py | e425010cfe27ac35416a512f5eaa6680fc9ff306 | [] | no_license | boogiiieee/Victoria | 5f01d8c01e92d78b756324ee8da2208ab9c63bc6 | c76996698bbbd88309ed35f47d19e09fec19eb94 | refs/heads/master | 2021-09-04T03:13:00.208173 | 2018-01-15T04:05:29 | 2018-01-15T04:05:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | # -*- coding: utf-8 -*-
from django import forms
from configuration.models import ConfigModel
class ConfigForm(forms.ModelForm):
class Meta:
model = ConfigModel | [
"[email protected]"
] | |
25933d92e57b92068d42fc92f4e2060cf08492ba | 1e9c9f2a9639db7cdb032aae69cb4d99aef1d3a5 | /others/tech/rabbitMQ/python/2_workQueues/workQueues.py | ada53e124a282e94ebbcddc7f9b78bc4bee25945 | [
"MIT"
] | permissive | sagarnikam123/learnNPractice | f0da3f8acf653e56c591353ab342765a6831698c | 1b3b0cb2cff2f478006626a4c37a99102acbb628 | refs/heads/master | 2023-02-04T11:21:18.211654 | 2023-01-24T14:47:52 | 2023-01-24T14:47:52 | 61,184,927 | 2 | 1 | MIT | 2022-03-06T11:07:18 | 2016-06-15T06:57:19 | Python | UTF-8 | Python | false | false | 288 | py | # workQueues.py
# two consumers - C1 & C2
python3 worker.py
python3 worker.py
python3 new_task.py
python3 new_task.py First message.
python3 new_task.py Second message..
python3 new_task.py Third message...
python3 new_task.py Fourth message....
python3 new_task.py Fifth message.....
| [
"[email protected]"
] | |
d93d1e213362cbec660a951347a7765064435342 | e9cb1359be110013a47a8c5bab4f7ecd86e0e092 | /actions.py | 4fae9a36c742665de32bc46b83f0f87bd1865c75 | [] | no_license | combateer3/RFID-Multitool | 152156d4d96e77270a5897f1f3fd5a8069f2750b | 44041b419ba8c8fc9590914ae861cab195d6e40b | refs/heads/master | 2023-02-12T22:20:43.970933 | 2021-01-09T05:00:24 | 2021-01-09T05:00:24 | 328,073,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | import mail
import socket
import json
def test_message():
print("This is a test!")
def rpi_ip_addr():
# fetch local IP address
hostname = socket.gethostname()
ip_addr = socket.gethostbyname(hostname + '.local')
with open('creds.json') as f:
creds = json.load(f)
to = creds['cell_email']
# create and send message
msg = mail.create_email('RPi IP Address', ip_addr)
mail.send_email(to, msg)
| [
"[email protected]"
] | |
49a79e42121b14197d31ef18ccd1ba06b1aecad2 | ebd6f68d47e192da7f81c528312358cfe8052c8d | /swig/Examples/python/multimap/runme.py | ad693b73a3ea62fa4bf798b1f8e6f8aad0c57500 | [
"LicenseRef-scancode-swig",
"GPL-3.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-only",
"Apache-2.0"
] | permissive | inishchith/DeepSpeech | 965ad34d69eb4d150ddf996d30d02a1b29c97d25 | dcb7c716bc794d7690d96ed40179ed1996968a41 | refs/heads/master | 2021-01-16T16:16:05.282278 | 2020-05-19T08:00:33 | 2020-05-19T08:00:33 | 243,180,319 | 1 | 0 | Apache-2.0 | 2020-02-26T05:54:51 | 2020-02-26T05:54:50 | null | UTF-8 | Python | false | false | 358 | py | # file: runme.py
import example
# Call our gcd() function
x = 42
y = 105
g = example.gcd(x, y)
print "The gcd of %d and %d is %d" % (x, y, g)
# Call the gcdmain() function
example.gcdmain(["gcdmain", "42", "105"])
# Call the count function
print example.count("Hello World", "l")
# Call the capitalize function
print example.capitalize("hello world")
| [
"[email protected]"
] | |
eff707bca66288abdae3277852cc60db7fd2c668 | f08c86d613867e8718661f908832ef37a223e661 | /skynetMaster.py | bbaa99ab7ea575403ac9ef4f811f2d3eb10d8bb4 | [] | no_license | lubintan/TAELIUM-simAndTest | b0ed04fdc0cd22d6a04cfa0632da18fae813a0b4 | 779108e4c4bf41da8abb7863300604d0d5d740f8 | refs/heads/master | 2020-12-20T02:17:17.083773 | 2020-01-27T06:07:18 | 2020-01-27T06:07:18 | 235,929,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,342 | py | import os, time, random, sys, subprocess
import login_forge, login_unforge, login_sendTx
# Acct ID, password, forging
Accounts = [['VXVQ-APCC-UHDD-7A3ZX','password1', False,'Alpha-3'],
['PGDQ-XR6X-2EAS-9H2KM','password2', False,'Alpha-3'],
['D6PA-3TCG-AXGA-BVHW4','password3', False,'Alpha-4'],
['Y4LF-K6UK-AT74-CT6JS','password4', False,'Alpha-4'],
['J4YD-K25L-6AAS-E6Y9C','password5', False,'Alpha-3'],
['D8XY-HTU7-QWM5-2YHMZ','password6', False,'Alpha-3'],
['PQMA-6CS2-XU9J-AWZY7','password7', False,'Alpha-4'],
['N7LT-FNZN-A9QJ-GPXMA','password8', False,'Alpha-4'],
['KXHS-8A7R-5FF5-5PFS6','password9', False,'Alpha-3'],
['DLMP-5UTD-LDQQ-8929X','password10', False,'Alpha-3']]
Nodes = {'Alpha-3': 'http://54.255.130.192:43250/',
'Alpha-4': 'http://52.77.209.57:43250/',
'Alpha-5': 'http://52.221.230.252:43250/',
'Alpha-6': 'http://54.255.200.180:43250/'}
def forge(account):
try:
# os.system("python login_forge.py")
# os.system(node)
# os.system(account[1])
inputString = Nodes[account[3]] + ',' + account[1]
os.system(('echo "%s" | python login_forge.py') %(inputString))
account[2] = True
finally:
pass
def unforge(account):
try:
# os.system("python login_unforge.py")
# os.system(node)
# os.system(account[1])
inputString = Nodes[account[3]] + ',' + account[1]
os.system(('echo "%s" | python login_unforge.py') % (inputString))
account[2] = False
finally:
pass
def sendTx(account, recipient):
try:
# os.system("python login_sendTx.py")
# os.system(node)
# os.system(account[1])
# os.system(recipient[0])
inputString = Nodes[account[3]] + ',' + account[1] + ',' + recipient[0]
os.system(('echo "%s" | python login_sendTx.py') % (inputString))
finally:
pass
if __name__ == '__main__':
time.sleep(int(raw_input("time in s to wait for everything to load"))) #for everything to load
# forever forgers
forge(Accounts[9])
forge(Accounts[8])
# setup
forge(Accounts[0])
forge(Accounts[1])
forge(Accounts[2])
forge(Accounts[3])
forge(Accounts[4])
forge(Accounts[5])
forge(Accounts[6])
forge(Accounts[7])
    while True:
        time.sleep(5)
        action = random.randint(0, 10)
        if action <= 3:  # start a random account forging
            acctNum = random.randint(0, 7)
            forge(Accounts[acctNum])
            print('Account', Accounts[acctNum][0], 'started forging on node', Accounts[acctNum][3])
        elif (action == 4) or (action == 5):  # stop a random account forging
            acctNum = random.randint(0, 7)
            unforge(Accounts[acctNum])
            print('Account', Accounts[acctNum][0], 'stopped forging on node', Accounts[acctNum][3])
        elif (action >= 6) and (action <= 8):  # send between two distinct accounts
            acctNum = random.randint(0, 9)
            recipient = random.randint(0, 9)
            while acctNum == recipient:
                recipient = random.randint(0, 9)
            sendTx(Accounts[acctNum], Accounts[recipient])
            print('Account', Accounts[acctNum][0], 'sending to account', Accounts[recipient][0], 'on node', Accounts[acctNum][3])
        else:  # actions 9-10: idle this tick
            print("Chilling...")
| [
"[email protected]"
] | |
1550be623ed7d429775ac1003c7bb9d02429934d | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p3BR/R1/benchmark/startPyquil359.py | 32cfc081fc66d60b254599f48e81e60a9fa712a0 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,477 | py | # qubit number=2
# total number=64
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += RX(-0.09738937226128368,2) # number=2
prog += H(1) # number=33
prog += Y(2) # number=56
prog += CZ(2,1) # number=34
prog += H(1) # number=35
prog += H(1) # number=3
prog += H(0) # number=45
prog += CNOT(2,1) # number=60
prog += CZ(1,0) # number=46
prog += H(0) # number=47
prog += Y(1) # number=15
prog += H(0) # number=61
prog += CZ(1,0) # number=62
prog += H(0) # number=63
prog += H(1) # number=19
prog += CZ(0,1) # number=20
prog += RX(-0.6000441968356504,1) # number=28
prog += H(1) # number=21
prog += H(1) # number=30
prog += CZ(0,1) # number=31
prog += H(1) # number=32
prog += H(1) # number=57
prog += CZ(0,1) # number=58
prog += H(1) # number=59
prog += CNOT(0,1) # number=51
prog += X(1) # number=52
prog += CNOT(0,1) # number=53
prog += CNOT(0,1) # number=50
prog += H(2) # number=29
prog += H(1) # number=36
prog += CZ(0,1) # number=37
prog += Y(2) # number=44
prog += H(1) # number=38
prog += Z(1) # number=55
prog += CNOT(0,1) # number=18
prog += Z(1) # number=11
prog += RX(-1.1780972450961724,2) # number=54
prog += H(1) # number=42
prog += H(0) # number=39
prog += CZ(1,0) # number=40
prog += H(0) # number=41
prog += CNOT(2,1) # number=26
prog += Y(1) # number=14
prog += CNOT(1,0) # number=5
prog += X(1) # number=6
prog += Z(1) # number=8
prog += X(1) # number=7
prog += H(2) # number=43
prog += RX(-2.42845112122491,1) # number=25
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
    qvm = get_qc('3q-qvm')  # the circuit touches qubits 0-2, so a 1-qubit QVM would reject it
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil359.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| [
"[email protected]"
] | |
8fc2af3ecc3fecf76fec30d1f6ea565083605325 | 160827c128f005bd287b36d4dc2aee1f76989d6d | /30 Days of Code/Day 5 Loops.py | 15335467c37076e20c609ea7dd6787d5dfef225b | [] | no_license | shubhamvm/HackerRack-Solutions-SHUBHAM-VERMA | 2db60d9476c9100f18ce3ec76aacbb213a648801 | 9a929fb0ed08e76caf8ced5dc4ac857f7cca1834 | refs/heads/master | 2020-07-17T17:21:54.142694 | 2020-05-06T14:14:57 | 2020-05-06T14:14:57 | 206,061,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | #!/bin/python3
import math
import os
import random
import re
import sys
if __name__ == '__main__':
n = int(input())
for _ in range(1,11):
print(n,"x",_,"=",n*_)
| [
"[email protected]"
] | |
99d434adce38a39b243cda51066ee04fa7dad5ba | 7e18c87961733e5187ab8be4340fa2080b5ec165 | /mimeapp/wsgi.py | 0c6f1bcb1c9f5b3e4aa06925cb1b2025e4333a6d | [] | no_license | KrishnKrPatel/django_mime_app | a2e1c8a37168edee1ae8a417fd8eae6fcdfbc957 | 1c2c5ad20178f37ed5702e17e618174b3e4f31ff | refs/heads/master | 2020-05-04T18:31:11.312030 | 2019-04-03T19:31:52 | 2019-04-03T19:31:52 | 179,356,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
WSGI config for mimeapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mimeapp.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
f93eb80e22199a5d8654e271242f09651dd996b6 | e08df66b27c5a2330d1fc981af5cf45c9b5fab03 | /Modules.py | d39aeffa00e1b7d9f54dd09dc9b7e70bdfe35602 | [] | no_license | SHS666638/watch-telegram | b7da11c1585bd713a43816ff2dfde32455df0354 | 0d7e814156614b4e03dd2af8b4135c5e4f6b4a72 | refs/heads/main | 2023-08-17T08:02:36.013310 | 2021-09-27T06:56:15 | 2021-09-27T06:56:15 | 410,766,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | Modules = ["Add","Change","Remove"]
Issue = ["IssueAdd","RemoveIssue"]
| [
"[email protected]"
] | |
8cb2db9e4fbb191140ead5935b7764d09bc25c1f | d1c12958b3fcaba9b4d018ade070c601904fb258 | /views.py | 25f1b1dbaec61bbad6f35b0d74f4b3d3f9215e8a | [] | no_license | cairesvs/techexhibition | ad0c66e6eddc6ecbda3a0f303ad07c9cfbb59123 | 11c2a03f439c7fdaecd4d0b9b2c56400b91605f1 | refs/heads/master | 2020-04-27T01:49:49.733427 | 2010-08-26T22:44:19 | 2010-08-26T22:44:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | from django.http import HttpResponse
import urllib.request
from django.shortcuts import render_to_response
def agendatechhtml(request):
    sock = urllib.request.urlopen("http://www.agendatech.com.br/")
agenda_html = sock.read()
sock.close()
return render_to_response("agenda.html", locals())
def index(request):
return render_to_response("canvas.html")
| [
"[email protected]"
] | |
02913e1f7cabcbcbfec2f0f02283ffde1afb094a | fc5ae3a0fc4380c7d04036c2bc320de132f7c06c | /Project1994/ScriptSearchSpotifyByYearDataFrame.py | 028148d3f1ecbc43d1fd3d442b22bfcc765eebaf | [] | no_license | noddables/Project1994 | e4588ba8ebb1ea867e8ccdf213987a2cf763f6bf | 841b292e89f3802d5772cfcc2e854057d46d8652 | refs/heads/main | 2023-07-10T02:20:19.071765 | 2021-08-19T21:45:22 | 2021-08-19T21:45:22 | 284,550,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,539 | py | '''
Created on Apr 17, 2021
@author: Charlie
'''
'''imports'''
import FunctionsSpotify as FS
import FunctionsPython
import Variables
import sys
import csv
from pandas import DataFrame
from pathlib import Path
import spotipy
from pandas import concat as Concat
'''variables'''
import Variables
consoleprint = sys.stdout
TrackDataList = []
AlreadyThereTrackIds = []
SearchSubmitCount = 0
#SearchSubmitLimit = 5
FileName = "SpotifyResultsWriteFile.csv"
FilePath = FileName
InfoFile = "TrackInfo.csv"
FeaturesFile = "TrackFeatures.csv"
WriteToFile = input(Variables.WriteToFilePrompt)
CleanWriteToFile = FunctionsPython.CleanString(WriteToFile)
PrintHeaderPrompt = "Print headers (first time printing)?"
PrintHeader = input(PrintHeaderPrompt)
CleanPrintHeader = FunctionsPython.CleanString(PrintHeader)
##
FileDf = DataFrame(columns=Variables.FileHeaders)
Token = FS.GetToken()
Headers = FS.GetHeaders(Token)
##
SearchYear = "2019"
Offset = 750
OffsetStr = str(Offset)
Limit = "50"
SearchURL = FS.GetAlbumsByYearSearchUrl(SearchYear,OffsetStr,Limit)
# print (SearchURL)
''''''
'''procedures'''
if CleanWriteToFile == "Y":
print("OK, let me check for already written Track IDs")
if Path(FilePath).is_file():
with open(FilePath,'r') as openReadFile:
reader = csv.reader(openReadFile)
for line in reader:
TrackId = line[0]
if TrackId not in AlreadyThereTrackIds:
AlreadyThereTrackIds.append(TrackId)
openReadFile.close()
sys.stdout = consoleprint
else:
pass
print("OK, done with that\nI'll start gathering and printing results now")
###
while SearchURL:
FeaturesDf = DataFrame()
InfoDf = DataFrame()
featuresList = []
infoList = []
Results = FS.SubmitSearchRequest(SearchURL, Headers)
    # This iterates over the top-level keys of the JSON response (normally
    # just "albums"), so the body effectively runs once per result page.
    for Result in Results:
        Next = Results["albums"]["next"]
print(SearchURL)
for Item in Results["albums"]["items"]:
AlbumId = Item["id"]
AlbumName = Item['name']
ArtistList = Item['artists']
ArtistsDict = ArtistList[0]
ArtistName = ArtistsDict["name"]
ArtistID = ArtistsDict["id"]
TrackIds = FS.GetAlbumTrackIDs(AlbumId)
RequestList = [track for track in TrackIds if track not in AlreadyThereTrackIds]
RequestStr = "%2C".join(RequestList)
InfoTracks = FS.GetSeveralTracksInfoResults(RequestStr)
FeatureTracks = FS.GetSeveralTracksFeaturesResults(RequestStr)
for features in FeatureTracks["audio_features"]:
featuresList.append([
features['id']
,features['acousticness']
,features['analysis_url']
,features['danceability']
,features['duration_ms']
,features['energy']
,features['instrumentalness']
,features['key']
,features['liveness']
,features['loudness']
,features['mode']
,features['speechiness']
,features['tempo']
,features['time_signature']
,features['track_href']
,features["type"]
,features["uri"]
,features['valence']
])
for info in InfoTracks["tracks"]:
infoList.append([
info["id"]
,info["name"]
,info["popularity"]
,AlbumId
,AlbumName
,ArtistID
,ArtistName
,SearchYear
])
# print(infoList)
FeaturesDf = DataFrame(featuresList,columns=Variables.FeaturesDfColumns)
FeaturesDf = FeaturesDf.set_index("TrackId")
InfoDf = DataFrame(infoList,columns=Variables.InfoDfColumns)
InfoDf = InfoDf.set_index("TrackId")
##START failing to join two dataframes:
FullDf = Concat([InfoDf,FeaturesDf], axis=1)
#print(FullDf)
#WriteDf = FullDf[Variables.FileHeaders]
# InfoDf = InfoDf.append(FeaturesDf)
# print(InfoDf)
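        # Illustrative note, not in the original script: both frames share the
        # TrackId index set just above, so an index-aligned join such as
        #     FullDf = InfoDf.join(FeaturesDf, how="inner")
        # would likely line the rows up without the two-file workaround below.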
if CleanWriteToFile == "Y":
with open(FilePath,'a') as openWriteFile:
FullDf.to_csv(openWriteFile, header = True if CleanPrintHeader == "Y" and SearchSubmitCount == 0 else False, index=True, line_terminator='\n')
##END
##START workaround: write one file for info and features
if CleanWriteToFile == "Y":
with open(InfoFile,'a') as openInfoFile:
InfoDf.to_csv(openInfoFile, header = True if CleanPrintHeader == "Y" and SearchSubmitCount == 0 else False, index=True, line_terminator='\n')
with open(FeaturesFile,"a") as openFeaturesFile:
FeaturesDf.to_csv(openFeaturesFile, header = True if CleanPrintHeader == "Y" and SearchSubmitCount == 0 else False, index=True, line_terminator='\n')
else:
sys.stdout = consoleprint
print(FeaturesDf.to_string(index = False))
AlreadyThereTrackIds += RequestList
SearchSubmitCount += 1
SearchURL = Next
# ##
sys.stdout = consoleprint
print("Yr all set for now!")
| [
"[email protected]"
] | |
145abdca9a6ac33294f07bbb01b391fd78802833 | 3bbc51ac3f124469eaa3a07c8a145ab530442152 | /trax/optimizers/rms_prop.py | d512e613edeb014da894b506d6219d6af2b8beed | [
"Apache-2.0"
] | permissive | mahmoudyusof/trax | 47d2436c620a20d20c3369bd1c319c873d5f8911 | e0b116682b5b2be724e825dbb922400e72ebaedf | refs/heads/master | 2022-11-06T06:10:47.659995 | 2020-06-26T14:47:53 | 2020-06-26T14:47:53 | 274,638,181 | 1 | 0 | Apache-2.0 | 2020-06-24T10:14:01 | 2020-06-24T10:14:00 | null | UTF-8 | Python | false | false | 1,679 | py | # coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""RMSProp optimizer class."""
from trax.math import numpy as np
from trax.optimizers import base as opt_base
class RMSProp(opt_base.Optimizer):
"""RMSProp optimizer.
Uses optimizer weights ("slots") to maintain a root-mean-square exponentially
decaying average of gradients from prior training batches.
"""
def __init__(self, learning_rate, gamma=0.9,
eps=1e-8, clip_grad_norm=None): # pylint: disable=useless-super-delegation
super(RMSProp, self).__init__(
learning_rate=learning_rate,
gamma=gamma,
eps=eps,
clip_grad_norm=clip_grad_norm
)
def init(self, weights):
return np.ones_like(weights)
def update(self, step, grads, weights, avg_sq_grad, opt_params):
del step
lr = opt_params['learning_rate']
gamma = opt_params['gamma']
eps = opt_params['eps']
avg_sq_grad = avg_sq_grad * gamma + grads**2 * (1. - gamma)
weights = weights - (lr * grads /
(np.sqrt(avg_sq_grad) + eps)).astype(weights.dtype)
return weights, avg_sq_grad
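# --- illustrative sketch, not part of the original trax module -------------
# The update() above implements the standard RMSProp recurrence:
#     v <- gamma * v + (1 - gamma) * g**2
#     w <- w - lr * g / (sqrt(v) + eps)
# A minimal standalone numpy version of one step, with assumed toy values:
if __name__ == '__main__':
  import numpy as onp
  w = onp.array([1.0, 2.0])                 # weights
  g = onp.array([0.1, -0.2])                # gradient for one batch
  v = onp.ones_like(w)                      # slot, matching init() above
  v = 0.9 * v + (1. - 0.9) * g ** 2         # decaying average of g**2
  w = w - 0.001 * g / (onp.sqrt(v) + 1e-8)  # scaled step
  print(w)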
| [
"[email protected]"
] | |
66951d3e1a276763e728e318571272229b4255b5 | c05c00f5afb3c75be1bc6b4b98cba5bbe790f009 | /jurassic/formula.py | 8316a3524132b01767f8119ccdf50dbe9b561eb4 | [] | no_license | johncowie/fossil-free-pensions | d9815fe9fe18ea27544791dabaa3487c7120ec37 | 12bf140d028e71f00dfd3943bc9733156dae2ddd | refs/heads/master | 2021-06-03T13:48:49.363247 | 2020-11-11T20:24:45 | 2020-11-11T20:24:45 | 133,798,435 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,793 | py | def fossil_amount(row, verification_column, amount_column):
rowStr = str(row)
ver_cell = verification_column + rowStr
am_cell = amount_column + rowStr
return '=IF({0}="0",0,{1})'.format(ver_cell, am_cell)
def verification(row, oil_col, coal_col, name_col):
row_str = str(row)
oil_cell = oil_col+row_str
coal_cell = coal_col+row_str
name_cell = name_col+row_str
return '=IF(OR(NOT({0}="0"),NOT({1}="0")),{2},{0})'.format(oil_cell, coal_cell, name_cell)
def verification_1col(row, col, name_col):
row_str = str(row)
col_cell = col+row_str
name_cell = col+row_str
return '=IF(NOT({0}="0"), {1}, {0})'.format(col_cell, name_cell)
def get_pool_name(pooled_row):
return pooled_row.get('Name')
def is_pooled(pooled_row):
v = pooled_row.get('Is Pooled? (Y/N)', '').strip().lower()
return v == 'y' or v == 'yes'
def pooled_match(cell_range, matches):
names_to_compare = list(map(get_pool_name, filter(is_pooled, matches)))
f = lambda name:'{0}="{1}", "yes"'.format(cell_range, name)
if len(names_to_compare) > 0:
s = ', '.join(map(f, names_to_compare))
return '=ARRAYFORMULA(IFS({0}, TRUE, "no"))'.format(s)
else:
return "no"
def pattern_match(cell_id, patterns):
f = lambda pair:'REGEXMATCH({0}, "{1}"), "{2}"'.format(cell_id, pair['pattern'], pair['name'])
s = ', '.join(map(f, patterns))
return '=ARRAYFORMULA(IFS({0}, TRUE, "0"))'.format(s)
def largest_value(worksheet_name, column, rank):
return "=LARGE('{0}'!{1}:{1},{2})".format(worksheet_name, column, rank)
def largest_value_name(worksheet_name, val_column, name_column, rank):
return "=INDEX('{0}'!{2}:{2},MATCH(LARGE('{0}'!{1}:{1},{3}),'{0}'!{1}:{1},0))".format(worksheet_name, val_column, name_column, rank)
| [
"[email protected]"
] | |
b610280d2b747c37d31f155cbaae3e6f77306465 | 7848107497ddbc6fcbe0310b7232ad396aa0053a | /indexer0508.py | d7b9cce17047ee35b4edc8791ea502eba8d2a7aa | [] | no_license | aswaterspy/1004_draft | ff8babe814df3a4b5d81dca6f8933dfc57141060 | 429f1cf7b5eab13608991e8c91d2a9d34525a06c | refs/heads/master | 2020-05-19T09:52:58.569128 | 2019-05-18T10:05:07 | 2019-05-18T10:05:07 | 184,958,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,392 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# We need sys to get the command line arguments
import sys
# And pyspark.sql to get the spark session
from pyspark.sql import SparkSession
from pyspark.ml.feature import StringIndexer
from pyspark.ml import Pipeline
# TODO: you may need to add imports here
def main(spark, data_file):
    '''Index string user and track ids into numeric columns.
    Parameters
    ----------
    spark : SparkSession object
    data_file : string, path to the parquet file to load
    '''
df = spark.read.parquet(data_file)
#df = df.sample(0.0001)
indexer_id = StringIndexer(inputCol="user_id", outputCol="userindex").setHandleInvalid("skip")
df = indexer_id.fit(df).transform(df)
indexer_item = StringIndexer(inputCol="track_id", outputCol="itemindex").setHandleInvalid("skip")
df = indexer_item.fit(df).transform(df)
df.select("userindex", "count", "itemindex").write.parquet("training_data.parquet")
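# Illustrative launch command (the input path is an assumption, not from the
# original):
#   spark-submit indexer0508.py hdfs:///path/to/interactions.parquet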
# Only enter this block if we're in main
if __name__ == "__main__":
# Create the spark session object
spark = SparkSession.builder.appName('string_conversion').getOrCreate()
# And the location to store the trained model
data_file = sys.argv[1]
# Call our main routine
main(spark, data_file) | [
"[email protected]"
] | |
03edf0f426947684c73c567134cc308f9ce45c98 | f5eccc037a27393e8e6089ab6874a747ae7bac8d | /sub4sub.py | f040b4482e37d32a7989bf43ab7187754c9f51d4 | [] | no_license | etz/reddit-py | 0b1e0ac80789048344864aaf6d2c24edeae3d430 | 564c4cf147c061af4776245b19c474301433393a | refs/heads/master | 2020-04-27T10:49:11.571676 | 2019-03-07T04:19:55 | 2019-03-07T04:19:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | #!/usr/bin/python
import praw
import time
reddit = praw.Reddit('bot1')
title = 'Sub4Sub, Playlist4Playlist, Comment4Comment'
post = '#Subscribe to my channel here: [My Channel](http://href.li/?https://bit.ly/2XrbqQd). \n\n Go to any video, leave a comment, and then Subscribe. \n\nI also am willing to watch your playlists in exchange for mine, comment here with your playlist URL after starting mine.\n\n I\'ll comment on one of your videos as soon as I see it!'
while True:
reddit.subreddit('sub4sub').submit(title, selftext=post)
time.sleep(5)
for submission in reddit.subreddit('sub4sub').new(limit=1):
thread = reddit.submission(id=submission.id)
    time.sleep(10800)  # leave the post up for 3 hours before deleting it
thread.delete()
time.sleep(10)
| [
"[email protected]"
] | |
95ff37b4531a2c86139c939329b78fe467b45031 | 0773f0d4cb4b6e48ad93cc63612324bf34aacedf | /misc/set-debate-schedule.py | d74361d3cb5f39c1ba8c030a59dd99f566f34f0b | [
"LicenseRef-scancode-other-permissive",
"MIT"
] | permissive | andrewreece/gauging-debate | deee32e04dab79a01b6d7336710f8132f154d507 | adcbf6f6e40b8eafa92d81f1adcb68864cf0a226 | refs/heads/master | 2020-04-01T13:54:26.174535 | 2015-12-11T07:41:16 | 2015-12-11T07:41:16 | 45,829,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,483 | py | import requests, json, re
from bs4 import BeautifulSoup as bs
from time import strptime
import numpy as np
path = "/home/dharmahound/analytics.andrewgarrettreece.com/data/"
url = "https://www.washingtonpost.com/graphics/politics/2016-election/debates/schedule/"
html = requests.get(url).text
soup = bs(html, "html.parser")
events = [['party','date','time','datetime']]
items = soup.find_all(class_="debate-schedule-item")
for item in items:
party = item.find(class_="party").string.split(" ")[0]
date = re.sub("\\.|,","",item.find(class_="date").string)
time_str = item.find("span", class_="label", text=re.compile("Time"))
if time_str:
rgx = re.search("(\d{1,2}\\:?(\d{2})?)\sp\\.m\\.",time_str.next_sibling)
        if rgx:
            time = rgx.group(1)
            if len(time) <= 2:
                hr, mins = str(int(time) + 12), "00"  # hour only; debates are in the evening
            else:
                hr, mins = time.split(":")
                hr = str(int(hr) + 12)  # always at night
            time = hr + ":" + mins  # hr/mins now defined for the timestamp below
mon, day, yr = date.split(" ")
mon = str(strptime(mon[:3],'%b').tm_mon)
if len(mon) == 1:
mon = "0"+mon
if len(day) == 1:
day = "0"+day
timestamp = yr+'-'+mon+'-'+day+"T"+hr+":"+mins+":00"
events.append( [party,date,time,timestamp] )
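    # Shape of one appended row (values illustrative, not scraped output):
    #   ['Republican', 'Sept 16 2015', '20:00', '2015-09-16T20:00:00']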
np.savetxt(path+"events.csv",events,fmt='"%s"',delimiter=",") # inner double quotes make strings quoted in csv | [
"[email protected]"
] | |
9835869a7e76a91dcb0501aba74c04359ac415ba | 7b361fb51c83fb1988046b62d7a0af93b4bd8a30 | /fudge/__doc__.py | 442e770ca0f545558742091396540eb059f640b3 | [
"BSD-3-Clause"
] | permissive | paulromano/fudge | 162ab067bb1b1dd1329ca0923de944b5fcfb44a8 | 6348d506de8306138ca654aa66f4fb81b790e7da | refs/heads/master | 2020-04-10T14:57:42.138711 | 2016-05-13T02:07:05 | 2016-05-13T02:07:05 | 32,029,193 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,529 | py | # <<BEGIN-copyright>>
# Copyright (c) 2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
# Written by the LLNL Nuclear Data and Theory group
# (email: [email protected])
# LLNL-CODE-683960.
# All rights reserved.
#
# This file is part of the FUDGE package (For Updating Data and
# Generating Evaluations)
#
# When citing FUDGE, please use the following reference:
# C.M. Mattoon, B.R. Beck, N.R. Patel, N.C. Summers, G.W. Hedstrom, D.A. Brown, "Generalized Nuclear Data: A New Structure (with Supporting Infrastructure) for Handling Nuclear Data", Nuclear Data Sheets, Volume 113, Issue 12, December 2012, Pages 3145-3171, ISSN 0090-3752, http://dx.doi.org/10. 1016/j.nds.2012.11.008
#
#
# Please also read this link - Our Notice and Modified BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the disclaimer below.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the disclaimer (as noted below) in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of LLNS/LLNL nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC,
# THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# Additional BSD Notice
#
# 1. This notice is required to be provided under our contract with the U.S.
# Department of Energy (DOE). This work was produced at Lawrence Livermore
# National Laboratory under Contract No. DE-AC52-07NA27344 with the DOE.
#
# 2. Neither the United States Government nor Lawrence Livermore National Security,
# LLC nor any of their employees, makes any warranty, express or implied, or assumes
# any liability or responsibility for the accuracy, completeness, or usefulness of any
# information, apparatus, product, or process disclosed, or represents that its use
# would not infringe privately-owned rights.
#
# 3. Also, reference herein to any specific commercial products, process, or services
# by trade name, trademark, manufacturer or otherwise does not necessarily constitute
# or imply its endorsement, recommendation, or favoring by the United States Government
# or Lawrence Livermore National Security, LLC. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the United States Government or
# Lawrence Livermore National Security, LLC, and shall not be used for advertising or
# product endorsement purposes.
#
# <<END-copyright>>
"""
Fudge is a python package which allows one to view, plot and modify LLNL's evaluated nuclear data, and use LLNL's processing codes
that convert the evaluated data into processed data. Processed data is evaluated nuclear transport data that has been converted into a format
used by the deterministic (libndf.a) and Monte Carlo (MCAPM) transport libraries. The deterministic library reads data
from the ndf files (ndf1, ndf2, etc.) and the Monte Carlo library reads data from the mcf files (mcf1.pdb, mcf2.pdb, etc.).
Getting at fudge's python scripts.
==================================
To use fudge, one must add the location of the fudge scripts to the environment variable PYTHONPATH. (PYTHONPATH is the environment variable
used by python when searching for imported python modules (files).) On LLNL's computing system (LC) this would look something like,
export PYTHONPATH=$PYTHONPATH:/usr/apps/fudge/current/Src
for the bash shell.
Alternatively, one can add the following lines near the top of a python script (or type them at the prompt)
>>> import sys
>>> sys.path.append( "/usr/apps/fudge/current/Src" )
Other environment variables.
============================
Besides PYTHONPATH, there are four other environment variables used by fudge. They are::
FUDGEPATH Where fudge expects all platform dependent (e.g., binary) files to be.
ENDLPATH If defined, this is used to initialize fudgeDefaults.ENDL_DATABASE_DIR
MCFPATH If defined, this is used to initialize fudgeDefaults.MCF_DATABASE_DIR
NDFPATH If defined, this is used to initialize fudgeDefaults.NDF_DATABASE_DIR
If the environment variable FUDGEPATH is not set, fudge sets it to fudgeDefaults.DefaultFudgePath.
For more information on ENDL_DATABASE_DIR, MCF_DATABASE_DIR and NDF_DATABASE_DIR see the documentation
for the fudgeDefaults module.
Thus, there are three ways to set the variables fudge uses to search for files.
1) Do nothing and fudge will use the variables in the module fudgeDefaults.
2) Set the appropriate environment variable (i.e., FUDGEPATH, ENDLPATH, MCFPATH or NDFPATH).
3) Set variables in the fudgeDefaults module. For example,
>>> import fudgeDefaults
>>> fudgeDefaults.NDF_DATABASE_DIR = "/my/personal/database/processed"
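For option 2, using the bash shell, this might look like (the path here is only illustrative),

export NDFPATH=/my/personal/database/processed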
Reading fudge's documentation.
==============================
In general, one should first instantiate an endlProject class object and work from it. For example the beginning of a fudge session may look like,
>>> from fudge import *
>>> project = endlProject( database = "endl99", workDir = "tmp" )
It is therefore important to read the documentation on the module endlProject; and in particular, the class endlProject.
Also, see the module fudgeDefaults for default locations fudge searches to find evaluated and processed data files,
and where it searches to find platform dependent files (e.g., executable files).
"""
| [
"[email protected]"
] | |
fb8f49dbd29e5310bf2b77ca3cfeac9f02c762fb | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/compute/azure-mgmt-vmwarecloudsimple/generated_samples/get_virtual_machine_template.py | 26376519ca4d9d3dc1487e0032358173a7762aba | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,620 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.vmwarecloudsimple import VMwareCloudSimple
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-vmwarecloudsimple
# USAGE
python get_virtual_machine_template.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = VMwareCloudSimple(
credential=DefaultAzureCredential(),
subscription_id="{subscription-id}",
)
response = client.virtual_machine_templates.get(
region_id="westus2",
pc_name="myPrivateCloud",
virtual_machine_template_name="vm-34",
)
print(response)
# x-ms-original-file: specification/vmwarecloudsimple/resource-manager/Microsoft.VMwareCloudSimple/stable/2019-04-01/examples/GetVirtualMachineTemplate.json
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
c0bac5a2e8b61047da3374fa5883d06bf2185a39 | 084159ecc7959a5d219c7873d483fe5aaf4129c9 | /civilian.py | 60aae57fdd700c9f90c26c585c84ec0c1812c51a | [] | no_license | uzo-o/bts-mafia | 5aa1dfd44f448781e1dae101797d91b665f93123 | 87f5f38d2b33d1d426bff63e1ac96aa4005d53ef | refs/heads/master | 2021-11-09T06:10:38.788008 | 2021-10-09T18:57:27 | 2021-10-09T18:57:27 | 203,500,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,629 | py | """
name: civilian.py
author: Uzo Ukekwe
version: python 3.8
purpose: play the outcome where the user is a civilian
"""
import random
import gameplay_utilities as gameplay
def play_game(username, town):
"""
Play Mafia as a civilian
:param username: name user sees when chatting with players
:param town: name of the town players live in
"""
user_role = "civilian"
role_assignments = gameplay.assign_roles(user_role)
# list of people still in game
live_players = gameplay.player_names
random.shuffle(live_players)
# first round
gameplay.print_morning_intro(1, town, user_role)
gameplay.run_dialogue(gameplay.get_civilian_dialogue_1(live_players),
True, username, live_players)
gameplay.vote_on_kill(live_players, username, role_assignments)
gameplay.nighttime(user_role, live_players, username, role_assignments, 1)
# second round
gameplay.print_morning_intro(2, town, user_role)
gameplay.run_dialogue(gameplay.get_civilian_dialogue_2(live_players, username),
True, username, live_players)
gameplay.vote_on_kill(live_players, username, role_assignments)
victim_2, dummy = gameplay.nighttime(user_role, live_players, username, role_assignments, 2)
# third round
gameplay.print_morning_intro(3, town, user_role)
gameplay.run_dialogue(gameplay.get_civilian_dialogue_3(live_players, username, victim_2),
True, username, live_players)
gameplay.vote_on_kill(live_players, username, role_assignments)
gameplay.game_over(live_players, role_assignments) | [
"[email protected]"
] | |
eec5dce0c672d1f88ac2cf7611257d83b2396c88 | a4e3017008ac70fcfafa53c816cdc6e55adb2433 | /Week_01/66_plus-one.py | fd368d5672728ee2dcad7f7cc21d27c29b442b35 | [] | no_license | gaofubao/algorithm019 | e380975c90146e7878256e27ffc9ee74044b693b | bbd422f62d83aa7c9e6a0fcc760b265de0574594 | refs/heads/main | 2023-01-31T19:16:52.964967 | 2020-12-14T12:45:00 | 2020-12-14T12:45:00 | 311,036,130 | 0 | 0 | null | 2020-11-08T10:22:20 | 2020-11-08T10:22:20 | null | UTF-8 | Python | false | false | 746 | py | # 给定一个由 整数 组成的 非空 数组所表示的非负整数,在该数的基础上加一。
# The most significant digit sits at the head of the array, and each element stores a single digit.
# You may assume the integer has no leading zeros, except for the number 0 itself.
# https://leetcode-cn.com/problems/plus-one/
class Solution1:
def plusOne(self, digits):
for i in range(len(digits)-1, -1, -1):
digits[i] += 1
digits[i] %= 10
if digits[i]:
return digits
digits = [0 for _ in range(len(digits) + 1)]
digits[0] = 1
return digits
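# Illustrative trace, not in the original: plusOne([1, 2, 9])
#   i=2: digits[2] -> 10 -> 0, which is falsy, so the carry keeps moving left
#   i=1: digits[1] -> 3, truthy -> return [1, 3, 0]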
if __name__ == "__main__":
digits = [1,2,3]
s1 = Solution1()
result = s1.plusOne(digits)
print(result)
| [
"[email protected]"
] | |
e688996d5b37746fe73243532e28f94b83ffb56f | db3bab9f34290b4a59258414fdc752ca81b1ccc5 | /ptvs_virtualenv_proxy.py | 20a02b17338cf1d49e8682bc0ef4f5ccd61fd4df | [] | no_license | wdstorer/eve-api01 | af5d7428efe4d4035840396368e5990b27d0cd69 | 78086f72a092c0495fd9eca23c0b905983f42ab1 | refs/heads/master | 2020-04-06T04:55:27.054771 | 2015-08-02T03:37:58 | 2015-08-02T03:37:58 | 39,300,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,038 | py | # ############################################################################
#
# Copyright (c) Microsoft Corporation.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
# ###########################################################################
import datetime
import os
import sys
if sys.version_info[0] == 3:
def to_str(value):
return value.decode(sys.getfilesystemencoding())
def execfile(path, global_dict):
"""Execute a file"""
with open(path, 'r') as f:
code = f.read()
code = code.replace('\r\n', '\n') + '\n'
exec(code, global_dict)
else:
def to_str(value):
return value.encode(sys.getfilesystemencoding())
def log(txt):
"""Logs fatal errors to a log file if WSGI_LOG env var is defined"""
log_file = os.environ.get('WSGI_LOG')
if log_file:
f = open(log_file, 'a+')
try:
f.write('%s: %s' % (datetime.datetime.now(), txt))
finally:
f.close()
ptvsd_secret = os.getenv('WSGI_PTVSD_SECRET')
if ptvsd_secret:
log('Enabling ptvsd ...\n')
try:
import ptvsd
try:
ptvsd.enable_attach(ptvsd_secret)
log('ptvsd enabled.\n')
except:
log('ptvsd.enable_attach failed\n')
except ImportError:
        log('error importing ptvsd.\n')
def get_wsgi_handler(handler_name):
if not handler_name:
raise Exception('WSGI_HANDLER env var must be set')
if not isinstance(handler_name, str):
handler_name = to_str(handler_name)
module_name, _, callable_name = handler_name.rpartition('.')
should_call = callable_name.endswith('()')
callable_name = callable_name[:-2] if should_call else callable_name
name_list = [(callable_name, should_call)]
handler = None
while module_name:
try:
handler = __import__(module_name, fromlist=[name_list[0][0]])
for name, should_call in name_list:
handler = getattr(handler, name)
if should_call:
handler = handler()
break
except ImportError:
module_name, _, callable_name = module_name.rpartition('.')
should_call = callable_name.endswith('()')
callable_name = callable_name[:-2] if should_call else callable_name
name_list.insert(0, (callable_name, should_call))
handler = None
if handler is None:
raise ValueError('"%s" could not be imported' % handler_name)
return handler
activate_this = os.getenv('WSGI_ALT_VIRTUALENV_ACTIVATE_THIS')
if not activate_this:
raise Exception('WSGI_ALT_VIRTUALENV_ACTIVATE_THIS is not set')
def get_virtualenv_handler():
log('Activating virtualenv with %s\n' % activate_this)
execfile(activate_this, dict(__file__=activate_this))
log('Getting handler %s\n' % os.getenv('WSGI_ALT_VIRTUALENV_HANDLER'))
handler = get_wsgi_handler(os.getenv('WSGI_ALT_VIRTUALENV_HANDLER'))
log('Got handler: %r\n' % handler)
return handler
def get_venv_handler():
log('Activating venv with executable at %s\n' % activate_this)
import site
sys.executable = activate_this
old_sys_path, sys.path = sys.path, []
site.main()
sys.path.insert(0, '')
for item in old_sys_path:
if item not in sys.path:
sys.path.append(item)
log('Getting handler %s\n' % os.getenv('WSGI_ALT_VIRTUALENV_HANDLER'))
handler = get_wsgi_handler(os.getenv('WSGI_ALT_VIRTUALENV_HANDLER'))
log('Got handler: %r\n' % handler)
return handler
| [
"[email protected]"
] | |
2c7f26845222711a56b70ea9e55bf9682877c304 | 9d5d85fc8864cf4b20a03b1b45b51fdc8bcd351d | /livecoding/2019-04-29/server.py | 74c3a19b101012442a2d91e2d445f0d4eefd2e61 | [] | no_license | IsaacCasDen/CS397_Coursework | 5fe04093050e71fe8eb835bfd5dc9fd812612ef0 | 8808abd0b64d9796bb8bbb21447534bc716b17ca | refs/heads/master | 2020-04-18T10:24:10.206389 | 2019-05-17T16:52:05 | 2019-05-17T16:52:05 | 167,466,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py |
import socket
import signal
from threading import Thread, ThreadError
def fx(signum, event):
print('sucker')
signal.signal(signal.SIGINT, fx)
signal.pause()
def run(args):
try:
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((args['host'], args['port']))
s.listen()
conn, addr = s.accept()
print("connected to:",addr)
data = conn.recv(1024)
print(data.decode())
conn.send('OK\n'.encode())
conn.close()
except Exception as err:
print(err)
if __name__ == '__main__':
try:
t = Thread(target=run, args={'host':'127.0.0.1','port':54321})
t.start()
except Exception as err:
print(err) | [
"[email protected]"
] | |
937ccb77d8b56dd675a42befdd43c354d38f5f08 | 7fb87945b77d3adaedd8a155c981e97946734e41 | /firewall/server/config.py | f9b19329b108ff5b45e17823d0ab8b13deb90369 | [] | no_license | Tony910517/openstack | 916b36368ea9f17958e4eb04bd1f9daf3aba9213 | 4c1380a03c37e7950dcf2bba794e75b7e4a8dfd0 | refs/heads/master | 2020-05-20T01:05:22.499224 | 2019-05-07T01:11:05 | 2019-05-07T01:11:05 | 185,292,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,066 | py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2012 Red Hat, Inc.
#
# Authors:
# Thomas Woerner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# force use of pygobject3 in python-slip
from gi.repository import GObject
import sys
sys.modules['gobject'] = GObject
import dbus
import dbus.service
import slip.dbus
import slip.dbus.service
from firewall.config import *
from firewall.config.dbus import *
from firewall.core.watcher import Watcher
from firewall.core.logger import log
from firewall.server.decorators import *
from firewall.server.config_icmptype import FirewallDConfigIcmpType
from firewall.server.config_service import FirewallDConfigService
from firewall.server.config_zone import FirewallDConfigZone
from firewall.core.io.zone import Zone
from firewall.core.io.service import Service
from firewall.core.io.icmptype import IcmpType
from firewall.core.io.lockdown_whitelist import LockdownWhitelist
from firewall.core.io.direct import Direct
from firewall.dbus_utils import dbus_to_python, \
command_of_sender, context_of_sender, uid_of_sender, user_of_uid
from firewall.errors import *
############################################################################
#
# class FirewallDConfig
#
############################################################################
class FirewallDConfig(slip.dbus.service.Object):
"""FirewallD main class"""
persistent = True
""" Make FirewallD persistent. """
default_polkit_auth_required = PK_ACTION_CONFIG
""" Use PK_ACTION_INFO as a default """
@handle_exceptions
def __init__(self, config, *args, **kwargs):
super(FirewallDConfig, self).__init__(*args, **kwargs)
self.config = config
self.path = args[0]
self._init_vars()
self.watcher = Watcher(self.watch_updater, 5)
self.watcher.add_watch_dir(FIREWALLD_ICMPTYPES)
self.watcher.add_watch_dir(ETC_FIREWALLD_ICMPTYPES)
self.watcher.add_watch_dir(FIREWALLD_SERVICES)
self.watcher.add_watch_dir(ETC_FIREWALLD_SERVICES)
self.watcher.add_watch_dir(FIREWALLD_ZONES)
self.watcher.add_watch_dir(ETC_FIREWALLD_ZONES)
self.watcher.add_watch_file(LOCKDOWN_WHITELIST)
self.watcher.add_watch_file(FIREWALLD_DIRECT)
@handle_exceptions
def _init_vars(self):
self.icmptypes = [ ]
self.icmptype_idx = 0
self.services = [ ]
self.service_idx = 0
self.zones = [ ]
self.zone_idx = 0
for icmptype in self.config.get_icmptypes():
self._addIcmpType(self.config.get_icmptype(icmptype))
for service in self.config.get_services():
self._addService(self.config.get_service(service))
for zone in self.config.get_zones():
self._addZone(self.config.get_zone(zone))
@handle_exceptions
def __del__(self):
pass
@handle_exceptions
def reload(self):
while len(self.icmptypes) > 0:
x = self.icmptypes.pop()
x.unregister()
del x
while len(self.services) > 0:
x = self.services.pop()
x.unregister()
del x
while len(self.zones) > 0:
x = self.zones.pop()
x.unregister()
del x
self._init_vars()
@handle_exceptions
def watch_updater(self, name):
if not name.endswith(".xml"):
raise FirewallError(INVALID_FILENAME, name)
if name.startswith(FIREWALLD_ICMPTYPES) or \
name.startswith(ETC_FIREWALLD_ICMPTYPES):
(what, obj) = self.config.update_icmptype_from_path(name)
if what == "new":
self._addIcmpType(obj)
elif what == "remove":
self.removeIcmpType(obj)
elif what == "update":
self._updateIcmpType(obj)
elif name.startswith(FIREWALLD_SERVICES) or \
name.startswith(ETC_FIREWALLD_SERVICES):
(what, obj) = self.config.update_service_from_path(name)
if what == "new":
self._addService(obj)
elif what == "remove":
self.removeService(obj)
elif what == "update":
self._updateService(obj)
elif name.startswith(FIREWALLD_ZONES) or \
name.startswith(ETC_FIREWALLD_ZONES):
(what, obj) = self.config.update_zone_from_path(name)
if what == "new":
self._addZone(obj)
elif what == "remove":
self.removeZone(obj)
elif what == "update":
self._updateZone(obj)
elif name == LOCKDOWN_WHITELIST:
self.config.update_lockdown_whitelist()
self.LockdownWhitelistUpdated()
elif name == FIREWALLD_DIRECT:
self.config.update_direct()
self.Updated()
@handle_exceptions
def _addIcmpType(self, obj):
# TODO: check for idx overflow
config_icmptype = FirewallDConfigIcmpType(self, \
self.config, obj, self.icmptype_idx, self.path,
"%s/%d" % (DBUS_PATH_CONFIG_ICMPTYPE, self.icmptype_idx))
self.icmptypes.append(config_icmptype)
self.icmptype_idx += 1
self.IcmpTypeAdded(obj.name)
return config_icmptype
@handle_exceptions
def _updateIcmpType(self, obj):
for icmptype in self.icmptypes:
if icmptype.obj.name == obj.name and \
icmptype.obj.path == obj.path and \
icmptype.obj.filename == obj.filename:
icmptype.obj = obj
icmptype.Updated(obj.name)
@handle_exceptions
def removeIcmpType(self, obj):
index = 7 # see IMPORT_EXPORT_STRUCTURE in class Zone(IO_Object)
for zone in self.zones:
settings = zone.getSettings()
# if this IcmpType is used in a zone remove it from that zone first
if obj.name in settings[index]:
settings[index].remove(obj.name)
zone.obj = self.config.set_zone_config(zone.obj, settings)
zone.Updated(zone.obj.name)
for icmptype in self.icmptypes:
if icmptype.obj == obj:
icmptype.Removed(obj.name)
icmptype.unregister()
self.icmptypes.remove(icmptype)
del icmptype
@handle_exceptions
def _addService(self, obj):
# TODO: check for idx overflow
config_service = FirewallDConfigService(self, \
self.config, obj, self.service_idx, self.path,
"%s/%d" % (DBUS_PATH_CONFIG_SERVICE, self.service_idx))
self.services.append(config_service)
self.service_idx += 1
self.ServiceAdded(obj.name)
return config_service
@handle_exceptions
def _updateService(self, obj):
for service in self.services:
if service.obj.name == obj.name and \
service.obj.path == obj.path and \
service.obj.filename == obj.filename:
service.obj = obj
service.Updated(obj.name)
@handle_exceptions
def removeService(self, obj):
index = 5 # see IMPORT_EXPORT_STRUCTURE in class Zone(IO_Object)
for zone in self.zones:
settings = zone.getSettings()
# if this Service is used in a zone remove it from that zone first
if obj.name in settings[index]:
settings[index].remove(obj.name)
zone.obj = self.config.set_zone_config(zone.obj, settings)
zone.Updated(zone.obj.name)
for service in self.services:
if service.obj == obj:
service.Removed(obj.name)
service.unregister()
self.services.remove(service)
del service
@handle_exceptions
def _addZone(self, obj):
# TODO: check for idx overflow
config_zone = FirewallDConfigZone(self, \
self.config, obj, self.zone_idx, self.path,
"%s/%d" % (DBUS_PATH_CONFIG_ZONE, self.zone_idx))
self.zones.append(config_zone)
self.zone_idx += 1
self.ZoneAdded(obj.name)
return config_zone
@handle_exceptions
def _updateZone(self, obj):
for zone in self.zones:
if zone.obj.name == obj.name and zone.obj.path == obj.path and \
zone.obj.filename == obj.filename:
zone.obj = obj
zone.Updated(obj.name)
@handle_exceptions
def removeZone(self, obj):
for zone in self.zones:
if zone.obj == obj:
zone.Removed(obj.name)
zone.unregister()
self.zones.remove(zone)
del zone
# access check
@dbus_handle_exceptions
def accessCheck(self, sender):
if self.config.lockdown_enabled():
            if sender is None:
log.error("Lockdown not possible, sender not set.")
return
bus = dbus.SystemBus()
context = context_of_sender(bus, sender)
if self.config.access_check("context", context):
return
uid = uid_of_sender(bus, sender)
if self.config.access_check("uid", uid):
return
user = user_of_uid(uid)
if self.config.access_check("user", user):
return
command = command_of_sender(bus, sender)
if self.config.access_check("command", command):
return
raise FirewallError(ACCESS_DENIED, "lockdown is enabled")
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# P R O P E R T I E S
@dbus_handle_exceptions
def _get_property(self, prop):
if prop in [ "DefaultZone", "MinimalMark", "CleanupOnExit",
"Lockdown" ]:
            value = self.config.get_firewalld_conf().get(prop)
            if value is not None:
                if prop == "MinimalMark":
                    value = int(value)
                return value
if prop == "DefaultZone":
return FALLBACK_ZONE
elif prop == "MinimalMark":
return FALLBACK_MINIMAL_MARK
elif prop == "CleanupOnExit":
return "yes"
elif prop == "Lockdown":
return "no"
else:
raise dbus.exceptions.DBusException(
"org.freedesktop.DBus.Error.AccessDenied: "
"Property '%s' isn't exported (or may not exist)" % prop)
@dbus_service_method(dbus.PROPERTIES_IFACE, in_signature='ss',
out_signature='v')
@dbus_handle_exceptions
def Get(self, interface_name, property_name, sender=None):
# get a property
interface_name = dbus_to_python(interface_name)
property_name = dbus_to_python(property_name)
log.debug1("config.Get('%s', '%s')", interface_name, property_name)
if interface_name != DBUS_INTERFACE_CONFIG:
raise dbus.exceptions.DBusException(
"org.freedesktop.DBus.Error.UnknownInterface: "
"FirewallD does not implement %s" % interface_name)
return self._get_property(property_name)
@dbus_service_method(dbus.PROPERTIES_IFACE, in_signature='s',
out_signature='a{sv}')
@dbus_handle_exceptions
def GetAll(self, interface_name, sender=None):
interface_name = dbus_to_python(interface_name)
log.debug1("config.GetAll('%s')", interface_name)
if interface_name != DBUS_INTERFACE_CONFIG:
raise dbus.exceptions.DBusException(
"org.freedesktop.DBus.Error.UnknownInterface: "
"FirewallD does not implement %s" % interface_name)
return {
'DefaultZone': self._get_property("DefaultZone"),
'MinimalMark': self._get_property("MinimalMark"),
'CleanupOnExit': self._get_property("CleanupOnExit"),
'Lockdown': self._get_property("Lockdown"),
}
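    # Illustrative client-side read of one of these properties (not part of
    # this module; bus/object names are assumptions based on the imports):
    #   import dbus
    #   bus = dbus.SystemBus()
    #   obj = bus.get_object(DBUS_INTERFACE, DBUS_PATH_CONFIG)
    #   props = dbus.Interface(obj, dbus.PROPERTIES_IFACE)
    #   print(props.Get(DBUS_INTERFACE_CONFIG, "DefaultZone"))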
@slip.dbus.polkit.require_auth(PK_ACTION_CONFIG)
@dbus_service_method(dbus.PROPERTIES_IFACE, in_signature='ssv')
@dbus_handle_exceptions
def Set(self, interface_name, property_name, new_value, sender=None):
interface_name = dbus_to_python(interface_name)
property_name = dbus_to_python(property_name)
new_value = dbus_to_python(new_value)
log.debug1("config.Set('%s', '%s', '%s')", interface_name,
property_name, new_value)
self.accessCheck(sender)
if interface_name != DBUS_INTERFACE_CONFIG:
raise dbus.exceptions.DBusException(
"org.freedesktop.DBus.Error.UnknownInterface: "
"FirewallD does not implement %s" % interface_name)
if property_name in [ "MinimalMark", "CleanupOnExit", "Lockdown" ]:
if property_name == "MinimalMark":
                try:
                    int(new_value)  # validate that the mark parses as an integer
                except ValueError:
                    raise FirewallError(INVALID_MARK, new_value)
            try:
                new_value = str(new_value)
            except Exception:
                raise FirewallError(INVALID_VALUE, "'%s' for %s" % \
                                    (new_value, property_name))
if property_name in [ "CleanupOnExit", "Lockdown" ]:
if new_value.lower() not in [ "yes", "no", "true", "false" ]:
raise FirewallError(INVALID_VALUE, "'%s' for %s" % \
(new_value, property_name))
self.config.get_firewalld_conf().set(property_name, new_value)
self.config.get_firewalld_conf().write()
self.PropertiesChanged(interface_name,
{ property_name: new_value }, [ ])
elif property_name in [ "DefaultZone" ]:
raise dbus.exceptions.DBusException(
"org.freedesktop.DBus.Error.PropertyReadOnly: "
"Property '%s' is read-only" % property_name)
else:
raise dbus.exceptions.DBusException(
"org.freedesktop.DBus.Error.AccessDenied: "
"Property '%s' does not exist" % property_name)
@dbus.service.signal(dbus.PROPERTIES_IFACE, signature='sa{sv}as')
def PropertiesChanged(self, interface_name, changed_properties,
invalidated_properties):
log.debug1("config.PropertiesChanged('%s', '%s', '%s')", interface_name,
changed_properties, invalidated_properties)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# policies
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES,
out_signature=LockdownWhitelist.DBUS_SIGNATURE)
@dbus_handle_exceptions
def getLockdownWhitelist(self, sender=None):
log.debug1("config.policies.getLockdownWhitelist()")
return self.config.get_policies().lockdown_whitelist.export_config()
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES,
in_signature=LockdownWhitelist.DBUS_SIGNATURE)
@dbus_handle_exceptions
def setLockdownWhitelist(self, settings, sender=None):
log.debug1("config.policies.setLockdownWhitelist(...)")
settings = dbus_to_python(settings)
self.config.get_policies().lockdown_whitelist.import_config(settings)
self.config.get_policies().lockdown_whitelist.write()
self.LockdownWhitelistUpdated()
@dbus.service.signal(DBUS_INTERFACE_CONFIG_POLICIES)
@dbus_handle_exceptions
def LockdownWhitelistUpdated(self):
log.debug1("config.policies.LockdownWhitelistUpdated()")
# command
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, in_signature='s')
@dbus_handle_exceptions
def addLockdownWhitelistCommand(self, command, sender=None):
command = dbus_to_python(command)
log.debug1("config.policies.addLockdownWhitelistCommand('%s')", command)
self.accessCheck(sender)
settings = list(self.getLockdownWhitelist())
if command in settings[0]:
raise FirewallError(ALREADY_ENABLED, command)
settings[0].append(command)
self.setLockdownWhitelist(settings)
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, in_signature='s')
@dbus_handle_exceptions
def removeLockdownWhitelistCommand(self, command, sender=None):
command = dbus_to_python(command)
log.debug1("config.policies.removeLockdownWhitelistCommand('%s')", command)
self.accessCheck(sender)
settings = list(self.getLockdownWhitelist())
if command not in settings[0]:
raise FirewallError(NOT_ENABLED, command)
settings[0].remove(command)
self.setLockdownWhitelist(settings)
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, in_signature='s',
out_signature='b')
@dbus_handle_exceptions
def queryLockdownWhitelistCommand(self, command, sender=None):
command = dbus_to_python(command)
log.debug1("config.policies.queryLockdownWhitelistCommand('%s')", command)
return command in self.getLockdownWhitelist()[0]
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, out_signature='as')
@dbus_handle_exceptions
def getLockdownWhitelistCommands(self, sender=None):
log.debug1("config.policies.getLockdownWhitelistCommands()")
return self.getLockdownWhitelist()[0]
# context
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, in_signature='s')
@dbus_handle_exceptions
def addLockdownWhitelistContext(self, context, sender=None):
context = dbus_to_python(context)
log.debug1("config.policies.addLockdownWhitelistContext('%s')", context)
self.accessCheck(sender)
settings = list(self.getLockdownWhitelist())
if context in settings[1]:
raise FirewallError(ALREADY_ENABLED, context)
settings[1].append(context)
self.setLockdownWhitelist(settings)
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, in_signature='s')
@dbus_handle_exceptions
def removeLockdownWhitelistContext(self, context, sender=None):
context = dbus_to_python(context)
log.debug1("config.policies.removeLockdownWhitelistContext('%s')", context)
self.accessCheck(sender)
settings = list(self.getLockdownWhitelist())
if context not in settings[1]:
raise FirewallError(NOT_ENABLED, context)
settings[1].remove(context)
self.setLockdownWhitelist(settings)
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, in_signature='s',
out_signature='b')
@dbus_handle_exceptions
def queryLockdownWhitelistContext(self, context, sender=None):
context = dbus_to_python(context)
log.debug1("config.policies.queryLockdownWhitelistContext('%s')", context)
return context in self.getLockdownWhitelist()[1]
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, out_signature='as')
@dbus_handle_exceptions
def getLockdownWhitelistContexts(self, sender=None):
log.debug1("config.policies.getLockdownWhitelistContexts()")
return self.getLockdownWhitelist()[1]
# user
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, in_signature='s')
@dbus_handle_exceptions
def addLockdownWhitelistUser(self, user, sender=None):
user = dbus_to_python(user)
log.debug1("config.policies.addLockdownWhitelistUser('%s')", user)
self.accessCheck(sender)
settings = list(self.getLockdownWhitelist())
if user in settings[2]:
raise FirewallError(ALREADY_ENABLED, user)
settings[2].append(user)
self.setLockdownWhitelist(settings)
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, in_signature='s')
@dbus_handle_exceptions
def removeLockdownWhitelistUser(self, user, sender=None):
user = dbus_to_python(user)
log.debug1("config.policies.removeLockdownWhitelistUser('%s')", user)
self.accessCheck(sender)
settings = list(self.getLockdownWhitelist())
if user not in settings[2]:
raise FirewallError(NOT_ENABLED, user)
settings[2].remove(user)
self.setLockdownWhitelist(settings)
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, in_signature='s',
out_signature='b')
@dbus_handle_exceptions
def queryLockdownWhitelistUser(self, user, sender=None):
user = dbus_to_python(user)
log.debug1("config.policies.queryLockdownWhitelistUser('%s')", user)
return user in self.getLockdownWhitelist()[2]
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, out_signature='as')
@dbus_handle_exceptions
def getLockdownWhitelistUsers(self, sender=None):
log.debug1("config.policies.getLockdownWhitelistUsers()")
return self.getLockdownWhitelist()[2]
# uid
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, in_signature='i')
@dbus_handle_exceptions
def addLockdownWhitelistUid(self, uid, sender=None):
uid = dbus_to_python(uid)
log.debug1("config.policies.addLockdownWhitelistUid(%d)", uid)
self.accessCheck(sender)
settings = list(self.getLockdownWhitelist())
if uid in settings[3]:
raise FirewallError(ALREADY_ENABLED, uid)
settings[3].append(uid)
self.setLockdownWhitelist(settings)
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, in_signature='i')
@dbus_handle_exceptions
def removeLockdownWhitelistUid(self, uid, sender=None):
uid = dbus_to_python(uid)
log.debug1("config.policies.removeLockdownWhitelistUid(%d)", uid)
self.accessCheck(sender)
settings = list(self.getLockdownWhitelist())
if uid not in settings[3]:
raise FirewallError(NOT_ENABLED, uid)
settings[3].remove(uid)
self.setLockdownWhitelist(settings)
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, in_signature='i',
out_signature='b')
@dbus_handle_exceptions
def queryLockdownWhitelistUid(self, uid, sender=None):
uid = dbus_to_python(uid)
log.debug1("config.policies.queryLockdownWhitelistUid(%d)", uid)
return uid in self.getLockdownWhitelist()[3]
@dbus_service_method(DBUS_INTERFACE_CONFIG_POLICIES, out_signature='ai')
@dbus_handle_exceptions
def getLockdownWhitelistUids(self, sender=None):
log.debug1("config.policies.getLockdownWhitelistUids()")
return self.getLockdownWhitelist()[3]
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# I C M P T Y P E S
@dbus_service_method(DBUS_INTERFACE_CONFIG, out_signature='ao')
@dbus_handle_exceptions
def listIcmpTypes(self, sender=None):
"""list icmptypes objects paths
"""
log.debug1("config.listIcmpTypes()")
return self.icmptypes
@dbus_service_method(DBUS_INTERFACE_CONFIG, in_signature='s',
out_signature='o')
@dbus_handle_exceptions
def getIcmpTypeByName(self, icmptype, sender=None):
"""object path of icmptype with given name
"""
icmptype = dbus_to_python(icmptype)
log.debug1("config.getIcmpTypeByName('%s')", icmptype)
for obj in self.icmptypes:
if obj.obj.name == icmptype:
return obj
raise FirewallError(INVALID_ICMPTYPE, icmptype)
@dbus_service_method(DBUS_INTERFACE_CONFIG,
in_signature='s'+IcmpType.DBUS_SIGNATURE,
out_signature='o')
@dbus_handle_exceptions
def addIcmpType(self, icmptype, settings, sender=None):
"""add icmptype with given name and settings
"""
icmptype = dbus_to_python(icmptype)
settings = dbus_to_python(settings)
log.debug1("config.addIcmpType('%s')", icmptype)
self.accessCheck(sender)
obj = self.config.new_icmptype(icmptype, settings)
config_icmptype = self._addIcmpType(obj)
return config_icmptype
@dbus.service.signal(DBUS_INTERFACE_CONFIG, signature='s')
@dbus_handle_exceptions
def IcmpTypeAdded(self, icmptype):
log.debug1("config.IcmpTypeAdded('%s')" % (icmptype))
# S E R V I C E S
@dbus_service_method(DBUS_INTERFACE_CONFIG, out_signature='ao')
@dbus_handle_exceptions
def listServices(self, sender=None):
"""list services objects paths
"""
log.debug1("config.listServices()")
return self.services
@dbus_service_method(DBUS_INTERFACE_CONFIG, in_signature='s',
out_signature='o')
@dbus_handle_exceptions
def getServiceByName(self, service, sender=None):
"""object path of service with given name
"""
service = dbus_to_python(service)
log.debug1("config.getServiceByName('%s')", service)
for obj in self.services:
if obj.obj.name == service:
return obj
raise FirewallError(INVALID_SERVICE, service)
@dbus_service_method(DBUS_INTERFACE_CONFIG,
in_signature='s'+Service.DBUS_SIGNATURE,
out_signature='o')
@dbus_handle_exceptions
def addService(self, service, settings, sender=None):
"""add service with given name and settings
"""
service = dbus_to_python(service)
settings = dbus_to_python(settings)
log.debug1("config.addService('%s')", service)
self.accessCheck(sender)
obj = self.config.new_service(service, settings)
config_service = self._addService(obj)
return config_service
@dbus.service.signal(DBUS_INTERFACE_CONFIG, signature='s')
@dbus_handle_exceptions
def ServiceAdded(self, service):
log.debug1("config.ServiceAdded('%s')" % (service))
# Z O N E S
@dbus_service_method(DBUS_INTERFACE_CONFIG, out_signature='ao')
@dbus_handle_exceptions
def listZones(self, sender=None):
"""list zones objects paths
"""
log.debug1("config.listZones()")
return self.zones
@dbus_service_method(DBUS_INTERFACE_CONFIG, in_signature='s',
out_signature='o')
@dbus_handle_exceptions
def getZoneByName(self, zone, sender=None):
"""object path of zone with given name
"""
zone = dbus_to_python(zone)
log.debug1("config.getZoneByName('%s')", zone)
for obj in self.zones:
if obj.obj.name == zone:
return obj
raise FirewallError(INVALID_ZONE, zone)
@dbus_service_method(DBUS_INTERFACE_CONFIG, in_signature='s',
out_signature='s')
@dbus_handle_exceptions
def getZoneOfInterface(self, iface, sender=None):
"""name of zone the given interface belongs to
"""
iface = dbus_to_python(iface)
log.debug1("config.getZoneOfInterface('%s')", iface)
ret = []
for obj in self.zones:
if iface in obj.obj.interfaces:
ret.append(obj.obj.name)
if len(ret) > 1:
            # Even though it shouldn't happen, it's actually possible that
            # the same interface is in several zone XML files
            return " ".join(ret) + " (ERROR: interface '%s' is in %s zone XML files, it can only be in one)" % (iface, len(ret))
return ret[0] if ret else ""
@dbus_service_method(DBUS_INTERFACE_CONFIG, in_signature='s',
out_signature='s')
@dbus_handle_exceptions
def getZoneOfSource(self, source, sender=None):
"""name of zone the given source belongs to
"""
source = dbus_to_python(source)
log.debug1("config.getZoneOfSource('%s')", source)
ret = []
for obj in self.zones:
if source in obj.obj.sources:
ret.append(obj.obj.name)
if len(ret) > 1:
            # Even though it shouldn't happen, it's actually possible that
            # the same source is in several zone XML files
            return " ".join(ret) + " (ERROR: source '%s' is in %s zone XML files, it can only be in one)" % (source, len(ret))
return ret[0] if ret else ""
@dbus_service_method(DBUS_INTERFACE_CONFIG,
in_signature='s'+Zone.DBUS_SIGNATURE,
out_signature='o')
@dbus_handle_exceptions
def addZone(self, zone, settings, sender=None):
"""add zone with given name and settings
"""
zone = dbus_to_python(zone)
settings = dbus_to_python(settings)
log.debug1("config.addZone('%s')", zone)
self.accessCheck(sender)
obj = self.config.new_zone(zone, settings)
config_zone = self._addZone(obj)
return config_zone
@dbus.service.signal(DBUS_INTERFACE_CONFIG, signature='s')
@dbus_handle_exceptions
def ZoneAdded(self, zone):
log.debug1("config.ZoneAdded('%s')" % (zone))
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# DIRECT
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT,
out_signature=Direct.DBUS_SIGNATURE)
@dbus_handle_exceptions
def getSettings(self, sender=None):
        # returns the direct settings tuple; see the layout note below
log.debug1("config.direct.getSettings()")
return self.config.get_direct().export_config()
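    # The direct settings tuple is indexed by the methods below as
    # settings[0] = chains [(ipv, table, chain), ...],
    # settings[1] = rules [(ipv, table, chain, priority, args), ...],
    # settings[2] = passthroughs [(ipv, args), ...].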
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT,
in_signature=Direct.DBUS_SIGNATURE)
@dbus_handle_exceptions
def update(self, settings, sender=None):
        # imports and persists the given direct settings, then emits Updated
log.debug1("config.direct.update()")
settings = dbus_to_python(settings)
self.config.get_direct().import_config(settings)
self.config.get_direct().write()
self.Updated()
@dbus.service.signal(DBUS_INTERFACE_CONFIG_DIRECT)
@dbus_handle_exceptions
def Updated(self):
log.debug1("config.direct.Updated()")
# chain
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='sss')
@dbus_handle_exceptions
def addChain(self, ipv, table, chain, sender=None):
ipv = dbus_to_python(ipv)
table = dbus_to_python(table)
chain = dbus_to_python(chain)
log.debug1("config.direct.addChain('%s', '%s', '%s')" % \
(ipv, table, chain))
self.accessCheck(sender)
idx = tuple((ipv, table, chain))
settings = list(self.getSettings())
if idx in settings[0]:
raise FirewallError(ALREADY_ENABLED,
"chain '%s' already is in '%s:%s'" % (chain, ipv, table))
settings[0].append(idx)
self.update(settings)
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='sss')
@dbus_handle_exceptions
def removeChain(self, ipv, table, chain, sender=None):
ipv = dbus_to_python(ipv)
table = dbus_to_python(table)
chain = dbus_to_python(chain)
log.debug1("config.direct.removeChain('%s', '%s', '%s')" % \
(ipv, table, chain))
self.accessCheck(sender)
idx = tuple((ipv, table, chain))
settings = list(self.getSettings())
if idx not in settings[0]:
raise FirewallError(NOT_ENABLED,
"chain '%s' is not in '%s:%s'" % (chain, ipv, table))
settings[0].remove(idx)
self.update(settings)
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='sss',
out_signature='b')
@dbus_handle_exceptions
def queryChain(self, ipv, table, chain, sender=None):
ipv = dbus_to_python(ipv)
table = dbus_to_python(table)
chain = dbus_to_python(chain)
log.debug1("config.direct.queryChain('%s', '%s', '%s')" % \
(ipv, table, chain))
idx = tuple((ipv, table, chain))
return idx in self.getSettings()[0]
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='ss',
out_signature='as')
@dbus_handle_exceptions
def getChains(self, ipv, table, sender=None):
ipv = dbus_to_python(ipv)
table = dbus_to_python(table)
log.debug1("config.direct.getChains('%s', '%s')" % (ipv, table))
ret = [ ]
for idx in self.getSettings()[0]:
if idx[0] == ipv and idx[1] == table:
ret.append(idx[2])
return ret
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='',
out_signature='a(sss)')
@dbus_handle_exceptions
def getAllChains(self, sender=None):
log.debug1("config.direct.getAllChains()")
return self.getSettings()[0]
# rule
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='sssias')
@dbus_handle_exceptions
def addRule(self, ipv, table, chain, priority, args, sender=None):
ipv = dbus_to_python(ipv)
table = dbus_to_python(table)
chain = dbus_to_python(chain)
priority = dbus_to_python(priority)
args = dbus_to_python(args)
log.debug1("config.direct.addRule('%s', '%s', '%s', %d, '%s')" % \
(ipv, table, chain, priority, "','".join(args)))
self.accessCheck(sender)
idx = (ipv, table, chain, priority, args)
settings = list(self.getSettings())
if idx in settings[1]:
raise FirewallError(ALREADY_ENABLED,
"rule '%s' already is in '%s:%s:%s'" % \
(args, ipv, table, chain))
settings[1].append(idx)
self.update(tuple(settings))
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='sssias')
@dbus_handle_exceptions
def removeRule(self, ipv, table, chain, priority, args, sender=None):
ipv = dbus_to_python(ipv)
table = dbus_to_python(table)
chain = dbus_to_python(chain)
priority = dbus_to_python(priority)
args = dbus_to_python(args)
log.debug1("config.direct.removeRule('%s', '%s', '%s', %d, '%s')" % \
(ipv, table, chain, priority, "','".join(args)))
self.accessCheck(sender)
idx = (ipv, table, chain, priority, args)
settings = list(self.getSettings())
if idx not in settings[1]:
raise FirewallError(NOT_ENABLED,
"rule '%s' is not in '%s:%s:%s'" % \
(args, ipv, table, chain))
settings[1].remove(idx)
self.update(tuple(settings))
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='sssias',
out_signature='b')
@dbus_handle_exceptions
def queryRule(self, ipv, table, chain, priority, args, sender=None):
ipv = dbus_to_python(ipv)
table = dbus_to_python(table)
chain = dbus_to_python(chain)
priority = dbus_to_python(priority)
args = dbus_to_python(args)
log.debug1("config.direct.queryRule('%s', '%s', '%s', %d, '%s')" % \
(ipv, table, chain, priority, "','".join(args)))
idx = (ipv, table, chain, priority, args)
return idx in self.getSettings()[1]
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='sss')
@dbus_handle_exceptions
def removeRules(self, ipv, table, chain, sender=None):
ipv = dbus_to_python(ipv)
table = dbus_to_python(table)
chain = dbus_to_python(chain)
log.debug1("config.direct.removeRules('%s', '%s', '%s')" %
(ipv, table, chain, ))
self.accessCheck(sender)
settings = list(self.getSettings())
settings[1] = []
self.update(tuple(settings))
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='sss',
out_signature='a(ias)')
@dbus_handle_exceptions
def getRules(self, ipv, table, chain, sender=None):
ipv = dbus_to_python(ipv)
table = dbus_to_python(table)
chain = dbus_to_python(chain)
log.debug1("config.direct.getRules('%s', '%s', '%s')" % \
(ipv, table, chain))
ret = [ ]
for idx in self.getSettings()[1]:
if idx[0] == ipv and idx[1] == table and idx[2] == chain:
ret.append((idx[3], idx[4]))
return ret
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='',
out_signature='a(sssias)')
@dbus_handle_exceptions
def getAllRules(self, sender=None):
log.debug1("config.direct.getAllRules()")
return self.getSettings()[1]
# passthrough
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='sas')
@dbus_handle_exceptions
def addPassthrough(self, ipv, args, sender=None):
ipv = dbus_to_python(ipv)
args = dbus_to_python(args)
log.debug1("config.direct.addPassthrough('%s', '%s')" % \
(ipv, "','".join(args)))
self.accessCheck(sender)
idx = (ipv, args)
settings = list(self.getSettings())
if idx in settings[2]:
raise FirewallError(ALREADY_ENABLED,
"passthrough '%s', '%s'" % (ipv, args))
settings[2].append(idx)
self.update(settings)
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='sas')
@dbus_handle_exceptions
def removePassthrough(self, ipv, args, sender=None):
ipv = dbus_to_python(ipv)
args = dbus_to_python(args)
log.debug1("config.direct.removePassthrough('%s', '%s')" % \
(ipv, "','".join(args)))
self.accessCheck(sender)
idx = (ipv, args)
settings = list(self.getSettings())
if idx not in settings[2]:
raise FirewallError(NOT_ENABLED,
"passthrough '%s', '%s'" % (ipv, args))
settings[2].remove(idx)
self.update(settings)
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='sas',
out_signature='b')
@dbus_handle_exceptions
def queryPassthrough(self, ipv, args, sender=None):
ipv = dbus_to_python(ipv)
args = dbus_to_python(args)
log.debug1("config.direct.queryPassthrough('%s', '%s')" % \
(ipv, "','".join(args)))
idx = (ipv, args)
return idx in self.getSettings()[2]
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, in_signature='s',
out_signature='aas')
@dbus_handle_exceptions
def getPassthroughs(self, ipv, sender=None):
ipv = dbus_to_python(ipv)
log.debug1("config.direct.getPassthroughs('%s')" % (ipv))
ret = [ ]
for idx in self.getSettings()[2]:
if idx[0] == ipv:
ret.append(idx[1])
return ret
@dbus_service_method(DBUS_INTERFACE_CONFIG_DIRECT, out_signature='a(sas)')
@dbus_handle_exceptions
def getAllPassthroughs(self, sender=None):
log.debug1("config.direct.getAllPassthroughs()")
return self.getSettings()[2]
| [
"[email protected]"
] | |
168c64ecb02657fa94d2a717b20168899dce3a42 | 7ca45e9eacfda0db110609e7165d07a13877ab4a | /basic-service-invocation-py/microservices/svc/wallet/server.py | e23bd55630e0d872d16bb830010b123949759479 | [
"MIT"
] | permissive | pdettorre/examples | 49691e8cd67e27e131c8f67069792c36419a71dc | 7c9a8ccdc0eab7ecdcdabe06c1da6e387a6e61ee | refs/heads/main | 2023-07-17T21:01:35.931046 | 2021-09-07T15:00:59 | 2021-09-07T15:00:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | from grpclib.server import Server
from grpclib.utils import graceful_exit
from handler import WalletService
async def run(host: str = "0.0.0.0", port: int = 5050) -> None:
server = Server([WalletService()])
with graceful_exit([server]):
await server.start(host, port)
print(f"Serving on {host}:{port}")
await server.wait_closed()
| [
"[email protected]"
] | |
072f86504022a7dfd5e5e2e2c7fe470d4ca55300 | 2a1a40577d769f63ef183b5b153766d3a32bafe4 | /scripts/analysis_scripts/plot_latencyOrTraffic_change.py | b3aa3bf0b98124340bf46a412bf6a639a0a69186 | [] | no_license | gdu3/LowerTailLatencyStreamProcessing | 44ebe14638f6954efd324de92a8bffb700bfecee | 3e752d01e235d96ca5ab5abe815322c132ec7968 | refs/heads/master | 2021-01-21T14:01:13.223861 | 2016-04-21T21:21:23 | 2016-05-07T14:43:51 | 54,588,655 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,847 | py | #!/usr/bin/python
import matplotlib.pyplot as plt
import math
import sys
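# Assumed input format (inferred from the parsing below): each line of
# IshuffleInfo_<j>.txt is whitespace-separated, with the component id at
# words[5], an execute count at words[6], a wait latency at words[7] and a
# millisecond timestamp at words[8].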
c_id = raw_input("exclaimid?")
c_id = "exclaim" + str(c_id)
t = raw_input("Choose what to plot: 1 for latency change, 2 for traffic (load) change: ")
t = int(t)
#result = open("latency", "w")
#result = open("load", "w")
for j in range(1, 6):
name = "../result_collected/IshuffleInfo_" + str(j) + ".txt"
file = open(name,"r")
count = {}
wait_time = {}
line = file.readline()
if (line==""):
continue
while (line!=""):
words = line.split()
componenet = words[5]
if(c_id != componenet):
line = file.readline()
continue
        time_ptr = long(words[8])
        execute_cnt = int(words[6])
        wait_lat = float(words[7])
count[time_ptr] = execute_cnt
wait_time[time_ptr] = wait_lat
line = file.readline()
if (len(count)==0 or len(wait_time)==0):
continue
if(t == 1):
time_v = sorted(wait_time)
wt_v = []
length = len(time_v)
for i in range(0, length):
wt_v.append(wait_time[time_v[i]])
for i in range(1, length):
time_v[i] = (time_v[i] - time_v[0])/(1000.0)
time_v[0] = 0
        total = 0
        cnt = 1 # starts at 1 to avoid dividing by zero when nothing passes the cutoff
        for i in range(0, length):
            if (time_v[i] > 100):
                total = total + wt_v[i]
                cnt = cnt + 1
        print "storm" + str(j) + " " + str(total/cnt)
plt.xlim(0,700)
plt.ylim(0,20)
plt.plot(time_v, wt_v)
#result.write(str(time_v) + '\n')
#result.write(str(wt_v) + '\n')
else:
time_v = sorted(count)
cnt_v = []
length = len(time_v)
for i in range(0, length):
cnt_v.append(count[time_v[i]])
for i in range(1, length):
time_v[i] = (time_v[i] - time_v[0])/(1000.0)
time_v[0] = 0
print "storm" + str(j) + " " + str(sum(cnt_v))
plt.xlim(0,700)
plt.ylim(0,15000)
plt.plot(time_v, cnt_v)
#result.write(str(time_v) + '\n')
#result.write(str(cnt_v) + '\n')
#result.close()
plt.show()
| [
"[email protected]"
] | |
8b77f8c39bc985a20f1c5452dbeea24d51456732 | 169c55360c7f6c09185718b099deaaac597ca9aa | /samples/pose/pose.py | afc2194993a46141efc3f484ee4bf2cdb33a9957 | [
"MIT"
] | permissive | Sedyshev-Aleksandr/Mask_RCNN | e7f6a63e937638140c5c262483bf573ff5333445 | ae4a3c06584ffd880b76c359a2395242547928ee | refs/heads/master | 2020-08-03T16:21:03.524273 | 2019-09-30T09:07:11 | 2019-09-30T09:07:11 | 211,811,720 | 0 | 0 | NOASSERTION | 2019-09-30T08:24:02 | 2019-09-30T08:24:01 | null | UTF-8 | Python | false | false | 14,007 | py | """
Mask R-CNN
Train on the toy Pose dataset and implement color splash effect.
Copyright (c) 2018 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from pre-trained COCO weights
python3 pose.py train --dataset=/path/to/pose/dataset --weights=coco
# Resume training a model that you had trained earlier
python3 pose.py train --dataset=/path/to/pose/dataset --weights=last
# Train a new model starting from ImageNet weights
python3 pose.py train --dataset=/path/to/pose/dataset --weights=imagenet
# Apply color splash to an image
python3 pose.py splash --weights=/path/to/weights/file.h5 --image=<URL or path to file>
# Apply color splash to video using the last weights you trained
python3 pose.py splash --weights=last --video=<URL or path to file>
"""
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils
# Path to trained weights file
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
############################################################
# Configurations
############################################################
class PoseConfig(Config):
"""Configuration for training on the toy dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "pose"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 2
# Number of classes (including background)
NUM_CLASSES = 1 + 2 # Background + blue and green
# Number of training steps per epoch
STEPS_PER_EPOCH = 100
# Skip detections with < 90% confidence
DETECTION_MIN_CONFIDENCE = 0.9
############################################################
# Dataset
############################################################
class PoseDataset(utils.Dataset):
def load_pose(self, dataset_dir, subset):
"""Load a subset of the Pose dataset.
dataset_dir: Root directory of the dataset.
subset: Subset to load: train or val
"""
# Add classes. We have only one class to add.
self.add_class("pose", 1, "blue")
self.add_class("pose", 2, "green")
# Train or validation dataset?
assert subset in ["train", "val"]
dataset_dir = os.path.join(dataset_dir, subset)
# Load annotations
# VGG Image Annotator (up to version 1.6) saves each image in the form:
# { 'filename': '28503151_5b5b7ec140_b.jpg',
# 'regions': {
# '0': {
# 'region_attributes': {},
# 'shape_attributes': {
# 'all_points_x': [...],
# 'all_points_y': [...],
# 'name': 'polygon'}},
# ... more regions ...
# },
# 'size': 100202
# }
# We mostly care about the x and y coordinates of each region
# Note: In VIA 2.0, regions was changed from a dict to a list.
annotations = json.load(open(os.path.join(dataset_dir, "via_region_data.json")))
annotations = list(annotations.values()) # don't need the dict keys
# The VIA tool saves images in the JSON even if they don't have any
# annotations. Skip unannotated images.
annotations = [a for a in annotations if a['regions']]
# Add images
for a in annotations:
            # Get the x, y coordinates of points of the polygons that make up
            # the outline of each object instance. These are stored in the
# shape_attributes (see json format above)
# The if condition is needed to support VIA versions 1.x and 2.x.
if type(a['regions']) is dict:
polygons = [r['shape_attributes'] for r in a['regions'].values()]
else:
polygons = [r['shape_attributes'] for r in a['regions']]
# load_mask() needs the image size to convert polygons to masks.
# Unfortunately, VIA doesn't include it in JSON, so we must read
            # the image. This is only manageable since the dataset is tiny.
image_path = os.path.join(dataset_dir, a['filename'])
image = skimage.io.imread(image_path)
height, width = image.shape[:2]
self.add_image(
"pose",
image_id=a['filename'], # use file name as a unique image id
path=image_path,
width=width, height=height,
polygons=polygons)
def load_mask(self, image_id):
"""Generate instance masks for an image.
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# If not a pose dataset image, delegate to parent class.
image_info = self.image_info[image_id]
if image_info["source"] != "pose":
return super(self.__class__, self).load_mask(image_id)
# Convert polygons to a bitmap mask of shape
# [height, width, instance_count]
info = self.image_info[image_id]
mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
dtype=np.uint8)
for i, p in enumerate(info["polygons"]):
# Get indexes of pixels inside the polygon and set them to 1
rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
mask[rr, cc, i] = 1
        # Return mask, and array of class IDs of each instance. NOTE: this
        # labels every instance as class 1 ("blue") even though the config
        # declares two classes; the class should really come from each
        # region's annotation (see the sketch below).
return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)
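        # A per-region class lookup would look roughly like this, assuming the
        # VIA project saved a class name under a (hypothetical) "label" region
        # attribute collected into info["labels"] by load_pose:
        #   class_ids = np.array(
        #       [self.class_names.index(label) for label in info["labels"]],
        #       dtype=np.int32)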
def image_reference(self, image_id):
"""Return the path of the image."""
info = self.image_info[image_id]
if info["source"] == "pose":
return info["path"]
else:
super(self.__class__, self).image_reference(image_id)
def train(model):
"""Train the model."""
# Training dataset.
dataset_train = PoseDataset()
dataset_train.load_pose(args.dataset, "train")
dataset_train.prepare()
# Validation dataset
dataset_val = PoseDataset()
dataset_val.load_pose(args.dataset, "val")
dataset_val.prepare()
# *** This training schedule is an example. Update to your needs ***
# Since we're using a very small dataset, and starting from
# COCO trained weights, we don't need to train too long. Also,
# no need to train all layers, just the heads should do it.
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=30,
layers='heads')
def color_splash(image, mask):
"""Apply color splash effect.
image: RGB image [height, width, 3]
mask: instance segmentation mask [height, width, instance count]
Returns result image.
"""
# Make a grayscale copy of the image. The grayscale copy still
# has 3 RGB channels, though.
gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255
# Copy color pixels from the original color image where mask is set
if mask.shape[-1] > 0:
# We're treating all instances as one, so collapse the mask into one layer
mask = (np.sum(mask, -1, keepdims=True) >= 1)
splash = np.where(mask, image, gray).astype(np.uint8)
else:
splash = gray.astype(np.uint8)
return splash
def detect_and_color_splash(model, image_path=None, video_path=None):
assert image_path or video_path
# Image or video?
if image_path:
# Run model detection and generate the color splash effect
print("Running on {}".format(args.image))
# Read image
image = skimage.io.imread(args.image)
# Detect objects
r = model.detect([image], verbose=1)[0]
# Color splash
splash = color_splash(image, r['masks'])
# Save output
file_name = "splash_{:%Y%m%dT%H%M%S}.png".format(datetime.datetime.now())
skimage.io.imsave(file_name, splash)
elif video_path:
import cv2
# Video capture
vcapture = cv2.VideoCapture(video_path)
width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = vcapture.get(cv2.CAP_PROP_FPS)
# Define codec and create video writer
file_name = "splash_{:%Y%m%dT%H%M%S}.avi".format(datetime.datetime.now())
vwriter = cv2.VideoWriter(file_name,
cv2.VideoWriter_fourcc(*'MJPG'),
fps, (width, height))
count = 0
success = True
while success:
print("frame: ", count)
# Read next image
success, image = vcapture.read()
if success:
# OpenCV returns images as BGR, convert to RGB
image = image[..., ::-1]
# Detect objects
r = model.detect([image], verbose=0)[0]
# Color splash
splash = color_splash(image, r['masks'])
# RGB -> BGR to save image to video
splash = splash[..., ::-1]
# Add image to video writer
vwriter.write(splash)
count += 1
vwriter.release()
print("Saved to ", file_name)
############################################################
# Training
############################################################
if __name__ == '__main__':
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
        description='Train Mask R-CNN to detect poses.')
parser.add_argument("command",
metavar="<command>",
help="'train' or 'splash'")
parser.add_argument('--dataset', required=False,
metavar="/path/to/pose/dataset/",
help='Directory of the Pose dataset')
parser.add_argument('--weights', required=True,
metavar="/path/to/weights.h5",
help="Path to weights .h5 file or 'coco'")
parser.add_argument('--logs', required=False,
default=DEFAULT_LOGS_DIR,
metavar="/path/to/logs/",
help='Logs and checkpoints directory (default=logs/)')
parser.add_argument('--image', required=False,
metavar="path or URL to image",
help='Image to apply the color splash effect on')
parser.add_argument('--video', required=False,
metavar="path or URL to video",
help='Video to apply the color splash effect on')
args = parser.parse_args()
# Validate arguments
if args.command == "train":
assert args.dataset, "Argument --dataset is required for training"
elif args.command == "splash":
assert args.image or args.video,\
"Provide --image or --video to apply color splash"
print("Weights: ", args.weights)
print("Dataset: ", args.dataset)
print("Logs: ", args.logs)
# Configurations
if args.command == "train":
config = PoseConfig()
else:
class InferenceConfig(PoseConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
config.display()
# Create model
if args.command == "train":
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=args.logs)
else:
model = modellib.MaskRCNN(mode="inference", config=config,
model_dir=args.logs)
# Select weights file to load
if args.weights.lower() == "coco":
weights_path = COCO_WEIGHTS_PATH
# Download weights file
if not os.path.exists(weights_path):
utils.download_trained_weights(weights_path)
elif args.weights.lower() == "last":
# Find last trained weights
weights_path = model.find_last()
elif args.weights.lower() == "imagenet":
# Start from ImageNet trained weights
weights_path = model.get_imagenet_weights()
else:
weights_path = args.weights
# Load weights
print("Loading weights ", weights_path)
if args.weights.lower() == "coco":
# Exclude the last layers because they require a matching
# number of classes
model.load_weights(weights_path, by_name=True, exclude=[
"mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
else:
model.load_weights(weights_path, by_name=True)
# Train or evaluate
if args.command == "train":
train(model)
elif args.command == "splash":
detect_and_color_splash(model, image_path=args.image,
video_path=args.video)
else:
print("'{}' is not recognized. "
"Use 'train' or 'splash'".format(args.command))
| [
"[email protected]"
] | |
1b8be447f67bc42160efe5e5e1b630b21dbd2145 | 4a26c4236a0b6701394de7e6d4df140c9ece8d3f | /datasources/LyricsWikiaSource.py | ac14a712b2d0013ac6c9c9bfb53b64f1f99d0af3 | [] | no_license | yurifw/menestrel | 7bdef819427a1131769b745aa65139a9e9b47841 | 32cf702dd6ab4786ef6c4ba6efcdfc47df041497 | refs/heads/master | 2021-03-27T11:46:00.711556 | 2017-09-20T02:48:49 | 2017-09-20T02:48:49 | 96,278,837 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,811 | py | from selenium import webdriver
from selenium.common import exceptions
import time
import urllib2
class LyricsWikiaSource():
def __init__(self):
self.crawler = webdriver.PhantomJS(executable_path='/var/www/html/neatcomics-scrapper/selenium-driver/phantomjs')
self.cache = []
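        # simple in-memory cache: a list of one-entry dicts mapping
        # artist+album keys to raw image bytes (see grabAlbumCover)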
def grabLyric(self, artist, title):
print "scraping Lyrics Wikia for \"{}\" lyrics".format(title)
        # replace spaces so multi-word artist names also form valid wiki URLs,
        # matching what grabAlbumCover does below
        artist = artist.replace(" ", "_")
        title = title.replace(" ", "_")
        url = "http://lyrics.wikia.com/wiki/"+artist+":"+title
self.crawler.get(url)
lyrics = self.crawler.find_element_by_class_name("lyricbox").text
return lyrics
def grabAlbumCover(self, artist, album):
print "scraping Lyrics Wikia for \"{}\" artwork".format(album)
artist = artist.replace(" ","_")
for cached in self.cache:
if artist+album in cached.keys():
print "found artwork in cache"
return cached[artist+album]
url = "http://lyrics.wikia.com/wiki/"+artist
self.crawler.get(url)
images = self.crawler.find_elements_by_xpath('//a[@class="image image-thumbnail"]')
for image in images:
link = image.get_attribute('href')
if artist+"_-_"+album.replace(" ","_")+"." in link:
response = urllib2.urlopen(link)
imagedata = response.read()
self.cache.append({artist+album:imagedata})
return imagedata
print "sorry, I could not scrape LyricsWikia for the artwork"
link = raw_input("enter the url for the image:\n")
response = urllib2.urlopen(link)
imagedata = response.read()
self.cache.append({artist+album:imagedata})
return imagedata
def quit(self):
self.crawler.quit()
| [
"[email protected]"
] | |
8e5b8e6f322805d3e2e02a4d410048b8dafc9b13 | ace05ee09ed0e9f6ad785a3fc361d8cddf78511b | /lib/number_of_1_bits.py | 51522afa79abd2360305d0adff88c47b48cb7ed1 | [] | no_license | alex-xia/leetcode-python | b6b72f20d00c0875f93f76ce8d694512c82fa6b2 | 7c683df626a620ec6ba9fa3e172906e52b6656c3 | refs/heads/master | 2021-01-18T21:08:42.424277 | 2019-07-21T19:12:49 | 2019-07-21T19:12:49 | 49,799,063 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | __author__ = 'Qing'
'''
Write a function that takes an unsigned integer and returns the number of '1' bits it has (also known as the Hamming weight).
For example, the 32-bit integer '11' has binary representation 00000000000000000000000000001011, so the function should return 3.
'''
class Solution(object):
def hammingWeight(self, n):
"""
:type n: int
:rtype: int
"""
cnt = 0
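        # Brian Kernighan's trick: n & (n - 1) clears the lowest set bit,
        # so the loop iterates once per 1-bit rather than once per bit position.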
while n > 0:
cnt += 1
n &= (n-1)
return cnt
if __name__ == '__main__':
solution = Solution()
assert solution.hammingWeight(11) == 3 | [
"[email protected]"
] | |
d0ce209e2eaf52f310523b64287e62dd0ad3d8e6 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/128/usersdata/245/33701/submittedfiles/al6.py | d94e8e45d348615846191bc3f3680ef520f6a876 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | # -*- coding: utf-8 -*-
n = int(input('Enter an integer: '))
contador = 0
i = 2
while i < n:
    if n % i == 0:
        contador = contador + 1
    i = i + 1
# numbers below 2 are not prime by definition
if n >= 2 and contador == 0:
    print('Prime')
else:
    print('Not prime')
"[email protected]"
] | |
e774e069a7762765acbcaba77759604b8d69d1d3 | 738d6c5b8434db4dc77d53101bef6a44ca601e29 | /forms.py | 75f5e1af0a3240212f91f975839435f0986ebfb9 | [] | no_license | mfierro31/feedback_app | 0ad3c979b7a271bbf41f92f2384dbbbabbb90663 | 50344eea13e9c0d41fda7269cd127f10b75d52f4 | refs/heads/master | 2023-03-24T15:16:12.502829 | 2020-08-10T06:29:04 | 2020-08-10T06:29:04 | 285,100,002 | 0 | 0 | null | 2021-03-20T04:57:29 | 2020-08-04T21:07:04 | Python | UTF-8 | Python | false | false | 919 | py | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, TextAreaField
from wtforms.fields.html5 import EmailField
from wtforms.validators import InputRequired, Length
class RegisterForm(FlaskForm):
username = StringField("Username", validators=[InputRequired()])
password = PasswordField("Password", validators=[InputRequired()])
email = EmailField("Email", validators=[InputRequired()])
first_name = StringField("First Name", validators=[InputRequired()])
last_name = StringField("Last Name", validators=[InputRequired()])
class LoginForm(FlaskForm):
username = StringField("Username", validators=[InputRequired()])
password = PasswordField("Password", validators=[InputRequired()])
class FeedbackForm(FlaskForm):
title = StringField("Title", validators=[InputRequired(), Length(max=100)])
content = TextAreaField("Feedback", validators=[InputRequired()]) | [
"[email protected]"
] | |
79a5a7be12abbb4e04e21b06879bc5bd7c331cc0 | 7e224a1141102327b33c81825f9fd87d611204b9 | /api/setup.py | 72b0fb253e8b3005587d3e2798798111f15c1984 | [
"MIT"
] | permissive | aaronlockhartdev/biparse | 5f96259e1f3774807238a104d1d2666a534988c7 | aeeddf50f3c1648fb9ab34cd502cea9ec02bd49b | refs/heads/master | 2020-05-28T09:19:59.884520 | 2019-06-11T04:14:44 | 2019-06-11T04:14:44 | 188,953,222 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | import data
import neural_net
def main():
data.set_train_data()
data.get_unique()
data.format_training_data()
neural_net.create_model()
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
f7ac3fcc5a1e2d2fbd18c764f09bfae43763facb | a3a4bb196c3a64ce95a4efb88edba4ba72be1431 | /binarysearch/baek1654.py | b6fcbaa37cb046fad9accaefcd10f43a82a6a42e | [] | no_license | jojojohhh/python-algorithm-study | 6e00e384ba214410df6bd813859c299efeb31eca | 1a5d645b0ad760c289379c5ef41daf0675e97b0d | refs/heads/master | 2023-06-01T14:29:46.395838 | 2021-06-22T04:07:15 | 2021-06-22T04:07:15 | 349,453,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | from sys import stdin
k, n = map(int, stdin.readline().split())
arr_k = [int(stdin.readline()) for _ in range(k)]
start, end = 1, max(arr_k)
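# Parametric binary search on the cable length: for a candidate length mid,
# count how many pieces the cables yield; lengths that still give at least n
# pieces push start up, otherwise end comes down, so end finishes at the
# largest feasible length.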
while start <= end:
mid = (start + end) // 2
lines = 0
for i in arr_k:
lines += i // mid
if lines >= n:
start = mid + 1
else:
end = mid - 1
print(end) | [
"[email protected]"
] | |
8150b1239acc5c233cf751d84d7ffb2c1ae9837f | 00d0cbfe7b1dc71b4365197aa7e20bedc633c458 | /clusters/algs.py | 4247c9971eb3c3bb1affc370f6a72e53fc740977 | [] | no_license | bendorr/Project2 | 2b306e7f4f97dab60f7ce1f6336503251402c0ca | 54561dba456b08d99159fac902994e83ccc8e7ca | refs/heads/main | 2023-03-12T13:04:41.250116 | 2021-02-20T05:50:27 | 2021-02-20T05:50:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,484 | py | import numpy as np
import random
class HierarchicalClustering():
def __init__(self):
pass
def cluster(self, ligands, distanceMatrix, numClusters, verbose = False):
"""
Clusters the given ligands into numClusters clusters using the average linkage hierarchical clustering algorithm. Treats each individual ligand
as its own cluster from the start. Finds the two most similar clusters using the given distance matrix and combines them into the same
cluster. Calculates the clusters' average distance to all other clusters and adds this array of distances to the last row and last column of
the distance matrix, then removes the rows and columns that correspond to the individual clusters that were combined. Repeats this process until
there are numClusters clusters remaining.
Params:
ligands - a list of Ligand() objects
distanceMatrix - a len(ligands) x len(ligands) 2D array filled with Tanimoto Coefficients corresponding
to each row and column ligand pair
numClusters - an int dictating how many Cluster() objects will be returned
verbose - when true, prints the iteration number of the algorithm as it's running
Returns:
a list of Cluster() objects
"""
# Make sure each dimension of the distance matrix is the same length as len(ligands), in case I use a subset of the ligands dataset
# Made this extra complicated in case I ever want to use slices from the middle of the ligands dataset
distanceMatrix = distanceMatrix[ligands[0].ligandIndex:ligands[-1].ligandIndex+1, ligands[0].ligandIndex:ligands[-1].ligandIndex+1]
matrixHeader = list(map(lambda ligand:[ligand], ligands)) #returns a list of lists
while len(matrixHeader) > numClusters:
if verbose:
print('Clustering step number %d' % (len(ligands) - len(matrixHeader)))
# Determine the current max tanimoto coefficient in the distance matrix (which determines the next clustering event)
maxVal = 0
maxValIndeces = [0,0]
for i in range(len(distanceMatrix)):
for j in range(len(distanceMatrix[i])):
if i != j:
if maxVal < distanceMatrix[i][j]:
maxVal = distanceMatrix[i][j]
maxValIndeces = [i,j]
            # Populate the new row (and column) of the distance matrix with the
            # size-weighted average similarity of the two merged clusters to
            # every other cluster (average linkage)
newRow = []
for k in range(len(distanceMatrix[maxValIndeces[1]])):
weightedVal1 = distanceMatrix[maxValIndeces[0]][k] * len(matrixHeader[maxValIndeces[0]])
weightedVal2 = distanceMatrix[maxValIndeces[1]][k] * len(matrixHeader[maxValIndeces[1]])
aveVal = (weightedVal1 + weightedVal2) / (len(matrixHeader[maxValIndeces[0]])+len(matrixHeader[maxValIndeces[1]]))
newRow.append([aveVal])
newRow = np.array(newRow)
# Add the new row to the distance matrix, in both the rows and columns
distanceMatrix = np.hstack((distanceMatrix, newRow))
newRow = np.append(newRow, [1]) # 1 here because that's the tanimoto between the same ligand
distanceMatrix = np.vstack((distanceMatrix, newRow))
# Remove the rows and columns that were just clustered together from the distance matrix
distanceMatrix = np.delete(distanceMatrix, [maxValIndeces[0], maxValIndeces[1]], axis = 0)
distanceMatrix = np.delete(distanceMatrix, [maxValIndeces[0], maxValIndeces[1]], axis = 1)
# Add a new column to the matrix header, which represents the combination of the two combined clusters
matrixHeader.append(matrixHeader[maxValIndeces[0]] + matrixHeader[maxValIndeces[1]])
# Remove the two individual clusters that were just combined from the matrix header
matrixHeader.pop(max(maxValIndeces))
matrixHeader.pop(min(maxValIndeces))
clusters = []
for cluster in matrixHeader:
            fake_centroid = Centroid(ligands[0].bitstring) # placeholder; Cluster() requires a centroid but hierarchical clustering does not use it
new_cluster = Cluster(fake_centroid)
for ligand in cluster:
new_cluster.addLigand(ligand)
clusters.append(new_cluster)
return clusters
class PartitionClustering():
def __init__(self):
pass
def cluster(self, ligands, distanceMatrix, numClusters, verbose=False):
"""
Clusters the given ligands into numClusters clusters using the K-Means++ partition clustering algorithm. Initializes numClusters
Centroid() objects, and uses these to initialize numClusters Cluster() objects. For each ligand, calculates which cluster's centroid
is closest to the ligand and adds that ligand to that cluster. Once all ligands have been added to a cluster, ensures that no clusters
have no ligands. If a cluster is empty (has no ligands), then removes one ligand from a non-empty cluster and adds this ligand to the
empty cluster. Then calculates a new centroid based on each clusters' new list of ligands, and determines if the centroids have moved from
the last iteration. If they have not moved, the clustering is complete. If they have moved, then the clusters' centroids are updated, and
the process of assigning ligands begins again with the new centroids. This process repeats until either the centroids stop moving, or the
algorithm has run for 100 iterations.
Params:
ligands - a list of Ligand() objects
distanceMatrix - a len(ligands) x len(ligands) 2D array filled with Tanimoto Coefficients corresponding
to each row and column ligand pair
numClusters - an int dictating how many Cluster() objects will be returned
verbose - when true, prints the iteration number of the algorithm as it's running, as well as the number of bits that changed
across all the centroids' .bitstring attributes in that iteration (which is a measure of how much the centroids moved in that iteration)
Returns:
a list of Cluster() objects
"""
centroids = self.initializeCentroids(numClusters, ligands, distanceMatrix)
clusters = self.initializeClusters(centroids) # clusters is a list of cluster objects with centroid attributes
# centroidUpdate is the sum of each cluster's calcCentroidMoved, which is 0 when a centroid doesn't move
prevUpdates = []
centroidUpdate = 1
iterations = 0
while centroidUpdate != 0 and iterations < 100:
centroidUpdate = 0
iterations += 1
# Empty the ligand list in each cluster. They'll be repopulated with the new centroids
for cluster in clusters:
cluster.resetLigands()
# Determine which cluster each ligand should join
for ligand in ligands:
                # Join the cluster whose centroid is most similar: Tanimoto is a
                # similarity, so "closest" means the maximum coefficient
                tanCo = ligand.tanimotoCoefficient(clusters[0].centroid) # this works because centroid has .onbits attribute
                joinClusterIndex = 0
                for i, cluster in enumerate(clusters):
                    similarity = ligand.tanimotoCoefficient(cluster.centroid)
                    if similarity > tanCo:
                        tanCo = similarity
                        joinClusterIndex = i
                clusters[joinClusterIndex].addLigand(ligand)
# If any clusters are empty, add one ligand to the cluster from a cluster with 2 or more ligands
for cluster in clusters:
if len(cluster.ligands) == 0:
for cluster2 in clusters:
if len(cluster2.ligands) > 1:
cluster.addLigand(cluster2.ligands[-1])
cluster2.removeLigand(cluster2.ligands[-1])
# Determine whether the centroids moved
for cluster in clusters:
centroidUpdate += cluster.calcCentroidMoved()
cluster.updateCentroid()
prevUpdates.append(centroidUpdate)
if verbose:
print('Iteration number %d. Number of changed centroid bits = %d' % (iterations, centroidUpdate))
return clusters
def initializeClusters(self, centroids):
"""
For each Centroid() object in centroids, creates a new Cluster() object with the Centroid. Returns a list of these Cluster objects
Params:
centroids - a list of Centroid() objects
Returns:
a list of Cluster() objects
"""
clusters = []
for centroid in centroids:
newCluster = Cluster(centroid)
clusters.append(newCluster)
return clusters
def initializeCentroids(self, numClusters, ligands, distanceMatrix):
"""
Picks numClusters centroids from the ligands list. The probability of each ligand being chosen as a
centroid is inversely proportional to its distance from the closest centroid that has already been
chosen.
Params:
numClusters - an int dictating how many centroids will be returned
ligands - a list of Ligand() objects
distanceMatrix - a len(ligands) x len(ligands) 2D array filled with Tanimoto Coefficients corresponding
to each row and column ligand pair
Returns:
a list of Centroid() objects
"""
indeces = []
# Randomly choose one ligand as a first centroid
i = random.randint(0,len(ligands)-1) # randint is inclusive of second number
indeces.append(i)
# The probability of choosing a second ligand is inversely proportional to the
# second ligand's similarity to the first
weights = distanceMatrix[i]
weights = 1 - weights
# Choose the rest of the centroids with their associated probabilities, updating these
# probabilities after each new centroid is chosen
for c in range(numClusters-1): # -1 because we've already picked one index (/centroid)
i = random.choices(np.arange(len(distanceMatrix)), weights)
newWeights = 1 - distanceMatrix[i]
# update weights with the lowest similarity values for each of the already-chosen centroids
            weights = np.minimum(weights, newWeights)[0] # newWeights has shape (1, N), so take row 0 of the elementwise minimum
indeces.append(i[0]) # i is a one-element list
centroids = []
for i in indeces:
centroids.append(Centroid(ligands[i].bitstring))
return centroids
class Centroid():
def __init__(self, bitstring):
self.bitstring = bitstring
self.onbits = self.bitstringToOnbits(bitstring)
def bitstringToOnbits(self, bitstring):
"""
Calculates and returns the densified representation of the bitstring by returning a list of indeces at
which the bitstring had 1's.
Params:
bitstring - a 1024 element list of 1's and 0's
Returns:
a list of indeces at which the bitstring contained 1's
"""
onbits = []
for i in range(len(bitstring)):
if bitstring[i] == 1:
onbits.append(i)
return onbits
class Cluster():
def __init__(self, centroid):
self.centroid = centroid
self.ligands = []
def set_centroid(self, centroid):
self.centroid = centroid
def addLigand(self, ligand):
self.ligands.append(ligand)
def removeLigand(self, ligand):
self.ligands.remove(ligand)
def resetLigands(self):
self.ligands = []
def calcCentroid(self, ligands):
"""
Calculates and returns a bitstring that represents the average bitstring across all the ligands in the ligand list parameter. Any bit
position that is 1 in most of the given ligand bitstrings will have a 1 in the final bitstring, and the same is true for 0s.
Creates a new Centroid object with this bitstring and returns it.
If the ligand list is empty, it returns the Cluster's current centroid object.
Params:
ligands - a list of ligand objects
Returns:
a centroid object. Either a new centroid object with the new average bitstring, or the current centroid attribute of the Cluster object
"""
if len(ligands) > 0:
centroidBitstring = np.zeros(1024, dtype=int)
for ligand in ligands:
centroidBitstring += ligand.bitstring
centroidBitstring = centroidBitstring / len(ligands)
centroidBitstring = np.rint(centroidBitstring)
return Centroid(centroidBitstring)
else:
return self.centroid
def calcCentroidMoved(self):
"""
Returns the number of bits that changed between the bitstrings of the Cluster() object's previous centroid and its new centroid.
Params:
None
Returns:
an int, representing the total number of bits that changed between the Cluster's old and new centroids' bitstrings
"""
j = 0
newBitstring = self.calcCentroid(self.ligands).bitstring
for i in range(len(self.centroid.bitstring)):
if self.centroid.bitstring[i] != newBitstring[i]:
j+=1
return j
def updateCentroid(self):
"""
Sets the Cluster() object's centroid attribute to what is returned from the Cluster() object's calcCentroid method.
Params:
None
Returns:
None
"""
self.centroid = self.calcCentroid(self.ligands)
class Ligand():
def __init__(self, ligandIndex, ligandID, score, smiles, onbits):
self.ligandIndex = ligandIndex
self.ligandID = int(ligandID)
self.score = float(score)
self.smiles = smiles
self.onbits = list(map(int, onbits.split(',')))
self.bitstring = self.onbitsToBitstring(onbits)
def __str__(self):
return "%d" % self.ligandID
def onbitsToBitstring(self, onbits):
"""
Converts a densified string of onbits to an expanded, 1024-bit-long bitstring.
Params:
onbits - a list of onbits for a 1024-bit-long bitstring.
Returns:
A 1024-element numpy array of 1s and 0s.
"""
onbits = onbits.split(',')
bitstring = np.zeros(1024, dtype=int)
for ob in onbits:
bitstring[int(ob)] = 1
return bitstring
def tanimotoCoefficient(self, other):
"""
Calculates the Tanimoto Coefficient between two ligands' list of onbits.
Params:
An object that contains an .onbits attribute, such as a Ligand() or Centroid() object
Returns:
a float
"""
selfOnbits = set(self.onbits)
otherOnbits = set(other.onbits)
numerator = selfOnbits.intersection(otherOnbits)
denomenator = selfOnbits.union(otherOnbits)
return len(numerator) / len(denomenator)
def read_ligand_csv(csv):
"""
Creates a new Ligand() object for each line in the ligand_information.csv file, setting the Ligand() object's attributes
to the appropriate values from the file.
Params:
csv - a csv file containing ligand information in the same format as ligand_information.csv
Returns:
a list of Ligand() objects
"""
ligands = []
    with open(csv, 'r') as f:
        for i, line in enumerate(f):
            if i > 0: # skip the header row
                ligandID, score, smiles, onbits = line.split(',', 3) #set max number of splits = 3
                ligands.append(Ligand(i-1, ligandID, score, smiles, onbits.replace('"', '').rstrip())) #remove ""'s and eol char
return ligands
def silhouetteCoeff(clusters, distanceMatrix):
"""
    Calculates and returns the silhouette coefficient for a set of clusters.
Params:
clusters - a list of Cluster() objects
distanceMatrix - a len(ligands) x len(ligands) 2D array filled with Tanimoto Coefficients corresponding
to each row and column ligand pair
Returns:
a float
"""
# the dM has Tanimoto Coeffs. Now change its values to be proportional to distances
distanceMatrix = 1 - distanceMatrix
s = []
a, b = [], []
for c in range(len(clusters)):
for ligand in clusters[c].ligands:
# For each ligand in the current cluster, calculate its 'a' term, its cohesion, to all the other
# ligands in its cluster
da = []
for ligand2 in clusters[c].ligands:
if ligand.ligandIndex != ligand2.ligandIndex:
da.append(distanceMatrix[ligand.ligandIndex][ligand2.ligandIndex])
elif ligand.ligandIndex == ligand2.ligandIndex:
da.append(0)
if len(clusters[c].ligands) == 1:
a.append(0)
else:
a.append((1 / (len(clusters[c].ligands)-1)) * sum(da))
# Calculate the 'b'' term, the separation, between the current ligand and all the ligands in the other clusters
            bTemp = []
            for c2 in range(len(clusters)):
                if c2 != c:
                    # start a fresh distance list for each cluster so its
                    # average only covers that cluster's ligands
                    db = []
                    for ligand2 in clusters[c2].ligands:
                        if ligand.ligandIndex == ligand2.ligandIndex:
                            db.append(0)
                        else:
                            db.append(distanceMatrix[ligand.ligandIndex][ligand2.ligandIndex])
                    bTemp.append((1 / len(clusters[c2].ligands)) * sum(db))
b.append(min(bTemp))
# calc s for each ligand
for i in range(len(a)):
if max(a[i], b[i]) == 0:
s.append(0)
else:
s.append((b[i] - a[i]) / max(a[i], b[i]))
sc = sum(s) / len(s)
return sc
def makeDistanceMatrix(ligands, verbose = False):
"""
Fills a len(ligands) x len(ligands) matrix with the Tanimoto Coefficients of each row and column pair of ligands.
A TC of 1 means the ligands have identical fingerprints.
Params:
        ligands - a list of Ligand() objects
        verbose - when true, prints progress as each row of the matrix is filled
Returns:
a 2D numpy array
"""
matrix = []
for i in range(len(ligands)):
if verbose:
print('filling row %d' % i)
row = []
for j in range(len(ligands)):
if i == j:
row.append(1) # the Tanimoto Coefficient between a ligand and itself is 1
else:
row.append(ligands[i].tanimotoCoefficient(ligands[j]))
matrix.append(row)
return np.array(matrix)
def jaccardIndex(clusters1, clusters2):
"""
Calculates and returns the Jaccard Index of two sets of clusters.
clusters1 must have a subset of or the same set of ligands as clusters2.
Params:
        clusters1 - a list of Cluster() objects. A subset or the same set of ligands as in clusters2.
        clusters2 - a list of Cluster() objects
Returns:
A float
"""
allLigandIDs = []
clusters1Pairs = []
clusters2Pairs = []
# Record all the ligands and ligand pairs in clusters1, which may have a subset of the ligands in clusters2
for cluster in clusters1:
for l1 in range(len(cluster.ligands)):
allLigandIDs.append(cluster.ligands[l1].ligandID)
for l2 in range(len(cluster.ligands)-l1-1):
clusters1Pairs.append((cluster.ligands[l1].ligandID, cluster.ligands[l1+l2+1].ligandID))
# Record all the ligand pairs in clusters2 in which both ligands are also in clusters1
for cluster in clusters2:
for l1 in range(len(cluster.ligands)):
for l2 in range(len(cluster.ligands)-l1-1):
                if cluster.ligands[l1].ligandID in allLigandIDs and cluster.ligands[l1+l2+1].ligandID in allLigandIDs: # test the same ligand that forms the pair
clusters2Pairs.append((cluster.ligands[l1].ligandID, cluster.ligands[l1+l2+1].ligandID))
clusters1Set = set(clusters1Pairs)
clusters2Set = set(clusters2Pairs)
numerator = clusters1Set.intersection(clusters2Set)
denomenator = clusters1Set.union(clusters2Set)
return len(numerator) / len(denomenator)
#######
## UNIT TESTS
#######
def test_csv_io():
ligands = read_ligand_csv('ligand_information.csv')
assert ligands[0].ligandID == 0
assert ligands[0].score == -1.3
print("'CSV File I/O' Test Passed")
def read_test_ligands(csv):
"""
Returns a list of 10 ligands. The first 5 are ligand 1 from ligand_information.csv, and the next 5 are ligand 2 from the same file.
Params:
csv - a csv file containing ligand information in the same format as ligand_information.csv
Returns:
a list of Ligand() objects
"""
ligands = []
    index = 0
    with open(csv, 'r') as f:
        for i, line in enumerate(f):
            if i > 0: # skip the header row
                for j in range(5):
                    ligandID, score, smiles, onbits = line.split(',', 3) #set max number of splits = 3
                    ligands.append(Ligand(index, ligandID, score, smiles, onbits.replace('"', '').rstrip())) #remove ""'s and eol char
                    index += 1
            if i == 2: # only the first two data rows are needed
                break
return ligands
def test_silhouette_coeff():
ligands = read_test_ligands('ligand_information.csv')
distanceMatrix = makeDistanceMatrix(ligands)
cluster1 = Cluster(Centroid(ligands[0].bitstring))
cluster2 = Cluster(Centroid(ligands[5].bitstring))
for i in range(5):
cluster1.addLigand(ligands[i])
for i in range(5):
cluster2.addLigand(ligands[i+5])
sc = silhouetteCoeff([cluster1, cluster2], distanceMatrix)
assert sc == 1, 'Silhouette Coefficient Test Failed'
print('Silhouette Coefficient Test Passed')
def test_partition_clustering():
ligands = read_test_ligands('ligand_information.csv')
distanceMatrix = makeDistanceMatrix(ligands)
pc = PartitionClustering()
pcclusters = pc.cluster(ligands, distanceMatrix, 2)
ligandIDs = []
for cluster in pcclusters:
for ligand in cluster.ligands:
ligandIDs.append(ligand.ligandID)
assert ligandIDs == [0,0,0,0,0,1,1,1,1,1] or ligandIDs == [1,1,1,1,1,0,0,0,0,0], 'Partition Clustering Test Failed'
print('Partition Clustering Test Passed')
def test_hierarchical_clustering():
ligands = read_test_ligands('ligand_information.csv')
distanceMatrix = makeDistanceMatrix(ligands)
hc = HierarchicalClustering()
hcclusters = hc.cluster(ligands, distanceMatrix, 2)
ligandIDs = []
for cluster in hcclusters:
for ligand in cluster.ligands:
ligandIDs.append(ligand.ligandID)
assert ligandIDs == [0,0,0,0,0,1,1,1,1,1], 'Hierarchical Clustering Test Failed :('
print('Hierarchical Clustering Test Passed')
def test_make_distance_matrix():
ligands = read_test_ligands('ligand_information.csv')
distanceMatrix = makeDistanceMatrix(ligands)
firstRow = []
for e in distanceMatrix[0]:
firstRow.append(e)
assert firstRow == [1,1,1,1,1,0,0,0,0,0], 'Make Distance Matrix Test Failed'
print('Make Distance Matrix Test Passed')
def test_jaccard_index():
ligands = read_test_ligands('ligand_information.csv')
distanceMatrix = makeDistanceMatrix(ligands)
cluster1 = Cluster(Centroid(ligands[0].bitstring))
cluster2 = Cluster(Centroid(ligands[5].bitstring))
cluster3 = Cluster(Centroid(ligands[0].bitstring))
cluster4 = Cluster(Centroid(ligands[5].bitstring))
for i in range(5):
cluster1.addLigand(ligands[i])
cluster3.addLigand(ligands[i])
for i in range(5):
cluster2.addLigand(ligands[i+5])
cluster4.addLigand(ligands[i+5])
ji1 = jaccardIndex([cluster1, cluster2], [cluster3, cluster4])
ji2 = jaccardIndex([cluster1, cluster3], [cluster2, cluster4])
assert ji1 == 1 and ji2 == 0, 'Jaccard Index Test Failed'
print('Jaccard Index Test Passed')
| [
"[email protected]"
] | |
06aa3958be405297e8f511d41e28a0281dd37cef | 4d02ff32c0d2da6d44d73a6f87a70b0533889be9 | /Detective.py | b0701231b2a44a6127c1981dba4bf1c9701ac923 | [] | no_license | Schloz7/Detective-game | 20c685703e6ea9e899ea60bad6be6f019fa236a6 | 48f55c1efff1665254646da8662a557bf472fa7c | refs/heads/main | 2023-08-21T12:58:20.025108 | 2021-09-22T17:20:03 | 2021-09-22T17:20:03 | 407,574,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,387 | py | import time
life = 3
new_life = 3
def _retry_or_lose(retry):
    # Shared wrong-answer handler: spend one life and replay the question,
    # or end the game once the lives run out.
    global new_life
    if new_life >= 2:
        new_life -= 1
        print("Lives left:", new_life)
        print("Try again")
        retry()
    else:
        print("You lost and got captured by Scotland Yard; now you're going to live in the UK, in a prison cell")
        time.sleep(15)
        quit()
# Thin wrappers keep the names and signatures used throughout the script.
# The `life` argument is unused; lives are tracked in the global `new_life`.
def Answers_script(life):
    _retry_or_lose(favorite_first)
def Answers_script_2(life):
    _retry_or_lose(second_choice)
def Answers_script_3(life):
    _retry_or_lose(three_on_three)
def Answers_script_4(life):
    _retry_or_lose(three_on_four)
def Answers_script_5(life):
    _retry_or_lose(animate_the)
def Answers_script_6(life):
    _retry_or_lose(dissecated)
def Answers_script_7(life):
    _retry_or_lose(electro)
def Answers_script_8(life):
    _retry_or_lose(motherland)
def Answers_script_9(life):
    _retry_or_lose(first_place)
def Answers_script_10(life):
    _retry_or_lose(animate_)
print(" Project \"CodeCademy\" Python Detective-Game")
time.sleep(5)
first_name = input(" Your name here, KGB fellow: ")
time.sleep(5)
print("You're going to live a day in the life of a present-day KGB spy")
time.sleep(5)
print("You have 3 chances; like mistakes in real life, they can't be recovered")
time.sleep(5)
answer_1 = "RUSSIA"
def favorite_first():
    favorite_first = input("Tell me something right now: where was the KGB created in the first place? \n")
favorite_first = favorite_first.upper()
if favorite_first == answer_1:
print("That's the country where you're living right now")
else:
Answers_script(life)
favorite_first()
time.sleep(5)
print("You're in your office as a new member of the global order of secret services,")
time.sleep(5)
print("living through your home country's greatest moment of supremacy")
time.sleep(5)
print("Your office is in the Moscow headquarters, furnished with a beautiful black table and a brand")
time.sleep(5)
print("new white chair; the walls are painted RED and covered with posters of the new order's global enemies")
time.sleep(5)
print("You just got one of them: a picture of a white male, famous for his long list of crimes in")
time.sleep(5)
print("the old Soviet Union, a MURDERER, a ROBBER and, above all, an enemy of your country")
time.sleep(5)
answer_2 = "ARGENTINE"
def second_choice():
    second_choice = input("Tell me, which country's flag has a sun in the middle and blue bands above and below? \n 1.Russia \n 2.Argentine \n 3.USA \n 4.France \n")
    second_choice = second_choice.upper()
    if second_choice == answer_2:
        print("You're starting to understand the underworld better and know that these people are famous for their crimes")
        time.sleep(5)
        print("all over today's world; you leave the room without talking, and nobody talks to you")
        time.sleep(5)
        print("You step out onto the street and see the beautiful city, Russian girls everywhere, and you look kind of handsome")
        time.sleep(5)
        print("You leave for the airport, and on your first-class flight to Argentine you meet a beautiful woman sitting by")
        time.sleep(5)
        print("your side the whole flight; she tells you that she has a place in front of the UBA university in Argentine")
else:
Answers_script_2(life)
second_choice()
time.sleep(5)
print("Your flight arrives at Buenos Aires' Ezeiza airport; you're kind of happy about the invite to stay with the woman, but")
time.sleep(5)
print("you end up refusing: you just used her to smooth your path into the country")
time.sleep(5)
print("You know two things about the suspect: his age, 50, and that he frequents a place called \"Puerto Madero\". The place looks")
time.sleep(5)
print("cool, with girls everywhere; you make your way there at night")
time.sleep(5)
print("The suspect is at a spot with 2 \"capangas\"; you see him laughing at a stupid joke about the country's police")
time.sleep(5)
answer_3 = "INVESTIGATE"
def three_on_three():
    three_on_three = input("BOOM! What happened? What??? A bomb just exploded. You're BLOODY, covered in the blood of the people around you. DECIDE NOW: \n 1.Investigate \n 2.run like a chicken \n")
    three_on_three = three_on_three.upper()
    if three_on_three == answer_3:
        print("Lucky! You can see a gun on the security guard lying dead on the floor; you take his weapon, a .357 Magnum")
        time.sleep(8)
        print("The suspect has obviously disappeared. You have no time to look confused, and you shoot somebody who was keeping you from running to")
        time.sleep(5)
        print("catch your enemy, who is no more than minutes ahead of you; you just leave the place")
else:
Answers_script_3(life)
three_on_three()
time.sleep(5)
answer_4 = "LETHAL KICK"
def three_on_four():
    print("You're running through the streets of Buenos Aires; you can see the disoriented police converging on the place")
    time.sleep(5)
    print("where the bomb exploded. You're running to catch your enemy; you can even see him, alone, a fat old pal running in front of the")
    time.sleep(5)
    print("justice parliament. He grabs a taxi, pointing a gun at the driver, and drives off. There's a car with one person in it, but it's a")
    time.sleep(5)
    three_on_four = input("cop. What do you do? \n 1.Lethal Kick \n 2.think about it\n")
    three_on_four = three_on_four.upper()
    if three_on_four == answer_4:
        time.sleep(5)
        print("You got it! You're driving at high speed, following the suspect all over Buenos Aires. Speed is the key, and you are in a")
        time.sleep(5)
        print("better car and are almost on him.")
else:
Answers_script_4(life)
three_on_four()
time.sleep(5)
answer_5 = "ANIMATE HIM"
def animate_the():
    print("Crash!!!! You got him. He climbs out with blood all over his head; you're not in good shape either, and you're dazed")
    time.sleep(5)
    print("You leave the car to get him. You can hear the police cars coming towards you, but they're still far away. You get")
    time.sleep(5)
    print("into a fight; after a while you gain the upper hand and shoot him in the chest. He tries to run, and you know you must interrogate him,")
    time.sleep(5)
    animate_the = input("but he falls in the middle of the street. He isn't breathing, and you have only a few minutes before the police arrive. What do you do? \n 1.Animate him \n 2.kill him \n 3.drive out\n")
    animate_the = animate_the.upper()
    if animate_the == answer_5:
        print("That's correct. You need him a bit longer. He's almost dying, you know that, but you're good; after all, you're KGB. He's breathing again")
else:
Answers_script_5(life)
animate_the()
time.sleep(5)
answer_6 = "NEVER"
def animate_():
    print("He's crying and doesn't have much time. You ask him for information about who he serves, and he finally tells you,")
    time.sleep(5)
    animate_ = input("and he asks you to help him and not hand him over to the police \n 1.Never \n 2.Of course \n")
    animate_ = animate_.upper()
    if animate_ == answer_6:
        print("You run off looking for a way out and see the university in front of you; not just any school, the medical school, UBA")
else:
Answers_script_10(life)
animate_()
time.sleep(5)
answer_7 = "DISSECATION"
def dissecated():
    print("You enter the university drawing attention, but you know how to handle it, and you slip into a surgery room")
    time.sleep(5)
    print("No one's there but you. You put on a doctor's jacket and head through the next door; you see students in a class,")
    time.sleep(5)
    print("studying. You enter the class and the professor asks you: why are you late? You tell him you're in the wrong class,")
    time.sleep(5)
    dissecated = input("but he insists and tells you to stay. What do you do?\n 1.Dissecation \n 2.Go out \n ")
    dissecated = dissecated.upper()
    if dissecated == answer_7:
        time.sleep(7)
        print("Excellent. The police are running around outside and you see their lights through the classroom windows. You sit through the class")
else:
Answers_script_6(life)
dissecated()
time.sleep(5)
answer_8 = "ELECTROCUTE HIM"
def electro():
    print("You finish the class, sweating. Walking through the hall, you see that the police are on their way out of the university")
    time.sleep(5)
    print("You go upstairs; you want to use a window to leave. First, second, third floor: no one there. You spot a room with wires all over the floor")
    time.sleep(5)
    print("You head to the window and are surprised by a police lieutenant who asks for your ID. You're holding a wire")
    time.sleep(5)
    electro = input("sparking with high voltage \n 1.Give fake ID \n 2.Run \n 3.Electrocute him \n")
    electro = electro.upper()
    if electro == answer_8:
        time.sleep(5)
        print("You did it. He dies screaming; the way is clear now. You use the window and head for the airport, but because you don't want to")
        time.sleep(5)
        print("draw attention, you go to the place of the woman you met on the way to Argentine. She takes you in, and you get a ride to the")
        time.sleep(5)
        print("airport; after talking and laughing, she drives you there")
else:
Answers_script_7(life)
electro()
time.sleep(5)
answer_9 = "RUSSIA"
def motherland():
    motherland = input("She asks you which ticket she has to buy, and you say? \n 1.Russia \n 2.Netherlands \n 3.France \n")
    time.sleep(5)
    motherland = motherland.upper()
    if motherland == answer_9:
        print("Russia, motherland!\n The flight departs after a goodbye, and you land at the Russian airport of Saint Petersburg. It's almost 23:00")
else:
Answers_script_8(life)
time.sleep(5)
motherland()
time.sleep(5)
answer_10 = "7"
def first_place():
    print("The best for last: you got a promotion and you're a lieutenant now. Your commander wants you to answer this last question:")
    time.sleep(5)
    first_place = input("what's the position of Russia in your life? \n 1.7 \n 2.4 \n 3.5 \n 4.6 \n 5.2 \n 6.3 \n 7.1\n")
    first_place = first_place.upper()
    if first_place == answer_10:
        print("поздравление (congratulations)")
time.sleep(5)
else:
Answers_script_9(life)
first_place()
| [
"[email protected]"
] | |
b973959ac3d816fc76595d043dd3add61ae4eba6 | 0bbad3169cef226a318c2f759e10ffae82af5a04 | /cogs/Channel.py | 7eb62340fc248bc0e197eb87d0cb8b745ec872ae | [
"MIT"
] | permissive | ManGoYTB/botmod2 | a8f03fca5fc1d1f3ab12f031bd9597e61eb8e4a5 | 391ac3d8e8e197edce754dbd553acd1d056c5ca8 | refs/heads/master | 2021-10-02T21:16:58.070821 | 2018-11-30T22:39:30 | 2018-11-30T22:39:30 | 110,163,925 | 0 | 2 | MIT | 2017-12-16T01:23:12 | 2017-11-09T20:40:59 | Python | UTF-8 | Python | false | false | 6,323 | py | import discord
from discord.ext import commands
class Channel:
def __init__(self, bot):
self.bot = bot
print('Addon "{}" loaded'.format(self.__class__.__name__))
@commands.command(pass_context=True)
async def ytmango(self,ctx):
"""YouTube ManGoYT"""
embed = discord.Embed(
title="**YouTube ManGoYT**<:ManGoYT:299207412907245569> <:youtube:329991214327660544><:croown:336916788703133707> :",
color=0x2626f0,
description="<:PI:309724535148511242>**https://www.youtube.com/c/ManGoYTB**<:pp:332133471877070848> ```\nLeader MaGeClan``` ")
embed.set_thumbnail(url="https://yt3.ggpht.com/-CRZ_NLhRUEs/AAAAAAAAAAI/AAAAAAAAAAA/sDANm7rogzE/s100-c-k-no-mo-rj-c0xffffff/photo.jpg")
embed.set_author(name="ManGoYT", url="https://www.youtube.com/c/ManGoYTB", icon_url="https://yt3.ggpht.com/-CRZ_NLhRUEs/AAAAAAAAAAI/AAAAAAAAAAA/sDANm7rogzE/s100-c-k-no-mo-rj-c0xffffff/photo.jpg")
embed.add_field(name="<:FS:309728585709649920>", value="<:croown:336916788703133707>", inline=True)
embed.set_footer(text="ManGoYT", icon_url="https://cdn.discordapp.com/emojis/329991214327660544.png")
embed.add_field(name="<:STAFF:329991353700319233>", value="<:omgtroll:299208059685961728>", inline=True)
await self.bot.send_message(discord.Object(ctx.message.channel.id), embed=embed)
@commands.command(pass_context=True)
async def ytmage(self,ctx):
"""YouTube MaGe Clan"""
embed = discord.Embed(
title="**YouTube MaGe Clan**<:youtube:329991214327660544> <:youtube:329991214327660544>:",
color=0xf0ff35,
description="<:PI:309724535148511242>**https://www.youtube.com/channel/UCnFHsZfaCwgBzbmt89Nmj3A**<:pp:332133471877070848>")
embed.set_thumbnail(url="https://yt3.ggpht.com/-palNhXtV33c/AAAAAAAAAAI/AAAAAAAAAAA/DgJik9EDvqA/s100-c-k-no-mo-rj-c0xffffff/photo.jpg")
embed.set_footer(text="MaGeClan", icon_url="https://cdn.discordapp.com/emojis/329991214327660544.png")
embed.set_author(name="MaGeClan", url="https://www.youtube.com/channel/UCnFHsZfaCwgBzbmt89Nmj3A", icon_url="https://yt3.ggpht.com/-palNhXtV33c/AAAAAAAAAAI/AAAAAAAAAAA/DgJik9EDvqA/s100-c-k-no-mo-rj-c0xffffff/photo.jpg")
await self.bot.send_message(discord.Object(ctx.message.channel.id), embed=embed)
@commands.command(pass_context=True)
async def ytabdel(self,ctx):
"""YouTube Abdel64"""
embed = discord.Embed(
title="**YouTube Abdel64**<:youtube:329991214327660544> <:youtube:329991214327660544>:",
color=0x9e30c2,
description="<:PI:309724535148511242>**https://www.youtube.com/channel/UCdBgaPUcYkOEooVlV5rxrXw**<:pp:332133471877070848>")
embed.set_thumbnail(url="https://yt3.ggpht.com/-8JmQnUV5SU0/AAAAAAAAAAI/AAAAAAAAAAA/iXdHsJl4X3M/s100-c-k-no-mo-rj-c0xffffff/photo.jpg")
embed.set_footer(text="Abdel64", icon_url="https://cdn.discordapp.com/emojis/329991214327660544.png")
embed.set_author(name="Abdel64", url="https://www.youtube.com/channel/UCdBgaPUcYkOEooVlV5rxrXw", icon_url="https://yt3.ggpht.com/-8JmQnUV5SU0/AAAAAAAAAAI/AAAAAAAAAAA/iXdHsJl4X3M/s100-c-k-no-mo-rj-c0xffffff/photo.jpg")
await self.bot.send_message(discord.Object(ctx.message.channel.id), embed=embed)
@commands.command(pass_context=True)
async def ytray(self,ctx):
"""YouTube Ray"""
embed = discord.Embed(
title="**YouTube Ray**<:youtube:329991214327660544> <:youtube:329991214327660544>:",
color=0x930003,
description="<:PI:309724535148511242>**https://www.youtube.com/channel/UCyGpuvdM27Lfi4lzSgdY9eA**<:pp:332133471877070848>")
embed.set_footer(text="Ray", icon_url="https://cdn.discordapp.com/emojis/329991214327660544.png")
embed.set_thumbnail(url="https://yt3.ggpht.com/-UqJIQBQ6o5Q/AAAAAAAAAAI/AAAAAAAAAAA/cnNVA1EX5L4/s100-c-k-no-mo-rj-c0xffffff/photo.jpg")
embed.set_author(name="Ray", url="https://www.youtube.com/channel/UCyGpuvdM27Lfi4lzSgdY9eA", icon_url="https://yt3.ggpht.com/-UqJIQBQ6o5Q/AAAAAAAAAAI/AAAAAAAAAAA/cnNVA1EX5L4/s100-c-k-no-mo-rj-c0xffffff/photo.jpg")
await self.bot.send_message(discord.Object(ctx.message.channel.id), embed=embed)
@commands.command(pass_context=True)
async def ytkifkif(self,ctx):
"""YouTube Kifkif"""
embed = discord.Embed(
title="**YouTube Kifkif**<:youtube:329991214327660544> <:youtube:329991214327660544>:",
color=0xc56567,
description="<:PI:309724535148511242>**https://www.youtube.com/channel/UCzVdft7CxIu-SbZxR716UcQ**<:pp:332133471877070848>")
embed.set_footer(text="Kifkif59", icon_url="https://cdn.discordapp.com/emojis/329991214327660544.png")
embed.set_thumbnail(url="https://yt3.ggpht.com/-l5VUiWvdje8/AAAAAAAAAAI/AAAAAAAAAAA/SX00dn2m8SE/s100-c-k-no-mo-rj-c0xffffff/photo.jpg")
embed.set_author(name="Kifkif", url="https://www.youtube.com/channel/UCzVdft7CxIu-SbZxR716UcQ", icon_url="https://yt3.ggpht.com/-l5VUiWvdje8/AAAAAAAAAAI/AAAAAAAAAAA/SX00dn2m8SE/s100-c-k-no-mo-rj-c0xffffff/photo.jpg")
await self.bot.send_message(discord.Object(ctx.message.channel.id), embed=embed)
@commands.command(pass_context=True)
async def ytreaper(self,ctx):
"""YouTube The Reaper"""
embed = discord.Embed(
title="**YouTube The Reaper**<:youtube:329991214327660544> <:youtube:329991214327660544>:",
color=0x2626f0,
description="<:PI:309724535148511242>**https://www.youtube.com/channel/UCiJMqxVi_vSVbIcuGwVQybg**<:pp:332133471877070848>")
embed.set_footer(text="The Reaper", icon_url="https://cdn.discordapp.com/emojis/329991214327660544.png")
embed.set_thumbnail(url="https://yt3.ggpht.com/-MpWlOugIa0U/AAAAAAAAAAI/AAAAAAAAAAA/FsRxyfe8Fxo/s100-c-k-no-mo-rj-c0xffffff/photo.jpg")
embed.set_author(name="The Reaper", url="https://www.youtube.com/channel/UCiJMqxVi_vSVbIcuGwVQybg", icon_url="https://yt3.ggpht.com/-MpWlOugIa0U/AAAAAAAAAAI/AAAAAAAAAAA/FsRxyfe8Fxo/s100-c-k-no-mo-rj-c0xffffff/photo.jpg")
await self.bot.send_message(discord.Object(ctx.message.channel.id), embed=embed)
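    # (illustrative sketch) the commands above differ only in their data; a
    # table-driven helper could remove the duplication, e.g.:
    # async def _send_channel_embed(self, ctx, name, url, icon, color):
    #     embed = discord.Embed(title=name, color=color, description=url)
    #     embed.set_author(name=name, url=url, icon_url=icon)
    #     await self.bot.send_message(discord.Object(ctx.message.channel.id), embed=embed)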
def setup(bot):
bot.add_cog(Channel(bot)) | [
"[email protected]"
] | |
3a9990f22d6b88566aaf3f7f72106e57ed92f045 | da83771f002d731600af9ab55d61e04822fbb551 | /src/app.py | ca99aa8361821362921b02f66ab9156f91d747b3 | [] | no_license | katherineranney/paws-data-pipeline | b457b80001a5a1d30ca5a6fc1f55c44a1e3d9ca7 | 474cb8ac7b0dbd2661965031f63ef750ed4034e3 | refs/heads/master | 2022-09-15T18:23:49.046991 | 2020-05-20T02:53:22 | 2020-05-20T02:53:22 | 268,402,540 | 1 | 0 | null | 2020-06-01T02:04:48 | 2020-06-01T02:04:47 | null | UTF-8 | Python | false | false | 534 | py | import os
from flask import Flask
app = Flask(__name__)
def create_app():
app.secret_key = '1u9L#*&I3Ntc'
app.config['MAX_CONTENT_LENGTH'] = 500 * 1024 * 1024 # 500 Megs
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
from server.api import admin_api
from server.api import common_api
    app.register_blueprint(admin_api)
    app.register_blueprint(common_api)
    return app  # return the configured app so the factory can also be used by a WSGI server
if __name__ == "__main__":
FLASK_PORT = os.getenv('FLASK_PORT', None)
create_app()
app.run(host='0.0.0.0', debug=True, port=FLASK_PORT)
| [
"[email protected]"
] | |
30c2059220345e5963dc1f73ad89ab2ad852d1cc | 75ea066045ca46d2ed8a694b3392ddb2d02410c7 | /Solutii/solutie_lab3/naive_bayes.py | 099a40f7065f7dbc7b76da9aae57068d474e68c5 | [] | no_license | AnaCiaciru/ML | 62e94c19d7fd5c646dd4ceb70556a7b78835203d | 54f47877e4fe73d0f26b2739a1595ca0bd3b9244 | refs/heads/master | 2022-06-04T12:10:42.354782 | 2020-05-02T20:31:16 | 2020-05-02T20:31:16 | 260,765,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,323 | py | import pdb
import numpy as np
import math
from sklearn.naive_bayes import MultinomialNB
import matplotlib.pyplot as plt
# load data
train_images = np.loadtxt('data/train_images.txt')
train_labels = np.loadtxt('data/train_labels.txt', 'int')
test_images = np.loadtxt('data/test_images.txt')
test_labels = np.loadtxt('data/test_labels.txt', 'int')
print(train_images)
image = train_images[0, :] # the first image
image = np.reshape(image, (28, 28))
plt.imshow(image.astype(np.uint8), cmap='gray')
plt.show()
### ---- 2 ----
def values_to_bins(x, bins):
x = np.digitize(x, bins)
return x - 1
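# quick worked example (illustrative): with num_bins = 5,
# np.linspace(0, 255, num=5) gives bins [0, 63.75, 127.5, 191.25, 255];
# np.digitize returns 1-based bin indices, so a pixel value of 100 maps to 2,
# and the "- 1" above shifts it to the 0-based bin index 1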
"""
num_bins = 5
bins = np.linspace(0, 255, num=num_bins)
x_train = values_to_bins(train_images, bins)
x_test = values_to_bins(test_images, bins)
print(x_train)
### ---- 3 ----
clf = MultinomialNB()
clf.fit(x_train, train_labels)
print('accuracy =', clf.score(x_test, test_labels))
### ---- 4 ----
for num_bins in range(3, 12, 2):
bins = np.linspace(0, 255, num=num_bins)
x_train = values_to_bins(train_images, bins)
x_test = values_to_bins(test_images, bins)
clf = MultinomialNB()
clf.fit(x_train, train_labels)
print('accuracy for num_bins=%d is %f' % (num_bins, clf.score(x_test, test_labels)))
"""
### ---- 5 ----
num_bins = 5
bins = np.linspace(0, 255, num=num_bins)
x_train = values_to_bins(train_images, bins)
x_test = values_to_bins(test_images, bins)
clf = MultinomialNB()
clf.fit(x_train, train_labels)
predicted_labels = clf.predict(x_test)
misclasified_indices = np.where(predicted_labels != test_labels)[0]
"""
for i in range(20):
    image = test_images[misclasified_indices[i], :]  # use test_images: the misclassified indices refer to the test set
    image = np.reshape(image, (28, 28))
    plt.imshow(image.astype(np.uint8), cmap='gray')
    plt.title('This image was classified as %d.' % predicted_labels[misclasified_indices[i]])
plt.show()
"""
### ---- 6 ----
def confusion_matrix(y_true, y_pred):
num_classes = max(y_true.max(), y_pred.max()) + 1
conf_matrix = np.zeros((num_classes, num_classes))
for i in range(len(y_true)):
conf_matrix[int(y_true[i]), int(y_pred[i])] += 1
return conf_matrix
print(confusion_matrix(test_labels, predicted_labels))
print(test_labels)
print(predicted_labels) | [
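# optional sanity check (illustrative; assumes scikit-learn is available):
# from sklearn.metrics import confusion_matrix as sk_confusion_matrix
# assert (confusion_matrix(test_labels, predicted_labels) ==
#         sk_confusion_matrix(test_labels, predicted_labels)).all()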
"[email protected]"
] | |
032b1060a03adb30aa5683cf24b8a2739731d6a0 | e59fe240f0359aa32c59b5e9f581db0bfdb315b8 | /galaxy-dist/eggs/bx_python-0.7.1_7b95ff194725-py2.7-linux-x86_64-ucs2.egg/EGG-INFO/scripts/wiggle_to_array_tree.py | a35dd2e8ee1fe122a3ce2f21581037c8b54df52c | [
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | subway/Galaxy-Distribution | dc269a0258471597d483687a0f1dd9e10bd47448 | d16d6f9b6a8b7f41a218c06539863c8ce4d5a73c | refs/heads/master | 2021-06-30T06:26:55.237251 | 2015-07-04T23:55:51 | 2015-07-04T23:55:51 | 15,899,275 | 1 | 2 | null | 2020-10-07T06:17:26 | 2014-01-14T10:47:28 | Groff | UTF-8 | Python | false | false | 845 | py | #!/afs/bx.psu.edu/project/pythons/linux-x86_64-ucs2/bin/python2.7
"""
Read data in UCSC wiggle format and write it to an "array tree" file.
usage: %prog array_length output.array_tree < input.wig
"""
from __future__ import division
import sys
from bx.arrays.array_tree import *
from bx.arrays.wiggle import WiggleReader
def main():
sizes_fname = sys.argv[1]
out_fname = sys.argv[2]
sizes = {}
for line in open( sizes_fname ):
fields = line.split()
sizes[ fields[0] ] = int( fields[1] )
# Fill array from wiggle
d = array_tree_dict_from_reader( WiggleReader( sys.stdin ), sizes )
for value in d.itervalues():
value.root.build_summary()
f = open( out_fname, "w" )
FileArrayTreeDict.dict_to_file( d, f )
f.close()
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
3fb00fc4722fcbe2be97918ae390255eacb8357a | 797e85fd53986dc1bb6473a190b5aea08d482dce | /src/manage.py | 589960de0f7f44614a0ce9d6ff27312fd8ad19bb | [] | no_license | Rybak-a87/practice_django-music_shop | 05798ac70b28f63d0e26bc048807b7e3c9e3aae4 | 647227be457113ebe60aac7b194a385fe6c55847 | refs/heads/master | 2023-07-16T21:25:20.219705 | 2021-08-29T16:05:06 | 2021-08-29T16:05:06 | 400,799,338 | 0 | 0 | null | 2021-08-29T16:05:06 | 2021-08-28T13:27:31 | Python | UTF-8 | Python | false | false | 670 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'musicshop.settings.base')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
4546a7d4eac20de1f6ebd31249342795bcd6c383 | 24811aa22d65aefa84b8b9dfbe2a08f04dd794cd | /scrapy爬取伯乐在线9_完整版.py | b6bf71fa880414db138925981fba727dfcd6fe73 | [] | no_license | MrCat9/Python_Scrapy-Redis_elasticsearch_django | 00af0993aef378ac7817ebb9033f60a8a8b65473 | 5a40d59b8484c75753f54009ca142d1eaf2b694b | refs/heads/master | 2020-03-10T12:00:05.830659 | 2020-02-16T09:25:12 | 2020-02-16T09:25:12 | 129,367,602 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 25,938 | py | # main.py
# -*- coding: utf-8 -*-
from scrapy.cmdline import execute  # calling this function runs scrapy's command-line scripts
# import sys
# sys.path.append('F:\eclipse\···\···\test_jobbole')  # set the project directory  # copy the path of the test_jobbole project
import sys
import os
# os.path.abspath(__file__)  # get the path of the current file
# os.path.dirname(os.path.abspath(__file__))  # get the path of the folder containing the current file
print(os.path.abspath(__file__))  # F:\eclipse\···\···\test_jobbole\main.py
print(os.path.dirname(os.path.abspath(__file__)))  # F:\eclipse\···\···\test_jobbole
sys.path.append(os.path.dirname(os.path.abspath(__file__)))  # set the project directory
execute(['scrapy', 'crawl', 'jobbole_spider'])  # call execute to run the scrapy command
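# equivalent shell usage (illustrative): running "scrapy crawl jobbole_spider"
# from the project root does the same thing as the execute(...) call above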
# common.py
# -*- coding: utf-8 -*-
import hashlib
def get_md5(url):  # generate an MD5 digest of a url
    if isinstance(url, str):  # in Python 3, str is Unicode; this checks whether url is a (Unicode) str
        url = url.encode(encoding='utf_8')  # if it is Unicode, encode it to utf-8: hashing only accepts utf-8 bytes
m = hashlib.md5()
m.update(url)
return m.hexdigest()
if __name__ == "__main__":
    print(get_md5("http://jobbole.com".encode(encoding='utf_8'))) # note: Unicode objects must be encoded before hashing
# 0efdf49af511fd88681529ef8c2e5fbf
# jobbole_spider.py
# -*- coding: utf-8 -*-
import scrapy
import re
import datetime  # to write the article creation time to the database, the str create_time must be converted to a date
from scrapy.http import Request  # after extracting a url, hand it to scrapy for download
from urllib import parse  # on py2 this would be: import urlparse
from items import JobboleArticleItem, ArticleItemLoader  # use the custom ItemLoader --> ArticleItemLoader
from utils.common import get_md5  # to MD5 the url
from scrapy.loader import ItemLoader  # an itemloader makes the code easier to maintain
class JobboleSpiderSpider(scrapy.Spider):
name = 'jobbole_spider'
    allowed_domains = ['blog.jobbole.com']
    start_urls = ['http://blog.jobbole.com/all-posts/']  # put the urls that need crawling in this list
    def parse(self, response):  # every crawled url passes through this function
        '''
        1. Extract the article urls from the list page and hand them to scrapy to download and then parse
        2. Extract the next-page url and hand it to scrapy to download; once downloaded it is passed back to parse
        '''
        # parse all article urls on the list page and hand them to scrapy for download and subsequent parsing
post_nodes = response.css('#archive .floated-thumb .post-thumb a')
for post_node in post_nodes:
            # sometimes the extracted url is not a complete one and needs completing,
            # i.e. response.url + post_url
image_url = post_node.css('img::attr(src)').extract_first("")
post_url = post_node.css('::attr(href)').extract_first("")
            # urljoin below builds the absolute url
yield Request(url = parse.urljoin(response.url, post_url), meta = {'front_image_url':image_url}, callback = self.parse_detail)
            # "yield" hands the Request to scrapy for download
            # if parse_detail is never reached --> set dont_filter=True on the Request
            # the meta argument of Request passes information along to the next callback
            # print(post_url)
        # extract the next-page url and hand it to scrapy for download
next_url = response.css(".next.page-numbers::attr(href)").extract_first("")
        # no space between .next and .page-numbers: this matches elements whose class list contains both (.next and .page-numbers)
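        # worked contrast (illustrative): "a.next.page-numbers" matches <a class="next page-numbers" ...>,
        # while ".next .page-numbers" (with a space) would instead select a .page-numbers element nested inside a .next element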
if next_url:
yield Request(url = parse.urljoin(response.url, next_url), callback = self.parse)
def parse_detail(self, response):
        # extract the article's fields (xpath version)
# title = response.xpath('//*[@id="post-113789"]/div[1]/h1/text()').extract()[0]
#
# create_date = response.xpath('//*[@id="post-113789"]/div[2]/p/text()[1]').extract()[0].strip().replace('·', '').strip()
#
# praise_nums = response.xpath('//*[@id="113789votetotal"]/text()').extract()
# if praise_nums:
# praise_nums = int(praise_nums[0])
# else:
# praise_nums = 0
#
# fav_nums = response.xpath('//*[@id="post-113789"]/div[3]/div[12]/span[2]/text()').extract()[0]
# match_re = re.match(r'.*?(\d+).*', fav_nums)
# if match_re:
# fav_nums = int(match_re.group(1))
# else:
# fav_nums = 0
#
# comment_nums = response.xpath('//*[@id="post-113789"]/div[3]/div[12]/a/span/text()').extract()[0]
# match_re = re.match(r'.*?(\d+).*', comment_nums)
# if match_re:
# comment_nums = int(match_re.group(1))
# else:
# comment_nums = 0
#
# content = response.xpath('//*[@id="post-113789"]/div[3]').extract()[0]
#
# tag_list = response.xpath('//*[@id="post-113789"]/div[2]/p/a/text()').extract()
# tag_list = [element for element in tag_list if not element.strip().endswith('评论')]
# tags = ','.join(tag_list)
        # below: extract the same fields via css selectors
        # article_item = JobboleArticleItem()  # instantiate
        #
        # front_image_url = response.meta.get('front_image_url', '')  # get the value for key front_image_url; if the key is missing, return '' (empty)
        # # the article's cover image
#
# title = response.css('.entry-header h1::text').extract()[0]
#
# create_date = response.css('p.entry-meta-hide-on-mobile::text').extract()[0].strip().replace('·', '').strip()
#
# praise_nums = response.css('.vote-post-up h10::text').extract_first()
# if praise_nums:
# praise_nums = int(praise_nums[0])
# else:
# praise_nums = 0
#
# fav_nums = response.css('.bookmark-btn::text').extract()[0]
# match_re = re.match(r'.*?(\d+).*', fav_nums)
# if match_re:
# fav_nums = int(match_re.group(1))
# else:
# fav_nums = 0
#
# comment_nums = response.css("a[href='#article-comment'] span::text").extract()[0]
# match_re = re.match(r'.*?(\d+).*', comment_nums)
# if match_re:
# comment_nums = int(match_re.group(1))
# else:
# comment_nums = 0
#
# content = response.css("div.entry").extract()[0]
#
# tag_list = response.css("p.entry-meta-hide-on-mobile a::text").extract()
# tag_list = [element for element in tag_list if not element.strip().endswith('评论')]
# tags = ','.join(tag_list)
#
        # # fill the values into the item
        # article_item['title'] = title
        # article_item['url'] = response.url
        # article_item['url_object_id'] = get_md5(response.url)  # MD5 the url
        #
        # try:  # to write the creation time to the database, convert the str create_time to a date
        #     create_date = datetime.datetime.strptime(create_date, '%Y/%m/%d').date()  # convert a '%Y/%m/%d' str to a date
        # except Exception as e:
        #     create_date = datetime.datetime.now().date()
        # article_item['create_date'] = create_date
        #
        # article_item['front_image_url'] = [front_image_url]  # the images pipeline expects a list
# article_item['praise_nums'] = praise_nums
# article_item['fav_nums'] = fav_nums
# article_item['comment_nums'] = comment_nums
# article_item['tags'] = tags
# article_item['content'] = content
        # load the item through an ItemLoader
        front_image_url = response.meta.get('front_image_url', '')  # get the value for key front_image_url; if the key is missing, return '' (empty)
        # item_loader = ItemLoader(item=JobboleArticleItem(), response=response)  # define the ItemLoader instance
        item_loader = ArticleItemLoader(item=JobboleArticleItem(), response=response)  # switch to the custom ItemLoader
# ItemLoader.add_css(self, field_name, css)
# ItemLoader.add_xpath(self, field_name, xpath)
# ItemLoader._add_value(self, field_name, value)
item_loader.add_css("title", ".entry-header h1::text")
item_loader.add_value("url", response.url)
item_loader.add_value("url_object_id", get_md5(response.url))
item_loader.add_css("create_date", "p.entry-meta-hide-on-mobile::text")
item_loader.add_value("front_image_url", [front_image_url])
item_loader.add_css("praise_nums", ".vote-post-up h10::text")
item_loader.add_css("comment_nums", "a[href='#article-comment'] span::text")
item_loader.add_css("fav_nums", ".bookmark-btn::text")
item_loader.add_css("tags", "p.entry-meta-hide-on-mobile a::text")
item_loader.add_css("content", "div.entry")
article_item = item_loader.load_item()
        # the default item approach has two problems: 1. every value is a list 2. the extracted values still need processing (regex extraction, etc.)
        # --> fix this in items.py: 1. use TakeFirst inside the Field() 2. use MapCompose inside the Field()
        yield article_item  # after yield, the item is passed on to pipelines.py
pass
# item.py
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.loader.processors import MapCompose, TakeFirst, Join  # used to process incoming field values
import datetime
from scrapy.loader import ItemLoader  # to avoid writing output_processor = TakeFirst() on every field, we define a custom itemloader by subclassing ItemLoader
import re
class TestJobboleItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
def add_jobbole(value):  # the incoming value is a str
return value+"-jobbole"
def date_convert(value):
value = value.strip().replace('·', '').strip()
    try:  # to write the creation time to the database, convert the str create_time to a date
        create_date = datetime.datetime.strptime(value, '%Y/%m/%d').date()  # convert a '%Y/%m/%d' str to a date
except Exception as e:
create_date = datetime.datetime.now().date()
return create_date
#####create_date = response.css('p.entry-meta-hide-on-mobile::text').extract()[0].strip().replace('·', '').strip()
def get_nums(value):
match_re = re.match(r'.*?(\d+).*', value)
if match_re:
nums = int(match_re.group(1))
else:
nums = 0
return nums
def remove_comment_tags(value):
    # drop the comment counts that get extracted along with the tags
if "评论" in value:
return ""
else:
return value
def return_value(value):
return value
class ArticleItemLoader(ItemLoader):  # custom itemloader
    default_output_processor = TakeFirst()  # so we don't have to write output_processor = TakeFirst() on every field
class JobboleArticleItem(scrapy.Item):
title = scrapy.Field(
        # input_processor = MapCompose(add_jobbole)  # title is passed as "value" into the add_jobbole function
        # input_processor = MapCompose(lambda x: x + "--jobbole")
        input_processor = MapCompose(lambda x: x + "--jobbole", add_jobbole),  # each title value is run through both functions, left to right
#output_processor = TakeFirst()
)
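    # worked example (illustrative): with a plain ItemLoader every field loads as a list,
    # e.g. item['title'] == ['Some title--jobbole-jobbole']; the TakeFirst default
    # output processor in ArticleItemLoader unwraps it to the bare string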
create_date = scrapy.Field(
        input_processor = MapCompose(date_convert),  # after processing this is a list containing a date
        # output_processor = TakeFirst()  # takes the date out, turning the list into a date
)
url = scrapy.Field()
    url_object_id = scrapy.Field()  # the url is MD5-hashed so every id has the same length
    front_image_url = scrapy.Field(  # downloading cover images requires configuring ITEM_PIPELINES in settings.py  # front_image_url must receive a list because the images pipeline expects an array
        output_processor = MapCompose(return_value)  # override default_output_processor = TakeFirst() so a list is passed through
)
    # """ in settings.py:
    # ITEM_PIPELINES = {
    #     'test_jobbole.pipelines.TestJobbolePipeline': 300,  # smaller numbers run first
    #     #'scrapy.pipelines.images.ImagesPipeline': 1
    #     'test_jobbole.pipelines.ArticleImagePipeline': 1  # use the customized pipeline (ArticleImagePipeline)
    # }
    # IMAGES_URLS_FIELD = 'front_image_url'  # tell the images pipeline which item field holds the image urls  # it expects a list
    # import os  # used to get the path of the current file (settings.py)
    # # os.path.dirname(__file__)  # get the name of the current file's directory (test_jobbole)  # __file__ is the name of the current file (settings.py)
    # project_dir = os.path.abspath(os.path.dirname(__file__))  # get the path of the directory containing this file
    # IMAGES_STORE = os.path.join(project_dir, 'images')  # where downloaded images are saved; can be an absolute path, or a relative path inside the project. Create an "images" folder next to settings.py
    # # images are stored in the "images" folder under project_dir
    # # downloading images requires the PIL library
    # # install it from cmd:
    # # pip install -i https://pypi.douban.com/simple pillow
    # # IMAGES_MIN_HEIGHT = 100  # minimum height of images to download  # image filtering can be configured in settings.py
    # # IMAGES_MIN_WIDTH = 100
    # # '''to implement custom behaviour, override the relevant methods: create a class in pipelines.py that inherits ImagesPipeline'''
    # """
    front_image_path = scrapy.Field()  # local path of the downloaded image
praise_nums = scrapy.Field(
input_processor = MapCompose(get_nums)
)
fav_nums = scrapy.Field(
input_processor = MapCompose(get_nums)
)
comment_nums = scrapy.Field(
input_processor = MapCompose(get_nums)
)
    tags = scrapy.Field(  # tags here is really tag_list, a list
        input_processor = MapCompose(remove_comment_tags),  # drop the comment counts extracted into the tags
        output_processor = Join(',')  # TakeFirst() will not do here; use Join, where the ',' specifies the separator
)
content = scrapy.Field()
# pipelines.py
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.pipelines.images import ImagesPipeline
import codecs  # use codecs for opening and writing files
import json
from scrapy.exporters import JsonItemExporter  # exports items to a json file
import MySQLdb
import MySQLdb.cursors
from twisted.enterprise import adbapi  # makes the MySQLdb operations asynchronous
class TestJobbolePipeline(object):  # pipelines mainly handle data storage  # this pipeline's number is 300: large, so it runs later
    def process_item(self, item, spider):  # pipelines.py receives the item  # remember to uncomment ITEM_PIPELINES in settings.py
        return item
class JsonWithEncodingPipeline(object):  # configured with the number 2 in settings.py
    # custom export to a json file
    def __init__(self):
        self.file = codecs.open('article.json', 'w', encoding='utf_8')
    def process_item(self, item, spider):  # pipelines.py receives the item; write it to the file here
        # remember to return item from process_item, because the next pipeline may still need to process it
        lines = json.dumps(dict(item), ensure_ascii=False) + '\n'  # without ensure_ascii=False, Chinese text would be written as escaped Unicode
        self.file.write(lines)
        return item
    def close_spider(self, spider):  # called when the spider closes (scrapy looks this method up by the name close_spider)
        self.file.close()
class MysqlPipeline(object):  # after writing a pipeline, register it in settings.py
    # writes to mysql synchronously; database inserts may be slower than the spider can parse --> consider an asynchronous approach
    def __init__(self):
        self.conn = MySQLdb.connect('127.0.0.1', 'root', '123456', 'article_spider', charset='utf8', use_unicode=True)  # connect to the database
        # the connection settings can live in settings.py:
        # MYSQL_HOST = "127.0.0.1"
        # MYSQL_USER = "root"
        # MYSQL_PASSWORD = "123456"
        # MYSQL_DBNAME = "article_spider"
        # MySQLdb.connect parameters:
        # MySQLdb.connect('host', 'user', 'password', 'dbname', charset='utf8', use_unicode=True)
        # conn = pymysql.Connect(host='127.0.0.1', user='root', passwd='123456', port=3306, db='pymysql_test01')
        self.cursor = self.conn.cursor()
    def process_item(self, item, spider):  # override process_item
        insert_sql = """
            insert into jobbole_article(title, create_date, url, url_object_id, fav_nums, front_image_url, front_image_path, praise_nums, comment_nums, tags, content)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
        """
        self.cursor.execute(insert_sql, (item["title"], item["create_date"], item["url"], item["url_object_id"], item["fav_nums"], item["front_image_url"], item.get("front_image_path", ""), item["praise_nums"], item["comment_nums"], item["tags"], item["content"]))  # item.get avoids a KeyError if front_image_path was never filled in
        self.conn.commit()
        return item  # the next pipeline may still need the item
class MysqlTwistedPipline(object):  # after writing a pipeline, register it in settings.py
    # asynchronous mysql inserts
    def __init__(self, dbpool):
        self.dbpool = dbpool
    @classmethod
    def from_settings(cls, settings):  # this method reads values from settings.py  # cls refers to the MysqlTwistedPipline class itself
        # parameters taken from settings
        dbparms = dict(
            host = settings["MYSQL_HOST"],
            db = settings["MYSQL_DBNAME"],
            user = settings["MYSQL_USER"],
            passwd = settings["MYSQL_PASSWORD"],
            charset='utf8',
            cursorclass=MySQLdb.cursors.DictCursor,
            use_unicode=True,
        )
        dbpool = adbapi.ConnectionPool("MySQLdb", **dbparms)  # pass the variable keyword parameters dbparms
        # dbpool = adbapi.ConnectionPool("MySQLdb", host = settings["MYSQL_HOST"], db = settings["MYSQL_DBNAME"], ……)
        return cls(dbpool)
    def process_item(self, item, spider):
        # use twisted to run the mysql insert asynchronously
        query = self.dbpool.runInteraction(self.do_insert, item)  # do_insert is the function to run asynchronously  # item is the data to insert
        query.addErrback(self.handle_error)  # handle exceptions
        return item  # the next pipeline may still need the item
    def handle_error(self, failure):  # asynchronous error handler
        # handle exceptions raised by the async insert
        print (failure)
    def do_insert(self, cursor, item):
        # perform the actual insert
        insert_sql = """
            insert into jobbole_article(title, create_date, url, url_object_id, fav_nums, front_image_url, front_image_path, praise_nums, comment_nums, tags, content)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
        """
        cursor.execute(insert_sql, (item["title"], item["create_date"], item["url"], item["url_object_id"], item["fav_nums"], item["front_image_url"], item.get("front_image_path", ""), item["praise_nums"], item["comment_nums"], item["tags"], item["content"]))
        # a mismatched parameter count, e.g. more %s placeholders than values passed in, raises
        # "TypeError: not all arguments converted during string formatting"
        # self.conn.commit()  # not needed: runInteraction commits automatically
class JsonExporterPipeline(object):  # exports items to a json file  # configured with the number 2 in settings.py for testing
    def __init__(self):
        # use the json exporter provided by scrapy to export a json file
        self.file = open('articleexporter.json', 'wb')
        self.exporter = JsonItemExporter(self.file, encoding='utf_8', ensure_ascii=False)  # instantiate JsonItemExporter
        self.exporter.start_exporting()
    def close_spider(self, spider):
        self.exporter.finish_exporting()
        self.file.close()
    def process_item(self, item, spider):  # pipelines.py receives the item; export it here
        # remember to return item, because the next pipeline may still need to process it
        self.exporter.export_item(item)
        return item
class ArticleImagePipeline(ImagesPipeline):  # customized pipeline ArticleImagePipeline  # its number is 1: small, so it runs first
    def item_completed(self, results, item, info):  # override item_completed
        if 'front_image_url' in item:  # there may be no cover image  # item behaves like a dict; check the url field that was actually populated
            for ok, value in results:
                image_file_path = value['path']  # local path where the image was saved
                item['front_image_path'] = image_file_path  # store the local image path in the item
        return item
# settings.py
# -*- coding: utf-8 -*-
# Scrapy settings for test_jobbole project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'test_jobbole'
SPIDER_MODULES = ['test_jobbole.spiders']
NEWSPIDER_MODULE = 'test_jobbole.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'test_jobbole (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'test_jobbole.middlewares.TestJobboleSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'test_jobbole.middlewares.TestJobboleDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    #'test_jobbole.pipelines.JsonExporterPipeline': 2, # smaller numbers run first
    #'scrapy.pipelines.images.ImagesPipeline': 1
    'test_jobbole.pipelines.ArticleImagePipeline': 2, # use the customized pipeline (ArticleImagePipeline)
#'test_jobbole.pipelines.MysqlPipeline': 1
'test_jobbole.pipelines.MysqlTwistedPipline': 1
}
IMAGES_URLS_FIELD = 'front_image_url'  # tell the images pipeline which item field holds the image urls  # it expects a list
import os  # used to get the path of the current file (settings.py)
# os.path.dirname(__file__)  # get the name of the current file's directory (test_jobbole)  # __file__ is the name of the current file (settings.py)
project_dir = os.path.abspath(os.path.dirname(__file__))  # get the path of the directory containing this file
IMAGES_STORE = os.path.join(project_dir, 'images')  # where downloaded images are saved; can be an absolute path, or a relative path inside the project. Create an "images" folder next to settings.py
# images are stored in the "images" folder under project_dir
# downloading images requires the PIL library
# install it from cmd:
# pip install -i https://pypi.douban.com/simple pillow
# IMAGES_MIN_HEIGHT = 100  # minimum height of images to download  # image filtering can be configured in settings.py
# IMAGES_MIN_WIDTH = 100
# '''to implement custom behaviour, override the relevant methods: create a class in pipelines.py that inherits ImagesPipeline'''
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
MYSQL_HOST = "127.0.0.1"
MYSQL_USER = "root"
MYSQL_PASSWORD = "123456"
MYSQL_DBNAME = "article_spider"
| [
"[email protected]"
] | |
c6ec72a0c4a49646a918b6c5dac036c3800f25dc | e9670a246e856c9f43b5f48e0c77ae39d038cfae | /collab_filter_features.py | 96015e9ca0ff9485cecd6ee687cc13b87a755f85 | [] | no_license | svnathan/cs221-project | 9b8cc5836268a31c9d3e783f860d726f8bfce7c9 | 96cc86b7479c710e8a7df2c745aaf246cb54f1da | refs/heads/master | 2021-01-11T03:16:33.898546 | 2016-12-17T01:07:26 | 2016-12-17T01:07:26 | 71,077,004 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,473 | py | import xml.etree.ElementTree as et
import random
import pickle
import math
import test
import operator
import collections
import string
import re
import word_pairs
import time
NUM_WORDS_IN_TUPLE = 2
PAIR_COUNT_THRESHOLD = 15
questionTagsDict = {}
user_tagsList_dict = {}
user_tagPairList_dict = {}
tagPair_count_dict = collections.Counter()
final_tagPair_count_dict = {}
percentDone = 0 # For status completion
def parseDatasetForTags():
print "Reading Dataset..."
tree = et.parse("dataset/Posts.xml")
doc = tree.getroot()
print "Done!!"
iter = 0
totalRows = len(doc.findall('row'))
createQuestionTagsDict(doc)
global percentDone
percentDone = 0
print "Parsing the answers now..."
for row in doc.findall('row'):
iter += 1
printCompletionStatus(iter,totalRows)
year = row.get('CreationDate').split('-')[0]
if int(year) < 2016:
if row.get('PostTypeId') != '2':
continue
else:
user = row.get('OwnerUserId')
questionID = row.get('ParentId')
tags = getTagsForQuestionID(questionID)
# print user,questionID,tags
if user is not None:
if user not in user_tagsList_dict:
user_tagsList_dict[user] = []
for t in tags:
if (t not in user_tagsList_dict[user]):
user_tagsList_dict[user].append(t)
print "Answers Parsed!!"
def createQuestionTagsDict(doc):
iter = 0
maxRows = len(doc.findall('row'))
global percentDone
percentDone = 0
print "Parsing the Tags in the Questions:"
for row in doc.findall('row'):
iter += 1
printCompletionStatus(iter,maxRows)
if row.get('PostTypeId') == '1':
tagList = row.get('Tags').split('><')
tagList[0] = tagList[0].split('<')[1]
tagList[-1] = tagList[-1].split('>')[0]
questionTagsDict[row.get('Id')] = tagList
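            # worked example (illustrative): a Tags field like "<python><pandas><numpy>"
            # splits on '><' into ['<python', 'pandas', 'numpy>']; the two fix-ups above
            # strip the leading '<' and trailing '>' to give ['python', 'pandas', 'numpy']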
print "Tags Parsed!!"
def getTagsForQuestionID(questionID):
if questionID in questionTagsDict:
return questionTagsDict[questionID]
else:
return []
def create_features(user_tagsList_dict):
global percentDone
percentDone = 0
iter = 0
maxIter = len(user_tagsList_dict)
for user in user_tagsList_dict:
iter += 1
user_tagPairList_dict[user] = word_pairs.createWordTuples(user_tagsList_dict[user],NUM_WORDS_IN_TUPLE)
for item in user_tagPairList_dict[user]:
if item not in tagPair_count_dict:
tagPair_count_dict[item] = 1
else:
tagPair_count_dict[item] += 1
if tagPair_count_dict[item] > PAIR_COUNT_THRESHOLD:
final_tagPair_count_dict[item] = tagPair_count_dict[item]
printCompletionStatus(iter,maxIter)
print "Collab Filter Feature Vector created!!"
print "Number of Thresholded unique pairs: %d" % len(final_tagPair_count_dict)
def printCompletionStatus(currIter,maxIter):
global percentDone
percentDoneNext = currIter*100/maxIter
if percentDone != percentDoneNext:
print'{0}% Done...\r'.format(percentDoneNext),
percentDone = percentDoneNext
time.sleep(0.025)
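# note (illustrative): the trailing comma plus '\r' above is the Python 2 idiom for
# an in-place progress line; the Python 3 equivalent would be
# print('{0}% Done...'.format(percentDoneNext), end='\r')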
parseDatasetForTags()
create_features(user_tagsList_dict)
# print final_tagPair_count_dict | [
"[email protected]"
] | |
ff6fa2bf2091948adc7e62e21dd94dd5c42dbf0f | 57448d2bc2777a5b3f7e9c243c44147942e51b12 | /lte/gateway/python/magma/pipelined/tests/test_redirect.py | c28c2377818284031370def9e632d0983cc11896 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | ekfuhrmann/magma-old | 3c3ac3d6ec82b86d221233e2916b14b53f8f05ab | 8b81337d250611b799a353f0ed6cff6aaea8674e | refs/heads/master | 2023-04-19T07:50:06.656779 | 2020-07-07T20:33:43 | 2020-07-07T20:33:43 | 278,103,227 | 0 | 0 | NOASSERTION | 2021-05-08T00:22:50 | 2020-07-08T13:54:56 | Go | UTF-8 | Python | false | false | 11,016 | py | """
Copyright (c) 2018-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import unittest
from concurrent.futures import Future
from unittest.mock import MagicMock
import warnings
from lte.protos.mconfig.mconfigs_pb2 import PipelineD
from lte.protos.policydb_pb2 import FlowDescription, FlowMatch, PolicyRule, \
RedirectInformation
from magma.pipelined.app.enforcement import EnforcementController
from magma.pipelined.bridge_util import BridgeTools
from magma.pipelined.policy_converters import flow_match_to_magma_match
from magma.pipelined.tests.app.flow_query import RyuDirectFlowQuery \
as FlowQuery
from magma.pipelined.tests.app.packet_builder import TCPPacketBuilder
from magma.pipelined.tests.app.packet_injector import ScapyPacketInjector
from magma.pipelined.tests.app.start_pipelined import PipelinedController, \
TestSetup
from magma.pipelined.tests.app.subscriber import RyuDirectSubscriberContext
from magma.pipelined.tests.app.table_isolation import RyuDirectTableIsolator, \
RyuForwardFlowArgsBuilder
from magma.pipelined.tests.pipelined_test_util import FlowTest, FlowVerifier, \
create_service_manager, start_ryu_app_thread, stop_ryu_app_thread, \
wait_after_send, assert_bridge_snapshot_match, fake_controller_setup
class RedirectTest(unittest.TestCase):
BRIDGE = 'testing_br'
IFACE = 'testing_br'
MAC_DEST = "5e:cc:cc:b1:49:4b"
BRIDGE_IP_ADDRESS = '192.168.128.1'
# TODO test for multiple incoming requests (why we match on tcp ports)
@classmethod
def setUpClass(cls):
"""
Starts the thread which launches ryu apps
Create a testing bridge, add a port, setup the port interfaces. Then
launch the ryu apps for testing pipelined. Gets the references
to apps launched by using futures, mocks the redis policy_dictionary
of enforcement_controller
"""
super(RedirectTest, cls).setUpClass()
warnings.simplefilter('ignore')
cls.service_manager = create_service_manager([PipelineD.ENFORCEMENT])
cls._tbl_num = cls.service_manager.get_table_num(
EnforcementController.APP_NAME)
enforcement_controller_reference = Future()
testing_controller_reference = Future()
test_setup = TestSetup(
apps=[PipelinedController.Enforcement,
PipelinedController.Testing,
PipelinedController.StartupFlows],
references={
PipelinedController.Enforcement:
enforcement_controller_reference,
PipelinedController.Testing:
testing_controller_reference,
PipelinedController.StartupFlows:
Future(),
},
config={
'bridge_name': cls.BRIDGE,
'bridge_ip_address': cls.BRIDGE_IP_ADDRESS,
'nat_iface': 'eth2',
'enodeb_iface': 'eth1',
'qos': {'enable': False},
'clean_restart': True,
},
mconfig=PipelineD(
relay_enabled=True
),
loop=None,
service_manager=cls.service_manager,
integ_test=False,
)
BridgeTools.create_bridge(cls.BRIDGE, cls.IFACE)
cls.thread = start_ryu_app_thread(test_setup)
cls.enforcement_controller = enforcement_controller_reference.result()
cls.testing_controller = testing_controller_reference.result()
cls.enforcement_controller._redirect_manager._save_redirect_entry =\
MagicMock()
@classmethod
def tearDownClass(cls):
stop_ryu_app_thread(cls.thread)
BridgeTools.destroy_bridge(cls.BRIDGE)
def test_url_redirect(self):
"""
Partial redirection test, checks if flows were added properly for url
based redirection.
Assert:
1 Packet is matched
Packet bypass flows are added
Flow learn action is triggered - another flow is added to the table
"""
fake_controller_setup(self.enforcement_controller)
redirect_ips = ["185.128.101.5", "185.128.121.4"]
self.enforcement_controller._redirect_manager._dns_cache.get(
"about.sha.ddih.org", lambda: redirect_ips, max_age=42
)
imsi = 'IMSI010000000088888'
sub_ip = '192.168.128.74'
flow_list = [FlowDescription(match=FlowMatch())]
policy = PolicyRule(
id='redir_test', priority=3, flow_list=flow_list,
redirect=RedirectInformation(
support=1,
address_type=2,
server_address="http://about.sha.ddih.org/"
)
)
# ============================ Subscriber ============================
sub_context = RyuDirectSubscriberContext(
imsi, sub_ip, self.enforcement_controller, self._tbl_num
).add_dynamic_rule(policy)
isolator = RyuDirectTableIsolator(
RyuForwardFlowArgsBuilder.from_subscriber(sub_context.cfg)
.build_requests(),
self.testing_controller
)
pkt_sender = ScapyPacketInjector(self.IFACE)
packet = TCPPacketBuilder()\
.set_tcp_layer(42132, 80, 321)\
.set_tcp_flags("S")\
.set_ip_layer('151.42.41.122', sub_ip)\
.set_ether_layer(self.MAC_DEST, "00:00:00:00:00:00")\
.build()
# Check if these flows were added (queries should return flows)
permit_outbound, permit_inbound = [], []
for ip in redirect_ips:
permit_outbound.append(FlowQuery(
self._tbl_num, self.testing_controller,
match=flow_match_to_magma_match(
FlowMatch(ipv4_dst=ip, direction=FlowMatch.UPLINK))
))
permit_inbound.append(FlowQuery(
self._tbl_num, self.testing_controller,
match=flow_match_to_magma_match(
FlowMatch(ipv4_src=ip, direction=FlowMatch.DOWNLINK))
))
learn_action_flow = flow_match_to_magma_match(
FlowMatch(ip_proto=6, direction=FlowMatch.DOWNLINK,
ipv4_src=self.BRIDGE_IP_ADDRESS, ipv4_dst=sub_ip)
)
learn_action_query = FlowQuery(self._tbl_num, self.testing_controller,
learn_action_flow)
# =========================== Verification ===========================
# 1 packet sent, permit rules installed, learn action installed. Since
# the enforcement table is entered via the DPI table and the scratch
# enforcement table, the number of packets handled by the table is 2.
flow_verifier = FlowVerifier(
[FlowTest(FlowQuery(self._tbl_num, self.testing_controller), 2),
FlowTest(learn_action_query, 0, flow_count=1)] +
[FlowTest(query, 0, flow_count=1) for query in permit_outbound] +
[FlowTest(query, 0, flow_count=1) for query in permit_inbound],
lambda: wait_after_send(self.testing_controller))
with isolator, sub_context, flow_verifier:
pkt_sender.send(packet)
assert_bridge_snapshot_match(self, self.BRIDGE,
self.service_manager)
flow_verifier.verify()
def test_ipv4_redirect(self):
"""
Partial redirection test, checks if flows were added properly for ipv4
based redirection.
Assert:
1 Packet is matched
Packet bypass flows are added
Flow learn action is triggered - another flow is added to the table
"""
fake_controller_setup(self.enforcement_controller)
redirect_ip = "54.12.31.42"
imsi = 'IMSI012000000088888'
sub_ip = '192.168.128.74'
flow_list = [FlowDescription(match=FlowMatch())]
policy = PolicyRule(
id='redir_ip_test', priority=3, flow_list=flow_list,
redirect=RedirectInformation(
support=1,
address_type=0,
server_address=redirect_ip
)
)
# ============================ Subscriber ============================
sub_context = RyuDirectSubscriberContext(
imsi, sub_ip, self.enforcement_controller, self._tbl_num
).add_dynamic_rule(policy)
isolator = RyuDirectTableIsolator(
RyuForwardFlowArgsBuilder.from_subscriber(sub_context.cfg)
.build_requests(),
self.testing_controller
)
pkt_sender = ScapyPacketInjector(self.IFACE)
packet = TCPPacketBuilder()\
.set_tcp_layer(42132, 80, 321)\
.set_tcp_flags("S")\
.set_ip_layer('151.42.41.122', sub_ip)\
.set_ether_layer(self.MAC_DEST, "00:00:00:00:00:00")\
.build()
# Check if these flows were added (queries should return flows)
permit_outbound = FlowQuery(
self._tbl_num, self.testing_controller,
match=flow_match_to_magma_match(
FlowMatch(ipv4_dst=redirect_ip, direction=FlowMatch.UPLINK))
)
permit_inbound = FlowQuery(
self._tbl_num, self.testing_controller,
match=flow_match_to_magma_match(
FlowMatch(ipv4_src=redirect_ip, direction=FlowMatch.DOWNLINK))
)
learn_action_flow = flow_match_to_magma_match(
FlowMatch(ip_proto=6, direction=FlowMatch.DOWNLINK,
ipv4_src=self.BRIDGE_IP_ADDRESS, ipv4_dst=sub_ip)
)
learn_action_query = FlowQuery(self._tbl_num, self.testing_controller,
learn_action_flow)
# =========================== Verification ===========================
# 1 packet sent, permit rules installed, learn action installed. Since
# the enforcement table is entered via the DPI table and the scratch
# enforcement table, the number of packets handled by the table is 2.
flow_verifier = FlowVerifier([
FlowTest(FlowQuery(self._tbl_num, self.testing_controller), 2),
FlowTest(permit_outbound, 0, flow_count=1),
FlowTest(permit_inbound, 0, flow_count=1),
FlowTest(learn_action_query, 0, flow_count=1)
], lambda: wait_after_send(self.testing_controller))
with isolator, sub_context, flow_verifier:
pkt_sender.send(packet)
assert_bridge_snapshot_match(self, self.BRIDGE,
self.service_manager)
flow_verifier.verify()
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
2164f96ae2d0585b1924503a511f0733264d9c40 | ab1802bd7469639511d7ea07267a2cff0f1c5815 | /main_single.py | af1cb57300452e7c28e2397fae0fcf9e54cf9420 | [] | no_license | kfuka/MCMLSeg | 33727c2786721120a7e4b26a56c44da7dd9259c0 | 3f05458dc96a968de431795921d1c6315a39007a | refs/heads/master | 2023-03-01T19:06:37.969609 | 2021-02-12T05:18:04 | 2021-02-12T05:18:04 | 338,231,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,645 | py | import datetime
import glob
from collections import OrderedDict
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.backends.cudnn
import torch.nn as nn
import torch.nn.functional as F
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, Dataset
img_folder = "/mnt/Create_multi_mask/img/"
mask_folder = "/mnt/Create_multi_mask/mask_one/"
results_folder = "/home/owner/PycharmProjects/single_unet/results/"
structure_number = 1
# skip_str = [6, 30, 31]
# skip_str = [31,32, 33, 34]
def get_filelist():
img_list = sorted(glob.glob(img_folder + "*"))
mask_list = sorted(glob.glob(mask_folder + "*"))
return img_list, mask_list
class UNet(nn.Module):
def __init__(self, in_channels=1, out_channels=1, init_features=16):
super(UNet, self).__init__()
features = init_features
self.encoder1 = UNet._block(in_channels, features, name="enc1")
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.encoder2 = UNet._block(features, features * 2, name="enc2")
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.encoder3 = UNet._block(features * 2, features * 4, name="enc3")
self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
self.encoder4 = UNet._block(features * 4, features * 8, name="enc4")
self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.bottleneck = UNet._block(features * 8, features * 16, name="bottleneck")
self.upconv4 = nn.ConvTranspose2d(
features * 16, features * 8, kernel_size=2, stride=2, output_padding=1
)
self.decoder4 = UNet._block((features * 8) * 2, features * 8, name="dec4")
self.upconv3 = nn.ConvTranspose2d(
features * 8, features * 4, kernel_size=2, stride=2
)
self.decoder3 = UNet._block((features * 4) * 2, features * 4, name="dec3")
self.upconv2 = nn.ConvTranspose2d(
features * 4, features * 2, kernel_size=2, stride=2
)
self.decoder2 = UNet._block((features * 2) * 2, features * 2, name="dec2")
self.upconv1 = nn.ConvTranspose2d(
features * 2, features, kernel_size=2, stride=2
)
self.decoder1 = UNet._block(features * 2, features, name="dec1")
self.conv = nn.Conv2d(
in_channels=features, out_channels=out_channels, kernel_size=1
)
def forward(self, x):
enc1 = self.encoder1(x)
enc2 = self.encoder2(self.pool1(enc1))
enc3 = self.encoder3(self.pool2(enc2))
enc4 = self.encoder4(self.pool3(enc3))
bottleneck = self.bottleneck(self.pool4(enc4))
dec4 = self.upconv4(bottleneck)
dec4 = torch.cat((dec4, enc4), dim=1)
dec4 = self.decoder4(dec4)
dec3 = self.upconv3(dec4)
dec3 = torch.cat((dec3, enc3), dim=1)
dec3 = self.decoder3(dec3)
dec2 = self.upconv2(dec3)
dec2 = torch.cat((dec2, enc2), dim=1)
dec2 = self.decoder2(dec2)
dec1 = self.upconv1(dec2)
dec1 = torch.cat((dec1, enc1), dim=1)
dec1 = self.decoder1(dec1)
# return torch.sigmoid(self.conv(dec1))
return self.conv(dec1)
@staticmethod
def _block(in_channels, features, name):
return nn.Sequential(
OrderedDict(
[
(
name + "conv1",
nn.Conv2d(
in_channels=in_channels,
out_channels=features,
kernel_size=3,
padding=1,
bias=False,
),
),
(name + "norm1", nn.BatchNorm2d(num_features=features)),
(name + "relu1", nn.ReLU(inplace=True)),
(
name + "conv2",
nn.Conv2d(
in_channels=features,
out_channels=features,
kernel_size=3,
padding=1,
bias=False,
),
),
(name + "norm2", nn.BatchNorm2d(num_features=features)),
(name + "relu2", nn.ReLU(inplace=True)),
]
)
)
class DiceLoss(nn.Module):
def __init__(self):
super(DiceLoss, self).__init__()
self.smooth = 1.0
def forward(self, y_pred, y_true):
assert y_pred.size() == y_true.size()
y_pred = y_pred[:, 0].contiguous().view(-1)
y_true = y_true[:, 0].contiguous().view(-1)
torch_one = torch.ones(len(y_pred)).to("cuda")
torch_zero = torch.zeros(len(y_pred)).to("cuda")
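        # NOTE: hard-thresholding the predictions zeroes the gradient, so this Dice score
        # is only usable as an evaluation metric; main() below does not train with it.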
y_pred = torch.where(y_pred > 0.5, torch_one, torch_zero)
intersection = (y_pred * y_true).sum()
dsc = (2. * intersection + self.smooth) / (y_pred.sum() + y_true.sum() + self.smooth)
return 1. - dsc
def iou_score(output, target, in_loss_func=False):
smooth = 1e-5
if not in_loss_func:
if torch.is_tensor(output):
output = torch.sigmoid(output).cpu().detach().numpy().copy()
if torch.is_tensor(target):
target = target.data.cpu().detach().numpy().copy()
output = output > 0.5
target = target > 0.5
intersection = (output & target).sum()
union = (output | target).sum()
return (intersection + smooth) / (union + smooth)
class BCEDiceLoss(nn.Module):
def __init__(self):
super(BCEDiceLoss, self).__init__()
def forward(self, input, target):
bce = F.binary_cross_entropy_with_logits(input, target, reduction="mean")
iou = iou_score(input, target, True)
smooth = 1e-5
input = torch.sigmoid(input)
num = target.size(0)
input = input.view(num, -1)
target = target.view(num, -1)
intersection = (input * target).sum()
dice = 1 - (2. * intersection + smooth) / (input.sum() + target.sum() + smooth)
return bce + dice + (1 - iou)
class multi_class_loss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, preds, trues):
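        # plain sum-of-squared-errors between preds and trues; main() instantiates this
        # loss but actually trains with CrossEntropyLoss.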
return torch.sum(torch.pow((preds - trues), 2))
class my_dataset(Dataset):
def __init__(self, img_list, mask_list, transform=None):
self.img_list = img_list
self.mask_list = mask_list
def __getitem__(self, index):
path = self.img_list[index]
img = np.load(path)
mask = np.load(mask_folder + path.split("/")[-1])
return img, mask
def __len__(self):
return len(self.img_list)
def main():
batchsize = 6
num_epochs = 50
learning_rate = 0.0001
loss_list, iteration_list, accuracy_list = [], [], []
count = 0
imgs, masks = get_filelist()
train_x, test_x, train_y, test_y = train_test_split(imgs, masks, test_size=0.01, random_state=22)
train_dataset = my_dataset(train_x, train_y)
test_dataset = my_dataset(test_x, test_y)
dataloader = DataLoader(train_dataset, batch_size=batchsize, shuffle=True, num_workers=4, pin_memory=True)
test_loader = DataLoader(test_dataset, batch_size=batchsize, shuffle=True, num_workers=4, pin_memory=True)
# data_iter = iter(dataloader)
# timgs, tlabels = data_iter.next()
unet = UNet(in_channels=1, out_channels=35).to("cuda")
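    # 35 output channels: one logit per mask label, so CrossEntropyLoss below can treat
    # the mask values as class indices.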
# dsc_loss = BCEDiceLoss()
dsc_loss = multi_class_loss()
celoss = nn.CrossEntropyLoss()
# dsc_loss = DiceLoss()
optimizer = torch.optim.Adam(unet.parameters(), lr=learning_rate, weight_decay=0.001)
# optimizer = torch.optim.SGD(unet.parameters(), lr=learning_rate, momentum=0.9, weight_decay=0.0001)
torch.backends.cudnn.benchmark = True
for epoch in range(num_epochs):
torch.save(unet.state_dict(), "unet.pth")
for img, mask in dataloader:
# plt.figure()
# plt.imshow(img[0, :, :], cmap="Greys")
# plt.imshow(mask[0, 30, :, :], alpha=0.5)
# plt.show()
unet.train()
img = img.view(-1, 1, 600, 600)
mask = mask.view(-1, 1, 600, 600)
img_tnsr = img.float().requires_grad_(True).cuda()
mask_tnsr = mask.float().requires_grad_(True).to("cuda")
optimizer.zero_grad()
y_pred = unet(img_tnsr)
# loss = dsc_loss(y_pred, mask_tnsr)
loss = celoss(y_pred, mask_tnsr.view(-1, 600, 600).long())
loss.backward()
optimizer.step()
count += 1
if count % 100 == 0:
unet = unet.eval()
for j, (timg, tmask) in enumerate(test_loader):
timg = timg.clone().detach().reshape(-1, 1, 600, 600)
tmask = tmask.clone().detach().reshape(-1, 1, 600, 600)
timg_tnsr = timg.float().cuda()
tmask_tnsr = tmask.float().to("cuda")
t_pred = unet(timg_tnsr)
# tloss = iou_score(t_pred, tmask_tnsr)
# tloss = dsc_loss(t_pred, tmask_tnsr)
tloss = celoss(t_pred, tmask_tnsr.view(-1, 600, 600).long())
timg = timg.view(-1, 1, 600, 600).to("cpu").detach().numpy()
tmask = tmask.view(-1, 1, 600, 600).to("cpu").detach().numpy()
# t_pred = t_pred.view(-1, 1, 600, 600).to("cpu").detach().numpy()
fig = plt.figure(figsize=(20, 12))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax1.set_axis_off()
ax2.set_axis_off()
# print(timg.shape)
# t_pred = np.where(t_pred > 0.5, 1, 0)
ax1.imshow(timg[0, 0, :, :], cmap="gray")
_, idx = torch.max(t_pred, dim=1)
idx = idx.to("cpu").detach().numpy()
# print(idx.shape())
ax1.imshow(idx[0, :, :], alpha=0.5, cmap="jet", vmin=0.5, vmax=35)
ax2.imshow(timg[0, 0, :, :], cmap="gray")
ax2.imshow(tmask[0, 0, :, :], alpha=0.5, cmap="jet", vmin=0.5, vmax=35)
fig.savefig(results_folder + str(j) + ".png")
plt.close()
print("Epoch {}, Iter. {}, Loss {:.8f}, Cr.E.loss {:.8f}".format(epoch, count, loss.data, tloss))
if __name__ == "__main__":
print(datetime.datetime.now())
main()
print(datetime.datetime.now())
| [
"[email protected]"
] | |
5c99443acdbeaf1f61d74199a88d048c6d83e978 | 5f27bc1a0460a078f6fe33a544f494a5dff7f452 | /script/puzzle_test2/D_00_sh_test.py | dff2a48daaf26fda5d893e45e299c676976bafcc | [] | no_license | A-Why-not-fork-repositories-Good-Luck/arm_move | 3e381f0310265f47da14beaac136c358fb318f92 | e2e6182cfd93df1935bd3b8e9158134964dc44fa | refs/heads/master | 2023-03-15T18:37:17.337770 | 2020-11-18T06:46:06 | 2020-11-18T06:46:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42,411 | py | #!/usr/bin/env python
GRID_SIZE = 0.01
G2P_SIZE = 100
import rospy
import numpy as np
import tf
import matplotlib.pyplot as plt
import copy
import sys
import time
import D_00_custom_function as CUF
import D_00_client_function as CLF
from D_20_1020_VFHplus_change_radius import influence
from D_00_envClass4altest import EnvInfo as EI
from D_00_envClass4altest import CanInfo as CI
from arm_move.srv._box_info_srv import *
from arm_move.srv._arm_move_srv import *
from arm_move.srv._work_start_srv import *
from arm_move.srv._att_hand_box_srv import *
from arm_move.srv._arm_goalJoint_srv import *
import timeit
#using Jeeho modules
import timer_class
import parse_testdata
def go_home():
# 2020.08.05 SH
move_group_name = 'panda_arm'
home_joint = [-0.7912285295667355, -1.7449968666946676, 1.6255344777637362, -2.9980328554805484, 1.552371742049853, 1.345932931635115, 0.8050298552807971]
CLF.move_joints_client_rad(move_group_name, home_joint)
def go_ready():
# 2020.08.05 SH
move_group_name = 'panda_arm'
home_joint = [-1.6238, -1.6078, -0.2229, -2.6057, 1.4646, 1.4325, -0.2159]
CLF.move_joints_client_rad(move_group_name, home_joint)
def hand_open():
# 2020.08.05 SH
CLF.panda_gripper_open()
def pick_and_place(env, pick_pose, pick_object_name, place_pose):
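    # Pick the object, attach it to the gripper, pass through the ready pose, place it,
    # detach and re-add its mesh at the new pose; returns the summed execution time.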
# print"\tPICK AND PLACE ACTION => rearrange", pick_object_name
pick_time = env.pick(env.obs_pos, pick_pose, place_pose)
CLF.att_box_client('hand', pick_object_name)
ready_time1 = env.go_ready()
place_time = env.place(env.obs_pos, place_pose)#, vrep_env.get_current_joint(joint_names_jaco))
CLF.det_box_client(pick_object_name, [0, 0, 0], [0, 0, 0, 0], [0, 0, 0], 'red')
CLF.add_mesh_client(pick_object_name, [place_pose[0], place_pose[1], 0.605], [0.0, 0.0, 0.0, 0.0], [0.001, 0.001, 0.001])
ready_time2 = env.go_ready()
# print"exe time:", pick_time + ready_time1 + place_time + ready_time2
return pick_time+ready_time1+place_time+ready_time2
# print"\tEND PICK AND PLACE ACTION"
#
# # ret_pick_pose = env.pick(env.obs_pos, pick_pose, place_pose)
# env.move_to([[ret_pick_pose[0][0] - 0.03, ret_pick_pose[0][1], ret_pick_pose[0][2]], ret_pick_pose[1]])
#
# env.move_to([[ret_pick_pose[0][0] + 0.05, ret_pick_pose[0][1], ret_pick_pose[0][2]], ret_pick_pose[1]])
#
#
# env.pre_place(env.obs_pos, place_pose, vrep_env.get_current_joint(joint_names_jaco))
# ret_place_pose = env.place(env.obs_pos, place_pose, vrep_env.get_current_joint(joint_names_jaco))
# env.move_to([[ret_place_pose[0][0] - 0.1, ret_place_pose[0][1], ret_place_pose[0][2]], ret_place_pose[1]])
#
# CLF.det_box_client(pick_object_name, [env.object_z, -sel_can_pos[1], sel_can_pos[0]], obstacle_info[env.ore_order[0]][1], obstacle_info[env.ore_order[0]][2], 'blue')
#
# env.move_to([[ret_place_pose[0][0] + 0.1, ret_place_pose[0][1], ret_place_pose[0][2]], ret_place_pose[1]])
# # CLF.add_box_client(obstacle_name[env.ore_order[0]], [env.object_z, -sel_can_pos[1], sel_can_pos[0]], obstacle_info[env.ore_order[0]][1], obstacle_info[env.ore_order[0]][2], 'blue')
def test_algorithm(method_in, data_in):
# method
# "where" : icra2020 "where to relocate?"
# "far" : farthest method
go_ready()
hand_open()
# print "start with method:", method_in
# print "\n***STEP 1*** : env setting"
obj_h = -0.0
obj_z = 0.605 + obj_h#+ obj_h/2.0
target_name = ['target']
target_info = []
if -data_in[0][0]>0:
target_info.append([[data_in[0][1] - 0.03, -data_in[0][0]-0.02, obj_z], [0, 0, 0, 0], [0.001, 0.001, 0.001]]) # for the add_mesh
else:
target_info.append([[data_in[0][1] - 0.03, -data_in[0][0]+0.02, obj_z], [0, 0, 0, 0], [0.001, 0.001, 0.001]]) # for the add_mesh
# target_info.append([[data_in[0][0], data_in[0][1], obj_z], [0, 0, 0, 0], [0.06, 0.06, 0.12]]) # for the add_box
# target_info[i][0][2] = target_info[i][0][2] + 0.04
# target_info[i][2][2] = target_info[i][2][2] + 0.08
# obstacle_name = []
# for i in range(len(data_in[1])):
obstacle_name = [str(i).zfill(2) for i in range(len(data_in[1]))]
# obstacle_name.append('obstacle'+str(i))
# print obstacle_name
# obstacle_name = ['obstacle0', 'obstacle1', 'obstacle2', 'obstacle3', 'obstacle4', 'obstacle5', 'obstacle6', 'obstacle7', 'obstacle8']
obstacle_info = []
# [[obj_pos.x, obj_pos.y, obj_pos.z], [obj_ori_q.x, obj_ori_q.y, obj_ori_q.z, obj_ori_q.w], [obj_scale.x, obj_scale.y, obj_scale.z]]
for i in range(len(obstacle_name)):
obstacle_info.append([[data_in[1][i][1]-0.02, -data_in[1][i][0], obj_z], [0, 0, 0, 0], [0.001, 0.001, 0.001]]) # for the add_mesh
# obstacle_info.append([[data_in[1][i][0], data_in[1][i][1], obj_z], [0, 0, 0, 0], [0.06, 0.06, 0.12]]) # for the add_box
# obstacle_info[i][0][2] = obstacle_info[i][0][2] + 0.04
# obstacle_info[i][2][2] = obstacle_info[i][2][2] + 0.08
can_r = 0.02
if len(obstacle_name)>17:
can_r = 0.035
# print "can r", len(obstacle_name), can_r
elif len(obstacle_name)>13:
can_r = 0.045
# print "can r", len(obstacle_name), can_r
elif len(obstacle_name)>11:
can_r = 0.055
# print "can r", len(obstacle_name), can_r
# print "\tNo. of obstacles:", len(obstacle_name)
env_name = ['shelf_gazebo']#2020.10.21: puzzle test, 'Jaco_base', 'table_ls', 'table_rs', 'table_us', 'table_bs']
env_info = []
base_position = [0.8637-0.02, 0, 0.0 + obj_h]
base_quaternion = [0, 0, 0, 1]
base_scale = [0.001, 0.001, 0.001]
CLF.add_mesh_client('shelf_gazebo', base_position, base_quaternion, base_scale)
ws_pos = [0.8637+0.5*0.45+0.03-0.02, 0.0, 0.0 + obj_h]
ws_rot = [0.0, 0.0, 0.0, 0.0]
ws_scale = [0.45, 0.91, 0.0]
env_info.append([ws_pos, ws_rot, ws_scale])
# for i in range(len(env_name)):
# env_info.append(vrep_env.get_object_info(env_name[i]))
# if i > 1:
# env_info[i][2][0] = env_info[i][2][0]+0.01
# env_info[i][2][1] = env_info[i][2][1]+0.01
# env_info[i][2][2] = env_info[i][2][2]+0.01
for i in range(len(obstacle_info)):
CLF.add_mesh_client(obstacle_name[i], obstacle_info[i][0], obstacle_info[i][1], obstacle_info[i][2])
# CLF.add_box_client(obstacle_name[i], obstacle_info[i][0], obstacle_info[i][1], obstacle_info[i][2], 'red')
for i in range(len(target_info)):
CLF.add_mesh_client(target_name[i], target_info[i][0], target_info[i][1], target_info[i][2])
# CLF.add_box_client(target_name[i], target_info[i][0], target_info[i][1], target_info[i][2], 'green')
# for i in range(len(env_info)):
# # CLF.add_mesh_client(env_name[i], env_info[i][0], env_info[i][1], env_info[i][2])
# CLF.add_box_client(env_name[i], env_info[i][0], env_info[i][1], env_info[i][2], 'gray')
ws = env_info[0]
# print"ws info", env_info[0]
ws_w = int(round(ws[2][0]*100)) # x-axes in Rviz
ws_d = int(round(ws[2][1]*100)) # y-axes in Rviz
# print "\tRviz ws width, depth:", ws_w, ws_d
# GRID_SIZE = 0.01
ws_zero_pos = [round(ws[0][0] - ws[2][0]*0.5, 2), round(ws[0][1] - ws[2][1]*0.5, 2)]
# print "\tRviz ws cen pos:", ws[0]
# print "\tRviz ws, zero pos:", ws_zero_pos
# ws_w, ws_d = 100, 100 # get table size in the v-rep
ws_cen = [-ws[0][1], ws[0][0]]
rob_pos = [0.0, 0.0]
OBJ_R = 0.035
env = EI(rob_pos, ws_w, ws_d, ws_cen, ws_zero_pos, grid_size=GRID_SIZE, wall_r=OBJ_R)
env.set_env(obstacle_name, obstacle_info, target_name, target_info)
env.update_env(env.obs_pos, env.obs_grid)
print "env.ore_order:", env.ore_order, env.ore_order[0]
# print "\trearrangement order:", env.ore_order
# if len(env.ore_order) == 0:
# print "end rearrangement"
# pick_and_place(env, env.tar_pos, 'target', env.tar_pos)
# time.sleep(1)
# CUF.draw_grid_info(env.grid_ori)
# plt.show()
ret = 0
space_err = 0
rearr_cnt = 0
reloc_cnt = len(env.ore_order)
# print"first reloc cnt:", reloc_cnt
# env.get_env(obs_r, tar_r, min_ore)
algorithm_start = time.time()
tp_time = 0
mp_time = 0
ex_time = 0
method = method_in
# method = 'mine'
# method = 'far'
# method = 'deep'
while len(env.ore_order): # this while loop is for the algorithm
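        # Each pass: sample candidate spots, score their accessibility (A) and blocking
        # values (BT/b), relocate one obstacle of the ORE order, then re-plan.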
# print"\n***STEP 2*** REARRANGE ORDER => :", env.ore_order
env.get_max_can(env.grid_ori, bt_num=2, trial_num=2500, can_r=can_r) # We get "grid_max_can", "can_grid"
# env.get_env_case1()
# env.get_max_can_case1()
'''
Make object info!
Type : target, obstacle, candidate
Info : pos, grid, A, BT, b, ORC, ORE
'''
can_info = []
for i in range(len(env.can_pos)):
can_info.append((CI('candidate', env.can_pos[i], env.can_grid[i])))
# check env info got right
# if 1:
# print "\n# of obstacles", len(env.obs_pos), "\n# of candidates", len(env.can_pos)
'''
GET candidates info
'''
t_ore_order = copy.deepcopy(env.ore_order)
# for i in range(len(can_info)):
# print "can", i, ":", can_info[i].pos
# CUF.draw_grid_info(env.grid_ori)
# CUF.draw_grid_info(env.grid_del)
# CUF.draw_grid_info(env.grid_max_can)
# for c_i in range(len(can_info)):
# plt.text(can_info[c_i].grid[0], can_info[c_i].grid[1], 'Can' + str(c_i), fontsize=20, ha='center', bbox=dict(facecolor='pink', alpha=0.8))
# for o_i in range(len(env.obs_grid)):
# plt.text(env.obs_grid[o_i][0], env.obs_grid[o_i][1], 'Obs' + str(o_i), fontsize=20, ha='center', bbox=dict(facecolor='red', alpha=0.8))
# plt.show()
# print"\tCheck C.A"
# Check C.A : just next step
t_can_info = []
in_can_info = copy.deepcopy(can_info)
in_obs_pos = copy.deepcopy(env.obs_pos)
in_obs_pos.remove(env.obs_pos[env.ore_order[0]])
CLF.del_box_client(obstacle_name[env.ore_order[0]])
tmp_mp_start = time.time()
t_can_info.append(env.get_can_A(in_can_info, in_obs_pos, env.tar_pos))
tmp_mp_end = time.time()
mp_time = mp_time + tmp_mp_end - tmp_mp_start
print "in loop env.ore_order:", env.ore_order, env.ore_order[0]
CLF.add_mesh_client(obstacle_name[env.ore_order[0]], obstacle_info[env.ore_order[0]][0], obstacle_info[env.ore_order[0]][1], obstacle_info[env.ore_order[0]][2])
# CLF.add_box_client(obstacle_name[env.ore_order[0]], obstacle_info[env.ore_order[0]][0], obstacle_info[env.ore_order[0]][1], obstacle_info[env.ore_order[0]][2], 'red')
# Check C.BT
in_can_info = copy.deepcopy(t_can_info[0])
in_can_info = env.init_BT(in_can_info) # init the BT value of candidates to '0'
in_obs_pos = copy.deepcopy(env.obs_pos)
for ore_i in range(len(env.ore_order)): # after rearrange all ORE
in_obs_pos.remove(env.obs_pos[env.ore_order[ore_i]])
CLF.del_box_client(obstacle_name[env.ore_order[ore_i]])
tmp_mp_start = time.time()
t_can_info[0] = env.get_can_BT(in_can_info, in_obs_pos, env.tar_pos)
tmp_mp_end = time.time()
mp_time = mp_time + tmp_mp_end - tmp_mp_start
for ore_i in range(len(env.ore_order)):
CLF.add_mesh_client(obstacle_name[env.ore_order[ore_i]], obstacle_info[env.ore_order[ore_i]][0], obstacle_info[env.ore_order[ore_i]][1], obstacle_info[env.ore_order[ore_i]][2])
# CLF.add_box_client(obstacle_name[env.ore_order[ore_i]], obstacle_info[env.ore_order[ore_i]][0], obstacle_info[env.ore_order[ore_i]][1], obstacle_info[env.ore_order[ore_i]][2], 'red')
# Check C.BO : BO : other ORE, just before target
in_can_info = copy.deepcopy(t_can_info[0])
in_obs_pos = copy.deepcopy(env.obs_pos)
for ore_i in range(len(env.ore_order)): # after rearrange all ORE
in_obs_pos.remove(env.obs_pos[env.ore_order[ore_i]])
CLF.del_box_client(obstacle_name[env.ore_order[ore_i]])
for j in range(len(env.ore_order)): # check other ORE just before target
if j > i:
tmp_mp_start = time.time()
t_can_info[0] = env.get_can_BT(in_can_info, in_obs_pos, env.obs_pos[env.ore_order[j]])
tmp_mp_end = time.time()
mp_time = mp_time + tmp_mp_end - tmp_mp_start
for ore_i in range(len(env.ore_order)):
CLF.add_mesh_client(obstacle_name[env.ore_order[ore_i]], obstacle_info[env.ore_order[ore_i]][0], obstacle_info[env.ore_order[ore_i]][1], obstacle_info[env.ore_order[ore_i]][2])
# CLF.add_box_client(obstacle_name[env.ore_order[ore_i]], obstacle_info[env.ore_order[ore_i]][0], obstacle_info[env.ore_order[ore_i]][1], obstacle_info[env.ore_order[ore_i]][2], 'red')
s_v = []
s_v_index = []
for i in range(1):
in_can_info = copy.deepcopy(t_can_info[i])
ret_can, ret_index = env.get_cf(in_can_info)
s_v.append(ret_can)
s_v_index.append(ret_index)
# print "\n step", i, " has # of cf pos:", len(t_cf[i]), "index", t_cf_index[i]
# print"\n***STEP 3*** : find valid candidates"
# print "\ts_v:", len(s_v[0]), "\n\ts_v_index:", len(s_v_index[0])
# for i in range(len(s_v[0])):
# print "s_v index:", [i], s_v_index[0][i]
# See the feasibile candidate
# for i in range(len(t_cf[0])):
# print "\n Our Cf pos:", i, t_cf[0][i].pos
# See if this case if case0 or case1
# print "t_cf:", t_cf, "order", env.ore_order
# if len(s_v[0]) >= len(env.ore_order):
if len(s_v[0]) >= 1:
# print "\n\tenough candidate spots"
t_b = []
for i in range(1):
in_obs_pos = copy.deepcopy(env.obs_pos)
for ore_i in range(i + 1):
in_obs_pos.remove(env.obs_pos[env.ore_order[ore_i]])
t_b.append(env.get_cf_b(s_v[i], in_obs_pos))
# print "\n step", i, " has cf b:", t_b[i]
# draw_figs = 1
# if draw_figs == 1:
# for c_i in range(len(can_info)):
# plt.text(can_info[c_i].grid[0], can_info[c_i].grid[1], 'Can' + str(c_i), fontsize=20, ha='center', bbox=dict(facecolor='pink', alpha=0.8))
# for o_i in range(len(env.obs_grid)):
# plt.text(env.obs_grid[o_i][0], env.obs_grid[o_i][1], 'Obs' + str(o_i), fontsize=20, ha='center', bbox=dict(facecolor='red', alpha=0.8))
#
# for step_i in range(1):
# step_grid = copy.deepcopy(env.grid_act)
# step_obs_grid = copy.deepcopy(env.obs_grid)
# for ore_i in range(step_i + 1):
# step_obs_grid.remove(env.obs_grid[env.ore_order[ore_i]])
# for i in range(len(step_obs_grid)):
# step_grid = CUF.obstacle_circle(step_grid, [round(step_obs_grid[i][0], 2), round(step_obs_grid[i][1], 2), env.obs_r[i]], 2)
# for ci in range(len(can_info)):
# xi, yi = can_info[ci].grid
# step_grid = CUF.obstacle_circle(step_grid, [xi, yi, 0.04], 30)
#
# step_grid = CUF.obstacle_circle(step_grid, [env.tar_grid[0], env.tar_grid[1], tar_r], 4) # target
#
# for cf_i in range(len(t_b[step_i])):
# xi = (t_cf[step_i][cf_i].pos[0] - env.ws_zero[0]) * G2P_SIZE
# yi = (t_cf[step_i][cf_i].pos[1] - env.ws_zero[1]) * G2P_SIZE
# step_grid = CUF.obstacle_circle(step_grid, [xi, yi, 0.04], 3)
#
# CUF.draw_grid_info(step_grid)
#
# for cf_i in range(len(t_b[step_i])):
# xi = (t_cf[step_i][cf_i].pos[0] - env.ws_zero[0]) * G2P_SIZE
# yi = (t_cf[step_i][cf_i].pos[1] - env.ws_zero[1]) * G2P_SIZE
# plt.text(xi, yi, 'b=' + str(t_b[step_i][cf_i]), fontsize=20, ha='center', bbox=dict(facecolor='pink', alpha=0.8))
# for ci in range(len(t_can_info[step_i])):
# plt.text(t_can_info[step_i][ci].grid[0], t_can_info[step_i][ci].grid[1] - 2.0, '[A, BT] :' + str([t_can_info[step_i][ci].A, t_can_info[step_i][ci].BT]), fontsize=10, ha='center', bbox=dict(facecolor='pink', alpha=0.8))
# for o_i in range(len(env.obs_grid)):
# plt.text(env.obs_grid[o_i][0], env.obs_grid[o_i][1], 'Obs' + str(o_i), fontsize=20, ha='center', bbox=dict(facecolor='red', alpha=0.8))
# plt.title('step' + str(step_i) + " obs: " + str(env.ore_order[step_i]) + " rearranged")
elif len(s_v[0]) < len(env.ore_order):
# print "\n\tnot enough candidate spots"
# print "Since we meet condition: N(CF) < N(ORE) by", len(t_cf[0]), "<", len(env.ore_order), ",\nwe have to remove additional obstacles."
## step1 : "get t_cp", check candidates which have A = 0 and BT = 0
## This means that a candidate is not reachable and it does not block the target object
# Check A for this environment state
in_can_info = copy.deepcopy(can_info)
in_obs_pos = copy.deepcopy(env.obs_pos)
t_can_add = copy.deepcopy(env.get_can_A(in_can_info, in_obs_pos, env.tar_pos))
s_e = [] # s_e: extra candidate spots
in_can_info = copy.deepcopy(t_can_add)
ret_can, ret_index = env.get_cp(in_can_info)
# print "\t# of OR'", len(ret_can)
t_s_e = ret_can
t_s_e_index = ret_index
# print "t_cp:", len(t_cp), "index", t_cp_index
# for i in range(len(t_cp)):
# print "\n Our Cp:", i, t_cp[i].pos
if len(t_s_e) == 0:
# print "\tno possible extra candidate exist"
space_err = 1
break
# step2 : check c_ore for each cp and pick min of it
t_s_r = [] # s_r: candidate spot relocate plan
in_can_info = copy.deepcopy(t_s_e)
# tmp_order_time_start = timeit.default_timer()
# tmp_order_time_start2 = time.clock()
t_s_r = env.get_c_ore(in_can_info)
# tmp_order_time_end = timeit.default_timer()
# tmp_order_time_end2 = time.clock()
# order_time = order_time + tmp_order_time_end - tmp_order_time_start
# order_time2 = order_time2 + tmp_order_time_end2 - tmp_order_time_start2
# order_cnt = order_cnt + 100 * len(t_s_e)
# print "\n"
# for i in range(len(t_cp)):
# print "cp", t_cp[i].pos, "\nc_ore", c_ore[i]
s_r = []
s_e_index = []
# print "\n"
# for i in range(len(t_s_e)):
# print "can", t_s_e_index[i], "grid:", t_s_e[i].grid, ", s_r:", t_s_r[i]
for i in range(len(t_s_e)):
if t_s_r[i] != []:
s_e.append(t_s_e[i])
s_r.append(t_s_r[i])
s_e_index.append(t_s_e_index[i])
# tmp_se = copy.deepcopy(s_e)
# tmp_sr = copy.deepcopy(s_r)
# emp_sr = []
# for i in range(len(s_e)):
# if s_r[i] == []:
# print "remove empty s_e", i
# emp_sr.append(i)
#
# print "tmp se:", tmp_se, "\ntmp sr", tmp_sr
# for i in range(len(emp_sr)):
#
# print "tmp_se[emp_sr[i]]", tmp_se[emp_sr[i]].pos
# print "tmp_sr[emp_sr[i]]", tmp_sr[emp_sr[i]]
# s_e.remove(tmp_se[emp_sr[i]])
# s_r.remove(tmp_sr[emp_sr[i]])
while len(s_e):
# print "# of s_e:", len(s_e), s_r
# print "\n"
# for i in range(len(s_e)):
# print "can", s_e_index[i], "pos:", s_e[i].pos, ", s_r:", s_r[i]
min_s_r = CUF.min_len_list(s_r)
# print "\nmin sr:", min_s_r
#
# print "picked ci index:", t_cp.index(t_cp[c_ore.index(min_c_ore)])
# print "picked ci address:", copy.deepcopy(t_cp[c_ore.index(min_c_ore)]).pos
cp = copy.deepcopy(s_e[s_r.index(min_s_r)])
# print "selected cp pos", cp.pos
## step3 : "get t_cf", check candidates which have A = 1 and BT' = 0
## Check A for this environment state T' is t_cp_i
in_can_info = copy.deepcopy(can_info)
in_obs_pos = copy.deepcopy(env.obs_pos)
in_tar_pos = copy.deepcopy(cp.pos)
# t_can_add = copy.deepcopy(env.get_can_A(in_can_info, in_obs_pos, in_tar_pos))
t_can_add = copy.deepcopy(env.get_can_A(in_can_info, in_obs_pos, env.tar_pos))
# Check C.BT for this environment state
in_can_info = copy.deepcopy(t_can_add)
in_can_info = env.init_BT(in_can_info) # init the BT value of candidates to '0'
in_obs_pos = copy.deepcopy(env.obs_pos)
sorted_min_s_r = copy.deepcopy(min_s_r)
sorted_min_s_r.sort(reverse=True)
# print "sorted min_s_r:", sorted_min_s_r
if sorted_min_s_r[0] == len(env.obs_pos): # if OR' has o_t ! remove s_e
# print "o_t is in OR'"
s_e.remove(s_e[s_r.index(min_s_r)])
s_e_index.remove(s_e_index[s_r.index(min_s_r)])
s_r.remove(s_r[s_r.index(min_s_r)])
else:
for ore_i in range(len(min_s_r)): # after rearrange all OR'
in_obs_pos.remove(in_obs_pos[sorted_min_s_r[ore_i]])
CLF.del_box_client(obstacle_name[sorted_min_s_r[ore_i]])
in_tar_pos = copy.deepcopy([-cp.pos[1], cp.pos[0]])
t_can_add = env.get_can_BT(in_can_info, in_obs_pos, in_tar_pos)
for ore_i in range(len(min_s_r)): # after rearrange all OR'
CLF.add_mesh_client(obstacle_name[sorted_min_s_r[ore_i]], obstacle_info[sorted_min_s_r[ore_i]][0], obstacle_info[sorted_min_s_r[ore_i]][1], obstacle_info[sorted_min_s_r[ore_i]][2])
# CLF.add_box_client(obstacle_name[sorted_min_s_r[ore_i]], obstacle_info[sorted_min_s_r[ore_i]][0], obstacle_info[sorted_min_s_r[ore_i]][1], obstacle_info[sorted_min_s_r[ore_i]][2], 'red')
# for i in range(len(t_can_add)):
# print "can", i, "A:", t_can_add[i].A, "B:", t_can_add[i].BT
s_e_v = []
s_v_index = []
in_can_info = copy.deepcopy(t_can_add)
ret_can, ret_index = env.get_cf(in_can_info)
s_e_v.append(ret_can)
s_v_index.append(ret_index)
# print "s_e_v: ", s_e_v
# for i in range(len(s_e_v[0])):
# print s_e_v[0][i].grid
if len(s_e_v[0]) >= len(min_s_r) - 1:
# print "this se is possible"
if len(min_s_r) == 1:
# print "only one move needed"
# t_can_info = []
# for i in range(len(env.ore_order)):
# in_can_info = copy.deepcopy(can_info)
# in_obs_pos = copy.deepcopy(env.obs_pos)
# for ore_i in range(i + 1):
# if min_s_r[0] != env.ore_order[ore_i]:
# in_obs_pos.remove(env.obs_pos[env.ore_order[ore_i]])
# in_obs_pos.remove(env.obs_pos[min_s_r[0]])
# t_can_info.append(env.get_can_A(in_can_info, in_obs_pos, env.tar_pos))
s_v = [[s_e[s_r.index(min_s_r)]]]
s_v_index = [[s_e_index[s_r.index(min_s_r)]]]
# print "se v:", s_v, s_v[0], s_v[0][0], s_v[0][0].pos
# for i in range(len(env.ore_order)):
# add_can_info = copy.deepcopy(t_can_info[i])
# ret_can, ret_index = env.get_cf(add_can_info)
# s_v.append(ret_can)
# s_v_index.append(ret_index)
t_b = [[0]]
# for i in range(1):
# in_obs_pos = copy.deepcopy(env.obs_pos)
# for ore_i in range(i+1):
# in_obs_pos.remove(env.obs_pos[env.ore_order[ore_i]])
# t_b.append(env.get_cf_b(s_v[i], in_obs_pos))
# # print "\n step", i, " has cf b:", t_b[i]
break # for out s_e loop
else:
t_b = []
in_obs_pos = copy.deepcopy(env.obs_pos)
for ore_i in range(1):
in_obs_pos.remove(env.obs_pos[min_s_r[ore_i]])
t_b.append(env.get_cf_b(s_e_v[0], in_obs_pos))
s_v[0] = s_e_v[0]
break # for out s_e loop
else: # s_e[s_r.index(min_s_r)]
# print "\nremove",
# print "s_e:", s_e
# print "s_r:", s_r
# print "s_e_index:", s_e_index
s_e.remove(s_e[s_r.index(min_s_r)])
s_e_index.remove(s_e_index[s_r.index(min_s_r)])
s_r.remove(s_r[s_r.index(min_s_r)])
if len(s_e) == 0:
# print "no possible extra candidate exist"
break
env.ore_order = min_s_r
# draw_figs = 1
# if draw_figs == 1:
# for c_i in range(len(can_info)):
# plt.text(can_info[c_i].grid[0], can_info[c_i].grid[1], 'Can' + str(c_i), fontsize=20, ha='center', bbox=dict(facecolor='pink', alpha=0.8))
# for o_i in range(len(env.obs_grid)):
# plt.text(env.obs_grid[o_i][0], env.obs_grid[o_i][1], 'Obs' + str(o_i), fontsize=20, ha='center', bbox=dict(facecolor='red', alpha=0.8))
#
# step_i = 0
# step_grid = copy.deepcopy(env.grid_act)
# step_obs_grid = copy.deepcopy(env.obs_grid)
# step_obs_grid.remove(env.obs_grid[env.ore_order[0]])
# for i in range(len(step_obs_grid)):
# # print "i:", i, "step_obs_grid [i]:", step_obs_grid[i]
# step_grid = CUF.obstacle_circle(step_grid, [round(step_obs_grid[i][0], 2), round(step_obs_grid[i][1], 2), env.obs_r[i]], 2)
# for ci in range(len(can_info)):
# xi, yi = can_info[ci].grid
# step_grid = CUF.obstacle_circle(step_grid, [xi, yi, 0.04], 30)
#
# step_grid = CUF.obstacle_circle(step_grid, [env.tar_grid[0], env.tar_grid[1], tar_r], 4) # target
#
# for cf_i in range(len(t_b[step_i])):
# xi = (t_cf[step_i][cf_i].pos[0] - env.ws_zero[0]) * G2P_SIZE
# yi = (t_cf[step_i][cf_i].pos[1] - env.ws_zero[1]) * G2P_SIZE
# step_grid = CUF.obstacle_circle(step_grid, [xi, yi, 0.04], 3)
#
# CUF.draw_grid_info(step_grid)
#
# for cf_i in range(len(t_b[step_i])):
# xi = (t_cf[step_i][cf_i].pos[0] - env.ws_zero[0]) * G2P_SIZE
# yi = (t_cf[step_i][cf_i].pos[1] - env.ws_zero[1]) * G2P_SIZE
# plt.text(xi, yi, 'b=' + str(t_b[step_i][cf_i]), fontsize=20, ha='center', bbox=dict(facecolor='pink', alpha=0.8))
# for ci in range(len(t_can_info[step_i])):
# plt.text(t_can_info[step_i][ci].grid[0], t_can_info[step_i][ci].grid[1] - 2.0, '[A, BT] :' + str([t_can_info[step_i][ci].A, t_can_info[step_i][ci].BT]), fontsize=10, ha='center', bbox=dict(facecolor='pink', alpha=0.8))
# for o_i in range(len(env.obs_grid)):
# plt.text(env.obs_grid[o_i][0], env.obs_grid[o_i][1], 'Obs' + str(o_i), fontsize=20, ha='center', bbox=dict(facecolor='red', alpha=0.8))
# plt.title('step' + str(step_i) + " obs: " + str(env.ore_order[step_i]) + " rearranged")
if space_err:
# print "no possible extra candidate exist"
break
# move obstacle to can(min(b))
# print "s_v", s_v
# print "s_v[0]", s_v[0]
# print "s_v[0][0]", s_v[0][0]
# print "s_v[0][0].pos", s_v[0][0].pos
# print "\tt_b[0]", t_b[0]
find_b = copy.deepcopy(t_b[0])
# print "move to c_", find_b.index(min(find_b))
if method == 'far':
t_sel_can_index = [i for i in range(len(find_b))]
elif method == 'deep':
t_sel_can_index = [i for i in range(len(find_b))]
elif method == 'SH_re':
t_sel_can_index = [i for i in range(len(find_b)) if find_b[i] == min(find_b)]
t_sel_can_dist = []
# print "\ntar grid: ", env.tar_grid
# print "\ntar pos: ", env.tar_pos
# print "\tt sel can index", t_sel_can_index
for i in range(len(t_sel_can_index)):
# print "t_cf grid x,y:", t_sel_can_index[i], t_cf[0][t_sel_can_index[i]].grid[0], t_cf[0][t_sel_can_index[i]].grid[1]
# print "t_cf pos x,y:", t_sel_can_index[i], s_v[0][t_sel_can_index[i]].pos[0], s_v[0][t_sel_can_index[i]].pos[1]
if method == 'deep':
t_sel_can_dist.append(np.sqrt((env.rob_pos[1] - s_v[0][t_sel_can_index[i]].pos[0]) ** 2 + (-env.rob_pos[0] - s_v[0][t_sel_can_index[i]].pos[1]) ** 2))
else:
t_sel_can_dist.append(np.sqrt((env.tar_pos[1] - s_v[0][t_sel_can_index[i]].pos[0]) ** 2 + (-env.tar_pos[0] - s_v[0][t_sel_can_index[i]].pos[1]) ** 2))
# print "obs pos:", env.obs_pos
# print "t sel can dist", t_sel_can_dist
sel_can_index = t_sel_can_index[t_sel_can_dist.index(max(t_sel_can_dist))]
# print "sel can index", sel_can_index
sel_can_pos = can_info[s_v_index[0][sel_can_index]].pos
# sel_can_pos = [can_info[s_v_index[0][sel_can_index]].pos[1], -can_info[s_v_index[0][sel_can_index]].pos[0]]
sel_can_grid = can_info[s_v_index[0][sel_can_index]].grid
# print"\npick obj in RVIZ:", env.obs_pos[env.ore_order[0]][1], -env.obs_pos[env.ore_order[0]][0]
# print"pick obj in ALG :", env.obs_pos[env.ore_order[0]]
sel_obs_pos = env.obs_pos[env.ore_order[0]]
sel_obs_grid = env.obs_grid[env.ore_order[0]]
# print"obstacle pos change:", [-can_info[s_v_index[0][sel_can_index]].pos[1], -can_info[s_v_index[0][sel_can_index]].pos[0]]
env.obs_pos[env.ore_order[0]] = [-can_info[s_v_index[0][sel_can_index]].pos[1], can_info[s_v_index[0][sel_can_index]].pos[0]]
env.obs_grid[env.ore_order[0]] = sel_can_grid
# print"place obj in RVIZ:", sel_can_pos
# print"place obj in ALG :", env.obs_pos[env.ore_order[0]]
# can_info[s_v_index[0][sel_can_index]].pos = sel_obs_pos
# can_info[s_v_index[0][sel_can_index]].grid = sel_obs_grid
# tmp_order_time_start = timeit.default_timer()
# tmp_order_time_start2 = time.clock()
# env.pick_n_place()
# CLF.add_box_client(obstacle_name[env.ore_order[0]], [env.object_z, -sel_can_pos[1], sel_can_pos[0]], obstacle_info[env.ore_order[0]][1], obstacle_info[env.ore_order[0]][2], 'blue')
# pick_and_place(env, sel_obs_pos, obstacle_name[env.ore_order[0]], env.obs_pos[env.ore_order[0]])
# print "obs pos:", env.obs_pos
tmp_ex_time = pick_and_place(env, sel_obs_pos, obstacle_name[env.ore_order[0]], sel_can_pos)
ex_time = ex_time + tmp_ex_time
obstacle_info[env.ore_order[0]][0] = [can_info[s_v_index[0][sel_can_index]].pos[0], can_info[s_v_index[0][sel_can_index]].pos[1], 0.605]
env.obs_pos[env.ore_order[0]] = copy.deepcopy([-can_info[s_v_index[0][sel_can_index]].pos[1], can_info[s_v_index[0][sel_can_index]].pos[0]])
env.obs_grid[env.ore_order[0]] = sel_can_grid
# def pick_and_place(env, pick_pose, pick_object_name, place_pose):
# print"\tPICK AND PLACE ACTION => rearrange", pick_object_name
#
# env.pick(env.obs_pos, pick_pose, place_pose)
# CLF.att_box_client('hand', pick_object_name)
# env.go_ready()
# env.place(env.obs_pos, place_pose) # , vrep_env.get_current_joint(joint_names_jaco))
# CLF.det_box_client(pick_object_name, [0, 0, 0], [0, 0, 0, 0], [0, 0, 0], 'red')
# CLF.add_mesh_client(pick_object_name, [place_pose[1], -place_pose[0], 0.605], [0.0, 0.0, 0.0, 0.0], [0.001, 0.001, 0.001])
# env.go_ready()
# print"\tEND PICK AND PLACE ACTION"
#
# time.sleep(1)
# obstacle_info = []
# for i in range(len(obstacle_name)):
# obstacle_info.append(vrep_env.get_object_info(obstacle_name[i]))
# # obstacle_info[i][0][2] = obstacle_info[i][0][2] + 0.04
# # obstacle_info[i][2][2] = obstacle_info[i][2][2] + 0.08
# for i in range(len(obstacle_info)):
# CLF.add_box_client(obstacle_name[i], obstacle_info[i][0], obstacle_info[i][1], obstacle_info[i][2], 'red')
# env.set_env(obstacle_name, obstacle_info, target_name, target_info)
# home_joint = [3.1415927410125732, 4.537856101989746, 5.93411922454834, -0.6108652353286743, 1.7453292608261108, -0.5235987901687622]
#
# CLF.move_joints_client_rad('arm', home_joint)
ret = env.update_env(env.obs_pos, env.obs_grid)
print "after update env -> env.ore_order:", env.ore_order#, env.ore_order[0]
# print "obs pos:", env.obs_pos
if len(env.ore_order) == 0:
rearr_cnt = rearr_cnt + 1
# print "end rearrangement"
# pick_and_place(env, env.tar_pos, 'target', env.tar_pos)
# time.sleep(1)
# plt.title('rearrangement finished')
break
if env.ore_order[0] == -1:
ret = -99
# print "no path"
break
# print "obs pos:", env.obs_pos
# tmp_order_time_end = timeit.default_timer()
# order_time = order_time + tmp_order_time_end - tmp_order_time_start
# order_time2 = order_time2 + tmp_order_time_end2 - tmp_order_time_start2
# order_cnt = order_cnt + 1
rearr_cnt = rearr_cnt + 1
if env.order_error_flag == 0:
# print "\nretry for another environment"
space_err = 1
break
# print "after move order is:", env.ore_order
# CUF.draw_grid_info(env.grid_ori)
# for c_i in range(len(can_info)):
# plt.text(can_info[c_i].grid[0], can_info[c_i].grid[1], 'Can' + str(c_i), fontsize=20, ha='center', bbox=dict(facecolor='pink', alpha=0.8))
# for o_i in range(len(env.obs_grid)):
# plt.text(env.obs_grid[o_i][0], env.obs_grid[o_i][1], 'Obs' + str(o_i), fontsize=20, ha='center', bbox=dict(facecolor='red', alpha=0.8))
if len(env.ore_order) == 0:
# print "end rearrangement"
# pick_and_place(env, env.tar_pos, 'target', env.tar_pos)
# time.sleep(1)
# plt.title('rearrangement finished')
break
# else:
# plt.title('after rearrngement')
# plt.show()
# pick_and_place(env, env.tar_pos, 'target', env.tar_pos)
# time.sleep(1)
if ret == -99:
return -99, -99, -99, -99, -99, -99
algorithm_end = time.time()#it.default_timer()
tot_time = algorithm_end - algorithm_start
tp_time = tot_time - mp_time - ex_time
print "n-rearrangement", reloc_cnt, rearr_cnt
print "tot, tp, mp, ex time:", tot_time, tp_time, mp_time, ex_time, "\n"
return reloc_cnt, rearr_cnt, tot_time, tp_time, mp_time, ex_time
if __name__ == "__main__":
# X = [0.013,-0.324,-0.347,0.161,0.327,0.169,0.005,-0.307,-0.022,0.127,0.294,0.195,-0.193,-0.135,-0.316,-0.166]
# Y = [1.10545,1.15995,0.96495,1.18595,0.94395,0.93395,1.21495,1.07945,1.2677,1.06945,1.15995,1.2677,0.95595,1.2677,1.2677,1.07545]
# data_in = []
# obs_list = []
# tar_n = 13
# X = [0.331,-0.18,-0.331,0.284,-0.028,0.314,0.006,0.178,0.36,-0.133,0.165,0.025,0.146,-0.195,0.16,-0.299]
# Y = [1.17095,1.2677,1.04645,1.07245,1.2677,1.2677,1.22895,1.2677,0.96295,0.95695,0.96595,1.11545,1.17395,1.05345,1.06245,1.17695]
# data_in = []
# obs_list = []
# tar_n = 5
# X.pop(tar_n)
# Y.pop(tar_n)
#
# X = [0.018,0.326,-0.179,0.344,-0.318,-0.314,0.138,-0.12,0.138,-0.185,-0.289,0.005,0.015,0.303,-0.291,0.284]
# Y = [1.10445,0.94095,1.07545,1.07345,0.94595,1.05645,0.95895,0.96095,1.04945,1.18395,1.15795,1.22795,1.2677,1.18095,1.2677,1.2677]
# data_in = []
# obs_list = []
# tar_n = 12
# X.pop(tar_n)
# Y.pop(tar_n)
# X = [-0.32,0.02,-0.148,-0.129,-0.007,0.357,-0.017,0.159,-0.16,0.299,0.309,0.154,-0.17,-0.323,0.149,-0.325]
# Y = [0.94595,1.21395,1.05645,0.96595,1.10945,1.05245,1.2677,0.94095,1.18395,1.2677,0.96095,1.2677,1.2677,1.16095,1.07745,1.2677]
# data_in = []
# obs_list = []
# tar_n = 11
# X.pop(tar_n)
# Y.pop(tar_n)
#
# X = [-0.001,-0.17,-0.354,-0.346,-0.145,0.036,0.315,0.024,-0.287,0.334,0.147,0.184,-0.135,-0.306,0.155,-0.178]
# Y = [1.10645,1.04645,0.95195,1.2677,1.2677,1.2677,1.17995,1.22895,1.06045,1.05745,1.07745,0.93795,0.94395,1.18095,1.2677,1.18695]
# data_in = []
# obs_list = []
# tar_n = 13
# X.pop(tar_n)
# Y.pop(tar_n)
#
# X = [-0.326,-0.142,0.184,0.329,0.0,0.151,0.348,-0.326,0.147,-0.331,-0.182,0.316,-0.284,0.027,0.135,0.034]
# Y = [0.93295,1.04845,1.2677,1.05645,1.11145,0.95595,0.93595,1.18395,1.05845,1.07745,1.15995,1.2677,1.2677,1.2677,1.16195,1.19895]
# data_in = []
# obs_list = []
# tar_n = 14
# X.pop(tar_n)
# Y.pop(tar_n)
#
is_reading_from_file = False
# for debug only
# sys.argv = ["0", "0", "2"]
#read testdata from file
default_N = 16
testdata_path = ""
# dummy data
testdata_set = parse_testdata.testdata("", 0)
from os.path import expanduser
home = expanduser("~")
# N from input arg
N = 0
if (len(sys.argv) > 1):
N = int(sys.argv[1])
# print("Starting with N = " + sys.argv[1])
# read_testdata = False
# if there was no input argument
if (N == 0):
N = default_N
# read_testdata = False
# read from testdata
if (len(sys.argv) > 2):
data_ind = int(sys.argv[2])
is_reading_from_file = True
# print("\nReading Test Data from File")
if (len(sys.argv) > 3):
testdata_path = sys.argv[3]
else:
testdata_path = home + "/test_data.txt"
# print("File path: " + testdata_path)
if (is_reading_from_file == True):
testdata_set = parse_testdata.testdata(testdata_path, data_ind)
N = testdata_set.N
# print("Starting with N = " + str(N))
data_in = []
obs_list = []
R = []
H = []
tar_n = 0
if(is_reading_from_file == False):
X = [0.34,0.343,-0.3,0.29,-0.295,0.197,0.123,-0.025,-0.359,0.191,-0.016,0.0,0.281,-0.12,-0.16,0.124]
Y = [1.2677,1.18995,1.2677,1.06845,1.15995,1.06745,1.2677,1.2677,1.05245,1.16795,1.20895,1.10945,0.95095,1.2677,1.16695,0.93895]
tar_n = 6
else:
X = testdata_set.X
Y = testdata_set.Y
R = testdata_set.R
H = testdata_set.H
tar_n = testdata_set.target
# print"input tar_n:", tar_n
# tar_x = X.pop(tar_n)
# tar_y = Y.pop(tar_n)
data_in.append([X[tar_n], Y[tar_n]])
for i in range(len(X)):
if i != tar_n:
obs_list.append([X[i], Y[i]])
data_in.append(obs_list)
# print data_in
method = "SH_re"
n_reloc_obj, n_rearr_obj, tot_time, tp_time, mp_time, ex_time = test_algorithm(method, data_in)
# Record Time
t = time.localtime()
current_time = time.strftime("%H:%M:%S", t)
#time_box.time_list_graph.add_to_file(home + "/test_log.csv", current_time)
sim_record = open(home + "/simulation_records_sh16.csv", 'a')
sim_record.write("Simulation Results Recorded at: " + current_time + "\n")
sim_record.write("N: " + str(N) + "\n")
sim_record.write("Total Time Taken: " + str(tot_time) + " sec" + "\n")
sim_record.write("Objects Relocated: " + str(n_rearr_obj) + "\n")
sim_record.write("Actions Taken: " + str(n_rearr_obj) + "\n")
sim_record.write("Time on Graph: " + str(tp_time) + "\n")
sim_record.write("Time on Motion Planning: " + str(mp_time) + " sec" + "\n")
sim_record.write("Time on Motion Execution: " + str(ex_time) + " sec" + "\n")
sim_record.write("testdata set line: " + testdata_set.raw + "\n\n")
sim_record.close()
# before
'''
X = ['0.03', '-0.01', '0.36', '0.30', '-0.19', '-0.05', '-0.29', '0.22', '0.19', '0.14', '-0.12']
Y = ['1.22', '1.11', '1.04', '1.17', '1.06', '1.31', '1.17', '1.31', '1.06', '1.19', '1.13']
data_in = []
data_in.append([-0.17, 1.22])
obs_list = []
for i in range(len(X)):
obs_list.append([float(X[i]), float(Y[i])])
data_in.append(obs_list)
print "data:", data_in
method = "where"
test_algorithm(method, data_in)
''' | [
"[email protected]"
] | |
780f9a63b391c821c07adeb106fabc3cc2ee0b96 | 23cc9e5e299103a44135ef231fb1d57f4c30fbd1 | /annlib/lib/layer.py | bab319283197ea923a21333aa26cdf3b9d784ee6 | [] | no_license | kvalle/PyANN | c2b06568ca0d6f27e8fe29fd87b1ca297750a8fa | 412c7d9ae0c5ef3519c458815abe51976b78f8a1 | refs/heads/master | 2020-04-09T10:32:52.775183 | 2012-03-04T12:46:49 | 2012-03-04T12:46:49 | 3,617,480 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,886 | py | import math, yaml, config, utils
from node import *
class Layer:
def __init__(self, options = {}):
defaults = yaml.load(file(config.base_dir + 'scripts/defaults.yml', 'r'), Loader=yaml.Loader)['layer']
self.options = utils.parse_options(options, defaults)
self.nodes = []
self.links_in = []
self.links_out = []
self.is_active = True
for i in range(self.options['size']):
self.nodes.append(Node(self))
# Add bias node
if self.options['bias_node']:
self.nodes.append(Node(self))
self.nodes[-1].set_bias()
## Execution
def recurring_link(self):
for link in self.links_out:
if link.post == self: return True
return False
# Sanity check for incoming data
def check_inputs(self, data):
if data and self.options['size'] != len(data):
raise Exception("Non-matching data input and number of nodes.")
# Check if node values have converged
def quiescent(self):
for node in self.nodes:
if node.delta() > self.options['quiescent_threshold']:
return False
return True
# Update all nodes
def update_nodes(self, mode, data = False):
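        # With quiescence enabled and a recurrent link, keep updating (up to max_rounds)
        # until node activations settle; otherwise update every node once.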
mode = mode.split('-')[0]
if not self.options[mode]['is_active']: return
if data: self.check_inputs(data)
if self.options[mode]['quiescence'] and self.recurring_link():
print "Q:",
self.draw_layer()
for i in range(max(1, self.options['max_rounds'])):
self.update_nodes_once()
print "Q:",
self.draw_layer()
if self.quiescent(): break
print
else:
self.update_nodes_once(data)
# Update all nodes once
def update_nodes_once(self, data = False):
for node in self.nodes:
node.update_prev_activation()
if data:
for i in range(len(data)):
self.nodes[i].update(data[i])
if self.options['bias_node']:
self.nodes[-1].update(0)
else:
for i in range(len(self.nodes)):
net = 0.0
for link in self.links_in:
net += link.get_net_inputs_to_node(self.nodes[i])
self.nodes[i].update(net)
# Train all link weights, hebb style
def train_hebbian(self):
for link in self.links_in: link.adjust_all_weights()
# Output values for last layer
def outputs(self):
return [node.activation_level for node in self.nodes]
def reset_activation_levels(self):
for node in self.nodes: node.reset_activation_level()
def reset_error_terms(self):
for node in self.nodes: node.reset_error_term()
## Drawing
# Display layer and its links
def draw(self):
self.draw_layer()
print
self.draw_links()
def draw_layer(self):
print utils.string_to_exact_len(self.options['name'], 12),
print ' | ',
for node in self.nodes:
print utils.string_to_exact_len(round(node.activation_level, 2), 20),
print ' | ',
print
def draw_links(self):
for link in self.links_out:
if not link.options['display']: continue
print utils.string_to_exact_len(link.options['name'], 13),
for arcrow in link.arcs:
if not arcrow[0]: continue
print ' | ',
for arc in arcrow:
if arc: print utils.string_to_exact_len(round(arc.weight, 2), 5),
else: print ' ' * 5,
print ' | ',
print
print ' ' * 13,
print "\n"
| [
"[email protected]"
] | |
99cf74fe0fa86c20ee140ceacb802de20ec1a065 | 41348afdabd47bfed91984ad4d2917c0e9fce537 | /ex18.py | 334856c3282869b7df9e372a3ebbd279ec6a3ce5 | [] | no_license | nchlswtsn/Learn | dad6a7e2a91036ffdf7cfa5901b03076099a198b | 0486e49f3a706f2cdc787c42cc01f04a2ef5e5aa | refs/heads/master | 2020-03-31T10:05:10.818607 | 2015-04-29T15:16:43 | 2015-04-29T15:16:43 | 32,588,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | def print_two(*args):
arg1, arg2 = args
print "arg1: %r, arg2: %r" % (arg1, arg2)
def print_two_again(arg1, arg2):
print "arg1: %r, arg2: %r" % (arg1, arg2)
def print_one(arg1):
print "arg1: %r" % arg1
def print_none():
print "I got nothin'."
def john_goddard(john1, john2):
print "%r %r" % (john1, john2)
print_two("Charles","Watson")
print_two_again("Charles","Watson")
print_one("Charles!")
print_none()
john_goddard("Apples","Oranges") | [
"[email protected]"
] | |
ec3b03bdf24432b35bd30c2e2b5a7f72878e232f | d2b599e56d8852bbf0c940f61f47094eec4ae82f | /pynyzo/pynyzo/__init__.py | 547acdb4485315163159cbd045a2fcde9a0ea35e | [
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | iyomisc/pynyzo | 2f58e6323a8acad2e59fb20a477f0a6897a58074 | fb324a8fe7c91a4f2c0f155b7170eb005b1c88ca | refs/heads/master | 2022-07-05T20:16:02.661606 | 2020-05-17T17:43:42 | 2020-05-17T17:43:42 | 264,722,805 | 0 | 0 | MIT | 2020-05-17T17:41:51 | 2020-05-17T17:41:50 | null | UTF-8 | Python | false | false | 155 | py | # -*- coding: utf-8 -*-
"""Top-level package for Python Boilerplate."""
__author__ = """EggdraSyl"""
__email__ = '[email protected]'
__version__ = '0.0.1'
| [
"[email protected]"
] | |
0dae7337a32647723635b53ab343aaaa18191910 | ead6ec54c304046e8017289ecae2acb69f2e463d | /flotilla/test/compute/test_compute_predict.py | a0966a104a4f900680c03b0f4217996b39d7b6d0 | [] | permissive | YeoLab/flotilla | 93e3576002f1b51917bc8576897d399176e1fa3a | 31da64567e59003c2b9c03fc8f4eb27ee62e299c | refs/heads/master | 2023-04-28T04:23:30.408159 | 2017-04-19T07:03:03 | 2017-04-19T07:03:03 | 19,319,564 | 104 | 27 | BSD-3-Clause | 2023-04-15T19:16:52 | 2014-04-30T16:14:31 | Jupyter Notebook | UTF-8 | Python | false | false | 635 | py | from __future__ import (absolute_import, division,
print_function, unicode_literals)
# def test_default_predictor_scoring_fun():
# pass
#
#
# def test_default_score_cutoff_fun():
# pass
#
#
# def test_PredictorConfig():
# pass
#
#
# def test_PredictorConfigScalers():
# pass
#
#
# def test_ConfigOptimizer():
# pass
#
#
# def test_PredictorConfigManager():
# pass
#
#
# def test_PredictorDataSet():
# pass
#
#
# def test_PredictorDataSetManager():
# pass
#
#
# def test_PredictorBase():
# pass
#
#
# def test_Regressor():
# pass
#
#
# def test_Classifier():
# pass
| [
"[email protected]"
] | |
ce8be278b86f5db9db4368a48b19d7849122ae55 | 7f20bf423448828d31dd9452e6adb49842d5519d | /datatable.py | 803715d9425846fa2059172c08dc515759dfc893 | [] | no_license | jason-huynh83/warzone_stats | 01ef5344d3121b4db4f0ce3809dd8e8273d315e0 | ce8da6a001f4988bdf69cf59f12253c990c0982a | refs/heads/master | 2023-04-10T08:40:57.365553 | 2021-04-22T21:47:04 | 2021-04-22T21:47:04 | 359,310,582 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,325 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 12 13:24:12 2021
@author: Jason
"""
import requests
import pandas as pd
import json
import matplotlib.pyplot as plt
import numpy as np
import time
import warnings
import plotly.graph_objects as go
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table
warnings.filterwarnings("ignore")
def weekly_stats(username,platform):
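    """Fetch weekly Warzone stats for `username` on `platform` from the RapidAPI
    Call of Duty endpoint and return the summary as a one-row DataFrame."""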
url = "https://call-of-duty-modern-warfare.p.rapidapi.com/weekly-stats/"+username+"/"+platform
headers = {
'x-rapidapi-key': "e5ebce8953mshb052759675fa2b3p15385ajsnc449b9ed0f9b",
'x-rapidapi-host': "call-of-duty-modern-warfare.p.rapidapi.com"
}
response = requests.request("GET", url, headers=headers)
response_text = response.text
stats_dict = json.loads(response_text)
df_wz = stats_dict['wz']
df_all = df_wz['all']
df_summary = df_all['properties']
df = pd.DataFrame(list(df_summary.items()),columns=['stats','value'])
df = df.set_index('stats').transpose()
df = df[['matchesPlayed','damageDone','damageTaken','kdRatio','kills','deaths','scorePerMinute','killsPerGame','headshots','assists','headshotPercentage','gulagKills','gulagDeaths']]
return df
df = weekly_stats('iBHuynhing','psn')
| [
"[email protected]"
] | |
2f9da7304beaa6270fe6162d825473423cf21ce0 | 2e763afb48cd3ece9471cb58cb218b8b29c65f07 | /api/notd/model.py | e30e3324c283632920524c8563708f2d7e3470b6 | [] | no_license | Femi-Ogunkola/nftoftheday | 472c761c26c0dfe2403f71d728f3dae9d70ae837 | 4b542fc74ac789eab6e47315de873ce61496c406 | refs/heads/main | 2023-07-10T09:55:34.043971 | 2021-08-11T13:22:35 | 2021-08-11T13:22:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,022 | py | import datetime
from typing import Dict
from typing import List
from typing import Optional
from pydantic import dataclasses
@dataclasses.dataclass
class TokenTransfer:
tokenTransferId: int
transactionHash: str
registryAddress: str
fromAddress: str
toAddress: str
tokenId: str
value: int
gasLimit: int
gasPrice: int
gasUsed: int
blockNumber: int
blockHash: str
blockDate: datetime.datetime
def to_dict(self) -> Dict:
return {
'tokenTransferId': self.tokenTransferId,
'transactionHash': self.transactionHash,
'registryAddress': self.registryAddress,
'fromAddress': self.fromAddress,
'toAddress': self.toAddress,
'tokenId': self.tokenId,
'value': self.value,
'gasLimit': self.gasLimit,
'gasPrice': self.gasPrice,
'gasUsed': self.gasUsed,
'blockNumber': self.blockNumber,
'blockHash': self.blockHash,
'blockDate': self.blockDate.isoformat(),
}
@dataclasses.dataclass
class RetrievedTokenTransfer:
transactionHash: str
registryAddress: str
fromAddress: str
toAddress: str
tokenId: str
value: int
gasLimit: int
gasPrice: int
gasUsed: int
blockNumber: int
blockHash: str
blockDate: datetime.datetime
@dataclasses.dataclass
class Token:
registryAddress: str
tokenId: str
@dataclasses.dataclass
class UiData:
highestPricedTokenTransfer: TokenTransfer
mostTradedTokenTransfers: List[TokenTransfer]
randomTokenTransfer: TokenTransfer
sponsoredToken: Token
@dataclasses.dataclass
class RegistryToken:
name: str
imageUrl: Optional[str]
openSeaUrl: Optional[str]
externalUrl: Optional[str]
lastSaleDate: Optional[datetime.datetime]
lastSalePrice: Optional[int]
collectionName: str
collectionImageUrl: Optional[str]
collectionOpenSeaUrl: Optional[str]
collectionExternalUrl: Optional[str]
| [
"[email protected]"
] | |
c361d7f4bca1a4ab8b7fcfd6b6fddec57dec62ed | 5c16bc84baf250e5b0db6004024bee37a00a5278 | /spiders/urls.py | a13b6dc271ce45068971679b7d56f892249ac236 | [] | no_license | AdejokeOgunyinka/HyperlinksDjangoUI | a693f0eb9da52bc33fe9fa3954e6a5d850093111 | 28caf739968a0bdf4937d50c70a6f90f2e246ca2 | refs/heads/main | 2022-12-28T05:01:18.743181 | 2020-10-06T20:33:32 | 2020-10-06T20:33:32 | 302,283,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | from django.urls import path
from .import views
urlpatterns = [
path('', views.index, name='index'),
path('page/', views.pages, name='page'),
] | [
"[email protected]"
] | |
e83d9db572f87305b1b0096b3b8ae3adf05e7a89 | 8caff964f989c8ec00c60dea87a0cef3eb877e97 | /changeName.py | 661ca76b0d86b0091b5580ed8aaf5598c0fb6ecc | [] | no_license | github-luffy/BinaryClassification | a549bdc8666c3912f5d83921b9ebe29f56dc19dd | be9e2a5e27411228bbc6fac245e8fca06051ba16 | refs/heads/master | 2022-12-05T14:49:24.941128 | 2020-08-04T07:52:57 | 2020-08-04T07:52:57 | 284,900,250 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,831 | py | #-*- coding: utf-8 -*-
import os
import numpy as np
import cv2
import shutil
def main():
# root_dir = os.path.dirname(os.path.realpath(__file__))
# print(root_dir)
# image_file = '%s/data/test_data/list.txt' % root_dir
# dst_image_file = '%s/data/train_data/list.txt' % root_dir
# with open(dst_image_file, 'a+') as fw:
# with open(image_file, 'r') as fr:
# lines = fr.readlines()
# for index, line in enumerate(lines):
# line_data = line.strip('\t\n').split(' ', 1)
# image_name = line_data[0].split('/')[-1]
# split_name = image_name.split('_')
# dst_image_name = '%s_%08d_%s' % (95192 + int(split_name[0]), 95192 + int(split_name[1]), split_name[2])
# print(dst_image_name)
# fw.write('%s/%s ' % ('/home/dsai/datadisk/pfld68/PFLD-master/data/train_data/imgs', dst_image_name))
# fw.write(line_data[1])
# fw.write('\n')
pass
if __name__ == '__main__':
root_dir = os.path.dirname(os.path.realpath(__file__))
print(root_dir)
images_dir = '%s/data/test_data/imgs' % root_dir
images = os.listdir(images_dir)
images.sort(key=lambda x: int(x.split('_')[1]))
print(len(images))
for index, image_name in enumerate(images):
split_name = image_name.split('_')
dst_image_name = '%s_%08d_%s' % (95192 + int(split_name[0]), 95192 + int(split_name[1]), split_name[2])
# print(image_name, ',', dst_image_name)
image = cv2.imread('%s/%s' % (images_dir, image_name))
cv2.imwrite('%s/data/train_data/imgs/%s' % (root_dir, dst_image_name), image)
if (index + 1) % 1000 == 0:
print('Done for %d' % (index + 1))
print('end')
| [
"[email protected]"
] | |
0063fb345955b95abb5f38b6bcddd2fe990b3a53 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit2254.py | b42aa032118dd4738b27d1937fa0297f59adf0ee | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,978 | py | # qubit number=4
# total number=35
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
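    # dot product of two bit strings, reduced modulo 2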
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=16
prog.cz(input_qubit[0],input_qubit[3]) # number=17
prog.h(input_qubit[3]) # number=18
prog.x(input_qubit[3]) # number=14
prog.cx(input_qubit[0],input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.cx(input_qubit[2],input_qubit[3]) # number=22
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=24
prog.cz(input_qubit[3],input_qubit[2]) # number=25
prog.h(input_qubit[2]) # number=26
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[2]) # number=32
prog.cz(input_qubit[0],input_qubit[2]) # number=33
prog.h(input_qubit[2]) # number=34
prog.x(input_qubit[2]) # number=30
prog.cx(input_qubit[0],input_qubit[2]) # number=31
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.y(input_qubit[2]) # number=11
prog.x(input_qubit[1]) # number=20
prog.x(input_qubit[1]) # number=21
prog.x(input_qubit[3]) # number=27
prog.x(input_qubit[3]) # number=28
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
    sample_shot = 8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit2254.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"[email protected]"
] | |
f5fbc0753d1f4c11c6cf976d4bbfd798777c896b | e87eb53e3d3d0332b4ac5fe643bff0aaaf6b983b | /backend-service/visits-service/app/app/db/base.py | f8e377ffb0162f52e576b57916f83871bf8474a5 | [
"MIT"
] | permissive | abhishek70/python-petclinic-microservices | 718dc2f9bf84260a23a7a9d756da295f5d3a1257 | e15a41a668958f35f1b962487cd2360c5c150f0b | refs/heads/main | 2023-05-08T21:54:12.761291 | 2021-06-01T07:03:14 | 2021-06-01T07:03:14 | 359,316,746 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | # Import all the models, so that Base has them before being
# imported by Alembic
from ..models.visit import Visit # noqa
from .base_class import Base # noqa
| [
"[email protected]"
] | |
7f5338eb5a735e15180fbe10056cddff56f1841f | d0f11aa36b8c594a09aa06ff15080d508e2f294c | /leecode/1-500/101-200/119-杨辉三角Ⅱ.py | 899447e747110eb099a66ee141ed5fc00a6fcccf | [] | no_license | saycmily/vtk-and-python | 153c1fe9953fce685903f938e174d3719eada0f5 | 5045d7c44a5af5c16df5a3b72c157e9a2928a563 | refs/heads/master | 2023-01-28T14:02:59.970115 | 2021-04-28T09:03:32 | 2021-04-28T09:03:32 | 161,468,316 | 1 | 1 | null | 2023-01-12T05:59:39 | 2018-12-12T10:00:08 | Python | UTF-8 | Python | false | false | 474 | py | def getRow(self, rowIndex):
if rowIndex == 0:
return [1]
pre = [1, 1]
if rowIndex == 1:
return pre
    for i in range(rowIndex - 1):
        num = [1]
        for j in range(1, len(pre)):
            num.append(pre[j] + pre[j - 1])
num.append(1)
pre = num[:]
return pre
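    # (Added note) The commented block below is an O(rowIndex)-time alternative
    # using the binomial identity C(n, k + 1) == C(n, k) * (n - k) // (k + 1):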
# temp = 1
# res = []
# for i in range(rowIndex + 1):
# res.append(temp)
# temp = (temp * (rowIndex - i)) // (i + 1)
# return res
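# Quick local check (added; not part of the original LeetCode submission).
# The method is pasted at module level, which is why it still takes `self`;
# passing None for it is enough here:
if __name__ == "__main__":
    print(getRow(None, 3))  # expected: [1, 3, 3, 1]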
| [
"[email protected]"
] | |
b7d91bec1ace8d40613c4c3fc0e817a628cc7656 | f4900dc70594e540fa38bdc92ca07b8b42d20c1b | /titulares/migrations/0001_initial.py | 11a44ebbcb5dd8d97f7f028371311109540a0af3 | [] | no_license | Garvillo/almond-way | 45d499690a5709d64c1febe738528302f08ae022 | 709f4a462d23f3073cf3c7267d11457c035e0b13 | refs/heads/master | 2021-08-03T18:17:36.500495 | 2021-07-28T11:30:16 | 2021-07-28T11:30:16 | 141,634,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | # Generated by Django 2.0.7 on 2019-07-02 20:39
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Titular',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=150)),
('cif', models.CharField(max_length=11, unique=True)),
('direccion', models.CharField(blank=True, max_length=200, null=True)),
('telefono', models.CharField(blank=True, max_length=15, null=True)),
('correo', models.EmailField(blank=True, max_length=254, null=True)),
('activo', models.BooleanField(default=True)),
],
),
]
| [
"[email protected]"
] | |
622b641ed61b4497922baa31b9969d78ae3aed3a | 667f153e47aec4ea345ea87591bc4f5d305b10bf | /Solutions/Ch3Ex079.py | e785458d3ab002f62d58c7b3ef7cdbe598755155 | [] | no_license | Parshwa-P3/ThePythonWorkbook-Solutions | feb498783d05d0b4e5cbc6cd5961dd1e611f5f52 | 5694cb52e9e9eac2ab14b1a3dcb462cff8501393 | refs/heads/master | 2022-11-15T20:18:53.427665 | 2020-06-28T21:50:48 | 2020-06-28T21:50:48 | 275,670,813 | 1 | 0 | null | 2020-06-28T21:50:49 | 2020-06-28T21:26:01 | Python | UTF-8 | Python | false | false | 525 | py | # Ch3Ex079.py
# Author: Parshwa Patil
# ThePythonWorkbook Solutions
# Exercise No. 79
# Title: Maximum Integer
from random import randint
def main():
    maximum = randint(1, 100)  # first of the 100 random values
    print(maximum)
    updateCount = 0
    for i in range(99):  # 99 more draws; the first value was already printed above
        number = randint(1, 100)
        print(number, end="")
        if number > maximum:
            maximum = number
            updateCount += 1
            print(" <- Update", end="")
        print()
    print("Maximum Number: %d" % maximum)
    print("Updates: %d" % updateCount)
if __name__ == "__main__": main() | [
"[email protected]"
] | |
24aa4432f7ae3ddc3f4622f20aaf179745d3fbf2 | 11e55abdbf93bca42d5b7961ec63091ced5c4830 | /test_calculator.py | 8c54d4590bf1f1d86ebe057ff903ec8109665bf6 | [] | no_license | mawall/calculator | 9c3157d57f2729dc807ba0ab9b3222a3fbb82372 | 5954d8c5908ebb17c2718083072dc0f828a230a1 | refs/heads/master | 2023-03-23T07:20:45.021031 | 2021-03-15T15:14:53 | 2021-03-15T15:14:53 | 339,877,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,322 | py | import pytest
from calculator import calculate_prefix
from calculator import calculate_infix
non_string_input = [
12,
12.0,
1j,
[1, 2],
{'a': 1, 'b': 2},
False,
b"bytes",
]
class TestPrefixCalc:
def test_single_number(self):
assert calculate_prefix("3") == 3
def test_base_case(self):
assert calculate_prefix("+ 1 2") == 3
@pytest.mark.parametrize("case, expected", [
("+ 1 * 2 3", 7),
("+ * 1 2 3", 5),
("- / 10 + 1 1 * 1 2", 3),
("- / 100 + 1 1 * 1 2", 48),
("- 0 3", -3),
("/ 3 2", 1.5),
])
def test_nested_cases(self, case, expected):
assert calculate_prefix(case) == expected
def test_empty_string(self):
with pytest.raises(ValueError):
calculate_prefix("")
@pytest.mark.parametrize("case", non_string_input)
def test_non_string(self, case):
with pytest.raises(ValueError):
calculate_prefix(case)
def test_additional_whitespace(self):
assert calculate_prefix(" + 1 2 ") == 3
@pytest.mark.parametrize("case", [
"+ 1 2 3",
"+ 1 2 3 4",
])
def test_additional_digits(self, case):
with pytest.raises(ValueError):
calculate_prefix(case)
def test_additional_operators(self):
with pytest.raises(ValueError):
calculate_prefix("+ - * 1 2")
def test_unknown_operator(self):
with pytest.raises(ValueError):
calculate_prefix("& 1 2")
class TestInfixCalc:
def test_single_number(self):
assert calculate_infix("( 3 )") == 3
def test_base_case(self):
assert calculate_infix("( 1 + 2 )") == 3
@pytest.mark.parametrize("case, expected", [
("( 10 + ( 2 * 3 ) )", 16),
("( ( 1 * 2 ) + 3 )", 5),
("( ( ( 1 + 1 ) / 10 ) - ( 1 * 2 ) )", -1.8),
])
def test_nested_cases(self, case, expected):
assert calculate_infix(case) == expected
def test_empty_string(self):
with pytest.raises(ValueError):
calculate_infix("")
@pytest.mark.parametrize("case", non_string_input)
def test_non_string(self, case):
with pytest.raises(ValueError):
calculate_infix(case)
def test_additional_whitespace(self):
assert calculate_infix(" ( 1 + 2 ) ") == 3
@pytest.mark.parametrize("case", [
"( 1 + 2 2 )",
"( 1 1 + 2 )",
])
def test_additional_digits(self, case):
with pytest.raises(ValueError):
calculate_infix(case)
@pytest.mark.parametrize("case, expected", [
("( ( 1 ) + ( 2 ) )", 3),
("( ( ( ( 1 + 2 ) ) ) )", 3),
])
def test_additional_brackets(self, case, expected):
assert calculate_infix(case) == expected
def test_additional_operators(self):
with pytest.raises(ValueError):
calculate_infix("( 1 + * 2 )")
def test_unknown_operator(self):
with pytest.raises(ValueError):
calculate_infix("( 1 & 2 )")
@pytest.mark.parametrize("case", [
") 1 + 2 (",
")( 1 + 2 )",
"( 1 + 2 ))",
])
def test_bracket_mismatch(self, case):
with pytest.raises(ValueError):
calculate_infix(case)
| [
"[email protected]"
] | |
fdeec56836193408b7a8b38312ebd829003c3d2d | dcfd3f9e06ed7ef52367219177d07f18a5d728a0 | /Wizualizacja_postaci_drgan.py | 49b63a9c26cffdbf46886f0bcb886bbe449fcd48 | [] | no_license | De4r/ModalEstimation | 91a5b9e8cb3c20717a197af7eef70fc209a11421 | b7d6da5c1bfb2b0e1fab40f16638fe87882cb500 | refs/heads/master | 2020-05-09T18:12:15.850499 | 2019-04-15T08:17:13 | 2019-04-15T08:17:13 | 181,332,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,324 | py | from funkcje import model
import numpy as np
'''
Mode shape visualization script.
Set the parameters at the bottom of the script:
rzut = 0, 1, 2, 3  (view / projection)
f     - frequency
f_tol - frequency tolerance
'''
def mode_shape(f, f_tol, sila=1000, zapis='n', nazwazapisu='obiekt', rzut=0):
    '''Since points 1-36 are excited in the X direction, the wyniki{i}y files hold
    the Y-direction response to X excitation; that is why the X sign-flip point list
    is applied inside the Y-response branch. I am still not sure this is visualized correctly.'''
punkty_do_odwrocenia_znakuX = [3, 4, 7, 8, 11, 12, 15, 16, 19, 20, 23, 24, 27, 28, 31, 32, 35, 36]
punkty_do_odwrocenia_znakuY = [1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29, 32, 33, 36]
nodes = np.loadtxt('nodes.txt')
if f != 0:
np.savetxt(fname='nodes1.txt', X=nodes, fmt="%.6f", header='lp x y z')
nodes1 = np.loadtxt('nodes1.txt')
[x, y] = np.shape(nodes1)
try:
for i in range(x):
xdis = 0
ydis = 0
            plik = f'parametry/wyniki{i+1}x.txt'  # set the path to the result files here
temp = np.loadtxt(plik)
[xx, yy] = np.shape(temp)
for j in range(xx):
                if temp[j, 4] >= f - f_tol and temp[j, 4] <= f + f_tol:
                    xdis = temp[j, 11] * 1000 * sila  # to metres, times force in newtons
if i+1 in punkty_do_odwrocenia_znakuX:
xdis = xdis * -1
plik = f'parametry/wyniki{i + 1}y.txt'
temp = np.loadtxt(plik)
[xx, yy] = np.shape(temp)
for j in range(xx):
                if temp[j, 4] >= f - f_tol and temp[j, 4] <= f + f_tol:
                    # to metres, times force in newtons; the original hard-coded 1000
                    # here instead of `sila`, which looks like a copy-paste slip
                    ydis = temp[j, 11] * 1000 * sila
nodes1[i, 1] += xdis
nodes1[i, 2] += ydis
except Exception as e:
print(e)
np.savetxt(fname='nodes1.txt', X=nodes1, fmt="%.6f", header='lp x y z')
model.obiekt(nodes1, zapis=zapis, nazwa=nazwazapisu, rzut=rzut)
nodes = np.loadtxt('nodes.txt')
# undeformed model by itself
model.obiekt(nodes, zapis='y', nazwa='obiekt_sam', rzut=0)
# rzut (view): 0, 1, 2, 3
f = 14
mode_shape(f=f, f_tol=1, sila=3000, zapis='n', nazwazapisu=f'postacdrgan60.7-{f}', rzut=0)
#!!! remember to set the path to the result files inside mode_shape !!!
| [
"[email protected]"
] | |
6e763e16ac2e29d7d80641e496069772f114cb59 | 1825283527f5a479204708feeaf55f4ab6d1290b | /leetcode/python/1138/sol.py | 37efea30c3a608a716269f1f938a5aa48182f29c | [] | no_license | frankieliu/problems | b82c61d3328ffcc1da2cbc95712563355f5d44b5 | 911c6622448a4be041834bcab25051dd0f9209b2 | refs/heads/master | 2023-01-06T14:41:58.044871 | 2019-11-24T03:47:22 | 2019-11-24T03:47:22 | 115,065,956 | 1 | 0 | null | 2023-01-04T07:25:52 | 2017-12-22T02:06:57 | HTML | UTF-8 | Python | false | false | 1,727 | py | In leetcode init
C++/Java O(n)
https://leetcode.com/problems/alphabet-board-path/discuss/345278
* Lang: python
* Author: votrubac
* Votes: 30
Determine the coordinate and move there. Note that 'z' is tricky as you cannot move left or right in the last row.
To account for that, make sure we move up before moving right, and move left before moving down.
## C++
```
string alphabetBoardPath(string target, int x = 0, int y = 0) {
string res;
for (auto ch : target) {
        int x1 = (ch - 'a') % 5, y1 = (ch - 'a') / 5;
        res += string(max(0, y - y1), 'U') + string(max(0, x1 - x), 'R') +
               string(max(0, x - x1), 'L') + string(max(0, y1 - y), 'D') + "!";
x = x1, y = y1;
}
return res;
}
```
## Java
> Would be nice to have Java 11 support, so we can just do ```"U".repeat(Math.max(0, y - y1));```
```
public String alphabetBoardPath(String target) {
int x = 0, y = 0;
StringBuilder sb = new StringBuilder();
for (char ch : target.toCharArray()) {
        int x1 = (ch - 'a') % 5, y1 = (ch - 'a') / 5;
sb.append(String.join("", Collections.nCopies(Math.max(0, y - y1), "U")) +
String.join("", Collections.nCopies(Math.max(0, x1 - x), "R")) +
String.join("", Collections.nCopies(Math.max(0, x - x1), "L")) +
String.join("", Collections.nCopies(Math.max(0, y1 - y), "D")) + "!");
x = x1; y = y1;
}
return sb.toString();
}
```
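## Python
> Added for this collection — an untested, line-for-line port of the C++ version above; it is not part of the original post.
```
def alphabetBoardPath(target: str) -> str:
    x = y = 0
    res = []
    for ch in target:
        x1, y1 = (ord(ch) - ord('a')) % 5, (ord(ch) - ord('a')) // 5
        # go Up before Right and Left before Down so that 'z' (alone on the
        # bottom row) is never approached or left through an illegal cell
        res.append('U' * max(0, y - y1) + 'R' * max(0, x1 - x) +
                   'L' * max(0, x - x1) + 'D' * max(0, y1 - y) + '!')
        x, y = x1, y1
    return ''.join(res)
```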
| [
"[email protected]"
] | |
8864d001473e3cfc6fa13f6d758af3c7f805423b | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/galex_j18165+4623/sdB_galex_j18165+4623_coadd.py | 39257440de04654ba8cafaacf913dda3f915fd73 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[274.147083,46.387794], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_galex_j18165+4623/sdB_galex_j18165+4623_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_galex_j18165+4623/sdB_galex_j18165+4623_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
18badaf0f90dd5fd270c3c65407d6069b50eccfa | caa8328cd164a4b9e4f88d8d3de2140b047346b7 | /client/devpi/push.py | 1b373c07dd31b7e6c2f82bf39f4ad1e7c634e46e | [] | no_license | zerotired/devpi | ed02b276422dc0b496c484204cf9d5b59a0173a1 | ab3b9aab83795c45c150cdefa26c9171827f61c6 | refs/heads/master | 2021-01-16T19:20:40.927221 | 2013-06-02T09:58:27 | 2013-06-02T10:04:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | import py
from devpi import log
from devpi.util import version as verlib
from devpi.util import pypirc
def main(hub, args):
pushrelease = hub.config.pushrelease
pypirc_path = args.pypirc
if pypirc_path is None:
pypirc_path = py.path.local._gethomedir().join(".pypirc")
else:
pypirc_path = py.path.local().join(args.pypirc, abs=True)
assert pypirc_path.check()
hub.info("using pypirc", pypirc_path)
auth = pypirc.Auth(pypirc_path)
posturl, (user, password) = auth.get_url_auth(args.posturl)
name, version = verlib.guess_pkgname_and_version(args.nameversion)
req = py.std.json.dumps(dict(name=name, version=str(version),
posturl=posturl,
username=user, password=password,
))
r = py.std.requests.post(pushrelease, data=req)
assert r.status_code == 201, r.content
hub.info("pushed %s to %s" % (args.nameversion, args.posturl))
| [
"[email protected]"
] | |
fa087a65218831e8e25bf42722524b34f0d9a788 | f82e67dd5f496d9e6d42b4fad4fb92b6bfb7bf3e | /scripts/client/gui/battle_control/vehicle_state_ctrl.py | e60cc8c17796d3b1207eb5dbf148c09207267eaf | [] | no_license | webiumsk/WOT0.10.0 | 4e4413ed4e7b00e22fb85d25fdae9400cbb4e76b | a84f536c73f86d9e8fab559e97f88f99f2ad7e95 | refs/heads/master | 2021-01-09T21:55:00.662437 | 2015-10-23T20:46:45 | 2015-10-23T20:46:45 | 44,835,654 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,061 | py | # Embedded file name: scripts/client/gui/battle_control/vehicle_state_ctrl.py
import BigWorld
import Event
import SoundGroups
from debug_utils import LOG_CURRENT_EXCEPTION, LOG_ERROR
from gui.battle_control import avatar_getter
from gui.battle_control.battle_constants import VEHICLE_VIEW_STATE, VEHICLE_WAINING_INTERVAL, VEHICLE_UPDATE_INTERVAL
from gui.shared.utils.TimeInterval import TimeInterval
import nations
class _PlayerVehicleUpdater(object):
def __init__(self):
super(_PlayerVehicleUpdater, self).__init__()
self.clear()
def clear(self):
self.__speed = 0
def switch(self, isPlayer):
if isPlayer:
self.clear()
result = self
else:
result = _OtherVehicleUpdater()
return result
def update(self, vehicleID, _):
player = BigWorld.player()
if player is None:
return
else:
states = None
vehicle = BigWorld.entity(vehicleID)
if vehicle is not None and vehicle.isStarted:
speed, _ = player.getOwnVehicleSpeeds()
                speed = int(speed * 3.6)  # m/s -> km/h
if self.__speed != speed:
self.__speed = speed
states = [(VEHICLE_VIEW_STATE.SPEED, speed)]
return states
class _OtherVehicleUpdater(object):
def __init__(self):
super(_OtherVehicleUpdater, self).__init__()
self.clear()
def clear(self):
self.__speed = 0
self.__health = 0
def switch(self, isPlayer):
if isPlayer:
result = _PlayerVehicleUpdater()
else:
self.clear()
result = self
return result
def update(self, vehicleID, ticker):
vehicle = BigWorld.entity(vehicleID)
if vehicle is not None:
states = []
health = vehicle.health
if self.__health != health:
self.__health = health
states.append((VEHICLE_VIEW_STATE.HEALTH, health))
if vehicle.isStarted:
try:
speed = vehicle.filter.speedInfo.value[0]
fwdSpeedLimit, bckwdSpeedLimit = vehicle.typeDescriptor.physics['speedLimits']
speed = max(min(speed, fwdSpeedLimit), -bckwdSpeedLimit)
                    speed = int(speed * 3.6)  # m/s -> km/h
if self.__speed != speed:
self.__speed = speed
states.append((VEHICLE_VIEW_STATE.SPEED, speed))
except (AttributeError, IndexError, ValueError):
LOG_CURRENT_EXCEPTION()
LOG_ERROR('Can not update speed. Stop')
ticker.stop()
if not vehicle.isAlive():
states.append((VEHICLE_VIEW_STATE.DESTROYED, None))
ticker.stop()
else:
states = None
return states
class VehicleStateController(object):
def __init__(self):
super(VehicleStateController, self).__init__()
self.__eManager = Event.EventManager()
self.onVehicleStateUpdated = Event.Event(self.__eManager)
self.onVehicleControlling = Event.Event(self.__eManager)
self.onPostMortemSwitched = Event.Event(self.__eManager)
self.onRespawnBaseMoving = Event.Event(self.__eManager)
self.__waitingTI = TimeInterval(VEHICLE_WAINING_INTERVAL, self, '_waiting')
self.__updateTI = TimeInterval(VEHICLE_UPDATE_INTERVAL, self, '_update')
self.__vehicleID = 0
self.__updater = _PlayerVehicleUpdater()
self.__isRqToSwitch = False
def clear(self):
if self.__waitingTI:
self.__waitingTI.stop()
self.__waitingTI = None
if self.__updateTI:
self.__updateTI.stop()
self.__updateTI = None
self.__vehicleID = 0
self.__isRqToSwitch = False
self.__updater = None
self.__eManager.clear()
return
def setPlayerVehicle(self, vehicleID):
self.onVehicleStateUpdated(VEHICLE_VIEW_STATE.PLAYER_INFO, vehicleID)
self.__vehicleID = vehicleID
self.__waitingTI.start()
def getControllingVehicle(self):
vehicle = None
if self.__vehicleID:
vehicle = BigWorld.entity(self.__vehicleID)
return vehicle
def invalidate(self, state, value):
if state == VEHICLE_VIEW_STATE.DESTROYED:
self.__updateTI.stop()
self.onVehicleStateUpdated(state, value)
def switchToPostmortem(self):
self.__isRqToSwitch = False
if avatar_getter.getPlayerVehicleID() == self.__vehicleID:
self.__waitingTI.stop()
self.__updateTI.stop()
self.onPostMortemSwitched()
def switchToAnother(self, vehicleID):
if self.__vehicleID == vehicleID or vehicleID is None:
return
else:
self.onVehicleStateUpdated(VEHICLE_VIEW_STATE.SWITCHING, self.__vehicleID)
if self.__updater:
self.__updater.clear()
self.__waitingTI.stop()
self.__updateTI.stop()
self.__vehicleID = vehicleID
self.__isRqToSwitch = True
self.onVehicleStateUpdated(VEHICLE_VIEW_STATE.PLAYER_INFO, self.__vehicleID)
self.__waitingTI.start()
return
def movingToRespawn(self):
self.onVehicleStateUpdated(VEHICLE_VIEW_STATE.SWITCHING, 0)
self.onRespawnBaseMoving()
def _waiting(self):
vehicle = BigWorld.entity(self.__vehicleID)
if vehicle is not None:
self.__waitingTI.stop()
self._setup(vehicle)
return
def _update(self):
states = self.__updater.update(self.__vehicleID, self.__updateTI)
if states is not None:
for item in states:
self.onVehicleStateUpdated(*item)
return
def _setup(self, vehicle):
self.__updater = self.__updater.switch(vehicle.isPlayer)
if self.__isRqToSwitch:
nationID = vehicle.typeDescriptor.type.id[0]
notifications = avatar_getter.getSoundNotifications()
if notifications:
notifications.clear()
SoundGroups.g_instance.soundModes.setCurrentNation(nations.NAMES[nationID])
self.onVehicleControlling(vehicle)
if not vehicle.isAlive():
self.onVehicleStateUpdated(VEHICLE_VIEW_STATE.DESTROYED, None)
else:
self.__updateTI.start()
return
class VehicleStateReplayRecorder(VehicleStateController):
def invalidate(self, state, value):
if state == VEHICLE_VIEW_STATE.CRUISE_MODE:
import BattleReplay
BattleReplay.g_replayCtrl.onSetCruiseMode(value)
super(VehicleStateReplayRecorder, self).invalidate(state, value)
def createCtrl(isReplayRecording):
if isReplayRecording:
ctrl = VehicleStateReplayRecorder()
else:
ctrl = VehicleStateController()
return ctrl
| [
"[email protected]"
] | |
64431ded49c19715fa691706db67e9dce8f4efd0 | c2f42e145c03feb891d83ea294cdda9f37cfc717 | /tests/data_access/test_wrapper.py | 94d84cbfe0ca9321d296d12859134f06fe293e3f | [] | no_license | FelixKleineBoesing/queuingSystem | 5b38c123f206d9c71014064404b2f50f0f4491a5 | 09ff583831aa7f8b604f01dc97cf0284ed342f77 | refs/heads/master | 2023-04-12T00:00:20.309232 | 2021-04-25T11:55:04 | 2021-04-25T11:55:04 | 361,413,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | import unittest
class PostgresTester(unittest.TestCase):
pass
class RedisTester(unittest.TestCase):
pass | [
"[email protected]"
] | |
13d07d704e0486fc24b8ff9d71ab16d48b63c362 | 5c7c9157350dbd475d6b81dfafc935d0bd751405 | /src/controller.py | 694d72381fdbd73eb3d96c6d0780eb8566e6e336 | [] | no_license | testing-matheus/labeller | daf2362b99b9a01133929fff714d68c326ddd3f2 | 4c2b9126727188934585f0e88e71e0a2f61553fd | refs/heads/main | 2023-02-14T17:15:33.822378 | 2021-01-10T20:58:44 | 2021-01-10T20:58:44 | 326,397,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,141 | py | import os
from PyQt5.QtCore import QObject, pyqtSlot, pyqtSignal, QTimer
import src.utils as utils
from src.folderReader import FolderReader
from src.file_saver import FileSaver
from src.file_reader import FileReader
from src.trackerFactory import create_tracker
import cv2
class Controller(QObject):
change_color_index = pyqtSignal(int)
request_bboxes = pyqtSignal()
request_and_init_bboxes = pyqtSignal()
remove_rectangle_signal = pyqtSignal(int)
update_filename = pyqtSignal(str)
rectangles_signal = pyqtSignal(list, list, list, list, list, list)
update_image_folder = pyqtSignal(str)
def __init__(self, parent=None, extension='.png'):
super().__init__(parent)
self.mode = utils.MODE_TRACK
self.tracker_name = 'default'
self.trackers = []
self.current_class = 'default'
self.current_color_index = 0
self.class_colors = {}
self.prev_index_frame = 0
self.current_index_frame = 0
self.folder_reader = FolderReader()
self.image_directory = './imagen'
self.extension = extension
self.file_saver = FileSaver(self.image_directory)
self.file_reader = FileReader()
# self.filenames = ['out2.png', 'out19.png']
self.filenames = utils.read_files(self.image_directory, self.extension)
self.filenames = utils.sort_files(self.filenames)
# Timer for run button
self.run_timer = QTimer()
@pyqtSlot(list, list, list, list, list, list)
def process_rectangles(self, xs, ys, widths, heights, color_indices, recent_draw):
self.save_rectangles(self.get_prev_filename(), xs, ys, widths, heights, color_indices)
if self.mode == utils.MODE_TRACK:
xs, ys, widths, heights, color_indices, recent_draw = self.update_trackers(recent_draw, xs, ys, widths, heights, color_indices)
recent_draw = [False for i in recent_draw]
self.rectangles_signal.emit(xs, ys, widths, heights, color_indices, recent_draw)
elif self.mode == utils.MODE_RTRACK:
self.send_saved_bboxes()
elif self.mode == utils.MODE_COPYBBOX:
recent_draws = [True for i in recent_draw]
self.rectangles_signal.emit(xs, ys, widths, heights, color_indices, recent_draws)
elif self.mode == utils.MODE_EMPTY:
pass
elif self.mode == utils.MODE_NOTHING:
self.send_saved_bboxes()
@pyqtSlot(int)
def remove_rectangle_slot(self, index):
self.remove_rectangle(index)
def remove_rectangle(self, index):
if index < len(self.trackers):
self.trackers.pop(index)
def update_trackers(self, recent_draws, xs, ys, widths, heights, color_indices):
xs_out = []
ys_out = []
widths_out = []
heights_out = []
color_indices_out = []
curr_draws = []
trackers_to_remove = []
prev_image = cv2.imread(self.get_prev_filepath() + self.extension)
current_image = cv2.imread(self.get_current_filepath() + self.extension)
for index, (recent_draw, x, y, w, h, color_idx) in enumerate(zip(recent_draws, xs, ys, widths, heights, color_indices)):
if recent_draw:
tracker = create_tracker(self.tracker_name)
tracker.init(prev_image, (x, y, w, h))
self.trackers.append(tracker)
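                # Note (added): the update on prev_image below just warms the
                # fresh tracker up; ret and bbox are overwritten right after.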
ret, bbox = tracker.update(prev_image)
ret, bbox = self.trackers[index].update(current_image)
if ret:
xs_out.append(bbox[0])
ys_out.append(bbox[1])
widths_out.append(bbox[2])
heights_out.append(bbox[3])
color_indices_out.append(color_idx)
curr_draws.append(True)
else:
trackers_to_remove.append(index)
for index in trackers_to_remove[::-1]:
self.remove_rectangle_signal.emit(index)
return xs_out, ys_out, widths_out, heights_out, color_indices_out, curr_draws
def send_saved_bboxes(self):
xs, ys, widths, heights, color_indices = self.file_reader.read_bboxes(self.get_current_filepath())
self.rectangles_signal.emit(xs, ys, widths, heights, color_indices, [False for i in color_indices])
def save_rectangles(self, filename, xs, ys, widths, heights, color_indices):
image = cv2.imread(self.get_prev_filepath() + self.extension)
h, w = image.shape[:2]
c = 1
if len(image.shape) > 2:
c = image.shape[2]
self.file_saver.save_bboxes(filename, xs, ys, widths, heights, color_indices, w, h, c)
@pyqtSlot(str)
def set_tracker_name(self, tracker_name):
self.tracker_name = tracker_name
# Try 1
for index in range(len(self.trackers))[::-1]:
self.remove_rectangle_signal.emit(index)
self.send_saved_bboxes()
self.request_and_init_bboxes.emit()
# Try 2
# self.request_and_init_bboxes.emit()
@pyqtSlot(str)
def set_current_class(self, class_name):
self.current_class = class_name
self.current_color_index = self.class_colors[self.current_class]
self.change_color_index.emit(self.current_color_index)
def update_mode(self, mode):
self.mode = mode
@pyqtSlot()
def update_mode_to_track(self):
self.update_mode(utils.MODE_TRACK)
self.set_tracker_name(self.tracker_name)
@pyqtSlot()
def update_mode_to_rtrack(self):
self.update_mode(utils.MODE_RTRACK)
@pyqtSlot()
def update_mode_to_copybbox(self):
self.update_mode(utils.MODE_COPYBBOX)
@pyqtSlot()
def update_mode_to_empty(self):
self.update_mode(utils.MODE_EMPTY)
@pyqtSlot()
def update_mode_to_nothing(self):
self.update_mode(utils.MODE_NOTHING)
def set_classes(self, items):
self.class_colors = {color: index for index, color in enumerate(items)}
@pyqtSlot()
def request_next(self):
if self.current_index_frame < len(self.filenames) - 1:
self.prev_index_frame = self.current_index_frame
self.current_index_frame += 1
self.update_filename.emit(self.get_current_frame())
self.request_bboxes.emit()
@pyqtSlot()
def request_prev(self):
if self.current_index_frame > 0:
self.prev_index_frame = self.current_index_frame
self.current_index_frame -= 1
self.update_filename.emit(self.get_current_frame())
self.request_bboxes.emit()
def get_current_frame(self):
if self.current_index_frame < 0 or self.current_index_frame >= len(self.filenames):
return None
else:
path = os.path.join(self.image_directory, self.filenames[self.current_index_frame])
return path
def get_current_filename(self):
return self.filenames[self.current_index_frame]
def get_prev_filename(self):
return self.filenames[self.prev_index_frame]
def get_current_filepath(self):
return os.path.join(self.image_directory, self.get_current_filename())
def get_prev_filepath(self):
return os.path.join(self.image_directory, self.get_prev_filename())
def select_folder(self):
folder = self.folder_reader.get_folder()
self.image_directory = folder
self.file_saver.set_folder(folder)
self.filenames = utils.read_files(self.image_directory, self.extension)
self.filenames = utils.sort_files(self.filenames)
self.prev_index_frame = 0
self.current_index_frame = 0
for index in range(len(self.trackers))[::-1]:
self.remove_rectangle_signal.emit(index)
self.update_filename.emit(self.get_current_frame())
self.send_saved_bboxes()
self.request_and_init_bboxes.emit()
self.update_image_folder.emit(folder)
print('NEWFOLDER', folder)
@pyqtSlot()
def run_tracking(self):
self.run_timer.start(50)
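        # Note (added): nothing in this file connects run_timer.timeout to a
        # slot, so starting the timer has no visible effect on its own;
        # presumably it was meant to drive request_next() frame stepping.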
@pyqtSlot()
def stop_tracking(self):
self.run_timer.stop() | [
"[email protected]"
] | |
5b30f53fa91195defc275fee279d8d3e27640433 | 60be9b44eab355f99fd362844e45ca5d0f1ed63f | /app/sites/index.py | 0e8fd7b694f5fac6653827e93f52cccf854ff87a | [
"Apache-2.0"
] | permissive | stanman71/Miranda | 7c8f4f5b714476cb5b0fb49e3db510ee9788c40b | f6c9a7f2c3a51f37b35f8930214fec7bbe9c0d9b | refs/heads/master | 2023-02-09T04:24:48.700539 | 2019-12-17T22:13:47 | 2019-12-17T22:13:47 | 170,350,778 | 0 | 0 | Apache-2.0 | 2023-01-23T22:21:43 | 2019-02-12T16:23:33 | Python | UTF-8 | Python | false | false | 4,103 | py | from flask import render_template, redirect, url_for, request
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField
from wtforms.validators import InputRequired, Email, Length
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import LoginManager, login_user, login_required, logout_user, current_user
from app import app
from app.database.database import *
from app.components.file_management import GET_CONFIG_VERSION
from flask_mobility.decorators import mobile_template
""" ############ """
""" user control """
""" ############ """
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
@login_manager.user_loader
def load_user(user_id):
return GET_USER_BY_ID(user_id)
class LoginForm(FlaskForm):
username = StringField('Benutzername:', validators=[InputRequired(), Length(min=4, max=15)])
password = PasswordField('Passwort:', validators=[InputRequired(), Length(min=8, max=80)])
remember = BooleanField('remember me')
""" ##### """
""" login """
""" ##### """
@app.route('/login')
def login():
logout_user()
return redirect(url_for('index'))
""" ##### """
""" index """
""" ##### """
@app.route('/', methods=['GET', 'POST'])
@mobile_template('{mobile/}index.html')
def index(template):
error_message = ""
form = LoginForm()
version = GET_CONFIG_VERSION()
if form.validate_on_submit():
user = GET_USER_BY_NAME(form.username.data)
if user:
if check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
if user.permission_dashboard == "checked":
return redirect(url_for('dashboard'))
elif user.permission_scheduler == "checked":
return redirect(url_for('scheduler'))
elif user.permission_programs == "checked":
return redirect(url_for('programs'))
elif user.permission_watering == "checked":
return redirect(url_for('watering'))
elif user.permission_heating == "checked":
return redirect(url_for('heating'))
elif user.permission_camera == "checked":
return redirect(url_for('camera'))
elif user.permission_led == "checked":
return redirect(url_for('led_scenes'))
elif user.permission_sensordata == "checked":
return redirect(url_for('sensordata_jobs'))
elif user.permission_spotify == "checked":
return redirect(url_for('spotify'))
elif user.permission_system == "checked":
return redirect(url_for('system_host'))
else:
error_message = "Keine Zugriffberechtigungen erteilt"
return render_template(template,
form=form,
version=version,
error_message=error_message)
return render_template(template,
form=form,
login_check=False,
version=version,
error_message=error_message)
return render_template(template,
form=form,
version=version,
error_message=error_message)
""" ###### """
""" logout """
""" ###### """
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('index'))
| [
"[email protected]"
] | |
d314838665db4cbeed7653d3e3aa30c1ab3afc99 | 491e5d8802eab189ad32d406d71fc8feee3d90ce | /models/resnet.py | 12c195927283598072f29019816fb1f00a399f00 | [
"Apache-2.0"
] | permissive | BiggerBinBin/e3d_handpose_x-master | 92fec36a1f0fc3944c3b645db4962236fb71036f | 20d091a8a019d85de26c81d02985868f79d5de84 | refs/heads/master | 2023-06-03T21:39:46.306285 | 2021-06-28T08:20:00 | 2021-06-28T08:20:00 | 380,925,987 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,920 | py | import torch
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, img_size=224,dropout_factor = 1.):
self.inplanes = 64
self.dropout_factor = dropout_factor
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(24, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
# see this issue: https://github.com/xxradon/PytorchToCaffe/issues/16
# self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
assert img_size % 32 == 0
pool_kernel = int(img_size / 32)
self.avgpool = nn.AvgPool2d(pool_kernel, stride=1, ceil_mode=True)
self.dropout = nn.Dropout(self.dropout_factor)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.dropout(x)
x = self.fc(x)
return x
def load_model(model, pretrained_state_dict):
model_dict = model.state_dict()
pretrained_dict = {k: v for k, v in pretrained_state_dict.items() if
k in model_dict and model_dict[k].size() == pretrained_state_dict[k].size()}
model.load_state_dict(pretrained_dict, strict=False)
if len(pretrained_dict) == 0:
print("[INFO] No params were loaded ...")
else:
for k, v in pretrained_state_dict.items():
if k in pretrained_dict:
print("==>> Load {} {}".format(k, v.size()))
else:
print("[INFO] Skip {} {}".format(k, v.size()))
return model
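# Note (added): with conv1 taking 24 input channels above, its weight no longer
# matches the 3-channel ImageNet checkpoints, so load_model() filters it out,
# prints "[INFO] Skip ...", and conv1 keeps its random initialisation.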
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
# model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
print("Load pretrained model from {}".format(model_urls['resnet18']))
pretrained_state_dict = model_zoo.load_url(model_urls['resnet18'])
model = load_model(model, pretrained_state_dict)
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
# model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
print("Load pretrained model from {}".format(model_urls['resnet34']))
pretrained_state_dict = model_zoo.load_url(model_urls['resnet34'])
model = load_model(model, pretrained_state_dict)
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
# model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
print("Load pretrained model from {}".format(model_urls['resnet50']))
pretrained_state_dict = model_zoo.load_url(model_urls['resnet50'])
model = load_model(model, pretrained_state_dict)
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
# model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
print("Load pretrained model from {}".format(model_urls['resnet101']))
pretrained_state_dict = model_zoo.load_url(model_urls['resnet101'])
model = load_model(model, pretrained_state_dict)
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
# model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
print("Load pretrained model from {}".format(model_urls['resnet152']))
pretrained_state_dict = model_zoo.load_url(model_urls['resnet152'])
model = load_model(model, pretrained_state_dict)
return model
if __name__ == "__main__":
input = torch.randn([1, 24, 256,256])
model = resnet34(True, num_classes=63, img_size=256)
output = model(input)
print(output.size())
| [
"[email protected]"
] | |
3156336e42c160ebe0217adf54a71445275b8580 | 5942d789bbbc4b9f416ff8f29f5b79675b44a5e0 | /main.py | b52a95088b9e3ec6d55ec7f96859600f605362de | [] | no_license | kelvin0218/sentiment-analysis | 63f909d8f19bba01add3cd782a3690bf29cd6dcb | 7976563dd95ea0bf8cfd8a8be49c5dafb9a587e0 | refs/heads/master | 2021-01-18T17:55:31.099904 | 2017-03-30T17:32:08 | 2017-03-30T17:32:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | from calculator import get_document_score
from dict_parser import get_seeddict, parse_words
# parameters
positive_words_path = 'data/positive.txt'
negative_words_path = 'data/negative.txt'
documents = ['data/1155049829.txt', 'data/1155047854.txt', 'data/LAW_Yue_Hei.txt', 'data/Tse_Ching_Hin.txt']
documents_scores = dict()
# start program
print ("Parsing positive and negative words\n")
word_dict = parse_words(positive_words_path, negative_words_path)
print ("Getting synonyms and antonyms\n")
seed_dict = get_seeddict(word_dict)
for document in documents:
documents_scores[document] = get_document_score(document, seed_dict)
print(documents_scores) | [
"[email protected]"
] | |
a4e02c50f0f1c0abe848c0fdb95ff605b373cb41 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/artificial/transf_RelativeDifference/trend_PolyTrend/cycle_12/ar_/test_artificial_128_RelativeDifference_PolyTrend_12__20.py | 9e2333143f576380e7be2ca9f066c64e849a8553 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 274 | py | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 12, transform = "RelativeDifference", sigma = 0.0, exog_count = 20, ar_order = 0); | [
"[email protected]"
] | |
81bb1e1f9c2972273b89457532534d9ba321654c | 951cacd5126333d2d79d1f982c910e6540e171b9 | /Sem2/Diccionario.py | a7b18f50d22ba0b36e4e603b4bd7250674a15a09 | [] | no_license | ArrobaAnderson/Ejer_Class | bcb458117d4f76435309855761137f9fe68a38cc | 7219f13115ebdc4f26fc1beea2b8269c48272067 | refs/heads/main | 2023-07-25T02:31:04.578390 | 2021-09-12T21:21:42 | 2021-09-12T21:21:42 | 381,526,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 938 | py | class Sintaxis:
    instancia = 0
    def __init__(self, dato="LLamando al constructor1"):
        self.frase = dato
        Sintaxis.instancia = Sintaxis.instancia + 1
    def usoVariables(self):
        edad, _peso = 21, 70.5
        nombres = "Leonardo Arroba"
        dirDomiciliaria = "El Triunfo"
        Tipo_sexo = "M"
        civil = True
        usuario = ()
        usuario = ('Zancrow', '186', '[email protected]')
        materias = []
        materias = ['Programacion Web', 'PHP', 'POO']
        estudiante = {}
        estudiante = {"nombre": "Anderson", "edad": 21}
        edad = estudiante["edad"]
        estudiante["edad"] = 18
        print(usuario, usuario[0], usuario[0:2], usuario[-1])
        print(nombres, nombres[0], nombres[0:2], nombres[-1])
        print(materias, materias[2:], materias[:3], materias[::-1], materias[-2:])
ejer1 = Sintaxis()
ejer1.usoVariables()
| [
"[email protected]"
] | |
8230d90958c4f5869861ca03f80fd035f00215c8 | bd6ee708eb52ba812fd6bbdaeaf306cfdc460871 | /two_input.py | bfc4b64e2af00e4477139ffb130cdef6ba528574 | [] | no_license | amitkcodes/Work_with_Python | 63d8c51cde1f4473713b93516da831839585bc00 | d344377517e93dd220e000ec9fa169c8d522f769 | refs/heads/master | 2022-12-05T11:52:31.961100 | 2020-08-28T13:54:38 | 2020-08-28T13:54:38 | 288,724,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | name=input("enter your name:")
age = input("enter your age: ")
name, age = input("enter your name and age (comma separated): ").split(",")
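# e.g. typing "Alice,30" leaves name = "Alice" and age = "30" (both stay strings)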
print(name)
print(age) | [
"[email protected]"
] | |
003d147d9cb28d4559f42582ffd787be9461f2fb | 7451f5689de3a59bb969386b08ccc97afe7060af | /venv/Scripts/pip3.7-script.py | d3b3e6b0f2715971555d9dee70d2431495314ae2 | [] | no_license | MAlj11/schoolNews | 3a7c75d18d9cb81e37acb50d5fa7da66a1d7c6e1 | 3462ea971139a673c19400782e88d115e2b57399 | refs/heads/master | 2020-05-15T16:33:17.782793 | 2019-04-20T10:21:32 | 2019-04-20T10:21:32 | 182,391,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | #!D:\data\pyCharmPro\schoolnews\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
| [
"[email protected]"
] | |
5f855dfe6efe2d207b2cc5baf4d6f5215d2f6a31 | 4fb9150b08a128571ed4a84897c8c95afb76ccb6 | /healthy/migrations/0007_auto_20161129_1533.py | e63a458477a759da7263b9dbbebeb0e8afbbee49 | [] | no_license | eduarde/ChunkyMonkeys | 815feb7f3e6e2085babb61d12f2255ea2cb46ada | 34f30e6aaeef6af15aa12e6d599f55d67c6fb7d7 | refs/heads/master | 2021-07-09T21:30:49.084584 | 2016-12-05T10:42:04 | 2016-12-05T10:42:04 | 58,738,867 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-11-29 13:33
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('healthy', '0006_auto_20161129_1411'),
]
operations = [
migrations.RemoveField(
model_name='labdetail',
name='lab_ref',
),
migrations.DeleteModel(
name='LabDetail',
),
]
| [
"[email protected]"
] | |
a2d657554ee2f2749d08a9a999fa535e8aed9c8a | 95a6d926cfce240ef79e58c1a237413aa2ed6553 | /kmom04/modules/energy_calculation.py | ace5b8987679c963f56e2ab8387b3f6f580ba590 | [] | no_license | pamo18/dbwebb-python | 313a142c52eb1bf695c112ae02d28ad9ebc430dd | 992da9fe53fa8eceb1677dfbe5310f417f5709f4 | refs/heads/master | 2022-11-14T13:15:53.104136 | 2020-07-15T15:04:03 | 2020-07-15T15:04:03 | 279,899,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Functions"""
# energy_calculation.py
def calculate_energy(time_in_microwave, effect=800):
"""
    Calculates the energy consumption in kWh
Returns the consumption
"""
energy = effect * time_in_microwave / 1000
return energy
def calculate_cost(energy, price_per_kwh=78.04):
"""
Calculates the cost for a given energy consumption
Returns the cost in kr
"""
cost = energy * price_per_kwh / 100
return cost
if __name__ == "__main__":
print("Test av calculate energy:")
print(calculate_energy(800))
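    # Added check (not in the original lab): chain the two helpers to get the
    # cost in kr for the same consumption.
    print("Test of calculate_cost:")
    print(calculate_cost(calculate_energy(800)))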
print("name: " + __name__)
| [
"[email protected]"
] |