blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f06a21f022b3d3742cee8df6c8048fcc34022202
|
a51854991671a4389902945578288da34845f8d9
|
/libs/UserInterface/TestPages/LampHolderTest.py
|
e9567659c28b0e4822d07ddbb3702556f7e9276b
|
[] |
no_license
|
wuyou1102/DFM_B2
|
9210b4b8d47977c50d92ea77791f477fa77e5f83
|
69ace461b9b1b18a2269568110cb324c04ad4266
|
refs/heads/master
| 2020-04-13T18:54:20.045734 | 2019-06-17T12:46:23 | 2019-06-17T12:46:23 | 163,387,873 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,838 |
py
|
# -*- encoding:UTF-8 -*-
import wx
import logging
import Base
from libs import Utility
from libs.Config import Font
from libs.Config import Color
from libs.Config import String
logger = logging.getLogger(__name__)
class LampHolder(Base.TestPage):
    """Fixture test page: lets the operator switch on the fixture LEDs and
    visually confirm that all indicator lamps light up (pass/fail by eye)."""

    def __init__(self, parent, type):
        # NOTE: `type` shadows the builtin name but is kept for interface
        # compatibility with Base.TestPage and existing callers.
        Base.TestPage.__init__(self, parent=parent, type=type)
        self.count = 0  # click counter; initialized here, not read in this class

    def init_test_sizer(self):
        """Build the page layout: an 'LED on' button above a read-only
        instruction/log text box. Returns the populated vertical sizer."""
        sizer = wx.BoxSizer(wx.VERTICAL)
        turn_on_button = wx.Button(self, wx.ID_ANY, u"开启LED", wx.DefaultPosition, (-1, 60), 0)
        turn_on_button.SetFont(Font.NORMAL_20_BOLD)
        turn_on_button.Bind(wx.EVT_BUTTON, self.on_button_click)
        output = wx.TextCtrl(self, -1, "", style=wx.TE_MULTILINE | wx.TE_READONLY)
        # Operator instructions (Chinese): check that all fixture indicator
        # lamps are lit; all lit => PASS, anything else => FAIL.
        output.AppendText(u"请检查治具上的指示灯是否全亮\n")
        output.AppendText(u"\n")
        output.AppendText(u"判断条件:\n")
        output.AppendText(u"    指示灯全亮     PASS\n")
        output.AppendText(u"    其他情况       FAIL\n")
        output.SetInsertionPointEnd()
        output.SetBackgroundColour(Color.LightSkyBlue1)
        output.SetFont(Font.DESC)
        sizer.Add(turn_on_button, 0, wx.EXPAND | wx.ALL, 1)
        sizer.Add(output, 1, wx.EXPAND | wx.ALL, 1)
        return sizer

    def before_test(self):
        # No setup needed before this visual test.
        pass

    def on_button_click(self, event):
        """Button handler: unload the protocol stack, then show a 3-second
        countdown dialog while the LEDs are being switched on."""
        comm = self.get_communicate()
        if comm.unload_protocol_stack():
            dlg = Utility.Alert.CountdownDialog(u"正在开启LED灯")
            dlg.Countdown(3)

    def start_test(self):
        self.FormatPrint(info="Started")

    def stop_test(self):
        self.FormatPrint(info="Stop")

    @staticmethod
    def GetName():
        # Display name of this test page ("Lamp holder test").
        return u"灯座测试"

    @staticmethod
    def GetFlag(t):
        # NOTE(review): implicitly returns None for any t other than "PCBA" —
        # confirm callers expect that.
        if t == "PCBA":
            return String.LAMP_HOLDER_PCBA
|
[
"[email protected]"
] | |
a22ffc16dfff771c3f037f2cf3410d17066bbd79
|
1f080333f1714ba88d4f41d6ce2676f0b299e05e
|
/.venv/bin/maf_extract_ranges_indexed.py
|
011751629233c72c0d998a7fdd8de77cfa72ed42
|
[] |
no_license
|
venice-juanillas/EIB-hackathon
|
b66bf128144dcef893c91af84dc28ff48be08e1b
|
6b73babff2b88dccbd5ec2e74bd5737ff0a4270b
|
refs/heads/master
| 2022-11-17T23:52:24.365210 | 2018-04-05T01:56:17 | 2018-04-05T01:56:17 | 120,545,413 | 0 | 1 | null | 2022-10-25T18:54:52 | 2018-02-07T01:19:48 |
Python
|
UTF-8
|
Python
| false | false | 4,702 |
py
|
#!/home/galaxy/data/galaxy_17.09/.venv/bin/python2.7
"""
Reads a list of intervals and a maf. Produces a new maf containing the
blocks or parts of blocks in the original that overlapped the intervals.
It is assumed that each file `maf_fname` has a corresponding `maf_fname`.index
file.
NOTE: If two intervals overlap the same block it will be written twice. With
non-overlapping intervals and --chop this is never a problem.
NOTE: Intervals are origin-zero, half-open. For example, the interval 100,150
is 50 bases long, and there are 100 bases to its left in the sequence.
NOTE: Intervals are relative to the + strand, regardless of the strands in
the alignments.
WARNING: bz2/bz2t support and file cache support are new and not as well
tested.
usage: %prog maf_fname1 maf_fname2 ... [options] < interval_file
-m, --mincols=0: Minimum length (columns) required for alignment to be output
-c, --chop: Should blocks be chopped to only portion overlapping (no by default)
-s, --src=s: Use this src for all intervals
-p, --prefix=p: Prepend this to each src before lookup
-d, --dir=d: Write each interval as a separate file in this directory
-S, --strand: Strand is included as an additional column, and the blocks are reverse complemented (if necessary) so that they are always on that strand w/r/t the src species.
-C, --usecache: Use a cache that keeps blocks of the MAF files in memory (requires ~20MB per MAF)
"""
import psyco_full
from bx.cookbook import doc_optparse
import bx.align.maf
from bx import misc
import os
import sys
def main():
    """Read intervals from stdin and write the overlapping MAF blocks.

    Options are parsed from the module docstring (bx doc_optparse style);
    each input line is "start end [strand]" (with --src) or
    "src start end [strand]" otherwise.  Output goes to stdout, or to one
    file per interval when --dir is given.
    """
    # Parse Command Line
    options, args = doc_optparse.parse( __doc__ )
    try:
        maf_files = args
        if options.mincols: mincols = int( options.mincols )
        else: mincols = 0
        if options.src: fixed_src = options.src
        else: fixed_src = None
        if options.prefix: prefix = options.prefix
        else: prefix = None
        if options.dir: dir = options.dir
        else: dir = None
        chop = bool( options.chop )
        do_strand = bool( options.strand )
        use_cache = bool( options.usecache )
    except:
        # Any parse/conversion failure -> print usage and exit (deliberate
        # catch-all for CLI ergonomics).
        doc_optparse.exit()
    # Open indexed access to mafs (each maf_fname must have a .index file)
    index = bx.align.maf.MultiIndexed( maf_files, keep_open=True,
                                       parse_e_rows=True,
                                       use_cache=use_cache )
    # Start MAF on stdout
    if dir is None:
        out = bx.align.maf.Writer( sys.stdout )
    # Iterate over input ranges
    for line in sys.stdin:
        strand = None
        fields = line.split()
        if fixed_src:
            src, start, end = fixed_src, int( fields[0] ), int( fields[1] )
            if do_strand: strand = fields[2]
        else:
            src, start, end = fields[0], int( fields[1] ), int( fields[2] )
            if do_strand: strand = fields[3]
        if prefix: src = prefix + src
        # Find overlap with reference component
        blocks = index.get( src, start, end )
        # Open file if needed (one MAF per interval under --dir)
        if dir:
            out = bx.align.maf.Writer( open( os.path.join( dir, "%s:%09d-%09d.maf" % ( src, start, end ) ), 'w' ) )
        # Write each intersecting block
        if chop:
            for block in blocks:
                for ref in block.get_components_by_src( src ):
                    # Intersect the interval with the block's span on src
                    # (forward-strand coordinates).
                    slice_start = max( start, ref.get_forward_strand_start() )
                    slice_end = min( end, ref.get_forward_strand_end() )
                    if (slice_end <= slice_start): continue
                    sliced = block.slice_by_component( ref, slice_start, slice_end )
                    # If the block is shorter than the minimum allowed size, stop
                    if mincols and ( sliced.text_size < mincols ):
                        continue
                    # If the reference component is empty, don't write the block
                    if sliced.get_component_by_src( src ).size < 1:
                        continue
                    # Keep only components that are not empty
                    sliced.components = [ c for c in sliced.components if c.size > 0 ]
                    # Reverse complement if needed (--strand mode only)
                    if ( strand != None ) and ( ref.strand != strand ):
                        sliced = sliced.reverse_complement()
                    # Write the block
                    out.write( sliced )
        else:
            # Unchopped: emit whole blocks that overlap the interval.
            for block in blocks:
                out.write( block )
        if dir:
            out.close()
    # Close output MAF (stdout writer when --dir was not given)
    out.close()
    index.close()

if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
dcda4ae98e5ceea8422c2a9d5b281462addc5b6e
|
4047b91585245c3ee5ea6c50a620dadf74636bc3
|
/phylobot/phylobot/admin.py
|
e38df56d65328e8a83b088332fe4a4404c4facb6
|
[] |
no_license
|
httang12/phylobot-django
|
fd371cc870f444cf94179d6a3cc6d23e9895186c
|
b535edfd1ee09dab02421ba22d96d48b3f611dad
|
refs/heads/master
| 2020-04-15T12:53:13.349661 | 2018-02-15T08:46:08 | 2018-02-15T08:46:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 345 |
py
|
# Django admin registrations for the phylobot app.
from django.contrib import admin
admin.autodiscover()

from phylobot.models import *
from phylobot.models_aws import *

# NOTE(review): leftover debug output (Python 2 print statement) that runs on
# every import of this module — consider removing.
print "\n\n\n phylobot admin\n\n\n"

# Expose the app's models in the admin site.
admin.site.register(UserProfile)
admin.site.register(AncestralLibrary)
admin.site.register(AWSConfiguration)
admin.site.register(ViewingPrefs)
admin.site.register(AncestralLibrarySourceJob)
|
[
"[email protected]"
] | |
5dd9789f49b6bf5e26968ad8d2ac344ebc993ed3
|
fcca7ebb332ae400b82f7d75d424ace30e35963c
|
/apps/elasticity/stegoton/plot_comparison.py
|
6f3eaab6264e7dee56852f1672d4f2d87a7f8564
|
[] |
no_license
|
clawpack/sharpclaw
|
5d2812149b28a09bfb626daf057fd27e4ab2f6a5
|
7c9782d932a449b92c875ff341a16bf00f0cc630
|
refs/heads/master
| 2021-01-04T14:06:42.001372 | 2013-11-28T15:19:26 | 2013-11-28T15:19:26 | 1,613,567 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,147 |
py
|
# Compare SharpClaw vs ClawPack stegoton solutions on one figure (frame 100).
from pyclaw.data import ClawPlotData
from pyclaw.plotting import plotframe

plotdata = ClawPlotData()
plotdata.outdir = '.'  # SharpClaw results are read from the current directory

# Figure:
plotfigure = plotdata.new_plotfigure(name='Solution', figno=1)
plotfigure.kwargs = {'figsize':[5,3]}

# Axes:
plotaxes = plotfigure.new_plotaxes(name='Strain')
#plotaxes.xlim = [73,79]

# Item 1: SharpClaw run, blue squares.
plotitem = plotaxes.new_plotitem(name='SharpClaw 3600', plot_type='1d')
plotitem.plot_var = 0       # q[2] is the stress
plotitem.plotstyle = 's'
plotitem.color = 'b'  # could use 'r' or 'red' or '[1,0,0]'
plotitem.kwargs = {'linewidth':3,'markersize':10}

# Item 2: ClawPack run read from a hard-coded results directory, red circles.
plotitem = plotaxes.new_plotitem(name='ClawPack 3600', plot_type='1d')
plotitem.outdir = '/users/ketch/research/claw42/fwave2/3600'
plotitem.plot_var = 0       # q[2] is the stress
plotitem.plotstyle = 'o'
plotitem.color = 'r'
plotitem.kwargs = {'linewidth':3,'markersize':10}

#plotitem = plotaxes.new_plotitem(name='ClawPack 28800', plot_type='1d')
#plotitem.outdir = '/users/ketch/research/claw42/fwave2/'
#plotitem.plot_var = 0       # q[2] is the stress
#plotitem.plotstyle = '-'
#plotitem.color = 'k'
#plotitem.kwargs = {'linewidth':3}

plotdata.plotframe(100)
|
[
"[email protected]"
] | |
fead5b51476cb0ee7d01cbd4d92adfe47ece5082
|
32a6ac6cbec63296ba68838ad4699b995810c6cd
|
/compiled/construct/debug_enum_name.py
|
f557f7c82a5e810c80400f8ac4c1aa17e88d975e
|
[
"MIT"
] |
permissive
|
smarek/ci_targets
|
a33696ddaa97daa77c0aecbdfb20c67546c729bc
|
c5edee7b0901fd8e7f75f85245ea4209b38e0cb3
|
refs/heads/master
| 2022-12-01T22:54:38.478115 | 2020-08-10T13:36:36 | 2020-08-19T07:12:14 | 286,483,420 | 0 | 0 |
MIT
| 2020-08-10T13:30:22 | 2020-08-10T13:30:21 | null |
UTF-8
|
Python
| false | false | 914 |
py
|
# Auto-generated Construct (Python) schema for the Kaitai Struct
# "debug_enum_name" test format — regenerate rather than hand-edit.
from construct import *
from construct.lib import *

def debug_enum_name__test_subtype__inner_enum1(subcon):
    # Wrap `subcon` so raw value 67 round-trips as the symbolic name.
    return Enum(subcon,
        enum_value_67=67,
    )

def debug_enum_name__test_subtype__inner_enum2(subcon):
    return Enum(subcon,
        enum_value_11=11,
    )

debug_enum_name__test_subtype = Struct(
    'field1' / debug_enum_name__test_subtype__inner_enum1(Int8ub),
    'field2' / Int8ub,
    # NOTE(review): the lambda references KaitaiStream and DebugEnumName,
    # neither of which is defined or imported in this module — confirm they
    # are available wherever this schema is evaluated.
    'instance_field' / Computed(lambda this: KaitaiStream.resolve_enum(DebugEnumName.TestSubtype.InnerEnum2, (this.field2 & 15))),
)

def debug_enum_name__test_enum1(subcon):
    return Enum(subcon,
        enum_value_80=80,
    )

def debug_enum_name__test_enum2(subcon):
    return Enum(subcon,
        enum_value_65=65,
    )

# Top-level record: one enum byte, a 1-element enum array, then the subtype.
debug_enum_name = Struct(
    'one' / debug_enum_name__test_enum1(Int8ub),
    'array_of_ints' / Array(1, debug_enum_name__test_enum2(Int8ub)),
    'test_type' / LazyBound(lambda: debug_enum_name__test_subtype),
)

_schema = debug_enum_name
|
[
"[email protected]"
] | |
39f1ab98e67afeba433bba71016769cf604ee099
|
b7e6cdf094baaee9d6e5034c2355641fbf9138d7
|
/824. 山羊拉丁文.py
|
fcd4e950c0e3c85ebf53f970708644e021c0f2ce
|
[] |
no_license
|
heshibo1994/leetcode-python-2
|
04296c66cd6d1fe58880062aeafdbe9d474b7d2e
|
3ea32f03bd453743b9b81de9871fad7ac67ced90
|
refs/heads/master
| 2020-05-23T21:49:01.367969 | 2019-09-30T03:31:27 | 2019-09-30T03:31:27 | 186,961,972 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,333 |
py
|
# 给定一个由空格分割单词的句子 S。每个单词只包含大写或小写字母。
#
# 我们要将句子转换为 “Goat Latin”(一种类似于 猪拉丁文 - Pig Latin 的虚构语言)。
#
# 山羊拉丁文的规则如下:
#
# 如果单词以元音开头(a, e, i, o, u),在单词后添加"ma"。
# 例如,单词"apple"变为"applema"。
#
# 如果单词以辅音字母开头(即非元音字母),移除第一个字符并将它放到末尾,之后再添加"ma"。
# 例如,单词"goat"变为"oatgma"。
#
# 根据单词在句子中的索引,在单词最后添加与索引相同数量的字母'a',索引从1开始。
# 例如,在第一个单词后添加"a",在第二个单词后添加"aa",以此类推。
#
# 返回将 S 转换为山羊拉丁文后的句子。
# 输入: "I speak Goat Latin"
# 输出: "Imaa peaksmaaa oatGmaaaa atinLmaaaaa
class Solution:
    def toGoatLatin(self, S):
        """Convert sentence S (space-separated words) to Goat Latin.

        Rules (LeetCode 824): a vowel-initial word gets "ma" appended; a
        consonant-initial word moves its first letter to the end, then gets
        "ma"; word i (1-based) additionally gets i trailing 'a' characters.

        Returns the transformed sentence, e.g.
        "I speak Goat Latin" -> "Imaa peaksmaaa oatGmaaaa atinLmaaaaa".
        """
        out = []
        for i, word in enumerate(S.split(" "), 1):
            if not word:
                # Defensive: keep empty tokens untouched instead of crashing
                # on word[0] (inputs are normally single-space separated).
                out.append(word)
            elif word[0] in "aeiouAEIOU":
                out.append(word + "ma" + "a" * i)
            else:
                out.append(word[1:] + word[0] + "ma" + "a" * i)
        # Join with single spaces: fixes the original's trailing-space bug
        # (it appended " " after every word, contradicting the expected
        # output shown in the header comment).  Debug print also removed.
        return " ".join(out)
# Quick manual check of the converter.
solver = Solution()
print(solver.toGoatLatin("I speak Goat Latin"))
|
[
"[email protected]"
] | |
c74363ec7f3ffb330ff7eb6cc99754b2dfbc69e4
|
0e7be557833f38fef17b5eaa57c331a96148ad5e
|
/Assets/Python/StrategyOnly/Heroes.py
|
895ee42ac8234f910d9e6bebc4e54df85577387d
|
[] |
no_license
|
Thunderbrd/Caveman2Cosmos
|
9f38961c638b82099b0601c22f8e90a1c98daa1e
|
b99aca8e56fb2a1fae48abd424dc0060a1d1fc1a
|
refs/heads/master
| 2022-01-12T19:40:32.586456 | 2019-07-21T22:00:09 | 2019-07-21T22:00:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,886 |
py
|
## By StrategyOnly converted to BUG by Dancing Hoskuld
from CvPythonExtensions import *
import CvEventInterface
import CvUtil
import BugUtil
import PyHelpers
import Popup as PyPopup
import SdToolKit as SDTK
# Shared Civ4 API handles and helper aliases.
gc = CyGlobalContext()
localText = CyTranslator()
PyPlayer = PyHelpers.PyPlayer
PyInfo = PyHelpers.PyInfo

# Unit-class/unit ids; placeholders until init() resolves them from the XML.
giSparticus = -1
giGladiator = -1
def init():
    """Resolve the Spartacus unit-class id and Gladiator unit id once the
    game's XML info is loaded, storing them in the module globals."""
    global giSparticus, giGladiator
    giSparticus = gc.getInfoTypeForString('UNITCLASS_SPARTACUS')
    giGladiator = CvUtil.findInfoTypeNum(gc.getUnitInfo,gc.getNumUnitInfos(),'UNIT_GLADIATOR')
def onUnitBuilt(self, argsList):
    'Unit Completed'
    # Event handler: argsList is (city, unit).  NOTE(review): `self` on a
    # module-level function suggests this is wired as a bound-style BUG event
    # handler — confirm against the event manager.
    city = argsList[0]
    unit = argsList[1]
    player = PyPlayer(city.getOwner())

    # NOTE(review): CvAdvisorUtils is not imported in this module's visible
    # import block — this line would raise NameError unless it is injected
    # elsewhere; verify.
    CvAdvisorUtils.unitBuiltFeats(city, unit)

    ## Hero Movies ##
    # Show the wonder-style movie popup when the active (non-network-MP)
    # player completes a world-unique (hero) unit.
    if not CyGame().isNetworkMultiPlayer() and city.getOwner() == CyGame().getActivePlayer() and isWorldUnitClass(unit.getUnitClassType()):
        popupInfo = CyPopupInfo()
        popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON_SCREEN)
        popupInfo.setData1(unit.getUnitType())
        popupInfo.setData2(city.getID())
        popupInfo.setData3(4)
        popupInfo.setText(u"showWonderMovie")
        popupInfo.addPopup(city.getOwner())
    ## Hero Movies ##
## Hero Movies ##
def onCombatResult(argsList):
    'Combat Result'
    # Event handler: argsList is (winning unit, losing unit).
    pWinner,pLoser = argsList
    playerX = PyPlayer(pWinner.getOwner())
    unitX = PyInfo.UnitInfo(pWinner.getUnitType())
    playerY = PyPlayer(pLoser.getOwner())
    unitY = PyInfo.UnitInfo(pLoser.getUnitType())
    pPlayer = gc.getPlayer(pWinner.getOwner())

    ## BTS HEROS - Spartacus Capture Event Start ##
    # When Spartacus wins, 25% chance to "capture" the loser: the loser is
    # killed and a Gladiator is spawned for the winner's player.
    if pWinner.getUnitClassType() == giSparticus:
        ## Capture % Random # 0 to 3 or 25% ##
        iNewGladiatorNumber = getRandomNumber( 3 )
        if iNewGladiatorNumber == 0:
            pClearPlot = findClearPlot(pLoser)
            # If the loser is alone on its plot and an adjacent clear plot
            # exists, move it there so the Gladiator appears on the old plot;
            # otherwise spawn on the winner's plot.
            if (pLoser.plot().getNumUnits() == 1 and pClearPlot != -1):
                pPlot = pLoser.plot()
                pLoser.setXY(pClearPlot.getX(), pClearPlot.getY(), False, True, True)
            else:
                pPlot = pWinner.plot()
            pPID = pPlayer.getID()
            newUnit = pPlayer.initUnit(giGladiator, pPlot.getX(), pPlot.getY(), UnitAITypes.NO_UNITAI, DirectionTypes.DIRECTION_NORTH)
            # Overkill damage guarantees the loser dies.
            pLoser.setDamage(100000, False)
##			newUnit.convert(pLoser)
##			pLoser.setDamage(100, False)
            newUnit.finishMoves()
            iXa = pLoser.getX()
            iYa = pLoser.getY()
            CyInterface().addMessage(pPID,False,15,CyTranslator().getText("TXT_KEY_SPARTACUS_CAPTURE_SUCCESS",()),'',0,',Art/Interface/Buttons/Units/ICBM.dds,Art/Interface/Buttons/Warlords_Atlas_1.dds,3,11',ColorTypes(44), iXa, iYa, True,True)
    ## BTS HEROS - Spartacus Capture End ##

    ## Field Medic Start ##
    # Winners with the Retinue Messenger promotion have a 10% chance to be
    # fully healed, at most once per game turn (tracked via SdToolKit).
    if pWinner.isHasPromotion(gc.getInfoTypeForString('PROMOTION_RETINUE_MESSENGER')):
        iHealChance = getRandomNumber( 9 )
        if iHealChance == 0:
            if ( not SDTK.sdObjectExists('Heroes', pWinner) ) :
                iHealTurn = -1
            else :
                iHealTurn = SDTK.sdObjectGetVal( 'Heroes', pWinner, 'HealTurn' )
            if( iHealTurn == None or gc.getGame().getGameTurn() > iHealTurn ) :
                pWinner.setDamage(0, False)
                if ( not SDTK.sdObjectExists('Heroes', pWinner) ) :
                    SDTK.sdObjectInit('Heroes', pWinner, {})
                SDTK.sdObjectSetVal( 'Heroes', pWinner, 'HealTurn', gc.getGame().getGameTurn() )
    ## Field Medic End ##
## Field Medic End ##
def findClearPlot(pUnit):
    """Pick a unit-free plot adjacent to pUnit's plot (3x3 neighborhood).

    Empty plots score 5 plus a random 0-4 tiebreaker; returns the chosen
    CyPlot, or -1 if every neighboring plot is occupied.
    NOTE(review): coordinates are not wrapped/clamped at the map edge —
    relies on CyMap().plot() tolerating out-of-range input; confirm.
    """
    BestPlot = -1
    iBestPlot = 0
    pOldPlot = pUnit.plot()
    iX = pOldPlot.getX()
    iY = pOldPlot.getY()
    for iiX in range(iX-1, iX+2, 1):
        for iiY in range(iY-1, iY+2, 1):
            iCurrentPlot = 0
            pPlot = CyMap().plot(iiX,iiY)
            if pPlot.getNumUnits() == 0:
                iCurrentPlot = iCurrentPlot + 5
            if iCurrentPlot >= 1:
                iCurrentPlot = iCurrentPlot + CyGame().getSorenRandNum(5, "findClearPlot")
            if iCurrentPlot >= iBestPlot:
                BestPlot = pPlot
                iBestPlot = iCurrentPlot
    return BestPlot
return BestPlot
def getRandomNumber(iNum):
    """Return a synced random integer in [0, iNum) via the Soren RNG
    (multiplayer-safe).  Parameter renamed from `int`, which shadowed the
    builtin; all in-file callers pass it positionally, so this is safe."""
    return CyGame().getSorenRandNum(iNum, "Gods")
|
[
"raxo2222@8bbd16b5-4c62-4656-ae41-5efa6c748c97"
] |
raxo2222@8bbd16b5-4c62-4656-ae41-5efa6c748c97
|
f7e3e4d0eb43e1b66081962b0ee6cdd9d6a3694b
|
39c80306080defbde999f1af05ae5993f22d7fd7
|
/oxford_astrazeneca/tests/q_calc_efficiency.py
|
07076b1e2e93744fc876d8399910625d47330256
|
[] |
no_license
|
uob-cfd/spe
|
47931d724792fbe812de49ac489a7e88cca52e1d
|
f7c76b766bffec71b80febd0dbc79e12aec3a11c
|
refs/heads/master
| 2023-02-04T20:45:44.411481 | 2020-12-27T19:03:52 | 2020-12-27T19:03:52 | 321,508,088 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,259 |
py
|
test = {
'name': 'Question calc_efficiency',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> # You need to define the function 'calc_efficiency'
>>> 'calc_efficiency' in vars()
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # calc_efficiency should be a function.
>>> callable(calc_efficiency)
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # Oops, have you deleted 'ox_vax'?
>>> 'ox_vax' in vars()
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # Oops, have you deleted 'vax_eff'?
>>> 'vax_eff' in vars()
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> calc_efficiency(ox_vax) == vax_eff
True
""",
'hidden': False,
'locked': False
},
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
|
[
"[email protected]"
] | |
fbbbc52eab2329bda34461328893fba1754e20a0
|
70934fe6d0feed93994a98650a543832897e69ae
|
/sinaweibo.bak/weibo_factory.py
|
95cfe4e1548a4d14a120107301c0a8c022bd7623
|
[] |
no_license
|
zy-sunshine/sunblackshineblog
|
d1d3f0f69d6b8c006e70a0601bc0d520ec5002bb
|
ea7d1e2f8477a238501cecf8e63fd3d7a2a945c9
|
refs/heads/master
| 2021-01-01T16:19:52.954701 | 2011-10-29T17:12:59 | 2011-10-29T17:12:59 | 2,570,586 | 1 | 0 | null | null | null | null |
GB18030
|
Python
| false | false | 1,878 |
py
|
#encoding=utf8
#file:weibo_factory.py
#@author:carlos
#@date:2011-2-13
#@link:tieniuzai.com
from weibopy.auth import BasicAuthHandler
from weibopy.api import API
class SinaWeibo:
    """Thin wrapper around the weibopy Sina Weibo API client (Python 2).

    Authenticates with basic auth and exposes helpers that flatten API
    objects into plain dicts for the blog frontend.
    """

    def __init__(self,username,password):
        self.username = username
        self.password = password
        # App key, obtained after registering a Sina Weibo developer
        # account and creating an application.
        self.source ="app key"

    def getAtt(self, key):
        """Read attribute `key` from self.obj; returns '' (and prints the
        error) when the attribute is missing."""
        try:
            return self.obj.__getattribute__(key)
        except Exception, e:
            print e
            return ''

    def getAttValue(self, obj, key):
        """Like getAtt, but reads from an explicit object instead of self.obj."""
        try:
            return obj.__getattribute__(key)
        except Exception, e:
            print e
            return ''

    def basicAuth(self):
        """Create the authenticated API client (self.api) via basic auth."""
        source = self.source
        self.auth = BasicAuthHandler(self.username, self.password)
        self.api = API(self.auth,source=source)

    def parse_timeline(self,timeline):
        """Flatten a timeline of status objects into a list of dicts with the
        fields the templates need (id, text, picture, author info, etc.)."""
        result = []
        for line in timeline:
            # getAtt reads from self.obj, so point it at the current status.
            self.obj = line
            item ={}
            user = self.getAtt("user")
            item['mid'] = self.getAtt("id")
            item['text'] = self.getAtt("text")
            item['pic'] = self.getAtt("thumbnail_pic")
            item['author_name']= user.screen_name
            item['author_id'] = user.id
            item['author_domain'] = user.domain
            item['author_profile_image']= user.profile_image_url
            item['created_at'] = self.getAtt('created_at')
            item['source'] = self.getAtt('source')
            item['retweeted_status'] = self.getAtt('retweeted_status')
            result.append(item)
        return result

    def get_myself(self):
        """Fetch the (hard-coded) account's profile as a small dict.
        NOTE(review): user id 1649938837 is hard-coded; the commented line
        shows the intended username-based lookup."""
        myself = self.api.get_user(id=1649938837)
        #myself = self.api.get_user(user_id=self.auth.get_username)
        self.obj = myself
        user={}
        user['profile_image_url'] = self.getAtt('profile_image_url')
        user['name']=self.getAtt("screen_name")
        user['description']=self.getAtt("description")
        use = self.auth.get_username()  # NOTE(review): result unused
        return user

    def user_timeline(self):
        """Return the first page (10 items) of the user's timeline, parsed."""
        timeline = self.api.user_timeline(count=10, page=1)
        result = self.parse_timeline(timeline)
        return result
|
[
"[email protected]"
] | |
918bf5948ed9490633028cdeed9ea000c19a2374
|
560af8e32aa77bfb6c5d837e93d0dc2dd7c9142c
|
/client_project/wsgi.py
|
fec9bea087df286ce54366f90bbc4c61b4f9094f
|
[] |
no_license
|
anirudhasj441/Fake-Api
|
c7a4aef6bf9eadc16709fe10f4cd3b526664cd4e
|
86b6e496cbecf314ef6e6366a84b8f93ce7c775b
|
refs/heads/master
| 2023-06-26T00:00:00.914657 | 2021-07-29T06:35:39 | 2021-07-29T06:35:39 | 390,625,402 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 405 |
py
|
"""
WSGI config for client_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Fall back to this project's settings unless the environment already picked
# a settings module, then build the WSGI callable that servers import.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'client_project.settings')

application = get_wsgi_application()
|
[
"[email protected]"
] | |
e4a88b54323db57cc2d1b09b8b6560d91291a6cd
|
0bfb55b41282803db96b90e7bba73d86be7e8553
|
/administration/migrations/0011_auto_20161109_1845.py
|
fad5aaa70cdd3596ffe76dba25b75bc51d8583f1
|
[
"MIT"
] |
permissive
|
OpenFurry/honeycomb
|
eebf2272f8ae95eb686ad129555dbebcf1adcd63
|
c34eeaf22048948fedcae860db7c25d41b51ff48
|
refs/heads/master
| 2021-01-11T01:52:40.978564 | 2016-12-29T18:08:38 | 2016-12-29T18:08:38 | 70,649,821 | 2 | 2 | null | 2016-12-29T18:08:39 | 2016-10-12T01:22:38 |
Python
|
UTF-8
|
Python
| false | false | 465 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-09 18:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make administration.Ban.end_date optional (blank/null allowed)."""

    dependencies = [
        ('administration', '0010_auto_20161109_0552'),
    ]

    operations = [
        migrations.AlterField(
            model_name='ban',
            name='end_date',
            field=models.DateField(blank=True, null=True),
        ),
    ]
|
[
"[email protected]"
] | |
b0173056cf09e20ef265ae0bf916c84bfd972b1a
|
86cd22354f2431087c9b3ff06188f071afb3eb72
|
/383. Ransom Note.py
|
cf3b994fb40efd594dc1c7d269120b8b5583c390
|
[] |
no_license
|
tlxxzj/leetcode
|
0c072a74d7e61ef4700388122f2270e46c4ac22e
|
06dbf4f5b505a6a41e0d93367eedd231b611a84b
|
refs/heads/master
| 2023-08-31T11:04:34.585532 | 2023-08-31T08:25:51 | 2023-08-31T08:25:51 | 94,386,828 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 408 |
py
|
class Solution:
    def canConstruct(self, ransomNote: str, magazine: str) -> bool:
        """Return True if ransomNote can be assembled from magazine's letters.

        Each magazine character may be used at most once (LeetCode 383).
        """
        # Local import keeps this snippet self-contained.
        from collections import Counter
        # Counter subtraction keeps only positive counts, i.e. the letters
        # ransomNote needs beyond what magazine supplies; an empty result
        # means every requirement is covered.  Replaces the hand-rolled
        # frequency dict with the idiomatic O(n + m) equivalent.
        return not (Counter(ransomNote) - Counter(magazine))
|
[
"[email protected]"
] | |
b307447339363ba5a9bc02068f4df4126d5e6527
|
bb824786f095fbf6e6cf627ef9c04afdb5152477
|
/apps/pages/migrations/0013_partnerspage.py
|
e96243788314b36b0fda1ef2712514fcbd92c772
|
[] |
no_license
|
Emilnurg/anas.ru
|
19000063c7db98c15261056bb9590382db362d42
|
20cee1aee02da192c9c79a51bd0898c1dba0c98f
|
refs/heads/master
| 2021-05-20T12:09:08.155749 | 2017-12-26T13:49:12 | 2017-12-26T13:49:12 | 252,287,670 | 0 | 0 | null | 2021-03-31T19:34:29 | 2020-04-01T21:10:48 |
JavaScript
|
UTF-8
|
Python
| false | false | 6,865 |
py
|
# -*- coding: utf-8 -*-
# flake8: noqa
# Generated by Django 1.10.7 on 2017-06-08 15:27
from __future__ import unicode_literals
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the PartnersPage singleton-style CMS model: page title/subtitle,
    a "How to become a dealer?" block, and a "Questions?" block, each field
    duplicated per language (base + _ru/_en/_fr, modeltranslation style)."""

    dependencies = [
        ('pages', '0012_auto_20170531_1612'),
    ]

    operations = [
        migrations.CreateModel(
            name='PartnersPage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Page title, per language.
                ('title', models.CharField(max_length=255, verbose_name='Заголовок страницы')),
                ('title_ru', models.CharField(max_length=255, null=True, verbose_name='Заголовок страницы')),
                ('title_en', models.CharField(max_length=255, null=True, verbose_name='Заголовок страницы')),
                ('title_fr', models.CharField(max_length=255, null=True, verbose_name='Заголовок страницы')),
                # Rich-text page subtitle, per language.
                ('subtitle', ckeditor_uploader.fields.RichTextUploadingField(blank=True, max_length=4096, null=True, verbose_name='Подзаголовок страницы')),
                ('subtitle_ru', ckeditor_uploader.fields.RichTextUploadingField(blank=True, max_length=4096, null=True, verbose_name='Подзаголовок страницы')),
                ('subtitle_en', ckeditor_uploader.fields.RichTextUploadingField(blank=True, max_length=4096, null=True, verbose_name='Подзаголовок страницы')),
                ('subtitle_fr', ckeditor_uploader.fields.RichTextUploadingField(blank=True, max_length=4096, null=True, verbose_name='Подзаголовок страницы')),
                # "How to become a dealer?" block: title, subtitle, body, button.
                ('howto_title', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок блока "Как стать дилером?"')),
                ('howto_title_ru', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок блока "Как стать дилером?"')),
                ('howto_title_en', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок блока "Как стать дилером?"')),
                ('howto_title_fr', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок блока "Как стать дилером?"')),
                ('howto_subtitle', models.CharField(blank=True, max_length=255, null=True, verbose_name='Подзаголовок блока "Как стать дилером?"')),
                ('howto_subtitle_ru', models.CharField(blank=True, max_length=255, null=True, verbose_name='Подзаголовок блока "Как стать дилером?"')),
                ('howto_subtitle_en', models.CharField(blank=True, max_length=255, null=True, verbose_name='Подзаголовок блока "Как стать дилером?"')),
                ('howto_subtitle_fr', models.CharField(blank=True, max_length=255, null=True, verbose_name='Подзаголовок блока "Как стать дилером?"')),
                ('howto_body', ckeditor_uploader.fields.RichTextUploadingField(blank=True, verbose_name='Контент блока "Как стать дилером?"')),
                ('howto_body_ru', ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True, verbose_name='Контент блока "Как стать дилером?"')),
                ('howto_body_en', ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True, verbose_name='Контент блока "Как стать дилером?"')),
                ('howto_body_fr', ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True, verbose_name='Контент блока "Как стать дилером?"')),
                ('howto_button_caption', models.CharField(blank=True, max_length=50, null=True, verbose_name='Текст кнопки блока "Как стать дилером?"')),
                ('howto_button_caption_ru', models.CharField(blank=True, max_length=50, null=True, verbose_name='Текст кнопки блока "Как стать дилером?"')),
                ('howto_button_caption_en', models.CharField(blank=True, max_length=50, null=True, verbose_name='Текст кнопки блока "Как стать дилером?"')),
                ('howto_button_caption_fr', models.CharField(blank=True, max_length=50, null=True, verbose_name='Текст кнопки блока "Как стать дилером?"')),
                # "Questions?" block: left title, title, subtitle, per language.
                ('questions_title_left', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок блока "Есть вопросы? (слева)"')),
                ('questions_title_left_ru', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок блока "Есть вопросы? (слева)"')),
                ('questions_title_left_en', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок блока "Есть вопросы? (слева)"')),
                ('questions_title_left_fr', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок блока "Есть вопросы? (слева)"')),
                ('questions_title', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок блока "Есть вопросы?"')),
                ('questions_title_ru', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок блока "Есть вопросы?"')),
                ('questions_title_en', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок блока "Есть вопросы?"')),
                ('questions_title_fr', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок блока "Есть вопросы?"')),
                ('questions_subtitle', models.TextField(blank=True, null=True, verbose_name='Подзаголовок блока "Есть вопросы?"')),
                ('questions_subtitle_ru', models.TextField(blank=True, null=True, verbose_name='Подзаголовок блока "Есть вопросы?"')),
                ('questions_subtitle_en', models.TextField(blank=True, null=True, verbose_name='Подзаголовок блока "Есть вопросы?"')),
                ('questions_subtitle_fr', models.TextField(blank=True, null=True, verbose_name='Подзаголовок блока "Есть вопросы?"')),
            ],
            options={
                'verbose_name': 'Страница "Дилеры"',
            },
        ),
    ]
|
[
"[email protected]"
] | |
71c44b270f1029386b8c8079cc4f51467a806a60
|
8690ca0028c54b62d68badf1753fc6151ae03525
|
/Part3 Levels of Aggregation/esem_data/Act/tpot_mnist_pipeline_triangulateAggregationLevelParticipantSplitaggr_5_groups7.py
|
d83ecdf48b217bda826b409cdf96307576b5488a
|
[] |
no_license
|
brains-on-code/conducting-and-analyzing-human-studies
|
fd74ee77fdc56cc61bdc1e0cf9bf423780f5dddc
|
548e7443f4d2bdb2db1f2858289b7d3518593c59
|
refs/heads/master
| 2021-06-26T21:30:56.386121 | 2020-12-22T13:49:16 | 2020-12-22T13:49:16 | 195,975,817 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 867 |
py
|
# TPOT-exported pipeline: StandardScaler + L2 logistic regression (C=0.1).
# The data path and separator below are placeholders the user must fill in.
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

# NOTE: Make sure that the class is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1).values
# Fixed random_state keeps the split reproducible across runs.
training_features, testing_features, training_target, testing_target = \
            train_test_split(features, tpot_data['target'].values, random_state=42)

# Score on the training set was:1.0
exported_pipeline = make_pipeline(
    StandardScaler(),
    LogisticRegression(C=0.1, dual=False, penalty="l2")
)

exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
|
[
"[email protected]"
] | |
9b8b6f9bda493cd1e8800f462021606cf91863d6
|
641f76328bfeb7e54f0793a18c5b7c00595b98fd
|
/apps/sms/serializers.py
|
4d653e2794abcb0dbb3ce297e9d2919f02b8d8f4
|
[
"Apache-2.0"
] |
permissive
|
lianxiaopang/camel-store-api
|
1d16060af92eb01607757c0423377a8c94c3a726
|
b8021250bf3d8cf7adc566deebdba55225148316
|
refs/heads/master
| 2020-12-29T13:23:18.118617 | 2020-02-09T08:38:53 | 2020-02-09T08:38:53 | 238,621,246 | 0 | 0 |
Apache-2.0
| 2020-02-07T14:28:35 | 2020-02-06T06:17:47 |
Python
|
UTF-8
|
Python
| false | false | 207 |
py
|
from rest_framework import serializers
from .models import SmsRecord
class SmsRecordSerializer(serializers.HyperlinkedModelSerializer):
    """DRF serializer exposing every SmsRecord field over the API."""

    class Meta:
        model = SmsRecord
        fields = '__all__'
|
[
"[email protected]"
] | |
b7750a59ddd97731a3db15d7ff43b162bbc4a22b
|
b9ca99a0244e5d5a07e0b27be8192ad01c4eda6c
|
/EIP/EIP系统所有流程.py
|
fad26b441598bfd879662694afe6d03b21d1d15c
|
[] |
no_license
|
Boomshakal/spider
|
c3fdbf18f874ec9953509e4ce984b5476d25839f
|
e6779a3961f48325dd4992d88f88b8b3938225d7
|
refs/heads/master
| 2021-06-17T06:22:19.679444 | 2021-03-05T06:33:36 | 2021-03-05T06:33:36 | 154,489,684 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,124 |
py
|
import requests
import json
from lxml import etree
# Endpoint of the EIP review-list service and how many pages to fetch.
url="http://eip.megmeet.com:8008/km/review/km_review_index/kmReviewIndex.do?"
maxpage=5
# NOTE(review): the session cookie below is a captured credential and will
# expire; it must be refreshed for the scraper to keep working.
headers={
    "Cookie": "j_lang=zh-CN; JSESSIONID=40ABBC9A619C5860068184B1E339BC4D",
    "User-Agent":"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36"
}
def get_onepage(page):
    """Fetch one page of the review list and print each row's title.

    Sends the list query for `page` (30 rows, newest first by creation
    time), parses the JSON response, and extracts the title span from the
    HTML fragment in each row's second column.
    """
    data = {
        "method": "list",
        "q.mydoc": "all",
        "q.j_path": "/listAll",
        "q.s_raq": "0.2347883935715236",
        "pageno": page,
        "rowsize": "30",
        "orderby": "docCreateTime",
        "ordertype": "down",
        "s_ajax": "true"
    }
    text=requests.get(url,headers=headers,params=data).text
    #print(type(text),text)
    jsons=json.loads(text)
    results=jsons.get('datas')
    for result in results:
        # Column 1 holds an HTML fragment; the document title lives in its
        # <span> elements.
        html=result[1]['value']
        html=etree.HTML(html)
        title=html.xpath('//span/text()')
        print(title)
        #print(html)
#print(html)
if __name__ == '__main__':
    # Crawl pages 1..maxpage sequentially, reporting progress per page.
    for page in range(1,maxpage+1):
        get_onepage(page)
        print("第{0}页加载完成!".format(page))
|
[
"[email protected]"
] | |
fd337b75f0eb10484074e08ba64c0b939849c29f
|
ed756885498f009618c4be880f255e5c2402d537
|
/web/feeds.py
|
d65fd10180cb506fc5739ed0d781aa1940d95fda
|
[] |
no_license
|
GunioRobot/BlancoWeb
|
c6d1d242960918a6170ed0b1432ac36ea686546f
|
cff8540c8f5bc0a372cc3500b035f1fdbbc7eab8
|
refs/heads/master
| 2021-01-20T11:31:50.434756 | 2011-02-21T22:43:42 | 2011-02-21T22:43:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 615 |
py
|
# -*- coding: utf-8 -*-
from django.contrib.syndication.feeds import Feed;
from web.models import Evento;
class EventoFeed(Feed):
    """RSS feed of Evento entries, newest first."""

    title = "Blanco Irish Tavern"
    link = "web/feeds/eventos/"
    description = "Suscríbete para conocer todas nuestras fiestas"

    def items(self):
        # Most recent events first.
        return Evento.objects.all().order_by("-fecha")

    def item_title(self, item):
        return item.nombre

    def item_description(self, item):
        # Date, start time and extra info, space-separated.
        parts = ["%s" % item.fecha, " %s" % item.hora_inicio, " %s" % item.info]
        return "".join(parts)
|
[
"[email protected]"
] | |
943eb7ef872e91eecdf173c1d2bcf133d8a02938
|
484f9502f2d1fa35df77df8d3a08bd2bfd2d253d
|
/src/testers/unittests/test_ast_utils.py
|
3debbde14f5a9683c67c251be3268860790f2dd6
|
[
"Apache-2.0"
] |
permissive
|
pmeerw/Triton
|
5d1c58e93ed257f06f1586a1aa542d1ba307dcbb
|
82f11d6b15302e7900ed7f9eb3d686b6313d5b37
|
refs/heads/master
| 2020-08-04T17:01:47.442181 | 2019-09-30T08:16:44 | 2019-09-30T08:16:44 | 212,212,395 | 0 | 0 |
Apache-2.0
| 2019-10-01T22:38:58 | 2019-10-01T22:38:58 | null |
UTF-8
|
Python
| false | false | 1,494 |
py
|
#!/usr/bin/env python2
## -*- coding: utf-8 -*-
"""Test AST utils."""
import unittest
from triton import *
class TestAstUtils(unittest.TestCase):
    """Testing the AST utilities."""

    def setUp(self):
        """Build an x86-64 context with two 8-bit symbolic variables."""
        self.ctx = TritonContext()
        self.ctx.setArchitecture(ARCH.X86_64)
        self.astCtxt = self.ctx.getAstContext()
        self.sv1 = self.ctx.newSymbolicVariable(8)
        self.sv2 = self.ctx.newSymbolicVariable(8)
        self.v1 = self.astCtxt.variable(self.sv1)
        self.v2 = self.astCtxt.variable(self.sv2)

    def test_lookingForNodes(self):
        expr = (((self.v1 + self.v2 * 3) + self.v2) - 1)

        # Variable nodes come back in traversal order: v1 then v2.
        found = self.astCtxt.lookingForNodes(expr, AST_NODE.VARIABLE)
        self.assertEqual(len(found), 2)
        self.assertEqual(found[0], self.v1)
        self.assertEqual(found[1], self.v2)
        self.assertEqual(found[0].getSymbolicVariable().getName(), self.sv1.getName())
        self.assertEqual(found[1].getSymbolicVariable().getName(), self.sv2.getName())

        # Node-kind census over the same expression.
        for kind, expected in [
            (AST_NODE.ANY, 12),
            (AST_NODE.BVADD, 2),
            (AST_NODE.BVSUB, 1),
            (AST_NODE.BVMUL, 1),
            (AST_NODE.BV, 2),
        ]:
            self.assertEqual(len(self.astCtxt.lookingForNodes(expr, kind)), expected)
|
[
"[email protected]"
] | |
c4a35b1184ddc9951b0bf9e8a1ceeaccd2c708a0
|
b951ee6d2de741e84f7bfe2dc5a66853c1d5cd4e
|
/Array/LinkedInstead.py
|
5960eaa4dc231e2a7ddbf5349c752a8df806be84
|
[] |
no_license
|
Chongkai-Ma/Fundamentals-of-Python-Data-Structures
|
e78569f79dfad16ebc18121c250c25d91bb94754
|
170e58d23d9ee73c53b2ab596d7fcfc3e63eccc9
|
refs/heads/master
| 2020-09-21T03:46:04.980838 | 2019-12-04T14:18:27 | 2019-12-04T14:18:27 | 224,669,370 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 345 |
py
|
#!/usr/bin/python3
"""Build a 9-node linked list (9, 8, ..., 1) and replace the first node
whose data equals ``targetItem`` with a sentinel value."""
from node import Node

# Push 1..9 onto the front, so the list reads 9, 8, ..., 1.
head = None
for count in range(1, 10):
    head = Node(count, head)

# Walk until the target is found or the list is exhausted.
# PEP 8: compare against None with `is`/`is not`, never `!=`.
probe = head
targetItem = 5
while probe is not None and probe.data != targetItem:
    probe = probe.next

if probe is not None:
    probe.data = 88888
    print("The item has been changed")
else:
    print("The item has not been found")
|
[
"[email protected]"
] | |
da2c797441188d198de8c57c9a59473cbd5ed769
|
c36fdb4d07aeaf0b1e568c45e8020b34c6fa5aca
|
/usps_integration/models/__init__.py
|
136e6ed2d4acabb36fa173d1a7051621eeeb8395
|
[] |
no_license
|
planetodooofficial/usps_integration_v13
|
c6cf33c49d753c44831d3f6e1da10271d37f0e82
|
ad69aa442b0ee65d1b7589b7f7ca409313f908aa
|
refs/heads/master
| 2022-12-24T10:05:39.397215 | 2020-09-30T19:14:55 | 2020-09-30T19:14:55 | 258,160,591 | 0 | 3 | null | 2020-09-30T19:14:56 | 2020-04-23T09:52:32 |
Python
|
UTF-8
|
Python
| false | false | 1,098 |
py
|
# -*- encoding: utf-8 -*-
##############################################################################
# Copyright (c) 2015 - Present Planet Odoo. All Rights Reserved
# Author: [Planet Odoo]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at:
# <http://www.gnu.org/licenses/gpl.html>.
#
##############################################################################
from . import shipping_conf
from . import delivery
from . import product
from . import sale
from . import shipping_endicia
from . import endicia
from . import shipping
from . import stock
|
[
"https://[email protected]"
] |
https://[email protected]
|
7e57dcbf4f48f8fcfe88cb68a3ebfbe549f6d2ab
|
e944d912def98d7546d17c4303169f52517348ca
|
/interview_questions/basic/sum_of_rows_1.py
|
046b5461ff003408ecb4ae700d527c671bb16873
|
[] |
no_license
|
balajich/python-crash-course
|
0710854de3cd04695f969cbfe774ef336f707f48
|
e62b578f7dc93f6a47fbae00dac2d496b985fe8d
|
refs/heads/master
| 2021-07-30T16:00:45.392119 | 2021-07-29T11:41:49 | 2021-07-29T11:41:49 | 192,034,679 | 9 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 425 |
py
|
'''
Take matrix as input and returns sum of rows`
'''
import numpy as np
def rowsum(matrix):
    """Return a list with the sum of every row of *matrix*.

    :param matrix (list): A list of lists where each inner list represents a row.
    :returns: (list) A list containing the sum of each row.
    """
    return [sum(row) for row in matrix]

print(rowsum([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))  # Should print [6, 15, 24]
|
[
"[email protected]"
] | |
e7a6da8b913047441c8ecbd61af44920ea566c1b
|
95d20fe737d711cf92d68130eb59b6aef4435ec2
|
/pyecharts数据可视化/中国国内生产总值.py
|
20106c170698be17ce82f33a10baa3719363738d
|
[] |
no_license
|
CCH21/Python
|
d11b93851579d85f972828c760a96c5be1f79531
|
33e218810856971f3f1f97a2b8a4c8dce761362e
|
refs/heads/master
| 2022-04-29T11:48:01.816283 | 2022-03-17T11:53:01 | 2022-03-17T11:53:01 | 226,452,057 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,686 |
py
|
#!/usr/bin/env python3
import csv
from pyecharts import Line


def _pct(cell):
    """Parse a cell like '12.3%' into a fraction rounded to 3 decimals.

    float() replaces the original eval(): spreadsheet text must never be
    evaluated as Python code.
    """
    return round(float(cell[:-1]) / 100, 3)


Quarter = []
GDP = []
Primary_industry = []
Secondary_industry = []
Tertiary_industry = []

with open('中国国内生产总值.csv', 'r', newline='') as csv_in_file:
    filereader = csv.reader(csv_in_file)
    head = next(filereader)  # skip the header row
    for row_list in filereader:
        Quarter.append(row_list[0])
        GDP.append(_pct(row_list[2]))
        Primary_industry.append(_pct(row_list[4]))
        Secondary_industry.append(_pct(row_list[6]))
        Tertiary_industry.append(_pct(row_list[8]))

# Reverse every series so the x-axis runs oldest -> newest
# (the source file appears to list the newest quarter first).
Quarter = Quarter[::-1]
GDP = GDP[::-1]
Primary_industry = Primary_industry[::-1]
Secondary_industry = Secondary_industry[::-1]
Tertiary_industry = Tertiary_industry[::-1]

line = Line('中国国内生产总值同比增长率', '时间:2006年第1季度-2020年第1季度 数据来源:东方财富网', width=1280, height=720)
line.add('国内生产总值', Quarter, GDP, is_smooth=False, mark_point=['max'], mark_line=['average'], legend_pos='right')
line.add('第一产业', Quarter, Primary_industry, is_smooth=False, mark_point=['max'], mark_line=['average'],
         legend_pos='right')
line.add('第二产业', Quarter, Secondary_industry, is_smooth=False, mark_point=['max'], mark_line=['average'],
         legend_pos='right')
line.add('第三产业', Quarter, Tertiary_industry, is_smooth=False, mark_point=['max'], mark_line=['average'],
         legend_pos='right')
line.render('中国国内生产总值.html')
|
[
"[email protected]"
] | |
7b10e1771bc7133cd12e42ff4ced75617ba3270c
|
826cdefb3554e6bbc7b5e5fa9bc6f55268cd58dd
|
/src/main/python/basics/itertools.py
|
4c731ae4d27d12208d8fbb8b22bcd656bceb3a3f
|
[] |
no_license
|
lj015625/CodeSnippet
|
67d1f556497948b3db51c67af07f16a21751427e
|
73e9375c5d7edcc50170569c0bd99fd415557d85
|
refs/heads/master
| 2023-09-01T14:59:57.162553 | 2023-08-24T11:07:37 | 2023-08-24T11:07:37 | 61,499,418 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,043 |
py
|
import itertools as it
def combinational_dice_rolls(n, m):
    """All ordered outcomes of rolling *n* dice with *m* faces each."""
    faces = range(1, m + 1)
    return [roll for roll in it.product(faces, repeat=n)]
combinational_dice_rolls(2,2)
def cartesian_product(arr1, arr2):
    """Print the cartesian product of *arr1* and *arr2*, space-separated.

    Bug fix: the original ignored its parameters and printed the module
    globals ``A`` and ``B`` instead.
    """
    print(*it.product(arr1, arr2))
A = [1,2,3]
B = [1,2,3]
cartesian_product(A,B)
# All length-n permutations of the digits of s.
# NOTE(review): with s=2 there is only one digit, so no length-3
# permutations exist and nothing is printed — likely meant for a longer s.
s, n = 2, 3
s = sorted(str(s))
n = int(n)
for i in it.permutations(s,n):
    print(''.join(i), sep='\n')
# All combinations of the sorted characters of s, for every size 1..n.
s, n = 'ABC', 2
for i in range(1, int(n)+1):
    for j in it.combinations(sorted(s), i):
        print(''.join(j))
# This tool returns length subsequences of elements from the input iterable allowing individual elements to be repeated more than once.
s, n = 'ABC', 2
for c in it.combinations_with_replacement(sorted(s), int(n)):
    print("".join(c))
# create list of tuples from repeating items in a string
print(*[(len(list(values)), int(key)) for key, values in it.groupby('12345')])
# count number of a in combinations
# (the bool 'a' in t is summed as 0/1, giving the fraction of
# size-k combinations that contain at least one 'a')
n = 4
arr = ['a', 'a', 'c', 'd']
k = 2
count = 0
total = 0
for t in it.combinations(arr, k):
    total += 1
    count += 'a' in t
print(count/total)
|
[
"[email protected]"
] | |
684e78d298475edf5350934fbb380bb497a3bb7e
|
0cc9ba497efeae7de808b3063f932cee9449bc20
|
/akshare/fx/currency_investing.py
|
41abf6485117ce7ccd18ebed6240baa7a5dd72a6
|
[
"MIT"
] |
permissive
|
louis100/akshare
|
08dc7d71c194e973092174dabc307e28a2aaf7d6
|
0b2ad15982dc1e4081929ed634e96c559bf3ef7e
|
refs/heads/master
| 2022-12-12T16:26:38.294899 | 2020-09-16T04:25:46 | 2020-09-16T04:25:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,907 |
py
|
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2020/9/9 11:56
Desc: 英为财情-外汇-货币对历史数据
https://cn.investing.com/currencies/
https://cn.investing.com/currencies/eur-usd-historical-data
"""
import re
import pandas as pd
import requests
from bs4 import BeautifulSoup
from akshare.index.cons import short_headers, long_headers
def _currency_name_url() -> dict:
    """Map Chinese currency-pair names to investing.com URL slugs.

    :return: {Chinese pair name: lowercase slug with "/" replaced by "-"}
    :rtype: dict
    """
    res = requests.post("https://cn.investing.com/currencies/", headers=short_headers)
    table = pd.read_html(res.text)[0].iloc[:, 1:]  # live currency quotes
    table.columns = ['中文名称', '英文名称', '最新', '最高', '最低', '涨跌额', '涨跌幅', '时间']
    slugs = (item.lower().replace("/", "-") for item in table["英文名称"].tolist())
    return dict(zip(table["中文名称"].tolist(), slugs))
def currency_hist(symbol: str = "usd-vnd", start_date: str = "20050101", end_date: str = "20200717") -> pd.DataFrame:
    """Fetch daily history for a currency pair from cn.investing.com.

    Scrapes the pair's historical-data page for its instrument ids, then
    posts to the HistoricalDataAjax endpoint and parses the returned
    HTML table. Note the length of the requested interval; whether an
    arbitrary pair is available can be checked via currency_name_code.

    :param symbol: pair slug, e.g. "usd-vnd"
    :type symbol: str
    :param start_date: start date as "YYYYMMDD"
    :type start_date: str
    :param end_date: end date as "YYYYMMDD"
    :type end_date: str
    :return: daily rows indexed by date; 涨跌幅 converted to a fraction
    :rtype: pandas.DataFrame
    """
    # "YYYYMMDD" -> "YYYY/MM/DD" as the site expects.
    start_date = "/".join([start_date[:4], start_date[4:6], start_date[6:]])
    end_date = "/".join([end_date[:4], end_date[4:6], end_date[6:]])
    temp_url = f"https://cn.investing.com/currencies/{symbol.lower().replace('/', '-')}-historical-data"
    res = requests.post(temp_url, headers=short_headers)
    soup = BeautifulSoup(res.text, "lxml")
    title = soup.find("h2", attrs={"class": "float_lang_base_1"}).get_text()
    # Second fetch of the same page: pull curr_id/smlID out of the inline JS blob.
    res = requests.post(temp_url, headers=short_headers)
    soup = BeautifulSoup(res.text, "lxml")
    data = soup.find_all(text=re.compile("window.histDataExcessInfo"))[0].strip()
    para_data = re.findall(r"\d+", data)
    payload = {
        "curr_id": para_data[0],
        "smlID": para_data[1],
        "header": title,
        "st_date": start_date,
        "end_date": end_date,
        "interval_sec": "Daily",
        "sort_col": "date",
        "sort_ord": "DESC",
        "action": "historical_data",
    }
    url = "https://cn.investing.com/instruments/HistoricalDataAjax"
    res = requests.post(url, data=payload, headers=long_headers)
    soup = BeautifulSoup(res.text, "lxml")
    vest_list = [item.get_text().strip().split("\n") for item in soup.find_all("tr")]
    raw_df = pd.DataFrame(vest_list)
    # Row 0 of the scraped <tr> list holds the column names; the last row is
    # dropped too (presumably a footer row — confirm against the live page).
    df_data = pd.DataFrame(vest_list, columns=raw_df.iloc[0, :].tolist()).iloc[1:-1, :]
    df_data.index = pd.to_datetime(df_data["日期"], format="%Y年%m月%d日")
    # "1.23%" -> 0.0123
    df_data["涨跌幅"] = pd.DataFrame(round(df_data['涨跌幅'].str.replace('%', '').astype(float) / 100, 6))
    del df_data["日期"]
    # Strip thousands separators before casting everything to float.
    df_data.iloc[:, :-1] = df_data.iloc[:, :-1].applymap(lambda x: x.replace(',', ''))
    df_data = df_data.astype(float)
    return df_data
def _currency_single() -> pd.DataFrame:
    """List the single currencies offered on the single-currency-crosses page.

    :return: DataFrame with columns short_name, name, code
    :rtype: pandas.DataFrame
    """
    res = requests.post(
        "https://cn.investing.com/currencies/single-currency-crosses",
        headers=short_headers)
    soup = BeautifulSoup(res.text, "lxml")
    options = soup.find("select", attrs={"class": "newInput selectBox"}).find_all("option")
    # Each <option> text looks like "XXX - Some Name"; split on the first dash.
    frame = pd.DataFrame([opt.get_text().split('-', 1) for opt in options],
                         columns=["short_name", "name"])
    frame["short_name"] = frame["short_name"].str.strip()
    frame["name"] = frame["name"].str.strip()
    frame["code"] = [opt["value"] for opt in options]
    return frame
def _fetch_cross_pairs(currency_df: pd.DataFrame, short_name: str) -> dict:
    """Query investing.com for every pair crossing *short_name*.

    Extracted helper: the original function repeated this request/parse
    sequence verbatim for each leg of the symbol.

    :param currency_df: output of _currency_single() (short_name/name/code)
    :param short_name: upper-case currency code, e.g. "USD"
    :return: {Chinese pair name: lowercase url slug}
    :rtype: dict
    """
    url = "https://cn.investing.com/currencies/Service/ChangeCurrency"
    params = {
        "session_uniq_id": "53bee677662a2336ec07b40738753fc1",
        "currencies": currency_df[currency_df["short_name"] == short_name]["code"].values[0],
    }
    headers = {"Accept": "application/json, text/javascript, */*; q=0.01",
               "Accept-Encoding": "gzip, deflate, br",
               "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
               "Cache-Control": "no-cache",
               "Connection": "keep-alive",
               "Host": "cn.investing.com",
               "Pragma": "no-cache",
               "Referer": "https://cn.investing.com/currencies/single-currency-crosses",
               "Sec-Fetch-Mode": "cors",
               "Sec-Fetch-Site": "same-origin",
               "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36",
               "X-Requested-With": "XMLHttpRequest",
               }
    res = requests.get(url, params=params, headers=headers)
    temp_df = pd.read_html(res.json()["HTML"])[0].iloc[:, 1:]
    temp_df.rename(columns={"名称.1": "简称"}, inplace=True)
    temp_df["pids"] = [item[:-1] for item in res.json()["pids"]]
    return dict(zip(temp_df["名称"].tolist(),
                    [item.lower().replace("/", "-") for item in temp_df["简称"].tolist()]))


def currency_name_code(symbol: str = "usd/jpy") -> pd.DataFrame:
    """All pairs convertible against either leg of the given pair.

    :param symbol: "usd/jpy"
    :type symbol: str
    :return: Chinese/English pair table
    :rtype: pandas.DataFrame
          name     code
    0    欧元/美元  eur-usd
    1    英镑/美元  gbp-usd
    2    美元/日元  usd-jpy
    3   美元/瑞士法郎  usd-chf
    4  澳大利亚元/美元  aud-usd
    ..       ...      ...
    268  日元/新加坡元  jpy-sgd
    269  科威特第纳尔/日元  kwd-jpy
    270  日元/白俄罗斯卢布  jpy-byn
    271  日元/乌克兰赫里纳  jpy-uah
    272  日元/土耳其里拉  jpy-try
    """
    symbol = symbol.upper()
    currency_df = _currency_single()
    legs = symbol.split("/")
    # First leg's crosses, then the second leg's (later entries win on clash),
    # matching the original dict-update order.
    mapping = _fetch_cross_pairs(currency_df, legs[0])
    mapping.update(_fetch_cross_pairs(currency_df, legs[1]))
    temp_df = pd.DataFrame.from_dict(mapping, orient="index").reset_index()
    temp_df.columns = ["name", "code"]
    return temp_df
if __name__ == '__main__':
    # Smoke test: list convertible pairs for usd/jpy, then fetch usd-mmk history.
    currency_name_code_df = currency_name_code(symbol="usd/jpy")
    print(currency_name_code_df)
    currency_hist_df = currency_hist(symbol="usd-mmk", start_date="20131018", end_date="20200915")
    print(currency_hist_df)
|
[
"[email protected]"
] | |
8c66385405873707fcd3fa458d8f11637899adb4
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/323/usersdata/278/89121/submittedfiles/mdc.py
|
3185a3f53219a38653c6a227ae7e28e217f85f66
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 604 |
py
|
# -*- coding: utf-8 -*-
import math

# Read the two integers whose GCD ("MDC") is wanted.
n1 = int(input("Digite o primeiro número inteiro: "))
n2 = int(input("Digite o segundo número inteiro: "))

# Bug fix: the original hand-rolled loops multiplied *all* common divisors
# together and then printed mdc/2 (e.g. gcd(4, 6) came out as 1.0), and the
# n2 > n1 branch even used inverted conditions (resto != 0). math.gcd is
# the correct, standard computation; math was already imported.
print(math.gcd(n1, n2))
|
[
"[email protected]"
] | |
e48a135645e3ef4e54e636050eed7af1fa14972e
|
9d1c260ff8e58335e0f373cfdd530e637ea803a8
|
/EVENT.py
|
963c1b66313d4f3a5b72fbc09ebc1ccffb81b482
|
[
"MIT"
] |
permissive
|
rambasnet/EVENT
|
e52931e3224b712e8b044e58382e4d170a835dc4
|
dd3a6507112e4adc054481608d8968706f80d23f
|
refs/heads/master
| 2020-06-01T23:39:28.843906 | 2019-06-09T20:07:02 | 2019-06-09T20:07:02 | 190,967,667 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 963 |
py
|
#-----------------------------------------------------------------------------
# Name: event.py
# Purpose:
#
# Author: Ram Basnet
#
# Created: 2009/10/10
# RCS-ID: $Id: event.py $
# Copyright: (c) 2009
# Licence: All Rights Reserved.
#-----------------------------------------------------------------------------
import sys
import Parsers
import Consolidation
import Reports
import Charts
import Navigation
from Config import *
if __name__ == "__main__":
    # Pipeline: read config -> parse -> consolidate -> report -> chart -> navigation.
    # (Python 2 script — note raw_input below.)
    #fout = open('EVENT.log', 'w')
    #temp = sys.stdout
    #sys.stdout = fout
    ReadConfigFile()
    #Run parsers
    Parsers.main()
    #Run consolidation
    Consolidation.main()
    #run reports
    Reports.main()
    #run charts
    Charts.main()
    #run navigation
    Navigation.GenerateNavigation()
    # Keep the console window open so the user can read the output.
    raw_input('All done! Please hit Enter key to continue...')
    #sys.stdout.close()
    #sys.stdout = temp
|
[
"[email protected]"
] | |
e8a823a890546c56c66c3bb0dbf0a510a17cf622
|
13f7adf576114c51f9f806a6fc5797b276d93f97
|
/devel/lib/python2.7/dist-packages/autoware_msgs/msg/_traffic_light.py
|
90177962beab0e196bf6f3c7b6ff861fedd20be4
|
[] |
no_license
|
yunjeongkim/keti_ws
|
a72a5ebc367b208654bdffb5bb9e8372cd959d33
|
aaac717c15a7be7431b22fb4ec7a96a734f2e03c
|
refs/heads/master
| 2020-04-05T06:18:52.334522 | 2018-11-21T01:47:34 | 2018-11-21T01:47:34 | 156,633,425 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,252 |
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from autoware_msgs/traffic_light.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class traffic_light(genpy.Message):
_md5sum = "a4931ba214a0e37e220dd00b2acca20a"
_type = "autoware_msgs/traffic_light"
_has_header = True #flag to mark the presence of a Header object
_full_text = """Header header
int32 traffic_light
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
"""
__slots__ = ['header','traffic_light']
_slot_types = ['std_msgs/Header','int32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,traffic_light
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(traffic_light, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.traffic_light is None:
self.traffic_light = 0
else:
self.header = std_msgs.msg.Header()
self.traffic_light = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_i().pack(self.traffic_light))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(self.traffic_light,) = _get_struct_i().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_i().pack(self.traffic_light))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(self.traffic_light,) = _get_struct_i().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
# Shared genpy Struct for the 4-byte unsigned length prefixes.
_struct_I = genpy.struct_I
def _get_struct_I():
    """Return the cached Struct used for 4-byte length fields."""
    global _struct_I
    return _struct_I
# Lazily-created little-endian int32 Struct (for the traffic_light field).
_struct_i = None
def _get_struct_i():
    """Return the cached '<i' Struct, creating it on first use."""
    global _struct_i
    if _struct_i is None:
        _struct_i = struct.Struct("<i")
    return _struct_i
# Lazily-created Struct for three little-endian uint32s (header seq + stamp).
_struct_3I = None
def _get_struct_3I():
    """Return the cached '<3I' Struct, creating it on first use."""
    global _struct_3I
    if _struct_3I is None:
        _struct_3I = struct.Struct("<3I")
    return _struct_3I
|
[
"[email protected]"
] | |
4e3448bfeb4bf56e2ff41fc71a1727b619f401e6
|
526b6454565583700866463e46f66ede67165e2b
|
/expenses/pagination.py
|
d231586502537a64f68fbb878914834860e78b17
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
thangadurairajapandiyan/django-expenses
|
a0f04ac41d1b02be82642a084545a2b356fd5a59
|
4a463052a67ac080427857d3fec16cf78eb70c3b
|
refs/heads/master
| 2023-03-30T04:24:01.096399 | 2021-03-31T20:30:17 | 2021-03-31T20:30:17 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,126 |
py
|
# Pretty Pagination
# Copyright © 2018-2021, Chris Warrick.
# All rights reserved.
# License: 3-clause BSD
from itertools import zip_longest
def pagination(num, maxpage):
    """Generate a pretty pagination."""
    if maxpage <= 5:
        # Few enough pages to show them all.
        return list(range(1, maxpage + 1))

    # Pages surrounding the current one, clamped at both ends, plus the
    # first and last page.
    if num == 1:
        nearby = {1, 2, 3}
    elif num == maxpage:
        nearby = {num - 2, num - 1, num}
    else:
        nearby = {num - 1, num, num + 1}
    nearby |= {1, maxpage}

    shown = [p for p in sorted(nearby) if 0 < p <= maxpage]
    result = []
    for page, following in zip_longest(shown, shown[1:]):
        result.append(page)
        if following is None:
            continue
        gap = following - page
        if gap == 2:
            result.append(page + 1)  # ellipsis should not be one page
        elif gap > 2:
            result.append("...")
    return result
if __name__ == "__main__":
    # Demo: print the pagination bar for every page of a 15-page listing.
    maxpage = 15
    print("Pages:", maxpage)
    for i in range(1, maxpage + 1):
        print(i, pagination(i, maxpage), sep="\t")
|
[
"[email protected]"
] | |
a332729be8de4ce9a7e33437066ae82c80110be0
|
bf7ad5c52e5be4fbf34816b95932d520e0f579d4
|
/repeat.py
|
0419ac8f22b5134ed7e2a5bb1e9e31d10d076841
|
[] |
no_license
|
veronicarose27/vero
|
4722381a6598e3fc6f87596d52f6ca860219ad19
|
c943344596dc4398accdd81bd9936ff114b8d738
|
refs/heads/master
| 2020-06-11T21:13:32.613495 | 2019-07-19T17:20:46 | 2019-07-19T17:20:46 | 194,087,132 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 246 |
py
|
# Read two ints (y appears unused) and a list p from stdin.
y,z=map(int,input().split())
p=list(map(int,input().split()))
count=0
# NOTE(review): `count` is never reset between outer iterations, and the
# inner range starts at 1, so p[i]==p[j] also counts the j==i case for
# i >= 1. This looks like it intends "print the first value that occurs
# z times", but as written the tally accumulates across all i — confirm
# the intended behavior before relying on the output.
for i in range(0,len(p)):
    for j in range(1,len(p)):
        if(p[i]==p[j]):
            count=count+1
    if(count==z):
        print(p[i])
        break
|
[
"[email protected]"
] | |
6110d7d86503b01878af17b0f37d98e5097fece2
|
f4b7d327581e500dc79079c834cc23af9939737e
|
/moonlight/models/base/glyph_patches.py
|
263b33c17cd0a8a9c7f22e54295ce5b1953d0b75
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
abc3436645/moonlight
|
095eea2b892528b9a3fe5d05af39e4a023c55628
|
7f8a3ab4e55570dd120e3965f8049dd866d12a6b
|
refs/heads/master
| 2020-03-25T19:37:01.849371 | 2018-08-07T17:42:56 | 2018-08-07T17:42:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,031 |
py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base patch-based glyph model.
For example, this accepts the staff patch k-means centroids emitted by
staffline_patches_kmeans_pipeline and labeled by kmeans_labeler.
This defines the input and signature of the model, and allows any type of
multi-class classifier using the normalized patches as input.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl import flags
from moonlight.models.base import batches
from moonlight.models.base import label_weights
from moonlight.util import memoize
import tensorflow as tf
from tensorflow.python.lib.io import file_io
from tensorflow.python.lib.io import tf_record
WEIGHT_COLUMN_NAME = 'weight'
FLAGS = flags.FLAGS
flags.DEFINE_string(
'train_input_patches', None, 'Glob of labeled patch TFRecords for training')
flags.DEFINE_string(
'eval_input_patches', None, 'Glob of labeled patch TFRecords for eval')
flags.DEFINE_string('model_dir', None, 'Output trained model directory')
flags.DEFINE_boolean(
'use_included_label_weight', False,
'Whether to multiply a "label_weight" feature included in the example by'
' the weight determined by the "label" value.')
flags.DEFINE_float(
'augmentation_x_shift_probability', 0.5,
'Probability of shifting the patch left or right by one pixel. The edge is'
' filled using the adjacent column. It is equally likely that the patch is'
' shifted left or right.')
flags.DEFINE_float(
'augmentation_max_rotation_degrees', 2.,
'Max rotation of the patch, in degrees. The rotation is selected uniformly'
' randomly from the range +- this value. A value of 0 implies no rotation.')
flags.DEFINE_integer(
'eval_throttle_secs', 60, 'Evaluate at at most this interval, in seconds.')
flags.DEFINE_integer(
'train_max_steps', 100000,
'Max steps for training. If 0, will train until the process is'
' interrupted.')
flags.DEFINE_integer('eval_steps', 500, 'Num steps to evaluate the model.')
flags.DEFINE_integer(
'exports_to_keep', 10,
'Keep the last N saved models (exported on each eval) before deleting'
' previous exports.')
@memoize.MemoizedFunction
def read_patch_dimensions():
  """Reads the patch dimensions from the first training example on disk.

  Scans the --train_input_patches glob and returns as soon as one record
  with "height" and "width" features is parsed; empty files are skipped
  implicitly because their record iterator yields nothing.

  Returns:
    Tuple of (height, width) read from disk, using the glob passed to
    --train_input_patches.
  """
  for path in file_io.get_matching_files(FLAGS.train_input_patches):
    for serialized in tf_record.tf_record_iterator(path):
      parsed = tf.train.Example.FromString(serialized)
      feature = parsed.features.feature
      # int() converts Python 2 longs, which feature columns reject.
      return (int(feature['height'].int64_list.value[0]),
              int(feature['width'].int64_list.value[0]))
def input_fn(input_patches):
  """Defines the estimator input function.

  Args:
    input_patches: The input patches TFRecords pattern.

  Returns:
    A callable. Each invocation returns a tuple containing:
    * A dict with a single key 'patch', and the patch tensor as a value.
    * A scalar tensor with the patch label, as an integer.
  """
  # Patch size is read once from the training data and baked into the
  # parser's fixed-length feature spec below.
  patch_height, patch_width = read_patch_dimensions()
  dataset = tf.data.TFRecordDataset(file_io.get_matching_files(input_patches))
  def parser(record):
    """Dataset parser function.

    Args:
      record: A single serialized Example proto tensor.

    Returns:
      A tuple of:
      * A dict of features ('patch' and 'weight')
      * A label tensor (int64 scalar).
    """
    feature_types = {
        'patch':
            tf.FixedLenFeature((patch_height, patch_width), tf.float32),
        'label':
            tf.FixedLenFeature((), tf.int64),
    }
    if FLAGS.use_included_label_weight:
      feature_types['label_weight'] = tf.FixedLenFeature((), tf.float32)
    features = tf.parse_single_example(record, feature_types)
    label = features['label']
    # Per-example weight derived from the label, optionally multiplied by
    # the example's own "label_weight" feature (see flag above).
    weight = label_weights.weights_from_labels(label)
    if FLAGS.use_included_label_weight:
      # Both operands must be the same type (float32).
      weight = tf.to_float(weight) * tf.to_float(features['label_weight'])
    patch = _augment(features['patch'])
    return {'patch': patch, WEIGHT_COLUMN_NAME: weight}, label
  # Batching is delegated to batches.get_batched_tensor.
  return batches.get_batched_tensor(dataset.map(parser))
def _augment(patch):
    """Applies the full augmentation pipeline: a 1-pixel shift, then rotation."""
    shifted = _augment_shift(patch)
    return _augment_rotation(shifted)
def _augment_shift(patch):
    """Augments the patch by possibly shifting it 1 pixel horizontally."""
    with tf.name_scope('augment_shift'):
        rand = tf.random_uniform(())

        def shift_left():
            return _shift_left(patch)

        def shift_right():
            return _shift_right(patch)

        def identity():
            return patch

        shift_prob = min(1., FLAGS.augmentation_x_shift_probability)
        # The probability mass is split evenly between the two directions:
        # rand in [0, p/2) -> shift left, [p/2, p) -> shift right,
        # [p, 1) -> unchanged.
        return tf.cond(rand < shift_prob / 2,
                       shift_left,
                       lambda: tf.cond(rand < shift_prob, shift_right, identity))
def _shift_left(patch):
    """Shifts the patch one pixel left, filling the freed column by
    duplicating the (new) last column."""
    patch = tf.convert_to_tensor(patch)
    body = patch[:, 1:]
    fill = patch[:, -1:]
    return tf.concat([body, fill], axis=1)
def _shift_right(patch):
    """Shifts the patch one pixel right, filling the freed column by
    duplicating the (new) first column."""
    patch = tf.convert_to_tensor(patch)
    fill = patch[:, :1]
    body = patch[:, :-1]
    return tf.concat([fill, body], axis=1)
def _augment_rotation(patch):
    """Augments the patch by rotating it by a small amount."""
    max_rotation_radians = math.radians(FLAGS.augmentation_max_rotation_degrees)
    # Uniform random angle in [-max, +max) radians, drawn per example.
    rotation = tf.random_uniform(
        (), minval=-max_rotation_radians, maxval=max_rotation_radians)
    # Background is white (1.0) but tf.contrib.image.rotate currently always fills
    # the edges with black (0). Invert the patch before rotating.
    return 1. - tf.contrib.image.rotate(
        1. - patch, rotation, interpolation='BILINEAR')
def serving_fn():
    """Returns the ServingInputReceiver for the exported model.

    Returns:
        A ServingInputReceiver object which may be passed to
        `Estimator.export_savedmodel`. A model saved using this receiver may be used
        for running OMR.
    """
    # Batch of serialized tf.Example protos.
    examples = tf.placeholder(tf.string, shape=[None])
    patch_height, patch_width = read_patch_dimensions()
    parsed = tf.parse_example(examples, {
        'patch': tf.FixedLenFeature((patch_height, patch_width), tf.float32),
    })
    # Default serving signature consumes raw patch tensors; alternatives allow
    # feeding either serialized Examples ('example') or patches ('patch').
    return tf.estimator.export.ServingInputReceiver(
        features={'patch': parsed['patch']},
        receiver_tensors=parsed['patch'],
        receiver_tensors_alternatives={
            'example': examples,
            'patch': parsed['patch']
        })
def create_patch_feature_column():
    """Numeric feature column for the 2D 'patch' input, sized from the data."""
    patch_shape = read_patch_dimensions()
    return tf.feature_column.numeric_column('patch', shape=patch_shape)
def train_and_evaluate(estimator):
    """Runs the tf.estimator train/evaluate loop for the patch classifier.

    Input globs, step counts, eval throttling, and export retention all come
    from module flags; each evaluation also exports a SavedModel built from
    serving_fn.
    """
    tf.estimator.train_and_evaluate(
        estimator,
        tf.estimator.TrainSpec(
            input_fn=lambda: input_fn(FLAGS.train_input_patches),
            max_steps=FLAGS.train_max_steps),
        tf.estimator.EvalSpec(
            input_fn=lambda: input_fn(FLAGS.eval_input_patches),
            start_delay_secs=0,
            throttle_secs=FLAGS.eval_throttle_secs,
            steps=FLAGS.eval_steps,
            exporters=[
                tf.estimator.LatestExporter(
                    'exporter', serving_fn,
                    exports_to_keep=FLAGS.exports_to_keep),
            ]))
|
[
"[email protected]"
] | |
04b61e88739ffadc8d675c0b4f576b5f7122eb69
|
576cc83449e10fd3f98281970c46016ea7a5aea2
|
/OpenCV相机标定/CalibrationTest.py
|
658370ca975416804ff63dff37187d3bdaa30be3
|
[] |
no_license
|
HotView/PycharmProjects
|
215ab9edd341e3293daebcf86d97537f8cd28d75
|
61393fe5ba781a8c1216a5cbe7e0d06149a10190
|
refs/heads/master
| 2020-06-02T07:41:53.608742 | 2019-11-13T08:31:57 | 2019-11-13T08:31:57 | 191,085,178 | 3 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,778 |
py
|
import cv2
import numpy as np
import glob

# Camera calibration from chessboard images; writes results to pose.py.
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS+cv2.TERM_CRITERIA_MAX_ITER,30,0.001)
# prepare objects points,like(0,0,0),(1,0,0),(2,0,0),....,(6,5,0)
objp = np.zeros((6*7,3),np.float32)
objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)
# Arrays to store object points and points from all the images.
objpoints = []
imgpoints = []
images = glob.glob('image/*.jpg')
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners. Corners are joined row/column-wise, so
    # (7,6) means 7 corners connected into one line.
    ret,corners = cv2.findChessboardCorners(gray,(7,6),None)
    # If corners were found, record the object points and image points.
    if ret:
        objpoints.append(objp)
        corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
        print("corners2",corners2)
        imgpoints.append(corners2)
        # Draw and display the corners; the (7,6) grid is rendered in 6 colors.
        # The orange-red point is index 0; the blue ones are the last points.
        img = cv2.drawChessboardCorners(img,(7,6),corners2,ret)
        for i,p in enumerate(corners2):
            x = int(p[0][0])
            y = int(p[0][1])
            cv2.putText(img,str(i),(x,y),cv2.FONT_HERSHEY_SIMPLEX,0.5,(255,255,255),2)
        cv2.imshow(fname,img)

# Calibrate from all collected correspondences.
rmse, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
h,w = gray.shape[:2]
imgsize = (w,h)
mtx2, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,imgsize,alpha=0,
                                        centerPrincipalPoint=True)
print("#######")
print(dist)
#np.savez("pose",mtx = mtx,dist = dist,newmtx = mtx2)
print(mtx,dist,mtx2)
# Persist the calibration as an importable Python module.
with open('pose.py', 'wb') as fout:
    fout.write(b'import numpy as np\n')
    fout.write(b'\n')
    fout.write(b'rmse = %r\n' % rmse)
    fout.write(b'distortion_coefficients = np.array(%r, dtype=np.float32)\n'
               % dist.tolist())
    fout.write(b'raw_camera_matrix = np.array(%r, dtype=np.float32)\n'
               % mtx.tolist())
    fout.write(b'undistort_camera_matrix = np.array(%r, dtype=np.float32)\n'
               % mtx2.tolist())
    fout.write(b'roi = %d, %d, %d, %d\n'% roi)
    fout.write(b'image_size = %d, %d\n' % imgsize)
print(roi)
print("----------------")
# NOTE(review): `ret` here is the last loop iteration's findChessboardCorners
# flag, not a calibration result.
print(ret)
print("-----------")
print(mtx)
print("-----------")
matinv = np.linalg.inv(mtx)
print(matinv)
print("################################")
print(np.dot(mtx,matinv))

# Mean reprojection error over all calibration images.
mean_error = 0
for i in range(len(objpoints)):
    imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
    error = cv2.norm(imgpoints[i],imgpoints2, cv2.NORM_L2)/len(imgpoints2)
    mean_error += error
print ("total error: ", mean_error/len(objpoints))
cv2.waitKey(0)
|
[
"[email protected]"
] | |
b30f1b39fb3a2a1a6d9299203f6c492cd0e9aa87
|
a7ca0a372a44bc9cee59a7e1e59734a4814a59b9
|
/이것이코딩테스트다/병사배치하기.py
|
8b38d29927c9fad4814ed5bac88c39daec9c4d28
|
[] |
no_license
|
schw240/Preparing-coding-test
|
435d6dbdcf90fc8c0c408dfa032ad7f09fdc5a90
|
758a41270c409312a998152c5298369ec385bfdb
|
refs/heads/master
| 2021-11-29T07:54:05.140178 | 2021-10-03T11:40:36 | 2021-10-03T11:40:36 | 245,345,693 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 510 |
py
|
# Soldier-arrangement problem: remove the fewest soldiers so the remaining
# line is in strictly decreasing combat power.
N = int(input())
scores = list(map(int, input().split(' ')))
scores.reverse()
# Soldiers must be arranged in descending order of combat power, strongest
# first. Instead of sorting, soldiers are removed ("excluded") from the line.
dp = [1] * N
# dp[i] = length of the longest increasing subsequence of the reversed list
# ending at i; the answer (soldiers to exclude so the maximum number remain)
# is N minus the best such length.
for i in range(1, N):
    for j in range(i):
        if scores[j] < scores[i]:
            dp[i] = max(dp[i], dp[j] + 1)
print(N - max(dp))
|
[
"[email protected]"
] | |
a52dfaac37bcd5fa128652b84d5b4b9904f40414
|
1974b3e9c5f2f677833e1608a41281f377fd331c
|
/dltesthttp_xuyalin2/www/testcase/dlmall/ts_couponx/getUseFulCoupons.py
|
e7f7c5c587deb28ea48f702a15f9161b6113a024
|
[] |
no_license
|
xyl00755/pythonLearning
|
ed0f540b61247c3560f347853da5886b2e2ba25d
|
c6aecff86ff34dcd7358d98201627ff84e9bf2cf
|
refs/heads/master
| 2021-01-13T08:19:25.171016 | 2016-12-16T05:43:10 | 2016-12-16T05:43:10 | 71,764,553 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,696 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from www.api.dlmall import *
from www.common.excel import *
from www.common.database import *
"""
/couponx/getUseFulCoupons.html
request:
get
http://123.57.244.205:9003/couponx/getUseFulCoupons.html?goodsId=f4bccbbd84e44f9ba839e082970dccca%2C9c02bcf9737d499a8a72a3514730b425%2C24df3ab5b628496aa0fdded6c4230fec%2C2e78061ea75b26638ef38d1b0c848cbb%2Cbc54482ad9a44f3d95b63d9876bc3100%2C&t=1474440394741
response:json string
{
"status": 0,
"data": {
"unUseFulCoupons": [
{
"id": null,
"couponEntityId": "114512879800028",
"couponName": "6元优惠券验证",
"couponAmt": 600,
"couponEntityStatus": "01",
"effectiveTime": "2016-09-21",
"uneffectiveTime": "2016-10-21",
"category": "白酒",
"expireTime": null,
"useTime": null,
"records": null
}
],
"useFulCoupons": [
{
"id": null,
"couponEntityId": "114512879800028",
"couponName": "6元优惠券验证",
"couponAmt": 600,
"couponEntityStatus": "01",
"effectiveTime": "2016-09-21",
"uneffectiveTime": "2016-10-21",
"category": "白酒",
"expireTime": null,
"useTime": null,
"records": null
}
]
},
"msg": ""
}
"""
class getUseFulCoupons(unittest.TestCase):
    """Tests /couponx/getUseFulCoupons.html (usable-coupon listing).

    NOTE: the class-level attributes run at import time, including a live
    login against the shop service.
    """
    UserShop = eData('WebManager')
    danluCouponsInfo = eData('DanluCoupons')
    dlservice = dlmall()
    s = dlservice.login(UserShop.buyer_username, UserShop.buyer_password)

    # Four coupons: one with the longest expiry, two with the same expiry but
    # different amounts, and one with the shortest expiry. Verify the sort
    # order of the returned usable coupons.
    def test_Coupons_sort(self):
        # BUG FIX: the class attribute is 'danluCouponsInfo'; the original read
        # the misspelled 'self.danluConponsInfo' and raised AttributeError.
        data = [self.danluCouponsInfo.goodsId1,
                self.danluCouponsInfo.goodsId2,
                self.danluCouponsInfo.goodsId3,
                self.danluCouponsInfo.goodsId4]
        # (The original also built ','.join(data) and discarded the result;
        # the service call receives the list, as before.)
        # BUG FIX: the session attribute must be accessed as self.s; the bare
        # name 's' was a NameError inside the method body.
        couponlist = self.dlservice.getUseFulCoupons(self.s, data)
        # BUG FIX: the response key is 'useFulCoupons' (see the sample response
        # in the module docstring); 'UseFulCoupons' would raise KeyError.
        self.assertEqual(couponlist['data']['useFulCoupons'][0]['couponEntityId'], self.danluCouponsInfo.couponEntityId4)
        self.assertEqual(couponlist['data']['useFulCoupons'][1]['couponEntityId'], self.danluCouponsInfo.couponEntityId3)
        self.assertEqual(couponlist['data']['useFulCoupons'][2]['couponEntityId'], self.danluCouponsInfo.couponEntityId2)
        self.assertEqual(couponlist['data']['useFulCoupons'][3]['couponEntityId'], self.danluCouponsInfo.couponEntityId1)
|
[
"[email protected]"
] | |
b9bcf064e318743a5c5030ddf2e243fa9c742794
|
8537ecfe2a23cfee7c9f86e2318501f745078d67
|
/Practise_stuff/matplotlib/click_on_point_to_see_timeseries.py
|
d68c313bc04fa4ac8a3c2008391627668a605bd3
|
[] |
no_license
|
oolsson/oo_eclipse
|
91d33501d9ed6c6b3c51bb22b635eb75da88e4e1
|
1828866bc4e1f67b279c5a037e4a6a4439ddb090
|
refs/heads/master
| 2021-01-01T20:17:12.644890 | 2015-11-30T09:49:41 | 2015-11-30T09:49:41 | 23,485,434 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,316 |
py
|
import numpy as np
class PointBrowser:
    """
    Click on a point to select and highlight it -- the data that
    generated the point will be shown in the lower axes. Use the 'n'
    and 'p' keys to browse through the next and previous points

    NOTE(review): relies on module-level globals created in the __main__
    block below (fig, ax, ax2, line, X, xs, ys).
    """
    def __init__(self):
        # Index of the currently selected point.
        self.lastind = 0

        self.text = ax.text(0.05, 0.95, 'selected: none',
                            transform=ax.transAxes, va='top')
        # Yellow halo marker drawn over the selected point.
        self.selected, = ax.plot([xs[0]], [ys[0]], 'o', ms=12, alpha=0.4,
                                 color='yellow', visible=False)

    def onpress(self, event):
        # Keyboard handler: 'n'/'p' step to the next/previous point.
        if self.lastind is None: return
        if event.key not in ('n', 'p'): return
        if event.key=='n': inc = 1
        else: inc = -1

        self.lastind += inc
        # Clamp to the valid point-index range.
        self.lastind = np.clip(self.lastind, 0, len(xs)-1)
        self.update()

    def onpick(self, event):
        # Pick handler: select the clicked point on the scatter line.
        if event.artist!=line: return True

        N = len(event.ind)
        if not N: return True

        # the click locations
        x = event.mouseevent.xdata
        y = event.mouseevent.ydata

        # Among the points within picker tolerance, take the closest one.
        distances = np.hypot(x-xs[event.ind], y-ys[event.ind])
        indmin = distances.argmin()
        dataind = event.ind[indmin]

        self.lastind = dataind
        self.update()

    def update(self):
        # Redraw the lower axes with the selected point's time series and
        # move the highlight marker.
        if self.lastind is None: return

        dataind = self.lastind

        ax2.cla()
        ax2.plot(X[dataind])

        ax2.text(0.05, 0.9, 'mu=%1.3f\nsigma=%1.3f'%(xs[dataind], ys[dataind]),
                 transform=ax2.transAxes, va='top')
        ax2.set_ylim(-0.5, 1.5)
        self.selected.set_visible(True)
        self.selected.set_data(xs[dataind], ys[dataind])

        self.text.set_text('selected: %d'%dataind)
        fig.canvas.draw()
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    # 100 random time series of length 200; each scatter point is one series,
    # positioned by its (mean, std).
    X = np.random.rand(100, 200)
    xs = np.mean(X, axis=1)
    ys = np.std(X, axis=1)

    fig, (ax, ax2) = plt.subplots(2, 1)
    ax.set_title('click on point to plot time series')
    line, = ax.plot(xs, ys, 'o', picker=5)  # 5 points tolerance

    browser = PointBrowser()

    # Wire the browser's handlers into matplotlib's event loop.
    fig.canvas.mpl_connect('pick_event', browser.onpick)
    fig.canvas.mpl_connect('key_press_event', browser.onpress)

    plt.show()
|
[
"[email protected]"
] | |
7851381b34746f3487ce259477ca9681dcb2349a
|
32cfd6a8df9b24059ed7bee0b7bf99b6c0268f6e
|
/framework/seocortex/utils/soupselect_old.py
|
2498bc8d37f106ed475746dca6bdd246a3d6be44
|
[] |
no_license
|
blorenz/seocortex
|
5cd7acb647fbc4908e6045d2a89bdd2ade922434
|
3f1f7e8ac4a12e24e7f2cb58407ce52babfe5cf8
|
refs/heads/master
| 2016-09-05T21:36:01.039128 | 2012-04-23T13:33:46 | 2012-04-23T13:33:46 | 3,951,299 | 0 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,621 |
py
|
# -*-coding:Utf-8 -*
# Copyright 2012 - Peoplze.com <[email protected]>
# Python imports
import re
def attrInNode(node,atr):
    """True if the node carries attribute `atr`.

    `node.attrs` is a list of (key, value) pairs (BeautifulSoup 3 Tag API).
    """
    return any(key == atr for key, _value in node.attrs)
def htmlFind(node,selector,n,defaut=""):
l = list(node.findSelect(selector))
if len(l) > n:
return l[n].text
else:
return defaut
# Matches a bare tag-name token, e.g. "div".
tag_re = re.compile('^[a-z0-9]+$')

# Matches an attribute selector token, e.g. 'a[href^="http"]'.
attribselect_re = re.compile(
    r'^(?P<tag>\w+)?\[(?P<attribute>\w+)(?P<operator>[=~\|\^\$\*]?)' +
    r'=?"?(?P<value>[^\]"]*)"?\]$'
)

# /^(\w+)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
#   \---/  \---/\-------------/    \-------/
#     |      |         |               |
#     |      |         |           The value
#     |      |    ~,|,^,$,* or =
#     |   Attribute
#    Tag
def attribute_checker(operator, attribute, value=''):
    """
    Takes an operator, attribute and optional value; returns a function that
    will return True for elements that match that combination.
    """
    checkers = {
        # exact match
        '=': lambda el: el.get(attribute) == value,
        # attribute includes value as one of a set of space separated tokens
        '~': lambda el: value in el.get(attribute, '').split(),
        # attribute starts with value
        '^': lambda el: el.get(attribute, '').startswith(value),
        # attribute ends with value
        '$': lambda el: el.get(attribute, '').endswith(value),
        # attribute contains value
        '*': lambda el: value in el.get(attribute, ''),
        # attribute is either exactly value or starts with value-
        '|': lambda el: (el.get(attribute, '') == value
                         or el.get(attribute, '').startswith('%s-' % value)),
    }
    # Unknown/empty operator: match on mere attribute presence (BS3 Tag API).
    presence = lambda el: el.has_key(attribute)
    return checkers.get(operator, presence)
def has_attr(soup, attr_name):
    """True if the BS3 tag `soup` has an attribute named `attr_name`."""
    attr_map = soup._getAttrMap()
    return attr_name in attr_map
def select(soup, selector):
    """
    soup should be a BeautifulSoup instance; selector is a CSS selector
    specifying the elements you want to retrieve.

    Supports a space-separated chain of simple selectors (descendant
    combinator only): attribute selectors, #id, .class, *, and bare tags.
    Each token narrows the current context list.
    """
    tokens = selector.split()
    current_context = [soup]
    for token in tokens:
        m = attribselect_re.match(token)
        if m:
            # Attribute selector
            tag, attribute, operator, value = m.groups()
            if not tag:
                tag = True
            checker = attribute_checker(operator, attribute, value)
            found = []
            for context in current_context:
                found.extend([el for el in context.findAll(tag) if checker(el)])
            current_context = found
            continue
        if '#' in token:
            # ID selector
            tag, id = token.split('#', 1)
            if not tag:
                tag = True
            # NOTE(review): only the first element of the current context is
            # searched for the id (ids are unique per document).
            el = current_context[0].find(tag, {'id': id})
            if not el:
                return [] # No match
            current_context = [el]
            continue
        if '.' in token:
            # Class selector
            tag, klass = token.split('.', 1)
            if not tag:
                tag = True
            # Multiple classes ("div.a.b") must all be present on the element.
            classes = set(klass.split('.'))
            found = []
            for context in current_context:
                found.extend(
                    context.findAll(tag,
                        {'class': lambda attr:
                            attr and classes.issubset(attr.split())}
                    )
                )
            current_context = found
            continue
        if token == '*':
            # Star selector
            found = []
            for context in current_context:
                found.extend(context.findAll(True))
            current_context = found
            continue
        # Here we should just have a regular tag
        if not tag_re.match(token):
            return []
        found = []
        for context in current_context:
            found.extend(context.findAll(token))
        current_context = found
    return current_context
def monkeypatch(BeautifulSoupClass=None):
    """
    If you don't explicitly state the class to patch, defaults to the most
    common import location for BeautifulSoup.
    """
    if not BeautifulSoupClass:
        # Patch Tag rather than BeautifulSoup: more convenient in my opinion.
        from BeautifulSoup import Tag as BeautifulSoupClass
    # Install the CSS-select helpers as methods on the class.
    BeautifulSoupClass.findSelect = select
    BeautifulSoupClass.has_attr = has_attr
def unmonkeypatch(BeautifulSoupClass=None):
    """Removes the methods installed by monkeypatch()."""
    if not BeautifulSoupClass:
        from BeautifulSoup import Tag as BeautifulSoupClass
    delattr(BeautifulSoupClass, 'findSelect')
    delattr(BeautifulSoupClass, 'has_attr')
# Monkeypatch on import
monkeypatch()
|
[
"[email protected]"
] | |
e68e5d91b7780d3fad236dfa0ad58ca34d4e4f9e
|
8b3ca44ee3d990233e74655b7131d616094f70c2
|
/experiments/cross_validation/movielens_100K/poisson_gamma_gamma.py
|
cc0781ad2618e0f9db05184959fc4e28140035a0
|
[] |
no_license
|
zshwuhan/BMF_Priors
|
8b8c54271285a72d2085a56a9475c0756f375e67
|
6a600da1c41f1ccde2f2ba99298b40e68fb9910a
|
refs/heads/master
| 2021-05-13T19:10:07.203215 | 2017-12-01T13:30:21 | 2017-12-01T13:30:21 | 116,883,181 | 1 | 0 | null | 2018-01-09T23:36:13 | 2018-01-09T23:36:13 | null |
UTF-8
|
Python
| false | false | 1,609 |
py
|
'''
Run nested cross-validation experiment on the MovieLens 100K dataset, with
Poisson likelihood, Gamma priors, and Gamma hierarchical priors.
'''

# NOTE(review): hard-coded, machine-specific library path; the script only
# runs where this directory exists.
project_location = "/Users/thomasbrouwer/Documents/Projects/libraries/"
import sys
sys.path.append(project_location)

from BMF_Priors.code.models.bmf_poisson_gamma_gamma import BMF_Poisson_Gamma_Gamma
from BMF_Priors.code.cross_validation.nested_matrix_cross_validation import MatrixNestedCrossValidation
from BMF_Priors.data.movielens.load_data import load_movielens_100K


''' Settings BMF model. '''
method = BMF_Poisson_Gamma_Gamma
# R: ratings matrix, M: mask of observed entries.
R, M = load_movielens_100K()
hyperparameters = { 'a':1., 'ap':1., 'bp':1. }
train_config = {
    'iterations' : 200,
    'init' : 'random',
}
predict_config = {
    # Initial draws discarded before prediction (burn-in), then thinning.
    'burn_in' : 180,
    'thinning' : 1,
}


''' Settings nested cross-validation. '''
K_range = [1,2,3]  # latent dimensionalities searched in the inner folds
no_folds = 5
no_threads = 5
parallel = False
folder_results = './results/poisson_gamma_gamma/'
output_file = folder_results+'results.txt'
files_nested_performances = [folder_results+'fold_%s.txt'%(fold+1) for fold in range(no_folds)]


''' Construct the parameter search. '''
parameter_search = [{'K':K, 'hyperparameters':hyperparameters} for K in K_range]


''' Run the cross-validation framework. '''
nested_crossval = MatrixNestedCrossValidation(
    method=method,
    R=R,
    M=M,
    K=no_folds,
    P=no_threads,
    parameter_search=parameter_search,
    train_config=train_config,
    predict_config=predict_config,
    file_performance=output_file,
    files_nested_performances=files_nested_performances,
)
nested_crossval.run(parallel=parallel)
|
[
"[email protected]"
] | |
f8dcb79496b226693eb440a4735a89a8bb445860
|
684b61f3405ed01b4184b222da342bd1533e4b90
|
/shop/migrations/0002_auto_20200406_1505.py
|
17c49a7cd1d4a26822a3030c54e37da8409a58fd
|
[] |
no_license
|
Mubashar-javed/myshop
|
6379d2568e969db9f8dc30354966d4054463959f
|
0248c2cb6e26500b5fd1404dad45b14ebf1092ac
|
refs/heads/master
| 2023-05-26T08:01:09.067626 | 2022-12-08T09:36:10 | 2022-12-08T09:36:10 | 254,266,547 | 0 | 0 | null | 2023-05-23T01:08:14 | 2020-04-09T03:57:41 |
Python
|
UTF-8
|
Python
| false | false | 353 |
py
|
# Generated by Django 2.2.5 on 2020-04-06 10:05
from django.db import migrations
class Migration(migrations.Migration):
    """Renames the misspelled Product field 'cereated' to 'created'."""

    dependencies = [
        ('shop', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='product',
            old_name='cereated',
            new_name='created',
        ),
    ]
|
[
"[email protected]"
] | |
aed11a2c4d13c0d479bf8465197e8185bcd75c06
|
14f0c423c109a75a8cbd10ca8c526f1482e3e385
|
/Python/Django/multuple_apps/apps/blogs/views.py
|
56e65b2b27072046cd5e9fc24cc6c724cda4ae09
|
[] |
no_license
|
thisolivier/dojo-master
|
21fd5d13e25321cce0558cab3b0c0335774e173c
|
9486f6b2af2148a296e2a238eee2b814fe0831fe
|
refs/heads/master
| 2021-01-01T06:47:07.162851 | 2017-09-16T00:54:05 | 2017-09-16T00:54:05 | 97,511,225 | 0 | 0 | null | 2017-09-16T00:42:26 | 2017-07-17T18:51:47 |
Python
|
UTF-8
|
Python
| false | false | 734 |
py
|
from django.shortcuts import render
# Create your views here.
def blog_root(request):
    """Renders the blog index page."""
    # print() call form: identical output under Python 2 for a single
    # argument, and forward-compatible with Python 3.
    print("---> Generating root template")
    return render(request, 'blogs/index.html')
def blog_new(request):
    """Renders the new-blog page."""
    # print() call form works identically on Python 2 and 3 for a single arg.
    print("---> Generating new blog template")
    return render(request, 'blogs/index.html')
def blog_create(request):
    """Renders the create-blog page."""
    # print() call form works identically on Python 2 and 3 for a single arg.
    print("---> Generating create blog template")
    return render(request, 'blogs/index.html')
def blog_num(request, number):
    """Renders the page for blog `number`."""
    # print() call form works identically on Python 2 and 3 for a single arg.
    print("---> Generating create blog number {}".format(number))
    return render(request, 'blogs/index.html')
def blog_modify(request, number, action):
    """Renders the page for applying `action` to blog `number`."""
    # print() call form works identically on Python 2 and 3 for a single arg.
    print("---> Generating {}ing template for blog number {}".format(action, number))
    return render(request, 'blogs/index.html')
|
[
"[email protected]"
] | |
f1010cdff0106ff59ffa22b5b5d3ee835bf5829f
|
fcc88521f63a3c22c81a9242ae3b203f2ea888fd
|
/Python3/0689-Maximum-Sum-of-3-Non-Overlapping-Subarrays/soln-1.py
|
f62c0aa991d5e1a4c2f04b46e6f6e54a54e99d0f
|
[
"MIT"
] |
permissive
|
wyaadarsh/LeetCode-Solutions
|
b5963e3427aa547d485d3a2cb24e6cedc72804fd
|
3719f5cb059eefd66b83eb8ae990652f4b7fd124
|
refs/heads/master
| 2022-12-06T15:50:37.930987 | 2020-08-30T15:49:27 | 2020-08-30T15:49:27 | 291,811,790 | 0 | 1 |
MIT
| 2020-08-31T19:57:35 | 2020-08-31T19:57:34 | null |
UTF-8
|
Python
| false | false | 996 |
py
|
class Solution:
    def maxSumOfThreeSubarrays(self, nums: List[int], k: int) -> List[int]:
        """Starting indices of three non-overlapping length-k subarrays with
        maximal total sum (lexicographically smallest on ties).

        Slides three windows at offsets i, i+k, i+2k in one pass, tracking the
        best first window, the best first pair, and the best triple. O(n) time,
        O(1) extra space.
        """
        w1 = sum(nums[0:k])
        w2 = sum(nums[k:2 * k])
        w3 = sum(nums[2 * k:3 * k])
        best1_sum = w1
        best12_sum = w1 + w2
        best123_sum = w1 + w2 + w3
        best1 = 0
        best12 = [0, k]
        answer = [0, k, 2 * k]
        i1, i2, i3 = 0, k, 2 * k
        while i3 + k < len(nums):
            # Slide each window one step to the right.
            w1 += nums[i1 + k] - nums[i1]
            w2 += nums[i2 + k] - nums[i2]
            w3 += nums[i3 + k] - nums[i3]
            i1 += 1
            i2 += 1
            i3 += 1
            # Strict '>' keeps the earliest (lexicographically smallest) answer.
            if w1 > best1_sum:
                best1_sum, best1 = w1, i1
            if best1_sum + w2 > best12_sum:
                best12_sum = best1_sum + w2
                best12 = [best1, i2]
            if best12_sum + w3 > best123_sum:
                best123_sum = best12_sum + w3
                answer = best12 + [i3]
        return answer
|
[
"[email protected]"
] | |
94458562cd7bf5f91df8d4257fc676713c9cdb93
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Games/Python wow/tests/models/spells/test_buff_schema.py
|
86c88aee46e4d4ca161e2602ae7dee3c807b81df
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null |
UTF-8
|
Python
| false | false | 129 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:e63d9f081f022098267a17e7874b4d2fd1dcffedf91d27fcc4a3820700084f41
size 1940
|
[
"[email protected]"
] | |
6c01e0c87f1231c1a696e1b6cbdbd868b04e2a06
|
011a750fae8ade67f726a9749e05cc4afb8e360d
|
/text_file_analyser/test/main.py
|
141c4ce62d4e23028bccb531ba11cf934d5e6550
|
[] |
no_license
|
MartinCarufel/PycharmProjects
|
c7e50b66a24d4a216b7a217192fcd446f5324d9f
|
aaa6c95b3e2e6525586fb6a03d1c9d484065899b
|
refs/heads/master
| 2023-07-05T22:40:38.650892 | 2023-06-26T13:55:13 | 2023-06-26T13:55:13 | 150,859,642 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 648 |
py
|
from text_file_analyser import Text_File_analyser
import pandas as pd
def main():
    """Split the USB stress-test log on its date markers and print columns 3-4."""
    analyser = Text_File_analyser("usb_stress_testIO-04-000979.log")
    table = analyser.data_spliter("\| Date :", " +")
    frame = pd.DataFrame(table)[[3, 4]]
    print(frame)
def main2():
    """Demo: row-wise maximum of a DataFrame containing NaN values."""
    frame = pd.DataFrame({
        "A": [12, 4, 5, None, 1],
        "B": [7, 2, 54, 3, None],
        "C": [20, 16, 11, 3, 8],
        "D": [14, 3, None, 2, 6],
    })
    print(frame)
    # NaNs are skipped when taking the per-row maximum.
    print(frame.max(axis=1, skipna=True))
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
f0438fb43a911298fba48e71620bc3f5ff15ba8b
|
c16ab2c9c675bdbca43a4603a14106790d9e7da2
|
/lib/gae_mini_profiler/appstats_profiler.py
|
d1e30d4d9a70e975ccedc29a5068b4e0987559b9
|
[
"MIT"
] |
permissive
|
y2bishop2y/microengine
|
7e7e8b5852188fcceb9559f9d8d339bf6257a0d7
|
2322fdce0718a06bdc0332682e8ef9e393f8e7af
|
refs/heads/master
| 2021-01-01T18:38:17.250888 | 2013-03-20T08:21:53 | 2013-03-20T08:22:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,136 |
py
|
"""RPC profiler that uses appstats to track, time, and log all RPC events.
This is just a simple wrapper for appstats with result formatting. See
https://developers.google.com/appengine/docs/python/tools/appstats for more.
"""
import logging
from pprint import pformat
from google.appengine.ext.appstats import recording
from lib.gae_mini_profiler import util, cleanup, unformatter
class Profile(object):
    """Profiler that wraps appstats for programmatic access and reporting."""

    def __init__(self):
        # Configure AppStats output, keeping a high level of request
        # content so we can detect dupe RPCs more accurately
        recording.config.MAX_REPR = 750

        # Each request has its own internal appstats recorder
        self.recorder = None

    def results(self):
        """Return appstats results in a dictionary for template context.

        The dict contains per-RPC call details, per-service totals, a
        duplicate-RPC flag, and the appstats key for this request.
        """

        if not self.recorder:
            # If appstats fails to initialize for any reason, return an empty
            # set of results.
            logging.warn("Missing recorder for appstats profiler.")
            return {
                "calls": [],
                "total_time": 0,
            }

        total_call_count = 0
        total_time = 0
        calls = []
        service_totals_dict = {}
        likely_dupes = False
        end_offset_last = 0

        # Request payloads seen so far; a repeat marks a likely duplicate RPC.
        requests_set = set()

        # Key used by the appstats UI to identify this request's record.
        appstats_key = long(self.recorder.start_timestamp * 1000)

        for trace in self.recorder.traces:
            total_call_count += 1

            total_time += trace.duration_milliseconds()

            # Don't accumulate total RPC time for traces that overlap asynchronously
            if trace.start_offset_milliseconds() < end_offset_last:
                total_time -= (end_offset_last - trace.start_offset_milliseconds())
            end_offset_last = trace.start_offset_milliseconds() + trace.duration_milliseconds()

            # Aggregate per-service stats under the RPC name's prefix
            # (the part before the first dot).
            service_prefix = trace.service_call_name()

            if "." in service_prefix:
                service_prefix = service_prefix[:service_prefix.find(".")]

            if service_prefix not in service_totals_dict:
                service_totals_dict[service_prefix] = {
                    "total_call_count": 0,
                    "total_time": 0,
                    "total_misses": 0,
                }

            service_totals_dict[service_prefix]["total_call_count"] += 1
            service_totals_dict[service_prefix]["total_time"] += trace.duration_milliseconds()

            # Human-readable call stack for the RPC's origin.
            stack_frames_desc = []
            for frame in trace.call_stack_list():
                stack_frames_desc.append("%s:%s %s" %
                        (util.short_rpc_file_fmt(frame.class_or_file_name()),
                            frame.line_number(),
                            frame.function_name()))

            request = trace.request_data_summary()
            response = trace.response_data_summary()

            likely_dupe = request in requests_set
            likely_dupes = likely_dupes or likely_dupe

            requests_set.add(request)

            request_short = request_pretty = None
            response_short = response_pretty = None
            miss = 0
            try:
                request_object = unformatter.unformat(request)
                response_object = unformatter.unformat(response)

                request_short, response_short, miss = cleanup.cleanup(request_object, response_object)

                request_pretty = pformat(request_object)
                response_pretty = pformat(response_object)
            except Exception, e:
                # Prettifying is best-effort; fall back to the raw summaries.
                logging.warning("Prettifying RPC calls failed.\n%s\nRequest: %s\nResponse: %s",
                        e, request, response, exc_info=True)

            service_totals_dict[service_prefix]["total_misses"] += miss

            calls.append({
                "service": trace.service_call_name(),
                "start_offset": util.milliseconds_fmt(trace.start_offset_milliseconds()),
                "total_time": util.milliseconds_fmt(trace.duration_milliseconds()),
                "request": request_pretty or request,
                "response": response_pretty or response,
                "request_short": request_short or cleanup.truncate(request),
                "response_short": response_short or cleanup.truncate(response),
                "stack_frames_desc": stack_frames_desc,
                "likely_dupe": likely_dupe,
            })

        # Sort per-service totals by descending total time.
        service_totals = []
        for service_prefix in service_totals_dict:
            service_totals.append({
                "service_prefix": service_prefix,
                "total_call_count": service_totals_dict[service_prefix]["total_call_count"],
                "total_misses": service_totals_dict[service_prefix]["total_misses"],
                "total_time": util.milliseconds_fmt(service_totals_dict[service_prefix]["total_time"]),
            })
        service_totals = sorted(service_totals, reverse=True, key=lambda service_total: float(service_total["total_time"]))

        return {
            "total_call_count": total_call_count,
            "total_time": util.milliseconds_fmt(total_time),
            "calls": calls,
            "service_totals": service_totals,
            "likely_dupes": likely_dupes,
            "appstats_key": appstats_key,
        }

    def wrap(self, app):
        """Wrap and return a WSGI application with appstats recording enabled.

        Args:
            app: existing WSGI application to be wrapped
        Returns:
            new WSGI application that will run the original app with appstats
            enabled.
        """

        def wrapped_appstats_app(environ, start_response):
            # Use this wrapper to grab the app stats recorder for RequestStats.save()
            if recording.recorder_proxy.has_recorder_for_current_request():
                self.recorder = recording.recorder_proxy.get_for_current_request()
            return app(environ, start_response)

        return recording.appstats_wsgi_middleware(wrapped_appstats_app)
|
[
"[email protected]"
] | |
9471cea9b5d59083fe068b87504590f4027f45eb
|
ad8b30544480ba1e5f5b1cb2dec2aa77a644e8d2
|
/BOJ/1238_파티.py
|
47e4a42bd4b524d433bb52d123cba305548dc8c0
|
[] |
no_license
|
hyunwoojeong123/Algorithm
|
79abc82d944ca60342a7f8b6fc44fac20ac55123
|
0baaf3222fbbec699ffbec5d4cc680067cf293fb
|
refs/heads/master
| 2023-07-10T18:28:51.934005 | 2021-08-18T01:51:23 | 2021-08-18T01:51:23 | 284,403,698 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,098 |
py
|
import sys,heapq
INF = sys.maxsize
input = sys.stdin.readline
def dij(x):
    """Dijkstra from node x over the global adjacency list `a`.

    Returns the list of shortest distances from x to every node (INF when
    unreachable). Uses the module-level `heap`, which is fully drained before
    returning.
    """
    # Initialise every distance to infinity.
    d = [INF]*n
    # Push [0, start] onto the heap.
    heapq.heappush(heap, [0,x])
    # d[start] = 0
    d[x] = 0
    # Repeat until the heap is empty.
    while heap:
        # w, x: distance to the current node, and the current node.
        w,x = heapq.heappop(heap)
        # nw, nx: weight of the edge x -> nx, and a neighbour of x.
        for nw,nx in a[x]:
            # Add w: total distance from the start to nx via x.
            nw += w
            # If this is shorter than the recorded distance,
            if nw < d[nx]:
                # update the distance and push the neighbour onto the heap.
                d[nx] = nw
                heapq.heappush(heap,[nw,nx])
    return d
# BOJ 1238 "Party": n towns, m directed weighted roads, party in town t.
# For each town i, the answer is the longest round trip i -> t -> i.
n, m, t = map(int, input().split())
a = [[] for _ in range(n)]  # a[u] holds [weight, v] for each edge u -> v
heap = []
for _ in range(m):
    x, y, w = map(int, input().split())
    a[x - 1].append([w, y - 1])

# Distances from the party town back home; loop-invariant, so compute once
# (the original recomputed dij(t-1) on every iteration).
back = dij(t - 1)
ans = [0] * n
for i in range(n):
    d = dij(i)
    # Round trip = way to the party (i -> t) plus way home (t -> i).
    # BUG FIX: the original subtracted the return distance (`ans[i] -= d[i]`)
    # instead of adding it.
    ans[i] += d[t - 1]
    ans[i] += back[i]
print(max(ans))
|
[
"[email protected]"
] | |
ecb797705d4380b014ac224de86a2b3ca7fbe0de
|
029b18378b54856f6982cf3a73982b5285c2ff57
|
/assignment1/cs231n/classifiers/linear_classifier.py
|
22b624caa7e1dbd171409817f28da4d614335f49
|
[] |
no_license
|
Allensmile/cs231n_Convolutional-Neural-Networks-for-Visual-Recognition
|
15f07693757a439776e7da22f2ac4e2cf6f78611
|
bbae799b71c533ffb52ff9248ce9c92cfa76be6e
|
refs/heads/cs231n-0821
| 2021-01-01T19:05:11.608175 | 2016-08-22T04:39:20 | 2016-08-22T04:39:20 | 98,504,340 | 1 | 0 | null | 2017-07-27T07:01:01 | 2017-07-27T07:01:01 | null |
UTF-8
|
Python
| false | false | 6,325 |
py
|
import numpy as np
from cs231n.classifiers.linear_svm import *
from cs231n.classifiers.softmax import *
class LinearClassifier(object):
    def __init__(self):
        # Weight matrix of shape (D, C); created/overwritten by train().
        self.W = None
def train(self, X, y, learning_rate=1e-3, reg=1e-5, num_iters=100,batch_size=200, verbose=False):
"""
Train this linear classifier using stochastic gradient descent.
Inputs:
- X: A numpy array of shape (N, D) containing training data; there are N
training samples each of dimension D.
- y: A numpy array of shape (N,) containing training labels; y[i] = c
means that X[i] has label 0 <= c < C for C classes.
- learning_rate: (float) learning rate for optimization.
- reg: (float) regularization strength.
- num_iters: (integer) number of steps to take when optimizing
- batch_size: (integer) number of training examples to use at each step.
- verbose: (boolean) If true, print progress during optimization.
Outputs:
A list containing the value of the loss function at each training iteration.
"""
num_train, dim = X.shape
num_classes = np.max(y) + 1 # assume y takes values 0...K-1 where K is number of classes
#if self.W is None:
# lazily initialize W---->always initialize)
self.W = 0.001 * np.random.randn(dim, num_classes)*np.sqrt(dim/2.0)
# Run stochastic gradient descent to optimize W
loss_history = []
#Try using momentum update
v=0 #init to zero
mu=0.5 #int to 0.5, and increase it later.
for it in xrange(num_iters):
if num_iters%100==0:
mu+=0.05
#if num_iters>=1500:
# learning_rate*=0.7
if mu>=0.99:
mu=0.99
X_batch = None
y_batch = None
#########################################################################
# TODO: #
# Sample batch_size elements from the training data and their #
# corresponding labels to use in this round of gradient descent. #
# Store the data in X_batch and their corresponding labels in #
# y_batch; after sampling X_batch should have shape (dim, batch_size) #
# and y_batch should have shape (batch_size,) #
# #
# Hint: Use np.random.choice to generate indices. Sampling with #
# replacement is faster than sampling without replacement. #
#########################################################################
#pass
#1. get the batch for this iteration.
batch_indices=np.random.choice(num_train,batch_size,replace=True)
X_batch=X[batch_indices] #('X_batch.shape:', (200L, 3073L))
#print("X_batch.shape:",X_batch.shape)
y_batch=y[batch_indices] #('y_batch.shape:', 200)
#print("y_batch.shape:",len(y_batch))
#loss_vectorized, grad_vectorized = svm_loss_vectorized(self.W, X_batch, y_batch, reg)
#self.W+=-learning_rate*grad_vectorized
#########################################################################
# END OF YOUR CODE #
#########################################################################
#2. evaluate loss and gradient
loss, grad = self.loss(X_batch, y_batch, reg)
loss_history.append(loss)
#3. perform parameter update
#########################################################################
# TODO: #
# Update the weights using the gradient and the learning rate. #
#########################################################################
#pass
#self.W-=learning_rate*grad
v=mu*v-learning_rate*grad
self.W+=v
#########################################################################
# END OF YOUR CODE #
#########################################################################
if verbose and it % 100 == 0:
print 'iteration %d / %d: loss %f' % (it, num_iters, loss)
return loss_history
def predict(self, X):
"""
Use the trained weights of this linear classifier to predict labels for
data points.
Inputs:
- X: D x N array of training data. Each column is a D-dimensional point.
Returns:
- y_pred: Predicted labels for the data in X. y_pred is a 1-dimensional
array of length N, and each element is an integer giving the predicted
class.
"""
y_pred = np.zeros(X.shape[1])
###########################################################################
# TODO: #
# Implement this method. Store the predicted labels in y_pred. #
###########################################################################
#pass
scores=X.dot(self.W) #1.get scores
y_pred=np.argmax(scores,axis=1) #2.find the index for highest value in the row
###########################################################################
# END OF YOUR CODE #
###########################################################################
return y_pred
def loss(self, X_batch, y_batch, reg):
"""
Compute the loss function and its derivative.
Subclasses will override this.
Inputs:
- X_batch: A numpy array of shape (N, D) containing a minibatch of N
data points; each point has dimension D.
- y_batch: A numpy array of shape (N,) containing labels for the minibatch.
- reg: (float) regularization strength.
Returns: A tuple containing:
- loss as a single float
- gradient with respect to self.W; an array of the same shape as W
"""
pass
class LinearSVM(LinearClassifier):
    """ A subclass that uses the Multiclass SVM loss function """
    def loss(self, X_batch, y_batch, reg):
        # Pure delegation: the vectorized helper returns (loss, gradient)
        # for the minibatch, as consumed by LinearClassifier.train().
        return svm_loss_vectorized(self.W, X_batch, y_batch, reg)
class Softmax(LinearClassifier):
    """ A subclass that uses the Softmax + Cross-entropy loss function """
    def loss(self, X_batch, y_batch, reg):
        # Pure delegation: the vectorized helper returns (loss, gradient)
        # for the minibatch, as consumed by LinearClassifier.train().
        return softmax_loss_vectorized(self.W, X_batch, y_batch, reg)
|
[
"[email protected]"
] | |
50587e5954677e11ceae53851f78af9e5bcfa727
|
458ff3c3611bb969f96ff3d3e15108fa9ec88316
|
/quiz/migrations/0004_auto_20201209_2057.py
|
9ddaf47129300087df96cc291ab7fda68b428ff2
|
[] |
no_license
|
mayank5044/Navigus
|
8164809d87c5f3112565549229327ea20d090898
|
aa03a99583efe4b7e9e7d1cb4a450e559f36d475
|
refs/heads/master
| 2023-08-11T01:30:21.115338 | 2021-10-09T06:47:24 | 2021-10-09T06:47:24 | 414,919,603 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 662 |
py
|
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: repoints Result.exam at quiz.Course
    # and Result.student at student.Student, both with CASCADE deletes.
    # Keep this file byte-stable — Django tracks applied migrations by it.
    dependencies = [
        ('student', '0002_remove_student_status'),
        ('quiz', '0003_result'),
    ]
    operations = [
        migrations.AlterField(
            model_name='result',
            name='exam',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='quiz.Course'),
        ),
        migrations.AlterField(
            model_name='result',
            name='student',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='student.Student'),
        ),
    ]
|
[
"[email protected]"
] | |
baed34dff5b6291a245a5b0525a858aeba9dc2b8
|
f82757475ea13965581c2147ff57123b361c5d62
|
/gi-stubs/repository/FwupdPlugin/FirmwareClass.py
|
41cfcb07d1b2a17378eab46a1dbbc60611507f68
|
[] |
no_license
|
ttys3/pygobject-stubs
|
9b15d1b473db06f47e5ffba5ad0a31d6d1becb57
|
d0e6e93399212aada4386d2ce80344eb9a31db48
|
refs/heads/master
| 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null |
UTF-8
|
Python
| false | false | 5,303 |
py
|
# encoding: utf-8
# module gi.repository.FwupdPlugin
# from /usr/lib64/girepository-1.0/FwupdPlugin-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.Fwupd as __gi_repository_Fwupd
import gobject as __gobject
# Auto-generated introspection stub (generator 1.147): every method body is
# a placeholder — the real implementations are resolved at runtime from the
# FwupdPlugin-1.0 typelib. Do not implement logic here.
class FirmwareClass(__gi.Struct):
    """
    :Constructors:
    ::
        FirmwareClass()
    """
    def __delattr__(self, *args, **kwargs): # real signature unknown
        """ Implement delattr(self, name). """
        pass
    def __dir__(self, *args, **kwargs): # real signature unknown
        """ Default dir() implementation. """
        pass
    def __eq__(self, *args, **kwargs): # real signature unknown
        """ Return self==value. """
        pass
    def __format__(self, *args, **kwargs): # real signature unknown
        """ Default object formatter. """
        pass
    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass
    def __ge__(self, *args, **kwargs): # real signature unknown
        """ Return self>=value. """
        pass
    def __gt__(self, *args, **kwargs): # real signature unknown
        """ Return self>value. """
        pass
    def __hash__(self, *args, **kwargs): # real signature unknown
        """ Return hash(self). """
        pass
    def __init_subclass__(self, *args, **kwargs): # real signature unknown
        """
        This method is called when a class is subclassed.
        The default implementation does nothing. It may be
        overridden to extend subclasses.
        """
        pass
    def __init__(self): # real signature unknown; restored from __doc__
        pass
    def __le__(self, *args, **kwargs): # real signature unknown
        """ Return self<=value. """
        pass
    def __lt__(self, *args, **kwargs): # real signature unknown
        """ Return self<value. """
        pass
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass
    def __ne__(self, *args, **kwargs): # real signature unknown
        """ Return self!=value. """
        pass
    def __reduce_ex__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass
    def __reduce__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass
    def __repr__(self, *args, **kwargs): # real signature unknown
        """ Return repr(self). """
        pass
    def __setattr__(self, *args, **kwargs): # real signature unknown
        """ Implement setattr(self, name, value). """
        pass
    def __sizeof__(self, *args, **kwargs): # real signature unknown
        """ Size of object in memory, in bytes. """
        pass
    def __str__(self, *args, **kwargs): # real signature unknown
        """ Return str(self). """
        pass
    def __subclasshook__(self, *args, **kwargs): # real signature unknown
        """
        Abstract classes can override this to customize issubclass().
        This is invoked early on by abc.ABCMeta.__subclasscheck__().
        It should return True, False or NotImplemented. If it returns
        NotImplemented, the normal algorithm is used. Otherwise, it
        overrides the normal algorithm (and the outcome is cached).
        """
        pass
    def __weakref__(self, *args, **kwargs): # real signature unknown
        pass
    # The struct fields below are exposed as read/write properties; the
    # lambdas are placeholders for the generated getter/setter/deleter.
    padding = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    parent_class = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    parse = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    tokenize = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    to_string = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    write = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    __class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
    __dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(FirmwareClass), '__module__': 'gi.repository.FwupdPlugin', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'FirmwareClass' objects>, '__weakref__': <attribute '__weakref__' of 'FirmwareClass' objects>, '__doc__': None, 'parent_class': <property object at 0x7feb1afdfd60>, 'parse': <property object at 0x7feb1afdfe50>, 'write': <property object at 0x7feb1afdff40>, 'to_string': <property object at 0x7feb1afe2090>, 'tokenize': <property object at 0x7feb1afe2180>, 'padding': <property object at 0x7feb1afe2270>})"
    __gtype__ = None # (!) real value is '<GType void (4)>'
    __info__ = StructInfo(FirmwareClass)
|
[
"[email protected]"
] | |
944140c7bba8ea526c0edc0595e380ce65ebcc98
|
690e8f0a853c1f27bae688f021e8c27e62ca9613
|
/auth/auth/settings.py
|
9d8a145494269d4ab9cebf00c17bb3722ad9be69
|
[] |
no_license
|
MaksimLion/django-rest-authentication
|
d77c8b59e89c80a9f8c98fb7b038bebb431ffc0e
|
8445354f761d0624a97faa490d8872be5994da5e
|
refs/heads/master
| 2020-05-04T15:16:51.320819 | 2019-04-03T08:22:16 | 2019-04-03T08:22:16 | 179,233,291 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,303 |
py
|
"""
Django settings for auth project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'g(e@i27l0_x85jylbz*$s8ld&+!+td179gwggfrvwope#(dpj9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'authentication'
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication', # <-- And here
],
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'auth.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'auth.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
d1bdc816ef14dbb9698a37af082dbc2f665ef045
|
434b6556038ad326ffaa8584a8a91edf8ad5c037
|
/BST-1/CheckBST-1.py
|
6677cdfe3c533653ccfc336a478ee2090bd1405b
|
[] |
no_license
|
Pranav016/DS-Algo-in-Python
|
60702460ad6639dd3e8a1fdc3caf0821b8e0b4c2
|
5557e371ccdf801d78ba123ca83c0dd47b3bdb3b
|
refs/heads/master
| 2023-01-23T08:29:32.186861 | 2020-11-01T17:14:12 | 2020-11-01T17:14:12 | 284,651,382 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,634 |
py
|
import queue
class BinaryTreeNode:
    """A single binary-tree node: a payload plus two child links."""

    def __init__(self, data):
        self.data = data    # value stored at this node
        self.left = None    # left child (None = absent)
        self.right = None   # right child (None = absent)
def minimumNode(root):
    """Return the smallest value stored in the tree rooted at ``root``.

    An empty tree yields +infinity, the identity element of min().
    (The old magic sentinel 1000000 silently broke for node values
    >= 10**6; float('inf') is correct for any numeric payload.)
    """
    if root is None:
        return float('inf')
    leftMin = minimumNode(root.left)
    rightMin = minimumNode(root.right)
    return min(root.data, leftMin, rightMin)
def maximumNode(root):
    """Return the largest value stored in the tree rooted at ``root``.

    An empty tree yields -infinity, the identity element of max().
    (The old magic sentinel -1000000 silently broke for node values
    <= -10**6; float('-inf') is correct for any numeric payload.)
    """
    if root is None:
        return float('-inf')
    leftMax = maximumNode(root.left)
    rightMax = maximumNode(root.right)
    return max(root.data, leftMax, rightMax)
def isBST(root):
    """Return True if the tree rooted at ``root`` satisfies the BST property.

    A node is valid when every value in its left subtree is strictly
    smaller than it and every value in its right subtree is >= it
    (duplicates are expected on the right, matching the original checks).
    O(n^2) worst case: min/max are recomputed for every node.
    """
    if root is None:
        return True
    # Guard clauses: reject as soon as either subtree violates the bound.
    if root.data <= maximumNode(root.left):
        return False
    if root.data > minimumNode(root.right):
        return False
    return isBST(root.left) and isBST(root.right)
def buildLevelTree(levelorder):
    """Build a binary tree from a level-order listing of values.

    ``-1`` marks an absent child; every present node must be followed by
    entries for both of its children. Returns the root node, or None for
    an empty listing or a listing whose first entry is -1.
    """
    position = 0
    total = len(levelorder)
    if total <= 0 or levelorder[0] == -1:
        return None
    root = BinaryTreeNode(levelorder[position])
    position += 1
    pending = queue.Queue()   # nodes whose children are still unread
    pending.put(root)
    while not pending.empty():
        node = pending.get()
        # Consume the two child entries for this node, left then right.
        for side in ('left', 'right'):
            value = levelorder[position]
            position += 1
            if value != -1:
                child = BinaryTreeNode(value)
                setattr(node, side, child)
                pending.put(child)
    return root
# Main
# Read one line of space-separated integers (level order, -1 = missing
# child), build the tree, and print whether it satisfies the BST property.
levelOrder = [int(i) for i in input().strip().split()]
root = buildLevelTree(levelOrder)
print(isBST(root))
|
[
"[email protected]"
] | |
76f5af84fbd35b8169fa79d19c04247b0d84fd00
|
504c9c2b0d29d946079e11644761ad354fc79715
|
/_build/jupyter_execute/B_資訊設會必修的12堂Python通識課_何敏煌_博碩_2019/ch08.py
|
f2a128622353e50d7e7e751299228a9b37946c13
|
[] |
no_license
|
AaronCHH/jb_pysqlite
|
2b5b79327778705f8a941b0c5628e9eba0f5be2a
|
832a70b936800a380c1da0884eed9f7fa0dc2aee
|
refs/heads/main
| 2023-03-12T23:17:22.534445 | 2021-03-06T15:51:10 | 2021-03-06T15:51:10 | 340,876,347 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,816 |
py
|
# Ch08: working with an SQLite database (teaching script; each section is
# an independent example that opens and closes its own connection).
# Display the student score table
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
rows = conn.execute("select * from score;")
for row in rows:
    for field in row:
        print("{}\t".format(field), end="")
    print()
conn.close()
# Insert one student's scores (values typed in interactively)
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
stuno = input("學號:")
chi = input("國文成績:")
eng = input("英文成績:")
mat = input("數學成績:")
his = input("歷史成績:")
geo = input("地理成績:")
# NOTE(review): user input is interpolated straight into SQL with
# str.format — an injection risk. Prefer parameterized queries:
# conn.execute("insert into score(...) values(?,?,?,?,?,?)", params)
sql_str = "insert into score(stuno, chi, eng, mat, his, geo) values('{}',{},{},{},{},{});".format(
    stuno, chi, eng, mat, his, geo)
conn.execute(sql_str)
conn.commit()
conn.close()
# Insert student profile rows in a loop; entering -1 as the student
# number ends the loop
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
stuno = input("學號:")
while stuno!="-1":
    name = input("姓名:")
    gender = input("性別:")
    clsno = input("班級編號:")
    tel = input("電話:")
    pid = input("家長身份證字號:")
    sql_str = "insert into studata(stuno, name, gender, clsno, tel, pid) values('{}','{}','{}','{}','{}','{}');".format(
        stuno, name, gender, clsno, tel, pid)
    conn.execute(sql_str)
    stuno = input("學號:")
conn.commit()
conn.close()
# Display the student profile table
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
rows = conn.execute("select * from studata;")
for row in rows:
    for field in row:
        print("{}\t".format(field), end="")
    print()
conn.close()
# Display the full score table, with a computed total and average column
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
rows = conn.execute("select stuno, chi, eng, mat, his, geo, chi+eng+mat+his+geo, (chi+eng+mat+his+geo)/5 from score;")
print("學號\t國文\t英文\t數學\t歷史\t地理\t總分\t平均")
for row in rows:
    for field in row:
        print("{}\t".format(field), end="")
    print()
conn.close()
# Display the per-subject class averages
# NOTE(review): aggregates without GROUP BY — SQLite returns a single
# row and the stuno value shown is arbitrary.
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
rows = conn.execute("select stuno, avg(chi), avg(eng), avg(mat), avg(his), avg(geo) from score;")
print("學號\t國文\t英文\t數學\t歷史\t地理")
for row in rows:
    for field in row:
        print("{}\t".format(field), end="")
    print()
conn.close()
# Display scores by student name
# NOTE(review): no join condition — this is a cartesian product of the
# two tables; the INNER JOIN version below is the correct one.
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
rows = conn.execute("select studata.name, score.chi, score.eng from score, studata;")
for row in rows:
    for field in row:
        print("{}\t".format(field), end="")
    print()
conn.close()
# Display scores by student name -- using INNER JOIN
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
rows = conn.execute("select studata.name, score.chi, score.eng from score inner join studata on score.stuno = studata.stuno;")
for row in rows:
    for field in row:
        print("{}\t".format(field), end="")
    print()
conn.close()
# Score editing program: look up one student, show the current row, then
# overwrite it with newly entered values
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
stuno = input("請輸入想要修改成績的學號:")
rows = conn.execute("select stuno, chi, eng, mat, his, geo from score where stuno='{}'".format(stuno))
row = rows.fetchone()
if row is not None:
    print("學號\t國文\t英文\t數學\t歷史\t地理")
    for field in row:
        print("{}\t".format(field), end="")
    print()
    chi = input("國文=")
    eng = input("英文=")
    mat = input("數學=")
    his = input("歷史=")
    geo = input("地理=")
    sql_str = "update score set stuno='{}', chi={}, eng={}, mat={}, his={}, geo={} where stuno='{}';".format(
        stuno, chi, eng, mat, his, geo, stuno)
    conn.execute(sql_str)
    conn.commit()
conn.close()
# Inspect the cursor object returned by Connection.execute
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
rows = conn.execute("select * from score;")
print(type(rows))
print(dir(rows))
print(type(rows.fetchone()))
conn.close()
# Explicit cursor: fetchone() advances one row per call
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
cur = conn.cursor()
cur.execute("select * from score;")
print(type(cur.fetchone()))
print(cur.fetchone())
# fetchmany(n) vs fetchall(): fetchall returns the remaining rows
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
cur = conn.cursor()
cur.execute("select * from score;")
first3_records = cur.fetchmany(3)
all_records = cur.fetchall()
print(first3_records)
print(all_records)
conn.close()
# sqlite3.Row row factory: rows become mapping-like, indexable by column name
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("select * from score;")
rows = cur.fetchall()
print(rows[0].keys())
print(type(rows))
print(type(rows[0]))
print("學號\t國文\t英文")
for row in rows:
    print("{}\t{}\t{}".format(row['stuno'], row['chi'], row['eng']))
|
[
"[email protected]"
] | |
b81f41162f15e29f8b808b8521fb7a1cf808a28c
|
e0045eec29aab56212c00f9293a21eb3b4b9fe53
|
/sale_crm/__manifest__.py
|
2ee01b74f3caf0f9facf83b0f88b9115a413b705
|
[] |
no_license
|
tamam001/ALWAFI_P1
|
a3a9268081b9befc668a5f51c29ce5119434cc21
|
402ea8687c607fbcb5ba762c2020ebc4ee98e705
|
refs/heads/master
| 2020-05-18T08:16:50.583264 | 2019-04-30T14:43:46 | 2019-04-30T14:43:46 | 184,268,686 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 930 |
py
|
# -*- coding: utf-8 -*-
# Part of ALWAFI. See LICENSE file for full copyright and licensing details.
{
'name': 'Opportunity to Quotation',
'version': '1.0',
'category': 'Hidden',
'description': """
This module adds a shortcut on one or several opportunity cases in the CRM.
===========================================================================
This shortcut allows you to generate a sales order based on the selected case.
If different cases are open (a list), it generates one sales order by case.
The case is then closed and linked to the generated sales order.
We suggest you to install this module, if you installed both the sale and the crm
modules.
""",
'depends': ['sale_management', 'crm'],
'data': [
'security/ir.model.access.csv',
'views/partner_views.xml',
'views/sale_order_views.xml',
'views/crm_lead_views.xml',
],
'auto_install': True,
}
|
[
"[email protected]"
] | |
484644bbb880fdcf085f5e5d6641f10a5231a625
|
08bfc8a1f8e44adc624d1f1c6250a3d9635f99de
|
/SDKs/swig/Examples/python/varargs/runme.py
|
fe1e28e881e6fbf9f29462308bf61efca2de0209
|
[] |
no_license
|
Personwithhat/CE_SDKs
|
cd998a2181fcbc9e3de8c58c7cc7b2156ca21d02
|
7afbd2f7767c9c5e95912a1af42b37c24d57f0d4
|
refs/heads/master
| 2020-04-09T22:14:56.917176 | 2019-07-04T00:19:11 | 2019-07-04T00:19:11 | 160,623,495 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 128 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:f2ae65c42f8358298afb53751299f1957fed0218e6a36f16022a63aa74858f95
size 705
|
[
"[email protected]"
] | |
734748a7d00403f32a4378d028e322462aeeabe3
|
09cead98874a64d55b9e5c84b369d3523c890442
|
/py200421_python2/day14_py200606/tuple_1.py
|
c02f948ae526bf8546174abec4408a4458357833
|
[] |
no_license
|
edu-athensoft/stem1401python_student
|
f12b404d749286036a090e941c0268381ce558f8
|
baad017d4cef2994855b008a756758d7b5e119ec
|
refs/heads/master
| 2021-08-29T15:01:45.875136 | 2021-08-24T23:03:51 | 2021-08-24T23:03:51 | 210,029,080 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 854 |
py
|
"""
tuple
read-only list
"""
# create a tuple
my_tuple1 = (1, 2, 3, 4, 5, 6, 7, 8)
print(my_tuple1)
my_tuple2 = ()
print(my_tuple2)
# create a tuple with only one element
my_tuple3 = (1)
print(my_tuple3)
my_tuple3 = ('abc')
print(my_tuple3)
my_tuple3 = 1
my_tuple3 = (1,)
print(my_tuple3)
# create nested tuple
my_tuple4 = (1, 2, 3)
print(my_tuple4)
my_tuple4 = (('a','b'), 2, ('c','d'))
print(my_tuple4)
my_tuple4 = (('a','b'), ('c','d'), ('c','d'))
print(my_tuple4)
# create mix tuple
my_tuple5 = (['a','b'], ('c','d'), ('c','d'))
my_tuple5 = (['a','b'], ([1,2],'d'), ('c','d'))
# compare
# student profile collection
# pre-set scouting path
a = [(), (), ()]
# saving-slot in a game
b = ([], [], [])
# create a tuple by auto-packing
my_tuple = 1,2,'a'
print(my_tuple, type(my_tuple))
# unpacking
x, y, z = my_tuple
print(x)
print(y)
print(z)
|
[
"[email protected]"
] | |
1d74ef8462950a6d0001f53e3884fb6d831e1a36
|
e7729e83f4caa78586a57de7c651b8e705e73305
|
/app/flags/agents/flag_6.py
|
45cf12711db4e7a9ee386fa13e8ae664ea83a475
|
[] |
no_license
|
dekoder/training
|
873674b985a0f2f8d0e3740f3b2004da2e0af02d
|
c7509ae9d13ba1ebbd127aeb4dadcaf88ffd9749
|
refs/heads/master
| 2023-01-01T01:00:56.709768 | 2020-10-29T19:51:19 | 2020-10-29T19:51:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 641 |
py
|
# Flag metadata shown to the operator in the training UI.
name = 'Contact points'
# NOTE(review): "then" in this user-facing text likely should read "than";
# left unchanged because the string may be matched elsewhere.
challenge = 'Deploy a new agent, using a different contact point then your first agent'
extra_info = """If an adversary deploys all of their agents on a host using the same protocol, say HTTP, then when their agent is
detected and shut down, the defenders will likely close access to the C2 over that protocol. Therefore, an adversary
will want multiple agents on a host, each using a different protocol to talk to the C2. """
async def verify(services):
    """Return True once agents are deployed over more than one contact method.

    Pulls every known agent from the data service and checks that the set
    of contact protocols in use contains at least two distinct entries.
    """
    agents = await services.get('data_svc').locate('agents')
    # Set comprehension (idiomatic) instead of set([listcomp]).
    contacts = {agent.contact for agent in agents}
    return len(contacts) > 1
|
[
"[email protected]"
] | |
13eafdf4cca9a65dfa2e6bccb504ab6397013fb7
|
d5292505eb7b8b93eca743eb187a04ea58d6b6a3
|
/venv/Lib/site-packages/networkx/algorithms/operators/unary.py
|
71a6303f16c9db7a764e15fa906e9421b5937b55
|
[
"Unlicense"
] |
permissive
|
waleko/facerecognition
|
9b017b14e0a943cd09844247d67e92f7b6d658fa
|
ea13b121d0b86646571f3a875c614d6bb4038f6a
|
refs/heads/exp
| 2021-06-03T10:57:55.577962 | 2018-09-04T19:45:18 | 2018-09-04T19:45:18 | 131,740,335 | 5 | 1 |
Unlicense
| 2020-01-19T10:45:25 | 2018-05-01T17:10:42 |
Python
|
UTF-8
|
Python
| false | false | 1,646 |
py
|
"""Unary operations on graphs"""
# Copyright (C) 2004-2018 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.utils import not_implemented_for
__author__ = """\n""".join(['Aric Hagberg <[email protected]>',
'Pieter Swart ([email protected])',
'Dan Schult([email protected])'])
__all__ = ['complement', 'reverse']
def complement(G):
    """Return the graph complement of G.

    Parameters
    ----------
    G : graph
       A NetworkX graph

    Returns
    -------
    GC : A new graph.

    Notes
    ------
    Note that complement() does not create self-loops and also
    does not produce parallel edges for MultiGraphs.
    Graph, node, and edge data are not propagated to the new graph.
    """
    result = G.fresh_copy()
    result.add_nodes_from(G)
    # An edge (u, v) belongs to the complement exactly when v is neither
    # u itself nor one of u's neighbours in G.
    missing_edges = ((u, v)
                     for u, neighbours in G.adjacency()
                     for v in G
                     if v not in neighbours
                     if u != v)
    result.add_edges_from(missing_edges)
    return result
def reverse(G, copy=True):
    """Return the reverse directed graph of G.

    Parameters
    ----------
    G : directed graph
       A NetworkX directed graph
    copy : bool
        If True, then a new graph is returned. If False, then the graph is
        reversed in place.

    Returns
    -------
    H : directed graph
        The reversed G.
    """
    # Guard clause: reversal is only defined for directed graphs.
    if not G.is_directed():
        raise nx.NetworkXError("Cannot reverse an undirected graph.")
    return G.reverse(copy=copy)
|
[
"[email protected]"
] | |
3e53ef1658987ecc2bc55594ea180866af5b582c
|
7c8f6edd87cbee33cf998e9d2cc673fdcd39dd5a
|
/bots/Voodtwo/python/voodoo.py
|
149d114051c2baf9989c6c3621aadc1cea98e223
|
[] |
no_license
|
tarehart/RLBotSpikeLeague
|
89ce96417d8e201dcfc2f67ed5c1c81c7941131b
|
311b3753e770cc642fdde87b6d4083db4072af88
|
refs/heads/master
| 2020-07-04T11:45:30.564487 | 2019-08-24T05:31:55 | 2019-08-24T05:31:55 | 202,278,639 | 0 | 3 | null | 2019-08-23T14:31:27 | 2019-08-14T05:09:20 |
Python
|
UTF-8
|
Python
| false | false | 823 |
py
|
from rlbot.agents.base_agent import BOT_CONFIG_AGENT_HEADER
from rlbot.agents.executable_with_socket_agent import ExecutableWithSocketAgent
from rlbot.parsing.custom_config import ConfigHeader, ConfigObject
class Voodoo(ExecutableWithSocketAgent):
    """RLBot agent wrapper that launches an external Java bot and talks to
    it over a local socket."""

    def get_port(self) -> int:
        # Fixed port the Java process is expected to listen on.
        return 19231

    def load_config(self, config_header: ConfigHeader):
        # Path to the java executable comes from this bot's .cfg file.
        self.executable_path = config_header.getpath('java_executable_path')
        self.logger.info("Java executable is configured as {}".format(self.executable_path))

    @staticmethod
    def create_agent_configurations(config: ConfigObject):
        # Declare the extra config key so the framework can parse/validate it.
        params = config.get_header(BOT_CONFIG_AGENT_HEADER)
        params.add_value('java_executable_path', str, default=None,
                         description='Relative path to the executable that runs java.')
|
[
"[email protected]"
] | |
60a604d51abe28c15f4cbe9b135d530edf6eb603
|
f87d1ce970ed414f62b90d79d8cf5a38556da592
|
/repetory_api/migrations/0011_auto_20170609_1056.py
|
670d2d9ff2b9b11106c16fd09dc242ea35f2ab32
|
[] |
no_license
|
zhangxu0307/repertory-rest
|
331d58009c15e014d1a5e39447219817d77b08d9
|
dc48a8e1e484254e1daa0712ffe66a52ec896ea7
|
refs/heads/master
| 2021-07-13T22:30:00.246833 | 2017-10-19T11:27:30 | 2017-10-19T11:27:30 | 107,536,946 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,712 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-09 02:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: moves the per-material detail columns off
    # the Material model and onto MaterialInput (same names, same types).
    # Note: field names 'materailYear' and 'materialPostion' are historic
    # typos in the schema — do not "fix" them here.
    dependencies = [
        ('repetory_api', '0010_auto_20170609_1044'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='material',
            name='materailYear',
        ),
        migrations.RemoveField(
            model_name='material',
            name='materialBand',
        ),
        migrations.RemoveField(
            model_name='material',
            name='materialMark',
        ),
        migrations.RemoveField(
            model_name='material',
            name='materialOriginal',
        ),
        migrations.RemoveField(
            model_name='material',
            name='materialPostion',
        ),
        migrations.RemoveField(
            model_name='material',
            name='materialState',
        ),
        migrations.RemoveField(
            model_name='material',
            name='materialUnit',
        ),
        migrations.AddField(
            model_name='materialinput',
            name='materailYear',
            field=models.DateTimeField(blank=True, null=True, verbose_name='\u6750\u6599\u5e74\u4efd'),
        ),
        migrations.AddField(
            model_name='materialinput',
            name='materialBand',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='\u6750\u6599\u54c1\u724c'),
        ),
        migrations.AddField(
            model_name='materialinput',
            name='materialMark',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='\u6750\u6599\u578b\u53f7'),
        ),
        migrations.AddField(
            model_name='materialinput',
            name='materialOriginal',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='\u6750\u6599\u539f\u4ea7\u5730'),
        ),
        migrations.AddField(
            model_name='materialinput',
            name='materialPostion',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='\u6750\u6599\u4f4d\u7f6e'),
        ),
        migrations.AddField(
            model_name='materialinput',
            name='materialState',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='\u6750\u6599\u72b6\u6001'),
        ),
        migrations.AddField(
            model_name='materialinput',
            name='materialUnit',
            field=models.DecimalField(decimal_places=4, default=0, max_digits=8, verbose_name='\u6750\u6599\u5355\u4f4d\u539f\u503c'),
        ),
    ]
|
[
"[email protected]"
] | |
c1a278d0c191ec9f7a09ffb015bef1cb08eebb82
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startPyquil3029.py
|
4d24e8f4ba438bbba1f8ddf9e36daac828244176
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,914 |
py
|
# qubit number=4
# total number=41
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
    """Build the fixed 4-qubit benchmark circuit (41 generated gates).

    The numbered comments are generator bookkeeping; the gate order is the
    benchmark data itself — do not reorder or simplify.
    """
    prog = Program() # circuit begin
    prog += H(3) # number=19
    prog += CZ(0,3) # number=20
    prog += H(3) # number=21
    prog += CNOT(0,3) # number=23
    prog += X(3) # number=24
    prog += CNOT(0,3) # number=25
    prog += CNOT(0,3) # number=17
    prog += RX(-0.48380526865282825,3) # number=26
    prog += H(1) # number=2
    prog += Y(3) # number=18
    prog += H(2) # number=3
    prog += H(3) # number=4
    prog += Y(3) # number=12
    prog += H(0) # number=5
    prog += H(1) # number=6
    prog += H(2) # number=7
    prog += H(1) # number=34
    prog += CZ(0,1) # number=35
    prog += H(1) # number=36
    prog += CNOT(0,1) # number=31
    prog += CNOT(0,1) # number=38
    prog += X(1) # number=39
    prog += CNOT(0,1) # number=40
    prog += CNOT(0,1) # number=33
    prog += CNOT(0,1) # number=30
    prog += H(3) # number=8
    prog += H(3) # number=37
    prog += H(0) # number=9
    prog += Y(2) # number=10
    prog += X(2) # number=22
    prog += Y(2) # number=11
    prog += X(0) # number=13
    prog += X(0) # number=14
    # circuit end
    return prog
def summrise_results(bitstrings) -> dict:
    """Tally how often each measured bitstring occurs.

    Inputs:
    - bitstrings: iterable of hashable measurement outcomes (e.g. '0101').

    Returns a dict mapping each distinct outcome to its count, in
    first-seen order. (Name kept as-is — callers use this spelling.)
    """
    counts = {}
    for outcome in bitstrings:
        # dict.get with a default replaces the None-check branching.
        counts[outcome] = counts.get(outcome, 0) + 1
    return counts
if __name__ == '__main__':
    # Run the benchmark circuit 1024 times on a 4-qubit QVM and write the
    # distribution of measured bitstrings to a result file.
    prog = make_circuit()
    qvm = get_qc('4q-qvm')
    results = qvm.run_and_measure(prog,1024)
    # Stack per-qubit measurement columns, then join each shot's bits
    # into a single string like '0101'.
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    # NOTE(review): prefer `with open(...)` so the file is closed on error.
    writefile = open("../data/startPyquil3029.csv","w")
    print(summrise_results(bitstrings),file=writefile)
    writefile.close()
|
[
"[email protected]"
] | |
0a5a10fc5960abab4709c50c8d9d9a98632a00ae
|
6189f34eff2831e3e727cd7c5e43bc5b591adffc
|
/alembic/versions/00036_11dbcd6e5ee3_.py
|
23c7b78690e2f34d72284e8b9134eea9c3b21604
|
[
"BSD-3-Clause"
] |
permissive
|
fake-name/ReadableWebProxy
|
24603660b204a9e7965cfdd4a942ff62d7711e27
|
ca2e086818433abc08c014dd06bfd22d4985ea2a
|
refs/heads/master
| 2023-09-04T03:54:50.043051 | 2023-08-26T16:08:46 | 2023-08-26T16:08:46 | 39,611,770 | 207 | 20 |
BSD-3-Clause
| 2023-09-11T15:48:15 | 2015-07-24T04:30:43 |
Python
|
UTF-8
|
Python
| false | false | 1,392 |
py
|
"""empty message
Revision ID: 11dbcd6e5ee3
Revises: 5aa994117f07
Create Date: 2017-09-28 04:06:18.968893
"""
# revision identifiers, used by Alembic.
revision = '11dbcd6e5ee3'        # this migration
down_revision = '5aa994117f07'   # parent revision in the chain
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import make_searchable
import sqlalchemy_utils
# Patch in knowledge of the citext type, so it reflects properly.
from sqlalchemy.dialects.postgresql.base import ischema_names
import citext
import queue
import datetime
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import TSVECTOR
ischema_names['citext'] = citext.CIText
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.execute("UPDATE nu_release_item SET fetch_attempts = 0 WHERE fetch_attempts IS NULL")
op.execute("commit")
op.alter_column('nu_release_item', 'fetch_attempts',
existing_type=sa.INTEGER(),
nullable=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('nu_release_item', 'fetch_attempts',
existing_type=sa.INTEGER(),
nullable=True)
### end Alembic commands ###
|
[
"[email protected]"
] | |
c5d385b41cade2187400881bf390d7ffe5eb5c55
|
bd867af5245366ee0abfd0f659fcb42170fff8ca
|
/hackerRank/algorithms/DiagonalDifference/diagonal_difference.py
|
954cf6bd5cfc0ee3735dcd2733472402344f7d21
|
[] |
no_license
|
kruart/coding_challenges
|
04736a6b66da813fd973e7a57aa084bbdab31183
|
395ae60ab392e49bb5bc2f0a4eef1dfd232899bb
|
refs/heads/master
| 2021-06-16T08:51:21.815334 | 2019-11-07T08:39:13 | 2019-11-07T08:39:13 | 153,890,770 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 503 |
py
|
# https://www.hackerrank.com/challenges/diagonal-difference/problem
def diagonal_difference(arr):
return abs(sum([arr[i][i] - arr[i][len(arr)-i-1] for i in range(len(arr))]))
def main():
matrix1 = [
[1, 2, 3],
[4, 5, 6],
[9, 8, 9]
]
matrix2 = [
[11, 2, 4],
[4, 5, 6],
[10, 8, -12]
]
print(diagonal_difference(matrix1)) # 15 - 17 = 2
print(diagonal_difference(matrix2)) # 4 - 19 = 15
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
a82c76f942927a67392aa0710e1f1969930ee6cf
|
bbf025a5f8596e5513bd723dc78aa36c46e2c51b
|
/dfs + tree/graph.py
|
66496a7005f463b2e1716261d4179eac0bb238f2
|
[] |
no_license
|
AlanFermat/leetcode
|
6209bb5cf2d1b19e3fe7b619e1230f75bb0152ab
|
cacba4abaca9c4bad8e8d12526336115067dc6a0
|
refs/heads/master
| 2021-07-11T04:00:00.594820 | 2020-06-22T21:31:02 | 2020-06-22T21:31:02 | 142,341,558 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 994 |
py
|
class Graph:
def __init__(self,mapping={}):
'''
Constructs a new empty graph.
'''
self.graph = mapping
def nodes(self):
'''
Returns a list of all nodes in the graph.
'''
return self.graph.keys()
def get_neighbors(self, node):
'''
Given a particular node, returns a list of all neighbors in the graph.
'''
return self.graph[node]
def add_node(self, node):
'''
Adds the given node to the graph.
'''
self.graph[node] = set()
def add_edge(self, node1, node2):
'''
Adds an edge between the given pair of nodes, adding the nodes themselves first if they are not already in the graph.
'''
if not node1 in self.graph.keys():
self.add_node(node1)
if not node2 in self.graph.keys():
self.add_node(node2)
self.graph[node1].add(node2)
self.graph[node2].add(node1)
|
[
"[email protected]"
] | |
f2bfc11338590eec04ff10e1911a56f28c3461f0
|
e34cbf5fce48f661d08221c095750240dbd88caf
|
/python/day06/re_module.py
|
edd0ec1139439c775c119d49c71c7b07ae65d1f5
|
[] |
no_license
|
willianflasky/growup
|
2f994b815b636e2582594375e90dbcb2aa37288e
|
1db031a901e25bbe13f2d0db767cd28c76ac47f5
|
refs/heads/master
| 2023-01-04T13:13:14.191504 | 2020-01-12T08:11:41 | 2020-01-12T08:11:41 | 48,899,304 | 2 | 0 | null | 2022-12-26T19:46:22 | 2016-01-02T05:04:39 |
C
|
UTF-8
|
Python
| false | false | 612 |
py
|
#!/usr/bin/env python
# -*-coding:utf8-*-
# __author__ = "willian"
import re
# 从头匹配,很少使用
re.match("\d+", "341221")
# 匹配一次
re.search("\d+", "341221")
# 匹配多次
re.findall("\d+", "341221")
# 以逗号分割
re.split(",", "341,221")
# 匹配到进行替换,默认是替代所有,count指定次数.
re.sub("\d{4}", "1995", "1399,2017", count=1)
# re.I (忽略大小写)
# print(re.search("[a-z]", "Alex", flags=re.I))
# re.M (匹配多行)
# print(re.search("^is", "my name\nis alex", flags=re.M))
# re.S (多行匹配在一起)
# print(re.search(".+", "my \nname", flags=re.S))
|
[
"[email protected]"
] | |
276f494e824843392c3efb25c438e23b280c6dbd
|
0754e2e7aa1ffb90b54d563ce5a9317e41cfebf9
|
/ml/m03_xor.py
|
2f5fac7cee0e1b1116a7a60ebc02f9efee5e76ae
|
[] |
no_license
|
ChaeMyungSeock/Study
|
62dcf4b13696b1f483c816af576ea8883c57e531
|
6f726a6ecb43387e4a3b9d068a9c491b115c74c0
|
refs/heads/master
| 2023-01-24T20:59:52.053394 | 2020-12-07T14:54:34 | 2020-12-07T14:54:34 | 263,255,793 | 2 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 538 |
py
|
from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score
from sklearn import svm
# 1. 데이터
x_data = [[0, 0], [1,0], [0,1], [1,1]]
y_data = [0, 1, 1, 0]
# 2. 모델
# 모델은 한줄.. 파라미터값으로 늘어남
model = LinearSVC()
# 3. 훈련
model.fit(x_data, y_data)
# 4. 평가 예측
x_test = [[0,0], [1,0], [0,1], [1,1]]
y_predict = model.predict(x_test)
acc = accuracy_score([0,1,1,0], y_predict)
print(x_test, "의 예측 결과 : ", y_predict)
print("acc = ", acc)
#
|
[
"[email protected]"
] | |
353fbe7250bf1beac4646624a021763b5c94b92a
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Projects/Learn/PyCharm/Algorithmic Toolbox/Algorithmic Warm Up/Last Digit of the Sum of Fibonacci Numbers/last_digit_of_the_sum_of_fibonacci_numbers_unit_tests.py
|
bb3384e3158b2445f6adca669ed4c4fac09f64be
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null |
UTF-8
|
Python
| false | false | 128 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:a6a99b9bfea384a8802695a7f6eafeab6ae6e1cd091ebf62c01e6e6c0ecac93e
size 662
|
[
"[email protected]"
] | |
7c393120ee51e757a0b0c2bc246dc2a4c934dc23
|
08706df7e3712ebec7afd2d2f8f964ae9d485386
|
/server/patients/migrations/0016_attribute_resource.py
|
3c95316f5e9d660ee4b386204d0e49c148dcc89e
|
[] |
no_license
|
nickdotreid/take-on-transplant
|
9129c9ab7c1206291fc1ca616c18c44cd7519587
|
bf901b987121093787383f3d3726f87dddf4d5fd
|
refs/heads/master
| 2023-08-27T06:14:54.521168 | 2021-11-02T21:41:04 | 2021-11-02T21:41:04 | 298,403,103 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 585 |
py
|
# Generated by Django 3.1.1 on 2020-11-24 02:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('resources', '0006_auto_20201120_1722'),
('patients', '0015_issue_posttransplantissue_pretransplantissue'),
]
operations = [
migrations.AddField(
model_name='attribute',
name='resource',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='resources.resource'),
),
]
|
[
"[email protected]"
] | |
0598cc55bb3cc9cd48235f6dee023526aede8599
|
a00ed711e3e08b50ad6e91cc07a2cddc4a1de5ea
|
/airflow/migrations/versions/0075_2_0_0_add_description_field_to_connection.py
|
4c3f5835dcbfdf9b443396cbcceb764f421fbf89
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
ishiis/airflow
|
4305794e36b611d01f49e3f2401be3dc49782670
|
292440d54f4db84aaf0c5a98cf5fcf34303f2fa8
|
refs/heads/master
| 2022-07-30T00:51:28.806940 | 2022-07-14T12:07:11 | 2022-07-14T12:07:11 | 209,801,072 | 1 | 0 |
Apache-2.0
| 2019-09-20T13:47:26 | 2019-09-20T13:47:26 | null |
UTF-8
|
Python
| false | false | 2,008 |
py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add description field to ``connection`` table
Revision ID: 61ec73d9401f
Revises: 2c6edca13270
Create Date: 2020-09-10 14:56:30.279248
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '61ec73d9401f'
down_revision = '2c6edca13270'
branch_labels = None
depends_on = None
airflow_version = '2.0.0'
def upgrade():
"""Apply Add description field to ``connection`` table"""
conn = op.get_bind()
with op.batch_alter_table('connection') as batch_op:
if conn.dialect.name == "mysql":
# Handles case where on mysql with utf8mb4 this would exceed the size of row
# We have to set text type in this migration even if originally it was string
# This is permanently fixed in the follow-up migration 64a7d6477aae
batch_op.add_column(sa.Column('description', sa.Text(length=5000), nullable=True))
else:
batch_op.add_column(sa.Column('description', sa.String(length=5000), nullable=True))
def downgrade():
"""Unapply Add description field to ``connection`` table"""
with op.batch_alter_table('connection', schema=None) as batch_op:
batch_op.drop_column('description')
|
[
"[email protected]"
] | |
c997ae3f2e974662ca89bdc82bccbd2658d4404b
|
73f7cc0e71bfd38d3bfe97367324f1e7a5d8b451
|
/engine_code/gapi/modules/proxy/cloud/parse.py
|
0e1d8a64f87ac9893d254692c67c63c5b528386c
|
[] |
no_license
|
cash2one/my-test
|
ccc0ae860f936262a601c1b579d3c85196b562f9
|
8bd23f5963f4dc7398b7670e28768a3533bd5d14
|
refs/heads/master
| 2021-01-18T03:20:30.889045 | 2017-01-19T02:52:02 | 2017-01-19T02:52:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,357 |
py
|
#!/usr/bin/python
# -*- coding=utf-8 -*-
from xml.etree.ElementTree import ElementTree,Element
def read_xml(in_path):
'''读取并解析xml文件
in_path: xml路径
return: ElementTree'''
tree = ElementTree()
tree.parse(in_path)
print tree.parse(in_path)
return tree
def write_xml(tree, out_path):
'''将xml文件写出
tree: xml树
out_path: 写出路径'''
tree.write(out_path,encoding="utf-8")
print '.....'
def if_match(node, kv_map):
'''判断某个节点是否包含所有传入参数属性
node: 节点
kv_map: 属性及属性值组成的map'''
for key in kv_map:
if node.get(key) != kv_map.get(key):
return False
return True
#---------------search -----
def find_nodes(tree, path):
'''查找某个路径匹配的所有节点
tree: xml树
path: 节点路径'''
return tree.findall(path)
def get_node_by_keyvalue(nodelist, kv_map):
'''根据属性及属性值定位符合的节点,返回节点
nodelist: 节点列表
kv_map: 匹配属性及属性值map'''
result_nodes = []
for node in nodelist:
if if_match(node, kv_map):
result_nodes.append(node)
return result_nodes
#---------------change -----
def change_node_properties(nodelist, kv_map, is_delete=False):
'''修改/增加 /删除 节点的属性及属性值
nodelist: 节点列表
kv_map:属性及属性值map'''
for node in nodelist:
for key in kv_map:
if is_delete:
if key in node.attrib:
del node.attrib[key]
else:
node.set(key, kv_map.get(key))
def change_node_text(nodelist, text, is_add=False, is_delete=False):
'''改变/增加/删除一个节点的文本
nodelist:节点列表
text : 更新后的文本'''
for node in nodelist:
if is_add:
node.text += text
elif is_delete:
node.text = ""
else:
node.text = text
def create_node(tag, property_map, content):
'''新造一个节点
tag:节点标签
property_map:属性及属性值map
content: 节点闭合标签里的文本内容
return 新节点'''
element = Element(tag, property_map)
element.text = content
return element
def add_child_node(nodelist, element):
'''给一个节点添加子节点
nodelist: 节点列表
element: 子节点'''
for node in nodelist:
node.append(element)
def del_node_by_tagkeyvalue(nodelist, tag, kv_map):
'''同过属性及属性值定位一个节点,并删除之
nodelist: 父节点列表
tag:子节点标签
kv_map: 属性及属性值列表'''
for parent_node in nodelist:
children = parent_node.getchildren()
for child in children:
if child.tag == tag and if_match(child, kv_map):
parent_node.remove(child)
#if __name__ == "__main__":
#
# #1. 读取xml文件
# tree = read_xml("./test.xml")
# print 'tree',tree
#
# #2. 属性修改
# #A. 找到父节点
# nodes = find_nodes(tree, "processers/processer")
# #B. 通过属性准确定位子节点
# result_nodes = get_node_by_keyvalue(nodes, {"name":"BProcesser"})
# #C. 修改节点属性
# change_node_properties(result_nodes, {"age": "1"})
# #D. 删除节点属性
# change_node_properties(result_nodes, {"value":""}, True)
#
# #3. 节点修改
# #A.新建节点
# a = create_node("person", {"age":"15","money":"200000"}, "this is the firest content")
# #B.插入到父节点之下
# add_child_node(result_nodes, a)
#
# #4. 删除节点
# #定位父节点
# del_parent_nodes = find_nodes(tree, "processers/services/service")
# #准确定位子节点并删除之
# target_del_node = del_node_by_tagkeyvalue(del_parent_nodes, "chain", {"sequency" : "chain1"})
#
# #5. 修改节点文本
# #定位节点
# text_nodes = get_node_by_keyvalue(find_nodes(tree, "processers/services/service/chain"), {"sequency":"chain3"})
# change_node_text(text_nodes, "new text")
#
# #6. 输出到结果文件
# write_xml(tree, "./out.xml")
|
[
"[email protected]"
] | |
bfcfe9c39e88787a47af7b24c492c7cb2ba75116
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03150/s056018673.py
|
ba3699fc1ecf9d7f7a828e88f30db87b5e18b4da
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 159 |
py
|
S = input()
ans = "NO"
for i in range(len(S)):
for j in range(len(S)):
if S[0:i] + S[i+j:len(S)] == "keyence":
print("YES")
exit()
print(ans)
|
[
"[email protected]"
] | |
1edcceffcfbf8947bb55c85896d44b45eddc8739
|
673e829dda9583c8dd2ac8d958ba1dc304bffeaf
|
/data/multilingual/Latn.HNS/Serif_16/pdf_to_json_test_Latn.HNS_Serif_16.py
|
14b2d82b21a61c2d50f3845e482493f91f58415d
|
[
"BSD-3-Clause"
] |
permissive
|
antoinecarme/pdf_to_json_tests
|
58bab9f6ba263531e69f793233ddc4d33b783b7e
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
refs/heads/master
| 2021-01-26T08:41:47.327804 | 2020-02-27T15:54:48 | 2020-02-27T15:54:48 | 243,359,934 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 305 |
py
|
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.HNS/Serif_16/udhr_Latn.HNS_Serif_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
[
"[email protected]"
] | |
ae5f27b58b42509c2fb6f82e2e426f521420b5dd
|
d87f6d9e769709def3efcf30230cd8bf6ac2cef7
|
/WWTest/autotest/config/xkz/youyanyace/globalconfig/globalConfig.py
|
e6cc20a18999b112dc5f12dade19633d8c3165fc
|
[] |
no_license
|
wawj901124/centos8xitong
|
876dcc45b895871119404ad1899ca59ab5dd90b6
|
81fc0d1151e3172ceec2093b035d2cd921e1a433
|
refs/heads/master
| 2023-02-23T22:33:22.314433 | 2021-01-31T01:54:35 | 2021-01-31T01:54:35 | 290,476,399 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 335 |
py
|
class GlobalConfig(object):
ISONLINE = False
ONLINE_WEB_YUMING= ""
ONLINE_LOGIN_ACCOUNT = ""
ONLINE_LOGIN_PASSWORD = ""
TEST_WEB_YUMING = "http://111.207.18.22:22044/"
TEST_LOGIN_ACCOUNT = "admin"
TEST_LOGIN_PASSWORD = "admin123A"
COOKIE_FILE_NAME = "youyanyacelogincookie.json"
gc = GlobalConfig()
|
[
"wawj900805"
] |
wawj900805
|
8fad67f8ce8ce001bfb436e710258ff19d7ff81a
|
6849f09504c1b9e7e6b4bdc2a924f84ec98ec432
|
/webapp/manage.py
|
62c14e20c068799663d30d3c0e974d9a606680f0
|
[
"Apache-2.0"
] |
permissive
|
likit/lab-instrument-booking-app
|
a1c9d16635b8cff3511901d5510560349e8e5911
|
c21b42342376dc54fdd11a7f87bc7609e6204020
|
refs/heads/master
| 2021-01-02T09:14:33.291562 | 2015-06-28T14:57:39 | 2015-06-28T14:57:39 | 37,254,301 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,377 |
py
|
#!/usr/bin/env python
import os
from app import create_app, mongo
from flask.ext.script import Manager, Shell
# from flask.ext.migrate import Migrate, MigrateCommand
from werkzeug.security import generate_password_hash
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
# migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=mongo.db,)
@manager.command
def test():
"""Run the unit tests"""
import unittest
tests = unittest.TestLoader().discover('.')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.command
def initdb():
"""Init the database"""
mongo.db.drop_collection('users')
password = generate_password_hash('testpass')
user = {
'name': 'Foo',
'lastname': 'Jiang',
'email': '[email protected]',
'password': password,
'pi_email': '[email protected]',
'status': 'undergrad',
}
# password = generate_password_hash('testpass')
# admin = {
# 'email': '[email protected]',
# 'password': password,
# }
# mongo.db.admins.insert(admin, safe=True)
mongo.db.users.insert(user, safe=True)
manager.add_command('shell', Shell(make_context=make_shell_context))
# manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
|
[
"[email protected]"
] | |
3f3eebddf1980d557d39e2eef82f0f178cb64734
|
2990b0841b63f300a722107933c01c7237a7976b
|
/all_xuef/code/sicp_code_python/2.2/exer2_36.py
|
45876f0bdd6ff5e412e92460f44c40c00c5394aa
|
[] |
no_license
|
xuefengCrown/Files_01_xuef
|
8ede04751689e0495e3691fc5d8682da4d382b4d
|
677329b0189149cb07e7ba934612ad2b3e38ae35
|
refs/heads/master
| 2021-05-15T04:34:49.936001 | 2019-01-23T11:50:54 | 2019-01-23T11:50:54 | 118,802,861 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 802 |
py
|
"""
exer2.36 accumulate_n
((1 2 3) (4 5 6) (7 8 9) (10 11 12))-->(22 26 30)
"""
import exer2_33 as funcs
import accumulate as accu
import operator as oper
def accumulate_n(op, init, seqs):
# 每个序列等长度,所以如果第一个处理完了,意味着都处理完了
if len(seqs[0])==0: return []
return funcs._append([accu.accumulate(op,
init,
list(map(lambda seq:seq[0], seqs)))],
accumulate_n(op,
init,
list(map(lambda seq:seq[1:], seqs))))
def test():
seqs = [[1,2,3],[4,5,6],[7,8,9],[10,11,12]]
print(accumulate_n(oper.add, 0, seqs))
if __name__ == '__main__':
test()
|
[
"[email protected]"
] | |
2f2897da3ab199c97a2904a7bc4488f42042c775
|
acd41dc7e684eb2e58b6bef2b3e86950b8064945
|
/res/packages/scripts/scripts/client/bwobsolete_helpers/PyGUI/FocusManager.py
|
d63a96b2d06f8769fe8fa7654f13ed3704920427
|
[] |
no_license
|
webiumsk/WoT-0.9.18.0
|
e07acd08b33bfe7c73c910f5cb2a054a58a9beea
|
89979c1ad547f1a1bbb2189f5ee3b10685e9a216
|
refs/heads/master
| 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null |
WINDOWS-1250
|
Python
| false | false | 1,004 |
py
|
# 2017.05.04 15:20:45 Střední Evropa (letní čas)
# Embedded file name: scripts/client/bwobsolete_helpers/PyGUI/FocusManager.py
_focusedComponent = None
def getFocusedComponent():
global _focusedComponent
return _focusedComponent
def setFocusedComponent(newFocus):
global _focusedComponent
if newFocus != _focusedComponent:
if _focusedComponent is not None:
_focusedComponent.focus = False
_focusedComponent = newFocus
if newFocus is not None:
newFocus.focus = True
return
def isFocusedComponent(component):
if _focusedComponent is None or component is None:
return _focusedComponent is component
else:
return _focusedComponent.__str__() == component.__str__()
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\bwobsolete_helpers\PyGUI\FocusManager.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:20:45 Střední Evropa (letní čas)
|
[
"[email protected]"
] | |
77cc7c9dea37d25900a3ef81b6fe8e5c4ac325d8
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02259/s333912873.py
|
70af1be9d0001017dd4c907d0d466616f478bb16
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 439 |
py
|
def bubbles(N):
count = 0
for i in range(len(N)):
for j in range(len(N)-1, i, -1):
if N[j] < N[j-1]:
N[j], N[j-1] = N[j-1], N[j]
count += 1
c = 1
for i in N:
print(i, end='')
if c < len(N):
print(' ', end='')
c += 1
print('')
return count
n = int(input())
numbers = list(map(int, input().split()))
print(bubbles(numbers))
|
[
"[email protected]"
] | |
5dfd1f2fa0a20f7374881feaa573ca57dd325796
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_040/ch152_2020_04_13_20_50_06_154418.py
|
4e37d65e1c26223db4521858483a177d0b8585da
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 230 |
py
|
def verifica_preco(x,y,z):
dic1 = {}
dic2 = {}
for x, cor in y.items():
dic1[x] = cor
for cor2, valor in z.items():
dic2[cor2] = valor
if cor == cor2:
return valor
|
[
"[email protected]"
] | |
b9169e937fabc228e29384360ef65944f5973688
|
1d87b6e7cd7879fefeaa8f475045de1cc1bc2bf5
|
/podder_task_foundation/logging/log_setting.py
|
db6d2ddd99e112b75acd67189097e92a65cda131
|
[] |
no_license
|
nagisa-sakamoto/podder-task-foundation
|
2ecb24e07bbfcc1121661fb7d9e7005faf9093e0
|
8de453bf8f89d5ddcb8e82d394f73f3a8f715329
|
refs/heads/main
| 2022-12-30T01:10:23.051183 | 2020-10-20T08:13:55 | 2020-10-20T08:13:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,068 |
py
|
import logging
import os
from typing import Any, Dict
from podder_task_foundation.config import Config
class LogSetting:
TASK_NAME_PATH = 'task_name.ini'
DEFAULT_FORMAT = '[%(asctime)s.%(msecs)03d] %(levelname)s - %(message)s'
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
_log_setting = None
def __init__(self, mode: str, config: Config):
self._mode = mode
self._config = config
def load(self):
if LogSetting._log_setting is None:
LogSetting._log_setting = self._load_log_yml()
return LogSetting._log_setting
def _get_config(self, key: str, default: Any) -> Any:
value = self._config.get("log." + key)
if value is not None:
return value
value = self._config.get("pipeline." + key)
if value is not None:
return value
return default
def _load_log_yml(self) -> Dict:
if os.path.exists(self.TASK_NAME_PATH):
with open(self.TASK_NAME_PATH, 'r') as stream:
task_name = stream.read()
else:
task_name = self._get_config('app.name', '')
settings = {
'task_name': task_name,
'default_log_format': self.DEFAULT_FORMAT,
'date_format': self.DATE_FORMAT,
'task_log_format': self._get_config('task_log_format', self.DEFAULT_FORMAT),
'server_log_format': self._get_config('server_log_format', self.DEFAULT_FORMAT),
'color_task_log_format': self._get_config('color_task_log_format', self.DEFAULT_FORMAT),
'color_server_log_format': self._get_config('color_server_log_format',
self.DEFAULT_FORMAT),
'task_log_level': self._get_config('task_log_level', logging.DEBUG),
'server_log_level': self._get_config('server_log_level', logging.DEBUG),
'log_colors': self._get_config('log_colors', {}),
'secondary_log_colors': self._get_config('secondary_log_colors', {}),
}
return settings
|
[
"[email protected]"
] | |
a500d0d54970ec25831ee58b453f03daf5f02059
|
306baa2ad596e3962e427d587e7b0d4175a1e48e
|
/configs/ttfnetv3/ttfv3net_r34_0114_3l_128_48_s16twice_basicup_aug_10x.py
|
847551575ebcdb8c878b17ac7b992f8214941afd
|
[
"Apache-2.0"
] |
permissive
|
mrsempress/mmdetection
|
9c7ed7ed0c9f1d6200f79a2ab14fc0c8fe32c18a
|
cb650560c97a2fe56a9b369a1abc8ec17e06583a
|
refs/heads/master
| 2022-04-24T04:34:30.959082 | 2020-04-26T07:52:23 | 2020-04-26T07:52:23 | 258,957,856 | 0 | 0 |
Apache-2.0
| 2020-04-26T06:33:32 | 2020-04-26T06:33:32 | null |
UTF-8
|
Python
| false | false | 4,154 |
py
|
# model settings
model = dict(
type='TTFNet',
pretrained='modelzoo://resnet34',
backbone=dict(
type='ResNet',
depth=34,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_eval=False,
style='pytorch'),
neck=None,
bbox_head=dict(
type='TTFv3Head',
inplanes=(64, 128, 256, 512),
planes=(256, 128, 64),
down_ratio=(16, 8, 4),
hm_head_channels=((128, 128), (128, 128), (64, 64)),
wh_head_channels=((32, 32), (32, 32), (32, 32)),
num_classes=81,
shortcut_cfg=(1, 2, 3),
s16_shortcut_twice=True,
wh_scale_factor=(8., 8., 8.),
alpha=0.6,
beta=0.6,
hm_weight=(1.4, 1.4, 1.),
wh_weight=(7., 7., 5.),
length_range=((128, 512), (48, 128), (1, 48)),
train_branch=(True, True, True),
inf_branch=(True, True, True),
use_simple_nms=True,
fast_nms=False,
up_conv_cfg=dict(type='BasicBlock'),
max_objs=128,
conv_cfg=None,
norm_cfg=dict(type='BN')))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(debug=False)
test_cfg = dict(score_thr=0.01, max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=16,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.002,
momentum=0.9,
weight_decay=0.0004,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 5,
step=[90, 110])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_in_n_epoch=[63, 90])
# yapf:disable
log_config = dict(interval=20)
# yapf:enable
# runtime settings
total_epochs = 120
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/ttfv3net_r34_10x'
load_from = 'work_dirs/2001/0215_ttfv334_0114_3l_128_48_s16twice_basicup2_aug_10x/work_dirs/ttfv3net_r34_10x_0217_1444/epoch_120_iter_127630.pth'
resume_from = None
workflow = [('train', 1)]
|
[
"[email protected]"
] | |
424604fc081fe31949ac6a2ea5e3618af401701a
|
b6e34dec0831f43d442e89c64f521f77bb2438b2
|
/fbta/fbta_sequence.py
|
e2b01b7dfc4e19723ab1165e36d73b524e70cf67
|
[] |
no_license
|
kandation/FBTAFast
|
a1a38e09d5964915d46492f84f8fa0fead43185c
|
505a2f232ef7ef9b6fc153357fb4eec5480cd92a
|
refs/heads/master
| 2022-12-31T02:43:09.339384 | 2020-04-22T19:13:54 | 2020-04-22T19:13:54 | 212,693,322 | 0 | 0 | null | 2020-04-22T19:13:56 | 2019-10-03T22:34:15 |
HTML
|
UTF-8
|
Python
| false | false | 4,209 |
py
|
import time
from pprint import pprint
from fbta_04_activity_to_card import FBTAActivityToCardsNew
from fbta_05_cards_download_manager import FBTACardsDownloadManager
from fbta_02_clusters import FBTAClusterInfo
from fbta_06_photos_download_manager import FBTAPhotosDownloadManager
from fbta_07_dataft import FBTADataft
from fbta_120_album_count_manager import FBTAAlbumCountManager
from fbta_configs import FBTAConfigs
from fbta_03_history_download_manager import FBTAHistoryDownloadManager
from fbta_mkdir import FBTAMkdir
from fbta_node_master import FBTANodeMaster
from fbta_sequence_func import FBTASequenceFunction
from fbta_settings import FBTASettings
from fbta_01_yearbox import FBTAYearBox
class FBTASequence(FBTASequenceFunction):
def __init__(self, setting: FBTASettings, configs: FBTAConfigs):
FBTASequenceFunction.__init__(self, setting, configs)
self.__node_master: FBTANodeMaster = FBTANodeMaster.NONE
self.__node_yearbox = None
self.__node_cluster_info: FBTAClusterInfo = None
def start(self):
self._warnningTimeOptimize()
self.__px0_initDirectory()
self.__p00_generateMasterNode(0)
self._showFinishedProcessEndNotify(0)
self.__p01_processYearBox(1)
self._showFinishedProcessEndNotify(1)
self.__p02_processsClustersInfo(2)
self._showFinishedProcessEndNotify(2)
self.__p03_processDownloader(3)
self._showFinishedProcessEndNotify(3)
self.__p04_processDatabaseAsCard(4)
self._showFinishedProcessEndNotify(4)
self.__p05_processCardAsPost(5)
self._showFinishedProcessEndNotify(5)
self.__processDonloadPhotos(6)
self._showFinishedProcessEndNotify(6)
self.__processDataft(7)
self._showFinishedProcessEndNotify(7)
self.__p08_processAlbumCount(8)
self._showFinishedProcessEndNotify(8)
print('ENDT$EST')
exit()
def __px0_initDirectory(self):
self.__mkdirClass = FBTAMkdir(self._settings, self._configs)
self.__mkdirClass.startProjectDir()
def __p00_generateMasterNode(self, step):
if self._isInTestStep(step):
self.__node_master = FBTANodeMaster(self._settings, self._configs)
self.__node_master.start()
def __p01_processYearBox(self, step):
if self._isInTestStep(step):
self.__node_yearbox = FBTAYearBox(self.__node_master)
cond = self._settings.renew_index
cond = cond or not self.__node_yearbox.hasYearboxFile(self._settings.dir_data_path)
if cond:
self.__node_yearbox.run()
self.__node_yearbox.save(self._settings.dir_data_path)
else:
self.__node_yearbox.load(self._settings.dir_data_path)
def __p02_processsClustersInfo(self, step):
if self._isInTestStep(step):
self.__node_cluster_info = FBTAClusterInfo(self._settings, self._configs, self.__node_yearbox)
self.__node_cluster_info.run()
def __p03_processDownloader(self, step):
if self._isInTestStep(step):
# Step01 Download Activity
dl = FBTAHistoryDownloadManager(self.__node_master,
self.__node_cluster_info.clusters)
dl.main()
def __p04_processDatabaseAsCard(self, step):
if self._isInTestStep(step):
analysis = FBTAActivityToCardsNew(self._settings, self._configs)
analysis.main()
def __p05_processCardAsPost(self, step):
if self._isInTestStep(step):
order = FBTACardsDownloadManager(self.__node_master)
order.main()
def __processDonloadPhotos(self, step):
if self._isInTestStep(step):
photos = FBTAPhotosDownloadManager(self.__node_master)
photos.main()
def __p08_processAlbumCount(self, step):
if self._isInTestStep(step):
album_count = FBTAAlbumCountManager(self.__node_master)
album_count.main()
def __processDataft(self, step):
if self._isInTestStep(step):
dataft = FBTADataft(self.__node_master)
dataft.main()
|
[
"[email protected]"
] | |
c6b360f08562aaddf5900e08cd01d476537105f1
|
0edb94d9de7222d31ac8350a8cc330179f69ef60
|
/urls.py
|
5b7e936fb82222a7f0a25bc6333ac4cee7b25143
|
[] |
no_license
|
ondrejsika/django-1.6-blank-project
|
0f503fd661ec38fd3a9977d2e8fb4772d1c6da80
|
51b59c0a3102d8601c0490d2ee0e3b65afee0b33
|
refs/heads/master
| 2016-09-11T00:06:38.522221 | 2015-04-12T18:54:28 | 2015-04-12T18:54:28 | 26,711,648 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 305 |
py
|
# Django 1.6-era URL configuration (uses the `patterns` helper, removed in 1.10).
from django.conf.urls import patterns, include, url
from django.contrib import admin
# Discover ModelAdmin registrations from all installed apps.
admin.autodiscover()
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'twistedexample.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    # Admin site mounted at /admin/.
    url(r'^admin/', include(admin.site.urls)),
)
|
[
"[email protected]"
] | |
1db358528dfe7eb150bfcf52b137cce3df1bb254
|
027dd49b92ee92c8faa5ea05bce95d28efd2268d
|
/Documents/django/crudView/crudapp/migrations/0001_initial.py
|
03b4d87a72cb9fdab2c706d0b7c2ab583aa93a89
|
[] |
no_license
|
arunkumar27-ank-tech/RestAPIcrud
|
0ac06a4f0b6cf3373eb76b815e3cd6c5748610d5
|
387c5fad78f4b72cfbbe47d06e79c1a15038ad69
|
refs/heads/master
| 2023-06-13T21:44:08.157685 | 2021-07-06T14:11:12 | 2021-07-06T14:11:12 | 383,477,411 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 552 |
py
|
# Generated by Django 3.1.5 on 2021-07-06 06:19
# Auto-generated initial migration: creates the Todo table.
from django.db import migrations, models
class Migration(migrations.Migration):
    # First migration of this app — no dependencies on other migrations.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Todo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('completed', models.BooleanField(default=False)),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
0527bee5e87be348d59d9a2dceebb0b42f5a6ea2
|
c2be395eac600d0d853de03cd67070bd8391038f
|
/ofm_request_reverse_rd/__manifest__.py
|
39a1213f2bec2e986a4933fa317933ec0a2efee5
|
[] |
no_license
|
amendoncabh/salary_emp
|
960cfdb4df48df70ab361886039c790840a5e8d2
|
2ac2dd9461271153cb2ee406bf70a29f614c25f1
|
refs/heads/master
| 2022-03-30T22:35:10.704092 | 2020-01-05T16:23:20 | 2020-01-05T16:23:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,226 |
py
|
# -*- coding: utf-8 -*-
# © <YEAR(S)> <AUTHOR(S)>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
"name": "Trinity Roots :: OFM Request Approve Reverse RD",
"summary": "For updating related project modules",
"version": "8.0.1.0.0",
"category": "Uncategorized",
"description": """
MODULE
======
* This module MUST be depended by related project module.
* If this module is updated, All related module will be updated too.
""",
"website": "http://www.trinityroots.co.th/",
"author": "Trinity Roots",
"license": "AGPL-3",
"application": False,
"installable": True,
"external_dependencies": {
"python": [],
"bin": [],
},
# any module necessary for this one to work correctly
'depends': [
'base',
'web_notify',
'pos_customize',
'ofm_inventory_ext',
'tr_core_update',
],
# always loaded
'data': [
'security/request_reverse_rd_security.xml',
'security/ir.model.access.csv',
'views/ofm_request_reverse_view.xml',
'views/stock_view.xml',
'wizard/reason_reject_wizard_view.xml',
'wizard/reason_approve_wizard_view.xml',
],
}
|
[
"[email protected]"
] | |
2aa8324aee23f64603e3406c3de9441e9cb98c51
|
4b4544e5860bf2776ef578ba8e91dd34a9cf2b80
|
/nodejs/patches/pkgsrc/lang/nodejs/patches/patch-deps_cares_cares.gyp
|
ba1548a9de0c9d65a856346f95ff4d5904181d81
|
[
"CC0-1.0"
] |
permissive
|
nabla-containers/rumprun-packages
|
1e00e5cf0b6995f1772e8dff6b20d7d064ac71cf
|
687c6dab278ff3dba68b914e1ed0511eb5525551
|
refs/heads/solo5
| 2021-07-08T10:42:24.436007 | 2019-02-21T22:39:36 | 2019-02-21T22:43:57 | 137,268,640 | 1 | 4 |
NOASSERTION
| 2019-02-20T02:29:18 | 2018-06-13T20:44:12 |
Makefile
|
UTF-8
|
Python
| false | false | 647 |
gyp
|
$NetBSD: patch-deps_cares_cares.gyp,v 1.1 2013/05/22 15:17:07 mspo Exp $
Add support for NetBSD.
--- deps/cares/cares.gyp.orig 2013-03-14 10:55:24.000000000 +0900
+++ deps/cares/cares.gyp 2013-03-14 10:55:47.000000000 +0900
@@ -140,6 +140,10 @@
'include_dirs': [ 'config/freebsd' ],
'sources': [ 'config/freebsd/ares_config.h' ]
}],
+ [ 'OS=="netbsd"', {
+ 'include_dirs': [ 'config/netbsd' ],
+ 'sources': [ 'config/netbsd/ares_config.h' ]
+ }],
[ 'OS=="openbsd"', {
'include_dirs': [ 'config/openbsd' ],
'sources': [ 'config/openbsd/ares_config.h' ]
|
[
"[email protected]"
] | |
def2133f683035964fdbf030fa9a9bec0085cb22
|
f1fcaf58e53792db786bf6ffb87f67b815ed600e
|
/Chapter8.py
|
4e1e60f78cbb1e010b37949f78d483331693bc96
|
[] |
no_license
|
stephenosullivan/effective-python3
|
8e414d0aa64eb2a599ba661056809830b6e4a39f
|
c933b3f80021f9ba3d1f0ad608f563a106d89bd8
|
refs/heads/master
| 2021-01-13T07:39:56.418989 | 2015-10-04T01:27:26 | 2015-10-04T01:27:26 | 39,714,317 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,323 |
py
|
__author__ = 'stephenosullivan'
class Item55:
    """
    Use repr Strings for debugging output
    """
    def __init__(self):
        # str shows the plain value; repr shows a quoted, eval-able form.
        a = "string"
        print(a)
        print(repr(a))
        # repr round-trips through eval for simple literals.
        print(eval(repr(a)))
        print('%r' % a)
        # Opaque has no __repr__, so it prints as <...Opaque object at 0x...>.
        a = Opaque(5,4)
        print(a)
        # BetterClass defines __repr__, so it prints "BetterClass(6, 7)".
        b = BetterClass(6,7)
        print(b)
        # __dict__ is a cheap way to inspect attributes of repr-less objects.
        print(a.__dict__)
class Opaque:
    """Point-like demo class with no __repr__ (prints as a default object)."""

    def __init__(self, x, y):
        self.x, self.y = x, y
class BetterClass:
    """Demo class whose __repr__ describes how to rebuild the instance."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        # Same output as the original "%s, %s" formatting.
        return "BetterClass({}, {})".format(self.x, self.y)
class Item56:
    """
    Test everything with unittest
    """
    def __init__(self):
        # Placeholder: the actual demonstration lives in UtilsTestCase below.
        return
def to_str(data):
    """Return *data* as str, decoding bytes as UTF-8; raise TypeError otherwise."""
    if isinstance(data, bytes):
        return data.decode('utf-8')
    if isinstance(data, str):
        return data
    raise TypeError('Must supply string or bytes, found: %r' % data)
from unittest import TestCase, main
class UtilsTestCase(TestCase):
    # Unit tests for to_str(): str passthrough, bytes decoding, type errors.
    def test_to_str_bytes(self):
        self.assertEqual('hello', to_str(b'hello'))
    def test_to_str_str(self):
        self.assertEqual('hello', to_str('hello'))
    def test_to_str_bad(self):
        # Anything that is neither str nor bytes must raise TypeError.
        self.assertRaises(TypeError, to_str, object())
if __name__ == "__main__":
sol = Item55()
main()
|
[
"[email protected]"
] | |
4344b251328ece82d57f22c21563a169e723a2c2
|
f94e54d3085cd07a6f4972f2111574ad95fe4d89
|
/utils/iotools.py
|
406433fab0cdf5f54f662d8821bdadfae2017c15
|
[] |
no_license
|
banskt/statsfpl
|
b4e67ca4ed09a8cdc927ec4cb4ad570d891ad395
|
b442208fa4d07e3a097445c75a4fd2f8098440ff
|
refs/heads/master
| 2021-06-30T01:54:05.461439 | 2020-09-07T09:41:04 | 2020-09-07T09:41:04 | 143,441,341 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,469 |
py
|
import numpy as np
import collections
import csv
from utils.containers import FixtureInfo
def get_fixtures(filename, scores):
    """Parse a 38-gameweek fixtures CSV into per-gameweek FixtureInfo maps.

    Each CSV row is: team name, then 38 cells like "OPP (H)" / "OPP (A)".
    Returns a list of 38 dicts mapping team -> FixtureInfo.
    NOTE(review): assumes scores[team][opp] has 'H'/'A' keys as produced by
    get_scores() — confirm with callers.
    """
    fixtures = [{} for x in range(38)]
    with open(filename) as csvfile:
        instream = csv.reader(csvfile, delimiter = ',')
        for row in instream:
            team = row[0].strip()
            teamscores = scores[team]
            for gw in range(1, 39):
                opp = row[gw].split()[0].strip()
                # Second token is "(H)" or "(A)"; index [1] extracts the letter.
                loc = row[gw].split()[1].strip()[1]
                athome = False
                if loc == 'H':
                    athome = True
                score = teamscores[opp][loc]
                fixtures[gw - 1][team] = FixtureInfo(gegen = opp, athome = athome, prob = score)
    return fixtures
def convert_scores_mat(sdict, teams, nanval = 0.5):
    """Min-max normalise the nested H/A score dict into two (n, n) matrices.

    Returns (home, away) numpy arrays scaled jointly to [0, 1], with the
    diagonal (a team against itself) pinned to *nanval*.
    """
    n = len(teams)
    home = np.empty((n, n))
    away = np.empty((n, n))
    for i, team in enumerate(teams):
        for j, opp in enumerate(teams):
            home[i, j] = sdict[team][opp]['H']
            away[i, j] = sdict[team][opp]['A']
    # Joint min/max so both venues share one scale.
    lo = min(home.min(), away.min())
    hi = max(home.max(), away.max())
    span = hi - lo
    home = (home - lo) / span
    away = (away - lo) / span
    # Self-matches are meaningless; pin them to the neutral value.
    home[np.diag_indices_from(home)] = nanval
    away[np.diag_indices_from(away)] = nanval
    return home, away
def get_scores(filename, nanval = 0.5):
    """Read per-team attack/defence points from a CSV and build normalised scores.

    Returns (teams, scores) where scores[team][opp]['H'|'A'] is a min-max
    normalised strength estimate in [0, 1]; self-matches are pinned to
    *nanval* by convert_scores_mat.
    """
    scores = {}
    points = {}
    teams = list()
    # Column indices in the CSV after the team name.
    ATTACKH = 0
    ATTACKA = 1
    DEFENDH = 2
    DEFENDA = 3
    with open(filename) as csvfile:
        instream = csv.reader(csvfile, delimiter = ',')
        # Skip the header row.
        next(instream, None)
        for row in instream:
            team = row[0].strip()
            teams.append(team)
            points[team] = [float(x.strip()) for x in row[1:]]
    for team in teams:
        scores[team] = {}
        for opp in teams:
            scores[team][opp] = {}
            if opp == team:
                scores[team][opp]['H'] = 0
                scores[team][opp]['A'] = 0
            else:
                # Raw strength: own defence minus opponent's attack, per venue.
                scores[team][opp]['H'] = points[team][DEFENDH] - points[opp][ATTACKA]
                scores[team][opp]['A'] = points[team][DEFENDA] - points[opp][ATTACKH]
    home, away = convert_scores_mat(scores, teams, nanval = nanval)
    # Write the normalised matrix values back into the nested dict.
    for i, team in enumerate(teams):
        for j, opp in enumerate(teams):
            scores[team][opp]['H'] = home[i, j]
            scores[team][opp]['A'] = away[i, j]
    return teams, scores
|
[
"[email protected]"
] | |
decc14ec6c9e00b0fbed6e000b45d1b1efb74fa2
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2335/60705/241811.py
|
ab11ecca95033a28315b69c996ee7f5b73163e7e
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 215 |
py
|
# Count the minimum number of rounds needed to reach y from x when every
# round expands the reachable set with {2*i, i-1} for each known value.
x = int(input())
y = int(input())
count = 0
s = {x}
# Idiom fix: the original tested `s.__contains__(y)` and copied s2 into s
# with a manual loop; `y not in s` and a set union are the idiomatic forms.
while y not in s:
    nxt = {2 * i for i in s} | {i - 1 for i in s}
    s |= nxt
    count += 1
print(count)
|
[
"[email protected]"
] | |
a30badd10e968213b68d1cab709d7f6258ff4478
|
921c29354a9065a4f76f816c2b2ec68457f66aef
|
/todo/tests/test_task.py
|
e4752471e00904681378c98b7d75e47dcc6c54c8
|
[] |
no_license
|
AmrAnwar/ToDoList
|
520fa0529090183832dfd8c274fb3e7dad4d7a3b
|
de5e9e9887dee857e6169184aa9c7b74f31d32c4
|
refs/heads/master
| 2020-04-11T15:51:39.869491 | 2018-12-15T17:20:11 | 2018-12-15T17:20:11 | 161,905,711 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 821 |
py
|
from .test_init import InitTest
class TestList(InitTest):
    # Permission tests for the task detail view.
    def setUp(self):
        super(TestList, self).setUp()
    def test_get_task(self):
        # Anonymous request: the view hides the task (404).
        res = self.client.get(self.task.get_absolute_url())
        self.assertEqual(res.status_code, 404)
        # "anwar" is logged in but still gets 404 — presumably not the task
        # owner. NOTE(review): confirm ownership fixture in InitTest.
        self.client.login(username="anwar", password="password")
        res = self.client.get(self.task.get_absolute_url())
        self.assertEqual(res.status_code, 404)
        # "guest" can see the task.
        self.client.login(username="guest", password="password")
        res = self.client.get(self.task.get_absolute_url())
        self.assertEqual(res.status_code, 200)
    def test_update(self):
        # POSTs a new title as the authorised user; no assertion on the
        # response in the original.
        self.client.login(username="guest", password="password")
        data = {
            "title": "test-title"
        }
        self.client.post(self.task.get_absolute_url(), data=data)
|
[
"[email protected]"
] | |
8de9d49675be983416774ae4bf4609d2d1d95145
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/142/usersdata/227/62295/submittedfiles/av2_p3_civil.py
|
280c0440154f4d960bd1fc3ba353a60f8deb5e93
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 633 |
py
|
# -*- coding: utf-8 -*-
def media(a):
    """Return the arithmetic mean of *a* (ZeroDivisionError on empty input)."""
    return sum(a) / len(a)

# Remaining functions (original note: "ESCREVA AS DEMAIS FUNCOES" / "write
# the other functions").
def somaA(x, y):
    """Return the covariance numerator: sum((x[i]-mean(x)) * (y[i]-mean(y))).

    Bug fix: the original computed (x[i]-mx)*y[i] - my because of a
    misplaced parenthesis in ``(y[i])-my``.
    """
    mx = media(x)
    my = media(y)
    soma = 0
    for i in range(len(x)):
        soma += (x[i] - mx) * (y[i] - my)
    return soma
def entradaLista(n):
    """Read *n* floats from stdin (one prompt per value) and return them as a list."""
    a = []
    for i in range(0,n,1):
        valor = float(input('Digite um valor: '))
        a.append(valor)
    return (a)
def somaD(a):
    """Sum of squared deviations of *a* from its mean.

    Added because the original final expression referenced undefined names
    somaD/somad.
    """
    m = media(a)
    return sum((v - m) ** 2 for v in a)

n = int(input('Digite o tamanho da lista: '))
x = entradaLista(n)
y = entradaLista(n)
# Pearson correlation coefficient. Fixes from the original line:
# unbalanced parentheses, the tuple typo ``**(0,5)`` (should be ** 0.5),
# and the undefined somaD/somad helpers.
p = somaA(x, y) / ((somaD(x) * somaD(y)) ** 0.5)
print('%.4f' % p)
|
[
"[email protected]"
] | |
e7f4f24803a27a38a46f361243a674a5236a571a
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03274/s010264861.py
|
c2235d0bdb18733aa448f6ca2a63b3cad841e71a
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 463 |
py
|
import collections
# Python 2 solution. Appears to be the "pick k points on a line starting
# from 0" problem: choose i points on the non-negative side and k-i on the
# negative side; walking cost is min(2*right + left, right + 2*left)
# depending on which direction is visited first. TODO confirm problem.
n,k = map(int, raw_input().split(' '))
xis = map(int, raw_input().split(' '))
# Non-negative coordinates (input assumed ascending).
ais = [xi for xi in xis if xi >= 0]
# Negative coordinates as positive distances, nearest to origin first.
bis = [-xi for xi in xis if xi < 0][::-1]
# One-sided candidates: all k points to the right, or all to the left.
m = ais[k -1] if k-1 < len(ais) else +float('inf')
m = min(m, bis[k -1] if k-1 < len(bis) else +float('inf'))
for i in range(len(ais)):
    if i + 1 == k: break
    if 0 <= k - (i+1) -1 < len(bis):
        # Mixed: i+1 right points and k-(i+1) left points; the doubled term
        # is the side walked first (out and back).
        m = min(m, 2*ais[i] + bis[k - (i+1) -1])
        m = min(m, ais[i] + 2*bis[k - (i+1) -1])
print m
|
[
"[email protected]"
] | |
75687bab192a3f68f275a053b3ee4aa69bc1955b
|
523fb785bda41e33546c929a5c2de6c93f98b434
|
/专题学习/树/BinaryTreePathDivideConquer.py
|
63bc4f925a2a8db053b249b643773310f578e34c
|
[] |
no_license
|
lizhe960118/TowardOffer
|
afd2029f8f9a1e782fe56ca0ff1fa8fb37892d0e
|
a0608d34c6ed96c9071cc3b9bdf70c95cef8fcbd
|
refs/heads/master
| 2020-04-27T10:33:21.452707 | 2019-05-02T10:47:01 | 2019-05-02T10:47:01 | 174,259,297 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,084 |
py
|
"""
Definition of TreeNode:
"""
class TreeNode:
    """Minimal binary-tree node: a value plus left/right child links."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None
class Solution:
    """
    @param root: the root of the binary tree
    @return: all root-to-leaf paths
    """
    def binaryTreePaths(self, root):
        # Divide and conquer: prefix this node's value onto every path of
        # both subtrees; a leaf contributes just its own value.
        if root is None:
            return []
        prefix = str(root.val)
        subpaths = self.binaryTreePaths(root.left) + self.binaryTreePaths(root.right)
        if not subpaths:
            return [prefix]
        return [prefix + '->' + path for path in subpaths]
if __name__ == '__main__':
    # Sample tree:      1
    #                  / \
    #                 2   3
    #                / \
    #               4   5
    node1 = TreeNode(1)
    node2 = TreeNode(2)
    node3 = TreeNode(3)
    node4 = TreeNode(4)
    node5 = TreeNode(5)
    node1.left = node2
    node1.right = node3
    node2.left = node4
    node2.right = node5
    root = node1
    # Expect ['1->2->4', '1->2->5', '1->3'].
    print(Solution().binaryTreePaths(root))
|
[
"[email protected]"
] | |
4eccb52479c5050a8cb64f03d50f62ec22ebf031
|
083d93a621f0fd411aabd9b1607e83aedd588d2c
|
/etg/propgridiface.py
|
847a0e054be21e3f253b5d29483057e83d7d49fc
|
[] |
no_license
|
jns4u/Phoenix
|
0a8e5b50326d37048aa58d11023308517ace525b
|
478e192ccf0d75a04b78c6600963614d1039dd53
|
refs/heads/master
| 2021-01-09T06:20:02.546100 | 2017-02-05T03:33:00 | 2017-02-05T03:33:00 | 80,965,252 | 1 | 0 | null | 2017-02-05T03:10:08 | 2017-02-05T03:10:08 | null |
UTF-8
|
Python
| false | false | 2,648 |
py
|
#---------------------------------------------------------------------------
# Name: etg/propgridiface.py
# Author: Robin Dunn
#
# Created: 23-Feb-2015
# Copyright: (c) 2015 by Total Control Software
# License: wxWindows License
#---------------------------------------------------------------------------
import etgtools
import etgtools.tweaker_tools as tools
PACKAGE = "wx"
MODULE = "_propgrid"
NAME = "propgridiface" # Base name of the file to generate to for this script
DOCSTRING = ""
# The classes and/or the basename of the Doxygen XML files to be processed by
# this script.
ITEMS = [ 'wxPGPropArgCls',
'wxPropertyGridInterface',
]
#---------------------------------------------------------------------------
def run():
    """Generate the wxPython Phoenix wrapper code for the propgridiface module."""
    # Parse the XML file(s) building a collection of Extractor objects
    module = etgtools.ModuleDef(PACKAGE, MODULE, NAME, DOCSTRING)
    etgtools.parseDoxyXML(module, ITEMS)
    #-----------------------------------------------------------------
    # Tweak the parsed meta objects in the module object as needed for
    # customizing the generated code and docstrings.
    c = module.find('wxPGPropArgCls')
    assert isinstance(c, etgtools.ClassDef)
    # Drop the first GetPtr overload; presumably ambiguous for the bindings.
    c.find('GetPtr').overloads[0].ignore()
    c = module.find('wxPropertyGridInterface')
    c.abstract = True
    # Const iterators cannot be wrapped usefully; keep only the mutable one.
    for m in c.findAll('GetIterator'):
        if m.type == 'wxPropertyGridConstIterator':
            m.ignore()
    # Python has no separate int/bool/longlong/object value overloads.
    c.find('SetPropertyValue').findOverload('int value').ignore()
    c.find('SetPropertyValue').findOverload('bool value').ignore()
    c.find('SetPropertyValue').findOverload('wxLongLong_t value').ignore()
    c.find('SetPropertyValue').findOverload('wxULongLong_t value').ignore()
    c.find('SetPropertyValue').findOverload('wxObject *value').ignore()
    module.addItem(
        tools.wxArrayPtrWrapperTemplate('wxArrayPGProperty', 'wxPGProperty', module))
    # wxPGPropArg is a typedef for "const wxPGPropArgCls&" so having the
    # wrappers treat it as a normal type can be problematic. ("new cannot be
    # applied to a reference type", etc.) Let's just ignore it an replace it
    # everywhere for the real type.
    module.find('wxPGPropArg').ignore()
    for item in module.allItems():
        if hasattr(item, 'type') and item.type == 'wxPGPropArg':
            item.type = 'const wxPGPropArgCls &'
    #-----------------------------------------------------------------
    tools.doCommonTweaks(module)
    tools.runGenerators(module)
#---------------------------------------------------------------------------
if __name__ == '__main__':
run()
|
[
"[email protected]"
] | |
f613d66153900cdfab69753db317f2b3e2792278
|
64c8d431c751b1b7a7cb7224107ee40f67fbc982
|
/code/python/external/pi3d/constants/__init__.py
|
a0dfd912eed894eb189c79ca89c51473d892341d
|
[
"MIT"
] |
permissive
|
silky/echomesh
|
6ac4755e4ff5ea3aa2b2b671c0979068c7605116
|
2fe5a00a79c215b4aca4083e5252fcdcbd0507aa
|
refs/heads/master
| 2021-01-12T20:26:59.294649 | 2013-11-16T23:29:05 | 2013-11-16T23:29:05 | 14,458,268 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,310 |
py
|
from __future__ import absolute_import, division, print_function, unicode_literals
"""
pi3d.constants contains constant values, mainly integers, from OpenGL ES 2.0.
"""
VERSION = '0.06'
STARTUP_MESSAGE = """
Pi3D module - version %(version)s
Copyright (c) Tim Skillman, 2012-2013
Copyright (c) Patrick Gaunt, 2012-2013
Copyright (c) Tom Ritchford, 2012-2013
Updates available from www.github.com/tipam/pi3d
""" % {'version': VERSION}
VERBOSE = False
# TODO: get rid of verbose in favor of logging.
# Pick up our constants extracted from the header files with prepare_constants.py
from pi3d.constants.egl import *
from pi3d.constants.gl2 import *
from pi3d.constants.gl2ext import *
from pi3d.constants.gl import *
# Define some extra constants that the automatic extraction misses.
EGL_DEFAULT_DISPLAY = 0
EGL_NO_CONTEXT = 0
EGL_NO_DISPLAY = 0
EGL_NO_SURFACE = 0
DISPMANX_PROTECTION_NONE = 0
# Lastly, load the libraries.
def _load_library(name):
    """Try to load a shared library, report an error on failure.

    Returns the ctypes.CDLL handle, or None (implicitly) when loading fails.
    """
    try:
        import ctypes
        return ctypes.CDLL('lib%s.so' % name)
    except:
        # Broad except is deliberate here: any failure (missing library,
        # wrong architecture) is logged via echomesh instead of aborting
        # the import of this module.
        from echomesh.util import Log
        Log.logger(__name__).error("Couldn't load library %s" % name)
bcm = _load_library('bcm_host')
opengles = _load_library('GLESv2')
openegl = _load_library('EGL')
|
[
"[email protected]"
] | |
49acb7c799821f6f485dc8243c3203145bd9385f
|
c6db8eccba0f863e464fa23e7c8c5f27d6da277b
|
/CS/Programming_Languages/Python/Modules/exterior/topics/gui/dearPyGUI/tutorials/_3_item_usage/_3_3_configuration_state_info/configure_items.py
|
ea4e913f1183433230106f6806d466fcd30d277d
|
[] |
no_license
|
corridda/Studies
|
ceabb94f48bd03a31e4414e9af841d6a9b007cf9
|
1aacf52f2762e05a416c9e73ebe20794cb5d21cf
|
refs/heads/master
| 2023-02-05T18:51:04.217528 | 2023-01-28T09:21:03 | 2023-01-28T09:21:03 | 216,492,726 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 551 |
py
|
import dearpygui.dearpygui as dpg
# Dear PyGui demo: item configuration at creation time vs. after creation.
dpg.create_context()
with dpg.window(label="Tutorial"):
    # configuration set when button is created
    dpg.add_button(label="Apply", width=300)
    # user data and callback set any time after button has been created
    btn = dpg.add_button(label="Apply 2")
    # Reconfigure the existing item via set_item_* helpers.
    dpg.set_item_label(btn, "Button 57")
    dpg.set_item_width(btn, 200)
# Debug window listing every registered item.
dpg.show_item_registry()
dpg.create_viewport(title='Custom Title', width=800, height=600)
dpg.setup_dearpygui()
dpg.show_viewport()
# Blocks until the viewport window is closed.
dpg.start_dearpygui()
dpg.destroy_context()
|
[
"[email protected]"
] | |
2d2a0919eaf9d4900549e260e76a29a86aff5212
|
9f1b8a1ada57198e2a06d88ddcdc0eda0c683df7
|
/submission - lab9/set 2/VICTORIA ALEXANDRA ALERS_19376_assignsubmission_file_Lab9/VICTORIA ALEXANDRA ALERS_19376_assignsubmission_file_Lab9.py
|
23b6a370b40815a0efeb963db12ada5ea16a12bf
|
[] |
no_license
|
sendurr/spring-grading
|
90dfdced6327ddfb5c311ae8f42ae1a582768b63
|
2cc280ee3e0fba02e95b6e9f45ad7e13bc7fad54
|
refs/heads/master
| 2020-04-15T17:42:10.781884 | 2016-08-29T20:38:17 | 2016-08-29T20:38:17 | 50,084,068 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 674 |
py
|
#1________________________________
from math import sin, exp
class F:
    """Parameterised sine with a constant damping factor.

    value(x) = exp(-a * w) * sin(w * x).  Note the exponent uses a*w (not
    a*x), exactly as in the original.
    """

    def __init__(self, a, w):
        self.a = a
        self.w = w

    def value(self, x):
        return exp(-self.a * self.w) * sin(self.w * x)
from math import pi
# Demo: evaluate at x=pi, then mutate the damping coefficient and re-evaluate.
f = F(a=1.0, w=0.1)
print (f.value(x=pi))
f.a = 2
print (f.value(pi))
#2---------------------------------------------
class Simple:
    """Holds one value *i*; double() replaces it with i + i.

    Works for any type supporting +: ints double numerically, strings
    concatenate with themselves.
    """

    def __init__(self, i):
        self.i = i

    def double(self):
        # Rebind rather than mutate, as the original did.
        self.i = self.i + self.i
s1=Simple(4)
# Doubling 4 four times: 4 -> 8 -> 16 -> 32 -> 64.
for i in range(4):
    s1.double()
print(s1.i)
s2=Simple('Hello')
# Strings concatenate: 'Hello' -> 'HelloHello' -> 'HelloHelloHelloHello'.
s2.double(); s2.double()
print(s2.i)
# Attributes are public; direct reassignment is allowed.
s2.i=100
print(s2.i)
|
[
"[email protected]"
] | |
db65f69a9e0e554a65106f54ff445628c3458f7c
|
839d8d7ccfa54d046e22e31a2c6e86a520ee0fb5
|
/icore/high/thread/thread_queue.py
|
7e1d562ecf087f475cb24370b431819ad85ae3b5
|
[] |
no_license
|
Erich6917/python_corepython
|
7b584dda737ef914780decca5dd401aa33328af5
|
0176c9be2684b838cf9613db40a45af213fa20d1
|
refs/heads/master
| 2023-02-11T12:46:31.789212 | 2021-01-05T06:21:24 | 2021-01-05T06:21:24 | 102,881,831 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,484 |
py
|
# -*- coding: utf-8 -*-
# @Time : 2017/12/29
# @Author : LIYUAN134
# @File : thread_queue.py
# @Commment:
#
# -*- coding: UTF-8 -*-
import Queue
import threading
import time
exitFlag = 0
class myThread(threading.Thread):
    # Worker thread: repeatedly pulls items from the shared queue until the
    # module-level exitFlag is set (see process_data below).
    def __init__(self, threadID, name, q):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.q = q
    def run(self):
        print "Starting " + self.name
        process_data(self.name, self.q)
        print "Exiting " + self.name
def process_data(threadName, q):
    # Poll until exitFlag is set by the main thread.
    # NOTE(review): checks workQueue.empty() (the global) but gets from the
    # parameter q — they are the same object in this script, but the mix is
    # fragile.
    while not exitFlag:
        queueLock.acquire()
        if not workQueue.empty():
            data = q.get()
            queueLock.release()
            print "%s processing %s" % (threadName, data)
        else:
            # Nothing to do: release the lock and back off for a second.
            queueLock.release()
            time.sleep(1)
threadList = ["Thread-1", "Thread-2", "Thread-3"]
nameList = ["One", "Two", "Three", "Four", "Five"]
queueLock = threading.Lock()
workQueue = Queue.Queue(10)
threads = []
threadID = 1
# 创建新线程
for tName in threadList:
thread = myThread(threadID, tName, workQueue)
thread.start()
threads.append(thread)
threadID += 1
# 填充队列
queueLock.acquire()
for word in nameList:
workQueue.put(word)
queueLock.release()
# 等待队列清空
while not workQueue.empty():
pass
# 通知线程是时候退出
exitFlag = 1
# 等待所有线程完成
for t in threads:
t.join()
print "Exiting Main Thread"
|
[
"[email protected]"
] | |
d5cd69bc39db446dab3c1bfa0714fd10795d9b13
|
107941a50c3adc621563fe0254fd407ea38d752e
|
/spider_03.py
|
ff843e21ab654378dec18c1fae8d152647acbf11
|
[] |
no_license
|
zhangliang852469/spider_
|
758a4820f8bd25ef6ad0edbd5a4efbaaa410ae08
|
718208c4d8e6752bbe8d66a209e6d7446c81d139
|
refs/heads/master
| 2020-04-05T07:12:03.790358 | 2018-11-08T07:17:22 | 2018-11-08T07:17:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,034 |
py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""节点交互 """
from selenium import webdriver
import time
# browser = webdriver.Chrome()
# browser.get('https://www.taobao.com')
# input = browser.find_element_by_id('q')
# input.send_keys('iPhone')
# time.sleep(1)
# input.clear()
# input.send_keys('iPad')
# button = browser.find_element_by_class_name('btn-search')
# button.click()
"""在这里我们首先驱动浏览器打开淘宝,然后用 find_element_by_id() 方法获取输入框,
然后用 send_keys() 方法输入 iPhone 文字,等待一秒之后用 clear() 方法清空输入框,
再次调用 send_keys() 方法输入 iPad 文字,之后再用 find_element_by_class_name()
方法获取搜索按钮,最后调用 click() 方法完成搜索动作。"""
browser = webdriver.Chrome()
browser.get('https://www.taobao.com')
input = browser.find_element_by_id('q')
input.send_keys('iPone')
time.sleep(1)
input.clear()
input.send_keys('iPad')
button = browser.find_element_by_class_name('btn-search')
button.click()
|
[
"[email protected]"
] | |
f46477242fa911d6e3c8332e24eb1cc7e38b0750
|
99d436394e47571160340c95d527ecadaae83541
|
/algorithms_questions/ch14_sorting/q26_2.py
|
1d3dbab643ddebf9f47062db3e7538e2d0eb1102
|
[] |
no_license
|
LeeSeok-Jun/Algorithms
|
b47ba4de5580302e9e2399bcf85d245ebeb1b93d
|
0e8573bd03c50df3f89dd0ee9eed9cf8716ef8d8
|
refs/heads/main
| 2023-03-02T06:47:20.939235 | 2021-02-08T05:18:24 | 2021-02-08T05:18:24 | 299,840,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 592 |
py
|
"""
카드 정렬하기 - 3회차
"""
# 풀이 제한 시간 : 30분
# 2021/01/21 14:57 ~ 15:15
# 실패 - 틀린 부분 주석 처리
import heapq
n = int(input())
data = []
for _ in range(n):
heapq.heappush(data, int(input()))
"""
sum_value = heapq.heappop(data)
while data:
now = heapq.heappop(data)
sum_value += now
heapq.heappush(data, sum_value)
print(sum_value)
"""
result = 0
while len(data) != 1:
one = heapq.heappop(data)
two = heapq.heappop(data)
sum_value = one + two
result += sum_value
heapq.heappush(data, result)
print(result)
|
[
"[email protected]"
] | |
84a44293453107c4c6dd00597d3f3f1c970b6484
|
de4e8e0f33dbd8bb39784907b420f05b2d62f65a
|
/test/test_sub_step_type.py
|
e57e83c7a9007b51213a5ff59a81bf9107ecdcc5
|
[
"BSD-3-Clause"
] |
permissive
|
hpcc-systems/uptrends-python
|
489d7b513d1eeaf57569081363861010492a85e6
|
2e05ba851a4e65bde3c40514f499c475465bef90
|
refs/heads/master
| 2022-11-15T05:32:38.638456 | 2020-07-10T18:48:45 | 2020-07-10T18:48:45 | 256,216,110 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,093 |
py
|
# coding: utf-8
"""
Uptrends API v4
This document describes Uptrends API version 4. This Swagger environment also lets you execute API methods directly. Please note that this is not a sandbox environment: these API methods operate directly on your actual Uptrends account. For more information, please visit https://www.uptrends.com/api. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import uptrends
from uptrends.models.sub_step_type import SubStepType # noqa: E501
from uptrends.rest import ApiException
class TestSubStepType(unittest.TestCase):
    """SubStepType unit test stubs"""
    # Swagger-codegen generated placeholder; no fixtures needed yet.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testSubStepType(self):
        """Test SubStepType"""
        # FIXME: construct object with mandatory attributes with example values
        # model = uptrends.models.sub_step_type.SubStepType()  # noqa: E501
        pass
if __name__ == '__main__':
    unittest.main()
|
[
"[email protected]"
] | |
808df8fd000d1100b98a1532e9de2156af996c80
|
8ee9a85496208ed5f4331d437ec44cc17f7bce08
|
/FinalPractice/SemanticSegmentation/U_net/model.py
|
76320a94bd7d5507c70b0f20bc245ce78291b4fc
|
[] |
no_license
|
Ollitros/ComputerVision
|
aa93527ef0172874a0034b61d1cae6c31f514734
|
b0ec5d9f94406b4f8164d0ef7180226156ea1194
|
refs/heads/master
| 2020-04-03T19:23:06.898807 | 2019-06-13T04:56:29 | 2019-06-13T04:56:29 | 155,521,472 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,718 |
py
|
from tensorflow.keras.layers import Conv2D, MaxPool2D, UpSampling2D, \
Input, BatchNormalization, concatenate, Activation
from tensorflow.keras.models import Model
def model():
    """Build a U-Net for 128x128 RGB input with a 1-channel sigmoid output.

    Encoder filters 64/128/256/512 (two Conv-BN-ReLU units per level),
    1024-filter bottleneck, decoder mirrors the encoder with three conv
    units per level and skip connections. Returns an uncompiled Model.

    The original spelled every level out by hand; the graph below is
    identical, built by a small helper to remove the duplication.
    """
    def conv_block(x, filters, reps):
        # `reps` consecutive Conv(3x3, same) -> BatchNorm -> ReLU units.
        for _ in range(reps):
            x = Conv2D(filters, (3, 3), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
        return x

    inputs = Input(shape=(128, 128, 3))

    # Encoder: keep each level's pre-pool activation for the skip connection.
    skips = []
    x = inputs
    for filters in (64, 128, 256, 512):
        x = conv_block(x, filters, 2)
        skips.append(x)
        x = MaxPool2D()(x)

    # Bottleneck at 8x8.
    x = conv_block(x, 1024, 2)

    # Decoder: upsample, concatenate the matching skip (skip first, as in the
    # original), then three conv units per level.
    for filters, skip in zip((512, 256, 128, 64), reversed(skips)):
        x = UpSampling2D((2, 2))(x)
        x = concatenate([skip, x], axis=3)
        x = conv_block(x, filters, 3)

    classify = Conv2D(1, (1, 1), activation='sigmoid')(x)
    return Model(inputs=inputs, outputs=classify)
|
[
"[email protected]"
] | |
6c137c6126c25690c337197affaf147d9e37e27b
|
e38f7b5d46fd8a65c15e49488fc075e5c62943c9
|
/pychron/processing/fits/interpolation_fit_selector.py
|
d87ba575e0cfa7ff8c9751b69ae6c15e42f3c200
|
[] |
no_license
|
INGPAN/pychron
|
3e13f9d15667e62c347f5b40af366096ee41c051
|
8592f9fc722f037a61b0b783d587633e22f11f2f
|
refs/heads/master
| 2021-08-15T00:50:21.392117 | 2015-01-19T20:07:41 | 2015-01-19T20:07:41 | 111,054,121 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,377 |
py
|
#===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
#============= enthought library imports =======================
#============= standard library imports ========================
#============= local library imports ==========================
from pychron.processing.fits.fit import Fit
from pychron.processing.fits.fit_selector import FitSelector
from pychron.pychron_constants import FIT_TYPES_INTERPOLATE
class InterpolationFit(Fit):
    # Fit variant restricted to the interpolation fit types.
    def _get_fit_types(self):
        return FIT_TYPES_INTERPOLATE
class InterpolationFitSelector(FitSelector):
    # Selector preconfigured to offer only interpolation fits.
    fit_klass = InterpolationFit
    fit_types = FIT_TYPES_INTERPOLATE
#============= EOF =============================================
|
[
"[email protected]"
] | |
46780ffe28ee6581b83e37f84a8955507f9583fc
|
80ae9b5cfb45b6e9cf7873ef7c46e17e117e4019
|
/data/HackerRank-Mathematics/Constructing a Number.py
|
c87d42e4fdaa23fd44e0b8922ae34ab56bbcd61e
|
[] |
no_license
|
Ritvik19/CodeBook
|
ef7764d89b790e902ede5802f36d5ca910d8a50e
|
2b4ed7938bbf156553d6ba5cba6216449528f0fc
|
refs/heads/master
| 2021-07-04T08:25:52.478719 | 2020-08-08T06:54:14 | 2020-08-08T06:54:14 | 138,744,302 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 435 |
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
def canConstruct(a):
    """Return "Yes" if the digits in *a* can form a number divisible by 3.

    A number is divisible by 3 exactly when its digit sum is, and the
    digit order is irrelevant — so only the total of *a* matters.
    """
    total = sum(a)
    if total % 3 == 0:
        return "Yes"
    return "No"
if __name__ == '__main__':
    # HackerRank harness: answers go to the file named by the OUTPUT_PATH
    # environment variable, not stdout.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    t = int(input())  # number of test cases
    for t_itr in range(t):
        n = int(input())  # declared array length (parsed but otherwise unused)
        a = list(map(int, input().rstrip().split()))
        result = canConstruct(a)
        fptr.write(result + '\n')
    fptr.close()
|
[
"[email protected]"
] | |
4abb7fabbd57ff0a857464e0b5557d97d45f5452
|
7a9034fa0698e9b6481c5de35ffd91c96d7552e9
|
/personal_site/settings.py
|
744f94f86bf4b5ecbb9947fff3a52102ef65e017
|
[] |
no_license
|
k4u5h4L/personal_site
|
0e3144b62d9be0e08cf803cc5378c75f40425735
|
807867332e9bca759e2de8a28eb1840d2dd6a451
|
refs/heads/main
| 2023-02-07T07:52:11.031056 | 2020-12-19T16:36:38 | 2020-12-19T16:36:38 | 322,577,924 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,785 |
py
|
"""
Django settings for personal_site project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
import json
# Build paths inside the project like this: BASE_DIR / 'subdir'.
# Project root: two levels up from this settings module.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): secret key is committed in source control — rotate it and
# load from the environment or config.json before any production deploy.
SECRET_KEY = '&q_x8wc#6ahgx(yk58au#nide7=58-xd$h)^0=x-g)&r+=x)mb'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): '*' accepts any Host header; restrict to real hostnames
# in production.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project apps
    'portfolio',
    'users',
    'blog',
    # Third-party apps
    'django_filters',
    'crispy_forms',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'personal_site.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # No project-level template dirs; templates are resolved from each
        # app's templates/ directory (APP_DIRS=True).
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'personal_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# NOTE(review): collectstatic target points inside the blog app's own
# static/ directory — unusual; confirm this is intended rather than a
# dedicated top-level staticfiles/ dir.
STATIC_ROOT = os.path.join(BASE_DIR, "blog/static")
APPEND_SLASH = False
# User-uploaded files.
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
MEDIA_URL = '/media/'
# Swap Django's default User for the project's custom model.
AUTH_USER_MODEL = 'users.CustomUser'
# Named URL patterns used by the auth flow.
LOGIN_URL = 'landing_page'
LOGIN_REDIRECT_URL = 'home_page'
LOGOUT_REDIRECT_URL = 'landing_page'
# Outgoing mail via Gmail SMTP with STARTTLS (port 587).
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
# NOTE(review): path is relative to the current working directory, so this
# import-time read only works when the server is started from the project
# root — TODO confirm, or resolve against BASE_DIR instead.
with open(f'{os.getcwd()}/personal_site/config.json') as fp:
    email_cred = json.load(fp)
EMAIL_HOST_USER = email_cred['EMAIL_USR']
# NOTE(review): 'EMAI_PASSWD' looks like a typo for 'EMAIL_PASSWD', but the
# key must match config.json exactly — verify against that file before
# renaming either side.
EMAIL_HOST_PASSWORD = email_cred['EMAI_PASSWD']
EMAIL_USE_TLS = True
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.